// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 */

#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/falloc.h>
#include <linux/writeback.h>
#include <linux/compat.h>
#include <linux/slab.h>
#include <linux/btrfs.h>
#include <linux/uio.h>
#include <linux/iversion.h>
#include <linux/fsverity.h>
#include "ctree.h"
#include "direct-io.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "tree-log.h"
#include "locking.h"
#include "qgroup.h"
#include "compression.h"
#include "delalloc-space.h"
#include "reflink.h"
#include "subpage.h"
#include "fs.h"
#include "accessors.h"
#include "extent-tree.h"
#include "file-item.h"
#include "ioctl.h"
#include "file.h"
#include "super.h"

/*
 * Simple helper to fault in pages and copy. This should go away and be
 * replaced with calls into generic code.
 */
static noinline int btrfs_copy_from_user(loff_t pos, size_t write_bytes,
					 struct page **prepared_pages,
					 struct iov_iter *i)
{
	size_t copied = 0;
	size_t total_copied = 0;
	int pg = 0;
	int offset = offset_in_page(pos);

	while (write_bytes > 0) {
		size_t count = min_t(size_t,
				     PAGE_SIZE - offset, write_bytes);
		struct page *page = prepared_pages[pg];
		/*
		 * Copy data from userspace to the current page
		 */
		copied = copy_page_from_iter_atomic(page, offset, count, i);

		/* Flush processor's dcache for this page */
		flush_dcache_page(page);

		/*
		 * If we get a partial write, we can end up with partially
		 * up-to-date pages. These add a lot of complexity, so make
		 * sure they don't happen by forcing this copy to be retried.
		 *
		 * The rest of the btrfs_file_write code will fall back to
		 * page at a time copies after we return 0.
		 */
		if (unlikely(copied < count)) {
			if (!PageUptodate(page)) {
				iov_iter_revert(i, copied);
				copied = 0;
			}
			if (!copied)
				break;
		}

		write_bytes -= copied;
		total_copied += copied;
		offset += copied;
		if (offset == PAGE_SIZE) {
			pg++;
			offset = 0;
		}
	}
	return total_copied;
}

/*
 * Unlocks pages after btrfs_file_write is done with them.
 */
static void btrfs_drop_pages(struct btrfs_fs_info *fs_info,
			     struct page **pages, size_t num_pages,
			     u64 pos, u64 copied)
{
	size_t i;
	u64 block_start = round_down(pos, fs_info->sectorsize);
	u64 block_len = round_up(pos + copied, fs_info->sectorsize) - block_start;

	ASSERT(block_len <= U32_MAX);
	for (i = 0; i < num_pages; i++) {
		/*
		 * The page Checked flag is some magic around finding pages
		 * that have been modified without going through
		 * btrfs_set_page_dirty; clear it here. There should be no
		 * need to mark the pages accessed, as prepare_pages should
		 * have marked them accessed via find_or_create_page().
		 */
		btrfs_folio_clamp_clear_checked(fs_info, page_folio(pages[i]),
						block_start, block_len);
		unlock_page(pages[i]);
		put_page(pages[i]);
	}
}

/*
 * After btrfs_copy_from_user(), update the following things for delalloc:
 * - Mark newly dirtied pages as DELALLOC in the io tree.
 *   Used to advise which range is to be written back.
 * - Mark modified pages as Uptodate/Dirty and not needing COW fixup
 * - Update inode size for past EOF write
 */
int btrfs_dirty_pages(struct btrfs_inode *inode, struct page **pages,
		      size_t num_pages, loff_t pos, size_t write_bytes,
		      struct extent_state **cached, bool noreserve)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	int ret = 0;
	int i;
	u64 num_bytes;
	u64 start_pos;
	u64 end_of_last_block;
	u64 end_pos = pos + write_bytes;
	loff_t isize = i_size_read(&inode->vfs_inode);
	unsigned int extra_bits = 0;

	if (write_bytes == 0)
		return 0;

	if (noreserve)
		extra_bits |= EXTENT_NORESERVE;

	start_pos = round_down(pos, fs_info->sectorsize);
	num_bytes = round_up(write_bytes + pos - start_pos,
			     fs_info->sectorsize);
	ASSERT(num_bytes <= U32_MAX);

	end_of_last_block = start_pos + num_bytes - 1;

	/*
	 * The pages may have already been dirty, clear out old accounting so
	 * we can set things up properly
	 */
	clear_extent_bit(&inode->io_tree, start_pos, end_of_last_block,
			 EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
			 cached);

	ret = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block,
					extra_bits, cached);
	if (ret)
		return ret;

	for (i = 0; i < num_pages; i++) {
		struct page *p = pages[i];

		btrfs_folio_clamp_set_uptodate(fs_info, page_folio(p),
					       start_pos, num_bytes);
		btrfs_folio_clamp_clear_checked(fs_info, page_folio(p),
						start_pos, num_bytes);
		btrfs_folio_clamp_set_dirty(fs_info, page_folio(p),
					    start_pos, num_bytes);
	}

	/*
	 * We've only changed i_size in RAM, and we haven't updated the disk
	 * i_size. There is no need to log the inode at this time.
	 */
	if (end_pos > isize)
		i_size_write(&inode->vfs_inode, end_pos);
	return 0;
}

/*
 * This is very complex, but the basic idea is to drop all extents in the
 * range start - end.
 *
 * If an extent intersects the range but is not entirely inside the range
 * it is either truncated or split. Anything entirely inside the range
 * is deleted from the tree.
 *
 * Note: the VFS' inode number of bytes is not updated, it's up to the caller
 * to deal with that. We set the field 'bytes_found' of the arguments structure
 * with the number of allocated bytes found in the target range, so that the
 * caller can update the inode's number of bytes in an atomic way when
 * replacing extents in a range to avoid races with stat(2).
 */
int btrfs_drop_extents(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct btrfs_inode *inode,
		       struct btrfs_drop_extents_args *args)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_buffer *leaf;
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;
	struct btrfs_key new_key;
	u64 ino = btrfs_ino(inode);
	u64 search_start = args->start;
	u64 disk_bytenr = 0;
	u64 num_bytes = 0;
	u64 extent_offset = 0;
	u64 extent_end = 0;
	u64 last_end = args->start;
	int del_nr = 0;
	int del_slot = 0;
	int extent_type;
	int recow;
	int ret;
	int modify_tree = -1;
	int update_refs;
	int found = 0;
	struct btrfs_path *path = args->path;

	args->bytes_found = 0;
	args->extent_inserted = false;

	/* Must always have a path if ->replace_extent is true */
	ASSERT(!(args->replace_extent && !args->path));

	if (!path) {
		path = btrfs_alloc_path();
		if (!path) {
			ret = -ENOMEM;
			goto out;
		}
	}

	if (args->drop_cache)
		btrfs_drop_extent_map_range(inode, args->start, args->end - 1, false);

	if (args->start >= inode->disk_i_size && !args->replace_extent)
		modify_tree = 0;

	update_refs = (btrfs_root_id(root) != BTRFS_TREE_LOG_OBJECTID);
	while (1) {
		recow = 0;
		ret = btrfs_lookup_file_extent(trans, root, path, ino,
					       search_start, modify_tree);
		if (ret < 0)
			break;
		if (ret > 0 && path->slots[0] > 0 && search_start == args->start) {
			leaf = path->nodes[0];
			btrfs_item_key_to_cpu(leaf, &key, path->slots[0] - 1);
			if (key.objectid == ino &&
			    key.type == BTRFS_EXTENT_DATA_KEY)
				path->slots[0]--;
		}
		ret = 0;
next_slot:
		leaf = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			BUG_ON(del_nr > 0);
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				break;
			if (ret > 0) {
				ret = 0;
				break;
			}
			leaf = path->nodes[0];
			recow = 1;
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

		if (key.objectid > ino)
			break;
		if (WARN_ON_ONCE(key.objectid < ino) ||
		    key.type < BTRFS_EXTENT_DATA_KEY) {
			ASSERT(del_nr == 0);
			path->slots[0]++;
			goto next_slot;
		}
		if (key.type > BTRFS_EXTENT_DATA_KEY || key.offset >= args->end)
			break;

		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		extent_type = btrfs_file_extent_type(leaf, fi);

		if (extent_type == BTRFS_FILE_EXTENT_REG ||
		    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
			disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
			num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
			extent_offset = btrfs_file_extent_offset(leaf, fi);
			extent_end = key.offset +
				btrfs_file_extent_num_bytes(leaf, fi);
		} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
			extent_end = key.offset +
				btrfs_file_extent_ram_bytes(leaf, fi);
		} else {
			/* can't happen */
			BUG();
		}

		/*
		 * Don't skip extent items representing 0 byte lengths. They
		 * used to be created (due to a bug) when punching holes hit
		 * an -ENOSPC condition. So if we find one here, just ensure
		 * we delete it, otherwise we would insert a new file extent
		 * item with the same key (offset) as that 0 bytes length file
		 * extent item in the call to setup_items_for_insert() later
		 * in this function.
		 */
		if (extent_end == key.offset && extent_end >= search_start) {
			last_end = extent_end;
			goto delete_extent_item;
		}

		if (extent_end <= search_start) {
			path->slots[0]++;
			goto next_slot;
		}

		found = 1;
		search_start = max(key.offset, args->start);
		if (recow || !modify_tree) {
			modify_tree = -1;
			btrfs_release_path(path);
			continue;
		}

		/*
		 *     | - range to drop - |
		 *  | -------- extent -------- |
		 */
		if (args->start > key.offset && args->end < extent_end) {
			BUG_ON(del_nr > 0);
			if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
				ret = -EOPNOTSUPP;
				break;
			}

			memcpy(&new_key, &key, sizeof(new_key));
			new_key.offset = args->start;
			ret = btrfs_duplicate_item(trans, root, path,
						   &new_key);
			if (ret == -EAGAIN) {
				btrfs_release_path(path);
				continue;
			}
			if (ret < 0)
				break;

			leaf = path->nodes[0];
			fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							args->start - key.offset);

			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);

			extent_offset += args->start - key.offset;
			btrfs_set_file_extent_offset(leaf, fi, extent_offset);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							extent_end - args->start);
			btrfs_mark_buffer_dirty(trans, leaf);

			if (update_refs && disk_bytenr > 0) {
				struct btrfs_ref ref = {
					.action = BTRFS_ADD_DELAYED_REF,
					.bytenr = disk_bytenr,
					.num_bytes = num_bytes,
					.parent = 0,
					.owning_root = btrfs_root_id(root),
					.ref_root = btrfs_root_id(root),
				};
				btrfs_init_data_ref(&ref, new_key.objectid,
						    args->start - extent_offset,
						    0, false);
				ret = btrfs_inc_extent_ref(trans, &ref);
				if (ret) {
					btrfs_abort_transaction(trans, ret);
					break;
				}
			}
			key.offset = args->start;
		}
		/*
		 * From here on out we will have actually dropped something, so
		 * last_end can be updated.
		 */
		last_end = extent_end;

		/*
		 *  | ---- range to drop ----- |
		 *      | -------- extent -------- |
		 */
		if (args->start <= key.offset && args->end < extent_end) {
			if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
				ret = -EOPNOTSUPP;
				break;
			}

			memcpy(&new_key, &key, sizeof(new_key));
			new_key.offset = args->end;
			btrfs_set_item_key_safe(trans, path, &new_key);

			extent_offset += args->end - key.offset;
			btrfs_set_file_extent_offset(leaf, fi, extent_offset);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							extent_end - args->end);
			btrfs_mark_buffer_dirty(trans, leaf);
			if (update_refs && disk_bytenr > 0)
				args->bytes_found += args->end - key.offset;
			break;
		}

		search_start = extent_end;
		/*
		 *       | ---- range to drop ----- |
		 *  | -------- extent -------- |
		 */
		if (args->start > key.offset && args->end >= extent_end) {
			BUG_ON(del_nr > 0);
			if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
				ret = -EOPNOTSUPP;
				break;
			}

			btrfs_set_file_extent_num_bytes(leaf, fi,
							args->start - key.offset);
			btrfs_mark_buffer_dirty(trans, leaf);
			if (update_refs && disk_bytenr > 0)
				args->bytes_found += extent_end - args->start;
			if (args->end == extent_end)
				break;

			path->slots[0]++;
			goto next_slot;
		}

		/*
		 *  | ---- range to drop ----- |
		 *    | ------ extent ------ |
		 */
		if (args->start <= key.offset && args->end >= extent_end) {
delete_extent_item:
			if (del_nr == 0) {
				del_slot = path->slots[0];
				del_nr = 1;
			} else {
				BUG_ON(del_slot + del_nr != path->slots[0]);
				del_nr++;
			}

			if (update_refs &&
			    extent_type == BTRFS_FILE_EXTENT_INLINE) {
				args->bytes_found += extent_end - key.offset;
				extent_end = ALIGN(extent_end,
						   fs_info->sectorsize);
			} else if (update_refs && disk_bytenr > 0) {
				struct btrfs_ref ref = {
					.action = BTRFS_DROP_DELAYED_REF,
					.bytenr = disk_bytenr,
					.num_bytes = num_bytes,
					.parent = 0,
					.owning_root = btrfs_root_id(root),
					.ref_root = btrfs_root_id(root),
				};
				btrfs_init_data_ref(&ref, key.objectid,
						    key.offset - extent_offset,
						    0, false);
				ret = btrfs_free_extent(trans, &ref);
				if (ret) {
					btrfs_abort_transaction(trans, ret);
					break;
				}
				args->bytes_found += extent_end - key.offset;
			}

			if (args->end == extent_end)
				break;

			if (path->slots[0] + 1 < btrfs_header_nritems(leaf)) {
				path->slots[0]++;
				goto next_slot;
			}

			ret = btrfs_del_items(trans, root, path, del_slot,
					      del_nr);
			if (ret) {
				btrfs_abort_transaction(trans, ret);
				break;
			}

			del_nr = 0;
			del_slot = 0;

			btrfs_release_path(path);
			continue;
		}

		BUG();
	}

	if (!ret && del_nr > 0) {
		/*
		 * Set path->slots[0] to first slot, so that after the delete
		 * if items are moved off from our leaf to its immediate left
		 * or right neighbor leaves, we end up with a correct and
		 * adjusted path->slots[0] for our insertion (if
		 * args->replace_extent).
		 */
		path->slots[0] = del_slot;
		ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
		if (ret)
			btrfs_abort_transaction(trans, ret);
	}

	leaf = path->nodes[0];
	/*
	 * If btrfs_del_items() was called, it might have deleted a leaf, in
	 * which case it unlocked our path, so check path->locks[0] matches a
	 * write lock.
	 */
	if (!ret && args->replace_extent &&
	    path->locks[0] == BTRFS_WRITE_LOCK &&
	    btrfs_leaf_free_space(leaf) >=
	    sizeof(struct btrfs_item) + args->extent_item_size) {

		key.objectid = ino;
		key.type = BTRFS_EXTENT_DATA_KEY;
		key.offset = args->start;
		if (!del_nr && path->slots[0] < btrfs_header_nritems(leaf)) {
			struct btrfs_key slot_key;

			btrfs_item_key_to_cpu(leaf, &slot_key, path->slots[0]);
			if (btrfs_comp_cpu_keys(&key, &slot_key) > 0)
				path->slots[0]++;
		}
		btrfs_setup_item_for_insert(trans, root, path, &key,
					    args->extent_item_size);
		args->extent_inserted = true;
	}

	if (!args->path)
		btrfs_free_path(path);
	else if (!args->extent_inserted)
		btrfs_release_path(path);
out:
	args->drop_end = found ? min(args->end, last_end) : args->end;

	return ret;
}

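/*
 * Helper for btrfs_mark_extent_written(): check whether the file extent item
 * at @slot is a regular, non-compressed and non-encrypted extent backed by
 * the same disk extent (@bytenr with @orig_offset), i.e. a candidate for
 * merging. Returns 1 and stores the item's file range in @start/@end if so,
 * 0 otherwise.
 */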
static int extent_mergeable(struct extent_buffer *leaf, int slot,
			    u64 objectid, u64 bytenr, u64 orig_offset,
			    u64 *start, u64 *end)
{
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;
	u64 extent_end;

	if (slot < 0 || slot >= btrfs_header_nritems(leaf))
		return 0;

	btrfs_item_key_to_cpu(leaf, &key, slot);
	if (key.objectid != objectid || key.type != BTRFS_EXTENT_DATA_KEY)
		return 0;

	fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
	if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG ||
	    btrfs_file_extent_disk_bytenr(leaf, fi) != bytenr ||
	    btrfs_file_extent_offset(leaf, fi) != key.offset - orig_offset ||
	    btrfs_file_extent_compression(leaf, fi) ||
	    btrfs_file_extent_encryption(leaf, fi) ||
	    btrfs_file_extent_other_encoding(leaf, fi))
		return 0;

	extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
	if ((*start && *start != key.offset) || (*end && *end != extent_end))
		return 0;

	*start = key.offset;
	*end = extent_end;
	return 1;
}

/*
 * Mark extent in the range start - end as written.
 *
 * This changes extent type from 'pre-allocated' to 'regular'. If only
 * part of extent is marked as written, the extent will be split into
 * two or three.
 */
int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
			      struct btrfs_inode *inode, u64 start, u64 end)
{
	struct btrfs_root *root = inode->root;
	struct extent_buffer *leaf;
	struct btrfs_path *path;
	struct btrfs_file_extent_item *fi;
	struct btrfs_ref ref = { 0 };
	struct btrfs_key key;
	struct btrfs_key new_key;
	u64 bytenr;
	u64 num_bytes;
	u64 extent_end;
	u64 orig_offset;
	u64 other_start;
	u64 other_end;
	u64 split;
	int del_nr = 0;
	int del_slot = 0;
	int recow;
	int ret = 0;
	u64 ino = btrfs_ino(inode);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
again:
	recow = 0;
	split = start;
	key.objectid = ino;
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = split;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0)
		goto out;
	if (ret > 0 && path->slots[0] > 0)
		path->slots[0]--;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	if (key.objectid != ino ||
	    key.type != BTRFS_EXTENT_DATA_KEY) {
		ret = -EINVAL;
		btrfs_abort_transaction(trans, ret);
		goto out;
	}
	fi = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_PREALLOC) {
		ret = -EINVAL;
		btrfs_abort_transaction(trans, ret);
		goto out;
	}
	extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
	if (key.offset > start || extent_end < end) {
		ret = -EINVAL;
		btrfs_abort_transaction(trans, ret);
		goto out;
	}

	bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
	num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
	orig_offset = key.offset - btrfs_file_extent_offset(leaf, fi);
	memcpy(&new_key, &key, sizeof(new_key));

	if (start == key.offset && end < extent_end) {
		other_start = 0;
		other_end = start;
		if (extent_mergeable(leaf, path->slots[0] - 1,
				     ino, bytenr, orig_offset,
				     &other_start, &other_end)) {
			new_key.offset = end;
			btrfs_set_item_key_safe(trans, path, &new_key);
			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_generation(leaf, fi,
							 trans->transid);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							extent_end - end);
			btrfs_set_file_extent_offset(leaf, fi,
						     end - orig_offset);
			fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_generation(leaf, fi,
							 trans->transid);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							end - other_start);
			btrfs_mark_buffer_dirty(trans, leaf);
			goto out;
		}
	}

	if (start > key.offset && end == extent_end) {
		other_start = end;
		other_end = 0;
		if (extent_mergeable(leaf, path->slots[0] + 1,
				     ino, bytenr, orig_offset,
				     &other_start, &other_end)) {
			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							start - key.offset);
			btrfs_set_file_extent_generation(leaf, fi,
							 trans->transid);
			path->slots[0]++;
			new_key.offset = start;
			btrfs_set_item_key_safe(trans, path, &new_key);

			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_generation(leaf, fi,
							 trans->transid);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							other_end - start);
			btrfs_set_file_extent_offset(leaf, fi,
						     start - orig_offset);
			btrfs_mark_buffer_dirty(trans, leaf);
			goto out;
		}
	}

	while (start > key.offset || end < extent_end) {
		if (key.offset == start)
			split = end;

		new_key.offset = split;
		ret = btrfs_duplicate_item(trans, root, path, &new_key);
		if (ret == -EAGAIN) {
			btrfs_release_path(path);
			goto again;
		}
		if (ret < 0) {
			btrfs_abort_transaction(trans, ret);
			goto out;
		}

		leaf = path->nodes[0];
		fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
				    struct btrfs_file_extent_item);
		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
		btrfs_set_file_extent_num_bytes(leaf, fi,
						split - key.offset);

		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);

		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
		btrfs_set_file_extent_offset(leaf, fi, split - orig_offset);
		btrfs_set_file_extent_num_bytes(leaf, fi,
						extent_end - split);
		btrfs_mark_buffer_dirty(trans, leaf);

		ref.action = BTRFS_ADD_DELAYED_REF;
		ref.bytenr = bytenr;
		ref.num_bytes = num_bytes;
		ref.parent = 0;
		ref.owning_root = btrfs_root_id(root);
		ref.ref_root = btrfs_root_id(root);
		btrfs_init_data_ref(&ref, ino, orig_offset, 0, false);
		ret = btrfs_inc_extent_ref(trans, &ref);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			goto out;
		}

		if (split == start) {
			key.offset = start;
		} else {
			if (start != key.offset) {
				ret = -EINVAL;
				btrfs_abort_transaction(trans, ret);
				goto out;
			}
			path->slots[0]--;
			extent_end = end;
		}
		recow = 1;
	}

	other_start = end;
	other_end = 0;

	ref.action = BTRFS_DROP_DELAYED_REF;
	ref.bytenr = bytenr;
	ref.num_bytes = num_bytes;
	ref.parent = 0;
	ref.owning_root = btrfs_root_id(root);
	ref.ref_root = btrfs_root_id(root);
	btrfs_init_data_ref(&ref, ino, orig_offset, 0, false);
	if (extent_mergeable(leaf, path->slots[0] + 1,
			     ino, bytenr, orig_offset,
			     &other_start, &other_end)) {
		if (recow) {
			btrfs_release_path(path);
			goto again;
		}
		extent_end = other_end;
		del_slot = path->slots[0] + 1;
		del_nr++;
		ret = btrfs_free_extent(trans, &ref);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			goto out;
		}
	}
	other_start = 0;
	other_end = start;
	if (extent_mergeable(leaf, path->slots[0] - 1,
			     ino, bytenr, orig_offset,
			     &other_start, &other_end)) {
		if (recow) {
			btrfs_release_path(path);
			goto again;
		}
		key.offset = other_start;
		del_slot = path->slots[0];
		del_nr++;
		ret = btrfs_free_extent(trans, &ref);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			goto out;
		}
	}
	if (del_nr == 0) {
		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		btrfs_set_file_extent_type(leaf, fi,
					   BTRFS_FILE_EXTENT_REG);
		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
		btrfs_mark_buffer_dirty(trans, leaf);
	} else {
		fi = btrfs_item_ptr(leaf, del_slot - 1,
				    struct btrfs_file_extent_item);
		btrfs_set_file_extent_type(leaf, fi,
					   BTRFS_FILE_EXTENT_REG);
		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
		btrfs_set_file_extent_num_bytes(leaf, fi,
						extent_end - key.offset);
		btrfs_mark_buffer_dirty(trans, leaf);

		ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
		if (ret < 0) {
			btrfs_abort_transaction(trans, ret);
			goto out;
		}
	}
out:
	btrfs_free_path(path);
	return ret;
}

/*
 * On error we return an unlocked page and the error value; on success we
 * return a locked page and 0.
 */
static int prepare_uptodate_page(struct inode *inode,
				 struct page *page, u64 pos,
				 bool force_uptodate)
{
	struct folio *folio = page_folio(page);
	int ret = 0;

	if (((pos & (PAGE_SIZE - 1)) || force_uptodate) &&
	    !PageUptodate(page)) {
		ret = btrfs_read_folio(NULL, folio);
		if (ret)
			return ret;
		lock_page(page);
		if (!PageUptodate(page)) {
			unlock_page(page);
			return -EIO;
		}

		/*
		 * Since btrfs_read_folio() will unlock the folio before it
		 * returns, there is a window where btrfs_release_folio() can be
		 * called to release the page. Here we check both inode
		 * mapping and PagePrivate() to make sure the page was not
		 * released.
		 *
		 * The private flag check is essential for subpage as we need
		 * to store extra bitmap using folio private.
		 */
		if (page->mapping != inode->i_mapping || !folio_test_private(folio)) {
			unlock_page(page);
			return -EAGAIN;
		}
	}
	return 0;
}

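/*
 * fgp flags for pagecache_get_page(): lock the page, mark it accessed and
 * create it if it's missing. For NOWAIT writes, FGP_NOWAIT makes the lookup
 * fail instead of blocking.
 */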
static fgf_t get_prepare_fgp_flags(bool nowait)
{
	fgf_t fgp_flags = FGP_LOCK | FGP_ACCESSED | FGP_CREAT;

	if (nowait)
		fgp_flags |= FGP_NOWAIT;

	return fgp_flags;
}

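/*
 * gfp mask for page cache allocations: NOWAIT writes must not enter direct
 * reclaim, so mask it out and allocate with GFP_NOWAIT instead.
 */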
static gfp_t get_prepare_gfp_flags(struct inode *inode, bool nowait)
{
	gfp_t gfp;

	gfp = btrfs_alloc_write_mask(inode->i_mapping);
	if (nowait) {
		gfp &= ~__GFP_DIRECT_RECLAIM;
		gfp |= GFP_NOWAIT;
	}

	return gfp;
}

/*
 * This just gets pages into the page cache and locks them down.
 */
static noinline int prepare_pages(struct inode *inode, struct page **pages,
				  size_t num_pages, loff_t pos,
				  size_t write_bytes, bool force_uptodate,
				  bool nowait)
{
	int i;
	unsigned long index = pos >> PAGE_SHIFT;
	gfp_t mask = get_prepare_gfp_flags(inode, nowait);
	fgf_t fgp_flags = get_prepare_fgp_flags(nowait);
	int ret = 0;
	int faili;

	for (i = 0; i < num_pages; i++) {
again:
		pages[i] = pagecache_get_page(inode->i_mapping, index + i,
					      fgp_flags, mask | __GFP_WRITE);
		if (!pages[i]) {
			faili = i - 1;
			if (nowait)
				ret = -EAGAIN;
			else
				ret = -ENOMEM;
			goto fail;
		}

		ret = set_page_extent_mapped(pages[i]);
		if (ret < 0) {
			faili = i;
			goto fail;
		}

		if (i == 0)
			ret = prepare_uptodate_page(inode, pages[i], pos,
						    force_uptodate);
		if (!ret && i == num_pages - 1)
			ret = prepare_uptodate_page(inode, pages[i],
						    pos + write_bytes, false);
		if (ret) {
			put_page(pages[i]);
			if (!nowait && ret == -EAGAIN) {
				ret = 0;
				goto again;
			}
			faili = i - 1;
			goto fail;
		}
		wait_on_page_writeback(pages[i]);
	}

	return 0;
fail:
	while (faili >= 0) {
		unlock_page(pages[faili]);
		put_page(pages[faili]);
		faili--;
	}
	return ret;
}

/*
 * This function locks the extent and properly waits for data=ordered extents
 * to finish before allowing the pages to be modified if needed.
 *
 * The return value:
 * 1 - the extent is locked
 * 0 - the extent is not locked, and everything is OK
 * -EAGAIN - need to re-prepare the pages
 * any other < 0 value - something went wrong
 */
static noinline int
lock_and_cleanup_extent_if_need(struct btrfs_inode *inode, struct page **pages,
				size_t num_pages, loff_t pos,
				size_t write_bytes,
				u64 *lockstart, u64 *lockend, bool nowait,
				struct extent_state **cached_state)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	u64 start_pos;
	u64 last_pos;
	int i;
	int ret = 0;

	start_pos = round_down(pos, fs_info->sectorsize);
	last_pos = round_up(pos + write_bytes, fs_info->sectorsize) - 1;

	if (start_pos < inode->vfs_inode.i_size) {
		struct btrfs_ordered_extent *ordered;

		if (nowait) {
			if (!try_lock_extent(&inode->io_tree, start_pos, last_pos,
					     cached_state)) {
				for (i = 0; i < num_pages; i++) {
					unlock_page(pages[i]);
					put_page(pages[i]);
					pages[i] = NULL;
				}

				return -EAGAIN;
			}
		} else {
			lock_extent(&inode->io_tree, start_pos, last_pos, cached_state);
		}

		ordered = btrfs_lookup_ordered_range(inode, start_pos,
						     last_pos - start_pos + 1);
		if (ordered &&
		    ordered->file_offset + ordered->num_bytes > start_pos &&
		    ordered->file_offset <= last_pos) {
			unlock_extent(&inode->io_tree, start_pos, last_pos,
				      cached_state);
			for (i = 0; i < num_pages; i++) {
				unlock_page(pages[i]);
				put_page(pages[i]);
			}
			btrfs_start_ordered_extent(ordered);
			btrfs_put_ordered_extent(ordered);
			return -EAGAIN;
		}
		if (ordered)
			btrfs_put_ordered_extent(ordered);

		*lockstart = start_pos;
		*lockend = last_pos;
		ret = 1;
	}

	/*
	 * We should be called after prepare_pages() which should have locked
	 * all pages in the range.
	 */
	for (i = 0; i < num_pages; i++)
		WARN_ON(!PageLocked(pages[i]));

	return ret;
}

/*
 * Check if we can do nocow write into the range [@pos, @pos + @write_bytes)
 *
 * @pos:         File offset.
 * @write_bytes: The length to write, will be updated to the nocow writeable
 *               range.
 *
 * This function will flush ordered extents in the range to ensure proper
 * nocow checks.
 *
 * Return:
 * > 0          If we can nocow, and updates @write_bytes.
 * 0            If we can't do a nocow write.
 * -EAGAIN      If we can't do a nocow write because snapshotting of the
 *              inode's root is in progress.
 * < 0          If an error happened.
 *
 * NOTE: Callers need to call btrfs_check_nocow_unlock() if we return > 0.
 */
int btrfs_check_nocow_lock(struct btrfs_inode *inode, loff_t pos,
			   size_t *write_bytes, bool nowait)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct btrfs_root *root = inode->root;
	struct extent_state *cached_state = NULL;
	u64 lockstart, lockend;
	u64 num_bytes;
	int ret;

	if (!(inode->flags & (BTRFS_INODE_NODATACOW | BTRFS_INODE_PREALLOC)))
		return 0;

	if (!btrfs_drew_try_write_lock(&root->snapshot_lock))
		return -EAGAIN;

	lockstart = round_down(pos, fs_info->sectorsize);
	lockend = round_up(pos + *write_bytes,
			   fs_info->sectorsize) - 1;
	num_bytes = lockend - lockstart + 1;

	if (nowait) {
		if (!btrfs_try_lock_ordered_range(inode, lockstart, lockend,
						  &cached_state)) {
			btrfs_drew_write_unlock(&root->snapshot_lock);
			return -EAGAIN;
		}
	} else {
		btrfs_lock_and_flush_ordered_range(inode, lockstart, lockend,
						   &cached_state);
	}
	ret = can_nocow_extent(&inode->vfs_inode, lockstart, &num_bytes,
			       NULL, nowait, false);
	if (ret <= 0)
		btrfs_drew_write_unlock(&root->snapshot_lock);
	else
		*write_bytes = min_t(size_t, *write_bytes,
				     num_bytes - pos + lockstart);
	unlock_extent(&inode->io_tree, lockstart, lockend, &cached_state);

	return ret;
}

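/* Counterpart of btrfs_check_nocow_lock(): drop the snapshot write lock. */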
void btrfs_check_nocow_unlock(struct btrfs_inode *inode)
{
	btrfs_drew_write_unlock(&inode->root->snapshot_lock);
}

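/*
 * Set the inode's m/ctime to now and bump its i_version (unless NOCMTIME).
 * The inode item itself is updated later, when the write finishes, using the
 * space already reserved for it (see the comment in btrfs_write_check()).
 */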
static void update_time_for_write(struct inode *inode)
{
	struct timespec64 now, ts;

	if (IS_NOCMTIME(inode))
		return;

	now = current_time(inode);
	ts = inode_get_mtime(inode);
	if (!timespec64_equal(&ts, &now))
		inode_set_mtime_to_ts(inode, now);

	ts = inode_get_ctime(inode);
	if (!timespec64_equal(&ts, &now))
		inode_set_ctime_to_ts(inode, now);

	if (IS_I_VERSION(inode))
		inode_inc_iversion(inode);
}

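/*
 * Common checks done before a buffered, direct or encoded write: bail out
 * early on NOWAIT writes that would always have to COW, remove privileges,
 * update the timestamps and expand a hole if the write starts beyond i_size.
 */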
int btrfs_write_check(struct kiocb *iocb, struct iov_iter *from, size_t count)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
	loff_t pos = iocb->ki_pos;
	int ret;
	loff_t oldsize;
	loff_t start_pos;

	/*
	 * Quickly bail out on NOWAIT writes if we don't have the nodatacow or
	 * prealloc flags, as without those flags we always have to COW. We will
	 * later check if we can really NOCOW into the target range (using
	 * can_nocow_extent() at btrfs_get_blocks_direct_write()).
	 */
	if ((iocb->ki_flags & IOCB_NOWAIT) &&
	    !(BTRFS_I(inode)->flags & (BTRFS_INODE_NODATACOW | BTRFS_INODE_PREALLOC)))
		return -EAGAIN;

	ret = file_remove_privs(file);
	if (ret)
		return ret;

	/*
	 * We reserve space for updating the inode when we reserve space for the
	 * extent we are going to write, so we will enospc out there. We don't
	 * need to start yet another transaction to update the inode as we will
	 * update the inode when we finish writing whatever data we write.
	 */
	update_time_for_write(inode);

	start_pos = round_down(pos, fs_info->sectorsize);
	oldsize = i_size_read(inode);
	if (start_pos > oldsize) {
		/* Expand hole size to cover write data, preventing empty gap */
		loff_t end_pos = round_up(pos + count, fs_info->sectorsize);

		ret = btrfs_cont_expand(BTRFS_I(inode), oldsize, end_pos);
		if (ret)
			return ret;
	}

	return 0;
}

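/*
 * The buffered write loop: reserve data and metadata space (or metadata only
 * for a NOCOW write), prepare and lock the target pages, copy from the
 * iov_iter and mark the copied range as delalloc, repeating until the iter
 * is drained or an error occurs.
 */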
ssize_t btrfs_buffered_write(struct kiocb *iocb, struct iov_iter *i)
{
	struct file *file = iocb->ki_filp;
	loff_t pos;
	struct inode *inode = file_inode(file);
	struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
	struct page **pages = NULL;
	struct extent_changeset *data_reserved = NULL;
	u64 release_bytes = 0;
	u64 lockstart;
	u64 lockend;
	size_t num_written = 0;
	int nrptrs;
	ssize_t ret;
	bool only_release_metadata = false;
	bool force_page_uptodate = false;
	loff_t old_isize = i_size_read(inode);
	unsigned int ilock_flags = 0;
	const bool nowait = (iocb->ki_flags & IOCB_NOWAIT);
	unsigned int bdp_flags = (nowait ? BDP_ASYNC : 0);

	if (nowait)
		ilock_flags |= BTRFS_ILOCK_TRY;

	ret = btrfs_inode_lock(BTRFS_I(inode), ilock_flags);
	if (ret < 0)
		return ret;

	ret = generic_write_checks(iocb, i);
	if (ret <= 0)
		goto out;

	ret = btrfs_write_check(iocb, i, ret);
	if (ret < 0)
		goto out;

	pos = iocb->ki_pos;
	nrptrs = min(DIV_ROUND_UP(iov_iter_count(i), PAGE_SIZE),
		     PAGE_SIZE / (sizeof(struct page *)));
	nrptrs = min(nrptrs, current->nr_dirtied_pause - current->nr_dirtied);
	nrptrs = max(nrptrs, 8);
	pages = kmalloc_array(nrptrs, sizeof(struct page *), GFP_KERNEL);
	if (!pages) {
		ret = -ENOMEM;
		goto out;
	}

	while (iov_iter_count(i) > 0) {
		struct extent_state *cached_state = NULL;
		size_t offset = offset_in_page(pos);
		size_t sector_offset;
		size_t write_bytes = min(iov_iter_count(i),
					 nrptrs * (size_t)PAGE_SIZE -
					 offset);
		size_t num_pages;
		size_t reserve_bytes;
		size_t dirty_pages;
		size_t copied;
		size_t dirty_sectors;
		size_t num_sectors;
		int extents_locked;

		/*
		 * Fault pages before locking them in prepare_pages
		 * to avoid a recursive lock.
		 */
		if (unlikely(fault_in_iov_iter_readable(i, write_bytes))) {
			ret = -EFAULT;
			break;
		}

		only_release_metadata = false;
		sector_offset = pos & (fs_info->sectorsize - 1);

		extent_changeset_release(data_reserved);
		ret = btrfs_check_data_free_space(BTRFS_I(inode),
						  &data_reserved, pos,
						  write_bytes, nowait);
		if (ret < 0) {
			int can_nocow;

			if (nowait && (ret == -ENOSPC || ret == -EAGAIN)) {
				ret = -EAGAIN;
				break;
			}

			/*
			 * If we don't have to COW at the offset, reserve
			 * metadata only. write_bytes may get smaller than
			 * requested here.
			 */
			can_nocow = btrfs_check_nocow_lock(BTRFS_I(inode), pos,
							   &write_bytes, nowait);
			if (can_nocow < 0)
				ret = can_nocow;
			if (can_nocow > 0)
				ret = 0;
			if (ret)
				break;
			only_release_metadata = true;
		}

		num_pages = DIV_ROUND_UP(write_bytes + offset, PAGE_SIZE);
		WARN_ON(num_pages > nrptrs);
		reserve_bytes = round_up(write_bytes + sector_offset,
					 fs_info->sectorsize);
		WARN_ON(reserve_bytes == 0);
		ret = btrfs_delalloc_reserve_metadata(BTRFS_I(inode),
						      reserve_bytes,
						      reserve_bytes, nowait);
		if (ret) {
			if (!only_release_metadata)
				btrfs_free_reserved_data_space(BTRFS_I(inode),
							       data_reserved, pos,
							       write_bytes);
			else
				btrfs_check_nocow_unlock(BTRFS_I(inode));

			if (nowait && ret == -ENOSPC)
				ret = -EAGAIN;
			break;
		}

		release_bytes = reserve_bytes;
again:
		ret = balance_dirty_pages_ratelimited_flags(inode->i_mapping, bdp_flags);
		if (ret) {
			btrfs_delalloc_release_extents(BTRFS_I(inode), reserve_bytes);
			break;
		}

		/*
		 * This is going to set up the pages array with the number of
		 * pages we want, so we don't really need to worry about the
		 * contents of pages from loop to loop.
		 */
		ret = prepare_pages(inode, pages, num_pages,
				    pos, write_bytes, force_page_uptodate, false);
		if (ret) {
			btrfs_delalloc_release_extents(BTRFS_I(inode),
						       reserve_bytes);
			break;
		}

		extents_locked = lock_and_cleanup_extent_if_need(
				BTRFS_I(inode), pages,
				num_pages, pos, write_bytes, &lockstart,
				&lockend, nowait, &cached_state);
		if (extents_locked < 0) {
			if (!nowait && extents_locked == -EAGAIN)
				goto again;

			btrfs_delalloc_release_extents(BTRFS_I(inode),
						       reserve_bytes);
			ret = extents_locked;
			break;
		}

		copied = btrfs_copy_from_user(pos, write_bytes, pages, i);

		num_sectors = BTRFS_BYTES_TO_BLKS(fs_info, reserve_bytes);
		dirty_sectors = round_up(copied + sector_offset,
					 fs_info->sectorsize);
		dirty_sectors = BTRFS_BYTES_TO_BLKS(fs_info, dirty_sectors);

		/*
		 * If we have trouble faulting in the pages, fall back to one
		 * page at a time.
		 */
		if (copied < write_bytes)
			nrptrs = 1;

		if (copied == 0) {
			force_page_uptodate = true;
			dirty_sectors = 0;
			dirty_pages = 0;
		} else {
			force_page_uptodate = false;
			dirty_pages = DIV_ROUND_UP(copied + offset,
						   PAGE_SIZE);
		}

		if (num_sectors > dirty_sectors) {
			/* release everything except the sectors we dirtied */
			release_bytes -= dirty_sectors << fs_info->sectorsize_bits;
			if (only_release_metadata) {
				btrfs_delalloc_release_metadata(BTRFS_I(inode),
								release_bytes, true);
			} else {
				u64 __pos;

				__pos = round_down(pos,
						   fs_info->sectorsize) +
					(dirty_pages << PAGE_SHIFT);
				btrfs_delalloc_release_space(BTRFS_I(inode),
							     data_reserved, __pos,
							     release_bytes, true);
			}
		}

		release_bytes = round_up(copied + sector_offset,
					 fs_info->sectorsize);

		ret = btrfs_dirty_pages(BTRFS_I(inode), pages,
					dirty_pages, pos, copied,
					&cached_state, only_release_metadata);

		/*
		 * If we have not locked the extent range, because the range's
		 * start offset is >= i_size, we might still have a non-NULL
		 * cached extent state, acquired while marking the extent range
		 * as delalloc through btrfs_dirty_pages(). Therefore free any
		 * possible cached extent state to avoid a memory leak.
		 */
		if (extents_locked)
			unlock_extent(&BTRFS_I(inode)->io_tree, lockstart,
				      lockend, &cached_state);
		else
			free_extent_state(cached_state);

		btrfs_delalloc_release_extents(BTRFS_I(inode), reserve_bytes);
		if (ret) {
			btrfs_drop_pages(fs_info, pages, num_pages, pos, copied);
			break;
		}

		release_bytes = 0;
		if (only_release_metadata)
			btrfs_check_nocow_unlock(BTRFS_I(inode));

		btrfs_drop_pages(fs_info, pages, num_pages, pos, copied);

		cond_resched();

		pos += copied;
		num_written += copied;
	}

	kfree(pages);

	if (release_bytes) {
		if (only_release_metadata) {
			btrfs_check_nocow_unlock(BTRFS_I(inode));
			btrfs_delalloc_release_metadata(BTRFS_I(inode),
							release_bytes, true);
		} else {
			btrfs_delalloc_release_space(BTRFS_I(inode),
						     data_reserved,
						     round_down(pos, fs_info->sectorsize),
						     release_bytes, true);
		}
	}

	extent_changeset_free(data_reserved);
	if (num_written > 0) {
		pagecache_isize_extended(inode, old_isize, iocb->ki_pos);
		iocb->ki_pos += num_written;
	}
out:
	btrfs_inode_unlock(BTRFS_I(inode), ilock_flags);
	return num_written ? num_written : ret;
}

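/*
 * Encoded writes hand us pre-encoded (e.g. compressed) data that must be
 * written out as a single extent, so unlike the regular paths a partial
 * write is not allowed.
 */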
static ssize_t btrfs_encoded_write(struct kiocb *iocb, struct iov_iter *from,
				   const struct btrfs_ioctl_encoded_io_args *encoded)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	loff_t count;
	ssize_t ret;

	btrfs_inode_lock(BTRFS_I(inode), 0);
	count = encoded->len;
	ret = generic_write_checks_count(iocb, &count);
	if (ret == 0 && count != encoded->len) {
		/*
		 * The write got truncated by generic_write_checks_count(). We
		 * can't do a partial encoded write.
		 */
		ret = -EFBIG;
	}
	if (ret || encoded->len == 0)
		goto out;

	ret = btrfs_write_check(iocb, from, encoded->len);
	if (ret < 0)
		goto out;

	ret = btrfs_do_encoded_write(iocb, from, encoded);
out:
	btrfs_inode_unlock(BTRFS_I(inode), 0);
	return ret;
}

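/*
 * Common entry point for writes: dispatch to the encoded, direct or buffered
 * write path and then handle any O_SYNC/O_DSYNC semantics via
 * generic_write_sync().
 */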
ssize_t btrfs_do_write_iter(struct kiocb *iocb, struct iov_iter *from,
			    const struct btrfs_ioctl_encoded_io_args *encoded)
{
	struct file *file = iocb->ki_filp;
	struct btrfs_inode *inode = BTRFS_I(file_inode(file));
	ssize_t num_written, num_sync;

	/*
	 * If the fs flips readonly due to some impossible error, although we
	 * have opened a file as writable, we have to stop this write operation
	 * to ensure consistency.
	 */
	if (BTRFS_FS_ERROR(inode->root->fs_info))
		return -EROFS;

	if (encoded && (iocb->ki_flags & IOCB_NOWAIT))
		return -EOPNOTSUPP;

	if (encoded) {
		num_written = btrfs_encoded_write(iocb, from, encoded);
		num_sync = encoded->len;
	} else if (iocb->ki_flags & IOCB_DIRECT) {
		num_written = btrfs_direct_write(iocb, from);
		num_sync = num_written;
	} else {
		num_written = btrfs_buffered_write(iocb, from);
		num_sync = num_written;
	}

	btrfs_set_inode_last_sub_trans(inode);

	if (num_sync > 0) {
		num_sync = generic_write_sync(iocb, num_sync);
		if (num_sync < 0)
			num_written = num_sync;
	}

	return num_written;
}

static ssize_t btrfs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	return btrfs_do_write_iter(iocb, from, NULL);
}

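/*
 * Called when the last reference to the file is dropped: free the per-file
 * private data and honour a pending FLUSH_ON_CLOSE request (see the comment
 * below about truncate to zero size).
 */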
int btrfs_release_file(struct inode *inode, struct file *filp)
{
	struct btrfs_file_private *private = filp->private_data;

	if (private) {
		kfree(private->filldir_buf);
		free_extent_state(private->llseek_cached_state);
		kfree(private);
		filp->private_data = NULL;
	}

	/*
	 * Set by setattr when we are about to truncate a file from a non-zero
	 * size to a zero size. This tries to flush down new bytes that may
	 * have been written if the application were using truncate to replace
	 * a file in place.
	 */
	if (test_and_clear_bit(BTRFS_INODE_FLUSH_ON_CLOSE,
			       &BTRFS_I(inode)->runtime_flags))
		filemap_flush(inode->i_mapping);
	return 0;
}

static int start_ordered_ops(struct btrfs_inode *inode, loff_t start, loff_t end)
{
	int ret;
	struct blk_plug plug;

	/*
	 * This is only called in fsync, which would do synchronous writes, so
	 * a plug can merge adjacent IOs as much as possible. Esp. in case of
	 * multiple disks using raid profile, a large IO can be split to
	 * several segments of stripe length (currently 64K).
	 */
	blk_start_plug(&plug);
	ret = btrfs_fdatawrite_range(inode, start, end);
	blk_finish_plug(&plug);

	return ret;
}

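/*
 * Return true if fsync can skip logging the inode: either the inode is
 * already fully in the current log tree, or everything it changed was
 * already persisted by a committed transaction.
 */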
static inline bool skip_inode_logging(const struct btrfs_log_ctx *ctx)
{
	struct btrfs_inode *inode = ctx->inode;
	struct btrfs_fs_info *fs_info = inode->root->fs_info;

	if (btrfs_inode_in_log(inode, btrfs_get_fs_generation(fs_info)) &&
	    list_empty(&ctx->ordered_extents))
		return true;

	/*
	 * If we are doing a fast fsync we cannot bail out if the inode's
	 * last_trans is <= the last committed transaction, because we only
	 * update the last_trans of the inode during ordered extent completion,
	 * and for a fast fsync we don't wait for that, we only wait for the
	 * writeback to complete.
	 */
	if (inode->last_trans <= btrfs_get_last_trans_committed(fs_info) &&
	    (test_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags) ||
	     list_empty(&ctx->ordered_extents)))
		return true;

	return false;
}

/*
 * fsync call for both files and directories. This logs the inode into
 * the tree log instead of forcing full commits whenever possible.
 *
 * It needs to call filemap_fdatawait so that all ordered extent updates
 * in the metadata btree are up to date for copying to the log.
 *
 * It drops the inode mutex before doing the tree log commit. This is an
 * important optimization for directories because holding the mutex prevents
 * new operations on the dir while we write to disk.
 */
int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct dentry *dentry = file_dentry(file);
	struct btrfs_inode *inode = BTRFS_I(d_inode(dentry));
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_trans_handle *trans;
	struct btrfs_log_ctx ctx;
	int ret = 0, err;
	u64 len;
	bool full_sync;
	bool skip_ilock = false;

	if (current->journal_info == BTRFS_TRANS_DIO_WRITE_STUB) {
		skip_ilock = true;
		current->journal_info = NULL;
		btrfs_assert_inode_locked(inode);
	}

	trace_btrfs_sync_file(file, datasync);

	btrfs_init_log_ctx(&ctx, inode);

	/*
	 * Always set the range to a full range, otherwise we can get into
	 * several problems, from missing file extent items to represent holes
	 * when not using the NO_HOLES feature, to log tree corruption due to
	 * races between hole detection during logging and completion of ordered
	 * extents outside the range, to missing checksums due to ordered extents
	 * for which we flushed only a subset of their pages.
	 */
	start = 0;
	end = LLONG_MAX;
	len = (u64)LLONG_MAX + 1;

	/*
	 * We write the dirty pages in the range and wait until they complete
	 * outside of the ->i_mutex. That way the dirty pages can be flushed
	 * by multiple tasks in parallel, improving performance. See
	 * btrfs_wait_ordered_range for an explanation of the ASYNC check.
	 */
	ret = start_ordered_ops(inode, start, end);
	if (ret)
		goto out;

	if (skip_ilock)
		down_write(&inode->i_mmap_lock);
	else
		btrfs_inode_lock(inode, BTRFS_ILOCK_MMAP);

	atomic_inc(&root->log_batch);

	/*
	 * Before we acquired the inode's lock and the mmap lock, someone may
	 * have dirtied more pages in the target range. We need to make sure
	 * that writeback for any such pages does not start while we are logging
	 * the inode, because if it does, any of the following might happen when
	 * we are not doing a full inode sync:
	 *
	 * 1) We log an extent after its writeback finishes but before its
	 *    checksums are added to the csum tree, leading to -EIO errors
	 *    when attempting to read the extent after a log replay.
	 *
	 * 2) We can end up logging an extent before its writeback finishes.
	 *    Therefore after the log replay we will have a file extent item
	 *    pointing to an unwritten extent (and no data checksums as well).
	 *
	 * So trigger writeback for any eventual new dirty pages and then we
	 * wait for all ordered extents to complete below.
	 */
	ret = start_ordered_ops(inode, start, end);
	if (ret) {
		if (skip_ilock)
			up_write(&inode->i_mmap_lock);
		else
			btrfs_inode_unlock(inode, BTRFS_ILOCK_MMAP);
		goto out;
	}

	/*
	 * Always check for the full sync flag while holding the inode's lock,
	 * to avoid races with other tasks. The flag must be either set all the
	 * time during logging or always off all the time while logging.
	 * We check the flag here after starting delalloc above, because when
	 * running delalloc the full sync flag may be set if we need to drop
	 * extra extent map ranges due to temporary memory allocation failures.
	 */
	full_sync = test_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags);

	/*
	 * We have to do this here to avoid the priority inversion of waiting on
	 * IO of a lower priority task while holding a transaction open.
	 *
	 * For a full fsync we wait for the ordered extents to complete while
	 * for a fast fsync we wait just for writeback to complete, and then
	 * attach the ordered extents to the transaction so that a transaction
	 * commit waits for their completion, to avoid data loss if we fsync,
	 * the current transaction commits before the ordered extents complete
	 * and a power failure happens right after that.
	 *
	 * For zoned filesystem, if a write IO uses a ZONE_APPEND command, the
	 * logical address recorded in the ordered extent may change. We need
	 * to wait for the IO to stabilize the logical address.
	 */
	if (full_sync || btrfs_is_zoned(fs_info)) {
		ret = btrfs_wait_ordered_range(inode, start, len);
		clear_bit(BTRFS_INODE_COW_WRITE_ERROR, &inode->runtime_flags);
	} else {
		/*
		 * Get our ordered extents as soon as possible to avoid doing
		 * checksum lookups in the csum tree, and use instead the
		 * checksums attached to the ordered extents.
		 */
		btrfs_get_ordered_extents_for_logging(inode, &ctx.ordered_extents);
		ret = filemap_fdatawait_range(inode->vfs_inode.i_mapping, start, end);
		if (ret)
			goto out_release_extents;

		/*
		 * Check and clear the BTRFS_INODE_COW_WRITE_ERROR now after
		 * starting and waiting for writeback, because for buffered IO
		 * it may have been set during the end IO callback
		 * (end_bbio_data_write() -> btrfs_finish_ordered_extent()) in
		 * case an error happened and we need to wait for ordered
		 * extents to complete so that any extent maps that point to
		 * unwritten locations are dropped and we don't log them.
		 */
		if (test_and_clear_bit(BTRFS_INODE_COW_WRITE_ERROR, &inode->runtime_flags))
			ret = btrfs_wait_ordered_range(inode, start, len);
	}

	if (ret)
		goto out_release_extents;

	atomic_inc(&root->log_batch);

	if (skip_inode_logging(&ctx)) {
		/*
		 * We've had everything committed since the last time we were
		 * modified so clear this flag in case it was set for whatever
		 * reason, it's no longer relevant.
		 */
		clear_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags);
		/*
		 * An ordered extent might have started before and completed
		 * already with io errors, in which case the inode was not
		 * updated and we end up here. So check the inode's mapping
		 * for any errors that might have happened since we last
		 * called fsync.
		 */
		ret = filemap_check_wb_err(inode->vfs_inode.i_mapping, file->f_wb_err);
		goto out_release_extents;
	}

	btrfs_init_log_ctx_scratch_eb(&ctx);

	/*
	 * We use start here because we will need to wait on the IO to complete
	 * in btrfs_sync_log, which could require joining a transaction (for
	 * example checking cross references in the nocow path). If we use join
	 * here we could get into a situation where we're waiting on IO to
	 * happen that is blocked on a transaction trying to commit. With start
	 * we inc the extwriter counter, so we wait for all extwriters to exit
	 * before we start blocking joiners. This comment is to keep somebody
	 * from thinking they are super smart and changing this to
	 * btrfs_join_transaction *cough*Josef*cough*.
	 */
	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out_release_extents;
	}
	trans->in_fsync = true;

	ret = btrfs_log_dentry_safe(trans, dentry, &ctx);
	/*
	 * Scratch eb no longer needed, release before syncing log or commit
	 * transaction, to avoid holding unnecessary memory during such long
	 * operations.
	 */
	if (ctx.scratch_eb) {
		free_extent_buffer(ctx.scratch_eb);
		ctx.scratch_eb = NULL;
	}
	btrfs_release_log_ctx_extents(&ctx);
	if (ret < 0) {
		/* Fallthrough and commit/free transaction. */
		ret = BTRFS_LOG_FORCE_COMMIT;
	}

	/*
	 * We've logged all the items and now have a consistent version of the
	 * file in the log. It is possible that someone will come in and
	 * modify the file, but that's fine because the log is consistent on
	 * disk, and we have references to all of the file's extents.
	 *
	 * It is possible that someone will come in and log the file again,
	 * but that will end up using the synchronization inside
	 * btrfs_sync_log to keep things safe.
	 */
	if (skip_ilock)
		up_write(&inode->i_mmap_lock);
	else
		btrfs_inode_unlock(inode, BTRFS_ILOCK_MMAP);

	if (ret == BTRFS_NO_LOG_SYNC) {
		ret = btrfs_end_transaction(trans);
		goto out;
	}

	/* We successfully logged the inode, attempt to sync the log. */
	if (!ret) {
		ret = btrfs_sync_log(trans, root, &ctx);
		if (!ret) {
			ret = btrfs_end_transaction(trans);
			goto out;
		}
	}

	/*
	 * At this point we need to commit the transaction because we had
	 * btrfs_need_log_full_commit() or some other error.
	 *
	 * If we didn't do a full sync we have to stop the trans handle, wait on
	 * the ordered extents, start it again and commit the transaction. If
	 * we attempt to wait on the ordered extents here we could deadlock with
	 * something like fallocate() that is holding the extent lock trying to
	 * start a transaction while some other thread is trying to commit the
	 * transaction while we (fsync) are currently holding the transaction
	 * open.
	 */
	if (!full_sync) {
		ret = btrfs_end_transaction(trans);
		if (ret)
			goto out;
		ret = btrfs_wait_ordered_range(inode, start, len);
		if (ret)
			goto out;

		/*
		 * This is safe to use here because we're only interested in
		 * making sure the transaction that had the ordered extents is
		 * committed. We aren't waiting on anything past this point,
		 * we're purely getting the transaction and committing it.
		 */
		trans = btrfs_attach_transaction_barrier(root);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);

			/*
			 * We committed the transaction and there's no currently
			 * running transaction, this means everything we care
			 * about made it to disk and we are done.
			 */
			if (ret == -ENOENT)
				ret = 0;
			goto out;
		}
	}

	ret = btrfs_commit_transaction(trans);
out:
	free_extent_buffer(ctx.scratch_eb);
	ASSERT(list_empty(&ctx.list));
	ASSERT(list_empty(&ctx.conflict_inodes));
	err = file_check_and_advance_wb_err(file);
	if (!ret)
		ret = err;
	return ret > 0 ? -EIO : ret;

out_release_extents:
	btrfs_release_log_ctx_extents(&ctx);
	if (skip_ilock)
		up_write(&inode->i_mmap_lock);
	else
		btrfs_inode_unlock(inode, BTRFS_ILOCK_MMAP);
	goto out;
}
1882
1883 /*
1884 * btrfs_page_mkwrite() is not allowed to change the file size as it gets
1885 * called from a page fault handler when a page is first dirtied. Hence we must
1886 * be careful to check for EOF conditions here. We set the page up correctly
1887 * for a written page which means we get ENOSPC checking when writing into
1888 * holes and correct delalloc and unwritten extent mapping on filesystems that
1889 * support these features.
1890 *
1891 * We are not allowed to take the i_mutex here so we have to play games to
1892 * protect against truncate races as the page could now be beyond EOF. Because
1893 * truncate_setsize() writes the inode size before removing pages, once we have
1894 * the page lock we can determine safely if the page is beyond EOF. If it is not
1895 * beyond EOF, then the page is guaranteed safe against truncation until we
1896 * unlock the page.
1897 */
1898 static vm_fault_t btrfs_page_mkwrite(struct vm_fault *vmf)
1899 {
1900 struct page *page = vmf->page;
1901 struct folio *folio = page_folio(page);
1902 struct inode *inode = file_inode(vmf->vma->vm_file);
1903 struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
1904 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
1905 struct btrfs_ordered_extent *ordered;
1906 struct extent_state *cached_state = NULL;
1907 struct extent_changeset *data_reserved = NULL;
1908 unsigned long zero_start;
1909 loff_t size;
1910 vm_fault_t ret;
1911 int ret2;
1912 int reserved = 0;
1913 u64 reserved_space;
1914 u64 page_start;
1915 u64 page_end;
1916 u64 end;
1917
1918 ASSERT(folio_order(folio) == 0);
1919
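/*
 * We only get order-0 folios here, so reserve a single page worth of
 * data space up front. If the folio turns out to straddle EOF, the
 * excess is released further below.
 */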
1920 reserved_space = PAGE_SIZE;
1921
1922 sb_start_pagefault(inode->i_sb);
1923 page_start = folio_pos(folio);
1924 page_end = page_start + folio_size(folio) - 1;
1925 end = page_end;
1926
1927 /*
1928 * Reserving delalloc space after obtaining the page lock can lead to
1929 * deadlock. For example, if a dirty page is locked by this function
1930 * and the call to btrfs_delalloc_reserve_space() ends up triggering
1931 * dirty page write out, then the btrfs_writepages() function could
1932 * end up waiting indefinitely to get a lock on the page currently
1933 * being processed by btrfs_page_mkwrite() function.
1934 */
1935 ret2 = btrfs_delalloc_reserve_space(BTRFS_I(inode), &data_reserved,
1936 page_start, reserved_space);
1937 if (!ret2) {
1938 ret2 = file_update_time(vmf->vma->vm_file);
1939 reserved = 1;
1940 }
1941 if (ret2) {
1942 ret = vmf_error(ret2);
1943 if (reserved)
1944 goto out;
1945 goto out_noreserve;
1946 }
1947
1948 /* Make the VM retry the fault. */
1949 ret = VM_FAULT_NOPAGE;
1950 again:
1951 down_read(&BTRFS_I(inode)->i_mmap_lock);
1952 folio_lock(folio);
1953 size = i_size_read(inode);
1954
1955 if ((folio->mapping != inode->i_mapping) ||
1956 (page_start >= size)) {
1957 /* Page got truncated out from underneath us. */
1958 goto out_unlock;
1959 }
1960 folio_wait_writeback(folio);
1961
1962 lock_extent(io_tree, page_start, page_end, &cached_state);
1963 ret2 = set_folio_extent_mapped(folio);
1964 if (ret2 < 0) {
1965 ret = vmf_error(ret2);
1966 unlock_extent(io_tree, page_start, page_end, &cached_state);
1967 goto out_unlock;
1968 }
1969
1970 /*
1971 * We can't set the delalloc bits if there are pending ordered
1972 * extents. Drop our locks and wait for them to finish.
1973 */
1974 ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), page_start, PAGE_SIZE);
1975 if (ordered) {
1976 unlock_extent(io_tree, page_start, page_end, &cached_state);
1977 folio_unlock(folio);
1978 up_read(&BTRFS_I(inode)->i_mmap_lock);
1979 btrfs_start_ordered_extent(ordered);
1980 btrfs_put_ordered_extent(ordered);
1981 goto again;
1982 }
1983
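/*
 * If this folio contains the EOF block, trim the reservation to the
 * sector-aligned tail of the file and give back the rest.
 */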
1984 if (folio->index == ((size - 1) >> PAGE_SHIFT)) {
1985 reserved_space = round_up(size - page_start, fs_info->sectorsize);
1986 if (reserved_space < PAGE_SIZE) {
1987 end = page_start + reserved_space - 1;
1988 btrfs_delalloc_release_space(BTRFS_I(inode),
1989 data_reserved, page_start,
1990 PAGE_SIZE - reserved_space, true);
1991 }
1992 }
1993
1994 /*
1995 * page_mkwrite gets called when the page is first dirtied after it's
1996 * faulted in, but write(2) could also dirty a page and set delalloc
1997 * bits, thus in this case, for space accounting reasons, we still need
1998 * to clear any delalloc bits within this page range since we have to
1999 * reserve data & metadata space before lock_page() (see above comments).
2000 */
2001 clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, end,
2002 EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING |
2003 EXTENT_DEFRAG, &cached_state);
2004
2005 ret2 = btrfs_set_extent_delalloc(BTRFS_I(inode), page_start, end, 0,
2006 &cached_state);
2007 if (ret2) {
2008 unlock_extent(io_tree, page_start, page_end, &cached_state);
2009 ret = VM_FAULT_SIGBUS;
2010 goto out_unlock;
2011 }
2012
2013 /* Page is wholly or partially inside EOF. */
2014 if (page_start + folio_size(folio) > size)
2015 zero_start = offset_in_folio(folio, size);
2016 else
2017 zero_start = PAGE_SIZE;
2018
2019 if (zero_start != PAGE_SIZE)
2020 folio_zero_range(folio, zero_start, folio_size(folio) - zero_start);
2021
2022 btrfs_folio_clear_checked(fs_info, folio, page_start, PAGE_SIZE);
2023 btrfs_folio_set_dirty(fs_info, folio, page_start, end + 1 - page_start);
2024 btrfs_folio_set_uptodate(fs_info, folio, page_start, end + 1 - page_start);
2025
2026 btrfs_set_inode_last_sub_trans(BTRFS_I(inode));
2027
2028 unlock_extent(io_tree, page_start, page_end, &cached_state);
2029 up_read(&BTRFS_I(inode)->i_mmap_lock);
2030
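/*
 * Release the metadata reservation for the outstanding extent taken by
 * btrfs_delalloc_reserve_space() above; the data space stays reserved
 * for the upcoming writeback.
 */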
2031 btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE);
2032 sb_end_pagefault(inode->i_sb);
2033 extent_changeset_free(data_reserved);
2034 return VM_FAULT_LOCKED;
2035
2036 out_unlock:
2037 folio_unlock(folio);
2038 up_read(&BTRFS_I(inode)->i_mmap_lock);
2039 out:
2040 btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE);
2041 btrfs_delalloc_release_space(BTRFS_I(inode), data_reserved, page_start,
2042 reserved_space, (ret != 0));
2043 out_noreserve:
2044 sb_end_pagefault(inode->i_sb);
2045 extent_changeset_free(data_reserved);
2046 return ret;
2047 }
2048
2049 static const struct vm_operations_struct btrfs_file_vm_ops = {
2050 .fault = filemap_fault,
2051 .map_pages = filemap_map_pages,
2052 .page_mkwrite = btrfs_page_mkwrite,
2053 };
2054
2055 static int btrfs_file_mmap(struct file *filp, struct vm_area_struct *vma)
2056 {
2057 struct address_space *mapping = filp->f_mapping;
2058
2059 if (!mapping->a_ops->read_folio)
2060 return -ENOEXEC;
2061
2062 file_accessed(filp);
2063 vma->vm_ops = &btrfs_file_vm_ops;
2064
2065 return 0;
2066 }
2067
2068 static int hole_mergeable(struct btrfs_inode *inode, struct extent_buffer *leaf,
2069 int slot, u64 start, u64 end)
2070 {
2071 struct btrfs_file_extent_item *fi;
2072 struct btrfs_key key;
2073
2074 if (slot < 0 || slot >= btrfs_header_nritems(leaf))
2075 return 0;
2076
2077 btrfs_item_key_to_cpu(leaf, &key, slot);
2078 if (key.objectid != btrfs_ino(inode) ||
2079 key.type != BTRFS_EXTENT_DATA_KEY)
2080 return 0;
2081
2082 fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
2083
2084 if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG)
2085 return 0;
2086
2087 if (btrfs_file_extent_disk_bytenr(leaf, fi))
2088 return 0;
2089
2090 if (key.offset == end)
2091 return 1;
2092 if (key.offset + btrfs_file_extent_num_bytes(leaf, fi) == start)
2093 return 1;
2094 return 0;
2095 }
2096
2097 static int fill_holes(struct btrfs_trans_handle *trans,
2098 struct btrfs_inode *inode,
2099 struct btrfs_path *path, u64 offset, u64 end)
2100 {
2101 struct btrfs_fs_info *fs_info = trans->fs_info;
2102 struct btrfs_root *root = inode->root;
2103 struct extent_buffer *leaf;
2104 struct btrfs_file_extent_item *fi;
2105 struct extent_map *hole_em;
2106 struct btrfs_key key;
2107 int ret;
2108
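/*
 * With the NO_HOLES incompat feature there are no explicit hole file
 * extent items, so only the in-memory extent maps need updating (done
 * at the "out" label).
 */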
2109 if (btrfs_fs_incompat(fs_info, NO_HOLES))
2110 goto out;
2111
2112 key.objectid = btrfs_ino(inode);
2113 key.type = BTRFS_EXTENT_DATA_KEY;
2114 key.offset = offset;
2115
2116 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
2117 if (ret <= 0) {
2118 /*
2119 * We should have dropped this offset, so if we find it then
2120 * something has gone horribly wrong.
2121 */
2122 if (ret == 0)
2123 ret = -EINVAL;
2124 return ret;
2125 }
2126
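/*
 * Try to merge the new hole with an adjacent existing hole extent item
 * (the slot before or at the current one) by extending that item,
 * instead of inserting a brand new one.
 */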
2127 leaf = path->nodes[0];
2128 if (hole_mergeable(inode, leaf, path->slots[0] - 1, offset, end)) {
2129 u64 num_bytes;
2130
2131 path->slots[0]--;
2132 fi = btrfs_item_ptr(leaf, path->slots[0],
2133 struct btrfs_file_extent_item);
2134 num_bytes = btrfs_file_extent_num_bytes(leaf, fi) +
2135 end - offset;
2136 btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
2137 btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes);
2138 btrfs_set_file_extent_offset(leaf, fi, 0);
2139 btrfs_set_file_extent_generation(leaf, fi, trans->transid);
2140 btrfs_mark_buffer_dirty(trans, leaf);
2141 goto out;
2142 }
2143
2144 if (hole_mergeable(inode, leaf, path->slots[0], offset, end)) {
2145 u64 num_bytes;
2146
2147 key.offset = offset;
2148 btrfs_set_item_key_safe(trans, path, &key);
2149 fi = btrfs_item_ptr(leaf, path->slots[0],
2150 struct btrfs_file_extent_item);
2151 num_bytes = btrfs_file_extent_num_bytes(leaf, fi) + end -
2152 offset;
2153 btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
2154 btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes);
2155 btrfs_set_file_extent_offset(leaf, fi, 0);
2156 btrfs_set_file_extent_generation(leaf, fi, trans->transid);
2157 btrfs_mark_buffer_dirty(trans, leaf);
2158 goto out;
2159 }
2160 btrfs_release_path(path);
2161
2162 ret = btrfs_insert_hole_extent(trans, root, btrfs_ino(inode), offset,
2163 end - offset);
2164 if (ret)
2165 return ret;
2166
2167 out:
2168 btrfs_release_path(path);
2169
2170 hole_em = alloc_extent_map();
2171 if (!hole_em) {
2172 btrfs_drop_extent_map_range(inode, offset, end - 1, false);
2173 btrfs_set_inode_full_sync(inode);
2174 } else {
2175 hole_em->start = offset;
2176 hole_em->len = end - offset;
2177 hole_em->ram_bytes = hole_em->len;
2178
2179 hole_em->disk_bytenr = EXTENT_MAP_HOLE;
2180 hole_em->disk_num_bytes = 0;
2181 hole_em->generation = trans->transid;
2182
2183 ret = btrfs_replace_extent_map_range(inode, hole_em, true);
2184 free_extent_map(hole_em);
2185 if (ret)
2186 btrfs_set_inode_full_sync(inode);
2187 }
2188
2189 return 0;
2190 }
2191
2192 /*
2193 * Find a hole extent on the given inode and change start/len to the end of
2194 * the hole extent (a hole/vacuum extent whose em->start <= start &&
2195 * em->start + em->len > start).
2196 * When a hole extent is found, return 1 and modify start/len.
2197 */
2198 static int find_first_non_hole(struct btrfs_inode *inode, u64 *start, u64 *len)
2199 {
2200 struct btrfs_fs_info *fs_info = inode->root->fs_info;
2201 struct extent_map *em;
2202 int ret = 0;
2203
2204 em = btrfs_get_extent(inode, NULL,
2205 round_down(*start, fs_info->sectorsize),
2206 round_up(*len, fs_info->sectorsize));
2207 if (IS_ERR(em))
2208 return PTR_ERR(em);
2209
2210 /* Hole or vacuum extent(only exists in no-hole mode) */
2211 if (em->disk_bytenr == EXTENT_MAP_HOLE) {
2212 ret = 1;
2213 *len = em->start + em->len > *start + *len ?
2214 0 : *start + *len - em->start - em->len;
2215 *start = em->start + em->len;
2216 }
2217 free_extent_map(em);
2218 return ret;
2219 }
2220
2221 static void btrfs_punch_hole_lock_range(struct inode *inode,
2222 const u64 lockstart,
2223 const u64 lockend,
2224 struct extent_state **cached_state)
2225 {
2226 /*
2227 * For the subpage case, if the range is not at a page boundary, we could
2228 * have pages at the leading/trailing part of the range.
2229 * This could lead to an infinite loop since filemap_range_has_page()
2230 * will always return true.
2231 * So here we need to do extra page alignment for
2232 * filemap_range_has_page().
2233 */
2234 const u64 page_lockstart = round_up(lockstart, PAGE_SIZE);
2235 const u64 page_lockend = round_down(lockend + 1, PAGE_SIZE) - 1;
2236
2237 while (1) {
2238 truncate_pagecache_range(inode, lockstart, lockend);
2239
2240 lock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend,
2241 cached_state);
2242 /*
2243 * We can't have ordered extents in the range, nor dirty/writeback
2244 * pages, because we have locked the inode's VFS lock in exclusive
2245 * mode, we have locked the inode's i_mmap_lock in exclusive mode,
2246 * we have flushed all delalloc in the range and we have waited
2247 * for any ordered extents in the range to complete.
2248 * We can race with anyone reading pages from this range, so after
2249 * locking the range check if we have pages in the range, and if
2250 * we do, unlock the range and retry.
2251 */
2252 if (!filemap_range_has_page(inode->i_mapping, page_lockstart,
2253 page_lockend))
2254 break;
2255
2256 unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend,
2257 cached_state);
2258 }
2259
2260 btrfs_assert_inode_range_clean(BTRFS_I(inode), lockstart, lockend);
2261 }
2262
2263 static int btrfs_insert_replace_extent(struct btrfs_trans_handle *trans,
2264 struct btrfs_inode *inode,
2265 struct btrfs_path *path,
2266 struct btrfs_replace_extent_info *extent_info,
2267 const u64 replace_len,
2268 const u64 bytes_to_drop)
2269 {
2270 struct btrfs_fs_info *fs_info = trans->fs_info;
2271 struct btrfs_root *root = inode->root;
2272 struct btrfs_file_extent_item *extent;
2273 struct extent_buffer *leaf;
2274 struct btrfs_key key;
2275 int slot;
2276 int ret;
2277
2278 if (replace_len == 0)
2279 return 0;
2280
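/*
 * Replacing a range with a hole on a NO_HOLES filesystem needs no file
 * extent item, only an update to the inode's byte accounting.
 */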
2281 if (extent_info->disk_offset == 0 &&
2282 btrfs_fs_incompat(fs_info, NO_HOLES)) {
2283 btrfs_update_inode_bytes(inode, 0, bytes_to_drop);
2284 return 0;
2285 }
2286
2287 key.objectid = btrfs_ino(inode);
2288 key.type = BTRFS_EXTENT_DATA_KEY;
2289 key.offset = extent_info->file_offset;
2290 ret = btrfs_insert_empty_item(trans, root, path, &key,
2291 sizeof(struct btrfs_file_extent_item));
2292 if (ret)
2293 return ret;
2294 leaf = path->nodes[0];
2295 slot = path->slots[0];
2296 write_extent_buffer(leaf, extent_info->extent_buf,
2297 btrfs_item_ptr_offset(leaf, slot),
2298 sizeof(struct btrfs_file_extent_item));
2299 extent = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
2300 ASSERT(btrfs_file_extent_type(leaf, extent) != BTRFS_FILE_EXTENT_INLINE);
2301 btrfs_set_file_extent_offset(leaf, extent, extent_info->data_offset);
2302 btrfs_set_file_extent_num_bytes(leaf, extent, replace_len);
2303 if (extent_info->is_new_extent)
2304 btrfs_set_file_extent_generation(leaf, extent, trans->transid);
2305 btrfs_mark_buffer_dirty(trans, leaf);
2306 btrfs_release_path(path);
2307
2308 ret = btrfs_inode_set_file_extent_range(inode, extent_info->file_offset,
2309 replace_len);
2310 if (ret)
2311 return ret;
2312
2313 /* If it's a hole, nothing more needs to be done. */
2314 if (extent_info->disk_offset == 0) {
2315 btrfs_update_inode_bytes(inode, 0, bytes_to_drop);
2316 return 0;
2317 }
2318
2319 btrfs_update_inode_bytes(inode, replace_len, bytes_to_drop);
2320
2321 if (extent_info->is_new_extent && extent_info->insertions == 0) {
2322 key.objectid = extent_info->disk_offset;
2323 key.type = BTRFS_EXTENT_ITEM_KEY;
2324 key.offset = extent_info->disk_len;
2325 ret = btrfs_alloc_reserved_file_extent(trans, root,
2326 btrfs_ino(inode),
2327 extent_info->file_offset,
2328 extent_info->qgroup_reserved,
2329 &key);
2330 } else {
2331 struct btrfs_ref ref = {
2332 .action = BTRFS_ADD_DELAYED_REF,
2333 .bytenr = extent_info->disk_offset,
2334 .num_bytes = extent_info->disk_len,
2335 .owning_root = btrfs_root_id(root),
2336 .ref_root = btrfs_root_id(root),
2337 };
2338 u64 ref_offset;
2339
2340 ref_offset = extent_info->file_offset - extent_info->data_offset;
2341 btrfs_init_data_ref(&ref, btrfs_ino(inode), ref_offset, 0, false);
2342 ret = btrfs_inc_extent_ref(trans, &ref);
2343 }
2344
2345 extent_info->insertions++;
2346
2347 return ret;
2348 }
2349
2350 /*
2351 * The respective range must have been previously locked, as well as the inode.
2352 * The end offset is inclusive (last byte of the range).
2353 * @extent_info is NULL for fallocate's hole punching and non-NULL when replacing
2354 * the file range with an extent.
2355 * When not punching a hole, we don't want to end up in a state where we dropped
2356 * extents without inserting a new one, so we must abort the transaction to avoid
2357 * a corruption.
2358 */
2359 int btrfs_replace_file_extents(struct btrfs_inode *inode,
2360 struct btrfs_path *path, const u64 start,
2361 const u64 end,
2362 struct btrfs_replace_extent_info *extent_info,
2363 struct btrfs_trans_handle **trans_out)
2364 {
2365 struct btrfs_drop_extents_args drop_args = { 0 };
2366 struct btrfs_root *root = inode->root;
2367 struct btrfs_fs_info *fs_info = root->fs_info;
2368 u64 min_size = btrfs_calc_insert_metadata_size(fs_info, 1);
2369 u64 ino_size = round_up(inode->vfs_inode.i_size, fs_info->sectorsize);
2370 struct btrfs_trans_handle *trans = NULL;
2371 struct btrfs_block_rsv *rsv;
2372 unsigned int rsv_count;
2373 u64 cur_offset;
2374 u64 len = end - start;
2375 int ret = 0;
2376
2377 if (end <= start)
2378 return -EINVAL;
2379
2380 rsv = btrfs_alloc_block_rsv(fs_info, BTRFS_BLOCK_RSV_TEMP);
2381 if (!rsv) {
2382 ret = -ENOMEM;
2383 goto out;
2384 }
2385 rsv->size = btrfs_calc_insert_metadata_size(fs_info, 1);
2386 rsv->failfast = true;
2387
2388 /*
2389 * 1 - update the inode
2390 * 1 - removing the extents in the range
2391 * 1 - adding the hole extent if no_holes isn't set or if we are
2392 * replacing the range with a new extent
2393 */
2394 if (!btrfs_fs_incompat(fs_info, NO_HOLES) || extent_info)
2395 rsv_count = 3;
2396 else
2397 rsv_count = 2;
2398
2399 trans = btrfs_start_transaction(root, rsv_count);
2400 if (IS_ERR(trans)) {
2401 ret = PTR_ERR(trans);
2402 trans = NULL;
2403 goto out_free;
2404 }
2405
2406 ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv, rsv,
2407 min_size, false);
2408 if (WARN_ON(ret))
2409 goto out_trans;
2410 trans->block_rsv = rsv;
2411
2412 cur_offset = start;
2413 drop_args.path = path;
2414 drop_args.end = end + 1;
2415 drop_args.drop_cache = true;
2416 while (cur_offset < end) {
2417 drop_args.start = cur_offset;
2418 ret = btrfs_drop_extents(trans, root, inode, &drop_args);
2419 /* If we are punching a hole decrement the inode's byte count */
2420 if (!extent_info)
2421 btrfs_update_inode_bytes(inode, 0,
2422 drop_args.bytes_found);
2423 if (ret != -ENOSPC) {
2424 /*
2425 * The only time we don't want to abort is if we are
2426 * attempting to clone a partial inline extent, in which
2427 * case we'll get EOPNOTSUPP. However if we aren't
2428 * cloning we need to abort no matter what, because if we
2429 * got EOPNOTSUPP via prealloc then we messed up and
2430 * need to abort.
2431 */
2432 if (ret &&
2433 (ret != -EOPNOTSUPP ||
2434 (extent_info && extent_info->is_new_extent)))
2435 btrfs_abort_transaction(trans, ret);
2436 break;
2437 }
2438
2439 trans->block_rsv = &fs_info->trans_block_rsv;
2440
2441 if (!extent_info && cur_offset < drop_args.drop_end &&
2442 cur_offset < ino_size) {
2443 ret = fill_holes(trans, inode, path, cur_offset,
2444 drop_args.drop_end);
2445 if (ret) {
2446 /*
2447 * If we failed then we didn't insert our hole
2448 * entries for the area we dropped, so now the
2449 * fs is corrupted, so we must abort the
2450 * transaction.
2451 */
2452 btrfs_abort_transaction(trans, ret);
2453 break;
2454 }
2455 } else if (!extent_info && cur_offset < drop_args.drop_end) {
2456 /*
2457 * We are past the i_size here, but since we didn't
2458 * insert holes we need to clear the mapped area so we
2459 * know to not set disk_i_size in this area until a new
2460 * file extent is inserted here.
2461 */
2462 ret = btrfs_inode_clear_file_extent_range(inode,
2463 cur_offset,
2464 drop_args.drop_end - cur_offset);
2465 if (ret) {
2466 /*
2467 * We couldn't clear our area, so we could
2468 * presumably adjust up and corrupt the fs, so
2469 * we need to abort.
2470 */
2471 btrfs_abort_transaction(trans, ret);
2472 break;
2473 }
2474 }
2475
2476 if (extent_info &&
2477 drop_args.drop_end > extent_info->file_offset) {
2478 u64 replace_len = drop_args.drop_end -
2479 extent_info->file_offset;
2480
2481 ret = btrfs_insert_replace_extent(trans, inode, path,
2482 extent_info, replace_len,
2483 drop_args.bytes_found);
2484 if (ret) {
2485 btrfs_abort_transaction(trans, ret);
2486 break;
2487 }
2488 extent_info->data_len -= replace_len;
2489 extent_info->data_offset += replace_len;
2490 extent_info->file_offset += replace_len;
2491 }
2492
2493 /*
2494 * We are releasing our handle on the transaction, balance the
2495 * dirty pages of the btree inode and flush delayed items, and
2496 * then get a new transaction handle, which may now point to a
2497 * new transaction in case someone else may have committed the
2498 * transaction we used to replace/drop file extent items. So
2499 * bump the inode's iversion and update mtime and ctime except
2500 * if we are called from a dedupe context. This is because a
2501 * power failure/crash may happen after the transaction is
2502 * committed and before we finish replacing/dropping all the
2503 * file extent items we need.
2504 */
2505 inode_inc_iversion(&inode->vfs_inode);
2506
2507 if (!extent_info || extent_info->update_times)
2508 inode_set_mtime_to_ts(&inode->vfs_inode,
2509 inode_set_ctime_current(&inode->vfs_inode));
2510
2511 ret = btrfs_update_inode(trans, inode);
2512 if (ret)
2513 break;
2514
2515 btrfs_end_transaction(trans);
2516 btrfs_btree_balance_dirty(fs_info);
2517
2518 trans = btrfs_start_transaction(root, rsv_count);
2519 if (IS_ERR(trans)) {
2520 ret = PTR_ERR(trans);
2521 trans = NULL;
2522 break;
2523 }
2524
2525 ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv,
2526 rsv, min_size, false);
2527 if (WARN_ON(ret))
2528 break;
2529 trans->block_rsv = rsv;
2530
2531 cur_offset = drop_args.drop_end;
2532 len = end - cur_offset;
2533 if (!extent_info && len) {
2534 ret = find_first_non_hole(inode, &cur_offset, &len);
2535 if (unlikely(ret < 0))
2536 break;
2537 if (ret && !len) {
2538 ret = 0;
2539 break;
2540 }
2541 }
2542 }
2543
2544 /*
2545 * If we were cloning, force the next fsync to be a full one since we
2546 * replaced (or just dropped in the case of cloning holes when
2547 * NO_HOLES is enabled) file extent items and did not setup new extent
2548 * maps for the replacement extents (or holes).
2549 */
2550 if (extent_info && !extent_info->is_new_extent)
2551 btrfs_set_inode_full_sync(inode);
2552
2553 if (ret)
2554 goto out_trans;
2555
2556 trans->block_rsv = &fs_info->trans_block_rsv;
2557 /*
2558 * If we are using the NO_HOLES feature we might have already had a
2559 * hole that overlaps a part of the region [lockstart, lockend] and
2560 * ends at (or beyond) lockend. Since we have no file extent items to
2561 * represent holes, drop_end can be less than lockend and so we must
2562 * make sure we have an extent map representing the existing hole (the
2563 * call to __btrfs_drop_extents() might have dropped the existing extent
2564 * map representing the existing hole), otherwise the fast fsync path
2565 * will not record the existence of the hole region
2566 * [existing_hole_start, lockend].
2567 */
2568 if (drop_args.drop_end <= end)
2569 drop_args.drop_end = end + 1;
2570 /*
2571 * Don't insert a file hole extent item if it's for a range beyond EOF
2572 * (because it's useless) or if it represents a 0-byte range (when
2573 * cur_offset == drop_end).
2574 */
2575 if (!extent_info && cur_offset < ino_size &&
2576 cur_offset < drop_args.drop_end) {
2577 ret = fill_holes(trans, inode, path, cur_offset,
2578 drop_args.drop_end);
2579 if (ret) {
2580 /* Same comment as above. */
2581 btrfs_abort_transaction(trans, ret);
2582 goto out_trans;
2583 }
2584 } else if (!extent_info && cur_offset < drop_args.drop_end) {
2585 /* See the comment in the loop above for the reasoning here. */
2586 ret = btrfs_inode_clear_file_extent_range(inode, cur_offset,
2587 drop_args.drop_end - cur_offset);
2588 if (ret) {
2589 btrfs_abort_transaction(trans, ret);
2590 goto out_trans;
2591 }
2592
2593 }
2594 if (extent_info) {
2595 ret = btrfs_insert_replace_extent(trans, inode, path,
2596 extent_info, extent_info->data_len,
2597 drop_args.bytes_found);
2598 if (ret) {
2599 btrfs_abort_transaction(trans, ret);
2600 goto out_trans;
2601 }
2602 }
2603
2604 out_trans:
2605 if (!trans)
2606 goto out_free;
2607
2608 trans->block_rsv = &fs_info->trans_block_rsv;
2609 if (ret)
2610 btrfs_end_transaction(trans);
2611 else
2612 *trans_out = trans;
2613 out_free:
2614 btrfs_free_block_rsv(fs_info, rsv);
2615 out:
2616 return ret;
2617 }
2618
2619 static int btrfs_punch_hole(struct file *file, loff_t offset, loff_t len)
2620 {
2621 struct inode *inode = file_inode(file);
2622 struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
2623 struct btrfs_root *root = BTRFS_I(inode)->root;
2624 struct extent_state *cached_state = NULL;
2625 struct btrfs_path *path;
2626 struct btrfs_trans_handle *trans = NULL;
2627 u64 lockstart;
2628 u64 lockend;
2629 u64 tail_start;
2630 u64 tail_len;
2631 u64 orig_start = offset;
2632 int ret = 0;
2633 bool same_block;
2634 u64 ino_size;
2635 bool truncated_block = false;
2636 bool updated_inode = false;
2637
2638 btrfs_inode_lock(BTRFS_I(inode), BTRFS_ILOCK_MMAP);
2639
2640 ret = btrfs_wait_ordered_range(BTRFS_I(inode), offset, len);
2641 if (ret)
2642 goto out_only_mutex;
2643
2644 ino_size = round_up(inode->i_size, fs_info->sectorsize);
2645 ret = find_first_non_hole(BTRFS_I(inode), &offset, &len);
2646 if (ret < 0)
2647 goto out_only_mutex;
2648 if (ret && !len) {
2649 /* Already in a large hole */
2650 ret = 0;
2651 goto out_only_mutex;
2652 }
2653
2654 ret = file_modified(file);
2655 if (ret)
2656 goto out_only_mutex;
2657
2658 lockstart = round_up(offset, fs_info->sectorsize);
2659 lockend = round_down(offset + len, fs_info->sectorsize) - 1;
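/*
 * Check whether the start and end of the range to punch fall within
 * the same filesystem block.
 */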
2660 same_block = (BTRFS_BYTES_TO_BLKS(fs_info, offset))
2661 == (BTRFS_BYTES_TO_BLKS(fs_info, offset + len - 1));
2662 /*
2663 * We needn't truncate any block which is beyond the end of the file
2664 * because we are sure there is no data there.
2665 */
2666 /*
2667 * Only do this if we are in the same block and we aren't doing the
2668 * entire block.
2669 */
2670 if (same_block && len < fs_info->sectorsize) {
2671 if (offset < ino_size) {
2672 truncated_block = true;
2673 ret = btrfs_truncate_block(BTRFS_I(inode), offset, len,
2674 0);
2675 } else {
2676 ret = 0;
2677 }
2678 goto out_only_mutex;
2679 }
2680
2681 /* zero back part of the first block */
2682 if (offset < ino_size) {
2683 truncated_block = true;
2684 ret = btrfs_truncate_block(BTRFS_I(inode), offset, 0, 0);
2685 if (ret) {
2686 btrfs_inode_unlock(BTRFS_I(inode), BTRFS_ILOCK_MMAP);
2687 return ret;
2688 }
2689 }
2690
2691 /* Check the aligned pages after the first unaligned page.
2692 * If offset != orig_start, the first unaligned page and
2693 * several following pages are already in holes, so the
2694 * extra check can be skipped. */
2695 if (offset == orig_start) {
2696 /* after truncate page, check hole again */
2697 len = offset + len - lockstart;
2698 offset = lockstart;
2699 ret = find_first_non_hole(BTRFS_I(inode), &offset, &len);
2700 if (ret < 0)
2701 goto out_only_mutex;
2702 if (ret && !len) {
2703 ret = 0;
2704 goto out_only_mutex;
2705 }
2706 lockstart = offset;
2707 }
2708
2709 /* Check the tail unaligned part is in a hole */
2710 tail_start = lockend + 1;
2711 tail_len = offset + len - tail_start;
2712 if (tail_len) {
2713 ret = find_first_non_hole(BTRFS_I(inode), &tail_start, &tail_len);
2714 if (unlikely(ret < 0))
2715 goto out_only_mutex;
2716 if (!ret) {
2717 /* zero the front end of the last page */
2718 if (tail_start + tail_len < ino_size) {
2719 truncated_block = true;
2720 ret = btrfs_truncate_block(BTRFS_I(inode),
2721 tail_start + tail_len,
2722 0, 1);
2723 if (ret)
2724 goto out_only_mutex;
2725 }
2726 }
2727 }
2728
2729 if (lockend < lockstart) {
2730 ret = 0;
2731 goto out_only_mutex;
2732 }
2733
2734 btrfs_punch_hole_lock_range(inode, lockstart, lockend, &cached_state);
2735
2736 path = btrfs_alloc_path();
2737 if (!path) {
2738 ret = -ENOMEM;
2739 goto out;
2740 }
2741
2742 ret = btrfs_replace_file_extents(BTRFS_I(inode), path, lockstart,
2743 lockend, NULL, &trans);
2744 btrfs_free_path(path);
2745 if (ret)
2746 goto out;
2747
2748 ASSERT(trans != NULL);
2749 inode_inc_iversion(inode);
2750 inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
2751 ret = btrfs_update_inode(trans, BTRFS_I(inode));
2752 updated_inode = true;
2753 btrfs_end_transaction(trans);
2754 btrfs_btree_balance_dirty(fs_info);
2755 out:
2756 unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend,
2757 &cached_state);
2758 out_only_mutex:
2759 if (!updated_inode && truncated_block && !ret) {
2760 /*
2761 * If we only end up zeroing part of a page, we still need to
2762 * update the inode item, so that all the time fields are
2763 * updated as well as the necessary btrfs inode in-memory fields
2764 * for detecting, at fsync time, whether the inode isn't yet in
2765 * the log tree or is there but not up to date.
2766 */
2767 struct timespec64 now = inode_set_ctime_current(inode);
2768
2769 inode_inc_iversion(inode);
2770 inode_set_mtime_to_ts(inode, now);
2771 trans = btrfs_start_transaction(root, 1);
2772 if (IS_ERR(trans)) {
2773 ret = PTR_ERR(trans);
2774 } else {
2775 int ret2;
2776
2777 ret = btrfs_update_inode(trans, BTRFS_I(inode));
2778 ret2 = btrfs_end_transaction(trans);
2779 if (!ret)
2780 ret = ret2;
2781 }
2782 }
2783 btrfs_inode_unlock(BTRFS_I(inode), BTRFS_ILOCK_MMAP);
2784 return ret;
2785 }
2786
2787 /* Helper structure to record which range is already reserved */
2788 struct falloc_range {
2789 struct list_head list;
2790 u64 start;
2791 u64 len;
2792 };
2793
2794 /*
2795 * Helper function to add falloc range
2796 *
2797 * Caller should have locked the larger range of extent containing
2798 * [start, len)
2799 */
2800 static int add_falloc_range(struct list_head *head, u64 start, u64 len)
2801 {
2802 struct falloc_range *range = NULL;
2803
2804 if (!list_empty(head)) {
2805 /*
2806 * As fallocate iterates by bytenr order, we only need to check
2807 * the last range.
2808 */
2809 range = list_last_entry(head, struct falloc_range, list);
2810 if (range->start + range->len == start) {
2811 range->len += len;
2812 return 0;
2813 }
2814 }
2815
2816 range = kmalloc(sizeof(*range), GFP_KERNEL);
2817 if (!range)
2818 return -ENOMEM;
2819 range->start = start;
2820 range->len = len;
2821 list_add_tail(&range->list, head);
2822 return 0;
2823 }
2824
2825 static int btrfs_fallocate_update_isize(struct inode *inode,
2826 const u64 end,
2827 const int mode)
2828 {
2829 struct btrfs_trans_handle *trans;
2830 struct btrfs_root *root = BTRFS_I(inode)->root;
2831 int ret;
2832 int ret2;
2833
2834 if (mode & FALLOC_FL_KEEP_SIZE || end <= i_size_read(inode))
2835 return 0;
2836
2837 trans = btrfs_start_transaction(root, 1);
2838 if (IS_ERR(trans))
2839 return PTR_ERR(trans);
2840
2841 inode_set_ctime_current(inode);
2842 i_size_write(inode, end);
2843 btrfs_inode_safe_disk_i_size_write(BTRFS_I(inode), 0);
2844 ret = btrfs_update_inode(trans, BTRFS_I(inode));
2845 ret2 = btrfs_end_transaction(trans);
2846
2847 return ret ? ret : ret2;
2848 }
2849
2850 enum {
2851 RANGE_BOUNDARY_WRITTEN_EXTENT,
2852 RANGE_BOUNDARY_PREALLOC_EXTENT,
2853 RANGE_BOUNDARY_HOLE,
2854 };
2855
2856 static int btrfs_zero_range_check_range_boundary(struct btrfs_inode *inode,
2857 u64 offset)
2858 {
2859 const u64 sectorsize = inode->root->fs_info->sectorsize;
2860 struct extent_map *em;
2861 int ret;
2862
2863 offset = round_down(offset, sectorsize);
2864 em = btrfs_get_extent(inode, NULL, offset, sectorsize);
2865 if (IS_ERR(em))
2866 return PTR_ERR(em);
2867
2868 if (em->disk_bytenr == EXTENT_MAP_HOLE)
2869 ret = RANGE_BOUNDARY_HOLE;
2870 else if (em->flags & EXTENT_FLAG_PREALLOC)
2871 ret = RANGE_BOUNDARY_PREALLOC_EXTENT;
2872 else
2873 ret = RANGE_BOUNDARY_WRITTEN_EXTENT;
2874
2875 free_extent_map(em);
2876 return ret;
2877 }
2878
2879 static int btrfs_zero_range(struct inode *inode,
2880 loff_t offset,
2881 loff_t len,
2882 const int mode)
2883 {
2884 struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
2885 struct extent_map *em;
2886 struct extent_changeset *data_reserved = NULL;
2887 int ret;
2888 u64 alloc_hint = 0;
2889 const u64 sectorsize = fs_info->sectorsize;
2890 u64 alloc_start = round_down(offset, sectorsize);
2891 u64 alloc_end = round_up(offset + len, sectorsize);
2892 u64 bytes_to_reserve = 0;
2893 bool space_reserved = false;
2894
2895 em = btrfs_get_extent(BTRFS_I(inode), NULL, alloc_start,
2896 alloc_end - alloc_start);
2897 if (IS_ERR(em)) {
2898 ret = PTR_ERR(em);
2899 goto out;
2900 }
2901
2902 /*
2903 * Avoid hole punching and extent allocation for some cases. More cases
2904 * could be considered, but those are unlikely to be common and we keep
2905 * things as simple as possible for now. Also, intentionally, if the target
2906 * range contains one or more prealloc extents together with regular
2907 * extents and holes, we drop all the existing extents and allocate a
2908 * new prealloc extent, so that we get a larger contiguous disk extent.
2909 */
2910 if (em->start <= alloc_start && (em->flags & EXTENT_FLAG_PREALLOC)) {
2911 const u64 em_end = em->start + em->len;
2912
2913 if (em_end >= offset + len) {
2914 /*
2915 * The whole range is already a prealloc extent,
2916 * do nothing except updating the inode's i_size if
2917 * needed.
2918 */
2919 free_extent_map(em);
2920 ret = btrfs_fallocate_update_isize(inode, offset + len,
2921 mode);
2922 goto out;
2923 }
2924 /*
2925 * Part of the range is already a prealloc extent, so operate
2926 * only on the remaining part of the range.
2927 */
2928 alloc_start = em_end;
2929 ASSERT(IS_ALIGNED(alloc_start, sectorsize));
2930 len = offset + len - alloc_start;
2931 offset = alloc_start;
2932 alloc_hint = extent_map_block_start(em) + em->len;
2933 }
2934 free_extent_map(em);
2935
2936 if (BTRFS_BYTES_TO_BLKS(fs_info, offset) ==
2937 BTRFS_BYTES_TO_BLKS(fs_info, offset + len - 1)) {
2938 em = btrfs_get_extent(BTRFS_I(inode), NULL, alloc_start, sectorsize);
2939 if (IS_ERR(em)) {
2940 ret = PTR_ERR(em);
2941 goto out;
2942 }
2943
2944 if (em->flags & EXTENT_FLAG_PREALLOC) {
2945 free_extent_map(em);
2946 ret = btrfs_fallocate_update_isize(inode, offset + len,
2947 mode);
2948 goto out;
2949 }
2950 if (len < sectorsize && em->disk_bytenr != EXTENT_MAP_HOLE) {
2951 free_extent_map(em);
2952 ret = btrfs_truncate_block(BTRFS_I(inode), offset, len,
2953 0);
2954 if (!ret)
2955 ret = btrfs_fallocate_update_isize(inode,
2956 offset + len,
2957 mode);
2958 return ret;
2959 }
2960 free_extent_map(em);
2961 alloc_start = round_down(offset, sectorsize);
2962 alloc_end = alloc_start + sectorsize;
2963 goto reserve_space;
2964 }
2965
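/*
 * The range covers more than one block: operate on the block-aligned
 * interior and handle the unaligned head and tail separately below.
 */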
2966 alloc_start = round_up(offset, sectorsize);
2967 alloc_end = round_down(offset + len, sectorsize);
2968
2969 /*
2970 * For unaligned ranges, check the pages at the boundaries, they might
2971 * map to an extent, in which case we need to partially zero them, or
2972 * they might map to a hole, in which case we need our allocation range
2973 * to cover them.
2974 */
2975 if (!IS_ALIGNED(offset, sectorsize)) {
2976 ret = btrfs_zero_range_check_range_boundary(BTRFS_I(inode),
2977 offset);
2978 if (ret < 0)
2979 goto out;
2980 if (ret == RANGE_BOUNDARY_HOLE) {
2981 alloc_start = round_down(offset, sectorsize);
2982 ret = 0;
2983 } else if (ret == RANGE_BOUNDARY_WRITTEN_EXTENT) {
2984 ret = btrfs_truncate_block(BTRFS_I(inode), offset, 0, 0);
2985 if (ret)
2986 goto out;
2987 } else {
2988 ret = 0;
2989 }
2990 }
2991
2992 if (!IS_ALIGNED(offset + len, sectorsize)) {
2993 ret = btrfs_zero_range_check_range_boundary(BTRFS_I(inode),
2994 offset + len);
2995 if (ret < 0)
2996 goto out;
2997 if (ret == RANGE_BOUNDARY_HOLE) {
2998 alloc_end = round_up(offset + len, sectorsize);
2999 ret = 0;
3000 } else if (ret == RANGE_BOUNDARY_WRITTEN_EXTENT) {
3001 ret = btrfs_truncate_block(BTRFS_I(inode), offset + len,
3002 0, 1);
3003 if (ret)
3004 goto out;
3005 } else {
3006 ret = 0;
3007 }
3008 }
3009
3010 reserve_space:
3011 if (alloc_start < alloc_end) {
3012 struct extent_state *cached_state = NULL;
3013 const u64 lockstart = alloc_start;
3014 const u64 lockend = alloc_end - 1;
3015
3016 bytes_to_reserve = alloc_end - alloc_start;
3017 ret = btrfs_alloc_data_chunk_ondemand(BTRFS_I(inode),
3018 bytes_to_reserve);
3019 if (ret < 0)
3020 goto out;
3021 space_reserved = true;
3022 btrfs_punch_hole_lock_range(inode, lockstart, lockend,
3023 &cached_state);
3024 ret = btrfs_qgroup_reserve_data(BTRFS_I(inode), &data_reserved,
3025 alloc_start, bytes_to_reserve);
3026 if (ret) {
3027 unlock_extent(&BTRFS_I(inode)->io_tree, lockstart,
3028 lockend, &cached_state);
3029 goto out;
3030 }
3031 ret = btrfs_prealloc_file_range(inode, mode, alloc_start,
3032 alloc_end - alloc_start,
3033 fs_info->sectorsize,
3034 offset + len, &alloc_hint);
3035 unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend,
3036 &cached_state);
3037 /* btrfs_prealloc_file_range releases reserved space on error */
3038 if (ret) {
3039 space_reserved = false;
3040 goto out;
3041 }
3042 }
3043 ret = btrfs_fallocate_update_isize(inode, offset + len, mode);
3044 out:
3045 if (ret && space_reserved)
3046 btrfs_free_reserved_data_space(BTRFS_I(inode), data_reserved,
3047 alloc_start, bytes_to_reserve);
3048 extent_changeset_free(data_reserved);
3049
3050 return ret;
3051 }
3052
3053 static long btrfs_fallocate(struct file *file, int mode,
3054 loff_t offset, loff_t len)
3055 {
3056 struct inode *inode = file_inode(file);
3057 struct extent_state *cached_state = NULL;
3058 struct extent_changeset *data_reserved = NULL;
3059 struct falloc_range *range;
3060 struct falloc_range *tmp;
3061 LIST_HEAD(reserve_list);
3062 u64 cur_offset;
3063 u64 last_byte;
3064 u64 alloc_start;
3065 u64 alloc_end;
3066 u64 alloc_hint = 0;
3067 u64 locked_end;
3068 u64 actual_end = 0;
3069 u64 data_space_needed = 0;
3070 u64 data_space_reserved = 0;
3071 u64 qgroup_reserved = 0;
3072 struct extent_map *em;
3073 int blocksize = BTRFS_I(inode)->root->fs_info->sectorsize;
3074 int ret;
3075
3076 /* Do not allow fallocate in ZONED mode */
3077 if (btrfs_is_zoned(inode_to_fs_info(inode)))
3078 return -EOPNOTSUPP;
3079
3080 alloc_start = round_down(offset, blocksize);
3081 alloc_end = round_up(offset + len, blocksize);
3082 cur_offset = alloc_start;
3083
3084 /* Make sure we aren't being given some crap mode */
3085 if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
3086 FALLOC_FL_ZERO_RANGE))
3087 return -EOPNOTSUPP;
3088
3089 if (mode & FALLOC_FL_PUNCH_HOLE)
3090 return btrfs_punch_hole(file, offset, len);
3091
3092 btrfs_inode_lock(BTRFS_I(inode), BTRFS_ILOCK_MMAP);
3093
3094 if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size) {
3095 ret = inode_newsize_ok(inode, offset + len);
3096 if (ret)
3097 goto out;
3098 }
3099
3100 ret = file_modified(file);
3101 if (ret)
3102 goto out;
3103
3104 /*
3105 * TODO: Move these two operations after we have checked
3106 * accurate reserved space, or fallocate can still fail but
3107 * with the page truncated or the size expanded.
3108 *
3109 * But that's a minor problem and won't do much harm BTW.
3110 */
3111 if (alloc_start > inode->i_size) {
3112 ret = btrfs_cont_expand(BTRFS_I(inode), i_size_read(inode),
3113 alloc_start);
3114 if (ret)
3115 goto out;
3116 } else if (offset + len > inode->i_size) {
3117 /*
3118 * If we are fallocating from the end of the file onward we
3119 * need to zero out the end of the block if i_size lands in the
3120 * middle of a block.
3121 */
3122 ret = btrfs_truncate_block(BTRFS_I(inode), inode->i_size, 0, 0);
3123 if (ret)
3124 goto out;
3125 }
3126
3127 /*
3128 * We have locked the inode at the VFS level (in exclusive mode) and we
3129 * have locked the i_mmap_lock lock (in exclusive mode). Now before
3130 * locking the file range, flush all delalloc in the range and wait for
3131 * all ordered extents in the range to complete. After this we can lock
3132 * the file range and, due to the previous locking we did, we know there
3133 * can't be more delalloc or ordered extents in the range.
3134 */
3135 ret = btrfs_wait_ordered_range(BTRFS_I(inode), alloc_start,
3136 alloc_end - alloc_start);
3137 if (ret)
3138 goto out;
3139
3140 if (mode & FALLOC_FL_ZERO_RANGE) {
3141 ret = btrfs_zero_range(inode, offset, len, mode);
3142 btrfs_inode_unlock(BTRFS_I(inode), BTRFS_ILOCK_MMAP);
3143 return ret;
3144 }
3145
3146 locked_end = alloc_end - 1;
3147 lock_extent(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
3148 &cached_state);
3149
3150 btrfs_assert_inode_range_clean(BTRFS_I(inode), alloc_start, locked_end);
3151
3152 /* First, check if we exceed the qgroup limit */
3153 while (cur_offset < alloc_end) {
3154 em = btrfs_get_extent(BTRFS_I(inode), NULL, cur_offset,
3155 alloc_end - cur_offset);
3156 if (IS_ERR(em)) {
3157 ret = PTR_ERR(em);
3158 break;
3159 }
3160 last_byte = min(extent_map_end(em), alloc_end);
3161 actual_end = min_t(u64, extent_map_end(em), offset + len);
3162 last_byte = ALIGN(last_byte, blocksize);
3163 if (em->disk_bytenr == EXTENT_MAP_HOLE ||
3164 (cur_offset >= inode->i_size &&
3165 !(em->flags & EXTENT_FLAG_PREALLOC))) {
3166 const u64 range_len = last_byte - cur_offset;
3167
3168 ret = add_falloc_range(&reserve_list, cur_offset, range_len);
3169 if (ret < 0) {
3170 free_extent_map(em);
3171 break;
3172 }
3173 ret = btrfs_qgroup_reserve_data(BTRFS_I(inode),
3174 &data_reserved, cur_offset, range_len);
3175 if (ret < 0) {
3176 free_extent_map(em);
3177 break;
3178 }
3179 qgroup_reserved += range_len;
3180 data_space_needed += range_len;
3181 }
3182 free_extent_map(em);
3183 cur_offset = last_byte;
3184 }
3185
3186 if (!ret && data_space_needed > 0) {
3187 /*
3188 * We are safe to reserve space here as we can't have delalloc
3189 * in the range, see above.
3190 */
3191 ret = btrfs_alloc_data_chunk_ondemand(BTRFS_I(inode),
3192 data_space_needed);
3193 if (!ret)
3194 data_space_reserved = data_space_needed;
3195 }
3196
3197 /*
3198 * If ret is still 0, it means we're OK to fallocate.
3199 * Otherwise, just clean up the list and exit.
3200 */
3201 list_for_each_entry_safe(range, tmp, &reserve_list, list) {
3202 if (!ret) {
3203 ret = btrfs_prealloc_file_range(inode, mode,
3204 range->start,
3205 range->len, blocksize,
3206 offset + len, &alloc_hint);
3207 /*
3208 * btrfs_prealloc_file_range() releases space even
3209 * if it returns an error.
3210 */
3211 data_space_reserved -= range->len;
3212 qgroup_reserved -= range->len;
3213 } else if (data_space_reserved > 0) {
3214 btrfs_free_reserved_data_space(BTRFS_I(inode),
3215 data_reserved, range->start,
3216 range->len);
3217 data_space_reserved -= range->len;
3218 qgroup_reserved -= range->len;
3219 } else if (qgroup_reserved > 0) {
3220 btrfs_qgroup_free_data(BTRFS_I(inode), data_reserved,
3221 range->start, range->len, NULL);
3222 qgroup_reserved -= range->len;
3223 }
3224 list_del(&range->list);
3225 kfree(range);
3226 }
3227 if (ret < 0)
3228 goto out_unlock;
3229
3230 /*
3231 * We didn't need to allocate any more space, but we still extended the
3232 * size of the file so we need to update i_size and the inode item.
3233 */
3234 ret = btrfs_fallocate_update_isize(inode, actual_end, mode);
3235 out_unlock:
3236 unlock_extent(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
3237 &cached_state);
3238 out:
3239 btrfs_inode_unlock(BTRFS_I(inode), BTRFS_ILOCK_MMAP);
3240 extent_changeset_free(data_reserved);
3241 return ret;
3242 }
3243
3244 /*
3245 * Helper for btrfs_find_delalloc_in_range(). Find a subrange in a given range
3246 * that has unflushed and/or flushing delalloc. There might be other adjacent
3247 * subranges after the one it found, so btrfs_find_delalloc_in_range() keeps
3248 * looping while it gets adjacent subranges, merging them together.
3249 */
3250 static bool find_delalloc_subrange(struct btrfs_inode *inode, u64 start, u64 end,
3251 struct extent_state **cached_state,
3252 bool *search_io_tree,
3253 u64 *delalloc_start_ret, u64 *delalloc_end_ret)
3254 {
3255 u64 len = end + 1 - start;
3256 u64 delalloc_len = 0;
3257 struct btrfs_ordered_extent *oe;
3258 u64 oe_start;
3259 u64 oe_end;
3260
3261 /*
3262 * Search the io tree first for EXTENT_DELALLOC. If we find any, it
3263 * means we have delalloc (dirty pages) for which writeback has not
3264 * started yet.
3265 */
3266 if (*search_io_tree) {
3267 spin_lock(&inode->lock);
3268 if (inode->delalloc_bytes > 0) {
3269 spin_unlock(&inode->lock);
3270 *delalloc_start_ret = start;
3271 delalloc_len = count_range_bits(&inode->io_tree,
3272 delalloc_start_ret, end,
3273 len, EXTENT_DELALLOC, 1,
3274 cached_state);
3275 } else {
3276 spin_unlock(&inode->lock);
3277 }
3278 }
3279
3280 if (delalloc_len > 0) {
3281 /*
3282 * If delalloc was found then *delalloc_start_ret has a sector size
3283 * aligned value (rounded down).
3284 */
3285 *delalloc_end_ret = *delalloc_start_ret + delalloc_len - 1;
3286
3287 if (*delalloc_start_ret == start) {
3288 /* Delalloc for the whole range, nothing more to do. */
3289 if (*delalloc_end_ret == end)
3290 return true;
3291 /* Else trim our search range for ordered extents. */
3292 start = *delalloc_end_ret + 1;
3293 len = end + 1 - start;
3294 }
3295 } else {
3296 /* No delalloc, future calls don't need to search again. */
3297 *search_io_tree = false;
3298 }
3299
3300 /*
3301 * Now also check if there's any ordered extent in the range.
3302 * We do this because:
3303 *
3304 * 1) When delalloc is flushed, the file range is locked, we clear the
3305 * EXTENT_DELALLOC bit from the io tree and create an extent map and
3306 * an ordered extent for the write. So we might just have been called
3307 * after delalloc is flushed and before the ordered extent completes
3308 * and inserts the new file extent item in the subvolume's btree;
3309 *
3310 * 2) We may have an ordered extent created by flushing delalloc for a
3311 * subrange that starts before the subrange we found marked with
3312 * EXTENT_DELALLOC in the io tree.
3313 *
3314 * We could also use the extent map tree to find such delalloc that is
3315 * being flushed, but using the ordered extents tree is more efficient
3316 * because it's usually much smaller as ordered extents are removed from
3317 * the tree once they complete. With the extent maps, we may have them
3318 * in the extent map tree for a very long time, and they were either
3319 * created by previous writes or loaded by read operations.
3320 */
3321 oe = btrfs_lookup_first_ordered_range(inode, start, len);
3322 if (!oe)
3323 return (delalloc_len > 0);
3324
3325 /* The ordered extent may span beyond our search range. */
3326 oe_start = max(oe->file_offset, start);
3327 oe_end = min(oe->file_offset + oe->num_bytes - 1, end);
3328
3329 btrfs_put_ordered_extent(oe);
3330
3331 /* Don't have unflushed delalloc, return the ordered extent range. */
3332 if (delalloc_len == 0) {
3333 *delalloc_start_ret = oe_start;
3334 *delalloc_end_ret = oe_end;
3335 return true;
3336 }
3337
3338 /*
3339 * We have both unflushed delalloc (io_tree) and an ordered extent.
3340 * If the ranges are adjacent, return a combined range; otherwise
3341 * return the leftmost range.
3342 */
3343 if (oe_start < *delalloc_start_ret) {
3344 if (oe_end < *delalloc_start_ret)
3345 *delalloc_end_ret = oe_end;
3346 *delalloc_start_ret = oe_start;
3347 } else if (*delalloc_end_ret + 1 == oe_start) {
3348 *delalloc_end_ret = oe_end;
3349 }
3350
3351 return true;
3352 }
3353
3354 /*
3355 * Check if there's delalloc in a given range.
3356 *
3357 * @inode: The inode.
3358 * @start: The start offset of the range. It does not need to be
3359 * sector size aligned.
3360 * @end: The end offset (inclusive value) of the search range.
3361 * It does not need to be sector size aligned.
3362 * @cached_state: Extent state record used for speeding up delalloc
3363 * searches in the inode's io_tree. Can be NULL.
3364 * @delalloc_start_ret: Output argument, set to the start offset of the
3365 * subrange found with delalloc (may not be sector size
3366 * aligned).
3367 * @delalloc_end_ret: Output argument, set to the end offset (inclusive value)
3368 * of the subrange found with delalloc.
3369 *
3370 * Returns true if a subrange with delalloc is found within the given range, and
3371 * if so it sets @delalloc_start_ret and @delalloc_end_ret with the start and
3372 * end offsets of the subrange.
3373 */
3374 bool btrfs_find_delalloc_in_range(struct btrfs_inode *inode, u64 start, u64 end,
3375 struct extent_state **cached_state,
3376 u64 *delalloc_start_ret, u64 *delalloc_end_ret)
3377 {
3378 u64 cur_offset = round_down(start, inode->root->fs_info->sectorsize);
3379 u64 prev_delalloc_end = 0;
3380 bool search_io_tree = true;
3381 bool ret = false;
3382
3383 while (cur_offset <= end) {
3384 u64 delalloc_start;
3385 u64 delalloc_end;
3386 bool delalloc;
3387
3388 delalloc = find_delalloc_subrange(inode, cur_offset, end,
3389 cached_state, &search_io_tree,
3390 &delalloc_start,
3391 &delalloc_end);
3392 if (!delalloc)
3393 break;
3394
3395 if (prev_delalloc_end == 0) {
3396 /* First subrange found. */
3397 *delalloc_start_ret = max(delalloc_start, start);
3398 *delalloc_end_ret = delalloc_end;
3399 ret = true;
3400 } else if (delalloc_start == prev_delalloc_end + 1) {
3401 /* Subrange adjacent to the previous one, merge them. */
3402 *delalloc_end_ret = delalloc_end;
3403 } else {
3404 /* Subrange not adjacent to the previous one, exit. */
3405 break;
3406 }
3407
3408 prev_delalloc_end = delalloc_end;
3409 cur_offset = delalloc_end + 1;
3410 cond_resched();
3411 }
3412
3413 return ret;
3414 }
3415
3416 /*
3417 * Check if there's a hole or delalloc range in a range representing a hole (or
3418 * prealloc extent) found in the inode's subvolume btree.
3419 *
3420 * @inode: The inode.
3421 * @whence: Seek mode (SEEK_DATA or SEEK_HOLE).
3422 * @start: Start offset of the hole region. It does not need to be sector
3423 * size aligned.
3424 * @end: End offset (inclusive value) of the hole region. It does not
3425 * need to be sector size aligned.
3426 * @start_ret: Return parameter, used to set the start of the subrange in the
3427 * hole that matches the search criteria (seek mode), if such a
3428 * subrange is found (return value of the function is true).
3429 * The value returned here may not be sector size aligned.
3430 *
3431 * Returns true if a subrange matching the given seek mode is found, and if one
3432 * is found, it updates @start_ret with the start of the subrange.
3433 */
3434 static bool find_desired_extent_in_hole(struct btrfs_inode *inode, int whence,
3435 struct extent_state **cached_state,
3436 u64 start, u64 end, u64 *start_ret)
3437 {
3438 u64 delalloc_start;
3439 u64 delalloc_end;
3440 bool delalloc;
3441
3442 delalloc = btrfs_find_delalloc_in_range(inode, start, end, cached_state,
3443 &delalloc_start, &delalloc_end);
3444 if (delalloc && whence == SEEK_DATA) {
3445 *start_ret = delalloc_start;
3446 return true;
3447 }
3448
3449 if (delalloc && whence == SEEK_HOLE) {
3450 /*
3451 * We found delalloc, but it starts after our start offset. So we
3452 * have a hole between our start offset and the delalloc start.
3453 */
3454 if (start < delalloc_start) {
3455 *start_ret = start;
3456 return true;
3457 }
3458 /*
3459 * Delalloc range starts at our start offset.
3460 * If the delalloc range's length is smaller than our range,
3461 * then it means we have a hole that starts where the delalloc
3462 * subrange ends.
3463 */
3464 if (delalloc_end < end) {
3465 *start_ret = delalloc_end + 1;
3466 return true;
3467 }
3468
3469 /* There's delalloc for the whole range. */
3470 return false;
3471 }
3472
3473 if (!delalloc && whence == SEEK_HOLE) {
3474 *start_ret = start;
3475 return true;
3476 }
3477
3478 /*
3479 * No delalloc in the range and we are seeking for data. The caller has
3480 * to iterate to the next extent item in the subvolume btree.
3481 */
3482 return false;
3483 }
3484
3485 static loff_t find_desired_extent(struct file *file, loff_t offset, int whence)
3486 {
3487 struct btrfs_inode *inode = BTRFS_I(file->f_mapping->host);
3488 struct btrfs_file_private *private;
3489 struct btrfs_fs_info *fs_info = inode->root->fs_info;
3490 struct extent_state *cached_state = NULL;
3491 struct extent_state **delalloc_cached_state;
3492 const loff_t i_size = i_size_read(&inode->vfs_inode);
3493 const u64 ino = btrfs_ino(inode);
3494 struct btrfs_root *root = inode->root;
3495 struct btrfs_path *path;
3496 struct btrfs_key key;
3497 u64 last_extent_end;
3498 u64 lockstart;
3499 u64 lockend;
3500 u64 start;
3501 int ret;
3502 bool found = false;
3503
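/* lseek() reports ENXIO when seeking for data or a hole at or past EOF. */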
3504 if (i_size == 0 || offset >= i_size)
3505 return -ENXIO;
3506
3507 /*
3508 * Quick path. If the inode has no prealloc extents and its number of
3509 * bytes used matches its i_size, then it can not have holes.
3510 */
3511 if (whence == SEEK_HOLE &&
3512 !(inode->flags & BTRFS_INODE_PREALLOC) &&
3513 inode_get_bytes(&inode->vfs_inode) == i_size)
3514 return i_size;
3515
3516 spin_lock(&inode->lock);
3517 private = file->private_data;
3518 spin_unlock(&inode->lock);
3519
3520 if (private && private->owner_task != current) {
3521 /*
3522 * Not allocated by us, don't use it as its cached state is used
3523 * by the task that allocated it and we don't want neither to
3524 * mess with it nor get incorrect results because it reflects an
3525 * invalid state for the current task.
3526 */
3527 private = NULL;
3528 } else if (!private) {
3529 private = kzalloc(sizeof(*private), GFP_KERNEL);
3530 /*
3531 * No worries if memory allocation failed.
3532 * The private structure is used only for speeding up multiple
3533 * lseek SEEK_HOLE/DATA calls to a file when there's delalloc,
3534 * so everything will still be correct.
3535 */
3536 if (private) {
3537 bool free = false;
3538
3539 private->owner_task = current;
3540
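			/*
			 * Publish our private under the inode lock. If another
			 * task raced with us and installed its own private in
			 * the meantime, keep that one and free ours below.
			 */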
			spin_lock(&inode->lock);
			if (file->private_data)
				free = true;
			else
				file->private_data = private;
			spin_unlock(&inode->lock);

			if (free) {
				kfree(private);
				private = NULL;
			}
		}
	}

	if (private)
		delalloc_cached_state = &private->llseek_cached_state;
	else
		delalloc_cached_state = NULL;

	/*
	 * offset can be negative, in this case we start finding DATA/HOLE from
	 * the very start of the file.
	 */
	start = max_t(loff_t, 0, offset);

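	/*
	 * Lock the sector-aligned range covering [start, i_size). The io tree
	 * works with inclusive end offsets, hence the lockend-- below; the
	 * defensive check guarantees the locked range is never empty.
	 */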
	lockstart = round_down(start, fs_info->sectorsize);
	lockend = round_up(i_size, fs_info->sectorsize);
	if (lockend <= lockstart)
		lockend = lockstart + fs_info->sectorsize;
	lockend--;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->reada = READA_FORWARD;

	key.objectid = ino;
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = start;

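	/*
	 * last_extent_end tracks where the previous extent item ended, so a
	 * gap between it and the next item's key offset reveals an implicit
	 * hole (possible with the NO_HOLES feature).
	 */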
	last_extent_end = lockstart;

	lock_extent(&inode->io_tree, lockstart, lockend, &cached_state);

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0) {
		goto out;
	} else if (ret > 0 && path->slots[0] > 0) {
		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0] - 1);
		if (key.objectid == ino && key.type == BTRFS_EXTENT_DATA_KEY)
			path->slots[0]--;
	}

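	/*
	 * Walk the file extent items forward from @start, checking each
	 * explicit or implicit hole (and each prealloc extent) for delalloc,
	 * until a position matching the seek mode is found or we pass i_size.
	 */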
	while (start < i_size) {
		struct extent_buffer *leaf = path->nodes[0];
		struct btrfs_file_extent_item *extent;
		u64 extent_end;
		u8 type;

		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				goto out;
			else if (ret > 0)
				break;

			leaf = path->nodes[0];
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY)
			break;

		extent_end = btrfs_file_extent_end(path);

		/*
		 * In the first iteration we may have a slot that points to an
		 * extent that ends before our start offset, so skip it.
		 */
		if (extent_end <= start) {
			path->slots[0]++;
			continue;
		}

		/* We have an implicit hole, NO_HOLES feature is likely set. */
		if (last_extent_end < key.offset) {
			u64 search_start = last_extent_end;
			u64 found_start;

			/*
			 * First iteration, @start matches @offset and it's
			 * within the hole.
			 */
			if (start == offset)
				search_start = offset;

			found = find_desired_extent_in_hole(inode, whence,
							    delalloc_cached_state,
							    search_start,
							    key.offset - 1,
							    &found_start);
			if (found) {
				start = found_start;
				break;
			}
			/*
			 * Didn't find data or a hole (due to delalloc) in the
			 * implicit hole range, so we need to analyze the
			 * extent item.
			 */
		}

		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_file_extent_item);
		type = btrfs_file_extent_type(leaf, extent);

		/*
		 * Can't access the extent's disk_bytenr field if this is an
		 * inline extent, since at that offset, it's where the extent
		 * data starts.
		 */
		if (type == BTRFS_FILE_EXTENT_PREALLOC ||
		    (type == BTRFS_FILE_EXTENT_REG &&
		     btrfs_file_extent_disk_bytenr(leaf, extent) == 0)) {
			/*
			 * Explicit hole or prealloc extent, search for
			 * delalloc. A prealloc extent is treated like a hole.
			 */
			u64 search_start = key.offset;
			u64 found_start;

			/*
			 * First iteration, @start matches @offset and it's
			 * within the hole.
			 */
			if (start == offset)
				search_start = offset;

			found = find_desired_extent_in_hole(inode, whence,
							    delalloc_cached_state,
							    search_start,
							    extent_end - 1,
							    &found_start);
			if (found) {
				start = found_start;
				break;
			}
			/*
			 * Didn't find data or a hole (due to delalloc) in the
			 * hole range, so we need to analyze the next extent
			 * item.
			 */
		} else {
			/*
			 * Found a regular or inline extent.
			 * If we are seeking for data, adjust the start offset
			 * and stop, we're done.
			 */
			if (whence == SEEK_DATA) {
				start = max_t(u64, key.offset, offset);
				found = true;
				break;
			}
			/*
			 * Else, we are seeking for a hole, check the next file
			 * extent item.
			 */
		}

		start = extent_end;
		last_extent_end = extent_end;
		path->slots[0]++;
		if (fatal_signal_pending(current)) {
			ret = -EINTR;
			goto out;
		}
		cond_resched();
	}

	/* We have an implicit hole from the last extent found up to i_size. */
	if (!found && start < i_size) {
		found = find_desired_extent_in_hole(inode, whence,
						    delalloc_cached_state, start,
						    i_size - 1, &start);
		if (!found)
			start = i_size;
	}

out:
	unlock_extent(&inode->io_tree, lockstart, lockend, &cached_state);
	btrfs_free_path(path);

	if (ret < 0)
		return ret;

	if (whence == SEEK_DATA && start >= i_size)
		return -ENXIO;

	return min_t(loff_t, start, i_size);
}

static loff_t btrfs_file_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;

	switch (whence) {
	default:
		return generic_file_llseek(file, offset, whence);
	case SEEK_DATA:
	case SEEK_HOLE:
		btrfs_inode_lock(BTRFS_I(inode), BTRFS_ILOCK_SHARED);
		offset = find_desired_extent(file, offset, whence);
		btrfs_inode_unlock(BTRFS_I(inode), BTRFS_ILOCK_SHARED);
		break;
	}

	if (offset < 0)
		return offset;

	return vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
}
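
/*
 * From userspace, the SEEK_DATA/SEEK_HOLE modes handled above can be used to
 * walk the allocated regions of a file. A minimal sketch (illustrative only,
 * not part of this file; assumes <unistd.h>, <stdio.h> and an open fd):
 *
 *	off_t data = 0, hole;
 *
 *	while ((data = lseek(fd, data, SEEK_DATA)) != (off_t)-1) {
 *		hole = lseek(fd, data, SEEK_HOLE);
 *		if (hole == (off_t)-1)
 *			break;
 *		printf("data: [%lld, %lld)\n", (long long)data,
 *		       (long long)hole);
 *		data = hole;
 *	}
 *
 * Once the offset reaches i_size, find_desired_extent() returns -ENXIO, which
 * userspace sees as lseek() failing with errno == ENXIO, ending the walk.
 */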

static int btrfs_file_open(struct inode *inode, struct file *filp)
{
	int ret;

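	/*
	 * Advertise support for RWF_NOWAIT I/O, and for O_DIRECT without a
	 * legacy ->direct_IO address space operation (FMODE_CAN_ODIRECT).
	 */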
	filp->f_mode |= FMODE_NOWAIT | FMODE_CAN_ODIRECT;

	ret = fsverity_file_open(inode, filp);
	if (ret)
		return ret;
	return generic_file_open(inode, filp);
}

static ssize_t btrfs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	ssize_t ret = 0;

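	/*
	 * For O_DIRECT, try the direct path first. If it returns an error,
	 * consumes the whole iter, or reaches EOF, we are done; otherwise fall
	 * back to buffered reading for whatever the direct path could not
	 * serve, passing the bytes read so far as filemap_read()'s
	 * @already_read argument.
	 */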
	if (iocb->ki_flags & IOCB_DIRECT) {
		ret = btrfs_direct_read(iocb, to);
		if (ret < 0 || !iov_iter_count(to) ||
		    iocb->ki_pos >= i_size_read(file_inode(iocb->ki_filp)))
			return ret;
	}

	return filemap_read(iocb, to, ret);
}

const struct file_operations btrfs_file_operations = {
	.llseek		= btrfs_file_llseek,
	.read_iter	= btrfs_file_read_iter,
	.splice_read	= filemap_splice_read,
	.write_iter	= btrfs_file_write_iter,
	.splice_write	= iter_file_splice_write,
	.mmap		= btrfs_file_mmap,
	.open		= btrfs_file_open,
	.release	= btrfs_release_file,
	.get_unmapped_area = thp_get_unmapped_area,
	.fsync		= btrfs_sync_file,
	.fallocate	= btrfs_fallocate,
	.unlocked_ioctl	= btrfs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= btrfs_compat_ioctl,
#endif
	.remap_file_range = btrfs_remap_file_range,
	.fop_flags	= FOP_BUFFER_RASYNC | FOP_BUFFER_WASYNC,
};

int btrfs_fdatawrite_range(struct btrfs_inode *inode, loff_t start, loff_t end)
{
	struct address_space *mapping = inode->vfs_inode.i_mapping;
	int ret;

	/*
	 * So with compression we will find and lock a dirty page and clear the
	 * first one as dirty, setup an async extent, and immediately return
	 * with the entire range locked but with nobody actually marked with
	 * writeback. So we can't just filemap_write_and_wait_range() and
	 * expect it to work since it will just kick off a thread to do the
	 * actual work. So we need to call filemap_fdatawrite_range _again_
	 * since it will wait on the page lock, which won't be unlocked until
	 * after the pages have been marked as writeback and so we're good to
	 * go from there. We have to do this otherwise we'll miss the ordered
	 * extents and that results in badness. Please Josef, do not think you
	 * know better and pull this out at some point in the future, it is
	 * right and you are wrong.
	 */
	ret = filemap_fdatawrite_range(mapping, start, end);
	if (!ret && test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT, &inode->runtime_flags))
		ret = filemap_fdatawrite_range(mapping, start, end);

	return ret;
}