mm/filemap.c: changes between 35f12f0f5c3bbd60caba89351f45c8eef8ffd423 (old) and a862f68a8b360086f248cbc3606029441b5f5197 (new)
/*
 * linux/mm/filemap.c
 *
 * Copyright (C) 1994-1999 Linus Torvalds
 */

/*
 * This file handles the generic file mmap semantics used by

--- 378 unchanged lines hidden ---

 *
 * Start writeback against all of a mapping's dirty pages that lie
 * within the byte offsets <start, end> inclusive.
 *
 * If sync_mode is WB_SYNC_ALL then this is a "data integrity" operation, as
 * opposed to a regular memory cleansing writeback. The difference between
 * these two operations is that if a dirty page/buffer is encountered, it must
 * be waited upon, and not just skipped over.
 *
 * Return: %0 on success, negative error code otherwise.
 */
int __filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
                                loff_t end, int sync_mode)
{
        int ret;
        struct writeback_control wbc = {
                .sync_mode = sync_mode,
                .nr_to_write = LONG_MAX,

--- 30 unchanged lines hidden ---

EXPORT_SYMBOL(filemap_fdatawrite_range);
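
/*
 * Usage sketch (hypothetical, not part of this file): kick off
 * data-integrity writeback for a byte range of an inode. The exported
 * filemap_fdatawrite_range() is the WB_SYNC_ALL wrapper around
 * __filemap_fdatawrite_range(); the helper and its arguments below are
 * illustrative only.
 */
static int example_write_range(struct inode *inode, loff_t pos, size_t count)
{
        /* The end offset is inclusive. */
        return filemap_fdatawrite_range(inode->i_mapping, pos,
                                        pos + count - 1);
}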

/**
 * filemap_flush - mostly a non-blocking flush
 * @mapping: target address_space
 *
 * This is a mostly non-blocking flush. Not suitable for data-integrity
 * purposes - I/O may not be started against all dirty pages.
 *
 * Return: %0 on success, negative error code otherwise.
 */
int filemap_flush(struct address_space *mapping)
{
        return __filemap_fdatawrite(mapping, WB_SYNC_NONE);
}
EXPORT_SYMBOL(filemap_flush);
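
/*
 * Usage sketch (hypothetical): a ->release() hook that opportunistically
 * pushes dirty pages to disk without waiting on them; the function name
 * is illustrative only.
 */
static int example_release(struct inode *inode, struct file *filp)
{
        return filemap_flush(inode->i_mapping);
}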

/**
 * filemap_range_has_page - check if a page exists in range.
 * @mapping: address space within which to check
 * @start_byte: offset in bytes where the range starts
 * @end_byte: offset in bytes where the range ends (inclusive)
 *
 * Find at least one page in the range supplied, usually used to check if
 * direct writing in this range will trigger a writeback.
 *
 * Return: %true if at least one page exists in the specified range,
 * %false otherwise.
 */
bool filemap_range_has_page(struct address_space *mapping,
                            loff_t start_byte, loff_t end_byte)
{
        struct page *page;
        XA_STATE(xas, &mapping->i_pages, start_byte >> PAGE_SHIFT);
        pgoff_t max = end_byte >> PAGE_SHIFT;


--- 60 unchanged lines hidden ---
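
/*
 * Usage sketch (hypothetical): before a direct I/O write, check whether
 * any cached pages overlap the target range and, if so, write them out
 * and wait first. The helper below is illustrative only.
 */
static int example_prepare_direct_write(struct address_space *mapping,
                                        loff_t pos, size_t count)
{
        if (filemap_range_has_page(mapping, pos, pos + count - 1))
                return filemap_write_and_wait_range(mapping, pos,
                                                    pos + count - 1);
        return 0;
}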

 *
 * Walk the list of under-writeback pages of the given address space
 * in the given range and wait for all of them. Check error status of
 * the address space and return it.
 *
 * Since the error status of the address space is cleared by this function,
 * callers are responsible for checking the return value and handling and/or
 * reporting the error.
 *
 * Return: error status of the address space.
 */
int filemap_fdatawait_range(struct address_space *mapping, loff_t start_byte,
                            loff_t end_byte)
{
        __filemap_fdatawait_range(mapping, start_byte, end_byte);
        return filemap_check_errors(mapping);
}
EXPORT_SYMBOL(filemap_fdatawait_range);
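
/*
 * Usage sketch (hypothetical): the classic data-integrity sequence is to
 * start writeback on a range and then wait on it, combining the errors.
 * This is roughly what filemap_write_and_wait_range() does internally.
 */
static int example_sync_range(struct address_space *mapping,
                              loff_t start, loff_t end)
{
        int err = filemap_fdatawrite_range(mapping, start, end);
        int err2 = filemap_fdatawait_range(mapping, start, end);

        return err ? err : err2;
}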

--- 6 unchanged lines hidden ---

 *
 * Walk the list of under-writeback pages of the address space that file
 * refers to, in the given range and wait for all of them. Check error
 * status of the address space vs. the file->f_wb_err cursor and return it.
 *
 * Since the error status of the file is advanced by this function,
 * callers are responsible for checking the return value and handling and/or
 * reporting the error.
 *
 * Return: error status of the address space vs. the file->f_wb_err cursor.
 */
int file_fdatawait_range(struct file *file, loff_t start_byte, loff_t end_byte)
{
        struct address_space *mapping = file->f_mapping;

        __filemap_fdatawait_range(mapping, start_byte, end_byte);
        return file_check_and_advance_wb_err(file);
}
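
/*
 * Usage sketch (hypothetical): wait on writeback for a range and report
 * errors against this file's own f_wb_err cursor, so two descriptors on
 * the same inode each observe a pending error exactly once.
 */
static int example_wait_on_file(struct file *file, loff_t pos, size_t count)
{
        return file_fdatawait_range(file, pos, pos + count - 1);
}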

--- 5 unchanged lines hidden ---

 *
 * Walk the list of under-writeback pages of the given address space
 * and wait for all of them. Unlike filemap_fdatawait(), this function
 * does not clear error status of the address space.
 *
 * Use this function if callers don't handle errors themselves. Expected
 * call sites are system-wide / filesystem-wide data flushers: e.g. sync(2),
 * fsfreeze(8)
 *
 * Return: error status of the address space.
 */
int filemap_fdatawait_keep_errors(struct address_space *mapping)
{
        __filemap_fdatawait_range(mapping, 0, LLONG_MAX);
        return filemap_check_and_keep_errors(mapping);
}
EXPORT_SYMBOL(filemap_fdatawait_keep_errors);

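/*
 * Usage sketch (hypothetical): a sync(2)-style flusher waits on all
 * writeback but deliberately leaves the mapping's error status in place,
 * so that a later fsync() on an affected file can still see it.
 */
static void example_wait_one_mapping(struct address_space *mapping)
{
        /* The return value is intentionally not consumed here. */
        filemap_fdatawait_keep_errors(mapping);
}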

--- 35 unchanged lines hidden ---

 * @mapping: the address_space for the pages
 * @lstart: offset in bytes where the range starts
 * @lend: offset in bytes where the range ends (inclusive)
 *
 * Write out and wait upon file offsets lstart->lend, inclusive.
 *
 * Note that @lend is inclusive (describes the last byte to be written) so
 * that this function can be used to write to the very end-of-file (end = -1).
 *
 * Return: error status of the address space.
 */
int filemap_write_and_wait_range(struct address_space *mapping,
                                 loff_t lstart, loff_t lend)
{
        int err = 0;

        if (mapping_needs_writeback(mapping)) {
                err = __filemap_fdatawrite_range(mapping, lstart, lend,

--- 39 unchanged lines hidden ---
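
/*
 * Usage sketch (hypothetical): write out and wait on a byte range before
 * an operation that must not race with dirty pagecache, such as punching
 * a hole; the offsets are illustrative.
 */
static int example_flush_before_punch(struct inode *inode,
                                      loff_t offset, loff_t len)
{
        return filemap_write_and_wait_range(inode->i_mapping, offset,
                                            offset + len - 1);
}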

 * it and try to swap it into place. If it works, or another task beat us
 * to it with the new value, then update the f_wb_err and return the error
 * portion. The error at this point must be reported via proper channels
 * (à la fsync, or NFS COMMIT operation, etc.).
 *
 * While we handle mapping->wb_err with atomic operations, the f_wb_err
 * value is protected by the f_lock since we must ensure that it reflects
 * the latest value swapped in for this file descriptor.
 *
 * Return: %0 on success, negative error code otherwise.
 */
int file_check_and_advance_wb_err(struct file *file)
{
        int err = 0;
        errseq_t old = READ_ONCE(file->f_wb_err);
        struct address_space *mapping = file->f_mapping;

        /* Locklessly handle the common case where nothing has changed */

--- 26 unchanged lines hidden ---
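
/*
 * Usage sketch (hypothetical): after pushing data for an NFS
 * COMMIT-style operation, report any writeback error seen since this
 * file last checked, advancing its errseq_t cursor in the process.
 */
static int example_commit_done(struct file *file)
{
        return file_check_and_advance_wb_err(file);
}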

 *
 * Write out and wait upon file offsets lstart->lend, inclusive.
 *
 * Note that @lend is inclusive (describes the last byte to be written) so
 * that this function can be used to write to the very end-of-file (end = -1).
 *
 * After writing out and waiting on the data, we check and advance the
 * f_wb_err cursor to the latest value, and return any errors detected there.
 *
 * Return: %0 on success, negative error code otherwise.
 */
int file_write_and_wait_range(struct file *file, loff_t lstart, loff_t lend)
{
        int err = 0, err2;
        struct address_space *mapping = file->f_mapping;

        if (mapping_needs_writeback(mapping)) {
                err = __filemap_fdatawrite_range(mapping, lstart, lend,

--- 17 unchanged lines hidden ---
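
/*
 * Usage sketch (hypothetical): an ->fsync() for a filesystem whose
 * metadata is journalled elsewhere. Unlike the mapping-level variant,
 * this also advances file->f_wb_err, so each open file reports a given
 * writeback error only once.
 */
static int example_fsync(struct file *file, loff_t start, loff_t end,
                         int datasync)
{
        return file_write_and_wait_range(file, start, end);
}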

 *
 * This function replaces a page in the pagecache with a new one. On
 * success it acquires the pagecache reference for the new page and
 * drops it for the old page. Both the old and new pages must be
 * locked. This function does not add the new page to the LRU, the
 * caller must do that.
 *
 * The remove + add is atomic. This function cannot fail.
 *
 * Return: %0
 */
int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
{
        struct address_space *mapping = old->mapping;
        void (*freepage)(struct page *) = mapping->a_ops->freepage;
        pgoff_t offset = old->index;
        XA_STATE(xas, &mapping->i_pages, offset);
        unsigned long flags;

--- 98 unchanged lines hidden ---

 * add_to_page_cache_locked - add a locked page to the pagecache
 * @page: page to add
 * @mapping: the page's address_space
 * @offset: page index
 * @gfp_mask: page allocation mode
 *
 * This function is used to add a page to the pagecache. The page must be
 * locked. This function does not add the page to the LRU. The caller must
 * do that.
 *
 * Return: %0 on success, negative error code otherwise.
 */
int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
                             pgoff_t offset, gfp_t gfp_mask)
{
        return __add_to_page_cache_locked(page, mapping, offset,
                                          gfp_mask, NULL);
}
EXPORT_SYMBOL(add_to_page_cache_locked);
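
/*
 * Usage sketch (hypothetical): mirror of the add_to_page_cache() helper.
 * The page is inserted while locked; on success it is returned locked
 * and holding a pagecache reference.
 */
static struct page *example_insert_new_page(struct address_space *mapping,
                                            pgoff_t index, gfp_t gfp)
{
        struct page *page = __page_cache_alloc(gfp);
        int err;

        if (!page)
                return ERR_PTR(-ENOMEM);
        __SetPageLocked(page);
        err = add_to_page_cache_locked(page, mapping, index, gfp);
        if (err) {
                __ClearPageLocked(page);
                put_page(page);
                return ERR_PTR(err);
        }
        return page;
}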

--- 580 unchanged lines hidden ---

 * @offset: the page cache index
 *
 * Looks up the page cache slot at @mapping & @offset. If there is a
 * page cache page, it is returned with an increased refcount.
 *
 * If the slot holds a shadow entry of a previously evicted page, or a
 * swap entry from shmem/tmpfs, it is returned.
 *
 * Return: the found page or shadow entry, %NULL if nothing is found.
 */
struct page *find_get_entry(struct address_space *mapping, pgoff_t offset)
{
        XA_STATE(xas, &mapping->i_pages, offset);
        struct page *head, *page;

        rcu_read_lock();
repeat:

--- 41 unchanged lines hidden ---

 *
 * Looks up the page cache slot at @mapping & @offset. If there is a
 * page cache page, it is returned locked and with an increased
 * refcount.
 *
 * If the slot holds a shadow entry of a previously evicted page, or a
 * swap entry from shmem/tmpfs, it is returned.
 *
 * find_lock_entry() may sleep.
 *
 * Return: the found page or shadow entry, %NULL if nothing is found.
 */
struct page *find_lock_entry(struct address_space *mapping, pgoff_t offset)
{
        struct page *page;

repeat:
        page = find_get_entry(mapping, offset);
        if (page && !xa_is_value(page)) {

--- 23 unchanged lines hidden ---

 *
 * @fgp_flags can be:
 *
 * - FGP_ACCESSED: the page will be marked accessed
 * - FGP_LOCK: the page is returned locked
 * - FGP_CREAT: if the page is not present then a new page is allocated using
 *   @gfp_mask and added to the page cache and the VM's LRU
 *   list. The page is returned locked and with an increased
 *   refcount.
 *
 * If FGP_LOCK or FGP_CREAT are specified then the function may sleep even
 * if the GFP flags specified for FGP_CREAT are atomic.
 *
 * If there is a page cache page, it is returned with an increased refcount.
 *
 * Return: the found page or %NULL otherwise.
 */
struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
                                int fgp_flags, gfp_t gfp_mask)
{
        struct page *page;

repeat:
        page = find_get_entry(mapping, offset);

--- 71 unchanged lines hidden ---
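
/*
 * Usage sketch (hypothetical): find-or-create a pagecache page, returned
 * locked, referenced and marked accessed. This is the pattern behind
 * helpers such as grab_cache_page_write_begin().
 */
static struct page *example_grab_page(struct address_space *mapping,
                                      pgoff_t index)
{
        return pagecache_get_page(mapping, index,
                                  FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
                                  mapping_gfp_mask(mapping));
}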

 *
 * The search returns a group of mapping-contiguous page cache entries
 * with ascending indexes. There may be holes in the indices due to
 * not-present pages.
 *
 * Any shadow entries of evicted pages, or swap entries from
 * shmem/tmpfs, are included in the returned array.
 *
 * Return: the number of pages and shadow entries which were found.
 */
unsigned find_get_entries(struct address_space *mapping,
                          pgoff_t start, unsigned int nr_entries,
                          struct page **entries, pgoff_t *indices)
{
        XA_STATE(xas, &mapping->i_pages, start);
        struct page *page;
        unsigned int ret = 0;

--- 53 unchanged lines hidden ---

 * pages in the mapping starting at index @start and up to index @end
 * (inclusive). The pages are placed at @pages. find_get_pages_range() takes
 * a reference against the returned pages.
 *
 * The search returns a group of mapping-contiguous pages with ascending
 * indexes. There may be holes in the indices due to not-present pages.
 * We also update @start to index the next page for the traversal.
 *
 * Return: the number of pages which were found. If this number is
 * smaller than @nr_pages, the end of specified range has been
 * reached.
 */
unsigned find_get_pages_range(struct address_space *mapping, pgoff_t *start,
                              pgoff_t end, unsigned int nr_pages,
                              struct page **pages)
{
        XA_STATE(xas, &mapping->i_pages, *start);
        struct page *page;

--- 56 unchanged lines hidden ---

 * @mapping: The address_space to search
 * @index: The starting page index
 * @nr_pages: The maximum number of pages
 * @pages: Where the resulting pages are placed
 *
 * find_get_pages_contig() works exactly like find_get_pages(), except
 * that the returned pages are guaranteed to be contiguous.
 *
 * Return: the number of pages which were found.
 */
unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t index,
                               unsigned int nr_pages, struct page **pages)
{
        XA_STATE(xas, &mapping->i_pages, index);
        struct page *page;
        unsigned int ret = 0;


--- 44 unchanged lines hidden ---

 * @index: the starting page index
 * @end: The final page index (inclusive)
 * @tag: the tag index
 * @nr_pages: the maximum number of pages
 * @pages: where the resulting pages are placed
 *
 * Like find_get_pages, except we only return pages which are tagged with
 * @tag. We update @index to index the next page for the traversal.
 *
 * Return: the number of pages which were found.
 */
unsigned find_get_pages_range_tag(struct address_space *mapping, pgoff_t *index,
                        pgoff_t end, xa_mark_t tag, unsigned int nr_pages,
                        struct page **pages)
{
        XA_STATE(xas, &mapping->i_pages, *index);
        struct page *page;
        unsigned ret = 0;

--- 61 unchanged lines hidden ---
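
/*
 * Usage sketch (hypothetical): walk all dirty-tagged pages in a range,
 * as write_cache_pages()-style writeback loops do. The processing step
 * is elided; each returned page carries a reference that must be put.
 */
static void example_walk_dirty_pages(struct address_space *mapping,
                                     pgoff_t start, pgoff_t end)
{
        struct page *pages[16];
        pgoff_t index = start;
        unsigned int i, nr;

        while ((nr = find_get_pages_range_tag(mapping, &index, end,
                                              PAGECACHE_TAG_DIRTY,
                                              ARRAY_SIZE(pages), pages))) {
                for (i = 0; i < nr; i++) {
                        /* ... write back or inspect pages[i] here ... */
                        put_page(pages[i]);
                }
        }
}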

 * @start: the starting page cache index
 * @tag: the tag index
 * @nr_entries: the maximum number of entries
 * @entries: where the resulting entries are placed
 * @indices: the cache indices corresponding to the entries in @entries
 *
 * Like find_get_entries, except we only return entries which are tagged with
 * @tag.
 *
 * Return: the number of entries which were found.
 */
unsigned find_get_entries_tag(struct address_space *mapping, pgoff_t start,
                        xa_mark_t tag, unsigned int nr_entries,
                        struct page **entries, pgoff_t *indices)
{
        XA_STATE(xas, &mapping->i_pages, start);
        struct page *page;
        unsigned int ret = 0;

--- 69 unchanged lines hidden ---

 * @iter: data destination
 * @written: already copied
 *
 * This is a generic file read routine, and uses the
 * mapping->a_ops->readpage() function for the actual low-level stuff.
 *
 * This is really ugly. But the goto's actually try to clarify some
 * of the logic when it comes to error handling etc.
 *
 * Return:
 * * total number of bytes copied, including those that were already @written
 * * negative error code if nothing was copied
 */
static ssize_t generic_file_buffered_read(struct kiocb *iocb,
                struct iov_iter *iter, ssize_t written)
{
        struct file *filp = iocb->ki_filp;
        struct address_space *mapping = filp->f_mapping;
        struct inode *inode = mapping->host;
        struct file_ra_state *ra = &filp->f_ra;

--- 245 unchanged lines hidden ---

/**
 * generic_file_read_iter - generic filesystem read routine
 * @iocb: kernel I/O control block
 * @iter: destination for the data read
 *
 * This is the "read_iter()" routine for all filesystems
 * that can use the page cache directly.
 *
 * Return:
 * * number of bytes copied, even for partial reads
 * * negative error code if nothing was read
 */
ssize_t
generic_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
{
        size_t count = iov_iter_count(iter);
        ssize_t retval = 0;

        if (!count)

--- 51 unchanged lines hidden ---
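
/*
 * Usage sketch (hypothetical): filesystems that use the page cache
 * directly can wire the generic routines straight into their
 * file_operations; the struct below is illustrative, not from this file.
 */
static const struct file_operations example_fops = {
        .read_iter      = generic_file_read_iter,
        .write_iter     = generic_file_write_iter,
        .mmap           = generic_file_mmap,
        .fsync          = generic_file_fsync,
};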

/**
 * page_cache_read - adds requested page to the page cache if not already there
 * @file: file to read
 * @offset: page index
 * @gfp_mask: memory allocation flags
 *
 * This adds the requested page to the page cache if it isn't already there,
 * and schedules an I/O to read in its contents from disk.
 *
 * Return: %0 on success, negative error code otherwise.
 */
static int page_cache_read(struct file *file, pgoff_t offset, gfp_t gfp_mask)
{
        struct address_space *mapping = file->f_mapping;
        struct page *page;
        int ret;

        do {

--- 98 unchanged lines hidden ---

 * lock_page_or_retry() returned 0.
 * The mmap_sem has usually been released in this case.
 * See __lock_page_or_retry() for the exception.
 *
 * If our return value does not have VM_FAULT_RETRY set, the mmap_sem
 * has not been released.
 *
 * We never return with VM_FAULT_RETRY and a bit from VM_FAULT_ERROR set.
 *
 * Return: bitwise-OR of %VM_FAULT_ codes.
 */
vm_fault_t filemap_fault(struct vm_fault *vmf)
{
        int error;
        struct file *file = vmf->vma->vm_file;
        struct address_space *mapping = file->f_mapping;
        struct file_ra_state *ra = &file->f_ra;
        struct inode *inode = mapping->host;

--- 369 unchanged lines hidden ---
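
/*
 * Usage sketch (hypothetical): filemap_fault() is normally reached via a
 * vm_operations_struct such as the generic one that generic_file_mmap()
 * installs; the struct name here is illustrative.
 */
static const struct vm_operations_struct example_vm_ops = {
        .fault          = filemap_fault,
        .map_pages      = filemap_map_pages,
        .page_mkwrite   = filemap_page_mkwrite,
};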

 * @index: the page index
 * @filler: function to perform the read
 * @data: first arg to filler(data, page) function, often left as NULL
 *
 * Read into the page cache. If a page already exists, and PageUptodate() is
 * not set, try to fill the page and wait for it to become unlocked.
 *
 * If the page does not get brought uptodate, return -EIO.
 *
 * Return: up to date page on success, ERR_PTR() on failure.
 */
struct page *read_cache_page(struct address_space *mapping,
                                pgoff_t index,
                                int (*filler)(void *, struct page *),
                                void *data)
{
        return do_read_cache_page(mapping, index, filler, data, mapping_gfp_mask(mapping));
}
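
/*
 * Usage sketch (hypothetical): most callers go through the
 * read_mapping_page() wrapper, which passes the mapping's ->readpage()
 * as the filler; drop the page reference when done with it.
 */
static int example_read_one_page(struct address_space *mapping, pgoff_t index)
{
        struct page *page = read_mapping_page(mapping, index, NULL);

        if (IS_ERR(page))
                return PTR_ERR(page);
        /* ... the page is uptodate here; use its contents ... */
        put_page(page);
        return 0;
}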

--- 4 unchanged lines hidden ---

 * @mapping: the page's address_space
 * @index: the page index
 * @gfp: the page allocator flags to use if allocating
 *
 * This is the same as "read_mapping_page(mapping, index, NULL)", but with
 * any new page allocations done using the specified allocation flags.
 *
 * If the page does not get brought uptodate, return -EIO.
 *
 * Return: up to date page on success, ERR_PTR() on failure.
 */
struct page *read_cache_page_gfp(struct address_space *mapping,
                                pgoff_t index,
                                gfp_t gfp)
{
        filler_t *filler = (filler_t *)mapping->a_ops->readpage;

        return do_read_cache_page(mapping, index, filler, NULL, gfp);

--- 367 unchanged lines hidden ---

 * do direct IO or a standard buffered write.
 *
 * It expects i_mutex to be grabbed unless we work on a block device or similar
 * object which does not need locking at all.
 *
 * This function does *not* take care of syncing data in case of O_SYNC write.
 * A caller has to handle it. This is mainly due to the fact that we want to
 * avoid syncing under i_mutex.
 *
 * Return:
 * * number of bytes written, even for truncated writes
 * * negative error code if no data has been written at all
 */
ssize_t __generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
        struct file *file = iocb->ki_filp;
        struct address_space *mapping = file->f_mapping;
        struct inode *inode = mapping->host;
        ssize_t written = 0;
        ssize_t err;

--- 68 unchanged lines hidden ---

/**
 * generic_file_write_iter - write data to a file
 * @iocb: IO state structure
 * @from: iov_iter with data to write
 *
 * This is a wrapper around __generic_file_write_iter() to be used by most
 * filesystems. It takes care of syncing the file in case of O_SYNC file
 * and acquires i_mutex as needed.
 *
 * Return:
 * * negative error code if no data has been written at all or
 *   vfs_fsync_range() failed for a synchronous write
 * * number of bytes written, even for truncated writes
 */
ssize_t generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
        struct file *file = iocb->ki_filp;
        struct inode *inode = file->f_mapping->host;
        ssize_t ret;

        inode_lock(inode);

--- 10 unchanged lines hidden ---
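
/*
 * Usage sketch (hypothetical): a filesystem that needs its own locking
 * around the generic write path mirrors this wrapper: take the inode
 * lock, check and perform the write, then sync O_SYNC data outside it.
 */
static ssize_t example_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
        struct inode *inode = file_inode(iocb->ki_filp);
        ssize_t ret;

        inode_lock(inode);
        ret = generic_write_checks(iocb, from);
        if (ret > 0)
                ret = __generic_file_write_iter(iocb, from);
        inode_unlock(inode);

        if (ret > 0)
                ret = generic_write_sync(iocb, ret);
        return ret;
}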

/**
 * try_to_release_page() - release old fs-specific metadata on a page
 *
 * @page: the page which the kernel is trying to free
 * @gfp_mask: memory allocation flags (and I/O mode)
 *
 * The address_space is asked to try to release any data held against the page
 * (presumably at page->private).
 *
 * This may also be called if PG_fscache is set on a page, indicating that the
 * page is known to the local caching routines.
 *
 * The @gfp_mask argument specifies whether I/O may be performed to release
 * this page (__GFP_IO), and whether the call may block (__GFP_RECLAIM & __GFP_FS).
 *
 * Return: %1 if the release was successful, otherwise return zero.
 */
int try_to_release_page(struct page *page, gfp_t gfp_mask)
{
        struct address_space * const mapping = page->mapping;

        BUG_ON(!PageLocked(page));
        if (PageWriteback(page))
                return 0;

        if (mapping && mapping->a_ops->releasepage)
                return mapping->a_ops->releasepage(page, gfp_mask);
        return try_to_free_buffers(page);
}

EXPORT_SYMBOL(try_to_release_page);
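
/*
 * Usage sketch (hypothetical): truncate/invalidate paths must shed a
 * page's private data (e.g. buffer heads) before removing it from the
 * pagecache. The page must be locked here; failure means it cannot be
 * released right now.
 */
static int example_try_release(struct page *page)
{
        if (page_has_private(page) && !try_to_release_page(page, GFP_KERNEL))
                return -EBUSY;
        return 0;
}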