Lines matching full:entry in fs/dax.c (Linux kernel)

54 * for pages.  We use one bit for locking, one bit for the entry size (PMD)
55 * and two more to tell us if the entry is a zero page or an empty entry that
58 * If the PMD bit isn't set the entry has size PAGE_SIZE, and if the ZERO_PAGE
59 * and EMPTY bits aren't set the entry is a normal DAX entry with a filesystem
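
The fragments above describe the encoding that the helpers below decode: the flag bits sit in the low bits of an XArray value entry and the pfn is stored above DAX_SHIFT. A minimal stand-alone model of that packing (bit positions and the DAX_SHIFT value here are illustrative; the authoritative definitions sit at the top of fs/dax.c, and the kernel additionally wraps the value with xa_mk_value()/xa_to_value()):

        /* Stand-alone model of the DAX entry layout; constants are illustrative. */
        #include <assert.h>
        #include <stdio.h>

        #define DAX_SHIFT       4
        #define DAX_LOCKED      (1UL << 0)      /* one bit for locking */
        #define DAX_PMD         (1UL << 1)      /* one bit for the entry size */
        #define DAX_ZERO_PAGE   (1UL << 2)      /* entry backed by the zero page */
        #define DAX_EMPTY       (1UL << 3)      /* empty entry, used only for locking */

        /* Pack a pfn and flag bits the way a DAX XArray value entry does. */
        static unsigned long dax_value(unsigned long pfn, unsigned long flags)
        {
                return flags | (pfn << DAX_SHIFT);
        }

        int main(void)
        {
                unsigned long v = dax_value(0x1234, DAX_PMD);

                assert((v >> DAX_SHIFT) == 0x1234);     /* what dax_to_pfn() recovers */
                assert(v & DAX_PMD);                    /* what dax_is_pmd_entry() tests */
                assert(!(v & DAX_LOCKED));              /* not locked */
                printf("entry value: %#lx\n", v);
                return 0;
        }

With that layout dax_to_pfn() is a single right shift and every dax_is_*() helper is a one-bit test, which is exactly what the matched lines below show.
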
68 static unsigned long dax_to_pfn(void *entry) in dax_to_pfn() argument
70 return xa_to_value(entry) >> DAX_SHIFT; in dax_to_pfn()
73 static struct folio *dax_to_folio(void *entry) in dax_to_folio() argument
75 return page_folio(pfn_to_page(dax_to_pfn(entry))); in dax_to_folio()
83 static bool dax_is_locked(void *entry) in dax_is_locked() argument
85 return xa_to_value(entry) & DAX_LOCKED; in dax_is_locked()
88 static unsigned int dax_entry_order(void *entry) in dax_entry_order() argument
90 if (xa_to_value(entry) & DAX_PMD) in dax_entry_order()
95 static unsigned long dax_is_pmd_entry(void *entry) in dax_is_pmd_entry() argument
97 return xa_to_value(entry) & DAX_PMD; in dax_is_pmd_entry()
100 static bool dax_is_pte_entry(void *entry) in dax_is_pte_entry() argument
102 return !(xa_to_value(entry) & DAX_PMD); in dax_is_pte_entry()
105 static int dax_is_zero_entry(void *entry) in dax_is_zero_entry() argument
107 return xa_to_value(entry) & DAX_ZERO_PAGE; in dax_is_zero_entry()
110 static int dax_is_empty_entry(void *entry) in dax_is_empty_entry() argument
112 return xa_to_value(entry) & DAX_EMPTY; in dax_is_empty_entry()
116 * true if the entry that was found is of a smaller order than the entry
119 static bool dax_is_conflict(void *entry) in dax_is_conflict() argument
121 return entry == XA_RETRY_ENTRY; in dax_is_conflict()
125 * DAX page cache entry locking
148 void *entry, struct exceptional_entry_key *key) in dax_entry_waitqueue() argument
154 * If 'entry' is a PMD, align the 'index' that we use for the wait in dax_entry_waitqueue()
158 if (dax_is_pmd_entry(entry)) in dax_entry_waitqueue()
181 * @entry may no longer be the entry at the index in the mapping.
182 * The important information it's conveying is whether the entry at
183 * this index used to be a PMD entry.
185 static void dax_wake_entry(struct xa_state *xas, void *entry, in dax_wake_entry() argument
191 wq = dax_entry_waitqueue(xas, entry, &key); in dax_wake_entry()
194 * Checking for locked entry and prepare_to_wait_exclusive() happens in dax_wake_entry()
195 * under the i_pages lock, ditto for entry handling in our callers. in dax_wake_entry()
196 * So at this point all tasks that could have seen our entry locked in dax_wake_entry()
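
The matched lines above belong to the hashed-waitqueue scheme used to serialize on locked entries: there is no per-entry lock object, only a fixed table of wait queues indexed by a hash of the xarray pointer and the (PMD-aligned) index. A sketch of that lookup, reconstructed from the fragments here; wait_table, DAX_WAIT_TABLE_BITS and PG_PMD_COLOUR are the usual fs/dax.c definitions and may differ by kernel version:

        /* Sketch: find the hashed wait queue that waiters for this entry sleep on. */
        static wait_queue_head_t *dax_entry_waitqueue_sketch(struct xa_state *xas,
                        void *entry, struct exceptional_entry_key *key)
        {
                unsigned long hash;
                unsigned long index = xas->xa_index;

                /*
                 * A PMD entry covers many indices; align the index so that
                 * every offset inside the PMD maps to the same wait queue.
                 */
                if (dax_is_pmd_entry(entry))
                        index &= ~PG_PMD_COLOUR;

                key->xa = xas->xa;
                key->entry_start = index;

                hash = hash_long((unsigned long)xas->xa ^ index, DAX_WAIT_TABLE_BITS);
                return wait_table + hash;
        }

dax_wake_entry() wakes that queue with the same key, so the wake function can skip waiters that are queued for a different xarray or entry_start.
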
204 * Look up entry in page cache, wait for it to become unlocked if it
205 * is a DAX entry and return it. The caller must subsequently call
206 * put_unlocked_entry() if it did not lock the entry or dax_unlock_entry()
207 * if it did. The entry returned may have a larger order than @order.
208 * If @order is larger than the order of the entry found in i_pages, this
209 * function returns a dax_is_conflict entry.
215 void *entry; in get_next_unlocked_entry() local
223 entry = xas_find_conflict(xas); in get_next_unlocked_entry()
224 if (!entry || WARN_ON_ONCE(!xa_is_value(entry))) in get_next_unlocked_entry()
225 return entry; in get_next_unlocked_entry()
226 if (dax_entry_order(entry) < order) in get_next_unlocked_entry()
228 if (!dax_is_locked(entry)) in get_next_unlocked_entry()
229 return entry; in get_next_unlocked_entry()
231 wq = dax_entry_waitqueue(xas, entry, &ewait.key); in get_next_unlocked_entry()
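
The fragments of get_next_unlocked_entry() above come from a retry loop: look up the entry, return early in the no-entry, conflicting-order and already-unlocked cases, otherwise sleep on the hashed wait queue and try again. A reconstructed sketch of that loop (exact details vary between kernel versions):

        static void *get_next_unlocked_entry_sketch(struct xa_state *xas,
                        unsigned int order)
        {
                struct wait_exceptional_entry_queue ewait;
                wait_queue_head_t *wq;
                void *entry;

                init_wait(&ewait.wait);
                ewait.wait.func = wake_exceptional_entry_func;

                for (;;) {
                        entry = xas_find_conflict(xas);
                        if (!entry || WARN_ON_ONCE(!xa_is_value(entry)))
                                return entry;           /* nothing there, or not a DAX entry */
                        if (dax_entry_order(entry) < order)
                                return XA_RETRY_ENTRY;  /* the dax_is_conflict() case */
                        if (!dax_is_locked(entry))
                                return entry;           /* found it, unlocked */

                        /* Locked by someone else: sleep on the hashed wait queue. */
                        wq = dax_entry_waitqueue(xas, entry, &ewait.key);
                        prepare_to_wait_exclusive(wq, &ewait.wait,
                                                  TASK_UNINTERRUPTIBLE);
                        xas_unlock_irq(xas);
                        xas_reset(xas);
                        schedule();
                        finish_wait(wq, &ewait.wait);
                        xas_lock_irq(xas);
                }
        }
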
243 * Wait for the given entry to become unlocked. Caller must hold the i_pages
244 * lock and call either put_unlocked_entry() if it did not lock the entry or
245 * dax_unlock_entry() if it did. Returns an unlocked entry if still present.
247 static void *wait_entry_unlocked_exclusive(struct xa_state *xas, void *entry) in wait_entry_unlocked_exclusive() argument
255 while (unlikely(dax_is_locked(entry))) { in wait_entry_unlocked_exclusive()
256 wq = dax_entry_waitqueue(xas, entry, &ewait.key); in wait_entry_unlocked_exclusive()
264 entry = xas_load(xas); in wait_entry_unlocked_exclusive()
267 if (xa_is_internal(entry)) in wait_entry_unlocked_exclusive()
270 return entry; in wait_entry_unlocked_exclusive()
278 static void wait_entry_unlocked(struct xa_state *xas, void *entry) in wait_entry_unlocked() argument
286 wq = dax_entry_waitqueue(xas, entry, &ewait.key); in wait_entry_unlocked()
289 * path ever successfully retrieves an unlocked entry before an in wait_entry_unlocked()
299 static void put_unlocked_entry(struct xa_state *xas, void *entry, in put_unlocked_entry() argument
302 if (entry && !dax_is_conflict(entry)) in put_unlocked_entry()
303 dax_wake_entry(xas, entry, mode); in put_unlocked_entry()
307 * We used the xa_state to get the entry, but then we locked the entry and
311 static void dax_unlock_entry(struct xa_state *xas, void *entry) in dax_unlock_entry() argument
315 BUG_ON(dax_is_locked(entry)); in dax_unlock_entry()
318 old = xas_store(xas, entry); in dax_unlock_entry()
321 dax_wake_entry(xas, entry, WAKE_NEXT); in dax_unlock_entry()
325 * Return: The entry stored at this location before it was locked.
327 static void *dax_lock_entry(struct xa_state *xas, void *entry) in dax_lock_entry() argument
329 unsigned long v = xa_to_value(entry); in dax_lock_entry()
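
The lock/unlock pair whose fragments appear above is small: locking stores the same value with DAX_LOCKED set and hands back the previous (unlocked) value, and unlocking stores the unlocked value and wakes the next waiter. A reconstruction from those fragments (again subject to version drift):

        static void *dax_lock_entry_sketch(struct xa_state *xas, void *entry)
        {
                unsigned long v = xa_to_value(entry);

                /* Store the locked variant; xas_store() returns the old value. */
                return xas_store(xas, xa_mk_value(v | DAX_LOCKED));
        }

        static void dax_unlock_entry_sketch(struct xa_state *xas, void *entry)
        {
                void *old;

                BUG_ON(dax_is_locked(entry));   /* caller passes the unlocked value */
                xas_reset(xas);
                xas_lock_irq(xas);
                old = xas_store(xas, entry);    /* clears DAX_LOCKED in the tree */
                xas_unlock_irq(xas);
                BUG_ON(!dax_is_locked(old));    /* it had better have been locked */
                dax_wake_entry(xas, entry, WAKE_NEXT);
        }
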
333 static unsigned long dax_entry_size(void *entry) in dax_entry_size() argument
335 if (dax_is_zero_entry(entry)) in dax_entry_size()
337 else if (dax_is_empty_entry(entry)) in dax_entry_size()
339 else if (dax_is_pmd_entry(entry)) in dax_entry_size()
358 * whether this entry is shared by multiple files. If the page has not
421 static void dax_folio_init(void *entry) in dax_folio_init() argument
423 struct folio *folio = dax_to_folio(entry); in dax_folio_init()
424 int order = dax_entry_order(entry); in dax_folio_init()
441 static void dax_associate_entry(void *entry, struct address_space *mapping, in dax_associate_entry() argument
445 unsigned long size = dax_entry_size(entry), index; in dax_associate_entry()
446 struct folio *folio = dax_to_folio(entry); in dax_associate_entry()
448 if (dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) in dax_associate_entry()
457 WARN_ON_ONCE(dax_entry_order(entry) != folio_order(folio)); in dax_associate_entry()
461 dax_folio_init(entry); in dax_associate_entry()
462 folio = dax_to_folio(entry); in dax_associate_entry()
468 static void dax_disassociate_entry(void *entry, struct address_space *mapping, in dax_disassociate_entry() argument
471 struct folio *folio = dax_to_folio(entry); in dax_disassociate_entry()
473 if (dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) in dax_disassociate_entry()
479 static struct page *dax_busy_page(void *entry) in dax_busy_page() argument
481 struct folio *folio = dax_to_folio(entry); in dax_busy_page()
483 if (dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) in dax_busy_page()
493 * dax_lock_folio - Lock the DAX entry corresponding to a folio
494 * @folio: The folio whose entry we want to lock
497 * Return: A cookie to pass to dax_unlock_folio() or 0 if the entry could
503 void *entry; in dax_lock_folio() local
510 entry = NULL; in dax_lock_folio()
521 entry = (void *)~0UL; in dax_lock_folio()
532 entry = xas_load(&xas); in dax_lock_folio()
533 if (dax_is_locked(entry)) { in dax_lock_folio()
535 wait_entry_unlocked(&xas, entry); in dax_lock_folio()
539 dax_lock_entry(&xas, entry); in dax_lock_folio()
544 return (dax_entry_t)entry; in dax_lock_folio()
559 * dax_lock_mapping_entry - Lock the DAX entry corresponding to a mapping
560 * @mapping: the file's mapping whose entry we want to lock
562 * @page: output the dax page corresponding to this dax entry
564 * Return: A cookie to pass to dax_unlock_mapping_entry() or 0 if the entry
571 void *entry; in dax_lock_mapping_entry() local
575 entry = NULL; in dax_lock_mapping_entry()
582 entry = xas_load(&xas); in dax_lock_mapping_entry()
583 if (dax_is_locked(entry)) { in dax_lock_mapping_entry()
585 wait_entry_unlocked(&xas, entry); in dax_lock_mapping_entry()
589 if (!entry || in dax_lock_mapping_entry()
590 dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) { in dax_lock_mapping_entry()
592 * Because we look the entry up by the file's mapping in dax_lock_mapping_entry()
593 * and index, it may not have been inserted yet, or it in dax_lock_mapping_entry()
594 * may be a zero/empty entry. We don't treat this as in dax_lock_mapping_entry()
598 entry = (void *)~0UL; in dax_lock_mapping_entry()
600 *page = pfn_to_page(dax_to_pfn(entry)); in dax_lock_mapping_entry()
601 dax_lock_entry(&xas, entry); in dax_lock_mapping_entry()
607 return (dax_entry_t)entry; in dax_lock_mapping_entry()
622 * Find page cache entry at given index. If it is a DAX entry, return it
623 * with the entry locked. If the page cache doesn't contain an entry at
624 * that index, add a locked empty entry.
626 * When requesting an entry with size DAX_PMD, grab_mapping_entry() will
627 * either return that locked entry or will return VM_FAULT_FALLBACK.
632 * evict PTE entries in order to 'upgrade' them to a PMD entry. A PMD
634 * PTE insertion will cause an existing PMD entry to be unmapped and
640 * the tree, and PTE writes will simply dirty the entire PMD entry.
647 * a VM_FAULT code, encoded as an xarray internal entry. The ERR_PTR values
654 bool pmd_downgrade; /* splitting PMD entry into PTE entries? */ in grab_mapping_entry()
655 void *entry; in grab_mapping_entry() local
660 entry = get_next_unlocked_entry(xas, order); in grab_mapping_entry()
662 if (entry) { in grab_mapping_entry()
663 if (dax_is_conflict(entry)) in grab_mapping_entry()
665 if (!xa_is_value(entry)) { in grab_mapping_entry()
671 if (dax_is_pmd_entry(entry) && in grab_mapping_entry()
672 (dax_is_zero_entry(entry) || in grab_mapping_entry()
673 dax_is_empty_entry(entry))) { in grab_mapping_entry()
681 * Make sure 'entry' remains valid while we drop in grab_mapping_entry()
684 dax_lock_entry(xas, entry); in grab_mapping_entry()
691 if (dax_is_zero_entry(entry)) { in grab_mapping_entry()
700 dax_disassociate_entry(entry, mapping, false); in grab_mapping_entry()
702 dax_wake_entry(xas, entry, WAKE_ALL); in grab_mapping_entry()
704 entry = NULL; in grab_mapping_entry()
708 if (entry) { in grab_mapping_entry()
709 dax_lock_entry(xas, entry); in grab_mapping_entry()
715 entry = dax_make_entry(0, flags); in grab_mapping_entry()
716 dax_lock_entry(xas, entry); in grab_mapping_entry()
730 return entry; in grab_mapping_entry()
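
Putting the grab_mapping_entry() fragments above together: the function either locks an existing DAX entry, downgrades a zero/empty PMD entry when a PTE-sized entry was asked for, or inserts a locked empty placeholder, and it reports failure as a VM_FAULT code wrapped in an xarray internal entry. A condensed sketch of that flow; memory-allocation retries, error propagation and the actual PMD-to-PTE downgrade work are left out:

        static void *grab_mapping_entry_sketch(struct xa_state *xas,
                        struct address_space *mapping, unsigned int order)
        {
                unsigned long flags = DAX_EMPTY | (order > 0 ? DAX_PMD : 0);
                void *entry;

                xas_lock_irq(xas);
                entry = get_next_unlocked_entry(xas, order);

                if (entry) {
                        if (dax_is_conflict(entry)) {
                                /* Existing entry is of a smaller order: fall back. */
                                xas_unlock_irq(xas);
                                return xa_mk_internal(VM_FAULT_FALLBACK);
                        }
                        if (!xa_is_value(entry)) {
                                /* A plain page cache page where a DAX entry belongs. */
                                xas_unlock_irq(xas);
                                return xa_mk_internal(VM_FAULT_SIGBUS);
                        }
                        /*
                         * A PTE-sized request hitting a zero/empty PMD entry
                         * would unmap and evict the PMD entry here
                         * ("pmd_downgrade") before inserting a PTE entry;
                         * that path is omitted in this sketch.
                         */
                        dax_lock_entry(xas, entry);     /* reuse the existing entry */
                } else {
                        /* Nothing there yet: insert a locked empty placeholder. */
                        entry = dax_make_entry(0, flags);
                        dax_lock_entry(xas, entry);
                        mapping->nrpages += 1UL << order;
                }
                xas_unlock_irq(xas);
                return entry;
        }

The fault handlers further down check the return value with xa_is_internal()/xa_to_internal() and otherwise proceed with the entry locked, releasing it with dax_unlock_entry() when they are done.
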
757 void *entry; in dax_layout_busy_page_range() local
787 xas_for_each(&xas, entry, end_idx) { in dax_layout_busy_page_range()
788 if (WARN_ON_ONCE(!xa_is_value(entry))) in dax_layout_busy_page_range()
790 entry = wait_entry_unlocked_exclusive(&xas, entry); in dax_layout_busy_page_range()
791 if (entry) in dax_layout_busy_page_range()
792 page = dax_busy_page(entry); in dax_layout_busy_page_range()
793 put_unlocked_entry(&xas, entry, WAKE_NEXT); in dax_layout_busy_page_range()
820 void *entry; in __dax_invalidate_entry() local
823 entry = get_next_unlocked_entry(&xas, 0); in __dax_invalidate_entry()
824 if (!entry || WARN_ON_ONCE(!xa_is_value(entry))) in __dax_invalidate_entry()
830 dax_disassociate_entry(entry, mapping, trunc); in __dax_invalidate_entry()
832 mapping->nrpages -= 1UL << dax_entry_order(entry); in __dax_invalidate_entry()
835 put_unlocked_entry(&xas, entry, WAKE_ALL); in __dax_invalidate_entry()
845 void *entry; in __dax_clear_dirty_range() local
848 xas_for_each(&xas, entry, end) { in __dax_clear_dirty_range()
849 entry = wait_entry_unlocked_exclusive(&xas, entry); in __dax_clear_dirty_range()
850 if (!entry) in __dax_clear_dirty_range()
854 put_unlocked_entry(&xas, entry, WAKE_NEXT); in __dax_clear_dirty_range()
870 * Delete DAX entry at @index from @mapping. Wait for it
881 * caller has seen a DAX entry for this index, we better find it in dax_delete_mapping_entry()
891 void *entry; in dax_delete_mapping_range() local
903 xas_for_each(&xas, entry, end_idx) { in dax_delete_mapping_range()
904 if (!xa_is_value(entry)) in dax_delete_mapping_range()
906 entry = wait_entry_unlocked_exclusive(&xas, entry); in dax_delete_mapping_range()
907 if (!entry) in dax_delete_mapping_range()
909 dax_disassociate_entry(entry, mapping, true); in dax_delete_mapping_range()
911 mapping->nrpages -= 1UL << dax_entry_order(entry); in dax_delete_mapping_range()
912 put_unlocked_entry(&xas, entry, WAKE_ALL); in dax_delete_mapping_range()
990 * Invalidate DAX entry if it is clean.
1036 * By this point grab_mapping_entry() has ensured that we have a locked entry
1043 const struct iomap_iter *iter, void *entry, unsigned long pfn, in dax_insert_entry() argument
1055 if (shared || (dax_is_zero_entry(entry) && !(flags & DAX_ZERO_PAGE))) { in dax_insert_entry()
1058 if (dax_is_pmd_entry(entry)) in dax_insert_entry()
1061 else /* pte entry */ in dax_insert_entry()
1067 if (shared || dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) { in dax_insert_entry()
1070 dax_disassociate_entry(entry, mapping, false); in dax_insert_entry()
1075 * Only swap our new entry into the page cache if the current in dax_insert_entry()
1076 * entry is a zero page or an empty entry. If a normal PTE or in dax_insert_entry()
1077 * PMD entry is already in the cache, we leave it alone. This in dax_insert_entry()
1079 * existing entry is a PMD, we will just leave the PMD in the in dax_insert_entry()
1083 WARN_ON_ONCE(old != xa_mk_value(xa_to_value(entry) | in dax_insert_entry()
1085 entry = new_entry; in dax_insert_entry()
1097 return entry; in dax_insert_entry()
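
The heart of dax_insert_entry(), per the comment fragments above, is the swap decision: a freshly built entry only replaces the one already in the tree when that entry is a zero page or an empty placeholder (or the block is shared); an existing real PTE or PMD entry is left alone. A trimmed sketch of just that decision, leaving out the folio association/disassociation and dirty marking that surround it in the real function:

        static void *dax_swap_entry_sketch(struct xa_state *xas, void *entry,
                        void *new_entry, bool shared)
        {
                if (shared || dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) {
                        void *old = xas_store(xas, new_entry);

                        /* The old value must still be our locked placeholder. */
                        WARN_ON_ONCE(old != xa_mk_value(xa_to_value(entry) |
                                                        DAX_LOCKED));
                        return new_entry;
                }
                xas_load(xas);  /* just walk the xa_state to this index */
                return entry;
        }
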
1101 struct address_space *mapping, void *entry) in dax_writeback_one() argument
1111 if (WARN_ON(!xa_is_value(entry))) in dax_writeback_one()
1114 if (unlikely(dax_is_locked(entry))) { in dax_writeback_one()
1115 void *old_entry = entry; in dax_writeback_one()
1117 entry = get_next_unlocked_entry(xas, 0); in dax_writeback_one()
1119 /* Entry got punched out / reallocated? */ in dax_writeback_one()
1120 if (!entry || WARN_ON_ONCE(!xa_is_value(entry))) in dax_writeback_one()
1123 * Entry got reallocated elsewhere? No need to writeback. in dax_writeback_one()
1125 * difference in lockbit or entry type. in dax_writeback_one()
1127 if (dax_to_pfn(old_entry) != dax_to_pfn(entry)) in dax_writeback_one()
1129 if (WARN_ON_ONCE(dax_is_empty_entry(entry) || in dax_writeback_one()
1130 dax_is_zero_entry(entry))) { in dax_writeback_one()
1135 /* Another fsync thread may have already done this entry */ in dax_writeback_one()
1140 /* Lock the entry to serialize with page faults */ in dax_writeback_one()
1141 dax_lock_entry(xas, entry); in dax_writeback_one()
1147 * at the entry only under the i_pages lock and once they do that in dax_writeback_one()
1148 * they will see the entry locked and wait for it to unlock. in dax_writeback_one()
1160 pfn = dax_to_pfn(entry); in dax_writeback_one()
1161 count = 1UL << dax_entry_order(entry); in dax_writeback_one()
1178 * entry lock. in dax_writeback_one()
1182 xas_store(xas, entry); in dax_writeback_one()
1184 dax_wake_entry(xas, entry, WAKE_NEXT); in dax_writeback_one()
1190 put_unlocked_entry(xas, entry, WAKE_NEXT); in dax_writeback_one()
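
After the revalidation shown above, dax_writeback_one() locks the entry, clears the TOWRITE tag, drops the i_pages lock, flushes the CPU caches for every page the entry covers, and only then clears the dirty tag and unlocks. A condensed sketch of that sequence (write-protecting userspace mappings and error handling are omitted; dax_flush() is the dax_device cache-flush helper):

        static void dax_writeback_flush_sketch(struct xa_state *xas,
                        struct dax_device *dax_dev, void *entry)
        {
                unsigned long pfn = dax_to_pfn(entry);
                unsigned long count = 1UL << dax_entry_order(entry);

                /* Lock the entry to serialize with page faults. */
                dax_lock_entry(xas, entry);
                /*
                 * The TOWRITE tag can be cleared now: concurrent writeback
                 * calls will see the locked entry and wait for it.
                 */
                xas_clear_mark(xas, PAGECACHE_TAG_TOWRITE);
                xas_unlock_irq(xas);

                /* Flush every page the entry covers (1 for PTE, PMD_SIZE/PAGE_SIZE for PMD). */
                dax_flush(dax_dev, page_address(pfn_to_page(pfn)),
                          count * PAGE_SIZE);

                /*
                 * Only after the flush is it safe to drop the dirty tag:
                 * anyone redirtying the range had to look at the locked
                 * entry first.
                 */
                xas_reset(xas);
                xas_lock_irq(xas);
                xas_store(xas, entry);          /* storing the unlocked value unlocks it */
                xas_clear_mark(xas, PAGECACHE_TAG_DIRTY);
                dax_wake_entry(xas, entry, WAKE_NEXT);
                xas_unlock_irq(xas);
        }
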
1205 void *entry; in dax_writeback_mapping_range() local
1220 xas_for_each_marked(&xas, entry, end_index, PAGECACHE_TAG_TOWRITE) { in dax_writeback_mapping_range()
1221 ret = dax_writeback_one(&xas, dax_dev, mapping, entry); in dax_writeback_mapping_range()
1359 const struct iomap_iter *iter, void **entry) in dax_load_hole() argument
1366 *entry = dax_insert_entry(xas, vmf, iter, *entry, pfn, DAX_ZERO_PAGE); in dax_load_hole()
1375 const struct iomap_iter *iter, void **entry) in dax_pmd_load_hole() argument
1385 trace_dax_pmd_load_hole_fallback(inode, vmf, zero_folio, *entry); in dax_pmd_load_hole()
1389 *entry = dax_insert_entry(xas, vmf, iter, *entry, folio_pfn(zero_folio), in dax_pmd_load_hole()
1394 trace_dax_pmd_load_hole(inode, vmf, zero_folio, *entry); in dax_pmd_load_hole()
1399 const struct iomap_iter *iter, void **entry) in dax_pmd_load_hole() argument
1802 * @entry: an unlocked dax entry to be inserted
1807 struct xa_state *xas, void **entry, bool pmd) in dax_fault_iter() argument
1827 return dax_load_hole(xas, vmf, iter, entry); in dax_fault_iter()
1828 return dax_pmd_load_hole(xas, vmf, iter, entry); in dax_fault_iter()
1840 *entry = dax_insert_entry(xas, vmf, iter, *entry, pfn, entry_flags); in dax_fault_iter()
1848 folio = dax_to_folio(*entry); in dax_fault_iter()
1874 void *entry; in dax_iomap_pte_fault() local
1891 entry = grab_mapping_entry(&xas, mapping, 0); in dax_iomap_pte_fault()
1892 if (xa_is_internal(entry)) { in dax_iomap_pte_fault()
1893 ret = xa_to_internal(entry); in dax_iomap_pte_fault()
1914 ret = dax_fault_iter(vmf, &iter, pfnp, &xas, &entry, false); in dax_iomap_pte_fault()
1934 dax_unlock_entry(&xas, entry); in dax_iomap_pte_fault()
1986 void *entry; in dax_iomap_pmd_fault() local
2009 * grab_mapping_entry() will make sure we get an empty PMD entry, in dax_iomap_pmd_fault()
2010 * a zero PMD entry or a DAX PMD. If it can't (because a PTE in dax_iomap_pmd_fault()
2011 * entry is already in the array, for instance), it will return in dax_iomap_pmd_fault()
2014 entry = grab_mapping_entry(&xas, mapping, PMD_ORDER); in dax_iomap_pmd_fault()
2015 if (xa_is_internal(entry)) { in dax_iomap_pmd_fault()
2016 ret = xa_to_internal(entry); in dax_iomap_pmd_fault()
2036 ret = dax_fault_iter(vmf, &iter, pfnp, &xas, &entry, true); in dax_iomap_pmd_fault()
2044 dax_unlock_entry(&xas, entry); in dax_iomap_pmd_fault()
2089 * dax_insert_pfn_mkwrite - insert PTE or PMD entry into page tables
2092 * @order: Order of entry to insert.
2094 * This function inserts a writeable PTE or PMD entry into the page tables
2095 * for an mmaped DAX file. It also marks the page cache entry as dirty.
2103 void *entry; in dax_insert_pfn_mkwrite() local
2107 entry = get_next_unlocked_entry(&xas, order); in dax_insert_pfn_mkwrite()
2108 /* Did we race with someone splitting entry or so? */ in dax_insert_pfn_mkwrite()
2109 if (!entry || dax_is_conflict(entry) || in dax_insert_pfn_mkwrite()
2110 (order == 0 && !dax_is_pte_entry(entry))) { in dax_insert_pfn_mkwrite()
2111 put_unlocked_entry(&xas, entry, WAKE_NEXT); in dax_insert_pfn_mkwrite()
2118 dax_lock_entry(&xas, entry); in dax_insert_pfn_mkwrite()
2131 dax_unlock_entry(&xas, entry); in dax_insert_pfn_mkwrite()
2139 * @order: Order of entry to be inserted
2144 * table entry.