/* SPDX-License-Identifier: GPL-2.0 */
#ifndef LINUX_MM_INLINE_H
#define LINUX_MM_INLINE_H

#include <linux/atomic.h>
#include <linux/huge_mm.h>
#include <linux/mm_types.h>
#include <linux/swap.h>
#include <linux/string.h>
#include <linux/userfaultfd_k.h>
#include <linux/leafops.h>

/**
 * folio_is_file_lru - Should the folio be on a file LRU or anon LRU?
 * @folio: The folio to test.
 *
 * We would like to get this info without a page flag, but the state
 * needs to survive until the folio is last deleted from the LRU, which
 * could be as far down as __page_cache_release.
 *
 * Return: An integer (not a boolean!) used to sort a folio onto the
 * right LRU list and to account folios correctly.
 * 1 if @folio is a regular filesystem backed page cache folio
 * or a lazily freed anonymous folio (e.g. via MADV_FREE).
 * 0 if @folio is a normal anonymous folio, a tmpfs folio or otherwise
 * ram or swap backed folio.
 */
static inline int folio_is_file_lru(const struct folio *folio)
{
	return !folio_test_swapbacked(folio);
}

/*
 * Update node and zone LRU page counters. Caller must hold the lruvec
 * lock (enforced by the lockdep assertion below).
 */
static __always_inline void __update_lru_size(struct lruvec *lruvec,
				enum lru_list lru, enum zone_type zid,
				long nr_pages)
{
	struct pglist_data *pgdat = lruvec_pgdat(lruvec);

	lockdep_assert_held(&lruvec->lru_lock);
	/* nr_pages must fit in an int; larger deltas indicate a caller bug */
	WARN_ON_ONCE(nr_pages != (int)nr_pages);

	mod_lruvec_state(lruvec, NR_LRU_BASE + lru, nr_pages);
	__mod_zone_page_state(&pgdat->node_zones[zid],
				NR_ZONE_LRU_BASE + lru, nr_pages);
}

/* As __update_lru_size(), but also updates the memcg LRU size accounting. */
static __always_inline void update_lru_size(struct lruvec *lruvec,
				enum lru_list lru, enum zone_type zid,
				long nr_pages)
{
	__update_lru_size(lruvec, lru, zid, nr_pages);
#ifdef CONFIG_MEMCG
	mem_cgroup_update_lru_size(lruvec, lru, zid, nr_pages);
#endif
}

/**
 * __folio_clear_lru_flags - Clear page lru flags before releasing a page.
 * @folio: The folio that was on lru and now has a zero reference.
 */
static __always_inline void __folio_clear_lru_flags(struct folio *folio)
{
	VM_BUG_ON_FOLIO(!folio_test_lru(folio), folio);

	__folio_clear_lru(folio);

	/* this shouldn't happen, so leave the flags to bad_page() */
	if (folio_test_active(folio) && folio_test_unevictable(folio))
		return;

	__folio_clear_active(folio);
	__folio_clear_unevictable(folio);
}

/**
 * folio_lru_list - Which LRU list should a folio be on?
 * @folio: The folio to test.
 *
 * Return: The LRU list a folio should be on, as an index
 * into the array of LRU lists.
 */
static __always_inline enum lru_list folio_lru_list(const struct folio *folio)
{
	enum lru_list lru;

	VM_BUG_ON_FOLIO(folio_test_active(folio) && folio_test_unevictable(folio), folio);

	if (folio_test_unevictable(folio))
		return LRU_UNEVICTABLE;

	lru = folio_is_file_lru(folio) ? LRU_INACTIVE_FILE : LRU_INACTIVE_ANON;
	if (folio_test_active(folio))
		lru += LRU_ACTIVE;

	return lru;
}

#ifdef CONFIG_LRU_GEN

/*
 * NOTE(review): reads the lru_switch static key; presumably true while the
 * MGLRU enabled state is being switched — confirm against the key's writer.
 */
static inline bool lru_gen_switching(void)
{
	DECLARE_STATIC_KEY_FALSE(lru_switch);

	return static_branch_unlikely(&lru_switch);
}

#ifdef CONFIG_LRU_GEN_ENABLED
/* Whether the multi-gen LRU core cap is enabled (default-on build). */
static inline bool lru_gen_enabled(void)
{
	DECLARE_STATIC_KEY_TRUE(lru_gen_caps[NR_LRU_GEN_CAPS]);

	return static_branch_likely(&lru_gen_caps[LRU_GEN_CORE]);
}
#else
/* Whether the multi-gen LRU core cap is enabled (default-off build). */
static inline bool lru_gen_enabled(void)
{
	DECLARE_STATIC_KEY_FALSE(lru_gen_caps[NR_LRU_GEN_CAPS]);

	return static_branch_unlikely(&lru_gen_caps[LRU_GEN_CORE]);
}
#endif

/* Whether the current task is in an MGLRU-tracked page fault. */
static inline bool lru_gen_in_fault(void)
{
	return current->in_lru_fault;
}

/* Map an lru_gen sequence number to a generation index. */
static inline int lru_gen_from_seq(unsigned long seq)
{
	return seq % MAX_NR_GENS;
}

/* Map an lru_gen sequence number to a refault-history slot. */
static inline int lru_hist_from_seq(unsigned long seq)
{
	return seq % NR_HIST_GENS;
}

static inline int lru_tier_from_refs(int refs, bool workingset)
{
	VM_WARN_ON_ONCE(refs > BIT(LRU_REFS_WIDTH));

	/* see the comment on MAX_NR_TIERS */
	return workingset ? MAX_NR_TIERS - 1 : order_base_2(refs);
}

/* Number of recorded accesses; 0 when PG_referenced is clear. */
static inline int folio_lru_refs(const struct folio *folio)
{
	unsigned long flags = READ_ONCE(folio->flags.f);

	if (!(flags & BIT(PG_referenced)))
		return 0;
	/*
	 * Return the total number of accesses including PG_referenced. Also see
	 * the comment on LRU_REFS_FLAGS.
	 */
	return ((flags & LRU_REFS_MASK) >> LRU_REFS_PGOFF) + 1;
}

/*
 * Generation number stored in the folio flags (stored biased by one), or
 * -1 if the folio is not on an MGLRU list.
 */
static inline int folio_lru_gen(const struct folio *folio)
{
	unsigned long flags = READ_ONCE(folio->flags.f);

	return ((flags & LRU_GEN_MASK) >> LRU_GEN_PGOFF) - 1;
}

static inline bool lru_gen_is_active(const struct lruvec *lruvec, int gen)
{
	unsigned long max_seq = lruvec->lrugen.max_seq;

	VM_WARN_ON_ONCE(gen >= MAX_NR_GENS);

	/* see the comment on MIN_NR_GENS */
	return gen == lru_gen_from_seq(max_seq) || gen == lru_gen_from_seq(max_seq - 1);
}

/*
 * Move @folio's page count from @old_gen to @new_gen (-1 meaning "not on
 * an MGLRU list") and mirror the change into the classic LRU size counters.
 */
static inline void lru_gen_update_size(struct lruvec *lruvec, struct folio *folio,
				       int old_gen, int new_gen)
{
	int type = folio_is_file_lru(folio);
	int zone = folio_zonenum(folio);
	int delta = folio_nr_pages(folio);
	enum lru_list lru = type * LRU_INACTIVE_FILE;
	struct lru_gen_folio *lrugen = &lruvec->lrugen;

	VM_WARN_ON_ONCE(old_gen != -1 && old_gen >= MAX_NR_GENS);
	VM_WARN_ON_ONCE(new_gen != -1 && new_gen >= MAX_NR_GENS);
	VM_WARN_ON_ONCE(old_gen == -1 && new_gen == -1);

	if (old_gen >= 0)
		WRITE_ONCE(lrugen->nr_pages[old_gen][type][zone],
			   lrugen->nr_pages[old_gen][type][zone] - delta);
	if (new_gen >= 0)
		WRITE_ONCE(lrugen->nr_pages[new_gen][type][zone],
			   lrugen->nr_pages[new_gen][type][zone] + delta);

	/* addition */
	if (old_gen < 0) {
		if (lru_gen_is_active(lruvec, new_gen))
			lru += LRU_ACTIVE;
		__update_lru_size(lruvec, lru, zone, delta);
		return;
	}

	/* deletion */
	if (new_gen < 0) {
		if (lru_gen_is_active(lruvec, old_gen))
			lru += LRU_ACTIVE;
		__update_lru_size(lruvec, lru, zone, -delta);
		return;
	}

	/* promotion */
	if (!lru_gen_is_active(lruvec, old_gen) && lru_gen_is_active(lruvec, new_gen)) {
		__update_lru_size(lruvec, lru, zone, -delta);
		__update_lru_size(lruvec, lru + LRU_ACTIVE, zone, delta);
	}

	/* demotion requires isolation, e.g., lru_deactivate_fn() */
	VM_WARN_ON_ONCE(lru_gen_is_active(lruvec, old_gen) && !lru_gen_is_active(lruvec, new_gen));
}

/*
 * Compute the sequence number (and hence generation) a folio should be
 * added at, based on its access history flags; clamped to min_seq.
 */
static inline unsigned long lru_gen_folio_seq(const struct lruvec *lruvec,
					      const struct folio *folio,
					      bool reclaiming)
{
	int gen;
	int type = folio_is_file_lru(folio);
	const struct lru_gen_folio *lrugen = &lruvec->lrugen;

	/*
	 * +-----------------------------------+-----------------------------------+
	 * | Accessed through page tables and  | Accessed through file descriptors |
	 * | promoted by folio_update_gen()    | and protected by folio_inc_gen()  |
	 * +-----------------------------------+-----------------------------------+
	 * | PG_active (set while isolated)    |                                   |
	 * +-----------------+-----------------+-----------------+-----------------+
	 * | PG_workingset   | PG_referenced   | PG_workingset   | LRU_REFS_FLAGS  |
	 * +-----------------------------------+-----------------------------------+
	 * |<---------- MIN_NR_GENS ---------->|                                   |
	 * |<---------------------------- MAX_NR_GENS ---------------------------->|
	 */
	if (folio_test_active(folio))
		gen = MIN_NR_GENS - folio_test_workingset(folio);
	else if (reclaiming)
		gen = MAX_NR_GENS;
	else if ((!folio_is_file_lru(folio) && !folio_test_swapcache(folio)) ||
		 (folio_test_reclaim(folio) &&
		  (folio_test_dirty(folio) || folio_test_writeback(folio))))
		gen = MIN_NR_GENS;
	else
		gen = MAX_NR_GENS - folio_test_workingset(folio);

	return max(READ_ONCE(lrugen->max_seq) - gen + 1, READ_ONCE(lrugen->min_seq[type]));
}

/*
 * Add @folio to the appropriate MGLRU generation list. Returns false (and
 * does nothing) if the folio is unevictable or MGLRU is not enabled, so the
 * caller can fall back to the classic LRU lists.
 */
static inline bool lru_gen_add_folio(struct lruvec *lruvec, struct folio *folio, bool reclaiming)
{
	unsigned long seq;
	unsigned long flags;
	int gen = folio_lru_gen(folio);
	int type = folio_is_file_lru(folio);
	int zone = folio_zonenum(folio);
	struct lru_gen_folio *lrugen = &lruvec->lrugen;

	VM_WARN_ON_ONCE_FOLIO(gen != -1, folio);

	if (folio_test_unevictable(folio) || !lrugen->enabled)
		return false;

	seq = lru_gen_folio_seq(lruvec, folio, reclaiming);
	gen = lru_gen_from_seq(seq);
	flags = (gen + 1UL) << LRU_GEN_PGOFF;
	/* see the comment on MIN_NR_GENS about PG_active */
	set_mask_bits(&folio->flags.f, LRU_GEN_MASK | BIT(PG_active), flags);

	lru_gen_update_size(lruvec, folio, -1, gen);
	/* for folio_rotate_reclaimable() */
	if (reclaiming)
		list_add_tail(&folio->lru, &lrugen->folios[gen][type][zone]);
	else
		list_add(&folio->lru, &lrugen->folios[gen][type][zone]);

	return true;
}

/*
 * Remove @folio from its MGLRU generation list. Returns false if the folio
 * carries no generation (i.e. it is on a classic LRU list instead).
 */
static inline bool lru_gen_del_folio(struct lruvec *lruvec, struct folio *folio, bool reclaiming)
{
	unsigned long flags;
	int gen = folio_lru_gen(folio);

	if (gen < 0)
		return false;

	VM_WARN_ON_ONCE_FOLIO(folio_test_active(folio), folio);
	VM_WARN_ON_ONCE_FOLIO(folio_test_unevictable(folio), folio);

	/* for folio_migrate_flags() */
	flags = !reclaiming && lru_gen_is_active(lruvec, gen) ? BIT(PG_active) : 0;
	flags = set_mask_bits(&folio->flags.f, LRU_GEN_MASK, flags);
	gen = ((flags & LRU_GEN_MASK) >> LRU_GEN_PGOFF) - 1;

	lru_gen_update_size(lruvec, folio, gen, -1);
	list_del(&folio->lru);

	return true;
}

/* Transfer the LRU_REFS bits from @old to @new (used during migration). */
static inline void folio_migrate_refs(struct folio *new, const struct folio *old)
{
	unsigned long refs = READ_ONCE(old->flags.f) & LRU_REFS_MASK;

	set_mask_bits(&new->flags.f, LRU_REFS_MASK, refs);
}

#else /* !CONFIG_LRU_GEN */

static inline bool lru_gen_enabled(void)
{
	return false;
}

static inline bool lru_gen_switching(void)
{
	return false;
}

static inline bool lru_gen_in_fault(void)
{
	return false;
}

static inline bool lru_gen_add_folio(struct lruvec *lruvec, struct folio *folio, bool reclaiming)
{
	return false;
}

static inline bool lru_gen_del_folio(struct lruvec *lruvec, struct folio *folio, bool reclaiming)
{
	return false;
}

static inline void folio_migrate_refs(struct folio *new, const struct folio *old)
{

}

#endif /* CONFIG_LRU_GEN */

/*
 * Add @folio to its lruvec, preferring the MGLRU lists when enabled and
 * falling back to the classic per-type lists otherwise. Unevictable folios
 * are accounted but not linked onto any list.
 */
static __always_inline
void lruvec_add_folio(struct lruvec *lruvec, struct folio *folio)
{
	enum lru_list lru = folio_lru_list(folio);

	VM_WARN_ON_ONCE_FOLIO(!folio_matches_lruvec(folio, lruvec), folio);

	if (lru_gen_add_folio(lruvec, folio, false))
		return;

	update_lru_size(lruvec, lru, folio_zonenum(folio),
			folio_nr_pages(folio));
	if (lru != LRU_UNEVICTABLE)
		list_add(&folio->lru, &lruvec->lists[lru]);
}

/* As lruvec_add_folio(), but adds to the tail (reclaim-order) end. */
static __always_inline
void lruvec_add_folio_tail(struct lruvec *lruvec, struct folio *folio)
{
	enum lru_list lru = folio_lru_list(folio);

	VM_WARN_ON_ONCE_FOLIO(!folio_matches_lruvec(folio, lruvec), folio);

	if (lru_gen_add_folio(lruvec, folio, true))
		return;

	update_lru_size(lruvec, lru, folio_zonenum(folio),
			folio_nr_pages(folio));
	/* This is not expected to be used on LRU_UNEVICTABLE */
	list_add_tail(&folio->lru, &lruvec->lists[lru]);
}

/* Remove @folio from its lruvec (MGLRU or classic) and update accounting. */
static __always_inline
void lruvec_del_folio(struct lruvec *lruvec, struct folio *folio)
{
	enum lru_list lru = folio_lru_list(folio);

	VM_WARN_ON_ONCE_FOLIO(!folio_matches_lruvec(folio, lruvec), folio);

	if (lru_gen_del_folio(lruvec, folio, false))
		return;

	if (lru != LRU_UNEVICTABLE)
		list_del(&folio->lru);
	update_lru_size(lruvec, lru, folio_zonenum(folio),
			-folio_nr_pages(folio));
}

#ifdef CONFIG_ANON_VMA_NAME
/* mmap_lock should be read-locked */
static inline void anon_vma_name_get(struct anon_vma_name *anon_name)
{
	if (anon_name)
		kref_get(&anon_name->kref);
}

static inline void anon_vma_name_put(struct anon_vma_name *anon_name)
{
	if (anon_name)
		kref_put(&anon_name->kref, anon_vma_name_free);
}

/*
 * Reuse @anon_name by taking a reference, or duplicate it if the refcount
 * is about to saturate. @anon_name must be non-NULL.
 */
static inline
struct anon_vma_name *anon_vma_name_reuse(struct anon_vma_name *anon_name)
{
	/* Prevent anon_name refcount saturation early on */
	if (kref_read(&anon_name->kref) < REFCOUNT_MAX) {
		anon_vma_name_get(anon_name);
		return anon_name;
	}
	return anon_vma_name_alloc(anon_name->name);
}

static inline void dup_anon_vma_name(struct vm_area_struct *orig_vma,
				     struct vm_area_struct *new_vma)
{
	struct anon_vma_name *anon_name = anon_vma_name(orig_vma);

	if (anon_name)
		new_vma->anon_name = anon_vma_name_reuse(anon_name);
}

static inline void free_anon_vma_name(struct vm_area_struct *vma)
{
	/*
	 * Not using anon_vma_name because it generates a warning if mmap_lock
	 * is not held, which might be the case here.
	 */
	anon_vma_name_put(vma->anon_name);
}

/* Two names are equal if both are NULL, identical, or string-equal. */
static inline bool anon_vma_name_eq(struct anon_vma_name *anon_name1,
				    struct anon_vma_name *anon_name2)
{
	if (anon_name1 == anon_name2)
		return true;

	return anon_name1 && anon_name2 &&
	       !strcmp(anon_name1->name, anon_name2->name);
}

#else /* CONFIG_ANON_VMA_NAME */
static inline void anon_vma_name_get(struct anon_vma_name *anon_name) {}
static inline void anon_vma_name_put(struct anon_vma_name *anon_name) {}
static inline void dup_anon_vma_name(struct vm_area_struct *orig_vma,
				     struct vm_area_struct *new_vma) {}
static inline void free_anon_vma_name(struct vm_area_struct *vma) {}

static inline bool anon_vma_name_eq(struct anon_vma_name *anon_name1,
				    struct anon_vma_name *anon_name2)
{
	return true;
}

#endif /* CONFIG_ANON_VMA_NAME */

void pfnmap_track_ctx_release(struct kref *ref);

static inline void init_tlb_flush_pending(struct mm_struct *mm)
{
	atomic_set(&mm->tlb_flush_pending, 0);
}

static inline void inc_tlb_flush_pending(struct mm_struct *mm)
{
	atomic_inc(&mm->tlb_flush_pending);
	/*
	 * The only time this value is relevant is when there are indeed pages
	 * to flush. And we'll only flush pages after changing them, which
	 * requires the PTL.
	 *
	 * So the ordering here is:
	 *
	 *	atomic_inc(&mm->tlb_flush_pending);
	 *	spin_lock(&ptl);
	 *	...
	 *	set_pte_at();
	 *	spin_unlock(&ptl);
	 *
	 *				spin_lock(&ptl)
	 *				mm_tlb_flush_pending();
	 *				....
	 *				spin_unlock(&ptl);
	 *
	 *	flush_tlb_range();
	 *	atomic_dec(&mm->tlb_flush_pending);
	 *
	 * Where the increment if constrained by the PTL unlock, it thus
	 * ensures that the increment is visible if the PTE modification is
	 * visible. After all, if there is no PTE modification, nobody cares
	 * about TLB flushes either.
	 *
	 * This very much relies on users (mm_tlb_flush_pending() and
	 * mm_tlb_flush_nested()) only caring about _specific_ PTEs (and
	 * therefore specific PTLs), because with SPLIT_PTE_PTLOCKS and RCpc
	 * locks (PPC) the unlock of one doesn't order against the lock of
	 * another PTL.
	 *
	 * The decrement is ordered by the flush_tlb_range(), such that
	 * mm_tlb_flush_pending() will not return false unless all flushes have
	 * completed.
	 */
}

static inline void dec_tlb_flush_pending(struct mm_struct *mm)
{
	/*
	 * See inc_tlb_flush_pending().
	 *
	 * This cannot be smp_mb__before_atomic() because smp_mb() simply does
	 * not order against TLB invalidate completion, which is what we need.
	 *
	 * Therefore we must rely on tlb_flush_*() to guarantee order.
	 */
	atomic_dec(&mm->tlb_flush_pending);
}

static inline bool mm_tlb_flush_pending(const struct mm_struct *mm)
{
	/*
	 * Must be called after having acquired the PTL; orders against that
	 * PTLs release and therefore ensures that if we observe the modified
	 * PTE we must also observe the increment from inc_tlb_flush_pending().
	 *
	 * That is, it only guarantees to return true if there is a flush
	 * pending for _this_ PTL.
	 */
	return atomic_read(&mm->tlb_flush_pending);
}

static inline bool mm_tlb_flush_nested(const struct mm_struct *mm)
{
	/*
	 * Similar to mm_tlb_flush_pending(), we must have acquired the PTL
	 * for which there is a TLB flush pending in order to guarantee
	 * we've seen both that PTE modification and the increment.
	 *
	 * (no requirement on actually still holding the PTL, that is irrelevant)
	 */
	return atomic_read(&mm->tlb_flush_pending) > 1;
}

#ifdef CONFIG_MMU
/*
 * Computes the pte marker to copy from the given source entry into dst_vma.
 * If no marker should be copied, returns 0.
 * The caller should insert a new pte created with make_pte_marker().
 */
static inline pte_marker copy_pte_marker(
		softleaf_t entry, struct vm_area_struct *dst_vma)
{
	const pte_marker srcm = softleaf_to_marker(entry);
	/* Always copy error entries. */
	pte_marker dstm = srcm & (PTE_MARKER_POISONED | PTE_MARKER_GUARD);

	/* Only copy PTE markers if UFFD register matches. */
	if ((srcm & PTE_MARKER_UFFD_WP) && userfaultfd_wp(dst_vma))
		dstm |= PTE_MARKER_UFFD_WP;

	return dstm;
}

/*
 * If this pte is wr-protected by uffd-wp in any form, arm the special pte to
 * replace a none pte. NOTE! This should only be called when *pte is already
 * cleared so we will never accidentally replace something valuable. Meanwhile
 * none pte also means we are not demoting the pte so tlb flushed is not needed.
 * E.g., when pte cleared the caller should have taken care of the tlb flush.
 *
 * Must be called with pgtable lock held so that no thread will see the none
 * pte, and if they see it, they'll fault and serialize at the pgtable lock.
 *
 * Returns true if an uffd-wp pte was installed, false otherwise.
 */
static inline bool
pte_install_uffd_wp_if_needed(struct vm_area_struct *vma, unsigned long addr,
			      pte_t *pte, pte_t pteval)
{
	bool arm_uffd_pte = false;

	if (!uffd_supports_wp_marker())
		return false;

	/* The current status of the pte should be "cleared" before calling */
	WARN_ON_ONCE(!pte_none(ptep_get(pte)));

	/*
	 * NOTE: userfaultfd_wp_unpopulated() doesn't need this whole
	 * thing, because when zapping either it means it's dropping the
	 * page, or in TTU where the present pte will be quickly replaced
	 * with a swap pte.  There's no way of leaking the bit.
	 */
	if (vma_is_anonymous(vma) || !userfaultfd_wp(vma))
		return false;

	/* A uffd-wp wr-protected normal pte */
	if (unlikely(pte_present(pteval) && pte_uffd_wp(pteval)))
		arm_uffd_pte = true;

	/*
	 * A uffd-wp wr-protected swap pte.  Note: this should even cover an
	 * existing pte marker with uffd-wp bit set.
	 */
	if (unlikely(pte_swp_uffd_wp_any(pteval)))
		arm_uffd_pte = true;

	if (unlikely(arm_uffd_pte)) {
		set_pte_at(vma->vm_mm, addr, pte,
			   make_pte_marker(PTE_MARKER_UFFD_WP));
		return true;
	}

	return false;
}

/*
 * Whether accesses through @vma should feed LRU recency: false for
 * sequential/random-access hints and FMODE_NOREUSE files.
 */
static inline bool vma_has_recency(const struct vm_area_struct *vma)
{
	if (vma->vm_flags & (VM_SEQ_READ | VM_RAND_READ))
		return false;

	if (vma->vm_file && (vma->vm_file->f_mode & FMODE_NOREUSE))
		return false;

	return true;
}
#endif

/**
 * num_pages_contiguous() - determine the number of contiguous pages
 *			    that represent contiguous PFNs
 * @pages: an array of page pointers
 * @nr_pages: length of the array, at least 1
 *
 * Determine the number of contiguous pages that represent contiguous PFNs
 * in @pages, starting from the first page.
 *
 * In some kernel configs contiguous PFNs will not have contiguous struct
 * pages. In these configurations num_pages_contiguous() will return a num
 * smaller than ideal number. The caller should continue to check for pfn
 * contiguity after each call to num_pages_contiguous().
 *
 * Returns the number of contiguous pages.
 */
static inline size_t num_pages_contiguous(struct page **pages, size_t nr_pages)
{
	struct page *cur_page = pages[0];
	unsigned long section = memdesc_section(cur_page->flags);
	size_t i;

	for (i = 1; i < nr_pages; i++) {
		if (++cur_page != pages[i])
			break;
		/*
		 * In unproblematic kernel configs, page_to_section() == 0 and
		 * the whole check will get optimized out.
		 */
		if (memdesc_section(cur_page->flags) != section)
			break;
	}

	return i;
}

#endif