/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SWAP_H
#define _LINUX_SWAP_H

#include <linux/spinlock.h>
#include <linux/linkage.h>
#include <linux/mmzone.h>
#include <linux/list.h>
#include <linux/memcontrol.h>
#include <linux/sched.h>
#include <linux/node.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/atomic.h>
#include <linux/page-flags.h>
#include <uapi/linux/mempolicy.h>
#include <asm/page.h>

struct notifier_block;

struct bio;

struct pagevec;

#define SWAP_FLAG_PREFER	0x8000	/* set if swap priority specified */
#define SWAP_FLAG_PRIO_MASK	0x7fff
#define SWAP_FLAG_DISCARD	0x10000 /* enable discard for swap */
#define SWAP_FLAG_DISCARD_ONCE	0x20000 /* discard swap area at swapon-time */
#define SWAP_FLAG_DISCARD_PAGES 0x40000 /* discard page-clusters after use */

#define SWAP_FLAGS_VALID	(SWAP_FLAG_PRIO_MASK | SWAP_FLAG_PREFER | \
				 SWAP_FLAG_DISCARD | SWAP_FLAG_DISCARD_ONCE | \
				 SWAP_FLAG_DISCARD_PAGES)
#define SWAP_BATCH 64

static inline int current_is_kswapd(void)
{
	return current->flags & PF_KSWAPD;
}

/*
 * MAX_SWAPFILES defines the maximum number of swaptypes: things which can
 * be swapped to.  The swap type and the offset into that swap type are
 * encoded into pte's and into pgoff_t's in the swapcache.  Using five bits
 * for the type means that the maximum number of swapcache pages is 27 bits
 * on 32-bit-pgoff_t architectures.  And that assumes that the architecture
 * packs the type/offset into the pte as 5/27 as well.
 */
#define MAX_SWAPFILES_SHIFT	5
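/*
 * Illustrative sketch only, not part of this header: with the 5/27 split
 * described above and 4 KiB pages, one swap area is limited to 2^27 slots,
 * i.e. 2^27 * 4 KiB = 512 GiB. A hypothetical decode of such a packed
 * value would look like:
 *
 *	unsigned int type = val >> 27;			(which swap area)
 *	pgoff_t offset = val & ((1UL << 27) - 1);	(slot within it)
 *
 * The real helpers are swp_entry(), swp_type() and swp_offset() in
 * <linux/swapops.h>; the pte-level packing is per-architecture.
 */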
/*
 * Use some of the swap file numbers for other purposes. This
 * is a convenient way to hook into the VM to trigger special
 * actions on faults.
 */

/*
 * PTE markers are used to persist information onto PTEs that otherwise
 * should be a none pte.  As its name "PTE" hints, it should only be
 * applied to the leaves of pgtables.
 */
#define SWP_PTE_MARKER_NUM 1
#define SWP_PTE_MARKER		(MAX_SWAPFILES + SWP_HWPOISON_NUM + \
				 SWP_MIGRATION_NUM + SWP_DEVICE_NUM)

/*
 * Unaddressable device memory support. See include/linux/hmm.h and
 * Documentation/mm/hmm.rst. The short description is that we need struct
 * pages for device memory that is unaddressable (inaccessible) by the CPU,
 * so that we can migrate part of a process's memory to device memory.
 *
 * When a page is migrated from CPU to device, we set the CPU page table
 * entry to a special SWP_DEVICE_{READ|WRITE} entry.
 *
 * When a page is mapped by the device for exclusive access we set the CPU
 * page table entries to a special SWP_DEVICE_EXCLUSIVE entry.
 */
#ifdef CONFIG_DEVICE_PRIVATE
#define SWP_DEVICE_NUM 3
#define SWP_DEVICE_WRITE (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM)
#define SWP_DEVICE_READ (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM+1)
#define SWP_DEVICE_EXCLUSIVE (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM+2)
#else
#define SWP_DEVICE_NUM 0
#endif

/*
 * Page migration support.
 *
 * SWP_MIGRATION_READ_EXCLUSIVE is only applicable to anonymous pages and
 * indicates that the referenced (part of an) anonymous page is exclusive
 * to a single process. For SWP_MIGRATION_WRITE, that information is
 * implicit: (part of) an anonymous page that is mapped writable is
 * exclusive to a single process.
 */
#ifdef CONFIG_MIGRATION
#define SWP_MIGRATION_NUM 3
#define SWP_MIGRATION_READ	(MAX_SWAPFILES + SWP_HWPOISON_NUM)
#define SWP_MIGRATION_READ_EXCLUSIVE (MAX_SWAPFILES + SWP_HWPOISON_NUM + 1)
#define SWP_MIGRATION_WRITE	(MAX_SWAPFILES + SWP_HWPOISON_NUM + 2)
#else
#define SWP_MIGRATION_NUM 0
#endif

/*
 * Handling of hardware poisoned pages with memory corruption.
 */
#ifdef CONFIG_MEMORY_FAILURE
#define SWP_HWPOISON_NUM 1
#define SWP_HWPOISON		MAX_SWAPFILES
#else
#define SWP_HWPOISON_NUM 0
#endif

#define MAX_SWAPFILES \
	((1 << MAX_SWAPFILES_SHIFT) - SWP_DEVICE_NUM - \
	SWP_MIGRATION_NUM - SWP_HWPOISON_NUM - \
	SWP_PTE_MARKER_NUM)
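/*
 * Worked example, assuming CONFIG_DEVICE_PRIVATE, CONFIG_MIGRATION and
 * CONFIG_MEMORY_FAILURE are all enabled:
 *
 *	MAX_SWAPFILES = (1 << 5) - 3 - 3 - 1 - 1 = 24
 *
 * i.e. 24 of the 32 encodable types are usable for real swap areas; the
 * remaining 8 are reserved for the special entries defined above.
 */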
/*
 * Magic header for a swap area. The first part of the union is
 * what the swap magic looks like for the old (limited to 128MB)
 * swap area format, the second part of the union adds - in the
 * old reserved area - some extra information. Note that the first
 * kilobyte is reserved for boot loader or disk label stuff...
 *
 * Having the magic at the end of the PAGE_SIZE makes detecting swap
 * areas somewhat tricky on machines that support multiple page sizes.
 * For 2.5 we'll probably want to move the magic to just beyond the
 * bootbits...
 */
union swap_header {
	struct {
		char reserved[PAGE_SIZE - 10];
		char magic[10];			/* SWAP-SPACE or SWAPSPACE2 */
	} magic;
	struct {
		char		bootbits[1024];	/* Space for disklabel etc. */
		__u32		version;
		__u32		last_page;
		__u32		nr_badpages;
		unsigned char	sws_uuid[16];
		unsigned char	sws_volume[16];
		__u32		padding[117];
		__u32		badpages[1];
	} info;
};

/*
 * current->reclaim_state points to one of these when a task is running
 * memory reclaim
 */
struct reclaim_state {
	/* pages reclaimed outside of LRU-based reclaim */
	unsigned long reclaimed;
#ifdef CONFIG_LRU_GEN
	/* per-thread mm walk data */
	struct lru_gen_mm_walk *mm_walk;
#endif
};

/*
 * mm_account_reclaimed_pages(): account reclaimed pages outside of LRU-based
 * reclaim
 * @pages: number of pages reclaimed
 *
 * If the current process is undergoing a reclaim operation, increment the
 * number of reclaimed pages by @pages.
 */
static inline void mm_account_reclaimed_pages(unsigned long pages)
{
	if (current->reclaim_state)
		current->reclaim_state->reclaimed += pages;
}

#ifdef __KERNEL__

struct address_space;
struct sysinfo;
struct writeback_control;
struct zone;

/*
 * A swap extent maps a range of a swapfile's PAGE_SIZE pages onto a range of
 * disk blocks.  An rbtree of swap extents maps the entire swapfile (where the
 * term `swapfile' refers to either a blockdevice or an IS_REG file). Apart
 * from setup, they're handled identically.
 *
 * We always assume that blocks are of size PAGE_SIZE.
 */
struct swap_extent {
	struct rb_node rb_node;
	pgoff_t start_page;
	pgoff_t nr_pages;
	sector_t start_block;
};

/*
 * Max bad pages in the new format..
 */
#define MAX_SWAP_BADPAGES \
	((offsetof(union swap_header, magic.magic) - \
	  offsetof(union swap_header, info.badpages)) / sizeof(int))
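/*
 * Worked example, assuming PAGE_SIZE == 4096 and sizeof(int) == 4:
 *
 *	offsetof(union swap_header, magic.magic)   = 4096 - 10 = 4086
 *	offsetof(union swap_header, info.badpages) = 1024 + 3*4 + 2*16
 *						     + 117*4   = 1536
 *	MAX_SWAP_BADPAGES = (4086 - 1536) / 4 = 637
 *
 * so a v2 swap header can record at most 637 bad pages with 4 KiB pages.
 */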
enum {
	SWP_USED	= (1 << 0),	/* is slot in swap_info[] used? */
	SWP_WRITEOK	= (1 << 1),	/* ok to write to this swap? */
	SWP_DISCARDABLE = (1 << 2),	/* blkdev supports discard */
	SWP_DISCARDING	= (1 << 3),	/* now discarding a free cluster */
	SWP_SOLIDSTATE	= (1 << 4),	/* blkdev seeks are cheap */
	SWP_CONTINUED	= (1 << 5),	/* swap_map has count continuation */
	SWP_BLKDEV	= (1 << 6),	/* it's a block device */
	SWP_ACTIVATED	= (1 << 7),	/* set after swap_activate success */
	SWP_FS_OPS	= (1 << 8),	/* swapfile operations go through fs */
	SWP_AREA_DISCARD = (1 << 9),	/* single-time swap area discards */
	SWP_PAGE_DISCARD = (1 << 10),	/* freed swap page-cluster discards */
	SWP_STABLE_WRITES = (1 << 11),	/* no overwriting of PG_writeback pages */
	SWP_SYNCHRONOUS_IO = (1 << 12),	/* synchronous IO is efficient */
					/* add others here before... */
};

#define SWAP_CLUSTER_MAX 32UL
#define SWAP_CLUSTER_MAX_SKIPPED (SWAP_CLUSTER_MAX << 10)
#define COMPACT_CLUSTER_MAX SWAP_CLUSTER_MAX

/* Bit flag in swap_map */
#define COUNT_CONTINUED	0x80	/* Flag swap_map continuation for full count */

/* Special value in first swap_map */
#define SWAP_MAP_MAX	0x3e	/* Max count */
#define SWAP_MAP_BAD	0x3f	/* Note page is bad */

/* Special value in each swap_map continuation */
#define SWAP_CONT_MAX	0x7f	/* Max count */

/*
 * The first page in the swap file is the swap header, which is always marked
 * bad to prevent it from being allocated as an entry. This also prevents the
 * cluster to which it belongs from being marked free. Therefore 0 is safe to
 * use as a sentinel to indicate an entry is not valid.
 */
#define SWAP_ENTRY_INVALID	0

#ifdef CONFIG_THP_SWAP
#define SWAP_NR_ORDERS		(PMD_ORDER + 1)
#else
#define SWAP_NR_ORDERS		1
#endif

/*
 * We keep using the same cluster for a rotational device so I/O will be
 * sequential. The purpose is to optimize swap throughput on such devices.
 */
struct swap_sequential_cluster {
	unsigned int next[SWAP_NR_ORDERS]; /* Likely next allocation offset */
};

/*
 * The in-memory structure used to track swap areas.
 */
struct swap_info_struct {
	struct percpu_ref users;	/* indicate and keep swap device valid */
	unsigned long	flags;		/* SWP_USED etc: see above */
	signed short	prio;		/* swap priority of this type */
	struct plist_node list;		/* entry in swap_active_head */
	signed char	type;		/* strange name for an index */
	unsigned int	max;		/* extent of the swap_map */
	unsigned char *swap_map;	/* vmalloc'ed array of usage counts */
	unsigned long *zeromap;		/* kvmalloc'ed bitmap to track zero pages */
	struct swap_cluster_info *cluster_info; /* cluster info. Only for SSD */
	struct list_head free_clusters; /* free clusters list */
	struct list_head full_clusters; /* full clusters list */
	struct list_head nonfull_clusters[SWAP_NR_ORDERS];
					/* list of clusters that contain at least one free slot */
	struct list_head frag_clusters[SWAP_NR_ORDERS];
					/* list of clusters that are fragmented or contended */
	unsigned int pages;		/* total of usable pages of swap */
	atomic_long_t inuse_pages;	/* number of those currently in use */
	struct swap_sequential_cluster *global_cluster; /* Use one global cluster for rotational devices */
	spinlock_t global_cluster_lock;	/* Serialize usage of global cluster */
	struct rb_root swap_extent_root;/* root of the swap extent rbtree */
	struct block_device *bdev;	/* swap device or bdev of swap file */
	struct file *swap_file;		/* seldom referenced */
	struct completion comp;		/* seldom referenced */
	spinlock_t lock;		/*
					 * protect map scan related fields like
					 * swap_map, inuse_pages and all cluster
					 * lists. Other fields are only changed
					 * at swapon/swapoff, so are protected
					 * by swap_lock. Changing flags needs
					 * holding this lock and swap_lock. If
					 * both locks must be held, take
					 * swap_lock first.
					 */
	spinlock_t cont_lock;		/*
					 * protect swap count continuation page
					 * list.
					 */
	struct work_struct discard_work; /* discard worker */
	struct work_struct reclaim_work; /* reclaim worker */
	struct list_head discard_clusters; /* discard clusters list */
	struct plist_node avail_list;	/* entry in swap_avail_head */
};

static inline swp_entry_t page_swap_entry(struct page *page)
{
	struct folio *folio = page_folio(page);
	swp_entry_t entry = folio->swap;

	entry.val += folio_page_idx(folio, page);
	return entry;
}
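/*
 * Usage sketch (hypothetical caller): map any subpage of a swapcache folio
 * back to its own swap slot. For the third page of a large folio whose
 * folio->swap.val is V, the returned entry has val == V + 2:
 *
 *	swp_entry_t entry = page_swap_entry(page);
 *	(entry.val == folio->swap.val + folio_page_idx(folio, page))
 */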
/* linux/mm/workingset.c */
bool workingset_test_recent(void *shadow, bool file, bool *workingset,
				bool flush);
void workingset_age_nonresident(struct lruvec *lruvec, unsigned long nr_pages);
void *workingset_eviction(struct folio *folio, struct mem_cgroup *target_memcg);
void workingset_refault(struct folio *folio, void *shadow);
void workingset_activation(struct folio *folio);

/* linux/mm/page_alloc.c */
extern unsigned long totalreserve_pages;

/* Definition of global_zone_page_state not available yet */
#define nr_free_pages() global_zone_page_state(NR_FREE_PAGES)

/* linux/mm/swap.c */
void lru_note_cost_unlock_irq(struct lruvec *lruvec, bool file,
		unsigned int nr_io, unsigned int nr_rotated)
		__releases(lruvec->lru_lock);
void lru_note_cost_refault(struct folio *);
void folio_add_lru(struct folio *);
void folio_add_lru_vma(struct folio *, struct vm_area_struct *);
void mark_page_accessed(struct page *);
void folio_mark_accessed(struct folio *);

static inline bool folio_may_be_lru_cached(struct folio *folio)
{
	/*
	 * Holding PMD-sized folios in per-CPU LRU cache unbalances accounting.
	 * Holding small numbers of low-order mTHP folios in per-CPU LRU cache
	 * would be sensible, but nobody has implemented and tested that yet.
	 */
	return !folio_test_large(folio);
}

extern atomic_t lru_disable_count;

static inline bool lru_cache_disabled(void)
{
	return atomic_read(&lru_disable_count);
}

static inline void lru_cache_enable(void)
{
	atomic_dec(&lru_disable_count);
}

extern void lru_cache_disable(void);
extern void lru_add_drain(void);
extern void lru_add_drain_cpu(int cpu);
extern void lru_add_drain_cpu_zone(struct zone *zone);
extern void lru_add_drain_all(void);
void folio_deactivate(struct folio *folio);
void folio_mark_lazyfree(struct folio *folio);
extern void swap_setup(void);

/* linux/mm/vmscan.c */
extern unsigned long zone_reclaimable_pages(struct zone *zone);
extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
					gfp_t gfp_mask, nodemask_t *mask);

#define MEMCG_RECLAIM_MAY_SWAP (1 << 1)
#define MEMCG_RECLAIM_PROACTIVE (1 << 2)
#define MIN_SWAPPINESS 0
#define MAX_SWAPPINESS 200

/* Just reclaim from anon folios in proactive memory reclaim */
#define SWAPPINESS_ANON_ONLY (MAX_SWAPPINESS + 1)

extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
						  unsigned long nr_pages,
						  gfp_t gfp_mask,
						  unsigned int reclaim_options,
						  int *swappiness);
extern unsigned long mem_cgroup_shrink_node(struct mem_cgroup *mem,
						gfp_t gfp_mask, bool noswap,
						pg_data_t *pgdat,
						unsigned long *nr_scanned);
extern unsigned long shrink_all_memory(unsigned long nr_pages);
extern int vm_swappiness;
long remove_mapping(struct address_space *mapping, struct folio *folio);

#if defined(CONFIG_SYSFS) && defined(CONFIG_NUMA)
extern int reclaim_register_node(struct node *node);
extern void reclaim_unregister_node(struct node *node);

#else

static inline int reclaim_register_node(struct node *node)
{
	return 0;
}

static inline void reclaim_unregister_node(struct node *node)
{
}
#endif /* CONFIG_SYSFS && CONFIG_NUMA */

#ifdef CONFIG_NUMA
extern int sysctl_min_unmapped_ratio;
extern int sysctl_min_slab_ratio;
#endif

void check_move_unevictable_folios(struct folio_batch *fbatch);

extern void __meminit kswapd_run(int nid);
extern void __meminit kswapd_stop(int nid);

#ifdef CONFIG_SWAP

int add_swap_extent(struct swap_info_struct *sis, unsigned long start_page,
		unsigned long nr_pages, sector_t start_block);
int generic_swapfile_activate(struct swap_info_struct *, struct file *,
		sector_t *);

static inline unsigned long total_swapcache_pages(void)
{
	return global_node_page_state(NR_SWAPCACHE);
}

void free_swap_cache(struct folio *folio);
void free_folio_and_swap_cache(struct folio *folio);
void free_pages_and_swap_cache(struct encoded_page **, int);
/* linux/mm/swapfile.c */
extern atomic_long_t nr_swap_pages;
extern long total_swap_pages;
extern atomic_t nr_rotate_swap;

/* Swap 50% full? Release swapcache more aggressively.. */
static inline bool vm_swap_full(void)
{
	return atomic_long_read(&nr_swap_pages) * 2 < total_swap_pages;
}
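/*
 * Worked example: with total_swap_pages == 1000 and 450 free slots left
 * (atomic_long_read(&nr_swap_pages) == 450), 450 * 2 = 900 < 1000, so
 * vm_swap_full() returns true and swapcache is released more aggressively.
 */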
static inline long get_nr_swap_pages(void)
{
	return atomic_long_read(&nr_swap_pages);
}

extern void si_swapinfo(struct sysinfo *);
extern int add_swap_count_continuation(swp_entry_t, gfp_t);
int swap_type_of(dev_t device, sector_t offset);
int find_first_swap(dev_t *device);
extern unsigned int count_swap_pages(int, int);
extern sector_t swapdev_block(int, pgoff_t);
extern int __swap_count(swp_entry_t entry);
extern bool swap_entry_swapped(struct swap_info_struct *si, swp_entry_t entry);
extern int swp_swapcount(swp_entry_t entry);
struct backing_dev_info;
extern struct swap_info_struct *get_swap_device(swp_entry_t entry);
sector_t swap_folio_sector(struct folio *folio);

/*
 * If there is an existing swap slot reference (swap entry) and the caller
 * guarantees that there is no racing modification of it (e.g., the PTL
 * protecting the swap entry in a page table; shmem's cmpxchg protecting
 * the swap entry in the shmem mapping), the two helpers below can be used
 * to put/dup the entries directly.
 *
 * All entries must be allocated by folio_alloc_swap(), and they must have
 * a swap count > 1. See the comments of the folio_*_swap helpers for more
 * info.
 */
int swap_dup_entry_direct(swp_entry_t entry);
void swap_put_entries_direct(swp_entry_t entry, int nr);

/*
 * folio_free_swap() tries to free the swap entries pinned by a swap cache
 * folio; it lives here so that it can be called by other components.
 */
bool folio_free_swap(struct folio *folio);

/* Allocate / free (hibernation) exclusive entries */
swp_entry_t swap_alloc_hibernation_slot(int type);
void swap_free_hibernation_slot(swp_entry_t entry);

static inline void put_swap_device(struct swap_info_struct *si)
{
	percpu_ref_put(&si->users);
}
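/*
 * Usage sketch of the device pinning pattern above (illustrative only;
 * real callers live elsewhere): stabilize the swap device across a racy
 * entry lookup.
 *
 *	struct swap_info_struct *si = get_swap_device(entry);
 *
 *	if (!si)
 *		return;			(bad entry or device was swapped off)
 *	... safely access swap cache / swap_map for entry ...
 *	put_swap_device(si);
 */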
#else /* CONFIG_SWAP */
static inline struct swap_info_struct *get_swap_device(swp_entry_t entry)
{
	return NULL;
}

static inline void put_swap_device(struct swap_info_struct *si)
{
}

#define get_nr_swap_pages()		0L
#define total_swap_pages		0L
#define total_swapcache_pages()		0UL
#define vm_swap_full()			0

#define si_swapinfo(val) \
	do { (val)->freeswap = (val)->totalswap = 0; } while (0)
#define free_folio_and_swap_cache(folio) \
	folio_put(folio)
#define free_pages_and_swap_cache(pages, nr) \
	release_pages((pages), (nr));

static inline void free_swap_cache(struct folio *folio)
{
}

static inline int add_swap_count_continuation(swp_entry_t swp, gfp_t gfp_mask)
{
	return 0;
}

static inline int swap_dup_entry_direct(swp_entry_t ent)
{
	return 0;
}

static inline void swap_put_entries_direct(swp_entry_t ent, int nr)
{
}

static inline int __swap_count(swp_entry_t entry)
{
	return 0;
}

static inline bool swap_entry_swapped(struct swap_info_struct *si, swp_entry_t entry)
{
	return false;
}

static inline int swp_swapcount(swp_entry_t entry)
{
	return 0;
}

static inline bool folio_free_swap(struct folio *folio)
{
	return false;
}

static inline int add_swap_extent(struct swap_info_struct *sis,
				  unsigned long start_page,
				  unsigned long nr_pages, sector_t start_block)
{
	return -EINVAL;
}

#endif /* CONFIG_SWAP */

#ifdef CONFIG_MEMCG
static inline int mem_cgroup_swappiness(struct mem_cgroup *memcg)
{
	/* Cgroup2 doesn't have per-cgroup swappiness */
	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
		return READ_ONCE(vm_swappiness);

	/* root ? */
	if (mem_cgroup_disabled() || mem_cgroup_is_root(memcg))
		return READ_ONCE(vm_swappiness);

	return READ_ONCE(memcg->swappiness);
}
#else
static inline int mem_cgroup_swappiness(struct mem_cgroup *mem)
{
	return READ_ONCE(vm_swappiness);
}
#endif

#if defined(CONFIG_SWAP) && defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP)
void __folio_throttle_swaprate(struct folio *folio, gfp_t gfp);
static inline void folio_throttle_swaprate(struct folio *folio, gfp_t gfp)
{
	if (mem_cgroup_disabled())
		return;
	__folio_throttle_swaprate(folio, gfp);
}
#else
static inline void folio_throttle_swaprate(struct folio *folio, gfp_t gfp)
{
}
#endif

#if defined(CONFIG_MEMCG) && defined(CONFIG_SWAP)
int __mem_cgroup_try_charge_swap(struct folio *folio, swp_entry_t entry);
static inline int mem_cgroup_try_charge_swap(struct folio *folio,
		swp_entry_t entry)
{
	if (mem_cgroup_disabled())
		return 0;
	return __mem_cgroup_try_charge_swap(folio, entry);
}

extern void __mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages);
static inline void mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages)
{
	if (mem_cgroup_disabled())
		return;
	__mem_cgroup_uncharge_swap(entry, nr_pages);
}

extern long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg);
extern bool mem_cgroup_swap_full(struct folio *folio);
#else
static inline int mem_cgroup_try_charge_swap(struct folio *folio,
					     swp_entry_t entry)
{
	return 0;
}

static inline void mem_cgroup_uncharge_swap(swp_entry_t entry,
					    unsigned int nr_pages)
{
}

static inline long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)
{
	return get_nr_swap_pages();
}

static inline bool mem_cgroup_swap_full(struct folio *folio)
{
	return vm_swap_full();
}
#endif

#endif /* __KERNEL__*/
#endif /* _LINUX_SWAP_H */