internal.h (d61f0d59683d9c899211c300254d4140c482a6c0) -> internal.h (49b1b8d6f6831026cb105b0eafa18f13db612d86)
/* SPDX-License-Identifier: GPL-2.0-or-later */
/* internal.h: mm/ internal definitions
 *
 * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */
#ifndef __MM_INTERNAL_H
#define __MM_INTERNAL_H

#include <linux/fs.h>
#include <linux/khugepaged.h>
#include <linux/mm.h>
#include <linux/mm_inline.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/tracepoint-defs.h>

/* Internal core VMA manipulation functions. */
#include "vma.h"

struct folio_batch;

/*
 * The set of flags that only affect watermark checking and reclaim
 * behaviour. This is used by the MM to obey the caller constraints
 * about IO, FS and watermark checking while ignoring placement
 * hints such as HIGHMEM usage.
 */
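/*
 * Note: such a mask is typically applied as "gfp_mask & GFP_RECLAIM_MASK"
 * when the MM allocates on a caller's behalf, so the caller's IO/FS and
 * reclaim constraints are honoured while placement hints are chosen by
 * the MM itself.
 */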

--- 747 unchanged lines hidden ---

int find_suitable_fallback(struct free_area *area, unsigned int order,
			int migratetype, bool only_stealable, bool *can_steal);

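/* Return true if @area has no free pages left on the @migratetype free list. */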
static inline bool free_area_empty(struct free_area *area, int migratetype)
{
	return list_empty(&area->free_list[migratetype]);
}

/*
 * These three helpers classify VMAs for virtual memory accounting.
 */

/*
 * Executable code area - executable, not writable, not stack
 */
static inline bool is_exec_mapping(vm_flags_t flags)
{
	return (flags & (VM_EXEC | VM_WRITE | VM_STACK)) == VM_EXEC;
}

/*
 * Stack area (including shadow stacks)
 *
 * VM_GROWSUP / VM_GROWSDOWN VMAs are always private anonymous:
 * do_mmap() forbids all other combinations.
 */
static inline bool is_stack_mapping(vm_flags_t flags)
{
	return ((flags & VM_STACK) == VM_STACK) || (flags & VM_SHADOW_STACK);
}

/*
 * Data area - private, writable, not stack
 */
static inline bool is_data_mapping(vm_flags_t flags)
{
	return (flags & (VM_WRITE | VM_SHARED | VM_STACK)) == VM_WRITE;
}

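/*
 * For example: a PROT_READ|PROT_EXEC file mapping (VM_READ | VM_EXEC) is
 * accounted as exec, a MAP_PRIVATE PROT_READ|PROT_WRITE anonymous mapping
 * (VM_READ | VM_WRITE) as data, and a VMA with VM_STACK set (the process
 * stack) as stack regardless of its other flags.
 */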
/* mm/util.c */
struct anon_vma *folio_anon_vma(struct folio *folio);

#ifdef CONFIG_MMU
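/* Unmap @folio from every userspace mapping of its address_space. */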
void unmap_mapping_folio(struct folio *folio);
extern long populate_vma_page_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end, int *locked);
extern long faultin_page_range(struct mm_struct *mm, unsigned long start,

--- 412 unchanged lines hidden ---

/*
 * mm/huge_memory.c
 */
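/*
 * touch_pud()/touch_pmd() mark the huge mapping young (and dirty when
 * @write is true), roughly what a hardware access to the range would do.
 */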
void touch_pud(struct vm_area_struct *vma, unsigned long addr,
	       pud_t *pud, bool write);
void touch_pmd(struct vm_area_struct *vma, unsigned long addr,
	       pmd_t *pmd, bool write);

/*
 * mm/mmap.c
 */
struct vm_area_struct *vma_merge_extend(struct vma_iterator *vmi,
					struct vm_area_struct *vma,
					unsigned long delta);

struct vm_area_struct *vma_modify(struct vma_iterator *vmi,
				  struct vm_area_struct *prev,
				  struct vm_area_struct *vma,
				  unsigned long start, unsigned long end,
				  unsigned long vm_flags,
				  struct mempolicy *policy,
				  struct vm_userfaultfd_ctx uffd_ctx,
				  struct anon_vma_name *anon_name);

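/*
 * vma_modify() splits and/or merges VMAs as needed so that [start, end)
 * can carry the new attributes, returning the resulting VMA or an
 * ERR_PTR() on failure. A typical caller pattern (roughly as in
 * mprotect_fixup()) is:
 *
 *	vma = vma_modify_flags(vmi, prev, vma, start, end, newflags);
 *	if (IS_ERR(vma))
 *		return PTR_ERR(vma);
 */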
/* We are about to modify the VMA's flags. */
static inline struct vm_area_struct
*vma_modify_flags(struct vma_iterator *vmi,
		  struct vm_area_struct *prev,
		  struct vm_area_struct *vma,
		  unsigned long start, unsigned long end,
		  unsigned long new_flags)
{
	return vma_modify(vmi, prev, vma, start, end, new_flags,
			  vma_policy(vma), vma->vm_userfaultfd_ctx,
			  anon_vma_name(vma));
}

/* We are about to modify the VMA's flags and/or anon_name. */
static inline struct vm_area_struct
*vma_modify_flags_name(struct vma_iterator *vmi,
		       struct vm_area_struct *prev,
		       struct vm_area_struct *vma,
		       unsigned long start,
		       unsigned long end,
		       unsigned long new_flags,
		       struct anon_vma_name *new_name)
{
	return vma_modify(vmi, prev, vma, start, end, new_flags,
			  vma_policy(vma), vma->vm_userfaultfd_ctx, new_name);
}

/* We are about to modify the VMA's memory policy. */
static inline struct vm_area_struct
*vma_modify_policy(struct vma_iterator *vmi,
		   struct vm_area_struct *prev,
		   struct vm_area_struct *vma,
		   unsigned long start, unsigned long end,
		   struct mempolicy *new_pol)
{
	return vma_modify(vmi, prev, vma, start, end, vma->vm_flags,
			  new_pol, vma->vm_userfaultfd_ctx, anon_vma_name(vma));
}

/* We are about to modify the VMA's flags and/or uffd context. */
static inline struct vm_area_struct
*vma_modify_flags_uffd(struct vma_iterator *vmi,
		       struct vm_area_struct *prev,
		       struct vm_area_struct *vma,
		       unsigned long start, unsigned long end,
		       unsigned long new_flags,
		       struct vm_userfaultfd_ctx new_ctx)
{
	return vma_modify(vmi, prev, vma, start, end, new_flags,
			  vma_policy(vma), new_ctx, anon_vma_name(vma));
}

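/*
 * vma_expand() grows @vma to span [start, end), removing @next if the
 * expansion swallows it; vma_shrink() trims @vma back to [start, end).
 * Both return 0 on success or a negative error code on failure.
 */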
int vma_expand(struct vma_iterator *vmi, struct vm_area_struct *vma,
	       unsigned long start, unsigned long end, pgoff_t pgoff,
	       struct vm_area_struct *next);
int vma_shrink(struct vma_iterator *vmi, struct vm_area_struct *vma,
	       unsigned long start, unsigned long end, pgoff_t pgoff);

enum {
	/* mark page accessed */
	FOLL_TOUCH = 1 << 16,
	/* a retry, previous pass started an IO */
	FOLL_TRIED = 1 << 17,
	/* we are working on non-current tsk/mm */
	FOLL_REMOTE = 1 << 18,
	/* pages must be released via unpin_user_page */

--- 110 unchanged lines hidden ---

	return vma_soft_dirty_enabled(vma) && !pmd_soft_dirty(pmd);
}

static inline bool pte_needs_soft_dirty_wp(struct vm_area_struct *vma, pte_t pte)
{
	return vma_soft_dirty_enabled(vma) && !pte_soft_dirty(pte);
}

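/*
 * The *_needs_soft_dirty_wp() helpers above return true when soft-dirty
 * tracking is enabled for @vma but the entry is not yet marked soft-dirty,
 * i.e. it must stay write-protected so the next write can be recorded.
 */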
static inline void vma_iter_config(struct vma_iterator *vmi,
		unsigned long index, unsigned long last)
{
	__mas_set_range(&vmi->mas, index, last - 1);
}

static inline void vma_iter_reset(struct vma_iterator *vmi)
{
	mas_reset(&vmi->mas);
}

static inline
struct vm_area_struct *vma_iter_prev_range_limit(struct vma_iterator *vmi, unsigned long min)
{
	return mas_prev_range(&vmi->mas, min);
}

static inline
struct vm_area_struct *vma_iter_next_range_limit(struct vma_iterator *vmi, unsigned long max)
{
	return mas_next_range(&vmi->mas, max);
}

static inline int vma_iter_area_lowest(struct vma_iterator *vmi, unsigned long min,
		unsigned long max, unsigned long size)
{
	return mas_empty_area(&vmi->mas, min, max - 1, size);
}

static inline int vma_iter_area_highest(struct vma_iterator *vmi, unsigned long min,
		unsigned long max, unsigned long size)
{
	return mas_empty_area_rev(&vmi->mas, min, max - 1, size);
}

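/*
 * The vma_iter_*() helpers above are thin wrappers around the maple tree
 * API. The maple tree operates on inclusive [index, last] ranges while a
 * VMA's vm_end is exclusive, hence the "- 1" adjustments on end/max.
 */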
/*
 * VMA Iterator functions shared between nommu and mmap
 */
static inline int vma_iter_prealloc(struct vma_iterator *vmi,
		struct vm_area_struct *vma)
{
	return mas_preallocate(&vmi->mas, vma, GFP_KERNEL);
}

static inline void vma_iter_clear(struct vma_iterator *vmi)
{
	mas_store_prealloc(&vmi->mas, NULL);
}

static inline struct vm_area_struct *vma_iter_load(struct vma_iterator *vmi)
{
	return mas_walk(&vmi->mas);
}

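/*
 * A typical write sequence is vma_iter_prealloc() followed by
 * vma_iter_store() or vma_iter_clear(), so the maple tree store itself
 * cannot fail at a point where the VMA tree must not be left half-updated.
 */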
/* Store a VMA with preallocated memory */
static inline void vma_iter_store(struct vma_iterator *vmi,
				  struct vm_area_struct *vma)
{

#if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
	if (MAS_WARN_ON(&vmi->mas, vmi->mas.status != ma_start &&
			vmi->mas.index > vma->vm_start)) {
		pr_warn("%lx > %lx\n store vma %lx-%lx\n into slot %lx-%lx\n",
			vmi->mas.index, vma->vm_start, vma->vm_start,
			vma->vm_end, vmi->mas.index, vmi->mas.last);
	}
	if (MAS_WARN_ON(&vmi->mas, vmi->mas.status != ma_start &&
			vmi->mas.last < vma->vm_start)) {
		pr_warn("%lx < %lx\nstore vma %lx-%lx\ninto slot %lx-%lx\n",
			vmi->mas.last, vma->vm_start, vma->vm_start, vma->vm_end,
			vmi->mas.index, vmi->mas.last);
	}
#endif

	if (vmi->mas.status != ma_start &&
	    ((vmi->mas.index > vma->vm_start) || (vmi->mas.last < vma->vm_start)))
		vma_iter_invalidate(vmi);

	__mas_set_range(&vmi->mas, vma->vm_start, vma->vm_end - 1);
	mas_store_prealloc(&vmi->mas, vma);
}

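/*
 * As vma_iter_store(), but allocates maple tree nodes with @gfp instead of
 * relying on preallocation; returns 0 on success or -ENOMEM.
 */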
static inline int vma_iter_store_gfp(struct vma_iterator *vmi,
			struct vm_area_struct *vma, gfp_t gfp)
{
	if (vmi->mas.status != ma_start &&
	    ((vmi->mas.index > vma->vm_start) || (vmi->mas.last < vma->vm_start)))
		vma_iter_invalidate(vmi);

	__mas_set_range(&vmi->mas, vma->vm_start, vma->vm_end - 1);
	mas_store_gfp(&vmi->mas, vma, gfp);
	if (unlikely(mas_is_err(&vmi->mas)))
		return -ENOMEM;

	return 0;
}

static inline
struct vm_area_struct *vma_iter_prev_range(struct vma_iterator *vmi)
{
	return mas_prev_range(&vmi->mas, 0);
}

/*
 * VMA lock generalization
 */
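/*
 * A vma_prepare instance gathers everything that has to be locked and then
 * updated around a VMA modification: the VMA being changed, an optional
 * adjacent or inserted VMA, the backing file and its address_space, the
 * anon_vma, and up to two VMAs to be removed.
 */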
struct vma_prepare {
	struct vm_area_struct *vma;
	struct vm_area_struct *adj_next;
	struct file *file;
	struct address_space *mapping;
	struct anon_vma *anon_vma;
	struct vm_area_struct *insert;
	struct vm_area_struct *remove;
	struct vm_area_struct *remove2;
};

void __meminit __init_single_page(struct page *page, unsigned long pfn,
				unsigned long zone, int nid);

/* shrinker related functions */
unsigned long shrink_slab(gfp_t gfp_mask, int nid, struct mem_cgroup *memcg,
			  int priority);

#ifdef CONFIG_64BIT

--- 72 unchanged lines hidden ---

{
}
#endif /* CONFIG_SHRINKER_DEBUG */

/* Only track the nodes of mappings with shadow entries */
void workingset_update_node(struct xa_node *node);
extern struct list_lru shadow_nodes;

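/*
 * unlink_file_vma_batch_*() batch the removal of file-backed VMAs from the
 * underlying file's i_mmap interval tree: initialise the batch, add VMAs
 * (the batch is flushed automatically once vmas[] fills up), then call
 * unlink_file_vma_batch_final() to process any remainder.
 */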
struct unlink_vma_file_batch {
	int count;
	struct vm_area_struct *vmas[8];
};

void unlink_file_vma_batch_init(struct unlink_vma_file_batch *);
void unlink_file_vma_batch_add(struct unlink_vma_file_batch *, struct vm_area_struct *);
void unlink_file_vma_batch_final(struct unlink_vma_file_batch *);

/* mremap.c */
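/*
 * Move the page tables backing [old_addr, old_addr + len) in @vma over to
 * @new_addr in @new_vma; returns the number of bytes whose page tables were
 * actually moved, which may be less than @len if the move fails part way.
 */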
unsigned long move_page_tables(struct vm_area_struct *vma,
	unsigned long old_addr, struct vm_area_struct *new_vma,
	unsigned long new_addr, unsigned long len,
	bool need_rmap_locks, bool for_stack);

#endif /* __MM_INTERNAL_H */