--- mmap.c (224c1c702c08ca4d874690991f02e5b08c816e5b)
+++ mmap.c (2f1c6611b0a89afcb8641471af5f223c9caa01e0)
 // SPDX-License-Identifier: GPL-2.0-only
 /*
  * mm/mmap.c
  *
  * Written by obz.
  *
  * Address space accounting code <alan@lxorguk.ukuu.org.uk>
  */

--- 1359 unchanged lines hidden ---

@@ -1368,87 +1368,92 @@
 	pgoff_t pglen = PHYS_PFN(len);
 	unsigned long charged = 0;
 	struct vma_munmap_struct vms;
 	struct ma_state mas_detach;
 	struct maple_tree mt_detach;
 	unsigned long end = addr + len;
 	unsigned long merge_start = addr, merge_end = end;
 	bool writable_file_mapping = false;
-	pgoff_t vm_pgoff;
 	int error = -ENOMEM;
 	VMA_ITERATOR(vmi, mm, addr);
+	VMG_STATE(vmg, mm, &vmi, addr, end, vm_flags, pgoff);
 
+	vmg.file = file;
 	/* Find the first overlapping VMA */
 	vma = vma_find(&vmi, end);
 	init_vma_munmap(&vms, &vmi, vma, addr, end, uf, /* unlock = */ false);
 	if (vma) {
 		mt_init_flags(&mt_detach, vmi.mas.tree->ma_flags & MT_FLAGS_LOCK_MASK);
 		mt_on_stack(mt_detach);
 		mas_init(&mas_detach, &mt_detach, /* addr = */ 0);
 		/* Prepare to unmap any existing mapping in the area */
 		error = vms_gather_munmap_vmas(&vms, &mas_detach);
 		if (error)
 			goto gather_failed;
 
-		next = vms.next;
-		prev = vms.prev;
+		next = vmg.next = vms.next;
+		prev = vmg.prev = vms.prev;
 		vma = NULL;
 	} else {
-		next = vma_next(&vmi);
-		prev = vma_prev(&vmi);
+		next = vmg.next = vma_next(&vmi);
+		prev = vmg.prev = vma_prev(&vmi);
 		if (prev)
 			vma_iter_next_range(&vmi);
 	}
 
 	/* Check against address space limit. */
 	if (!may_expand_vm(mm, vm_flags, pglen - vms.nr_pages))
 		goto abort_munmap;
 
 	/*
 	 * Private writable mapping: check memory availability
 	 */
 	if (accountable_mapping(file, vm_flags)) {
 		charged = pglen;
 		charged -= vms.nr_accounted;
 		if (charged && security_vm_enough_memory_mm(mm, charged))
 			goto abort_munmap;
 
 		vms.nr_accounted = 0;
 		vm_flags |= VM_ACCOUNT;
+		vmg.flags = vm_flags;
 	}
 
 	if (vm_flags & VM_SPECIAL)
 		goto cannot_expand;
 
 	/* Attempt to expand an old mapping */
 	/* Check next */
 	if (next && next->vm_start == end && !vma_policy(next) &&
-	    can_vma_merge_before(next, vm_flags, NULL, file, pgoff+pglen,
-				 NULL_VM_UFFD_CTX, NULL)) {
+	    can_vma_merge_before(&vmg)) {
 		merge_end = next->vm_end;
 		vma = next;
-		vm_pgoff = next->vm_pgoff - pglen;
+		vmg.pgoff = next->vm_pgoff - pglen;
+		/*
+		 * We set this here so if we will merge with the previous VMA in
+		 * the code below, can_vma_merge_after() ensures anon_vma
+		 * compatibility between prev and next.
+		 */
+		vmg.anon_vma = vma->anon_vma;
+		vmg.uffd_ctx = vma->vm_userfaultfd_ctx;
 	}
 
 	/* Check prev */
 	if (prev && prev->vm_end == addr && !vma_policy(prev) &&
-	    (vma ? can_vma_merge_after(prev, vm_flags, vma->anon_vma, file,
-				       pgoff, vma->vm_userfaultfd_ctx, NULL) :
-		   can_vma_merge_after(prev, vm_flags, NULL, file, pgoff,
-				       NULL_VM_UFFD_CTX, NULL))) {
+	    can_vma_merge_after(&vmg)) {
 		merge_start = prev->vm_start;
 		vma = prev;
-		vm_pgoff = prev->vm_pgoff;
+		vmg.pgoff = prev->vm_pgoff;
 		vma_prev(&vmi); /* Equivalent to going to the previous range */
 	}
 
 	if (vma) {
 		/* Actually expand, if possible */
-		if (!vma_expand(&vmi, vma, merge_start, merge_end, vm_pgoff, next)) {
+		if (!vma_expand(&vmi, vma, merge_start, merge_end, vmg.pgoff, next)) {
 			khugepaged_enter_vma(vma, vm_flags);
 			goto expanded;
 		}
 
 		/* If the expand fails, then reposition the vma iterator */
 		if (unlikely(vma == prev))
 			vma_iter_set(&vmi, addr);
 	}

--- 314 unchanged lines hidden ---
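The hunk above reworks mmap_region(): the two merge checks no longer take long positional argument lists; instead a single on-stack descriptor (vmg, set up by VMG_STATE() and filled in field by field) is handed to both can_vma_merge_before() and can_vma_merge_after(). The sketch below is a simplified, userspace-compilable model of that pattern, not the kernel's code: the demo_* types, the DEMO_MERGE_STATE() macro and the two predicates are stand-ins whose fields are inferred from the ones visible in the diff (prev, next, start, end, flags, file, pgoff, anon_vma, uffd_ctx); the real struct vma_merge_struct and merge checks differ in detail.

/*
 * Illustrative model only: one descriptor carries everything the merge
 * predicates need, so call sites set fields once instead of threading
 * seven positional arguments through every call.
 */
#include <stdbool.h>
#include <stdio.h>

#define DEMO_PAGE_SHIFT 12	/* assumed 4K pages for the example */

struct demo_vma {
	unsigned long vm_start, vm_end, vm_pgoff, vm_flags;
	void *vm_file, *anon_vma, *uffd_ctx;
};

/* Stand-in for the "vmg" merge-state descriptor used in the new code. */
struct demo_merge_state {
	struct demo_vma *prev, *next;
	unsigned long start, end, flags, pgoff;
	void *file, *anon_vma, *uffd_ctx;
};

/* Rough analogue of an on-stack VMG_STATE()-style initializer. */
#define DEMO_MERGE_STATE(name, start_, end_, flags_, pgoff_)	\
	struct demo_merge_state name = {			\
		.start = (start_), .end = (end_),		\
		.flags = (flags_), .pgoff = (pgoff_),		\
	}

/* Would the new range merge with the VMA immediately after it? */
static bool demo_can_merge_before(const struct demo_merge_state *vmg)
{
	const struct demo_vma *next = vmg->next;
	unsigned long pglen = (vmg->end - vmg->start) >> DEMO_PAGE_SHIFT;

	return next && next->vm_start == vmg->end &&
	       next->vm_flags == vmg->flags &&
	       next->vm_file == vmg->file &&
	       next->vm_pgoff == vmg->pgoff + pglen;
}

/* Would the new range merge with the VMA immediately before it? */
static bool demo_can_merge_after(const struct demo_merge_state *vmg)
{
	const struct demo_vma *prev = vmg->prev;

	return prev && prev->vm_end == vmg->start &&
	       prev->vm_flags == vmg->flags &&
	       prev->vm_file == vmg->file;
}

int main(void)
{
	struct demo_vma prev = { .vm_start = 0x1000, .vm_end = 0x3000,
				 .vm_pgoff = 1, .vm_flags = 0x73 };
	struct demo_vma next = { .vm_start = 0x5000, .vm_end = 0x7000,
				 .vm_pgoff = 5, .vm_flags = 0x73 };
	DEMO_MERGE_STATE(vmg, 0x3000, 0x5000, 0x73, 3);

	/* Fill the descriptor once, then reuse it for both checks. */
	vmg.prev = &prev;
	vmg.next = &next;
	printf("merge before next: %d\n", demo_can_merge_before(&vmg));
	printf("merge after prev:  %d\n", demo_can_merge_after(&vmg));
	return 0;
}

The diff's own comment explains the anon_vma/uffd_ctx assignments in the next-check branch: stashing next's anon_vma in the descriptor lets the subsequent prev check also validate prev/next compatibility, which the old code achieved by passing vma->anon_vma explicitly to the second call.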

@@ -1769,36 +1774,39 @@
 
 	if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT))
 		return -ENOMEM;
 
 	/*
 	 * Expand the existing vma if possible; Note that singular lists do not
 	 * occur after forking, so the expand will only happen on new VMAs.
 	 */
-	if (vma && vma->vm_end == addr && !vma_policy(vma) &&
-	    can_vma_merge_after(vma, flags, NULL, NULL,
-				addr >> PAGE_SHIFT, NULL_VM_UFFD_CTX, NULL)) {
-		vma_iter_config(vmi, vma->vm_start, addr + len);
-		if (vma_iter_prealloc(vmi, vma))
-			goto unacct_fail;
-
-		vma_start_write(vma);
-
-		init_vma_prep(&vp, vma);
-		vma_prepare(&vp);
-		vma_adjust_trans_huge(vma, vma->vm_start, addr + len, 0);
-		vma->vm_end = addr + len;
-		vm_flags_set(vma, VM_SOFTDIRTY);
-		vma_iter_store(vmi, vma);
-
-		vma_complete(&vp, vmi, mm);
-		validate_mm(mm);
-		khugepaged_enter_vma(vma, flags);
-		goto out;
+	if (vma && vma->vm_end == addr && !vma_policy(vma)) {
+		VMG_STATE(vmg, mm, vmi, addr, addr + len, flags, PHYS_PFN(addr));
+
+		vmg.prev = vma;
+		if (can_vma_merge_after(&vmg)) {
+			vma_iter_config(vmi, vma->vm_start, addr + len);
+			if (vma_iter_prealloc(vmi, vma))
+				goto unacct_fail;
+
+			vma_start_write(vma);
+
+			init_vma_prep(&vp, vma);
+			vma_prepare(&vp);
+			vma_adjust_trans_huge(vma, vma->vm_start, addr + len, 0);
+			vma->vm_end = addr + len;
+			vm_flags_set(vma, VM_SOFTDIRTY);
+			vma_iter_store(vmi, vma);
+
+			vma_complete(&vp, vmi, mm);
+			validate_mm(mm);
+			khugepaged_enter_vma(vma, flags);
+			goto out;
+		}
 	}
 
 	if (vma)
 		vma_iter_next_range(vmi);
 	/* create a vma struct for an anonymous mapping */
 	vma = vm_area_alloc(mm);
 	if (!vma)
 		goto unacct_fail;

--- 564 unchanged lines hidden ---
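The second hunk applies the same pattern to the brk path (do_brk_flags()): the descriptor is declared inside the "prior VMA ends exactly at addr" branch, only vmg.prev is filled in, and a single can_vma_merge_after(&vmg) call replaces the seven-argument form. Below is a minimal, self-contained sketch of that shape under the same caveat as above: the demo_* names, the page-shift constant and the merge test are hypothetical stand-ins, not kernel definitions.

#include <stdbool.h>
#include <stdio.h>

#define DEMO_PAGE_SHIFT 12	/* assumed 4K pages */

struct demo_vma {
	unsigned long vm_start, vm_end, vm_flags;
};

struct demo_merge_state {
	struct demo_vma *prev;
	unsigned long start, end, flags, pgoff;
};

static bool demo_can_merge_after(const struct demo_merge_state *vmg)
{
	return vmg->prev && vmg->prev->vm_end == vmg->start &&
	       vmg->prev->vm_flags == vmg->flags;
}

/* Grow an anonymous mapping at addr by len, expanding prev when possible. */
static void demo_brk(struct demo_vma *prev, unsigned long addr,
		     unsigned long len, unsigned long flags)
{
	if (prev && prev->vm_end == addr) {
		/* Descriptor lives only in this branch, like the new code. */
		struct demo_merge_state vmg = {
			.start = addr, .end = addr + len, .flags = flags,
			.pgoff = addr >> DEMO_PAGE_SHIFT,
		};

		vmg.prev = prev;
		if (demo_can_merge_after(&vmg)) {
			prev->vm_end = addr + len;	/* expand in place */
			return;
		}
	}
	printf("would allocate a new VMA at %#lx\n", addr);
}

int main(void)
{
	struct demo_vma heap = { .vm_start = 0x1000, .vm_end = 0x3000,
				 .vm_flags = 0x3 };

	demo_brk(&heap, 0x3000, 0x2000, 0x3);
	printf("heap now ends at %#lx\n", heap.vm_end);
	return 0;
}

Scoping the descriptor to the branch keeps the merge state local to the only place that needs it, mirroring how the new code declares VMG_STATE() inside the if block rather than at function scope.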