mmap.c: fc21959f74bc1138b28e90a02ec224ab8626111e (old) -> cacded5e42b9609b07b22d80c10f0076d439f7d1 (new)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * mm/mmap.c
 *
 * Written by obz.
 *
 * Address space accounting code <alan@lxorguk.ukuu.org.uk>
 */

--- 1350 unchanged lines hidden ---

}

unsigned long mmap_region(struct file *file, unsigned long addr,
		unsigned long len, vm_flags_t vm_flags, unsigned long pgoff,
		struct list_head *uf)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = NULL;
-	struct vm_area_struct *next, *prev, *merge;
-	pgoff_t pglen = PHYS_PFN(len);
+	pgoff_t pglen = PHYS_PFN(len);
+	struct vm_area_struct *merge;
	unsigned long charged = 0;
	struct vma_munmap_struct vms;
	struct ma_state mas_detach;
	struct maple_tree mt_detach;
	unsigned long end = addr + len;
	bool writable_file_mapping = false;
	int error = -ENOMEM;
	VMA_ITERATOR(vmi, mm, addr);

--- 7 unchanged lines hidden ---

		mt_init_flags(&mt_detach, vmi.mas.tree->ma_flags & MT_FLAGS_LOCK_MASK);
		mt_on_stack(mt_detach);
		mas_init(&mas_detach, &mt_detach, /* addr = */ 0);
		/* Prepare to unmap any existing mapping in the area */
		error = vms_gather_munmap_vmas(&vms, &mas_detach);
		if (error)
			goto gather_failed;

-		next = vmg.next = vms.next;
-		prev = vmg.prev = vms.prev;
+		vmg.next = vms.next;
+		vmg.prev = vms.prev;
		vma = NULL;
	} else {
-		next = vmg.next = vma_next(&vmi);
-		prev = vmg.prev = vma_prev(&vmi);
-		if (prev)
-			vma_iter_next_range(&vmi);
+		vmg.next = vma_iter_next_rewind(&vmi, &vmg.prev);
	}

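The four removed lines are folded into a single iterator helper on the new side. As a reading aid, here is a minimal sketch of what vma_iter_next_rewind() must do to be equivalent, reconstructed from this hunk rather than quoted from the tree:

	/*
	 * Sketch: fetch the VMA after the gap, rewind to fetch the one before
	 * it (returned via *pprev), then step forward again so the iterator
	 * still points at the gap being mapped, exactly as the open-coded
	 * sequence above did.
	 */
	static inline
	struct vm_area_struct *vma_iter_next_rewind(struct vma_iterator *vmi,
						     struct vm_area_struct **pprev)
	{
		struct vm_area_struct *next = vma_next(vmi);
		struct vm_area_struct *prev = vma_prev(vmi);

		if (prev)
			vma_iter_next_range(vmi);

		*pprev = prev;
		return next;
	}
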
	/* Check against address space limit. */
	if (!may_expand_vm(mm, vm_flags, pglen - vms.nr_pages))
		goto abort_munmap;

	/*
	 * Private writable mapping: check memory availability

--- 4 unchanged lines hidden ---

		if (charged && security_vm_enough_memory_mm(mm, charged))
			goto abort_munmap;

		vms.nr_accounted = 0;
		vm_flags |= VM_ACCOUNT;
		vmg.flags = vm_flags;
	}

-	if (vm_flags & VM_SPECIAL)
-		goto cannot_expand;
-
-	/* Attempt to expand an old mapping */
-	/* Check next */
-	if (next && next->vm_start == end && can_vma_merge_before(&vmg)) {
-		vmg.end = next->vm_end;
-		vma = vmg.vma = next;
-		vmg.pgoff = next->vm_pgoff - pglen;
-		/*
-		 * We set this here so if we will merge with the previous VMA in
-		 * the code below, can_vma_merge_after() ensures anon_vma
-		 * compatibility between prev and next.
-		 */
-		vmg.anon_vma = vma->anon_vma;
-		vmg.uffd_ctx = vma->vm_userfaultfd_ctx;
-	}
-
-	/* Check prev */
-	if (prev && prev->vm_end == addr && can_vma_merge_after(&vmg)) {
-		vmg.start = prev->vm_start;
-		vma = vmg.vma = prev;
-		vmg.pgoff = prev->vm_pgoff;
-		vma_prev(&vmi); /* Equivalent to going to the previous range */
-	}
-
-	if (vma) {
-		/* Actually expand, if possible */
-		if (!vma_expand(&vmg)) {
-			khugepaged_enter_vma(vma, vm_flags);
-			goto expanded;
-		}
-
-		/* If the expand fails, then reposition the vma iterator */
-		if (unlikely(vma == prev))
-			vma_iter_set(&vmi, addr);
-	}
-
-cannot_expand:
-
+	vma = vma_merge_new_range(&vmg);
+	if (vma)
+		goto expanded;
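Everything the removed block did by hand (checking a mergeable next, checking a mergeable prev, expanding, repositioning the iterator on failure, skipping VM_SPECIAL mappings) now lives behind vma_merge_new_range(), driven entirely by the vmg descriptor. A sketch of the calling convention as this diff uses it; the exact VMG_STATE declaration sits in a hidden region, and the abort label is illustrative only:

	VMG_STATE(vmg, mm, &vmi, addr, addr + len, vm_flags, pgoff);
	vmg.prev = prev;			/* NULL if no VMA precedes the gap */
	vmg.next = next;			/* NULL if no VMA follows the gap */

	vma = vma_merge_new_range(&vmg);	/* NULL means no merge happened */
	if (vma)
		goto expanded;			/* absorbed into prev and/or next */
	if (vmg_nomem(&vmg))
		goto abort;			/* NULL because an allocation failed */
	/* Otherwise fall through and allocate a fresh VMA. */
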
	/*
	 * Determine the object being mapped and call the appropriate
	 * specific mapper. the address has already been validated, but
	 * not unmapped, but the maps are removed from the list.
	 */
	vma = vm_area_alloc(mm);
	if (!vma)
		goto unacct_error;

--- 30 unchanged lines hidden ---

		if (WARN_ON((addr != vma->vm_start)))
			goto close_and_free_vma;

		vma_iter_config(&vmi, addr, end);
		/*
		 * If vm_flags changed after call_mmap(), we should try merge
		 * vma again as we may succeed this time.
		 */
-		if (unlikely(vm_flags != vma->vm_flags && prev)) {
-			merge = vma_merge_new_vma(&vmi, prev, vma,
-						  vma->vm_start, vma->vm_end,
-						  vma->vm_pgoff);
+		if (unlikely(vm_flags != vma->vm_flags && vmg.prev)) {
+			vmg.flags = vma->vm_flags;
+			/* If this fails, state is reset ready for a reattempt. */
+			merge = vma_merge_new_range(&vmg);
+
			if (merge) {
				/*
				 * ->mmap() can change vma->vm_file and fput
				 * the original file. So fput the vma->vm_file
				 * here or we would add an extra fput for file
				 * and cause general protection fault
				 * ultimately.
				 */
				fput(vma->vm_file);
				vm_area_free(vma);
				vma = merge;
				/* Update vm_flags to pick up the change. */
				vm_flags = vma->vm_flags;
				goto unmap_writable;
			}
+			vma_iter_config(&vmi, addr, end);
		}

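The retry above exists because a driver's ->mmap() may legitimately rewrite the VMA it is handed. A hypothetical callback of that shape (demo_mmap, demo_backing_file, and the flag choice are all illustrative, not from the source) shows why both the second merge attempt and the fput() of whatever vma->vm_file now points at are needed:

	static struct file *demo_backing_file;	/* hypothetical preexisting file */

	static int demo_mmap(struct file *file, struct vm_area_struct *vma)
	{
		/*
		 * Swap in a different backing file: drop the reference
		 * mmap_region() installed and take our own.
		 */
		fput(vma->vm_file);
		vma->vm_file = get_file(demo_backing_file);

		/*
		 * Changing the flags is what makes mmap_region() attempt
		 * the merge a second time.
		 */
		vm_flags_clear(vma, VM_MAYWRITE);
		return 0;
	}
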
		vm_flags = vma->vm_flags;
	} else if (vm_flags & VM_SHARED) {
		error = shmem_zero_setup(vma);
		if (error)
			goto free_vma;
	} else {

--- 16 unchanged lines hidden ---


	/* Lock the VMA since it is modified after insertion into VMA tree */
	vma_start_write(vma);
	vma_iter_store(&vmi, vma);
	mm->map_count++;
	vma_link_file(vma);

	/*
-	 * vma_merge() calls khugepaged_enter_vma() either, the below
+	 * vma_merge_new_range() calls khugepaged_enter_vma() too, the below
	 * call covers the non-merge case.
	 */
	khugepaged_enter_vma(vma, vma->vm_flags);

	/* Once vma denies write, undo our temporary denial count */
unmap_writable:
	if (writable_file_mapping)
		mapping_unmap_writable(file->f_mapping);

--- 38 unchanged lines hidden ---

	if (file || vma->vm_file) {
unmap_and_free_vma:
		fput(vma->vm_file);
		vma->vm_file = NULL;

		vma_iter_set(&vmi, vma->vm_end);
		/* Undo any partial mapping done by a device driver. */
-		unmap_region(&vmi.mas, vma, prev, next);
+		unmap_region(&vmi.mas, vma, vmg.prev, vmg.next);
	}
	if (writable_file_mapping)
		mapping_unmap_writable(file->f_mapping);
free_vma:
	vm_area_free(vma);
unacct_error:
	if (charged)
		vm_unacct_memory(charged);

--- 130 unchanged lines hidden ---

 * Extend the brk VMA from addr to addr + len. If the VMA is NULL or the flags
 * do not match then create a new anonymous VMA. Eventually we may be able to
 * do some brk-specific accounting here.
 */
static int do_brk_flags(struct vma_iterator *vmi, struct vm_area_struct *vma,
		unsigned long addr, unsigned long len, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
-	struct vma_prepare vp;

	/*
	 * Check against address space limits by the changed size
	 * Note: This happens *after* clearing old mappings in some code paths.
	 */
	flags |= VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
	if (!may_expand_vm(mm, flags, len >> PAGE_SHIFT))
		return -ENOMEM;

--- 7 unchanged lines hidden ---

	/*
	 * Expand the existing vma if possible; Note that singular lists do not
	 * occur after forking, so the expand will only happen on new VMAs.
	 */
	if (vma && vma->vm_end == addr) {
		VMG_STATE(vmg, mm, vmi, addr, addr + len, flags, PHYS_PFN(addr));

		vmg.prev = vma;
-		if (can_vma_merge_after(&vmg)) {
-			vma_iter_config(vmi, vma->vm_start, addr + len);
-			if (vma_iter_prealloc(vmi, vma))
-				goto unacct_fail;
-
-			vma_start_write(vma);
-
-			init_vma_prep(&vp, vma);
-			vma_prepare(&vp);
-			vma_adjust_trans_huge(vma, vma->vm_start, addr + len, 0);
-			vma->vm_end = addr + len;
-			vm_flags_set(vma, VM_SOFTDIRTY);
-			vma_iter_store(vmi, vma);
-
-			vma_complete(&vp, vmi, mm);
-			validate_mm(mm);
-			khugepaged_enter_vma(vma, flags);
-			goto out;
-		}
+		vma_iter_next_range(vmi);
+
+		if (vma_merge_new_range(&vmg))
+			goto out;
+		else if (vmg_nomem(&vmg))
+			goto unacct_fail;
	}

	if (vma)
		vma_iter_next_range(vmi);
	/* create a vma struct for an anonymous mapping */
	vma = vm_area_alloc(mm);
	if (!vma)
		goto unacct_fail;
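Note the distinction the new brk path draws: vma_merge_new_range() returns NULL both when nothing was mergeable (fine, fall through and allocate a fresh VMA) and when it ran out of memory (fatal, unaccount and bail). vmg_nomem() is presumably a thin predicate over the state the merge left behind; a sketch under that assumption, with the enum name assumed from this series:

	static inline bool vmg_nomem(struct vma_merge_struct *vmg)
	{
		/* Assumed: merge helpers record an OOM outcome in vmg->state. */
		return vmg->state == VMA_MERGE_ERROR_NOMEM;
	}
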

--- 566 unchanged lines hidden ---