// SPDX-License-Identifier: GPL-2.0-only
/*
 * ARC700 mmap
 *
 * (started from arm version - for VIPT alias handling)
 *
 * Copyright (C) 2013 Synopsys, Inc. (www.synopsys.com)
 */

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/sched/mm.h>

#include <asm/cacheflush.h>

/*
 * Ensure that shared mappings are correctly aligned to avoid
 * aliasing issues with VIPT caches: a given page of an object must
 * always be mapped at a virtual address that is a multiple of SHMLBA
 * bytes, so that all of its aliases land in the same cache colour.
 */
unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct vm_unmapped_area_info info = {};

	/*
	 * A MAP_FIXED address must be returned unmodified, so only
	 * validate it: reject a shared mapping whose address is not
	 * cache colour aligned with its file offset, as it would alias.
	 */
	if (flags & MAP_FIXED) {
		if (flags & MAP_SHARED &&
		    (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))
			return -EINVAL;
		return addr;
	}
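
	/*
	 * Illustration of the check above (assuming 4 KiB pages and the
	 * usual ARC SHMLBA of four pages, i.e. 0x4000): MAP_SHARED |
	 * MAP_FIXED of pgoff 3 (file offset 0x3000) at addr 0x20003000
	 * passes, since 0x20003000 - 0x3000 = 0x20000000 is a multiple
	 * of SHMLBA; the same pgoff at addr 0x20004000 leaves a
	 * remainder of 0x1000 and is rejected with -EINVAL.
	 */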

	if (len > TASK_SIZE)
		return -ENOMEM;

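	/*
	 * Try the caller's hint: accept it if the page-aligned region
	 * fits below TASK_SIZE and does not overlap an existing VMA or
	 * its stack guard gap (which vm_start_gap() accounts for).
	 */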
	if (addr) {
		addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}

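	/*
	 * No usable hint: search bottom-up between mmap_base and
	 * TASK_SIZE.  align_offset records the file offset's colour;
	 * since info was zero-initialised, align_mask is 0 and
	 * vm_unmapped_area() imposes no alignment beyond page size here.
	 */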
	info.length = len;
	info.low_limit = mm->mmap_base;
	info.high_limit = TASK_SIZE;
	info.align_offset = pgoff << PAGE_SHIFT;
	return vm_unmapped_area(&info);
}

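/*
 * Map VM_{READ,WRITE,EXEC,SHARED} combinations to ARC user PTE
 * protections.  Private writable mappings deliberately get the
 * read-only PAGE_U_R: the first write faults and the generic mm code
 * breaks COW at that point.  Only shared writable mappings are mapped
 * writable (PAGE_U_W_R) up front, and execute always implies read
 * (PAGE_U_X_R).
 */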
static const pgprot_t protection_map[16] = {
	[VM_NONE]					= PAGE_U_NONE,
	[VM_READ]					= PAGE_U_R,
	[VM_WRITE]					= PAGE_U_R,
	[VM_WRITE | VM_READ]				= PAGE_U_R,
	[VM_EXEC]					= PAGE_U_X_R,
	[VM_EXEC | VM_READ]				= PAGE_U_X_R,
	[VM_EXEC | VM_WRITE]				= PAGE_U_X_R,
	[VM_EXEC | VM_WRITE | VM_READ]			= PAGE_U_X_R,
	[VM_SHARED]					= PAGE_U_NONE,
	[VM_SHARED | VM_READ]				= PAGE_U_R,
	[VM_SHARED | VM_WRITE]				= PAGE_U_W_R,
	[VM_SHARED | VM_WRITE | VM_READ]		= PAGE_U_W_R,
	[VM_SHARED | VM_EXEC]				= PAGE_U_X_R,
	[VM_SHARED | VM_EXEC | VM_READ]			= PAGE_U_X_R,
	[VM_SHARED | VM_EXEC | VM_WRITE]		= PAGE_U_X_W_R,
	[VM_SHARED | VM_EXEC | VM_WRITE | VM_READ]	= PAGE_U_X_W_R
};
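/*
 * DECLARE_VM_GET_PAGE_PROT emits the standard vm_get_page_prot()
 * definition, which indexes protection_map[] with the low
 * VM_READ | VM_WRITE | VM_EXEC | VM_SHARED bits of vm_flags.
 */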
DECLARE_VM_GET_PAGE_PROT