xref: /linux/arch/x86/kernel/sys_x86_64.c (revision ed3174d93c342b8b2eeba6bbd124707d55304a7b)
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/syscalls.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/stat.h>
#include <linux/mman.h>
#include <linux/file.h>
#include <linux/utsname.h>
#include <linux/personality.h>
#include <linux/random.h>

#include <asm/uaccess.h>
#include <asm/ia32.h>

/*
 * sys_pipe() uses the normal C calling convention for creating
 * a pipe: both descriptors are copied back through a user pointer.
 * It's not the way Unix traditionally does this, though; some ports
 * return the two descriptors directly in registers instead.
 */
asmlinkage long sys_pipe(int __user *fildes)
{
	int fd[2];
	int error;

	error = do_pipe(fd);
	if (!error) {
		if (copy_to_user(fildes, fd, 2*sizeof(int)))
			error = -EFAULT;
	}
	return error;
}
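/*
 * Illustrative only (not part of this file): a minimal userspace
 * sketch of how this entry point is reached through the libc pipe()
 * wrapper. Assumes a hosted C environment; nothing here is kernel code.
 */
#if 0
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd[2];
	char buf[3] = { 0 };

	/* pipe() lands in sys_pipe(); fd[0] is the read end */
	if (pipe(fd) < 0) {
		perror("pipe");
		return 1;
	}
	write(fd[1], "hi", 2);
	read(fd[0], buf, 2);
	printf("read back: %s\n", buf);
	return 0;
}
#endif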

asmlinkage long sys_mmap(unsigned long addr, unsigned long len,
	unsigned long prot, unsigned long flags,
	unsigned long fd, unsigned long off)
{
	long error;
	struct file *file;

	/* the file offset must be page-aligned */
	error = -EINVAL;
	if (off & ~PAGE_MASK)
		goto out;

	error = -EBADF;
	file = NULL;
	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
	if (!(flags & MAP_ANONYMOUS)) {
		file = fget(fd);
		if (!file)
			goto out;
	}
	down_write(&current->mm->mmap_sem);
	error = do_mmap_pgoff(file, addr, len, prot, flags, off >> PAGE_SHIFT);
	up_write(&current->mm->mmap_sem);

	if (file)
		fput(file);
out:
	return error;
}
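/*
 * Illustrative only: a userspace sketch of the alignment rule enforced
 * above -- mmap() with a file offset that is not a multiple of the page
 * size fails with EINVAL, while an aligned offset is converted to a
 * page number (off >> PAGE_SHIFT) before reaching do_mmap_pgoff().
 * The file name is made up for the example.
 */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/etc/hostname", O_RDONLY);	/* any readable file */
	long page = sysconf(_SC_PAGESIZE);
	void *p;

	if (fd < 0)
		return 1;

	/* unaligned offset: the kernel rejects this with EINVAL */
	p = mmap(NULL, page, PROT_READ, MAP_PRIVATE, fd, 1);
	if (p == MAP_FAILED)
		perror("mmap, offset 1");

	/* aligned offset 0: accepted (assuming the file is large enough) */
	p = mmap(NULL, page, PROT_READ, MAP_PRIVATE, fd, 0);
	if (p != MAP_FAILED)
		munmap(p, page);
	close(fd);
	return 0;
}
#endif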

static void find_start_end(unsigned long flags, unsigned long *begin,
			   unsigned long *end)
{
	if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) {
		unsigned long new_begin;
		/* This is usually used to map code in the small code
		   model, so it needs to be in the first 31 bits. Limit
		   it to that.  This means we need to move the
		   unmapped base down for this case.  This can give
		   conflicts with the heap, but we assume that glibc
		   malloc knows how to fall back to mmap.  Give it 1GB
		   of playground for now. -AK */
		*begin = 0x40000000;
		*end = 0x80000000;
		if (current->flags & PF_RANDOMIZE) {
			new_begin = randomize_range(*begin, *begin + 0x02000000, 0);
			if (new_begin)
				*begin = new_begin;
		}
	} else {
		*begin = TASK_UNMAPPED_BASE;
		*end = TASK_SIZE;
	}
}
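/*
 * Illustrative only: the arithmetic behind the MAP_32BIT window above.
 * The window is [0x40000000, 0x80000000), i.e. 1GB inside the low 31
 * bits, and PF_RANDOMIZE shifts its start by up to 32MB (0x02000000).
 */
#if 0
#include <stdio.h>

int main(void)
{
	unsigned long begin = 0x40000000UL, end = 0x80000000UL;

	printf("window size: %lu MB\n", (end - begin) >> 20);	  /* 1024 */
	printf("max random shift: %lu MB\n", 0x02000000UL >> 20); /* 32 */
	return 0;
}
#endif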

unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long start_addr;
	unsigned long begin, end;

	if (flags & MAP_FIXED)
		return addr;

	find_start_end(flags, &begin, &end);

	if (len > end)
		return -ENOMEM;

	/* requesting a specific address */
	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (end - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}
	/*
	 * For 32-bit style mappings: if the cached largest hole is big
	 * enough for this request, restart the scan at begin so that
	 * hole can be reused.
	 */
	if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32))
	    && len <= mm->cached_hole_size) {
		mm->cached_hole_size = 0;
		mm->free_area_cache = begin;
	}
	addr = mm->free_area_cache;
	if (addr < begin)
		addr = begin;
	start_addr = addr;

full_search:
	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
		/* At this point:  (!vma || addr < vma->vm_end). */
		if (end - len < addr) {
			/*
			 * Start a new search - just in case we missed
			 * some holes.
			 */
			if (start_addr != begin) {
				start_addr = addr = begin;
				mm->cached_hole_size = 0;
				goto full_search;
			}
			return -ENOMEM;
		}
		if (!vma || addr + len <= vma->vm_start) {
			/*
			 * Remember the place where we stopped the search:
			 */
			mm->free_area_cache = addr + len;
			return addr;
		}
		/* remember the largest hole we saw so far */
		if (addr + mm->cached_hole_size < vma->vm_start)
			mm->cached_hole_size = vma->vm_start - addr;

		addr = vma->vm_end;
	}
}
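/*
 * Illustrative only: the same first-fit scan as above, restated over a
 * plain sorted array so the algorithm is easier to follow. "Regions"
 * stand in for VMAs; all names here are invented for the example, and
 * 0 (rather than -ENOMEM) signals failure.
 */
#if 0
struct region { unsigned long start, end; };	/* sorted, non-overlapping */

/* Return the first address in [begin, end) with a len-sized gap. */
static unsigned long first_fit(const struct region *r, int n,
			       unsigned long begin, unsigned long end,
			       unsigned long len)
{
	unsigned long addr = begin;
	int i;

	for (i = 0; i <= n; i++) {
		/* next occupied start, or the top of the window */
		unsigned long next = (i < n) ? r[i].start : end;

		if (end - len < addr)
			return 0;		/* out of space */
		if (addr + len <= next)
			return addr;		/* gap found */
		addr = r[i].end;		/* skip past this region */
	}
	return 0;
}
#endif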


unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
			  const unsigned long len, const unsigned long pgoff,
			  const unsigned long flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned long addr = addr0;

	/* requested length too big for entire address space */
	if (len > TASK_SIZE)
		return -ENOMEM;

	if (flags & MAP_FIXED)
		return addr;

	/* for MAP_32BIT mappings we force the legacy mmap base */
	if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT))
		goto bottomup;

	/* requesting a specific address */
	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
				(!vma || addr + len <= vma->vm_start))
			return addr;
	}

	/* check if free_area_cache is useful for us */
	if (len <= mm->cached_hole_size) {
		mm->cached_hole_size = 0;
		mm->free_area_cache = mm->mmap_base;
	}

	/* either no address requested or can't fit in requested address hole */
	addr = mm->free_area_cache;

	/* make sure it can fit in the remaining address space */
	if (addr > len) {
		vma = find_vma(mm, addr-len);
		if (!vma || addr <= vma->vm_start)
			/* remember the address as a hint for next time */
			return (mm->free_area_cache = addr-len);
	}

	if (mm->mmap_base < len)
		goto bottomup;

	addr = mm->mmap_base-len;

	do {
		/*
		 * Lookup failure means no vma is above this address,
		 * else if new region fits below vma->vm_start,
		 * return with success:
		 */
		vma = find_vma(mm, addr);
		if (!vma || addr+len <= vma->vm_start)
			/* remember the address as a hint for next time */
			return (mm->free_area_cache = addr);

		/* remember the largest hole we saw so far */
		if (addr + mm->cached_hole_size < vma->vm_start)
			mm->cached_hole_size = vma->vm_start - addr;

		/* try just below the current vma->vm_start */
		addr = vma->vm_start-len;
	} while (len < vma->vm_start);

bottomup:
	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	mm->cached_hole_size = ~0UL;
	mm->free_area_cache = TASK_UNMAPPED_BASE;
	addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
	/*
	 * Restore the topdown base:
	 */
	mm->free_area_cache = mm->mmap_base;
	mm->cached_hole_size = ~0UL;

	return addr;
}
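/*
 * Illustrative only: a userspace probe of the top-down policy above.
 * On a kernel using this allocator, successive anonymous mappings are
 * typically handed out at descending addresses below the mmap base.
 */
#if 0
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	void *a = mmap(NULL, page, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	void *b = mmap(NULL, page, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	/* expect b below a when the top-down allocator is in effect */
	printf("a=%p b=%p\n", a, b);
	return 0;
}
#endif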


asmlinkage long sys_uname(struct new_utsname __user *name)
{
	int err;
	down_read(&uts_sem);
	err = copy_to_user(name, utsname(), sizeof (*name));
	up_read(&uts_sem);
	if (personality(current->personality) == PER_LINUX32)
		err |= copy_to_user(&name->machine, "i686", 5);
	return err ? -EFAULT : 0;
}
254