/*
 * linux/arch/x86_64/kernel/sys_x86_64.c
 */

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/syscalls.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/stat.h>
#include <linux/mman.h>
#include <linux/file.h>
#include <linux/utsname.h>
#include <linux/personality.h>

#include <asm/uaccess.h>
#include <asm/ia32.h>

/*
 * sys_pipe() is the normal C calling standard for creating
 * a pipe. It's not the way Unix traditionally does this, though.
 */
asmlinkage long sys_pipe(int __user *fildes)
{
	int fd[2];
	int error;

	error = do_pipe(fd);
	if (!error) {
		if (copy_to_user(fildes, fd, 2 * sizeof(int)))
			error = -EFAULT;
	}
	return error;
}

asmlinkage long sys_mmap(unsigned long addr, unsigned long len,
			 unsigned long prot, unsigned long flags,
			 unsigned long fd, unsigned long off)
{
	long error;
	struct file *file;

	/* The file offset must be page aligned. */
	error = -EINVAL;
	if (off & ~PAGE_MASK)
		goto out;

	error = -EBADF;
	file = NULL;
	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
	if (!(flags & MAP_ANONYMOUS)) {
		file = fget(fd);
		if (!file)
			goto out;
	}
	down_write(&current->mm->mmap_sem);
	error = do_mmap_pgoff(file, addr, len, prot, flags, off >> PAGE_SHIFT);
	up_write(&current->mm->mmap_sem);

	if (file)
		fput(file);
out:
	return error;
}

static void find_start_end(unsigned long flags, unsigned long *begin,
			   unsigned long *end)
{
	if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) {
		/* This is usually needed to map code in the small code
		   model, so it has to fit in the first 31 bits.  Limit
		   it to that.  This means we need to move the unmapped
		   base down for this case.  This can conflict with the
		   heap, but we assume that glibc malloc knows how to
		   fall back to mmap.  Give it 1GB of playground for
		   now. -AK */
		*begin = 0x40000000;
		*end = 0x80000000;
	} else {
		*begin = TASK_UNMAPPED_BASE;
		*end = TASK_SIZE;
	}
}

unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long start_addr;
	unsigned long begin, end;

	if (flags & MAP_FIXED)
		return addr;

	find_start_end(flags, &begin, &end);

	if (len > end)
		return -ENOMEM;

	/* Honor the caller's hint if it leaves room below `end' and
	   does not collide with an existing mapping. */
	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (end - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}
	/* For constrained 32-bit searches, restart from `begin' when a
	   hole big enough for this request is already known to exist
	   below free_area_cache. */
	if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32))
	    && len <= mm->cached_hole_size) {
		mm->cached_hole_size = 0;
		mm->free_area_cache = begin;
	}
	addr = mm->free_area_cache;
	if (addr < begin)
		addr = begin;
	start_addr = addr;

full_search:
	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
		/* At this point:  (!vma || addr < vma->vm_end). */
		if (end - len < addr) {
			/*
			 * Start a new search - just in case we missed
			 * some holes.
			 */
			if (start_addr != begin) {
				start_addr = addr = begin;
				mm->cached_hole_size = 0;
				goto full_search;
			}
			return -ENOMEM;
		}
		if (!vma || addr + len <= vma->vm_start) {
			/*
			 * Remember the place where we stopped the search:
			 */
			mm->free_area_cache = addr + len;
			return addr;
		}
		if (addr + mm->cached_hole_size < vma->vm_start)
			mm->cached_hole_size = vma->vm_start - addr;

		addr = vma->vm_end;
	}
}

asmlinkage long sys_uname(struct new_utsname __user *name)
{
	int err;

	down_read(&uts_sem);
	err = copy_to_user(name, utsname(), sizeof(*name));
	up_read(&uts_sem);
	/* Processes running under the 32-bit personality see an
	   "i686" machine name. */
	if (personality(current->personality) == PER_LINUX32)
		err |= copy_to_user(&name->machine, "i686", 5);
	return err ? -EFAULT : 0;
}
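
/*
 * Example (illustrative userspace sketch, not kernel code): with the
 * policy in find_start_end() above, a 64-bit task passing MAP_32BIT
 * should see its anonymous mapping placed in the 1GB window between
 * 0x40000000 and 0x80000000.  A minimal check, assuming glibc exposes
 * MAP_32BIT when _GNU_SOURCE is defined:
 *
 *	#define _GNU_SOURCE
 *	#include <stdio.h>
 *	#include <sys/mman.h>
 *
 *	int main(void)
 *	{
 *		void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *			       MAP_PRIVATE | MAP_ANONYMOUS | MAP_32BIT,
 *			       -1, 0);
 *
 *		if (p == MAP_FAILED)
 *			return 1;
 *		printf("mapped at %p\n", p);
 *		return munmap(p, 4096);
 *	}
 */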
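
/*
 * Example (illustrative userspace sketch, not kernel code): the
 * PER_LINUX32 branch in sys_uname() above is what makes uname(2)
 * report "i686" to a process running under the 32-bit personality.
 * A minimal demonstration, assuming glibc's personality(2) and
 * uname(2) wrappers:
 *
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <sys/personality.h>
 *	#include <sys/utsname.h>
 *
 *	int main(void)
 *	{
 *		struct utsname u;
 *
 *		personality(PER_LINUX32);
 *		if (uname(&u))
 *			return 1;
 *		printf("machine: %s\n", u.machine);
 *		return strcmp(u.machine, "i686") != 0;
 *	}
 */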