xref: /linux/fs/binfmt_elf.c (revision d8327c784b51b57dac2c26cfad87dce0d68dfd98)
1 /*
2  * linux/fs/binfmt_elf.c
3  *
4  * These are the functions used to load ELF format executables as used
5  * on SVr4 machines.  Information on the format may be found in the book
6  * "UNIX SYSTEM V RELEASE 4 Programmers Guide: Ansi C and Programming Support
7  * Tools".
8  *
9  * Copyright 1993, 1994: Eric Youngdale (ericy@cais.com).
10  */
11 
12 #include <linux/module.h>
13 #include <linux/kernel.h>
14 #include <linux/fs.h>
15 #include <linux/stat.h>
16 #include <linux/time.h>
17 #include <linux/mm.h>
18 #include <linux/mman.h>
19 #include <linux/a.out.h>
20 #include <linux/errno.h>
21 #include <linux/signal.h>
22 #include <linux/binfmts.h>
23 #include <linux/string.h>
24 #include <linux/file.h>
25 #include <linux/fcntl.h>
26 #include <linux/ptrace.h>
27 #include <linux/slab.h>
28 #include <linux/shm.h>
29 #include <linux/personality.h>
30 #include <linux/elfcore.h>
31 #include <linux/init.h>
32 #include <linux/highuid.h>
33 #include <linux/smp.h>
34 #include <linux/smp_lock.h>
35 #include <linux/compiler.h>
36 #include <linux/highmem.h>
37 #include <linux/pagemap.h>
38 #include <linux/security.h>
39 #include <linux/syscalls.h>
40 #include <linux/random.h>
41 
42 #include <asm/uaccess.h>
43 #include <asm/param.h>
44 #include <asm/page.h>
45 
46 #include <linux/elf.h>
47 
48 static int load_elf_binary(struct linux_binprm * bprm, struct pt_regs * regs);
49 static int load_elf_library(struct file*);
50 static unsigned long elf_map (struct file *, unsigned long, struct elf_phdr *, int, int);
51 extern int dump_fpu (struct pt_regs *, elf_fpregset_t *);
52 
53 #ifndef elf_addr_t
54 #define elf_addr_t unsigned long
55 #endif
56 
57 /*
58  * If we don't support core dumping, then supply a NULL so we
59  * don't even try.
60  */
61 #if defined(USE_ELF_CORE_DUMP) && defined(CONFIG_ELF_CORE)
62 static int elf_core_dump(long signr, struct pt_regs * regs, struct file * file);
63 #else
64 #define elf_core_dump	NULL
65 #endif
66 
67 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
68 # define ELF_MIN_ALIGN	ELF_EXEC_PAGESIZE
69 #else
70 # define ELF_MIN_ALIGN	PAGE_SIZE
71 #endif
72 
73 #ifndef ELF_CORE_EFLAGS
74 #define ELF_CORE_EFLAGS	0
75 #endif
76 
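/*
 * Page arithmetic helpers: ELF_PAGESTART() rounds an address down to an
 * ELF_MIN_ALIGN boundary, ELF_PAGEOFFSET() gives the offset within that
 * page, and ELF_PAGEALIGN() rounds a size or address up to the next
 * boundary.
 */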
77 #define ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(ELF_MIN_ALIGN-1))
78 #define ELF_PAGEOFFSET(_v) ((_v) & (ELF_MIN_ALIGN-1))
79 #define ELF_PAGEALIGN(_v) (((_v) + ELF_MIN_ALIGN - 1) & ~(ELF_MIN_ALIGN - 1))
80 
81 static struct linux_binfmt elf_format = {
82 		.module		= THIS_MODULE,
83 		.load_binary	= load_elf_binary,
84 		.load_shlib	= load_elf_library,
85 		.core_dump	= elf_core_dump,
86 		.min_coredump	= ELF_EXEC_PAGESIZE
87 };
88 
89 #define BAD_ADDR(x)	((unsigned long)(x) > TASK_SIZE)
90 
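/*
 * Extend the brk area with anonymous zero-filled pages so that it covers
 * [start, end), both rounded to ELF_MIN_ALIGN, and record the (aligned)
 * end as the new start_brk/brk of the mm.
 */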
91 static int set_brk(unsigned long start, unsigned long end)
92 {
93 	start = ELF_PAGEALIGN(start);
94 	end = ELF_PAGEALIGN(end);
95 	if (end > start) {
96 		unsigned long addr;
97 		down_write(&current->mm->mmap_sem);
98 		addr = do_brk(start, end - start);
99 		up_write(&current->mm->mmap_sem);
100 		if (BAD_ADDR(addr))
101 			return addr;
102 	}
103 	current->mm->start_brk = current->mm->brk = end;
104 	return 0;
105 }
106 
107 
108 /* We need to explicitly zero any fractional pages
109    after the data section (i.e. bss).  These would
110    otherwise contain junk from the file that should
111    not be in memory. */
112 
113 
114 static int padzero(unsigned long elf_bss)
115 {
116 	unsigned long nbyte;
117 
118 	nbyte = ELF_PAGEOFFSET(elf_bss);
119 	if (nbyte) {
120 		nbyte = ELF_MIN_ALIGN - nbyte;
121 		if (clear_user((void __user *) elf_bss, nbyte))
122 			return -EFAULT;
123 	}
124 	return 0;
125 }
126 
127 /* Let's use some macros to make this stack manipulation a little clearer */
128 #ifdef CONFIG_STACK_GROWSUP
129 #define STACK_ADD(sp, items) ((elf_addr_t __user *)(sp) + (items))
130 #define STACK_ROUND(sp, items) \
131 	((15 + (unsigned long) ((sp) + (items))) &~ 15UL)
132 #define STACK_ALLOC(sp, len) ({ elf_addr_t __user *old_sp = (elf_addr_t __user *)sp; sp += len; old_sp; })
133 #else
134 #define STACK_ADD(sp, items) ((elf_addr_t __user *)(sp) - (items))
135 #define STACK_ROUND(sp, items) \
136 	(((unsigned long) (sp - items)) &~ 15UL)
137 #define STACK_ALLOC(sp, len) ({ sp -= len ; sp; })
138 #endif
139 
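/*
 * Build the initial userspace stack for the new image: argc, the argv and
 * envp pointer arrays (each NULL-terminated), and the ELF auxiliary vector
 * (AT_* id/value pairs terminated by AT_NULL, also kept in
 * mm->saved_auxv).  The argument and environment strings themselves were
 * already copied onto the stack pages by the exec core; here we only lay
 * down the pointers.  For an a.out interpreter, the addresses of argv and
 * envp are pushed as well.
 */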
140 static int
141 create_elf_tables(struct linux_binprm *bprm, struct elfhdr * exec,
142 		int interp_aout, unsigned long load_addr,
143 		unsigned long interp_load_addr)
144 {
145 	unsigned long p = bprm->p;
146 	int argc = bprm->argc;
147 	int envc = bprm->envc;
148 	elf_addr_t __user *argv;
149 	elf_addr_t __user *envp;
150 	elf_addr_t __user *sp;
151 	elf_addr_t __user *u_platform;
152 	const char *k_platform = ELF_PLATFORM;
153 	int items;
154 	elf_addr_t *elf_info;
155 	int ei_index = 0;
156 	struct task_struct *tsk = current;
157 
158 	/*
159 	 * If this architecture has a platform capability string, copy it
160 	 * to userspace.  In some cases (Sparc), this info is impossible
161 	 * for userspace to get any other way, in others (i386) it is
162 	 * merely difficult.
163 	 */
164 
165 	u_platform = NULL;
166 	if (k_platform) {
167 		size_t len = strlen(k_platform) + 1;
168 
169 		/*
170 		 * In some cases (e.g. Hyper-Threading), we want to avoid L1
171 		 * evictions by the processes running on the same package. One
172 		 * thing we can do is to shuffle the initial stack for them.
173 		 */
174 
175 		p = arch_align_stack(p);
176 
177 		u_platform = (elf_addr_t __user *)STACK_ALLOC(p, len);
178 		if (__copy_to_user(u_platform, k_platform, len))
179 			return -EFAULT;
180 	}
181 
182 	/* Create the ELF interpreter info */
183 	elf_info = (elf_addr_t *) current->mm->saved_auxv;
184 #define NEW_AUX_ENT(id, val) \
185 	do { elf_info[ei_index++] = id; elf_info[ei_index++] = val; } while (0)
186 
187 #ifdef ARCH_DLINFO
188 	/*
189 	 * ARCH_DLINFO must come first so PPC can do its special alignment of
190 	 * AUXV.
191 	 */
192 	ARCH_DLINFO;
193 #endif
194 	NEW_AUX_ENT(AT_HWCAP, ELF_HWCAP);
195 	NEW_AUX_ENT(AT_PAGESZ, ELF_EXEC_PAGESIZE);
196 	NEW_AUX_ENT(AT_CLKTCK, CLOCKS_PER_SEC);
197 	NEW_AUX_ENT(AT_PHDR, load_addr + exec->e_phoff);
198 	NEW_AUX_ENT(AT_PHENT, sizeof (struct elf_phdr));
199 	NEW_AUX_ENT(AT_PHNUM, exec->e_phnum);
200 	NEW_AUX_ENT(AT_BASE, interp_load_addr);
201 	NEW_AUX_ENT(AT_FLAGS, 0);
202 	NEW_AUX_ENT(AT_ENTRY, exec->e_entry);
203 	NEW_AUX_ENT(AT_UID, (elf_addr_t) tsk->uid);
204 	NEW_AUX_ENT(AT_EUID, (elf_addr_t) tsk->euid);
205 	NEW_AUX_ENT(AT_GID, (elf_addr_t) tsk->gid);
206 	NEW_AUX_ENT(AT_EGID, (elf_addr_t) tsk->egid);
207  	NEW_AUX_ENT(AT_SECURE, (elf_addr_t) security_bprm_secureexec(bprm));
208 	if (k_platform) {
209 		NEW_AUX_ENT(AT_PLATFORM, (elf_addr_t)(unsigned long)u_platform);
210 	}
211 	if (bprm->interp_flags & BINPRM_FLAGS_EXECFD) {
212 		NEW_AUX_ENT(AT_EXECFD, (elf_addr_t) bprm->interp_data);
213 	}
214 #undef NEW_AUX_ENT
215 	/* AT_NULL is zero; clear the rest too */
216 	memset(&elf_info[ei_index], 0,
217 	       sizeof current->mm->saved_auxv - ei_index * sizeof elf_info[0]);
218 
219 	/* And advance past the AT_NULL entry.  */
220 	ei_index += 2;
221 
222 	sp = STACK_ADD(p, ei_index);
223 
224 	items = (argc + 1) + (envc + 1);
225 	if (interp_aout) {
226 		items += 3; /* a.out interpreters require argv & envp too */
227 	} else {
228 		items += 1; /* ELF interpreters only put argc on the stack */
229 	}
230 	bprm->p = STACK_ROUND(sp, items);
231 
232 	/* Point sp at the lowest address on the stack */
233 #ifdef CONFIG_STACK_GROWSUP
234 	sp = (elf_addr_t __user *)bprm->p - items - ei_index;
235 	bprm->exec = (unsigned long) sp; /* XXX: PARISC HACK */
236 #else
237 	sp = (elf_addr_t __user *)bprm->p;
238 #endif
239 
240 	/* Now, let's put argc (and argv, envp if appropriate) on the stack */
241 	if (__put_user(argc, sp++))
242 		return -EFAULT;
243 	if (interp_aout) {
244 		argv = sp + 2;
245 		envp = argv + argc + 1;
246 		__put_user((elf_addr_t)(unsigned long)argv, sp++);
247 		__put_user((elf_addr_t)(unsigned long)envp, sp++);
248 	} else {
249 		argv = sp;
250 		envp = argv + argc + 1;
251 	}
252 
253 	/* Populate argv and envp */
254 	p = current->mm->arg_end = current->mm->arg_start;
255 	while (argc-- > 0) {
256 		size_t len;
257 		__put_user((elf_addr_t)p, argv++);
258 		len = strnlen_user((void __user *)p, PAGE_SIZE*MAX_ARG_PAGES);
259 		if (!len || len > PAGE_SIZE*MAX_ARG_PAGES)
260 			return 0;
261 		p += len;
262 	}
263 	if (__put_user(0, argv))
264 		return -EFAULT;
265 	current->mm->arg_end = current->mm->env_start = p;
266 	while (envc-- > 0) {
267 		size_t len;
268 		__put_user((elf_addr_t)p, envp++);
269 		len = strnlen_user((void __user *)p, PAGE_SIZE*MAX_ARG_PAGES);
270 		if (!len || len > PAGE_SIZE*MAX_ARG_PAGES)
271 			return 0;
272 		p += len;
273 	}
274 	if (__put_user(0, envp))
275 		return -EFAULT;
276 	current->mm->env_end = p;
277 
278 	/* Put the elf_info on the stack in the right place.  */
279 	sp = (elf_addr_t __user *)envp + 1;
280 	if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
281 		return -EFAULT;
282 	return 0;
283 }
284 
285 #ifndef elf_map
286 
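/*
 * Map one PT_LOAD segment of an ELF file.  The virtual address and file
 * offset are both rounded down to an ELF_MIN_ALIGN boundary so the mapping
 * stays congruent with the file; a segment with no file data just reports
 * the page-aligned target address without mapping anything.
 */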
287 static unsigned long elf_map(struct file *filep, unsigned long addr,
288 			struct elf_phdr *eppnt, int prot, int type)
289 {
290 	unsigned long map_addr;
291 	unsigned long pageoffset = ELF_PAGEOFFSET(eppnt->p_vaddr);
292 
293 	down_write(&current->mm->mmap_sem);
294 	/* mmap() will return -EINVAL if given a zero size, but a
295 	 * segment with zero filesize is perfectly valid */
296 	if (eppnt->p_filesz + pageoffset)
297 		map_addr = do_mmap(filep, ELF_PAGESTART(addr),
298 				   eppnt->p_filesz + pageoffset, prot, type,
299 				   eppnt->p_offset - pageoffset);
300 	else
301 		map_addr = ELF_PAGESTART(addr);
302 	up_write(&current->mm->mmap_sem);
303 	return(map_addr);
304 }
305 
306 #endif /* !elf_map */
307 
308 /* This is much more generalized than the library routine read function,
309    so we keep this separate.  Technically the library read function
310    is only provided so that we can read a.out libraries that have
311    an ELF header */
312 
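/*
 * Map the ELF interpreter (dynamic linker): sanity-check its headers, mmap
 * each PT_LOAD segment (sliding an ET_DYN interpreter by load_addr), zero
 * the tail of the last file-backed page and allocate the remaining bss,
 * then return its entry point biased by load_addr, or an error/~0UL.
 */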
313 static unsigned long load_elf_interp(struct elfhdr * interp_elf_ex,
314 				     struct file * interpreter,
315 				     unsigned long *interp_load_addr)
316 {
317 	struct elf_phdr *elf_phdata;
318 	struct elf_phdr *eppnt;
319 	unsigned long load_addr = 0;
320 	int load_addr_set = 0;
321 	unsigned long last_bss = 0, elf_bss = 0;
322 	unsigned long error = ~0UL;
323 	int retval, i, size;
324 
325 	/* First of all, some simple consistency checks */
326 	if (interp_elf_ex->e_type != ET_EXEC &&
327 	    interp_elf_ex->e_type != ET_DYN)
328 		goto out;
329 	if (!elf_check_arch(interp_elf_ex))
330 		goto out;
331 	if (!interpreter->f_op || !interpreter->f_op->mmap)
332 		goto out;
333 
334 	/*
335 	 * If the size of this structure has changed, then punt, since
336 	 * we will be doing the wrong thing.
337 	 */
338 	if (interp_elf_ex->e_phentsize != sizeof(struct elf_phdr))
339 		goto out;
340 	if (interp_elf_ex->e_phnum < 1 ||
341 		interp_elf_ex->e_phnum > 65536U / sizeof(struct elf_phdr))
342 		goto out;
343 
344 	/* Now read in all of the header information */
345 
346 	size = sizeof(struct elf_phdr) * interp_elf_ex->e_phnum;
347 	if (size > ELF_MIN_ALIGN)
348 		goto out;
349 	elf_phdata = (struct elf_phdr *) kmalloc(size, GFP_KERNEL);
350 	if (!elf_phdata)
351 		goto out;
352 
353 	retval = kernel_read(interpreter,interp_elf_ex->e_phoff,(char *)elf_phdata,size);
354 	error = -EIO;
355 	if (retval != size) {
356 		if (retval < 0)
357 			error = retval;
358 		goto out_close;
359 	}
360 
361 	eppnt = elf_phdata;
362 	for (i=0; i<interp_elf_ex->e_phnum; i++, eppnt++) {
363 	  if (eppnt->p_type == PT_LOAD) {
364 	    int elf_type = MAP_PRIVATE | MAP_DENYWRITE;
365 	    int elf_prot = 0;
366 	    unsigned long vaddr = 0;
367 	    unsigned long k, map_addr;
368 
369 	    if (eppnt->p_flags & PF_R) elf_prot =  PROT_READ;
370 	    if (eppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
371 	    if (eppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;
372 	    vaddr = eppnt->p_vaddr;
373 	    if (interp_elf_ex->e_type == ET_EXEC || load_addr_set)
374 	    	elf_type |= MAP_FIXED;
375 
376 	    map_addr = elf_map(interpreter, load_addr + vaddr, eppnt, elf_prot, elf_type);
377 	    error = map_addr;
378 	    if (BAD_ADDR(map_addr))
379 	    	goto out_close;
380 
381 	    if (!load_addr_set && interp_elf_ex->e_type == ET_DYN) {
382 		load_addr = map_addr - ELF_PAGESTART(vaddr);
383 		load_addr_set = 1;
384 	    }
385 
386 	    /*
387 	     * Check to see if the segment's size will overflow the
388 	     * allowed task size. Note that p_filesz must always be
389 	     * <= p_memsz so it is only necessary to check p_memsz.
390 	     */
391 	    k = load_addr + eppnt->p_vaddr;
392 	    if (k > TASK_SIZE || eppnt->p_filesz > eppnt->p_memsz ||
393 		eppnt->p_memsz > TASK_SIZE || TASK_SIZE - eppnt->p_memsz < k) {
394 	        error = -ENOMEM;
395 		goto out_close;
396 	    }
397 
398 	    /*
399 	     * Find the end of the file mapping for this phdr, and keep
400 	     * track of the largest address we see for this.
401 	     */
402 	    k = load_addr + eppnt->p_vaddr + eppnt->p_filesz;
403 	    if (k > elf_bss)
404 		elf_bss = k;
405 
406 	    /*
407 	     * Do the same thing for the memory mapping - between
408 	     * elf_bss and last_bss is the bss section.
409 	     */
410 	    k = load_addr + eppnt->p_memsz + eppnt->p_vaddr;
411 	    if (k > last_bss)
412 		last_bss = k;
413 	  }
414 	}
415 
416 	/*
417 	 * Now fill out the bss section.  First pad the last page up
418 	 * to the page boundary, and then perform a mmap to make sure
419 	 * that there are zero-mapped pages up to and including the
420 	 * last bss page.
421 	 */
422 	if (padzero(elf_bss)) {
423 		error = -EFAULT;
424 		goto out_close;
425 	}
426 
427 	elf_bss = ELF_PAGESTART(elf_bss + ELF_MIN_ALIGN - 1);	/* What we have mapped so far */
428 
429 	/* Map the last of the bss segment */
430 	if (last_bss > elf_bss) {
431 		down_write(&current->mm->mmap_sem);
432 		error = do_brk(elf_bss, last_bss - elf_bss);
433 		up_write(&current->mm->mmap_sem);
434 		if (BAD_ADDR(error))
435 			goto out_close;
436 	}
437 
438 	*interp_load_addr = load_addr;
439 	error = ((unsigned long) interp_elf_ex->e_entry) + load_addr;
440 
441 out_close:
442 	kfree(elf_phdata);
443 out:
444 	return error;
445 }
446 
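/*
 * Load an a.out format interpreter (old-style ld.so): allocate its
 * text+data with do_brk(), read the image in with the file's read method,
 * allocate the bss, and return its entry point, or ~0UL on failure.
 */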
447 static unsigned long load_aout_interp(struct exec * interp_ex,
448 			     struct file * interpreter)
449 {
450 	unsigned long text_data, elf_entry = ~0UL;
451 	char __user * addr;
452 	loff_t offset;
453 
454 	current->mm->end_code = interp_ex->a_text;
455 	text_data = interp_ex->a_text + interp_ex->a_data;
456 	current->mm->end_data = text_data;
457 	current->mm->brk = interp_ex->a_bss + text_data;
458 
459 	switch (N_MAGIC(*interp_ex)) {
460 	case OMAGIC:
461 		offset = 32;
462 		addr = (char __user *)0;
463 		break;
464 	case ZMAGIC:
465 	case QMAGIC:
466 		offset = N_TXTOFF(*interp_ex);
467 		addr = (char __user *) N_TXTADDR(*interp_ex);
468 		break;
469 	default:
470 		goto out;
471 	}
472 
473 	down_write(&current->mm->mmap_sem);
474 	do_brk(0, text_data);
475 	up_write(&current->mm->mmap_sem);
476 	if (!interpreter->f_op || !interpreter->f_op->read)
477 		goto out;
478 	if (interpreter->f_op->read(interpreter, addr, text_data, &offset) < 0)
479 		goto out;
480 	flush_icache_range((unsigned long)addr,
481 	                   (unsigned long)addr + text_data);
482 
483 
484 	down_write(&current->mm->mmap_sem);
485 	do_brk(ELF_PAGESTART(text_data + ELF_MIN_ALIGN - 1),
486 		interp_ex->a_bss);
487 	up_write(&current->mm->mmap_sem);
488 	elf_entry = interp_ex->a_entry;
489 
490 out:
491 	return elf_entry;
492 }
493 
494 /*
495  * These are the functions used to load ELF style executables and shared
496  * libraries.  There is no binary dependent code anywhere else.
497  */
498 
499 #define INTERPRETER_NONE 0
500 #define INTERPRETER_AOUT 1
501 #define INTERPRETER_ELF 2
502 
503 
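/*
 * If stack randomization is enabled for this task (PF_RANDOMIZE), shift
 * the stack top by up to 8MB of randomness, in the direction the stack
 * grows, and page-align the result.
 */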
504 static unsigned long randomize_stack_top(unsigned long stack_top)
505 {
506 	unsigned int random_variable = 0;
507 
508 	if (current->flags & PF_RANDOMIZE)
509 		random_variable = get_random_int() % (8*1024*1024);
510 #ifdef CONFIG_STACK_GROWSUP
511 	return PAGE_ALIGN(stack_top + random_variable);
512 #else
513 	return PAGE_ALIGN(stack_top - random_variable);
514 #endif
515 }
516 
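/*
 * The main ELF loader.  Roughly: validate the ELF header and read the
 * program headers, find PT_INTERP (the interpreter path) and PT_GNU_STACK,
 * flush the old executable, set up the new stack, mmap every PT_LOAD
 * segment, set up the brk/bss, load the interpreter (ELF or a.out), build
 * the argc/argv/envp/auxv tables and finally start the new thread at the
 * chosen entry point.
 */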
517 static int load_elf_binary(struct linux_binprm * bprm, struct pt_regs * regs)
518 {
519 	struct file *interpreter = NULL; /* to shut gcc up */
520  	unsigned long load_addr = 0, load_bias = 0;
521 	int load_addr_set = 0;
522 	char * elf_interpreter = NULL;
523 	unsigned int interpreter_type = INTERPRETER_NONE;
524 	unsigned char ibcs2_interpreter = 0;
525 	unsigned long error;
526 	struct elf_phdr * elf_ppnt, *elf_phdata;
527 	unsigned long elf_bss, elf_brk;
528 	int elf_exec_fileno;
529 	int retval, i;
530 	unsigned int size;
531 	unsigned long elf_entry, interp_load_addr = 0;
532 	unsigned long start_code, end_code, start_data, end_data;
533 	unsigned long reloc_func_desc = 0;
534 	char passed_fileno[6];
535 	struct files_struct *files;
536 	int have_pt_gnu_stack, executable_stack = EXSTACK_DEFAULT;
537 	unsigned long def_flags = 0;
538 	struct {
539 		struct elfhdr elf_ex;
540 		struct elfhdr interp_elf_ex;
541   		struct exec interp_ex;
542 	} *loc;
543 
544 	loc = kmalloc(sizeof(*loc), GFP_KERNEL);
545 	if (!loc) {
546 		retval = -ENOMEM;
547 		goto out_ret;
548 	}
549 
550 	/* Get the exec-header */
551 	loc->elf_ex = *((struct elfhdr *) bprm->buf);
552 
553 	retval = -ENOEXEC;
554 	/* First of all, some simple consistency checks */
555 	if (memcmp(loc->elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
556 		goto out;
557 
558 	if (loc->elf_ex.e_type != ET_EXEC && loc->elf_ex.e_type != ET_DYN)
559 		goto out;
560 	if (!elf_check_arch(&loc->elf_ex))
561 		goto out;
562 	if (!bprm->file->f_op||!bprm->file->f_op->mmap)
563 		goto out;
564 
565 	/* Now read in all of the header information */
566 
567 	if (loc->elf_ex.e_phentsize != sizeof(struct elf_phdr))
568 		goto out;
569 	if (loc->elf_ex.e_phnum < 1 ||
570 	 	loc->elf_ex.e_phnum > 65536U / sizeof(struct elf_phdr))
571 		goto out;
572 	size = loc->elf_ex.e_phnum * sizeof(struct elf_phdr);
573 	retval = -ENOMEM;
574 	elf_phdata = (struct elf_phdr *) kmalloc(size, GFP_KERNEL);
575 	if (!elf_phdata)
576 		goto out;
577 
578 	retval = kernel_read(bprm->file, loc->elf_ex.e_phoff, (char *) elf_phdata, size);
579 	if (retval != size) {
580 		if (retval >= 0)
581 			retval = -EIO;
582 		goto out_free_ph;
583 	}
584 
585 	files = current->files;		/* Refcounted so ok */
586 	retval = unshare_files();
587 	if (retval < 0)
588 		goto out_free_ph;
589 	if (files == current->files) {
590 		put_files_struct(files);
591 		files = NULL;
592 	}
593 
594 	/* exec will make our files private anyway, but for the a.out
595 	   loader stuff we need to do it earlier */
596 
597 	retval = get_unused_fd();
598 	if (retval < 0)
599 		goto out_free_fh;
600 	get_file(bprm->file);
601 	fd_install(elf_exec_fileno = retval, bprm->file);
602 
603 	elf_ppnt = elf_phdata;
604 	elf_bss = 0;
605 	elf_brk = 0;
606 
607 	start_code = ~0UL;
608 	end_code = 0;
609 	start_data = 0;
610 	end_data = 0;
611 
612 	for (i = 0; i < loc->elf_ex.e_phnum; i++) {
613 		if (elf_ppnt->p_type == PT_INTERP) {
614 			/* This is the program interpreter used for
615 			 * shared libraries - for now assume that this
616 			 * is an a.out format binary
617 			 */
618 
619 			retval = -ENOEXEC;
620 			if (elf_ppnt->p_filesz > PATH_MAX ||
621 			    elf_ppnt->p_filesz < 2)
622 				goto out_free_file;
623 
624 			retval = -ENOMEM;
625 			elf_interpreter = kmalloc(elf_ppnt->p_filesz,
626 							   GFP_KERNEL);
627 			if (!elf_interpreter)
628 				goto out_free_file;
629 
630 			retval = kernel_read(bprm->file, elf_ppnt->p_offset,
631 					   elf_interpreter,
632 					   elf_ppnt->p_filesz);
633 			if (retval != elf_ppnt->p_filesz) {
634 				if (retval >= 0)
635 					retval = -EIO;
636 				goto out_free_interp;
637 			}
638 			/* make sure path is NULL terminated */
639 			retval = -ENOEXEC;
640 			if (elf_interpreter[elf_ppnt->p_filesz - 1] != '\0')
641 				goto out_free_interp;
642 
643 			/* If the program interpreter is one of these two,
644 			 * then assume an iBCS2 image. Otherwise assume
645 			 * a native linux image.
646 			 */
647 			if (strcmp(elf_interpreter,"/usr/lib/libc.so.1") == 0 ||
648 			    strcmp(elf_interpreter,"/usr/lib/ld.so.1") == 0)
649 				ibcs2_interpreter = 1;
650 
651 			/*
652 			 * The early SET_PERSONALITY here is so that the lookup
653 			 * for the interpreter happens in the namespace of the
654 			 * to-be-execed image.  SET_PERSONALITY can select an
655 			 * alternate root.
656 			 *
657 			 * However, SET_PERSONALITY is NOT allowed to switch
658 			 * this task into the new image's memory mapping
659 			 * policy - that is, TASK_SIZE must still evaluate to
660 			 * that which is appropriate to the execing application.
661 			 * This is because exit_mmap() needs to have TASK_SIZE
662 			 * evaluate to the size of the old image.
663 			 *
664 			 * So if (say) a 64-bit application is execing a 32-bit
665 			 * application it is the architecture's responsibility
666 			 * to defer changing the value of TASK_SIZE until the
667 			 * switch really is going to happen - do this in
668 			 * flush_thread().	- akpm
669 			 */
670 			SET_PERSONALITY(loc->elf_ex, ibcs2_interpreter);
671 
672 			interpreter = open_exec(elf_interpreter);
673 			retval = PTR_ERR(interpreter);
674 			if (IS_ERR(interpreter))
675 				goto out_free_interp;
676 			retval = kernel_read(interpreter, 0, bprm->buf, BINPRM_BUF_SIZE);
677 			if (retval != BINPRM_BUF_SIZE) {
678 				if (retval >= 0)
679 					retval = -EIO;
680 				goto out_free_dentry;
681 			}
682 
683 			/* Get the exec headers */
684 			loc->interp_ex = *((struct exec *) bprm->buf);
685 			loc->interp_elf_ex = *((struct elfhdr *) bprm->buf);
686 			break;
687 		}
688 		elf_ppnt++;
689 	}
690 
691 	elf_ppnt = elf_phdata;
692 	for (i = 0; i < loc->elf_ex.e_phnum; i++, elf_ppnt++)
693 		if (elf_ppnt->p_type == PT_GNU_STACK) {
694 			if (elf_ppnt->p_flags & PF_X)
695 				executable_stack = EXSTACK_ENABLE_X;
696 			else
697 				executable_stack = EXSTACK_DISABLE_X;
698 			break;
699 		}
700 	have_pt_gnu_stack = (i < loc->elf_ex.e_phnum);
701 
702 	/* Some simple consistency checks for the interpreter */
703 	if (elf_interpreter) {
704 		interpreter_type = INTERPRETER_ELF | INTERPRETER_AOUT;
705 
706 		/* Now figure out which format our binary is */
707 		if ((N_MAGIC(loc->interp_ex) != OMAGIC) &&
708 		    (N_MAGIC(loc->interp_ex) != ZMAGIC) &&
709 		    (N_MAGIC(loc->interp_ex) != QMAGIC))
710 			interpreter_type = INTERPRETER_ELF;
711 
712 		if (memcmp(loc->interp_elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
713 			interpreter_type &= ~INTERPRETER_ELF;
714 
715 		retval = -ELIBBAD;
716 		if (!interpreter_type)
717 			goto out_free_dentry;
718 
719 		/* Make sure only one type was selected */
720 		if ((interpreter_type & INTERPRETER_ELF) &&
721 		     interpreter_type != INTERPRETER_ELF) {
722 	     		// FIXME - ratelimit this before re-enabling
723 			// printk(KERN_WARNING "ELF: Ambiguous type, using ELF\n");
724 			interpreter_type = INTERPRETER_ELF;
725 		}
726 		/* Verify the interpreter has a valid arch */
727 		if ((interpreter_type == INTERPRETER_ELF) &&
728 		    !elf_check_arch(&loc->interp_elf_ex))
729 			goto out_free_dentry;
730 	} else {
731 		/* Executables without an interpreter also need a personality  */
732 		SET_PERSONALITY(loc->elf_ex, ibcs2_interpreter);
733 	}
734 
735 	/* OK, we are done with that, now set up the arg stuff,
736 	   and then start this sucker up */
737 
738 	if ((!bprm->sh_bang) && (interpreter_type == INTERPRETER_AOUT)) {
739 		char *passed_p = passed_fileno;
740 		sprintf(passed_fileno, "%d", elf_exec_fileno);
741 
742 		if (elf_interpreter) {
743 			retval = copy_strings_kernel(1, &passed_p, bprm);
744 			if (retval)
745 				goto out_free_dentry;
746 			bprm->argc++;
747 		}
748 	}
749 
750 	/* Flush all traces of the currently running executable */
751 	retval = flush_old_exec(bprm);
752 	if (retval)
753 		goto out_free_dentry;
754 
755 	/* Discard our unneeded old files struct */
756 	if (files) {
757 		steal_locks(files);
758 		put_files_struct(files);
759 		files = NULL;
760 	}
761 
762 	/* OK, this is the point of no return */
763 	current->mm->start_data = 0;
764 	current->mm->end_data = 0;
765 	current->mm->end_code = 0;
766 	current->mm->mmap = NULL;
767 	current->flags &= ~PF_FORKNOEXEC;
768 	current->mm->def_flags = def_flags;
769 
770 	/* Do this immediately, since STACK_TOP as used in setup_arg_pages
771 	   may depend on the personality.  */
772 	SET_PERSONALITY(loc->elf_ex, ibcs2_interpreter);
773 	if (elf_read_implies_exec(loc->elf_ex, executable_stack))
774 		current->personality |= READ_IMPLIES_EXEC;
775 
776 	if ( !(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
777 		current->flags |= PF_RANDOMIZE;
778 	arch_pick_mmap_layout(current->mm);
779 
780 	/* Do this so that we can load the interpreter, if need be.  We will
781 	   change some of these later */
782 	current->mm->free_area_cache = current->mm->mmap_base;
783 	current->mm->cached_hole_size = 0;
784 	retval = setup_arg_pages(bprm, randomize_stack_top(STACK_TOP),
785 				 executable_stack);
786 	if (retval < 0) {
787 		send_sig(SIGKILL, current, 0);
788 		goto out_free_dentry;
789 	}
790 
791 	current->mm->start_stack = bprm->p;
792 
793 	/* Now we do a little grungy work by mmapping the ELF image into
794 	   the correct location in memory.  At this point, we assume that
795 	   the image should be loaded at a fixed address, not at a variable
796 	   address. */
797 
798 	for(i = 0, elf_ppnt = elf_phdata; i < loc->elf_ex.e_phnum; i++, elf_ppnt++) {
799 		int elf_prot = 0, elf_flags;
800 		unsigned long k, vaddr;
801 
802 		if (elf_ppnt->p_type != PT_LOAD)
803 			continue;
804 
805 		if (unlikely (elf_brk > elf_bss)) {
806 			unsigned long nbyte;
807 
808 			/* There was a PT_LOAD segment with p_memsz > p_filesz
809 			   before this one. Map anonymous pages, if needed,
810 			   and clear the area.  */
811 			retval = set_brk (elf_bss + load_bias,
812 					  elf_brk + load_bias);
813 			if (retval) {
814 				send_sig(SIGKILL, current, 0);
815 				goto out_free_dentry;
816 			}
817 			nbyte = ELF_PAGEOFFSET(elf_bss);
818 			if (nbyte) {
819 				nbyte = ELF_MIN_ALIGN - nbyte;
820 				if (nbyte > elf_brk - elf_bss)
821 					nbyte = elf_brk - elf_bss;
822 				if (clear_user((void __user *)elf_bss +
823 							load_bias, nbyte)) {
824 					/*
825 					 * This bss-zeroing can fail if the ELF
826 					 * file specifies odd protections.  So
827 					 * we don't check the return value
828 					 */
829 				}
830 			}
831 		}
832 
833 		if (elf_ppnt->p_flags & PF_R) elf_prot |= PROT_READ;
834 		if (elf_ppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
835 		if (elf_ppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;
836 
837 		elf_flags = MAP_PRIVATE|MAP_DENYWRITE|MAP_EXECUTABLE;
838 
839 		vaddr = elf_ppnt->p_vaddr;
840 		if (loc->elf_ex.e_type == ET_EXEC || load_addr_set) {
841 			elf_flags |= MAP_FIXED;
842 		} else if (loc->elf_ex.e_type == ET_DYN) {
843 			/* Try and get dynamic programs out of the way of the default mmap
844 			   base, as well as whatever program they might try to exec.  This
845 			   is because the brk will follow the loader, and is not movable.  */
846 			load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
847 		}
848 
849 		error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt, elf_prot, elf_flags);
850 		if (BAD_ADDR(error)) {
851 			send_sig(SIGKILL, current, 0);
852 			goto out_free_dentry;
853 		}
854 
855 		if (!load_addr_set) {
856 			load_addr_set = 1;
857 			load_addr = (elf_ppnt->p_vaddr - elf_ppnt->p_offset);
858 			if (loc->elf_ex.e_type == ET_DYN) {
859 				load_bias += error -
860 				             ELF_PAGESTART(load_bias + vaddr);
861 				load_addr += load_bias;
862 				reloc_func_desc = load_bias;
863 			}
864 		}
865 		k = elf_ppnt->p_vaddr;
866 		if (k < start_code) start_code = k;
867 		if (start_data < k) start_data = k;
868 
869 		/*
870 		 * Check to see if the segment's size will overflow the
871 		 * allowed task size. Note that p_filesz must always be
872 		 * <= p_memsz so it is only necessary to check p_memsz.
873 		 */
874 		if (k > TASK_SIZE || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
875 		    elf_ppnt->p_memsz > TASK_SIZE ||
876 		    TASK_SIZE - elf_ppnt->p_memsz < k) {
877 			/* set_brk can never work.  Avoid overflows.  */
878 			send_sig(SIGKILL, current, 0);
879 			goto out_free_dentry;
880 		}
881 
882 		k = elf_ppnt->p_vaddr + elf_ppnt->p_filesz;
883 
884 		if (k > elf_bss)
885 			elf_bss = k;
886 		if ((elf_ppnt->p_flags & PF_X) && end_code < k)
887 			end_code = k;
888 		if (end_data < k)
889 			end_data = k;
890 		k = elf_ppnt->p_vaddr + elf_ppnt->p_memsz;
891 		if (k > elf_brk)
892 			elf_brk = k;
893 	}
894 
895 	loc->elf_ex.e_entry += load_bias;
896 	elf_bss += load_bias;
897 	elf_brk += load_bias;
898 	start_code += load_bias;
899 	end_code += load_bias;
900 	start_data += load_bias;
901 	end_data += load_bias;
902 
903 	/* Calling set_brk effectively mmaps the pages that we need
904 	 * for the bss and break sections.  We must do this before
905 	 * mapping in the interpreter, to make sure it doesn't wind
906 	 * up getting placed where the bss needs to go.
907 	 */
908 	retval = set_brk(elf_bss, elf_brk);
909 	if (retval) {
910 		send_sig(SIGKILL, current, 0);
911 		goto out_free_dentry;
912 	}
913 	if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
914 		send_sig(SIGSEGV, current, 0);
915 		retval = -EFAULT; /* Nobody gets to see this, but.. */
916 		goto out_free_dentry;
917 	}
918 
919 	if (elf_interpreter) {
920 		if (interpreter_type == INTERPRETER_AOUT)
921 			elf_entry = load_aout_interp(&loc->interp_ex,
922 						     interpreter);
923 		else
924 			elf_entry = load_elf_interp(&loc->interp_elf_ex,
925 						    interpreter,
926 						    &interp_load_addr);
927 		if (BAD_ADDR(elf_entry)) {
928 			printk(KERN_ERR "Unable to load interpreter %.128s\n",
929 				elf_interpreter);
930 			force_sig(SIGSEGV, current);
931 			retval = -ENOEXEC; /* Nobody gets to see this, but.. */
932 			goto out_free_dentry;
933 		}
934 		reloc_func_desc = interp_load_addr;
935 
936 		allow_write_access(interpreter);
937 		fput(interpreter);
938 		kfree(elf_interpreter);
939 	} else {
940 		elf_entry = loc->elf_ex.e_entry;
941 		if (BAD_ADDR(elf_entry)) {
942 			send_sig(SIGSEGV, current, 0);
943 			retval = -ENOEXEC; /* Nobody gets to see this, but.. */
944 			goto out_free_dentry;
945 		}
946 	}
947 
948 	kfree(elf_phdata);
949 
950 	if (interpreter_type != INTERPRETER_AOUT)
951 		sys_close(elf_exec_fileno);
952 
953 	set_binfmt(&elf_format);
954 
955 #ifdef ARCH_HAS_SETUP_ADDITIONAL_PAGES
956 	retval = arch_setup_additional_pages(bprm, executable_stack);
957 	if (retval < 0) {
958 		send_sig(SIGKILL, current, 0);
959 		goto out;
960 	}
961 #endif /* ARCH_HAS_SETUP_ADDITIONAL_PAGES */
962 
963 	compute_creds(bprm);
964 	current->flags &= ~PF_FORKNOEXEC;
965 	create_elf_tables(bprm, &loc->elf_ex, (interpreter_type == INTERPRETER_AOUT),
966 			load_addr, interp_load_addr);
967 	/* N.B. passed_fileno might not be initialized? */
968 	if (interpreter_type == INTERPRETER_AOUT)
969 		current->mm->arg_start += strlen(passed_fileno) + 1;
970 	current->mm->end_code = end_code;
971 	current->mm->start_code = start_code;
972 	current->mm->start_data = start_data;
973 	current->mm->end_data = end_data;
974 	current->mm->start_stack = bprm->p;
975 
976 	if (current->personality & MMAP_PAGE_ZERO) {
977 		/* Why this, you ask???  Well SVr4 maps page 0 as read-only,
978 		   and some applications "depend" upon this behavior.
979 		   Since we do not have the power to recompile these, we
980 		   emulate the SVr4 behavior.  Sigh.  */
981 		down_write(&current->mm->mmap_sem);
982 		error = do_mmap(NULL, 0, PAGE_SIZE, PROT_READ | PROT_EXEC,
983 				MAP_FIXED | MAP_PRIVATE, 0);
984 		up_write(&current->mm->mmap_sem);
985 	}
986 
987 #ifdef ELF_PLAT_INIT
988 	/*
989 	 * The ABI may specify that certain registers be set up in special
990 	 * ways (on i386 %edx is the address of a DT_FINI function, for
991 	 * example).  In addition, it may also specify (e.g., PowerPC64 ELF)
992 	 * that the e_entry field is the address of the function descriptor
993 	 * for the startup routine, rather than the address of the startup
994 	 * routine itself.  This macro performs whatever initialization to
995 	 * the regs structure is required as well as any relocations to the
996 	 * function descriptor entries when executing dynamically linked apps.
997 	 */
998 	ELF_PLAT_INIT(regs, reloc_func_desc);
999 #endif
1000 
1001 	start_thread(regs, elf_entry, bprm->p);
1002 	if (unlikely(current->ptrace & PT_PTRACED)) {
1003 		if (current->ptrace & PT_TRACE_EXEC)
1004 			ptrace_notify ((PTRACE_EVENT_EXEC << 8) | SIGTRAP);
1005 		else
1006 			send_sig(SIGTRAP, current, 0);
1007 	}
1008 	retval = 0;
1009 out:
1010 	kfree(loc);
1011 out_ret:
1012 	return retval;
1013 
1014 	/* error cleanup */
1015 out_free_dentry:
1016 	allow_write_access(interpreter);
1017 	if (interpreter)
1018 		fput(interpreter);
1019 out_free_interp:
1020 	kfree(elf_interpreter);
1021 out_free_file:
1022 	sys_close(elf_exec_fileno);
1023 out_free_fh:
1024 	if (files) {
1025 		put_files_struct(current->files);
1026 		current->files = files;
1027 	}
1028 out_free_ph:
1029 	kfree(elf_phdata);
1030 	goto out;
1031 }
1032 
1033 /* This is really simpleminded and specialized - we are loading an
1034    a.out library that is given an ELF header. */
1035 
1036 static int load_elf_library(struct file *file)
1037 {
1038 	struct elf_phdr *elf_phdata;
1039 	struct elf_phdr *eppnt;
1040 	unsigned long elf_bss, bss, len;
1041 	int retval, error, i, j;
1042 	struct elfhdr elf_ex;
1043 
1044 	error = -ENOEXEC;
1045 	retval = kernel_read(file, 0, (char *) &elf_ex, sizeof(elf_ex));
1046 	if (retval != sizeof(elf_ex))
1047 		goto out;
1048 
1049 	if (memcmp(elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
1050 		goto out;
1051 
1052 	/* First of all, some simple consistency checks */
1053 	if (elf_ex.e_type != ET_EXEC || elf_ex.e_phnum > 2 ||
1054 	   !elf_check_arch(&elf_ex) || !file->f_op || !file->f_op->mmap)
1055 		goto out;
1056 
1057 	/* Now read in all of the header information */
1058 
1059 	j = sizeof(struct elf_phdr) * elf_ex.e_phnum;
1060 	/* j < ELF_MIN_ALIGN because elf_ex.e_phnum <= 2 */
1061 
1062 	error = -ENOMEM;
1063 	elf_phdata = kmalloc(j, GFP_KERNEL);
1064 	if (!elf_phdata)
1065 		goto out;
1066 
1067 	eppnt = elf_phdata;
1068 	error = -ENOEXEC;
1069 	retval = kernel_read(file, elf_ex.e_phoff, (char *)eppnt, j);
1070 	if (retval != j)
1071 		goto out_free_ph;
1072 
1073 	for (j = 0, i = 0; i<elf_ex.e_phnum; i++)
1074 		if ((eppnt + i)->p_type == PT_LOAD)
1075 			j++;
1076 	if (j != 1)
1077 		goto out_free_ph;
1078 
1079 	while (eppnt->p_type != PT_LOAD)
1080 		eppnt++;
1081 
1082 	/* Now use mmap to map the library into memory. */
1083 	down_write(&current->mm->mmap_sem);
1084 	error = do_mmap(file,
1085 			ELF_PAGESTART(eppnt->p_vaddr),
1086 			(eppnt->p_filesz +
1087 			 ELF_PAGEOFFSET(eppnt->p_vaddr)),
1088 			PROT_READ | PROT_WRITE | PROT_EXEC,
1089 			MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE,
1090 			(eppnt->p_offset -
1091 			 ELF_PAGEOFFSET(eppnt->p_vaddr)));
1092 	up_write(&current->mm->mmap_sem);
1093 	if (error != ELF_PAGESTART(eppnt->p_vaddr))
1094 		goto out_free_ph;
1095 
1096 	elf_bss = eppnt->p_vaddr + eppnt->p_filesz;
1097 	if (padzero(elf_bss)) {
1098 		error = -EFAULT;
1099 		goto out_free_ph;
1100 	}
1101 
1102 	len = ELF_PAGESTART(eppnt->p_filesz + eppnt->p_vaddr + ELF_MIN_ALIGN - 1);
1103 	bss = eppnt->p_memsz + eppnt->p_vaddr;
1104 	if (bss > len) {
1105 		down_write(&current->mm->mmap_sem);
1106 		do_brk(len, bss - len);
1107 		up_write(&current->mm->mmap_sem);
1108 	}
1109 	error = 0;
1110 
1111 out_free_ph:
1112 	kfree(elf_phdata);
1113 out:
1114 	return error;
1115 }
1116 
1117 /*
1118  * Note that some platforms still use traditional core dumps and not
1119  * the ELF core dump.  Each platform can select it as appropriate.
1120  */
1121 #if defined(USE_ELF_CORE_DUMP) && defined(CONFIG_ELF_CORE)
1122 
1123 /*
1124  * ELF core dumper
1125  *
1126  * Modelled on fs/exec.c:aout_core_dump()
1127  * Jeremy Fitzhardinge <jeremy@sw.oz.au>
1128  */
1129 /*
1130  * These are the only things you should do on a core-file: use only these
1131  * functions to write out all the necessary info.
1132  */
1133 static int dump_write(struct file *file, const void *addr, int nr)
1134 {
1135 	return file->f_op->write(file, addr, nr, &file->f_pos) == nr;
1136 }
1137 
1138 static int dump_seek(struct file *file, loff_t off)
1139 {
1140 	if (file->f_op->llseek) {
1141 		if (file->f_op->llseek(file, off, 0) != off)
1142 			return 0;
1143 	} else
1144 		file->f_pos = off;
1145 	return 1;
1146 }
1147 
1148 /*
1149  * Decide whether a segment is worth dumping; default is yes to be
1150  * sure (missing info is worse than too much; etc).
1151  * Personally I'd include everything, and use the coredump limit...
1152  *
1153  * I think we should skip something. But I am not sure how. H.J.
1154  */
1155 static int maydump(struct vm_area_struct *vma)
1156 {
1157 	/* Do not dump I/O mapped devices or special mappings */
1158 	if (vma->vm_flags & (VM_IO | VM_RESERVED))
1159 		return 0;
1160 
1161 	/* Dump shared memory only if mapped from an anonymous file.  */
1162 	if (vma->vm_flags & VM_SHARED)
1163 		return vma->vm_file->f_dentry->d_inode->i_nlink == 0;
1164 
1165 	/* If it hasn't been written to, don't write it out */
1166 	if (!vma->anon_vma)
1167 		return 0;
1168 
1169 	return 1;
1170 }
1171 
1172 #define roundup(x, y)  ((((x)+((y)-1))/(y))*(y))
1173 
1174 /* An ELF note in memory */
1175 struct memelfnote
1176 {
1177 	const char *name;
1178 	int type;
1179 	unsigned int datasz;
1180 	void *data;
1181 };
1182 
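/* On-disk size of a note: the header plus name and data, each padded to 4 bytes. */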
1183 static int notesize(struct memelfnote *en)
1184 {
1185 	int sz;
1186 
1187 	sz = sizeof(struct elf_note);
1188 	sz += roundup(strlen(en->name) + 1, 4);
1189 	sz += roundup(en->datasz, 4);
1190 
1191 	return sz;
1192 }
1193 
1194 #define DUMP_WRITE(addr, nr)	\
1195 	do { if (!dump_write(file, (addr), (nr))) return 0; } while(0)
1196 #define DUMP_SEEK(off)	\
1197 	do { if (!dump_seek(file, (off))) return 0; } while(0)
1198 
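/*
 * Write one note to the core file: the elf_note header, the name, then the
 * data, with 4-byte alignment padding after the name and after the data.
 * Returns 1 on success, 0 on failure.
 */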
1199 static int writenote(struct memelfnote *men, struct file *file)
1200 {
1201 	struct elf_note en;
1202 
1203 	en.n_namesz = strlen(men->name) + 1;
1204 	en.n_descsz = men->datasz;
1205 	en.n_type = men->type;
1206 
1207 	DUMP_WRITE(&en, sizeof(en));
1208 	DUMP_WRITE(men->name, en.n_namesz);
1209 	/* XXX - cast from long long to long to avoid need for libgcc.a */
1210 	DUMP_SEEK(roundup((unsigned long)file->f_pos, 4));	/* XXX */
1211 	DUMP_WRITE(men->data, men->datasz);
1212 	DUMP_SEEK(roundup((unsigned long)file->f_pos, 4));	/* XXX */
1213 
1214 	return 1;
1215 }
1216 #undef DUMP_WRITE
1217 #undef DUMP_SEEK
1218 
1219 #define DUMP_WRITE(addr, nr)	\
1220 	if ((size += (nr)) > limit || !dump_write(file, (addr), (nr))) \
1221 		goto end_coredump;
1222 #define DUMP_SEEK(off)	\
1223 	if (!dump_seek(file, (off))) \
1224 		goto end_coredump;
1225 
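/* Fill in an ET_CORE ELF header describing 'segs' program headers. */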
1226 static void fill_elf_header(struct elfhdr *elf, int segs)
1227 {
1228 	memcpy(elf->e_ident, ELFMAG, SELFMAG);
1229 	elf->e_ident[EI_CLASS] = ELF_CLASS;
1230 	elf->e_ident[EI_DATA] = ELF_DATA;
1231 	elf->e_ident[EI_VERSION] = EV_CURRENT;
1232 	elf->e_ident[EI_OSABI] = ELF_OSABI;
1233 	memset(elf->e_ident+EI_PAD, 0, EI_NIDENT-EI_PAD);
1234 
1235 	elf->e_type = ET_CORE;
1236 	elf->e_machine = ELF_ARCH;
1237 	elf->e_version = EV_CURRENT;
1238 	elf->e_entry = 0;
1239 	elf->e_phoff = sizeof(struct elfhdr);
1240 	elf->e_shoff = 0;
1241 	elf->e_flags = ELF_CORE_EFLAGS;
1242 	elf->e_ehsize = sizeof(struct elfhdr);
1243 	elf->e_phentsize = sizeof(struct elf_phdr);
1244 	elf->e_phnum = segs;
1245 	elf->e_shentsize = 0;
1246 	elf->e_shnum = 0;
1247 	elf->e_shstrndx = 0;
1248 	return;
1249 }
1250 
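/* Describe the PT_NOTE program header that will cover all the notes. */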
1251 static void fill_elf_note_phdr(struct elf_phdr *phdr, int sz, off_t offset)
1252 {
1253 	phdr->p_type = PT_NOTE;
1254 	phdr->p_offset = offset;
1255 	phdr->p_vaddr = 0;
1256 	phdr->p_paddr = 0;
1257 	phdr->p_filesz = sz;
1258 	phdr->p_memsz = 0;
1259 	phdr->p_flags = 0;
1260 	phdr->p_align = 0;
1261 	return;
1262 }
1263 
1264 static void fill_note(struct memelfnote *note, const char *name, int type,
1265 		unsigned int sz, void *data)
1266 {
1267 	note->name = name;
1268 	note->type = type;
1269 	note->datasz = sz;
1270 	note->data = data;
1271 	return;
1272 }
1273 
1274 /*
1275  * fill up all the fields in prstatus from the given task struct, except registers
1276  * which need to be filled up separately.
1277  */
1278 static void fill_prstatus(struct elf_prstatus *prstatus,
1279 			struct task_struct *p, long signr)
1280 {
1281 	prstatus->pr_info.si_signo = prstatus->pr_cursig = signr;
1282 	prstatus->pr_sigpend = p->pending.signal.sig[0];
1283 	prstatus->pr_sighold = p->blocked.sig[0];
1284 	prstatus->pr_pid = p->pid;
1285 	prstatus->pr_ppid = p->parent->pid;
1286 	prstatus->pr_pgrp = process_group(p);
1287 	prstatus->pr_sid = p->signal->session;
1288 	if (thread_group_leader(p)) {
1289 		/*
1290 		 * This is the record for the group leader.  Add in the
1291 		 * cumulative times of previous dead threads.  This total
1292 		 * won't include the time of each live thread whose state
1293 		 * is included in the core dump.  The final total reported
1294 		 * to our parent process when it calls wait4 will include
1295 		 * those sums as well as the little bit more time it takes
1296 		 * this and each other thread to finish dying after the
1297 		 * core dump synchronization phase.
1298 		 */
1299 		cputime_to_timeval(cputime_add(p->utime, p->signal->utime),
1300 				   &prstatus->pr_utime);
1301 		cputime_to_timeval(cputime_add(p->stime, p->signal->stime),
1302 				   &prstatus->pr_stime);
1303 	} else {
1304 		cputime_to_timeval(p->utime, &prstatus->pr_utime);
1305 		cputime_to_timeval(p->stime, &prstatus->pr_stime);
1306 	}
1307 	cputime_to_timeval(p->signal->cutime, &prstatus->pr_cutime);
1308 	cputime_to_timeval(p->signal->cstime, &prstatus->pr_cstime);
1309 }
1310 
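/*
 * Fill the NT_PRPSINFO note: the command line (read back from the
 * process's own argument pages, with NULs turned into spaces), pids,
 * run state, uid/gid, nice value and command name.
 */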
1311 static int fill_psinfo(struct elf_prpsinfo *psinfo, struct task_struct *p,
1312 		       struct mm_struct *mm)
1313 {
1314 	unsigned int i, len;
1315 
1316 	/* first copy the parameters from user space */
1317 	memset(psinfo, 0, sizeof(struct elf_prpsinfo));
1318 
1319 	len = mm->arg_end - mm->arg_start;
1320 	if (len >= ELF_PRARGSZ)
1321 		len = ELF_PRARGSZ-1;
1322 	if (copy_from_user(&psinfo->pr_psargs,
1323 		           (const char __user *)mm->arg_start, len))
1324 		return -EFAULT;
1325 	for(i = 0; i < len; i++)
1326 		if (psinfo->pr_psargs[i] == 0)
1327 			psinfo->pr_psargs[i] = ' ';
1328 	psinfo->pr_psargs[len] = 0;
1329 
1330 	psinfo->pr_pid = p->pid;
1331 	psinfo->pr_ppid = p->parent->pid;
1332 	psinfo->pr_pgrp = process_group(p);
1333 	psinfo->pr_sid = p->signal->session;
1334 
1335 	i = p->state ? ffz(~p->state) + 1 : 0;
1336 	psinfo->pr_state = i;
1337 	psinfo->pr_sname = (i < 0 || i > 5) ? '.' : "RSDTZW"[i];
1338 	psinfo->pr_zomb = psinfo->pr_sname == 'Z';
1339 	psinfo->pr_nice = task_nice(p);
1340 	psinfo->pr_flag = p->flags;
1341 	SET_UID(psinfo->pr_uid, p->uid);
1342 	SET_GID(psinfo->pr_gid, p->gid);
1343 	strncpy(psinfo->pr_fname, p->comm, sizeof(psinfo->pr_fname));
1344 
1345 	return 0;
1346 }
1347 
1348 /* Here is the structure in which status of each thread is captured. */
1349 struct elf_thread_status
1350 {
1351 	struct list_head list;
1352 	struct elf_prstatus prstatus;	/* NT_PRSTATUS */
1353 	elf_fpregset_t fpu;		/* NT_PRFPREG */
1354 	struct task_struct *thread;
1355 #ifdef ELF_CORE_COPY_XFPREGS
1356 	elf_fpxregset_t xfpu;		/* NT_PRXFPREG */
1357 #endif
1358 	struct memelfnote notes[3];
1359 	int num_notes;
1360 };
1361 
1362 /*
1363  * In order to add the specific thread information for the ELF file format,
1364  * we need to keep a linked list of every thread's pr_status and then
1365  * create a single section for them in the final core file.
1366  */
1367 static int elf_dump_thread_status(long signr, struct elf_thread_status *t)
1368 {
1369 	int sz = 0;
1370 	struct task_struct *p = t->thread;
1371 	t->num_notes = 0;
1372 
1373 	fill_prstatus(&t->prstatus, p, signr);
1374 	elf_core_copy_task_regs(p, &t->prstatus.pr_reg);
1375 
1376 	fill_note(&t->notes[0], "CORE", NT_PRSTATUS, sizeof(t->prstatus), &(t->prstatus));
1377 	t->num_notes++;
1378 	sz += notesize(&t->notes[0]);
1379 
1380 	if ((t->prstatus.pr_fpvalid = elf_core_copy_task_fpregs(p, NULL, &t->fpu))) {
1381 		fill_note(&t->notes[1], "CORE", NT_PRFPREG, sizeof(t->fpu), &(t->fpu));
1382 		t->num_notes++;
1383 		sz += notesize(&t->notes[1]);
1384 	}
1385 
1386 #ifdef ELF_CORE_COPY_XFPREGS
1387 	if (elf_core_copy_task_xfpregs(p, &t->xfpu)) {
1388 		fill_note(&t->notes[2], "LINUX", NT_PRXFPREG, sizeof(t->xfpu), &t->xfpu);
1389 		t->num_notes++;
1390 		sz += notesize(&t->notes[2]);
1391 	}
1392 #endif
1393 	return sz;
1394 }
1395 
1396 /*
1397  * Actual dumper
1398  *
1399  * This is a two-pass process; first we find the offsets of the bits,
1400  * and then they are actually written out.  If we run out of core limit
1401  * we just truncate.
1402  */
1403 static int elf_core_dump(long signr, struct pt_regs * regs, struct file * file)
1404 {
1405 #define	NUM_NOTES	6
1406 	int has_dumped = 0;
1407 	mm_segment_t fs;
1408 	int segs;
1409 	size_t size = 0;
1410 	int i;
1411 	struct vm_area_struct *vma;
1412 	struct elfhdr *elf = NULL;
1413 	off_t offset = 0, dataoff;
1414 	unsigned long limit = current->signal->rlim[RLIMIT_CORE].rlim_cur;
1415 	int numnote;
1416 	struct memelfnote *notes = NULL;
1417 	struct elf_prstatus *prstatus = NULL;	/* NT_PRSTATUS */
1418 	struct elf_prpsinfo *psinfo = NULL;	/* NT_PRPSINFO */
1419  	struct task_struct *g, *p;
1420  	LIST_HEAD(thread_list);
1421  	struct list_head *t;
1422 	elf_fpregset_t *fpu = NULL;
1423 #ifdef ELF_CORE_COPY_XFPREGS
1424 	elf_fpxregset_t *xfpu = NULL;
1425 #endif
1426 	int thread_status_size = 0;
1427 	elf_addr_t *auxv;
1428 
1429 	/*
1430 	 * We no longer stop all VM operations.
1431 	 *
1432 	 * This is because those processes that could possibly change map_count or
1433 	 * the mmap / vma pages are now blocked in do_exit on current finishing
1434 	 * this core dump.
1435 	 *
1436 	 * Only ptrace can touch these memory addresses, but it doesn't change
1437 	 * the map_count or the pages allocated.  So no possibility of crashing
1438 	 * exists while dumping the mm->vm_next areas to the core file.
1439 	 */
1440 
1441 	/* alloc memory for large data structures: too large to be on stack */
1442 	elf = kmalloc(sizeof(*elf), GFP_KERNEL);
1443 	if (!elf)
1444 		goto cleanup;
1445 	prstatus = kmalloc(sizeof(*prstatus), GFP_KERNEL);
1446 	if (!prstatus)
1447 		goto cleanup;
1448 	psinfo = kmalloc(sizeof(*psinfo), GFP_KERNEL);
1449 	if (!psinfo)
1450 		goto cleanup;
1451 	notes = kmalloc(NUM_NOTES * sizeof(struct memelfnote), GFP_KERNEL);
1452 	if (!notes)
1453 		goto cleanup;
1454 	fpu = kmalloc(sizeof(*fpu), GFP_KERNEL);
1455 	if (!fpu)
1456 		goto cleanup;
1457 #ifdef ELF_CORE_COPY_XFPREGS
1458 	xfpu = kmalloc(sizeof(*xfpu), GFP_KERNEL);
1459 	if (!xfpu)
1460 		goto cleanup;
1461 #endif
1462 
1463 	if (signr) {
1464 		struct elf_thread_status *tmp;
1465 		read_lock(&tasklist_lock);
1466 		do_each_thread(g,p)
1467 			if (current->mm == p->mm && current != p) {
1468 				tmp = kmalloc(sizeof(*tmp), GFP_ATOMIC);
1469 				if (!tmp) {
1470 					read_unlock(&tasklist_lock);
1471 					goto cleanup;
1472 				}
1473 				memset(tmp, 0, sizeof(*tmp));
1474 				INIT_LIST_HEAD(&tmp->list);
1475 				tmp->thread = p;
1476 				list_add(&tmp->list, &thread_list);
1477 			}
1478 		while_each_thread(g,p);
1479 		read_unlock(&tasklist_lock);
1480 		list_for_each(t, &thread_list) {
1481 			struct elf_thread_status *tmp;
1482 			int sz;
1483 
1484 			tmp = list_entry(t, struct elf_thread_status, list);
1485 			sz = elf_dump_thread_status(signr, tmp);
1486 			thread_status_size += sz;
1487 		}
1488 	}
1489 	/* now collect the dump for the current */
1490 	memset(prstatus, 0, sizeof(*prstatus));
1491 	fill_prstatus(prstatus, current, signr);
1492 	elf_core_copy_regs(&prstatus->pr_reg, regs);
1493 
1494 	segs = current->mm->map_count;
1495 #ifdef ELF_CORE_EXTRA_PHDRS
1496 	segs += ELF_CORE_EXTRA_PHDRS;
1497 #endif
1498 
1499 	/* Set up header */
1500 	fill_elf_header(elf, segs+1);	/* including notes section */
1501 
1502 	has_dumped = 1;
1503 	current->flags |= PF_DUMPCORE;
1504 
1505 	/*
1506 	 * Set up the notes in similar form to SVR4 core dumps made
1507 	 * with info from their /proc.
1508 	 */
1509 
1510 	fill_note(notes +0, "CORE", NT_PRSTATUS, sizeof(*prstatus), prstatus);
1511 
1512 	fill_psinfo(psinfo, current->group_leader, current->mm);
1513 	fill_note(notes +1, "CORE", NT_PRPSINFO, sizeof(*psinfo), psinfo);
1514 
1515 	numnote = 2;
1516 
1517 	auxv = (elf_addr_t *) current->mm->saved_auxv;
1518 
1519 	i = 0;
1520 	do
1521 		i += 2;
1522 	while (auxv[i - 2] != AT_NULL);
1523 	fill_note(&notes[numnote++], "CORE", NT_AUXV,
1524 		  i * sizeof (elf_addr_t), auxv);
1525 
1526   	/* Try to dump the FPU. */
1527 	if ((prstatus->pr_fpvalid = elf_core_copy_task_fpregs(current, regs, fpu)))
1528 		fill_note(notes + numnote++,
1529 			  "CORE", NT_PRFPREG, sizeof(*fpu), fpu);
1530 #ifdef ELF_CORE_COPY_XFPREGS
1531 	if (elf_core_copy_task_xfpregs(current, xfpu))
1532 		fill_note(notes + numnote++,
1533 			  "LINUX", NT_PRXFPREG, sizeof(*xfpu), xfpu);
1534 #endif
1535 
1536 	fs = get_fs();
1537 	set_fs(KERNEL_DS);
1538 
1539 	DUMP_WRITE(elf, sizeof(*elf));
1540 	offset += sizeof(*elf);				/* Elf header */
1541 	offset += (segs+1) * sizeof(struct elf_phdr);	/* Program headers */
1542 
1543 	/* Write notes phdr entry */
1544 	{
1545 		struct elf_phdr phdr;
1546 		int sz = 0;
1547 
1548 		for (i = 0; i < numnote; i++)
1549 			sz += notesize(notes + i);
1550 
1551 		sz += thread_status_size;
1552 
1553 		fill_elf_note_phdr(&phdr, sz, offset);
1554 		offset += sz;
1555 		DUMP_WRITE(&phdr, sizeof(phdr));
1556 	}
1557 
1558 	/* Page-align dumped data */
1559 	dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
1560 
1561 	/* Write program headers for segments dump */
1562 	for (vma = current->mm->mmap; vma != NULL; vma = vma->vm_next) {
1563 		struct elf_phdr phdr;
1564 		size_t sz;
1565 
1566 		sz = vma->vm_end - vma->vm_start;
1567 
1568 		phdr.p_type = PT_LOAD;
1569 		phdr.p_offset = offset;
1570 		phdr.p_vaddr = vma->vm_start;
1571 		phdr.p_paddr = 0;
1572 		phdr.p_filesz = maydump(vma) ? sz : 0;
1573 		phdr.p_memsz = sz;
1574 		offset += phdr.p_filesz;
1575 		phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
1576 		if (vma->vm_flags & VM_WRITE) phdr.p_flags |= PF_W;
1577 		if (vma->vm_flags & VM_EXEC) phdr.p_flags |= PF_X;
1578 		phdr.p_align = ELF_EXEC_PAGESIZE;
1579 
1580 		DUMP_WRITE(&phdr, sizeof(phdr));
1581 	}
1582 
1583 #ifdef ELF_CORE_WRITE_EXTRA_PHDRS
1584 	ELF_CORE_WRITE_EXTRA_PHDRS;
1585 #endif
1586 
1587  	/* write out the notes section */
1588 	for (i = 0; i < numnote; i++)
1589 		if (!writenote(notes + i, file))
1590 			goto end_coredump;
1591 
1592 	/* write out the thread status notes section */
1593 	list_for_each(t, &thread_list) {
1594 		struct elf_thread_status *tmp = list_entry(t, struct elf_thread_status, list);
1595 		for (i = 0; i < tmp->num_notes; i++)
1596 			if (!writenote(&tmp->notes[i], file))
1597 				goto end_coredump;
1598 	}
1599 
1600 	DUMP_SEEK(dataoff);
1601 
1602 	for (vma = current->mm->mmap; vma != NULL; vma = vma->vm_next) {
1603 		unsigned long addr;
1604 
1605 		if (!maydump(vma))
1606 			continue;
1607 
1608 		for (addr = vma->vm_start;
1609 		     addr < vma->vm_end;
1610 		     addr += PAGE_SIZE) {
1611 			struct page* page;
1612 			struct vm_area_struct *vma;
1613 
1614 			if (get_user_pages(current, current->mm, addr, 1, 0, 1,
1615 						&page, &vma) <= 0) {
1616 				DUMP_SEEK (file->f_pos + PAGE_SIZE);
1617 			} else {
1618 				if (page == ZERO_PAGE(addr)) {
1619 					DUMP_SEEK (file->f_pos + PAGE_SIZE);
1620 				} else {
1621 					void *kaddr;
1622 					flush_cache_page(vma, addr, page_to_pfn(page));
1623 					kaddr = kmap(page);
1624 					if ((size += PAGE_SIZE) > limit ||
1625 					    !dump_write(file, kaddr,
1626 					    PAGE_SIZE)) {
1627 						kunmap(page);
1628 						page_cache_release(page);
1629 						goto end_coredump;
1630 					}
1631 					kunmap(page);
1632 				}
1633 				page_cache_release(page);
1634 			}
1635 		}
1636 	}
1637 
1638 #ifdef ELF_CORE_WRITE_EXTRA_DATA
1639 	ELF_CORE_WRITE_EXTRA_DATA;
1640 #endif
1641 
1642 	if ((off_t)file->f_pos != offset) {
1643 		/* Sanity check */
1644 		printk(KERN_WARNING "elf_core_dump: file->f_pos (%ld) != offset (%ld)\n",
1645 		       (off_t)file->f_pos, offset);
1646 	}
1647 
1648 end_coredump:
1649 	set_fs(fs);
1650 
1651 cleanup:
1652 	while (!list_empty(&thread_list)) {
1653 		struct list_head *tmp = thread_list.next;
1654 		list_del(tmp);
1655 		kfree(list_entry(tmp, struct elf_thread_status, list));
1656 	}
1657 
1658 	kfree(elf);
1659 	kfree(prstatus);
1660 	kfree(psinfo);
1661 	kfree(notes);
1662 	kfree(fpu);
1663 #ifdef ELF_CORE_COPY_XFPREGS
1664 	kfree(xfpu);
1665 #endif
1666 	return has_dumped;
1667 #undef NUM_NOTES
1668 }
1669 
1670 #endif		/* USE_ELF_CORE_DUMP */
1671 
1672 static int __init init_elf_binfmt(void)
1673 {
1674 	return register_binfmt(&elf_format);
1675 }
1676 
1677 static void __exit exit_elf_binfmt(void)
1678 {
1679 	/* Remove the ELF loader. */
1680 	unregister_binfmt(&elf_format);
1681 }
1682 
1683 core_initcall(init_elf_binfmt);
1684 module_exit(exit_elf_binfmt);
1685 MODULE_LICENSE("GPL");
1686