xref: /linux/fs/coredump.c (revision 5647ac0ad4f355817b788372a01cb293ed63bde4)
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/mm.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/swap.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/perf_event.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
#include <linux/key.h>
#include <linux/personality.h>
#include <linux/binfmts.h>
#include <linux/coredump.h>
#include <linux/utsname.h>
#include <linux/pid_namespace.h>
#include <linux/module.h>
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/tsacct_kern.h>
#include <linux/cn_proc.h>
#include <linux/audit.h>
#include <linux/tracehook.h>
#include <linux/kmod.h>
#include <linux/fsnotify.h>
#include <linux/fs_struct.h>
#include <linux/pipe_fs_i.h>
#include <linux/oom.h>
#include <linux/compat.h>

#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>
#include <asm/exec.h>

#include <trace/events/task.h>
#include "internal.h"
#include "coredump.h"

#include <trace/events/sched.h>

int core_uses_pid;
char core_pattern[CORENAME_MAX_SIZE] = "core";
unsigned int core_pipe_limit;

struct core_name {
	char *corename;
	int used, size;
};
static atomic_t call_count = ATOMIC_INIT(1);

/* The maximal length of core_pattern is also specified in sysctl.c */

static int expand_corename(struct core_name *cn)
{
	char *old_corename = cn->corename;

	cn->size = CORENAME_MAX_SIZE * atomic_inc_return(&call_count);
	cn->corename = krealloc(old_corename, cn->size, GFP_KERNEL);

	if (!cn->corename) {
		kfree(old_corename);
		return -ENOMEM;
	}

	return 0;
}

static int cn_printf(struct core_name *cn, const char *fmt, ...)
{
	char *cur;
	int need;
	int ret;
	va_list arg;

	va_start(arg, fmt);
	need = vsnprintf(NULL, 0, fmt, arg);
	va_end(arg);

	/*
	 * Grow the buffer until the formatted result is guaranteed to
	 * fit.  expand_corename() enlarges it by at least
	 * CORENAME_MAX_SIZE per call, so the loop terminates even when
	 * a single expansion is not enough (e.g. for a long %E path).
	 */
	while (need >= cn->size - cn->used - 1) {
		ret = expand_corename(cn);
		if (ret)
			return ret;
	}

	cur = cn->corename + cn->used;
	va_start(arg, fmt);
	vsnprintf(cur, need + 1, fmt, arg);
	va_end(arg);
	cn->used += need;
	return 0;
}
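
/*
 * Worked example (illustrative, assuming CORENAME_MAX_SIZE is 128 and
 * call_count is still at its initial value of 1): format_corename()
 * first allocates 128 bytes.  If 60 of them are already used and
 * cn_printf() needs another 200 for a long %E path, the loop above
 * expands the buffer twice (to 256, then 384 bytes) before
 * vsnprintf() writes the result in place.
 */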

static void cn_escape(char *str)
{
	for (; *str; str++)
		if (*str == '/')
			*str = '!';
}
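
/*
 * Example (illustrative): applied to a buffer holding "usr/bin/myapp",
 * cn_escape() rewrites it in place to "usr!bin!myapp", so an expanded
 * %h or %E value can never smuggle extra path components into the
 * core file name.
 */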

static int cn_print_exe_file(struct core_name *cn)
{
	struct file *exe_file;
	char *pathbuf, *path;
	int ret;

	exe_file = get_mm_exe_file(current->mm);
	if (!exe_file) {
		char *commstart = cn->corename + cn->used;
		ret = cn_printf(cn, "%s (path unknown)", current->comm);
		cn_escape(commstart);
		return ret;
	}

	pathbuf = kmalloc(PATH_MAX, GFP_TEMPORARY);
	if (!pathbuf) {
		ret = -ENOMEM;
		goto put_exe_file;
	}

	path = d_path(&exe_file->f_path, pathbuf, PATH_MAX);
	if (IS_ERR(path)) {
		ret = PTR_ERR(path);
		goto free_buf;
	}

	cn_escape(path);

	ret = cn_printf(cn, "%s", path);

free_buf:
	kfree(pathbuf);
put_exe_file:
	fput(exe_file);
	return ret;
}

/* format_corename will inspect the core_pattern string and expand it
 * into cn->corename, allocating the buffer and growing it on demand.
 * Returns 1 if the pattern names a pipe helper ('|' prefix), 0 if it
 * names a plain file, or a negative errno on failure.
 */
static int format_corename(struct core_name *cn, struct coredump_params *cprm)
{
	const struct cred *cred = current_cred();
	const char *pat_ptr = core_pattern;
	int ispipe = (*pat_ptr == '|');
	int pid_in_pattern = 0;
	int err = 0;

	cn->size = CORENAME_MAX_SIZE * atomic_read(&call_count);
	cn->corename = kmalloc(cn->size, GFP_KERNEL);
	cn->used = 0;

	if (!cn->corename)
		return -ENOMEM;

	/* Repeat as long as we have more pattern to process */
	while (*pat_ptr) {
		if (*pat_ptr != '%') {
			err = cn_printf(cn, "%c", *pat_ptr++);
		} else {
			switch (*++pat_ptr) {
			/* single % at the end, drop that */
			case 0:
				goto out;
			/* Double percent, output one percent */
			case '%':
				err = cn_printf(cn, "%c", '%');
				break;
			/* pid */
			case 'p':
				pid_in_pattern = 1;
				err = cn_printf(cn, "%d",
					      task_tgid_vnr(current));
				break;
			/* uid */
			case 'u':
				err = cn_printf(cn, "%u",
					      from_kuid(&init_user_ns,
							cred->uid));
				break;
			/* gid */
			case 'g':
				err = cn_printf(cn, "%u",
					      from_kgid(&init_user_ns,
							cred->gid));
				break;
			case 'd':
				err = cn_printf(cn, "%d",
					__get_dumpable(cprm->mm_flags));
				break;
			/* signal that caused the coredump */
			case 's':
				err = cn_printf(cn, "%d", cprm->siginfo->si_signo);
				break;
			/* UNIX time of coredump */
			case 't': {
				struct timeval tv;
				do_gettimeofday(&tv);
				err = cn_printf(cn, "%lu", tv.tv_sec);
				break;
			}
			/* hostname */
			case 'h': {
				char *namestart = cn->corename + cn->used;
				down_read(&uts_sem);
				err = cn_printf(cn, "%s",
					      utsname()->nodename);
				up_read(&uts_sem);
				cn_escape(namestart);
				break;
			}
			/* executable */
			case 'e': {
				char *commstart = cn->corename + cn->used;
				err = cn_printf(cn, "%s", current->comm);
				cn_escape(commstart);
				break;
			}
			case 'E':
				err = cn_print_exe_file(cn);
				break;
			/* core limit size */
			case 'c':
				err = cn_printf(cn, "%lu",
					      rlimit(RLIMIT_CORE));
				break;
			default:
				break;
			}
			++pat_ptr;
		}

		if (err)
			return err;
	}

	/* Backward compatibility with core_uses_pid:
	 *
	 * If core_pattern does not include a %p (as is the default)
	 * and core_uses_pid is set, then .%pid will be appended to
	 * the filename. Do not do this for piped commands. */
	if (!ispipe && !pid_in_pattern && core_uses_pid) {
		err = cn_printf(cn, ".%d", task_tgid_vnr(current));
		if (err)
			return err;
	}
out:
	return ispipe;
}
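
/*
 * Example (illustrative values): with
 * core_pattern = "/var/crash/core.%e.%p.%t", a crash of "myapp" with
 * tgid 1234 at UNIX time 1400000000 expands to
 * "/var/crash/core.myapp.1234.1400000000".  With the default pattern
 * "core" and core_uses_pid set, the same crash yields "core.1234".
 */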

static int zap_process(struct task_struct *start, int exit_code)
{
	struct task_struct *t;
	int nr = 0;

	start->signal->group_exit_code = exit_code;
	start->signal->group_stop_count = 0;

	t = start;
	do {
		task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
		if (t != current && t->mm) {
			sigaddset(&t->pending.signal, SIGKILL);
			signal_wake_up(t, 1);
			nr++;
		}
	} while_each_thread(start, t);

	return nr;
}

static int zap_threads(struct task_struct *tsk, struct mm_struct *mm,
			struct core_state *core_state, int exit_code)
{
	struct task_struct *g, *p;
	unsigned long flags;
	int nr = -EAGAIN;

	spin_lock_irq(&tsk->sighand->siglock);
	if (!signal_group_exit(tsk->signal)) {
		mm->core_state = core_state;
		nr = zap_process(tsk, exit_code);
		tsk->signal->group_exit_task = tsk;
		/* ignore all signals except SIGKILL, see prepare_signal() */
		tsk->signal->flags = SIGNAL_GROUP_COREDUMP;
		clear_tsk_thread_flag(tsk, TIF_SIGPENDING);
	}
	spin_unlock_irq(&tsk->sighand->siglock);
	if (unlikely(nr < 0))
		return nr;

	tsk->flags |= PF_DUMPCORE;
	if (atomic_read(&mm->mm_users) == nr + 1)
		goto done;
	/*
	 * We should find and kill all tasks which use this mm, and we should
	 * count them correctly into ->nr_threads. We don't take tasklist
	 * lock, but this is safe wrt:
	 *
	 * fork:
	 *	None of sub-threads can fork after zap_process(leader). All
	 *	processes which were created before this point should be
	 *	visible to zap_threads() because copy_process() adds the new
	 *	process to the tail of init_task.tasks list, and lock/unlock
	 *	of ->siglock provides a memory barrier.
	 *
	 * do_exit:
	 *	The caller holds mm->mmap_sem. This means that the task which
	 *	uses this mm can't pass exit_mm(), so it can't exit or clear
	 *	its ->mm.
	 *
	 * de_thread:
	 *	It does list_replace_rcu(&leader->tasks, &current->tasks),
	 *	we must see either old or new leader, this does not matter.
	 *	However, it can change p->sighand, so lock_task_sighand(p)
	 *	must be used. Since p->mm != NULL and we hold ->mmap_sem
	 *	it can't fail.
	 *
	 *	Note also that "g" can be the old leader with ->mm == NULL
	 *	and already unhashed and thus removed from ->thread_group.
	 *	This is OK, __unhash_process()->list_del_rcu() does not
	 *	clear the ->next pointer, we will find the new leader via
	 *	next_thread().
	 */
	rcu_read_lock();
	for_each_process(g) {
		if (g == tsk->group_leader)
			continue;
		if (g->flags & PF_KTHREAD)
			continue;
		p = g;
		do {
			if (p->mm) {
				if (unlikely(p->mm == mm)) {
					lock_task_sighand(p, &flags);
					nr += zap_process(p, exit_code);
					p->signal->flags = SIGNAL_GROUP_EXIT;
					unlock_task_sighand(p, &flags);
				}
				break;
			}
		} while_each_thread(g, p);
	}
	rcu_read_unlock();
done:
	atomic_set(&core_state->nr_threads, nr);
	return nr;
}

static int coredump_wait(int exit_code, struct core_state *core_state)
{
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	int core_waiters = -EBUSY;

	init_completion(&core_state->startup);
	core_state->dumper.task = tsk;
	core_state->dumper.next = NULL;

	down_write(&mm->mmap_sem);
	if (!mm->core_state)
		core_waiters = zap_threads(tsk, mm, core_state, exit_code);
	up_write(&mm->mmap_sem);

	if (core_waiters > 0) {
		struct core_thread *ptr;

		wait_for_completion(&core_state->startup);
		/*
		 * Wait for all the threads to become inactive, so that
		 * all the thread context (extended register state, like
		 * the FPU, etc.) gets copied to memory.
		 */
		ptr = core_state->dumper.next;
		while (ptr != NULL) {
			wait_task_inactive(ptr->task, 0);
			ptr = ptr->next;
		}
	}

	return core_waiters;
}

static void coredump_finish(struct mm_struct *mm, bool core_dumped)
{
	struct core_thread *curr, *next;
	struct task_struct *task;

	spin_lock_irq(&current->sighand->siglock);
	if (core_dumped && !__fatal_signal_pending(current))
		current->signal->group_exit_code |= 0x80;
	current->signal->group_exit_task = NULL;
	current->signal->flags = SIGNAL_GROUP_EXIT;
	spin_unlock_irq(&current->sighand->siglock);

	next = mm->core_state->dumper.next;
	while ((curr = next) != NULL) {
		next = curr->next;
		task = curr->task;
		/*
		 * see exit_mm(): the task waiting there must not
		 * observe curr->task == NULL before we have read
		 * curr->next, hence the barrier.
		 */
		smp_mb();
		curr->task = NULL;
		wake_up_process(task);
	}

	mm->core_state = NULL;
}

static bool dump_interrupted(void)
{
	/*
	 * SIGKILL or freezing() interrupt the coredumping. Perhaps we
	 * can do try_to_freeze() and check __fatal_signal_pending(),
	 * but then we need to teach dump_write() to restart and clear
	 * TIF_SIGPENDING.
	 */
	return signal_pending(current);
}

static void wait_for_dump_helpers(struct file *file)
{
	struct pipe_inode_info *pipe = file->private_data;

	pipe_lock(pipe);
	pipe->readers++;
	pipe->writers--;
	wake_up_interruptible_sync(&pipe->wait);
	kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
	pipe_unlock(pipe);

	/*
	 * We actually want wait_event_freezable() but then we need
	 * to clear TIF_SIGPENDING and improve dump_interrupted().
	 */
	wait_event_interruptible(pipe->wait, pipe->readers == 1);

	pipe_lock(pipe);
	pipe->readers--;
	pipe->writers++;
	pipe_unlock(pipe);
}

/*
 * umh_pipe_setup
 * helper function to customize the process used
 * to collect the core in userspace.  Specifically
 * it sets up a pipe and installs it as fd 0 (stdin)
 * for the process.  Returns 0 on success, or a
 * negative errno on failure.
 * Note that it also sets the core limit to 1.  This
 * is a special value that we use to trap recursive
 * core dumps.
 */
static int umh_pipe_setup(struct subprocess_info *info, struct cred *new)
{
	struct file *files[2];
	struct coredump_params *cp = (struct coredump_params *)info->data;
	int err = create_pipe_files(files, 0);
	if (err)
		return err;

	cp->file = files[1];

	err = replace_fd(0, files[0], 0);
	fput(files[0]);
	/* and disallow core files too */
	current->signal->rlim[RLIMIT_CORE] = (struct rlimit){1, 1};

	return err;
}
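
/*
 * Illustration only (userspace, not part of this file): a minimal
 * helper that a pattern such as core_pattern = "|/usr/local/bin/catch %p"
 * could invoke -- the path, name and argument are hypothetical.
 * umh_pipe_setup() above hands the helper the core stream on fd 0:
 *
 *	#include <stdio.h>
 *
 *	int main(int argc, char **argv)
 *	{
 *		char path[64], buf[4096];
 *		size_t n;
 *		FILE *out;
 *
 *		snprintf(path, sizeof(path), "/var/crash/core.%s",
 *			 argc > 1 ? argv[1] : "unknown");
 *		out = fopen(path, "w");
 *		if (!out)
 *			return 1;
 *		while ((n = fread(buf, 1, sizeof(buf), stdin)) > 0)
 *			fwrite(buf, 1, n, out);
 *		fclose(out);
 *		return 0;
 *	}
 */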

void do_coredump(siginfo_t *siginfo)
{
	struct core_state core_state;
	struct core_name cn;
	struct mm_struct *mm = current->mm;
	struct linux_binfmt *binfmt;
	const struct cred *old_cred;
	struct cred *cred;
	int retval = 0;
	int flag = 0;
	int ispipe;
	struct files_struct *displaced;
	bool need_nonrelative = false;
	bool core_dumped = false;
	static atomic_t core_dump_count = ATOMIC_INIT(0);
	struct coredump_params cprm = {
		.siginfo = siginfo,
		.regs = signal_pt_regs(),
		.limit = rlimit(RLIMIT_CORE),
		/*
		 * We must use the same mm->flags while dumping core to
		 * avoid inconsistency of bit flags, since these bits are
		 * not protected by any locks.
		 */
		.mm_flags = mm->flags,
	};

	audit_core_dumps(siginfo->si_signo);

	binfmt = mm->binfmt;
	if (!binfmt || !binfmt->core_dump)
		goto fail;
	if (!__get_dumpable(cprm.mm_flags))
		goto fail;

	cred = prepare_creds();
	if (!cred)
		goto fail;
	/*
	 * We cannot trust fsuid as being the "true" uid of the process
	 * nor do we know its entire history. We only know it was tainted
	 * so we dump it as root in mode 2, and only into a controlled
	 * environment (pipe handler or fully qualified path).
	 */
	if (__get_dumpable(cprm.mm_flags) == SUID_DUMP_ROOT) {
		/* Setuid core dump mode */
		flag = O_EXCL;		/* Stop rewrite attacks */
		cred->fsuid = GLOBAL_ROOT_UID;	/* Dump root private */
		need_nonrelative = true;
	}

	retval = coredump_wait(siginfo->si_signo, &core_state);
	if (retval < 0)
		goto fail_creds;

	old_cred = override_creds(cred);

	ispipe = format_corename(&cn, &cprm);

	if (ispipe) {
		int dump_count;
		char **helper_argv;
		struct subprocess_info *sub_info;

		if (ispipe < 0) {
			printk(KERN_WARNING "format_corename failed\n");
			printk(KERN_WARNING "Aborting core\n");
			goto fail_corename;
		}

		if (cprm.limit == 1) {
			/* See umh_pipe_setup() which sets RLIMIT_CORE = 1.
			 *
			 * Normally core limits are irrelevant to pipes, since
			 * we're not writing to the file system, but we use
			 * cprm.limit of 1 here as a special value: it is a
			 * consistent way to catch recursive crashes.
			 * We can still crash if the core_pattern binary sets
			 * RLIMIT_CORE to something other than 1, but it runs
			 * as root and can do lots of stupid things.
			 *
			 * Note that we use task_tgid_vnr here to grab the pid
			 * of the thread group leader.  That way we get the
			 * right pid if a thread in a multi-threaded
			 * core_pattern process dies.
			 */
			printk(KERN_WARNING
				"Process %d(%s) has RLIMIT_CORE set to 1\n",
				task_tgid_vnr(current), current->comm);
			printk(KERN_WARNING "Aborting core\n");
			goto fail_unlock;
		}
		cprm.limit = RLIM_INFINITY;

		dump_count = atomic_inc_return(&core_dump_count);
		if (core_pipe_limit && (core_pipe_limit < dump_count)) {
			printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
			       task_tgid_vnr(current), current->comm);
			printk(KERN_WARNING "Skipping core dump\n");
			goto fail_dropcount;
		}

		helper_argv = argv_split(GFP_KERNEL, cn.corename+1, NULL);
		if (!helper_argv) {
			printk(KERN_WARNING "%s failed to allocate memory\n",
			       __func__);
			goto fail_dropcount;
		}

		retval = -ENOMEM;
		sub_info = call_usermodehelper_setup(helper_argv[0],
						helper_argv, NULL, GFP_KERNEL,
						umh_pipe_setup, NULL, &cprm);
		if (sub_info)
			retval = call_usermodehelper_exec(sub_info,
							  UMH_WAIT_EXEC);

		argv_free(helper_argv);
		if (retval) {
			printk(KERN_INFO "Core dump to %s pipe failed\n",
			       cn.corename);
			goto close_fail;
		}
	} else {
		struct inode *inode;

		if (cprm.limit < binfmt->min_coredump)
			goto fail_unlock;

		if (need_nonrelative && cn.corename[0] != '/') {
			printk(KERN_WARNING "Pid %d(%s) can only dump core "\
				"to fully qualified path!\n",
				task_tgid_vnr(current), current->comm);
			printk(KERN_WARNING "Skipping core dump\n");
			goto fail_unlock;
		}

		/* the bare 2 in the flags below is O_RDWR */
		cprm.file = filp_open(cn.corename,
				 O_CREAT | 2 | O_NOFOLLOW | O_LARGEFILE | flag,
				 0600);
		if (IS_ERR(cprm.file))
			goto fail_unlock;

		inode = file_inode(cprm.file);
		if (inode->i_nlink > 1)
			goto close_fail;
		if (d_unhashed(cprm.file->f_path.dentry))
			goto close_fail;
		/*
		 * AK: actually I see no reason not to allow this for named
		 * pipes etc, but keep the previous behaviour for now.
		 */
		if (!S_ISREG(inode->i_mode))
			goto close_fail;
		/*
		 * Don't allow local users to get cute and trick others
		 * into coredumping into their pre-created files.
		 */
		if (!uid_eq(inode->i_uid, current_fsuid()))
			goto close_fail;
		if (!cprm.file->f_op || !cprm.file->f_op->write)
			goto close_fail;
		if (do_truncate(cprm.file->f_path.dentry, 0, 0, cprm.file))
			goto close_fail;
	}

	/* get us an unshared descriptor table; almost always a no-op */
	retval = unshare_files(&displaced);
	if (retval)
		goto close_fail;
	if (displaced)
		put_files_struct(displaced);
	if (!dump_interrupted()) {
		file_start_write(cprm.file);
		core_dumped = binfmt->core_dump(&cprm);
		file_end_write(cprm.file);
	}
	if (ispipe && core_pipe_limit)
		wait_for_dump_helpers(cprm.file);
close_fail:
	if (cprm.file)
		filp_close(cprm.file, NULL);
fail_dropcount:
	if (ispipe)
		atomic_dec(&core_dump_count);
fail_unlock:
	kfree(cn.corename);
fail_corename:
	coredump_finish(mm, core_dumped);
	revert_creds(old_cred);
fail_creds:
	put_cred(cred);
fail:
	return;
}
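
/*
 * Illustration only (userspace): the simplest way to exercise this
 * path is a process that raises a dumpable fatal signal, e.g.:
 *
 *	#include <stdlib.h>
 *
 *	int main(void)
 *	{
 *		abort();
 *	}
 *
 * abort() raises SIGABRT; provided RLIMIT_CORE is non-zero and
 * core_pattern resolves to a writable location, do_coredump() runs.
 */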

/*
 * Core dumping helper functions.  These are the only things you should
 * do on a core-file: use only these functions to write out all the
 * necessary info.
 */
int dump_write(struct file *file, const void *addr, int nr)
{
	return !dump_interrupted() &&
		access_ok(VERIFY_READ, addr, nr) &&
		file->f_op->write(file, addr, nr, &file->f_pos) == nr;
}
EXPORT_SYMBOL(dump_write);

int dump_seek(struct file *file, loff_t off)
{
	int ret = 1;

	if (file->f_op->llseek && file->f_op->llseek != no_llseek) {
		if (dump_interrupted() ||
		    file->f_op->llseek(file, off, SEEK_CUR) < 0)
			return 0;
	} else {
		char *buf = (char *)get_zeroed_page(GFP_KERNEL);

		if (!buf)
			return 0;
		while (off > 0) {
			unsigned long n = off;

			if (n > PAGE_SIZE)
				n = PAGE_SIZE;
			if (!dump_write(file, buf, n)) {
				ret = 0;
				break;
			}
			off -= n;
		}
		free_page((unsigned long)buf);
	}
	return ret;
}
EXPORT_SYMBOL(dump_seek);
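
/*
 * Illustration only: how a binfmt ->core_dump() implementation is
 * expected to drive the helpers above (a sketch; "example_hdr" and the
 * layout are hypothetical, not a real binfmt):
 *
 *	static int example_core_dump(struct coredump_params *cprm)
 *	{
 *		char example_hdr[64] = "EXAMPLE-CORE";
 *
 *		if (!dump_write(cprm->file, example_hdr,
 *				sizeof(example_hdr)))
 *			return 0;
 *		if (!dump_seek(cprm->file, PAGE_SIZE))
 *			return 0;
 *		return 1;
 *	}
 *
 * Both helpers return non-zero on success and 0 on failure or
 * interruption, so a dumper simply stops at the first 0.  dump_seek()
 * skips a hole instead of writing zeroes when the file supports
 * llseek.  (A real dumper switches to KERNEL_DS with set_fs() first,
 * so that access_ok()/->write() accept kernel buffers.)
 */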
723