xref: /linux/ipc/shm.c (revision 5b25ab29bad3114f798b136b4147f255a5d5742f)
1 /*
2  * linux/ipc/shm.c
3  * Copyright (C) 1992, 1993 Krishna Balasubramanian
4  *	 Many improvements/fixes by Bruno Haible.
5  * Replaced `struct shm_desc' by `struct vm_area_struct', July 1994.
6  * Fixed the shm swap deallocation (shm_unuse()), August 1998 Andrea Arcangeli.
7  *
8  * /proc/sysvipc/shm support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
9  * BIGMEM support, Andrea Arcangeli <andrea@suse.de>
10  * SMP thread shm, Jean-Luc Boyard <jean-luc.boyard@siemens.fr>
11  * HIGHMEM support, Ingo Molnar <mingo@redhat.com>
12  * Make shmmax, shmall, shmmni sysctl'able, Christoph Rohland <cr@sap.com>
13  * Shared /dev/zero support, Kanoj Sarcar <kanoj@sgi.com>
14  * Move the mm functionality over to mm/shmem.c, Christoph Rohland <cr@sap.com>
15  *
16  * support for audit of ipc object properties and permission changes
17  * Dustin Kirkland <dustin.kirkland@us.ibm.com>
18  *
19  * namespaces support
20  * OpenVZ, SWsoft Inc.
21  * Pavel Emelianov <xemul@openvz.org>
22  */
23 
24 #include <linux/slab.h>
25 #include <linux/mm.h>
26 #include <linux/hugetlb.h>
27 #include <linux/shm.h>
28 #include <linux/init.h>
29 #include <linux/file.h>
30 #include <linux/mman.h>
31 #include <linux/shmem_fs.h>
32 #include <linux/security.h>
33 #include <linux/syscalls.h>
34 #include <linux/audit.h>
35 #include <linux/capability.h>
36 #include <linux/ptrace.h>
37 #include <linux/seq_file.h>
38 #include <linux/rwsem.h>
39 #include <linux/nsproxy.h>
40 #include <linux/mount.h>
41 #include <linux/ipc_namespace.h>
42 #include <linux/ima.h>
43 
44 #include <asm/uaccess.h>
45 
46 #include "util.h"
47 
48 struct shm_file_data {
49 	int id;
50 	struct ipc_namespace *ns;
51 	struct file *file;
52 	const struct vm_operations_struct *vm_ops;
53 };
54 
55 #define shm_file_data(file) (*((struct shm_file_data **)&(file)->private_data))
56 
57 static const struct file_operations shm_file_operations;
58 static struct vm_operations_struct shm_vm_ops;
59 
60 #define shm_ids(ns)	((ns)->ids[IPC_SHM_IDS])
61 
62 #define shm_unlock(shp)			\
63 	ipc_unlock(&(shp)->shm_perm)
64 
65 static int newseg(struct ipc_namespace *, struct ipc_params *);
66 static void shm_open(struct vm_area_struct *vma);
67 static void shm_close(struct vm_area_struct *vma);
68 static void shm_destroy (struct ipc_namespace *ns, struct shmid_kernel *shp);
69 #ifdef CONFIG_PROC_FS
70 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
71 #endif
72 
73 void shm_init_ns(struct ipc_namespace *ns)
74 {
75 	ns->shm_ctlmax = SHMMAX;
76 	ns->shm_ctlall = SHMALL;
77 	ns->shm_ctlmni = SHMMNI;
78 	ns->shm_tot = 0;
79 	ipc_init_ids(&shm_ids(ns));
80 }
81 
82 /*
83  * Called with shm_ids.rw_mutex (writer) and the shp structure locked.
84  * Only shm_ids.rw_mutex remains locked on exit.
85  */
86 static void do_shm_rmid(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
87 {
88 	struct shmid_kernel *shp;
89 	shp = container_of(ipcp, struct shmid_kernel, shm_perm);
90 
91 	if (shp->shm_nattch) {
92 		shp->shm_perm.mode |= SHM_DEST;
93 		/* Do not find it any more */
94 		shp->shm_perm.key = IPC_PRIVATE;
95 		shm_unlock(shp);
96 	} else
97 		shm_destroy(ns, shp);
98 }
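
/*
 * Illustrative userspace sketch (not part of this file): IPC_RMID on a
 * still-attached segment only marks it SHM_DEST above; the memory
 * survives until the last detach drops shm_nattch to zero.
 *
 *	int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
 *	char *p = shmat(id, NULL, 0);
 *	shmctl(id, IPC_RMID, NULL);	// marked SHM_DEST, key made private
 *	p[0] = 'x';			// existing mapping still valid
 *	shmdt(p);			// last detach -> shm_destroy()
 */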
99 
100 #ifdef CONFIG_IPC_NS
101 void shm_exit_ns(struct ipc_namespace *ns)
102 {
103 	free_ipcs(ns, &shm_ids(ns), do_shm_rmid);
104 }
105 #endif
106 
107 void __init shm_init (void)
108 {
109 	shm_init_ns(&init_ipc_ns);
110 	ipc_init_proc_interface("sysvipc/shm",
111 				"       key      shmid perms       size  cpid  lpid nattch   uid   gid  cuid  cgid      atime      dtime      ctime\n",
112 				IPC_SHM_IDS, sysvipc_shm_proc_show);
113 }
114 
115 /*
116  * The shm_lock() and shm_lock_check() routines are called in paths
117  * where the rw_mutex is not necessarily held.
118  */
119 static inline struct shmid_kernel *shm_lock(struct ipc_namespace *ns, int id)
120 {
121 	struct kern_ipc_perm *ipcp = ipc_lock(&shm_ids(ns), id);
122 
123 	if (IS_ERR(ipcp))
124 		return (struct shmid_kernel *)ipcp;
125 
126 	return container_of(ipcp, struct shmid_kernel, shm_perm);
127 }
128 
129 static inline struct shmid_kernel *shm_lock_check(struct ipc_namespace *ns,
130 						int id)
131 {
132 	struct kern_ipc_perm *ipcp = ipc_lock_check(&shm_ids(ns), id);
133 
134 	if (IS_ERR(ipcp))
135 		return (struct shmid_kernel *)ipcp;
136 
137 	return container_of(ipcp, struct shmid_kernel, shm_perm);
138 }
139 
140 static inline void shm_rmid(struct ipc_namespace *ns, struct shmid_kernel *s)
141 {
142 	ipc_rmid(&shm_ids(ns), &s->shm_perm);
143 }
144 
145 
146 /* This is called by fork, once for every shm attach. */
147 static void shm_open(struct vm_area_struct *vma)
148 {
149 	struct file *file = vma->vm_file;
150 	struct shm_file_data *sfd = shm_file_data(file);
151 	struct shmid_kernel *shp;
152 
153 	shp = shm_lock(sfd->ns, sfd->id);
154 	BUG_ON(IS_ERR(shp));
155 	shp->shm_atim = get_seconds();
156 	shp->shm_lprid = task_tgid_vnr(current);
157 	shp->shm_nattch++;
158 	shm_unlock(shp);
159 }
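
/*
 * A minimal sketch of the fork path mentioned above (illustrative, not
 * part of this file): shm_open() runs via vm_ops->open when a vma is
 * duplicated, so fork() bumps shm_nattch without a second shmat().
 *
 *	char *p = shmat(id, NULL, 0);	// shm_nattch == 1
 *	if (fork() == 0) {		// child's vma copy -> shm_open()
 *		...			// shm_nattch == 2 here
 *		_exit(0);		// unmap at exit -> shm_close()
 *	}
 */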
160 
161 /*
162  * shm_destroy - free the struct shmid_kernel
163  *
164  * @ns: namespace
165  * @shp: struct to free
166  *
167  * It has to be called with shp and shm_ids.rw_mutex (writer) locked,
168  * but returns with shp unlocked and freed.
169  */
170 static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
171 {
172 	ns->shm_tot -= (shp->shm_segsz + PAGE_SIZE - 1) >> PAGE_SHIFT;
173 	shm_rmid(ns, shp);
174 	shm_unlock(shp);
175 	if (!is_file_hugepages(shp->shm_file))
176 		shmem_lock(shp->shm_file, 0, shp->mlock_user);
177 	else
178 		user_shm_unlock(shp->shm_file->f_path.dentry->d_inode->i_size,
179 						shp->mlock_user);
180 	fput (shp->shm_file);
181 	security_shm_free(shp);
182 	ipc_rcu_putref(shp);
183 }
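
/*
 * Note on the shm_tot arithmetic above: sizes are charged in whole
 * pages. With 4 KiB pages, for example, a 5000-byte segment costs
 * (5000 + 4096 - 1) >> 12 = 2 pages in newseg(), and the same two
 * pages are released here.
 */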
184 
185 /*
186  * Remove the attach descriptor vma.
187  * Free the memory for the segment if it is marked destroyed.
188  * The descriptor has already been removed from the current->mm->mmap list
189  * and will later be kfree()d.
190  */
191 static void shm_close(struct vm_area_struct *vma)
192 {
193 	struct file * file = vma->vm_file;
194 	struct shm_file_data *sfd = shm_file_data(file);
195 	struct shmid_kernel *shp;
196 	struct ipc_namespace *ns = sfd->ns;
197 
198 	down_write(&shm_ids(ns).rw_mutex);
199 	/* remove from the list of attaches of the shm segment */
200 	shp = shm_lock(ns, sfd->id);
201 	BUG_ON(IS_ERR(shp));
202 	shp->shm_lprid = task_tgid_vnr(current);
203 	shp->shm_dtim = get_seconds();
204 	shp->shm_nattch--;
205 	if (shp->shm_nattch == 0 &&
206 	   shp->shm_perm.mode & SHM_DEST)
207 		shm_destroy(ns, shp);
208 	else
209 		shm_unlock(shp);
210 	up_write(&shm_ids(ns).rw_mutex);
211 }
212 
213 static int shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
214 {
215 	struct file *file = vma->vm_file;
216 	struct shm_file_data *sfd = shm_file_data(file);
217 
218 	return sfd->vm_ops->fault(vma, vmf);
219 }
220 
221 #ifdef CONFIG_NUMA
222 static int shm_set_policy(struct vm_area_struct *vma, struct mempolicy *new)
223 {
224 	struct file *file = vma->vm_file;
225 	struct shm_file_data *sfd = shm_file_data(file);
226 	int err = 0;
227 	if (sfd->vm_ops->set_policy)
228 		err = sfd->vm_ops->set_policy(vma, new);
229 	return err;
230 }
231 
232 static struct mempolicy *shm_get_policy(struct vm_area_struct *vma,
233 					unsigned long addr)
234 {
235 	struct file *file = vma->vm_file;
236 	struct shm_file_data *sfd = shm_file_data(file);
237 	struct mempolicy *pol = NULL;
238 
239 	if (sfd->vm_ops->get_policy)
240 		pol = sfd->vm_ops->get_policy(vma, addr);
241 	else if (vma->vm_policy)
242 		pol = vma->vm_policy;
243 
244 	return pol;
245 }
246 #endif
247 
248 static int shm_mmap(struct file * file, struct vm_area_struct * vma)
249 {
250 	struct shm_file_data *sfd = shm_file_data(file);
251 	int ret;
252 
253 	ret = sfd->file->f_op->mmap(sfd->file, vma);
254 	if (ret != 0)
255 		return ret;
256 	sfd->vm_ops = vma->vm_ops;
257 #ifdef CONFIG_MMU
258 	BUG_ON(!sfd->vm_ops->fault);
259 #endif
260 	vma->vm_ops = &shm_vm_ops;
261 	shm_open(vma);
262 
263 	return ret;
264 }
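
/*
 * shm_mmap() interposes on the backing file's mmap: the shmem (or
 * hugetlbfs) vm_ops set up by sfd->file are stashed in sfd->vm_ops and
 * the vma is switched to shm_vm_ops, so attach/detach accounting runs
 * here while faults and NUMA policy are delegated (see shm_fault()).
 */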
265 
266 static int shm_release(struct inode *ino, struct file *file)
267 {
268 	struct shm_file_data *sfd = shm_file_data(file);
269 
270 	put_ipc_ns(sfd->ns);
271 	shm_file_data(file) = NULL;
272 	kfree(sfd);
273 	return 0;
274 }
275 
276 static int shm_fsync(struct file *file, struct dentry *dentry, int datasync)
277 {
278 	int (*fsync) (struct file *, struct dentry *, int datasync);
279 	struct shm_file_data *sfd = shm_file_data(file);
280 	int ret = -EINVAL;
281 
282 	fsync = sfd->file->f_op->fsync;
283 	if (fsync)
284 		ret = fsync(sfd->file, sfd->file->f_path.dentry, datasync);
285 	return ret;
286 }
287 
288 static unsigned long shm_get_unmapped_area(struct file *file,
289 	unsigned long addr, unsigned long len, unsigned long pgoff,
290 	unsigned long flags)
291 {
292 	struct shm_file_data *sfd = shm_file_data(file);
293 	return get_unmapped_area(sfd->file, addr, len, pgoff, flags);
294 }
295 
296 int is_file_shm_hugepages(struct file *file)
297 {
298 	int ret = 0;
299 
300 	if (file->f_op == &shm_file_operations) {
301 		struct shm_file_data *sfd;
302 		sfd = shm_file_data(file);
303 		ret = is_file_hugepages(sfd->file);
304 	}
305 	return ret;
306 }
307 
308 static const struct file_operations shm_file_operations = {
309 	.mmap		= shm_mmap,
310 	.fsync		= shm_fsync,
311 	.release	= shm_release,
312 	.get_unmapped_area	= shm_get_unmapped_area,
313 };
314 
315 static struct vm_operations_struct shm_vm_ops = {
316 	.open	= shm_open,	/* callback for a new vm-area open */
317 	.close	= shm_close,	/* callback for when the vm-area is released */
318 	.fault	= shm_fault,
319 #if defined(CONFIG_NUMA)
320 	.set_policy = shm_set_policy,
321 	.get_policy = shm_get_policy,
322 #endif
323 };
324 
325 /**
326  * newseg - Create a new shared memory segment
327  * @ns: namespace
328  * @params: ptr to the structure that contains key, size and shmflg
329  *
330  * Called with shm_ids.rw_mutex held as a writer.
331  */
332 
333 static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
334 {
335 	key_t key = params->key;
336 	int shmflg = params->flg;
337 	size_t size = params->u.size;
338 	int error;
339 	struct shmid_kernel *shp;
340 	int numpages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
341 	struct file * file;
342 	char name[13];
343 	int id;
344 	int acctflag = 0;
345 
346 	if (size < SHMMIN || size > ns->shm_ctlmax)
347 		return -EINVAL;
348 
349 	if (ns->shm_tot + numpages > ns->shm_ctlall)
350 		return -ENOSPC;
351 
352 	shp = ipc_rcu_alloc(sizeof(*shp));
353 	if (!shp)
354 		return -ENOMEM;
355 
356 	shp->shm_perm.key = key;
357 	shp->shm_perm.mode = (shmflg & S_IRWXUGO);
358 	shp->mlock_user = NULL;
359 
360 	shp->shm_perm.security = NULL;
361 	error = security_shm_alloc(shp);
362 	if (error) {
363 		ipc_rcu_putref(shp);
364 		return error;
365 	}
366 
367 	sprintf (name, "SYSV%08x", key);
368 	if (shmflg & SHM_HUGETLB) {
369 		/* hugetlb_file_setup applies strict accounting */
370 		if (shmflg & SHM_NORESERVE)
371 			acctflag = VM_NORESERVE;
372 		file = hugetlb_file_setup(name, size, acctflag);
373 		shp->mlock_user = current_user();
374 	} else {
375 		/*
376 		 * Do not allow unaccounted (SHM_NORESERVE) segments when the
377 		 * overcommit policy is OVERCOMMIT_NEVER, even if asked for.
378 		 */
379 		if ((shmflg & SHM_NORESERVE) &&
380 				sysctl_overcommit_memory != OVERCOMMIT_NEVER)
381 			acctflag = VM_NORESERVE;
382 		file = shmem_file_setup(name, size, acctflag);
383 	}
384 	error = PTR_ERR(file);
385 	if (IS_ERR(file))
386 		goto no_file;
387 	ima_shm_check(file);
388 
389 	id = ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni);
390 	if (id < 0) {
391 		error = id;
392 		goto no_id;
393 	}
394 
395 	shp->shm_cprid = task_tgid_vnr(current);
396 	shp->shm_lprid = 0;
397 	shp->shm_atim = shp->shm_dtim = 0;
398 	shp->shm_ctim = get_seconds();
399 	shp->shm_segsz = size;
400 	shp->shm_nattch = 0;
401 	shp->shm_file = file;
402 	/*
403 	 * shmid gets reported as "inode#" in /proc/pid/maps.
404 	 * proc-ps tools use this. Changing this will break them.
405 	 */
406 	file->f_dentry->d_inode->i_ino = shp->shm_perm.id;
407 
408 	ns->shm_tot += numpages;
409 	error = shp->shm_perm.id;
410 	shm_unlock(shp);
411 	return error;
412 
413 no_id:
414 	fput(file);
415 no_file:
416 	security_shm_free(shp);
417 	ipc_rcu_putref(shp);
418 	return error;
419 }
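
/*
 * The backing file created above lives on an internal shmem (or
 * hugetlbfs) mount and is named from the key, e.g. key 0x1234 gives
 * "SYSV00001234". IPC_PRIVATE is key 0, so all private segments are
 * named "SYSV00000000" and are distinguished by id, not name.
 */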
420 
421 /*
422  * Called with shm_ids.rw_mutex and ipcp locked.
423  */
424 static inline int shm_security(struct kern_ipc_perm *ipcp, int shmflg)
425 {
426 	struct shmid_kernel *shp;
427 
428 	shp = container_of(ipcp, struct shmid_kernel, shm_perm);
429 	return security_shm_associate(shp, shmflg);
430 }
431 
432 /*
433  * Called with shm_ids.rw_mutex and ipcp locked.
434  */
435 static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
436 				struct ipc_params *params)
437 {
438 	struct shmid_kernel *shp;
439 
440 	shp = container_of(ipcp, struct shmid_kernel, shm_perm);
441 	if (shp->shm_segsz < params->u.size)
442 		return -EINVAL;
443 
444 	return 0;
445 }
446 
447 SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
448 {
449 	struct ipc_namespace *ns;
450 	struct ipc_ops shm_ops;
451 	struct ipc_params shm_params;
452 
453 	ns = current->nsproxy->ipc_ns;
454 
455 	shm_ops.getnew = newseg;
456 	shm_ops.associate = shm_security;
457 	shm_ops.more_checks = shm_more_checks;
458 
459 	shm_params.key = key;
460 	shm_params.flg = shmflg;
461 	shm_params.u.size = size;
462 
463 	return ipcget(ns, &shm_ids(ns), &shm_ops, &shm_params);
464 }
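
/*
 * Typical userspace call into the syscall above (illustrative sketch,
 * error handling omitted; the path passed to ftok() is hypothetical):
 *
 *	key_t key = ftok("/some/path", 'A');
 *	int id = shmget(key, 1 << 20, IPC_CREAT | 0600);
 *
 * An existing key is routed through shm_security() and
 * shm_more_checks(); an unused one ends up in newseg().
 */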
465 
466 static inline unsigned long copy_shmid_to_user(void __user *buf, struct shmid64_ds *in, int version)
467 {
468 	switch(version) {
469 	case IPC_64:
470 		return copy_to_user(buf, in, sizeof(*in));
471 	case IPC_OLD:
472 	    {
473 		struct shmid_ds out;
474 
475 		ipc64_perm_to_ipc_perm(&in->shm_perm, &out.shm_perm);
476 		out.shm_segsz	= in->shm_segsz;
477 		out.shm_atime	= in->shm_atime;
478 		out.shm_dtime	= in->shm_dtime;
479 		out.shm_ctime	= in->shm_ctime;
480 		out.shm_cpid	= in->shm_cpid;
481 		out.shm_lpid	= in->shm_lpid;
482 		out.shm_nattch	= in->shm_nattch;
483 
484 		return copy_to_user(buf, &out, sizeof(out));
485 	    }
486 	default:
487 		return -EINVAL;
488 	}
489 }
490 
491 static inline unsigned long
492 copy_shmid_from_user(struct shmid64_ds *out, void __user *buf, int version)
493 {
494 	switch(version) {
495 	case IPC_64:
496 		if (copy_from_user(out, buf, sizeof(*out)))
497 			return -EFAULT;
498 		return 0;
499 	case IPC_OLD:
500 	    {
501 		struct shmid_ds tbuf_old;
502 
503 		if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
504 			return -EFAULT;
505 
506 		out->shm_perm.uid	= tbuf_old.shm_perm.uid;
507 		out->shm_perm.gid	= tbuf_old.shm_perm.gid;
508 		out->shm_perm.mode	= tbuf_old.shm_perm.mode;
509 
510 		return 0;
511 	    }
512 	default:
513 		return -EINVAL;
514 	}
515 }
516 
517 static inline unsigned long copy_shminfo_to_user(void __user *buf, struct shminfo64 *in, int version)
518 {
519 	switch(version) {
520 	case IPC_64:
521 		return copy_to_user(buf, in, sizeof(*in));
522 	case IPC_OLD:
523 	    {
524 		struct shminfo out;
525 
526 		if(in->shmmax > INT_MAX)
527 			out.shmmax = INT_MAX;
528 		else
529 			out.shmmax = (int)in->shmmax;
530 
531 		out.shmmin	= in->shmmin;
532 		out.shmmni	= in->shmmni;
533 		out.shmseg	= in->shmseg;
534 		out.shmall	= in->shmall;
535 
536 		return copy_to_user(buf, &out, sizeof(out));
537 	    }
538 	default:
539 		return -EINVAL;
540 	}
541 }
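
/*
 * Background on the version split handled above: IPC_OLD is the legacy
 * ABI with narrow fields (e.g. 16-bit uids on some architectures),
 * IPC_64 the modern layout; ipc_parse_version() picks one from the cmd
 * bits in shmctl() below.
 */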
542 
543 /*
544  * Called with shm_ids.rw_mutex held as a reader
545  */
546 static void shm_get_stat(struct ipc_namespace *ns, unsigned long *rss,
547 		unsigned long *swp)
548 {
549 	int next_id;
550 	int total, in_use;
551 
552 	*rss = 0;
553 	*swp = 0;
554 
555 	in_use = shm_ids(ns).in_use;
556 
557 	for (total = 0, next_id = 0; total < in_use; next_id++) {
558 		struct kern_ipc_perm *ipc;
559 		struct shmid_kernel *shp;
560 		struct inode *inode;
561 
562 		ipc = idr_find(&shm_ids(ns).ipcs_idr, next_id);
563 		if (ipc == NULL)
564 			continue;
565 		shp = container_of(ipc, struct shmid_kernel, shm_perm);
566 
567 		inode = shp->shm_file->f_path.dentry->d_inode;
568 
569 		if (is_file_hugepages(shp->shm_file)) {
570 			struct address_space *mapping = inode->i_mapping;
571 			struct hstate *h = hstate_file(shp->shm_file);
572 			*rss += pages_per_huge_page(h) * mapping->nrpages;
573 		} else {
574 #ifdef CONFIG_SHMEM
575 			struct shmem_inode_info *info = SHMEM_I(inode);
576 			spin_lock(&info->lock);
577 			*rss += inode->i_mapping->nrpages;
578 			*swp += info->swapped;
579 			spin_unlock(&info->lock);
580 #else
581 			*rss += inode->i_mapping->nrpages;
582 #endif
583 		}
584 
585 		total++;
586 	}
587 }
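
/*
 * Both counters above are in pages: *rss is the resident page count of
 * each segment's backing file, *swp the shmem pages currently on swap
 * (always 0 for hugetlbfs, which is never swapped). They become the
 * shm_rss/shm_swp fields of SHM_INFO below.
 */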
588 
589 /*
590  * This function handles some shmctl commands which require the rw_mutex
591  * to be held in write mode.
592  * NOTE: no locks must be held, the rw_mutex is taken inside this function.
593  */
594 static int shmctl_down(struct ipc_namespace *ns, int shmid, int cmd,
595 		       struct shmid_ds __user *buf, int version)
596 {
597 	struct kern_ipc_perm *ipcp;
598 	struct shmid64_ds shmid64;
599 	struct shmid_kernel *shp;
600 	int err;
601 
602 	if (cmd == IPC_SET) {
603 		if (copy_shmid_from_user(&shmid64, buf, version))
604 			return -EFAULT;
605 	}
606 
607 	ipcp = ipcctl_pre_down(&shm_ids(ns), shmid, cmd, &shmid64.shm_perm, 0);
608 	if (IS_ERR(ipcp))
609 		return PTR_ERR(ipcp);
610 
611 	shp = container_of(ipcp, struct shmid_kernel, shm_perm);
612 
613 	err = security_shm_shmctl(shp, cmd);
614 	if (err)
615 		goto out_unlock;
616 	switch (cmd) {
617 	case IPC_RMID:
618 		do_shm_rmid(ns, ipcp);
619 		goto out_up;
620 	case IPC_SET:
621 		ipc_update_perm(&shmid64.shm_perm, ipcp);
622 		shp->shm_ctim = get_seconds();
623 		break;
624 	default:
625 		err = -EINVAL;
626 	}
627 out_unlock:
628 	shm_unlock(shp);
629 out_up:
630 	up_write(&shm_ids(ns).rw_mutex);
631 	return err;
632 }
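
/*
 * Illustrative IPC_SET call reaching the code above (userspace sketch,
 * error handling omitted):
 *
 *	struct shmid_ds ds;
 *	shmctl(id, IPC_STAT, &ds);	// fetch current permissions
 *	ds.shm_perm.mode = 0640;	// tighten access
 *	shmctl(id, IPC_SET, &ds);	// ipc_update_perm() + new shm_ctim
 */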
633 
634 SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
635 {
636 	struct shmid_kernel *shp;
637 	int err, version;
638 	struct ipc_namespace *ns;
639 
640 	if (cmd < 0 || shmid < 0) {
641 		err = -EINVAL;
642 		goto out;
643 	}
644 
645 	version = ipc_parse_version(&cmd);
646 	ns = current->nsproxy->ipc_ns;
647 
648 	switch (cmd) { /* replace with proc interface ? */
649 	case IPC_INFO:
650 	{
651 		struct shminfo64 shminfo;
652 
653 		err = security_shm_shmctl(NULL, cmd);
654 		if (err)
655 			return err;
656 
657 		memset(&shminfo, 0, sizeof(shminfo));
658 		shminfo.shmmni = shminfo.shmseg = ns->shm_ctlmni;
659 		shminfo.shmmax = ns->shm_ctlmax;
660 		shminfo.shmall = ns->shm_ctlall;
661 
662 		shminfo.shmmin = SHMMIN;
663 		if (copy_shminfo_to_user(buf, &shminfo, version))
664 			return -EFAULT;
665 
666 		down_read(&shm_ids(ns).rw_mutex);
667 		err = ipc_get_maxid(&shm_ids(ns));
668 		up_read(&shm_ids(ns).rw_mutex);
669 
670 		if (err < 0)
671 			err = 0;
672 		goto out;
673 	}
674 	case SHM_INFO:
675 	{
676 		struct shm_info shm_info;
677 
678 		err = security_shm_shmctl(NULL, cmd);
679 		if (err)
680 			return err;
681 
682 		memset(&shm_info, 0, sizeof(shm_info));
683 		down_read(&shm_ids(ns).rw_mutex);
684 		shm_info.used_ids = shm_ids(ns).in_use;
685 		shm_get_stat (ns, &shm_info.shm_rss, &shm_info.shm_swp);
686 		shm_info.shm_tot = ns->shm_tot;
687 		shm_info.swap_attempts = 0;
688 		shm_info.swap_successes = 0;
689 		err = ipc_get_maxid(&shm_ids(ns));
690 		up_read(&shm_ids(ns).rw_mutex);
691 		if (copy_to_user(buf, &shm_info, sizeof(shm_info))) {
692 			err = -EFAULT;
693 			goto out;
694 		}
695 
696 		err = err < 0 ? 0 : err;
697 		goto out;
698 	}
699 	case SHM_STAT:
700 	case IPC_STAT:
701 	{
702 		struct shmid64_ds tbuf;
703 		int result;
704 
705 		if (cmd == SHM_STAT) {
706 			shp = shm_lock(ns, shmid);
707 			if (IS_ERR(shp)) {
708 				err = PTR_ERR(shp);
709 				goto out;
710 			}
711 			result = shp->shm_perm.id;
712 		} else {
713 			shp = shm_lock_check(ns, shmid);
714 			if (IS_ERR(shp)) {
715 				err = PTR_ERR(shp);
716 				goto out;
717 			}
718 			result = 0;
719 		}
720 		err = -EACCES;
721 		if (ipcperms (&shp->shm_perm, S_IRUGO))
722 			goto out_unlock;
723 		err = security_shm_shmctl(shp, cmd);
724 		if (err)
725 			goto out_unlock;
726 		memset(&tbuf, 0, sizeof(tbuf));
727 		kernel_to_ipc64_perm(&shp->shm_perm, &tbuf.shm_perm);
728 		tbuf.shm_segsz	= shp->shm_segsz;
729 		tbuf.shm_atime	= shp->shm_atim;
730 		tbuf.shm_dtime	= shp->shm_dtim;
731 		tbuf.shm_ctime	= shp->shm_ctim;
732 		tbuf.shm_cpid	= shp->shm_cprid;
733 		tbuf.shm_lpid	= shp->shm_lprid;
734 		tbuf.shm_nattch	= shp->shm_nattch;
735 		shm_unlock(shp);
736 		if (copy_shmid_to_user(buf, &tbuf, version))
737 			err = -EFAULT;
738 		else
739 			err = result;
740 		goto out;
741 	}
742 	case SHM_LOCK:
743 	case SHM_UNLOCK:
744 	{
745 		struct file *uninitialized_var(shm_file);
746 
747 		lru_add_drain_all();  /* drain pagevecs to lru lists */
748 
749 		shp = shm_lock_check(ns, shmid);
750 		if (IS_ERR(shp)) {
751 			err = PTR_ERR(shp);
752 			goto out;
753 		}
754 
755 		audit_ipc_obj(&(shp->shm_perm));
756 
757 		if (!capable(CAP_IPC_LOCK)) {
758 			uid_t euid = current_euid();
759 			err = -EPERM;
760 			if (euid != shp->shm_perm.uid &&
761 			    euid != shp->shm_perm.cuid)
762 				goto out_unlock;
763 			if (cmd == SHM_LOCK &&
764 			    !current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur)
765 				goto out_unlock;
766 		}
767 
768 		err = security_shm_shmctl(shp, cmd);
769 		if (err)
770 			goto out_unlock;
771 
772 		if (cmd == SHM_LOCK) {
773 			struct user_struct *user = current_user();
774 			if (!is_file_hugepages(shp->shm_file)) {
775 				err = shmem_lock(shp->shm_file, 1, user);
776 				if (!err && !(shp->shm_perm.mode & SHM_LOCKED)) {
777 					shp->shm_perm.mode |= SHM_LOCKED;
778 					shp->mlock_user = user;
779 				}
780 			}
781 		} else if (!is_file_hugepages(shp->shm_file)) {
782 			shmem_lock(shp->shm_file, 0, shp->mlock_user);
783 			shp->shm_perm.mode &= ~SHM_LOCKED;
784 			shp->mlock_user = NULL;
785 		}
786 		shm_unlock(shp);
787 		goto out;
788 	}
789 	case IPC_RMID:
790 	case IPC_SET:
791 		err = shmctl_down(ns, shmid, cmd, buf, version);
792 		return err;
793 	default:
794 		return -EINVAL;
795 	}
796 
797 out_unlock:
798 	shm_unlock(shp);
799 out:
800 	return err;
801 }
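
/*
 * Sketch of the SHM_LOCK/SHM_UNLOCK pair handled above (illustrative;
 * needs CAP_IPC_LOCK, or ownership plus a nonzero RLIMIT_MEMLOCK):
 *
 *	shmctl(id, SHM_LOCK, NULL);	// pin shmem pages, set SHM_LOCKED
 *	...
 *	shmctl(id, SHM_UNLOCK, NULL);	// unpin, clear SHM_LOCKED
 */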
802 
803 /*
804  * Fix shmaddr, allocate descriptor, map shm, add attach descriptor to lists.
805  *
806  * NOTE! Despite the name, this is NOT a direct system call entrypoint. The
807  * "raddr" thing points to kernel space, and there has to be a wrapper around
808  * this.
809  */
810 long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
811 {
812 	struct shmid_kernel *shp;
813 	unsigned long addr;
814 	unsigned long size;
815 	struct file * file;
816 	int    err;
817 	unsigned long flags;
818 	unsigned long prot;
819 	int acc_mode;
820 	unsigned long user_addr;
821 	struct ipc_namespace *ns;
822 	struct shm_file_data *sfd;
823 	struct path path;
824 	fmode_t f_mode;
825 
826 	err = -EINVAL;
827 	if (shmid < 0)
828 		goto out;
829 	else if ((addr = (ulong)shmaddr)) {
830 		if (addr & (SHMLBA-1)) {
831 			if (shmflg & SHM_RND)
832 				addr &= ~(SHMLBA-1);	   /* round down */
833 			else
834 #ifndef __ARCH_FORCE_SHMLBA
835 				if (addr & ~PAGE_MASK)
836 #endif
837 					goto out;
838 		}
839 		flags = MAP_SHARED | MAP_FIXED;
840 	} else {
841 		if ((shmflg & SHM_REMAP))
842 			goto out;
843 
844 		flags = MAP_SHARED;
845 	}
846 
847 	if (shmflg & SHM_RDONLY) {
848 		prot = PROT_READ;
849 		acc_mode = S_IRUGO;
850 		f_mode = FMODE_READ;
851 	} else {
852 		prot = PROT_READ | PROT_WRITE;
853 		acc_mode = S_IRUGO | S_IWUGO;
854 		f_mode = FMODE_READ | FMODE_WRITE;
855 	}
856 	if (shmflg & SHM_EXEC) {
857 		prot |= PROT_EXEC;
858 		acc_mode |= S_IXUGO;
859 	}
860 
861 	/*
862 	 * We cannot rely on the fs check since SYSV IPC does have an
863 	 * additional creator id...
864 	 */
865 	ns = current->nsproxy->ipc_ns;
866 	shp = shm_lock_check(ns, shmid);
867 	if (IS_ERR(shp)) {
868 		err = PTR_ERR(shp);
869 		goto out;
870 	}
871 
872 	err = -EACCES;
873 	if (ipcperms(&shp->shm_perm, acc_mode))
874 		goto out_unlock;
875 
876 	err = security_shm_shmat(shp, shmaddr, shmflg);
877 	if (err)
878 		goto out_unlock;
879 
880 	path.dentry = dget(shp->shm_file->f_path.dentry);
881 	path.mnt    = shp->shm_file->f_path.mnt;
882 	shp->shm_nattch++;
883 	size = i_size_read(path.dentry->d_inode);
884 	shm_unlock(shp);
885 
886 	err = -ENOMEM;
887 	sfd = kzalloc(sizeof(*sfd), GFP_KERNEL);
888 	if (!sfd)
889 		goto out_put_dentry;
890 
891 	file = alloc_file(path.mnt, path.dentry, f_mode, &shm_file_operations);
892 	if (!file)
893 		goto out_free;
894 	ima_shm_check(file);
895 
896 	file->private_data = sfd;
897 	file->f_mapping = shp->shm_file->f_mapping;
898 	sfd->id = shp->shm_perm.id;
899 	sfd->ns = get_ipc_ns(ns);
900 	sfd->file = shp->shm_file;
901 	sfd->vm_ops = NULL;
902 
903 	down_write(&current->mm->mmap_sem);
904 	if (addr && !(shmflg & SHM_REMAP)) {
905 		err = -EINVAL;
906 		if (find_vma_intersection(current->mm, addr, addr + size))
907 			goto invalid;
908 		/*
909 		 * If shm segment goes below stack, make sure there is some
910 		 * space left for the stack to grow (at least 4 pages).
911 		 */
912 		if (addr < current->mm->start_stack &&
913 		    addr > current->mm->start_stack - size - PAGE_SIZE * 5)
914 			goto invalid;
915 	}
916 
917 	user_addr = do_mmap (file, addr, size, prot, flags, 0);
918 	*raddr = user_addr;
919 	err = 0;
920 	if (IS_ERR_VALUE(user_addr))
921 		err = (long)user_addr;
922 invalid:
923 	up_write(&current->mm->mmap_sem);
924 
925 	fput(file);
926 
927 out_nattch:
928 	down_write(&shm_ids(ns).rw_mutex);
929 	shp = shm_lock(ns, shmid);
930 	BUG_ON(IS_ERR(shp));
931 	shp->shm_nattch--;
932 	if (shp->shm_nattch == 0 &&
933 	   shp->shm_perm.mode & SHM_DEST)
934 		shm_destroy(ns, shp);
935 	else
936 		shm_unlock(shp);
937 	up_write(&shm_ids(ns).rw_mutex);
938 
939 out:
940 	return err;
941 
942 out_unlock:
943 	shm_unlock(shp);
944 	goto out;
945 
946 out_free:
947 	kfree(sfd);
948 out_put_dentry:
949 	dput(path.dentry);
950 	goto out_nattch;
951 }
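
/*
 * Userspace view of the attach path above (illustrative sketch;
 * some_addr is a hypothetical unaligned hint):
 *
 *	char *p = shmat(id, NULL, 0);		// kernel chooses the address
 *	char *q = shmat(id, some_addr, SHM_RND);// rounded down to SHMLBA
 *	char *r = shmat(id, NULL, SHM_RDONLY);	// PROT_READ mapping
 *
 * Each call returns (void *)-1 with errno set on failure.
 */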
952 
953 SYSCALL_DEFINE3(shmat, int, shmid, char __user *, shmaddr, int, shmflg)
954 {
955 	unsigned long ret;
956 	long err;
957 
958 	err = do_shmat(shmid, shmaddr, shmflg, &ret);
959 	if (err)
960 		return err;
961 	force_successful_syscall_return();
962 	return (long)ret;
963 }
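
/*
 * The attach address comes back as a long, so a mapping in the top few
 * pages of the address space could be mistaken for a -errno value.
 * force_successful_syscall_return() tells architectures that flag
 * syscall errors out of band (e.g. ia64) not to treat it as one.
 */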
964 
965 /*
966  * detach and kill segment if marked destroyed.
967  * The work is done in shm_close.
968  */
969 SYSCALL_DEFINE1(shmdt, char __user *, shmaddr)
970 {
971 	struct mm_struct *mm = current->mm;
972 	struct vm_area_struct *vma, *next;
973 	unsigned long addr = (unsigned long)shmaddr;
974 	loff_t size = 0;
975 	int retval = -EINVAL;
976 
977 	if (addr & ~PAGE_MASK)
978 		return retval;
979 
980 	down_write(&mm->mmap_sem);
981 
982 	/*
983 	 * This function tries to be smart and unmap shm segments that
984 	 * were modified by partial mlock or munmap calls:
985 	 * - It first determines the size of the shm segment that should be
986 	 *   unmapped: It searches for a vma that is backed by shm and that
987 	 *   started at address shmaddr. It records its size and then unmaps
988 	 *   it.
989 	 * - Then it unmaps all shm vmas that started at shmaddr and that
990 	 *   are within the initially determined size.
991 	 * Errors from do_munmap are ignored: the function only fails if
992 	 * it's called with invalid parameters or if it's called to unmap
993 	 * a part of a vma. Both calls in this function are for full vmas,
994 	 * the parameters are directly copied from the vma itself and always
995 	 * valid - therefore do_munmap cannot fail. (famous last words?)
996 	 */
997 	/*
998 	 * If it had been mremap()'d, the starting address would not
999 	 * match the usual checks anyway. So assume all vma's are
1000 	 * above the starting address given.
1001 	 */
1002 	vma = find_vma(mm, addr);
1003 
1004 #ifdef CONFIG_MMU
1005 	while (vma) {
1006 		next = vma->vm_next;
1007 
1008 		/*
1009 		 * Check if the starting address would match, i.e. it's
1010 		 * a fragment created by mprotect() and/or munmap(), or it
1011 		 * otherwise starts at this address with no hassles.
1012 		 */
1013 		if ((vma->vm_ops == &shm_vm_ops) &&
1014 			(vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) {
1015 
1016 
1017 			size = vma->vm_file->f_path.dentry->d_inode->i_size;
1018 			do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
1019 			/*
1020 			 * We discovered the size of the shm segment, so
1021 			 * break out of here and fall through to the next
1022 			 * loop that uses the size information to stop
1023 			 * searching for matching vma's.
1024 			 */
1025 			retval = 0;
1026 			vma = next;
1027 			break;
1028 		}
1029 		vma = next;
1030 	}
1031 
1032 	/*
1033 	 * We need look no further than the maximum address a fragment
1034 	 * could possibly have landed at. Also cast things to loff_t to
1035 	 * prevent overflows and make comparisons vs. equal-width types.
1036 	 */
1037 	size = PAGE_ALIGN(size);
1038 	while (vma && (loff_t)(vma->vm_end - addr) <= size) {
1039 		next = vma->vm_next;
1040 
1041 		/* finding a matching vma now does not alter retval */
1042 		if ((vma->vm_ops == &shm_vm_ops) &&
1043 			(vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff)
1044 
1045 			do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
1046 		vma = next;
1047 	}
1048 
1049 #else /* CONFIG_MMU */
1050 	/* under NOMMU conditions, the exact address to be destroyed must be
1051 	 * given; find_vma() may return NULL if nothing is mapped there */
1052 	retval = -EINVAL;
1053 	if (vma && vma->vm_start == addr && vma->vm_ops == &shm_vm_ops) {
1054 		do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
1055 		retval = 0;
1056 	}
1057 
1058 #endif
1059 
1060 	up_write(&mm->mmap_sem);
1061 	return retval;
1062 }
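
/*
 * Why the two-pass MMU scan above matters (illustrative scenario):
 *
 *	char *p = shmat(id, NULL, 0);		// say, an 8-page segment
 *	mprotect(p + 4096, 4096, PROT_READ);	// splits the vma in three
 *	shmdt(p);
 *
 * The first loop finds the fragment starting exactly at shmaddr and
 * learns the segment size from the backing inode; the second unmaps
 * the remaining fragments whose vm_pgoff still lines up within that
 * size.
 */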
1063 
1064 #ifdef CONFIG_PROC_FS
1065 static int sysvipc_shm_proc_show(struct seq_file *s, void *it)
1066 {
1067 	struct shmid_kernel *shp = it;
1068 
1069 #if BITS_PER_LONG <= 32
1070 #define SIZE_SPEC "%10lu"
1071 #else
1072 #define SIZE_SPEC "%21lu"
1073 #endif
1074 
1075 	return seq_printf(s,
1076 			  "%10d %10d  %4o " SIZE_SPEC " %5u %5u  "
1077 			  "%5lu %5u %5u %5u %5u %10lu %10lu %10lu\n",
1078 			  shp->shm_perm.key,
1079 			  shp->shm_perm.id,
1080 			  shp->shm_perm.mode,
1081 			  shp->shm_segsz,
1082 			  shp->shm_cprid,
1083 			  shp->shm_lprid,
1084 			  shp->shm_nattch,
1085 			  shp->shm_perm.uid,
1086 			  shp->shm_perm.gid,
1087 			  shp->shm_perm.cuid,
1088 			  shp->shm_perm.cgid,
1089 			  shp->shm_atim,
1090 			  shp->shm_dtim,
1091 			  shp->shm_ctim);
1092 }
1093 #endif
1094