/*
 * linux/ipc/shm.c
 * Copyright (C) 1992, 1993 Krishna Balasubramanian
 *	 Many improvements/fixes by Bruno Haible.
 * Replaced `struct shm_desc' by `struct vm_area_struct', July 1994.
 * Fixed the shm swap deallocation (shm_unuse()), August 1998 Andrea Arcangeli.
 *
 * /proc/sysvipc/shm support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
 * BIGMEM support, Andrea Arcangeli <andrea@suse.de>
 * SMP thread shm, Jean-Luc Boyard <jean-luc.boyard@siemens.fr>
 * HIGHMEM support, Ingo Molnar <mingo@redhat.com>
 * Make shmmax, shmall, shmmni sysctl'able, Christoph Rohland <cr@sap.com>
 * Shared /dev/zero support, Kanoj Sarcar <kanoj@sgi.com>
 * Move the mm functionality over to mm/shmem.c, Christoph Rohland <cr@sap.com>
 *
 * support for audit of ipc object properties and permission changes
 * Dustin Kirkland <dustin.kirkland@us.ibm.com>
 *
 * namespaces support
 * OpenVZ, SWsoft Inc.
 * Pavel Emelianov <xemul@openvz.org>
 */

#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/init.h>
#include <linux/file.h>
#include <linux/mman.h>
#include <linux/shmem_fs.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/capability.h>
#include <linux/ptrace.h>
#include <linux/seq_file.h>
#include <linux/rwsem.h>
#include <linux/nsproxy.h>
#include <linux/mount.h>
#include <linux/ipc_namespace.h>
#include <linux/ima.h>

#include <asm/uaccess.h>

#include "util.h"

struct shm_file_data {
	int id;
	struct ipc_namespace *ns;
	struct file *file;
	const struct vm_operations_struct *vm_ops;
};

#define shm_file_data(file) (*((struct shm_file_data **)&(file)->private_data))
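
/*
 * A segment is attached through a second struct file, created in
 * do_shmat(): its private_data holds this shm_file_data, and sfd->file
 * is the backing shmem or hugetlbfs file that actually owns the pages.
 * Most of the file and vm operations below simply forward to that
 * backing file.
 */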

static const struct file_operations shm_file_operations;
static struct vm_operations_struct shm_vm_ops;

#define shm_ids(ns)	((ns)->ids[IPC_SHM_IDS])

#define shm_unlock(shp)			\
	ipc_unlock(&(shp)->shm_perm)

static int newseg(struct ipc_namespace *, struct ipc_params *);
static void shm_open(struct vm_area_struct *vma);
static void shm_close(struct vm_area_struct *vma);
static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp);
#ifdef CONFIG_PROC_FS
static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
#endif
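
/*
 * Each IPC namespace starts from the compile-time defaults below; the
 * limits are the shmmax/shmall/shmmni knobs that the header above notes
 * were made sysctl'able.
 */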
void shm_init_ns(struct ipc_namespace *ns)
{
	ns->shm_ctlmax = SHMMAX;
	ns->shm_ctlall = SHMALL;
	ns->shm_ctlmni = SHMMNI;
	ns->shm_tot = 0;
	ipc_init_ids(&shm_ids(ns));
}

/*
 * Called with shm_ids.rw_mutex (writer) and the shp structure locked.
 * Only shm_ids.rw_mutex remains locked on exit.
 */
static void do_shm_rmid(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
{
	struct shmid_kernel *shp;
	shp = container_of(ipcp, struct shmid_kernel, shm_perm);

	if (shp->shm_nattch) {
		shp->shm_perm.mode |= SHM_DEST;
		/* Do not find it any more */
		shp->shm_perm.key = IPC_PRIVATE;
		shm_unlock(shp);
	} else
		shm_destroy(ns, shp);
}

#ifdef CONFIG_IPC_NS
void shm_exit_ns(struct ipc_namespace *ns)
{
	free_ipcs(ns, &shm_ids(ns), do_shm_rmid);
}
#endif

void __init shm_init(void)
{
	shm_init_ns(&init_ipc_ns);
	ipc_init_proc_interface("sysvipc/shm",
				"       key      shmid perms       size  cpid  lpid nattch   uid   gid  cuid  cgid      atime      dtime      ctime\n",
				IPC_SHM_IDS, sysvipc_shm_proc_show);
}

/*
 * The shm_lock() and shm_lock_check() routines are called in paths
 * where the rw_mutex is not necessarily held.
 */
static inline struct shmid_kernel *shm_lock(struct ipc_namespace *ns, int id)
{
	struct kern_ipc_perm *ipcp = ipc_lock(&shm_ids(ns), id);

	if (IS_ERR(ipcp))
		return (struct shmid_kernel *)ipcp;

	return container_of(ipcp, struct shmid_kernel, shm_perm);
}

static inline struct shmid_kernel *shm_lock_check(struct ipc_namespace *ns,
						int id)
{
	struct kern_ipc_perm *ipcp = ipc_lock_check(&shm_ids(ns), id);

	if (IS_ERR(ipcp))
		return (struct shmid_kernel *)ipcp;

	return container_of(ipcp, struct shmid_kernel, shm_perm);
}

static inline void shm_rmid(struct ipc_namespace *ns, struct shmid_kernel *s)
{
	ipc_rmid(&shm_ids(ns), &s->shm_perm);
}

/* This is called by fork, once for every shm attach. */
static void shm_open(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	struct shmid_kernel *shp;

	shp = shm_lock(sfd->ns, sfd->id);
	BUG_ON(IS_ERR(shp));
	shp->shm_atim = get_seconds();
	shp->shm_lprid = task_tgid_vnr(current);
	shp->shm_nattch++;
	shm_unlock(shp);
}

/*
 * shm_destroy - free the struct shmid_kernel
 *
 * @ns: namespace
 * @shp: struct to free
 *
 * It has to be called with shp and shm_ids.rw_mutex (writer) locked,
 * but returns with shp unlocked and freed.
 */
static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
{
	ns->shm_tot -= (shp->shm_segsz + PAGE_SIZE - 1) >> PAGE_SHIFT;
	shm_rmid(ns, shp);
	shm_unlock(shp);
	if (!is_file_hugepages(shp->shm_file))
		shmem_lock(shp->shm_file, 0, shp->mlock_user);
	else
		user_shm_unlock(shp->shm_file->f_path.dentry->d_inode->i_size,
				shp->mlock_user);
	fput(shp->shm_file);
	security_shm_free(shp);
	ipc_rcu_putref(shp);
}

/*
 * Remove the attach descriptor vma and free the memory for the segment
 * if it is marked destroyed.  The descriptor has already been removed
 * from the current->mm->mmap list and will later be kfree()d.
 */
static void shm_close(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	struct shmid_kernel *shp;
	struct ipc_namespace *ns = sfd->ns;

	down_write(&shm_ids(ns).rw_mutex);
	/* remove from the list of attaches of the shm segment */
	shp = shm_lock(ns, sfd->id);
	BUG_ON(IS_ERR(shp));
	shp->shm_lprid = task_tgid_vnr(current);
	shp->shm_dtim = get_seconds();
	shp->shm_nattch--;
	if (shp->shm_nattch == 0 &&
	    shp->shm_perm.mode & SHM_DEST)
		shm_destroy(ns, shp);
	else
		shm_unlock(shp);
	up_write(&shm_ids(ns).rw_mutex);
}
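
/*
 * Page faults on an attached segment are serviced by the backing
 * shmem/hugetlbfs file's own fault handler.
 */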
static int shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);

	return sfd->vm_ops->fault(vma, vmf);
}

#ifdef CONFIG_NUMA
static int shm_set_policy(struct vm_area_struct *vma, struct mempolicy *new)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	int err = 0;

	if (sfd->vm_ops->set_policy)
		err = sfd->vm_ops->set_policy(vma, new);
	return err;
}

static struct mempolicy *shm_get_policy(struct vm_area_struct *vma,
					unsigned long addr)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	struct mempolicy *pol = NULL;

	if (sfd->vm_ops->get_policy)
		pol = sfd->vm_ops->get_policy(vma, addr);
	else if (vma->vm_policy)
		pol = vma->vm_policy;

	return pol;
}
#endif
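
/*
 * Let the backing file (shmem or hugetlbfs) build the mapping, then
 * substitute shm_vm_ops so that shm_open()/shm_close() see every later
 * duplication (fork) and teardown of the vma and can keep shm_nattch
 * and the attach/detach times accurate.
 */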
static int shm_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct shm_file_data *sfd = shm_file_data(file);
	int ret;

	ret = sfd->file->f_op->mmap(sfd->file, vma);
	if (ret != 0)
		return ret;
	sfd->vm_ops = vma->vm_ops;
#ifdef CONFIG_MMU
	BUG_ON(!sfd->vm_ops->fault);
#endif
	vma->vm_ops = &shm_vm_ops;
	shm_open(vma);

	return ret;
}

static int shm_release(struct inode *ino, struct file *file)
{
	struct shm_file_data *sfd = shm_file_data(file);

	put_ipc_ns(sfd->ns);
	shm_file_data(file) = NULL;
	kfree(sfd);
	return 0;
}

static int shm_fsync(struct file *file, struct dentry *dentry, int datasync)
{
	int (*fsync)(struct file *, struct dentry *, int datasync);
	struct shm_file_data *sfd = shm_file_data(file);
	int ret = -EINVAL;

	fsync = sfd->file->f_op->fsync;
	if (fsync)
		ret = fsync(sfd->file, sfd->file->f_path.dentry, datasync);
	return ret;
}

static unsigned long shm_get_unmapped_area(struct file *file,
	unsigned long addr, unsigned long len, unsigned long pgoff,
	unsigned long flags)
{
	struct shm_file_data *sfd = shm_file_data(file);

	return get_unmapped_area(sfd->file, addr, len, pgoff, flags);
}

int is_file_shm_hugepages(struct file *file)
{
	int ret = 0;

	if (file->f_op == &shm_file_operations) {
		struct shm_file_data *sfd;

		sfd = shm_file_data(file);
		ret = is_file_hugepages(sfd->file);
	}
	return ret;
}

static const struct file_operations shm_file_operations = {
	.mmap		= shm_mmap,
	.fsync		= shm_fsync,
	.release	= shm_release,
	.get_unmapped_area	= shm_get_unmapped_area,
};

static struct vm_operations_struct shm_vm_ops = {
	.open	= shm_open,	/* callback for a new vm-area open */
	.close	= shm_close,	/* callback for when the vm-area is released */
	.fault	= shm_fault,
#if defined(CONFIG_NUMA)
	.set_policy = shm_set_policy,
	.get_policy = shm_get_policy,
#endif
};

/**
 * newseg - Create a new shared memory segment
 * @ns: namespace
 * @params: ptr to the structure that contains key, size and shmflg
 *
 * Called with shm_ids.rw_mutex held as a writer.
 */
static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
{
	key_t key = params->key;
	int shmflg = params->flg;
	size_t size = params->u.size;
	int error;
	struct shmid_kernel *shp;
	int numpages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	struct file *file;
	char name[13];
	int id;
	int acctflag = 0;

	if (size < SHMMIN || size > ns->shm_ctlmax)
		return -EINVAL;

	if (ns->shm_tot + numpages > ns->shm_ctlall)
		return -ENOSPC;

	shp = ipc_rcu_alloc(sizeof(*shp));
	if (!shp)
		return -ENOMEM;

	shp->shm_perm.key = key;
	shp->shm_perm.mode = (shmflg & S_IRWXUGO);
	shp->mlock_user = NULL;

	shp->shm_perm.security = NULL;
	error = security_shm_alloc(shp);
	if (error) {
		ipc_rcu_putref(shp);
		return error;
	}

	sprintf(name, "SYSV%08x", key);
	if (shmflg & SHM_HUGETLB) {
		/* hugetlb_file_setup applies strict accounting */
		if (shmflg & SHM_NORESERVE)
			acctflag = VM_NORESERVE;
		file = hugetlb_file_setup(name, size, acctflag);
		shp->mlock_user = current_user();
	} else {
		/*
		 * Never skip accounting when OVERCOMMIT_NEVER is set,
		 * even if SHM_NORESERVE asks for it.
		 */
		if ((shmflg & SHM_NORESERVE) &&
				sysctl_overcommit_memory != OVERCOMMIT_NEVER)
			acctflag = VM_NORESERVE;
		file = shmem_file_setup(name, size, acctflag);
	}
	error = PTR_ERR(file);
	if (IS_ERR(file))
		goto no_file;
	ima_shm_check(file);

	id = ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni);
	if (id < 0) {
		error = id;
		goto no_id;
	}

	shp->shm_cprid = task_tgid_vnr(current);
	shp->shm_lprid = 0;
	shp->shm_atim = shp->shm_dtim = 0;
	shp->shm_ctim = get_seconds();
	shp->shm_segsz = size;
	shp->shm_nattch = 0;
	shp->shm_file = file;
	/*
	 * shmid gets reported as "inode#" in /proc/pid/maps.
	 * proc-ps tools use this. Changing this will break them.
	 */
	file->f_dentry->d_inode->i_ino = shp->shm_perm.id;

	ns->shm_tot += numpages;
	error = shp->shm_perm.id;
	shm_unlock(shp);
	return error;
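
/*
 * Error unwind: no_id drops the backing file and falls through to
 * no_file, which frees the security blob and releases the refcounted
 * shmid_kernel itself.
 */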
no_id:
	fput(file);
no_file:
	security_shm_free(shp);
	ipc_rcu_putref(shp);
	return error;
}

/*
 * Called with shm_ids.rw_mutex and ipcp locked.
 */
static inline int shm_security(struct kern_ipc_perm *ipcp, int shmflg)
{
	struct shmid_kernel *shp;

	shp = container_of(ipcp, struct shmid_kernel, shm_perm);
	return security_shm_associate(shp, shmflg);
}

/*
 * Called with shm_ids.rw_mutex and ipcp locked.
 */
static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
				struct ipc_params *params)
{
	struct shmid_kernel *shp;

	shp = container_of(ipcp, struct shmid_kernel, shm_perm);
	if (shp->shm_segsz < params->u.size)
		return -EINVAL;

	return 0;
}

SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
{
	struct ipc_namespace *ns;
	struct ipc_ops shm_ops;
	struct ipc_params shm_params;

	ns = current->nsproxy->ipc_ns;

	shm_ops.getnew = newseg;
	shm_ops.associate = shm_security;
	shm_ops.more_checks = shm_more_checks;

	shm_params.key = key;
	shm_params.flg = shmflg;
	shm_params.u.size = size;

	return ipcget(ns, &shm_ids(ns), &shm_ops, &shm_params);
}
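
/*
 * Illustrative userspace call sequence for the syscalls implemented in
 * this file (error handling omitted; not part of the kernel source):
 *
 *	int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
 *	char *p = shmat(id, NULL, 0);
 *	p[0] = 1;
 *	shmdt(p);
 *	shmctl(id, IPC_RMID, NULL);
 */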
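
/*
 * Userspace may speak either the legacy SysV ABI (IPC_OLD) or the
 * current one (IPC_64, struct shmid64_ds); ipc_parse_version() decides
 * which.  These helpers translate between the kernel-internal
 * shmid64_ds and whichever layout the caller asked for.
 */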
static inline unsigned long copy_shmid_to_user(void __user *buf, struct shmid64_ds *in, int version)
{
	switch (version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	    {
		struct shmid_ds out;

		ipc64_perm_to_ipc_perm(&in->shm_perm, &out.shm_perm);
		out.shm_segsz	= in->shm_segsz;
		out.shm_atime	= in->shm_atime;
		out.shm_dtime	= in->shm_dtime;
		out.shm_ctime	= in->shm_ctime;
		out.shm_cpid	= in->shm_cpid;
		out.shm_lpid	= in->shm_lpid;
		out.shm_nattch	= in->shm_nattch;

		return copy_to_user(buf, &out, sizeof(out));
	    }
	default:
		return -EINVAL;
	}
}

static inline unsigned long
copy_shmid_from_user(struct shmid64_ds *out, void __user *buf, int version)
{
	switch (version) {
	case IPC_64:
		if (copy_from_user(out, buf, sizeof(*out)))
			return -EFAULT;
		return 0;
	case IPC_OLD:
	    {
		struct shmid_ds tbuf_old;

		if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
			return -EFAULT;

		out->shm_perm.uid	= tbuf_old.shm_perm.uid;
		out->shm_perm.gid	= tbuf_old.shm_perm.gid;
		out->shm_perm.mode	= tbuf_old.shm_perm.mode;

		return 0;
	    }
	default:
		return -EINVAL;
	}
}

static inline unsigned long copy_shminfo_to_user(void __user *buf, struct shminfo64 *in, int version)
{
	switch (version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	    {
		struct shminfo out;

		if (in->shmmax > INT_MAX)
			out.shmmax = INT_MAX;
		else
			out.shmmax = (int)in->shmmax;

		out.shmmin	= in->shmmin;
		out.shmmni	= in->shmmni;
		out.shmseg	= in->shmseg;
		out.shmall	= in->shmall;

		return copy_to_user(buf, &out, sizeof(out));
	    }
	default:
		return -EINVAL;
	}
}
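
/*
 * Note for shm_get_stat() below: hugetlbfs-backed segments are never
 * swapped out, so they contribute to *rss only; shmem segments also add
 * their swapped-out pages to *swp.
 */
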
/*
 * Called with shm_ids.rw_mutex held as a reader
 */
static void shm_get_stat(struct ipc_namespace *ns, unsigned long *rss,
		unsigned long *swp)
{
	int next_id;
	int total, in_use;

	*rss = 0;
	*swp = 0;

	in_use = shm_ids(ns).in_use;

	for (total = 0, next_id = 0; total < in_use; next_id++) {
		struct shmid_kernel *shp;
		struct inode *inode;

		shp = idr_find(&shm_ids(ns).ipcs_idr, next_id);
		if (shp == NULL)
			continue;

		inode = shp->shm_file->f_path.dentry->d_inode;

		if (is_file_hugepages(shp->shm_file)) {
			struct address_space *mapping = inode->i_mapping;
			struct hstate *h = hstate_file(shp->shm_file);

			*rss += pages_per_huge_page(h) * mapping->nrpages;
		} else {
#ifdef CONFIG_SHMEM
			struct shmem_inode_info *info = SHMEM_I(inode);

			spin_lock(&info->lock);
			*rss += inode->i_mapping->nrpages;
			*swp += info->swapped;
			spin_unlock(&info->lock);
#else
			*rss += inode->i_mapping->nrpages;
#endif
		}

		total++;
	}
}

/*
 * This function handles some shmctl commands which require the rw_mutex
 * to be held in write mode.
 * NOTE: the caller must hold no locks on entry; the rw_mutex is taken
 * inside this function.
 */
static int shmctl_down(struct ipc_namespace *ns, int shmid, int cmd,
		       struct shmid_ds __user *buf, int version)
{
	struct kern_ipc_perm *ipcp;
	struct shmid64_ds shmid64;
	struct shmid_kernel *shp;
	int err;

	if (cmd == IPC_SET) {
		if (copy_shmid_from_user(&shmid64, buf, version))
			return -EFAULT;
	}

	ipcp = ipcctl_pre_down(&shm_ids(ns), shmid, cmd, &shmid64.shm_perm, 0);
	if (IS_ERR(ipcp))
		return PTR_ERR(ipcp);

	shp = container_of(ipcp, struct shmid_kernel, shm_perm);

	err = security_shm_shmctl(shp, cmd);
	if (err)
		goto out_unlock;
	switch (cmd) {
	case IPC_RMID:
		do_shm_rmid(ns, ipcp);
		goto out_up;
	case IPC_SET:
		ipc_update_perm(&shmid64.shm_perm, ipcp);
		shp->shm_ctim = get_seconds();
		break;
	default:
		err = -EINVAL;
	}
out_unlock:
	shm_unlock(shp);
out_up:
	up_write(&shm_ids(ns).rw_mutex);
	return err;
}

SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
{
	struct shmid_kernel *shp;
	int err, version;
	struct ipc_namespace *ns;

	if (cmd < 0 || shmid < 0) {
		err = -EINVAL;
		goto out;
	}

	version = ipc_parse_version(&cmd);
	ns = current->nsproxy->ipc_ns;

	switch (cmd) { /* replace with proc interface ? */
	case IPC_INFO:
	{
		struct shminfo64 shminfo;

		err = security_shm_shmctl(NULL, cmd);
		if (err)
			return err;

		memset(&shminfo, 0, sizeof(shminfo));
		shminfo.shmmni = shminfo.shmseg = ns->shm_ctlmni;
		shminfo.shmmax = ns->shm_ctlmax;
		shminfo.shmall = ns->shm_ctlall;

		shminfo.shmmin = SHMMIN;
		if (copy_shminfo_to_user(buf, &shminfo, version))
			return -EFAULT;

		down_read(&shm_ids(ns).rw_mutex);
		err = ipc_get_maxid(&shm_ids(ns));
		up_read(&shm_ids(ns).rw_mutex);

		if (err < 0)
			err = 0;
		goto out;
	}
	case SHM_INFO:
	{
		struct shm_info shm_info;

		err = security_shm_shmctl(NULL, cmd);
		if (err)
			return err;

		memset(&shm_info, 0, sizeof(shm_info));
		down_read(&shm_ids(ns).rw_mutex);
		shm_info.used_ids = shm_ids(ns).in_use;
		shm_get_stat(ns, &shm_info.shm_rss, &shm_info.shm_swp);
		shm_info.shm_tot = ns->shm_tot;
		shm_info.swap_attempts = 0;
		shm_info.swap_successes = 0;
		err = ipc_get_maxid(&shm_ids(ns));
		up_read(&shm_ids(ns).rw_mutex);
		if (copy_to_user(buf, &shm_info, sizeof(shm_info))) {
			err = -EFAULT;
			goto out;
		}

		err = err < 0 ? 0 : err;
		goto out;
	}
	case SHM_STAT:
	case IPC_STAT:
	{
		struct shmid64_ds tbuf;
		int result;

		if (cmd == SHM_STAT) {
			shp = shm_lock(ns, shmid);
			if (IS_ERR(shp)) {
				err = PTR_ERR(shp);
				goto out;
			}
			result = shp->shm_perm.id;
		} else {
			shp = shm_lock_check(ns, shmid);
			if (IS_ERR(shp)) {
				err = PTR_ERR(shp);
				goto out;
			}
			result = 0;
		}
		err = -EACCES;
		if (ipcperms(&shp->shm_perm, S_IRUGO))
			goto out_unlock;
		err = security_shm_shmctl(shp, cmd);
		if (err)
			goto out_unlock;
		memset(&tbuf, 0, sizeof(tbuf));
		kernel_to_ipc64_perm(&shp->shm_perm, &tbuf.shm_perm);
		tbuf.shm_segsz	= shp->shm_segsz;
		tbuf.shm_atime	= shp->shm_atim;
		tbuf.shm_dtime	= shp->shm_dtim;
		tbuf.shm_ctime	= shp->shm_ctim;
		tbuf.shm_cpid	= shp->shm_cprid;
		tbuf.shm_lpid	= shp->shm_lprid;
		tbuf.shm_nattch	= shp->shm_nattch;
		shm_unlock(shp);
		if (copy_shmid_to_user(buf, &tbuf, version))
			err = -EFAULT;
		else
			err = result;
		goto out;
	}
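	/*
	 * SHM_LOCK pins a segment's pages in memory (they stop being
	 * swappable); SHM_UNLOCK undoes that.  Without CAP_IPC_LOCK the
	 * caller must match the segment's owner or creator and, for
	 * SHM_LOCK, have a non-zero RLIMIT_MEMLOCK.
	 */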
	case SHM_LOCK:
	case SHM_UNLOCK:
	{
		struct file *uninitialized_var(shm_file);

		lru_add_drain_all();  /* drain pagevecs to lru lists */

		shp = shm_lock_check(ns, shmid);
		if (IS_ERR(shp)) {
			err = PTR_ERR(shp);
			goto out;
		}

		audit_ipc_obj(&(shp->shm_perm));

		if (!capable(CAP_IPC_LOCK)) {
			uid_t euid = current_euid();

			err = -EPERM;
			if (euid != shp->shm_perm.uid &&
			    euid != shp->shm_perm.cuid)
				goto out_unlock;
			if (cmd == SHM_LOCK &&
			    !current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur)
				goto out_unlock;
		}

		err = security_shm_shmctl(shp, cmd);
		if (err)
			goto out_unlock;

		if (cmd == SHM_LOCK) {
			struct user_struct *user = current_user();

			if (!is_file_hugepages(shp->shm_file)) {
				err = shmem_lock(shp->shm_file, 1, user);
				if (!err && !(shp->shm_perm.mode & SHM_LOCKED)) {
					shp->shm_perm.mode |= SHM_LOCKED;
					shp->mlock_user = user;
				}
			}
		} else if (!is_file_hugepages(shp->shm_file)) {
			shmem_lock(shp->shm_file, 0, shp->mlock_user);
			shp->shm_perm.mode &= ~SHM_LOCKED;
			shp->mlock_user = NULL;
		}
		shm_unlock(shp);
		goto out;
	}
	case IPC_RMID:
	case IPC_SET:
		err = shmctl_down(ns, shmid, cmd, buf, version);
		return err;
	default:
		return -EINVAL;
	}

out_unlock:
	shm_unlock(shp);
out:
	return err;
}
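
/*
 * Attach protocol: do_shmat() raises shm_nattch while the segment lock
 * is held, which keeps shm_destroy() away while the lock is dropped to
 * build the mapping.  Once mmap has run (shm_open() takes its own
 * count) the temporary count is dropped again at out_nattch, which also
 * destroys a SHM_DEST-marked orphan left with no attaches, exactly as
 * shm_close() would.
 */
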
/*
 * Fix shmaddr, allocate descriptor, map shm, add attach descriptor to lists.
 *
 * NOTE! Despite the name, this is NOT a direct system call entrypoint. The
 * "raddr" pointer points to kernel space, and there has to be a wrapper
 * around this.
 */
long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
{
	struct shmid_kernel *shp;
	unsigned long addr;
	unsigned long size;
	struct file *file;
	int err;
	unsigned long flags;
	unsigned long prot;
	int acc_mode;
	unsigned long user_addr;
	struct ipc_namespace *ns;
	struct shm_file_data *sfd;
	struct path path;
	fmode_t f_mode;

	err = -EINVAL;
	if (shmid < 0)
		goto out;
	else if ((addr = (ulong)shmaddr)) {
		if (addr & (SHMLBA-1)) {
			if (shmflg & SHM_RND)
				addr &= ~(SHMLBA-1);	/* round down */
			else
#ifndef __ARCH_FORCE_SHMLBA
				if (addr & ~PAGE_MASK)
#endif
					goto out;
		}
		flags = MAP_SHARED | MAP_FIXED;
	} else {
		if ((shmflg & SHM_REMAP))
			goto out;

		flags = MAP_SHARED;
	}

	if (shmflg & SHM_RDONLY) {
		prot = PROT_READ;
		acc_mode = S_IRUGO;
		f_mode = FMODE_READ;
	} else {
		prot = PROT_READ | PROT_WRITE;
		acc_mode = S_IRUGO | S_IWUGO;
		f_mode = FMODE_READ | FMODE_WRITE;
	}
	if (shmflg & SHM_EXEC) {
		prot |= PROT_EXEC;
		acc_mode |= S_IXUGO;
	}

	/*
	 * We cannot rely on the fs check since SYSV IPC does have an
	 * additional creator id...
	 */
	ns = current->nsproxy->ipc_ns;
	shp = shm_lock_check(ns, shmid);
	if (IS_ERR(shp)) {
		err = PTR_ERR(shp);
		goto out;
	}

	err = -EACCES;
	if (ipcperms(&shp->shm_perm, acc_mode))
		goto out_unlock;

	err = security_shm_shmat(shp, shmaddr, shmflg);
	if (err)
		goto out_unlock;

	path.dentry = dget(shp->shm_file->f_path.dentry);
	path.mnt    = shp->shm_file->f_path.mnt;
	shp->shm_nattch++;
	size = i_size_read(path.dentry->d_inode);
	shm_unlock(shp);

	err = -ENOMEM;
	sfd = kzalloc(sizeof(*sfd), GFP_KERNEL);
	if (!sfd)
		goto out_put_dentry;

	file = alloc_file(path.mnt, path.dentry, f_mode, &shm_file_operations);
	if (!file)
		goto out_free;
	ima_shm_check(file);

	file->private_data = sfd;
	file->f_mapping = shp->shm_file->f_mapping;
	sfd->id = shp->shm_perm.id;
	sfd->ns = get_ipc_ns(ns);
	sfd->file = shp->shm_file;
	sfd->vm_ops = NULL;

	down_write(&current->mm->mmap_sem);
	if (addr && !(shmflg & SHM_REMAP)) {
		err = -EINVAL;
		if (find_vma_intersection(current->mm, addr, addr + size))
			goto invalid;
		/*
		 * If shm segment goes below stack, make sure there is some
		 * space left for the stack to grow (at least 4 pages).
		 */
		if (addr < current->mm->start_stack &&
		    addr > current->mm->start_stack - size - PAGE_SIZE * 5)
			goto invalid;
	}

	user_addr = do_mmap(file, addr, size, prot, flags, 0);
	*raddr = user_addr;
	err = 0;
	if (IS_ERR_VALUE(user_addr))
		err = (long)user_addr;
invalid:
	up_write(&current->mm->mmap_sem);

	fput(file);

out_nattch:
	down_write(&shm_ids(ns).rw_mutex);
	shp = shm_lock(ns, shmid);
	BUG_ON(IS_ERR(shp));
	shp->shm_nattch--;
	if (shp->shm_nattch == 0 &&
	    shp->shm_perm.mode & SHM_DEST)
		shm_destroy(ns, shp);
	else
		shm_unlock(shp);
	up_write(&shm_ids(ns).rw_mutex);

out:
	return err;

out_unlock:
	shm_unlock(shp);
	goto out;

out_free:
	kfree(sfd);
out_put_dentry:
	dput(path.dentry);
	goto out_nattch;
}
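
/*
 * A successful attach address can look like a negative errno value on
 * some architectures, so the slot is explicitly marked as a successful
 * return before the address is handed back.
 */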
SYSCALL_DEFINE3(shmat, int, shmid, char __user *, shmaddr, int, shmflg)
{
	unsigned long ret;
	long err;

	err = do_shmat(shmid, shmaddr, shmflg, &ret);
	if (err)
		return err;
	force_successful_syscall_return();
	return (long)ret;
}

/*
 * Detach and kill the segment if it is marked destroyed.
 * The work is done in shm_close().
 */
SYSCALL_DEFINE1(shmdt, char __user *, shmaddr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma, *next;
	unsigned long addr = (unsigned long)shmaddr;
	loff_t size = 0;
	int retval = -EINVAL;

	if (addr & ~PAGE_MASK)
		return retval;

	down_write(&mm->mmap_sem);

	/*
	 * This function tries to be smart and unmap shm segments that
	 * were modified by partial mlock or munmap calls:
	 * - It first determines the size of the shm segment that should be
	 *   unmapped: it searches for a vma that is backed by shm and that
	 *   started at address shmaddr.  It records its size and then unmaps
	 *   it.
	 * - Then it unmaps all shm vmas that started at shmaddr and that
	 *   are within the initially determined size.
	 * Errors from do_munmap are ignored: the function only fails if
	 * it's called with invalid parameters or if it's called to unmap
	 * a part of a vma.  Both calls in this function are for full vmas,
	 * the parameters are directly copied from the vma itself and always
	 * valid - therefore do_munmap cannot fail.  (famous last words?)
	 */
	/*
	 * If it had been mremap()'d, the starting address would not
	 * match the usual checks anyway.  So assume all vma's are
	 * above the starting address given.
	 */
	vma = find_vma(mm, addr);

#ifdef CONFIG_MMU
	while (vma) {
		next = vma->vm_next;

		/*
		 * Check if the starting address would match, i.e. it's
		 * a fragment created by mprotect() and/or munmap(), or
		 * otherwise it starts at this address with no hassles.
		 */
		if ((vma->vm_ops == &shm_vm_ops) &&
			(vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) {

			size = vma->vm_file->f_path.dentry->d_inode->i_size;
			do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
			/*
			 * We discovered the size of the shm segment, so
			 * break out of here and fall through to the next
			 * loop that uses the size information to stop
			 * searching for matching vma's.
			 */
			retval = 0;
			vma = next;
			break;
		}
		vma = next;
	}

	/*
	 * We need look no further than the maximum address a fragment
	 * could possibly have landed at.  Also cast things to loff_t to
	 * prevent overflows and make comparisons vs. equal-width types.
	 */
	size = PAGE_ALIGN(size);
	while (vma && (loff_t)(vma->vm_end - addr) <= size) {
		next = vma->vm_next;

		/* finding a matching vma now does not alter retval */
		if ((vma->vm_ops == &shm_vm_ops) &&
			(vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff)
			do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
		vma = next;
	}

#else /* CONFIG_MMU */
	/*
	 * Under NOMMU conditions, the exact address to be destroyed must
	 * be given.
	 */
	retval = -EINVAL;
	if (vma->vm_start == addr && vma->vm_ops == &shm_vm_ops) {
		do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
		retval = 0;
	}

#endif

	up_write(&mm->mmap_sem);
	return retval;
}

#ifdef CONFIG_PROC_FS
static int sysvipc_shm_proc_show(struct seq_file *s, void *it)
{
	struct shmid_kernel *shp = it;

#if BITS_PER_LONG <= 32
#define SIZE_SPEC "%10lu"
#else
#define SIZE_SPEC "%21lu"
#endif

	return seq_printf(s,
			  "%10d %10d  %4o " SIZE_SPEC " %5u %5u  "
			  "%5lu %5u %5u %5u %5u %10lu %10lu %10lu\n",
			  shp->shm_perm.key,
			  shp->shm_perm.id,
			  shp->shm_perm.mode,
			  shp->shm_segsz,
			  shp->shm_cprid,
			  shp->shm_lprid,
			  shp->shm_nattch,
			  shp->shm_perm.uid,
			  shp->shm_perm.gid,
			  shp->shm_perm.cuid,
			  shp->shm_perm.cgid,
			  shp->shm_atim,
			  shp->shm_dtim,
			  shp->shm_ctim);
}
#endif