/*
 * linux/ipc/shm.c
 * Copyright (C) 1992, 1993 Krishna Balasubramanian
 *	 Many improvements/fixes by Bruno Haible.
 * Replaced `struct shm_desc' by `struct vm_area_struct', July 1994.
 * Fixed the shm swap deallocation (shm_unuse()), August 1998 Andrea Arcangeli.
 *
 * /proc/sysvipc/shm support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
 * BIGMEM support, Andrea Arcangeli <andrea@suse.de>
 * SMP thread shm, Jean-Luc Boyard <jean-luc.boyard@siemens.fr>
 * HIGHMEM support, Ingo Molnar <mingo@redhat.com>
 * Make shmmax, shmall, shmmni sysctl'able, Christoph Rohland <cr@sap.com>
 * Shared /dev/zero support, Kanoj Sarcar <kanoj@sgi.com>
 * Move the mm functionality over to mm/shmem.c, Christoph Rohland <cr@sap.com>
 *
 * support for audit of ipc object properties and permission changes
 * Dustin Kirkland <dustin.kirkland@us.ibm.com>
 *
 * namespaces support
 * OpenVZ, SWsoft Inc.
 * Pavel Emelianov <xemul@openvz.org>
 *
 * Better ipc lock (kern_ipc_perm.lock) handling
 * Davidlohr Bueso <davidlohr.bueso@hp.com>, June 2013.
 */

#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/init.h>
#include <linux/file.h>
#include <linux/mman.h>
#include <linux/shmem_fs.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/capability.h>
#include <linux/ptrace.h>
#include <linux/seq_file.h>
#include <linux/rwsem.h>
#include <linux/nsproxy.h>
#include <linux/mount.h>
#include <linux/ipc_namespace.h>

#include <linux/uaccess.h>

#include "util.h"

struct shm_file_data {
	int id;
	struct ipc_namespace *ns;
	struct file *file;
	const struct vm_operations_struct *vm_ops;
};

#define shm_file_data(file) (*((struct shm_file_data **)&(file)->private_data))
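
/*
 * Note: shm_file_data() expands to an lvalue, so it is used both to
 * read and to assign file->private_data (see shm_release(), which sets
 * it back to NULL after freeing the structure).
 */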

static const struct file_operations shm_file_operations;
static const struct vm_operations_struct shm_vm_ops;

#define shm_ids(ns)	((ns)->ids[IPC_SHM_IDS])

#define shm_unlock(shp)			\
	ipc_unlock(&(shp)->shm_perm)

static int newseg(struct ipc_namespace *, struct ipc_params *);
static void shm_open(struct vm_area_struct *vma);
static void shm_close(struct vm_area_struct *vma);
static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp);
#ifdef CONFIG_PROC_FS
static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
#endif

void shm_init_ns(struct ipc_namespace *ns)
{
	ns->shm_ctlmax = SHMMAX;
	ns->shm_ctlall = SHMALL;
	ns->shm_ctlmni = SHMMNI;
	ns->shm_rmid_forced = 0;
	ns->shm_tot = 0;
	ipc_init_ids(&shm_ids(ns));
}

/*
 * Called with shm_ids.rwsem (writer) and the shp structure locked.
 * Only shm_ids.rwsem remains locked on exit.
 */
static void do_shm_rmid(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
{
	struct shmid_kernel *shp;
	shp = container_of(ipcp, struct shmid_kernel, shm_perm);

	if (shp->shm_nattch) {
		shp->shm_perm.mode |= SHM_DEST;
		/* Do not let it be found any more */
		shp->shm_perm.key = IPC_PRIVATE;
		shm_unlock(shp);
	} else
		shm_destroy(ns, shp);
}

#ifdef CONFIG_IPC_NS
void shm_exit_ns(struct ipc_namespace *ns)
{
	free_ipcs(ns, &shm_ids(ns), do_shm_rmid);
	idr_destroy(&ns->ids[IPC_SHM_IDS].ipcs_idr);
}
#endif

static int __init ipc_ns_init(void)
{
	shm_init_ns(&init_ipc_ns);
	return 0;
}

pure_initcall(ipc_ns_init);

void __init shm_init(void)
{
	ipc_init_proc_interface("sysvipc/shm",
#if BITS_PER_LONG <= 32
				"       key      shmid perms       size  cpid  lpid nattch   uid   gid  cuid  cgid      atime      dtime      ctime        rss       swap\n",
#else
				"       key      shmid perms                  size  cpid  lpid nattch   uid   gid  cuid  cgid      atime      dtime      ctime                   rss                  swap\n",
#endif
				IPC_SHM_IDS, sysvipc_shm_proc_show);
}

static inline struct shmid_kernel *shm_obtain_object(struct ipc_namespace *ns, int id)
{
	struct kern_ipc_perm *ipcp = ipc_obtain_object(&shm_ids(ns), id);

	if (IS_ERR(ipcp))
		return ERR_CAST(ipcp);

	return container_of(ipcp, struct shmid_kernel, shm_perm);
}

static inline struct shmid_kernel *shm_obtain_object_check(struct ipc_namespace *ns, int id)
{
	struct kern_ipc_perm *ipcp = ipc_obtain_object_check(&shm_ids(ns), id);

	if (IS_ERR(ipcp))
		return ERR_CAST(ipcp);

	return container_of(ipcp, struct shmid_kernel, shm_perm);
}

/*
 * shm_lock_(check_) routines are called in the paths where the rwsem
 * is not necessarily held.
 */
static inline struct shmid_kernel *shm_lock(struct ipc_namespace *ns, int id)
{
	struct kern_ipc_perm *ipcp = ipc_lock(&shm_ids(ns), id);

	if (IS_ERR(ipcp))
		return (struct shmid_kernel *)ipcp;

	return container_of(ipcp, struct shmid_kernel, shm_perm);
}

static inline void shm_lock_by_ptr(struct shmid_kernel *ipcp)
{
	rcu_read_lock();
	ipc_lock_object(&ipcp->shm_perm);
}

static void shm_rcu_free(struct rcu_head *head)
{
	struct ipc_rcu *p = container_of(head, struct ipc_rcu, rcu);
	struct shmid_kernel *shp = ipc_rcu_to_struct(p);

	security_shm_free(shp);
	ipc_rcu_free(head);
}

static inline void shm_rmid(struct ipc_namespace *ns, struct shmid_kernel *s)
{
	list_del(&s->shm_clist);
	ipc_rmid(&shm_ids(ns), &s->shm_perm);
}


/* This is called by fork, once for every shm attach. */
static void shm_open(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	struct shmid_kernel *shp;

	shp = shm_lock(sfd->ns, sfd->id);
	BUG_ON(IS_ERR(shp));
	shp->shm_atim = get_seconds();
	shp->shm_lprid = task_tgid_vnr(current);
	shp->shm_nattch++;
	shm_unlock(shp);
}

/*
 * shm_destroy - free the struct shmid_kernel
 *
 * @ns: namespace
 * @shp: struct to free
 *
 * It has to be called with shp and shm_ids.rwsem (writer) locked,
 * but returns with shp unlocked and freed.
 */
static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
{
	struct file *shm_file;

	shm_file = shp->shm_file;
	shp->shm_file = NULL;
	ns->shm_tot -= (shp->shm_segsz + PAGE_SIZE - 1) >> PAGE_SHIFT;
	shm_rmid(ns, shp);
	shm_unlock(shp);
	if (!is_file_hugepages(shm_file))
		shmem_lock(shm_file, 0, shp->mlock_user);
	else if (shp->mlock_user)
		user_shm_unlock(i_size_read(file_inode(shm_file)),
				shp->mlock_user);
	fput(shm_file);
	ipc_rcu_putref(shp, shm_rcu_free);
}

/*
 * shm_may_destroy - identifies whether shm segment should be destroyed now
 *
 * Returns true if and only if there are no active users of the segment and
 * one of the following is true:
 *
 * 1) shmctl(id, IPC_RMID, NULL) was called for this shp
 *
 * 2) sysctl kernel.shm_rmid_forced is set to 1.
 */
static bool shm_may_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
{
	return (shp->shm_nattch == 0) &&
	       (ns->shm_rmid_forced ||
		(shp->shm_perm.mode & SHM_DEST));
}
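
/*
 * Example: after shmctl(id, IPC_RMID, NULL) on a segment that still has
 * attaches, do_shm_rmid() only sets SHM_DEST and resets the key to
 * IPC_PRIVATE; the final shm_close() then sees shm_nattch == 0 here and
 * calls shm_destroy().
 */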

/*
 * Remove the attach descriptor vma.
 * Free the memory for the segment if it is marked destroyed.
 * The descriptor has already been removed from the current->mm->mmap list
 * and will later be kfree()d.
 */
static void shm_close(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	struct shmid_kernel *shp;
	struct ipc_namespace *ns = sfd->ns;

	down_write(&shm_ids(ns).rwsem);
	/* remove from the list of attaches of the shm segment */
	shp = shm_lock(ns, sfd->id);
	BUG_ON(IS_ERR(shp));
	shp->shm_lprid = task_tgid_vnr(current);
	shp->shm_dtim = get_seconds();
	shp->shm_nattch--;
	if (shm_may_destroy(ns, shp))
		shm_destroy(ns, shp);
	else
		shm_unlock(shp);
	up_write(&shm_ids(ns).rwsem);
}

/* Called with ns->shm_ids(ns).rwsem locked */
static int shm_try_destroy_orphaned(int id, void *p, void *data)
{
	struct ipc_namespace *ns = data;
	struct kern_ipc_perm *ipcp = p;
	struct shmid_kernel *shp = container_of(ipcp, struct shmid_kernel, shm_perm);

	/*
	 * We want to destroy segments without users whose originating
	 * process has already exited.
	 *
	 * As shp->* are changed under rwsem, it's safe to skip shp locking.
	 */
	if (shp->shm_creator != NULL)
		return 0;

	if (shm_may_destroy(ns, shp)) {
		shm_lock_by_ptr(shp);
		shm_destroy(ns, shp);
	}
	return 0;
}

void shm_destroy_orphaned(struct ipc_namespace *ns)
{
	down_write(&shm_ids(ns).rwsem);
	if (shm_ids(ns).in_use)
		idr_for_each(&shm_ids(ns).ipcs_idr, &shm_try_destroy_orphaned, ns);
	up_write(&shm_ids(ns).rwsem);
}

/* Locking assumes this will only be called with task == current */
void exit_shm(struct task_struct *task)
{
	struct ipc_namespace *ns = task->nsproxy->ipc_ns;
	struct shmid_kernel *shp, *n;

	if (list_empty(&task->sysvshm.shm_clist))
		return;

	/*
	 * If kernel.shm_rmid_forced is not set then only keep track of
	 * which shmids are orphaned, so that setting the sysctl later
	 * can clean them up.
	 */
	if (!ns->shm_rmid_forced) {
		down_read(&shm_ids(ns).rwsem);
		list_for_each_entry(shp, &task->sysvshm.shm_clist, shm_clist)
			shp->shm_creator = NULL;
		/*
		 * We hold only the read lock, but we are only called on
		 * current, so no entry on the list can be touched
		 * concurrently.
		 */
		list_del(&task->sysvshm.shm_clist);
		up_read(&shm_ids(ns).rwsem);
		return;
	}

	/*
	 * Destroy all segments that were created but never mapped, and
	 * mark any mapped segments as orphaned to cover toggling of the
	 * sysctl. Destruction is skipped if shm_may_destroy() returns
	 * false.
	 */
	down_write(&shm_ids(ns).rwsem);
	list_for_each_entry_safe(shp, n, &task->sysvshm.shm_clist, shm_clist) {
		shp->shm_creator = NULL;

		if (shm_may_destroy(ns, shp)) {
			shm_lock_by_ptr(shp);
			shm_destroy(ns, shp);
		}
	}

	/* Remove the list head from any segments still attached. */
	list_del(&task->sysvshm.shm_clist);
	up_write(&shm_ids(ns).rwsem);
}

static int shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);

	return sfd->vm_ops->fault(vma, vmf);
}

#ifdef CONFIG_NUMA
static int shm_set_policy(struct vm_area_struct *vma, struct mempolicy *new)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	int err = 0;
	if (sfd->vm_ops->set_policy)
		err = sfd->vm_ops->set_policy(vma, new);
	return err;
}

static struct mempolicy *shm_get_policy(struct vm_area_struct *vma,
					unsigned long addr)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	struct mempolicy *pol = NULL;

	if (sfd->vm_ops->get_policy)
		pol = sfd->vm_ops->get_policy(vma, addr);
	else if (vma->vm_policy)
		pol = vma->vm_policy;

	return pol;
}
#endif

static int shm_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct shm_file_data *sfd = shm_file_data(file);
	int ret;

	ret = sfd->file->f_op->mmap(sfd->file, vma);
	if (ret != 0)
		return ret;
	sfd->vm_ops = vma->vm_ops;
#ifdef CONFIG_MMU
	BUG_ON(!sfd->vm_ops->fault);
#endif
	vma->vm_ops = &shm_vm_ops;
	shm_open(vma);

	return ret;
}

static int shm_release(struct inode *ino, struct file *file)
{
	struct shm_file_data *sfd = shm_file_data(file);

	put_ipc_ns(sfd->ns);
	shm_file_data(file) = NULL;
	kfree(sfd);
	return 0;
}

static int shm_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct shm_file_data *sfd = shm_file_data(file);

	if (!sfd->file->f_op->fsync)
		return -EINVAL;
	return sfd->file->f_op->fsync(sfd->file, start, end, datasync);
}

static long shm_fallocate(struct file *file, int mode, loff_t offset,
			  loff_t len)
{
	struct shm_file_data *sfd = shm_file_data(file);

	if (!sfd->file->f_op->fallocate)
		return -EOPNOTSUPP;
	return sfd->file->f_op->fallocate(file, mode, offset, len);
}

static unsigned long shm_get_unmapped_area(struct file *file,
	unsigned long addr, unsigned long len, unsigned long pgoff,
	unsigned long flags)
{
	struct shm_file_data *sfd = shm_file_data(file);
	return sfd->file->f_op->get_unmapped_area(sfd->file, addr, len,
						pgoff, flags);
}

static const struct file_operations shm_file_operations = {
	.mmap		= shm_mmap,
	.fsync		= shm_fsync,
	.release	= shm_release,
#ifndef CONFIG_MMU
	.get_unmapped_area	= shm_get_unmapped_area,
#endif
	.llseek		= noop_llseek,
	.fallocate	= shm_fallocate,
};

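/*
 * Unlike shm_file_operations above, the hugetlb variant always provides
 * ->get_unmapped_area so that the underlying hugetlbfs file can place
 * the mapping at a suitably aligned address.
 */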
static const struct file_operations shm_file_operations_huge = {
	.mmap		= shm_mmap,
	.fsync		= shm_fsync,
	.release	= shm_release,
	.get_unmapped_area	= shm_get_unmapped_area,
	.llseek		= noop_llseek,
	.fallocate	= shm_fallocate,
};

int is_file_shm_hugepages(struct file *file)
{
	return file->f_op == &shm_file_operations_huge;
}

static const struct vm_operations_struct shm_vm_ops = {
	.open	= shm_open,	/* callback for a new vm-area open */
	.close	= shm_close,	/* callback for when the vm-area is released */
	.fault	= shm_fault,
#if defined(CONFIG_NUMA)
	.set_policy = shm_set_policy,
	.get_policy = shm_get_policy,
#endif
};

/**
 * newseg - Create a new shared memory segment
 * @ns: namespace
 * @params: ptr to the structure that contains key, size and shmflg
 *
 * Called with shm_ids.rwsem held as a writer.
 */
static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
{
	key_t key = params->key;
	int shmflg = params->flg;
	size_t size = params->u.size;
	int error;
	struct shmid_kernel *shp;
	size_t numpages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	struct file *file;
	char name[13];
	int id;
	vm_flags_t acctflag = 0;

	if (size < SHMMIN || size > ns->shm_ctlmax)
		return -EINVAL;

	if (numpages << PAGE_SHIFT < size)
		return -ENOSPC;

	if (ns->shm_tot + numpages < ns->shm_tot ||
			ns->shm_tot + numpages > ns->shm_ctlall)
		return -ENOSPC;

	shp = ipc_rcu_alloc(sizeof(*shp));
	if (!shp)
		return -ENOMEM;

	shp->shm_perm.key = key;
	shp->shm_perm.mode = (shmflg & S_IRWXUGO);
	shp->mlock_user = NULL;

	shp->shm_perm.security = NULL;
	error = security_shm_alloc(shp);
	if (error) {
		ipc_rcu_putref(shp, ipc_rcu_free);
		return error;
	}

	sprintf(name, "SYSV%08x", key);
	if (shmflg & SHM_HUGETLB) {
		struct hstate *hs;
		size_t hugesize;

		hs = hstate_sizelog((shmflg >> SHM_HUGE_SHIFT) & SHM_HUGE_MASK);
		if (!hs) {
			error = -EINVAL;
			goto no_file;
		}
		hugesize = ALIGN(size, huge_page_size(hs));

		/* hugetlb_file_setup applies strict accounting */
		if (shmflg & SHM_NORESERVE)
			acctflag = VM_NORESERVE;
		file = hugetlb_file_setup(name, hugesize, acctflag,
				  &shp->mlock_user, HUGETLB_SHMFS_INODE,
				(shmflg >> SHM_HUGE_SHIFT) & SHM_HUGE_MASK);
	} else {
		/*
		 * Do not skip accounting under OVERCOMMIT_NEVER, even if
		 * it is asked for.
		 */
		if ((shmflg & SHM_NORESERVE) &&
				sysctl_overcommit_memory != OVERCOMMIT_NEVER)
			acctflag = VM_NORESERVE;
		file = shmem_file_setup(name, size, acctflag);
	}
	error = PTR_ERR(file);
	if (IS_ERR(file))
		goto no_file;

	id = ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni);
	if (id < 0) {
		error = id;
		goto no_id;
	}

	shp->shm_cprid = task_tgid_vnr(current);
	shp->shm_lprid = 0;
	shp->shm_atim = shp->shm_dtim = 0;
	shp->shm_ctim = get_seconds();
	shp->shm_segsz = size;
	shp->shm_nattch = 0;
	shp->shm_file = file;
	shp->shm_creator = current;
	list_add(&shp->shm_clist, &current->sysvshm.shm_clist);

	/*
	 * shmid gets reported as "inode#" in /proc/pid/maps.
	 * proc-ps tools use this. Changing this will break them.
	 */
	file_inode(file)->i_ino = shp->shm_perm.id;

	ns->shm_tot += numpages;
	error = shp->shm_perm.id;

	ipc_unlock_object(&shp->shm_perm);
	rcu_read_unlock();
	return error;

no_id:
	if (is_file_hugepages(file) && shp->mlock_user)
		user_shm_unlock(size, shp->mlock_user);
	fput(file);
no_file:
	ipc_rcu_putref(shp, shm_rcu_free);
	return error;
}

/*
 * Called with shm_ids.rwsem and ipcp locked.
 */
static inline int shm_security(struct kern_ipc_perm *ipcp, int shmflg)
{
	struct shmid_kernel *shp;

	shp = container_of(ipcp, struct shmid_kernel, shm_perm);
	return security_shm_associate(shp, shmflg);
}

/*
 * Called with shm_ids.rwsem and ipcp locked.
 */
static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
				struct ipc_params *params)
{
	struct shmid_kernel *shp;

	shp = container_of(ipcp, struct shmid_kernel, shm_perm);
	if (shp->shm_segsz < params->u.size)
		return -EINVAL;

	return 0;
}

SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
{
	struct ipc_namespace *ns;
	static const struct ipc_ops shm_ops = {
		.getnew = newseg,
		.associate = shm_security,
		.more_checks = shm_more_checks,
	};
	struct ipc_params shm_params;

	ns = current->nsproxy->ipc_ns;

	shm_params.key = key;
	shm_params.flg = shmflg;
	shm_params.u.size = size;

	return ipcget(ns, &shm_ids(ns), &shm_ops, &shm_params);
}
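
/*
 * Userspace sketch (illustrative only, not part of this file): creating
 * a one-page private segment with 0600 permissions.
 *
 *	int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
 *
 *	if (id < 0)
 *		perror("shmget");
 *
 * With IPC_PRIVATE, ipcget() skips the .associate and .more_checks
 * callbacks and calls newseg() directly.
 */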

static inline unsigned long copy_shmid_to_user(void __user *buf, struct shmid64_ds *in, int version)
{
	switch (version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	    {
		struct shmid_ds out;

		memset(&out, 0, sizeof(out));
		ipc64_perm_to_ipc_perm(&in->shm_perm, &out.shm_perm);
		out.shm_segsz	= in->shm_segsz;
		out.shm_atime	= in->shm_atime;
		out.shm_dtime	= in->shm_dtime;
		out.shm_ctime	= in->shm_ctime;
		out.shm_cpid	= in->shm_cpid;
		out.shm_lpid	= in->shm_lpid;
		out.shm_nattch	= in->shm_nattch;

		return copy_to_user(buf, &out, sizeof(out));
	    }
	default:
		return -EINVAL;
	}
}

static inline unsigned long
copy_shmid_from_user(struct shmid64_ds *out, void __user *buf, int version)
{
	switch (version) {
	case IPC_64:
		if (copy_from_user(out, buf, sizeof(*out)))
			return -EFAULT;
		return 0;
	case IPC_OLD:
	    {
		struct shmid_ds tbuf_old;

		if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
			return -EFAULT;

		out->shm_perm.uid	= tbuf_old.shm_perm.uid;
		out->shm_perm.gid	= tbuf_old.shm_perm.gid;
		out->shm_perm.mode	= tbuf_old.shm_perm.mode;

		return 0;
	    }
	default:
		return -EINVAL;
	}
}

static inline unsigned long copy_shminfo_to_user(void __user *buf, struct shminfo64 *in, int version)
{
	switch (version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	    {
		struct shminfo out;

		if (in->shmmax > INT_MAX)
			out.shmmax = INT_MAX;
		else
			out.shmmax = (int)in->shmmax;

		out.shmmin	= in->shmmin;
		out.shmmni	= in->shmmni;
		out.shmseg	= in->shmseg;
		out.shmall	= in->shmall;

		return copy_to_user(buf, &out, sizeof(out));
	    }
	default:
		return -EINVAL;
	}
}

/*
 * Calculate and add used RSS and swap pages of a shm.
 * Called with shm_ids.rwsem held as a reader
 */
static void shm_add_rss_swap(struct shmid_kernel *shp,
	unsigned long *rss_add, unsigned long *swp_add)
{
	struct inode *inode;

	inode = file_inode(shp->shm_file);

	if (is_file_hugepages(shp->shm_file)) {
		struct address_space *mapping = inode->i_mapping;
		struct hstate *h = hstate_file(shp->shm_file);
		*rss_add += pages_per_huge_page(h) * mapping->nrpages;
	} else {
#ifdef CONFIG_SHMEM
		struct shmem_inode_info *info = SHMEM_I(inode);
		spin_lock(&info->lock);
		*rss_add += inode->i_mapping->nrpages;
		*swp_add += info->swapped;
		spin_unlock(&info->lock);
#else
		*rss_add += inode->i_mapping->nrpages;
#endif
	}
}

/*
 * Called with shm_ids.rwsem held as a reader
 */
static void shm_get_stat(struct ipc_namespace *ns, unsigned long *rss,
		unsigned long *swp)
{
	int next_id;
	int total, in_use;

	*rss = 0;
	*swp = 0;

	in_use = shm_ids(ns).in_use;

	for (total = 0, next_id = 0; total < in_use; next_id++) {
		struct kern_ipc_perm *ipc;
		struct shmid_kernel *shp;

		ipc = idr_find(&shm_ids(ns).ipcs_idr, next_id);
		if (ipc == NULL)
			continue;
		shp = container_of(ipc, struct shmid_kernel, shm_perm);

		shm_add_rss_swap(shp, rss, swp);

		total++;
	}
}

/*
 * This function handles some shmctl commands which require the rwsem
 * to be held in write mode.
 * NOTE: it must be called with no locks held; the rwsem is taken
 * inside this function.
 */
static int shmctl_down(struct ipc_namespace *ns, int shmid, int cmd,
		       struct shmid_ds __user *buf, int version)
{
	struct kern_ipc_perm *ipcp;
	struct shmid64_ds shmid64;
	struct shmid_kernel *shp;
	int err;

	if (cmd == IPC_SET) {
		if (copy_shmid_from_user(&shmid64, buf, version))
			return -EFAULT;
	}

	down_write(&shm_ids(ns).rwsem);
	rcu_read_lock();

	ipcp = ipcctl_pre_down_nolock(ns, &shm_ids(ns), shmid, cmd,
				      &shmid64.shm_perm, 0);
	if (IS_ERR(ipcp)) {
		err = PTR_ERR(ipcp);
		goto out_unlock1;
	}

	shp = container_of(ipcp, struct shmid_kernel, shm_perm);

	err = security_shm_shmctl(shp, cmd);
	if (err)
		goto out_unlock1;

	switch (cmd) {
	case IPC_RMID:
		ipc_lock_object(&shp->shm_perm);
		/* do_shm_rmid unlocks the ipc object and rcu */
		do_shm_rmid(ns, ipcp);
		goto out_up;
	case IPC_SET:
		ipc_lock_object(&shp->shm_perm);
		err = ipc_update_perm(&shmid64.shm_perm, ipcp);
		if (err)
			goto out_unlock0;
		shp->shm_ctim = get_seconds();
		break;
	default:
		err = -EINVAL;
		goto out_unlock1;
	}

out_unlock0:
	ipc_unlock_object(&shp->shm_perm);
out_unlock1:
	rcu_read_unlock();
out_up:
	up_write(&shm_ids(ns).rwsem);
	return err;
}

static int shmctl_nolock(struct ipc_namespace *ns, int shmid,
			 int cmd, int version, void __user *buf)
{
	int err;
	struct shmid_kernel *shp;

	/* preliminary security checks for *_INFO */
	if (cmd == IPC_INFO || cmd == SHM_INFO) {
		err = security_shm_shmctl(NULL, cmd);
		if (err)
			return err;
	}

	switch (cmd) {
	case IPC_INFO:
	{
		struct shminfo64 shminfo;

		memset(&shminfo, 0, sizeof(shminfo));
		shminfo.shmmni = shminfo.shmseg = ns->shm_ctlmni;
		shminfo.shmmax = ns->shm_ctlmax;
		shminfo.shmall = ns->shm_ctlall;

		shminfo.shmmin = SHMMIN;
		if (copy_shminfo_to_user(buf, &shminfo, version))
			return -EFAULT;

		down_read(&shm_ids(ns).rwsem);
		err = ipc_get_maxid(&shm_ids(ns));
		up_read(&shm_ids(ns).rwsem);

		if (err < 0)
			err = 0;
		goto out;
	}
	case SHM_INFO:
	{
		struct shm_info shm_info;

		memset(&shm_info, 0, sizeof(shm_info));
		down_read(&shm_ids(ns).rwsem);
		shm_info.used_ids = shm_ids(ns).in_use;
		shm_get_stat(ns, &shm_info.shm_rss, &shm_info.shm_swp);
		shm_info.shm_tot = ns->shm_tot;
		shm_info.swap_attempts = 0;
		shm_info.swap_successes = 0;
		err = ipc_get_maxid(&shm_ids(ns));
		up_read(&shm_ids(ns).rwsem);
		if (copy_to_user(buf, &shm_info, sizeof(shm_info))) {
			err = -EFAULT;
			goto out;
		}

		err = err < 0 ? 0 : err;
		goto out;
	}
	case SHM_STAT:
	case IPC_STAT:
	{
		struct shmid64_ds tbuf;
		int result;

		rcu_read_lock();
		if (cmd == SHM_STAT) {
			shp = shm_obtain_object(ns, shmid);
			if (IS_ERR(shp)) {
				err = PTR_ERR(shp);
				goto out_unlock;
			}
			result = shp->shm_perm.id;
		} else {
			shp = shm_obtain_object_check(ns, shmid);
			if (IS_ERR(shp)) {
				err = PTR_ERR(shp);
				goto out_unlock;
			}
			result = 0;
		}

		err = -EACCES;
		if (ipcperms(ns, &shp->shm_perm, S_IRUGO))
			goto out_unlock;

		err = security_shm_shmctl(shp, cmd);
		if (err)
			goto out_unlock;

		memset(&tbuf, 0, sizeof(tbuf));
		kernel_to_ipc64_perm(&shp->shm_perm, &tbuf.shm_perm);
		tbuf.shm_segsz	= shp->shm_segsz;
		tbuf.shm_atime	= shp->shm_atim;
		tbuf.shm_dtime	= shp->shm_dtim;
		tbuf.shm_ctime	= shp->shm_ctim;
		tbuf.shm_cpid	= shp->shm_cprid;
		tbuf.shm_lpid	= shp->shm_lprid;
		tbuf.shm_nattch	= shp->shm_nattch;
		rcu_read_unlock();

		if (copy_shmid_to_user(buf, &tbuf, version))
			err = -EFAULT;
		else
			err = result;
		goto out;
	}
	default:
		return -EINVAL;
	}

out_unlock:
	rcu_read_unlock();
out:
	return err;
}

SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
{
	struct shmid_kernel *shp;
	int err, version;
	struct ipc_namespace *ns;

	if (cmd < 0 || shmid < 0)
		return -EINVAL;

	version = ipc_parse_version(&cmd);
	ns = current->nsproxy->ipc_ns;

	switch (cmd) {
	case IPC_INFO:
	case SHM_INFO:
	case SHM_STAT:
	case IPC_STAT:
		return shmctl_nolock(ns, shmid, cmd, version, buf);
	case IPC_RMID:
	case IPC_SET:
		return shmctl_down(ns, shmid, cmd, buf, version);
	case SHM_LOCK:
	case SHM_UNLOCK:
	{
		struct file *shm_file;

		rcu_read_lock();
		shp = shm_obtain_object_check(ns, shmid);
		if (IS_ERR(shp)) {
			err = PTR_ERR(shp);
			goto out_unlock1;
		}

		audit_ipc_obj(&(shp->shm_perm));
		err = security_shm_shmctl(shp, cmd);
		if (err)
			goto out_unlock1;

		ipc_lock_object(&shp->shm_perm);

		/* check if shm_destroy() is tearing down shp */
		if (!ipc_valid_object(&shp->shm_perm)) {
			err = -EIDRM;
			goto out_unlock0;
		}

		if (!ns_capable(ns->user_ns, CAP_IPC_LOCK)) {
			kuid_t euid = current_euid();
			if (!uid_eq(euid, shp->shm_perm.uid) &&
			    !uid_eq(euid, shp->shm_perm.cuid)) {
				err = -EPERM;
				goto out_unlock0;
			}
			if (cmd == SHM_LOCK && !rlimit(RLIMIT_MEMLOCK)) {
				err = -EPERM;
				goto out_unlock0;
			}
		}

		shm_file = shp->shm_file;
		if (is_file_hugepages(shm_file))
			goto out_unlock0;

		if (cmd == SHM_LOCK) {
			struct user_struct *user = current_user();
			err = shmem_lock(shm_file, 1, user);
			if (!err && !(shp->shm_perm.mode & SHM_LOCKED)) {
				shp->shm_perm.mode |= SHM_LOCKED;
				shp->mlock_user = user;
			}
			goto out_unlock0;
		}

		/* SHM_UNLOCK */
		if (!(shp->shm_perm.mode & SHM_LOCKED))
			goto out_unlock0;
		shmem_lock(shm_file, 0, shp->mlock_user);
		shp->shm_perm.mode &= ~SHM_LOCKED;
		shp->mlock_user = NULL;
		get_file(shm_file);
		ipc_unlock_object(&shp->shm_perm);
		rcu_read_unlock();
		shmem_unlock_mapping(shm_file->f_mapping);

		fput(shm_file);
		return err;
	}
	default:
		return -EINVAL;
	}

out_unlock0:
	ipc_unlock_object(&shp->shm_perm);
out_unlock1:
	rcu_read_unlock();
	return err;
}
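
/*
 * Userspace sketch (illustrative only): querying a segment and then
 * marking it for removal.
 *
 *	struct shmid_ds ds;
 *
 *	if (shmctl(id, IPC_STAT, &ds) == 0)
 *		printf("size=%zu nattch=%lu\n", ds.shm_segsz,
 *		       (unsigned long)ds.shm_nattch);
 *	shmctl(id, IPC_RMID, NULL);
 *
 * As noted above, IPC_RMID only marks a still-attached segment SHM_DEST;
 * the memory is freed once the last detach drops shm_nattch to zero.
 */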

/*
 * Fix shmaddr, allocate descriptor, map shm, add attach descriptor to lists.
 *
 * NOTE! Despite the name, this is NOT a direct system call entrypoint. The
 * "raddr" pointer points to kernel space, and there has to be a wrapper
 * around this.
 */
long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
	      unsigned long shmlba)
{
	struct shmid_kernel *shp;
	unsigned long addr;
	unsigned long size;
	struct file *file;
	int    err;
	unsigned long flags;
	unsigned long prot;
	int acc_mode;
	struct ipc_namespace *ns;
	struct shm_file_data *sfd;
	struct path path;
	fmode_t f_mode;
	unsigned long populate = 0;

	err = -EINVAL;
	if (shmid < 0)
		goto out;
	else if ((addr = (ulong)shmaddr)) {
		if (addr & (shmlba - 1)) {
			if (shmflg & SHM_RND)
				addr &= ~(shmlba - 1);	   /* round down */
			else
#ifndef __ARCH_FORCE_SHMLBA
				if (addr & ~PAGE_MASK)
#endif
					goto out;
		}
		flags = MAP_SHARED | MAP_FIXED;
	} else {
		if ((shmflg & SHM_REMAP))
			goto out;

		flags = MAP_SHARED;
	}

	if (shmflg & SHM_RDONLY) {
		prot = PROT_READ;
		acc_mode = S_IRUGO;
		f_mode = FMODE_READ;
	} else {
		prot = PROT_READ | PROT_WRITE;
		acc_mode = S_IRUGO | S_IWUGO;
		f_mode = FMODE_READ | FMODE_WRITE;
	}
	if (shmflg & SHM_EXEC) {
		prot |= PROT_EXEC;
		acc_mode |= S_IXUGO;
	}

	/*
	 * We cannot rely on the fs check since SYSV IPC does have an
	 * additional creator id...
	 */
	ns = current->nsproxy->ipc_ns;
	rcu_read_lock();
	shp = shm_obtain_object_check(ns, shmid);
	if (IS_ERR(shp)) {
		err = PTR_ERR(shp);
		goto out_unlock;
	}

	err = -EACCES;
	if (ipcperms(ns, &shp->shm_perm, acc_mode))
		goto out_unlock;

	err = security_shm_shmat(shp, shmaddr, shmflg);
	if (err)
		goto out_unlock;

	ipc_lock_object(&shp->shm_perm);

	/* check if shm_destroy() is tearing down shp */
	if (!ipc_valid_object(&shp->shm_perm)) {
		ipc_unlock_object(&shp->shm_perm);
		err = -EIDRM;
		goto out_unlock;
	}

	path = shp->shm_file->f_path;
	path_get(&path);
	shp->shm_nattch++;
	size = i_size_read(path.dentry->d_inode);
	ipc_unlock_object(&shp->shm_perm);
	rcu_read_unlock();

	err = -ENOMEM;
	sfd = kzalloc(sizeof(*sfd), GFP_KERNEL);
	if (!sfd) {
		path_put(&path);
		goto out_nattch;
	}

	file = alloc_file(&path, f_mode,
			  is_file_hugepages(shp->shm_file) ?
				&shm_file_operations_huge :
				&shm_file_operations);
	err = PTR_ERR(file);
	if (IS_ERR(file)) {
		kfree(sfd);
		path_put(&path);
		goto out_nattch;
	}

	file->private_data = sfd;
	file->f_mapping = shp->shm_file->f_mapping;
	sfd->id = shp->shm_perm.id;
	sfd->ns = get_ipc_ns(ns);
	sfd->file = shp->shm_file;
	sfd->vm_ops = NULL;

	err = security_mmap_file(file, prot, flags);
	if (err)
		goto out_fput;

	down_write(&current->mm->mmap_sem);
	if (addr && !(shmflg & SHM_REMAP)) {
		err = -EINVAL;
		if (addr + size < addr)
			goto invalid;

		if (find_vma_intersection(current->mm, addr, addr + size))
			goto invalid;
	}

	addr = do_mmap_pgoff(file, addr, size, prot, flags, 0, &populate);
	*raddr = addr;
	err = 0;
	if (IS_ERR_VALUE(addr))
		err = (long)addr;
invalid:
	up_write(&current->mm->mmap_sem);
	if (populate)
		mm_populate(addr, populate);

out_fput:
	fput(file);

out_nattch:
	down_write(&shm_ids(ns).rwsem);
	shp = shm_lock(ns, shmid);
	BUG_ON(IS_ERR(shp));
	shp->shm_nattch--;
	if (shm_may_destroy(ns, shp))
		shm_destroy(ns, shp);
	else
		shm_unlock(shp);
	up_write(&shm_ids(ns).rwsem);
	return err;

out_unlock:
	rcu_read_unlock();
out:
	return err;
}

SYSCALL_DEFINE3(shmat, int, shmid, char __user *, shmaddr, int, shmflg)
{
	unsigned long ret;
	long err;

	err = do_shmat(shmid, shmaddr, shmflg, &ret, SHMLBA);
	if (err)
		return err;
	force_successful_syscall_return();
	return (long)ret;
}
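
/*
 * Userspace sketch (illustrative only): attaching read-write at a
 * kernel-chosen address, then detaching.
 *
 *	void *p = shmat(id, NULL, 0);
 *
 *	if (p != (void *)-1) {
 *		memcpy(p, "hello", 6);
 *		shmdt(p);
 *	}
 *
 * Passing a NULL address lets do_shmat() choose the mapping address via
 * do_mmap_pgoff(); SHM_RND only matters when an explicit, unaligned
 * address is supplied.
 */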

/*
 * Detach and kill the segment if it is marked destroyed.
 * The work is done in shm_close.
 */
SYSCALL_DEFINE1(shmdt, char __user *, shmaddr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long addr = (unsigned long)shmaddr;
	int retval = -EINVAL;
#ifdef CONFIG_MMU
	loff_t size = 0;
	struct file *file;
	struct vm_area_struct *next;
#endif

	if (addr & ~PAGE_MASK)
		return retval;

	down_write(&mm->mmap_sem);

	/*
	 * This function tries to be smart and unmap shm segments that
	 * were modified by partial mlock or munmap calls:
	 * - It first determines the size of the shm segment that should be
	 *   unmapped: It searches for a vma that is backed by shm and that
	 *   started at address shmaddr. It records its size and then unmaps
	 *   it.
	 * - Then it unmaps all shm vmas that started at shmaddr and that
	 *   are within the initially determined size and that are from the
	 *   same shm segment from which we determined the size.
	 * Errors from do_munmap are ignored: the function only fails if
	 * it's called with invalid parameters or if it's called to unmap
	 * a part of a vma. Both calls in this function are for full vmas,
	 * the parameters are directly copied from the vma itself and always
	 * valid - therefore do_munmap cannot fail. (famous last words?)
	 */
	/*
	 * If it had been mremap()'d, the starting address would not
	 * match the usual checks anyway. So assume all vma's are
	 * above the starting address given.
	 */
	vma = find_vma(mm, addr);

#ifdef CONFIG_MMU
	while (vma) {
		next = vma->vm_next;

		/*
		 * Check if the starting address would match, i.e. it's
		 * a fragment created by mprotect() and/or munmap(), or it
		 * otherwise starts at this address with no hassles.
		 */
		if ((vma->vm_ops == &shm_vm_ops) &&
			(vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) {

			/*
			 * Record the file of the shm segment being
			 * unmapped.  With mremap(), someone could place
			 * a page from another segment but with equal offsets
			 * in the range we are unmapping.
			 */
			file = vma->vm_file;
			size = i_size_read(file_inode(vma->vm_file));
			do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
			/*
			 * We discovered the size of the shm segment, so
			 * break out of here and fall through to the next
			 * loop that uses the size information to stop
			 * searching for matching vma's.
			 */
			retval = 0;
			vma = next;
			break;
		}
		vma = next;
	}

	/*
	 * We need look no further than the maximum address a fragment
	 * could possibly have landed at. Also cast things to loff_t to
	 * prevent overflows and make comparisons vs. equal-width types.
	 */
	size = PAGE_ALIGN(size);
	while (vma && (loff_t)(vma->vm_end - addr) <= size) {
		next = vma->vm_next;

		/* finding a matching vma now does not alter retval */
		if ((vma->vm_ops == &shm_vm_ops) &&
		    ((vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) &&
		    (vma->vm_file == file))
			do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
		vma = next;
	}

#else /* CONFIG_MMU */
	/*
	 * Under NOMMU conditions, the exact address to be destroyed must
	 * be given.
	 */
	if (vma && vma->vm_start == addr && vma->vm_ops == &shm_vm_ops) {
		do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
		retval = 0;
	}

#endif

	up_write(&mm->mmap_sem);
	return retval;
}
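
/*
 * Note that shmdt() only succeeds for the address originally returned
 * by shmat(): a page-aligned pointer into the middle of a mapping fails
 * with -EINVAL, because no vma starting there satisfies the
 * (vm_start - addr)/PAGE_SIZE == vm_pgoff check above.
 */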

#ifdef CONFIG_PROC_FS
static int sysvipc_shm_proc_show(struct seq_file *s, void *it)
{
	struct user_namespace *user_ns = seq_user_ns(s);
	struct shmid_kernel *shp = it;
	unsigned long rss = 0, swp = 0;

	shm_add_rss_swap(shp, &rss, &swp);

#if BITS_PER_LONG <= 32
#define SIZE_SPEC "%10lu"
#else
#define SIZE_SPEC "%21lu"
#endif

	return seq_printf(s,
			  "%10d %10d  %4o " SIZE_SPEC " %5u %5u  "
			  "%5lu %5u %5u %5u %5u %10lu %10lu %10lu "
			  SIZE_SPEC " " SIZE_SPEC "\n",
			  shp->shm_perm.key,
			  shp->shm_perm.id,
			  shp->shm_perm.mode,
			  shp->shm_segsz,
			  shp->shm_cprid,
			  shp->shm_lprid,
			  shp->shm_nattch,
			  from_kuid_munged(user_ns, shp->shm_perm.uid),
			  from_kgid_munged(user_ns, shp->shm_perm.gid),
			  from_kuid_munged(user_ns, shp->shm_perm.cuid),
			  from_kgid_munged(user_ns, shp->shm_perm.cgid),
			  shp->shm_atim,
			  shp->shm_dtim,
			  shp->shm_ctim,
			  rss * PAGE_SIZE,
			  swp * PAGE_SIZE);
}
#endif