1 /*
2  * linux/ipc/util.c
3  * Copyright (C) 1992 Krishna Balasubramanian
4  *
5  * Sep 1997 - Call suser() last after "normal" permission checks so we
6  *            get BSD style process accounting right.
7  *            Occurs in several places in the IPC code.
8  *            Chris Evans, <chris@ferret.lmh.ox.ac.uk>
9  * Nov 1999 - ipc helper functions, unified SMP locking
10  *	      Manfred Spraul <manfred@colorfullife.com>
11  * Oct 2002 - One lock per IPC id. RCU ipc_free for lock-free grow_ary().
12  *            Mingming Cao <cmm@us.ibm.com>
13  * Mar 2006 - support for audit of ipc object properties
14  *            Dustin Kirkland <dustin.kirkland@us.ibm.com>
15  * Jun 2006 - namespaces support
16  *            OpenVZ, SWsoft Inc.
17  *            Pavel Emelianov <xemul@openvz.org>
18  */
19 
20 #include <linux/mm.h>
21 #include <linux/shm.h>
22 #include <linux/init.h>
23 #include <linux/msg.h>
24 #include <linux/smp_lock.h>
25 #include <linux/vmalloc.h>
26 #include <linux/slab.h>
27 #include <linux/capability.h>
28 #include <linux/highuid.h>
29 #include <linux/security.h>
30 #include <linux/rcupdate.h>
31 #include <linux/workqueue.h>
32 #include <linux/seq_file.h>
33 #include <linux/proc_fs.h>
34 #include <linux/audit.h>
35 #include <linux/nsproxy.h>
36 
37 #include <asm/unistd.h>
38 
39 #include "util.h"
40 
41 struct ipc_proc_iface {
42 	const char *path;
43 	const char *header;
44 	int ids;
45 	int (*show)(struct seq_file *, void *);
46 };
47 
48 struct ipc_namespace init_ipc_ns = {
49 	.kref = {
50 		.refcount	= ATOMIC_INIT(2),
51 	},
52 };
53 
54 #ifdef CONFIG_IPC_NS
55 static struct ipc_namespace *clone_ipc_ns(struct ipc_namespace *old_ns)
56 {
57 	int err;
58 	struct ipc_namespace *ns;
59 
60 	err = -ENOMEM;
61 	ns = kmalloc(sizeof(struct ipc_namespace), GFP_KERNEL);
62 	if (ns == NULL)
63 		goto err_mem;
64 
65 	err = sem_init_ns(ns);
66 	if (err)
67 		goto err_sem;
68 	err = msg_init_ns(ns);
69 	if (err)
70 		goto err_msg;
71 	err = shm_init_ns(ns);
72 	if (err)
73 		goto err_shm;
74 
75 	kref_init(&ns->kref);
76 	return ns;
77 
78 err_shm:
79 	msg_exit_ns(ns);
80 err_msg:
81 	sem_exit_ns(ns);
82 err_sem:
83 	kfree(ns);
84 err_mem:
85 	return ERR_PTR(err);
86 }
87 
88 int unshare_ipcs(unsigned long unshare_flags, struct ipc_namespace **new_ipc)
89 {
90 	struct ipc_namespace *new;
91 
92 	if (unshare_flags & CLONE_NEWIPC) {
93 		if (!capable(CAP_SYS_ADMIN))
94 			return -EPERM;
95 
96 		new = clone_ipc_ns(current->nsproxy->ipc_ns);
97 		if (IS_ERR(new))
98 			return PTR_ERR(new);
99 
100 		*new_ipc = new;
101 	}
102 
103 	return 0;
104 }
105 
106 int copy_ipcs(unsigned long flags, struct task_struct *tsk)
107 {
108 	struct ipc_namespace *old_ns = tsk->nsproxy->ipc_ns;
109 	struct ipc_namespace *new_ns;
110 	int err = 0;
111 
112 	if (!old_ns)
113 		return 0;
114 
115 	get_ipc_ns(old_ns);
116 
117 	if (!(flags & CLONE_NEWIPC))
118 		return 0;
119 
120 	if (!capable(CAP_SYS_ADMIN)) {
121 		err = -EPERM;
122 		goto out;
123 	}
124 
125 	new_ns = clone_ipc_ns(old_ns);
126 	if (IS_ERR(new_ns)) {
127 		err = PTR_ERR(new_ns);
128 		goto out;
129 	}
130 
131 	tsk->nsproxy->ipc_ns = new_ns;
132 out:
133 	put_ipc_ns(old_ns);
134 	return err;
135 }
136 
137 void free_ipc_ns(struct kref *kref)
138 {
139 	struct ipc_namespace *ns;
140 
141 	ns = container_of(kref, struct ipc_namespace, kref);
142 	sem_exit_ns(ns);
143 	msg_exit_ns(ns);
144 	shm_exit_ns(ns);
145 	kfree(ns);
146 }
147 #else
148 int copy_ipcs(unsigned long flags, struct task_struct *tsk)
149 {
150 	if (flags & CLONE_NEWIPC)
151 		return -EINVAL;
152 	return 0;
153 }
154 #endif
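
/*
 * Illustrative userspace sketch (not part of this file): the CLONE_NEWIPC
 * paths above are driven by clone(2)/unshare(2).  The caller needs
 * CAP_SYS_ADMIN, and on !CONFIG_IPC_NS kernels the flag is rejected with
 * -EINVAL by the copy_ipcs() stub.  Build with -D_GNU_SOURCE so <sched.h>
 * declares unshare().
 */
#if 0
#include <sched.h>
#include <stdio.h>

static int enter_private_ipc_ns(void)
{
	if (unshare(CLONE_NEWIPC) == -1) {
		perror("unshare(CLONE_NEWIPC)");
		return -1;
	}
	/* From here on shmget()/semget()/msgget() see a fresh, empty namespace. */
	return 0;
}
#endif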
155 
156 /**
157  *	ipc_init	-	initialise IPC subsystem
158  *
159  *	The various System V IPC resources (semaphores, messages and shared
160  *	memory) are initialised.
161  */
162 
163 static int __init ipc_init(void)
164 {
165 	sem_init();
166 	msg_init();
167 	shm_init();
168 	return 0;
169 }
170 __initcall(ipc_init);
171 
172 /**
173  *	ipc_init_ids		-	initialise IPC identifiers
174  *	@ids: Identifier set
175  *	@size: Number of identifiers
176  *
177  *	Given a size for the ipc identifier range (limited below IPCMNI),
178  *	set up the sequence range to use, then allocate and initialise the
179  *	array itself.
180  */
181 
182 void __ipc_init ipc_init_ids(struct ipc_ids* ids, int size)
183 {
184 	int i;
185 
186 	mutex_init(&ids->mutex);
187 
188 	if(size > IPCMNI)
189 		size = IPCMNI;
190 	ids->in_use = 0;
191 	ids->max_id = -1;
192 	ids->seq = 0;
193 	{
194 		int seq_limit = INT_MAX/SEQ_MULTIPLIER;
195 		if(seq_limit > USHRT_MAX)
196 			ids->seq_max = USHRT_MAX;
197 		else
198 			ids->seq_max = seq_limit;
199 	}
200 
201 	ids->entries = ipc_rcu_alloc(sizeof(struct kern_ipc_perm *)*size +
202 				     sizeof(struct ipc_id_ary));
203 
204 	if(ids->entries == NULL) {
205 		printk(KERN_ERR "ipc_init_ids() failed, ipc service disabled.\n");
206 		size = 0;
207 		ids->entries = &ids->nullentry;
208 	}
209 	ids->entries->size = size;
210 	for(i=0;i<size;i++)
211 		ids->entries->p[i] = NULL;
212 }
213 
214 #ifdef CONFIG_PROC_FS
215 static const struct file_operations sysvipc_proc_fops;
216 /**
217  *	ipc_init_proc_interface	-  Create a proc interface for sysvipc types using a seq_file interface.
218  *	@path: Path in procfs
219  *	@header: Banner to be printed at the beginning of the file.
220  *	@ids: ipc id table to iterate.
221  *	@show: show routine.
222  */
223 void __init ipc_init_proc_interface(const char *path, const char *header,
224 		int ids, int (*show)(struct seq_file *, void *))
225 {
226 	struct proc_dir_entry *pde;
227 	struct ipc_proc_iface *iface;
228 
229 	iface = kmalloc(sizeof(*iface), GFP_KERNEL);
230 	if (!iface)
231 		return;
232 	iface->path	= path;
233 	iface->header	= header;
234 	iface->ids	= ids;
235 	iface->show	= show;
236 
237 	pde = create_proc_entry(path,
238 				S_IRUGO,        /* world readable */
239 				NULL            /* parent dir */);
240 	if (pde) {
241 		pde->data = iface;
242 		pde->proc_fops = &sysvipc_proc_fops;
243 	} else {
244 		kfree(iface);
245 	}
246 }
247 #endif
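
/*
 * Hedged usage sketch: roughly how a SysV subsystem registers its
 * /proc/sysvipc file.  The table index (IPC_MSG_IDS here), the header string
 * and the show-routine name are illustrative only; the real registrations
 * live in msg.c, sem.c and shm.c.
 */
#if 0
static int example_proc_show(struct seq_file *s, void *it)
{
	struct kern_ipc_perm *perm = it;	/* one locked ipc object per call */

	return seq_printf(s, "%10d %10o\n", perm->key, perm->mode);
}

static void __init example_proc_init(void)
{
	ipc_init_proc_interface("sysvipc/example",	/* /proc/sysvipc/example */
				"       key       mode\n",
				IPC_MSG_IDS,		/* which ipc_ids table to walk */
				example_proc_show);
}
#endif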
248 
249 /**
250  *	ipc_findkey	-	find a key in an ipc identifier set
251  *	@ids: Identifier set
252  *	@key: The key to find
253  *
254  *	Requires ipc_ids.mutex locked.
255  *	Returns the identifier if found or -1 if not.
256  */
257 
258 int ipc_findkey(struct ipc_ids* ids, key_t key)
259 {
260 	int id;
261 	struct kern_ipc_perm* p;
262 	int max_id = ids->max_id;
263 
264 	/*
265 	 * rcu_dereference() is not needed here
266 	 * since ipc_ids.mutex is held
267 	 */
268 	for (id = 0; id <= max_id; id++) {
269 		p = ids->entries->p[id];
270 		if(p==NULL)
271 			continue;
272 		if (key == p->key)
273 			return id;
274 	}
275 	return -1;
276 }
277 
278 /*
279  * Requires ipc_ids.mutex locked
280  */
281 static int grow_ary(struct ipc_ids* ids, int newsize)
282 {
283 	struct ipc_id_ary* new;
284 	struct ipc_id_ary* old;
285 	int i;
286 	int size = ids->entries->size;
287 
288 	if(newsize > IPCMNI)
289 		newsize = IPCMNI;
290 	if(newsize <= size)
291 		return newsize;
292 
293 	new = ipc_rcu_alloc(sizeof(struct kern_ipc_perm *)*newsize +
294 			    sizeof(struct ipc_id_ary));
295 	if(new == NULL)
296 		return size;
297 	new->size = newsize;
298 	memcpy(new->p, ids->entries->p, sizeof(struct kern_ipc_perm *)*size);
299 	for(i=size;i<newsize;i++) {
300 		new->p[i] = NULL;
301 	}
302 	old = ids->entries;
303 
304 	/*
305 	 * Use rcu_assign_pointer() to make sure the memcpyed contents
306 	 * of the new array are visible before the new array becomes visible.
307 	 */
308 	rcu_assign_pointer(ids->entries, new);
309 
310 	__ipc_fini_ids(ids, old);
311 	return newsize;
312 }
313 
314 /**
315  *	ipc_addid 	-	add an IPC identifier
316  *	@ids: IPC identifier set
317  *	@new: new IPC permission set
318  *	@size: new size limit for the id array
319  *
320  *	Add an entry 'new' to the IPC arrays. The permissions object is
321  *	initialised, the first free entry is set up, and the assigned id is
322  *	returned. On success the new entry is returned locked (its spinlock
323  *	held under rcu_read_lock()); on failure -1 is returned and nothing is locked.
324  *
325  *	Called with ipc_ids.mutex held.
326  */
327 
328 int ipc_addid(struct ipc_ids* ids, struct kern_ipc_perm* new, int size)
329 {
330 	int id;
331 
332 	size = grow_ary(ids,size);
333 
334 	/*
335 	 * rcu_dereference() is not needed here since
336 	 * ipc_ids.mutex is held
337 	 */
338 	for (id = 0; id < size; id++) {
339 		if(ids->entries->p[id] == NULL)
340 			goto found;
341 	}
342 	return -1;
343 found:
344 	ids->in_use++;
345 	if (id > ids->max_id)
346 		ids->max_id = id;
347 
348 	new->cuid = new->uid = current->euid;
349 	new->gid = new->cgid = current->egid;
350 
351 	new->seq = ids->seq++;
352 	if(ids->seq > ids->seq_max)
353 		ids->seq = 0;
354 
355 	spin_lock_init(&new->lock);
356 	new->deleted = 0;
357 	rcu_read_lock();
358 	spin_lock(&new->lock);
359 	ids->entries->p[id] = new;
360 	return id;
361 }
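
/*
 * Hedged sketch of the creation path ipc_addid() is meant for, modelled on
 * newque()/newary()/newseg(); 'example_new_id' is an invented name.  Note
 * that on success the new entry comes back locked and must be released with
 * ipc_unlock(), which also drops the rcu read lock taken in ipc_addid().
 */
#if 0
static int example_new_id(struct ipc_ids *ids, struct kern_ipc_perm *new, int size)
{
	int id;

	mutex_lock(&ids->mutex);
	id = ipc_addid(ids, new, size);
	if (id == -1) {
		mutex_unlock(&ids->mutex);
		return -ENOSPC;
	}
	id = ipc_buildid(ids, id, new->seq);	/* user-visible identifier */
	ipc_unlock(new);			/* spin_unlock + rcu_read_unlock */
	mutex_unlock(&ids->mutex);
	return id;
}
#endif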
362 
363 /**
364  *	ipc_rmid	-	remove an IPC identifier
365  *	@ids: identifier set
366  *	@id: Identifier to remove
367  *
368  *	The identifier must be valid, and in use. The kernel will panic if
369  *	fed an invalid identifier. The entry is removed and internal
370  *	variables recomputed. The object associated with the identifier
371  *	is returned.
372  *	ipc_ids.mutex and the spinlock for this ID are held before this function
373  *	is called, and remain locked on exit.
374  */
375 
376 struct kern_ipc_perm* ipc_rmid(struct ipc_ids* ids, int id)
377 {
378 	struct kern_ipc_perm* p;
379 	int lid = id % SEQ_MULTIPLIER;
380 	BUG_ON(lid >= ids->entries->size);
381 
382 	/*
383 	 * do not need rcu_dereference() here to force ordering
384 	 * on Alpha, since the ipc_ids.mutex is held.
385 	 */
386 	p = ids->entries->p[lid];
387 	ids->entries->p[lid] = NULL;
388 	BUG_ON(p==NULL);
389 	ids->in_use--;
390 
391 	if (lid == ids->max_id) {
392 		do {
393 			lid--;
394 			if(lid == -1)
395 				break;
396 		} while (ids->entries->p[lid] == NULL);
397 		ids->max_id = lid;
398 	}
399 	p->deleted = 1;
400 	return p;
401 }
402 
403 /**
404  *	ipc_alloc	-	allocate ipc space
405  *	@size: size desired
406  *
407  *	Allocate memory from the appropriate pools and return a pointer to it.
408  *	NULL is returned if the allocation fails
409  */
410 
411 void* ipc_alloc(int size)
412 {
413 	void* out;
414 	if(size > PAGE_SIZE)
415 		out = vmalloc(size);
416 	else
417 		out = kmalloc(size, GFP_KERNEL);
418 	return out;
419 }
420 
421 /**
422  *	ipc_free        -       free ipc space
423  *	@ptr: pointer returned by ipc_alloc
424  *	@size: size of block
425  *
426  *	Free a block created with ipc_alloc(). The caller must know the size
427  *	used in the allocation call.
428  */
429 
430 void ipc_free(void* ptr, int size)
431 {
432 	if(size > PAGE_SIZE)
433 		vfree(ptr);
434 	else
435 		kfree(ptr);
436 }
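
/*
 * Illustrative pairing of ipc_alloc()/ipc_free(), e.g. for a temporary
 * semaphore-value buffer as semctl(GETALL)-style code would use it ('nsems'
 * is assumed context).  The caller must pass the same size to ipc_free()
 * so the right one of kfree()/vfree() is chosen.
 */
#if 0
	ushort *sem_io;
	int bytes = nsems * sizeof(ushort);

	sem_io = ipc_alloc(bytes);
	if (sem_io == NULL)
		return -ENOMEM;
	/* ... fill or copy the semaphore values ... */
	ipc_free(sem_io, bytes);
#endif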
437 
438 /*
439  * rcu allocations:
440  * There are three headers that are prepended to the actual allocation:
441  * - during use: ipc_rcu_hdr.
442  * - during the rcu grace period: ipc_rcu_grace.
443  * - [only if vmalloc]: ipc_rcu_sched.
444  * Their lifetime doesn't overlap, thus the headers share the same memory.
445  * Unlike a normal union, they are right-aligned, thus some container_of
446  * forward/backward casting is necessary:
447  */
448 struct ipc_rcu_hdr
449 {
450 	int refcount;
451 	int is_vmalloc;
452 	void *data[0];
453 };
454 
455 
456 struct ipc_rcu_grace
457 {
458 	struct rcu_head rcu;
459 	/* "void *" makes sure alignment of following data is sane. */
460 	void *data[0];
461 };
462 
463 struct ipc_rcu_sched
464 {
465 	struct work_struct work;
466 	/* "void *" makes sure alignment of following data is sane. */
467 	void *data[0];
468 };
469 
470 #define HDRLEN_KMALLOC		(sizeof(struct ipc_rcu_grace) > sizeof(struct ipc_rcu_hdr) ? \
471 					sizeof(struct ipc_rcu_grace) : sizeof(struct ipc_rcu_hdr))
472 #define HDRLEN_VMALLOC		(sizeof(struct ipc_rcu_sched) > HDRLEN_KMALLOC ? \
473 					sizeof(struct ipc_rcu_sched) : HDRLEN_KMALLOC)
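
/*
 * Resulting layout (sketch derived from the structures above; 'obj' is the
 * pointer handed back by ipc_rcu_alloc()):
 *
 *   block start --> +--------------------------------+  ---
 *                   | ipc_rcu_hdr / ipc_rcu_grace /  |   |  HDRLEN_KMALLOC
 *                   | ipc_rcu_sched (right-aligned,  |   |  or HDRLEN_VMALLOC
 *                   | never in use at the same time) |   |
 *   obj ----------> +--------------------------------+  ---
 *                   | the actual ipc object          |
 *                   +--------------------------------+
 */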
474 
475 static inline int rcu_use_vmalloc(int size)
476 {
477 	/* Too big for a single page? */
478 	if (HDRLEN_KMALLOC + size > PAGE_SIZE)
479 		return 1;
480 	return 0;
481 }
482 
483 /**
484  *	ipc_rcu_alloc	-	allocate ipc and rcu space
485  *	@size: size desired
486  *
487  *	Allocate memory for the rcu header structure + the object.
488  *	Returns the pointer to the object.
489  *	NULL is returned if the allocation fails.
490  */
491 
492 void* ipc_rcu_alloc(int size)
493 {
494 	void* out;
495 	/*
496 	 * We prepend the allocation with the rcu struct, and
497 	 * workqueue if necessary (for vmalloc).
498 	 */
499 	if (rcu_use_vmalloc(size)) {
500 		out = vmalloc(HDRLEN_VMALLOC + size);
501 		if (out) {
502 			out += HDRLEN_VMALLOC;
503 			container_of(out, struct ipc_rcu_hdr, data)->is_vmalloc = 1;
504 			container_of(out, struct ipc_rcu_hdr, data)->refcount = 1;
505 		}
506 	} else {
507 		out = kmalloc(HDRLEN_KMALLOC + size, GFP_KERNEL);
508 		if (out) {
509 			out += HDRLEN_KMALLOC;
510 			container_of(out, struct ipc_rcu_hdr, data)->is_vmalloc = 0;
511 			container_of(out, struct ipc_rcu_hdr, data)->refcount = 1;
512 		}
513 	}
514 
515 	return out;
516 }
517 
518 void ipc_rcu_getref(void *ptr)
519 {
520 	container_of(ptr, struct ipc_rcu_hdr, data)->refcount++;
521 }
522 
523 static void ipc_do_vfree(struct work_struct *work)
524 {
525 	vfree(container_of(work, struct ipc_rcu_sched, work));
526 }
527 
528 /**
529  * ipc_schedule_free - free ipc + rcu space
530  * @head: RCU callback structure for queued work
531  *
532  * Since the RCU callback is run in bh (softirq) context, where vfree()
533  * must not be called, we defer the actual vfree to schedule_work().
534  */
535 static void ipc_schedule_free(struct rcu_head *head)
536 {
537 	struct ipc_rcu_grace *grace =
538 		container_of(head, struct ipc_rcu_grace, rcu);
539 	struct ipc_rcu_sched *sched =
540 			container_of(&(grace->data[0]), struct ipc_rcu_sched, data[0]);
541 
542 	INIT_WORK(&sched->work, ipc_do_vfree);
543 	schedule_work(&sched->work);
544 }
545 
546 /**
547  * ipc_immediate_free - free ipc + rcu space
548  * @head: RCU callback structure that contains pointer to be freed
549  *
550  * Free from the RCU callback context.
551  */
552 static void ipc_immediate_free(struct rcu_head *head)
553 {
554 	struct ipc_rcu_grace *free =
555 		container_of(head, struct ipc_rcu_grace, rcu);
556 	kfree(free);
557 }
558 
559 void ipc_rcu_putref(void *ptr)
560 {
561 	if (--container_of(ptr, struct ipc_rcu_hdr, data)->refcount > 0)
562 		return;
563 
564 	if (container_of(ptr, struct ipc_rcu_hdr, data)->is_vmalloc) {
565 		call_rcu(&container_of(ptr, struct ipc_rcu_grace, data)->rcu,
566 				ipc_schedule_free);
567 	} else {
568 		call_rcu(&container_of(ptr, struct ipc_rcu_grace, data)->rcu,
569 				ipc_immediate_free);
570 	}
571 }
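
/*
 * Hedged sketch of the getref/putref pattern these helpers support, modelled
 * on how sem.c keeps an object alive while temporarily dropping its lock to
 * sleep (the surrounding variables are assumed context):
 */
#if 0
	struct kern_ipc_perm *p = ipc_lock(ids, id);

	if (p == NULL)
		return -EINVAL;
	ipc_rcu_getref(p);		/* hold the object across the unlock */
	ipc_unlock(p);

	/* ... blocking work, e.g. a GFP_KERNEL allocation ... */

	ipc_lock_by_ptr(p);
	ipc_rcu_putref(p);		/* drop the extra reference */
	if (p->deleted) {		/* ipc_rmid() ran while we slept */
		ipc_unlock(p);
		return -EIDRM;
	}
#endif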
572 
573 /**
574  *	ipcperms	-	check IPC permissions
575  *	@ipcp: IPC permission set
576  *	@flag: desired permission set.
577  *
578  *	Check user, group, other permissions for access
579  *	to ipc resources. return 0 if allowed
580  *	to ipc resources. Return 0 if allowed.
581 
582 int ipcperms (struct kern_ipc_perm *ipcp, short flag)
583 {	/* flag will most probably be 0 or S_...UGO from <linux/stat.h> */
584 	int requested_mode, granted_mode, err;
585 
586 	if (unlikely((err = audit_ipc_obj(ipcp))))
587 		return err;
588 	requested_mode = (flag >> 6) | (flag >> 3) | flag;
589 	granted_mode = ipcp->mode;
590 	if (current->euid == ipcp->cuid || current->euid == ipcp->uid)
591 		granted_mode >>= 6;
592 	else if (in_group_p(ipcp->cgid) || in_group_p(ipcp->gid))
593 		granted_mode >>= 3;
594 	/* is there some bit set in requested_mode but not in granted_mode? */
595 	if ((requested_mode & ~granted_mode & 0007) &&
596 	    !capable(CAP_IPC_OWNER))
597 		return -1;
598 
599 	return security_ipc_permission(ipcp, flag);
600 }
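
/*
 * Worked example (illustrative): an IPC_STAT-style caller asking for read
 * access with flag = S_IRUGO (0444):
 *
 *   requested_mode = (0444 >> 6) | (0444 >> 3) | 0444   -> low bits 04 (read)
 *   granted_mode   = ipcp->mode >> 6 when the caller's euid matches uid/cuid
 *
 * Only the low three bits (mask 0007) are compared, so access is refused
 * only when the selected class lacks the requested bits and the task does
 * not have CAP_IPC_OWNER.
 */
#if 0
	/* typical caller, as the *ctl(IPC_STAT) handlers do: */
	if (ipcperms(&shp->shm_perm, S_IRUGO))
		return -EACCES;
#endif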
601 
602 /*
603  * Functions to convert between the kern_ipc_perm structure and the
604  * old/new ipc_perm structures
605  */
606 
607 /**
608  *	kernel_to_ipc64_perm	-	convert kernel ipc permissions to user
609  *	@in: kernel permissions
610  *	@out: new style IPC permissions
611  *
612  *	Turn the kernel object @in into a set of permissions descriptions
613  *	for returning to userspace (@out).
614  */
615 
616 
617 void kernel_to_ipc64_perm (struct kern_ipc_perm *in, struct ipc64_perm *out)
618 {
619 	out->key	= in->key;
620 	out->uid	= in->uid;
621 	out->gid	= in->gid;
622 	out->cuid	= in->cuid;
623 	out->cgid	= in->cgid;
624 	out->mode	= in->mode;
625 	out->seq	= in->seq;
626 }
627 
628 /**
629  *	ipc64_perm_to_ipc_perm	-	convert new ipc permissions to old
630  *	@in: new style IPC permissions
631  *	@out: old style IPC permissions
632  *
633  *	Turn the new style permissions object @in into a compatibility
634  *	object and store it into the @out pointer.
635  */
636 
637 void ipc64_perm_to_ipc_perm (struct ipc64_perm *in, struct ipc_perm *out)
638 {
639 	out->key	= in->key;
640 	SET_UID(out->uid, in->uid);
641 	SET_GID(out->gid, in->gid);
642 	SET_UID(out->cuid, in->cuid);
643 	SET_GID(out->cgid, in->cgid);
644 	out->mode	= in->mode;
645 	out->seq	= in->seq;
646 }
647 
648 /*
649  * So far only shm_get_stat() calls ipc_get() via shm_get(), so ipc_get()
650  * is called with shm_ids.mutex locked.  Since grow_ary() is also called with
651  * shm_ids.mutex down (for shared memory), there is no need to add read
652  * barriers here to guarantee that the writes in grow_ary() are seen in order
653  * here (for Alpha).
654  *
655  * However, ipc_get() itself does not necessarily require ipc_ids.mutex down. So
656  * if in the future ipc_get() is used by other places without ipc_ids.mutex
657  * down, then ipc_get() needs read memory barriers as ipc_lock() does.
658  */
659 struct kern_ipc_perm* ipc_get(struct ipc_ids* ids, int id)
660 {
661 	struct kern_ipc_perm* out;
662 	int lid = id % SEQ_MULTIPLIER;
663 	if(lid >= ids->entries->size)
664 		return NULL;
665 	out = ids->entries->p[lid];
666 	return out;
667 }
668 
669 struct kern_ipc_perm* ipc_lock(struct ipc_ids* ids, int id)
670 {
671 	struct kern_ipc_perm* out;
672 	int lid = id % SEQ_MULTIPLIER;
673 	struct ipc_id_ary* entries;
674 
675 	rcu_read_lock();
676 	entries = rcu_dereference(ids->entries);
677 	if(lid >= entries->size) {
678 		rcu_read_unlock();
679 		return NULL;
680 	}
681 	out = entries->p[lid];
682 	if(out == NULL) {
683 		rcu_read_unlock();
684 		return NULL;
685 	}
686 	spin_lock(&out->lock);
687 
688 	/* ipc_rmid() may have already freed the ID while ipc_lock
689 	 * was spinning: here verify that the structure is still valid
690 	 */
691 	if (out->deleted) {
692 		spin_unlock(&out->lock);
693 		rcu_read_unlock();
694 		return NULL;
695 	}
696 	return out;
697 }
698 
699 void ipc_lock_by_ptr(struct kern_ipc_perm *perm)
700 {
701 	rcu_read_lock();
702 	spin_lock(&perm->lock);
703 }
704 
705 void ipc_unlock(struct kern_ipc_perm* perm)
706 {
707 	spin_unlock(&perm->lock);
708 	rcu_read_unlock();
709 }
710 
711 int ipc_buildid(struct ipc_ids* ids, int id, int seq)
712 {
713 	return SEQ_MULTIPLIER*seq + id;
714 }
715 
716 int ipc_checkid(struct ipc_ids* ids, struct kern_ipc_perm* ipcp, int uid)
717 {
718 	if(uid/SEQ_MULTIPLIER != ipcp->seq)
719 		return 1;
720 	return 0;
721 }
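
/*
 * Hedged sketch of the lookup path the three helpers above implement:
 * ipc_buildid() encodes id = seq * SEQ_MULTIPLIER + slot, and ipc_checkid()
 * rejects ids whose sequence number no longer matches, i.e. the slot has
 * been reused since the caller obtained the id.
 */
#if 0
	struct kern_ipc_perm *p;

	p = ipc_lock(ids, id);			/* rcu_read_lock + spin_lock */
	if (p == NULL)
		return -EINVAL;			/* empty slot or out of range */
	if (ipc_checkid(ids, p, id)) {		/* stale sequence number */
		ipc_unlock(p);
		return -EIDRM;
	}
	/* ... operate on the object while it stays locked ... */
	ipc_unlock(p);
#endif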
722 
723 #ifdef __ARCH_WANT_IPC_PARSE_VERSION
724 
725 
726 /**
727  *	ipc_parse_version	-	IPC call version
728  *	@cmd: pointer to command
729  *
730  *	Return IPC_64 for new style IPC and IPC_OLD for old style IPC.
731  *	The @cmd value is turned from an encoded command and version
732  *	just the command code.
733  */
734 
735 int ipc_parse_version (int *cmd)
736 {
737 	if (*cmd & IPC_64) {
738 		*cmd ^= IPC_64;
739 		return IPC_64;
740 	} else {
741 		return IPC_OLD;
742 	}
743 }
744 
745 #endif /* __ARCH_WANT_IPC_PARSE_VERSION */
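
/*
 * Hedged sketch of how a *ctl() entry point consumes ipc_parse_version():
 * the IPC_64 bit is stripped from 'cmd' and the returned version selects
 * between the old and the 64-bit user-visible structures.
 */
#if 0
	int version = ipc_parse_version(&cmd);

	switch (cmd) {
	case IPC_STAT:
		/* copy either a *id64_ds or a legacy *id_ds, based on 'version' */
		break;
	/* ... */
	}
#endif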
746 
747 #ifdef CONFIG_PROC_FS
748 struct ipc_proc_iter {
749 	struct ipc_namespace *ns;
750 	struct ipc_proc_iface *iface;
751 };
752 
753 static void *sysvipc_proc_next(struct seq_file *s, void *it, loff_t *pos)
754 {
755 	struct ipc_proc_iter *iter = s->private;
756 	struct ipc_proc_iface *iface = iter->iface;
757 	struct kern_ipc_perm *ipc = it;
758 	loff_t p;
759 	struct ipc_ids *ids;
760 
761 	ids = iter->ns->ids[iface->ids];
762 
763 	/* If we had an ipc id locked before, unlock it */
764 	if (ipc && ipc != SEQ_START_TOKEN)
765 		ipc_unlock(ipc);
766 
767 	/*
768 	 * Start at p = *pos: that is (*pos - 1), because id 0 sits at
769 	 * position 1, plus 1, because we advance the position by one.
770 	 */
771 	for (p = *pos; p <= ids->max_id; p++) {
772 		if ((ipc = ipc_lock(ids, p)) != NULL) {
773 			*pos = p + 1;
774 			return ipc;
775 		}
776 	}
777 
778 	/* Out of range - return NULL to terminate iteration */
779 	return NULL;
780 }
781 
782 /*
783  * File positions: pos 0 -> header, pos n -> ipc id + 1.
784  * SeqFile iterator: iterator value locked ipc object or SEQ_START_TOKEN.
785  */
786 static void *sysvipc_proc_start(struct seq_file *s, loff_t *pos)
787 {
788 	struct ipc_proc_iter *iter = s->private;
789 	struct ipc_proc_iface *iface = iter->iface;
790 	struct kern_ipc_perm *ipc;
791 	loff_t p;
792 	struct ipc_ids *ids;
793 
794 	ids = iter->ns->ids[iface->ids];
795 
796 	/*
797 	 * Take the lock - this will be released by the corresponding
798 	 * call to stop().
799 	 */
800 	mutex_lock(&ids->mutex);
801 
802 	/* pos < 0 is invalid */
803 	if (*pos < 0)
804 		return NULL;
805 
806 	/* pos == 0 means header */
807 	if (*pos == 0)
808 		return SEQ_START_TOKEN;
809 
810 	/* Find the (pos-1)th ipc */
811 	for (p = *pos - 1; p <= ids->max_id; p++) {
812 		if ((ipc = ipc_lock(ids, p)) != NULL) {
813 			*pos = p + 1;
814 			return ipc;
815 		}
816 	}
817 	return NULL;
818 }
819 
820 static void sysvipc_proc_stop(struct seq_file *s, void *it)
821 {
822 	struct kern_ipc_perm *ipc = it;
823 	struct ipc_proc_iter *iter = s->private;
824 	struct ipc_proc_iface *iface = iter->iface;
825 	struct ipc_ids *ids;
826 
827 	/* If we had a locked segment, release it */
828 	if (ipc && ipc != SEQ_START_TOKEN)
829 		ipc_unlock(ipc);
830 
831 	ids = iter->ns->ids[iface->ids];
832 	/* Release the lock we took in start() */
833 	mutex_unlock(&ids->mutex);
834 }
835 
836 static int sysvipc_proc_show(struct seq_file *s, void *it)
837 {
838 	struct ipc_proc_iter *iter = s->private;
839 	struct ipc_proc_iface *iface = iter->iface;
840 
841 	if (it == SEQ_START_TOKEN)
842 		return seq_puts(s, iface->header);
843 
844 	return iface->show(s, it);
845 }
846 
847 static struct seq_operations sysvipc_proc_seqops = {
848 	.start = sysvipc_proc_start,
849 	.stop  = sysvipc_proc_stop,
850 	.next  = sysvipc_proc_next,
851 	.show  = sysvipc_proc_show,
852 };
853 
854 static int sysvipc_proc_open(struct inode *inode, struct file *file)
855 {
856 	int ret;
857 	struct seq_file *seq;
858 	struct ipc_proc_iter *iter;
859 
860 	ret = -ENOMEM;
861 	iter = kmalloc(sizeof(*iter), GFP_KERNEL);
862 	if (!iter)
863 		goto out;
864 
865 	ret = seq_open(file, &sysvipc_proc_seqops);
866 	if (ret)
867 		goto out_kfree;
868 
869 	seq = file->private_data;
870 	seq->private = iter;
871 
872 	iter->iface = PDE(inode)->data;
873 	iter->ns    = get_ipc_ns(current->nsproxy->ipc_ns);
874 out:
875 	return ret;
876 out_kfree:
877 	kfree(iter);
878 	goto out;
879 }
880 
881 static int sysvipc_proc_release(struct inode *inode, struct file *file)
882 {
883 	struct seq_file *seq = file->private_data;
884 	struct ipc_proc_iter *iter = seq->private;
885 	put_ipc_ns(iter->ns);
886 	return seq_release_private(inode, file);
887 }
888 
889 static const struct file_operations sysvipc_proc_fops = {
890 	.open    = sysvipc_proc_open,
891 	.read    = seq_read,
892 	.llseek  = seq_lseek,
893 	.release = sysvipc_proc_release,
894 };
895 #endif /* CONFIG_PROC_FS */
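
/*
 * Userspace view (sketch): each interface registered through
 * ipc_init_proc_interface() appears as a flat, world-readable file, e.g.
 *
 *   $ cat /proc/sysvipc/shm
 *   <header banner from ipc_proc_iface->header>
 *   <one line per live id, produced by the ->show() callback>
 */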
896