/*
 * linux/ipc/util.c
 * Copyright (C) 1992 Krishna Balasubramanian
 *
 * Sep 1997 - Call suser() last after "normal" permission checks so we
 *            get BSD style process accounting right.
 *            Occurs in several places in the IPC code.
 *            Chris Evans, <chris@ferret.lmh.ox.ac.uk>
 * Nov 1999 - ipc helper functions, unified SMP locking
 *            Manfred Spraul <manfred@colorfullife.com>
 * Oct 2002 - One lock per IPC id. RCU ipc_free for lock-free grow_ary().
 *            Mingming Cao <cmm@us.ibm.com>
 * Mar 2006 - support for audit of ipc object properties
 *            Dustin Kirkland <dustin.kirkland@us.ibm.com>
 * Jun 2006 - namespaces support
 *            OpenVZ, SWsoft Inc.
 *            Pavel Emelianov <xemul@openvz.org>
 */

#include <linux/mm.h>
#include <linux/shm.h>
#include <linux/init.h>
#include <linux/msg.h>
#include <linux/smp_lock.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/capability.h>
#include <linux/highuid.h>
#include <linux/security.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/audit.h>
#include <linux/nsproxy.h>

#include <asm/unistd.h>

#include "util.h"

struct ipc_proc_iface {
        const char *path;
        const char *header;
        int ids;
        int (*show)(struct seq_file *, void *);
};

struct ipc_namespace init_ipc_ns = {
        .kref = {
                .refcount = ATOMIC_INIT(2),
        },
};

#ifdef CONFIG_IPC_NS
static struct ipc_namespace *clone_ipc_ns(struct ipc_namespace *old_ns)
{
        int err;
        struct ipc_namespace *ns;

        err = -ENOMEM;
        ns = kmalloc(sizeof(struct ipc_namespace), GFP_KERNEL);
        if (ns == NULL)
                goto err_mem;

        err = sem_init_ns(ns);
        if (err)
                goto err_sem;
        err = msg_init_ns(ns);
        if (err)
                goto err_msg;
        err = shm_init_ns(ns);
        if (err)
                goto err_shm;

        kref_init(&ns->kref);
        return ns;

err_shm:
        msg_exit_ns(ns);
err_msg:
        sem_exit_ns(ns);
err_sem:
        kfree(ns);
err_mem:
        return ERR_PTR(err);
}

int unshare_ipcs(unsigned long unshare_flags, struct ipc_namespace **new_ipc)
{
        struct ipc_namespace *new;

        if (unshare_flags & CLONE_NEWIPC) {
                if (!capable(CAP_SYS_ADMIN))
                        return -EPERM;

                new = clone_ipc_ns(current->nsproxy->ipc_ns);
                if (IS_ERR(new))
                        return PTR_ERR(new);

                *new_ipc = new;
        }

        return 0;
}

int copy_ipcs(unsigned long flags, struct task_struct *tsk)
{
        struct ipc_namespace *old_ns = tsk->nsproxy->ipc_ns;
        struct ipc_namespace *new_ns;
        int err = 0;

        if (!old_ns)
                return 0;

        get_ipc_ns(old_ns);

        if (!(flags & CLONE_NEWIPC))
                return 0;

        if (!capable(CAP_SYS_ADMIN)) {
                err = -EPERM;
                goto out;
        }

        new_ns = clone_ipc_ns(old_ns);
        if (IS_ERR(new_ns)) {
                /* clone_ipc_ns() reports failure via ERR_PTR(), never NULL */
                err = PTR_ERR(new_ns);
                goto out;
        }

        tsk->nsproxy->ipc_ns = new_ns;
out:
        put_ipc_ns(old_ns);
        return err;
}

void free_ipc_ns(struct kref *kref)
{
        struct ipc_namespace *ns;

        ns = container_of(kref, struct ipc_namespace, kref);
        sem_exit_ns(ns);
        msg_exit_ns(ns);
        shm_exit_ns(ns);
        kfree(ns);
}
#endif

/**
 * ipc_init - initialise IPC subsystem
 *
 * The various System V IPC resources (semaphores, messages and shared
 * memory) are initialised.
 */

static int __init ipc_init(void)
{
        sem_init();
        msg_init();
        shm_init();
        return 0;
}
__initcall(ipc_init);
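
/*
 * Minimal sketch of the intended namespace lifetime, assuming
 * get_ipc_ns()/put_ipc_ns() are the kref_get()/kref_put() wrappers
 * declared alongside free_ipc_ns():
 *
 *      struct ipc_namespace *ns;
 *
 *      ns = get_ipc_ns(current->nsproxy->ipc_ns);
 *      ... use ns ...
 *      put_ipc_ns(ns);
 *
 * put_ipc_ns() is expected to end in kref_put(&ns->kref, free_ipc_ns),
 * so the teardown above runs exactly once, when the last reference is
 * dropped.  init_ipc_ns starts with a reference count of 2, which pins
 * the initial namespace for the lifetime of the system.
 */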

/**
 * ipc_init_ids - initialise IPC identifiers
 * @ids: Identifier set
 * @size: Number of identifiers
 *
 * Given a size for the ipc identifier range (limited below IPCMNI)
 * set up the sequence range to use then allocate and initialise the
 * array itself.
 */

void __ipc_init ipc_init_ids(struct ipc_ids *ids, int size)
{
        int i;

        mutex_init(&ids->mutex);

        if (size > IPCMNI)
                size = IPCMNI;
        ids->in_use = 0;
        ids->max_id = -1;
        ids->seq = 0;
        {
                int seq_limit = INT_MAX / SEQ_MULTIPLIER;
                if (seq_limit > USHRT_MAX)
                        ids->seq_max = USHRT_MAX;
                else
                        ids->seq_max = seq_limit;
        }

        ids->entries = ipc_rcu_alloc(sizeof(struct kern_ipc_perm *) * size +
                                     sizeof(struct ipc_id_ary));

        if (ids->entries == NULL) {
                printk(KERN_ERR "ipc_init_ids() failed, ipc service disabled.\n");
                size = 0;
                ids->entries = &ids->nullentry;
        }
        ids->entries->size = size;
        for (i = 0; i < size; i++)
                ids->entries->p[i] = NULL;
}

#ifdef CONFIG_PROC_FS
static struct file_operations sysvipc_proc_fops;

/**
 * ipc_init_proc_interface - Create a proc interface for sysvipc types
 *                           using a seq_file interface.
 * @path: Path in procfs
 * @header: Banner to be printed at the beginning of the file.
 * @ids: ipc id table to iterate.
 * @show: show routine.
 */
void __init ipc_init_proc_interface(const char *path, const char *header,
                int ids, int (*show)(struct seq_file *, void *))
{
        struct proc_dir_entry *pde;
        struct ipc_proc_iface *iface;

        iface = kmalloc(sizeof(*iface), GFP_KERNEL);
        if (!iface)
                return;
        iface->path   = path;
        iface->header = header;
        iface->ids    = ids;
        iface->show   = show;

        pde = create_proc_entry(path,
                                S_IRUGO,        /* world readable */
                                NULL            /* parent dir */);
        if (pde) {
                pde->data = iface;
                pde->proc_fops = &sysvipc_proc_fops;
        } else {
                kfree(iface);
        }
}
#endif

/**
 * ipc_findkey - find a key in an ipc identifier set
 * @ids: Identifier set
 * @key: The key to find
 *
 * Requires ipc_ids.mutex locked.
 * Returns the identifier if found or -1 if not.
 */

int ipc_findkey(struct ipc_ids *ids, key_t key)
{
        int id;
        struct kern_ipc_perm *p;
        int max_id = ids->max_id;

        /*
         * rcu_dereference() is not needed here
         * since ipc_ids.mutex is held
         */
        for (id = 0; id <= max_id; id++) {
                p = ids->entries->p[id];
                if (p == NULL)
                        continue;
                if (key == p->key)
                        return id;
        }
        return -1;
}

/*
 * Requires ipc_ids.mutex locked
 */
static int grow_ary(struct ipc_ids *ids, int newsize)
{
        struct ipc_id_ary *new;
        struct ipc_id_ary *old;
        int i;
        int size = ids->entries->size;

        if (newsize > IPCMNI)
                newsize = IPCMNI;
        if (newsize <= size)
                return newsize;

        new = ipc_rcu_alloc(sizeof(struct kern_ipc_perm *) * newsize +
                            sizeof(struct ipc_id_ary));
        if (new == NULL)
                return size;
        new->size = newsize;
        memcpy(new->p, ids->entries->p, sizeof(struct kern_ipc_perm *) * size);
        for (i = size; i < newsize; i++)
                new->p[i] = NULL;
        old = ids->entries;

        /*
         * Use rcu_assign_pointer() to make sure the memcpyed contents
         * of the new array are visible before the new array becomes visible.
         */
        rcu_assign_pointer(ids->entries, new);

        __ipc_fini_ids(ids, old);
        return newsize;
}
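
/*
 * The rcu_assign_pointer() above pairs with the rcu_dereference() on
 * the reader side in ipc_lock() below.  A sketch of the contract:
 *
 *      writer (grow_ary):
 *              new->size = newsize;
 *              memcpy(new->p, old->p, ...);
 *              rcu_assign_pointer(ids->entries, new);
 *
 *      reader (ipc_lock):
 *              rcu_read_lock();
 *              entries = rcu_dereference(ids->entries);
 *              if (lid < entries->size)
 *                      out = entries->p[lid];
 *              rcu_read_unlock();
 *
 * rcu_assign_pointer() orders the initialising stores before the
 * pointer publish; rcu_dereference() provides the matching read-side
 * dependency ordering, which is what the "needed on Alpha" comments
 * elsewhere in this file refer to.
 */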

/**
 * ipc_addid - add an IPC identifier
 * @ids: IPC identifier set
 * @new: new IPC permission set
 * @size: new size limit for the id array
 *
 * Add an entry 'new' to the IPC arrays. The permissions object is
 * initialised and the first free entry is set up and the id assigned
 * is returned. The list is returned in a locked state on success.
 * On failure the list is not locked and -1 is returned.
 *
 * Called with ipc_ids.mutex held.
 */

int ipc_addid(struct ipc_ids *ids, struct kern_ipc_perm *new, int size)
{
        int id;

        size = grow_ary(ids, size);

        /*
         * rcu_dereference() is not needed here since
         * ipc_ids.mutex is held
         */
        for (id = 0; id < size; id++) {
                if (ids->entries->p[id] == NULL)
                        goto found;
        }
        return -1;
found:
        ids->in_use++;
        if (id > ids->max_id)
                ids->max_id = id;

        new->cuid = new->uid = current->euid;
        new->gid = new->cgid = current->egid;

        new->seq = ids->seq++;
        if (ids->seq > ids->seq_max)
                ids->seq = 0;

        spin_lock_init(&new->lock);
        new->deleted = 0;
        rcu_read_lock();
        spin_lock(&new->lock);
        ids->entries->p[id] = new;
        return id;
}

/**
 * ipc_rmid - remove an IPC identifier
 * @ids: identifier set
 * @id: Identifier to remove
 *
 * The identifier must be valid, and in use. The kernel will panic if
 * fed an invalid identifier. The entry is removed and internal
 * variables recomputed. The object associated with the identifier
 * is returned.
 * ipc_ids.mutex and the spinlock for this ID are held before this
 * function is called, and remain locked on exit.
 */

struct kern_ipc_perm *ipc_rmid(struct ipc_ids *ids, int id)
{
        struct kern_ipc_perm *p;
        int lid = id % SEQ_MULTIPLIER;
        BUG_ON(lid >= ids->entries->size);

        /*
         * rcu_dereference() is not needed here to force ordering
         * on Alpha, since the ipc_ids.mutex is held.
         */
        p = ids->entries->p[lid];
        ids->entries->p[lid] = NULL;
        BUG_ON(p == NULL);
        ids->in_use--;

        if (lid == ids->max_id) {
                do {
                        lid--;
                        if (lid == -1)
                                break;
                } while (ids->entries->p[lid] == NULL);
                ids->max_id = lid;
        }
        p->deleted = 1;
        return p;
}

/**
 * ipc_alloc - allocate ipc space
 * @size: size desired
 *
 * Allocate memory from the appropriate pools and return a pointer to it.
 * NULL is returned if the allocation fails.
 */

void *ipc_alloc(int size)
{
        void *out;
        if (size > PAGE_SIZE)
                out = vmalloc(size);
        else
                out = kmalloc(size, GFP_KERNEL);
        return out;
}

/**
 * ipc_free - free ipc space
 * @ptr: pointer returned by ipc_alloc
 * @size: size of block
 *
 * Free a block created with ipc_alloc. The caller must know the size
 * used in the allocation call.
 */

void ipc_free(void *ptr, int size)
{
        if (size > PAGE_SIZE)
                vfree(ptr);
        else
                kfree(ptr);
}
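
/*
 * ipc_alloc() picks kmalloc() or vmalloc() purely on size, so a caller
 * must pass the same size to ipc_free() for the matching release path
 * to be taken.  Minimal sketch, with a hypothetical payload length:
 *
 *      int len = nsems * sizeof(struct sem);   (hypothetical caller)
 *      void *base = ipc_alloc(len);
 *      if (base == NULL)
 *              return -ENOMEM;
 *      ... use base ...
 *      ipc_free(base, len);    (same len, so kfree()/vfree() matches)
 */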

/*
 * rcu allocations:
 * There are three headers that are prepended to the actual allocation:
 * - during use: ipc_rcu_hdr.
 * - during the rcu grace period: ipc_rcu_grace.
 * - [only if vmalloc]: ipc_rcu_sched.
 * Their lifetime doesn't overlap, thus the headers share the same memory.
 * Unlike a normal union, they are right-aligned, thus some container_of
 * forward/backward casting is necessary:
 */
struct ipc_rcu_hdr
{
        int refcount;
        int is_vmalloc;
        void *data[0];
};

struct ipc_rcu_grace
{
        struct rcu_head rcu;
        /* "void *" makes sure alignment of following data is sane. */
        void *data[0];
};

struct ipc_rcu_sched
{
        struct work_struct work;
        /* "void *" makes sure alignment of following data is sane. */
        void *data[0];
};

#define HDRLEN_KMALLOC (sizeof(struct ipc_rcu_grace) > sizeof(struct ipc_rcu_hdr) ? \
                        sizeof(struct ipc_rcu_grace) : sizeof(struct ipc_rcu_hdr))
#define HDRLEN_VMALLOC (sizeof(struct ipc_rcu_sched) > HDRLEN_KMALLOC ? \
                        sizeof(struct ipc_rcu_sched) : HDRLEN_KMALLOC)

static inline int rcu_use_vmalloc(int size)
{
        /* Too big for a single page? */
        if (HDRLEN_KMALLOC + size > PAGE_SIZE)
                return 1;
        return 0;
}

/**
 * ipc_rcu_alloc - allocate ipc and rcu space
 * @size: size desired
 *
 * Allocate memory for the rcu header structure + the object.
 * Returns the pointer to the object.
 * NULL is returned if the allocation fails.
 */

void *ipc_rcu_alloc(int size)
{
        void *out;
        /*
         * We prepend the allocation with the rcu struct, and
         * workqueue if necessary (for vmalloc).
         */
        if (rcu_use_vmalloc(size)) {
                out = vmalloc(HDRLEN_VMALLOC + size);
                if (out) {
                        out += HDRLEN_VMALLOC;
                        container_of(out, struct ipc_rcu_hdr, data)->is_vmalloc = 1;
                        container_of(out, struct ipc_rcu_hdr, data)->refcount = 1;
                }
        } else {
                out = kmalloc(HDRLEN_KMALLOC + size, GFP_KERNEL);
                if (out) {
                        out += HDRLEN_KMALLOC;
                        container_of(out, struct ipc_rcu_hdr, data)->is_vmalloc = 0;
                        container_of(out, struct ipc_rcu_hdr, data)->refcount = 1;
                }
        }

        return out;
}

void ipc_rcu_getref(void *ptr)
{
        container_of(ptr, struct ipc_rcu_hdr, data)->refcount++;
}

static void ipc_do_vfree(struct work_struct *work)
{
        vfree(container_of(work, struct ipc_rcu_sched, work));
}

/**
 * ipc_schedule_free - free ipc + rcu space
 * @head: RCU callback structure for queued work
 *
 * Since the RCU callback is run from bh (softirq) context, where
 * vfree() must not be called, we defer the vfree to process context
 * via schedule_work().
 */
static void ipc_schedule_free(struct rcu_head *head)
{
        struct ipc_rcu_grace *grace =
                container_of(head, struct ipc_rcu_grace, rcu);
        struct ipc_rcu_sched *sched =
                container_of(&(grace->data[0]), struct ipc_rcu_sched, data[0]);

        INIT_WORK(&sched->work, ipc_do_vfree);
        schedule_work(&sched->work);
}

/**
 * ipc_immediate_free - free ipc + rcu space
 * @head: RCU callback structure that contains pointer to be freed
 *
 * Free from the RCU callback context
 */
static void ipc_immediate_free(struct rcu_head *head)
{
        struct ipc_rcu_grace *free =
                container_of(head, struct ipc_rcu_grace, rcu);
        kfree(free);
}

void ipc_rcu_putref(void *ptr)
{
        if (--container_of(ptr, struct ipc_rcu_hdr, data)->refcount > 0)
                return;

        if (container_of(ptr, struct ipc_rcu_hdr, data)->is_vmalloc) {
                call_rcu(&container_of(ptr, struct ipc_rcu_grace, data)->rcu,
                         ipc_schedule_free);
        } else {
                call_rcu(&container_of(ptr, struct ipc_rcu_grace, data)->rcu,
                         ipc_immediate_free);
        }
}
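
/*
 * Sketch of the layout the container_of() arithmetic above assumes:
 * every header ends flush against the payload ("right-aligned"), with
 * any padding in front of the smaller headers:
 *
 *      kmalloc case:   | hdr / grace (overlaid)        | payload ...
 *      vmalloc case:   | pad | hdr / grace / sched     | payload ...
 *                                                        ^ pointer
 *                                                          returned
 *
 * That is why stepping back from the payload pointer with
 * container_of(ptr, struct ipc_rcu_hdr, data) finds whichever header
 * is live at that point in the object's life.  The vmalloc case takes
 * the ipc_schedule_free() detour because vfree() cannot run from the
 * softirq context that RCU callbacks execute in.
 */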

/**
 * ipcperms - check IPC permissions
 * @ipcp: IPC permission set
 * @flag: desired permission set.
 *
 * Check user, group and other permissions for access
 * to ipc resources. Returns 0 if allowed.
 */

int ipcperms(struct kern_ipc_perm *ipcp, short flag)
{       /* flag will most probably be 0 or S_...UGO from <linux/stat.h> */
        int requested_mode, granted_mode, err;

        if (unlikely((err = audit_ipc_obj(ipcp))))
                return err;
        requested_mode = (flag >> 6) | (flag >> 3) | flag;
        granted_mode = ipcp->mode;
        if (current->euid == ipcp->cuid || current->euid == ipcp->uid)
                granted_mode >>= 6;
        else if (in_group_p(ipcp->cgid) || in_group_p(ipcp->gid))
                granted_mode >>= 3;
        /* is there some bit set in requested_mode but not in granted_mode? */
        if ((requested_mode & ~granted_mode & 0007) &&
            !capable(CAP_IPC_OWNER))
                return -1;

        return security_ipc_permission(ipcp, flag);
}

/*
 * Functions to convert between the kern_ipc_perm structure and the
 * old/new ipc_perm structures
 */

/**
 * kernel_to_ipc64_perm - convert kernel ipc permissions to user
 * @in: kernel permissions
 * @out: new style IPC permissions
 *
 * Turn the kernel object @in into a set of permissions descriptions
 * for returning to userspace (@out).
 */

void kernel_to_ipc64_perm(struct kern_ipc_perm *in, struct ipc64_perm *out)
{
        out->key  = in->key;
        out->uid  = in->uid;
        out->gid  = in->gid;
        out->cuid = in->cuid;
        out->cgid = in->cgid;
        out->mode = in->mode;
        out->seq  = in->seq;
}

/**
 * ipc64_perm_to_ipc_perm - convert new ipc permissions to old
 * @in: new style IPC permissions
 * @out: old style IPC permissions
 *
 * Turn the new style permissions object @in into a compatibility
 * object and store it into the @out pointer.
 */

void ipc64_perm_to_ipc_perm(struct ipc64_perm *in, struct ipc_perm *out)
{
        out->key  = in->key;
        SET_UID(out->uid, in->uid);
        SET_GID(out->gid, in->gid);
        SET_UID(out->cuid, in->cuid);
        SET_GID(out->cgid, in->cgid);
        out->mode = in->mode;
        out->seq  = in->seq;
}
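
/*
 * Worked example for the mode folding in ipcperms(): a request for
 * read+write arrives as flag = S_IRUGO | S_IWUGO = 0666, so
 *
 *      requested_mode = (0666 >> 6) | (0666 >> 3) | 0666 = 0666
 *
 * and only the low three bits survive the final "& 0007".  On the
 * granted side, an owner match shifts granted_mode right by 6 and a
 * group match by 3, moving the relevant permission class into the same
 * low three bits.  Both sides are thus normalised into the "other"
 * bit positions, and a single test covers owner, group and other.
 */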

/*
 * So far only shm_get_stat() calls ipc_get() via shm_get(), so ipc_get()
 * is called with shm_ids.mutex locked. Since grow_ary() is also called with
 * shm_ids.mutex down (for shared memory), there is no need to add read
 * barriers here to guarantee the writes in grow_ary() are seen in order
 * here (for Alpha).
 *
 * However ipc_get() itself does not necessarily require ipc_ids.mutex down.
 * So if in the future ipc_get() is used by other places without
 * ipc_ids.mutex down, then ipc_get() needs read memory barriers as
 * ipc_lock() does.
 */
struct kern_ipc_perm *ipc_get(struct ipc_ids *ids, int id)
{
        struct kern_ipc_perm *out;
        int lid = id % SEQ_MULTIPLIER;
        if (lid >= ids->entries->size)
                return NULL;
        out = ids->entries->p[lid];
        return out;
}

struct kern_ipc_perm *ipc_lock(struct ipc_ids *ids, int id)
{
        struct kern_ipc_perm *out;
        int lid = id % SEQ_MULTIPLIER;
        struct ipc_id_ary *entries;

        rcu_read_lock();
        entries = rcu_dereference(ids->entries);
        if (lid >= entries->size) {
                rcu_read_unlock();
                return NULL;
        }
        out = entries->p[lid];
        if (out == NULL) {
                rcu_read_unlock();
                return NULL;
        }
        spin_lock(&out->lock);

        /* ipc_rmid() may have already freed the ID while ipc_lock
         * was spinning: here verify that the structure is still valid
         */
        if (out->deleted) {
                spin_unlock(&out->lock);
                rcu_read_unlock();
                return NULL;
        }
        return out;
}

void ipc_lock_by_ptr(struct kern_ipc_perm *perm)
{
        rcu_read_lock();
        spin_lock(&perm->lock);
}

void ipc_unlock(struct kern_ipc_perm *perm)
{
        spin_unlock(&perm->lock);
        rcu_read_unlock();
}

int ipc_buildid(struct ipc_ids *ids, int id, int seq)
{
        return SEQ_MULTIPLIER * seq + id;
}

int ipc_checkid(struct ipc_ids *ids, struct kern_ipc_perm *ipcp, int uid)
{
        if (uid / SEQ_MULTIPLIER != ipcp->seq)
                return 1;
        return 0;
}

#ifdef __ARCH_WANT_IPC_PARSE_VERSION

/**
 * ipc_parse_version - IPC call version
 * @cmd: pointer to command
 *
 * Return IPC_64 for new style IPC and IPC_OLD for old style IPC.
 * The @cmd value is stripped of its version bits, leaving just the
 * command code.
 */

int ipc_parse_version(int *cmd)
{
        if (*cmd & IPC_64) {
                *cmd ^= IPC_64;
                return IPC_64;
        } else {
                return IPC_OLD;
        }
}

#endif /* __ARCH_WANT_IPC_PARSE_VERSION */
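
/*
 * Worked example for ipc_buildid()/ipc_checkid(): the user-visible id
 * packs the array slot and the slot's reuse count.  Assuming the usual
 * SEQ_MULTIPLIER (== IPCMNI, i.e. 32768):
 *
 *      id  = seq * SEQ_MULTIPLIER + lid        (ipc_buildid)
 *      lid = id % SEQ_MULTIPLIER               (slot, used by ipc_lock)
 *      id / SEQ_MULTIPLIER == ipcp->seq        (ipc_checkid)
 *
 * If slot 3 is destroyed and recreated with seq == 2, the new object
 * gets id = 2 * 32768 + 3 = 65539; a stale handle still holding id 3
 * maps to the same slot but fails ipc_checkid() instead of silently
 * aliasing the new object.
 */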

#ifdef CONFIG_PROC_FS
static void *sysvipc_proc_next(struct seq_file *s, void *it, loff_t *pos)
{
        struct ipc_proc_iface *iface = s->private;
        struct kern_ipc_perm *ipc = it;
        loff_t p;
        struct ipc_ids *ids;

        ids = current->nsproxy->ipc_ns->ids[iface->ids];

        /* If we had an ipc id locked before, unlock it */
        if (ipc && ipc != SEQ_START_TOKEN)
                ipc_unlock(ipc);

        /*
         * p = *pos - 1 (because id 0 starts at position 1)
         *     + 1 (because we increment the position by one)
         */
        for (p = *pos; p <= ids->max_id; p++) {
                if ((ipc = ipc_lock(ids, p)) != NULL) {
                        *pos = p + 1;
                        return ipc;
                }
        }

        /* Out of range - return NULL to terminate iteration */
        return NULL;
}

/*
 * File positions: pos 0 -> header, pos n -> ipc id + 1.
 * SeqFile iterator: the iterator value is a locked ipc pointer or
 * SEQ_START_TOKEN.
 */
static void *sysvipc_proc_start(struct seq_file *s, loff_t *pos)
{
        struct ipc_proc_iface *iface = s->private;
        struct kern_ipc_perm *ipc;
        loff_t p;
        struct ipc_ids *ids;

        ids = current->nsproxy->ipc_ns->ids[iface->ids];

        /*
         * Take the lock - this will be released by the corresponding
         * call to stop().
         */
        mutex_lock(&ids->mutex);

        /* pos < 0 is invalid */
        if (*pos < 0)
                return NULL;

        /* pos == 0 means header */
        if (*pos == 0)
                return SEQ_START_TOKEN;

        /* Find the (pos-1)th ipc */
        for (p = *pos - 1; p <= ids->max_id; p++) {
                if ((ipc = ipc_lock(ids, p)) != NULL) {
                        *pos = p + 1;
                        return ipc;
                }
        }
        return NULL;
}

static void sysvipc_proc_stop(struct seq_file *s, void *it)
{
        struct kern_ipc_perm *ipc = it;
        struct ipc_proc_iface *iface = s->private;
        struct ipc_ids *ids;

        /* If we had a locked segment, release it */
        if (ipc && ipc != SEQ_START_TOKEN)
                ipc_unlock(ipc);

        ids = current->nsproxy->ipc_ns->ids[iface->ids];
        /* Release the lock we took in start() */
        mutex_unlock(&ids->mutex);
}

static int sysvipc_proc_show(struct seq_file *s, void *it)
{
        struct ipc_proc_iface *iface = s->private;

        if (it == SEQ_START_TOKEN)
                return seq_puts(s, iface->header);

        return iface->show(s, it);
}

static struct seq_operations sysvipc_proc_seqops = {
        .start = sysvipc_proc_start,
        .stop  = sysvipc_proc_stop,
        .next  = sysvipc_proc_next,
        .show  = sysvipc_proc_show,
};

static int sysvipc_proc_open(struct inode *inode, struct file *file)
{
        int ret;
        struct seq_file *seq;

        ret = seq_open(file, &sysvipc_proc_seqops);
        if (!ret) {
                seq = file->private_data;
                seq->private = PDE(inode)->data;
        }
        return ret;
}

static struct file_operations sysvipc_proc_fops = {
        .open    = sysvipc_proc_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = seq_release,
};
#endif /* CONFIG_PROC_FS */
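
/*
 * Expected wiring from the per-subsystem init paths (a sketch; the
 * exact header strings and show routines live in sem.c/msg.c/shm.c):
 *
 *      ipc_init_proc_interface("sysvipc/msg",
 *                              "       key      msqid ...\n",
 *                              IPC_MSG_IDS, sysvipc_msg_proc_show);
 *
 * Each read of /proc/sysvipc/msg then drives the seq_file iterator
 * above: start() takes ids->mutex and locks one id, show() prints it,
 * next() unlocks it and locks the following one, and stop() drops the
 * mutex.
 */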