/*
 * The "user cache".
 *
 * (C) Copyright 1991-2000 Linus Torvalds
 *
 * We have a per-user structure to keep track of how many
 * processes, files etc the user has claimed, in order to be
 * able to have per-user limits for system resources.
 */

#include <linux/init.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/key.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/user_namespace.h>
#include "cred-internals.h"

struct user_namespace init_user_ns = {
	.kref = {
		.refcount	= ATOMIC_INIT(2),
	},
	.creator = &root_user,
};
EXPORT_SYMBOL_GPL(init_user_ns);

/*
 * UID task count cache, to get fast user lookup in "alloc_uid"
 * when changing user ID's (ie setuid() and friends).
 */

#define UIDHASH_MASK		(UIDHASH_SZ - 1)
#define __uidhashfn(uid)	(((uid >> UIDHASH_BITS) + uid) & UIDHASH_MASK)
#define uidhashentry(ns, uid)	((ns)->uidhash_table + __uidhashfn((uid)))

static struct kmem_cache *uid_cachep;

/*
 * The uidhash_lock is mostly taken from process context, but it is
 * occasionally also taken from softirq/tasklet context, when
 * task-structs get RCU-freed. Hence all locking must be softirq-safe.
 * But free_uid() is also called with local interrupts disabled, and running
 * local_bh_enable() with local interrupts disabled is an error - we'll run
 * softirq callbacks, and they can unconditionally enable interrupts, and
 * the caller of free_uid() didn't expect that..
 */
static DEFINE_SPINLOCK(uidhash_lock);

/* root_user.__count is 2, 1 for init task cred, 1 for init_user_ns->creator */
struct user_struct root_user = {
	.__count	= ATOMIC_INIT(2),
	.processes	= ATOMIC_INIT(1),
	.files		= ATOMIC_INIT(0),
	.sigpending	= ATOMIC_INIT(0),
	.locked_shm	= 0,
	.user_ns	= &init_user_ns,
};

/*
 * These routines must be called with the uidhash spinlock held!
 */
static void uid_hash_insert(struct user_struct *up, struct hlist_head *hashent)
{
	hlist_add_head(&up->uidhash_node, hashent);
}

static void uid_hash_remove(struct user_struct *up)
{
	hlist_del_init(&up->uidhash_node);
	put_user_ns(up->user_ns);
}

static struct user_struct *uid_hash_find(uid_t uid, struct hlist_head *hashent)
{
	struct user_struct *user;
	struct hlist_node *h;

	hlist_for_each_entry(user, h, hashent, uidhash_node) {
		if (user->uid == uid) {
			atomic_inc(&user->__count);
			return user;
		}
	}

	return NULL;
}

/* IRQs are disabled and uidhash_lock is held upon function entry.
 * IRQ state (as stored in flags) is restored and uidhash_lock released
 * upon function exit.
 */
static void free_user(struct user_struct *up, unsigned long flags)
{
	uid_hash_remove(up);
	spin_unlock_irqrestore(&uidhash_lock, flags);
	key_put(up->uid_keyring);
	key_put(up->session_keyring);
	kmem_cache_free(uid_cachep, up);
}

/*
 * Locate the user_struct for the passed UID. If found, take a ref on it. The
 * caller must undo that ref with free_uid().
 *
 * If the user_struct could not be found, return NULL.
 */
struct user_struct *find_user(uid_t uid)
{
	struct user_struct *ret;
	unsigned long flags;
	struct user_namespace *ns = current_user_ns();

	spin_lock_irqsave(&uidhash_lock, flags);
	ret = uid_hash_find(uid, uidhashentry(ns, uid));
	spin_unlock_irqrestore(&uidhash_lock, flags);
	return ret;
}

void free_uid(struct user_struct *up)
{
	unsigned long flags;

	if (!up)
		return;

	local_irq_save(flags);
	if (atomic_dec_and_lock(&up->__count, &uidhash_lock))
		free_user(up, flags);
	else
		local_irq_restore(flags);
}

struct user_struct *alloc_uid(struct user_namespace *ns, uid_t uid)
{
	struct hlist_head *hashent = uidhashentry(ns, uid);
	struct user_struct *up, *new;

	/*
	 * Do the quick lookup under the lock first; if the user is not
	 * cached yet, allocate outside the lock and re-check below.
	 */
	spin_lock_irq(&uidhash_lock);
	up = uid_hash_find(uid, hashent);
	spin_unlock_irq(&uidhash_lock);

	if (!up) {
		new = kmem_cache_zalloc(uid_cachep, GFP_KERNEL);
		if (!new)
			goto out;

		new->uid = uid;
		atomic_set(&new->__count, 1);

		new->user_ns = get_user_ns(ns);

		/*
		 * Before adding this, check whether we raced
		 * on adding the same user already..
		 */
		spin_lock_irq(&uidhash_lock);
		up = uid_hash_find(uid, hashent);
		if (up) {
			/* Lost the race: another task inserted this UID
			 * first.  Drop the namespace reference taken above
			 * and free the unused user_struct; keep the one
			 * already in the hash.
			 */
			put_user_ns(new->user_ns);
			key_put(new->uid_keyring);
			key_put(new->session_keyring);
			kmem_cache_free(uid_cachep, new);
		} else {
			uid_hash_insert(new, hashent);
			up = new;
		}
		spin_unlock_irq(&uidhash_lock);
	}

	return up;

out:
	return NULL;
}

static int __init uid_cache_init(void)
{
	int n;

	uid_cachep = kmem_cache_create("uid_cache", sizeof(struct user_struct),
			0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

	for (n = 0; n < UIDHASH_SZ; ++n)
		INIT_HLIST_HEAD(init_user_ns.uidhash_table + n);

	/* Insert the root user immediately (init already runs as root) */
	spin_lock_irq(&uidhash_lock);
	uid_hash_insert(&root_user, uidhashentry(&init_user_ns, 0));
	spin_unlock_irq(&uidhash_lock);

	return 0;
}

module_init(uid_cache_init);
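
/*
 * Illustrative sketch, not part of the original file: how callers are
 * expected to pair the lookup and reference-counting API above.
 * find_user() takes a reference on success, and every such reference
 * must eventually be dropped with free_uid().  The helper name
 * example_report_uid() and its pr_info() output are hypothetical and
 * exist only to show the pairing.
 */
static void __maybe_unused example_report_uid(uid_t uid)
{
	struct user_struct *u;

	u = find_user(uid);		/* NULL if this UID is not cached */
	if (!u)
		return;

	pr_info("uid %u currently has %d processes\n",
		uid, atomic_read(&u->processes));

	free_uid(u);			/* drop the ref find_user() took */
}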