/*
 * The "user cache".
 *
 * (C) Copyright 1991-2000 Linus Torvalds
 *
 * We have a per-user structure to keep track of how many
 * processes, files etc the user has claimed, in order to be
 * able to have per-user limits for system resources.
 */

#include <linux/init.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/key.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/user_namespace.h>

struct user_namespace init_user_ns = {
	.kref = {
		.refcount = ATOMIC_INIT(2),
	},
	.creator = &root_user,
};
EXPORT_SYMBOL_GPL(init_user_ns);

/*
 * UID task count cache, to get fast user lookup in "alloc_uid"
 * when changing user ID's (ie setuid() and friends).
 */

#define UIDHASH_MASK		(UIDHASH_SZ - 1)
#define __uidhashfn(uid)	(((uid >> UIDHASH_BITS) + uid) & UIDHASH_MASK)
#define uidhashentry(ns, uid)	((ns)->uidhash_table + __uidhashfn((uid)))

static struct kmem_cache *uid_cachep;

/*
 * The uidhash_lock is mostly taken from process context, but it is
 * occasionally also taken from softirq/tasklet context, when
 * task-structs get RCU-freed.  Hence all locking must be softirq-safe.
 * But free_uid() is also called with local interrupts disabled, and running
 * local_bh_enable() with local interrupts disabled is an error - we'll run
 * softirq callbacks, and they can unconditionally enable interrupts, and
 * the caller of free_uid() didn't expect that..
 */
static DEFINE_SPINLOCK(uidhash_lock);

/* root_user.__count is 2, 1 for init task cred, 1 for init_user_ns->creator */
struct user_struct root_user = {
	.__count	= ATOMIC_INIT(2),
	.processes	= ATOMIC_INIT(1),
	.files		= ATOMIC_INIT(0),
	.sigpending	= ATOMIC_INIT(0),
	.locked_shm	= 0,
	.user_ns	= &init_user_ns,
};

/*
 * These routines must be called with the uidhash spinlock held!
 */
static void uid_hash_insert(struct user_struct *up, struct hlist_head *hashent)
{
	hlist_add_head(&up->uidhash_node, hashent);
}

static void uid_hash_remove(struct user_struct *up)
{
	hlist_del_init(&up->uidhash_node);
	put_user_ns(up->user_ns);
}

static struct user_struct *uid_hash_find(uid_t uid, struct hlist_head *hashent)
{
	struct user_struct *user;
	struct hlist_node *h;

	hlist_for_each_entry(user, h, hashent, uidhash_node) {
		if (user->uid == uid) {
			atomic_inc(&user->__count);
			return user;
		}
	}

	return NULL;
}

/* IRQs are disabled and uidhash_lock is held upon function entry.
 * IRQ state (as stored in flags) is restored and uidhash_lock released
 * upon function exit.
 */
static void free_user(struct user_struct *up, unsigned long flags)
{
	uid_hash_remove(up);
	spin_unlock_irqrestore(&uidhash_lock, flags);
	key_put(up->uid_keyring);
	key_put(up->session_keyring);
	kmem_cache_free(uid_cachep, up);
}

/*
 * Locate the user_struct for the passed UID.  If found, take a ref on it.
 * The caller must undo that ref with free_uid().
 *
 * If the user_struct could not be found, return NULL.
 */
struct user_struct *find_user(uid_t uid)
{
	struct user_struct *ret;
	unsigned long flags;
	struct user_namespace *ns = current_user_ns();

	spin_lock_irqsave(&uidhash_lock, flags);
	ret = uid_hash_find(uid, uidhashentry(ns, uid));
	spin_unlock_irqrestore(&uidhash_lock, flags);
	return ret;
}

void free_uid(struct user_struct *up)
{
	unsigned long flags;

	if (!up)
		return;

	local_irq_save(flags);
	if (atomic_dec_and_lock(&up->__count, &uidhash_lock))
		free_user(up, flags);
	else
		local_irq_restore(flags);
}

struct user_struct *alloc_uid(struct user_namespace *ns, uid_t uid)
{
	struct hlist_head *hashent = uidhashentry(ns, uid);
	struct user_struct *up, *new;

	spin_lock_irq(&uidhash_lock);
	up = uid_hash_find(uid, hashent);
	spin_unlock_irq(&uidhash_lock);

	if (!up) {
		new = kmem_cache_zalloc(uid_cachep, GFP_KERNEL);
		if (!new)
			goto out_unlock;

		new->uid = uid;
		atomic_set(&new->__count, 1);

		new->user_ns = get_user_ns(ns);

		/*
		 * Before adding this, check whether we raced
		 * on adding the same user already..
		 */
		spin_lock_irq(&uidhash_lock);
		up = uid_hash_find(uid, hashent);
		if (up) {
			/* We lost the race: drop the namespace reference
			 * taken above for the discarded entry, then free it. */
			put_user_ns(new->user_ns);
			key_put(new->uid_keyring);
			key_put(new->session_keyring);
			kmem_cache_free(uid_cachep, new);
		} else {
			uid_hash_insert(new, hashent);
			up = new;
		}
		spin_unlock_irq(&uidhash_lock);
	}

	return up;

out_unlock:
	return NULL;
}

static int __init uid_cache_init(void)
{
	int n;

	uid_cachep = kmem_cache_create("uid_cache", sizeof(struct user_struct),
			0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

	for (n = 0; n < UIDHASH_SZ; ++n)
		INIT_HLIST_HEAD(init_user_ns.uidhash_table + n);

	/* Insert the root user immediately (init already runs as root) */
	spin_lock_irq(&uidhash_lock);
	uid_hash_insert(&root_user, uidhashentry(&init_user_ns, 0));
	spin_unlock_irq(&uidhash_lock);

	return 0;
}

module_init(uid_cache_init);
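
/*
 * Illustrative sketch (kept disabled): a minimal caller of the reference
 * contract documented above for find_user()/free_uid() - look the UID up,
 * use the entry, then drop the reference.  example_uid_in_use() is a
 * hypothetical helper for illustration, not an existing kernel function.
 */
#if 0
static int example_uid_in_use(uid_t uid)
{
	struct user_struct *user;
	int busy = 0;

	user = find_user(uid);		/* takes a reference if found */
	if (user) {
		busy = atomic_read(&user->processes) > 0;
		free_uid(user);		/* undo find_user()'s reference */
	}
	return busy;
}
#endif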