1 /* 2 * linux/fs/pnode.c 3 * 4 * (C) Copyright IBM Corporation 2005. 5 * Released under GPL v2. 6 * Author : Ram Pai (linuxram@us.ibm.com) 7 * 8 */ 9 #include <linux/mnt_namespace.h> 10 #include <linux/mount.h> 11 #include <linux/fs.h> 12 #include <linux/nsproxy.h> 13 #include "internal.h" 14 #include "pnode.h" 15 16 /* return the next shared peer mount of @p */ 17 static inline struct mount *next_peer(struct mount *p) 18 { 19 return list_entry(p->mnt_share.next, struct mount, mnt_share); 20 } 21 22 static inline struct mount *first_slave(struct mount *p) 23 { 24 return list_entry(p->mnt_slave_list.next, struct mount, mnt_slave); 25 } 26 27 static inline struct mount *next_slave(struct mount *p) 28 { 29 return list_entry(p->mnt_slave.next, struct mount, mnt_slave); 30 } 31 32 static struct mount *get_peer_under_root(struct mount *mnt, 33 struct mnt_namespace *ns, 34 const struct path *root) 35 { 36 struct mount *m = mnt; 37 38 do { 39 /* Check the namespace first for optimization */ 40 if (m->mnt_ns == ns && is_path_reachable(m, m->mnt.mnt_root, root)) 41 return m; 42 43 m = next_peer(m); 44 } while (m != mnt); 45 46 return NULL; 47 } 48 49 /* 50 * Get ID of closest dominating peer group having a representative 51 * under the given root. 52 * 53 * Caller must hold namespace_sem 54 */ 55 int get_dominating_id(struct mount *mnt, const struct path *root) 56 { 57 struct mount *m; 58 59 for (m = mnt->mnt_master; m != NULL; m = m->mnt_master) { 60 struct mount *d = get_peer_under_root(m, mnt->mnt_ns, root); 61 if (d) 62 return d->mnt_group_id; 63 } 64 65 return 0; 66 } 67 68 static int do_make_slave(struct mount *mnt) 69 { 70 struct mount *peer_mnt = mnt, *master = mnt->mnt_master; 71 struct mount *slave_mnt; 72 73 /* 74 * slave 'mnt' to a peer mount that has the 75 * same root dentry. If none is available then 76 * slave it to anything that is available. 77 */ 78 while ((peer_mnt = next_peer(peer_mnt)) != mnt && 79 peer_mnt->mnt.mnt_root != mnt->mnt.mnt_root) ; 80 81 if (peer_mnt == mnt) { 82 peer_mnt = next_peer(mnt); 83 if (peer_mnt == mnt) 84 peer_mnt = NULL; 85 } 86 if (mnt->mnt_group_id && IS_MNT_SHARED(mnt) && 87 list_empty(&mnt->mnt_share)) 88 mnt_release_group_id(mnt); 89 90 list_del_init(&mnt->mnt_share); 91 mnt->mnt_group_id = 0; 92 93 if (peer_mnt) 94 master = peer_mnt; 95 96 if (master) { 97 list_for_each_entry(slave_mnt, &mnt->mnt_slave_list, mnt_slave) 98 slave_mnt->mnt_master = master; 99 list_move(&mnt->mnt_slave, &master->mnt_slave_list); 100 list_splice(&mnt->mnt_slave_list, master->mnt_slave_list.prev); 101 INIT_LIST_HEAD(&mnt->mnt_slave_list); 102 } else { 103 struct list_head *p = &mnt->mnt_slave_list; 104 while (!list_empty(p)) { 105 slave_mnt = list_first_entry(p, 106 struct mount, mnt_slave); 107 list_del_init(&slave_mnt->mnt_slave); 108 slave_mnt->mnt_master = NULL; 109 } 110 } 111 mnt->mnt_master = master; 112 CLEAR_MNT_SHARED(mnt); 113 return 0; 114 } 115 116 /* 117 * vfsmount lock must be held for write 118 */ 119 void change_mnt_propagation(struct mount *mnt, int type) 120 { 121 if (type == MS_SHARED) { 122 set_mnt_shared(mnt); 123 return; 124 } 125 do_make_slave(mnt); 126 if (type != MS_SLAVE) { 127 list_del_init(&mnt->mnt_slave); 128 mnt->mnt_master = NULL; 129 if (type == MS_UNBINDABLE) 130 mnt->mnt.mnt_flags |= MNT_UNBINDABLE; 131 else 132 mnt->mnt.mnt_flags &= ~MNT_UNBINDABLE; 133 } 134 } 135 136 /* 137 * get the next mount in the propagation tree. 138 * @m: the mount seen last 139 * @origin: the original mount from where the tree walk initiated 140 * 141 * Note that peer groups form contiguous segments of slave lists. 142 * We rely on that in get_source() to be able to find out if 143 * vfsmount found while iterating with propagation_next() is 144 * a peer of one we'd found earlier. 145 */ 146 static struct mount *propagation_next(struct mount *m, 147 struct mount *origin) 148 { 149 /* are there any slaves of this mount? */ 150 if (!IS_MNT_NEW(m) && !list_empty(&m->mnt_slave_list)) 151 return first_slave(m); 152 153 while (1) { 154 struct mount *master = m->mnt_master; 155 156 if (master == origin->mnt_master) { 157 struct mount *next = next_peer(m); 158 return (next == origin) ? NULL : next; 159 } else if (m->mnt_slave.next != &master->mnt_slave_list) 160 return next_slave(m); 161 162 /* back at master */ 163 m = master; 164 } 165 } 166 167 static struct mount *next_group(struct mount *m, struct mount *origin) 168 { 169 while (1) { 170 while (1) { 171 struct mount *next; 172 if (!IS_MNT_NEW(m) && !list_empty(&m->mnt_slave_list)) 173 return first_slave(m); 174 next = next_peer(m); 175 if (m->mnt_group_id == origin->mnt_group_id) { 176 if (next == origin) 177 return NULL; 178 } else if (m->mnt_slave.next != &next->mnt_slave) 179 break; 180 m = next; 181 } 182 /* m is the last peer */ 183 while (1) { 184 struct mount *master = m->mnt_master; 185 if (m->mnt_slave.next != &master->mnt_slave_list) 186 return next_slave(m); 187 m = next_peer(master); 188 if (master->mnt_group_id == origin->mnt_group_id) 189 break; 190 if (master->mnt_slave.next == &m->mnt_slave) 191 break; 192 m = master; 193 } 194 if (m == origin) 195 return NULL; 196 } 197 } 198 199 /* all accesses are serialized by namespace_sem */ 200 static struct user_namespace *user_ns; 201 static struct mount *last_dest, *last_source, *dest_master; 202 static struct mountpoint *mp; 203 static struct hlist_head *list; 204 205 static inline bool peers(struct mount *m1, struct mount *m2) 206 { 207 return m1->mnt_group_id == m2->mnt_group_id && m1->mnt_group_id; 208 } 209 210 static int propagate_one(struct mount *m) 211 { 212 struct mount *child; 213 int type; 214 /* skip ones added by this propagate_mnt() */ 215 if (IS_MNT_NEW(m)) 216 return 0; 217 /* skip if mountpoint isn't covered by it */ 218 if (!is_subdir(mp->m_dentry, m->mnt.mnt_root)) 219 return 0; 220 if (peers(m, last_dest)) { 221 type = CL_MAKE_SHARED; 222 } else { 223 struct mount *n, *p; 224 for (n = m; ; n = p) { 225 p = n->mnt_master; 226 if (p == dest_master || IS_MNT_MARKED(p)) { 227 while (last_dest->mnt_master != p) { 228 last_source = last_source->mnt_master; 229 last_dest = last_source->mnt_parent; 230 } 231 if (!peers(n, last_dest)) { 232 last_source = last_source->mnt_master; 233 last_dest = last_source->mnt_parent; 234 } 235 break; 236 } 237 } 238 type = CL_SLAVE; 239 /* beginning of peer group among the slaves? */ 240 if (IS_MNT_SHARED(m)) 241 type |= CL_MAKE_SHARED; 242 } 243 244 /* Notice when we are propagating across user namespaces */ 245 if (m->mnt_ns->user_ns != user_ns) 246 type |= CL_UNPRIVILEGED; 247 child = copy_tree(last_source, last_source->mnt.mnt_root, type); 248 if (IS_ERR(child)) 249 return PTR_ERR(child); 250 child->mnt.mnt_flags &= ~MNT_LOCKED; 251 mnt_set_mountpoint(m, mp, child); 252 last_dest = m; 253 last_source = child; 254 if (m->mnt_master != dest_master) { 255 read_seqlock_excl(&mount_lock); 256 SET_MNT_MARK(m->mnt_master); 257 read_sequnlock_excl(&mount_lock); 258 } 259 hlist_add_head(&child->mnt_hash, list); 260 return 0; 261 } 262 263 /* 264 * mount 'source_mnt' under the destination 'dest_mnt' at 265 * dentry 'dest_dentry'. And propagate that mount to 266 * all the peer and slave mounts of 'dest_mnt'. 267 * Link all the new mounts into a propagation tree headed at 268 * source_mnt. Also link all the new mounts using ->mnt_list 269 * headed at source_mnt's ->mnt_list 270 * 271 * @dest_mnt: destination mount. 272 * @dest_dentry: destination dentry. 273 * @source_mnt: source mount. 274 * @tree_list : list of heads of trees to be attached. 275 */ 276 int propagate_mnt(struct mount *dest_mnt, struct mountpoint *dest_mp, 277 struct mount *source_mnt, struct hlist_head *tree_list) 278 { 279 struct mount *m, *n; 280 int ret = 0; 281 282 /* 283 * we don't want to bother passing tons of arguments to 284 * propagate_one(); everything is serialized by namespace_sem, 285 * so globals will do just fine. 286 */ 287 user_ns = current->nsproxy->mnt_ns->user_ns; 288 last_dest = dest_mnt; 289 last_source = source_mnt; 290 mp = dest_mp; 291 list = tree_list; 292 dest_master = dest_mnt->mnt_master; 293 294 /* all peers of dest_mnt, except dest_mnt itself */ 295 for (n = next_peer(dest_mnt); n != dest_mnt; n = next_peer(n)) { 296 ret = propagate_one(n); 297 if (ret) 298 goto out; 299 } 300 301 /* all slave groups */ 302 for (m = next_group(dest_mnt, dest_mnt); m; 303 m = next_group(m, dest_mnt)) { 304 /* everything in that slave group */ 305 n = m; 306 do { 307 ret = propagate_one(n); 308 if (ret) 309 goto out; 310 n = next_peer(n); 311 } while (n != m); 312 } 313 out: 314 read_seqlock_excl(&mount_lock); 315 hlist_for_each_entry(n, tree_list, mnt_hash) { 316 m = n->mnt_parent; 317 if (m->mnt_master != dest_mnt->mnt_master) 318 CLEAR_MNT_MARK(m->mnt_master); 319 } 320 read_sequnlock_excl(&mount_lock); 321 return ret; 322 } 323 324 /* 325 * return true if the refcount is greater than count 326 */ 327 static inline int do_refcount_check(struct mount *mnt, int count) 328 { 329 return mnt_get_count(mnt) > count; 330 } 331 332 /* 333 * check if the mount 'mnt' can be unmounted successfully. 334 * @mnt: the mount to be checked for unmount 335 * NOTE: unmounting 'mnt' would naturally propagate to all 336 * other mounts its parent propagates to. 337 * Check if any of these mounts that **do not have submounts** 338 * have more references than 'refcnt'. If so return busy. 339 * 340 * vfsmount lock must be held for write 341 */ 342 int propagate_mount_busy(struct mount *mnt, int refcnt) 343 { 344 struct mount *m, *child; 345 struct mount *parent = mnt->mnt_parent; 346 int ret = 0; 347 348 if (mnt == parent) 349 return do_refcount_check(mnt, refcnt); 350 351 /* 352 * quickly check if the current mount can be unmounted. 353 * If not, we don't have to go checking for all other 354 * mounts 355 */ 356 if (!list_empty(&mnt->mnt_mounts) || do_refcount_check(mnt, refcnt)) 357 return 1; 358 359 for (m = propagation_next(parent, parent); m; 360 m = propagation_next(m, parent)) { 361 child = __lookup_mnt_last(&m->mnt, mnt->mnt_mountpoint); 362 if (child && list_empty(&child->mnt_mounts) && 363 (ret = do_refcount_check(child, 1))) 364 break; 365 } 366 return ret; 367 } 368 369 /* 370 * Clear MNT_LOCKED when it can be shown to be safe. 371 * 372 * mount_lock lock must be held for write 373 */ 374 void propagate_mount_unlock(struct mount *mnt) 375 { 376 struct mount *parent = mnt->mnt_parent; 377 struct mount *m, *child; 378 379 BUG_ON(parent == mnt); 380 381 for (m = propagation_next(parent, parent); m; 382 m = propagation_next(m, parent)) { 383 child = __lookup_mnt_last(&m->mnt, mnt->mnt_mountpoint); 384 if (child) 385 child->mnt.mnt_flags &= ~MNT_LOCKED; 386 } 387 } 388 389 /* 390 * Mark all mounts that the MNT_LOCKED logic will allow to be unmounted. 391 */ 392 static void mark_umount_candidates(struct mount *mnt) 393 { 394 struct mount *parent = mnt->mnt_parent; 395 struct mount *m; 396 397 BUG_ON(parent == mnt); 398 399 for (m = propagation_next(parent, parent); m; 400 m = propagation_next(m, parent)) { 401 struct mount *child = __lookup_mnt_last(&m->mnt, 402 mnt->mnt_mountpoint); 403 if (child && (!IS_MNT_LOCKED(child) || IS_MNT_MARKED(m))) { 404 SET_MNT_MARK(child); 405 } 406 } 407 } 408 409 /* 410 * NOTE: unmounting 'mnt' naturally propagates to all other mounts its 411 * parent propagates to. 412 */ 413 static void __propagate_umount(struct mount *mnt) 414 { 415 struct mount *parent = mnt->mnt_parent; 416 struct mount *m; 417 418 BUG_ON(parent == mnt); 419 420 for (m = propagation_next(parent, parent); m; 421 m = propagation_next(m, parent)) { 422 423 struct mount *child = __lookup_mnt_last(&m->mnt, 424 mnt->mnt_mountpoint); 425 /* 426 * umount the child only if the child has no children 427 * and the child is marked safe to unmount. 428 */ 429 if (!child || !IS_MNT_MARKED(child)) 430 continue; 431 CLEAR_MNT_MARK(child); 432 if (list_empty(&child->mnt_mounts)) { 433 list_del_init(&child->mnt_child); 434 child->mnt.mnt_flags |= MNT_UMOUNT; 435 list_move_tail(&child->mnt_list, &mnt->mnt_list); 436 } 437 } 438 } 439 440 /* 441 * collect all mounts that receive propagation from the mount in @list, 442 * and return these additional mounts in the same list. 443 * @list: the list of mounts to be unmounted. 444 * 445 * vfsmount lock must be held for write 446 */ 447 int propagate_umount(struct list_head *list) 448 { 449 struct mount *mnt; 450 451 list_for_each_entry_reverse(mnt, list, mnt_list) 452 mark_umount_candidates(mnt); 453 454 list_for_each_entry(mnt, list, mnt_list) 455 __propagate_umount(mnt); 456 return 0; 457 } 458