mnt: Tuck mounts under others instead of creating shadow/side mounts.

am: 839d42687d

Change-Id: Id06d474c7f7b46c1ce05056028ca02837cacf215
Author:    Eric W. Biederman
Date:      2017-03-15 02:10:19 +00:00
Committer: android-build-merger
Commit:    cd237bd504

4 changed files with 110 additions and 63 deletions

fs/mount.h

@@ -86,7 +86,6 @@ static inline int is_mounted(struct vfsmount *mnt)
 }
 
 extern struct mount *__lookup_mnt(struct vfsmount *, struct dentry *);
-extern struct mount *__lookup_mnt_last(struct vfsmount *, struct dentry *);
 
 extern int __legitimize_mnt(struct vfsmount *, unsigned);
 extern bool legitimize_mnt(struct vfsmount *, unsigned);
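
Note: with mounts tucked underneath one another rather than hashed behind each other, at most one mount remains hashed at any given (parent, mountpoint) pair, so the plain first-match walk of __lookup_mnt() is enough and the __lookup_mnt_last() variant removed here (and in fs/namespace.c below) has no remaining callers. A minimal userspace sketch of such a first-match walk; toy_mount, lookup_mnt and the sample data are simplified stand-ins invented for illustration, not kernel code:

#include <stdio.h>
#include <string.h>

struct toy_mount {
        int parent_id;               /* stands in for the parent vfsmount */
        const char *mountpoint;      /* stands in for the dentry          */
        const char *name;
        struct toy_mount *hash_next; /* stands in for the mnt_hash chain  */
};

/* Return the first mount hashed at (parent_id, mountpoint). */
static struct toy_mount *lookup_mnt(struct toy_mount *chain,
                                    int parent_id, const char *mountpoint)
{
        for (struct toy_mount *p = chain; p; p = p->hash_next) {
                if (p->parent_id == parent_id &&
                    strcmp(p->mountpoint, mountpoint) == 0)
                        return p;   /* first match wins */
        }
        return NULL;
}

int main(void)
{
        struct toy_mount b = { 1, "/mnt", "tmpfs-B", NULL };
        struct toy_mount a = { 1, "/mnt", "tmpfs-A", &b };
        struct toy_mount *hit = lookup_mnt(&a, 1, "/mnt");

        printf("first mount at (1, /mnt): %s\n", hit ? hit->name : "none");
        return 0;
}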

fs/namespace.c

@@ -638,28 +638,6 @@ struct mount *__lookup_mnt(struct vfsmount *mnt, struct dentry *dentry)
 	return NULL;
 }
 
-/*
- * find the last mount at @dentry on vfsmount @mnt.
- * mount_lock must be held.
- */
-struct mount *__lookup_mnt_last(struct vfsmount *mnt, struct dentry *dentry)
-{
-	struct mount *p, *res = NULL;
-	p = __lookup_mnt(mnt, dentry);
-	if (!p)
-		goto out;
-	if (!(p->mnt.mnt_flags & MNT_UMOUNT))
-		res = p;
-	hlist_for_each_entry_continue(p, mnt_hash) {
-		if (&p->mnt_parent->mnt != mnt || p->mnt_mountpoint != dentry)
-			break;
-		if (!(p->mnt.mnt_flags & MNT_UMOUNT))
-			res = p;
-	}
-out:
-	return res;
-}
-
 /*
  * lookup_mnt - Return the first child mount mounted at path
  *
@@ -880,6 +858,13 @@ void mnt_set_mountpoint(struct mount *mnt,
 	hlist_add_head(&child_mnt->mnt_mp_list, &mp->m_list);
 }
 
+static void __attach_mnt(struct mount *mnt, struct mount *parent)
+{
+	hlist_add_head_rcu(&mnt->mnt_hash,
+			   m_hash(&parent->mnt, mnt->mnt_mountpoint));
+	list_add_tail(&mnt->mnt_child, &parent->mnt_mounts);
+}
+
 /*
  * vfsmount lock must be held for write
  */
@@ -888,28 +873,45 @@ static void attach_mnt(struct mount *mnt,
 			struct mountpoint *mp)
 {
 	mnt_set_mountpoint(parent, mp, mnt);
-	hlist_add_head_rcu(&mnt->mnt_hash, m_hash(&parent->mnt, mp->m_dentry));
-	list_add_tail(&mnt->mnt_child, &parent->mnt_mounts);
+	__attach_mnt(mnt, parent);
 }
 
-static void attach_shadowed(struct mount *mnt,
-			struct mount *parent,
-			struct mount *shadows)
+void mnt_change_mountpoint(struct mount *parent, struct mountpoint *mp, struct mount *mnt)
 {
-	if (shadows) {
-		hlist_add_behind_rcu(&mnt->mnt_hash, &shadows->mnt_hash);
-		list_add(&mnt->mnt_child, &shadows->mnt_child);
-	} else {
-		hlist_add_head_rcu(&mnt->mnt_hash,
-				m_hash(&parent->mnt, mnt->mnt_mountpoint));
-		list_add_tail(&mnt->mnt_child, &parent->mnt_mounts);
-	}
+	struct mountpoint *old_mp = mnt->mnt_mp;
+	struct dentry *old_mountpoint = mnt->mnt_mountpoint;
+	struct mount *old_parent = mnt->mnt_parent;
+
+	list_del_init(&mnt->mnt_child);
+	hlist_del_init(&mnt->mnt_mp_list);
+	hlist_del_init_rcu(&mnt->mnt_hash);
+
+	attach_mnt(mnt, parent, mp);
+
+	put_mountpoint(old_mp);
+
+	/*
+	 * Safely avoid even the suggestion this code might sleep or
+	 * lock the mount hash by taking advantage of the knowledge that
+	 * mnt_change_mountpoint will not release the final reference
+	 * to a mountpoint.
+	 *
+	 * During mounting, the mount passed in as the parent mount will
+	 * continue to use the old mountpoint and during unmounting, the
+	 * old mountpoint will continue to exist until namespace_unlock,
+	 * which happens well after mnt_change_mountpoint.
+	 */
+	spin_lock(&old_mountpoint->d_lock);
+	old_mountpoint->d_lockref.count--;
+	spin_unlock(&old_mountpoint->d_lock);
+
+	mnt_add_count(old_parent, -1);
 }
 
 /*
  * vfsmount lock must be held for write
  */
-static void commit_tree(struct mount *mnt, struct mount *shadows)
+static void commit_tree(struct mount *mnt)
 {
 	struct mount *parent = mnt->mnt_parent;
 	struct mount *m;
@@ -924,7 +926,7 @@ static void commit_tree(struct mount *mnt, struct mount *shadows)
 	list_splice(&head, n->list.prev);
 
-	attach_shadowed(mnt, parent, shadows);
+	__attach_mnt(mnt, parent);
 	touch_mnt_namespace(n);
 }
@ -1738,7 +1740,6 @@ struct mount *copy_tree(struct mount *mnt, struct dentry *dentry,
continue; continue;
for (s = r; s; s = next_mnt(s, r)) { for (s = r; s; s = next_mnt(s, r)) {
struct mount *t = NULL;
if (!(flag & CL_COPY_UNBINDABLE) && if (!(flag & CL_COPY_UNBINDABLE) &&
IS_MNT_UNBINDABLE(s)) { IS_MNT_UNBINDABLE(s)) {
s = skip_mnt_tree(s); s = skip_mnt_tree(s);
@@ -1760,14 +1761,7 @@ struct mount *copy_tree(struct mount *mnt, struct dentry *dentry,
 			goto out;
 		lock_mount_hash();
 		list_add_tail(&q->mnt_list, &res->mnt_list);
-		mnt_set_mountpoint(parent, p->mnt_mp, q);
-		if (!list_empty(&parent->mnt_mounts)) {
-			t = list_last_entry(&parent->mnt_mounts,
-					struct mount, mnt_child);
-			if (t->mnt_mp != p->mnt_mp)
-				t = NULL;
-		}
-		attach_shadowed(q, parent, t);
+		attach_mnt(q, parent, p->mnt_mp);
 		unlock_mount_hash();
 	}
 }
@@ -1945,10 +1939,18 @@ static int attach_recursive_mnt(struct mount *source_mnt,
 			struct path *parent_path)
 {
 	HLIST_HEAD(tree_list);
+	struct mountpoint *smp;
 	struct mount *child, *p;
 	struct hlist_node *n;
 	int err;
 
+	/* Preallocate a mountpoint in case the new mounts need
+	 * to be tucked under other mounts.
+	 */
+	smp = get_mountpoint(source_mnt->mnt.mnt_root);
+	if (IS_ERR(smp))
+		return PTR_ERR(smp);
+
 	if (IS_MNT_SHARED(dest_mnt)) {
 		err = invent_group_ids(source_mnt, true);
 		if (err)
@@ -1968,16 +1970,19 @@ static int attach_recursive_mnt(struct mount *source_mnt,
 		touch_mnt_namespace(source_mnt->mnt_ns);
 	} else {
 		mnt_set_mountpoint(dest_mnt, dest_mp, source_mnt);
-		commit_tree(source_mnt, NULL);
+		commit_tree(source_mnt);
 	}
 
 	hlist_for_each_entry_safe(child, n, &tree_list, mnt_hash) {
 		struct mount *q;
 		hlist_del_init(&child->mnt_hash);
-		q = __lookup_mnt_last(&child->mnt_parent->mnt,
-				      child->mnt_mountpoint);
-		commit_tree(child, q);
+		q = __lookup_mnt(&child->mnt_parent->mnt,
+				 child->mnt_mountpoint);
+		if (q)
+			mnt_change_mountpoint(child, smp, q);
+		commit_tree(child);
 	}
+	put_mountpoint(smp);
 	unlock_mount_hash();
 
 	return 0;
@@ -1990,6 +1995,10 @@ static int attach_recursive_mnt(struct mount *source_mnt,
 	unlock_mount_hash();
 	cleanup_group_ids(source_mnt, NULL);
  out:
+	read_seqlock_excl(&mount_lock);
+	put_mountpoint(smp);
+	read_sequnlock_excl(&mount_lock);
+
 	return err;
 }
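
Note: a rough userspace model of the tucking performed by attach_recursive_mnt() above. When a propagated copy lands on a mountpoint that already carries a mount q, mnt_change_mountpoint() re-points q onto the root of the new copy, so the copy sits underneath q instead of becoming an unreachable shadow mount hashed behind it. toy_mount, change_mountpoint and the sample mounts below are hypothetical stand-ins, not the kernel implementation:

#include <stdio.h>

struct toy_mount {
        const char *name;
        const char *root;        /* root of this mount's filesystem    */
        const char *mountpoint;  /* where it is attached in the parent */
        struct toy_mount *parent;
};

/* Rough analogue of mnt_change_mountpoint(): move mnt onto (parent, mp). */
static void change_mountpoint(struct toy_mount *parent, const char *mp,
                              struct toy_mount *mnt)
{
        mnt->parent = parent;
        mnt->mountpoint = mp;
}

int main(void)
{
        struct toy_mount dest = { "dest",  "/", NULL,   NULL  };
        struct toy_mount q    = { "old-q", "/", "/mnt", &dest }; /* already mounted here */
        struct toy_mount copy = { "copy",  "/", NULL,   NULL  }; /* propagated new mount */

        /* Attach the propagated copy at the contested mountpoint ...     */
        change_mountpoint(&dest, "/mnt", &copy);
        /* ... and tuck the pre-existing mount on top of the copy's root. */
        change_mountpoint(&copy, copy.root, &q);

        printf("%s mounted on %s at %s\n", q.name, q.parent->name, q.mountpoint);
        printf("%s mounted on %s at %s\n", copy.name, copy.parent->name, copy.mountpoint);
        return 0;
}

The sketch prints that old-q now hangs off the copy's root while the copy occupies the original mountpoint, which is the layout the real code builds with the preallocated mountpoint smp.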

fs/pnode.c

@@ -324,6 +324,21 @@ out:
 	return ret;
 }
 
+static struct mount *find_topper(struct mount *mnt)
+{
+	/* If there is exactly one mount covering mnt completely return it. */
+	struct mount *child;
+
+	if (!list_is_singular(&mnt->mnt_mounts))
+		return NULL;
+
+	child = list_first_entry(&mnt->mnt_mounts, struct mount, mnt_child);
+	if (child->mnt_mountpoint != mnt->mnt.mnt_root)
+		return NULL;
+
+	return child;
+}
+
 /*
  * return true if the refcount is greater than count
  */
@@ -344,9 +359,8 @@ static inline int do_refcount_check(struct mount *mnt, int count)
  */
 int propagate_mount_busy(struct mount *mnt, int refcnt)
 {
-	struct mount *m, *child;
+	struct mount *m, *child, *topper;
 	struct mount *parent = mnt->mnt_parent;
-	int ret = 0;
 
 	if (mnt == parent)
 		return do_refcount_check(mnt, refcnt);
@@ -361,12 +375,24 @@ int propagate_mount_busy(struct mount *mnt, int refcnt)
 
 	for (m = propagation_next(parent, parent); m;
 			m = propagation_next(m, parent)) {
-		child = __lookup_mnt_last(&m->mnt, mnt->mnt_mountpoint);
-		if (child && list_empty(&child->mnt_mounts) &&
-		    (ret = do_refcount_check(child, 1)))
-			break;
+		int count = 1;
+		child = __lookup_mnt(&m->mnt, mnt->mnt_mountpoint);
+		if (!child)
+			continue;
+
+		/* Is there exactly one mount on the child that covers
+		 * it completely whose reference should be ignored?
+		 */
+		topper = find_topper(child);
+		if (topper)
+			count += 1;
+		else if (!list_empty(&child->mnt_mounts))
+			continue;
+
+		if (do_refcount_check(child, count))
+			return 1;
 	}
-	return ret;
+	return 0;
 }
 
 /*
@@ -383,7 +409,7 @@ void propagate_mount_unlock(struct mount *mnt)
 	for (m = propagation_next(parent, parent); m;
 			m = propagation_next(m, parent)) {
-		child = __lookup_mnt_last(&m->mnt, mnt->mnt_mountpoint);
+		child = __lookup_mnt(&m->mnt, mnt->mnt_mountpoint);
 		if (child)
 			child->mnt.mnt_flags &= ~MNT_LOCKED;
 	}
@@ -401,9 +427,11 @@ static void mark_umount_candidates(struct mount *mnt)
 	for (m = propagation_next(parent, parent); m;
 			m = propagation_next(m, parent)) {
-		struct mount *child = __lookup_mnt_last(&m->mnt,
+		struct mount *child = __lookup_mnt(&m->mnt,
 						mnt->mnt_mountpoint);
-		if (child && (!IS_MNT_LOCKED(child) || IS_MNT_MARKED(m))) {
+		if (!child || (child->mnt.mnt_flags & MNT_UMOUNT))
+			continue;
+		if (!IS_MNT_LOCKED(child) || IS_MNT_MARKED(m)) {
 			SET_MNT_MARK(child);
 		}
 	}
@@ -422,8 +450,8 @@ static void __propagate_umount(struct mount *mnt)
 	for (m = propagation_next(parent, parent); m;
 			m = propagation_next(m, parent)) {
-		struct mount *child = __lookup_mnt_last(&m->mnt,
+		struct mount *topper;
+		struct mount *child = __lookup_mnt(&m->mnt,
 						mnt->mnt_mountpoint);
 		/*
 		 * umount the child only if the child has no children
@@ -432,6 +460,15 @@ static void __propagate_umount(struct mount *mnt)
 		if (!child || !IS_MNT_MARKED(child))
 			continue;
 		CLEAR_MNT_MARK(child);
+
+		/* If there is exactly one mount covering all of child
+		 * replace child with that mount.
+		 */
+		topper = find_topper(child);
+		if (topper)
+			mnt_change_mountpoint(child->mnt_parent, child->mnt_mp,
+					      topper);
+
 		if (list_empty(&child->mnt_mounts)) {
 			list_del_init(&child->mnt_child);
 			child->mnt.mnt_flags |= MNT_UMOUNT;
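
Note: the reworked busy check in propagate_mount_busy() above tolerates exactly one covering mount, because the topper found by find_topper() contributes one reference that should be ignored. A simplified userspace sketch of that accounting; toy_mount, mount_is_busy and the sample refcounts are assumptions made for illustration, not kernel code:

#include <stdio.h>
#include <string.h>

struct toy_mount {
        const char *root;        /* mnt->mnt.mnt_root                 */
        const char *mountpoint;  /* mnt->mnt_mountpoint in the parent */
        int refcount;
        int nr_children;
        struct toy_mount *first_child;
};

/* Exactly one child, and it covers the parent's root completely. */
static struct toy_mount *find_topper(struct toy_mount *mnt)
{
        if (mnt->nr_children != 1)
                return NULL;
        if (strcmp(mnt->first_child->mountpoint, mnt->root) != 0)
                return NULL;
        return mnt->first_child;
}

/* Busy unless the only extra reference comes from a covering topper. */
static int mount_is_busy(struct toy_mount *child)
{
        int expected = 1;        /* the reference the caller accounts for */

        if (find_topper(child))
                expected += 1;   /* the topper's reference is ignored     */
        else if (child->nr_children)
                return 0;        /* other children: this check is skipped */

        return child->refcount > expected;
}

int main(void)
{
        struct toy_mount topper = { "/", "/",    1, 0, NULL };
        struct toy_mount child  = { "/", "/mnt", 2, 1, &topper };

        printf("busy: %d\n", mount_is_busy(&child)); /* 0: only the topper pins it */
        return 0;
}

With refcount 2 and a single covering topper the mount is reported as not busy; any additional reference flips the result, mirroring do_refcount_check(child, count) in the hunk above.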

fs/pnode.h

@@ -50,6 +50,8 @@ int get_dominating_id(struct mount *mnt, const struct path *root);
 unsigned int mnt_get_count(struct mount *mnt);
 void mnt_set_mountpoint(struct mount *, struct mountpoint *,
 			struct mount *);
+void mnt_change_mountpoint(struct mount *parent, struct mountpoint *mp,
+			   struct mount *mnt);
 struct mount *copy_tree(struct mount *, struct dentry *, int);
 bool is_path_reachable(struct mount *, struct dentry *,
 			const struct path *root);