kernel: Only expose su when daemon is running

It has been claimed that the PG implementation of 'su' has security
vulnerabilities even when disabled.  Unfortunately, the people who
find these vulnerabilities often like to keep them private so they
can profit from exploits while leaving users exposed to malicious
hackers.

In order to reduce the attack surface for vulnerabilities, it is
therefore necessary to make 'su' completely inaccessible when it
is not in use (except by the root and system users).

Change-Id: I79716c72f74d0b7af34ec3a8054896c6559a181d
Tom Marshall, 2017-01-25 18:01:03 +01:00 (committed by codeworkx)
commit e76227950e, parent 836593a31c
10 changed files with 83 additions and 0 deletions
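
From userspace the change shows up as a plain ENOENT: with no su daemon running, a caller that is neither root (uid 0) nor system (uid 1000) can no longer even stat() the binary on its read-only system partition. A minimal probe, assuming the conventional Android path /system/xbin/su (the path is an assumption, not something this patch defines):

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/stat.h>

int main(void)
{
        struct stat st;

        /* Hypothetical location; adjust for the device's layout. */
        if (stat("/system/xbin/su", &st) == 0)
                printf("su visible (daemon running, or caller is root/system)\n");
        else if (errno == ENOENT)
                printf("su hidden, or simply not installed\n");
        else
                printf("stat: %s\n", strerror(errno));
        return 0;
}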

fs/exec.c

@@ -1636,6 +1636,11 @@ static int do_execveat_common(int fd, struct filename *filename,
         if (retval < 0)
                 goto out;
 
+        if (d_is_su(file->f_path.dentry) && capable(CAP_SYS_ADMIN)) {
+                current->flags |= PF_SU;
+                su_exec();
+        }
+
         /* execve succeeded */
         current->fs->in_exec = 0;
         current->in_execve = 0;

fs/namei.c

@@ -2164,6 +2164,15 @@ static int path_lookupat(struct nameidata *nd, unsigned flags, struct path *path)
         if (!err && nd->flags & LOOKUP_DIRECTORY)
                 if (!d_can_lookup(nd->path.dentry))
                         err = -ENOTDIR;
+
+        if (!err) {
+                struct super_block *sb = nd->inode->i_sb;
+                if (sb->s_flags & MS_RDONLY) {
+                        if (d_is_su(nd->path.dentry) && !su_visible())
+                                err = -ENOENT;
+                }
+        }
+
         if (!err) {
                 *path = nd->path;
                 nd->path.mnt = NULL;
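
Note the MS_RDONLY gate: lookups are rejected only on read-only superblocks, so a file that merely happens to be named 'su' on writable storage (e.g. under /data) is left alone. The rule in isolation, as a stand-alone C sketch (plain types stand in for the kernel's dentry and super_block; this is a model, not the patch code):

#include <stdbool.h>
#include <string.h>

#define MS_RDONLY 1        /* mirrors the kernel's read-only superblock flag */

/* Hide iff: read-only fs, name is exactly "su", and su is not visible. */
static bool should_hide_su(const char *name, size_t namlen,
                           unsigned long sb_flags, bool su_visible)
{
        return (sb_flags & MS_RDONLY) &&
               namlen == 2 && !memcmp(name, "su", 2) &&
               !su_visible;
}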

fs/readdir.c

@@ -39,6 +39,7 @@ int iterate_dir(struct file *file, struct dir_context *ctx)
         res = -ENOENT;
         if (!IS_DEADDIR(inode)) {
                 ctx->pos = file->f_pos;
+                ctx->romnt = (inode->i_sb->s_flags & MS_RDONLY);
                 res = file->f_op->iterate(file, ctx);
                 file->f_pos = ctx->pos;
                 fsnotify_access(file);
@@ -50,6 +51,14 @@ out:
 }
 EXPORT_SYMBOL(iterate_dir);
 
+static bool hide_name(const char *name, int namlen)
+{
+        if (namlen == 2 && !memcmp(name, "su", 2))
+                if (!su_visible())
+                        return true;
+        return false;
+}
+
 /*
  * Traditional linux readdir() handling..
  *
@@ -89,6 +98,8 @@ static int fillonedir(struct dir_context *ctx, const char *name, int namlen,
                 buf->result = -EOVERFLOW;
                 return -EOVERFLOW;
         }
+        if (hide_name(name, namlen) && buf->ctx.romnt)
+                return 0;
         buf->result++;
         dirent = buf->dirent;
         if (!access_ok(VERIFY_WRITE, dirent,
@@ -167,6 +178,8 @@ static int filldir(struct dir_context *ctx, const char *name, int namlen,
                 buf->error = -EOVERFLOW;
                 return -EOVERFLOW;
         }
+        if (hide_name(name, namlen) && buf->ctx.romnt)
+                return 0;
         dirent = buf->previous;
         if (dirent) {
                 if (__put_user(offset, &dirent->d_off))
@@ -246,6 +259,8 @@ static int filldir64(struct dir_context *ctx, const char *name, int namlen,
         buf->error = -EINVAL;        /* only used if we fail.. */
         if (reclen > buf->count)
                 return -EINVAL;
+        if (hide_name(name, namlen) && buf->ctx.romnt)
+                return 0;
         dirent = buf->previous;
         if (dirent) {
                 if (__put_user(offset, &dirent->d_off))
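
Directory listings get the same treatment: all three getdents() flavors bail out of their fill callback before the name is copied to userspace, so on a read-only mount the entry simply never appears, even to ls. A quick userspace check, again assuming the usual Android directory (hypothetical path):

#include <dirent.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
        DIR *dir = opendir("/system/xbin");        /* assumed read-only mount */
        struct dirent *de;
        int seen = 0;

        if (!dir) {
                perror("opendir");
                return 1;
        }
        while ((de = readdir(dir)) != NULL)
                if (!strcmp(de->d_name, "su"))
                        seen = 1;
        closedir(dir);

        printf("'su' %s in the listing\n", seen ? "appears" : "does not appear");
        return 0;
}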

include/linux/dcache.h

@@ -522,6 +522,12 @@ static inline bool d_is_fallthru(const struct dentry *dentry)
         return dentry->d_flags & DCACHE_FALLTHRU;
 }
 
+static inline bool d_is_su(const struct dentry *dentry)
+{
+        return dentry &&
+               dentry->d_name.len == 2 &&
+               !memcmp(dentry->d_name.name, "su", 2);
+}
 
 extern int sysctl_vfs_cache_pressure;

include/linux/fs.h

@@ -1653,6 +1653,7 @@ typedef int (*filldir_t)(struct dir_context *, const char *, int, loff_t, u64,
 struct dir_context {
         const filldir_t actor;
         loff_t pos;
+        bool romnt;
 };
 
 struct block_device_operations;

include/linux/sched.h

@@ -63,6 +63,12 @@ struct sched_param {
 #include <asm/processor.h>
 
+int su_instances(void);
+bool su_running(void);
+bool su_visible(void);
+void su_exec(void);
+void su_exit(void);
+
 #define SCHED_ATTR_SIZE_VER0    48      /* sizeof first published struct */
 
 /*
@@ -2412,6 +2418,8 @@ extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut,
 #define PF_FREEZER_SKIP 0x40000000      /* Freezer should not count it as freezable */
 #define PF_SUSPEND_TASK 0x80000000      /* this thread called freeze_processes and should not be frozen */
 
+#define PF_SU           0x10000000      /* task is su */
+
 /*
  * Only the _current_ task can read/write to tsk->flags, but other
  * tasks can access tsk->flags in readonly mode for example

include/linux/uidgid.h

@@ -55,6 +55,7 @@ static inline gid_t __kgid_val(kgid_t gid)
 #define GLOBAL_ROOT_GID KGIDT_INIT(0)
 //huruihuan add for cgroup control
 #define GLOBAL_SYSTEM_UID KUIDT_INIT(1000)
+#define GLOBAL_SYSTEM_GID KGIDT_INIT(1000)
 #define INVALID_UID KUIDT_INIT(-1)
 #define INVALID_GID KGIDT_INIT(-1)

kernel/exit.c

@@ -717,6 +717,10 @@ void do_exit(long code)
         sched_exit(tsk);
         schedtune_exit_task(tsk);
 
+        if (tsk->flags & PF_SU) {
+                su_exit();
+        }
+
         /*
          * tsk->flags are checked in the futex code to protect against
          * an exiting task cleaning up the robust pi futexes.

kernel/fork.c

@@ -365,6 +365,8 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
         err = kaiser_map_thread_stack(tsk->stack);
         if (err)
                 goto free_stack;
 
+        tsk->flags &= ~PF_SU;
+
 #ifdef CONFIG_SECCOMP
         /*
          * We must handle setting up seccomp filters once we're under

kernel/sched/core.c

@@ -98,6 +98,38 @@
 #define CREATE_TRACE_POINTS
 #include <trace/events/sched.h>
 
+static atomic_t __su_instances;
+
+int su_instances(void)
+{
+        return atomic_read(&__su_instances);
+}
+
+bool su_running(void)
+{
+        return su_instances() > 0;
+}
+
+bool su_visible(void)
+{
+        kuid_t uid = current_uid();
+        if (su_running())
+                return true;
+        if (uid_eq(uid, GLOBAL_ROOT_UID) || uid_eq(uid, GLOBAL_SYSTEM_UID))
+                return true;
+        return false;
+}
+
+void su_exec(void)
+{
+        atomic_inc(&__su_instances);
+}
+
+void su_exit(void)
+{
+        atomic_dec(&__su_instances);
+}
+
 ATOMIC_NOTIFIER_HEAD(load_alert_notifier_head);
 
 DEFINE_MUTEX(sched_domains_mutex);
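
Taken together: su_exec()/su_exit() bracket the lifetime of every root-execed su task, dup_task_struct() clears PF_SU so forked children never decrement a count they did not increment, and su_visible() folds the instance count together with the root/system exemption. A stand-alone model of that state machine (C11 atomics stand in for the kernel's atomic_t, and uids are plain ints; illustration only):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int su_instances_model;

static void model_su_exec(void) { atomic_fetch_add(&su_instances_model, 1); }
static void model_su_exit(void) { atomic_fetch_sub(&su_instances_model, 1); }

static bool model_su_visible(int uid)
{
        if (atomic_load(&su_instances_model) > 0)
                return true;                    /* some su task is alive */
        return uid == 0 || uid == 1000;         /* root/system always see it */
}

int main(void)
{
        printf("app (uid 10001), no su running: %d\n", model_su_visible(10001)); /* 0 */
        printf("root, no su running:            %d\n", model_su_visible(0));     /* 1 */

        model_su_exec();        /* root execs 'su': PF_SU set, count++ */
        printf("app, su running:                %d\n", model_su_visible(10001)); /* 1 */

        model_su_exit();        /* the su task exits: count-- */
        printf("app, after su exits:            %d\n", model_su_visible(10001)); /* 0 */
        return 0;
}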