Merge branch 'akpm' (incoming from Andrew)
Merge misc updates from Andrew Morton:

 - a few hotfixes
 - dynamic-debug updates
 - ipc updates
 - various other sweepings off the factory floor

* akpm: (31 commits)
  firmware/google: drop 'select EFI' to avoid recursive dependency
  compat: fix sys_fanotify_mark
  checkpatch.pl: check for function declarations without arguments
  mm/migrate.c: fix setting of cpupid on page migration twice against normal page
  softirq: use const char * const for softirq_to_name, whitespace neatening
  softirq: convert printks to pr_<level>
  softirq: use ffs() in __do_softirq()
  kernel/kexec.c: use vscnprintf() instead of vsnprintf() in vmcoreinfo_append_str()
  splice: fix unexpected size truncation
  ipc: fix compat msgrcv with negative msgtyp
  ipc,msg: document barriers
  ipc: delete seq_max field in struct ipc_ids
  ipc: simplify sysvipc_proc_open() return
  ipc: remove useless return statement
  ipc: remove braces for single statements
  ipc: standardize code comments
  ipc: whitespace cleanup
  ipc: change kern_ipc_perm.deleted type to bool
  ipc: introduce ipc_valid_object() helper to sort out IPC_RMID races
  ipc/sem.c: avoid overflow of semop undo (semadj) value
  ...
commit 54c0a4b461
33 changed files with 447 additions and 415 deletions
@@ -731,7 +731,7 @@ static void __init request_standard_resources(const struct machine_desc *mdesc)
     kernel_data.end = virt_to_phys(_end - 1);

     for_each_memblock(memory, region) {
-        res = memblock_virt_alloc(sizeof(*res), 0);
+        res = memblock_virt_alloc_low(sizeof(*res), 0);
         res->name  = "System RAM";
         res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
         res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
@@ -35,17 +35,11 @@ static struct console early_ocd_console = {
 static int __init setup_early_printk(char *buf)
 {
-    int keep_early;
-
     if (!buf || early_console)
         return 0;

-    if (strstr(buf, "keep"))
-        keep_early = 1;
-
     early_console = &early_ocd_console;

-    if (keep_early)
+    if (strstr(buf, "keep"))
         early_console->flags &= ~CON_BOOT;
     else
         early_console->flags |= CON_BOOT;
@@ -51,9 +51,9 @@ extern int devmem_is_allowed(unsigned long pagenr);
 extern unsigned long max_low_pfn_mapped;
 extern unsigned long max_pfn_mapped;

-static inline phys_addr_t get_max_low_mapped(void)
+static inline phys_addr_t get_max_mapped(void)
 {
-    return (phys_addr_t)max_low_pfn_mapped << PAGE_SHIFT;
+    return (phys_addr_t)max_pfn_mapped << PAGE_SHIFT;
 }

 bool pfn_range_is_mapped(unsigned long start_pfn, unsigned long end_pfn);

@@ -1119,7 +1119,7 @@ void __init setup_arch(char **cmdline_p)
     setup_real_mode();

-    memblock_set_current_limit(get_max_low_mapped());
+    memblock_set_current_limit(get_max_mapped());
     dma_contiguous_reserve(0);

     /*
@@ -12,8 +12,7 @@ menu "Google Firmware Drivers"
 config GOOGLE_SMI
     tristate "SMI interface for Google platforms"
-    depends on ACPI && DMI
-    select EFI
+    depends on ACPI && DMI && EFI
     select EFI_VARS
     help
       Say Y here if you want to enable SMI callbacks for Google
@@ -886,9 +886,9 @@ COMPAT_SYSCALL_DEFINE6(fanotify_mark,
 {
     return sys_fanotify_mark(fanotify_fd, flags,
 #ifdef __BIG_ENDIAN
-            ((__u64)mask1 << 32) | mask0,
-#else
             ((__u64)mask0 << 32) | mask1,
+#else
+            ((__u64)mask1 << 32) | mask0,
 #endif
              dfd, pathname);
 }
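Note: the fanotify hunk above swaps which 32-bit half is treated as the high word on big- and little-endian compat kernels. A minimal user-space sketch of the split/rejoin idea (names are illustrative, not the syscall's own):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t mask = 0x0000000100000002ULL;
        uint32_t lo = (uint32_t)mask;          /* low 32 bits */
        uint32_t hi = (uint32_t)(mask >> 32);  /* high 32 bits */
        /* rejoining must put 'hi' back into the upper half on every ABI */
        uint64_t rejoined = ((uint64_t)hi << 32) | lo;
        printf("%d\n", mask == rejoined);      /* prints 1 */
        return 0;
    }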
@@ -948,7 +948,7 @@ leave:
     ocfs2_free_dir_lookup_result(&orphan_insert);
     ocfs2_free_dir_lookup_result(&lookup);

-    if (status && (status != -ENOTEMPTY))
+    if (status && (status != -ENOTEMPTY) && (status != -ENOENT))
         mlog_errno(status);

     return status;
@@ -175,6 +175,27 @@ static inline void * __init memblock_virt_alloc_nopanic(
                         NUMA_NO_NODE);
 }

+#ifndef ARCH_LOW_ADDRESS_LIMIT
+#define ARCH_LOW_ADDRESS_LIMIT  0xffffffffUL
+#endif
+
+static inline void * __init memblock_virt_alloc_low(
+                    phys_addr_t size, phys_addr_t align)
+{
+    return memblock_virt_alloc_try_nid(size, align,
+                        BOOTMEM_LOW_LIMIT,
+                        ARCH_LOW_ADDRESS_LIMIT,
+                        NUMA_NO_NODE);
+}
+
+static inline void * __init memblock_virt_alloc_low_nopanic(
+                    phys_addr_t size, phys_addr_t align)
+{
+    return memblock_virt_alloc_try_nid_nopanic(size, align,
+                        BOOTMEM_LOW_LIMIT,
+                        ARCH_LOW_ADDRESS_LIMIT,
+                        NUMA_NO_NODE);
+}
+
 static inline void * __init memblock_virt_alloc_from_nopanic(
         phys_addr_t size, phys_addr_t align, phys_addr_t min_addr)
 {

@@ -238,6 +259,22 @@ static inline void * __init memblock_virt_alloc_nopanic(
     return __alloc_bootmem_nopanic(size, align, BOOTMEM_LOW_LIMIT);
 }

+static inline void * __init memblock_virt_alloc_low(
+                    phys_addr_t size, phys_addr_t align)
+{
+    if (!align)
+        align = SMP_CACHE_BYTES;
+    return __alloc_bootmem_low(size, align, BOOTMEM_LOW_LIMIT);
+}
+
+static inline void * __init memblock_virt_alloc_low_nopanic(
+                    phys_addr_t size, phys_addr_t align)
+{
+    if (!align)
+        align = SMP_CACHE_BYTES;
+    return __alloc_bootmem_low_nopanic(size, align, BOOTMEM_LOW_LIMIT);
+}
+
 static inline void * __init memblock_virt_alloc_from_nopanic(
         phys_addr_t size, phys_addr_t align, phys_addr_t min_addr)
 {
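Note: the new _low helpers bound boot-time allocations below ARCH_LOW_ADDRESS_LIMIT; the ARM hunk earlier shows the real call pattern. A hedged sketch of a caller (struct boot_table is a made-up example, not something added by this series):

    /* hypothetical boot-time caller, kernel context */
    struct boot_table *tbl;

    /* bounded to ARCH_LOW_ADDRESS_LIMIT; this variant panics on failure */
    tbl = memblock_virt_alloc_low(sizeof(*tbl), SMP_CACHE_BYTES);

    /* the _nopanic variant returns NULL instead, so a fallback is possible */
    tbl = memblock_virt_alloc_low_nopanic(sizeof(*tbl), SMP_CACHE_BYTES);
    if (!tbl)
        pr_warn("boot table allocation failed\n");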
@@ -360,7 +360,7 @@ enum
 /* map softirq index to softirq name. update 'softirq_to_name' in
  * kernel/softirq.c when adding a new softirq.
  */
-extern char *softirq_to_name[NR_SOFTIRQS];
+extern const char * const softirq_to_name[NR_SOFTIRQS];

 /* softirq mask and active fields moved to irq_cpustat_t in
  * asm/hardirq.h to get better cache usage.  KAO
@@ -11,7 +11,7 @@
 struct kern_ipc_perm
 {
     spinlock_t  lock;
-    int         deleted;
+    bool        deleted;
     int         id;
     key_t       key;
     kuid_t      uid;
@@ -21,7 +21,6 @@ struct user_namespace;
 struct ipc_ids {
     int in_use;
     unsigned short seq;
-    unsigned short seq_max;
     struct rw_semaphore rwsem;
     struct idr ipcs_idr;
     int next_id;
@@ -24,7 +24,8 @@
  * Passed to the actors
  */
 struct splice_desc {
-    unsigned int len, total_len;    /* current and remaining length */
+    size_t total_len;               /* remaining length */
+    unsigned int len;               /* current length */
     unsigned int flags;             /* splice flags */
     /*
      * actor() private data
@@ -92,8 +92,6 @@ static int kernel_init(void *);
 extern void init_IRQ(void);
 extern void fork_init(unsigned long);
-extern void mca_init(void);
-extern void sbus_init(void);
 extern void radix_tree_init(void);
 #ifndef CONFIG_DEBUG_RODATA
 static inline void mark_rodata_ro(void) { }
ipc/compat.c (20 changes)

@@ -288,11 +288,11 @@ static long do_compat_semctl(int first, int second, int third, u32 pad)
         break;

     case IPC_SET:
-        if (version == IPC_64) {
+        if (version == IPC_64)
             err = get_compat_semid64_ds(&s64, compat_ptr(pad));
-        } else {
+        else
             err = get_compat_semid_ds(&s64, compat_ptr(pad));
-        }

         up64 = compat_alloc_user_space(sizeof(s64));
         if (copy_to_user(up64, &s64, sizeof(s64)))
             err = -EFAULT;
@@ -381,7 +381,7 @@ COMPAT_SYSCALL_DEFINE6(ipc, u32, call, int, first, int, second,
             uptr = compat_ptr(ipck.msgp);
             fifth = ipck.msgtyp;
         }
-        return do_msgrcv(first, uptr, second, fifth, third,
+        return do_msgrcv(first, uptr, second, (s32)fifth, third,
                  compat_do_msg_fill);
     }
     case MSGGET:
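Note: without the (s32) cast, a negative msgtyp coming through the 32-bit compat path is zero-extended into a huge positive value. A plain user-space sketch of the sign-extension behaviour the cast restores (no kernel types involved):

    #include <stdio.h>

    int main(void)
    {
        unsigned int fifth = (unsigned int)-42;  /* what the compat layer receives */
        long wrong = (long)fifth;                /* 4294967254 on a 64-bit kernel */
        long right = (long)(int)fifth;           /* -42 again */
        printf("%ld %ld\n", wrong, right);
        return 0;
    }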
@@ -515,11 +515,11 @@ long compat_sys_msgctl(int first, int second, void __user *uptr)
         break;

     case IPC_SET:
-        if (version == IPC_64) {
+        if (version == IPC_64)
             err = get_compat_msqid64(&m64, uptr);
-        } else {
+        else
             err = get_compat_msqid(&m64, uptr);
-        }
+
         if (err)
             break;
         p = compat_alloc_user_space(sizeof(m64));

@@ -702,11 +702,11 @@ long compat_sys_shmctl(int first, int second, void __user *uptr)

     case IPC_SET:
-        if (version == IPC_64) {
+        if (version == IPC_64)
             err = get_compat_shmid64_ds(&s64, uptr);
-        } else {
+        else
             err = get_compat_shmid_ds(&s64, uptr);
-        }
+
         if (err)
             break;
         p = compat_alloc_user_space(sizeof(s64));

@@ -1303,11 +1303,11 @@ retry:
 out_fput:
     fdput(f);
 out:
-    if (sock) {
+    if (sock)
         netlink_detachskb(sock, nc);
-    } else if (nc) {
+    else if (nc)
         dev_kfree_skb(nc);
-    }

     return ret;
 }
ipc/msg.c (26 changes)

@@ -253,8 +253,14 @@ static void expunge_all(struct msg_queue *msq, int res)
     struct msg_receiver *msr, *t;

     list_for_each_entry_safe(msr, t, &msq->q_receivers, r_list) {
-        msr->r_msg = NULL;
+        msr->r_msg = NULL; /* initialize expunge ordering */
         wake_up_process(msr->r_tsk);
+        /*
+         * Ensure that the wakeup is visible before setting r_msg as
+         * the receiving end depends on it: either spinning on a nil,
+         * or dealing with -EAGAIN cases. See lockless receive part 1
+         * and 2 in do_msgrcv().
+         */
         smp_mb();
         msr->r_msg = ERR_PTR(res);
     }

@@ -638,15 +644,22 @@ static inline int pipelined_send(struct msg_queue *msq, struct msg_msg *msg)
         list_del(&msr->r_list);
         if (msr->r_maxsize < msg->m_ts) {
+            /* initialize pipelined send ordering */
             msr->r_msg = NULL;
             wake_up_process(msr->r_tsk);
-            smp_mb();
+            smp_mb(); /* see barrier comment below */
             msr->r_msg = ERR_PTR(-E2BIG);
         } else {
             msr->r_msg = NULL;
             msq->q_lrpid = task_pid_vnr(msr->r_tsk);
             msq->q_rtime = get_seconds();
             wake_up_process(msr->r_tsk);
+            /*
+             * Ensure that the wakeup is visible before
+             * setting r_msg, as the receiving end depends
+             * on it. See lockless receive part 1 and 2 in
+             * do_msgrcv().
+             */
             smp_mb();
             msr->r_msg = msg;

@@ -654,6 +667,7 @@ static inline int pipelined_send(struct msg_queue *msq, struct msg_msg *msg)
             }
         }
     }
+
     return 0;
 }

@@ -696,7 +710,7 @@ long do_msgsnd(int msqid, long mtype, void __user *mtext,
         goto out_unlock0;

     /* raced with RMID? */
-    if (msq->q_perm.deleted) {
+    if (!ipc_valid_object(&msq->q_perm)) {
         err = -EIDRM;
         goto out_unlock0;
     }

@@ -716,6 +730,7 @@ long do_msgsnd(int msqid, long mtype, void __user *mtext,
             goto out_unlock0;
         }

+        /* enqueue the sender and prepare to block */
         ss_add(msq, &s);

         if (!ipc_rcu_getref(msq)) {

@@ -731,7 +746,8 @@ long do_msgsnd(int msqid, long mtype, void __user *mtext,
         ipc_lock_object(&msq->q_perm);

         ipc_rcu_putref(msq, ipc_rcu_free);
-        if (msq->q_perm.deleted) {
+        /* raced with RMID? */
+        if (!ipc_valid_object(&msq->q_perm)) {
             err = -EIDRM;
             goto out_unlock0;
         }

@@ -909,7 +925,7 @@ long do_msgrcv(int msqid, void __user *buf, size_t bufsz, long msgtyp, int msgfl
         ipc_lock_object(&msq->q_perm);

         /* raced with RMID? */
-        if (msq->q_perm.deleted) {
+        if (!ipc_valid_object(&msq->q_perm)) {
             msg = ERR_PTR(-EIDRM);
             goto out_unlock0;
         }
ipc/sem.c (78 changes)

@@ -225,7 +225,7 @@ static void unmerge_queues(struct sem_array *sma)
 }

 /**
- * merge_queues - Merge single semop queues into global queue
+ * merge_queues - merge single semop queues into global queue
  * @sma: semaphore array
  *
  * This function merges all per-semaphore queues into the global queue.

@@ -394,7 +394,7 @@ static inline struct sem_array *sem_obtain_lock(struct ipc_namespace *ns,
     /* ipc_rmid() may have already freed the ID while sem_lock
      * was spinning: verify that the structure is still valid
      */
-    if (!ipcp->deleted)
+    if (ipc_valid_object(ipcp))
         return container_of(ipcp, struct sem_array, sem_perm);

     sem_unlock(sma, *locknum);

@@ -474,7 +474,6 @@ static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s)
  *
  * Called with sem_ids.rwsem held (as a writer)
  */
-
 static int newary(struct ipc_namespace *ns, struct ipc_params *params)
 {
     int id;

@@ -493,9 +492,9 @@ static int newary(struct ipc_namespace *ns, struct ipc_params *params)
     size = sizeof(*sma) + nsems * sizeof(struct sem);
     sma = ipc_rcu_alloc(size);
-    if (!sma) {
+    if (!sma)
         return -ENOMEM;
-    }
+
     memset(sma, 0, size);

     sma->sem_perm.mode = (semflg & S_IRWXUGO);

@@ -584,10 +583,11 @@ SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
     return ipcget(ns, &sem_ids(ns), &sem_ops, &sem_params);
 }

-/** perform_atomic_semop - Perform (if possible) a semaphore operation
+/**
+ * perform_atomic_semop - Perform (if possible) a semaphore operation
  * @sma: semaphore array
  * @sops: array with operations that should be checked
- * @nsems: number of sops
+ * @nsops: number of operations
  * @un: undo array
  * @pid: pid that did the change
  *
@@ -595,7 +595,6 @@ SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
  * Returns 1 if the operation is impossible, the caller must sleep.
  * Negative values are error codes.
  */
-
 static int perform_atomic_semop(struct sem_array *sma, struct sembuf *sops,
                  int nsops, struct sem_undo *un, int pid)
 {

@@ -616,22 +615,21 @@ static int perform_atomic_semop(struct sem_array *sma, struct sembuf *sops,
             goto would_block;
         if (result > SEMVMX)
             goto out_of_range;
+
         if (sop->sem_flg & SEM_UNDO) {
             int undo = un->semadj[sop->sem_num] - sem_op;
-            /*
-             *  Exceeding the undo range is an error.
-             */
+            /* Exceeding the undo range is an error. */
             if (undo < (-SEMAEM - 1) || undo > SEMAEM)
                 goto out_of_range;
+            un->semadj[sop->sem_num] = undo;
         }
+
         curr->semval = result;
     }

     sop--;
     while (sop >= sops) {
         sma->sem_base[sop->sem_num].sempid = pid;
-        if (sop->sem_flg & SEM_UNDO)
-            un->semadj[sop->sem_num] -= sop->sem_op;
         sop--;
     }

@@ -650,7 +648,10 @@ would_block:
 undo:
     sop--;
     while (sop >= sops) {
-        sma->sem_base[sop->sem_num].semval -= sop->sem_op;
+        sem_op = sop->sem_op;
+        sma->sem_base[sop->sem_num].semval -= sem_op;
+        if (sop->sem_flg & SEM_UNDO)
+            un->semadj[sop->sem_num] += sem_op;
         sop--;
     }

@@ -680,7 +681,7 @@ static void wake_up_sem_queue_prepare(struct list_head *pt,
 }

 /**
- * wake_up_sem_queue_do(pt) - do the actual wake-up
+ * wake_up_sem_queue_do - do the actual wake-up
  * @pt: list of tasks to be woken up
  *
  * Do the actual wake-up.

@@ -746,7 +747,7 @@ static int check_restart(struct sem_array *sma, struct sem_queue *q)
 }

 /**
- * wake_const_ops(sma, semnum, pt) - Wake up non-alter tasks
+ * wake_const_ops - wake up non-alter tasks
  * @sma: semaphore array.
  * @semnum: semaphore that was modified.
  * @pt: list head for the tasks that must be woken up.

@@ -796,15 +797,14 @@ static int wake_const_ops(struct sem_array *sma, int semnum,
 }

 /**
- * do_smart_wakeup_zero(sma, sops, nsops, pt) - wakeup all wait for zero tasks
+ * do_smart_wakeup_zero - wakeup all wait for zero tasks
  * @sma: semaphore array
  * @sops: operations that were performed
  * @nsops: number of operations
  * @pt: list head of the tasks that must be woken up.
  *
- * do_smart_wakeup_zero() checks all required queue for wait-for-zero
- * operations, based on the actual changes that were performed on the
- * semaphore array.
+ * Checks all required queue for wait-for-zero operations, based
+ * on the actual changes that were performed on the semaphore array.
  * The function returns 1 if at least one operation was completed successfully.
  */
 static int do_smart_wakeup_zero(struct sem_array *sma, struct sembuf *sops,
@@ -848,7 +848,7 @@ static int do_smart_wakeup_zero(struct sem_array *sma, struct sembuf *sops,

 /**
- * update_queue(sma, semnum): Look for tasks that can be completed.
+ * update_queue - look for tasks that can be completed.
  * @sma: semaphore array.
  * @semnum: semaphore that was modified.
  * @pt: list head for the tasks that must be woken up.

@@ -918,7 +918,7 @@ again:
 }

 /**
- * set_semotime(sma, sops) - set sem_otime
+ * set_semotime - set sem_otime
  * @sma: semaphore array
  * @sops: operations that modified the array, may be NULL
  *

@@ -936,7 +936,7 @@ static void set_semotime(struct sem_array *sma, struct sembuf *sops)
 }

 /**
- * do_smart_update(sma, sops, nsops, otime, pt) - optimized update_queue
+ * do_smart_update - optimized update_queue
  * @sma: semaphore array
  * @sops: operations that were performed
  * @nsops: number of operations

@@ -1282,7 +1282,7 @@ static int semctl_setval(struct ipc_namespace *ns, int semid, int semnum,

     sem_lock(sma, NULL, -1);

-    if (sma->sem_perm.deleted) {
+    if (!ipc_valid_object(&sma->sem_perm)) {
         sem_unlock(sma, -1);
         rcu_read_unlock();
         return -EIDRM;

@@ -1342,7 +1342,7 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
         int i;

         sem_lock(sma, NULL, -1);
-        if (sma->sem_perm.deleted) {
+        if (!ipc_valid_object(&sma->sem_perm)) {
             err = -EIDRM;
             goto out_unlock;
         }

@@ -1361,7 +1361,7 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,

             rcu_read_lock();
             sem_lock_and_putref(sma);
-            if (sma->sem_perm.deleted) {
+            if (!ipc_valid_object(&sma->sem_perm)) {
                 err = -EIDRM;
                 goto out_unlock;
             }

@@ -1409,7 +1409,7 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
         }
         rcu_read_lock();
         sem_lock_and_putref(sma);
-        if (sma->sem_perm.deleted) {
+        if (!ipc_valid_object(&sma->sem_perm)) {
             err = -EIDRM;
             goto out_unlock;
         }

@@ -1435,7 +1435,7 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
         goto out_rcu_wakeup;

     sem_lock(sma, NULL, -1);
-    if (sma->sem_perm.deleted) {
+    if (!ipc_valid_object(&sma->sem_perm)) {
         err = -EIDRM;
         goto out_unlock;
     }
@@ -1645,7 +1645,7 @@ static struct sem_undo *lookup_undo(struct sem_undo_list *ulp, int semid)
 }

 /**
- * find_alloc_undo - Lookup (and if not present create) undo array
+ * find_alloc_undo - lookup (and if not present create) undo array
  * @ns: namespace
  * @semid: semaphore array id
  *

@@ -1699,7 +1699,7 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
     /* step 3: Acquire the lock on semaphore array */
     rcu_read_lock();
     sem_lock_and_putref(sma);
-    if (sma->sem_perm.deleted) {
+    if (!ipc_valid_object(&sma->sem_perm)) {
         sem_unlock(sma, -1);
         rcu_read_unlock();
         kfree(new);

@@ -1735,7 +1735,7 @@ out:

 /**
- * get_queue_result - Retrieve the result code from sem_queue
+ * get_queue_result - retrieve the result code from sem_queue
  * @q: Pointer to queue structure
  *
  * Retrieve the return code from the pending queue. If IN_WAKEUP is found in

@@ -1846,7 +1846,15 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,

     error = -EIDRM;
     locknum = sem_lock(sma, sops, nsops);
-    if (sma->sem_perm.deleted)
+    /*
+     * We eventually might perform the following check in a lockless
+     * fashion, considering ipc_valid_object() locking constraints.
+     * If nsops == 1 and there is no contention for sem_perm.lock, then
+     * only a per-semaphore lock is held and it's OK to proceed with the
+     * check below. More details on the fine grained locking scheme
+     * entangled here and why it's RMID race safe on comments at sem_lock()
+     */
+    if (!ipc_valid_object(&sma->sem_perm))
         goto out_unlock_free;
     /*
      * semid identifiers are not unique - find_alloc_undo may have

@@ -1959,10 +1967,8 @@ sleep_again:
      * If queue.status != -EINTR we are woken up by another process.
      * Leave without unlink_queue(), but with sem_unlock().
      */
-
-    if (error != -EINTR) {
+    if (error != -EINTR)
         goto out_unlock_free;
-    }

     /*
      * If an interrupt occurred we have to clean up the queue

@@ -2068,7 +2074,7 @@ void exit_sem(struct task_struct *tsk)

         sem_lock(sma, NULL, -1);
         /* exit_sem raced with IPC_RMID, nothing to do */
-        if (sma->sem_perm.deleted) {
+        if (!ipc_valid_object(&sma->sem_perm)) {
             sem_unlock(sma, -1);
             rcu_read_unlock();
             continue;
ipc/shm.c (17 changes)

@@ -477,7 +477,6 @@ static const struct vm_operations_struct shm_vm_ops = {
  *
  * Called with shm_ids.rwsem held as a writer.
  */
-
 static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
 {
     key_t key = params->key;

@@ -975,6 +974,13 @@ SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
             goto out_unlock1;

         ipc_lock_object(&shp->shm_perm);
+
+        /* check if shm_destroy() is tearing down shp */
+        if (!ipc_valid_object(&shp->shm_perm)) {
+            err = -EIDRM;
+            goto out_unlock0;
+        }
+
         if (!ns_capable(ns->user_ns, CAP_IPC_LOCK)) {
             kuid_t euid = current_euid();
             if (!uid_eq(euid, shp->shm_perm.uid) &&

@@ -989,13 +995,6 @@ SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
         }

         shm_file = shp->shm_file;
-
-        /* check if shm_destroy() is tearing down shp */
-        if (shm_file == NULL) {
-            err = -EIDRM;
-            goto out_unlock0;
-        }
-
         if (is_file_hugepages(shm_file))
             goto out_unlock0;

@@ -1116,7 +1115,7 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
     ipc_lock_object(&shp->shm_perm);

     /* check if shm_destroy() is tearing down shp */
-    if (shp->shm_file == NULL) {
+    if (!ipc_valid_object(&shp->shm_perm)) {
         ipc_unlock_object(&shp->shm_perm);
         err = -EIDRM;
         goto out_unlock;
ipc/util.c (128 changes)

@@ -110,15 +110,15 @@ static struct notifier_block ipc_memory_nb = {
 };

 /**
- *  ipc_init    -   initialise IPC subsystem
+ * ipc_init - initialise ipc subsystem
  *
- *  The various system5 IPC resources (semaphores, messages and shared
- *  memory) are initialised
+ * The various sysv ipc resources (semaphores, messages and shared
+ * memory) are initialised.
+ *
  * A callback routine is registered into the memory hotplug notifier
  * chain: since msgmni scales to lowmem this callback routine will be
  * called upon successful memory add / remove to recompute msmgni.
  */
-
 static int __init ipc_init(void)
 {
     sem_init();

@@ -131,35 +131,25 @@ static int __init ipc_init(void)
 __initcall(ipc_init);

 /**
- *  ipc_init_ids    -   initialise IPC identifiers
- *  @ids: Identifier set
+ * ipc_init_ids - initialise ipc identifiers
+ * @ids: ipc identifier set
  *
  * Set up the sequence range to use for the ipc identifier range (limited
  * below IPCMNI) then initialise the ids idr.
  */
-
 void ipc_init_ids(struct ipc_ids *ids)
 {
-    init_rwsem(&ids->rwsem);
-
     ids->in_use = 0;
     ids->seq = 0;
     ids->next_id = -1;
-    {
-        int seq_limit = INT_MAX/SEQ_MULTIPLIER;
-        if (seq_limit > USHRT_MAX)
-            ids->seq_max = USHRT_MAX;
-        else
-            ids->seq_max = seq_limit;
-    }
-
+    init_rwsem(&ids->rwsem);
     idr_init(&ids->ipcs_idr);
 }

 #ifdef CONFIG_PROC_FS
 static const struct file_operations sysvipc_proc_fops;
 /**
- * ipc_init_proc_interface - Create a proc interface for sysipc types using a seq_file interface.
+ * ipc_init_proc_interface - create a proc interface for sysipc types using a seq_file interface.
  * @path: Path in procfs
  * @header: Banner to be printed at the beginning of the file.
  * @ids: ipc id table to iterate.
@@ -184,23 +174,21 @@ void __init ipc_init_proc_interface(const char *path, const char *header,
                    NULL,           /* parent dir */
                    &sysvipc_proc_fops,
                    iface);
-    if (!pde) {
+    if (!pde)
         kfree(iface);
-    }
 }
 #endif

 /**
  * ipc_findkey - find a key in an ipc identifier set
- *  @ids: Identifier set
- *  @key: The key to find
+ * @ids: ipc identifier set
+ * @key: key to find
  *
- *  Requires ipc_ids.rwsem locked.
- *  Returns the LOCKED pointer to the ipc structure if found or NULL
- *  if not.
- *  If key is found ipc points to the owning ipc structure
+ * Returns the locked pointer to the ipc structure if found or NULL
+ * otherwise. If key is found ipc points to the owning ipc structure
+ *
+ * Called with ipc_ids.rwsem held.
  */
-
 static struct kern_ipc_perm *ipc_findkey(struct ipc_ids *ids, key_t key)
 {
     struct kern_ipc_perm *ipc;

@@ -228,11 +216,10 @@ static struct kern_ipc_perm *ipc_findkey(struct ipc_ids *ids, key_t key)

 /**
  * ipc_get_maxid - get the last assigned id
- * @ids: IPC identifier set
+ * @ids: ipc identifier set
  *
  * Called with ipc_ids.rwsem held.
  */
-
 int ipc_get_maxid(struct ipc_ids *ids)
 {
     struct kern_ipc_perm *ipc;

@@ -258,12 +245,12 @@ int ipc_get_maxid(struct ipc_ids *ids)
 }

 /**
- *  ipc_addid   -   add an IPC identifier
- *  @ids: IPC identifier set
- *  @new: new IPC permission set
+ * ipc_addid - add an ipc identifier
+ * @ids: ipc identifier set
+ * @new: new ipc permission set
  * @size: limit for the number of used ids
  *
- * Add an entry 'new' to the IPC ids idr. The permissions object is
+ * Add an entry 'new' to the ipc ids idr. The permissions object is
  * initialised and the first free entry is set up and the id assigned
  * is returned. The 'new' entry is returned in a locked state on success.
  * On failure the entry is not locked and a negative err-code is returned.
|
|||
idr_preload(GFP_KERNEL);
|
||||
|
||||
spin_lock_init(&new->lock);
|
||||
new->deleted = 0;
|
||||
new->deleted = false;
|
||||
rcu_read_lock();
|
||||
spin_lock(&new->lock);
|
||||
|
||||
|
@ -308,7 +295,7 @@ int ipc_addid(struct ipc_ids* ids, struct kern_ipc_perm* new, int size)
|
|||
|
||||
if (next_id < 0) {
|
||||
new->seq = ids->seq++;
|
||||
if (ids->seq > ids->seq_max)
|
||||
if (ids->seq > IPCID_SEQ_MAX)
|
||||
ids->seq = 0;
|
||||
} else {
|
||||
new->seq = ipcid_to_seqx(next_id);
|
||||
|
@ -321,8 +308,8 @@ int ipc_addid(struct ipc_ids* ids, struct kern_ipc_perm* new, int size)
|
|||
|
||||
/**
|
||||
* ipcget_new - create a new ipc object
|
||||
* @ns: namespace
|
||||
* @ids: IPC identifer set
|
||||
* @ns: ipc namespace
|
||||
* @ids: ipc identifer set
|
||||
* @ops: the actual creation routine to call
|
||||
* @params: its parameters
|
||||
*
|
||||
|
@ -341,17 +328,17 @@ static int ipcget_new(struct ipc_namespace *ns, struct ipc_ids *ids,
|
|||
}
|
||||
|
||||
/**
|
||||
* ipc_check_perms - check security and permissions for an IPC
|
||||
* @ns: IPC namespace
|
||||
* ipc_check_perms - check security and permissions for an ipc object
|
||||
* @ns: ipc namespace
|
||||
* @ipcp: ipc permission set
|
||||
* @ops: the actual security routine to call
|
||||
* @params: its parameters
|
||||
*
|
||||
* This routine is called by sys_msgget(), sys_semget() and sys_shmget()
|
||||
* when the key is not IPC_PRIVATE and that key already exists in the
|
||||
* ids IDR.
|
||||
* ds IDR.
|
||||
*
|
||||
* On success, the IPC id is returned.
|
||||
* On success, the ipc id is returned.
|
||||
*
|
||||
* It is called with ipc_ids.rwsem and ipcp->lock held.
|
||||
*/
|
||||
|
@@ -375,8 +362,8 @@ static int ipc_check_perms(struct ipc_namespace *ns,

 /**
  * ipcget_public - get an ipc object or create a new one
- * @ns: namespace
- * @ids: IPC identifer set
+ * @ns: ipc namespace
+ * @ids: ipc identifer set
  * @ops: the actual creation routine to call
  * @params: its parameters
  *

@@ -431,25 +418,20 @@ static int ipcget_public(struct ipc_namespace *ns, struct ipc_ids *ids,

 /**
- *  ipc_rmid    -   remove an IPC identifier
- *  @ids: IPC identifier set
+ * ipc_rmid - remove an ipc identifier
+ * @ids: ipc identifier set
  * @ipcp: ipc perm structure containing the identifier to remove
  *
  * ipc_ids.rwsem (as a writer) and the spinlock for this ID are held
  * before this function is called, and remain locked on the exit.
  */
-
 void ipc_rmid(struct ipc_ids *ids, struct kern_ipc_perm *ipcp)
 {
     int lid = ipcid_to_idx(ipcp->id);

     idr_remove(&ids->ipcs_idr, lid);
-
     ids->in_use--;
-
-    ipcp->deleted = 1;
-
-    return;
+    ipcp->deleted = true;
 }

 /**

@@ -459,7 +441,6 @@ void ipc_rmid(struct ipc_ids *ids, struct kern_ipc_perm *ipcp)
  * Allocate memory from the appropriate pools and return a pointer to it.
  * NULL is returned if the allocation fails
  */
-
 void *ipc_alloc(int size)
 {
     void *out;

@@ -478,7 +459,6 @@ void *ipc_alloc(int size)
  * Free a block created with ipc_alloc(). The caller must know the size
  * used in the allocation call.
  */
-
 void ipc_free(void *ptr, int size)
 {
     if (size > PAGE_SIZE)

@@ -534,17 +514,16 @@ void ipc_rcu_free(struct rcu_head *head)
 }

 /**
- *  ipcperms    -   check IPC permissions
- *  @ns: IPC namespace
- *  @ipcp: IPC permission set
- *  @flag: desired permission set.
+ * ipcperms - check ipc permissions
+ * @ns: ipc namespace
+ * @ipcp: ipc permission set
+ * @flag: desired permission set
  *
  * Check user, group, other permissions for access
  * to ipc resources. return 0 if allowed
  *
  * @flag will most probably be 0 or S_...UGO from <linux/stat.h>
  */
-
 int ipcperms(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp, short flag)
 {
     kuid_t euid = current_euid();
@@ -574,13 +553,11 @@ int ipcperms(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp, short flag)
 /**
  * kernel_to_ipc64_perm - convert kernel ipc permissions to user
  * @in: kernel permissions
- * @out: new style IPC permissions
+ * @out: new style ipc permissions
  *
  * Turn the kernel object @in into a set of permissions descriptions
  * for returning to userspace (@out).
  */
-
-
 void kernel_to_ipc64_perm(struct kern_ipc_perm *in, struct ipc64_perm *out)
 {
     out->key = in->key;

@@ -594,13 +571,12 @@ void kernel_to_ipc64_perm (struct kern_ipc_perm *in, struct ipc64_perm *out)

 /**
  * ipc64_perm_to_ipc_perm - convert new ipc permissions to old
- * @in: new style IPC permissions
- * @out: old style IPC permissions
+ * @in: new style ipc permissions
+ * @out: old style ipc permissions
  *
  * Turn the new style permissions object @in into a compatibility
  * object and store it into the @out pointer.
  */
-
 void ipc64_perm_to_ipc_perm(struct ipc64_perm *in, struct ipc_perm *out)
 {
     out->key = in->key;

@@ -635,8 +611,8 @@ struct kern_ipc_perm *ipc_obtain_object(struct ipc_ids *ids, int id)
 }

 /**
- * ipc_lock - Lock an ipc structure without rwsem held
- * @ids: IPC identifier set
+ * ipc_lock - lock an ipc structure without rwsem held
+ * @ids: ipc identifier set
  * @id: ipc id to look for
  *
  * Look for an id in the ipc ids idr and lock the associated ipc object.

@@ -657,7 +633,7 @@ struct kern_ipc_perm *ipc_lock(struct ipc_ids *ids, int id)
     /* ipc_rmid() may have already freed the ID while ipc_lock
      * was spinning: here verify that the structure is still valid
      */
-    if (!out->deleted)
+    if (ipc_valid_object(out))
         return out;

     spin_unlock(&out->lock);

@@ -694,7 +670,7 @@ out:
 /**
  * ipcget - Common sys_*get() code
  * @ns: namsepace
- * @ids : IPC identifier set
+ * @ids: ipc identifier set
  * @ops: operations to be called on ipc object creation, permission checks
  *       and further checks
  * @params: the parameters needed by the previous operations.

@@ -711,7 +687,7 @@ int ipcget(struct ipc_namespace *ns, struct ipc_ids *ids,
 }

 /**
- * ipc_update_perm - update the permissions of an IPC.
+ * ipc_update_perm - update the permissions of an ipc object
  * @in: the permission given as input.
  * @out: the permission of the ipc to set.
  */

@@ -732,7 +708,7 @@ int ipc_update_perm(struct ipc64_perm *in, struct kern_ipc_perm *out)

 /**
  * ipcctl_pre_down_nolock - retrieve an ipc and check permissions for some IPC_XXX cmd
- * @ns: the ipc namespace
+ * @ns: ipc namespace
  * @ids: the table of ids where to look for the ipc
  * @id: the id of the ipc to retrieve
  * @cmd: the cmd to check

@@ -779,14 +755,13 @@ err:

 /**
- * ipc_parse_version - IPC call version
+ * ipc_parse_version - ipc call version
  * @cmd: pointer to command
  *
  * Return IPC_64 for new style IPC and IPC_OLD for old style IPC.
  * The @cmd value is turned from an encoding command and version into
  * just the command code.
  */
-
 int ipc_parse_version(int *cmd)
 {
     if (*cmd & IPC_64) {
@@ -927,8 +902,10 @@ static int sysvipc_proc_open(struct inode *inode, struct file *file)
         goto out;

     ret = seq_open(file, &sysvipc_proc_seqops);
-    if (ret)
-        goto out_kfree;
+    if (ret) {
+        kfree(iter);
+        goto out;
+    }

     seq = file->private_data;
     seq->private = iter;

@@ -937,9 +914,6 @@ static int sysvipc_proc_open(struct inode *inode, struct file *file)
     iter->ns    = get_ipc_ns(current->nsproxy->ipc_ns);
 out:
     return ret;
-out_kfree:
-    kfree(iter);
-    goto out;
 }

 static int sysvipc_proc_release(struct inode *inode, struct file *file)
ipc/util.h (14 changes)

@@ -100,6 +100,7 @@ void __init ipc_init_proc_interface(const char *path, const char *header,

 #define ipcid_to_idx(id) ((id) % SEQ_MULTIPLIER)
 #define ipcid_to_seqx(id) ((id) / SEQ_MULTIPLIER)
+#define IPCID_SEQ_MAX min_t(int, INT_MAX/SEQ_MULTIPLIER, USHRT_MAX)

 /* must be called with ids->rwsem acquired for writing */
 int ipc_addid(struct ipc_ids *, struct kern_ipc_perm *, int);

@@ -185,6 +186,19 @@ static inline void ipc_unlock(struct kern_ipc_perm *perm)
     rcu_read_unlock();
 }

+/*
+ * ipc_valid_object() - helper to sort out IPC_RMID races for codepaths
+ * where the respective ipc_ids.rwsem is not being held down.
+ * Checks whether the ipc object is still around or if it's gone already, as
+ * ipc_rmid() may have already freed the ID while the ipc lock was spinning.
+ * Needs to be called with kern_ipc_perm.lock held -- exception made for one
+ * checkpoint case at sys_semtimedop() as noted in code commentary.
+ */
+static inline bool ipc_valid_object(struct kern_ipc_perm *perm)
+{
+    return !perm->deleted;
+}
+
 struct kern_ipc_perm *ipc_obtain_object_check(struct ipc_ids *ids, int id);
 int ipcget(struct ipc_namespace *ns, struct ipc_ids *ids,
             struct ipc_ops *ops, struct ipc_params *params);
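Note: the helper supports the lock-then-revalidate pattern used throughout the ipc hunks in this merge. A sketch of that pattern, lifted from the shmctl change above rather than a new function:

    /* take the per-object lock, then re-check that IPC_RMID has not
     * already torn the object down before touching it */
    ipc_lock_object(&shp->shm_perm);
    if (!ipc_valid_object(&shp->shm_perm)) {
        err = -EIDRM;
        goto out_unlock0;
    }
    /* ... safe to use the object here ... */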
@@ -1537,7 +1537,7 @@ void vmcoreinfo_append_str(const char *fmt, ...)
     size_t r;

     va_start(args, fmt);
-    r = vsnprintf(buf, sizeof(buf), fmt, args);
+    r = vscnprintf(buf, sizeof(buf), fmt, args);
     va_end(args);

     r = min(r, vmcoreinfo_max_size - vmcoreinfo_size);
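Note: vsnprintf() reports the length the output *would* have had, which can exceed the buffer; vscnprintf() reports what was actually stored, so the later size accounting cannot run past buf. A user-space analogue of the difference (using snprintf, which behaves like vsnprintf here):

    #include <stdio.h>

    int main(void)
    {
        char buf[8];
        int r = snprintf(buf, sizeof(buf), "0123456789");
        printf("%d\n", r);   /* prints 10, although only 7 chars + NUL fit in buf */
        return 0;
    }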
@@ -8,6 +8,8 @@
  * Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
  */

+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/export.h>
 #include <linux/kernel_stat.h>
 #include <linux/interrupt.h>

@@ -54,7 +56,7 @@ static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp

 DEFINE_PER_CPU(struct task_struct *, ksoftirqd);

-char *softirq_to_name[NR_SOFTIRQS] = {
+const char * const softirq_to_name[NR_SOFTIRQS] = {
     "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
     "TASKLET", "SCHED", "HRTIMER", "RCU"
 };

@@ -136,7 +138,6 @@ void _local_bh_enable(void)
     WARN_ON_ONCE(in_irq());
     __local_bh_enable(SOFTIRQ_DISABLE_OFFSET);
 }
-
 EXPORT_SYMBOL(_local_bh_enable);

 void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)

@@ -229,6 +230,7 @@ asmlinkage void __do_softirq(void)
     struct softirq_action *h;
     bool in_hardirq;
     __u32 pending;
+    int softirq_bit;
     int cpu;

     /*

@@ -253,10 +255,14 @@ restart:

     h = softirq_vec;

-    do {
-        if (pending & 1) {
-            unsigned int vec_nr = h - softirq_vec;
-            int prev_count = preempt_count();
+    while ((softirq_bit = ffs(pending))) {
+        unsigned int vec_nr;
+        int prev_count;
+
+        h += softirq_bit - 1;
+
+        vec_nr = h - softirq_vec;
+        prev_count = preempt_count();

         kstat_incr_softirqs_this_cpu(vec_nr);
@@ -264,19 +270,15 @@ restart:
         h->action(h);
         trace_softirq_exit(vec_nr);
         if (unlikely(prev_count != preempt_count())) {
-            printk(KERN_ERR "huh, entered softirq %u %s %p"
-                   "with preempt_count %08x,"
-                   " exited with %08x?\n", vec_nr,
-                   softirq_to_name[vec_nr], h->action,
+            pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n",
+                   vec_nr, softirq_to_name[vec_nr], h->action,
                    prev_count, preempt_count());
             preempt_count_set(prev_count);
         }
-
         rcu_bh_qs(cpu);
-        }
         h++;
-        pending >>= 1;
-    } while (pending);
+        pending >>= softirq_bit;
+    }

     local_irq_disable();
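Note: the new loop jumps straight to each set bit with ffs() instead of shifting one bit per iteration. A stand-alone illustration of the same bit-walking idea (vector numbers are made up; this is not the kernel loop itself):

    #include <strings.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned int pending = 0x122;   /* bits 1, 5 and 8 set */
        int bit, vec = 0;

        while ((bit = ffs(pending))) {
            vec += bit - 1;             /* absolute index of the next set bit */
            printf("servicing vector %d\n", vec);
            vec += 1;                   /* next search starts past this bit */
            pending >>= bit;            /* discard bits up to and including it */
        }
        return 0;                       /* prints vectors 1, 5, 8 */
    }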
@@ -433,8 +435,7 @@ void open_softirq(int nr, void (*action)(struct softirq_action *))
 /*
  * Tasklets
  */
-struct tasklet_head
-{
+struct tasklet_head {
     struct tasklet_struct *head;
     struct tasklet_struct **tail;
 };

@@ -453,7 +454,6 @@ void __tasklet_schedule(struct tasklet_struct *t)
     raise_softirq_irqoff(TASKLET_SOFTIRQ);
     local_irq_restore(flags);
 }
-
 EXPORT_SYMBOL(__tasklet_schedule);

 void __tasklet_hi_schedule(struct tasklet_struct *t)

@@ -467,7 +467,6 @@ void __tasklet_hi_schedule(struct tasklet_struct *t)
     raise_softirq_irqoff(HI_SOFTIRQ);
     local_irq_restore(flags);
 }
-
 EXPORT_SYMBOL(__tasklet_hi_schedule);

 void __tasklet_hi_schedule_first(struct tasklet_struct *t)

@@ -478,7 +477,6 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t)
     __this_cpu_write(tasklet_hi_vec.head, t);
     __raise_softirq_irqoff(HI_SOFTIRQ);
 }
-
 EXPORT_SYMBOL(__tasklet_hi_schedule_first);

 static void tasklet_action(struct softirq_action *a)

@@ -498,7 +496,8 @@ static void tasklet_action(struct softirq_action *a)

         if (tasklet_trylock(t)) {
             if (!atomic_read(&t->count)) {
-                if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
+                if (!test_and_clear_bit(TASKLET_STATE_SCHED,
+                            &t->state))
                     BUG();
                 t->func(t->data);
                 tasklet_unlock(t);

@@ -533,7 +532,8 @@ static void tasklet_hi_action(struct softirq_action *a)

         if (tasklet_trylock(t)) {
             if (!atomic_read(&t->count)) {
-                if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
+                if (!test_and_clear_bit(TASKLET_STATE_SCHED,
+                            &t->state))
                     BUG();
                 t->func(t->data);
                 tasklet_unlock(t);

@@ -551,7 +551,6 @@ static void tasklet_hi_action(struct softirq_action *a)
     }
 }

-
 void tasklet_init(struct tasklet_struct *t,
           void (*func)(unsigned long), unsigned long data)
 {

@@ -561,13 +560,12 @@ void tasklet_init(struct tasklet_struct *t,
     t->func = func;
     t->data = data;
 }
-
 EXPORT_SYMBOL(tasklet_init);

 void tasklet_kill(struct tasklet_struct *t)
 {
     if (in_interrupt())
-        printk("Attempt to kill tasklet from interrupt\n");
+        pr_notice("Attempt to kill tasklet from interrupt\n");

     while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
         do {

@@ -577,7 +575,6 @@ void tasklet_kill(struct tasklet_struct *t)
     tasklet_unlock_wait(t);
     clear_bit(TASKLET_STATE_SCHED, &t->state);
 }
-
 EXPORT_SYMBOL(tasklet_kill);

 /*

@@ -727,8 +724,7 @@ static void takeover_tasklets(unsigned int cpu)
 }
 #endif /* CONFIG_HOTPLUG_CPU */

-static int cpu_callback(struct notifier_block *nfb,
-            unsigned long action,
+static int cpu_callback(struct notifier_block *nfb, unsigned long action,
             void *hcpu)
 {
     switch (action) {
@@ -268,14 +268,12 @@ static int ddebug_tokenize(char *buf, char *words[], int maxwords)
  */
 static inline int parse_lineno(const char *str, unsigned int *val)
 {
-    char *end = NULL;
     BUG_ON(str == NULL);
     if (*str == '\0') {
         *val = 0;
         return 0;
     }
-    *val = simple_strtoul(str, &end, 10);
-    if (end == NULL || end == str || *end != '\0') {
+    if (kstrtouint(str, 10, val) < 0) {
         pr_err("bad line-number: %s\n", str);
         return -EINVAL;
     }

@@ -348,14 +346,14 @@ static int ddebug_parse_query(char *words[], int nwords,
     }
     if (last)
         *last++ = '\0';
-    if (parse_lineno(first, &query->first_lineno) < 0) {
-        pr_err("line-number is <0\n");
+    if (parse_lineno(first, &query->first_lineno) < 0)
         return -EINVAL;
-    }
     if (last) {
         /* range <first>-<last> */
-        if (parse_lineno(last, &query->last_lineno)
-            < query->first_lineno) {
+        if (parse_lineno(last, &query->last_lineno) < 0)
+            return -EINVAL;
+
+        if (query->last_lineno < query->first_lineno) {
             pr_err("last-line:%d < 1st-line:%d\n",
                 query->last_lineno,
                 query->first_lineno);
@@ -172,7 +172,7 @@ int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
     /*
      * Get the overflow emergency buffer
      */
-    v_overflow_buffer = memblock_virt_alloc_nopanic(
+    v_overflow_buffer = memblock_virt_alloc_low_nopanic(
                         PAGE_ALIGN(io_tlb_overflow),
                         PAGE_SIZE);
     if (!v_overflow_buffer)

@@ -220,7 +220,7 @@ swiotlb_init(int verbose)
     bytes = io_tlb_nslabs << IO_TLB_SHIFT;

     /* Get IO TLB memory from the low pages */
-    vstart = memblock_virt_alloc_nopanic(PAGE_ALIGN(bytes), PAGE_SIZE);
+    vstart = memblock_virt_alloc_low_nopanic(PAGE_ALIGN(bytes), PAGE_SIZE);
     if (vstart && !swiotlb_init_with_tbl(vstart, io_tlb_nslabs, verbose))
         return;
@@ -984,9 +984,6 @@ static phys_addr_t __init memblock_alloc_base_nid(phys_addr_t size,
     if (!align)
         align = SMP_CACHE_BYTES;

-    /* align @size to avoid excessive fragmentation on reserved array */
-    size = round_up(size, align);
-
     found = memblock_find_in_range_node(size, align, 0, max_addr, nid);
     if (found && !memblock_reserve(found, size))
         return found;

@@ -1080,9 +1077,6 @@ static void * __init memblock_virt_alloc_internal(
     if (!align)
         align = SMP_CACHE_BYTES;

-    /* align @size to avoid excessive fragmentation on reserved array */
-    size = round_up(size, align);
-
 again:
     alloc = memblock_find_in_range_node(size, align, min_addr, max_addr,
                         nid);
@@ -1548,8 +1548,6 @@ static struct page *alloc_misplaced_dst_page(struct page *page,
                       __GFP_NOMEMALLOC | __GFP_NORETRY |
                       __GFP_NOWARN) &
                      ~GFP_IOFS, 0);
-    if (newpage)
-        page_cpupid_xchg_last(newpage, page_cpupid_last(page));

     return newpage;
 }
@@ -202,4 +202,4 @@ static int __init mm_sysfs_init(void)

     return 0;
 }
-pure_initcall(mm_sysfs_init);
+postcore_initcall(mm_sysfs_init);
mm/vmalloc.c (28 changes)

@@ -220,12 +220,12 @@ int is_vmalloc_or_module_addr(const void *x)
 }

 /*
- * Walk a vmap address to the physical pfn it maps to.
+ * Walk a vmap address to the struct page it maps.
  */
-unsigned long vmalloc_to_pfn(const void *vmalloc_addr)
+struct page *vmalloc_to_page(const void *vmalloc_addr)
 {
     unsigned long addr = (unsigned long) vmalloc_addr;
-    unsigned long pfn = 0;
+    struct page *page = NULL;
     pgd_t *pgd = pgd_offset_k(addr);

     /*

@@ -244,24 +244,24 @@ unsigned long vmalloc_to_pfn(const void *vmalloc_addr)
             ptep = pte_offset_map(pmd, addr);
             pte = *ptep;
             if (pte_present(pte))
-                pfn = pte_pfn(pte);
+                page = pte_page(pte);
             pte_unmap(ptep);
         }
     }
 }
-    return pfn;
+    return page;
 }
-EXPORT_SYMBOL(vmalloc_to_pfn);
-
-/*
- * Map a vmalloc()-space virtual address to the struct page.
- */
-struct page *vmalloc_to_page(const void *vmalloc_addr)
-{
-    return pfn_to_page(vmalloc_to_pfn(vmalloc_addr));
-}
 EXPORT_SYMBOL(vmalloc_to_page);

+/*
+ * Map a vmalloc()-space virtual address to the physical page frame number.
+ */
+unsigned long vmalloc_to_pfn(const void *vmalloc_addr)
+{
+    return page_to_pfn(vmalloc_to_page(vmalloc_addr));
+}
+EXPORT_SYMBOL(vmalloc_to_pfn);
+

 /*** Global kva allocator ***/

@@ -2665,6 +2665,15 @@ sub process {
                 $herecurr);
         }

+# check for function declarations without arguments like "int foo()"
+        if ($line =~ /(\b$Type\s+$Ident)\s*\(\s*\)/) {
+            if (ERROR("FUNCTION_WITHOUT_ARGS",
+                  "Bad function definition - $1() should probably be $1(void)\n" . $herecurr) &&
+                $fix) {
+                $fixed[$linenr - 1] =~ s/(\b($Type)\s+($Ident))\s*\(\s*\)/$2 $3(void)/;
+            }
+        }
+
 # check for uses of DEFINE_PCI_DEVICE_TABLE
         if ($line =~ /\bDEFINE_PCI_DEVICE_TABLE\s*\(\s*(\w+)\s*\)\s*=/) {
             if (WARN("DEFINE_PCI_DEVICE_TABLE",
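Note: the new checkpatch FUNCTION_WITHOUT_ARGS check targets empty parameter lists, since in C `int foo()` declares a function with unspecified parameters while `int foo(void)` declares one taking none. An illustrative pair (names are made up):

    int bad_init();        /* old-style: parameters unspecified, calls not checked */
    int good_init(void);   /* prototype: explicitly takes no arguments */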