Merge "Merge android-4.4-p.200 (903fbe7) into msm-4.4"

Authored by Linux Build Service Account on 2019-11-14 15:08:49 -08:00; committed by Gerrit - the friendly Code Review server
commit 01c47b8a7e
141 changed files with 4393 additions and 1672 deletions

View file

@ -297,7 +297,10 @@ them as any other INPUT_PROP_BUTTONPAD device.
INPUT_PROP_ACCELEROMETER
-------------------------
Directional axes on this device (absolute and/or relative x, y, z) represent
accelerometer data. All other axes retain their meaning. A device must not mix
accelerometer data. Some devices also report gyroscope data, which devices
can report through the rotational axes (absolute and/or relative rx, ry, rz).
All other axes retain their meaning. A device must not mix
regular directional axes and accelerometer axes on the same event node.
Guidelines:

View file

@ -1,6 +1,6 @@
VERSION = 4
PATCHLEVEL = 4
SUBLEVEL = 198
SUBLEVEL = 200
EXTRAVERSION =
NAME = Blurry Fish Butt
@ -844,6 +844,12 @@ KBUILD_CFLAGS += $(call cc-option,-Werror=strict-prototypes)
# Prohibit date/time macros, which would make the build non-deterministic
KBUILD_CFLAGS += $(call cc-option,-Werror=date-time)
# ensure -fcf-protection is disabled when using retpoline as it is
# incompatible with -mindirect-branch=thunk-extern
ifdef CONFIG_RETPOLINE
KBUILD_CFLAGS += $(call cc-option,-fcf-protection=none)
endif
# use the deterministic mode of AR if available
KBUILD_ARFLAGS := $(call ar-option,D)

View file

@ -169,3 +169,7 @@
&twl_gpio {
ti,use-leds;
};
&twl_keypad {
status = "disabled";
};

View file

@ -22,9 +22,7 @@
#include <linux/io.h>
#include <asm/barrier.h>
#define __ACCESS_CP15(CRn, Op1, CRm, Op2) p15, Op1, %0, CRn, CRm, Op2
#define __ACCESS_CP15_64(Op1, CRm) p15, Op1, %Q0, %R0, CRm
#include <asm/cp15.h>
#define ICC_EOIR1 __ACCESS_CP15(c12, 0, c12, 1)
#define ICC_DIR __ACCESS_CP15(c12, 0, c11, 1)
@ -102,58 +100,55 @@
static inline void gic_write_eoir(u32 irq)
{
asm volatile("mcr " __stringify(ICC_EOIR1) : : "r" (irq));
write_sysreg(irq, ICC_EOIR1);
isb();
}
static inline void gic_write_dir(u32 val)
{
asm volatile("mcr " __stringify(ICC_DIR) : : "r" (val));
write_sysreg(val, ICC_DIR);
isb();
}
static inline u32 gic_read_iar(void)
{
u32 irqstat;
u32 irqstat = read_sysreg(ICC_IAR1);
asm volatile("mrc " __stringify(ICC_IAR1) : "=r" (irqstat));
dsb(sy);
return irqstat;
}
static inline void gic_write_pmr(u32 val)
{
asm volatile("mcr " __stringify(ICC_PMR) : : "r" (val));
write_sysreg(val, ICC_PMR);
}
static inline void gic_write_ctlr(u32 val)
{
asm volatile("mcr " __stringify(ICC_CTLR) : : "r" (val));
write_sysreg(val, ICC_CTLR);
isb();
}
static inline void gic_write_grpen1(u32 val)
{
asm volatile("mcr " __stringify(ICC_IGRPEN1) : : "r" (val));
write_sysreg(val, ICC_IGRPEN1);
isb();
}
static inline void gic_write_sgi1r(u64 val)
{
asm volatile("mcrr " __stringify(ICC_SGI1R) : : "r" (val));
write_sysreg(val, ICC_SGI1R);
}
static inline u32 gic_read_sre(void)
{
u32 val;
asm volatile("mrc " __stringify(ICC_SRE) : "=r" (val));
return val;
return read_sysreg(ICC_SRE);
}
static inline void gic_write_sre(u32 val)
{
asm volatile("mcr " __stringify(ICC_SRE) : : "r" (val));
write_sysreg(val, ICC_SRE);
isb();
}

View file

@ -441,11 +441,34 @@ THUMB( orr \reg , \reg , #PSR_T_BIT )
.size \name , . - \name
.endm
.macro csdb
#ifdef CONFIG_THUMB2_KERNEL
.inst.w 0xf3af8014
#else
.inst 0xe320f014
#endif
.endm
.macro check_uaccess, addr:req, size:req, limit:req, tmp:req, bad:req
#ifndef CONFIG_CPU_USE_DOMAINS
adds \tmp, \addr, #\size - 1
sbcccs \tmp, \tmp, \limit
bcs \bad
#ifdef CONFIG_CPU_SPECTRE
movcs \addr, #0
csdb
#endif
#endif
.endm
.macro uaccess_mask_range_ptr, addr:req, size:req, limit:req, tmp:req
#ifdef CONFIG_CPU_SPECTRE
sub \tmp, \limit, #1
subs \tmp, \tmp, \addr @ tmp = limit - 1 - addr
addhs \tmp, \tmp, #1 @ if (tmp >= 0) {
subhss \tmp, \tmp, \size @ tmp = limit - (addr + size) }
movlo \addr, #0 @ if (tmp < 0) addr = NULL
csdb
#endif
.endm

View file

@ -18,6 +18,12 @@
#define isb(option) __asm__ __volatile__ ("isb " #option : : : "memory")
#define dsb(option) __asm__ __volatile__ ("dsb " #option : : : "memory")
#define dmb(option) __asm__ __volatile__ ("dmb " #option : : : "memory")
#ifdef CONFIG_THUMB2_KERNEL
#define CSDB ".inst.w 0xf3af8014"
#else
#define CSDB ".inst 0xe320f014"
#endif
#define csdb() __asm__ __volatile__(CSDB : : : "memory")
#elif defined(CONFIG_CPU_XSC3) || __LINUX_ARM_ARCH__ == 6
#define isb(x) __asm__ __volatile__ ("mcr p15, 0, %0, c7, c5, 4" \
: : "r" (0) : "memory")
@ -38,6 +44,13 @@
#define dmb(x) __asm__ __volatile__ ("" : : : "memory")
#endif
#ifndef CSDB
#define CSDB
#endif
#ifndef csdb
#define csdb()
#endif
#ifdef CONFIG_ARM_HEAVY_MB
extern void (*soc_mb)(void);
extern void arm_heavy_mb(void);
@ -95,5 +108,26 @@ do { \
#define smp_mb__before_atomic() smp_mb()
#define smp_mb__after_atomic() smp_mb()
#ifdef CONFIG_CPU_SPECTRE
static inline unsigned long array_index_mask_nospec(unsigned long idx,
unsigned long sz)
{
unsigned long mask;
asm volatile(
"cmp %1, %2\n"
" sbc %0, %1, %1\n"
CSDB
: "=r" (mask)
: "r" (idx), "Ir" (sz)
: "cc");
return mask;
}
#define array_index_mask_nospec array_index_mask_nospec
#endif
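For illustration only (the names demo_table, DEMO_NR and load_entry are hypothetical): a caller ANDs the returned mask into an already bounds-checked index, so a mis-predicted branch cannot steer the dependent load out of range.

#define DEMO_NR 16
static unsigned int demo_table[DEMO_NR];

static unsigned int load_entry(unsigned long idx)
{
	if (idx >= DEMO_NR)
		return 0;
	/* mask is ~0UL when idx < DEMO_NR and 0 otherwise, so a speculatively
	 * out-of-range idx collapses to demo_table[0] */
	idx &= array_index_mask_nospec(idx, DEMO_NR);
	return demo_table[idx];
}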
#include <asm-generic/barrier.h>
#endif /* !__ASSEMBLY__ */
#endif /* __ASM_BARRIER_H */

View file

@ -10,12 +10,14 @@
#ifndef __ASM_BUGS_H
#define __ASM_BUGS_H
#ifdef CONFIG_MMU
extern void check_writebuffer_bugs(void);
#define check_bugs() check_writebuffer_bugs()
#ifdef CONFIG_MMU
extern void check_bugs(void);
extern void check_other_bugs(void);
#else
#define check_bugs() do { } while (0)
#define check_other_bugs() do { } while (0)
#endif
#endif

View file

@ -49,6 +49,24 @@
#ifdef CONFIG_CPU_CP15
#define __ACCESS_CP15(CRn, Op1, CRm, Op2) \
"mrc", "mcr", __stringify(p15, Op1, %0, CRn, CRm, Op2), u32
#define __ACCESS_CP15_64(Op1, CRm) \
"mrrc", "mcrr", __stringify(p15, Op1, %Q0, %R0, CRm), u64
#define __read_sysreg(r, w, c, t) ({ \
t __val; \
asm volatile(r " " c : "=r" (__val)); \
__val; \
})
#define read_sysreg(...) __read_sysreg(__VA_ARGS__)
#define __write_sysreg(v, r, w, c, t) asm volatile(w " " c : : "r" ((t)(v)))
#define write_sysreg(v, ...) __write_sysreg(v, __VA_ARGS__)
#define BPIALL __ACCESS_CP15(c7, 0, c5, 6)
#define ICIALLU __ACCESS_CP15(c7, 0, c5, 0)
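Usage sketch: the token passed to read_sysreg()/write_sysreg() pastes in the operand list built by __ACCESS_CP15() above; the proc-v7-bugs.c hunk later in this diff uses exactly this pattern.

write_sysreg(0, BPIALL);	/* invalidate branch predictor (BTAC/BTB) */
write_sysreg(0, ICIALLU);	/* invalidate entire instruction cache */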
extern unsigned long cr_alignment; /* defined in entry-armv.S */
static inline unsigned long get_cr(void)

View file

@ -74,8 +74,16 @@
#define ARM_CPU_PART_CORTEX_A12 0x4100c0d0
#define ARM_CPU_PART_CORTEX_A17 0x4100c0e0
#define ARM_CPU_PART_CORTEX_A15 0x4100c0f0
#define ARM_CPU_PART_CORTEX_A53 0x4100d030
#define ARM_CPU_PART_CORTEX_A57 0x4100d070
#define ARM_CPU_PART_CORTEX_A72 0x4100d080
#define ARM_CPU_PART_CORTEX_A73 0x4100d090
#define ARM_CPU_PART_CORTEX_A75 0x4100d0a0
#define ARM_CPU_PART_MASK 0xff00fff0
/* Broadcom cores */
#define ARM_CPU_PART_BRAHMA_B15 0x420000f0
#define ARM_CPU_XSCALE_ARCH_MASK 0xe000
#define ARM_CPU_XSCALE_ARCH_V1 0x2000
#define ARM_CPU_XSCALE_ARCH_V2 0x4000
@ -85,6 +93,7 @@
#define ARM_CPU_PART_SCORPION 0x510002d0
extern unsigned int processor_id;
struct proc_info_list *lookup_processor(u32 midr);
#ifdef CONFIG_CPU_CP15
#define read_cpuid(reg) \

View file

@ -23,7 +23,7 @@ struct mm_struct;
/*
* Don't change this structure - ASM code relies on it.
*/
extern struct processor {
struct processor {
/* MISC
* get data abort address/flags
*/
@ -36,6 +36,10 @@ extern struct processor {
* Set up any processor specifics
*/
void (*_proc_init)(void);
/*
* Check for processor bugs
*/
void (*check_bugs)(void);
/*
* Disable any processor specifics
*/
@ -75,9 +79,13 @@ extern struct processor {
unsigned int suspend_size;
void (*do_suspend)(void *);
void (*do_resume)(void *);
} processor;
};
#ifndef MULTI_CPU
static inline void init_proc_vtable(const struct processor *p)
{
}
extern void cpu_proc_init(void);
extern void cpu_proc_fin(void);
extern int cpu_do_idle(void);
@ -94,17 +102,50 @@ extern void cpu_reset(unsigned long addr) __attribute__((noreturn));
extern void cpu_do_suspend(void *);
extern void cpu_do_resume(void *);
#else
#define cpu_proc_init processor._proc_init
#define cpu_proc_fin processor._proc_fin
#define cpu_reset processor.reset
#define cpu_do_idle processor._do_idle
#define cpu_dcache_clean_area processor.dcache_clean_area
#define cpu_set_pte_ext processor.set_pte_ext
#define cpu_do_switch_mm processor.switch_mm
/* These three are private to arch/arm/kernel/suspend.c */
#define cpu_do_suspend processor.do_suspend
#define cpu_do_resume processor.do_resume
extern struct processor processor;
#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
#include <linux/smp.h>
/*
* This can't be a per-cpu variable because we need to access it before
* per-cpu has been initialised. We have a couple of functions that are
* called in a pre-emptible context, and so can't use smp_processor_id()
* there, hence PROC_TABLE(). We insist in init_proc_vtable() that the
* function pointers for these are identical across all CPUs.
*/
extern struct processor *cpu_vtable[];
#define PROC_VTABLE(f) cpu_vtable[smp_processor_id()]->f
#define PROC_TABLE(f) cpu_vtable[0]->f
static inline void init_proc_vtable(const struct processor *p)
{
unsigned int cpu = smp_processor_id();
*cpu_vtable[cpu] = *p;
WARN_ON_ONCE(cpu_vtable[cpu]->dcache_clean_area !=
cpu_vtable[0]->dcache_clean_area);
WARN_ON_ONCE(cpu_vtable[cpu]->set_pte_ext !=
cpu_vtable[0]->set_pte_ext);
}
#else
#define PROC_VTABLE(f) processor.f
#define PROC_TABLE(f) processor.f
static inline void init_proc_vtable(const struct processor *p)
{
processor = *p;
}
#endif
#define cpu_proc_init PROC_VTABLE(_proc_init)
#define cpu_check_bugs PROC_VTABLE(check_bugs)
#define cpu_proc_fin PROC_VTABLE(_proc_fin)
#define cpu_reset PROC_VTABLE(reset)
#define cpu_do_idle PROC_VTABLE(_do_idle)
#define cpu_dcache_clean_area PROC_TABLE(dcache_clean_area)
#define cpu_set_pte_ext PROC_TABLE(set_pte_ext)
#define cpu_do_switch_mm PROC_VTABLE(switch_mm)
/* These two are private to arch/arm/kernel/suspend.c */
#define cpu_do_suspend PROC_VTABLE(do_suspend)
#define cpu_do_resume PROC_VTABLE(do_resume)
#endif
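Expansion sketch (assumes CONFIG_BIG_LITTLE and CONFIG_HARDEN_BRANCH_PREDICTOR; argument names are illustrative): PROC_VTABLE() accessors dispatch through the current CPU's vtable, while PROC_TABLE() accessors always go through CPU0's copy because they may run preemptibly.

static void vtable_expansion_demo(phys_addr_t pgd_phys, struct mm_struct *mm,
				  void *addr, int size)
{
	/* -> cpu_vtable[smp_processor_id()]->switch_mm(pgd_phys, mm) */
	cpu_do_switch_mm(pgd_phys, mm);
	/* -> cpu_vtable[0]->dcache_clean_area(addr, size) */
	cpu_dcache_clean_area(addr, size);
}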
extern void cpu_resume(void);

View file

@ -7,6 +7,7 @@
#include <linux/linkage.h>
#include <linux/irqflags.h>
#include <linux/reboot.h>
#include <linux/percpu.h>
extern void cpu_init(void);
@ -14,6 +15,20 @@ void soft_restart(unsigned long);
extern void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd);
extern void (*arm_pm_idle)(void);
#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
typedef void (*harden_branch_predictor_fn_t)(void);
DECLARE_PER_CPU(harden_branch_predictor_fn_t, harden_branch_predictor_fn);
static inline void harden_branch_predictor(void)
{
harden_branch_predictor_fn_t fn = per_cpu(harden_branch_predictor_fn,
smp_processor_id());
if (fn)
fn();
}
#else
#define harden_branch_predictor() do { } while (0)
#endif
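Usage sketch, taken from the arch/arm/mm/fault.c hunk later in this diff: the predictor is invalidated when a user-mode fault reports a kernel address.

if (addr > TASK_SIZE)
	harden_branch_predictor();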
#define UDBG_UNDEFINED (1 << 0)
#define UDBG_SYSCALL (1 << 1)
#define UDBG_BADABORT (1 << 2)

View file

@ -124,10 +124,10 @@ extern void vfp_flush_hwstate(struct thread_info *);
struct user_vfp;
struct user_vfp_exc;
extern int vfp_preserve_user_clear_hwstate(struct user_vfp __user *,
struct user_vfp_exc __user *);
extern int vfp_restore_user_hwstate(struct user_vfp __user *,
struct user_vfp_exc __user *);
extern int vfp_preserve_user_clear_hwstate(struct user_vfp *,
struct user_vfp_exc *);
extern int vfp_restore_user_hwstate(struct user_vfp *,
struct user_vfp_exc *);
#endif
/*

View file

@ -99,6 +99,14 @@ extern int __put_user_bad(void);
static inline void set_fs(mm_segment_t fs)
{
current_thread_info()->addr_limit = fs;
/*
* Prevent a mispredicted conditional call to set_fs from forwarding
* the wrong address limit to access_ok under speculation.
*/
dsb(nsh);
isb();
modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER);
}
@ -122,6 +130,39 @@ static inline void set_fs(mm_segment_t fs)
: "cc"); \
flag; })
/*
* This is a type: either unsigned long, if the argument fits into
* that type, or otherwise unsigned long long.
*/
#define __inttype(x) \
__typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
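Worked example (a hypothetical compile-time check, assuming 32-bit unsigned long as on ARM): anything that fits in a register resolves to unsigned long, and only 64-bit values resolve to unsigned long long.

static inline void inttype_demo(void)
{
	BUILD_BUG_ON(sizeof(__inttype((char)0)) != sizeof(unsigned long));	/* fits in 0UL */
	BUILD_BUG_ON(sizeof(__inttype(0ULL)) != sizeof(unsigned long long));	/* needs 0ULL */
}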
/*
* Sanitise a uaccess pointer such that it becomes NULL if addr+size
* is above the current addr_limit.
*/
#define uaccess_mask_range_ptr(ptr, size) \
((__typeof__(ptr))__uaccess_mask_range_ptr(ptr, size))
static inline void __user *__uaccess_mask_range_ptr(const void __user *ptr,
size_t size)
{
void __user *safe_ptr = (void __user *)ptr;
unsigned long tmp;
asm volatile(
" sub %1, %3, #1\n"
" subs %1, %1, %0\n"
" addhs %1, %1, #1\n"
" subhss %1, %1, %2\n"
" movlo %0, #0\n"
: "+r" (safe_ptr), "=&r" (tmp)
: "r" (size), "r" (current_thread_info()->addr_limit)
: "cc");
csdb();
return safe_ptr;
}
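Illustrative caller (helper name hypothetical; mainline applies this in __copy_to_user(), which is not shown in this excerpt): the pointer is either unchanged or NULL, so the raw copy cannot be steered past addr_limit even under mis-speculation.

static inline unsigned long
demo_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	to = uaccess_mask_range_ptr(to, n);	/* NULL if to + n exceeds addr_limit */
	return arm_copy_to_user(to, from, n);	/* raw, unchecked copy helper */
}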
/*
* Single-value transfer routines. They automatically use the right
* size if we just have the right pointer type. Note that the functions
@ -191,7 +232,7 @@ extern int __get_user_64t_4(void *);
({ \
unsigned long __limit = current_thread_info()->addr_limit - 1; \
register const typeof(*(p)) __user *__p asm("r0") = (p);\
register typeof(x) __r2 asm("r2"); \
register __inttype(x) __r2 asm("r2"); \
register unsigned long __l asm("r1") = __limit; \
register int __e asm("r0"); \
unsigned int __ua_flags = uaccess_save_and_enable(); \
@ -238,49 +279,23 @@ extern int __put_user_2(void *, unsigned int);
extern int __put_user_4(void *, unsigned int);
extern int __put_user_8(void *, unsigned long long);
#define __put_user_x(__r2, __p, __e, __l, __s) \
__asm__ __volatile__ ( \
__asmeq("%0", "r0") __asmeq("%2", "r2") \
__asmeq("%3", "r1") \
"bl __put_user_" #__s \
: "=&r" (__e) \
: "0" (__p), "r" (__r2), "r" (__l) \
: "ip", "lr", "cc")
#define __put_user_check(x, p) \
#define __put_user_check(__pu_val, __ptr, __err, __s) \
({ \
unsigned long __limit = current_thread_info()->addr_limit - 1; \
const typeof(*(p)) __user *__tmp_p = (p); \
register typeof(*(p)) __r2 asm("r2") = (x); \
register const typeof(*(p)) __user *__p asm("r0") = __tmp_p; \
register typeof(__pu_val) __r2 asm("r2") = __pu_val; \
register const void __user *__p asm("r0") = __ptr; \
register unsigned long __l asm("r1") = __limit; \
register int __e asm("r0"); \
unsigned int __ua_flags = uaccess_save_and_enable(); \
switch (sizeof(*(__p))) { \
case 1: \
__put_user_x(__r2, __p, __e, __l, 1); \
break; \
case 2: \
__put_user_x(__r2, __p, __e, __l, 2); \
break; \
case 4: \
__put_user_x(__r2, __p, __e, __l, 4); \
break; \
case 8: \
__put_user_x(__r2, __p, __e, __l, 8); \
break; \
default: __e = __put_user_bad(); break; \
} \
uaccess_restore(__ua_flags); \
__e; \
__asm__ __volatile__ ( \
__asmeq("%0", "r0") __asmeq("%2", "r2") \
__asmeq("%3", "r1") \
"bl __put_user_" #__s \
: "=&r" (__e) \
: "0" (__p), "r" (__r2), "r" (__l) \
: "ip", "lr", "cc"); \
__err = __e; \
})
#define put_user(x, p) \
({ \
might_fault(); \
__put_user_check(x, p); \
})
#else /* CONFIG_MMU */
/*
@ -298,7 +313,7 @@ static inline void set_fs(mm_segment_t fs)
}
#define get_user(x, p) __get_user(x, p)
#define put_user(x, p) __put_user(x, p)
#define __put_user_check __put_user_nocheck
#endif /* CONFIG_MMU */
@ -307,6 +322,16 @@ static inline void set_fs(mm_segment_t fs)
#define user_addr_max() \
(segment_eq(get_fs(), KERNEL_DS) ? ~0UL : get_fs())
#ifdef CONFIG_CPU_SPECTRE
/*
* When mitigating Spectre variant 1, it is not worth fixing the non-
* verifying accessors, because we need to add verification of the
* address space there. Force these to use the standard get_user()
* version instead.
*/
#define __get_user(x, ptr) get_user(x, ptr)
#else
/*
* The "__xxx" versions of the user access functions do not verify the
* address space - it must have been done previously with a separate
@ -323,12 +348,6 @@ static inline void set_fs(mm_segment_t fs)
__gu_err; \
})
#define __get_user_error(x, ptr, err) \
({ \
__get_user_err((x), (ptr), err); \
(void) 0; \
})
#define __get_user_err(x, ptr, err) \
do { \
unsigned long __gu_addr = (unsigned long)(ptr); \
@ -388,37 +407,58 @@ do { \
#define __get_user_asm_word(x, addr, err) \
__get_user_asm(x, addr, err, ldr)
#endif
#define __put_user(x, ptr) \
#define __put_user_switch(x, ptr, __err, __fn) \
do { \
const __typeof__(*(ptr)) __user *__pu_ptr = (ptr); \
__typeof__(*(ptr)) __pu_val = (x); \
unsigned int __ua_flags; \
might_fault(); \
__ua_flags = uaccess_save_and_enable(); \
switch (sizeof(*(ptr))) { \
case 1: __fn(__pu_val, __pu_ptr, __err, 1); break; \
case 2: __fn(__pu_val, __pu_ptr, __err, 2); break; \
case 4: __fn(__pu_val, __pu_ptr, __err, 4); break; \
case 8: __fn(__pu_val, __pu_ptr, __err, 8); break; \
default: __err = __put_user_bad(); break; \
} \
uaccess_restore(__ua_flags); \
} while (0)
#define put_user(x, ptr) \
({ \
long __pu_err = 0; \
__put_user_err((x), (ptr), __pu_err); \
int __pu_err = 0; \
__put_user_switch((x), (ptr), __pu_err, __put_user_check); \
__pu_err; \
})
#define __put_user_error(x, ptr, err) \
#ifdef CONFIG_CPU_SPECTRE
/*
* When mitigating Spectre variant 1.1, all accessors need to include
* verification of the address space.
*/
#define __put_user(x, ptr) put_user(x, ptr)
#else
#define __put_user(x, ptr) \
({ \
__put_user_err((x), (ptr), err); \
(void) 0; \
long __pu_err = 0; \
__put_user_switch((x), (ptr), __pu_err, __put_user_nocheck); \
__pu_err; \
})
#define __put_user_err(x, ptr, err) \
do { \
unsigned long __pu_addr = (unsigned long)(ptr); \
unsigned int __ua_flags; \
__typeof__(*(ptr)) __pu_val = (x); \
__chk_user_ptr(ptr); \
might_fault(); \
__ua_flags = uaccess_save_and_enable(); \
switch (sizeof(*(ptr))) { \
case 1: __put_user_asm_byte(__pu_val, __pu_addr, err); break; \
case 2: __put_user_asm_half(__pu_val, __pu_addr, err); break; \
case 4: __put_user_asm_word(__pu_val, __pu_addr, err); break; \
case 8: __put_user_asm_dword(__pu_val, __pu_addr, err); break; \
default: __put_user_bad(); \
} \
uaccess_restore(__ua_flags); \
} while (0)
#define __put_user_nocheck(x, __pu_ptr, __err, __size) \
do { \
unsigned long __pu_addr = (unsigned long)__pu_ptr; \
__put_user_nocheck_##__size(x, __pu_addr, __err); \
} while (0)
#define __put_user_nocheck_1 __put_user_asm_byte
#define __put_user_nocheck_2 __put_user_asm_half
#define __put_user_nocheck_4 __put_user_asm_word
#define __put_user_nocheck_8 __put_user_asm_dword
#define __put_user_asm(x, __pu_addr, err, instr) \
__asm__ __volatile__( \
@ -488,6 +528,7 @@ do { \
: "r" (x), "i" (-EFAULT) \
: "cc")
#endif /* !CONFIG_CPU_SPECTRE */
#ifdef CONFIG_MMU
extern unsigned long __must_check

View file

@ -30,6 +30,7 @@ else
obj-y += entry-armv.o
endif
obj-$(CONFIG_MMU) += bugs.o
obj-$(CONFIG_CPU_IDLE) += cpuidle.o
obj-$(CONFIG_ISA_DMA_API) += dma.o
obj-$(CONFIG_FIQ) += fiq.o fiqasm.o

arch/arm/kernel/bugs.c (new file, 18 lines)
View file

@ -0,0 +1,18 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/init.h>
#include <asm/bugs.h>
#include <asm/proc-fns.h>
void check_other_bugs(void)
{
#ifdef MULTI_CPU
if (cpu_check_bugs)
cpu_check_bugs();
#endif
}
void __init check_bugs(void)
{
check_writebuffer_bugs();
check_other_bugs();
}

View file

@ -233,9 +233,7 @@ local_restart:
tst r10, #_TIF_SYSCALL_WORK @ are we tracing syscalls?
bne __sys_trace
cmp scno, #NR_syscalls @ check upper syscall limit
badr lr, ret_fast_syscall @ return address
ldrcc pc, [tbl, scno, lsl #2] @ call sys_* routine
invoke_syscall tbl, scno, r10, ret_fast_syscall
add r1, sp, #S_OFF
2: cmp scno, #(__ARM_NR_BASE - __NR_SYSCALL_BASE)
@ -268,14 +266,8 @@ __sys_trace:
mov r1, scno
add r0, sp, #S_OFF
bl syscall_trace_enter
badr lr, __sys_trace_return @ return address
mov scno, r0 @ syscall number (possibly new)
add r1, sp, #S_R0 + S_OFF @ pointer to regs
cmp scno, #NR_syscalls @ check upper syscall limit
ldmccia r1, {r0 - r6} @ have to reload r0 - r6
stmccia sp, {r4, r5} @ and update the stack args
ldrcc pc, [tbl, scno, lsl #2] @ call sys_* routine
mov scno, r0
invoke_syscall tbl, scno, r10, __sys_trace_return, reload=1
cmp scno, #-1 @ skip the syscall?
bne 2b
add sp, sp, #S_OFF @ restore stack
@ -327,6 +319,10 @@ sys_syscall:
bic scno, r0, #__NR_OABI_SYSCALL_BASE
cmp scno, #__NR_syscall - __NR_SYSCALL_BASE
cmpne scno, #NR_syscalls @ check range
#ifdef CONFIG_CPU_SPECTRE
movhs scno, #0
csdb
#endif
stmloia sp, {r5, r6} @ shuffle args
movlo r0, r1
movlo r1, r2

View file

@ -373,6 +373,31 @@
#endif
.endm
.macro invoke_syscall, table, nr, tmp, ret, reload=0
#ifdef CONFIG_CPU_SPECTRE
mov \tmp, \nr
cmp \tmp, #NR_syscalls @ check upper syscall limit
movcs \tmp, #0
csdb
badr lr, \ret @ return address
.if \reload
add r1, sp, #S_R0 + S_OFF @ pointer to regs
ldmccia r1, {r0 - r6} @ reload r0-r6
stmccia sp, {r4, r5} @ update stack arguments
.endif
ldrcc pc, [\table, \tmp, lsl #2] @ call sys_* routine
#else
cmp \nr, #NR_syscalls @ check upper syscall limit
badr lr, \ret @ return address
.if \reload
add r1, sp, #S_R0 + S_OFF @ pointer to regs
ldmccia r1, {r0 - r6} @ reload r0-r6
stmccia sp, {r4, r5} @ update stack arguments
.endif
ldrcc pc, [\table, \nr, lsl #2] @ call sys_* routine
#endif
.endm
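Functionally equivalent C sketch of the CONFIG_CPU_SPECTRE path above (all names are local to the sketch; it uses array_index_mask_nospec() from asm/barrier.h in place of the movcs/csdb pair, with the same effect): the syscall number is clamped before it ever indexes the table.

static long dispatch_syscall_sketch(long (**table)(void), unsigned int nr,
				    unsigned int nr_syscalls)
{
	if (nr >= nr_syscalls)
		return -1;	/* architecturally out of range: never touches the table */
	/* under mis-speculation, force the index to 0 before the dependent load */
	nr &= array_index_mask_nospec(nr, nr_syscalls);
	return table[nr]();
}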
/*
* These are the registers used in the syscall handler, and allow us to
* have in theory up to 7 arguments to a function - r0 to r6.

View file

@ -122,6 +122,9 @@ __mmap_switched_data:
.long init_thread_union + THREAD_START_SP @ sp
.size __mmap_switched_data, . - __mmap_switched_data
__FINIT
.text
/*
* This provides a C-API version of __lookup_processor_type
*/
@ -133,9 +136,6 @@ ENTRY(lookup_processor_type)
ldmfd sp!, {r4 - r6, r9, pc}
ENDPROC(lookup_processor_type)
__FINIT
.text
/*
* Read processor ID register (CP#15, CR0), and look up in the linker-built
* supported processor list. Note that we can't use the absolute addresses

View file

@ -122,6 +122,11 @@ EXPORT_SYMBOL(cold_boot);
#ifdef MULTI_CPU
struct processor processor __read_mostly;
#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
struct processor *cpu_vtable[NR_CPUS] = {
[0] = &processor,
};
#endif
#endif
#ifdef MULTI_TLB
struct cpu_tlb_fns cpu_tlb __read_mostly;
@ -608,28 +613,33 @@ static void __init smp_build_mpidr_hash(void)
}
#endif
/*
* locate processor in the list of supported processor types. The linker
* builds this table for us from the entries in arch/arm/mm/proc-*.S
*/
struct proc_info_list *lookup_processor(u32 midr)
{
struct proc_info_list *list = lookup_processor_type(midr);
if (!list) {
pr_err("CPU%u: configuration botched (ID %08x), CPU halted\n",
smp_processor_id(), midr);
while (1)
/* can't use cpu_relax() here as it may require MMU setup */;
}
return list;
}
static void __init setup_processor(void)
{
struct proc_info_list *list;
/*
* locate processor in the list of supported processor
* types. The linker builds this table for us from the
* entries in arch/arm/mm/proc-*.S
*/
list = lookup_processor_type(read_cpuid_id());
if (!list) {
pr_err("CPU configuration botched (ID %08x), unable to continue.\n",
read_cpuid_id());
while (1);
}
unsigned int midr = read_cpuid_id();
struct proc_info_list *list = lookup_processor(midr);
cpu_name = list->cpu_name;
__cpu_architecture = __get_cpu_architecture();
#ifdef MULTI_CPU
processor = *list->proc;
#endif
init_proc_vtable(list->proc);
#ifdef MULTI_TLB
cpu_tlb = *list->tlb;
#endif
@ -641,7 +651,7 @@ static void __init setup_processor(void)
#endif
pr_info("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
cpu_name, read_cpuid_id(), read_cpuid_id() & 15,
list->cpu_name, midr, midr & 15,
proc_arch[cpu_architecture()], get_cr());
snprintf(init_utsname()->machine, __NEW_UTS_LEN + 1, "%s%c",

View file

@ -95,34 +95,34 @@ static int restore_iwmmxt_context(struct iwmmxt_sigframe *frame)
static int preserve_vfp_context(struct vfp_sigframe __user *frame)
{
const unsigned long magic = VFP_MAGIC;
const unsigned long size = VFP_STORAGE_SIZE;
struct vfp_sigframe kframe;
int err = 0;
__put_user_error(magic, &frame->magic, err);
__put_user_error(size, &frame->size, err);
memset(&kframe, 0, sizeof(kframe));
kframe.magic = VFP_MAGIC;
kframe.size = VFP_STORAGE_SIZE;
err = vfp_preserve_user_clear_hwstate(&kframe.ufp, &kframe.ufp_exc);
if (err)
return -EFAULT;
return err;
return vfp_preserve_user_clear_hwstate(&frame->ufp, &frame->ufp_exc);
return __copy_to_user(frame, &kframe, sizeof(kframe));
}
static int restore_vfp_context(struct vfp_sigframe __user *frame)
static int restore_vfp_context(struct vfp_sigframe __user *auxp)
{
unsigned long magic;
unsigned long size;
int err = 0;
struct vfp_sigframe frame;
int err;
__get_user_error(magic, &frame->magic, err);
__get_user_error(size, &frame->size, err);
err = __copy_from_user(&frame, (char __user *) auxp, sizeof(frame));
if (err)
return -EFAULT;
if (magic != VFP_MAGIC || size != VFP_STORAGE_SIZE)
return err;
if (frame.magic != VFP_MAGIC || frame.size != VFP_STORAGE_SIZE)
return -EINVAL;
return vfp_restore_user_hwstate(&frame->ufp, &frame->ufp_exc);
return vfp_restore_user_hwstate(&frame.ufp, &frame.ufp_exc);
}
#endif
@ -142,6 +142,7 @@ struct rt_sigframe {
static int restore_sigframe(struct pt_regs *regs, struct sigframe __user *sf)
{
struct sigcontext context;
struct aux_sigframe __user *aux;
sigset_t set;
int err;
@ -150,23 +151,26 @@ static int restore_sigframe(struct pt_regs *regs, struct sigframe __user *sf)
if (err == 0)
set_current_blocked(&set);
__get_user_error(regs->ARM_r0, &sf->uc.uc_mcontext.arm_r0, err);
__get_user_error(regs->ARM_r1, &sf->uc.uc_mcontext.arm_r1, err);
__get_user_error(regs->ARM_r2, &sf->uc.uc_mcontext.arm_r2, err);
__get_user_error(regs->ARM_r3, &sf->uc.uc_mcontext.arm_r3, err);
__get_user_error(regs->ARM_r4, &sf->uc.uc_mcontext.arm_r4, err);
__get_user_error(regs->ARM_r5, &sf->uc.uc_mcontext.arm_r5, err);
__get_user_error(regs->ARM_r6, &sf->uc.uc_mcontext.arm_r6, err);
__get_user_error(regs->ARM_r7, &sf->uc.uc_mcontext.arm_r7, err);
__get_user_error(regs->ARM_r8, &sf->uc.uc_mcontext.arm_r8, err);
__get_user_error(regs->ARM_r9, &sf->uc.uc_mcontext.arm_r9, err);
__get_user_error(regs->ARM_r10, &sf->uc.uc_mcontext.arm_r10, err);
__get_user_error(regs->ARM_fp, &sf->uc.uc_mcontext.arm_fp, err);
__get_user_error(regs->ARM_ip, &sf->uc.uc_mcontext.arm_ip, err);
__get_user_error(regs->ARM_sp, &sf->uc.uc_mcontext.arm_sp, err);
__get_user_error(regs->ARM_lr, &sf->uc.uc_mcontext.arm_lr, err);
__get_user_error(regs->ARM_pc, &sf->uc.uc_mcontext.arm_pc, err);
__get_user_error(regs->ARM_cpsr, &sf->uc.uc_mcontext.arm_cpsr, err);
err |= __copy_from_user(&context, &sf->uc.uc_mcontext, sizeof(context));
if (err == 0) {
regs->ARM_r0 = context.arm_r0;
regs->ARM_r1 = context.arm_r1;
regs->ARM_r2 = context.arm_r2;
regs->ARM_r3 = context.arm_r3;
regs->ARM_r4 = context.arm_r4;
regs->ARM_r5 = context.arm_r5;
regs->ARM_r6 = context.arm_r6;
regs->ARM_r7 = context.arm_r7;
regs->ARM_r8 = context.arm_r8;
regs->ARM_r9 = context.arm_r9;
regs->ARM_r10 = context.arm_r10;
regs->ARM_fp = context.arm_fp;
regs->ARM_ip = context.arm_ip;
regs->ARM_sp = context.arm_sp;
regs->ARM_lr = context.arm_lr;
regs->ARM_pc = context.arm_pc;
regs->ARM_cpsr = context.arm_cpsr;
}
err |= !valid_user_regs(regs);
@ -254,30 +258,35 @@ static int
setup_sigframe(struct sigframe __user *sf, struct pt_regs *regs, sigset_t *set)
{
struct aux_sigframe __user *aux;
struct sigcontext context;
int err = 0;
__put_user_error(regs->ARM_r0, &sf->uc.uc_mcontext.arm_r0, err);
__put_user_error(regs->ARM_r1, &sf->uc.uc_mcontext.arm_r1, err);
__put_user_error(regs->ARM_r2, &sf->uc.uc_mcontext.arm_r2, err);
__put_user_error(regs->ARM_r3, &sf->uc.uc_mcontext.arm_r3, err);
__put_user_error(regs->ARM_r4, &sf->uc.uc_mcontext.arm_r4, err);
__put_user_error(regs->ARM_r5, &sf->uc.uc_mcontext.arm_r5, err);
__put_user_error(regs->ARM_r6, &sf->uc.uc_mcontext.arm_r6, err);
__put_user_error(regs->ARM_r7, &sf->uc.uc_mcontext.arm_r7, err);
__put_user_error(regs->ARM_r8, &sf->uc.uc_mcontext.arm_r8, err);
__put_user_error(regs->ARM_r9, &sf->uc.uc_mcontext.arm_r9, err);
__put_user_error(regs->ARM_r10, &sf->uc.uc_mcontext.arm_r10, err);
__put_user_error(regs->ARM_fp, &sf->uc.uc_mcontext.arm_fp, err);
__put_user_error(regs->ARM_ip, &sf->uc.uc_mcontext.arm_ip, err);
__put_user_error(regs->ARM_sp, &sf->uc.uc_mcontext.arm_sp, err);
__put_user_error(regs->ARM_lr, &sf->uc.uc_mcontext.arm_lr, err);
__put_user_error(regs->ARM_pc, &sf->uc.uc_mcontext.arm_pc, err);
__put_user_error(regs->ARM_cpsr, &sf->uc.uc_mcontext.arm_cpsr, err);
context = (struct sigcontext) {
.arm_r0 = regs->ARM_r0,
.arm_r1 = regs->ARM_r1,
.arm_r2 = regs->ARM_r2,
.arm_r3 = regs->ARM_r3,
.arm_r4 = regs->ARM_r4,
.arm_r5 = regs->ARM_r5,
.arm_r6 = regs->ARM_r6,
.arm_r7 = regs->ARM_r7,
.arm_r8 = regs->ARM_r8,
.arm_r9 = regs->ARM_r9,
.arm_r10 = regs->ARM_r10,
.arm_fp = regs->ARM_fp,
.arm_ip = regs->ARM_ip,
.arm_sp = regs->ARM_sp,
.arm_lr = regs->ARM_lr,
.arm_pc = regs->ARM_pc,
.arm_cpsr = regs->ARM_cpsr,
__put_user_error(current->thread.trap_no, &sf->uc.uc_mcontext.trap_no, err);
__put_user_error(current->thread.error_code, &sf->uc.uc_mcontext.error_code, err);
__put_user_error(current->thread.address, &sf->uc.uc_mcontext.fault_address, err);
__put_user_error(set->sig[0], &sf->uc.uc_mcontext.oldmask, err);
.trap_no = current->thread.trap_no,
.error_code = current->thread.error_code,
.fault_address = current->thread.address,
.oldmask = set->sig[0],
};
err |= __copy_to_user(&sf->uc.uc_mcontext, &context, sizeof(context));
err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(*set));
@ -294,7 +303,7 @@ setup_sigframe(struct sigframe __user *sf, struct pt_regs *regs, sigset_t *set)
if (err == 0)
err |= preserve_vfp_context(&aux->vfp);
#endif
__put_user_error(0, &aux->end_magic, err);
err |= __put_user(0, &aux->end_magic);
return err;
}
@ -426,7 +435,7 @@ setup_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs)
/*
* Set uc.uc_flags to a value which sc.trap_no would never have.
*/
__put_user_error(0x5ac3c35a, &frame->uc.uc_flags, err);
err = __put_user(0x5ac3c35a, &frame->uc.uc_flags);
err |= setup_sigframe(frame, regs, set);
if (err == 0)
@ -446,8 +455,8 @@ setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs)
err |= copy_siginfo_to_user(&frame->info, &ksig->info);
__put_user_error(0, &frame->sig.uc.uc_flags, err);
__put_user_error(NULL, &frame->sig.uc.uc_link, err);
err |= __put_user(0, &frame->sig.uc.uc_flags);
err |= __put_user(NULL, &frame->sig.uc.uc_link);
err |= __save_altstack(&frame->sig.uc.uc_stack, regs->ARM_sp);
err |= setup_sigframe(&frame->sig, regs, set);

View file

@ -27,8 +27,10 @@
#include <linux/completion.h>
#include <linux/cpufreq.h>
#include <linux/irq_work.h>
#include <linux/slab.h>
#include <linux/atomic.h>
#include <asm/bugs.h>
#include <asm/smp.h>
#include <asm/cacheflush.h>
#include <asm/cpu.h>
@ -39,6 +41,7 @@
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/procinfo.h>
#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
@ -95,6 +98,30 @@ static unsigned long get_arch_pgd(pgd_t *pgd)
#endif
}
#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
static int secondary_biglittle_prepare(unsigned int cpu)
{
if (!cpu_vtable[cpu])
cpu_vtable[cpu] = kzalloc(sizeof(*cpu_vtable[cpu]), GFP_KERNEL);
return cpu_vtable[cpu] ? 0 : -ENOMEM;
}
static void secondary_biglittle_init(void)
{
init_proc_vtable(lookup_processor(read_cpuid_id())->proc);
}
#else
static int secondary_biglittle_prepare(unsigned int cpu)
{
return 0;
}
static void secondary_biglittle_init(void)
{
}
#endif
int __cpu_up(unsigned int cpu, struct task_struct *idle)
{
int ret;
@ -102,6 +129,10 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
if (!smp_ops.smp_boot_secondary)
return -ENOSYS;
ret = secondary_biglittle_prepare(cpu);
if (ret)
return ret;
/*
* We need to tell the secondary core where to find
* its stack and the page tables.
@ -353,6 +384,8 @@ asmlinkage void secondary_start_kernel(void)
struct mm_struct *mm = &init_mm;
unsigned int cpu;
secondary_biglittle_init();
/*
* The identity mapping is uncached (strongly ordered), so
* switch away from it before attempting any exclusive accesses.
@ -396,6 +429,9 @@ asmlinkage void secondary_start_kernel(void)
* before we continue - which happens after __cpu_up returns.
*/
set_cpu_online(cpu, true);
check_other_bugs();
complete(&cpu_running);
local_irq_enable();

View file

@ -1,6 +1,7 @@
#include <linux/init.h>
#include <linux/slab.h>
#include <asm/bugs.h>
#include <asm/cacheflush.h>
#include <asm/idmap.h>
#include <asm/pgalloc.h>
@ -34,6 +35,7 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
cpu_switch_mm(mm->pgd, mm);
local_flush_bp_all();
local_flush_tlb_all();
check_other_bugs();
}
return ret;

View file

@ -276,6 +276,7 @@ asmlinkage long sys_oabi_epoll_wait(int epfd,
int maxevents, int timeout)
{
struct epoll_event *kbuf;
struct oabi_epoll_event e;
mm_segment_t fs;
long ret, err, i;
@ -294,8 +295,11 @@ asmlinkage long sys_oabi_epoll_wait(int epfd,
set_fs(fs);
err = 0;
for (i = 0; i < ret; i++) {
__put_user_error(kbuf[i].events, &events->events, err);
__put_user_error(kbuf[i].data, &events->data, err);
e.events = kbuf[i].events;
e.data = kbuf[i].data;
err = __copy_to_user(events, &e, sizeof(e));
if (err)
break;
events++;
}
kfree(kbuf);
@ -328,9 +332,11 @@ asmlinkage long sys_oabi_semtimedop(int semid,
return -ENOMEM;
err = 0;
for (i = 0; i < nsops; i++) {
__get_user_error(sops[i].sem_num, &tsops->sem_num, err);
__get_user_error(sops[i].sem_op, &tsops->sem_op, err);
__get_user_error(sops[i].sem_flg, &tsops->sem_flg, err);
struct oabi_sembuf osb;
err |= __copy_from_user(&osb, tsops, sizeof(osb));
sops[i].sem_num = osb.sem_num;
sops[i].sem_op = osb.sem_op;
sops[i].sem_flg = osb.sem_flg;
tsops++;
}
if (timeout) {

View file

@ -90,6 +90,11 @@
.text
ENTRY(arm_copy_from_user)
#ifdef CONFIG_CPU_SPECTRE
get_thread_info r3
ldr r3, [r3, #TI_ADDR_LIMIT]
uaccess_mask_range_ptr r1, r2, r3, ip
#endif
#include "copy_template.S"

View file

@ -396,6 +396,7 @@ config CPU_V7
select CPU_CP15_MPU if !MMU
select CPU_HAS_ASID if MMU
select CPU_PABRT_V7
select CPU_SPECTRE if MMU
select CPU_TLB_V7 if MMU
# ARMv7M
@ -793,6 +794,28 @@ config CPU_BPREDICT_DISABLE
help
Say Y here to disable branch prediction. If unsure, say N.
config CPU_SPECTRE
bool
config HARDEN_BRANCH_PREDICTOR
bool "Harden the branch predictor against aliasing attacks" if EXPERT
depends on CPU_SPECTRE
default y
help
Speculation attacks against some high-performance processors rely
on being able to manipulate the branch predictor for a victim
context by executing aliasing branches in the attacker context.
Such attacks can be partially mitigated against by clearing
internal branch predictor state and limiting the prediction
logic in some situations.
This config option will take CPU-specific actions to harden
the branch predictor against aliasing attacks and may rely on
specific instruction sequences or control bits being set by
the system firmware.
If unsure, say Y.
config TLS_REG_EMUL
bool
select NEED_KUSER_HELPERS

View file

@ -92,7 +92,7 @@ obj-$(CONFIG_CPU_MOHAWK) += proc-mohawk.o
obj-$(CONFIG_CPU_FEROCEON) += proc-feroceon.o
obj-$(CONFIG_CPU_V6) += proc-v6.o
obj-$(CONFIG_CPU_V6K) += proc-v6.o
obj-$(CONFIG_CPU_V7) += proc-v7.o
obj-$(CONFIG_CPU_V7) += proc-v7.o proc-v7-bugs.o
obj-$(CONFIG_CPU_V7M) += proc-v7m.o
AFLAGS_proc-v6.o :=-Wa,-march=armv6

View file

@ -767,6 +767,36 @@ do_alignment_t32_to_handler(unsigned long *pinstr, struct pt_regs *regs,
return NULL;
}
static int alignment_get_arm(struct pt_regs *regs, u32 *ip, unsigned long *inst)
{
u32 instr = 0;
int fault;
if (user_mode(regs))
fault = get_user(instr, ip);
else
fault = probe_kernel_address(ip, instr);
*inst = __mem_to_opcode_arm(instr);
return fault;
}
static int alignment_get_thumb(struct pt_regs *regs, u16 *ip, u16 *inst)
{
u16 instr = 0;
int fault;
if (user_mode(regs))
fault = get_user(instr, ip);
else
fault = probe_kernel_address(ip, instr);
*inst = __mem_to_opcode_thumb16(instr);
return fault;
}
static int
do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
@ -774,10 +804,10 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
unsigned long instr = 0, instrptr;
int (*handler)(unsigned long addr, unsigned long instr, struct pt_regs *regs);
unsigned int type;
unsigned int fault;
u16 tinstr = 0;
int isize = 4;
int thumb2_32b = 0;
int fault;
if (interrupts_enabled(regs))
local_irq_enable();
@ -786,15 +816,14 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
if (thumb_mode(regs)) {
u16 *ptr = (u16 *)(instrptr & ~1);
fault = probe_kernel_address(ptr, tinstr);
tinstr = __mem_to_opcode_thumb16(tinstr);
fault = alignment_get_thumb(regs, ptr, &tinstr);
if (!fault) {
if (cpu_architecture() >= CPU_ARCH_ARMv7 &&
IS_T32(tinstr)) {
/* Thumb-2 32-bit */
u16 tinst2 = 0;
fault = probe_kernel_address(ptr + 1, tinst2);
tinst2 = __mem_to_opcode_thumb16(tinst2);
u16 tinst2;
fault = alignment_get_thumb(regs, ptr + 1, &tinst2);
instr = __opcode_thumb32_compose(tinstr, tinst2);
thumb2_32b = 1;
} else {
@ -803,8 +832,7 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
}
}
} else {
fault = probe_kernel_address((void *)instrptr, instr);
instr = __mem_to_opcode_arm(instr);
fault = alignment_get_arm(regs, (void *)instrptr, &instr);
}
if (fault) {

View file

@ -163,6 +163,9 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr,
{
struct siginfo si;
if (addr > TASK_SIZE)
harden_branch_predictor();
#ifdef CONFIG_DEBUG_USER
if (((user_debug & UDBG_SEGV) && (sig == SIGSEGV)) ||
((user_debug & UDBG_BUS) && (sig == SIGBUS))) {

View file

@ -258,13 +258,21 @@
mcr p15, 0, ip, c7, c10, 4 @ data write barrier
.endm
.macro define_processor_functions name:req, dabort:req, pabort:req, nommu=0, suspend=0
.macro define_processor_functions name:req, dabort:req, pabort:req, nommu=0, suspend=0, bugs=0
/*
* If we are building for big.Little with branch predictor hardening,
* we need the processor function tables to remain available after boot.
*/
#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
.section ".rodata"
#endif
.type \name\()_processor_functions, #object
.align 2
ENTRY(\name\()_processor_functions)
.word \dabort
.word \pabort
.word cpu_\name\()_proc_init
.word \bugs
.word cpu_\name\()_proc_fin
.word cpu_\name\()_reset
.word cpu_\name\()_do_idle
@ -293,6 +301,9 @@ ENTRY(\name\()_processor_functions)
.endif
.size \name\()_processor_functions, . - \name\()_processor_functions
#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
.previous
#endif
.endm
.macro define_cache_functions name:req

View file

@ -41,11 +41,6 @@
* even on Cortex-A8 revisions not affected by 430973.
* If IBE is not set, the flush BTAC/BTB won't do anything.
*/
ENTRY(cpu_ca8_switch_mm)
#ifdef CONFIG_MMU
mov r2, #0
mcr p15, 0, r2, c7, c5, 6 @ flush BTAC/BTB
#endif
ENTRY(cpu_v7_switch_mm)
#ifdef CONFIG_MMU
mmid r1, r1 @ get mm->context.id
@ -66,7 +61,6 @@ ENTRY(cpu_v7_switch_mm)
#endif
bx lr
ENDPROC(cpu_v7_switch_mm)
ENDPROC(cpu_ca8_switch_mm)
/*
* cpu_v7_set_pte_ext(ptep, pte)

arch/arm/mm/proc-v7-bugs.c (new file, 161 lines)
View file

@ -0,0 +1,161 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/arm-smccc.h>
#include <linux/kernel.h>
#include <linux/psci.h>
#include <linux/smp.h>
#include <asm/cp15.h>
#include <asm/cputype.h>
#include <asm/proc-fns.h>
#include <asm/system_misc.h>
#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
DEFINE_PER_CPU(harden_branch_predictor_fn_t, harden_branch_predictor_fn);
extern void cpu_v7_iciallu_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);
extern void cpu_v7_bpiall_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);
extern void cpu_v7_smc_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);
extern void cpu_v7_hvc_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);
static void harden_branch_predictor_bpiall(void)
{
write_sysreg(0, BPIALL);
}
static void harden_branch_predictor_iciallu(void)
{
write_sysreg(0, ICIALLU);
}
static void __maybe_unused call_smc_arch_workaround_1(void)
{
arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}
static void __maybe_unused call_hvc_arch_workaround_1(void)
{
arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}
static void cpu_v7_spectre_init(void)
{
const char *spectre_v2_method = NULL;
int cpu = smp_processor_id();
if (per_cpu(harden_branch_predictor_fn, cpu))
return;
switch (read_cpuid_part()) {
case ARM_CPU_PART_CORTEX_A8:
case ARM_CPU_PART_CORTEX_A9:
case ARM_CPU_PART_CORTEX_A12:
case ARM_CPU_PART_CORTEX_A17:
case ARM_CPU_PART_CORTEX_A73:
case ARM_CPU_PART_CORTEX_A75:
per_cpu(harden_branch_predictor_fn, cpu) =
harden_branch_predictor_bpiall;
spectre_v2_method = "BPIALL";
break;
case ARM_CPU_PART_CORTEX_A15:
case ARM_CPU_PART_BRAHMA_B15:
per_cpu(harden_branch_predictor_fn, cpu) =
harden_branch_predictor_iciallu;
spectre_v2_method = "ICIALLU";
break;
#ifdef CONFIG_ARM_PSCI
default:
/* Other ARM CPUs require no workaround */
if (read_cpuid_implementor() == ARM_CPU_IMP_ARM)
break;
/* fallthrough */
/* Cortex A57/A72 require firmware workaround */
case ARM_CPU_PART_CORTEX_A57:
case ARM_CPU_PART_CORTEX_A72: {
struct arm_smccc_res res;
if (psci_ops.smccc_version == SMCCC_VERSION_1_0)
break;
switch (psci_ops.conduit) {
case PSCI_CONDUIT_HVC:
arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
ARM_SMCCC_ARCH_WORKAROUND_1, &res);
if ((int)res.a0 != 0)
break;
per_cpu(harden_branch_predictor_fn, cpu) =
call_hvc_arch_workaround_1;
cpu_do_switch_mm = cpu_v7_hvc_switch_mm;
spectre_v2_method = "hypervisor";
break;
case PSCI_CONDUIT_SMC:
arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
ARM_SMCCC_ARCH_WORKAROUND_1, &res);
if ((int)res.a0 != 0)
break;
per_cpu(harden_branch_predictor_fn, cpu) =
call_smc_arch_workaround_1;
cpu_do_switch_mm = cpu_v7_smc_switch_mm;
spectre_v2_method = "firmware";
break;
default:
break;
}
}
#endif
}
if (spectre_v2_method)
pr_info("CPU%u: Spectre v2: using %s workaround\n",
smp_processor_id(), spectre_v2_method);
}
#else
static void cpu_v7_spectre_init(void)
{
}
#endif
static __maybe_unused bool cpu_v7_check_auxcr_set(bool *warned,
u32 mask, const char *msg)
{
u32 aux_cr;
asm("mrc p15, 0, %0, c1, c0, 1" : "=r" (aux_cr));
if ((aux_cr & mask) != mask) {
if (!*warned)
pr_err("CPU%u: %s", smp_processor_id(), msg);
*warned = true;
return false;
}
return true;
}
static DEFINE_PER_CPU(bool, spectre_warned);
static bool check_spectre_auxcr(bool *warned, u32 bit)
{
return IS_ENABLED(CONFIG_HARDEN_BRANCH_PREDICTOR) &&
cpu_v7_check_auxcr_set(warned, bit,
"Spectre v2: firmware did not set auxiliary control register IBE bit, system vulnerable\n");
}
void cpu_v7_ca8_ibe(void)
{
if (check_spectre_auxcr(this_cpu_ptr(&spectre_warned), BIT(6)))
cpu_v7_spectre_init();
}
void cpu_v7_ca15_ibe(void)
{
if (check_spectre_auxcr(this_cpu_ptr(&spectre_warned), BIT(0)))
cpu_v7_spectre_init();
}
void cpu_v7_bugs_init(void)
{
cpu_v7_spectre_init();
}

View file

@ -9,6 +9,7 @@
*
* This is the "shell" of the ARMv7 processor support.
*/
#include <linux/arm-smccc.h>
#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/assembler.h>
@ -87,6 +88,37 @@ ENTRY(cpu_v7_dcache_clean_area)
ret lr
ENDPROC(cpu_v7_dcache_clean_area)
#ifdef CONFIG_ARM_PSCI
.arch_extension sec
ENTRY(cpu_v7_smc_switch_mm)
stmfd sp!, {r0 - r3}
movw r0, #:lower16:ARM_SMCCC_ARCH_WORKAROUND_1
movt r0, #:upper16:ARM_SMCCC_ARCH_WORKAROUND_1
smc #0
ldmfd sp!, {r0 - r3}
b cpu_v7_switch_mm
ENDPROC(cpu_v7_smc_switch_mm)
.arch_extension virt
ENTRY(cpu_v7_hvc_switch_mm)
stmfd sp!, {r0 - r3}
movw r0, #:lower16:ARM_SMCCC_ARCH_WORKAROUND_1
movt r0, #:upper16:ARM_SMCCC_ARCH_WORKAROUND_1
hvc #0
ldmfd sp!, {r0 - r3}
b cpu_v7_switch_mm
ENDPROC(cpu_v7_hvc_switch_mm)
#endif
ENTRY(cpu_v7_iciallu_switch_mm)
mov r3, #0
mcr p15, 0, r3, c7, c5, 0 @ ICIALLU
b cpu_v7_switch_mm
ENDPROC(cpu_v7_iciallu_switch_mm)
ENTRY(cpu_v7_bpiall_switch_mm)
mov r3, #0
mcr p15, 0, r3, c7, c5, 6 @ flush BTAC/BTB
b cpu_v7_switch_mm
ENDPROC(cpu_v7_bpiall_switch_mm)
string cpu_v7_name, "ARMv7 Processor"
.align
@ -152,31 +184,6 @@ ENTRY(cpu_v7_do_resume)
ENDPROC(cpu_v7_do_resume)
#endif
/*
* Cortex-A8
*/
globl_equ cpu_ca8_proc_init, cpu_v7_proc_init
globl_equ cpu_ca8_proc_fin, cpu_v7_proc_fin
globl_equ cpu_ca8_reset, cpu_v7_reset
globl_equ cpu_ca8_do_idle, cpu_v7_do_idle
globl_equ cpu_ca8_dcache_clean_area, cpu_v7_dcache_clean_area
globl_equ cpu_ca8_set_pte_ext, cpu_v7_set_pte_ext
globl_equ cpu_ca8_suspend_size, cpu_v7_suspend_size
#ifdef CONFIG_ARM_CPU_SUSPEND
globl_equ cpu_ca8_do_suspend, cpu_v7_do_suspend
globl_equ cpu_ca8_do_resume, cpu_v7_do_resume
#endif
/*
* Cortex-A9 processor functions
*/
globl_equ cpu_ca9mp_proc_init, cpu_v7_proc_init
globl_equ cpu_ca9mp_proc_fin, cpu_v7_proc_fin
globl_equ cpu_ca9mp_reset, cpu_v7_reset
globl_equ cpu_ca9mp_do_idle, cpu_v7_do_idle
globl_equ cpu_ca9mp_dcache_clean_area, cpu_v7_dcache_clean_area
globl_equ cpu_ca9mp_switch_mm, cpu_v7_switch_mm
globl_equ cpu_ca9mp_set_pte_ext, cpu_v7_set_pte_ext
.globl cpu_ca9mp_suspend_size
.equ cpu_ca9mp_suspend_size, cpu_v7_suspend_size + 4 * 2
#ifdef CONFIG_ARM_CPU_SUSPEND
@ -488,12 +495,79 @@ __v7_setup_stack:
__INITDATA
.weak cpu_v7_bugs_init
@ define struct processor (see <asm/proc-fns.h> and proc-macros.S)
define_processor_functions v7, dabort=v7_early_abort, pabort=v7_pabort, suspend=1
#ifndef CONFIG_ARM_LPAE
define_processor_functions ca8, dabort=v7_early_abort, pabort=v7_pabort, suspend=1
define_processor_functions ca9mp, dabort=v7_early_abort, pabort=v7_pabort, suspend=1
define_processor_functions v7, dabort=v7_early_abort, pabort=v7_pabort, suspend=1, bugs=cpu_v7_bugs_init
#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
@ generic v7 bpiall on context switch
globl_equ cpu_v7_bpiall_proc_init, cpu_v7_proc_init
globl_equ cpu_v7_bpiall_proc_fin, cpu_v7_proc_fin
globl_equ cpu_v7_bpiall_reset, cpu_v7_reset
globl_equ cpu_v7_bpiall_do_idle, cpu_v7_do_idle
globl_equ cpu_v7_bpiall_dcache_clean_area, cpu_v7_dcache_clean_area
globl_equ cpu_v7_bpiall_set_pte_ext, cpu_v7_set_pte_ext
globl_equ cpu_v7_bpiall_suspend_size, cpu_v7_suspend_size
#ifdef CONFIG_ARM_CPU_SUSPEND
globl_equ cpu_v7_bpiall_do_suspend, cpu_v7_do_suspend
globl_equ cpu_v7_bpiall_do_resume, cpu_v7_do_resume
#endif
define_processor_functions v7_bpiall, dabort=v7_early_abort, pabort=v7_pabort, suspend=1, bugs=cpu_v7_bugs_init
#define HARDENED_BPIALL_PROCESSOR_FUNCTIONS v7_bpiall_processor_functions
#else
#define HARDENED_BPIALL_PROCESSOR_FUNCTIONS v7_processor_functions
#endif
#ifndef CONFIG_ARM_LPAE
@ Cortex-A8 - always needs bpiall switch_mm implementation
globl_equ cpu_ca8_proc_init, cpu_v7_proc_init
globl_equ cpu_ca8_proc_fin, cpu_v7_proc_fin
globl_equ cpu_ca8_reset, cpu_v7_reset
globl_equ cpu_ca8_do_idle, cpu_v7_do_idle
globl_equ cpu_ca8_dcache_clean_area, cpu_v7_dcache_clean_area
globl_equ cpu_ca8_set_pte_ext, cpu_v7_set_pte_ext
globl_equ cpu_ca8_switch_mm, cpu_v7_bpiall_switch_mm
globl_equ cpu_ca8_suspend_size, cpu_v7_suspend_size
#ifdef CONFIG_ARM_CPU_SUSPEND
globl_equ cpu_ca8_do_suspend, cpu_v7_do_suspend
globl_equ cpu_ca8_do_resume, cpu_v7_do_resume
#endif
define_processor_functions ca8, dabort=v7_early_abort, pabort=v7_pabort, suspend=1, bugs=cpu_v7_ca8_ibe
@ Cortex-A9 - needs more registers preserved across suspend/resume
@ and bpiall switch_mm for hardening
globl_equ cpu_ca9mp_proc_init, cpu_v7_proc_init
globl_equ cpu_ca9mp_proc_fin, cpu_v7_proc_fin
globl_equ cpu_ca9mp_reset, cpu_v7_reset
globl_equ cpu_ca9mp_do_idle, cpu_v7_do_idle
globl_equ cpu_ca9mp_dcache_clean_area, cpu_v7_dcache_clean_area
#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
globl_equ cpu_ca9mp_switch_mm, cpu_v7_bpiall_switch_mm
#else
globl_equ cpu_ca9mp_switch_mm, cpu_v7_switch_mm
#endif
globl_equ cpu_ca9mp_set_pte_ext, cpu_v7_set_pte_ext
define_processor_functions ca9mp, dabort=v7_early_abort, pabort=v7_pabort, suspend=1, bugs=cpu_v7_bugs_init
#endif
@ Cortex-A15 - needs iciallu switch_mm for hardening
globl_equ cpu_ca15_proc_init, cpu_v7_proc_init
globl_equ cpu_ca15_proc_fin, cpu_v7_proc_fin
globl_equ cpu_ca15_reset, cpu_v7_reset
globl_equ cpu_ca15_do_idle, cpu_v7_do_idle
globl_equ cpu_ca15_dcache_clean_area, cpu_v7_dcache_clean_area
#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
globl_equ cpu_ca15_switch_mm, cpu_v7_iciallu_switch_mm
#else
globl_equ cpu_ca15_switch_mm, cpu_v7_switch_mm
#endif
globl_equ cpu_ca15_set_pte_ext, cpu_v7_set_pte_ext
globl_equ cpu_ca15_suspend_size, cpu_v7_suspend_size
globl_equ cpu_ca15_do_suspend, cpu_v7_do_suspend
globl_equ cpu_ca15_do_resume, cpu_v7_do_resume
define_processor_functions ca15, dabort=v7_early_abort, pabort=v7_pabort, suspend=1, bugs=cpu_v7_ca15_ibe
#ifdef CONFIG_CPU_PJ4B
define_processor_functions pj4b, dabort=v7_early_abort, pabort=v7_pabort, suspend=1
#endif
@ -600,7 +674,7 @@ __v7_ca7mp_proc_info:
__v7_ca12mp_proc_info:
.long 0x410fc0d0
.long 0xff0ffff0
__v7_proc __v7_ca12mp_proc_info, __v7_ca12mp_setup
__v7_proc __v7_ca12mp_proc_info, __v7_ca12mp_setup, proc_fns = HARDENED_BPIALL_PROCESSOR_FUNCTIONS
.size __v7_ca12mp_proc_info, . - __v7_ca12mp_proc_info
/*
@ -610,7 +684,7 @@ __v7_ca12mp_proc_info:
__v7_ca15mp_proc_info:
.long 0x410fc0f0
.long 0xff0ffff0
__v7_proc __v7_ca15mp_proc_info, __v7_ca15mp_setup
__v7_proc __v7_ca15mp_proc_info, __v7_ca15mp_setup, proc_fns = ca15_processor_functions
.size __v7_ca15mp_proc_info, . - __v7_ca15mp_proc_info
/*
@ -620,7 +694,7 @@ __v7_ca15mp_proc_info:
__v7_b15mp_proc_info:
.long 0x420f00f0
.long 0xff0ffff0
__v7_proc __v7_b15mp_proc_info, __v7_b15mp_setup
__v7_proc __v7_b15mp_proc_info, __v7_b15mp_setup, proc_fns = ca15_processor_functions
.size __v7_b15mp_proc_info, . - __v7_b15mp_proc_info
/*
@ -630,9 +704,25 @@ __v7_b15mp_proc_info:
__v7_ca17mp_proc_info:
.long 0x410fc0e0
.long 0xff0ffff0
__v7_proc __v7_ca17mp_proc_info, __v7_ca17mp_setup
__v7_proc __v7_ca17mp_proc_info, __v7_ca17mp_setup, proc_fns = HARDENED_BPIALL_PROCESSOR_FUNCTIONS
.size __v7_ca17mp_proc_info, . - __v7_ca17mp_proc_info
/* ARM Ltd. Cortex A73 processor */
.type __v7_ca73_proc_info, #object
__v7_ca73_proc_info:
.long 0x410fd090
.long 0xff0ffff0
__v7_proc __v7_ca73_proc_info, __v7_setup, proc_fns = HARDENED_BPIALL_PROCESSOR_FUNCTIONS
.size __v7_ca73_proc_info, . - __v7_ca73_proc_info
/* ARM Ltd. Cortex A75 processor */
.type __v7_ca75_proc_info, #object
__v7_ca75_proc_info:
.long 0x410fd0a0
.long 0xff0ffff0
__v7_proc __v7_ca75_proc_info, __v7_setup, proc_fns = HARDENED_BPIALL_PROCESSOR_FUNCTIONS
.size __v7_ca75_proc_info, . - __v7_ca75_proc_info
/*
* Qualcomm Inc. Krait processors.
*/

View file

@ -554,12 +554,11 @@ void vfp_flush_hwstate(struct thread_info *thread)
* Save the current VFP state into the provided structures and prepare
* for entry into a new function (signal handler).
*/
int vfp_preserve_user_clear_hwstate(struct user_vfp __user *ufp,
struct user_vfp_exc __user *ufp_exc)
int vfp_preserve_user_clear_hwstate(struct user_vfp *ufp,
struct user_vfp_exc *ufp_exc)
{
struct thread_info *thread = current_thread_info();
struct vfp_hard_struct *hwstate = &thread->vfpstate.hard;
int err = 0;
/* Ensure that the saved hwstate is up-to-date. */
vfp_sync_hwstate(thread);
@ -568,22 +567,19 @@ int vfp_preserve_user_clear_hwstate(struct user_vfp __user *ufp,
* Copy the floating point registers. There can be unused
* registers see asm/hwcap.h for details.
*/
err |= __copy_to_user(&ufp->fpregs, &hwstate->fpregs,
sizeof(hwstate->fpregs));
memcpy(&ufp->fpregs, &hwstate->fpregs, sizeof(hwstate->fpregs));
/*
* Copy the status and control register.
*/
__put_user_error(hwstate->fpscr, &ufp->fpscr, err);
ufp->fpscr = hwstate->fpscr;
/*
* Copy the exception registers.
*/
__put_user_error(hwstate->fpexc, &ufp_exc->fpexc, err);
__put_user_error(hwstate->fpinst, &ufp_exc->fpinst, err);
__put_user_error(hwstate->fpinst2, &ufp_exc->fpinst2, err);
if (err)
return -EFAULT;
ufp_exc->fpexc = hwstate->fpexc;
ufp_exc->fpinst = hwstate->fpinst;
ufp_exc->fpinst2 = hwstate->fpinst2;
/* Ensure that VFP is disabled. */
vfp_flush_hwstate(thread);
@ -597,13 +593,11 @@ int vfp_preserve_user_clear_hwstate(struct user_vfp __user *ufp,
}
/* Sanitise and restore the current VFP state from the provided structures. */
int vfp_restore_user_hwstate(struct user_vfp __user *ufp,
struct user_vfp_exc __user *ufp_exc)
int vfp_restore_user_hwstate(struct user_vfp *ufp, struct user_vfp_exc *ufp_exc)
{
struct thread_info *thread = current_thread_info();
struct vfp_hard_struct *hwstate = &thread->vfpstate.hard;
unsigned long fpexc;
int err = 0;
/* Disable VFP to avoid corrupting the new thread state. */
vfp_flush_hwstate(thread);
@ -612,17 +606,16 @@ int vfp_restore_user_hwstate(struct user_vfp __user *ufp,
* Copy the floating point registers. There can be unused
* registers see asm/hwcap.h for details.
*/
err |= __copy_from_user(&hwstate->fpregs, &ufp->fpregs,
sizeof(hwstate->fpregs));
memcpy(&hwstate->fpregs, &ufp->fpregs, sizeof(hwstate->fpregs));
/*
* Copy the status and control register.
*/
__get_user_error(hwstate->fpscr, &ufp->fpscr, err);
hwstate->fpscr = ufp->fpscr;
/*
* Sanitise and restore the exception registers.
*/
__get_user_error(fpexc, &ufp_exc->fpexc, err);
fpexc = ufp_exc->fpexc;
/* Ensure the VFP is enabled. */
fpexc |= FPEXC_EN;
@ -631,10 +624,10 @@ int vfp_restore_user_hwstate(struct user_vfp __user *ufp,
fpexc &= ~(FPEXC_EX | FPEXC_FP2V);
hwstate->fpexc = fpexc;
__get_user_error(hwstate->fpinst, &ufp_exc->fpinst, err);
__get_user_error(hwstate->fpinst2, &ufp_exc->fpinst2, err);
hwstate->fpinst = ufp_exc->fpinst;
hwstate->fpinst2 = ufp_exc->fpinst2;
return err ? -EFAULT : 0;
return 0;
}
/*

View file

@ -84,7 +84,7 @@ void __init prom_init(void)
* Here we will start up CPU1 in the background and ask it to
* reconfigure itself then go back to sleep.
*/
memcpy((void *)0xa0000200, &bmips_smp_movevec, 0x20);
memcpy((void *)0xa0000200, bmips_smp_movevec, 0x20);
__sync();
set_c0_cause(C_SW0);
cpumask_set_cpu(1, &bmips_booted_mask);

View file

@ -42,7 +42,7 @@
/* O32 stack has to be 8-byte aligned. */
static u64 o32_stk[4096];
#define O32_STK &o32_stk[sizeof(o32_stk)]
#define O32_STK (&o32_stk[ARRAY_SIZE(o32_stk)])
#define __PROM_O32(fun, arg) fun arg __asm__(#fun); \
__asm__(#fun " = call_o32")
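Worked arithmetic behind the O32_STK change (illustrative): sizeof() counts bytes while ARRAY_SIZE() counts elements.

/*
 *   sizeof(o32_stk)     == 4096 * sizeof(u64) == 32768   (bytes)
 *   ARRAY_SIZE(o32_stk) == 4096                          (elements)
 * old: &o32_stk[sizeof(o32_stk)]     -> 32768 elements past the base (8x too far)
 * new: &o32_stk[ARRAY_SIZE(o32_stk)] -> one past the end, the intended initial
 *                                       value for the descending O32 stack
 */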

View file

@ -75,11 +75,11 @@ static inline int register_bmips_smp_ops(void)
#endif
}
extern char bmips_reset_nmi_vec;
extern char bmips_reset_nmi_vec_end;
extern char bmips_smp_movevec;
extern char bmips_smp_int_vec;
extern char bmips_smp_int_vec_end;
extern char bmips_reset_nmi_vec[];
extern char bmips_reset_nmi_vec_end[];
extern char bmips_smp_movevec[];
extern char bmips_smp_int_vec[];
extern char bmips_smp_int_vec_end[];
extern int bmips_smp_enabled;
extern int bmips_cpu_offset;

View file

@ -451,10 +451,10 @@ static void bmips_wr_vec(unsigned long dst, char *start, char *end)
static inline void bmips_nmi_handler_setup(void)
{
bmips_wr_vec(BMIPS_NMI_RESET_VEC, &bmips_reset_nmi_vec,
&bmips_reset_nmi_vec_end);
bmips_wr_vec(BMIPS_WARM_RESTART_VEC, &bmips_smp_int_vec,
&bmips_smp_int_vec_end);
bmips_wr_vec(BMIPS_NMI_RESET_VEC, bmips_reset_nmi_vec,
bmips_reset_nmi_vec_end);
bmips_wr_vec(BMIPS_WARM_RESTART_VEC, bmips_smp_int_vec,
bmips_smp_int_vec_end);
}
struct reset_vec_info {

View file

@ -306,16 +306,16 @@ static int cmm_timeout_handler(struct ctl_table *ctl, int write,
}
if (write) {
len = *lenp;
if (copy_from_user(buf, buffer,
len > sizeof(buf) ? sizeof(buf) : len))
len = min(*lenp, sizeof(buf));
if (copy_from_user(buf, buffer, len))
return -EFAULT;
buf[sizeof(buf) - 1] = '\0';
buf[len - 1] = '\0';
cmm_skip_blanks(buf, &p);
nr = simple_strtoul(p, &p, 0);
cmm_skip_blanks(p, &p);
seconds = simple_strtoul(p, &p, 0);
cmm_set_timeout(nr, seconds);
*ppos += *lenp;
} else {
len = sprintf(buf, "%ld %ld\n",
cmm_timeout_pages, cmm_timeout_seconds);
@ -323,9 +323,9 @@ static int cmm_timeout_handler(struct ctl_table *ctl, int write,
len = *lenp;
if (copy_to_user(buffer, buf, len))
return -EFAULT;
*lenp = len;
*ppos += len;
}
*lenp = len;
*ppos += len;
return 0;
}

View file

@ -5,7 +5,7 @@
* "Big Core" Processors (Branded as Core, Xeon, etc...)
*
* The "_X" parts are generally the EP and EX Xeons, or the
* "Extreme" ones, like Broadwell-E.
* "Extreme" ones, like Broadwell-E, or Atom microserver.
*
* Things ending in "2" are usually because we have no better
* name for them. There's no processor called "WESTMERE2".
@ -67,6 +67,7 @@
#define INTEL_FAM6_ATOM_GOLDMONT 0x5C /* Apollo Lake */
#define INTEL_FAM6_ATOM_GOLDMONT_X 0x5F /* Denverton */
#define INTEL_FAM6_ATOM_GOLDMONT_PLUS 0x7A /* Gemini Lake */
#define INTEL_FAM6_ATOM_TREMONT_X 0x86 /* Jacobsville */
/* Xeon Phi */

View file

@ -859,9 +859,6 @@ static void __init kexec_enter_virtual_mode(void)
if (efi_enabled(EFI_OLD_MEMMAP) && (__supported_pte_mask & _PAGE_NX))
runtime_code_page_mkexec();
/* clean DUMMY object */
efi_delete_dummy_variable();
#endif
}

View file

@ -671,7 +671,21 @@ static int bam_dma_terminate_all(struct dma_chan *chan)
/* remove all transactions, including active transaction */
spin_lock_irqsave(&bchan->vc.lock, flag);
/*
* If we have transactions queued, then some might be committed to the
* hardware in the desc fifo. The only way to reset the desc fifo is
* to do a hardware reset (either by pipe or the entire block).
* bam_chan_init_hw() will trigger a pipe reset, and also reinit the
* pipe. If the pipe is left disabled (default state after pipe reset)
* and is accessed by a connected hardware engine, a fatal error in
* the BAM will occur. There is a small window where this could happen
* with bam_chan_init_hw(), but it is assumed that the caller has
* stopped activity on any attached hardware engine. Make sure to do
* this first so that the BAM hardware doesn't cause memory corruption
* by accessing freed resources.
*/
if (bchan->curr_txd) {
bam_chan_init_hw(bchan, bchan->curr_txd->dir);
list_add(&bchan->curr_txd->vd.node, &bchan->vc.desc_issued);
bchan->curr_txd = NULL;
}

View file

@ -375,7 +375,7 @@ static void cper_print_pcie(const char *pfx, const struct cper_sec_pcie *pcie,
printk("%s""vendor_id: 0x%04x, device_id: 0x%04x\n", pfx,
pcie->device_id.vendor_id, pcie->device_id.device_id);
p = pcie->device_id.class_code;
printk("%s""class_code: %02x%02x%02x\n", pfx, p[0], p[1], p[2]);
printk("%s""class_code: %02x%02x%02x\n", pfx, p[2], p[1], p[0]);
}
if (pcie->validation_bits & CPER_PCIE_VALID_SERIAL_NUMBER)
printk("%s""serial number: 0x%04x, 0x%04x\n", pfx,

View file

@ -58,7 +58,10 @@ bool psci_tos_resident_on(int cpu)
return cpu == resident_cpu;
}
struct psci_operations psci_ops;
struct psci_operations psci_ops = {
.conduit = PSCI_CONDUIT_NONE,
.smccc_version = SMCCC_VERSION_1_0,
};
typedef unsigned long (psci_fn)(unsigned long, unsigned long,
unsigned long, unsigned long);
@ -189,6 +192,22 @@ static unsigned long psci_migrate_info_up_cpu(void)
0, 0, 0);
}
static void set_conduit(enum psci_conduit conduit)
{
switch (conduit) {
case PSCI_CONDUIT_HVC:
invoke_psci_fn = __invoke_psci_fn_hvc;
break;
case PSCI_CONDUIT_SMC:
invoke_psci_fn = __invoke_psci_fn_smc;
break;
default:
WARN(1, "Unexpected PSCI conduit %d\n", conduit);
}
psci_ops.conduit = conduit;
}
static int get_set_conduit_method(struct device_node *np)
{
const char *method;
@ -201,9 +220,9 @@ static int get_set_conduit_method(struct device_node *np)
}
if (!strcmp("hvc", method)) {
invoke_psci_fn = __invoke_psci_fn_hvc;
set_conduit(PSCI_CONDUIT_HVC);
} else if (!strcmp("smc", method)) {
invoke_psci_fn = __invoke_psci_fn_smc;
set_conduit(PSCI_CONDUIT_SMC);
} else {
pr_warn("invalid \"method\" property: %s\n", method);
return -EINVAL;
@ -428,6 +447,31 @@ static void __init psci_init_migrate(void)
pr_info("Trusted OS resident on physical CPU 0x%lx\n", cpuid);
}
static void __init psci_init_smccc(void)
{
u32 ver = ARM_SMCCC_VERSION_1_0;
int feature;
feature = psci_features(ARM_SMCCC_VERSION_FUNC_ID);
if (feature != PSCI_RET_NOT_SUPPORTED) {
u32 ret;
ret = invoke_psci_fn(ARM_SMCCC_VERSION_FUNC_ID, 0, 0, 0);
if (ret == ARM_SMCCC_VERSION_1_1) {
psci_ops.smccc_version = SMCCC_VERSION_1_1;
ver = ret;
}
}
/*
* Conveniently, the SMCCC and PSCI versions are encoded the
* same way. No, this isn't accidental.
*/
pr_info("SMC Calling Convention v%d.%d\n",
PSCI_VERSION_MAJOR(ver), PSCI_VERSION_MINOR(ver));
}
static void __init psci_0_2_set_functions(void)
{
pr_info("Using standard PSCI v0.2 function IDs\n");
@ -476,6 +520,7 @@ static int __init psci_probe(void)
psci_init_migrate();
if (PSCI_VERSION_MAJOR(ver) >= 1) {
psci_init_smccc();
psci_init_cpu_suspend();
psci_init_system_suspend();
}
@ -589,9 +634,9 @@ int __init psci_acpi_init(void)
pr_info("probing for conduit method from ACPI.\n");
if (acpi_psci_use_hvc())
invoke_psci_fn = __invoke_psci_fn_hvc;
set_conduit(PSCI_CONDUIT_HVC);
else
invoke_psci_fn = __invoke_psci_fn_smc;
set_conduit(PSCI_CONDUIT_SMC);
return psci_probe();
}
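An illustrative sketch (hypothetical names, not the kernel code) of what set_conduit() centralizes above: installing the firmware call helper and recording which conduit was chosen so other code can query it later.

enum conduit { CONDUIT_NONE, CONDUIT_HVC, CONDUIT_SMC };

static unsigned long invoke_hvc(unsigned long fn) { return fn; /* placeholder */ }
static unsigned long invoke_smc(unsigned long fn) { return fn; /* placeholder */ }

static unsigned long (*invoke_fn)(unsigned long);
static enum conduit cur_conduit = CONDUIT_NONE;

static void set_conduit_sketch(enum conduit c)
{
	switch (c) {
	case CONDUIT_HVC:
		invoke_fn = invoke_hvc;
		break;
	case CONDUIT_SMC:
		invoke_fn = invoke_smc;
		break;
	default:
		return;		/* unexpected conduit: leave state unchanged */
	}
	cur_conduit = c;	/* recorded alongside the helper, as psci_ops.conduit is */
}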

View file

@ -726,6 +726,14 @@ config HID_SPEEDLINK
---help---
Support for Speedlink Vicious and Divine Cezanne mouse.
config HID_STEAM
tristate "Steam Controller support"
depends on HID
---help---
Say Y here if you have a Steam Controller and want to use it
without running the Steam Client. It supports both the wired and
the wireless adaptor.
config HID_STEELSERIES
tristate "Steelseries SRW-S1 steering wheel support"
depends on HID

View file

@ -85,6 +85,7 @@ obj-$(CONFIG_HID_SAMSUNG) += hid-samsung.o
obj-$(CONFIG_HID_SMARTJOYPLUS) += hid-sjoy.o
obj-$(CONFIG_HID_SONY) += hid-sony.o
obj-$(CONFIG_HID_SPEEDLINK) += hid-speedlink.o
obj-$(CONFIG_HID_STEAM) += hid-steam.o
obj-$(CONFIG_HID_STEELSERIES) += hid-steelseries.o
obj-$(CONFIG_HID_SUNPLUS) += hid-sunplus.o
obj-$(CONFIG_HID_GREENASIA) += hid-gaff.o

View file

@ -75,13 +75,20 @@ static int axff_init(struct hid_device *hid)
{
struct axff_device *axff;
struct hid_report *report;
struct hid_input *hidinput = list_first_entry(&hid->inputs, struct hid_input, list);
struct hid_input *hidinput;
struct list_head *report_list =&hid->report_enum[HID_OUTPUT_REPORT].report_list;
struct input_dev *dev = hidinput->input;
struct input_dev *dev;
int field_count = 0;
int i, j;
int error;
if (list_empty(&hid->inputs)) {
hid_err(hid, "no inputs found\n");
return -ENODEV;
}
hidinput = list_first_entry(&hid->inputs, struct hid_input, list);
dev = hidinput->input;
if (list_empty(report_list)) {
hid_err(hid, "no output reports found\n");
return -ENODEV;
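The same guard is repeated across the force-feedback drivers below (hid-dr, hid-emsff, hid-gaff, hid-holtekff, the hid-lg* families, hid-tmff and hid-zpff). A standalone sketch (assumption: generic intrusive-list code, not the HID core) of why the check matters: taking the "first entry" of an empty list hands back the list head disguised as an element.

#include <stddef.h>
#include <stdio.h>

struct node { struct node *next, *prev; };

#define list_entry(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct item { int value; struct node link; };

int main(void)
{
	struct node head = { &head, &head };	/* empty circular list */

	/* Without the emptiness check, list_entry(head.next, ...) is computed
	 * from the head itself and points at memory that is not a struct item. */
	if (head.next == &head) {
		printf("no inputs found\n");
		return 1;
	}
	printf("first item: %d\n", list_entry(head.next, struct item, link)->value);
	return 0;
}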

View file

@ -976,6 +976,7 @@ int hid_open_report(struct hid_device *device)
__u8 *start;
__u8 *buf;
__u8 *end;
__u8 *next;
int ret;
static int (*dispatch_type[])(struct hid_parser *parser,
struct hid_item *item) = {
@ -1029,7 +1030,8 @@ int hid_open_report(struct hid_device *device)
device->collection_size = HID_DEFAULT_NUM_COLLECTIONS;
ret = -EINVAL;
while ((start = fetch_item(start, end, &item)) != NULL) {
while ((next = fetch_item(start, end, &item)) != NULL) {
start = next;
if (item.format != HID_ITEM_FORMAT_SHORT) {
hid_err(device, "unexpected long global item\n");
@ -1058,7 +1060,8 @@ int hid_open_report(struct hid_device *device)
}
}
hid_err(device, "item fetching failed at offset %d\n", (int)(end - start));
hid_err(device, "item fetching failed at offset %u/%u\n",
size - (unsigned int)(end - start), size);
err:
vfree(parser);
hid_close_report(device);
@ -2070,6 +2073,9 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP1062) },
{ HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_WIRELESS_TABLET_TWHL850) },
{ HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_TWHA60) },
{ HID_USB_DEVICE(USB_VENDOR_ID_VALVE, USB_DEVICE_ID_STEAM_CONTROLLER) },
{ HID_USB_DEVICE(USB_VENDOR_ID_VALVE, USB_DEVICE_ID_STEAM_CONTROLLER_WIRELESS) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_VALVE, USB_DEVICE_ID_STEAM_CONTROLLER_BT) },
{ HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_SMARTJOY_PLUS) },
{ HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_SUPER_JOY_BOX_3) },
{ HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_DUAL_USB_JOYPAD) },

View file

@ -87,13 +87,19 @@ static int drff_init(struct hid_device *hid)
{
struct drff_device *drff;
struct hid_report *report;
struct hid_input *hidinput = list_first_entry(&hid->inputs,
struct hid_input, list);
struct hid_input *hidinput;
struct list_head *report_list =
&hid->report_enum[HID_OUTPUT_REPORT].report_list;
struct input_dev *dev = hidinput->input;
struct input_dev *dev;
int error;
if (list_empty(&hid->inputs)) {
hid_err(hid, "no inputs found\n");
return -ENODEV;
}
hidinput = list_first_entry(&hid->inputs, struct hid_input, list);
dev = hidinput->input;
if (list_empty(report_list)) {
hid_err(hid, "no output reports found\n");
return -ENODEV;

View file

@ -59,13 +59,19 @@ static int emsff_init(struct hid_device *hid)
{
struct emsff_device *emsff;
struct hid_report *report;
struct hid_input *hidinput = list_first_entry(&hid->inputs,
struct hid_input, list);
struct hid_input *hidinput;
struct list_head *report_list =
&hid->report_enum[HID_OUTPUT_REPORT].report_list;
struct input_dev *dev = hidinput->input;
struct input_dev *dev;
int error;
if (list_empty(&hid->inputs)) {
hid_err(hid, "no inputs found\n");
return -ENODEV;
}
hidinput = list_first_entry(&hid->inputs, struct hid_input, list);
dev = hidinput->input;
if (list_empty(report_list)) {
hid_err(hid, "no output reports found\n");
return -ENODEV;

View file

@ -77,14 +77,20 @@ static int gaff_init(struct hid_device *hid)
{
struct gaff_device *gaff;
struct hid_report *report;
struct hid_input *hidinput = list_entry(hid->inputs.next,
struct hid_input, list);
struct hid_input *hidinput;
struct list_head *report_list =
&hid->report_enum[HID_OUTPUT_REPORT].report_list;
struct list_head *report_ptr = report_list;
struct input_dev *dev = hidinput->input;
struct input_dev *dev;
int error;
if (list_empty(&hid->inputs)) {
hid_err(hid, "no inputs found\n");
return -ENODEV;
}
hidinput = list_entry(hid->inputs.next, struct hid_input, list);
dev = hidinput->input;
if (list_empty(report_list)) {
hid_err(hid, "no output reports found\n");
return -ENODEV;

View file

@ -140,13 +140,19 @@ static int holtekff_init(struct hid_device *hid)
{
struct holtekff_device *holtekff;
struct hid_report *report;
struct hid_input *hidinput = list_entry(hid->inputs.next,
struct hid_input, list);
struct hid_input *hidinput;
struct list_head *report_list =
&hid->report_enum[HID_OUTPUT_REPORT].report_list;
struct input_dev *dev = hidinput->input;
struct input_dev *dev;
int error;
if (list_empty(&hid->inputs)) {
hid_err(hid, "no inputs found\n");
return -ENODEV;
}
hidinput = list_entry(hid->inputs.next, struct hid_input, list);
dev = hidinput->input;
if (list_empty(report_list)) {
hid_err(hid, "no output report found\n");
return -ENODEV;

View file

@ -900,6 +900,11 @@
#define USB_VENDOR_ID_STANTUM_SITRONIX 0x1403
#define USB_DEVICE_ID_MTP_SITRONIX 0x5001
#define USB_VENDOR_ID_VALVE 0x28de
#define USB_DEVICE_ID_STEAM_CONTROLLER 0x1102
#define USB_DEVICE_ID_STEAM_CONTROLLER_WIRELESS 0x1142
#define USB_DEVICE_ID_STEAM_CONTROLLER_BT 0x1106
#define USB_VENDOR_ID_STEELSERIES 0x1038
#define USB_DEVICE_ID_STEELSERIES_SRWS1 0x1410

View file

@ -62,11 +62,17 @@ int lg2ff_init(struct hid_device *hid)
{
struct lg2ff_device *lg2ff;
struct hid_report *report;
struct hid_input *hidinput = list_entry(hid->inputs.next,
struct hid_input, list);
struct input_dev *dev = hidinput->input;
struct hid_input *hidinput;
struct input_dev *dev;
int error;
if (list_empty(&hid->inputs)) {
hid_err(hid, "no inputs found\n");
return -ENODEV;
}
hidinput = list_entry(hid->inputs.next, struct hid_input, list);
dev = hidinput->input;
/* Check that the report looks ok */
report = hid_validate_values(hid, HID_OUTPUT_REPORT, 0, 0, 7);
if (!report)

View file

@ -129,12 +129,19 @@ static const signed short ff3_joystick_ac[] = {
int lg3ff_init(struct hid_device *hid)
{
struct hid_input *hidinput = list_entry(hid->inputs.next, struct hid_input, list);
struct input_dev *dev = hidinput->input;
struct hid_input *hidinput;
struct input_dev *dev;
const signed short *ff_bits = ff3_joystick_ac;
int error;
int i;
if (list_empty(&hid->inputs)) {
hid_err(hid, "no inputs found\n");
return -ENODEV;
}
hidinput = list_entry(hid->inputs.next, struct hid_input, list);
dev = hidinput->input;
/* Check that the report looks ok */
if (!hid_validate_values(hid, HID_OUTPUT_REPORT, 0, 0, 35))
return -ENODEV;

View file

@ -1158,8 +1158,8 @@ static int lg4ff_handle_multimode_wheel(struct hid_device *hid, u16 *real_produc
int lg4ff_init(struct hid_device *hid)
{
struct hid_input *hidinput = list_entry(hid->inputs.next, struct hid_input, list);
struct input_dev *dev = hidinput->input;
struct hid_input *hidinput;
struct input_dev *dev;
struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list;
struct hid_report *report = list_entry(report_list->next, struct hid_report, list);
const struct usb_device_descriptor *udesc = &(hid_to_usb_dev(hid)->descriptor);
@ -1171,6 +1171,13 @@ int lg4ff_init(struct hid_device *hid)
int mmode_ret, mmode_idx = -1;
u16 real_product_id;
if (list_empty(&hid->inputs)) {
hid_err(hid, "no inputs found\n");
return -ENODEV;
}
hidinput = list_entry(hid->inputs.next, struct hid_input, list);
dev = hidinput->input;
/* Check that the report looks ok */
if (!hid_validate_values(hid, HID_OUTPUT_REPORT, 0, 0, 7))
return -1;

View file

@ -127,12 +127,19 @@ static void hid_lgff_set_autocenter(struct input_dev *dev, u16 magnitude)
int lgff_init(struct hid_device* hid)
{
struct hid_input *hidinput = list_entry(hid->inputs.next, struct hid_input, list);
struct input_dev *dev = hidinput->input;
struct hid_input *hidinput;
struct input_dev *dev;
const signed short *ff_bits = ff_joystick;
int error;
int i;
if (list_empty(&hid->inputs)) {
hid_err(hid, "no inputs found\n");
return -ENODEV;
}
hidinput = list_entry(hid->inputs.next, struct hid_input, list);
dev = hidinput->input;
/* Check that the report looks ok */
if (!hid_validate_values(hid, HID_OUTPUT_REPORT, 0, 0, 7))
return -ENODEV;

File diff suppressed because it is too large

drivers/hid/hid-steam.c (new file, 1141 lines added)

File diff suppressed because it is too large

View file

@ -136,12 +136,18 @@ static int tmff_init(struct hid_device *hid, const signed short *ff_bits)
struct tmff_device *tmff;
struct hid_report *report;
struct list_head *report_list;
struct hid_input *hidinput = list_entry(hid->inputs.next,
struct hid_input, list);
struct input_dev *input_dev = hidinput->input;
struct hid_input *hidinput;
struct input_dev *input_dev;
int error;
int i;
if (list_empty(&hid->inputs)) {
hid_err(hid, "no inputs found\n");
return -ENODEV;
}
hidinput = list_entry(hid->inputs.next, struct hid_input, list);
input_dev = hidinput->input;
tmff = kzalloc(sizeof(struct tmff_device), GFP_KERNEL);
if (!tmff)
return -ENOMEM;

View file

@ -66,11 +66,17 @@ static int zpff_init(struct hid_device *hid)
{
struct zpff_device *zpff;
struct hid_report *report;
struct hid_input *hidinput = list_entry(hid->inputs.next,
struct hid_input, list);
struct input_dev *dev = hidinput->input;
struct hid_input *hidinput;
struct input_dev *dev;
int i, error;
if (list_empty(&hid->inputs)) {
hid_err(hid, "no inputs found\n");
return -ENODEV;
}
hidinput = list_entry(hid->inputs.next, struct hid_input, list);
dev = hidinput->input;
for (i = 0; i < 4; i++) {
report = hid_validate_values(hid, HID_OUTPUT_REPORT, 0, i, 1);
if (!report)

View file

@ -126,7 +126,7 @@
#define BMC150_ACCEL_SLEEP_1_SEC 0x0F
#define BMC150_ACCEL_REG_TEMP 0x08
#define BMC150_ACCEL_TEMP_CENTER_VAL 24
#define BMC150_ACCEL_TEMP_CENTER_VAL 23
#define BMC150_ACCEL_AXIS_TO_REG(axis) (BMC150_ACCEL_REG_XOUT_L + (axis * 2))
#define BMC150_AUTO_SUSPEND_DELAY_MS 2000

View file

@ -1976,9 +1976,10 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
conn_id->cm_id.iw = NULL;
cma_exch(conn_id, RDMA_CM_DESTROYING);
mutex_unlock(&conn_id->handler_mutex);
mutex_unlock(&listen_id->handler_mutex);
cma_deref_id(conn_id);
rdma_destroy_id(&conn_id->id);
goto out;
return ret;
}
mutex_unlock(&conn_id->handler_mutex);

View file

@ -32,7 +32,7 @@ static struct kmem_cache *_cell_cache;
*/
struct dm_bio_prison *dm_bio_prison_create(void)
{
struct dm_bio_prison *prison = kmalloc(sizeof(*prison), GFP_KERNEL);
struct dm_bio_prison *prison = kzalloc(sizeof(*prison), GFP_KERNEL);
if (!prison)
return NULL;

View file

@ -50,7 +50,7 @@ struct dm_io_client *dm_io_client_create(void)
struct dm_io_client *client;
unsigned min_ios = dm_get_reserved_bio_based_ios();
client = kmalloc(sizeof(*client), GFP_KERNEL);
client = kzalloc(sizeof(*client), GFP_KERNEL);
if (!client)
return ERR_PTR(-ENOMEM);

View file

@ -827,7 +827,7 @@ struct dm_kcopyd_client *dm_kcopyd_client_create(struct dm_kcopyd_throttle *thro
int r = -ENOMEM;
struct dm_kcopyd_client *kc;
kc = kmalloc(sizeof(*kc), GFP_KERNEL);
kc = kzalloc(sizeof(*kc), GFP_KERNEL);
if (!kc)
return ERR_PTR(-ENOMEM);

View file

@ -179,7 +179,7 @@ struct dm_region_hash *dm_region_hash_create(
;
nr_buckets >>= 1;
rh = kmalloc(sizeof(*rh), GFP_KERNEL);
rh = kzalloc(sizeof(*rh), GFP_KERNEL);
if (!rh) {
DMERR("unable to allocate region hash memory");
return ERR_PTR(-ENOMEM);

View file

@ -19,7 +19,6 @@
#include <linux/vmalloc.h>
#include <linux/log2.h>
#include <linux/dm-kcopyd.h>
#include <linux/semaphore.h>
#include "dm.h"
@ -48,7 +47,7 @@ struct dm_exception_table {
};
struct dm_snapshot {
struct rw_semaphore lock;
struct mutex lock;
struct dm_dev *origin;
struct dm_dev *cow;
@ -106,8 +105,8 @@ struct dm_snapshot {
/* The on disk metadata handler */
struct dm_exception_store *store;
/* Maximum number of in-flight COW jobs. */
struct semaphore cow_count;
unsigned in_progress;
wait_queue_head_t in_progress_wait;
struct dm_kcopyd_client *kcopyd_client;
@ -158,8 +157,8 @@ struct dm_snapshot {
*/
#define DEFAULT_COW_THRESHOLD 2048
static int cow_threshold = DEFAULT_COW_THRESHOLD;
module_param_named(snapshot_cow_threshold, cow_threshold, int, 0644);
static unsigned cow_threshold = DEFAULT_COW_THRESHOLD;
module_param_named(snapshot_cow_threshold, cow_threshold, uint, 0644);
MODULE_PARM_DESC(snapshot_cow_threshold, "Maximum number of chunks being copied on write");
DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(snapshot_copy_throttle,
@ -456,9 +455,9 @@ static int __find_snapshots_sharing_cow(struct dm_snapshot *snap,
if (!bdev_equal(s->cow->bdev, snap->cow->bdev))
continue;
down_read(&s->lock);
mutex_lock(&s->lock);
active = s->active;
up_read(&s->lock);
mutex_unlock(&s->lock);
if (active) {
if (snap_src)
@ -926,7 +925,7 @@ static int remove_single_exception_chunk(struct dm_snapshot *s)
int r;
chunk_t old_chunk = s->first_merging_chunk + s->num_merging_chunks - 1;
down_write(&s->lock);
mutex_lock(&s->lock);
/*
* Process chunks (and associated exceptions) in reverse order
@ -941,7 +940,7 @@ static int remove_single_exception_chunk(struct dm_snapshot *s)
b = __release_queued_bios_after_merge(s);
out:
up_write(&s->lock);
mutex_unlock(&s->lock);
if (b)
flush_bios(b);
@ -1000,9 +999,9 @@ static void snapshot_merge_next_chunks(struct dm_snapshot *s)
if (linear_chunks < 0) {
DMERR("Read error in exception store: "
"shutting down merge");
down_write(&s->lock);
mutex_lock(&s->lock);
s->merge_failed = 1;
up_write(&s->lock);
mutex_unlock(&s->lock);
}
goto shut;
}
@ -1043,10 +1042,10 @@ static void snapshot_merge_next_chunks(struct dm_snapshot *s)
previous_count = read_pending_exceptions_done_count();
}
down_write(&s->lock);
mutex_lock(&s->lock);
s->first_merging_chunk = old_chunk;
s->num_merging_chunks = linear_chunks;
up_write(&s->lock);
mutex_unlock(&s->lock);
/* Wait until writes to all 'linear_chunks' drain */
for (i = 0; i < linear_chunks; i++)
@ -1088,10 +1087,10 @@ static void merge_callback(int read_err, unsigned long write_err, void *context)
return;
shut:
down_write(&s->lock);
mutex_lock(&s->lock);
s->merge_failed = 1;
b = __release_queued_bios_after_merge(s);
up_write(&s->lock);
mutex_unlock(&s->lock);
error_bios(b);
merge_shutdown(s);
@ -1137,7 +1136,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
origin_mode = FMODE_WRITE;
}
s = kmalloc(sizeof(*s), GFP_KERNEL);
s = kzalloc(sizeof(*s), GFP_KERNEL);
if (!s) {
ti->error = "Cannot allocate private snapshot structure";
r = -ENOMEM;
@ -1190,7 +1189,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
s->exception_start_sequence = 0;
s->exception_complete_sequence = 0;
INIT_LIST_HEAD(&s->out_of_order_list);
init_rwsem(&s->lock);
mutex_init(&s->lock);
INIT_LIST_HEAD(&s->list);
spin_lock_init(&s->pe_lock);
s->state_bits = 0;
@ -1206,7 +1205,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
goto bad_hash_tables;
}
sema_init(&s->cow_count, (cow_threshold > 0) ? cow_threshold : INT_MAX);
init_waitqueue_head(&s->in_progress_wait);
s->kcopyd_client = dm_kcopyd_client_create(&dm_kcopyd_throttle);
if (IS_ERR(s->kcopyd_client)) {
@ -1357,9 +1356,9 @@ static void snapshot_dtr(struct dm_target *ti)
/* Check whether exception handover must be cancelled */
(void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
if (snap_src && snap_dest && (s == snap_src)) {
down_write(&snap_dest->lock);
mutex_lock(&snap_dest->lock);
snap_dest->valid = 0;
up_write(&snap_dest->lock);
mutex_unlock(&snap_dest->lock);
DMERR("Cancelling snapshot handover.");
}
up_read(&_origins_lock);
@ -1390,13 +1389,62 @@ static void snapshot_dtr(struct dm_target *ti)
dm_exception_store_destroy(s->store);
mutex_destroy(&s->lock);
dm_put_device(ti, s->cow);
dm_put_device(ti, s->origin);
WARN_ON(s->in_progress);
kfree(s);
}
static void account_start_copy(struct dm_snapshot *s)
{
spin_lock(&s->in_progress_wait.lock);
s->in_progress++;
spin_unlock(&s->in_progress_wait.lock);
}
static void account_end_copy(struct dm_snapshot *s)
{
spin_lock(&s->in_progress_wait.lock);
BUG_ON(!s->in_progress);
s->in_progress--;
if (likely(s->in_progress <= cow_threshold) &&
unlikely(waitqueue_active(&s->in_progress_wait)))
wake_up_locked(&s->in_progress_wait);
spin_unlock(&s->in_progress_wait.lock);
}
static bool wait_for_in_progress(struct dm_snapshot *s, bool unlock_origins)
{
if (unlikely(s->in_progress > cow_threshold)) {
spin_lock(&s->in_progress_wait.lock);
if (likely(s->in_progress > cow_threshold)) {
/*
* NOTE: this throttle doesn't account for whether
* the caller is servicing an IO that will trigger a COW
* so excess throttling may result for chunks not required
* to be COW'd. But if cow_threshold was reached, extra
* throttling is unlikely to negatively impact performance.
*/
DECLARE_WAITQUEUE(wait, current);
__add_wait_queue(&s->in_progress_wait, &wait);
__set_current_state(TASK_UNINTERRUPTIBLE);
spin_unlock(&s->in_progress_wait.lock);
if (unlock_origins)
up_read(&_origins_lock);
io_schedule();
remove_wait_queue(&s->in_progress_wait, &wait);
return false;
}
spin_unlock(&s->in_progress_wait.lock);
}
return true;
}
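For reference, a userspace analog (an assumption for illustration, using pthreads rather than the kernel waitqueue) of the in_progress counter plus wait pattern that replaces the old cow_count semaphore: starts increment the counter, completions decrement it and wake waiters once the count drops back under the threshold.

#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  wake = PTHREAD_COND_INITIALIZER;
static unsigned int in_progress;
static unsigned int threshold = 2048;	/* analogous to cow_threshold */

static void account_start(void)
{
	pthread_mutex_lock(&lock);
	in_progress++;
	pthread_mutex_unlock(&lock);
}

static void account_end(void)
{
	pthread_mutex_lock(&lock);
	in_progress--;
	if (in_progress <= threshold)
		pthread_cond_broadcast(&wake);	/* wake throttled submitters */
	pthread_mutex_unlock(&lock);
}

static void wait_for_room(void)
{
	pthread_mutex_lock(&lock);
	while (in_progress > threshold)
		pthread_cond_wait(&wake, &lock);
	pthread_mutex_unlock(&lock);
}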
/*
* Flush a list of buffers.
*/
@ -1412,7 +1460,7 @@ static void flush_bios(struct bio *bio)
}
}
static int do_origin(struct dm_dev *origin, struct bio *bio);
static int do_origin(struct dm_dev *origin, struct bio *bio, bool limit);
/*
* Flush a list of buffers.
@ -1425,7 +1473,7 @@ static void retry_origin_bios(struct dm_snapshot *s, struct bio *bio)
while (bio) {
n = bio->bi_next;
bio->bi_next = NULL;
r = do_origin(s->origin, bio);
r = do_origin(s->origin, bio, false);
if (r == DM_MAPIO_REMAPPED)
generic_make_request(bio);
bio = n;
@ -1477,7 +1525,7 @@ static void pending_complete(void *context, int success)
if (!success) {
/* Read/write error - snapshot is unusable */
down_write(&s->lock);
mutex_lock(&s->lock);
__invalidate_snapshot(s, -EIO);
error = 1;
goto out;
@ -1485,14 +1533,14 @@ static void pending_complete(void *context, int success)
e = alloc_completed_exception(GFP_NOIO);
if (!e) {
down_write(&s->lock);
mutex_lock(&s->lock);
__invalidate_snapshot(s, -ENOMEM);
error = 1;
goto out;
}
*e = pe->e;
down_write(&s->lock);
mutex_lock(&s->lock);
if (!s->valid) {
free_completed_exception(e);
error = 1;
@ -1517,7 +1565,7 @@ out:
full_bio->bi_end_io = pe->full_bio_end_io;
increment_pending_exceptions_done_count();
up_write(&s->lock);
mutex_unlock(&s->lock);
/* Submit any pending write bios */
if (error) {
@ -1579,7 +1627,7 @@ static void copy_callback(int read_err, unsigned long write_err, void *context)
}
list_add(&pe->out_of_order_entry, lh);
}
up(&s->cow_count);
account_end_copy(s);
}
/*
@ -1603,7 +1651,7 @@ static void start_copy(struct dm_snap_pending_exception *pe)
dest.count = src.count;
/* Hand over to kcopyd */
down(&s->cow_count);
account_start_copy(s);
dm_kcopyd_copy(s->kcopyd_client, &src, 1, &dest, 0, copy_callback, pe);
}
@ -1623,7 +1671,7 @@ static void start_full_bio(struct dm_snap_pending_exception *pe,
pe->full_bio = bio;
pe->full_bio_end_io = bio->bi_end_io;
down(&s->cow_count);
account_start_copy(s);
callback_data = dm_kcopyd_prepare_callback(s->kcopyd_client,
copy_callback, pe);
@ -1714,9 +1762,12 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
if (!s->valid)
return -EIO;
/* FIXME: should only take write lock if we need
* to copy an exception */
down_write(&s->lock);
if (bio_data_dir(bio) == WRITE) {
while (unlikely(!wait_for_in_progress(s, false)))
; /* wait_for_in_progress() has slept */
}
mutex_lock(&s->lock);
if (!s->valid || (unlikely(s->snapshot_overflowed) && bio_rw(bio) == WRITE)) {
r = -EIO;
@ -1738,9 +1789,9 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
if (bio_rw(bio) == WRITE) {
pe = __lookup_pending_exception(s, chunk);
if (!pe) {
up_write(&s->lock);
mutex_unlock(&s->lock);
pe = alloc_pending_exception(s);
down_write(&s->lock);
mutex_lock(&s->lock);
if (!s->valid || s->snapshot_overflowed) {
free_pending_exception(pe);
@ -1775,7 +1826,7 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
bio->bi_iter.bi_size ==
(s->store->chunk_size << SECTOR_SHIFT)) {
pe->started = 1;
up_write(&s->lock);
mutex_unlock(&s->lock);
start_full_bio(pe, bio);
goto out;
}
@ -1785,7 +1836,7 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
if (!pe->started) {
/* this is protected by snap->lock */
pe->started = 1;
up_write(&s->lock);
mutex_unlock(&s->lock);
start_copy(pe);
goto out;
}
@ -1795,7 +1846,7 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
}
out_unlock:
up_write(&s->lock);
mutex_unlock(&s->lock);
out:
return r;
}
@ -1831,7 +1882,7 @@ static int snapshot_merge_map(struct dm_target *ti, struct bio *bio)
chunk = sector_to_chunk(s->store, bio->bi_iter.bi_sector);
down_write(&s->lock);
mutex_lock(&s->lock);
/* Full merging snapshots are redirected to the origin */
if (!s->valid)
@ -1862,12 +1913,12 @@ redirect_to_origin:
bio->bi_bdev = s->origin->bdev;
if (bio_rw(bio) == WRITE) {
up_write(&s->lock);
return do_origin(s->origin, bio);
mutex_unlock(&s->lock);
return do_origin(s->origin, bio, false);
}
out_unlock:
up_write(&s->lock);
mutex_unlock(&s->lock);
return r;
}
@ -1898,7 +1949,7 @@ static int snapshot_preresume(struct dm_target *ti)
down_read(&_origins_lock);
(void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
if (snap_src && snap_dest) {
down_read(&snap_src->lock);
mutex_lock(&snap_src->lock);
if (s == snap_src) {
DMERR("Unable to resume snapshot source until "
"handover completes.");
@ -1908,7 +1959,7 @@ static int snapshot_preresume(struct dm_target *ti)
"source is suspended.");
r = -EINVAL;
}
up_read(&snap_src->lock);
mutex_unlock(&snap_src->lock);
}
up_read(&_origins_lock);
@ -1954,11 +2005,11 @@ static void snapshot_resume(struct dm_target *ti)
(void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
if (snap_src && snap_dest) {
down_write(&snap_src->lock);
down_write_nested(&snap_dest->lock, SINGLE_DEPTH_NESTING);
mutex_lock(&snap_src->lock);
mutex_lock_nested(&snap_dest->lock, SINGLE_DEPTH_NESTING);
__handover_exceptions(snap_src, snap_dest);
up_write(&snap_dest->lock);
up_write(&snap_src->lock);
mutex_unlock(&snap_dest->lock);
mutex_unlock(&snap_src->lock);
}
up_read(&_origins_lock);
@ -1973,9 +2024,9 @@ static void snapshot_resume(struct dm_target *ti)
/* Now we have correct chunk size, reregister */
reregister_snapshot(s);
down_write(&s->lock);
mutex_lock(&s->lock);
s->active = 1;
up_write(&s->lock);
mutex_unlock(&s->lock);
}
static uint32_t get_origin_minimum_chunksize(struct block_device *bdev)
@ -2015,7 +2066,7 @@ static void snapshot_status(struct dm_target *ti, status_type_t type,
switch (type) {
case STATUSTYPE_INFO:
down_write(&snap->lock);
mutex_lock(&snap->lock);
if (!snap->valid)
DMEMIT("Invalid");
@ -2040,7 +2091,7 @@ static void snapshot_status(struct dm_target *ti, status_type_t type,
DMEMIT("Unknown");
}
up_write(&snap->lock);
mutex_unlock(&snap->lock);
break;
@ -2106,7 +2157,7 @@ static int __origin_write(struct list_head *snapshots, sector_t sector,
if (dm_target_is_snapshot_merge(snap->ti))
continue;
down_write(&snap->lock);
mutex_lock(&snap->lock);
/* Only deal with valid and active snapshots */
if (!snap->valid || !snap->active)
@ -2133,9 +2184,9 @@ static int __origin_write(struct list_head *snapshots, sector_t sector,
pe = __lookup_pending_exception(snap, chunk);
if (!pe) {
up_write(&snap->lock);
mutex_unlock(&snap->lock);
pe = alloc_pending_exception(snap);
down_write(&snap->lock);
mutex_lock(&snap->lock);
if (!snap->valid) {
free_pending_exception(pe);
@ -2178,7 +2229,7 @@ static int __origin_write(struct list_head *snapshots, sector_t sector,
}
next_snapshot:
up_write(&snap->lock);
mutex_unlock(&snap->lock);
if (pe_to_start_now) {
start_copy(pe_to_start_now);
@ -2199,15 +2250,24 @@ next_snapshot:
/*
* Called on a write from the origin driver.
*/
static int do_origin(struct dm_dev *origin, struct bio *bio)
static int do_origin(struct dm_dev *origin, struct bio *bio, bool limit)
{
struct origin *o;
int r = DM_MAPIO_REMAPPED;
again:
down_read(&_origins_lock);
o = __lookup_origin(origin->bdev);
if (o)
if (o) {
if (limit) {
struct dm_snapshot *s;
list_for_each_entry(s, &o->snapshots, list)
if (unlikely(!wait_for_in_progress(s, true)))
goto again;
}
r = __origin_write(&o->snapshots, bio->bi_iter.bi_sector, bio);
}
up_read(&_origins_lock);
return r;
@ -2320,7 +2380,7 @@ static int origin_map(struct dm_target *ti, struct bio *bio)
dm_accept_partial_bio(bio, available_sectors);
/* Only tell snapshots if this is a write */
return do_origin(o->dev, bio);
return do_origin(o->dev, bio, true);
}
/*

View file

@ -2882,7 +2882,7 @@ static struct pool *pool_create(struct mapped_device *pool_md,
return (struct pool *)pmd;
}
pool = kmalloc(sizeof(*pool), GFP_KERNEL);
pool = kzalloc(sizeof(*pool), GFP_KERNEL);
if (!pool) {
*error = "Error allocating memory for pool";
err_p = ERR_PTR(-ENOMEM);

View file

@ -3889,7 +3889,7 @@ out:
* this to-be-skipped slave to send a packet out.
*/
old_arr = rtnl_dereference(bond->slave_arr);
for (idx = 0; idx < old_arr->count; idx++) {
for (idx = 0; old_arr != NULL && idx < old_arr->count; idx++) {
if (skipslave == old_arr->arr[idx]) {
old_arr->arr[idx] =
old_arr->arr[old_arr->count-1];

View file

@ -174,6 +174,7 @@ struct hip04_priv {
dma_addr_t rx_phys[RX_DESC_NUM];
unsigned int rx_head;
unsigned int rx_buf_size;
unsigned int rx_cnt_remaining;
struct device_node *phy_node;
struct phy_device *phy;
@ -487,7 +488,6 @@ static int hip04_rx_poll(struct napi_struct *napi, int budget)
struct hip04_priv *priv = container_of(napi, struct hip04_priv, napi);
struct net_device *ndev = priv->ndev;
struct net_device_stats *stats = &ndev->stats;
unsigned int cnt = hip04_recv_cnt(priv);
struct rx_desc *desc;
struct sk_buff *skb;
unsigned char *buf;
@ -500,8 +500,8 @@ static int hip04_rx_poll(struct napi_struct *napi, int budget)
/* clean up tx descriptors */
tx_remaining = hip04_tx_reclaim(ndev, false);
while (cnt && !last) {
priv->rx_cnt_remaining += hip04_recv_cnt(priv);
while (priv->rx_cnt_remaining && !last) {
buf = priv->rx_buf[priv->rx_head];
skb = build_skb(buf, priv->rx_buf_size);
if (unlikely(!skb))
@ -544,11 +544,13 @@ static int hip04_rx_poll(struct napi_struct *napi, int budget)
hip04_set_recv_desc(priv, phys);
priv->rx_head = RX_NEXT(priv->rx_head);
if (rx >= budget)
if (rx >= budget) {
--priv->rx_cnt_remaining;
goto done;
}
if (--cnt == 0)
cnt = hip04_recv_cnt(priv);
if (--priv->rx_cnt_remaining == 0)
priv->rx_cnt_remaining += hip04_recv_cnt(priv);
}
if (!(priv->reg_inten & RCV_INT)) {
@ -633,6 +635,7 @@ static int hip04_mac_open(struct net_device *ndev)
int i;
priv->rx_head = 0;
priv->rx_cnt_remaining = 0;
priv->tx_head = 0;
priv->tx_tail = 0;
hip04_reset_ppe(priv);

View file

@ -463,12 +463,31 @@ void mlx4_init_quotas(struct mlx4_dev *dev)
priv->mfunc.master.res_tracker.res_alloc[RES_MPT].quota[pf];
}
static int get_max_gauranteed_vfs_counter(struct mlx4_dev *dev)
static int
mlx4_calc_res_counter_guaranteed(struct mlx4_dev *dev,
struct resource_allocator *res_alloc,
int vf)
{
/* reduce the sink counter */
return (dev->caps.max_counters - 1 -
(MLX4_PF_COUNTERS_PER_PORT * MLX4_MAX_PORTS))
/ MLX4_MAX_PORTS;
struct mlx4_active_ports actv_ports;
int ports, counters_guaranteed;
/* For master, only allocate according to the number of phys ports */
if (vf == mlx4_master_func_num(dev))
return MLX4_PF_COUNTERS_PER_PORT * dev->caps.num_ports;
/* calculate real number of ports for the VF */
actv_ports = mlx4_get_active_ports(dev, vf);
ports = bitmap_weight(actv_ports.ports, dev->caps.num_ports);
counters_guaranteed = ports * MLX4_VF_COUNTERS_PER_PORT;
/* If we do not have enough counters for this VF, do not
* allocate any for it. '-1' to reduce the sink counter.
*/
if ((res_alloc->res_reserved + counters_guaranteed) >
(dev->caps.max_counters - 1))
return 0;
return counters_guaranteed;
}
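A small worked example (all numbers and per-port constants assumed purely for illustration) of the guarantee calculation above: a VF active on two ports asks for two guaranteed counters, and gets them only if the already-reserved counters plus that request still fit under max_counters - 1 (the '-1' keeps the sink counter out of the budget).

#include <stdio.h>

#define VF_COUNTERS_PER_PORT 1	/* assumed value for the sketch */

static int calc_guaranteed(int max_counters, int res_reserved, int vf_ports)
{
	int guaranteed = vf_ports * VF_COUNTERS_PER_PORT;

	/* '-1' reserves the sink counter */
	if (res_reserved + guaranteed > max_counters - 1)
		return 0;
	return guaranteed;
}

int main(void)
{
	printf("%d\n", calc_guaranteed(128, 100, 2));	/* 2: fits under the cap */
	printf("%d\n", calc_guaranteed(128, 126, 2));	/* 0: not enough counters left */
	return 0;
}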
int mlx4_init_resource_tracker(struct mlx4_dev *dev)
@ -476,7 +495,6 @@ int mlx4_init_resource_tracker(struct mlx4_dev *dev)
struct mlx4_priv *priv = mlx4_priv(dev);
int i, j;
int t;
int max_vfs_guarantee_counter = get_max_gauranteed_vfs_counter(dev);
priv->mfunc.master.res_tracker.slave_list =
kzalloc(dev->num_slaves * sizeof(struct slave_list),
@ -593,16 +611,8 @@ int mlx4_init_resource_tracker(struct mlx4_dev *dev)
break;
case RES_COUNTER:
res_alloc->quota[t] = dev->caps.max_counters;
if (t == mlx4_master_func_num(dev))
res_alloc->guaranteed[t] =
MLX4_PF_COUNTERS_PER_PORT *
MLX4_MAX_PORTS;
else if (t <= max_vfs_guarantee_counter)
res_alloc->guaranteed[t] =
MLX4_VF_COUNTERS_PER_PORT *
MLX4_MAX_PORTS;
else
res_alloc->guaranteed[t] = 0;
res_alloc->guaranteed[t] =
mlx4_calc_res_counter_guaranteed(dev, res_alloc, t);
res_alloc->res_free -= res_alloc->guaranteed[t];
break;
default:

View file

@ -336,7 +336,7 @@ static void sr_set_multicast(struct net_device *net)
static int sr_mdio_read(struct net_device *net, int phy_id, int loc)
{
struct usbnet *dev = netdev_priv(net);
__le16 res;
__le16 res = 0;
mutex_lock(&dev->phy_mutex);
sr_set_sw_mii(dev);

View file

@ -2006,8 +2006,11 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
ttl = info->key.ttl;
tos = info->key.tos;
if (info->options_len)
if (info->options_len) {
if (info->options_len < sizeof(*md))
goto drop;
md = ip_tunnel_info_opts(info);
}
} else {
md->gbp = skb->mark;
}

View file

@ -132,6 +132,10 @@ ath6kl_usb_alloc_urb_from_pipe(struct ath6kl_usb_pipe *pipe)
struct ath6kl_urb_context *urb_context = NULL;
unsigned long flags;
/* bail if this pipe is not initialized */
if (!pipe->ar_usb)
return NULL;
spin_lock_irqsave(&pipe->ar_usb->cs_lock, flags);
if (!list_empty(&pipe->urb_list_head)) {
urb_context =
@ -150,6 +154,10 @@ static void ath6kl_usb_free_urb_to_pipe(struct ath6kl_usb_pipe *pipe,
{
unsigned long flags;
/* bail if this pipe is not initialized */
if (!pipe->ar_usb)
return;
spin_lock_irqsave(&pipe->ar_usb->cs_lock, flags);
pipe->urb_cnt++;

View file

@ -924,6 +924,7 @@ static int __init unittest_data_add(void)
of_fdt_unflatten_tree(unittest_data, &unittest_data_node);
if (!unittest_data_node) {
pr_warn("%s: No tree to attach; not running tests\n", __func__);
kfree(unittest_data);
return -ENODATA;
}
of_node_set_flag(unittest_data_node, OF_DETACHED);

View file

@ -609,7 +609,13 @@ static int pfuze100_regulator_probe(struct i2c_client *client,
/* SW2~SW4 high bit check and modify the voltage value table */
if (i >= sw_check_start && i <= sw_check_end) {
regmap_read(pfuze_chip->regmap, desc->vsel_reg, &val);
ret = regmap_read(pfuze_chip->regmap,
desc->vsel_reg, &val);
if (ret) {
dev_err(&client->dev, "Fails to read from the register.\n");
return ret;
}
if (val & sw_hi) {
if (pfuze_chip->chip_id == PFUZE3000) {
desc->volt_table = pfuze3000_sw2hi;

View file

@ -173,19 +173,14 @@ static int ti_abb_wait_txdone(struct device *dev, struct ti_abb *abb)
while (timeout++ <= abb->settling_time) {
status = ti_abb_check_txdone(abb);
if (status)
break;
return 0;
udelay(1);
}
if (timeout > abb->settling_time) {
dev_warn_ratelimited(dev,
"%s:TRANXDONE timeout(%duS) int=0x%08x\n",
__func__, timeout, readl(abb->int_base));
return -ETIMEDOUT;
}
return 0;
dev_warn_ratelimited(dev, "%s:TRANXDONE timeout(%duS) int=0x%08x\n",
__func__, timeout, readl(abb->int_base));
return -ETIMEDOUT;
}
/**
@ -205,19 +200,14 @@ static int ti_abb_clear_all_txdone(struct device *dev, const struct ti_abb *abb)
status = ti_abb_check_txdone(abb);
if (!status)
break;
return 0;
udelay(1);
}
if (timeout > abb->settling_time) {
dev_warn_ratelimited(dev,
"%s:TRANXDONE timeout(%duS) int=0x%08x\n",
__func__, timeout, readl(abb->int_base));
return -ETIMEDOUT;
}
return 0;
dev_warn_ratelimited(dev, "%s:TRANXDONE timeout(%duS) int=0x%08x\n",
__func__, timeout, readl(abb->int_base));
return -ETIMEDOUT;
}
/**

View file

@ -1013,7 +1013,7 @@ config SCSI_SNI_53C710
config 53C700_LE_ON_BE
bool
depends on SCSI_LASI700
depends on SCSI_LASI700 || SCSI_SNI_53C710
default y
config SCSI_STEX

View file

@ -78,10 +78,8 @@ static int snirm710_probe(struct platform_device *dev)
base = res->start;
hostdata = kzalloc(sizeof(*hostdata), GFP_KERNEL);
if (!hostdata) {
dev_printk(KERN_ERR, dev, "Failed to allocate host data\n");
if (!hostdata)
return -ENOMEM;
}
hostdata->dev = &dev->dev;
dma_set_mask(&dev->dev, DMA_BIT_MASK(32));

View file

@ -1056,27 +1056,6 @@ passthrough_parse_cdb(struct se_cmd *cmd,
{
unsigned char *cdb = cmd->t_task_cdb;
/*
* Clear a LUN set in the CDB if the initiator talking to us spoke
* an old standards version, as we can't assume the underlying device
* won't choke on it.
*/
switch (cdb[0]) {
case READ_10: /* SBC - RDProtect */
case READ_12: /* SBC - RDProtect */
case READ_16: /* SBC - RDProtect */
case SEND_DIAGNOSTIC: /* SPC - SELF-TEST Code */
case VERIFY: /* SBC - VRProtect */
case VERIFY_16: /* SBC - VRProtect */
case WRITE_VERIFY: /* SBC - VRProtect */
case WRITE_VERIFY_12: /* SBC - VRProtect */
case MAINTENANCE_IN: /* SPC - Parameter Data Format for SA RTPG */
break;
default:
cdb[1] &= 0x1f; /* clear logical unit number */
break;
}
/*
* For REPORT LUNS we always need to emulate the response, for everything
* else, pass it up.

View file

@ -94,9 +94,20 @@ static void __iomem *ring_options_base(struct tb_ring *ring)
return io;
}
static void ring_iowrite16desc(struct tb_ring *ring, u32 value, u32 offset)
static void ring_iowrite_cons(struct tb_ring *ring, u16 cons)
{
iowrite16(value, ring_desc_base(ring) + offset);
/*
* The other 16 bits in the register are read-only and writes to them
* are ignored by the hardware, so we can save one ioread32() by
* filling the read-only bits with zeroes.
*/
iowrite32(cons, ring_desc_base(ring) + 8);
}
static void ring_iowrite_prod(struct tb_ring *ring, u16 prod)
{
/* See ring_iowrite_cons() above for explanation */
iowrite32(prod << 16, ring_desc_base(ring) + 8);
}
static void ring_iowrite32desc(struct tb_ring *ring, u32 value, u32 offset)
@ -148,7 +159,10 @@ static void ring_write_descriptors(struct tb_ring *ring)
descriptor->sof = frame->sof;
}
ring->head = (ring->head + 1) % ring->size;
ring_iowrite16desc(ring, ring->head, ring->is_tx ? 10 : 8);
if (ring->is_tx)
ring_iowrite_prod(ring, ring->head);
else
ring_iowrite_cons(ring, ring->head);
}
}
@ -368,7 +382,7 @@ void ring_stop(struct tb_ring *ring)
ring_iowrite32options(ring, 0, 0);
ring_iowrite64desc(ring, 0, 0);
ring_iowrite16desc(ring, 0, ring->is_tx ? 10 : 8);
ring_iowrite32desc(ring, 0, 8);
ring_iowrite32desc(ring, 0, 12);
ring->head = 0;
ring->tail = 0;
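A small standalone sketch (not the driver code) of the register packing the two new helpers rely on: the producer index occupies bits 31:16 of the 32-bit descriptor register and the consumer index bits 15:0, so each helper can write the whole 32-bit word with the other half left as zero.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t prod = 0x0042, cons = 0x0007;

	uint32_t prod_word = (uint32_t)prod << 16;	/* bits 31:16, as in ring_iowrite_prod() */
	uint32_t cons_word = cons;			/* bits 15:0,  as in ring_iowrite_cons() */

	printf("prod word: 0x%08x\n", prod_word);
	printf("cons word: 0x%08x\n", cons_word);
	return 0;
}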

View file

@ -332,6 +332,7 @@ struct sc16is7xx_port {
struct kthread_worker kworker;
struct task_struct *kworker_task;
struct kthread_work irq_work;
struct mutex efr_lock;
struct sc16is7xx_one p[0];
};
@ -496,6 +497,21 @@ static int sc16is7xx_set_baud(struct uart_port *port, int baud)
div /= 4;
}
/* In an amazing feat of design, the Enhanced Features Register shares
* the address of the Interrupt Identification Register, and is
* switched in by writing a magic value (0xbf) to the Line Control
* Register. Any interrupt firing during this time will see the EFR
* where it expects the IIR to be, leading to "Unexpected interrupt"
* messages.
*
* Prevent this possibility by claiming a mutex while accessing the
* EFR, and claiming the same mutex from within the interrupt handler.
* This is similar to disabling the interrupt, but that doesn't work
* because the bulk of the interrupt processing is run as a workqueue
* job in thread context.
*/
mutex_lock(&s->efr_lock);
lcr = sc16is7xx_port_read(port, SC16IS7XX_LCR_REG);
/* Open the LCR divisors for configuration */
@ -511,6 +527,8 @@ static int sc16is7xx_set_baud(struct uart_port *port, int baud)
/* Put LCR back to the normal mode */
sc16is7xx_port_write(port, SC16IS7XX_LCR_REG, lcr);
mutex_unlock(&s->efr_lock);
sc16is7xx_port_update(port, SC16IS7XX_MCR_REG,
SC16IS7XX_MCR_CLKSEL_BIT,
prescaler);
@ -693,6 +711,8 @@ static void sc16is7xx_ist(struct kthread_work *ws)
{
struct sc16is7xx_port *s = to_sc16is7xx_port(ws, irq_work);
mutex_lock(&s->efr_lock);
while (1) {
bool keep_polling = false;
int i;
@ -702,6 +722,8 @@ static void sc16is7xx_ist(struct kthread_work *ws)
if (!keep_polling)
break;
}
mutex_unlock(&s->efr_lock);
}
static irqreturn_t sc16is7xx_irq(int irq, void *dev_id)
@ -888,6 +910,9 @@ static void sc16is7xx_set_termios(struct uart_port *port,
if (!(termios->c_cflag & CREAD))
port->ignore_status_mask |= SC16IS7XX_LSR_BRK_ERROR_MASK;
/* As above, claim the mutex while accessing the EFR. */
mutex_lock(&s->efr_lock);
sc16is7xx_port_write(port, SC16IS7XX_LCR_REG,
SC16IS7XX_LCR_CONF_MODE_B);
@ -909,6 +934,8 @@ static void sc16is7xx_set_termios(struct uart_port *port,
/* Update LCR register */
sc16is7xx_port_write(port, SC16IS7XX_LCR_REG, lcr);
mutex_unlock(&s->efr_lock);
/* Get baud rate generator configuration */
baud = uart_get_baud_rate(port, termios, old,
port->uartclk / 16 / 4 / 0xffff,
@ -1172,6 +1199,7 @@ static int sc16is7xx_probe(struct device *dev,
s->regmap = regmap;
s->devtype = devtype;
dev_set_drvdata(dev, s);
mutex_init(&s->efr_lock);
init_kthread_worker(&s->kworker);
init_kthread_work(&s->irq_work, sc16is7xx_ist);

View file

@ -67,6 +67,9 @@ EXPORT_SYMBOL_GPL(mctrl_gpio_set);
struct gpio_desc *mctrl_gpio_to_gpiod(struct mctrl_gpios *gpios,
enum mctrl_gpio_idx gidx)
{
if (gpios == NULL)
return NULL;
return gpios->gpio[gidx];
}
EXPORT_SYMBOL_GPL(mctrl_gpio_to_gpiod);

View file

@ -107,6 +107,8 @@ EXPORT_SYMBOL_GPL(ehci_cf_port_reset_rwsem);
static void hub_release(struct kref *kref);
static int usb_reset_and_verify_device(struct usb_device *udev);
static int hub_port_disable(struct usb_hub *hub, int port1, int set_state);
static bool hub_port_warm_reset_required(struct usb_hub *hub, int port1,
u16 portstatus);
static inline char *portspeed(struct usb_hub *hub, int portstatus)
{
@ -1103,6 +1105,11 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
USB_PORT_FEAT_ENABLE);
}
/* Make sure a warm-reset request is handled by port_event */
if (type == HUB_RESUME &&
hub_port_warm_reset_required(hub, port1, portstatus))
set_bit(port1, hub->event_bits);
/*
* Add debounce if USB3 link is in polling/link training state.
* Link will automatically transition to Enabled state after

View file

@ -499,11 +499,11 @@ static ssize_t ld_usb_read(struct file *file, char __user *buffer, size_t count,
retval = -EFAULT;
goto unlock_exit;
}
dev->ring_tail = (dev->ring_tail+1) % ring_buffer_size;
retval = bytes_to_read;
spin_lock_irq(&dev->rbsl);
dev->ring_tail = (dev->ring_tail + 1) % ring_buffer_size;
if (dev->buffer_overflow) {
dev->buffer_overflow = 0;
spin_unlock_irq(&dev->rbsl);
@ -584,7 +584,7 @@ static ssize_t ld_usb_write(struct file *file, const char __user *buffer,
1 << 8, 0,
dev->interrupt_out_buffer,
bytes_to_write,
USB_CTRL_SET_TIMEOUT * HZ);
USB_CTRL_SET_TIMEOUT);
if (retval < 0)
dev_err(&dev->intf->dev,
"Couldn't submit HID_REQ_SET_REPORT %d\n",

View file

@ -910,7 +910,7 @@ static int tower_probe (struct usb_interface *interface, const struct usb_device
get_version_reply,
sizeof(*get_version_reply),
1000);
if (result < sizeof(*get_version_reply)) {
if (result != sizeof(*get_version_reply)) {
if (result >= 0)
result = -EIO;
dev_err(idev, "get version request failed: %d\n", result);

View file

@ -604,6 +604,10 @@ static int firm_send_command(struct usb_serial_port *port, __u8 command,
command_port = port->serial->port[COMMAND_PORT];
command_info = usb_get_serial_port_data(command_port);
if (command_port->bulk_out_size < datasize + 1)
return -EIO;
mutex_lock(&command_info->mutex);
command_info->command_finished = false;
@ -677,6 +681,7 @@ static void firm_setup_port(struct tty_struct *tty)
struct device *dev = &port->dev;
struct whiteheat_port_settings port_settings;
unsigned int cflag = tty->termios.c_cflag;
speed_t baud;
port_settings.port = port->port_number + 1;
@ -737,11 +742,13 @@ static void firm_setup_port(struct tty_struct *tty)
dev_dbg(dev, "%s - XON = %2x, XOFF = %2x\n", __func__, port_settings.xon, port_settings.xoff);
/* get the baud rate wanted */
port_settings.baud = tty_get_baud_rate(tty);
dev_dbg(dev, "%s - baud rate = %d\n", __func__, port_settings.baud);
baud = tty_get_baud_rate(tty);
port_settings.baud = cpu_to_le32(baud);
dev_dbg(dev, "%s - baud rate = %u\n", __func__, baud);
/* fixme: should set validated settings */
tty_encode_baud_rate(tty, port_settings.baud, port_settings.baud);
tty_encode_baud_rate(tty, baud, baud);
/* handle any settings that aren't specified in the tty structure */
port_settings.lloop = 0;

View file

@ -91,7 +91,7 @@ struct whiteheat_simple {
struct whiteheat_port_settings {
__u8 port; /* port number (1 to N) */
__u32 baud; /* any value 7 - 460800, firmware calculates
__le32 baud; /* any value 7 - 460800, firmware calculates
best fit; arrives little endian */
__u8 bits; /* 5, 6, 7, or 8 */
__u8 stop; /* 1 or 2, default 1 (2 = 1.5 if bits = 5) */

View file

@ -772,29 +772,9 @@ static int uas_slave_alloc(struct scsi_device *sdev)
{
struct uas_dev_info *devinfo =
(struct uas_dev_info *)sdev->host->hostdata;
int maxp;
sdev->hostdata = devinfo;
/*
* We have two requirements here. We must satisfy the requirements
* of the physical HC and the demands of the protocol, as we
* definitely want no additional memory allocation in this path
* ruling out using bounce buffers.
*
* For a transmission on USB to continue we must never send
* a packet that is smaller than maxpacket. Hence the length of each
* scatterlist element except the last must be divisible by the
* Bulk maxpacket value.
* If the HC does not ensure that through SG,
* the upper layer must do that. We must assume nothing
* about the capabilities of the HC, so we use the most
* pessimistic requirement.
*/
maxp = usb_maxpacket(devinfo->udev, devinfo->data_in_pipe, 0);
blk_queue_virt_boundary(sdev->request_queue, maxp - 1);
/*
* The protocol has no requirements on alignment in the strict sense.
* Controllers may or may not have alignment restrictions.

View file

@ -14,14 +14,31 @@
#include <linux/err.h>
#include <linux/fs.h>
static inline bool spacetab(char c) { return c == ' ' || c == '\t'; }
static inline char *next_non_spacetab(char *first, const char *last)
{
for (; first <= last; first++)
if (!spacetab(*first))
return first;
return NULL;
}
static inline char *next_terminator(char *first, const char *last)
{
for (; first <= last; first++)
if (spacetab(*first) || !*first)
return first;
return NULL;
}
static int load_script(struct linux_binprm *bprm)
{
const char *i_arg, *i_name;
char *cp;
char *cp, *buf_end;
struct file *file;
char interp[BINPRM_BUF_SIZE];
int retval;
/* Not ours to exec if we don't start with "#!". */
if ((bprm->buf[0] != '#') || (bprm->buf[1] != '!'))
return -ENOEXEC;
@ -34,18 +51,40 @@ static int load_script(struct linux_binprm *bprm)
if (bprm->interp_flags & BINPRM_FLAGS_PATH_INACCESSIBLE)
return -ENOENT;
/*
* This section does the #! interpretation.
* Sorta complicated, but hopefully it will work. -TYT
*/
/* Release since we are not mapping a binary into memory. */
allow_write_access(bprm->file);
fput(bprm->file);
bprm->file = NULL;
bprm->buf[BINPRM_BUF_SIZE - 1] = '\0';
if ((cp = strchr(bprm->buf, '\n')) == NULL)
cp = bprm->buf+BINPRM_BUF_SIZE-1;
/*
* This section handles parsing the #! line into separate
* interpreter path and argument strings. We must be careful
* because bprm->buf is not yet guaranteed to be NUL-terminated
* (though the buffer will have trailing NUL padding when the
* file size was smaller than the buffer size).
*
* We do not want to exec a truncated interpreter path, so either
* we find a newline (which indicates nothing is truncated), or
* we find a space/tab/NUL after the interpreter path (which
* itself may be preceded by spaces/tabs). Truncating the
* arguments is fine: the interpreter can re-read the script to
* parse them on its own.
*/
buf_end = bprm->buf + sizeof(bprm->buf) - 1;
cp = strnchr(bprm->buf, sizeof(bprm->buf), '\n');
if (!cp) {
cp = next_non_spacetab(bprm->buf + 2, buf_end);
if (!cp)
return -ENOEXEC; /* Entire buf is spaces/tabs */
/*
* If there is no later space/tab/NUL we must assume the
* interpreter path is truncated.
*/
if (!next_terminator(cp, buf_end))
return -ENOEXEC;
cp = buf_end;
}
/* NUL-terminate the buffer and any trailing spaces/tabs. */
*cp = '\0';
while (cp > bprm->buf) {
cp--;
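A userspace sketch (an assumption for illustration, with a hypothetical BUF_SIZE; not the binfmt_script code itself) of the acceptance rule described in the comment above: a shebang line is usable if it contains a newline, or if the interpreter path is followed by a space, tab or NUL inside the fixed-size buffer; otherwise the path may be truncated and the exec is refused.

#include <stdbool.h>
#include <string.h>

#define BUF_SIZE 128	/* hypothetical stand-in for BINPRM_BUF_SIZE */

static bool spacetab(char c) { return c == ' ' || c == '\t'; }

static bool shebang_usable(const char *buf)
{
	const char *end = buf + BUF_SIZE - 1;
	const char *cp = memchr(buf, '\n', BUF_SIZE);

	if (cp)
		return true;			/* newline seen: nothing was truncated */
	for (cp = buf + 2; cp <= end && spacetab(*cp); cp++)
		;				/* skip spaces/tabs after "#!" */
	if (cp > end)
		return false;			/* only whitespace after "#!" */
	for (; cp <= end; cp++)
		if (spacetab(*cp) || !*cp)
			return true;		/* path terminates inside the buffer */
	return false;				/* interpreter path is truncated */
}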

View file

@ -1152,6 +1152,11 @@ void cifsFileInfo_put(struct cifsFileInfo *cifs_file);
struct cifsInodeInfo {
bool can_cache_brlcks;
struct list_head llist; /* locks held by this inode */
/*
* NOTE: Some code paths call down_read(lock_sem) twice, so
* we must always use cifs_down_write() instead of down_write()
* for this semaphore to avoid deadlocks.
*/
struct rw_semaphore lock_sem; /* protect the fields above */
/* BB add in lists for dirty pages i.e. write caching info for oplock */
struct list_head openFileList;

View file

@ -138,6 +138,7 @@ extern int cifs_unlock_range(struct cifsFileInfo *cfile,
struct file_lock *flock, const unsigned int xid);
extern int cifs_push_mandatory_locks(struct cifsFileInfo *cfile);
extern void cifs_down_write(struct rw_semaphore *sem);
extern struct cifsFileInfo *cifs_new_fileinfo(struct cifs_fid *fid,
struct file *file,
struct tcon_link *tlink,

View file

@ -280,6 +280,13 @@ cifs_has_mand_locks(struct cifsInodeInfo *cinode)
return has_locks;
}
void
cifs_down_write(struct rw_semaphore *sem)
{
while (!down_write_trylock(sem))
msleep(10);
}
struct cifsFileInfo *
cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
struct tcon_link *tlink, __u32 oplock)
@ -305,7 +312,7 @@ cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
INIT_LIST_HEAD(&fdlocks->locks);
fdlocks->cfile = cfile;
cfile->llist = fdlocks;
down_write(&cinode->lock_sem);
cifs_down_write(&cinode->lock_sem);
list_add(&fdlocks->llist, &cinode->llist);
up_write(&cinode->lock_sem);
@ -438,7 +445,7 @@ void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
* Delete any outstanding lock records. We'll lose them when the file
* is closed anyway.
*/
down_write(&cifsi->lock_sem);
cifs_down_write(&cifsi->lock_sem);
list_for_each_entry_safe(li, tmp, &cifs_file->llist->locks, llist) {
list_del(&li->llist);
cifs_del_lock_waiters(li);
@ -947,7 +954,7 @@ static void
cifs_lock_add(struct cifsFileInfo *cfile, struct cifsLockInfo *lock)
{
struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
down_write(&cinode->lock_sem);
cifs_down_write(&cinode->lock_sem);
list_add_tail(&lock->llist, &cfile->llist->locks);
up_write(&cinode->lock_sem);
}
@ -969,7 +976,7 @@ cifs_lock_add_if(struct cifsFileInfo *cfile, struct cifsLockInfo *lock,
try_again:
exist = false;
down_write(&cinode->lock_sem);
cifs_down_write(&cinode->lock_sem);
exist = cifs_find_lock_conflict(cfile, lock->offset, lock->length,
lock->type, &conf_lock, CIFS_LOCK_OP);
@ -991,7 +998,7 @@ try_again:
(lock->blist.next == &lock->blist));
if (!rc)
goto try_again;
down_write(&cinode->lock_sem);
cifs_down_write(&cinode->lock_sem);
list_del_init(&lock->blist);
}
@ -1044,7 +1051,7 @@ cifs_posix_lock_set(struct file *file, struct file_lock *flock)
return rc;
try_again:
down_write(&cinode->lock_sem);
cifs_down_write(&cinode->lock_sem);
if (!cinode->can_cache_brlcks) {
up_write(&cinode->lock_sem);
return rc;
@ -1242,7 +1249,7 @@ cifs_push_locks(struct cifsFileInfo *cfile)
int rc = 0;
/* we are going to update can_cache_brlcks here - need a write access */
down_write(&cinode->lock_sem);
cifs_down_write(&cinode->lock_sem);
if (!cinode->can_cache_brlcks) {
up_write(&cinode->lock_sem);
return rc;
@ -1430,7 +1437,7 @@ cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
if (!buf)
return -ENOMEM;
down_write(&cinode->lock_sem);
cifs_down_write(&cinode->lock_sem);
for (i = 0; i < 2; i++) {
cur = buf;
num = 0;

View file

@ -130,10 +130,6 @@ static const struct smb_to_posix_error mapping_table_ERRSRV[] = {
{0, 0}
};
static const struct smb_to_posix_error mapping_table_ERRHRD[] = {
{0, 0}
};
/*
* Convert a string containing text IPv4 or IPv6 address to binary form.
*

View file

@ -138,7 +138,7 @@ smb2_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
cur = buf;
down_write(&cinode->lock_sem);
cifs_down_write(&cinode->lock_sem);
list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
if (flock->fl_start > li->offset ||
(flock->fl_start + length) <

View file

@ -1676,6 +1676,19 @@ int fuse_do_setattr(struct inode *inode, struct iattr *attr,
if (attr->ia_valid & ATTR_SIZE)
is_truncate = true;
/* Flush dirty data/metadata before non-truncate SETATTR */
if (is_wb && S_ISREG(inode->i_mode) &&
attr->ia_valid &
(ATTR_MODE | ATTR_UID | ATTR_GID | ATTR_MTIME_SET |
ATTR_TIMES_SET)) {
err = write_inode_now(inode, true);
if (err)
return err;
fuse_set_nowrite(inode);
fuse_release_nowrite(inode);
}
if (is_truncate) {
fuse_set_nowrite(inode);
set_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);

View file

@ -217,7 +217,7 @@ int fuse_open_common(struct inode *inode, struct file *file, bool isdir)
{
struct fuse_conn *fc = get_fuse_conn(inode);
int err;
bool lock_inode = (file->f_flags & O_TRUNC) &&
bool is_wb_truncate = (file->f_flags & O_TRUNC) &&
fc->atomic_o_trunc &&
fc->writeback_cache;
@ -225,16 +225,20 @@ int fuse_open_common(struct inode *inode, struct file *file, bool isdir)
if (err)
return err;
if (lock_inode)
if (is_wb_truncate) {
mutex_lock(&inode->i_mutex);
fuse_set_nowrite(inode);
}
err = fuse_do_open(fc, get_node_id(inode), file, isdir);
if (!err)
fuse_finish_open(inode, file);
if (lock_inode)
if (is_wb_truncate) {
fuse_release_nowrite(inode);
mutex_unlock(&inode->i_mutex);
}
return err;
}

View file

@ -5255,6 +5255,7 @@ int nfs4_proc_setclientid(struct nfs_client *clp, u32 program,
}
status = task->tk_status;
if (setclientid.sc_cred) {
kfree(clp->cl_acceptor);
clp->cl_acceptor = rpcauth_stringify_acceptor(setclientid.sc_cred);
put_rpccred(setclientid.sc_cred);
}

View file

@ -289,7 +289,7 @@ static int ocfs2_info_scan_inode_alloc(struct ocfs2_super *osb,
if (inode_alloc)
mutex_lock(&inode_alloc->i_mutex);
if (o2info_coherent(&fi->ifi_req)) {
if (inode_alloc && o2info_coherent(&fi->ifi_req)) {
status = ocfs2_inode_lock(inode_alloc, &bh, 0);
if (status < 0) {
mlog_errno(status);

View file

@ -1475,18 +1475,6 @@ static int ocfs2_xa_check_space(struct ocfs2_xa_loc *loc,
return loc->xl_ops->xlo_check_space(loc, xi);
}
static void ocfs2_xa_add_entry(struct ocfs2_xa_loc *loc, u32 name_hash)
{
loc->xl_ops->xlo_add_entry(loc, name_hash);
loc->xl_entry->xe_name_hash = cpu_to_le32(name_hash);
/*
* We can't leave the new entry's xe_name_offset at zero or
* add_namevalue() will go nuts. We set it to the size of our
* storage so that it can never be less than any other entry.
*/
loc->xl_entry->xe_name_offset = cpu_to_le16(loc->xl_size);
}
static void ocfs2_xa_add_namevalue(struct ocfs2_xa_loc *loc,
struct ocfs2_xattr_info *xi)
{
@ -2118,29 +2106,31 @@ static int ocfs2_xa_prepare_entry(struct ocfs2_xa_loc *loc,
if (rc)
goto out;
if (loc->xl_entry) {
if (ocfs2_xa_can_reuse_entry(loc, xi)) {
orig_value_size = loc->xl_entry->xe_value_size;
rc = ocfs2_xa_reuse_entry(loc, xi, ctxt);
if (rc)
goto out;
goto alloc_value;
}
if (!loc->xl_entry) {
rc = -EINVAL;
goto out;
}
if (!ocfs2_xattr_is_local(loc->xl_entry)) {
orig_clusters = ocfs2_xa_value_clusters(loc);
rc = ocfs2_xa_value_truncate(loc, 0, ctxt);
if (rc) {
mlog_errno(rc);
ocfs2_xa_cleanup_value_truncate(loc,
"overwriting",
orig_clusters);
goto out;
}
if (ocfs2_xa_can_reuse_entry(loc, xi)) {
orig_value_size = loc->xl_entry->xe_value_size;
rc = ocfs2_xa_reuse_entry(loc, xi, ctxt);
if (rc)
goto out;
goto alloc_value;
}
if (!ocfs2_xattr_is_local(loc->xl_entry)) {
orig_clusters = ocfs2_xa_value_clusters(loc);
rc = ocfs2_xa_value_truncate(loc, 0, ctxt);
if (rc) {
mlog_errno(rc);
ocfs2_xa_cleanup_value_truncate(loc,
"overwriting",
orig_clusters);
goto out;
}
ocfs2_xa_wipe_namevalue(loc);
} else
ocfs2_xa_add_entry(loc, name_hash);
}
ocfs2_xa_wipe_namevalue(loc);
/*
* If we get here, we have a blank entry. Fill it. We grow our

Some files were not shown because too many files have changed in this diff.