Merge "Merge android-4.4.177 (0c3b8c4) into msm-4.4"

Linux Build Service Account authored on 2019-04-02 03:57:43 -07:00; committed by Gerrit - the friendly Code Review server
commit 6a214b82af
293 changed files with 2646 additions and 935 deletions

View file

@ -82,6 +82,29 @@ Only the lists of names from directories are merged. Other content
such as metadata and extended attributes are reported for the upper
directory only. These attributes of the lower directory are hidden.
credentials
-----------
By default, all access to the upper, lower and work directories is
performed with the mounter's MAC and DAC credentials, recorded at mount
time. Incoming accesses are still checked against the caller's own
credentials.
For the case where the caller's MAC or DAC credentials do not overlap
with the mounter's, a use case available in older versions of the
driver, the override_creds mount flag can be turned off. This helps
when the caller has legitimate credentials that the mounter does not.
Several unintended side effects occur, though. A caller lacking certain
key capabilities, or running with lower privilege, will not always be
able to delete files or directories, create nodes, or search some
restricted directories. The ability to search and read a directory
entry becomes inconsistent, because the cache mechanism does not retest
credentials: it assumes that a privileged caller fills the cache and
that a lower-privileged caller may then read the directory from that
cache. This uneven security model, in which the cache, upperdir and
workdir are opened with the mounter's privilege but are accessed in a
way that must not create a form of privilege escalation, should only be
used with a strict understanding of the side effects and of the
security policies involved.
whiteouts and opaque directories
--------------------------------
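
To make the override_creds behaviour described in the hunk above concrete, here is a minimal sketch of a mount that turns the override off, so each access is checked against the caller's own MAC and DAC credentials rather than the mounter's. The exact option syntax ("override_creds=off") and the directory paths are assumptions for illustration; they are not taken from this change.

/*
 * Sketch only: mount an overlay with the mounter-credential override
 * turned off. Option string and paths are illustrative assumptions.
 */
#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
        const char *opts = "lowerdir=/lower,upperdir=/upper,"
                           "workdir=/work,override_creds=off";

        if (mount("overlay", "/merged", "overlay", 0, opts)) {
                perror("mount overlay");
                return 1;
        }
        return 0;
}

If such a mount succeeds, file operations under /merged are then subject to the side effects listed in the documentation text above.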

View file

@ -1,6 +1,6 @@
VERSION = 4
PATCHLEVEL = 4
SUBLEVEL = 176
SUBLEVEL = 177
EXTRAVERSION =
NAME = Blurry Fish Butt

View file

@ -286,7 +286,7 @@ static inline __attribute__ ((const)) int __fls(unsigned long x)
/*
* __ffs: Similar to ffs, but zero based (0-31)
*/
static inline __attribute__ ((const)) int __ffs(unsigned long word)
static inline __attribute__ ((const)) unsigned long __ffs(unsigned long word)
{
if (!word)
return word;
@ -346,9 +346,9 @@ static inline __attribute__ ((const)) int ffs(unsigned long x)
/*
* __ffs: Similar to ffs, but zero based (0-31)
*/
static inline __attribute__ ((const)) int __ffs(unsigned long x)
static inline __attribute__ ((const)) unsigned long __ffs(unsigned long x)
{
int n;
unsigned long n;
asm volatile(
" ffs.f %0, %1 \n" /* 0:31; 31(Z) if src 0 */

View file

@ -209,7 +209,7 @@ __arc_copy_from_user(void *to, const void __user *from, unsigned long n)
*/
"=&r" (tmp), "+r" (to), "+r" (from)
:
: "lp_count", "lp_start", "lp_end", "memory");
: "lp_count", "memory");
return n;
}
@ -438,7 +438,7 @@ __arc_copy_to_user(void __user *to, const void *from, unsigned long n)
*/
"=&r" (tmp), "+r" (to), "+r" (from)
:
: "lp_count", "lp_start", "lp_end", "memory");
: "lp_count", "memory");
return n;
}
@ -658,7 +658,7 @@ static inline unsigned long __arc_clear_user(void __user *to, unsigned long n)
" .previous \n"
: "+r"(d_char), "+r"(res)
: "i"(0)
: "lp_count", "lp_start", "lp_end", "memory");
: "lp_count", "memory");
return res;
}
@ -691,7 +691,7 @@ __arc_strncpy_from_user(char *dst, const char __user *src, long count)
" .previous \n"
: "+r"(res), "+r"(dst), "+r"(src), "=r"(val)
: "g"(-EFAULT), "r"(count)
: "lp_count", "lp_start", "lp_end", "memory");
: "lp_count", "memory");
return res;
}

View file

@ -17,6 +17,7 @@
#include <asm/entry.h>
#include <asm/arcregs.h>
#include <asm/cache.h>
#include <asm/irqflags.h>
.macro CPU_EARLY_SETUP
@ -47,6 +48,15 @@
sr r5, [ARC_REG_DC_CTRL]
1:
#ifdef CONFIG_ISA_ARCV2
; Unaligned access is disabled at reset, so re-enable early as
; gcc 7.3.1 (ARC GNU 2018.03) onwards generates unaligned access
; by default
lr r5, [status32]
bset r5, r5, STATUS_AD_BIT
kflag r5
#endif
.endm
.section .init.text, "ax",@progbits

View file

@ -1516,6 +1516,7 @@ config NR_CPUS
config HOTPLUG_CPU
bool "Support for hot-pluggable CPUs"
depends on SMP
select GENERIC_IRQ_MIGRATION
help
Say Y here to experiment with turning CPUs off and on. CPUs
can be controlled through /sys/devices/system/cpu.

View file

@ -150,6 +150,9 @@
interrupt-controller;
#interrupt-cells = <3>;
interrupt-parent = <&gic>;
clock-names = "clkout8";
clocks = <&cmu CLK_FIN_PLL>;
#clock-cells = <1>;
};
mipi_phy: video-phy@10020710 {

View file

@ -0,0 +1,25 @@
/*
* Device tree sources for Exynos5420 TMU sensor configuration
*
* Copyright (c) 2014 Lukasz Majewski <l.majewski@samsung.com>
* Copyright (c) 2017 Krzysztof Kozlowski <krzk@kernel.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*/
#include <dt-bindings/thermal/thermal_exynos.h>
#thermal-sensor-cells = <0>;
samsung,tmu_gain = <8>;
samsung,tmu_reference_voltage = <16>;
samsung,tmu_noise_cancel_mode = <4>;
samsung,tmu_efuse_value = <55>;
samsung,tmu_min_efuse_value = <0>;
samsung,tmu_max_efuse_value = <100>;
samsung,tmu_first_point_trim = <25>;
samsung,tmu_second_point_trim = <85>;
samsung,tmu_default_temp_offset = <50>;
samsung,tmu_cal_type = <TYPE_ONE_POINT_TRIMMING>;

View file

@ -777,7 +777,7 @@
interrupts = <0 65 0>;
clocks = <&clock CLK_TMU>;
clock-names = "tmu_apbif";
#include "exynos4412-tmu-sensor-conf.dtsi"
#include "exynos5420-tmu-sensor-conf.dtsi"
};
tmu_cpu1: tmu@10064000 {
@ -786,7 +786,7 @@
interrupts = <0 183 0>;
clocks = <&clock CLK_TMU>;
clock-names = "tmu_apbif";
#include "exynos4412-tmu-sensor-conf.dtsi"
#include "exynos5420-tmu-sensor-conf.dtsi"
};
tmu_cpu2: tmu@10068000 {
@ -795,7 +795,7 @@
interrupts = <0 184 0>;
clocks = <&clock CLK_TMU>, <&clock CLK_TMU>;
clock-names = "tmu_apbif", "tmu_triminfo_apbif";
#include "exynos4412-tmu-sensor-conf.dtsi"
#include "exynos5420-tmu-sensor-conf.dtsi"
};
tmu_cpu3: tmu@1006c000 {
@ -804,7 +804,7 @@
interrupts = <0 185 0>;
clocks = <&clock CLK_TMU>, <&clock CLK_TMU_GPU>;
clock-names = "tmu_apbif", "tmu_triminfo_apbif";
#include "exynos4412-tmu-sensor-conf.dtsi"
#include "exynos5420-tmu-sensor-conf.dtsi"
};
tmu_gpu: tmu@100a0000 {
@ -813,7 +813,7 @@
interrupts = <0 215 0>;
clocks = <&clock CLK_TMU_GPU>, <&clock CLK_TMU>;
clock-names = "tmu_apbif", "tmu_triminfo_apbif";
#include "exynos4412-tmu-sensor-conf.dtsi"
#include "exynos5420-tmu-sensor-conf.dtsi"
};
thermal-zones {

View file

@ -24,7 +24,6 @@
#ifndef __ASSEMBLY__
struct irqaction;
struct pt_regs;
extern void migrate_irqs(void);
extern void asm_do_IRQ(unsigned int, struct pt_regs *);
void handle_IRQ(unsigned int, struct pt_regs *);

View file

@ -31,7 +31,6 @@
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/seq_file.h>
#include <linux/ratelimit.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/kallsyms.h>
@ -120,72 +119,3 @@ int __init arch_probe_nr_irqs(void)
return nr_irqs;
}
#endif
#ifdef CONFIG_HOTPLUG_CPU
static bool migrate_one_irq(struct irq_desc *desc)
{
struct irq_data *d = irq_desc_get_irq_data(desc);
const struct cpumask *affinity = irq_data_get_affinity_mask(d);
struct irq_chip *c;
bool ret = false;
struct cpumask available_cpus;
/*
* If this is a per-CPU interrupt, or the affinity does not
* include this CPU, then we have nothing to do.
*/
if (irqd_is_per_cpu(d) || !cpumask_test_cpu(smp_processor_id(), affinity))
return false;
cpumask_copy(&available_cpus, affinity);
cpumask_andnot(&available_cpus, &available_cpus, cpu_isolated_mask);
affinity = &available_cpus;
if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
cpumask_andnot(&available_cpus, cpu_online_mask,
cpu_isolated_mask);
if (cpumask_empty(affinity))
affinity = cpu_online_mask;
ret = true;
}
c = irq_data_get_irq_chip(d);
if (!c->irq_set_affinity)
pr_debug("IRQ%u: unable to set affinity\n", d->irq);
else if (c->irq_set_affinity(d, affinity, false) == IRQ_SET_MASK_OK && ret)
cpumask_copy(irq_data_get_affinity_mask(d), affinity);
return ret;
}
/*
* The current CPU has been marked offline. Migrate IRQs off this CPU.
* If the affinity settings do not allow other CPUs, force them onto any
* available CPU.
*
* Note: we must iterate over all IRQs, whether they have an attached
* action structure or not, as we need to get chained interrupts too.
*/
void migrate_irqs(void)
{
unsigned int i;
struct irq_desc *desc;
unsigned long flags;
local_irq_save(flags);
for_each_irq_desc(i, desc) {
bool affinity_broken;
raw_spin_lock(&desc->lock);
affinity_broken = migrate_one_irq(desc);
raw_spin_unlock(&desc->lock);
if (affinity_broken)
pr_warn_ratelimited("IRQ%u no longer affine to CPU%u\n",
i, smp_processor_id());
}
local_irq_restore(flags);
}
#endif /* CONFIG_HOTPLUG_CPU */

View file

@ -93,77 +93,6 @@ void arch_cpu_idle_exit(void)
idle_notifier_call_chain(IDLE_END);
}
/*
* dump a block of kernel memory from around the given address
*/
static void show_data(unsigned long addr, int nbytes, const char *name)
{
int i, j;
int nlines;
u32 *p;
/*
* don't attempt to dump non-kernel addresses or
* values that are probably just small negative numbers
*/
if (addr < PAGE_OFFSET || addr > -256UL)
return;
printk("\n%s: %#lx:\n", name, addr);
/*
* round address down to a 32 bit boundary
* and always dump a multiple of 32 bytes
*/
p = (u32 *)(addr & ~(sizeof(u32) - 1));
nbytes += (addr & (sizeof(u32) - 1));
nlines = (nbytes + 31) / 32;
for (i = 0; i < nlines; i++) {
/*
* just display low 16 bits of address to keep
* each line of the dump < 80 characters
*/
printk("%04lx ", (unsigned long)p & 0xffff);
for (j = 0; j < 8; j++) {
u32 data;
if (probe_kernel_address(p, data)) {
printk(" ********");
} else {
printk(" %08x", data);
}
++p;
}
printk("\n");
}
}
static void show_extra_register_data(struct pt_regs *regs, int nbytes)
{
mm_segment_t fs;
fs = get_fs();
set_fs(KERNEL_DS);
show_data(regs->ARM_pc - nbytes, nbytes * 2, "PC");
show_data(regs->ARM_lr - nbytes, nbytes * 2, "LR");
show_data(regs->ARM_sp - nbytes, nbytes * 2, "SP");
show_data(regs->ARM_ip - nbytes, nbytes * 2, "IP");
show_data(regs->ARM_fp - nbytes, nbytes * 2, "FP");
show_data(regs->ARM_r0 - nbytes, nbytes * 2, "R0");
show_data(regs->ARM_r1 - nbytes, nbytes * 2, "R1");
show_data(regs->ARM_r2 - nbytes, nbytes * 2, "R2");
show_data(regs->ARM_r3 - nbytes, nbytes * 2, "R3");
show_data(regs->ARM_r4 - nbytes, nbytes * 2, "R4");
show_data(regs->ARM_r5 - nbytes, nbytes * 2, "R5");
show_data(regs->ARM_r6 - nbytes, nbytes * 2, "R6");
show_data(regs->ARM_r7 - nbytes, nbytes * 2, "R7");
show_data(regs->ARM_r8 - nbytes, nbytes * 2, "R8");
show_data(regs->ARM_r9 - nbytes, nbytes * 2, "R9");
show_data(regs->ARM_r10 - nbytes, nbytes * 2, "R10");
set_fs(fs);
}
void __show_regs(struct pt_regs *regs)
{
unsigned long flags;
@ -251,8 +180,6 @@ void __show_regs(struct pt_regs *regs)
printk("Control: %08x%s\n", ctrl, buf);
}
#endif
show_extra_register_data(regs, 128);
}
void show_regs(struct pt_regs * regs)

View file

@ -218,7 +218,7 @@ int __cpu_disable(void)
/*
* OK - migrate IRQs away from this CPU
*/
migrate_irqs();
irq_migrate_all_off_this_cpu();
/*
* Flush user cache and TLB mappings, and then remove this CPU

View file

@ -87,11 +87,10 @@ static unsigned long mmio_read_buf(char *buf, unsigned int len)
/**
* kvm_handle_mmio_return -- Handle MMIO loads after user space emulation
* or in-kernel IO emulation
*
* @vcpu: The VCPU pointer
* @run: The VCPU run struct containing the mmio data
*
* This should only be called after returning from userspace for MMIO load
* emulation.
*/
int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
@ -207,14 +206,17 @@ int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
run->mmio.is_write = is_write;
run->mmio.phys_addr = fault_ipa;
run->mmio.len = len;
memcpy(run->mmio.data, data_buf, len);
if (!ret) {
/* We handled the access successfully in the kernel. */
if (!is_write)
memcpy(run->mmio.data, data_buf, len);
kvm_handle_mmio_return(vcpu, run);
return 1;
}
if (is_write)
memcpy(run->mmio.data, data_buf, len);
run->exit_reason = KVM_EXIT_MMIO;
return 0;
}

View file

@ -115,6 +115,7 @@ static int omap4_dsi_mux_pads(int dsi_id, unsigned lanes)
u32 enable_mask, enable_shift;
u32 pipd_mask, pipd_shift;
u32 reg;
int ret;
if (dsi_id == 0) {
enable_mask = OMAP4_DSI1_LANEENABLE_MASK;
@ -130,7 +131,11 @@ static int omap4_dsi_mux_pads(int dsi_id, unsigned lanes)
return -ENODEV;
}
regmap_read(omap4_dsi_mux_syscon, OMAP4_DSIPHY_SYSCON_OFFSET, &reg);
ret = regmap_read(omap4_dsi_mux_syscon,
OMAP4_DSIPHY_SYSCON_OFFSET,
&reg);
if (ret)
return ret;
reg &= ~enable_mask;
reg &= ~pipd_mask;

View file

@ -70,16 +70,16 @@ static int osiris_dvs_notify(struct notifier_block *nb,
switch (val) {
case CPUFREQ_PRECHANGE:
if (old_dvs & !new_dvs ||
cur_dvs & !new_dvs) {
if ((old_dvs && !new_dvs) ||
(cur_dvs && !new_dvs)) {
pr_debug("%s: exiting dvs\n", __func__);
cur_dvs = false;
gpio_set_value(OSIRIS_GPIO_DVS, 1);
}
break;
case CPUFREQ_POSTCHANGE:
if (!old_dvs & new_dvs ||
!cur_dvs & new_dvs) {
if ((!old_dvs && new_dvs) ||
(!cur_dvs && new_dvs)) {
pr_debug("entering dvs\n");
cur_dvs = true;
gpio_set_value(OSIRIS_GPIO_DVS, 0);

View file

@ -238,8 +238,6 @@ static int pxa_ssp_remove(struct platform_device *pdev)
if (ssp == NULL)
return -ENODEV;
iounmap(ssp->mmio_base);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
release_mem_region(res->start, resource_size(res));
@ -249,7 +247,6 @@ static int pxa_ssp_remove(struct platform_device *pdev)
list_del(&ssp->node);
mutex_unlock(&ssp_lock);
kfree(ssp);
return 0;
}

View file

@ -278,6 +278,12 @@ CONFIG_DRM=y
CONFIG_DRM_VIRTIO_GPU=y
CONFIG_SOUND=y
CONFIG_SND=y
CONFIG_SND_HRTIMER=y
# CONFIG_SND_SUPPORT_OLD_API is not set
# CONFIG_SND_VERBOSE_PROCFS is not set
# CONFIG_SND_DRIVERS is not set
CONFIG_SND_INTEL8X0=y
# CONFIG_SND_USB is not set
CONFIG_HIDRAW=y
CONFIG_UHID=y
CONFIG_HID_A4TECH=y

View file

@ -74,12 +74,13 @@ ENTRY(ce_aes_ccm_auth_data)
beq 10f
ext v0.16b, v0.16b, v0.16b, #1 /* rotate out the mac bytes */
b 7b
8: mov w7, w8
8: cbz w8, 91f
mov w7, w8
add w8, w8, #16
9: ext v1.16b, v1.16b, v1.16b, #1
adds w7, w7, #1
bne 9b
eor v0.16b, v0.16b, v1.16b
91: eor v0.16b, v0.16b, v1.16b
st1 {v0.16b}, [x0]
10: str w8, [x3]
ret

View file

@ -517,8 +517,7 @@ CPU_LE( bic x0, x0, #(3 << 24) ) // Clear the EE and E0E bits for EL1
/* GICv3 system register access */
mrs x0, id_aa64pfr0_el1
ubfx x0, x0, #24, #4
cmp x0, #1
b.ne 3f
cbz x0, 3f
mrs_s x0, ICC_SRE_EL2
orr x0, x0, #ICC_SRE_EL2_SRE // Set ICC_SRE_EL2.SRE==1

View file

@ -59,7 +59,10 @@ cpuflags-$(CONFIG_M5206e) := $(call cc-option,-mcpu=5206e,-m5200)
cpuflags-$(CONFIG_M5206) := $(call cc-option,-mcpu=5206,-m5200)
KBUILD_AFLAGS += $(cpuflags-y)
KBUILD_CFLAGS += $(cpuflags-y) -pipe
KBUILD_CFLAGS += $(cpuflags-y)
KBUILD_CFLAGS += -pipe -ffreestanding
ifdef CONFIG_MMU
# without -fno-strength-reduce the 53c7xx.c driver fails ;-(
KBUILD_CFLAGS += -fno-strength-reduce -ffixed-a2

View file

@ -74,6 +74,7 @@ CONFIG_SERIAL_8250_CONSOLE=y
# CONFIG_SERIAL_8250_PCI is not set
CONFIG_SERIAL_8250_NR_UARTS=1
CONFIG_SERIAL_8250_RUNTIME_UARTS=1
CONFIG_SERIAL_OF_PLATFORM=y
CONFIG_SERIAL_AR933X=y
CONFIG_SERIAL_AR933X_CONSOLE=y
# CONFIG_HW_RANDOM is not set

View file

@ -71,14 +71,15 @@ static int __init vdma_init(void)
get_order(VDMA_PGTBL_SIZE));
BUG_ON(!pgtbl);
dma_cache_wback_inv((unsigned long)pgtbl, VDMA_PGTBL_SIZE);
pgtbl = (VDMA_PGTBL_ENTRY *)KSEG1ADDR(pgtbl);
pgtbl = (VDMA_PGTBL_ENTRY *)CKSEG1ADDR((unsigned long)pgtbl);
/*
* Clear the R4030 translation table
*/
vdma_pgtbl_init();
r4030_write_reg32(JAZZ_R4030_TRSTBL_BASE, CPHYSADDR(pgtbl));
r4030_write_reg32(JAZZ_R4030_TRSTBL_BASE,
CPHYSADDR((unsigned long)pgtbl));
r4030_write_reg32(JAZZ_R4030_TRSTBL_LIM, VDMA_PGTBL_SIZE);
r4030_write_reg32(JAZZ_R4030_TRSTBL_INV, 0);

View file

@ -52,6 +52,7 @@ asmlinkage void spurious_interrupt(void)
void __init init_IRQ(void)
{
int i;
unsigned int order = get_order(IRQ_STACK_SIZE);
for (i = 0; i < NR_IRQS; i++)
irq_set_noprobe(i);
@ -59,8 +60,7 @@ void __init init_IRQ(void)
arch_init_irq();
for_each_possible_cpu(i) {
int irq_pages = IRQ_STACK_SIZE / PAGE_SIZE;
void *s = (void *)__get_free_pages(GFP_KERNEL, irq_pages);
void *s = (void *)__get_free_pages(GFP_KERNEL, order);
irq_stack[i] = s;
pr_debug("CPU%d IRQ stack at 0x%p - 0x%p\n", i,

View file

@ -343,7 +343,7 @@ static inline int is_sp_move_ins(union mips_instruction *ip)
static int get_frame_info(struct mips_frame_info *info)
{
bool is_mmips = IS_ENABLED(CONFIG_CPU_MICROMIPS);
union mips_instruction insn, *ip, *ip_end;
union mips_instruction insn, *ip;
const unsigned int max_insns = 128;
unsigned int last_insn_size = 0;
unsigned int i;
@ -355,10 +355,9 @@ static int get_frame_info(struct mips_frame_info *info)
if (!ip)
goto err;
ip_end = (void *)ip + info->func_size;
for (i = 0; i < max_insns && ip < ip_end; i++) {
for (i = 0; i < max_insns; i++) {
ip = (void *)ip + last_insn_size;
if (is_mmips && mm_insn_16bit(ip->halfword[0])) {
insn.halfword[0] = 0;
insn.halfword[1] = ip->halfword[0];

View file

@ -508,7 +508,7 @@ static unsigned long epapr_hypercall(unsigned long *in,
static inline long epapr_hypercall0_1(unsigned int nr, unsigned long *r2)
{
unsigned long in[8];
unsigned long in[8] = {0};
unsigned long out[8];
unsigned long r;
@ -520,7 +520,7 @@ static inline long epapr_hypercall0_1(unsigned int nr, unsigned long *r2)
static inline long epapr_hypercall0(unsigned int nr)
{
unsigned long in[8];
unsigned long in[8] = {0};
unsigned long out[8];
return epapr_hypercall(in, out, nr);
@ -528,7 +528,7 @@ static inline long epapr_hypercall0(unsigned int nr)
static inline long epapr_hypercall1(unsigned int nr, unsigned long p1)
{
unsigned long in[8];
unsigned long in[8] = {0};
unsigned long out[8];
in[0] = p1;
@ -538,7 +538,7 @@ static inline long epapr_hypercall1(unsigned int nr, unsigned long p1)
static inline long epapr_hypercall2(unsigned int nr, unsigned long p1,
unsigned long p2)
{
unsigned long in[8];
unsigned long in[8] = {0};
unsigned long out[8];
in[0] = p1;
@ -549,7 +549,7 @@ static inline long epapr_hypercall2(unsigned int nr, unsigned long p1,
static inline long epapr_hypercall3(unsigned int nr, unsigned long p1,
unsigned long p2, unsigned long p3)
{
unsigned long in[8];
unsigned long in[8] = {0};
unsigned long out[8];
in[0] = p1;
@ -562,7 +562,7 @@ static inline long epapr_hypercall4(unsigned int nr, unsigned long p1,
unsigned long p2, unsigned long p3,
unsigned long p4)
{
unsigned long in[8];
unsigned long in[8] = {0};
unsigned long out[8];
in[0] = p1;

View file

@ -685,6 +685,9 @@ fast_exception_return:
mtcr r10
lwz r10,_LINK(r11)
mtlr r10
/* Clear the exception_marker on the stack to avoid confusing stacktrace */
li r10, 0
stw r10, 8(r11)
REST_GPR(10, r11)
mtspr SPRN_SRR1,r9
mtspr SPRN_SRR0,r12
@ -915,6 +918,9 @@ END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
mtcrf 0xFF,r10
mtlr r11
/* Clear the exception_marker on the stack to avoid confusing stacktrace */
li r10, 0
stw r10, 8(r1)
/*
* Once we put values in SRR0 and SRR1, we are in a state
* where exceptions are not recoverable, since taking an
@ -952,6 +958,9 @@ exc_exit_restart_end:
mtlr r11
lwz r10,_CCR(r1)
mtcrf 0xff,r10
/* Clear the exception_marker on the stack to avoid confusing stacktrace */
li r10, 0
stw r10, 8(r1)
REST_2GPRS(9, r1)
.globl exc_exit_restart
exc_exit_restart:

View file

@ -26,13 +26,13 @@
#define SS_MSR 0x74
#define SS_SDR1 0x78
#define SS_LR 0x7c
#define SS_SPRG 0x80 /* 4 SPRGs */
#define SS_DBAT 0x90 /* 8 DBATs */
#define SS_IBAT 0xd0 /* 8 IBATs */
#define SS_TB 0x110
#define SS_CR 0x118
#define SS_GPREG 0x11c /* r12-r31 */
#define STATE_SAVE_SIZE 0x16c
#define SS_SPRG 0x80 /* 8 SPRGs */
#define SS_DBAT 0xa0 /* 8 DBATs */
#define SS_IBAT 0xe0 /* 8 IBATs */
#define SS_TB 0x120
#define SS_CR 0x128
#define SS_GPREG 0x12c /* r12-r31 */
#define STATE_SAVE_SIZE 0x17c
.section .data
.align 5
@ -103,6 +103,16 @@ _GLOBAL(mpc83xx_enter_deep_sleep)
stw r7, SS_SPRG+12(r3)
stw r8, SS_SDR1(r3)
mfspr r4, SPRN_SPRG4
mfspr r5, SPRN_SPRG5
mfspr r6, SPRN_SPRG6
mfspr r7, SPRN_SPRG7
stw r4, SS_SPRG+16(r3)
stw r5, SS_SPRG+20(r3)
stw r6, SS_SPRG+24(r3)
stw r7, SS_SPRG+28(r3)
mfspr r4, SPRN_DBAT0U
mfspr r5, SPRN_DBAT0L
mfspr r6, SPRN_DBAT1U
@ -493,6 +503,16 @@ mpc83xx_deep_resume:
mtspr SPRN_IBAT7U, r6
mtspr SPRN_IBAT7L, r7
lwz r4, SS_SPRG+16(r3)
lwz r5, SS_SPRG+20(r3)
lwz r6, SS_SPRG+24(r3)
lwz r7, SS_SPRG+28(r3)
mtspr SPRN_SPRG4, r4
mtspr SPRN_SPRG5, r5
mtspr SPRN_SPRG6, r6
mtspr SPRN_SPRG7, r7
lwz r4, SS_SPRG+0(r3)
lwz r5, SS_SPRG+4(r3)
lwz r6, SS_SPRG+8(r3)

View file

@ -104,6 +104,10 @@ unsigned long __init wii_mmu_mapin_mem2(unsigned long top)
/* MEM2 64MB@0x10000000 */
delta = wii_hole_start + wii_hole_size;
size = top - delta;
if (__map_without_bats)
return delta;
for (bl = 128<<10; bl < max_size; bl <<= 1) {
if (bl * 2 > size)
break;

View file

@ -92,7 +92,7 @@ out:
}
static struct bin_attribute opal_msglog_attr = {
.attr = {.name = "msglog", .mode = 0444},
.attr = {.name = "msglog", .mode = 0400},
.read = opal_msglog_read
};

View file

@ -306,6 +306,12 @@ CONFIG_DRM=y
CONFIG_DRM_VIRTIO_GPU=y
CONFIG_SOUND=y
CONFIG_SND=y
CONFIG_SND_HRTIMER=y
# CONFIG_SND_SUPPORT_OLD_API is not set
# CONFIG_SND_VERBOSE_PROCFS is not set
# CONFIG_SND_DRIVERS is not set
CONFIG_SND_INTEL8X0=y
# CONFIG_SND_USB is not set
CONFIG_HIDRAW=y
CONFIG_UHID=y
CONFIG_HID_A4TECH=y

View file

@ -2,7 +2,11 @@
#define _ASM_X86_PAGE_64_DEFS_H
#ifdef CONFIG_KASAN
#ifdef CONFIG_KASAN_EXTRA
#define KASAN_STACK_ORDER 2
#else
#define KASAN_STACK_ORDER 1
#endif
#else
#define KASAN_STACK_ORDER 0
#endif

View file

@ -319,8 +319,7 @@ do { \
__put_user_asm(x, ptr, retval, "l", "k", "ir", errret); \
break; \
case 8: \
__put_user_asm_u64((__typeof__(*ptr))(x), ptr, retval, \
errret); \
__put_user_asm_u64(x, ptr, retval, errret); \
break; \
default: \
__put_user_bad(); \
@ -431,8 +430,10 @@ do { \
#define __put_user_nocheck(x, ptr, size) \
({ \
int __pu_err; \
__typeof__(*(ptr)) __pu_val; \
__pu_val = x; \
__uaccess_begin(); \
__put_user_size((x), (ptr), (size), __pu_err, -EFAULT); \
__put_user_size(__pu_val, (ptr), (size), __pu_err, -EFAULT);\
__uaccess_end(); \
__builtin_expect(__pu_err, 0); \
})

View file

@ -48,8 +48,7 @@ enum {
BIOS_STATUS_SUCCESS = 0,
BIOS_STATUS_UNIMPLEMENTED = -ENOSYS,
BIOS_STATUS_EINVAL = -EINVAL,
BIOS_STATUS_UNAVAIL = -EBUSY,
BIOS_STATUS_ABORT = -EINTR,
BIOS_STATUS_UNAVAIL = -EBUSY
};
/*
@ -112,9 +111,4 @@ extern long system_serial_number;
extern struct kobject *sgi_uv_kobj; /* /sys/firmware/sgi_uv */
/*
* EFI runtime lock; cf. firmware/efi/runtime-wrappers.c for details
*/
extern struct semaphore __efi_uv_runtime_lock;
#endif /* _ASM_X86_UV_BIOS_H */

View file

@ -716,11 +716,9 @@ static void init_amd_bd(struct cpuinfo_x86 *c)
static void init_amd_zn(struct cpuinfo_x86 *c)
{
set_cpu_cap(c, X86_FEATURE_ZEN);
/*
* Fix erratum 1076: CPB feature bit not being set in CPUID. It affects
* all up to and including B1.
*/
if (c->x86_model <= 1 && c->x86_mask <= 1)
/* Fix erratum 1076: CPB feature bit not being set in CPUID. */
if (!cpu_has(c, X86_FEATURE_CPB))
set_cpu_cap(c, X86_FEATURE_CPB);
}

View file

@ -168,6 +168,9 @@ setup_efi_state(struct boot_params *params, unsigned long params_load_addr,
struct efi_info *current_ei = &boot_params.efi_info;
struct efi_info *ei = &params->efi_info;
if (!efi_enabled(EFI_RUNTIME_SERVICES))
return 0;
if (!current_ei->efi_memmap_size)
return 0;

View file

@ -2388,6 +2388,14 @@ static int nested_svm_vmexit(struct vcpu_svm *svm)
kvm_mmu_reset_context(&svm->vcpu);
kvm_mmu_load(&svm->vcpu);
/*
* Drop what we picked up for L2 via svm_complete_interrupts() so it
* doesn't end up in L1.
*/
svm->vcpu.arch.nmi_injected = false;
kvm_clear_exception_queue(&svm->vcpu);
kvm_clear_interrupt_queue(&svm->vcpu);
return 0;
}

View file

@ -5574,6 +5574,7 @@ static int handle_external_interrupt(struct kvm_vcpu *vcpu)
static int handle_triple_fault(struct kvm_vcpu *vcpu)
{
vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN;
vcpu->mmio_needed = 0;
return 0;
}
@ -6656,6 +6657,10 @@ static int get_vmx_mem_address(struct kvm_vcpu *vcpu,
/* Addr = segment_base + offset */
/* offset = base + [index * scale] + displacement */
off = exit_qualification; /* holds the displacement */
if (addr_size == 1)
off = (gva_t)sign_extend64(off, 31);
else if (addr_size == 0)
off = (gva_t)sign_extend64(off, 15);
if (base_is_valid)
off += kvm_register_read(vcpu, base_reg);
if (index_is_valid)
@ -6698,10 +6703,16 @@ static int get_vmx_mem_address(struct kvm_vcpu *vcpu,
/* Protected mode: #GP(0)/#SS(0) if the segment is unusable.
*/
exn = (s.unusable != 0);
/* Protected mode: #GP(0)/#SS(0) if the memory
* operand is outside the segment limit.
/*
* Protected mode: #GP(0)/#SS(0) if the memory operand is
* outside the segment limit. All CPUs that support VMX ignore
* limit checks for flat segments, i.e. segments with base==0,
* limit==0xffffffff and of type expand-up data or code.
*/
exn = exn || (off + sizeof(u64) > s.limit);
if (!(s.base == 0 && s.limit == 0xffffffff &&
((s.type & 8) || !(s.type & 4))))
exn = exn || (off + sizeof(u64) > s.limit);
}
if (exn) {
kvm_queue_exception_e(vcpu,

View file

@ -6478,6 +6478,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
}
if (kvm_check_request(KVM_REQ_TRIPLE_FAULT, vcpu)) {
vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN;
vcpu->mmio_needed = 0;
r = 0;
goto out;
}

View file

@ -28,8 +28,7 @@
static struct uv_systab uv_systab;
static s64 __uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3,
u64 a4, u64 a5)
s64 uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, u64 a4, u64 a5)
{
struct uv_systab *tab = &uv_systab;
s64 ret;
@ -44,19 +43,6 @@ static s64 __uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3,
a1, a2, a3, a4, a5);
return ret;
}
s64 uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, u64 a4, u64 a5)
{
s64 ret;
if (down_interruptible(&__efi_uv_runtime_lock))
return BIOS_STATUS_ABORT;
ret = __uv_bios_call(which, a1, a2, a3, a4, a5);
up(&__efi_uv_runtime_lock);
return ret;
}
EXPORT_SYMBOL_GPL(uv_bios_call);
s64 uv_bios_call_irqsave(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3,
@ -65,15 +51,10 @@ s64 uv_bios_call_irqsave(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3,
unsigned long bios_flags;
s64 ret;
if (down_interruptible(&__efi_uv_runtime_lock))
return BIOS_STATUS_ABORT;
local_irq_save(bios_flags);
ret = __uv_bios_call(which, a1, a2, a3, a4, a5);
ret = uv_bios_call(which, a1, a2, a3, a4, a5);
local_irq_restore(bios_flags);
up(&__efi_uv_runtime_lock);
return ret;
}

View file

@ -35,6 +35,7 @@ CONFIG_SMP=y
CONFIG_HOTPLUG_CPU=y
# CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX is not set
# CONFIG_PCI is not set
CONFIG_VECTORS_OFFSET=0x00002000
CONFIG_XTENSA_PLATFORM_XTFPGA=y
CONFIG_CMDLINE_BOOL=y
CONFIG_CMDLINE="earlycon=uart8250,mmio32,0xfd050020,115200n8 console=ttyS0,115200n8 ip=dhcp root=/dev/nfs rw debug"

View file

@ -286,12 +286,13 @@ should_never_return:
movi a2, cpu_start_ccount
1:
memw
l32i a3, a2, 0
beqi a3, 0, 1b
movi a3, 0
s32i a3, a2, 0
memw
1:
memw
l32i a3, a2, 0
beqi a3, 0, 1b
wsr a3, ccount
@ -328,11 +329,13 @@ ENTRY(cpu_restart)
rsr a0, prid
neg a2, a0
movi a3, cpu_start_id
memw
s32i a2, a3, 0
#if XCHAL_DCACHE_IS_WRITEBACK
dhwbi a3, 0
#endif
1:
memw
l32i a2, a3, 0
dhi a3, 0
bne a2, a0, 1b

View file

@ -80,7 +80,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
{
unsigned i;
for (i = 0; i < max_cpus; ++i)
for_each_possible_cpu(i)
set_cpu_present(i, true);
}
@ -93,6 +93,11 @@ void __init smp_init_cpus(void)
pr_info("%s: Core Count = %d\n", __func__, ncpus);
pr_info("%s: Core Id = %d\n", __func__, core_id);
if (ncpus > NR_CPUS) {
ncpus = NR_CPUS;
pr_info("%s: limiting core count by %d\n", __func__, ncpus);
}
for (i = 0; i < ncpus; ++i)
set_cpu_possible(i, true);
}
@ -192,9 +197,11 @@ static int boot_secondary(unsigned int cpu, struct task_struct *ts)
int i;
#ifdef CONFIG_HOTPLUG_CPU
cpu_start_id = cpu;
system_flush_invalidate_dcache_range(
(unsigned long)&cpu_start_id, sizeof(cpu_start_id));
WRITE_ONCE(cpu_start_id, cpu);
/* Pairs with the third memw in the cpu_restart */
mb();
system_flush_invalidate_dcache_range((unsigned long)&cpu_start_id,
sizeof(cpu_start_id));
#endif
smp_call_function_single(0, mx_cpu_start, (void *)cpu, 1);
@ -203,18 +210,21 @@ static int boot_secondary(unsigned int cpu, struct task_struct *ts)
ccount = get_ccount();
while (!ccount);
cpu_start_ccount = ccount;
WRITE_ONCE(cpu_start_ccount, ccount);
while (time_before(jiffies, timeout)) {
do {
/*
* Pairs with the first two memws in the
* .Lboot_secondary.
*/
mb();
if (!cpu_start_ccount)
break;
}
ccount = READ_ONCE(cpu_start_ccount);
} while (ccount && time_before(jiffies, timeout));
if (cpu_start_ccount) {
if (ccount) {
smp_call_function_single(0, mx_cpu_stop,
(void *)cpu, 1);
cpu_start_ccount = 0;
(void *)cpu, 1);
WRITE_ONCE(cpu_start_ccount, 0);
return -EIO;
}
}
@ -234,6 +244,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
pr_debug("%s: Calling wakeup_secondary(cpu:%d, idle:%p, sp: %08lx)\n",
__func__, cpu, idle, start_info.stack);
init_completion(&cpu_running);
ret = boot_secondary(cpu, idle);
if (ret == 0) {
wait_for_completion_timeout(&cpu_running,
@ -295,8 +306,10 @@ void __cpu_die(unsigned int cpu)
unsigned long timeout = jiffies + msecs_to_jiffies(1000);
while (time_before(jiffies, timeout)) {
system_invalidate_dcache_range((unsigned long)&cpu_start_id,
sizeof(cpu_start_id));
if (cpu_start_id == -cpu) {
sizeof(cpu_start_id));
/* Pairs with the second memw in the cpu_restart */
mb();
if (READ_ONCE(cpu_start_id) == -cpu) {
platform_cpu_kill(cpu);
return;
}

View file

@ -87,7 +87,7 @@ static int ccount_timer_shutdown(struct clock_event_device *evt)
container_of(evt, struct ccount_timer, evt);
if (timer->irq_enabled) {
disable_irq(evt->irq);
disable_irq_nosync(evt->irq);
timer->irq_enabled = 0;
}
return 0;

View file

@ -85,17 +85,17 @@ static int hash_walk_new_entry(struct crypto_hash_walk *walk)
int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
{
unsigned int alignmask = walk->alignmask;
unsigned int nbytes = walk->entrylen;
walk->data -= walk->offset;
if (nbytes && walk->offset & alignmask && !err) {
walk->offset = ALIGN(walk->offset, alignmask + 1);
nbytes = min(nbytes,
((unsigned int)(PAGE_SIZE)) - walk->offset);
walk->entrylen -= nbytes;
if (walk->entrylen && (walk->offset & alignmask) && !err) {
unsigned int nbytes;
walk->offset = ALIGN(walk->offset, alignmask + 1);
nbytes = min(walk->entrylen,
(unsigned int)(PAGE_SIZE - walk->offset));
if (nbytes) {
walk->entrylen -= nbytes;
walk->data += walk->offset;
return nbytes;
}
@ -115,7 +115,7 @@ int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
if (err)
return err;
if (nbytes) {
if (walk->entrylen) {
walk->offset = 0;
walk->pg++;
return hash_walk_next(walk);

View file

@ -52,7 +52,7 @@ static int crypto_pcbc_encrypt_segment(struct blkcipher_desc *desc,
unsigned int nbytes = walk->nbytes;
u8 *src = walk->src.virt.addr;
u8 *dst = walk->dst.virt.addr;
u8 *iv = walk->iv;
u8 * const iv = walk->iv;
do {
crypto_xor(iv, src, bsize);
@ -76,7 +76,7 @@ static int crypto_pcbc_encrypt_inplace(struct blkcipher_desc *desc,
int bsize = crypto_cipher_blocksize(tfm);
unsigned int nbytes = walk->nbytes;
u8 *src = walk->src.virt.addr;
u8 *iv = walk->iv;
u8 * const iv = walk->iv;
u8 tmpbuf[bsize];
do {
@ -89,8 +89,6 @@ static int crypto_pcbc_encrypt_inplace(struct blkcipher_desc *desc,
src += bsize;
} while ((nbytes -= bsize) >= bsize);
memcpy(walk->iv, iv, bsize);
return nbytes;
}
@ -130,7 +128,7 @@ static int crypto_pcbc_decrypt_segment(struct blkcipher_desc *desc,
unsigned int nbytes = walk->nbytes;
u8 *src = walk->src.virt.addr;
u8 *dst = walk->dst.virt.addr;
u8 *iv = walk->iv;
u8 * const iv = walk->iv;
do {
fn(crypto_cipher_tfm(tfm), dst, src);
@ -142,8 +140,6 @@ static int crypto_pcbc_decrypt_segment(struct blkcipher_desc *desc,
dst += bsize;
} while ((nbytes -= bsize) >= bsize);
memcpy(walk->iv, iv, bsize);
return nbytes;
}
@ -156,7 +152,7 @@ static int crypto_pcbc_decrypt_inplace(struct blkcipher_desc *desc,
int bsize = crypto_cipher_blocksize(tfm);
unsigned int nbytes = walk->nbytes;
u8 *src = walk->src.virt.addr;
u8 *iv = walk->iv;
u8 * const iv = walk->iv;
u8 tmpbuf[bsize];
do {
@ -169,8 +165,6 @@ static int crypto_pcbc_decrypt_inplace(struct blkcipher_desc *desc,
src += bsize;
} while ((nbytes -= bsize) >= bsize);
memcpy(walk->iv, iv, bsize);
return nbytes;
}

View file

@ -202,11 +202,15 @@ static int create_of_modalias(struct acpi_device *acpi_dev, char *modalias,
{
struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER };
const union acpi_object *of_compatible, *obj;
acpi_status status;
int len, count;
int i, nval;
char *c;
acpi_get_name(acpi_dev->handle, ACPI_SINGLE_NAME, &buf);
status = acpi_get_name(acpi_dev->handle, ACPI_SINGLE_NAME, &buf);
if (ACPI_FAILURE(status))
return -ENODEV;
/* DT strings are all in lower case */
for (c = buf.pointer; *c != '\0'; c++)
*c = tolower(*c);

View file

@ -356,6 +356,7 @@ struct binder_error {
* @min_priority: minimum scheduling priority
* (invariant after initialized)
* @inherit_rt: inherit RT scheduling policy from caller
* @txn_security_ctx: require sender's security context
* (invariant after initialized)
* @async_todo: list of async work items
* (protected by @proc->inner_lock)
@ -395,6 +396,7 @@ struct binder_node {
u8 sched_policy:2;
u8 inherit_rt:1;
u8 accept_fds:1;
u8 txn_security_ctx:1;
u8 min_priority;
};
bool has_async_transaction;
@ -652,6 +654,7 @@ struct binder_transaction {
struct binder_priority saved_priority;
bool set_priority_called;
kuid_t sender_euid;
binder_uintptr_t security_ctx;
/**
* @lock: protects @from, @to_proc, and @to_thread
*
@ -1361,6 +1364,7 @@ static struct binder_node *binder_init_node_ilocked(
node->min_priority = to_kernel_prio(node->sched_policy, priority);
node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
node->inherit_rt = !!(flags & FLAT_BINDER_FLAG_INHERIT_RT);
node->txn_security_ctx = !!(flags & FLAT_BINDER_FLAG_TXN_SECURITY_CTX);
spin_lock_init(&node->lock);
INIT_LIST_HEAD(&node->work.entry);
INIT_LIST_HEAD(&node->async_todo);
@ -2899,6 +2903,8 @@ static void binder_transaction(struct binder_proc *proc,
binder_size_t last_fixup_min_off = 0;
struct binder_context *context = proc->context;
int t_debug_id = atomic_inc_return(&binder_last_id);
char *secctx = NULL;
u32 secctx_sz = 0;
e = binder_transaction_log_add(&binder_transaction_log);
e->debug_id = t_debug_id;
@ -3122,6 +3128,20 @@ static void binder_transaction(struct binder_proc *proc,
t->priority = target_proc->default_priority;
}
if (target_node && target_node->txn_security_ctx) {
u32 secid;
security_task_getsecid(proc->tsk, &secid);
ret = security_secid_to_secctx(secid, &secctx, &secctx_sz);
if (ret) {
return_error = BR_FAILED_REPLY;
return_error_param = ret;
return_error_line = __LINE__;
goto err_get_secctx_failed;
}
extra_buffers_size += ALIGN(secctx_sz, sizeof(u64));
}
trace_binder_transaction(reply, t, target_node);
t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
@ -3138,6 +3158,19 @@ static void binder_transaction(struct binder_proc *proc,
t->buffer = NULL;
goto err_binder_alloc_buf_failed;
}
if (secctx) {
size_t buf_offset = ALIGN(tr->data_size, sizeof(void *)) +
ALIGN(tr->offsets_size, sizeof(void *)) +
ALIGN(extra_buffers_size, sizeof(void *)) -
ALIGN(secctx_sz, sizeof(u64));
char *kptr = t->buffer->data + buf_offset;
t->security_ctx = (uintptr_t)kptr +
binder_alloc_get_user_buffer_offset(&target_proc->alloc);
memcpy(kptr, secctx, secctx_sz);
security_release_secctx(secctx, secctx_sz);
secctx = NULL;
}
t->buffer->debug_id = t->debug_id;
t->buffer->transaction = t;
t->buffer->target_node = target_node;
@ -3408,6 +3441,9 @@ err_copy_data_failed:
t->buffer->transaction = NULL;
binder_alloc_free_buf(&target_proc->alloc, t->buffer);
err_binder_alloc_buf_failed:
if (secctx)
security_release_secctx(secctx, secctx_sz);
err_get_secctx_failed:
kfree(tcomplete);
binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
err_alloc_tcomplete_failed:
@ -4054,11 +4090,13 @@ retry:
while (1) {
uint32_t cmd;
struct binder_transaction_data tr;
struct binder_transaction_data_secctx tr;
struct binder_transaction_data *trd = &tr.transaction_data;
struct binder_work *w = NULL;
struct list_head *list = NULL;
struct binder_transaction *t = NULL;
struct binder_thread *t_from;
size_t trsize = sizeof(*trd);
binder_inner_proc_lock(proc);
if (!binder_worklist_empty_ilocked(&thread->todo))
@ -4254,41 +4292,47 @@ retry:
struct binder_node *target_node = t->buffer->target_node;
struct binder_priority node_prio;
tr.target.ptr = target_node->ptr;
tr.cookie = target_node->cookie;
trd->target.ptr = target_node->ptr;
trd->cookie = target_node->cookie;
node_prio.sched_policy = target_node->sched_policy;
node_prio.prio = target_node->min_priority;
binder_transaction_priority(current, t, node_prio,
target_node->inherit_rt);
cmd = BR_TRANSACTION;
} else {
tr.target.ptr = 0;
tr.cookie = 0;
trd->target.ptr = 0;
trd->cookie = 0;
cmd = BR_REPLY;
}
tr.code = t->code;
tr.flags = t->flags;
tr.sender_euid = from_kuid(current_user_ns(), t->sender_euid);
trd->code = t->code;
trd->flags = t->flags;
trd->sender_euid = from_kuid(current_user_ns(), t->sender_euid);
t_from = binder_get_txn_from(t);
if (t_from) {
struct task_struct *sender = t_from->proc->tsk;
tr.sender_pid = task_tgid_nr_ns(sender,
task_active_pid_ns(current));
trd->sender_pid =
task_tgid_nr_ns(sender,
task_active_pid_ns(current));
} else {
tr.sender_pid = 0;
trd->sender_pid = 0;
}
tr.data_size = t->buffer->data_size;
tr.offsets_size = t->buffer->offsets_size;
tr.data.ptr.buffer = (binder_uintptr_t)
trd->data_size = t->buffer->data_size;
trd->offsets_size = t->buffer->offsets_size;
trd->data.ptr.buffer = (binder_uintptr_t)
((uintptr_t)t->buffer->data +
binder_alloc_get_user_buffer_offset(&proc->alloc));
tr.data.ptr.offsets = tr.data.ptr.buffer +
trd->data.ptr.offsets = trd->data.ptr.buffer +
ALIGN(t->buffer->data_size,
sizeof(void *));
tr.secctx = t->security_ctx;
if (t->security_ctx) {
cmd = BR_TRANSACTION_SEC_CTX;
trsize = sizeof(tr);
}
if (put_user(cmd, (uint32_t __user *)ptr)) {
if (t_from)
binder_thread_dec_tmpref(t_from);
@ -4299,7 +4343,7 @@ retry:
return -EFAULT;
}
ptr += sizeof(uint32_t);
if (copy_to_user(ptr, &tr, sizeof(tr))) {
if (copy_to_user(ptr, &tr, trsize)) {
if (t_from)
binder_thread_dec_tmpref(t_from);
@ -4308,7 +4352,7 @@ retry:
return -EFAULT;
}
ptr += sizeof(tr);
ptr += trsize;
trace_binder_transaction_received(t);
binder_stat_br(proc, thread, cmd);
@ -4316,16 +4360,18 @@ retry:
"%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n",
proc->pid, thread->pid,
(cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
"BR_REPLY",
(cmd == BR_TRANSACTION_SEC_CTX) ?
"BR_TRANSACTION_SEC_CTX" : "BR_REPLY",
t->debug_id, t_from ? t_from->proc->pid : 0,
t_from ? t_from->pid : 0, cmd,
t->buffer->data_size, t->buffer->offsets_size,
(u64)tr.data.ptr.buffer, (u64)tr.data.ptr.offsets);
(u64)trd->data.ptr.buffer,
(u64)trd->data.ptr.offsets);
if (t_from)
binder_thread_dec_tmpref(t_from);
t->buffer->allow_user_free = 1;
if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
if (cmd != BR_REPLY && !(t->flags & TF_ONE_WAY)) {
binder_inner_proc_lock(thread->proc);
t->to_parent = thread->transaction_stack;
t->to_thread = thread;
@ -4670,7 +4716,8 @@ out:
return ret;
}
static int binder_ioctl_set_ctx_mgr(struct file *filp)
static int binder_ioctl_set_ctx_mgr(struct file *filp,
struct flat_binder_object *fbo)
{
int ret = 0;
struct binder_proc *proc = filp->private_data;
@ -4699,7 +4746,7 @@ static int binder_ioctl_set_ctx_mgr(struct file *filp)
} else {
context->binder_context_mgr_uid = curr_euid;
}
new_node = binder_new_node(proc, NULL);
new_node = binder_new_node(proc, fbo);
if (!new_node) {
ret = -ENOMEM;
goto out;
@ -4821,8 +4868,20 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
binder_inner_proc_unlock(proc);
break;
}
case BINDER_SET_CONTEXT_MGR_EXT: {
struct flat_binder_object fbo;
if (copy_from_user(&fbo, ubuf, sizeof(fbo))) {
ret = -EINVAL;
goto err;
}
ret = binder_ioctl_set_ctx_mgr(filp, &fbo);
if (ret)
goto err;
break;
}
case BINDER_SET_CONTEXT_MGR:
ret = binder_ioctl_set_ctx_mgr(filp);
ret = binder_ioctl_set_ctx_mgr(filp, NULL);
if (ret)
goto err;
break;

View file

@ -717,7 +717,7 @@ static int he_init_cs_block_rcm(struct he_dev *he_dev)
instead of '/ 512', use '>> 9' to prevent a call
to divdu3 on x86 platforms
*/
rate_cps = (unsigned long long) (1 << exp) * (man + 512) >> 9;
rate_cps = (unsigned long long) (1UL << exp) * (man + 512) >> 9;
if (rate_cps < 10)
rate_cps = 10; /* 2.2.1 minimum payload rate is 10 cps */

View file

@ -114,7 +114,6 @@ void wakeup_source_drop(struct wakeup_source *ws)
if (!ws)
return;
del_timer_sync(&ws->timer);
__pm_relax(ws);
}
EXPORT_SYMBOL_GPL(wakeup_source_drop);
@ -202,6 +201,13 @@ void wakeup_source_remove(struct wakeup_source *ws)
list_del_rcu(&ws->entry);
spin_unlock_irqrestore(&events_lock, flags);
synchronize_srcu(&wakeup_srcu);
del_timer_sync(&ws->timer);
/*
* Clear timer.function to make wakeup_source_not_registered() treat
* this wakeup source as not registered.
*/
ws->timer.function = NULL;
}
EXPORT_SYMBOL_GPL(wakeup_source_remove);

View file

@ -32,6 +32,7 @@
#include <linux/wait.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/nospec.h>
#include <asm/io.h>
#include <asm/uaccess.h>
@ -386,7 +387,11 @@ static ssize_t ac_write(struct file *file, const char __user *buf, size_t count,
TicCard = st_loc.tic_des_from_pc; /* tic number to send */
IndexCard = NumCard - 1;
if((NumCard < 1) || (NumCard > MAX_BOARD) || !apbs[IndexCard].RamIO)
if (IndexCard >= MAX_BOARD)
return -EINVAL;
IndexCard = array_index_nospec(IndexCard, MAX_BOARD);
if (!apbs[IndexCard].RamIO)
return -EINVAL;
#ifdef DEBUG
@ -697,6 +702,7 @@ static long ac_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
unsigned char IndexCard;
void __iomem *pmem;
int ret = 0;
static int warncount = 10;
volatile unsigned char byte_reset_it;
struct st_ram_io *adgl;
void __user *argp = (void __user *)arg;
@ -711,16 +717,12 @@ static long ac_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
mutex_lock(&ac_mutex);
IndexCard = adgl->num_card-1;
if(cmd != 6 && ((IndexCard >= MAX_BOARD) || !apbs[IndexCard].RamIO)) {
static int warncount = 10;
if (warncount) {
printk( KERN_WARNING "APPLICOM driver IOCTL, bad board number %d\n",(int)IndexCard+1);
warncount--;
}
kfree(adgl);
mutex_unlock(&ac_mutex);
return -EINVAL;
}
if (cmd != 6 && IndexCard >= MAX_BOARD)
goto err;
IndexCard = array_index_nospec(IndexCard, MAX_BOARD);
if (cmd != 6 && !apbs[IndexCard].RamIO)
goto err;
switch (cmd) {
@ -838,5 +840,16 @@ static long ac_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
kfree(adgl);
mutex_unlock(&ac_mutex);
return 0;
err:
if (warncount) {
pr_warn("APPLICOM driver IOCTL, bad board number %d\n",
(int)IndexCard + 1);
warncount--;
}
kfree(adgl);
mutex_unlock(&ac_mutex);
return -EINVAL;
}

View file

@ -355,16 +355,16 @@ ingenic_clk_round_rate(struct clk_hw *hw, unsigned long req_rate,
struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
struct ingenic_cgu *cgu = ingenic_clk->cgu;
const struct ingenic_cgu_clk_info *clk_info;
long rate = *parent_rate;
unsigned int div = 1;
clk_info = &cgu->clock_info[ingenic_clk->idx];
if (clk_info->type & CGU_CLK_DIV)
rate /= ingenic_clk_calc_div(clk_info, *parent_rate, req_rate);
div = ingenic_clk_calc_div(clk_info, *parent_rate, req_rate);
else if (clk_info->type & CGU_CLK_FIXDIV)
rate /= clk_info->fixdiv.div;
div = clk_info->fixdiv.div;
return rate;
return DIV_ROUND_UP(*parent_rate, div);
}
static int
@ -384,7 +384,7 @@ ingenic_clk_set_rate(struct clk_hw *hw, unsigned long req_rate,
if (clk_info->type & CGU_CLK_DIV) {
div = ingenic_clk_calc_div(clk_info, parent_rate, req_rate);
rate = parent_rate / div;
rate = DIV_ROUND_UP(parent_rate, div);
if (rate != req_rate)
return -EINVAL;

View file

@ -379,6 +379,13 @@ static void exynos4_mct_tick_start(unsigned long cycles,
exynos4_mct_write(tmp, mevt->base + MCT_L_TCON_OFFSET);
}
static void exynos4_mct_tick_clear(struct mct_clock_event_device *mevt)
{
/* Clear the MCT tick interrupt */
if (readl_relaxed(reg_base + mevt->base + MCT_L_INT_CSTAT_OFFSET) & 1)
exynos4_mct_write(0x1, mevt->base + MCT_L_INT_CSTAT_OFFSET);
}
static int exynos4_tick_set_next_event(unsigned long cycles,
struct clock_event_device *evt)
{
@ -395,6 +402,7 @@ static int set_state_shutdown(struct clock_event_device *evt)
mevt = container_of(evt, struct mct_clock_event_device, evt);
exynos4_mct_tick_stop(mevt);
exynos4_mct_tick_clear(mevt);
return 0;
}
@ -411,8 +419,11 @@ static int set_state_periodic(struct clock_event_device *evt)
return 0;
}
static void exynos4_mct_tick_clear(struct mct_clock_event_device *mevt)
static irqreturn_t exynos4_mct_tick_isr(int irq, void *dev_id)
{
struct mct_clock_event_device *mevt = dev_id;
struct clock_event_device *evt = &mevt->evt;
/*
* This is for supporting oneshot mode.
* Mct would generate interrupt periodically
@ -421,16 +432,6 @@ static void exynos4_mct_tick_clear(struct mct_clock_event_device *mevt)
if (!clockevent_state_periodic(&mevt->evt))
exynos4_mct_tick_stop(mevt);
/* Clear the MCT tick interrupt */
if (readl_relaxed(reg_base + mevt->base + MCT_L_INT_CSTAT_OFFSET) & 1)
exynos4_mct_write(0x1, mevt->base + MCT_L_INT_CSTAT_OFFSET);
}
static irqreturn_t exynos4_mct_tick_isr(int irq, void *dev_id)
{
struct mct_clock_event_device *mevt = dev_id;
struct clock_event_device *evt = &mevt->evt;
exynos4_mct_tick_clear(mevt);
evt->event_handler(evt);

View file

@ -580,13 +580,13 @@ EXPORT_SYMBOL_GPL(cpufreq_driver_resolve_freq);
* SYSFS INTERFACE *
*********************************************************************/
static ssize_t show_boost(struct kobject *kobj,
struct attribute *attr, char *buf)
struct kobj_attribute *attr, char *buf)
{
return sprintf(buf, "%d\n", cpufreq_driver->boost_enabled);
}
static ssize_t store_boost(struct kobject *kobj, struct attribute *attr,
const char *buf, size_t count)
static ssize_t store_boost(struct kobject *kobj, struct kobj_attribute *attr,
const char *buf, size_t count)
{
int ret, enable;

View file

@ -48,11 +48,11 @@ enum {OD_NORMAL_SAMPLE, OD_SUB_SAMPLE};
/* Create attributes */
#define gov_sys_attr_ro(_name) \
static struct global_attr _name##_gov_sys = \
static struct kobj_attribute _name##_gov_sys = \
__ATTR(_name, 0444, show_##_name##_gov_sys, NULL)
#define gov_sys_attr_rw(_name) \
static struct global_attr _name##_gov_sys = \
static struct kobj_attribute _name##_gov_sys = \
__ATTR(_name, 0644, show_##_name##_gov_sys, store_##_name##_gov_sys)
#define gov_pol_attr_ro(_name) \
@ -74,7 +74,7 @@ __ATTR(_name, 0644, show_##_name##_gov_pol, store_##_name##_gov_pol)
/* Create show/store routines */
#define show_one(_gov, file_name) \
static ssize_t show_##file_name##_gov_sys \
(struct kobject *kobj, struct attribute *attr, char *buf) \
(struct kobject *kobj, struct kobj_attribute *attr, char *buf) \
{ \
struct _gov##_dbs_tuners *tuners = _gov##_dbs_cdata.gdbs_data->tuners; \
return sprintf(buf, "%u\n", tuners->file_name); \
@ -90,7 +90,7 @@ static ssize_t show_##file_name##_gov_pol \
#define store_one(_gov, file_name) \
static ssize_t store_##file_name##_gov_sys \
(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) \
(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count) \
{ \
struct dbs_data *dbs_data = _gov##_dbs_cdata.gdbs_data; \
return store_##file_name(dbs_data, buf, count); \
@ -254,7 +254,7 @@ static inline int delay_for_sampling_rate(unsigned int sampling_rate)
#define declare_show_sampling_rate_min(_gov) \
static ssize_t show_sampling_rate_min_gov_sys \
(struct kobject *kobj, struct attribute *attr, char *buf) \
(struct kobject *kobj, struct kobj_attribute *attr, char *buf) \
{ \
struct dbs_data *dbs_data = _gov##_dbs_cdata.gdbs_data; \
return sprintf(buf, "%u\n", dbs_data->min_sampling_rate); \

View file

@ -1406,7 +1406,7 @@ static ssize_t store_use_migration_notif(
*/
#define show_gov_pol_sys(file_name) \
static ssize_t show_##file_name##_gov_sys \
(struct kobject *kobj, struct attribute *attr, char *buf) \
(struct kobject *kobj, struct kobj_attribute *attr, char *buf) \
{ \
return show_##file_name(common_tunables, buf); \
} \
@ -1419,7 +1419,7 @@ static ssize_t show_##file_name##_gov_pol \
#define store_gov_pol_sys(file_name) \
static ssize_t store_##file_name##_gov_sys \
(struct kobject *kobj, struct attribute *attr, const char *buf, \
(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, \
size_t count) \
{ \
return store_##file_name(common_tunables, buf, count); \
@ -1455,7 +1455,7 @@ show_store_gov_pol_sys(fast_ramp_down);
show_store_gov_pol_sys(enable_prediction);
#define gov_sys_attr_rw(_name) \
static struct global_attr _name##_gov_sys = \
static struct kobj_attribute _name##_gov_sys = \
__ATTR(_name, 0644, show_##_name##_gov_sys, store_##_name##_gov_sys)
#define gov_pol_attr_rw(_name) \
@ -1484,7 +1484,7 @@ gov_sys_pol_attr_rw(ignore_hispeed_on_notif);
gov_sys_pol_attr_rw(fast_ramp_down);
gov_sys_pol_attr_rw(enable_prediction);
static struct global_attr boostpulse_gov_sys =
static struct kobj_attribute boostpulse_gov_sys =
__ATTR(boostpulse, 0200, NULL, store_boostpulse_gov_sys);
static struct freq_attr boostpulse_gov_pol =

View file

@ -368,13 +368,13 @@ static void __init intel_pstate_debug_expose_params(void)
/************************** sysfs begin ************************/
#define show_one(file_name, object) \
static ssize_t show_##file_name \
(struct kobject *kobj, struct attribute *attr, char *buf) \
(struct kobject *kobj, struct kobj_attribute *attr, char *buf) \
{ \
return sprintf(buf, "%u\n", limits->object); \
}
static ssize_t show_turbo_pct(struct kobject *kobj,
struct attribute *attr, char *buf)
struct kobj_attribute *attr, char *buf)
{
struct cpudata *cpu;
int total, no_turbo, turbo_pct;
@ -390,7 +390,7 @@ static ssize_t show_turbo_pct(struct kobject *kobj,
}
static ssize_t show_num_pstates(struct kobject *kobj,
struct attribute *attr, char *buf)
struct kobj_attribute *attr, char *buf)
{
struct cpudata *cpu;
int total;
@ -401,7 +401,7 @@ static ssize_t show_num_pstates(struct kobject *kobj,
}
static ssize_t show_no_turbo(struct kobject *kobj,
struct attribute *attr, char *buf)
struct kobj_attribute *attr, char *buf)
{
ssize_t ret;
@ -414,7 +414,7 @@ static ssize_t show_no_turbo(struct kobject *kobj,
return ret;
}
static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
static ssize_t store_no_turbo(struct kobject *a, struct kobj_attribute *b,
const char *buf, size_t count)
{
unsigned int input;
@ -438,7 +438,7 @@ static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
return count;
}
static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
static ssize_t store_max_perf_pct(struct kobject *a, struct kobj_attribute *b,
const char *buf, size_t count)
{
unsigned int input;
@ -463,7 +463,7 @@ static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
return count;
}
static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
static ssize_t store_min_perf_pct(struct kobject *a, struct kobj_attribute *b,
const char *buf, size_t count)
{
unsigned int input;

View file

@ -191,7 +191,7 @@ static int pxa_cpufreq_change_voltage(const struct pxa_freqs *pxa_freq)
return ret;
}
static void __init pxa_cpufreq_init_voltages(void)
static void pxa_cpufreq_init_voltages(void)
{
vcc_core = regulator_get(NULL, "vcc_core");
if (IS_ERR(vcc_core)) {
@ -207,7 +207,7 @@ static int pxa_cpufreq_change_voltage(const struct pxa_freqs *pxa_freq)
return 0;
}
static void __init pxa_cpufreq_init_voltages(void) { }
static void pxa_cpufreq_init_voltages(void) { }
#endif
static void find_freq_tables(struct cpufreq_frequency_table **freq_table,

View file

@ -141,6 +141,8 @@ static int tegra124_cpufreq_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, priv);
of_node_put(np);
return 0;
out_switch_to_pllx:

View file

@ -2081,6 +2081,7 @@ static void init_aead_job(struct aead_request *req,
if (unlikely(req->src != req->dst)) {
if (!edesc->dst_nents) {
dst_dma = sg_dma_address(req->dst);
out_options = 0;
} else {
dst_dma = edesc->sec4_sg_dma +
sec4_sg_index *

View file

@ -203,6 +203,7 @@ struct at_xdmac_chan {
u32 save_cim;
u32 save_cnda;
u32 save_cndc;
u32 irq_status;
unsigned long status;
struct tasklet_struct tasklet;
struct dma_slave_config sconfig;
@ -1582,8 +1583,8 @@ static void at_xdmac_tasklet(unsigned long data)
struct at_xdmac_desc *desc;
u32 error_mask;
dev_dbg(chan2dev(&atchan->chan), "%s: status=0x%08lx\n",
__func__, atchan->status);
dev_dbg(chan2dev(&atchan->chan), "%s: status=0x%08x\n",
__func__, atchan->irq_status);
error_mask = AT_XDMAC_CIS_RBEIS
| AT_XDMAC_CIS_WBEIS
@ -1591,15 +1592,15 @@ static void at_xdmac_tasklet(unsigned long data)
if (at_xdmac_chan_is_cyclic(atchan)) {
at_xdmac_handle_cyclic(atchan);
} else if ((atchan->status & AT_XDMAC_CIS_LIS)
|| (atchan->status & error_mask)) {
} else if ((atchan->irq_status & AT_XDMAC_CIS_LIS)
|| (atchan->irq_status & error_mask)) {
struct dma_async_tx_descriptor *txd;
if (atchan->status & AT_XDMAC_CIS_RBEIS)
if (atchan->irq_status & AT_XDMAC_CIS_RBEIS)
dev_err(chan2dev(&atchan->chan), "read bus error!!!");
if (atchan->status & AT_XDMAC_CIS_WBEIS)
if (atchan->irq_status & AT_XDMAC_CIS_WBEIS)
dev_err(chan2dev(&atchan->chan), "write bus error!!!");
if (atchan->status & AT_XDMAC_CIS_ROIS)
if (atchan->irq_status & AT_XDMAC_CIS_ROIS)
dev_err(chan2dev(&atchan->chan), "request overflow error!!!");
spin_lock_bh(&atchan->lock);
@ -1654,7 +1655,7 @@ static irqreturn_t at_xdmac_interrupt(int irq, void *dev_id)
atchan = &atxdmac->chan[i];
chan_imr = at_xdmac_chan_read(atchan, AT_XDMAC_CIM);
chan_status = at_xdmac_chan_read(atchan, AT_XDMAC_CIS);
atchan->status = chan_status & chan_imr;
atchan->irq_status = chan_status & chan_imr;
dev_vdbg(atxdmac->dma.dev,
"%s: chan%d: imr=0x%x, status=0x%x\n",
__func__, i, chan_imr, chan_status);
@ -1668,7 +1669,7 @@ static irqreturn_t at_xdmac_interrupt(int irq, void *dev_id)
at_xdmac_chan_read(atchan, AT_XDMAC_CDA),
at_xdmac_chan_read(atchan, AT_XDMAC_CUBC));
if (atchan->status & (AT_XDMAC_CIS_RBEIS | AT_XDMAC_CIS_WBEIS))
if (atchan->irq_status & (AT_XDMAC_CIS_RBEIS | AT_XDMAC_CIS_WBEIS))
at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask);
tasklet_schedule(&atchan->tasklet);

View file

@ -563,11 +563,9 @@ static int dmatest_func(void *data)
srcs[i] = um->addr[i] + src_off;
ret = dma_mapping_error(dev->dev, um->addr[i]);
if (ret) {
dmaengine_unmap_put(um);
result("src mapping error", total_tests,
src_off, dst_off, len, ret);
failed_tests++;
continue;
goto error_unmap_continue;
}
um->to_cnt++;
}
@ -582,11 +580,9 @@ static int dmatest_func(void *data)
DMA_BIDIRECTIONAL);
ret = dma_mapping_error(dev->dev, dsts[i]);
if (ret) {
dmaengine_unmap_put(um);
result("dst mapping error", total_tests,
src_off, dst_off, len, ret);
failed_tests++;
continue;
goto error_unmap_continue;
}
um->bidi_cnt++;
}
@ -611,12 +607,10 @@ static int dmatest_func(void *data)
}
if (!tx) {
dmaengine_unmap_put(um);
result("prep error", total_tests, src_off,
dst_off, len, ret);
msleep(100);
failed_tests++;
continue;
goto error_unmap_continue;
}
done->done = false;
@ -625,12 +619,10 @@ static int dmatest_func(void *data)
cookie = tx->tx_submit(tx);
if (dma_submit_error(cookie)) {
dmaengine_unmap_put(um);
result("submit error", total_tests, src_off,
dst_off, len, ret);
msleep(100);
failed_tests++;
continue;
goto error_unmap_continue;
}
dma_async_issue_pending(chan);
@ -643,16 +635,14 @@ static int dmatest_func(void *data)
dmaengine_unmap_put(um);
result("test timed out", total_tests, src_off, dst_off,
len, 0);
failed_tests++;
continue;
goto error_unmap_continue;
} else if (status != DMA_COMPLETE) {
dmaengine_unmap_put(um);
result(status == DMA_ERROR ?
"completion error status" :
"completion busy status", total_tests, src_off,
dst_off, len, ret);
failed_tests++;
continue;
goto error_unmap_continue;
}
dmaengine_unmap_put(um);
@ -691,6 +681,12 @@ static int dmatest_func(void *data)
verbose_result("test passed", total_tests, src_off,
dst_off, len, 0);
}
continue;
error_unmap_continue:
dmaengine_unmap_put(um);
failed_tests++;
}
runtime = ktime_us_delta(ktime_get(), ktime);

View file

@ -700,6 +700,8 @@ static int usb_dmac_runtime_resume(struct device *dev)
#endif /* CONFIG_PM */
static const struct dev_pm_ops usb_dmac_pm = {
SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
pm_runtime_force_resume)
SET_RUNTIME_PM_OPS(usb_dmac_runtime_suspend, usb_dmac_runtime_resume,
NULL)
};

View file

@ -87,13 +87,6 @@ static DEFINE_SPINLOCK(efi_runtime_lock);
* context through efi_pstore_write().
*/
/*
* Expose the EFI runtime lock to the UV platform
*/
#ifdef CONFIG_X86_UV
extern struct semaphore __efi_uv_runtime_lock __alias(efi_runtime_lock);
#endif
/*
* As per commit ef68c8f87ed1 ("x86: Serialize EFI time accesses on rtc_lock"),
* the EFI specification requires that callers of the time related runtime


@ -513,6 +513,7 @@ static umode_t __init ibft_check_tgt_for(void *data, int type)
case ISCSI_BOOT_TGT_NIC_ASSOC:
case ISCSI_BOOT_TGT_CHAP_TYPE:
rc = S_IRUGO;
break;
case ISCSI_BOOT_TGT_NAME:
if (tgt->tgt_name_len)
rc = S_IRUGO;


@ -227,6 +227,7 @@ static int vf610_gpio_probe(struct platform_device *pdev)
struct vf610_gpio_port *port;
struct resource *iores;
struct gpio_chip *gc;
int i;
int ret;
port = devm_kzalloc(&pdev->dev, sizeof(*port), GFP_KERNEL);
@ -265,6 +266,10 @@ static int vf610_gpio_probe(struct platform_device *pdev)
if (ret < 0)
return ret;
/* Mask all GPIO interrupts */
for (i = 0; i < gc->ngpio; i++)
vf610_gpio_writel(0, port->base + PORT_PCR(i));
/* Clear the interrupt status register for all GPIO's */
vf610_gpio_writel(~0, port->base + PORT_ISFR);


@ -112,7 +112,9 @@ static void rd_write(struct msm_rd_state *rd, const void *buf, int sz)
char *fptr = &fifo->buf[fifo->head];
int n;
wait_event(rd->fifo_event, circ_space(&rd->fifo) > 0);
wait_event(rd->fifo_event, circ_space(&rd->fifo) > 0 || !rd->open);
if (!rd->open)
return;
n = min(sz, circ_space_to_end(&rd->fifo));
memcpy(fptr, ptr, n);
@ -201,7 +203,10 @@ out:
static int rd_release(struct inode *inode, struct file *file)
{
struct msm_rd_state *rd = inode->i_private;
rd->open = false;
wake_up_all(&rd->fifo_event);
return 0;
}


@ -1299,6 +1299,7 @@ static int evergreen_cs_handle_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
return -EINVAL;
}
ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
break;
case CB_TARGET_MASK:
track->cb_target_mask = radeon_get_ib_value(p, idx);
track->cb_dirty = true;


@ -746,8 +746,8 @@ static struct ipu_devtype ipu_type_imx51 = {
.cpmem_ofs = 0x1f000000,
.srm_ofs = 0x1f040000,
.tpm_ofs = 0x1f060000,
.csi0_ofs = 0x1f030000,
.csi1_ofs = 0x1f038000,
.csi0_ofs = 0x1e030000,
.csi1_ofs = 0x1e038000,
.ic_ofs = 0x1e020000,
.disp0_ofs = 0x1e040000,
.disp1_ofs = 0x1e048000,
@ -762,8 +762,8 @@ static struct ipu_devtype ipu_type_imx53 = {
.cpmem_ofs = 0x07000000,
.srm_ofs = 0x07040000,
.tpm_ofs = 0x07060000,
.csi0_ofs = 0x07030000,
.csi1_ofs = 0x07038000,
.csi0_ofs = 0x06030000,
.csi1_ofs = 0x06038000,
.ic_ofs = 0x06020000,
.disp0_ofs = 0x06040000,
.disp1_ofs = 0x06048000,


@ -591,11 +591,15 @@ static void intel_th_gth_unassign(struct intel_th_device *thdev,
{
struct gth_device *gth = dev_get_drvdata(&thdev->dev);
int port = othdev->output.port;
int master;
spin_lock(&gth->gth_lock);
othdev->output.port = -1;
othdev->output.active = false;
gth->output[port].output = NULL;
for (master = 0; master < TH_CONFIGURABLE_MASTERS; master++)
if (gth->master[master] == port)
gth->master[master] = -1;
spin_unlock(&gth->gth_lock);
}


@ -251,6 +251,9 @@ static int find_free_channels(unsigned long *bitmap, unsigned int start,
;
if (i == width)
return pos;
/* step over [pos..pos+i) to continue search */
pos += i;
}
return -1;
@ -526,7 +529,7 @@ static int stm_char_policy_set_ioctl(struct stm_file *stmf, void __user *arg)
{
struct stm_device *stm = stmf->stm;
struct stp_policy_id *id;
int ret = -EINVAL;
int ret = -EINVAL, wlimit = 1;
u32 size;
if (stmf->output.nr_chans)
@ -554,8 +557,10 @@ static int stm_char_policy_set_ioctl(struct stm_file *stmf, void __user *arg)
if (id->__reserved_0 || id->__reserved_1)
goto err_free;
if (id->width < 1 ||
id->width > PAGE_SIZE / stm->data->sw_mmiosz)
if (stm->data->sw_mmiosz)
wlimit = PAGE_SIZE / stm->data->sw_mmiosz;
if (id->width < 1 || id->width > wlimit)
goto err_free;
ret = stm_file_assign(stmf, id->id, id->width);


@ -378,8 +378,10 @@ static void cdns_i2c_mrecv(struct cdns_i2c *id)
* Check for the message size against FIFO depth and set the
* 'hold bus' bit if it is greater than FIFO depth.
*/
if (id->recv_count > CDNS_I2C_FIFO_DEPTH)
if ((id->recv_count > CDNS_I2C_FIFO_DEPTH) || id->bus_hold_flag)
ctrl_reg |= CDNS_I2C_CR_HOLD;
else
ctrl_reg = ctrl_reg & ~CDNS_I2C_CR_HOLD;
cdns_i2c_writereg(ctrl_reg, CDNS_I2C_CR_OFFSET);
@ -436,8 +438,11 @@ static void cdns_i2c_msend(struct cdns_i2c *id)
* Check for the message size against FIFO depth and set the
* 'hold bus' bit if it is greater than FIFO depth.
*/
if (id->send_count > CDNS_I2C_FIFO_DEPTH)
if ((id->send_count > CDNS_I2C_FIFO_DEPTH) || id->bus_hold_flag)
ctrl_reg |= CDNS_I2C_CR_HOLD;
else
ctrl_reg = ctrl_reg & ~CDNS_I2C_CR_HOLD;
cdns_i2c_writereg(ctrl_reg, CDNS_I2C_CR_OFFSET);
/* Clear the interrupts in interrupt status register. */


@ -696,7 +696,7 @@ static const struct i2c_algorithm tegra_i2c_algo = {
/* payload size is only 12 bit */
static struct i2c_adapter_quirks tegra_i2c_quirks = {
.max_read_len = 4096,
.max_write_len = 4096,
.max_write_len = 4096 - 12,
};
static const struct tegra_i2c_hw_feature tegra20_i2c_hw = {


@ -515,7 +515,6 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
opcode == IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE) {
wc.ex.imm_data = ohdr->u.ud.imm_data;
wc.wc_flags = IB_WC_WITH_IMM;
tlen -= sizeof(u32);
} else if (opcode == IB_OPCODE_UD_SEND_ONLY) {
wc.ex.imm_data = 0;
wc.wc_flags = 0;


@ -2594,7 +2594,6 @@ static int srp_reset_device(struct scsi_cmnd *scmnd)
{
struct srp_target_port *target = host_to_target(scmnd->device->host);
struct srp_rdma_ch *ch;
int i, j;
u8 status;
shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");
@ -2606,15 +2605,6 @@ static int srp_reset_device(struct scsi_cmnd *scmnd)
if (status)
return FAILED;
for (i = 0; i < target->ch_count; i++) {
ch = &target->ch[i];
for (j = 0; j < target->req_ring_size; ++j) {
struct srp_request *req = &ch->req_ring[j];
srp_finish_req(ch, req, scmnd->device, DID_RESET << 16);
}
}
return SUCCESS;
}


@ -220,7 +220,7 @@ static void matrix_keypad_stop(struct input_dev *dev)
keypad->stopped = true;
spin_unlock_irq(&keypad->lock);
flush_work(&keypad->work.work);
flush_delayed_work(&keypad->work);
/*
* matrix_keypad_scan() will leave IRQs enabled;
* we should disable them now.


@ -153,6 +153,8 @@ static int keyscan_probe(struct platform_device *pdev)
input_dev->id.bustype = BUS_HOST;
keypad_data->input_dev = input_dev;
error = keypad_matrix_key_parse_dt(keypad_data);
if (error)
return error;
@ -168,8 +170,6 @@ static int keyscan_probe(struct platform_device *pdev)
input_set_drvdata(input_dev, keypad_data);
keypad_data->input_dev = input_dev;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
keypad_data->base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(keypad_data->base))


@ -1241,6 +1241,7 @@ static const struct acpi_device_id elan_acpi_id[] = {
{ "ELAN0000", 0 },
{ "ELAN0100", 0 },
{ "ELAN0600", 0 },
{ "ELAN0601", 0 },
{ "ELAN0602", 0 },
{ "ELAN0605", 0 },
{ "ELAN0608", 0 },


@ -187,6 +187,7 @@ enum {
MODEL_DIGITIZER_II = 0x5544, /* UD */
MODEL_GRAPHIRE = 0x4554, /* ET */
MODEL_PENPARTNER = 0x4354, /* CT */
MODEL_ARTPAD_II = 0x4B54, /* KT */
};
static void wacom_handle_model_response(struct wacom *wacom)
@ -245,6 +246,7 @@ static void wacom_handle_model_response(struct wacom *wacom)
wacom->flags = F_HAS_STYLUS2 | F_HAS_SCROLLWHEEL;
break;
case MODEL_ARTPAD_II:
case MODEL_DIGITIZER_II:
wacom->dev->name = "Wacom Digitizer II";
wacom->dev->id.version = MODEL_DIGITIZER_II;


@ -1982,6 +1982,7 @@ static void do_attach(struct iommu_dev_data *dev_data,
static void do_detach(struct iommu_dev_data *dev_data)
{
struct protection_domain *domain = dev_data->domain;
struct amd_iommu *iommu;
u16 alias;
@ -1997,10 +1998,6 @@ static void do_detach(struct iommu_dev_data *dev_data)
iommu = amd_iommu_rlookup_table[dev_data->devid];
alias = dev_data->alias;
/* decrease reference counters */
dev_data->domain->dev_iommu[iommu->index] -= 1;
dev_data->domain->dev_cnt -= 1;
/* Update data structures */
dev_data->domain = NULL;
list_del(&dev_data->list);
@ -2010,6 +2007,16 @@ static void do_detach(struct iommu_dev_data *dev_data)
/* Flush the DTE entry */
device_flush_dte(dev_data);
/* Flush IOTLB */
domain_flush_tlb_pde(domain);
/* Wait for the flushes to finish */
domain_flush_complete(domain);
/* decrease reference counters - needs to happen after the flushes */
domain->dev_iommu[iommu->index] -= 1;
domain->dev_cnt -= 1;
}
/*


@ -34,6 +34,9 @@
#define SEL_INT_PENDING (1 << 6)
#define SEL_INT_NUM_MASK 0x3f
#define MMP2_ICU_INT_ROUTE_PJ4_IRQ (1 << 5)
#define MMP2_ICU_INT_ROUTE_PJ4_FIQ (1 << 6)
struct icu_chip_data {
int nr_irqs;
unsigned int virq_base;
@ -190,7 +193,8 @@ static struct mmp_intc_conf mmp_conf = {
static struct mmp_intc_conf mmp2_conf = {
.conf_enable = 0x20,
.conf_disable = 0x0,
.conf_mask = 0x7f,
.conf_mask = MMP2_ICU_INT_ROUTE_PJ4_IRQ |
MMP2_ICU_INT_ROUTE_PJ4_FIQ,
};
static void __exception_irq_entry mmp_handle_irq(struct pt_regs *regs)


@ -423,7 +423,7 @@ void b1_parse_version(avmctrl_info *cinfo)
int i, j;
for (j = 0; j < AVM_MAXVERSION; j++)
cinfo->version[j] = "\0\0" + 1;
cinfo->version[j] = "";
for (i = 0, j = 0;
j < AVM_MAXVERSION && i < cinfo->versionlen;
j++, i += cinfo->versionbuf[i] + 1)


@ -786,7 +786,7 @@ isdn_tty_suspend(char *id, modem_info *info, atemu *m)
cmd.parm.cmsg.para[3] = 4; /* 16 bit 0x0004 Suspend */
cmd.parm.cmsg.para[4] = 0;
cmd.parm.cmsg.para[5] = l;
strncpy(&cmd.parm.cmsg.para[6], id, l);
strscpy(&cmd.parm.cmsg.para[6], id, l);
cmd.command = CAPI_PUT_MESSAGE;
cmd.driver = info->isdn_driver;
cmd.arg = info->isdn_channel;
@ -1459,15 +1459,19 @@ isdn_tty_set_termios(struct tty_struct *tty, struct ktermios *old_termios)
{
modem_info *info = (modem_info *) tty->driver_data;
mutex_lock(&modem_info_mutex);
if (!old_termios)
isdn_tty_change_speed(info);
else {
if (tty->termios.c_cflag == old_termios->c_cflag &&
tty->termios.c_ispeed == old_termios->c_ispeed &&
tty->termios.c_ospeed == old_termios->c_ospeed)
tty->termios.c_ospeed == old_termios->c_ospeed) {
mutex_unlock(&modem_info_mutex);
return;
}
isdn_tty_change_speed(info);
}
mutex_unlock(&modem_info_mutex);
}
/*


@ -318,7 +318,9 @@ static int lp5523_init_program_engine(struct lp55xx_chip *chip)
/* Let the programs run for couple of ms and check the engine status */
usleep_range(3000, 6000);
lp55xx_read(chip, LP5523_REG_STATUS, &status);
ret = lp55xx_read(chip, LP5523_REG_STATUS, &status);
if (ret)
return ret;
status &= LP5523_ENG_STATUS_MASK;
if (status != LP5523_ENG_STATUS_MASK) {


@ -3755,6 +3755,8 @@ static int run(struct mddev *mddev)
set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
mddev->sync_thread = md_register_thread(md_do_sync, mddev,
"reshape");
if (!mddev->sync_thread)
goto out_free_conf;
}
return 0;
@ -4442,7 +4444,6 @@ bio_full:
atomic_inc(&r10_bio->remaining);
read_bio->bi_next = NULL;
generic_make_request(read_bio);
sector_nr += nr_sectors;
sectors_done += nr_sectors;
if (sector_nr <= last)
goto read_more;


@ -6973,6 +6973,8 @@ static int run(struct mddev *mddev)
set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
mddev->sync_thread = md_register_thread(md_do_sync, mddev,
"reshape");
if (!mddev->sync_thread)
goto abort;
}
/* Ok, everything is just fine now */


@ -1019,11 +1019,19 @@ static int uvc_parse_standard_control(struct uvc_device *dev,
return -EINVAL;
}
/* Make sure the terminal type MSB is not null, otherwise it
* could be confused with a unit.
/*
* Reject invalid terminal types that would cause issues:
*
* - The high byte must be non-zero, otherwise it would be
* confused with a unit.
*
* - Bit 15 must be 0, as we use it internally as a terminal
* direction flag.
*
* Other unknown types are accepted.
*/
type = get_unaligned_le16(&buffer[4]);
if ((type & 0xff00) == 0) {
if ((type & 0x7f00) == 0 || (type & 0x8000) != 0) {
uvc_trace(UVC_TRACE_DESCR, "device %d videocontrol "
"interface %d INPUT_TERMINAL %d has invalid "
"type 0x%04x, skipping\n", udev->devnum,


@ -638,6 +638,14 @@ void uvc_video_clock_update(struct uvc_streaming *stream,
if (!uvc_hw_timestamps_param)
return;
/*
* We will get called from __vb2_queue_cancel() if there are buffers
* done but not dequeued by the user, but the sample array has already
* been released at that time. Just bail out in that case.
*/
if (!clock->samples)
return;
spin_lock_irqsave(&clock->lock, flags);
if (clock->count < clock->size)


@ -259,7 +259,7 @@ static int get_register_interruptible(struct ab8500 *ab8500, u8 bank,
mutex_unlock(&ab8500->lock);
dev_vdbg(ab8500->dev, "rd: addr %#x => data %#x\n", addr, ret);
return ret;
return (ret < 0) ? ret : 0;
}
static int ab8500_get_register(struct device *dev, u8 bank,


@ -2610,7 +2610,7 @@ static struct irq_chip prcmu_irq_chip = {
.irq_unmask = prcmu_irq_unmask,
};
static __init char *fw_project_name(u32 project)
static char *fw_project_name(u32 project)
{
switch (project) {
case PRCMU_FW_PROJECT_U8500:
@ -2758,7 +2758,7 @@ void __init db8500_prcmu_early_init(u32 phy_base, u32 size)
INIT_WORK(&mb0_transfer.mask_work, prcmu_mask_work);
}
static void __init init_prcm_registers(void)
static void init_prcm_registers(void)
{
u32 val;


@ -274,7 +274,9 @@ int mc13xxx_adc_do_conversion(struct mc13xxx *mc13xxx, unsigned int mode,
mc13xxx->adcflags |= MC13XXX_ADC_WORKING;
mc13xxx_reg_read(mc13xxx, MC13XXX_ADC0, &old_adc0);
ret = mc13xxx_reg_read(mc13xxx, MC13XXX_ADC0, &old_adc0);
if (ret)
goto out;
adc0 = MC13XXX_ADC0_ADINC1 | MC13XXX_ADC0_ADINC2;
adc1 = MC13XXX_ADC1_ADEN | MC13XXX_ADC1_ADTRIGIGN | MC13XXX_ADC1_ASC;


@ -570,6 +570,10 @@ static int qcom_rpm_probe(struct platform_device *pdev)
return -EFAULT;
}
writel(fw_version[0], RPM_CTRL_REG(rpm, 0));
writel(fw_version[1], RPM_CTRL_REG(rpm, 1));
writel(fw_version[2], RPM_CTRL_REG(rpm, 2));
dev_info(&pdev->dev, "RPM firmware %u.%u.%u\n", fw_version[0],
fw_version[1],
fw_version[2]);


@ -279,8 +279,9 @@ static int ti_tscadc_probe(struct platform_device *pdev)
cell->pdata_size = sizeof(tscadc);
}
err = mfd_add_devices(&pdev->dev, pdev->id, tscadc->cells,
tscadc->used_cells, NULL, 0, NULL);
err = mfd_add_devices(&pdev->dev, PLATFORM_DEVID_AUTO,
tscadc->cells, tscadc->used_cells, NULL,
0, NULL);
if (err < 0)
goto err_disable_clk;


@ -982,7 +982,7 @@ add_children(struct twl4030_platform_data *pdata, unsigned irq_base,
* letting it generate the right frequencies for USB, MADC, and
* other purposes.
*/
static inline int __init protect_pm_master(void)
static inline int protect_pm_master(void)
{
int e = 0;
@ -991,7 +991,7 @@ static inline int __init protect_pm_master(void)
return e;
}
static inline int __init unprotect_pm_master(void)
static inline int unprotect_pm_master(void)
{
int e = 0;


@ -1622,6 +1622,7 @@ static const struct reg_default wm5110_reg_default[] = {
{ 0x00000ECD, 0x0000 }, /* R3789 - HPLPF4_2 */
{ 0x00000EE0, 0x0000 }, /* R3808 - ASRC_ENABLE */
{ 0x00000EE2, 0x0000 }, /* R3810 - ASRC_RATE1 */
{ 0x00000EE3, 0x4000 }, /* R3811 - ASRC_RATE2 */
{ 0x00000EF0, 0x0000 }, /* R3824 - ISRC 1 CTRL 1 */
{ 0x00000EF1, 0x0000 }, /* R3825 - ISRC 1 CTRL 2 */
{ 0x00000EF2, 0x0000 }, /* R3826 - ISRC 1 CTRL 3 */
@ -2877,6 +2878,7 @@ static bool wm5110_readable_register(struct device *dev, unsigned int reg)
case ARIZONA_ASRC_ENABLE:
case ARIZONA_ASRC_STATUS:
case ARIZONA_ASRC_RATE1:
case ARIZONA_ASRC_RATE2:
case ARIZONA_ISRC_1_CTRL_1:
case ARIZONA_ISRC_1_CTRL_2:
case ARIZONA_ISRC_1_CTRL_3:


@ -1451,6 +1451,7 @@ static int mmc_spi_probe(struct spi_device *spi)
if (status != 0)
goto fail_add_host;
}
mmc_detect_change(mmc, 0);
dev_info(&spi->dev, "SD/MMC host %s%s%s%s%s\n",
dev_name(&mmc->class_dev),


@ -712,7 +712,7 @@ static uint64_t _mv88e6xxx_get_ethtool_stat(struct dsa_switch *ds,
if (s->sizeof_stat == 8)
_mv88e6xxx_stats_read(ds, s->reg + 1, &high);
}
value = (((u64)high) << 16) | low;
value = (((u64)high) << 32) | low;
return value;
}


@ -145,7 +145,8 @@ u32 msgdma_tx_completions(struct altera_tse_private *priv)
& 0xffff;
if (inuse) { /* Tx FIFO is not empty */
ready = priv->tx_prod - priv->tx_cons - inuse - 1;
ready = max_t(int,
priv->tx_prod - priv->tx_cons - inuse - 1, 0);
} else {
/* Check for buffered last packet */
status = csrrd32(priv->tx_dma_csr, msgdma_csroffs(status));


@ -716,8 +716,10 @@ static struct phy_device *connect_local_phy(struct net_device *dev)
phydev = phy_connect(dev, phy_id_fmt, &altera_tse_adjust_link,
priv->phy_iface);
if (IS_ERR(phydev))
if (IS_ERR(phydev)) {
netdev_err(dev, "Could not attach to PHY\n");
phydev = NULL;
}
} else {
int ret;


@ -1338,13 +1338,11 @@ static int atl2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct net_device *netdev;
struct atl2_adapter *adapter;
static int cards_found;
static int cards_found = 0;
unsigned long mmio_start;
int mmio_len;
int err;
cards_found = 0;
err = pci_enable_device(pdev);
if (err)
return err;

Some files were not shown because too many files have changed in this diff.