Merge remote-tracking branch 'lts/linux-4.4.y' into linux-linaro-lsk-v4.4
Conflicts: drivers/base/power/opp/core.c
commit 138f2c357e
175 changed files with 1640 additions and 689 deletions
Makefile

@@ -1,6 +1,6 @@
 VERSION = 4
 PATCHLEVEL = 4
-SUBLEVEL = 8
+SUBLEVEL = 9
 EXTRAVERSION =
 NAME = Blurry Fish Butt

@@ -792,3 +792,8 @@
 	tx-num-evt = <32>;
 	rx-num-evt = <32>;
 };
+
+&synctimer_32kclk {
+	assigned-clocks = <&mux_synctimer32k_ck>;
+	assigned-clock-parents = <&clkdiv32k_ick>;
+};

@@ -529,7 +529,7 @@
 };

 sata@a0000 {
-	compatible = "marvell,orion-sata";
+	compatible = "marvell,armada-370-sata";
 	reg = <0xa0000 0x5000>;
 	interrupts = <GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>;
 	clocks = <&gateclk 14>, <&gateclk 20>;

@@ -117,7 +117,7 @@
 };

 /* USB part of the eSATA/USB 2.0 port */
-usb@50000 {
+usb@58000 {
 	status = "okay";
 };

@@ -30,7 +30,7 @@
 	reg = <0x43100000 90>;
 	interrupts = <45>;
 	clocks = <&clks CLK_NAND>;
-	dmas = <&pdma 97>;
+	dmas = <&pdma 97 3>;
 	dma-names = "data";
 	#address-cells = <1>;
 	#size-cells = <1>;

@@ -26,6 +26,7 @@ menuconfig ARCH_EXYNOS
 	select S5P_DEV_MFC
 	select SRAM
 	select THERMAL
+	select THERMAL_OF
 	select MFD_SYSCON
 	help
 	  Support for SAMSUNG EXYNOS SoCs (EXYNOS4/5)

@@ -34,6 +34,7 @@
 #include "pm.h"
 #include "control.h"
 #include "common.h"
+#include "soc.h"

 /* Mach specific information to be recorded in the C-state driver_data */
 struct omap3_idle_statedata {
@@ -315,6 +316,69 @@ static struct cpuidle_driver omap3_idle_driver = {
 	.safe_state_index = 0,
 };

+/*
+ * Numbers based on measurements made in October 2009 for PM optimized kernel
+ * with CPU freq enabled on device Nokia N900. Assumes OPP2 (main idle OPP,
+ * and worst case latencies).
+ */
+static struct cpuidle_driver omap3430_idle_driver = {
+	.name             = "omap3430_idle",
+	.owner            = THIS_MODULE,
+	.states = {
+		{
+			.enter		  = omap3_enter_idle_bm,
+			.exit_latency	  = 110 + 162,
+			.target_residency = 5,
+			.name		  = "C1",
+			.desc		  = "MPU ON + CORE ON",
+		},
+		{
+			.enter		  = omap3_enter_idle_bm,
+			.exit_latency	  = 106 + 180,
+			.target_residency = 309,
+			.name		  = "C2",
+			.desc		  = "MPU ON + CORE ON",
+		},
+		{
+			.enter		  = omap3_enter_idle_bm,
+			.exit_latency	  = 107 + 410,
+			.target_residency = 46057,
+			.name		  = "C3",
+			.desc		  = "MPU RET + CORE ON",
+		},
+		{
+			.enter		  = omap3_enter_idle_bm,
+			.exit_latency	  = 121 + 3374,
+			.target_residency = 46057,
+			.name		  = "C4",
+			.desc		  = "MPU OFF + CORE ON",
+		},
+		{
+			.enter		  = omap3_enter_idle_bm,
+			.exit_latency	  = 855 + 1146,
+			.target_residency = 46057,
+			.name		  = "C5",
+			.desc		  = "MPU RET + CORE RET",
+		},
+		{
+			.enter		  = omap3_enter_idle_bm,
+			.exit_latency	  = 7580 + 4134,
+			.target_residency = 484329,
+			.name		  = "C6",
+			.desc		  = "MPU OFF + CORE RET",
+		},
+		{
+			.enter		  = omap3_enter_idle_bm,
+			.exit_latency	  = 7505 + 15274,
+			.target_residency = 484329,
+			.name		  = "C7",
+			.desc		  = "MPU OFF + CORE OFF",
+		},
+	},
+	.state_count = ARRAY_SIZE(omap3_idle_data),
+	.safe_state_index = 0,
+};
+
 /* Public functions */

 /**
@@ -333,5 +397,8 @@ int __init omap3_idle_init(void)
 	if (!mpu_pd || !core_pd || !per_pd || !cam_pd)
 		return -ENODEV;

-	return cpuidle_register(&omap3_idle_driver, NULL);
+	if (cpu_is_omap3430())
+		return cpuidle_register(&omap3430_idle_driver, NULL);
+	else
+		return cpuidle_register(&omap3_idle_driver, NULL);
 }

@@ -368,6 +368,7 @@ void __init omap5_map_io(void)
 void __init dra7xx_map_io(void)
 {
 	iotable_init(dra7xx_io_desc, ARRAY_SIZE(dra7xx_io_desc));
+	omap_barriers_init();
 }
 #endif
 /*

@@ -1416,9 +1416,7 @@ static void _enable_sysc(struct omap_hwmod *oh)
 	    (sf & SYSC_HAS_CLOCKACTIVITY))
 		_set_clockactivity(oh, oh->class->sysc->clockact, &v);

-	/* If the cached value is the same as the new value, skip the write */
-	if (oh->_sysc_cache != v)
-		_write_sysconfig(v, oh);
+	_write_sysconfig(v, oh);

 	/*
 	 * Set the autoidle bit only after setting the smartidle bit
@@ -1481,7 +1479,9 @@ static void _idle_sysc(struct omap_hwmod *oh)
 		_set_master_standbymode(oh, idlemode, &v);
 	}

-	_write_sysconfig(v, oh);
+	/* If the cached value is the same as the new value, skip the write */
+	if (oh->_sysc_cache != v)
+		_write_sysconfig(v, oh);
 }

 /**

@@ -1,6 +1,7 @@
 menuconfig ARCH_SIRF
 	bool "CSR SiRF" if ARCH_MULTI_V7
 	select ARCH_HAS_RESET_CONTROLLER
+	select RESET_CONTROLLER
 	select ARCH_REQUIRE_GPIOLIB
 	select GENERIC_IRQ_CHIP
 	select NO_IOPORT_MAP

@@ -69,11 +69,11 @@ extern void __pgd_error(const char *file, int line, unsigned long val);
 #define PROT_DEFAULT		(PTE_TYPE_PAGE | PTE_AF | PTE_SHARED)
 #define PROT_SECT_DEFAULT	(PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S)

-#define PROT_DEVICE_nGnRnE	(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_DEVICE_nGnRnE))
-#define PROT_DEVICE_nGnRE	(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_DEVICE_nGnRE))
-#define PROT_NORMAL_NC		(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_NORMAL_NC))
-#define PROT_NORMAL_WT		(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_NORMAL_WT))
-#define PROT_NORMAL		(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_NORMAL))
+#define PROT_DEVICE_nGnRnE	(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRnE))
+#define PROT_DEVICE_nGnRE	(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRE))
+#define PROT_NORMAL_NC		(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL_NC))
+#define PROT_NORMAL_WT		(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL_WT))
+#define PROT_NORMAL		(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL))

 #define PROT_SECT_DEVICE_nGnRE	(PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PMD_ATTRINDX(MT_DEVICE_nGnRE))
 #define PROT_SECT_NORMAL	(PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PMD_ATTRINDX(MT_NORMAL))
@@ -83,7 +83,7 @@ extern void __pgd_error(const char *file, int line, unsigned long val);

 #define PAGE_KERNEL		__pgprot(_PAGE_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE)
 #define PAGE_KERNEL_RO		__pgprot(_PAGE_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_RDONLY)
-#define PAGE_KERNEL_ROX		__pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_RDONLY)
+#define PAGE_KERNEL_ROX		__pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_RDONLY)
 #define PAGE_KERNEL_EXEC	__pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_WRITE)
 #define PAGE_KERNEL_EXEC_CONT	__pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_CONT)

@@ -155,6 +155,7 @@ extern struct page *empty_zero_page;
 #define pte_write(pte)		(!!(pte_val(pte) & PTE_WRITE))
 #define pte_exec(pte)		(!(pte_val(pte) & PTE_UXN))
 #define pte_cont(pte)		(!!(pte_val(pte) & PTE_CONT))
+#define pte_user(pte)		(!!(pte_val(pte) & PTE_USER))

 #ifdef CONFIG_ARM64_HW_AFDBM
 #define pte_hw_dirty(pte)	(pte_write(pte) && !(pte_val(pte) & PTE_RDONLY))
@@ -165,8 +166,6 @@ extern struct page *empty_zero_page;
 #define pte_dirty(pte)		(pte_sw_dirty(pte) || pte_hw_dirty(pte))

 #define pte_valid(pte)		(!!(pte_val(pte) & PTE_VALID))
-#define pte_valid_user(pte) \
-	((pte_val(pte) & (PTE_VALID | PTE_USER)) == (PTE_VALID | PTE_USER))
 #define pte_valid_not_user(pte) \
 	((pte_val(pte) & (PTE_VALID | PTE_USER)) == PTE_VALID)

@@ -264,13 +263,13 @@ extern void __sync_icache_dcache(pte_t pteval, unsigned long addr);
 static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
 			      pte_t *ptep, pte_t pte)
 {
-	if (pte_valid_user(pte)) {
-		if (!pte_special(pte) && pte_exec(pte))
-			__sync_icache_dcache(pte, addr);
+	if (pte_present(pte)) {
+		if (pte_sw_dirty(pte) && pte_write(pte))
+			pte_val(pte) &= ~PTE_RDONLY;
+		else
+			pte_val(pte) |= PTE_RDONLY;
+		if (pte_user(pte) && pte_exec(pte) && !pte_special(pte))
+			__sync_icache_dcache(pte, addr);
 	}

 	/*
@@ -641,6 +640,7 @@ extern pgd_t idmap_pg_dir[PTRS_PER_PGD];
 *	bits 0-1:	present (must be zero)
 *	bits 2-7:	swap type
 *	bits 8-57:	swap offset
+ *	bit 58:		PTE_PROT_NONE (must be zero)
 */
 #define __SWP_TYPE_SHIFT	2
 #define __SWP_TYPE_BITS		6

@@ -31,6 +31,7 @@
 #define PPC_FEATURE_PSERIES_PERFMON_COMPAT \
 					0x00000040

+/* Reserved - do not use		0x00000004 */
 #define PPC_FEATURE_TRUE_LE		0x00000002
 #define PPC_FEATURE_PPC_LE		0x00000001

@@ -148,23 +148,25 @@ static struct ibm_pa_feature {
 	unsigned long	cpu_features;	/* CPU_FTR_xxx bit */
 	unsigned long	mmu_features;	/* MMU_FTR_xxx bit */
 	unsigned int	cpu_user_ftrs;	/* PPC_FEATURE_xxx bit */
+	unsigned int	cpu_user_ftrs2;	/* PPC_FEATURE2_xxx bit */
 	unsigned char	pabyte;		/* byte number in ibm,pa-features */
 	unsigned char	pabit;		/* bit number (big-endian) */
 	unsigned char	invert;		/* if 1, pa bit set => clear feature */
 } ibm_pa_features[] __initdata = {
-	{0, 0, PPC_FEATURE_HAS_MMU, 0, 0, 0},
-	{0, 0, PPC_FEATURE_HAS_FPU, 0, 1, 0},
-	{CPU_FTR_CTRL, 0, 0, 0, 3, 0},
-	{CPU_FTR_NOEXECUTE, 0, 0, 0, 6, 0},
-	{CPU_FTR_NODSISRALIGN, 0, 0, 1, 1, 1},
-	{0, MMU_FTR_CI_LARGE_PAGE, 0, 1, 2, 0},
-	{CPU_FTR_REAL_LE, PPC_FEATURE_TRUE_LE, 5, 0, 0},
+	{0, 0, PPC_FEATURE_HAS_MMU, 0, 0, 0, 0},
+	{0, 0, PPC_FEATURE_HAS_FPU, 0, 0, 1, 0},
+	{CPU_FTR_CTRL, 0, 0, 0, 0, 3, 0},
+	{CPU_FTR_NOEXECUTE, 0, 0, 0, 0, 6, 0},
+	{CPU_FTR_NODSISRALIGN, 0, 0, 0, 1, 1, 1},
+	{0, MMU_FTR_CI_LARGE_PAGE, 0, 0, 1, 2, 0},
+	{CPU_FTR_REAL_LE, 0, PPC_FEATURE_TRUE_LE, 0, 5, 0, 0},
 	/*
-	 * If the kernel doesn't support TM (ie. CONFIG_PPC_TRANSACTIONAL_MEM=n),
-	 * we don't want to turn on CPU_FTR_TM here, so we use CPU_FTR_TM_COMP
-	 * which is 0 if the kernel doesn't support TM.
+	 * If the kernel doesn't support TM (ie CONFIG_PPC_TRANSACTIONAL_MEM=n),
+	 * we don't want to turn on TM here, so we use the *_COMP versions
+	 * which are 0 if the kernel doesn't support TM.
 	 */
-	{CPU_FTR_TM_COMP, 0, 0, 22, 0, 0},
+	{CPU_FTR_TM_COMP, 0, 0,
+	 PPC_FEATURE2_HTM_COMP|PPC_FEATURE2_HTM_NOSC_COMP, 22, 0, 0},
 };

 static void __init scan_features(unsigned long node, const unsigned char *ftrs,
@@ -195,10 +197,12 @@ static void __init scan_features(unsigned long node, const unsigned char *ftrs,
 		if (bit ^ fp->invert) {
 			cur_cpu_spec->cpu_features |= fp->cpu_features;
 			cur_cpu_spec->cpu_user_features |= fp->cpu_user_ftrs;
+			cur_cpu_spec->cpu_user_features2 |= fp->cpu_user_ftrs2;
 			cur_cpu_spec->mmu_features |= fp->mmu_features;
 		} else {
 			cur_cpu_spec->cpu_features &= ~fp->cpu_features;
 			cur_cpu_spec->cpu_user_features &= ~fp->cpu_user_ftrs;
+			cur_cpu_spec->cpu_user_features2 &= ~fp->cpu_user_ftrs2;
 			cur_cpu_spec->mmu_features &= ~fp->mmu_features;
 		}
 	}
 }

@@ -45,7 +45,8 @@ struct zpci_fmb {
 	u64 rpcit_ops;
 	u64 dma_rbytes;
 	u64 dma_wbytes;
-} __packed __aligned(64);
+	u64 pad[2];
+} __packed __aligned(128);

 enum zpci_state {
 	ZPCI_FN_STATE_RESERVED,

@@ -453,10 +453,10 @@ static int sha_complete_job(struct mcryptd_hash_request_ctx *rctx,

 		req = cast_mcryptd_ctx_to_req(req_ctx);
 		if (irqs_disabled())
-			rctx->complete(&req->base, ret);
+			req_ctx->complete(&req->base, ret);
 		else {
 			local_bh_disable();
-			rctx->complete(&req->base, ret);
+			req_ctx->complete(&req->base, ret);
 			local_bh_enable();
 		}
 	}

@@ -4,6 +4,7 @@
 #include <asm/page.h>
 #include <asm-generic/hugetlb.h>

+#define hugepages_supported() cpu_has_pse

 static inline int is_hugepage_only_range(struct mm_struct *mm,
 					 unsigned long addr,

@@ -254,7 +254,8 @@ static void clear_irq_vector(int irq, struct apic_chip_data *data)
 	struct irq_desc *desc;
 	int cpu, vector;

-	BUG_ON(!data->cfg.vector);
+	if (!data->cfg.vector)
+		return;

 	vector = data->cfg.vector;
 	for_each_cpu_and(cpu, data->domain, cpu_online_mask)

@@ -29,7 +29,7 @@ static char gen_pool_buf[MCE_POOLSZ];
 void mce_gen_pool_process(void)
 {
 	struct llist_node *head;
-	struct mce_evt_llist *node;
+	struct mce_evt_llist *node, *tmp;
 	struct mce *mce;

 	head = llist_del_all(&mce_event_llist);
@@ -37,7 +37,7 @@ void mce_gen_pool_process(void)
 		return;

 	head = llist_reverse_order(head);
-	llist_for_each_entry(node, head, llnode) {
+	llist_for_each_entry_safe(node, tmp, head, llnode) {
 		mce = &node->mce;
 		atomic_notifier_call_chain(&x86_mce_decoder_chain, 0, mce);
 		gen_pool_free(mce_evt_pool, (unsigned long)node, sizeof(*node));

@@ -697,7 +697,6 @@ static int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
 		if ((xcr0 & XFEATURE_MASK_AVX512) != XFEATURE_MASK_AVX512)
 			return 1;
 	}
-	kvm_put_guest_xcr0(vcpu);
 	vcpu->arch.xcr0 = xcr0;

 	if ((xcr0 ^ old_xcr0) & XFEATURE_MASK_EXTEND)
@@ -6495,8 +6494,6 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 	kvm_x86_ops->prepare_guest_switch(vcpu);
 	if (vcpu->fpu_active)
 		kvm_load_guest_fpu(vcpu);
-	kvm_load_guest_xcr0(vcpu);
-
 	vcpu->mode = IN_GUEST_MODE;

 	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
@@ -6519,6 +6516,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 		goto cancel_injection;
 	}

+	kvm_load_guest_xcr0(vcpu);
+
 	if (req_immediate_exit)
 		smp_send_reschedule(vcpu->cpu);

@@ -6568,6 +6567,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 	vcpu->mode = OUTSIDE_GUEST_MODE;
 	smp_wmb();

+	kvm_put_guest_xcr0(vcpu);
+
 	/* Interrupt is enabled by handle_external_intr() */
 	kvm_x86_ops->handle_external_intr(vcpu);

@@ -7215,7 +7216,6 @@ void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
 	 * and assume host would use all available bits.
 	 * Guest xcr0 would be loaded later.
 	 */
-	kvm_put_guest_xcr0(vcpu);
 	vcpu->guest_fpu_loaded = 1;
 	__kernel_fpu_begin();
 	__copy_kernel_to_fpregs(&vcpu->arch.guest_fpu.state);
@@ -7224,8 +7224,6 @@ void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)

 void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
 {
-	kvm_put_guest_xcr0(vcpu);
-
 	if (!vcpu->guest_fpu_loaded) {
 		vcpu->fpu_counter = 0;
 		return;

@@ -33,7 +33,7 @@
 struct kmmio_fault_page {
 	struct list_head list;
 	struct kmmio_fault_page *release_next;
-	unsigned long page; /* location of the fault page */
+	unsigned long addr; /* the requested address */
 	pteval_t old_presence; /* page presence prior to arming */
 	bool armed;

@@ -70,9 +70,16 @@ unsigned int kmmio_count;
 static struct list_head kmmio_page_table[KMMIO_PAGE_TABLE_SIZE];
 static LIST_HEAD(kmmio_probes);

-static struct list_head *kmmio_page_list(unsigned long page)
+static struct list_head *kmmio_page_list(unsigned long addr)
 {
-	return &kmmio_page_table[hash_long(page, KMMIO_PAGE_HASH_BITS)];
+	unsigned int l;
+	pte_t *pte = lookup_address(addr, &l);
+
+	if (!pte)
+		return NULL;
+	addr &= page_level_mask(l);
+
+	return &kmmio_page_table[hash_long(addr, KMMIO_PAGE_HASH_BITS)];
 }

 /* Accessed per-cpu */
@@ -98,15 +105,19 @@ static struct kmmio_probe *get_kmmio_probe(unsigned long addr)
 }

 /* You must be holding RCU read lock. */
-static struct kmmio_fault_page *get_kmmio_fault_page(unsigned long page)
+static struct kmmio_fault_page *get_kmmio_fault_page(unsigned long addr)
 {
 	struct list_head *head;
 	struct kmmio_fault_page *f;
+	unsigned int l;
+	pte_t *pte = lookup_address(addr, &l);

-	page &= PAGE_MASK;
-	head = kmmio_page_list(page);
+	if (!pte)
+		return NULL;
+	addr &= page_level_mask(l);
+	head = kmmio_page_list(addr);
 	list_for_each_entry_rcu(f, head, list) {
-		if (f->page == page)
+		if (f->addr == addr)
 			return f;
 	}
 	return NULL;
@@ -137,10 +148,10 @@ static void clear_pte_presence(pte_t *pte, bool clear, pteval_t *old)
 static int clear_page_presence(struct kmmio_fault_page *f, bool clear)
 {
 	unsigned int level;
-	pte_t *pte = lookup_address(f->page, &level);
+	pte_t *pte = lookup_address(f->addr, &level);

 	if (!pte) {
-		pr_err("no pte for page 0x%08lx\n", f->page);
+		pr_err("no pte for addr 0x%08lx\n", f->addr);
 		return -1;
 	}

@@ -156,7 +167,7 @@ static int clear_page_presence(struct kmmio_fault_page *f, bool clear)
 		return -1;
 	}

-	__flush_tlb_one(f->page);
+	__flush_tlb_one(f->addr);
 	return 0;
 }

@@ -176,12 +187,12 @@ static int arm_kmmio_fault_page(struct kmmio_fault_page *f)
 	int ret;
 	WARN_ONCE(f->armed, KERN_ERR pr_fmt("kmmio page already armed.\n"));
 	if (f->armed) {
-		pr_warning("double-arm: page 0x%08lx, ref %d, old %d\n",
-			   f->page, f->count, !!f->old_presence);
+		pr_warning("double-arm: addr 0x%08lx, ref %d, old %d\n",
+			   f->addr, f->count, !!f->old_presence);
 	}
 	ret = clear_page_presence(f, true);
-	WARN_ONCE(ret < 0, KERN_ERR pr_fmt("arming 0x%08lx failed.\n"),
-		  f->page);
+	WARN_ONCE(ret < 0, KERN_ERR pr_fmt("arming at 0x%08lx failed.\n"),
+		  f->addr);
 	f->armed = true;
 	return ret;
 }
@@ -191,7 +202,7 @@ static void disarm_kmmio_fault_page(struct kmmio_fault_page *f)
 {
 	int ret = clear_page_presence(f, false);
 	WARN_ONCE(ret < 0,
-			KERN_ERR "kmmio disarming 0x%08lx failed.\n", f->page);
+			KERN_ERR "kmmio disarming at 0x%08lx failed.\n", f->addr);
 	f->armed = false;
 }

@@ -215,6 +226,12 @@ int kmmio_handler(struct pt_regs *regs, unsigned long addr)
 	struct kmmio_context *ctx;
 	struct kmmio_fault_page *faultpage;
 	int ret = 0; /* default to fault not handled */
+	unsigned long page_base = addr;
+	unsigned int l;
+	pte_t *pte = lookup_address(addr, &l);
+	if (!pte)
+		return -EINVAL;
+	page_base &= page_level_mask(l);

 	/*
 	 * Preemption is now disabled to prevent process switch during
@@ -227,7 +244,7 @@ int kmmio_handler(struct pt_regs *regs, unsigned long addr)
 	preempt_disable();
 	rcu_read_lock();

-	faultpage = get_kmmio_fault_page(addr);
+	faultpage = get_kmmio_fault_page(page_base);
 	if (!faultpage) {
 		/*
 		 * Either this page fault is not caused by kmmio, or
@@ -239,7 +256,7 @@ int kmmio_handler(struct pt_regs *regs, unsigned long addr)

 	ctx = &get_cpu_var(kmmio_ctx);
 	if (ctx->active) {
-		if (addr == ctx->addr) {
+		if (page_base == ctx->addr) {
 			/*
 			 * A second fault on the same page means some other
 			 * condition needs handling by do_page_fault(), the
@@ -267,9 +284,9 @@ int kmmio_handler(struct pt_regs *regs, unsigned long addr)
 	ctx->active++;

 	ctx->fpage = faultpage;
-	ctx->probe = get_kmmio_probe(addr);
+	ctx->probe = get_kmmio_probe(page_base);
 	ctx->saved_flags = (regs->flags & (X86_EFLAGS_TF | X86_EFLAGS_IF));
-	ctx->addr = addr;
+	ctx->addr = page_base;

 	if (ctx->probe && ctx->probe->pre_handler)
 		ctx->probe->pre_handler(ctx->probe, regs, addr);
@@ -354,12 +371,11 @@ out:
 }

 /* You must be holding kmmio_lock. */
-static int add_kmmio_fault_page(unsigned long page)
+static int add_kmmio_fault_page(unsigned long addr)
 {
 	struct kmmio_fault_page *f;

-	page &= PAGE_MASK;
-	f = get_kmmio_fault_page(page);
+	f = get_kmmio_fault_page(addr);
 	if (f) {
 		if (!f->count)
 			arm_kmmio_fault_page(f);
@@ -372,26 +388,25 @@ static int add_kmmio_fault_page(unsigned long addr)
 		return -1;

 	f->count = 1;
-	f->page = page;
+	f->addr = addr;

 	if (arm_kmmio_fault_page(f)) {
 		kfree(f);
 		return -1;
 	}

-	list_add_rcu(&f->list, kmmio_page_list(f->page));
+	list_add_rcu(&f->list, kmmio_page_list(f->addr));

 	return 0;
 }

 /* You must be holding kmmio_lock. */
-static void release_kmmio_fault_page(unsigned long page,
+static void release_kmmio_fault_page(unsigned long addr,
 				struct kmmio_fault_page **release_list)
 {
 	struct kmmio_fault_page *f;

-	page &= PAGE_MASK;
-	f = get_kmmio_fault_page(page);
+	f = get_kmmio_fault_page(addr);
 	if (!f)
 		return;

@@ -420,18 +435,27 @@ int register_kmmio_probe(struct kmmio_probe *p)
 	int ret = 0;
 	unsigned long size = 0;
 	const unsigned long size_lim = p->len + (p->addr & ~PAGE_MASK);
+	unsigned int l;
+	pte_t *pte;

 	spin_lock_irqsave(&kmmio_lock, flags);
 	if (get_kmmio_probe(p->addr)) {
 		ret = -EEXIST;
 		goto out;
 	}
+
+	pte = lookup_address(p->addr, &l);
+	if (!pte) {
+		ret = -EINVAL;
+		goto out;
+	}
+
 	kmmio_count++;
 	list_add_rcu(&p->list, &kmmio_probes);
 	while (size < size_lim) {
 		if (add_kmmio_fault_page(p->addr + size))
 			pr_err("Unable to set page fault.\n");
-		size += PAGE_SIZE;
+		size += page_level_size(l);
 	}
 out:
 	spin_unlock_irqrestore(&kmmio_lock, flags);
@@ -506,11 +530,17 @@ void unregister_kmmio_probe(struct kmmio_probe *p)
 	const unsigned long size_lim = p->len + (p->addr & ~PAGE_MASK);
 	struct kmmio_fault_page *release_list = NULL;
 	struct kmmio_delayed_release *drelease;
+	unsigned int l;
+	pte_t *pte;
+
+	pte = lookup_address(p->addr, &l);
+	if (!pte)
+		return;

 	spin_lock_irqsave(&kmmio_lock, flags);
 	while (size < size_lim) {
 		release_kmmio_fault_page(p->addr + size, &release_list);
-		size += PAGE_SIZE;
+		size += page_level_size(l);
 	}
 	list_del_rcu(&p->list);
 	kmmio_count--;

@@ -349,15 +349,20 @@ struct hd_struct *add_partition(struct gendisk *disk, int partno,
 			goto out_del;
 	}

+	err = hd_ref_init(p);
+	if (err) {
+		if (flags & ADDPART_FLAG_WHOLEDISK)
+			goto out_remove_file;
+		goto out_del;
+	}
+
 	/* everything is up and running, commence */
 	rcu_assign_pointer(ptbl->part[partno], p);

 	/* suppress uevent if the disk suppresses it */
 	if (!dev_get_uevent_suppress(ddev))
 		kobject_uevent(&pdev->kobj, KOBJ_ADD);

-	if (!hd_ref_init(p))
-		return p;
+	return p;

 out_free_info:
 	free_part_info(p);
@@ -366,6 +371,8 @@ out_free_stats:
 out_free:
 	kfree(p);
 	return ERR_PTR(err);
+out_remove_file:
+	device_remove_file(pdev, &dev_attr_whole_disk);
 out_del:
 	kobject_put(p->holder_dir);
 	device_del(pdev);

@@ -1381,7 +1381,7 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,

 	mutex_lock(&genpd->lock);

-	if (!list_empty(&subdomain->slave_links) || subdomain->device_count) {
+	if (!list_empty(&subdomain->master_links) || subdomain->device_count) {
 		pr_warn("%s: unable to remove subdomain %s\n", genpd->name,
 			subdomain->name);
 		ret = -EBUSY;

@@ -488,6 +488,12 @@ static int lo_rw_aio(struct loop_device *lo, struct loop_cmd *cmd,
 	bvec = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
 	iov_iter_bvec(&iter, ITER_BVEC | rw, bvec,
 		      bio_segments(bio), blk_rq_bytes(cmd->rq));
+	/*
+	 * This bio may be started from the middle of the 'bvec'
+	 * because of bio splitting, so offset from the bvec must
+	 * be passed to iov iterator
+	 */
+	iter.iov_offset = bio->bi_iter.bi_bvec_done;

 	cmd->iocb.ki_pos = pos;
 	cmd->iocb.ki_filp = file;

@@ -126,7 +126,7 @@
 */
 #include <linux/types.h>

-static bool verbose = 0;
+static int verbose = 0;
 static int major = PD_MAJOR;
 static char *name = PD_NAME;
 static int cluster = 64;
@@ -161,7 +161,7 @@ enum {D_PRT, D_PRO, D_UNI, D_MOD, D_GEO, D_SBY, D_DLY, D_SLV};
 static DEFINE_MUTEX(pd_mutex);
 static DEFINE_SPINLOCK(pd_lock);

-module_param(verbose, bool, 0);
+module_param(verbose, int, 0);
 module_param(major, int, 0);
 module_param(name, charp, 0);
 module_param(cluster, int, 0);

@@ -117,7 +117,7 @@

 */

-static bool verbose = 0;
+static int verbose = 0;
 static int major = PT_MAJOR;
 static char *name = PT_NAME;
 static int disable = 0;
@@ -152,7 +152,7 @@ static int (*drives[4])[6] = {&drive0, &drive1, &drive2, &drive3};

 #include <asm/uaccess.h>

-module_param(verbose, bool, 0);
+module_param(verbose, int, 0);
 module_param(major, int, 0);
 module_param(name, charp, 0);
 module_param_array(drive0, int, NULL, 0);

@@ -150,7 +150,7 @@ static int __init weim_parse_dt(struct platform_device *pdev,
 		return ret;
 	}

-	for_each_child_of_node(pdev->dev.of_node, child) {
+	for_each_available_child_of_node(pdev->dev.of_node, child) {
 		if (!child->name)
 			continue;

@@ -667,6 +667,11 @@ static int core_get_max_pstate(void)
 			if (err)
 				goto skip_tar;

+			/* For level 1 and 2, bits[23:16] contain the ratio */
+			if (tdp_ctrl)
+				tdp_ratio >>= 16;
+
+			tdp_ratio &= 0xff; /* ratios are only 8 bits long */
 			if (tdp_ratio - 1 == tar) {
 				max_pstate = tar;
 				pr_debug("max_pstate=TAC %x\n", max_pstate);

@@ -225,6 +225,9 @@ static int ccp_aes_cmac_export(struct ahash_request *req, void *out)
 	struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx(req);
 	struct ccp_aes_cmac_exp_ctx state;

+	/* Don't let anything leak to 'out' */
+	memset(&state, 0, sizeof(state));
+
 	state.null_msg = rctx->null_msg;
 	memcpy(state.iv, rctx->iv, sizeof(state.iv));
 	state.buf_count = rctx->buf_count;

@@ -212,6 +212,9 @@ static int ccp_sha_export(struct ahash_request *req, void *out)
 	struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req);
 	struct ccp_sha_exp_ctx state;

+	/* Don't let anything leak to 'out' */
+	memset(&state, 0, sizeof(state));
+
 	state.type = rctx->type;
 	state.msg_bits = rctx->msg_bits;
 	state.first = rctx->first;

@@ -63,6 +63,14 @@ static void to_talitos_ptr(struct talitos_ptr *ptr, dma_addr_t dma_addr,
 	ptr->eptr = upper_32_bits(dma_addr);
 }

+static void copy_talitos_ptr(struct talitos_ptr *dst_ptr,
+			     struct talitos_ptr *src_ptr, bool is_sec1)
+{
+	dst_ptr->ptr = src_ptr->ptr;
+	if (!is_sec1)
+		dst_ptr->eptr = src_ptr->eptr;
+}
+
 static void to_talitos_ptr_len(struct talitos_ptr *ptr, unsigned int len,
 			       bool is_sec1)
 {
@@ -1083,21 +1091,20 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
 	sg_count = dma_map_sg(dev, areq->src, edesc->src_nents ?: 1,
 			      (areq->src == areq->dst) ? DMA_BIDIRECTIONAL
 							: DMA_TO_DEVICE);

 	/* hmac data */
 	desc->ptr[1].len = cpu_to_be16(areq->assoclen);
 	if (sg_count > 1 &&
 	    (ret = sg_to_link_tbl_offset(areq->src, sg_count, 0,
 					 areq->assoclen,
 					 &edesc->link_tbl[tbl_off])) > 1) {
-		tbl_off += ret;
-
 		to_talitos_ptr(&desc->ptr[1], edesc->dma_link_tbl + tbl_off *
 			       sizeof(struct talitos_ptr), 0);
 		desc->ptr[1].j_extent = DESC_PTR_LNKTBL_JUMP;

 		dma_sync_single_for_device(dev, edesc->dma_link_tbl,
 					   edesc->dma_len, DMA_BIDIRECTIONAL);
+
+		tbl_off += ret;
 	} else {
 		to_talitos_ptr(&desc->ptr[1], sg_dma_address(areq->src), 0);
 		desc->ptr[1].j_extent = 0;
@@ -1126,11 +1133,13 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
 	if (edesc->desc.hdr & DESC_HDR_MODE1_MDEU_CICV)
 		sg_link_tbl_len += authsize;

-	if (sg_count > 1 &&
-	    (ret = sg_to_link_tbl_offset(areq->src, sg_count, areq->assoclen,
-					 sg_link_tbl_len,
-					 &edesc->link_tbl[tbl_off])) > 1) {
-		tbl_off += ret;
+	if (sg_count == 1) {
+		to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->src) +
+			       areq->assoclen, 0);
+	} else if ((ret = sg_to_link_tbl_offset(areq->src, sg_count,
+						areq->assoclen, sg_link_tbl_len,
+						&edesc->link_tbl[tbl_off])) >
+		   1) {
 		desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP;
 		to_talitos_ptr(&desc->ptr[4], edesc->dma_link_tbl +
 					      tbl_off *
@@ -1138,8 +1147,10 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
 		dma_sync_single_for_device(dev, edesc->dma_link_tbl,
 					   edesc->dma_len,
 					   DMA_BIDIRECTIONAL);
-	} else
-		to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->src), 0);
+		tbl_off += ret;
+	} else {
+		copy_talitos_ptr(&desc->ptr[4], &edesc->link_tbl[tbl_off], 0);
+	}

 	/* cipher out */
 	desc->ptr[5].len = cpu_to_be16(cryptlen);
@@ -1151,11 +1162,13 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,

 	edesc->icv_ool = false;

-	if (sg_count > 1 &&
-	    (sg_count = sg_to_link_tbl_offset(areq->dst, sg_count,
+	if (sg_count == 1) {
+		to_talitos_ptr(&desc->ptr[5], sg_dma_address(areq->dst) +
+			       areq->assoclen, 0);
+	} else if ((sg_count =
+		    sg_to_link_tbl_offset(areq->dst, sg_count,
 					  areq->assoclen, cryptlen,
-					  &edesc->link_tbl[tbl_off])) >
-					  1) {
+					  &edesc->link_tbl[tbl_off])) > 1) {
 		struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off];

 		to_talitos_ptr(&desc->ptr[5], edesc->dma_link_tbl +
@@ -1178,8 +1191,9 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
 					   edesc->dma_len, DMA_BIDIRECTIONAL);

 		edesc->icv_ool = true;
-	} else
-		to_talitos_ptr(&desc->ptr[5], sg_dma_address(areq->dst), 0);
+	} else {
+		copy_talitos_ptr(&desc->ptr[5], &edesc->link_tbl[tbl_off], 0);
+	}

 	/* iv out */
 	map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv,
@@ -2519,21 +2533,11 @@ struct talitos_crypto_alg {
 	struct talitos_alg_template algt;
 };

-static int talitos_cra_init(struct crypto_tfm *tfm)
+static int talitos_init_common(struct talitos_ctx *ctx,
+			       struct talitos_crypto_alg *talitos_alg)
 {
-	struct crypto_alg *alg = tfm->__crt_alg;
-	struct talitos_crypto_alg *talitos_alg;
-	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
 	struct talitos_private *priv;

-	if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AHASH)
-		talitos_alg = container_of(__crypto_ahash_alg(alg),
-					   struct talitos_crypto_alg,
-					   algt.alg.hash);
-	else
-		talitos_alg = container_of(alg, struct talitos_crypto_alg,
-					   algt.alg.crypto);
-
 	/* update context with ptr to dev */
 	ctx->dev = talitos_alg->dev;

@@ -2551,10 +2555,33 @@ static int talitos_cra_init(struct crypto_tfm *tfm)
 	return 0;
 }

+static int talitos_cra_init(struct crypto_tfm *tfm)
+{
+	struct crypto_alg *alg = tfm->__crt_alg;
+	struct talitos_crypto_alg *talitos_alg;
+	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AHASH)
+		talitos_alg = container_of(__crypto_ahash_alg(alg),
+					   struct talitos_crypto_alg,
+					   algt.alg.hash);
+	else
+		talitos_alg = container_of(alg, struct talitos_crypto_alg,
+					   algt.alg.crypto);
+
+	return talitos_init_common(ctx, talitos_alg);
+}
+
 static int talitos_cra_init_aead(struct crypto_aead *tfm)
 {
-	talitos_cra_init(crypto_aead_tfm(tfm));
-	return 0;
+	struct aead_alg *alg = crypto_aead_alg(tfm);
+	struct talitos_crypto_alg *talitos_alg;
+	struct talitos_ctx *ctx = crypto_aead_ctx(tfm);
+
+	talitos_alg = container_of(alg, struct talitos_crypto_alg,
+				   algt.alg.aead);
+
+	return talitos_init_common(ctx, talitos_alg);
 }

 static int talitos_cra_init_ahash(struct crypto_tfm *tfm)

@@ -130,26 +130,14 @@ static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc)
 static void dwc_initialize(struct dw_dma_chan *dwc)
 {
 	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
-	struct dw_dma_slave *dws = dwc->chan.private;
 	u32 cfghi = DWC_CFGH_FIFO_MODE;
 	u32 cfglo = DWC_CFGL_CH_PRIOR(dwc->priority);

 	if (dwc->initialized == true)
 		return;

-	if (dws) {
-		/*
-		 * We need controller-specific data to set up slave
-		 * transfers.
-		 */
-		BUG_ON(!dws->dma_dev || dws->dma_dev != dw->dma.dev);
-
-		cfghi |= DWC_CFGH_DST_PER(dws->dst_id);
-		cfghi |= DWC_CFGH_SRC_PER(dws->src_id);
-	} else {
-		cfghi |= DWC_CFGH_DST_PER(dwc->dst_id);
-		cfghi |= DWC_CFGH_SRC_PER(dwc->src_id);
-	}
+	cfghi |= DWC_CFGH_DST_PER(dwc->dst_id);
+	cfghi |= DWC_CFGH_SRC_PER(dwc->src_id);

 	channel_writel(dwc, CFG_LO, cfglo);
 	channel_writel(dwc, CFG_HI, cfghi);
@@ -936,7 +924,7 @@ bool dw_dma_filter(struct dma_chan *chan, void *param)
 	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
 	struct dw_dma_slave *dws = param;

-	if (!dws || dws->dma_dev != chan->device->dev)
+	if (dws->dma_dev != chan->device->dev)
 		return false;

 	/* We have to copy data since dws can be temporary storage */
@@ -1160,6 +1148,14 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
 	 * doesn't mean what you think it means), and status writeback.
 	 */

+	/*
+	 * We need controller-specific data to set up slave transfers.
+	 */
+	if (chan->private && !dw_dma_filter(chan, chan->private)) {
+		dev_warn(chan2dev(chan), "Wrong controller-specific data\n");
+		return -EINVAL;
+	}
+
 	/* Enable controller here if needed */
 	if (!dw->in_use)
 		dw_dma_on(dw);
@@ -1221,6 +1217,14 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
 	spin_lock_irqsave(&dwc->lock, flags);
 	list_splice_init(&dwc->free_list, &list);
 	dwc->descs_allocated = 0;
+
+	/* Clear custom channel configuration */
+	dwc->src_id = 0;
+	dwc->dst_id = 0;
+
+	dwc->src_master = 0;
+	dwc->dst_master = 0;
+
 	dwc->initialized = false;

 	/* Disable interrupts */

@@ -135,7 +135,7 @@ static u32 hsu_dma_chan_get_sr(struct hsu_dma_chan *hsuc)
 	sr = hsu_chan_readl(hsuc, HSU_CH_SR);
 	spin_unlock_irqrestore(&hsuc->vchan.lock, flags);

-	return sr;
+	return sr & ~(HSU_CH_SR_DESCE_ANY | HSU_CH_SR_CDESC_ANY);
 }

 irqreturn_t hsu_dma_irq(struct hsu_dma_chip *chip, unsigned short nr)

@@ -41,6 +41,9 @@
 #define HSU_CH_SR_DESCTO(x)	BIT(8 + (x))
 #define HSU_CH_SR_DESCTO_ANY	(BIT(11) | BIT(10) | BIT(9) | BIT(8))
 #define HSU_CH_SR_CHE		BIT(15)
+#define HSU_CH_SR_DESCE(x)	BIT(16 + (x))
+#define HSU_CH_SR_DESCE_ANY	(BIT(19) | BIT(18) | BIT(17) | BIT(16))
+#define HSU_CH_SR_CDESC_ANY	(BIT(31) | BIT(30))

 /* Bits in HSU_CH_CR */
 #define HSU_CH_CR_CHA		BIT(0)

@@ -122,6 +122,7 @@ struct pxad_chan {
 struct pxad_device {
 	struct dma_device		slave;
 	int				nr_chans;
+	int				nr_requestors;
 	void __iomem			*base;
 	struct pxad_phy			*phys;
 	spinlock_t			phy_lock;	/* Phy association */
@@ -473,7 +474,7 @@ static void pxad_free_phy(struct pxad_chan *chan)
 		return;

 	/* clear the channel mapping in DRCMR */
-	if (chan->drcmr <= DRCMR_CHLNUM) {
+	if (chan->drcmr <= pdev->nr_requestors) {
 		reg = pxad_drcmr(chan->drcmr);
 		writel_relaxed(0, chan->phy->base + reg);
 	}
@@ -509,6 +510,7 @@ static bool is_running_chan_misaligned(struct pxad_chan *chan)

 static void phy_enable(struct pxad_phy *phy, bool misaligned)
 {
+	struct pxad_device *pdev;
 	u32 reg, dalgn;

 	if (!phy->vchan)
@@ -518,7 +520,8 @@ static void phy_enable(struct pxad_phy *phy, bool misaligned)
 		"%s(); phy=%p(%d) misaligned=%d\n", __func__,
 		phy, phy->idx, misaligned);

-	if (phy->vchan->drcmr <= DRCMR_CHLNUM) {
+	pdev = to_pxad_dev(phy->vchan->vc.chan.device);
+	if (phy->vchan->drcmr <= pdev->nr_requestors) {
 		reg = pxad_drcmr(phy->vchan->drcmr);
 		writel_relaxed(DRCMR_MAPVLD | phy->idx, phy->base + reg);
 	}
@@ -914,6 +917,7 @@ static void pxad_get_config(struct pxad_chan *chan,
 {
 	u32 maxburst = 0, dev_addr = 0;
 	enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED;
+	struct pxad_device *pdev = to_pxad_dev(chan->vc.chan.device);

 	*dcmd = 0;
 	if (dir == DMA_DEV_TO_MEM) {
@@ -922,7 +926,7 @@ static void pxad_get_config(struct pxad_chan *chan,
 		dev_addr = chan->cfg.src_addr;
 		*dev_src = dev_addr;
 		*dcmd |= PXA_DCMD_INCTRGADDR;
-		if (chan->drcmr <= DRCMR_CHLNUM)
+		if (chan->drcmr <= pdev->nr_requestors)
 			*dcmd |= PXA_DCMD_FLOWSRC;
 	}
 	if (dir == DMA_MEM_TO_DEV) {
@@ -931,7 +935,7 @@ static void pxad_get_config(struct pxad_chan *chan,
 		dev_addr = chan->cfg.dst_addr;
 		*dev_dst = dev_addr;
 		*dcmd |= PXA_DCMD_INCSRCADDR;
-		if (chan->drcmr <= DRCMR_CHLNUM)
+		if (chan->drcmr <= pdev->nr_requestors)
 			*dcmd |= PXA_DCMD_FLOWTRG;
 	}
 	if (dir == DMA_MEM_TO_MEM)
@@ -1341,13 +1345,15 @@ static struct dma_chan *pxad_dma_xlate(struct of_phandle_args *dma_spec,

 static int pxad_init_dmadev(struct platform_device *op,
 			    struct pxad_device *pdev,
-			    unsigned int nr_phy_chans)
+			    unsigned int nr_phy_chans,
+			    unsigned int nr_requestors)
 {
 	int ret;
 	unsigned int i;
 	struct pxad_chan *c;

 	pdev->nr_chans = nr_phy_chans;
+	pdev->nr_requestors = nr_requestors;
 	INIT_LIST_HEAD(&pdev->slave.channels);
 	pdev->slave.device_alloc_chan_resources = pxad_alloc_chan_resources;
 	pdev->slave.device_free_chan_resources = pxad_free_chan_resources;
@@ -1382,7 +1388,7 @@ static int pxad_probe(struct platform_device *op)
 	const struct of_device_id *of_id;
 	struct mmp_dma_platdata *pdata = dev_get_platdata(&op->dev);
 	struct resource *iores;
-	int ret, dma_channels = 0;
+	int ret, dma_channels = 0, nb_requestors = 0;
 	const enum dma_slave_buswidth widths =
 		DMA_SLAVE_BUSWIDTH_1_BYTE | DMA_SLAVE_BUSWIDTH_2_BYTES |
 		DMA_SLAVE_BUSWIDTH_4_BYTES;
@@ -1399,13 +1405,23 @@ static int pxad_probe(struct platform_device *op)
 		return PTR_ERR(pdev->base);

 	of_id = of_match_device(pxad_dt_ids, &op->dev);
-	if (of_id)
+	if (of_id) {
 		of_property_read_u32(op->dev.of_node, "#dma-channels",
 				     &dma_channels);
-	else if (pdata && pdata->dma_channels)
+		ret = of_property_read_u32(op->dev.of_node, "#dma-requests",
+					   &nb_requestors);
+		if (ret) {
+			dev_warn(pdev->slave.dev,
+				 "#dma-requests set to default 32 as missing in OF: %d",
+				 ret);
+			nb_requestors = 32;
+		};
+	} else if (pdata && pdata->dma_channels) {
 		dma_channels = pdata->dma_channels;
-	else
+		nb_requestors = pdata->nb_requestors;
+	} else {
 		dma_channels = 32;	/* default 32 channel */
+	}

 	dma_cap_set(DMA_SLAVE, pdev->slave.cap_mask);
 	dma_cap_set(DMA_MEMCPY, pdev->slave.cap_mask);
@@ -1422,7 +1438,7 @@ static int pxad_probe(struct platform_device *op)
 	pdev->slave.residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;

 	pdev->slave.dev = &op->dev;
-	ret = pxad_init_dmadev(op, pdev, dma_channels);
+	ret = pxad_init_dmadev(op, pdev, dma_channels, nb_requestors);
 	if (ret) {
 		dev_err(pdev->slave.dev, "unable to register\n");
 		return ret;
@@ -1441,7 +1457,8 @@ static int pxad_probe(struct platform_device *op)

 	platform_set_drvdata(op, pdev);
 	pxad_init_debugfs(pdev);
-	dev_info(pdev->slave.dev, "initialized %d channels\n", dma_channels);
+	dev_info(pdev->slave.dev, "initialized %d channels on %d requestors\n",
+		 dma_channels, nb_requestors);
 	return 0;
 }

@@ -1866,7 +1866,7 @@ static int i7core_mce_check_error(struct notifier_block *nb, unsigned long val,

 	i7_dev = get_i7core_dev(mce->socketid);
 	if (!i7_dev)
-		return NOTIFY_BAD;
+		return NOTIFY_DONE;

 	mci = i7_dev->mci;
 	pvt = mci->pvt_info;

@@ -1396,7 +1396,7 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
 	}

 	ch_way = TAD_CH(reg) + 1;
-	sck_way = 1 << TAD_SOCK(reg);
+	sck_way = TAD_SOCK(reg);

 	if (ch_way == 3)
 		idx = addr >> 6;
@@ -1435,7 +1435,7 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
 		switch(ch_way) {
 		case 2:
 		case 4:
-			sck_xch = 1 << sck_way * (ch_way >> 1);
+			sck_xch = (1 << sck_way) * (ch_way >> 1);
 			break;
 		default:
 			sprintf(msg, "Invalid mirror set. Can't decode addr");
@@ -1471,7 +1471,7 @@ static int get_memory_error_data(struct mem_ctl_info *mci,

 	ch_addr = addr - offset;
 	ch_addr >>= (6 + shiftup);
-	ch_addr /= ch_way * sck_way;
+	ch_addr /= sck_xch;
 	ch_addr <<= (6 + shiftup);
 	ch_addr |= addr & ((1 << (6 + shiftup)) - 1);

@@ -2254,7 +2254,7 @@ static int sbridge_mce_check_error(struct notifier_block *nb, unsigned long val,

 	mci = get_mci_for_node_id(mce->socketid);
 	if (!mci)
-		return NOTIFY_BAD;
+		return NOTIFY_DONE;
 	pvt = mci->pvt_info;

 	/*

@@ -803,7 +803,7 @@ static int max77843_muic_probe(struct platform_device *pdev)
 	/* Clear IRQ bits before request IRQs */
 	ret = regmap_bulk_read(max77843->regmap_muic,
 			       MAX77843_MUIC_REG_INT1, info->status,
-			       MAX77843_MUIC_IRQ_NUM);
+			       MAX77843_MUIC_STATUS_NUM);
 	if (ret) {
 		dev_err(&pdev->dev, "Failed to Clear IRQ bits\n");
 		goto err_muic_irq;

@@ -180,6 +180,7 @@ static int generic_ops_register(void)
 {
 	generic_ops.get_variable = efi.get_variable;
 	generic_ops.set_variable = efi.set_variable;
+	generic_ops.set_variable_nonblocking = efi.set_variable_nonblocking;
 	generic_ops.get_next_variable = efi.get_next_variable;
 	generic_ops.query_variable_store = efi_query_variable_store;

@@ -202,29 +202,44 @@ static const struct variable_validate variable_validate[] = {
 	{ NULL_GUID, "", NULL },
 };

+/*
+ * Check if @var_name matches the pattern given in @match_name.
+ *
+ * @var_name: an array of @len non-NUL characters.
+ * @match_name: a NUL-terminated pattern string, optionally ending in "*". A
+ *              final "*" character matches any trailing characters @var_name,
+ *              including the case when there are none left in @var_name.
+ * @match: on output, the number of non-wildcard characters in @match_name
+ *         that @var_name matches, regardless of the return value.
+ * @return: whether @var_name fully matches @match_name.
+ */
 static bool
 variable_matches(const char *var_name, size_t len, const char *match_name,
 		 int *match)
 {
 	for (*match = 0; ; (*match)++) {
 		char c = match_name[*match];
 		char u = var_name[*match];

-		/* Wildcard in the matching name means we've matched */
-		if (c == '*')
+		switch (c) {
+		case '*':
+			/* Wildcard in @match_name means we've matched. */
 			return true;

-		/* Case sensitive match */
-		if (!c && *match == len)
-			return true;
+		case '\0':
+			/* @match_name has ended. Has @var_name too? */
+			return (*match == len);

-		if (c != u)
+		default:
+			/*
+			 * We've reached a non-wildcard char in @match_name.
+			 * Continue only if there's an identical character in
+			 * @var_name.
+			 */
+			if (*match < len && c == var_name[*match])
+				continue;
 			return false;
-
-		if (!c)
-			return true;
+		}
 	}
-	return true;
 }

 bool

@@ -1673,6 +1673,7 @@ struct amdgpu_uvd {
 	struct amdgpu_bo	*vcpu_bo;
 	void			*cpu_addr;
 	uint64_t		gpu_addr;
+	unsigned		fw_version;
 	atomic_t		handles[AMDGPU_MAX_UVD_HANDLES];
 	struct drm_file		*filp[AMDGPU_MAX_UVD_HANDLES];
 	struct delayed_work	idle_work;

@@ -63,10 +63,6 @@ bool amdgpu_has_atpx(void) {
 	return amdgpu_atpx_priv.atpx_detected;
 }

-bool amdgpu_has_atpx_dgpu_power_cntl(void) {
-	return amdgpu_atpx_priv.atpx.functions.power_cntl;
-}
-
 /**
  * amdgpu_atpx_call - call an ATPX method
  *
@@ -146,6 +142,10 @@ static void amdgpu_atpx_parse_functions(struct amdgpu_atpx_functions *f, u32 mas
  */
 static int amdgpu_atpx_validate(struct amdgpu_atpx *atpx)
 {
+	/* make sure required functions are enabled */
+	/* dGPU power control is required */
+	atpx->functions.power_cntl = true;
+
 	if (atpx->functions.px_params) {
 		union acpi_object *info;
 		struct atpx_px_params output;

@@ -61,12 +61,6 @@ static const char *amdgpu_asic_name[] = {
 	"LAST",
 };

-#if defined(CONFIG_VGA_SWITCHEROO)
-bool amdgpu_has_atpx_dgpu_power_cntl(void);
-#else
-static inline bool amdgpu_has_atpx_dgpu_power_cntl(void) { return false; }
-#endif
-
 bool amdgpu_device_is_px(struct drm_device *dev)
 {
 	struct amdgpu_device *adev = dev->dev_private;
@@ -1475,7 +1469,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,

 	if (amdgpu_runtime_pm == 1)
 		runtime = true;
-	if (amdgpu_device_is_px(ddev) && amdgpu_has_atpx_dgpu_power_cntl())
+	if (amdgpu_device_is_px(ddev))
 		runtime = true;
 	vga_switcheroo_register_client(adev->pdev, &amdgpu_switcheroo_ops, runtime);
 	if (runtime)

@@ -303,7 +303,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
 		fw_info.feature = adev->vce.fb_version;
 		break;
 	case AMDGPU_INFO_FW_UVD:
-		fw_info.ver = 0;
+		fw_info.ver = adev->uvd.fw_version;
 		fw_info.feature = 0;
 		break;
 	case AMDGPU_INFO_FW_GMC:

@@ -52,7 +52,7 @@ struct amdgpu_hpd;

 #define AMDGPU_MAX_HPD_PINS 6
 #define AMDGPU_MAX_CRTCS 6
-#define AMDGPU_MAX_AFMT_BLOCKS 7
+#define AMDGPU_MAX_AFMT_BLOCKS 9

 enum amdgpu_rmx_type {
 	RMX_OFF,
@@ -308,8 +308,8 @@ struct amdgpu_mode_info {
 	struct atom_context *atom_context;
 	struct card_info *atom_card_info;
 	bool mode_config_initialized;
-	struct amdgpu_crtc *crtcs[6];
-	struct amdgpu_afmt *afmt[7];
+	struct amdgpu_crtc *crtcs[AMDGPU_MAX_CRTCS];
+	struct amdgpu_afmt *afmt[AMDGPU_MAX_AFMT_BLOCKS];
 	/* DVI-I properties */
 	struct drm_property *coherent_mode_property;
 	/* DAC enable load detect */

@@ -156,6 +156,9 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
 	DRM_INFO("Found UVD firmware Version: %hu.%hu Family ID: %hu\n",
 		version_major, version_minor, family_id);

+	adev->uvd.fw_version = ((version_major << 24) | (version_minor << 16) |
+				(family_id << 8));
+
 	bo_size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8)
 		 + AMDGPU_UVD_STACK_SIZE + AMDGPU_UVD_HEAP_SIZE;
 	r = amdgpu_bo_create(adev, bo_size, PAGE_SIZE, true,
@@ -273,6 +276,8 @@ int amdgpu_uvd_resume(struct amdgpu_device *adev)
 	memcpy(adev->uvd.cpu_addr, (adev->uvd.fw->data) + offset,
 		(adev->uvd.fw->size) - offset);

+	cancel_delayed_work_sync(&adev->uvd.idle_work);
+
 	size = amdgpu_bo_size(adev->uvd.vcpu_bo);
 	size -= le32_to_cpu(hdr->ucode_size_bytes);
 	ptr = adev->uvd.cpu_addr;

|
@ -220,6 +220,7 @@ int amdgpu_vce_suspend(struct amdgpu_device *adev)
|
|||
if (i == AMDGPU_MAX_VCE_HANDLES)
|
||||
return 0;
|
||||
|
||||
cancel_delayed_work_sync(&adev->vce.idle_work);
|
||||
/* TODO: suspending running encoding sessions isn't supported */
|
||||
return -EINVAL;
|
||||
}
|
||||
|
|
|
@@ -3628,7 +3628,7 @@ static void gfx_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
 					unsigned vm_id, uint64_t pd_addr)
 {
 	int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX);
-	uint32_t seq = ring->fence_drv.sync_seq;
+	uint32_t seq = ring->fence_drv.sync_seq[ring->idx];
 	uint64_t addr = ring->fence_drv.gpu_addr;

 	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));

@@ -513,7 +513,7 @@ static int dbgdev_wave_control_set_registers(
 				union SQ_CMD_BITS *in_reg_sq_cmd,
 				union GRBM_GFX_INDEX_BITS *in_reg_gfx_index)
 {
-	int status;
+	int status = 0;
 	union SQ_CMD_BITS reg_sq_cmd;
 	union GRBM_GFX_INDEX_BITS reg_gfx_index;
 	struct HsaDbgWaveMsgAMDGen2 *pMsg;

@@ -1665,13 +1665,19 @@ static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
 	struct drm_dp_mst_branch *mstb;
 	int len, ret, port_num;

+	port = drm_dp_get_validated_port_ref(mgr, port);
+	if (!port)
+		return -EINVAL;
+
 	port_num = port->port_num;
 	mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent);
 	if (!mstb) {
 		mstb = drm_dp_get_last_connected_port_and_mstb(mgr, port->parent, &port_num);

-		if (!mstb)
+		if (!mstb) {
+			drm_dp_put_port(port);
 			return -EINVAL;
+		}
 	}

 	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
@@ -1697,6 +1703,7 @@ static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
 	kfree(txmsg);
 fail_put:
 	drm_dp_put_mst_branch_device(mstb);
+	drm_dp_put_port(port);
 	return ret;
 }

@@ -1779,6 +1786,11 @@ int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
 		req_payload.start_slot = cur_slots;
 		if (mgr->proposed_vcpis[i]) {
 			port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
+			port = drm_dp_get_validated_port_ref(mgr, port);
+			if (!port) {
+				mutex_unlock(&mgr->payload_lock);
+				return -EINVAL;
+			}
 			req_payload.num_slots = mgr->proposed_vcpis[i]->num_slots;
 		} else {
 			port = NULL;
@@ -1804,6 +1816,9 @@ int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
 			mgr->payloads[i].payload_state = req_payload.payload_state;
 		}
 		cur_slots += req_payload.num_slots;
+
+		if (port)
+			drm_dp_put_port(port);
 	}

 	for (i = 0; i < mgr->max_payloads; i++) {
@@ -2109,6 +2124,8 @@ int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr)

 	if (mgr->mst_primary) {
 		int sret;
+		u8 guid[16];
+
 		sret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE);
 		if (sret != DP_RECEIVER_CAP_SIZE) {
 			DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
@@ -2123,6 +2140,16 @@ int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr)
 			ret = -1;
 			goto out_unlock;
 		}
+
+		/* Some hubs forget their guids after they resume */
+		sret = drm_dp_dpcd_read(mgr->aux, DP_GUID, guid, 16);
+		if (sret != 16) {
+			DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
+			ret = -1;
+			goto out_unlock;
+		}
+		drm_dp_check_mstb_guid(mgr->mst_primary, guid);
+
 		ret = 0;
 	} else
 		ret = -1;

@@ -4447,7 +4447,7 @@ int skl_update_scaler_crtc(struct intel_crtc_state *state)
 		intel_crtc->base.base.id, intel_crtc->pipe, SKL_CRTC_INDEX);

 	return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX,
-		&state->scaler_state.scaler_id, DRM_ROTATE_0,
+		&state->scaler_state.scaler_id, BIT(DRM_ROTATE_0),
 		state->pipe_src_w, state->pipe_src_h,
 		adjusted_mode->crtc_hdisplay, adjusted_mode->crtc_vdisplay);
 }

@@ -477,6 +477,8 @@ static void intel_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
 	struct intel_connector *intel_connector = to_intel_connector(connector);
 	struct drm_device *dev = connector->dev;

+	intel_connector->unregister(intel_connector);
+
 	/* need to nuke the connector */
 	drm_modeset_lock_all(dev);
 	if (connector->state->crtc) {
@@ -490,11 +492,7 @@ static void intel_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,

 		WARN(ret, "Disabling mst crtc failed with %i\n", ret);
 	}
-	drm_modeset_unlock_all(dev);
-
-	intel_connector->unregister(intel_connector);

-	drm_modeset_lock_all(dev);
 	intel_connector_remove_from_fbdev(intel_connector);
 	drm_connector_cleanup(connector);
 	drm_modeset_unlock_all(dev);

@@ -776,11 +776,11 @@ static int logical_ring_prepare(struct drm_i915_gem_request *req, int bytes)
 	if (unlikely(total_bytes > remain_usable)) {
 		/*
 		 * The base request will fit but the reserved space
-		 * falls off the end. So only need to to wait for the
-		 * reserved size after flushing out the remainder.
+		 * falls off the end. So don't need an immediate wrap
+		 * and only need to effectively wait for the reserved
+		 * size space from the start of ringbuffer.
 		 */
 		wait_bytes = remain_actual + ringbuf->reserved_size;
 		need_wrap = true;
 	} else if (total_bytes > ringbuf->space) {
 		/* No wrapping required, just waiting. */
 		wait_bytes = total_bytes;

@@ -1922,6 +1922,17 @@ i915_dispatch_execbuffer(struct drm_i915_gem_request *req,
 	return 0;
 }
 
+static void cleanup_phys_status_page(struct intel_engine_cs *ring)
+{
+	struct drm_i915_private *dev_priv = to_i915(ring->dev);
+
+	if (!dev_priv->status_page_dmah)
+		return;
+
+	drm_pci_free(ring->dev, dev_priv->status_page_dmah);
+	ring->status_page.page_addr = NULL;
+}
+
 static void cleanup_status_page(struct intel_engine_cs *ring)
 {
 	struct drm_i915_gem_object *obj;

@@ -1938,9 +1949,9 @@ static void cleanup_status_page(struct intel_engine_cs *ring)
 
 static int init_status_page(struct intel_engine_cs *ring)
 {
-	struct drm_i915_gem_object *obj;
+	struct drm_i915_gem_object *obj = ring->status_page.obj;
 
-	if ((obj = ring->status_page.obj) == NULL) {
+	if (obj == NULL) {
 		unsigned flags;
 		int ret;
 

@@ -2134,7 +2145,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
 		if (ret)
 			goto error;
 	} else {
-		BUG_ON(ring->id != RCS);
+		WARN_ON(ring->id != RCS);
 		ret = init_phys_status_page(ring);
 		if (ret)
 			goto error;

@@ -2179,7 +2190,12 @@ void intel_cleanup_ring_buffer(struct intel_engine_cs *ring)
 	if (ring->cleanup)
 		ring->cleanup(ring);
 
-	cleanup_status_page(ring);
+	if (I915_NEED_GFX_HWS(ring->dev)) {
+		cleanup_status_page(ring);
+	} else {
+		WARN_ON(ring->id != RCS);
+		cleanup_phys_status_page(ring);
+	}
 
 	i915_cmd_parser_fini_ring(ring);
 	i915_gem_batch_pool_fini(&ring->batch_pool);

@@ -2341,11 +2357,11 @@ static int __intel_ring_prepare(struct intel_engine_cs *ring, int bytes)
 	if (unlikely(total_bytes > remain_usable)) {
 		/*
 		 * The base request will fit but the reserved space
-		 * falls off the end. So only need to to wait for the
-		 * reserved size after flushing out the remainder.
+		 * falls off the end. So don't need an immediate wrap
+		 * and only need to effectively wait for the reserved
+		 * size space from the start of ringbuffer.
 		 */
 		wait_bytes = remain_actual + ringbuf->reserved_size;
 		need_wrap = true;
 	} else if (total_bytes > ringbuf->space) {
 		/* No wrapping required, just waiting. */
 		wait_bytes = total_bytes;

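The comment rewritten in the two ring-prepare hunks above is easier to follow with concrete numbers. Below is a minimal, runnable userspace sketch of the same wait/wrap decision; the struct fields and sample values are hypothetical stand-ins for the driver's ringbuffer state, not the i915 definitions.

#include <stdbool.h>
#include <stdio.h>

struct ring { int size, effective_size, tail, space, reserved_size; };

/* Mirror of the branch structure above: decide how many bytes to wait
 * for and whether the write position must wrap to the start. */
static void prepare(const struct ring *r, int bytes,
                    int *wait_bytes, bool *need_wrap)
{
    int remain_usable = r->effective_size - r->tail;
    int remain_actual = r->size - r->tail;
    int total_bytes = bytes + r->reserved_size;

    *wait_bytes = 0;
    *need_wrap = false;

    if (bytes > remain_usable) {
        /* even the base request does not fit before the wrap point */
        *wait_bytes = remain_actual + total_bytes;
        *need_wrap = true;
    } else if (total_bytes > remain_usable) {
        /* the case the new comment describes: the base request fits,
         * the reserved space falls off the end, so wait for the unused
         * tail plus the reserved size from the start of the ring */
        *wait_bytes = remain_actual + r->reserved_size;
        *need_wrap = true;
    } else if (total_bytes > r->space) {
        /* no wrap required, just wait for enough free space */
        *wait_bytes = total_bytes;
    }
}

int main(void)
{
    struct ring r = { .size = 4096, .effective_size = 4032,
                      .tail = 4000, .space = 128, .reserved_size = 160 };
    int wait;
    bool wrap;

    prepare(&r, 24, &wait, &wrap);
    printf("wait=%d wrap=%d\n", wait, wrap); /* wait=256 wrap=1 */
    return 0;
}

With tail 64 bytes short of the usable end, a 24-byte request fits but the 160 reserved bytes do not, so the middle branch waits for the 96 leftover bytes plus the reserved size rather than forcing an immediate wrap.
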
@@ -1132,7 +1132,11 @@ static void intel_uncore_fw_domains_init(struct drm_device *dev)
 	} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
 		dev_priv->uncore.funcs.force_wake_get =
 			fw_domains_get_with_thread_status;
-		dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
+		if (IS_HASWELL(dev))
+			dev_priv->uncore.funcs.force_wake_put =
+				fw_domains_put_with_fifo;
+		else
+			dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
 		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
 			FORCEWAKE_MT, FORCEWAKE_ACK_HSW);
 	} else if (IS_IVYBRIDGE(dev)) {

@@ -131,7 +131,7 @@ nvkm_ramht_del(struct nvkm_ramht **pramht)
 	struct nvkm_ramht *ramht = *pramht;
 	if (ramht) {
 		nvkm_gpuobj_del(&ramht->gpuobj);
-		kfree(*pramht);
+		vfree(*pramht);
 		*pramht = NULL;
 	}
 }

@@ -143,8 +143,8 @@ nvkm_ramht_new(struct nvkm_device *device, u32 size, u32 align,
 	struct nvkm_ramht *ramht;
 	int ret, i;
 
-	if (!(ramht = *pramht = kzalloc(sizeof(*ramht) + (size >> 3) *
-					sizeof(*ramht->data), GFP_KERNEL)))
+	if (!(ramht = *pramht = vzalloc(sizeof(*ramht) +
+					(size >> 3) * sizeof(*ramht->data))))
 		return -ENOMEM;
 
 	ramht->device = device;

@@ -1717,6 +1717,8 @@ gf100_gr_init(struct gf100_gr *gr)
 
 	gf100_gr_mmio(gr, gr->func->mmio);
 
+	nvkm_mask(device, TPC_UNIT(0, 0, 0x05c), 0x00000001, 0x00000001);
+
 	memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr));
 	for (i = 0, gpc = -1; i < gr->tpc_total; i++) {
 		do {

@@ -375,10 +375,15 @@ static int qxl_crtc_cursor_set2(struct drm_crtc *crtc,
 
 	qxl_bo_kunmap(user_bo);
 
+	qcrtc->cur_x += qcrtc->hot_spot_x - hot_x;
+	qcrtc->cur_y += qcrtc->hot_spot_y - hot_y;
+	qcrtc->hot_spot_x = hot_x;
+	qcrtc->hot_spot_y = hot_y;
+
 	cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release);
 	cmd->type = QXL_CURSOR_SET;
-	cmd->u.set.position.x = qcrtc->cur_x;
-	cmd->u.set.position.y = qcrtc->cur_y;
+	cmd->u.set.position.x = qcrtc->cur_x + qcrtc->hot_spot_x;
+	cmd->u.set.position.y = qcrtc->cur_y + qcrtc->hot_spot_y;
 
 	cmd->u.set.shape = qxl_bo_physical_address(qdev, cursor_bo, 0);
 

@@ -441,8 +446,8 @@ static int qxl_crtc_cursor_move(struct drm_crtc *crtc,
 
 	cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release);
 	cmd->type = QXL_CURSOR_MOVE;
-	cmd->u.position.x = qcrtc->cur_x;
-	cmd->u.position.y = qcrtc->cur_y;
+	cmd->u.position.x = qcrtc->cur_x + qcrtc->hot_spot_x;
+	cmd->u.position.y = qcrtc->cur_y + qcrtc->hot_spot_y;
 	qxl_release_unmap(qdev, release, &cmd->release_info);
 
 	qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);

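The cursor fix above is pure coordinate bookkeeping: the device is told where the hotspot lands (cur + hot), and when userspace supplies a new image with a different hotspot, the stored origin is shifted so the visible point does not jump. A small self-contained check of that invariant — the struct and function names are stand-ins for the qxl fields, not the driver's types:

#include <assert.h>
#include <stdio.h>

struct cur { int cur_x, cur_y, hot_x, hot_y; };

/* the device is given the position of the hotspot, not the image origin */
static int dev_x(const struct cur *c) { return c->cur_x + c->hot_x; }

static void set_hotspot(struct cur *c, int hx, int hy)
{
    /* shift the stored origin so the on-screen point stays fixed,
     * exactly as the cursor_set2 hunk does */
    c->cur_x += c->hot_x - hx;
    c->cur_y += c->hot_y - hy;
    c->hot_x = hx;
    c->hot_y = hy;
}

int main(void)
{
    struct cur c = { .cur_x = 100, .cur_y = 80, .hot_x = 4, .hot_y = 6 };
    int before = dev_x(&c);

    set_hotspot(&c, 12, 2);         /* new cursor image, new hotspot */
    assert(dev_x(&c) == before);    /* visible position unchanged */
    printf("device x stays at %d\n", dev_x(&c));
    return 0;
}
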
@@ -135,6 +135,8 @@ struct qxl_crtc {
 	int index;
 	int cur_x;
 	int cur_y;
+	int hot_spot_x;
+	int hot_spot_y;
 };
 
 struct qxl_output {

@@ -2608,10 +2608,152 @@ static void evergreen_agp_enable(struct radeon_device *rdev)
 	WREG32(VM_CONTEXT1_CNTL, 0);
 }
 
+static const unsigned ni_dig_offsets[] =
+{
+	NI_DIG0_REGISTER_OFFSET,
+	NI_DIG1_REGISTER_OFFSET,
+	NI_DIG2_REGISTER_OFFSET,
+	NI_DIG3_REGISTER_OFFSET,
+	NI_DIG4_REGISTER_OFFSET,
+	NI_DIG5_REGISTER_OFFSET
+};
+
+static const unsigned ni_tx_offsets[] =
+{
+	NI_DCIO_UNIPHY0_UNIPHY_TX_CONTROL1,
+	NI_DCIO_UNIPHY1_UNIPHY_TX_CONTROL1,
+	NI_DCIO_UNIPHY2_UNIPHY_TX_CONTROL1,
+	NI_DCIO_UNIPHY3_UNIPHY_TX_CONTROL1,
+	NI_DCIO_UNIPHY4_UNIPHY_TX_CONTROL1,
+	NI_DCIO_UNIPHY5_UNIPHY_TX_CONTROL1
+};
+
+static const unsigned evergreen_dp_offsets[] =
+{
+	EVERGREEN_DP0_REGISTER_OFFSET,
+	EVERGREEN_DP1_REGISTER_OFFSET,
+	EVERGREEN_DP2_REGISTER_OFFSET,
+	EVERGREEN_DP3_REGISTER_OFFSET,
+	EVERGREEN_DP4_REGISTER_OFFSET,
+	EVERGREEN_DP5_REGISTER_OFFSET
+};
+
+
+/*
+ * Assumption is that EVERGREEN_CRTC_MASTER_EN enable for requested crtc
+ * We go from crtc to connector and it is not relible since it
+ * should be an opposite direction .If crtc is enable then
+ * find the dig_fe which selects this crtc and insure that it enable.
+ * if such dig_fe is found then find dig_be which selects found dig_be and
+ * insure that it enable and in DP_SST mode.
+ * if UNIPHY_PLL_CONTROL1.enable then we should disconnect timing
+ * from dp symbols clocks .
+ */
+static bool evergreen_is_dp_sst_stream_enabled(struct radeon_device *rdev,
+					unsigned crtc_id, unsigned *ret_dig_fe)
+{
+	unsigned i;
+	unsigned dig_fe;
+	unsigned dig_be;
+	unsigned dig_en_be;
+	unsigned uniphy_pll;
+	unsigned digs_fe_selected;
+	unsigned dig_be_mode;
+	unsigned dig_fe_mask;
+	bool is_enabled = false;
+	bool found_crtc = false;
+
+	/* loop through all running dig_fe to find selected crtc */
+	for (i = 0; i < ARRAY_SIZE(ni_dig_offsets); i++) {
+		dig_fe = RREG32(NI_DIG_FE_CNTL + ni_dig_offsets[i]);
+		if (dig_fe & NI_DIG_FE_CNTL_SYMCLK_FE_ON &&
+		    crtc_id == NI_DIG_FE_CNTL_SOURCE_SELECT(dig_fe)) {
+			/* found running pipe */
+			found_crtc = true;
+			dig_fe_mask = 1 << i;
+			dig_fe = i;
+			break;
+		}
+	}
+
+	if (found_crtc) {
+		/* loop through all running dig_be to find selected dig_fe */
+		for (i = 0; i < ARRAY_SIZE(ni_dig_offsets); i++) {
+			dig_be = RREG32(NI_DIG_BE_CNTL + ni_dig_offsets[i]);
+			/* if dig_fe_selected by dig_be? */
+			digs_fe_selected = NI_DIG_BE_CNTL_FE_SOURCE_SELECT(dig_be);
+			dig_be_mode = NI_DIG_FE_CNTL_MODE(dig_be);
+			if (dig_fe_mask & digs_fe_selected &&
+			    /* if dig_be in sst mode? */
+			    dig_be_mode == NI_DIG_BE_DPSST) {
+				dig_en_be = RREG32(NI_DIG_BE_EN_CNTL +
+						   ni_dig_offsets[i]);
+				uniphy_pll = RREG32(NI_DCIO_UNIPHY0_PLL_CONTROL1 +
+						    ni_tx_offsets[i]);
+				/* dig_be enable and tx is running */
+				if (dig_en_be & NI_DIG_BE_EN_CNTL_ENABLE &&
+				    dig_en_be & NI_DIG_BE_EN_CNTL_SYMBCLK_ON &&
+				    uniphy_pll & NI_DCIO_UNIPHY0_PLL_CONTROL1_ENABLE) {
+					is_enabled = true;
+					*ret_dig_fe = dig_fe;
+					break;
+				}
+			}
+		}
+	}
+
+	return is_enabled;
+}
+
+/*
+ * Blank dig when in dp sst mode
+ * Dig ignores crtc timing
+ */
+static void evergreen_blank_dp_output(struct radeon_device *rdev,
+				      unsigned dig_fe)
+{
+	unsigned stream_ctrl;
+	unsigned fifo_ctrl;
+	unsigned counter = 0;
+
+	if (dig_fe >= ARRAY_SIZE(evergreen_dp_offsets)) {
+		DRM_ERROR("invalid dig_fe %d\n", dig_fe);
+		return;
+	}
+
+	stream_ctrl = RREG32(EVERGREEN_DP_VID_STREAM_CNTL +
+			     evergreen_dp_offsets[dig_fe]);
+	if (!(stream_ctrl & EVERGREEN_DP_VID_STREAM_CNTL_ENABLE)) {
+		DRM_ERROR("dig %d , should be enable\n", dig_fe);
+		return;
+	}
+
+	stream_ctrl &=~EVERGREEN_DP_VID_STREAM_CNTL_ENABLE;
+	WREG32(EVERGREEN_DP_VID_STREAM_CNTL +
+	       evergreen_dp_offsets[dig_fe], stream_ctrl);
+
+	stream_ctrl = RREG32(EVERGREEN_DP_VID_STREAM_CNTL +
+			     evergreen_dp_offsets[dig_fe]);
+	while (counter < 32 && stream_ctrl & EVERGREEN_DP_VID_STREAM_STATUS) {
+		msleep(1);
+		counter++;
+		stream_ctrl = RREG32(EVERGREEN_DP_VID_STREAM_CNTL +
+				     evergreen_dp_offsets[dig_fe]);
+	}
+	if (counter >= 32 )
+		DRM_ERROR("counter exceeds %d\n", counter);
+
+	fifo_ctrl = RREG32(EVERGREEN_DP_STEER_FIFO + evergreen_dp_offsets[dig_fe]);
+	fifo_ctrl |= EVERGREEN_DP_STEER_FIFO_RESET;
+	WREG32(EVERGREEN_DP_STEER_FIFO + evergreen_dp_offsets[dig_fe], fifo_ctrl);
+
+}
+
 void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save)
 {
 	u32 crtc_enabled, tmp, frame_count, blackout;
 	int i, j;
+	unsigned dig_fe;
 
 	if (!ASIC_IS_NODCE(rdev)) {
 		save->vga_render_control = RREG32(VGA_RENDER_CONTROL);

@@ -2651,7 +2793,17 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save)
 				break;
 			udelay(1);
 		}
+		/*we should disable dig if it drives dp sst*/
+		/*but we are in radeon_device_init and the topology is unknown*/
+		/*and it is available after radeon_modeset_init*/
+		/*the following method radeon_atom_encoder_dpms_dig*/
+		/*does the job if we initialize it properly*/
+		/*for now we do it this manually*/
+		/**/
+		if (ASIC_IS_DCE5(rdev) &&
+		    evergreen_is_dp_sst_stream_enabled(rdev, i ,&dig_fe))
+			evergreen_blank_dp_output(rdev, dig_fe);
+		/*we could remove 6 lines below*/
 		/* XXX this is a hack to avoid strange behavior with EFI on certain systems */
 		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
 		tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);

@@ -250,8 +250,43 @@
 
 /* HDMI blocks at 0x7030, 0x7c30, 0x10830, 0x11430, 0x12030, 0x12c30 */
 #define EVERGREEN_HDMI_BASE				0x7030
+/*DIG block*/
+#define NI_DIG0_REGISTER_OFFSET			(0x7000  - 0x7000)
+#define NI_DIG1_REGISTER_OFFSET			(0x7C00  - 0x7000)
+#define NI_DIG2_REGISTER_OFFSET			(0x10800 - 0x7000)
+#define NI_DIG3_REGISTER_OFFSET			(0x11400 - 0x7000)
+#define NI_DIG4_REGISTER_OFFSET			(0x12000 - 0x7000)
+#define NI_DIG5_REGISTER_OFFSET			(0x12C00 - 0x7000)
+
+
+#define NI_DIG_FE_CNTL				0x7000
+#	define NI_DIG_FE_CNTL_SOURCE_SELECT(x)	((x) & 0x3)
+#	define NI_DIG_FE_CNTL_SYMCLK_FE_ON	(1<<24)
+
+
+#define NI_DIG_BE_CNTL				0x7140
+#	define NI_DIG_BE_CNTL_FE_SOURCE_SELECT(x)	(((x) >> 8 ) & 0x3F)
+#	define NI_DIG_FE_CNTL_MODE(x)		(((x) >> 16) & 0x7 )
+
+#define NI_DIG_BE_EN_CNTL			0x7144
+#	define NI_DIG_BE_EN_CNTL_ENABLE		(1 << 0)
+#	define NI_DIG_BE_EN_CNTL_SYMBCLK_ON	(1 << 8)
+#	define NI_DIG_BE_DPSST			0
+
+/* Display Port block */
+#define EVERGREEN_DP0_REGISTER_OFFSET		(0x730C  - 0x730C)
+#define EVERGREEN_DP1_REGISTER_OFFSET		(0x7F0C  - 0x730C)
+#define EVERGREEN_DP2_REGISTER_OFFSET		(0x10B0C - 0x730C)
+#define EVERGREEN_DP3_REGISTER_OFFSET		(0x1170C - 0x730C)
+#define EVERGREEN_DP4_REGISTER_OFFSET		(0x1230C - 0x730C)
+#define EVERGREEN_DP5_REGISTER_OFFSET		(0x12F0C - 0x730C)
+
+
+#define EVERGREEN_DP_VID_STREAM_CNTL		0x730C
+#	define EVERGREEN_DP_VID_STREAM_CNTL_ENABLE	(1 << 0)
+#	define EVERGREEN_DP_VID_STREAM_STATUS		(1 <<16)
+#define EVERGREEN_DP_STEER_FIFO			0x7310
+#	define EVERGREEN_DP_STEER_FIFO_RESET	(1 << 0)
 #define EVERGREEN_DP_SEC_CNTL			0x7280
 #	define EVERGREEN_DP_SEC_STREAM_ENABLE	(1 << 0)
 #	define EVERGREEN_DP_SEC_ASP_ENABLE	(1 << 4)

@@ -266,4 +301,15 @@
 #	define EVERGREEN_DP_SEC_N_BASE_MULTIPLE(x)	(((x) & 0xf) << 24)
 #	define EVERGREEN_DP_SEC_SS_EN			(1 << 28)
 
+/*DCIO_UNIPHY block*/
+#define NI_DCIO_UNIPHY0_UNIPHY_TX_CONTROL1	(0x6600 - 0x6600)
+#define NI_DCIO_UNIPHY1_UNIPHY_TX_CONTROL1	(0x6640 - 0x6600)
+#define NI_DCIO_UNIPHY2_UNIPHY_TX_CONTROL1	(0x6680 - 0x6600)
+#define NI_DCIO_UNIPHY3_UNIPHY_TX_CONTROL1	(0x66C0 - 0x6600)
+#define NI_DCIO_UNIPHY4_UNIPHY_TX_CONTROL1	(0x6700 - 0x6600)
+#define NI_DCIO_UNIPHY5_UNIPHY_TX_CONTROL1	(0x6740 - 0x6600)
+
+#define NI_DCIO_UNIPHY0_PLL_CONTROL1		0x6618
+#	define NI_DCIO_UNIPHY0_PLL_CONTROL1_ENABLE	(1 << 0)
+
 #endif

@@ -62,10 +62,6 @@ bool radeon_has_atpx(void) {
 	return radeon_atpx_priv.atpx_detected;
 }
 
-bool radeon_has_atpx_dgpu_power_cntl(void) {
-	return radeon_atpx_priv.atpx.functions.power_cntl;
-}
-
 /**
  * radeon_atpx_call - call an ATPX method
  *

@@ -145,6 +141,10 @@ static void radeon_atpx_parse_functions(struct radeon_atpx_functions *f, u32 mask)
  */
 static int radeon_atpx_validate(struct radeon_atpx *atpx)
 {
+	/* make sure required functions are enabled */
+	/* dGPU power control is required */
+	atpx->functions.power_cntl = true;
+
 	if (atpx->functions.px_params) {
 		union acpi_object *info;
 		struct atpx_px_params output;

@@ -1996,10 +1996,12 @@ radeon_add_atom_connector(struct drm_device *dev,
 						      rdev->mode_info.dither_property,
 						      RADEON_FMT_DITHER_DISABLE);
 
-			if (radeon_audio != 0)
+			if (radeon_audio != 0) {
 				drm_object_attach_property(&radeon_connector->base.base,
 							   rdev->mode_info.audio_property,
 							   RADEON_AUDIO_AUTO);
+				radeon_connector->audio = RADEON_AUDIO_AUTO;
+			}
 			if (ASIC_IS_DCE5(rdev))
 				drm_object_attach_property(&radeon_connector->base.base,
 							   rdev->mode_info.output_csc_property,

@@ -2124,6 +2126,7 @@ radeon_add_atom_connector(struct drm_device *dev,
 				drm_object_attach_property(&radeon_connector->base.base,
 							   rdev->mode_info.audio_property,
 							   RADEON_AUDIO_AUTO);
+				radeon_connector->audio = RADEON_AUDIO_AUTO;
 			}
 			if (connector_type == DRM_MODE_CONNECTOR_DVII) {
 				radeon_connector->dac_load_detect = true;

@@ -2179,6 +2182,7 @@ radeon_add_atom_connector(struct drm_device *dev,
 				drm_object_attach_property(&radeon_connector->base.base,
 							   rdev->mode_info.audio_property,
 							   RADEON_AUDIO_AUTO);
+				radeon_connector->audio = RADEON_AUDIO_AUTO;
 			}
 			if (ASIC_IS_DCE5(rdev))
 				drm_object_attach_property(&radeon_connector->base.base,

@@ -2231,6 +2235,7 @@ radeon_add_atom_connector(struct drm_device *dev,
 				drm_object_attach_property(&radeon_connector->base.base,
 							   rdev->mode_info.audio_property,
 							   RADEON_AUDIO_AUTO);
+				radeon_connector->audio = RADEON_AUDIO_AUTO;
 			}
 			if (ASIC_IS_DCE5(rdev))
 				drm_object_attach_property(&radeon_connector->base.base,

@@ -103,12 +103,6 @@ static const char radeon_family_name[][16] = {
 	"LAST",
 };
 
-#if defined(CONFIG_VGA_SWITCHEROO)
-bool radeon_has_atpx_dgpu_power_cntl(void);
-#else
-static inline bool radeon_has_atpx_dgpu_power_cntl(void) { return false; }
-#endif
-
 #define RADEON_PX_QUIRK_DISABLE_PX  (1 << 0)
 #define RADEON_PX_QUIRK_LONG_WAKEUP (1 << 1)
 

@@ -1439,7 +1433,7 @@ int radeon_device_init(struct radeon_device *rdev,
 	 * ignore it */
 	vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode);
 
-	if ((rdev->flags & RADEON_IS_PX) && radeon_has_atpx_dgpu_power_cntl())
+	if (rdev->flags & RADEON_IS_PX)
 		runtime = true;
 	vga_switcheroo_register_client(rdev->pdev, &radeon_switcheroo_ops, runtime);
 	if (runtime)

@@ -235,6 +235,8 @@ static int radeon_verify_access(struct ttm_buffer_object *bo, struct file *filp)
 {
 	struct radeon_bo *rbo = container_of(bo, struct radeon_bo, tbo);
 
+	if (radeon_ttm_tt_has_userptr(bo->ttm))
+		return -EPERM;
 	return drm_vma_node_verify_access(&rbo->gem_base.vma_node, filp);
 }
 

@@ -2931,6 +2931,7 @@ static struct si_dpm_quirk si_dpm_quirk_list[] = {
 	{ PCI_VENDOR_ID_ATI, 0x6811, 0x1462, 0x2015, 0, 120000 },
 	{ PCI_VENDOR_ID_ATI, 0x6811, 0x1043, 0x2015, 0, 120000 },
 	{ PCI_VENDOR_ID_ATI, 0x6811, 0x148c, 0x2015, 0, 120000 },
+	{ PCI_VENDOR_ID_ATI, 0x6810, 0x1682, 0x9275, 0, 120000 },
 	{ 0, 0, 0, 0 },
 };
 

@@ -1,6 +1,7 @@
 config STM
 	tristate "System Trace Module devices"
 	select CONFIGFS_FS
+	select SRCU
 	help
 	  A System Trace Module (STM) is a device exporting data in System
 	  Trace Protocol (STP) format as defined by MIPI STP standards.

@@ -116,8 +116,8 @@ struct cpm_i2c {
 	cbd_t __iomem *rbase;
 	u_char *txbuf[CPM_MAXBD];
 	u_char *rxbuf[CPM_MAXBD];
-	u32 txdma[CPM_MAXBD];
-	u32 rxdma[CPM_MAXBD];
+	dma_addr_t txdma[CPM_MAXBD];
+	dma_addr_t rxdma[CPM_MAXBD];
 };
 
 static irqreturn_t cpm_i2c_interrupt(int irq, void *dev_id)

@@ -671,7 +671,9 @@ static int exynos5_i2c_xfer(struct i2c_adapter *adap,
 		return -EIO;
 	}
 
-	clk_prepare_enable(i2c->clk);
+	ret = clk_enable(i2c->clk);
+	if (ret)
+		return ret;
 
 	for (i = 0; i < num; i++, msgs++) {
 		stop = (i == num - 1);

@@ -695,7 +697,7 @@ static int exynos5_i2c_xfer(struct i2c_adapter *adap,
 	}
 
  out:
-	clk_disable_unprepare(i2c->clk);
+	clk_disable(i2c->clk);
 	return ret;
 }
 

@@ -747,7 +749,9 @@ static int exynos5_i2c_probe(struct platform_device *pdev)
 		return -ENOENT;
 	}
 
-	clk_prepare_enable(i2c->clk);
+	ret = clk_prepare_enable(i2c->clk);
+	if (ret)
+		return ret;
 
 	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	i2c->regs = devm_ioremap_resource(&pdev->dev, mem);

@@ -799,6 +803,10 @@ static int exynos5_i2c_probe(struct platform_device *pdev)
 
 	platform_set_drvdata(pdev, i2c);
 
+	clk_disable(i2c->clk);
+
+	return 0;
+
  err_clk:
 	clk_disable_unprepare(i2c->clk);
 	return ret;

@@ -810,6 +818,8 @@ static int exynos5_i2c_remove(struct platform_device *pdev)
 
 	i2c_del_adapter(&i2c->adap);
 
+	clk_unprepare(i2c->clk);
+
 	return 0;
 }
 

@@ -821,6 +831,8 @@ static int exynos5_i2c_suspend_noirq(struct device *dev)
 
 	i2c->suspended = 1;
 
+	clk_unprepare(i2c->clk);
+
 	return 0;
 }
 

@@ -830,7 +842,9 @@ static int exynos5_i2c_resume_noirq(struct device *dev)
 	struct exynos5_i2c *i2c = platform_get_drvdata(pdev);
 	int ret = 0;
 
-	clk_prepare_enable(i2c->clk);
+	ret = clk_prepare_enable(i2c->clk);
+	if (ret)
+		return ret;
 
 	ret = exynos5_hsi2c_clock_setup(i2c);
 	if (ret) {

@@ -839,7 +853,7 @@ static int exynos5_i2c_resume_noirq(struct device *dev)
 	}
 
 	exynos5_i2c_init(i2c);
-	clk_disable_unprepare(i2c->clk);
+	clk_disable(i2c->clk);
 	i2c->suspended = 0;
 
 	return 0;

@@ -48,6 +48,7 @@
 
 #include <asm/uaccess.h>
 
+#include <rdma/ib.h>
 #include <rdma/ib_cm.h>
 #include <rdma/ib_user_cm.h>
 #include <rdma/ib_marshall.h>

@@ -1103,6 +1104,9 @@ static ssize_t ib_ucm_write(struct file *filp, const char __user *buf,
 	struct ib_ucm_cmd_hdr hdr;
 	ssize_t result;
 
+	if (WARN_ON_ONCE(!ib_safe_file_access(filp)))
+		return -EACCES;
+
 	if (len < sizeof(hdr))
 		return -EINVAL;
 

@@ -1574,6 +1574,9 @@ static ssize_t ucma_write(struct file *filp, const char __user *buf,
 	struct rdma_ucm_cmd_hdr hdr;
 	ssize_t ret;
 
+	if (WARN_ON_ONCE(!ib_safe_file_access(filp)))
+		return -EACCES;
+
 	if (len < sizeof(hdr))
 		return -EINVAL;
 

@@ -48,6 +48,8 @@
 
 #include <asm/uaccess.h>
 
+#include <rdma/ib.h>
+
 #include "uverbs.h"
 
 MODULE_AUTHOR("Roland Dreier");

@@ -682,6 +684,9 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
 	int srcu_key;
 	ssize_t ret;
 
+	if (WARN_ON_ONCE(!ib_safe_file_access(filp)))
+		return -EACCES;
+
 	if (count < sizeof hdr)
 		return -EINVAL;
 

@@ -273,7 +273,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
 		     sizeof(struct mlx5_wqe_ctrl_seg)) /
 		     sizeof(struct mlx5_wqe_data_seg);
 	props->max_sge = min(max_rq_sg, max_sq_sg);
-	props->max_sge_rd = props->max_sge;
+	props->max_sge_rd = MLX5_MAX_SGE_RD;
 	props->max_cq = 1 << MLX5_CAP_GEN(mdev, log_max_cq);
 	props->max_cqe = (1 << MLX5_CAP_GEN(mdev, log_max_cq_sz)) - 1;
 	props->max_mr = 1 << MLX5_CAP_GEN(mdev, log_max_mkey);

@@ -45,6 +45,8 @@
 #include <linux/export.h>
 #include <linux/uio.h>
 
+#include <rdma/ib.h>
+
 #include "qib.h"
 #include "qib_common.h"
 #include "qib_user_sdma.h"

@@ -2067,6 +2069,9 @@ static ssize_t qib_write(struct file *fp, const char __user *data,
 	ssize_t ret = 0;
 	void *dest;
 
+	if (WARN_ON_ONCE(!ib_safe_file_access(fp)))
+		return -EACCES;
+
 	if (count < sizeof(cmd.type)) {
 		ret = -EINVAL;
 		goto bail;

@@ -353,7 +353,8 @@ static int pmic8xxx_pwrkey_probe(struct platform_device *pdev)
 	if (of_property_read_u32(pdev->dev.of_node, "debounce", &kpd_delay))
 		kpd_delay = 15625;
 
-	if (kpd_delay > 62500 || kpd_delay == 0) {
+	/* Valid range of pwr key trigger delay is 1/64 sec to 2 seconds. */
+	if (kpd_delay > USEC_PER_SEC * 2 || kpd_delay < USEC_PER_SEC / 64) {
 		dev_err(&pdev->dev, "invalid power key trigger delay\n");
 		return -EINVAL;
 	}

@@ -385,8 +386,8 @@ static int pmic8xxx_pwrkey_probe(struct platform_device *pdev)
 	pwr->name = "pmic8xxx_pwrkey";
 	pwr->phys = "pmic8xxx_pwrkey/input0";
 
-	delay = (kpd_delay << 10) / USEC_PER_SEC;
-	delay = 1 + ilog2(delay);
+	delay = (kpd_delay << 6) / USEC_PER_SEC;
+	delay = ilog2(delay);
 
 	err = regmap_read(regmap, PON_CNTL_1, &pon_cntl);
 	if (err < 0) {

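The corrected conversion reads directly as register = log2(delay_us * 64 / 1e6): the trigger-delay field counts powers of two above the 1/64 s minimum, which is also why the probe now rejects anything outside 1/64 s to 2 s. A worked, runnable version of that arithmetic — ilog2u() is a stand-in for the kernel's ilog2():

#include <stdio.h>

#define USEC_PER_SEC 1000000u

/* floor(log2(x)); stand-in for the kernel's ilog2() */
static unsigned int ilog2u(unsigned int x)
{
    unsigned int r = 0;
    while (x >>= 1)
        r++;
    return r;
}

int main(void)
{
    /* sample delays across the valid 1/64 s .. 2 s range */
    unsigned int samples[] = { 15625, 62500, 500000, 2000000 };

    for (unsigned int i = 0; i < 4; i++) {
        unsigned int kpd_delay = samples[i];
        unsigned int delay = (kpd_delay << 6) / USEC_PER_SEC;

        delay = ilog2u(delay);
        printf("%7u us -> field value %u\n", kpd_delay, delay);
        /* prints 0, 2, 5, 7: the whole valid range maps onto 0..7 */
    }
    return 0;
}
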
@@ -858,6 +858,14 @@ static int gtco_probe(struct usb_interface *usbinterface,
 		goto err_free_buf;
 	}
 
+	/* Sanity check that a device has an endpoint */
+	if (usbinterface->altsetting[0].desc.bNumEndpoints < 1) {
+		dev_err(&usbinterface->dev,
+			"Invalid number of endpoints\n");
+		error = -EINVAL;
+		goto err_free_urb;
+	}
+
 	/*
 	 * The endpoint is always altsetting 0, we know this since we know
 	 * this device only has one interrupt endpoint

@@ -879,7 +887,7 @@ static int gtco_probe(struct usb_interface *usbinterface,
 	 * HID report descriptor
 	 */
 	if (usb_get_extra_descriptor(usbinterface->cur_altsetting,
-				     HID_DEVICE_TYPE, &hid_desc) != 0){
+				     HID_DEVICE_TYPE, &hid_desc) != 0) {
 		dev_err(&usbinterface->dev,
 			"Can't retrieve exta USB descriptor to get hid report descriptor length\n");
 		error = -EIO;

@@ -91,6 +91,7 @@ struct iommu_dev_data {
 	struct list_head dev_data_list;	  /* For global dev_data_list */
 	struct protection_domain *domain; /* Domain the device is bound to */
 	u16 devid;			  /* PCI Device ID */
+	u16 alias;			  /* Alias Device ID */
 	bool iommu_v2;			  /* Device can make use of IOMMUv2 */
 	bool passthrough;		  /* Device is identity mapped */
 	struct {

@@ -125,6 +126,13 @@ static struct protection_domain *to_pdomain(struct iommu_domain *dom)
 	return container_of(dom, struct protection_domain, domain);
 }
 
+static inline u16 get_device_id(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+
+	return PCI_DEVID(pdev->bus->number, pdev->devfn);
+}
+
 static struct iommu_dev_data *alloc_dev_data(u16 devid)
 {
 	struct iommu_dev_data *dev_data;

@@ -162,6 +170,68 @@ out_unlock:
 	return dev_data;
 }
 
+static int __last_alias(struct pci_dev *pdev, u16 alias, void *data)
+{
+	*(u16 *)data = alias;
+	return 0;
+}
+
+static u16 get_alias(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	u16 devid, ivrs_alias, pci_alias;
+
+	devid = get_device_id(dev);
+	ivrs_alias = amd_iommu_alias_table[devid];
+	pci_for_each_dma_alias(pdev, __last_alias, &pci_alias);
+
+	if (ivrs_alias == pci_alias)
+		return ivrs_alias;
+
+	/*
+	 * DMA alias showdown
+	 *
+	 * The IVRS is fairly reliable in telling us about aliases, but it
+	 * can't know about every screwy device.  If we don't have an IVRS
+	 * reported alias, use the PCI reported alias.  In that case we may
+	 * still need to initialize the rlookup and dev_table entries if the
+	 * alias is to a non-existent device.
+	 */
+	if (ivrs_alias == devid) {
+		if (!amd_iommu_rlookup_table[pci_alias]) {
+			amd_iommu_rlookup_table[pci_alias] =
+				amd_iommu_rlookup_table[devid];
+			memcpy(amd_iommu_dev_table[pci_alias].data,
+			       amd_iommu_dev_table[devid].data,
+			       sizeof(amd_iommu_dev_table[pci_alias].data));
+		}
+
+		return pci_alias;
+	}
+
+	pr_info("AMD-Vi: Using IVRS reported alias %02x:%02x.%d "
+		"for device %s[%04x:%04x], kernel reported alias "
+		"%02x:%02x.%d\n", PCI_BUS_NUM(ivrs_alias), PCI_SLOT(ivrs_alias),
+		PCI_FUNC(ivrs_alias), dev_name(dev), pdev->vendor, pdev->device,
+		PCI_BUS_NUM(pci_alias), PCI_SLOT(pci_alias),
+		PCI_FUNC(pci_alias));
+
+	/*
+	 * If we don't have a PCI DMA alias and the IVRS alias is on the same
+	 * bus, then the IVRS table may know about a quirk that we don't.
+	 */
+	if (pci_alias == devid &&
+	    PCI_BUS_NUM(ivrs_alias) == pdev->bus->number) {
+		pdev->dev_flags |= PCI_DEV_FLAGS_DMA_ALIAS_DEVFN;
+		pdev->dma_alias_devfn = ivrs_alias & 0xff;
+		pr_info("AMD-Vi: Added PCI DMA alias %02x.%d for %s\n",
+			PCI_SLOT(ivrs_alias), PCI_FUNC(ivrs_alias),
+			dev_name(dev));
+	}
+
+	return ivrs_alias;
+}
+
 static struct iommu_dev_data *find_dev_data(u16 devid)
 {
 	struct iommu_dev_data *dev_data;

@@ -174,13 +244,6 @@ static struct iommu_dev_data *find_dev_data(u16 devid)
 	return dev_data;
 }
 
-static inline u16 get_device_id(struct device *dev)
-{
-	struct pci_dev *pdev = to_pci_dev(dev);
-
-	return PCI_DEVID(pdev->bus->number, pdev->devfn);
-}
-
 static struct iommu_dev_data *get_dev_data(struct device *dev)
 {
 	return dev->archdata.iommu;

@@ -308,6 +371,8 @@ static int iommu_init_device(struct device *dev)
 	if (!dev_data)
 		return -ENOMEM;
 
+	dev_data->alias = get_alias(dev);
+
 	if (pci_iommuv2_capable(pdev)) {
 		struct amd_iommu *iommu;
 

@@ -328,7 +393,7 @@ static void iommu_ignore_device(struct device *dev)
 	u16 devid, alias;
 
 	devid = get_device_id(dev);
-	alias = amd_iommu_alias_table[devid];
+	alias = get_alias(dev);
 
 	memset(&amd_iommu_dev_table[devid], 0, sizeof(struct dev_table_entry));
 	memset(&amd_iommu_dev_table[alias], 0, sizeof(struct dev_table_entry));

@@ -1017,7 +1082,7 @@ static int device_flush_dte(struct iommu_dev_data *dev_data)
 	int ret;
 
 	iommu = amd_iommu_rlookup_table[dev_data->devid];
-	alias = amd_iommu_alias_table[dev_data->devid];
+	alias = dev_data->alias;
 
 	ret = iommu_flush_dte(iommu, dev_data->devid);
 	if (!ret && alias != dev_data->devid)

@@ -1891,7 +1956,7 @@ static void do_attach(struct iommu_dev_data *dev_data,
 	bool ats;
 
 	iommu = amd_iommu_rlookup_table[dev_data->devid];
-	alias = amd_iommu_alias_table[dev_data->devid];
+	alias = dev_data->alias;
 	ats   = dev_data->ats.enabled;
 
 	/* Update data structures */

@@ -1925,7 +1990,7 @@ static void do_detach(struct iommu_dev_data *dev_data)
 		return;
 
 	iommu = amd_iommu_rlookup_table[dev_data->devid];
-	alias = amd_iommu_alias_table[dev_data->devid];
+	alias = dev_data->alias;
 
 	/* decrease reference counters */
 	dev_data->domain->dev_iommu[iommu->index] -= 1;

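__last_alias() above is a fold over pci_for_each_dma_alias(): the callback overwrites *data on every alias, so when the walk finishes the caller is left holding the last alias the PCI core reports. A userspace sketch of the pattern — only the callback shape mirrors the kernel usage; the iterator here is a mock over a fixed array, invented for illustration:

#include <stdint.h>
#include <stdio.h>

typedef uint16_t u16;

/* same shape as the kernel callback minus the pci_dev argument:
 * remember whatever alias we were called with last */
static int __last_alias(u16 alias, void *data)
{
    *(u16 *)data = alias;
    return 0;   /* nonzero would stop the walk early */
}

/* mock walk; the real pci_for_each_dma_alias() follows bridges and quirks */
static void for_each_dma_alias(const u16 *aliases, int n,
                               int (*fn)(u16, void *), void *data)
{
    for (int i = 0; i < n; i++)
        if (fn(aliases[i], data))
            return;
}

int main(void)
{
    u16 aliases[] = { 0x0008, 0x0100, 0x0120 };
    u16 pci_alias = 0;

    for_each_dma_alias(aliases, 3, __last_alias, &pci_alias);
    printf("last alias: 0x%04x\n", pci_alias);   /* 0x0120 */
    return 0;
}
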
@@ -403,7 +403,7 @@ static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
 		unsigned int s_length = sg_dma_len(s);
 		unsigned int s_dma_len = s->length;
 
-		s->offset = s_offset;
+		s->offset += s_offset;
 		s->length = s_length;
 		sg_dma_address(s) = dma_addr + s_offset;
 		dma_addr += s_dma_len;

@@ -422,7 +422,7 @@ static void __invalidate_sg(struct scatterlist *sg, int nents)
 
 	for_each_sg(sg, s, nents, i) {
 		if (sg_dma_address(s) != DMA_ERROR_CODE)
-			s->offset = sg_dma_address(s);
+			s->offset += sg_dma_address(s);
 		if (sg_dma_len(s))
 			s->length = sg_dma_len(s);
 		sg_dma_address(s) = DMA_ERROR_CODE;

@@ -183,7 +183,7 @@ static void __iomem * __init icoll_init_iobase(struct device_node *np)
 	void __iomem *icoll_base;
 
 	icoll_base = of_io_request_and_map(np, 0, np->name);
-	if (!icoll_base)
+	if (IS_ERR(icoll_base))
 		panic("%s: unable to map resource", np->full_name);
 	return icoll_base;
 }

@@ -154,9 +154,9 @@ static int __init sunxi_sc_nmi_irq_init(struct device_node *node,
 
 	gc = irq_get_domain_generic_chip(domain, 0);
 	gc->reg_base = of_io_request_and_map(node, 0, of_node_full_name(node));
-	if (!gc->reg_base) {
+	if (IS_ERR(gc->reg_base)) {
 		pr_err("unable to map resource\n");
-		ret = -ENOMEM;
+		ret = PTR_ERR(gc->reg_base);
 		goto fail_irqd_remove;
 	}
 

@@ -867,39 +867,55 @@ static int blocks_are_unmapped_or_clean(struct dm_cache_metadata *cmd,
 	return 0;
 }
 
-#define WRITE_LOCK(cmd)	\
-	down_write(&cmd->root_lock); \
-	if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) { \
-		up_write(&cmd->root_lock); \
-		return -EINVAL; \
+static bool cmd_write_lock(struct dm_cache_metadata *cmd)
+{
+	down_write(&cmd->root_lock);
+	if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) {
+		up_write(&cmd->root_lock);
+		return false;
 	}
+	return true;
+}
 
-#define WRITE_LOCK_VOID(cmd) \
-	down_write(&cmd->root_lock); \
-	if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) { \
-		up_write(&cmd->root_lock); \
-		return; \
-	}
+#define WRITE_LOCK(cmd) \
+	do { \
+		if (!cmd_write_lock((cmd))) \
+			return -EINVAL; \
+	} while(0)
+
+#define WRITE_LOCK_VOID(cmd) \
+	do { \
+		if (!cmd_write_lock((cmd))) \
+			return; \
+	} while(0)
 
 #define WRITE_UNLOCK(cmd) \
-	up_write(&cmd->root_lock)
+	up_write(&(cmd)->root_lock)
 
-#define READ_LOCK(cmd) \
-	down_read(&cmd->root_lock); \
-	if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) { \
-		up_read(&cmd->root_lock); \
-		return -EINVAL; \
+static bool cmd_read_lock(struct dm_cache_metadata *cmd)
+{
+	down_read(&cmd->root_lock);
+	if (cmd->fail_io) {
+		up_read(&cmd->root_lock);
+		return false;
 	}
+	return true;
+}
 
-#define READ_LOCK_VOID(cmd)	\
-	down_read(&cmd->root_lock); \
-	if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) { \
-		up_read(&cmd->root_lock); \
-		return; \
-	}
+#define READ_LOCK(cmd) \
+	do { \
+		if (!cmd_read_lock((cmd))) \
+			return -EINVAL; \
+	} while(0)
+
+#define READ_LOCK_VOID(cmd) \
+	do { \
+		if (!cmd_read_lock((cmd))) \
+			return; \
+	} while(0)
 
 #define READ_UNLOCK(cmd) \
-	up_read(&cmd->root_lock)
+	up_read(&(cmd)->root_lock)
 
 int dm_cache_resize(struct dm_cache_metadata *cmd, dm_cblock_t new_cache_size)
 {

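Beyond moving the checks into real functions, the new WRITE_LOCK/READ_LOCK bodies are wrapped in do { ... } while(0). That wrapper is what makes a multi-statement macro behave as a single statement under if/else: the old macros expanded to a statement followed by an if, so something like `if (x) WRITE_LOCK(cmd); else ...` would have guarded only part of the expansion. A compact, runnable illustration with a stub lock — all names here are hypothetical, not the dm-cache symbols:

#include <stdbool.h>
#include <stdio.h>

static bool cmd_write_lock(int *cmd) { return *cmd == 0; }

/* multi-statement macro without the wrapper (the old style);
 * substituting it for GOOD_LOCK in use_good() below fails to compile:
 * the trailing ';' ends the hidden if, orphaning the 'else' */
#define BAD_LOCK(cmd) \
    if (!cmd_write_lock(cmd)) \
        return -1;

/* the fixed style: behaves like exactly one statement everywhere */
#define GOOD_LOCK(cmd) \
    do { \
        if (!cmd_write_lock(cmd)) \
            return -1; \
    } while (0)

static int use_good(int *cmd, bool flag)
{
    if (flag)
        GOOD_LOCK(cmd);
    else
        puts("skipped");
    return 0;
}

int main(void)
{
    int ok = 0, failing = 1;

    printf("%d\n", use_good(&ok, true));       /* 0  */
    printf("%d\n", use_good(&failing, true));  /* -1 */
    return 0;
}
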
@@ -1502,7 +1502,7 @@ static int __vb2_wait_for_done_vb(struct vb2_queue *q, int nonblocking)
  * Will sleep if required for nonblocking == false.
  */
 static int __vb2_get_done_vb(struct vb2_queue *q, struct vb2_buffer **vb,
-				int nonblocking)
+				void *pb, int nonblocking)
 {
 	unsigned long flags;
 	int ret;

@@ -1523,10 +1523,10 @@ static int __vb2_get_done_vb(struct vb2_queue *q, struct vb2_buffer **vb,
 	/*
 	 * Only remove the buffer from done_list if v4l2_buffer can handle all
 	 * the planes.
-	 * Verifying planes is NOT necessary since it already has been checked
-	 * before the buffer is queued/prepared. So it can never fail.
 	 */
-	list_del(&(*vb)->done_entry);
+	ret = call_bufop(q, verify_planes_array, *vb, pb);
+	if (!ret)
+		list_del(&(*vb)->done_entry);
 	spin_unlock_irqrestore(&q->done_lock, flags);
 
 	return ret;

@@ -1604,7 +1604,7 @@ int vb2_core_dqbuf(struct vb2_queue *q, void *pb, bool nonblocking)
 	struct vb2_buffer *vb = NULL;
 	int ret;
 
-	ret = __vb2_get_done_vb(q, &vb, nonblocking);
+	ret = __vb2_get_done_vb(q, &vb, pb, nonblocking);
 	if (ret < 0)
 		return ret;
 

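The new pb argument is handed to call_bufop(q, verify_planes_array, ...), vb2's pattern for optional per-buffer-type hooks: when the ops table provides no callback the call evaluates to success, otherwise the hook can veto removing the buffer from done_list. Roughly, as a userspace sketch — the struct and names are stand-ins, not the kernel's definitions:

#include <stdio.h>

struct buf { int planes; };

struct buf_ops {
    /* optional: return 0 if the user buffer can hold all planes */
    int (*verify_planes_array)(struct buf *vb, const void *pb);
};

/* missing hook means "nothing to verify", i.e. success */
static int call_verify(const struct buf_ops *ops, struct buf *vb, const void *pb)
{
    return ops->verify_planes_array ? ops->verify_planes_array(vb, pb) : 0;
}

static int verify_impl(struct buf *vb, const void *pb)
{
    (void)pb;
    return vb->planes <= 4 ? 0 : -1;   /* reject oversized buffers */
}

int main(void)
{
    struct buf small = { 2 }, big = { 8 };
    struct buf_ops with = { verify_impl }, without = { 0 };

    printf("%d %d %d\n",
           call_verify(&with, &small, 0),    /* 0: fits */
           call_verify(&with, &big, 0),      /* -1: vetoed, stays queued */
           call_verify(&without, &big, 0));  /* 0: no hook, no check */
    return 0;
}
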
@@ -49,7 +49,7 @@ struct frame_vector *vb2_create_framevec(unsigned long start,
 	vec = frame_vector_create(nr);
 	if (!vec)
 		return ERR_PTR(-ENOMEM);
-	ret = get_vaddr_frames(start, nr, write, 1, vec);
+	ret = get_vaddr_frames(start & PAGE_MASK, nr, write, true, vec);
 	if (ret < 0)
 		goto out_destroy;
 	/* We accept only complete set of PFNs */

@@ -67,6 +67,11 @@ static int __verify_planes_array(struct vb2_buffer *vb, const struct v4l2_buffer *b)
 	return 0;
 }
 
+static int __verify_planes_array_core(struct vb2_buffer *vb, const void *pb)
+{
+	return __verify_planes_array(vb, pb);
+}
+
 /**
  * __verify_length() - Verify that the bytesused value for each plane fits in
  * the plane length and that the data offset doesn't exceed the bytesused value.

@@ -432,6 +437,7 @@ static int __fill_vb2_buffer(struct vb2_buffer *vb,
 }
 
 static const struct vb2_buf_ops v4l2_buf_ops = {
+	.verify_planes_array	= __verify_planes_array_core,
 	.fill_user_buffer	= __fill_v4l2_buffer,
 	.fill_vb2_buffer	= __fill_vb2_buffer,
 	.set_timestamp		= __set_timestamp,

@@ -439,7 +439,7 @@ config ARM_CHARLCD
 	  still useful.
 
 config BMP085
-	bool
+	tristate
 	depends on SYSFS
 
 config BMP085_I2C

@@ -216,7 +216,7 @@ static s32 dpot_read_i2c(struct dpot_data *dpot, u8 reg)
 		 */
 		value = swab16(value);
 
-		if (dpot->uid == DPOT_UID(AD5271_ID))
+		if (dpot->uid == DPOT_UID(AD5274_ID))
 			value = value >> 2;
 		return value;
 	default:

@@ -288,7 +288,6 @@ unsigned int cxl_map_irq(struct cxl *adapter, irq_hw_number_t hwirq,
 void cxl_unmap_irq(unsigned int virq, void *cookie)
 {
 	free_irq(virq, cookie);
-	irq_dispose_mapping(virq);
 }
 
 static int cxl_register_one_irq(struct cxl *adapter,

@@ -1511,7 +1511,7 @@ off_t scif_register_pinned_pages(scif_epd_t epd,
 	if ((map_flags & SCIF_MAP_FIXED) &&
 	    ((ALIGN(offset, PAGE_SIZE) != offset) ||
 	    (offset < 0) ||
-	    (offset + (off_t)len < offset)))
+	    (len > LONG_MAX - offset)))
 		return -EINVAL;
 
 	might_sleep();

@@ -1614,7 +1614,7 @@ off_t scif_register(scif_epd_t epd, void *addr, size_t len, off_t offset,
 	if ((map_flags & SCIF_MAP_FIXED) &&
 	    ((ALIGN(offset, PAGE_SIZE) != offset) ||
 	    (offset < 0) ||
-	    (offset + (off_t)len < offset)))
+	    (len > LONG_MAX - offset)))
 		return -EINVAL;
 
 	/* Unsupported protection requested */

@@ -1732,7 +1732,8 @@ scif_unregister(scif_epd_t epd, off_t offset, size_t len)
 
 	/* Offset is not page aligned or offset+len wraps around */
 	if ((ALIGN(offset, PAGE_SIZE) != offset) ||
-	    (offset + (off_t)len < offset))
+	    (offset < 0) ||
+	    (len > LONG_MAX - offset))
 		return -EINVAL;
 
 	err = scif_verify_epd(ep);

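The rewritten checks avoid the signed-overflow idiom `offset + (off_t)len < offset`: for signed types that addition is undefined behavior once it overflows, so an optimizing compiler may legitimately delete the test, whereas `len > LONG_MAX - offset` rejects the same inputs without ever overflowing (offset is already known to be non-negative at that point). A runnable sketch of the safe form:

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

/* safe: no signed overflow for any input once offset >= 0 is checked */
static bool range_ok(long offset, unsigned long len)
{
    return offset >= 0 && len <= (unsigned long)(LONG_MAX - offset);
}

int main(void)
{
    printf("%d\n", range_ok(4096, 1UL << 20));      /* 1: fine          */
    printf("%d\n", range_ok(LONG_MAX - 10, 100));   /* 0: would wrap    */
    printf("%d\n", range_ok(-4096, 100));           /* 0: negative off  */
    return 0;
}
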
@@ -309,6 +309,36 @@ static const u16 brcmnand_regs_v60[] = {
 	[BRCMNAND_FC_BASE]		= 0x400,
 };
 
+/* BRCMNAND v7.1 */
+static const u16 brcmnand_regs_v71[] = {
+	[BRCMNAND_CMD_START]		= 0x04,
+	[BRCMNAND_CMD_EXT_ADDRESS]	= 0x08,
+	[BRCMNAND_CMD_ADDRESS]		= 0x0c,
+	[BRCMNAND_INTFC_STATUS]		= 0x14,
+	[BRCMNAND_CS_SELECT]		= 0x18,
+	[BRCMNAND_CS_XOR]		= 0x1c,
+	[BRCMNAND_LL_OP]		= 0x20,
+	[BRCMNAND_CS0_BASE]		= 0x50,
+	[BRCMNAND_CS1_BASE]		= 0,
+	[BRCMNAND_CORR_THRESHOLD]	= 0xdc,
+	[BRCMNAND_CORR_THRESHOLD_EXT]	= 0xe0,
+	[BRCMNAND_UNCORR_COUNT]		= 0xfc,
+	[BRCMNAND_CORR_COUNT]		= 0x100,
+	[BRCMNAND_CORR_EXT_ADDR]	= 0x10c,
+	[BRCMNAND_CORR_ADDR]		= 0x110,
+	[BRCMNAND_UNCORR_EXT_ADDR]	= 0x114,
+	[BRCMNAND_UNCORR_ADDR]		= 0x118,
+	[BRCMNAND_SEMAPHORE]		= 0x150,
+	[BRCMNAND_ID]			= 0x194,
+	[BRCMNAND_ID_EXT]		= 0x198,
+	[BRCMNAND_LL_RDATA]		= 0x19c,
+	[BRCMNAND_OOB_READ_BASE]	= 0x200,
+	[BRCMNAND_OOB_READ_10_BASE]	= 0,
+	[BRCMNAND_OOB_WRITE_BASE]	= 0x280,
+	[BRCMNAND_OOB_WRITE_10_BASE]	= 0,
+	[BRCMNAND_FC_BASE]		= 0x400,
+};
+
 enum brcmnand_cs_reg {
 	BRCMNAND_CS_CFG_EXT = 0,
 	BRCMNAND_CS_CFG,

@@ -404,7 +434,9 @@ static int brcmnand_revision_init(struct brcmnand_controller *ctrl)
 	}
 
 	/* Register offsets */
-	if (ctrl->nand_version >= 0x0600)
+	if (ctrl->nand_version >= 0x0701)
+		ctrl->reg_offsets = brcmnand_regs_v71;
+	else if (ctrl->nand_version >= 0x0600)
 		ctrl->reg_offsets = brcmnand_regs_v60;
 	else if (ctrl->nand_version >= 0x0500)
 		ctrl->reg_offsets = brcmnand_regs_v50;

@@ -3979,7 +3979,6 @@ static int nand_dt_init(struct mtd_info *mtd, struct nand_chip *chip,
 * This is the first phase of the normal nand_scan() function. It reads the
 * flash ID and sets up MTD fields accordingly.
 *
- * The mtd->owner field must be set to the module of the caller.
 */
 int nand_scan_ident(struct mtd_info *mtd, int maxchips,
 		    struct nand_flash_dev *table)

@@ -4403,19 +4402,12 @@ EXPORT_SYMBOL(nand_scan_tail);
 *
 * This fills out all the uninitialized function pointers with the defaults.
 * The flash ID is read and the mtd/chip structures are filled with the
- * appropriate values. The mtd->owner field must be set to the module of the
- * caller.
+ * appropriate values.
 */
 int nand_scan(struct mtd_info *mtd, int maxchips)
 {
 	int ret;
 
-	/* Many callers got this wrong, so check for it for a while... */
-	if (!mtd->owner && caller_is_module()) {
-		pr_crit("%s called with NULL mtd->owner!\n", __func__);
-		BUG();
-	}
-
 	ret = nand_scan_ident(mtd, maxchips, NULL);
 	if (!ret)
 		ret = nand_scan_tail(mtd);

@@ -1067,45 +1067,6 @@ static int spansion_quad_enable(struct spi_nor *nor)
 	return 0;
 }
 
-static int micron_quad_enable(struct spi_nor *nor)
-{
-	int ret;
-	u8 val;
-
-	ret = nor->read_reg(nor, SPINOR_OP_RD_EVCR, &val, 1);
-	if (ret < 0) {
-		dev_err(nor->dev, "error %d reading EVCR\n", ret);
-		return ret;
-	}
-
-	write_enable(nor);
-
-	/* set EVCR, enable quad I/O */
-	nor->cmd_buf[0] = val & ~EVCR_QUAD_EN_MICRON;
-	ret = nor->write_reg(nor, SPINOR_OP_WD_EVCR, nor->cmd_buf, 1);
-	if (ret < 0) {
-		dev_err(nor->dev, "error while writing EVCR register\n");
-		return ret;
-	}
-
-	ret = spi_nor_wait_till_ready(nor);
-	if (ret)
-		return ret;
-
-	/* read EVCR and check it */
-	ret = nor->read_reg(nor, SPINOR_OP_RD_EVCR, &val, 1);
-	if (ret < 0) {
-		dev_err(nor->dev, "error %d reading EVCR\n", ret);
-		return ret;
-	}
-	if (val & EVCR_QUAD_EN_MICRON) {
-		dev_err(nor->dev, "Micron EVCR Quad bit not clear\n");
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
 static int set_quad_mode(struct spi_nor *nor, const struct flash_info *info)
 {
 	int status;

@@ -1119,12 +1080,7 @@ static int set_quad_mode(struct spi_nor *nor, const struct flash_info *info)
 		}
 		return status;
 	case SNOR_MFR_MICRON:
-		status = micron_quad_enable(nor);
-		if (status) {
-			dev_err(nor->dev, "Micron quad-read not enabled\n");
-			return -EINVAL;
-		}
-		return status;
+		return 0;
 	default:
 		status = spansion_quad_enable(nor);
 		if (status) {

@@ -270,11 +270,17 @@ jme_reset_mac_processor(struct jme_adapter *jme)
 }
 
 static inline void
-jme_clear_pm(struct jme_adapter *jme)
+jme_clear_pm_enable_wol(struct jme_adapter *jme)
 {
 	jwrite32(jme, JME_PMCS, PMCS_STMASK | jme->reg_pmcs);
 }
 
+static inline void
+jme_clear_pm_disable_wol(struct jme_adapter *jme)
+{
+	jwrite32(jme, JME_PMCS, PMCS_STMASK);
+}
+
 static int
 jme_reload_eeprom(struct jme_adapter *jme)
 {

@@ -1853,7 +1859,7 @@ jme_open(struct net_device *netdev)
 	struct jme_adapter *jme = netdev_priv(netdev);
 	int rc;
 
-	jme_clear_pm(jme);
+	jme_clear_pm_disable_wol(jme);
 	JME_NAPI_ENABLE(jme);
 
 	tasklet_init(&jme->linkch_task, jme_link_change_tasklet,

@@ -1925,11 +1931,11 @@ jme_wait_link(struct jme_adapter *jme)
 static void
 jme_powersave_phy(struct jme_adapter *jme)
 {
-	if (jme->reg_pmcs) {
+	if (jme->reg_pmcs && device_may_wakeup(&jme->pdev->dev)) {
 		jme_set_100m_half(jme);
 		if (jme->reg_pmcs & (PMCS_LFEN | PMCS_LREN))
 			jme_wait_link(jme);
-		jme_clear_pm(jme);
+		jme_clear_pm_enable_wol(jme);
 	} else {
 		jme_phy_off(jme);
 	}

@@ -2646,9 +2652,6 @@ jme_set_wol(struct net_device *netdev,
 	if (wol->wolopts & WAKE_MAGIC)
 		jme->reg_pmcs |= PMCS_MFEN;
 
-	jwrite32(jme, JME_PMCS, jme->reg_pmcs);
-	device_set_wakeup_enable(&jme->pdev->dev, !!(jme->reg_pmcs));
-
 	return 0;
 }
 

@@ -3172,8 +3175,8 @@ jme_init_one(struct pci_dev *pdev,
 	jme->mii_if.mdio_read = jme_mdio_read;
 	jme->mii_if.mdio_write = jme_mdio_write;
 
-	jme_clear_pm(jme);
-	device_set_wakeup_enable(&pdev->dev, true);
+	jme_clear_pm_disable_wol(jme);
+	device_init_wakeup(&pdev->dev, true);
 
 	jme_set_phyfifo_5level(jme);
 	jme->pcirev = pdev->revision;

@@ -3304,7 +3307,7 @@ jme_resume(struct device *dev)
 	if (!netif_running(netdev))
 		return 0;
 
-	jme_clear_pm(jme);
+	jme_clear_pm_disable_wol(jme);
 	jme_phy_on(jme);
 	if (test_bit(JME_FLAG_SSET, &jme->flags))
 		jme_set_settings(netdev, &jme->old_ecmd);

@@ -1557,6 +1557,8 @@ void __iwl_mvm_mac_stop(struct iwl_mvm *mvm)
 	/* the fw is stopped, the aux sta is dead: clean up driver state */
 	iwl_mvm_del_aux_sta(mvm);
 
+	iwl_free_fw_paging(mvm);
+
 	/*
 	 * Clear IN_HW_RESTART flag when stopping the hw (as restart_complete()
 	 * won't be called in this case).

@@ -645,8 +645,6 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode)
 	for (i = 0; i < NVM_MAX_NUM_SECTIONS; i++)
 		kfree(mvm->nvm_sections[i].data);
 
-	iwl_free_fw_paging(mvm);
-
 	iwl_mvm_tof_clean(mvm);
 
 	ieee80211_free_hw(mvm->hw);

@@ -731,8 +731,8 @@ static int iwl_pcie_rsa_race_bug_wa(struct iwl_trans *trans)
 	 */
 	val = iwl_read_prph(trans, PREG_AUX_BUS_WPROT_0);
 	if (val & (BIT(1) | BIT(17))) {
-		IWL_INFO(trans,
-			 "can't access the RSA semaphore it is write protected\n");
+		IWL_DEBUG_INFO(trans,
+			       "can't access the RSA semaphore it is write protected\n");
 		return 0;
 	}
 

@@ -939,7 +939,8 @@ static int mtk_gpio_set_debounce(struct gpio_chip *chip, unsigned offset,
 	struct mtk_pinctrl *pctl = dev_get_drvdata(chip->dev);
 	int eint_num, virq, eint_offset;
 	unsigned int set_offset, bit, clr_bit, clr_offset, rst, i, unmask, dbnc;
-	static const unsigned int dbnc_arr[] = {0 , 1, 16, 32, 64, 128, 256};
+	static const unsigned int debounce_time[] = {500, 1000, 16000, 32000, 64000,
+						     128000, 256000};
 	const struct mtk_desc_pin *pin;
 	struct irq_data *d;
 

@@ -957,9 +958,9 @@ static int mtk_gpio_set_debounce(struct gpio_chip *chip, unsigned offset,
 	if (!mtk_eint_can_en_debounce(pctl, eint_num))
 		return -ENOSYS;
 
-	dbnc = ARRAY_SIZE(dbnc_arr);
-	for (i = 0; i < ARRAY_SIZE(dbnc_arr); i++) {
-		if (debounce <= dbnc_arr[i]) {
+	dbnc = ARRAY_SIZE(debounce_time);
+	for (i = 0; i < ARRAY_SIZE(debounce_time); i++) {
+		if (debounce <= debounce_time[i]) {
 			dbnc = i;
 			break;
 		}

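The renamed table makes the units explicit: each entry is a supported debounce time in microseconds, and the loop picks the smallest entry that covers the request. A runnable sketch of just that lookup — table values copied from the hunk, the function name hypothetical:

#include <stdio.h>

/* supported debounce times in microseconds, as in the hunk above */
static const unsigned int debounce_time[] = {500, 1000, 16000, 32000, 64000,
                                             128000, 256000};
#define N (sizeof(debounce_time) / sizeof(debounce_time[0]))

/* smallest supported setting that covers the request;
 * returns N when the request exceeds the largest supported time */
static unsigned int pick_dbnc(unsigned int debounce)
{
    unsigned int i, dbnc = N;

    for (i = 0; i < N; i++) {
        if (debounce <= debounce_time[i]) {
            dbnc = i;
            break;
        }
    }
    return dbnc;
}

int main(void)
{
    printf("%u\n", pick_dbnc(20000));   /* 3: rounded up to 32000 us */
    printf("%u\n", pick_dbnc(500));     /* 0: exact match            */
    return 0;
}
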
@@ -1273,9 +1273,9 @@ static int pcs_parse_bits_in_pinctrl_entry(struct pcs_device *pcs,
 
 	/* Parse pins in each row from LSB */
 	while (mask) {
-		bit_pos = ffs(mask);
+		bit_pos = __ffs(mask);
 		pin_num_from_lsb = bit_pos / pcs->bits_per_pin;
-		mask_pos = ((pcs->fmask) << (bit_pos - 1));
+		mask_pos = ((pcs->fmask) << bit_pos);
 		val_pos = val & mask_pos;
 		submask = mask & mask_pos;
 

@@ -1847,7 +1847,7 @@ static int pcs_probe(struct platform_device *pdev)
 	ret = of_property_read_u32(np, "pinctrl-single,function-mask",
 				   &pcs->fmask);
 	if (!ret) {
-		pcs->fshift = ffs(pcs->fmask) - 1;
+		pcs->fshift = __ffs(pcs->fmask);
 		pcs->fmax = pcs->fmask >> pcs->fshift;
 	} else {
 		/* If mask property doesn't exist, function mux is invalid. */

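Both pinctrl-single changes are the classic off-by-one between ffs() (1-based, 0 means no bit set) and the kernel's __ffs() (0-based, undefined for 0). The old code took a 1-based bit_pos from ffs() and compensated in the shift (`bit_pos - 1`) but not in the `pin_num_from_lsb` division; switching to __ffs() makes bit_pos 0-based everywhere. A quick runnable check using POSIX ffs() and a builtin as a stand-in for __ffs():

#include <strings.h>   /* POSIX ffs() */
#include <stdio.h>

/* 0-based index of the lowest set bit; stand-in for the kernel's __ffs() */
static unsigned int my__ffs(unsigned long word)
{
    return __builtin_ctzl(word);
}

int main(void)
{
    int fmask = 0x7;    /* function mask already at bit 0 */
    int mask  = 0x30;   /* lowest set bit is bit 4 */

    printf("ffs(mask)   = %d\n", ffs(mask));            /* 5 (1-based) */
    printf("__ffs(mask) = %u\n", my__ffs(mask));        /* 4 (0-based) */

    /* old: fshift = ffs(fmask) - 1;  new: fshift = __ffs(fmask);
     * same result here, but only the 0-based form can also be used
     * directly as a shift count and in index arithmetic */
    printf("fshift old=%d new=%u\n", ffs(fmask) - 1, my__ffs(fmask));
    return 0;
}
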
@@ -131,7 +131,7 @@ MODULE_LICENSE("GPL");
 /* Field definitions */
 #define HCI_ACCEL_MASK			0x7fff
 #define HCI_HOTKEY_DISABLE		0x0b
-#define HCI_HOTKEY_ENABLE		0x01
+#define HCI_HOTKEY_ENABLE		0x09
 #define HCI_HOTKEY_SPECIAL_FUNCTIONS	0x10
 #define HCI_LCD_BRIGHTNESS_BITS		3
 #define HCI_LCD_BRIGHTNESS_SHIFT	(16-HCI_LCD_BRIGHTNESS_BITS)

@@ -274,8 +274,8 @@ static int brcmstb_pwm_probe(struct platform_device *pdev)
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	p->base = devm_ioremap_resource(&pdev->dev, res);
-	if (!p->base) {
-		ret = -ENOMEM;
+	if (IS_ERR(p->base)) {
+		ret = PTR_ERR(p->base);
 		goto out_clk;
 	}
 

@@ -148,7 +148,7 @@ static void regulator_lock_supply(struct regulator_dev *rdev)
 {
 	int i;
 
-	for (i = 0; rdev->supply; rdev = rdev_get_supply(rdev), i++)
+	for (i = 0; rdev; rdev = rdev_get_supply(rdev), i++)
 		mutex_lock_nested(&rdev->mutex, i);
 }
 

@@ -202,9 +202,10 @@ static int s5m8767_get_register(struct s5m8767_info *s5m8767, int reg_id,
 		}
 	}
 
-	if (i < s5m8767->num_regulators)
-		*enable_ctrl =
-		s5m8767_opmode_reg[reg_id][mode] << S5M8767_ENCTRL_SHIFT;
+	if (i >= s5m8767->num_regulators)
+		return -EINVAL;
+
+	*enable_ctrl = s5m8767_opmode_reg[reg_id][mode] << S5M8767_ENCTRL_SHIFT;
 
 	return 0;
 }

@@ -937,8 +938,12 @@ static int s5m8767_pmic_probe(struct platform_device *pdev)
 		else
 			regulators[id].vsel_mask = 0xff;
 
-		s5m8767_get_register(s5m8767, id, &enable_reg,
+		ret = s5m8767_get_register(s5m8767, id, &enable_reg,
 				     &enable_val);
+		if (ret) {
+			dev_err(s5m8767->dev, "error reading registers\n");
+			return ret;
+		}
 		regulators[id].enable_reg = enable_reg;
 		regulators[id].enable_mask = S5M8767_ENCTRL_MASK;
 		regulators[id].enable_val = enable_val;
