Merge android-4.4.148 (f057ff9) into msm-4.4

* refs/heads/tmp-f057ff9
  Linux 4.4.148
  x86/speculation/l1tf: Unbreak !__HAVE_ARCH_PFN_MODIFY_ALLOWED architectures
  x86/init: fix build with CONFIG_SWAP=n
  x86/speculation/l1tf: Fix up CPU feature flags
  x86/mm/kmmio: Make the tracer robust against L1TF
  x86/mm/pat: Make set_memory_np() L1TF safe
  x86/speculation/l1tf: Make pmd/pud_mknotpresent() invert
  x86/speculation/l1tf: Invert all not present mappings
  x86/speculation/l1tf: Fix up pte->pfn conversion for PAE
  x86/speculation/l1tf: Protect PAE swap entries against L1TF
  x86/cpufeatures: Add detection of L1D cache flush support.
  x86/speculation/l1tf: Extend 64bit swap file size limit
  x86/bugs: Move the l1tf function and define pr_fmt properly
  x86/speculation/l1tf: Limit swap file size to MAX_PA/2
  x86/speculation/l1tf: Disallow non privileged high MMIO PROT_NONE mappings
  mm: fix cache mode tracking in vm_insert_mixed()
  mm: Add vm_insert_pfn_prot()
  x86/speculation/l1tf: Add sysfs reporting for l1tf
  x86/speculation/l1tf: Make sure the first page is always reserved
  x86/speculation/l1tf: Protect PROT_NONE PTEs against speculation
  x86/speculation/l1tf: Protect swap entries against L1TF
  x86/speculation/l1tf: Change order of offset/type in swap entry
  mm: x86: move _PAGE_SWP_SOFT_DIRTY from bit 7 to bit 1
  x86/mm: Fix swap entry comment and macro
  x86/mm: Move swap offset/type up in PTE to work around erratum
  x86/speculation/l1tf: Increase 32bit PAE __PHYSICAL_PAGE_SHIFT
  x86/irqflags: Provide a declaration for native_save_fl
  kprobes/x86: Fix %p uses in error messages
  x86/speculation: Protect against userspace-userspace spectreRSB
  x86/paravirt: Fix spectre-v2 mitigations for paravirt guests
  ARM: dts: imx6sx: fix irq for pcie bridge
  IB/ocrdma: fix out of bounds access to local buffer
  IB/mlx4: Mark user MR as writable if actual virtual memory is writable
  IB/core: Make testing MR flags for writability a static inline function
  fix __legitimize_mnt()/mntput() race
  fix mntput/mntput race
  root dentries need RCU-delayed freeing
  scsi: sr: Avoid that opening a CD-ROM hangs with runtime power management enabled
  ACPI / LPSS: Add missing prv_offset setting for byt/cht PWM devices
  xen/netfront: don't cache skb_shinfo()
  parisc: Define mb() and add memory barriers to assembler unlock sequences
  parisc: Enable CONFIG_MLONGCALLS by default
  fork: unconditionally clear stack on fork
  ipv4+ipv6: Make INET*_ESP select CRYPTO_ECHAINIV
  tpm: fix race condition in tpm_common_write()
  ext4: fix check to prevent initializing reserved inodes
  Linux 4.4.147
  jfs: Fix inconsistency between memory allocation and ea_buf->max_size
  i2c: imx: Fix reinit_completion() use
  ring_buffer: tracing: Inherit the tracing setting to next ring buffer
  ACPI / PCI: Bail early in acpi_pci_add_bus() if there is no ACPI handle
  ext4: fix false negatives *and* false positives in ext4_check_descriptors()
  netlink: Don't shift on 64 for ngroups
  netlink: Don't shift with UB on nlk->ngroups
  netlink: Do not subscribe to non-existent groups
  nohz: Fix local_timer_softirq_pending()
  genirq: Make force irq threading setup more robust
  scsi: qla2xxx: Return error when TMF returns
  scsi: qla2xxx: Fix ISP recovery on unload

Conflicts:
	include/linux/swapfile.h

Removed CONFIG_CRYPTO_ECHAINIV from the defconfig files since this upmerge
adds that config to the Kconfig file.

Change-Id: Ide96c29f919d76590c2bdccf356d1d464a892fd7
Signed-off-by: Srinivasarao P <spathi@codeaurora.org>
commit 79de04d806
74 changed files with 763 additions and 215 deletions
Makefile | 2
|
@ -1,6 +1,6 @@
|
|||
VERSION = 4
|
||||
PATCHLEVEL = 4
|
||||
SUBLEVEL = 146
|
||||
SUBLEVEL = 148
|
||||
EXTRAVERSION =
|
||||
NAME = Blurry Fish Butt
|
||||
|
||||
|
|
|
@ -1250,7 +1250,7 @@
|
|||
/* non-prefetchable memory */
|
||||
0x82000000 0 0x08000000 0x08000000 0 0x00f00000>;
|
||||
num-lanes = <1>;
|
||||
interrupts = <GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>;
|
||||
interrupts = <GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>;
|
||||
clocks = <&clks IMX6SX_CLK_PCIE_REF_125M>,
|
||||
<&clks IMX6SX_CLK_PCIE_AXI>,
|
||||
<&clks IMX6SX_CLK_LVDS1_OUT>,
|
||||
|
|
|
@ -556,7 +556,6 @@ CONFIG_CORESIGHT_SOURCE_DUMMY=y
|
|||
CONFIG_SECURITY=y
|
||||
CONFIG_SECURITY_SELINUX=y
|
||||
CONFIG_SECURITY_SMACK=y
|
||||
CONFIG_CRYPTO_ECHAINIV=y
|
||||
CONFIG_CRYPTO_XCBC=y
|
||||
CONFIG_CRYPTO_MD4=y
|
||||
CONFIG_CRYPTO_TWOFISH=y
|
||||
|
|
|
@ -662,7 +662,6 @@ CONFIG_SECURITY=y
|
|||
CONFIG_HARDENED_USERCOPY=y
|
||||
CONFIG_SECURITY_SELINUX=y
|
||||
CONFIG_SECURITY_SMACK=y
|
||||
CONFIG_CRYPTO_ECHAINIV=y
|
||||
CONFIG_CRYPTO_XCBC=y
|
||||
CONFIG_CRYPTO_MD4=y
|
||||
CONFIG_CRYPTO_TWOFISH=y
|
||||
|
|
|
@ -700,7 +700,6 @@ CONFIG_SECURITY=y
|
|||
CONFIG_HARDENED_USERCOPY=y
|
||||
CONFIG_SECURITY_SELINUX=y
|
||||
CONFIG_SECURITY_SMACK=y
|
||||
CONFIG_CRYPTO_ECHAINIV=y
|
||||
CONFIG_CRYPTO_XCBC=y
|
||||
CONFIG_CRYPTO_MD4=y
|
||||
CONFIG_CRYPTO_TWOFISH=y
|
||||
|
|
|
@ -575,7 +575,6 @@ CONFIG_SECURITY=y
|
|||
CONFIG_HARDENED_USERCOPY=y
|
||||
CONFIG_SECURITY_SELINUX=y
|
||||
CONFIG_SECURITY_SMACK=y
|
||||
CONFIG_CRYPTO_ECHAINIV=y
|
||||
CONFIG_CRYPTO_XCBC=y
|
||||
CONFIG_CRYPTO_MD4=y
|
||||
CONFIG_CRYPTO_TWOFISH=y
|
||||
|
|
|
@ -649,7 +649,6 @@ CONFIG_SECURITY=y
|
|||
CONFIG_HARDENED_USERCOPY=y
|
||||
CONFIG_SECURITY_SELINUX=y
|
||||
CONFIG_SECURITY_SMACK=y
|
||||
CONFIG_CRYPTO_ECHAINIV=y
|
||||
CONFIG_CRYPTO_XCBC=y
|
||||
CONFIG_CRYPTO_MD4=y
|
||||
CONFIG_CRYPTO_TWOFISH=y
|
||||
|
|
|
@ -624,7 +624,6 @@ CONFIG_PFK=y
|
|||
CONFIG_SECURITY=y
|
||||
CONFIG_SECURITY_SELINUX=y
|
||||
CONFIG_SECURITY_SMACK=y
|
||||
CONFIG_CRYPTO_ECHAINIV=y
|
||||
CONFIG_CRYPTO_XCBC=y
|
||||
CONFIG_CRYPTO_MD4=y
|
||||
CONFIG_CRYPTO_TWOFISH=y
|
||||
|
|
|
@ -668,7 +668,6 @@ CONFIG_PFK=y
|
|||
CONFIG_SECURITY=y
|
||||
CONFIG_SECURITY_SELINUX=y
|
||||
CONFIG_SECURITY_SMACK=y
|
||||
CONFIG_CRYPTO_ECHAINIV=y
|
||||
CONFIG_CRYPTO_XCBC=y
|
||||
CONFIG_CRYPTO_MD4=y
|
||||
CONFIG_CRYPTO_TWOFISH=y
|
||||
|
|
|
@ -592,7 +592,6 @@ CONFIG_PFK=y
|
|||
CONFIG_SECURITY=y
|
||||
CONFIG_SECURITY_NETWORK=y
|
||||
CONFIG_SECURITY_SELINUX=y
|
||||
CONFIG_CRYPTO_ECHAINIV=y
|
||||
CONFIG_CRYPTO_XCBC=y
|
||||
CONFIG_CRYPTO_MD4=y
|
||||
CONFIG_CRYPTO_TWOFISH=y
|
||||
|
|
|
@ -629,7 +629,6 @@ CONFIG_PFK=y
|
|||
CONFIG_SECURITY=y
|
||||
CONFIG_SECURITY_SELINUX=y
|
||||
CONFIG_SECURITY_SMACK=y
|
||||
CONFIG_CRYPTO_ECHAINIV=y
|
||||
CONFIG_CRYPTO_XCBC=y
|
||||
CONFIG_CRYPTO_MD4=y
|
||||
CONFIG_CRYPTO_TWOFISH=y
|
||||
|
|
|
@ -659,7 +659,6 @@ CONFIG_HARDENED_USERCOPY=y
|
|||
CONFIG_SECURITY_SELINUX=y
|
||||
CONFIG_SECURITY_SMACK=y
|
||||
CONFIG_CRYPTO_GCM=y
|
||||
CONFIG_CRYPTO_ECHAINIV=y
|
||||
CONFIG_CRYPTO_XCBC=y
|
||||
CONFIG_CRYPTO_MD4=y
|
||||
CONFIG_CRYPTO_TWOFISH=y
|
||||
|
|
|
@ -735,7 +735,6 @@ CONFIG_HARDENED_USERCOPY=y
|
|||
CONFIG_SECURITY_SELINUX=y
|
||||
CONFIG_SECURITY_SMACK=y
|
||||
CONFIG_CRYPTO_GCM=y
|
||||
CONFIG_CRYPTO_ECHAINIV=y
|
||||
CONFIG_CRYPTO_XCBC=y
|
||||
CONFIG_CRYPTO_MD4=y
|
||||
CONFIG_CRYPTO_TWOFISH=y
|
||||
|
|
|
@ -632,7 +632,6 @@ CONFIG_PFK=y
|
|||
CONFIG_SECURITY=y
|
||||
CONFIG_SECURITY_SELINUX=y
|
||||
CONFIG_SECURITY_SMACK=y
|
||||
CONFIG_CRYPTO_ECHAINIV=y
|
||||
CONFIG_CRYPTO_XCBC=y
|
||||
CONFIG_CRYPTO_MD4=y
|
||||
CONFIG_CRYPTO_TWOFISH=y
|
||||
|
|
|
@ -693,7 +693,6 @@ CONFIG_PFK=y
|
|||
CONFIG_SECURITY=y
|
||||
CONFIG_SECURITY_SELINUX=y
|
||||
CONFIG_SECURITY_SMACK=y
|
||||
CONFIG_CRYPTO_ECHAINIV=y
|
||||
CONFIG_CRYPTO_XCBC=y
|
||||
CONFIG_CRYPTO_MD4=y
|
||||
CONFIG_CRYPTO_TWOFISH=y
|
||||
|
|
|
@ -663,7 +663,6 @@ CONFIG_HARDENED_USERCOPY=y
|
|||
CONFIG_SECURITY_SELINUX=y
|
||||
CONFIG_SECURITY_SMACK=y
|
||||
CONFIG_CRYPTO_GCM=y
|
||||
CONFIG_CRYPTO_ECHAINIV=y
|
||||
CONFIG_CRYPTO_XCBC=y
|
||||
CONFIG_CRYPTO_MD4=y
|
||||
CONFIG_CRYPTO_TWOFISH=y
|
||||
|
|
|
@ -735,7 +735,6 @@ CONFIG_HARDENED_USERCOPY=y
|
|||
CONFIG_SECURITY_SELINUX=y
|
||||
CONFIG_SECURITY_SMACK=y
|
||||
CONFIG_CRYPTO_GCM=y
|
||||
CONFIG_CRYPTO_ECHAINIV=y
|
||||
CONFIG_CRYPTO_XCBC=y
|
||||
CONFIG_CRYPTO_MD4=y
|
||||
CONFIG_CRYPTO_TWOFISH=y
|
||||
|
|
|
@ -178,7 +178,7 @@ config PREFETCH
|
|||
|
||||
config MLONGCALLS
|
||||
bool "Enable the -mlong-calls compiler option for big kernels"
|
||||
def_bool y if (!MODULES)
|
||||
default y
|
||||
depends on PA8X00
|
||||
help
|
||||
If you configure the kernel to include many drivers built-in instead
|
||||
|
|
arch/parisc/include/asm/barrier.h | 32 (new file)
|
@ -0,0 +1,32 @@
|
|||
/* SPDX-License-Identifier: GPL-2.0 */
|
||||
#ifndef __ASM_BARRIER_H
|
||||
#define __ASM_BARRIER_H
|
||||
|
||||
#ifndef __ASSEMBLY__
|
||||
|
||||
/* The synchronize caches instruction executes as a nop on systems in
|
||||
which all memory references are performed in order. */
|
||||
#define synchronize_caches() __asm__ __volatile__ ("sync" : : : "memory")
|
||||
|
||||
#if defined(CONFIG_SMP)
|
||||
#define mb() do { synchronize_caches(); } while (0)
|
||||
#define rmb() mb()
|
||||
#define wmb() mb()
|
||||
#define dma_rmb() mb()
|
||||
#define dma_wmb() mb()
|
||||
#else
|
||||
#define mb() barrier()
|
||||
#define rmb() barrier()
|
||||
#define wmb() barrier()
|
||||
#define dma_rmb() barrier()
|
||||
#define dma_wmb() barrier()
|
||||
#endif
|
||||
|
||||
#define __smp_mb() mb()
|
||||
#define __smp_rmb() mb()
|
||||
#define __smp_wmb() mb()
|
||||
|
||||
#include <asm-generic/barrier.h>
|
||||
|
||||
#endif /* !__ASSEMBLY__ */
|
||||
#endif /* __ASM_BARRIER_H */
|
|
@ -481,6 +481,8 @@
|
|||
/* Release pa_tlb_lock lock without reloading lock address. */
|
||||
.macro tlb_unlock0 spc,tmp
|
||||
#ifdef CONFIG_SMP
|
||||
or,COND(=) %r0,\spc,%r0
|
||||
sync
|
||||
or,COND(=) %r0,\spc,%r0
|
||||
stw \spc,0(\tmp)
|
||||
#endif
|
||||
|
|
|
@ -354,6 +354,7 @@ ENDPROC(flush_data_cache_local)
|
|||
.macro tlb_unlock la,flags,tmp
|
||||
#ifdef CONFIG_SMP
|
||||
ldi 1,\tmp
|
||||
sync
|
||||
stw \tmp,0(\la)
|
||||
mtsm \flags
|
||||
#endif
|
||||
|
|
|
@ -631,6 +631,7 @@ cas_action:
|
|||
sub,<> %r28, %r25, %r0
|
||||
2: stw,ma %r24, 0(%r26)
|
||||
/* Free lock */
|
||||
sync
|
||||
stw,ma %r20, 0(%sr2,%r20)
|
||||
#if ENABLE_LWS_DEBUG
|
||||
/* Clear thread register indicator */
|
||||
|
@ -645,6 +646,7 @@ cas_action:
|
|||
3:
|
||||
/* Error occurred on load or store */
|
||||
/* Free lock */
|
||||
sync
|
||||
stw %r20, 0(%sr2,%r20)
|
||||
#if ENABLE_LWS_DEBUG
|
||||
stw %r0, 4(%sr2,%r20)
|
||||
|
@ -846,6 +848,7 @@ cas2_action:
|
|||
|
||||
cas2_end:
|
||||
/* Free lock */
|
||||
sync
|
||||
stw,ma %r20, 0(%sr2,%r20)
|
||||
/* Enable interrupts */
|
||||
ssm PSW_SM_I, %r0
|
||||
|
@ -856,6 +859,7 @@ cas2_end:
|
|||
22:
|
||||
/* Error occurred on load or store */
|
||||
/* Free lock */
|
||||
sync
|
||||
stw %r20, 0(%sr2,%r20)
|
||||
ssm PSW_SM_I, %r0
|
||||
ldo 1(%r0),%r28
|
||||
|
|
|
@ -193,12 +193,12 @@
|
|||
#define X86_FEATURE_HW_PSTATE ( 7*32+ 8) /* AMD HW-PState */
|
||||
#define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */
|
||||
|
||||
#define X86_FEATURE_RETPOLINE ( 7*32+12) /* "" Generic Retpoline mitigation for Spectre variant 2 */
|
||||
#define X86_FEATURE_RETPOLINE_AMD ( 7*32+13) /* "" AMD Retpoline mitigation for Spectre variant 2 */
|
||||
|
||||
#define X86_FEATURE_INTEL_PT ( 7*32+15) /* Intel Processor Trace */
|
||||
#define X86_FEATURE_RSB_CTXSW ( 7*32+19) /* "" Fill RSB on context switches */
|
||||
|
||||
#define X86_FEATURE_RETPOLINE ( 7*32+29) /* "" Generic Retpoline mitigation for Spectre variant 2 */
|
||||
#define X86_FEATURE_RETPOLINE_AMD ( 7*32+30) /* "" AMD Retpoline mitigation for Spectre variant 2 */
|
||||
|
||||
#define X86_FEATURE_MSR_SPEC_CTRL ( 7*32+16) /* "" MSR SPEC_CTRL is implemented */
|
||||
#define X86_FEATURE_SSBD ( 7*32+17) /* Speculative Store Bypass Disable */
|
||||
|
||||
|
@ -214,7 +214,7 @@
|
|||
#define X86_FEATURE_IBPB ( 7*32+26) /* Indirect Branch Prediction Barrier */
|
||||
#define X86_FEATURE_STIBP ( 7*32+27) /* Single Thread Indirect Branch Predictors */
|
||||
#define X86_FEATURE_ZEN ( 7*32+28) /* "" CPU is AMD family 0x17 (Zen) */
|
||||
|
||||
#define X86_FEATURE_L1TF_PTEINV ( 7*32+29) /* "" L1TF workaround PTE inversion */
|
||||
|
||||
/* Virtualization flags: Linux defined, word 8 */
|
||||
#define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */
|
||||
|
@ -310,6 +310,7 @@
|
|||
#define X86_FEATURE_AVX512_4FMAPS (18*32+ 3) /* AVX-512 Multiply Accumulation Single precision */
|
||||
#define X86_FEATURE_SPEC_CTRL (18*32+26) /* "" Speculation Control (IBRS + IBPB) */
|
||||
#define X86_FEATURE_INTEL_STIBP (18*32+27) /* "" Single Thread Indirect Branch Predictors */
|
||||
#define X86_FEATURE_FLUSH_L1D (18*32+28) /* Flush L1D cache */
|
||||
#define X86_FEATURE_ARCH_CAPABILITIES (18*32+29) /* IA32_ARCH_CAPABILITIES MSR (Intel) */
|
||||
#define X86_FEATURE_SPEC_CTRL_SSBD (18*32+31) /* "" Speculative Store Bypass Disable */
|
||||
|
||||
|
@ -331,5 +332,6 @@
|
|||
#define X86_BUG_SPECTRE_V1 X86_BUG(15) /* CPU is affected by Spectre variant 1 attack with conditional branches */
|
||||
#define X86_BUG_SPECTRE_V2 X86_BUG(16) /* CPU is affected by Spectre variant 2 attack with indirect branches */
|
||||
#define X86_BUG_SPEC_STORE_BYPASS X86_BUG(17) /* CPU is affected by speculative store bypass attack */
|
||||
#define X86_BUG_L1TF X86_BUG(18) /* CPU is affected by L1 Terminal Fault */
|
||||
|
||||
#endif /* _ASM_X86_CPUFEATURES_H */
|
||||
|
|
|
@ -8,6 +8,8 @@
|
|||
* Interrupt control:
|
||||
*/
|
||||
|
||||
/* Declaration required for gcc < 4.9 to prevent -Werror=missing-prototypes */
|
||||
extern inline unsigned long native_save_fl(void);
|
||||
extern inline unsigned long native_save_fl(void)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
|
|
@ -27,8 +27,13 @@
|
|||
#define N_EXCEPTION_STACKS 1
|
||||
|
||||
#ifdef CONFIG_X86_PAE
|
||||
/* 44=32+12, the limit we can fit into an unsigned long pfn */
|
||||
#define __PHYSICAL_MASK_SHIFT 44
|
||||
/*
|
||||
* This is beyond the 44 bit limit imposed by the 32bit long pfns,
|
||||
* but we need the full mask to make sure inverted PROT_NONE
|
||||
* entries have all the host bits set in a guest.
|
||||
* The real limit is still 44 bits.
|
||||
*/
|
||||
#define __PHYSICAL_MASK_SHIFT 52
|
||||
#define __VIRTUAL_MASK_SHIFT 32
|
||||
|
||||
#else /* !CONFIG_X86_PAE */
|
||||
|
|
|
@ -77,4 +77,21 @@ static inline unsigned long pte_bitop(unsigned long value, unsigned int rightshi
|
|||
#define __pte_to_swp_entry(pte) ((swp_entry_t) { (pte).pte_low })
|
||||
#define __swp_entry_to_pte(x) ((pte_t) { .pte = (x).val })
|
||||
|
||||
/* No inverted PFNs on 2 level page tables */
|
||||
|
||||
static inline u64 protnone_mask(u64 val)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline u64 flip_protnone_guard(u64 oldval, u64 val, u64 mask)
|
||||
{
|
||||
return val;
|
||||
}
|
||||
|
||||
static inline bool __pte_needs_invert(u64 val)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
#endif /* _ASM_X86_PGTABLE_2LEVEL_H */
|
||||
|
|
|
@ -177,11 +177,44 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *pmdp)
|
|||
#endif
|
||||
|
||||
/* Encode and de-code a swap entry */
|
||||
#define SWP_TYPE_BITS 5
|
||||
|
||||
#define SWP_OFFSET_FIRST_BIT (_PAGE_BIT_PROTNONE + 1)
|
||||
|
||||
/* We always extract/encode the offset by shifting it all the way up, and then down again */
|
||||
#define SWP_OFFSET_SHIFT (SWP_OFFSET_FIRST_BIT + SWP_TYPE_BITS)
|
||||
|
||||
#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > 5)
|
||||
#define __swp_type(x) (((x).val) & 0x1f)
|
||||
#define __swp_offset(x) ((x).val >> 5)
|
||||
#define __swp_entry(type, offset) ((swp_entry_t){(type) | (offset) << 5})
|
||||
#define __pte_to_swp_entry(pte) ((swp_entry_t){ (pte).pte_high })
|
||||
#define __swp_entry_to_pte(x) ((pte_t){ { .pte_high = (x).val } })
|
||||
|
||||
/*
|
||||
* Normally, __swp_entry() converts from arch-independent swp_entry_t to
|
||||
* arch-dependent swp_entry_t, and __swp_entry_to_pte() just stores the result
|
||||
* to pte. But here we have 32bit swp_entry_t and 64bit pte, and need to use the
|
||||
* whole 64 bits. Thus, we shift the "real" arch-dependent conversion to
|
||||
* __swp_entry_to_pte() through the following helper macro based on 64bit
|
||||
* __swp_entry().
|
||||
*/
|
||||
#define __swp_pteval_entry(type, offset) ((pteval_t) { \
|
||||
(~(pteval_t)(offset) << SWP_OFFSET_SHIFT >> SWP_TYPE_BITS) \
|
||||
| ((pteval_t)(type) << (64 - SWP_TYPE_BITS)) })
|
||||
|
||||
#define __swp_entry_to_pte(x) ((pte_t){ .pte = \
|
||||
__swp_pteval_entry(__swp_type(x), __swp_offset(x)) })
|
||||
/*
|
||||
* Analogically, __pte_to_swp_entry() doesn't just extract the arch-dependent
|
||||
* swp_entry_t, but also has to convert it from 64bit to the 32bit
|
||||
* intermediate representation, using the following macros based on 64bit
|
||||
* __swp_type() and __swp_offset().
|
||||
*/
|
||||
#define __pteval_swp_type(x) ((unsigned long)((x).pte >> (64 - SWP_TYPE_BITS)))
|
||||
#define __pteval_swp_offset(x) ((unsigned long)(~((x).pte) << SWP_TYPE_BITS >> SWP_OFFSET_SHIFT))
|
||||
|
||||
#define __pte_to_swp_entry(pte) (__swp_entry(__pteval_swp_type(pte), \
|
||||
__pteval_swp_offset(pte)))
|
||||
|
||||
#include <asm/pgtable-invert.h>
|
||||
|
||||
#endif /* _ASM_X86_PGTABLE_3LEVEL_H */
|
||||
|
|
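Illustration (not part of the patch): the inverted PAE swap encoding above can be exercised with a small standalone C program. It assumes SWP_TYPE_BITS = 5 and SWP_OFFSET_FIRST_BIT = 9 (_PAGE_BIT_PROTNONE + 1, the global bit 8 noted in the pgtable_64.h comment further down), so SWP_OFFSET_SHIFT = 14; the round trip shows that type and offset survive the inversion of the offset bits.

/* Standalone sketch of __swp_pteval_entry()/__pteval_swp_*() from the hunk above. */
#include <stdint.h>
#include <stdio.h>

#define SWP_TYPE_BITS		5
#define SWP_OFFSET_FIRST_BIT	9	/* assumed: _PAGE_BIT_PROTNONE + 1 */
#define SWP_OFFSET_SHIFT	(SWP_OFFSET_FIRST_BIT + SWP_TYPE_BITS)

static uint64_t swp_pteval_entry(uint64_t type, uint64_t offset)
{
	/* The offset is stored inverted so the high "physical" bits are set. */
	return (~offset << SWP_OFFSET_SHIFT >> SWP_TYPE_BITS) |
	       (type << (64 - SWP_TYPE_BITS));
}

int main(void)
{
	uint64_t pte  = swp_pteval_entry(3, 0x1234);
	uint64_t type = pte >> (64 - SWP_TYPE_BITS);		  /* __pteval_swp_type()   */
	uint64_t off  = ~pte << SWP_TYPE_BITS >> SWP_OFFSET_SHIFT; /* __pteval_swp_offset() */

	printf("type=%llu offset=0x%llx\n",
	       (unsigned long long)type, (unsigned long long)off);
	/* prints: type=3 offset=0x1234 */
	return 0;
}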
arch/x86/include/asm/pgtable-invert.h | 32 (new file)
|
@ -0,0 +1,32 @@
|
|||
/* SPDX-License-Identifier: GPL-2.0 */
|
||||
#ifndef _ASM_PGTABLE_INVERT_H
|
||||
#define _ASM_PGTABLE_INVERT_H 1
|
||||
|
||||
#ifndef __ASSEMBLY__
|
||||
|
||||
static inline bool __pte_needs_invert(u64 val)
|
||||
{
|
||||
return !(val & _PAGE_PRESENT);
|
||||
}
|
||||
|
||||
/* Get a mask to xor with the page table entry to get the correct pfn. */
|
||||
static inline u64 protnone_mask(u64 val)
|
||||
{
|
||||
return __pte_needs_invert(val) ? ~0ull : 0;
|
||||
}
|
||||
|
||||
static inline u64 flip_protnone_guard(u64 oldval, u64 val, u64 mask)
|
||||
{
|
||||
/*
|
||||
* When a PTE transitions from NONE to !NONE or vice-versa
|
||||
* invert the PFN part to stop speculation.
|
||||
* pte_pfn undoes this when needed.
|
||||
*/
|
||||
if (__pte_needs_invert(oldval) != __pte_needs_invert(val))
|
||||
val = (val & ~mask) | (~val & mask);
|
||||
return val;
|
||||
}
|
||||
|
||||
#endif /* __ASSEMBLY__ */
|
||||
|
||||
#endif
|
|
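Illustration (not part of the patch): a minimal sketch of how the new pgtable-invert.h helpers behave, using the same logic but toy constants (_PAGE_PRESENT = bit 0 and an assumed 40-bit PFN field at bits 12-51; the real masks come from the x86 page table headers).

#include <stdint.h>
#include <stdio.h>

#define _PAGE_PRESENT	0x1ULL
#define PTE_PFN_MASK	0x000ffffffffff000ULL	/* assumed 40-bit PFN field */

static int pte_needs_invert(uint64_t val)
{
	return !(val & _PAGE_PRESENT);
}

static uint64_t protnone_mask(uint64_t val)
{
	return pte_needs_invert(val) ? ~0ULL : 0;
}

static uint64_t flip_protnone_guard(uint64_t oldval, uint64_t val, uint64_t mask)
{
	/* Invert the PFN bits whenever the present bit changes state. */
	if (pte_needs_invert(oldval) != pte_needs_invert(val))
		val = (val & ~mask) | (~val & mask);
	return val;
}

int main(void)
{
	uint64_t present  = (0x1234ULL << 12) | _PAGE_PRESENT;
	uint64_t protnone = flip_protnone_guard(present,
						present & ~_PAGE_PRESENT,
						PTE_PFN_MASK);

	/* The PFN bits stored in the not-present entry are inverted ... */
	printf("stored pfn field : 0x%llx\n",
	       (unsigned long long)((protnone & PTE_PFN_MASK) >> 12));
	/* ... and pte_pfn() undoes that by XOR-ing with protnone_mask(). */
	printf("recovered pfn    : 0x%llx\n",
	       (unsigned long long)(((protnone ^ protnone_mask(protnone)) &
				     PTE_PFN_MASK) >> 12));
	return 0;
}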
@ -148,19 +148,29 @@ static inline int pte_special(pte_t pte)
|
|||
return pte_flags(pte) & _PAGE_SPECIAL;
|
||||
}
|
||||
|
||||
/* Entries that were set to PROT_NONE are inverted */
|
||||
|
||||
static inline u64 protnone_mask(u64 val);
|
||||
|
||||
static inline unsigned long pte_pfn(pte_t pte)
|
||||
{
|
||||
return (pte_val(pte) & PTE_PFN_MASK) >> PAGE_SHIFT;
|
||||
phys_addr_t pfn = pte_val(pte);
|
||||
pfn ^= protnone_mask(pfn);
|
||||
return (pfn & PTE_PFN_MASK) >> PAGE_SHIFT;
|
||||
}
|
||||
|
||||
static inline unsigned long pmd_pfn(pmd_t pmd)
|
||||
{
|
||||
return (pmd_val(pmd) & pmd_pfn_mask(pmd)) >> PAGE_SHIFT;
|
||||
phys_addr_t pfn = pmd_val(pmd);
|
||||
pfn ^= protnone_mask(pfn);
|
||||
return (pfn & pmd_pfn_mask(pmd)) >> PAGE_SHIFT;
|
||||
}
|
||||
|
||||
static inline unsigned long pud_pfn(pud_t pud)
|
||||
{
|
||||
return (pud_val(pud) & pud_pfn_mask(pud)) >> PAGE_SHIFT;
|
||||
phys_addr_t pfn = pud_val(pud);
|
||||
pfn ^= protnone_mask(pfn);
|
||||
return (pfn & pud_pfn_mask(pud)) >> PAGE_SHIFT;
|
||||
}
|
||||
|
||||
#define pte_page(pte) pfn_to_page(pte_pfn(pte))
|
||||
|
@ -305,11 +315,6 @@ static inline pmd_t pmd_mkwrite(pmd_t pmd)
|
|||
return pmd_set_flags(pmd, _PAGE_RW);
|
||||
}
|
||||
|
||||
static inline pmd_t pmd_mknotpresent(pmd_t pmd)
|
||||
{
|
||||
return pmd_clear_flags(pmd, _PAGE_PRESENT | _PAGE_PROTNONE);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
|
||||
static inline int pte_soft_dirty(pte_t pte)
|
||||
{
|
||||
|
@ -359,19 +364,58 @@ static inline pgprotval_t massage_pgprot(pgprot_t pgprot)
|
|||
|
||||
static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
|
||||
{
|
||||
return __pte(((phys_addr_t)page_nr << PAGE_SHIFT) |
|
||||
massage_pgprot(pgprot));
|
||||
phys_addr_t pfn = (phys_addr_t)page_nr << PAGE_SHIFT;
|
||||
pfn ^= protnone_mask(pgprot_val(pgprot));
|
||||
pfn &= PTE_PFN_MASK;
|
||||
return __pte(pfn | massage_pgprot(pgprot));
|
||||
}
|
||||
|
||||
static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
|
||||
{
|
||||
return __pmd(((phys_addr_t)page_nr << PAGE_SHIFT) |
|
||||
massage_pgprot(pgprot));
|
||||
phys_addr_t pfn = (phys_addr_t)page_nr << PAGE_SHIFT;
|
||||
pfn ^= protnone_mask(pgprot_val(pgprot));
|
||||
pfn &= PHYSICAL_PMD_PAGE_MASK;
|
||||
return __pmd(pfn | massage_pgprot(pgprot));
|
||||
}
|
||||
|
||||
static inline pud_t pfn_pud(unsigned long page_nr, pgprot_t pgprot)
|
||||
{
|
||||
phys_addr_t pfn = page_nr << PAGE_SHIFT;
|
||||
pfn ^= protnone_mask(pgprot_val(pgprot));
|
||||
pfn &= PHYSICAL_PUD_PAGE_MASK;
|
||||
return __pud(pfn | massage_pgprot(pgprot));
|
||||
}
|
||||
|
||||
static inline pmd_t pmd_mknotpresent(pmd_t pmd)
|
||||
{
|
||||
return pfn_pmd(pmd_pfn(pmd),
|
||||
__pgprot(pmd_flags(pmd) & ~(_PAGE_PRESENT|_PAGE_PROTNONE)));
|
||||
}
|
||||
|
||||
static inline pud_t pud_set_flags(pud_t pud, pudval_t set)
|
||||
{
|
||||
pudval_t v = native_pud_val(pud);
|
||||
|
||||
return __pud(v | set);
|
||||
}
|
||||
|
||||
static inline pud_t pud_clear_flags(pud_t pud, pudval_t clear)
|
||||
{
|
||||
pudval_t v = native_pud_val(pud);
|
||||
|
||||
return __pud(v & ~clear);
|
||||
}
|
||||
|
||||
static inline pud_t pud_mkhuge(pud_t pud)
|
||||
{
|
||||
return pud_set_flags(pud, _PAGE_PSE);
|
||||
}
|
||||
|
||||
static inline u64 flip_protnone_guard(u64 oldval, u64 val, u64 mask);
|
||||
|
||||
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
|
||||
{
|
||||
pteval_t val = pte_val(pte);
|
||||
pteval_t val = pte_val(pte), oldval = val;
|
||||
|
||||
/*
|
||||
* Chop off the NX bit (if present), and add the NX portion of
|
||||
|
@ -379,17 +423,17 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
|
|||
*/
|
||||
val &= _PAGE_CHG_MASK;
|
||||
val |= massage_pgprot(newprot) & ~_PAGE_CHG_MASK;
|
||||
|
||||
val = flip_protnone_guard(oldval, val, PTE_PFN_MASK);
|
||||
return __pte(val);
|
||||
}
|
||||
|
||||
static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
|
||||
{
|
||||
pmdval_t val = pmd_val(pmd);
|
||||
pmdval_t val = pmd_val(pmd), oldval = val;
|
||||
|
||||
val &= _HPAGE_CHG_MASK;
|
||||
val |= massage_pgprot(newprot) & ~_HPAGE_CHG_MASK;
|
||||
|
||||
val = flip_protnone_guard(oldval, val, PHYSICAL_PMD_PAGE_MASK);
|
||||
return __pmd(val);
|
||||
}
|
||||
|
||||
|
@ -926,6 +970,14 @@ static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
|
|||
}
|
||||
#endif
|
||||
|
||||
#define __HAVE_ARCH_PFN_MODIFY_ALLOWED 1
|
||||
extern bool pfn_modify_allowed(unsigned long pfn, pgprot_t prot);
|
||||
|
||||
static inline bool arch_has_pfn_modify_check(void)
|
||||
{
|
||||
return boot_cpu_has_bug(X86_BUG_L1TF);
|
||||
}
|
||||
|
||||
#include <asm-generic/pgtable.h>
|
||||
#endif /* __ASSEMBLY__ */
|
||||
|
||||
|
|
|
@ -163,18 +163,52 @@ static inline int pgd_large(pgd_t pgd) { return 0; }
|
|||
#define pte_offset_map(dir, address) pte_offset_kernel((dir), (address))
|
||||
#define pte_unmap(pte) ((void)(pte))/* NOP */
|
||||
|
||||
/* Encode and de-code a swap entry */
|
||||
#define SWP_TYPE_BITS 5
|
||||
#define SWP_OFFSET_SHIFT (_PAGE_BIT_PROTNONE + 1)
|
||||
/*
|
||||
* Encode and de-code a swap entry
|
||||
*
|
||||
* | ... | 11| 10| 9|8|7|6|5| 4| 3|2| 1|0| <- bit number
|
||||
* | ... |SW3|SW2|SW1|G|L|D|A|CD|WT|U| W|P| <- bit names
|
||||
* | TYPE (59-63) | ~OFFSET (9-58) |0|0|X|X| X| X|X|SD|0| <- swp entry
|
||||
*
|
||||
* G (8) is aliased and used as a PROT_NONE indicator for
|
||||
* !present ptes. We need to start storing swap entries above
|
||||
* there. We also need to avoid using A and D because of an
|
||||
* erratum where they can be incorrectly set by hardware on
|
||||
* non-present PTEs.
|
||||
*
|
||||
* SD (1) in swp entry is used to store soft dirty bit, which helps us
|
||||
* remember soft dirty over page migration
|
||||
*
|
||||
* Bit 7 in swp entry should be 0 because pmd_present checks not only P,
|
||||
* but also L and G.
|
||||
*
|
||||
* The offset is inverted by a binary not operation to make the high
|
||||
* physical bits set.
|
||||
*/
|
||||
#define SWP_TYPE_BITS 5
|
||||
|
||||
#define SWP_OFFSET_FIRST_BIT (_PAGE_BIT_PROTNONE + 1)
|
||||
|
||||
/* We always extract/encode the offset by shifting it all the way up, and then down again */
|
||||
#define SWP_OFFSET_SHIFT (SWP_OFFSET_FIRST_BIT+SWP_TYPE_BITS)
|
||||
|
||||
#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > SWP_TYPE_BITS)
|
||||
|
||||
#define __swp_type(x) (((x).val >> (_PAGE_BIT_PRESENT + 1)) \
|
||||
& ((1U << SWP_TYPE_BITS) - 1))
|
||||
#define __swp_offset(x) ((x).val >> SWP_OFFSET_SHIFT)
|
||||
#define __swp_entry(type, offset) ((swp_entry_t) { \
|
||||
((type) << (_PAGE_BIT_PRESENT + 1)) \
|
||||
| ((offset) << SWP_OFFSET_SHIFT) })
|
||||
/* Extract the high bits for type */
|
||||
#define __swp_type(x) ((x).val >> (64 - SWP_TYPE_BITS))
|
||||
|
||||
/* Shift up (to get rid of type), then down to get value */
|
||||
#define __swp_offset(x) (~(x).val << SWP_TYPE_BITS >> SWP_OFFSET_SHIFT)
|
||||
|
||||
/*
|
||||
* Shift the offset up "too far" by TYPE bits, then down again
|
||||
* The offset is inverted by a binary not operation to make the high
|
||||
* physical bits set.
|
||||
*/
|
||||
#define __swp_entry(type, offset) ((swp_entry_t) { \
|
||||
(~(unsigned long)(offset) << SWP_OFFSET_SHIFT >> SWP_TYPE_BITS) \
|
||||
| ((unsigned long)(type) << (64-SWP_TYPE_BITS)) })
|
||||
|
||||
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val((pte)) })
|
||||
#define __swp_entry_to_pte(x) ((pte_t) { .pte = (x).val })
|
||||
|
||||
|
@ -201,6 +235,8 @@ extern void cleanup_highmap(void);
|
|||
extern void init_extra_mapping_uc(unsigned long phys, unsigned long size);
|
||||
extern void init_extra_mapping_wb(unsigned long phys, unsigned long size);
|
||||
|
||||
#include <asm/pgtable-invert.h>
|
||||
|
||||
#endif /* !__ASSEMBLY__ */
|
||||
|
||||
#endif /* _ASM_X86_PGTABLE_64_H */
|
||||
|
|
|
@ -70,15 +70,15 @@
|
|||
/*
|
||||
* Tracking soft dirty bit when a page goes to a swap is tricky.
|
||||
* We need a bit which can be stored in pte _and_ not conflict
|
||||
* with swap entry format. On x86 bits 6 and 7 are *not* involved
|
||||
* into swap entry computation, but bit 6 is used for nonlinear
|
||||
* file mapping, so we borrow bit 7 for soft dirty tracking.
|
||||
* with swap entry format. On x86 bits 1-4 are *not* involved
|
||||
* into swap entry computation, but bit 7 is used for thp migration,
|
||||
* so we borrow bit 1 for soft dirty tracking.
|
||||
*
|
||||
* Please note that this bit must be treated as swap dirty page
|
||||
* mark if and only if the PTE has present bit clear!
|
||||
* mark if and only if the PTE/PMD has present bit clear!
|
||||
*/
|
||||
#ifdef CONFIG_MEM_SOFT_DIRTY
|
||||
#define _PAGE_SWP_SOFT_DIRTY _PAGE_PSE
|
||||
#define _PAGE_SWP_SOFT_DIRTY _PAGE_RW
|
||||
#else
|
||||
#define _PAGE_SWP_SOFT_DIRTY (_AT(pteval_t, 0))
|
||||
#endif
|
||||
|
|
|
@ -172,6 +172,11 @@ extern const struct seq_operations cpuinfo_op;
|
|||
|
||||
extern void cpu_detect(struct cpuinfo_x86 *c);
|
||||
|
||||
static inline unsigned long l1tf_pfn_limit(void)
|
||||
{
|
||||
return BIT(boot_cpu_data.x86_phys_bits - 1 - PAGE_SHIFT) - 1;
|
||||
}
|
||||
|
||||
extern void early_cpu_init(void);
|
||||
extern void identify_boot_cpu(void);
|
||||
extern void identify_secondary_cpu(struct cpuinfo_x86 *);
|
||||
|
|
|
@ -26,9 +26,11 @@
|
|||
#include <asm/pgtable.h>
|
||||
#include <asm/cacheflush.h>
|
||||
#include <asm/intel-family.h>
|
||||
#include <asm/e820.h>
|
||||
|
||||
static void __init spectre_v2_select_mitigation(void);
|
||||
static void __init ssb_select_mitigation(void);
|
||||
static void __init l1tf_select_mitigation(void);
|
||||
|
||||
/*
|
||||
* Our boot-time value of the SPEC_CTRL MSR. We read it once so that any
|
||||
|
@ -80,6 +82,8 @@ void __init check_bugs(void)
|
|||
*/
|
||||
ssb_select_mitigation();
|
||||
|
||||
l1tf_select_mitigation();
|
||||
|
||||
#ifdef CONFIG_X86_32
|
||||
/*
|
||||
* Check whether we are able to run this kernel safely on SMP.
|
||||
|
@ -309,23 +313,6 @@ static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
|
|||
return cmd;
|
||||
}
|
||||
|
||||
/* Check for Skylake-like CPUs (for RSB handling) */
|
||||
static bool __init is_skylake_era(void)
|
||||
{
|
||||
if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
|
||||
boot_cpu_data.x86 == 6) {
|
||||
switch (boot_cpu_data.x86_model) {
|
||||
case INTEL_FAM6_SKYLAKE_MOBILE:
|
||||
case INTEL_FAM6_SKYLAKE_DESKTOP:
|
||||
case INTEL_FAM6_SKYLAKE_X:
|
||||
case INTEL_FAM6_KABYLAKE_MOBILE:
|
||||
case INTEL_FAM6_KABYLAKE_DESKTOP:
|
||||
return true;
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
static void __init spectre_v2_select_mitigation(void)
|
||||
{
|
||||
enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline();
|
||||
|
@ -386,22 +373,15 @@ retpoline_auto:
|
|||
pr_info("%s\n", spectre_v2_strings[mode]);
|
||||
|
||||
/*
|
||||
* If neither SMEP nor PTI are available, there is a risk of
|
||||
* hitting userspace addresses in the RSB after a context switch
|
||||
* from a shallow call stack to a deeper one. To prevent this fill
|
||||
* the entire RSB, even when using IBRS.
|
||||
* If spectre v2 protection has been enabled, unconditionally fill
|
||||
* RSB during a context switch; this protects against two independent
|
||||
* issues:
|
||||
*
|
||||
* Skylake era CPUs have a separate issue with *underflow* of the
|
||||
* RSB, when they will predict 'ret' targets from the generic BTB.
|
||||
* The proper mitigation for this is IBRS. If IBRS is not supported
|
||||
* or deactivated in favour of retpolines the RSB fill on context
|
||||
* switch is required.
|
||||
* - RSB underflow (and switch to BTB) on Skylake+
|
||||
* - SpectreRSB variant of spectre v2 on X86_BUG_SPECTRE_V2 CPUs
|
||||
*/
|
||||
if ((!boot_cpu_has(X86_FEATURE_KAISER) &&
|
||||
!boot_cpu_has(X86_FEATURE_SMEP)) || is_skylake_era()) {
|
||||
setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
|
||||
pr_info("Spectre v2 mitigation: Filling RSB on context switch\n");
|
||||
}
|
||||
setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
|
||||
pr_info("Spectre v2 / SpectreRSB mitigation: Filling RSB on context switch\n");
|
||||
|
||||
/* Initialize Indirect Branch Prediction Barrier if supported */
|
||||
if (boot_cpu_has(X86_FEATURE_IBPB)) {
|
||||
|
@ -652,6 +632,35 @@ void x86_spec_ctrl_setup_ap(void)
|
|||
x86_amd_ssb_disable();
|
||||
}
|
||||
|
||||
#undef pr_fmt
|
||||
#define pr_fmt(fmt) "L1TF: " fmt
|
||||
static void __init l1tf_select_mitigation(void)
|
||||
{
|
||||
u64 half_pa;
|
||||
|
||||
if (!boot_cpu_has_bug(X86_BUG_L1TF))
|
||||
return;
|
||||
|
||||
#if CONFIG_PGTABLE_LEVELS == 2
|
||||
pr_warn("Kernel not compiled for PAE. No mitigation for L1TF\n");
|
||||
return;
|
||||
#endif
|
||||
|
||||
/*
|
||||
* This is extremely unlikely to happen because almost all
|
||||
* systems have far more MAX_PA/2 than RAM can be fit into
|
||||
* DIMM slots.
|
||||
*/
|
||||
half_pa = (u64)l1tf_pfn_limit() << PAGE_SHIFT;
|
||||
if (e820_any_mapped(half_pa, ULLONG_MAX - half_pa, E820_RAM)) {
|
||||
pr_warn("System has more than MAX_PA/2 memory. L1TF mitigation not effective.\n");
|
||||
return;
|
||||
}
|
||||
|
||||
setup_force_cpu_cap(X86_FEATURE_L1TF_PTEINV);
|
||||
}
|
||||
#undef pr_fmt
|
||||
|
||||
#ifdef CONFIG_SYSFS
|
||||
|
||||
static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
|
||||
|
@ -679,6 +688,11 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr
|
|||
case X86_BUG_SPEC_STORE_BYPASS:
|
||||
return sprintf(buf, "%s\n", ssb_strings[ssb_mode]);
|
||||
|
||||
case X86_BUG_L1TF:
|
||||
if (boot_cpu_has(X86_FEATURE_L1TF_PTEINV))
|
||||
return sprintf(buf, "Mitigation: Page Table Inversion\n");
|
||||
break;
|
||||
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
@ -705,4 +719,9 @@ ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *
|
|||
{
|
||||
return cpu_show_common(dev, attr, buf, X86_BUG_SPEC_STORE_BYPASS);
|
||||
}
|
||||
|
||||
ssize_t cpu_show_l1tf(struct device *dev, struct device_attribute *attr, char *buf)
|
||||
{
|
||||
return cpu_show_common(dev, attr, buf, X86_BUG_L1TF);
|
||||
}
|
||||
#endif
|
||||
|
|
|
@ -880,6 +880,21 @@ static const __initconst struct x86_cpu_id cpu_no_spec_store_bypass[] = {
|
|||
{}
|
||||
};
|
||||
|
||||
static const __initconst struct x86_cpu_id cpu_no_l1tf[] = {
|
||||
/* in addition to cpu_no_speculation */
|
||||
{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT1 },
|
||||
{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT2 },
|
||||
{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_AIRMONT },
|
||||
{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_MERRIFIELD },
|
||||
{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_MOOREFIELD },
|
||||
{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_GOLDMONT },
|
||||
{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_DENVERTON },
|
||||
{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_GEMINI_LAKE },
|
||||
{ X86_VENDOR_INTEL, 6, INTEL_FAM6_XEON_PHI_KNL },
|
||||
{ X86_VENDOR_INTEL, 6, INTEL_FAM6_XEON_PHI_KNM },
|
||||
{}
|
||||
};
|
||||
|
||||
static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
|
||||
{
|
||||
u64 ia32_cap = 0;
|
||||
|
@ -905,6 +920,11 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
|
|||
return;
|
||||
|
||||
setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN);
|
||||
|
||||
if (x86_match_cpu(cpu_no_l1tf))
|
||||
return;
|
||||
|
||||
setup_force_cpu_bug(X86_BUG_L1TF);
|
||||
}
|
||||
|
||||
/*
|
||||
|
|
|
@ -394,7 +394,6 @@ int __copy_instruction(u8 *dest, u8 *src)
|
|||
newdisp = (u8 *) src + (s64) insn.displacement.value - (u8 *) dest;
|
||||
if ((s64) (s32) newdisp != newdisp) {
|
||||
pr_err("Kprobes error: new displacement does not fit into s32 (%llx)\n", newdisp);
|
||||
pr_err("\tSrc: %p, Dest: %p, old disp: %x\n", src, dest, insn.displacement.value);
|
||||
return 0;
|
||||
}
|
||||
disp = (u8 *) dest + insn_offset_displacement(&insn);
|
||||
|
@ -610,8 +609,7 @@ static int reenter_kprobe(struct kprobe *p, struct pt_regs *regs,
|
|||
* Raise a BUG or we'll continue in an endless reentering loop
|
||||
* and eventually a stack overflow.
|
||||
*/
|
||||
printk(KERN_WARNING "Unrecoverable kprobe detected at %p.\n",
|
||||
p->addr);
|
||||
pr_err("Unrecoverable kprobe detected.\n");
|
||||
dump_kprobe(p);
|
||||
BUG();
|
||||
default:
|
||||
|
|
|
@ -97,10 +97,12 @@ unsigned paravirt_patch_call(void *insnbuf,
|
|||
struct branch *b = insnbuf;
|
||||
unsigned long delta = (unsigned long)target - (addr+5);
|
||||
|
||||
if (tgt_clobbers & ~site_clobbers)
|
||||
return len; /* target would clobber too much for this site */
|
||||
if (len < 5)
|
||||
if (len < 5) {
|
||||
#ifdef CONFIG_RETPOLINE
|
||||
WARN_ONCE("Failing to patch indirect CALL in %ps\n", (void *)addr);
|
||||
#endif
|
||||
return len; /* call too long for patch site */
|
||||
}
|
||||
|
||||
b->opcode = 0xe8; /* call */
|
||||
b->delta = delta;
|
||||
|
@ -115,8 +117,12 @@ unsigned paravirt_patch_jmp(void *insnbuf, const void *target,
|
|||
struct branch *b = insnbuf;
|
||||
unsigned long delta = (unsigned long)target - (addr+5);
|
||||
|
||||
if (len < 5)
|
||||
if (len < 5) {
|
||||
#ifdef CONFIG_RETPOLINE
|
||||
WARN_ONCE("Failing to patch indirect JMP in %ps\n", (void *)addr);
|
||||
#endif
|
||||
return len; /* call too long for patch site */
|
||||
}
|
||||
|
||||
b->opcode = 0xe9; /* jmp */
|
||||
b->delta = delta;
|
||||
|
|
|
@ -851,6 +851,12 @@ void __init setup_arch(char **cmdline_p)
|
|||
memblock_reserve(__pa_symbol(_text),
|
||||
(unsigned long)__bss_stop - (unsigned long)_text);
|
||||
|
||||
/*
|
||||
* Make sure page 0 is always reserved because on systems with
|
||||
* L1TF its contents can be leaked to user processes.
|
||||
*/
|
||||
memblock_reserve(0, PAGE_SIZE);
|
||||
|
||||
early_reserve_initrd();
|
||||
|
||||
/*
|
||||
|
|
|
@ -4,6 +4,8 @@
|
|||
#include <linux/swap.h>
|
||||
#include <linux/memblock.h>
|
||||
#include <linux/bootmem.h> /* for max_low_pfn */
|
||||
#include <linux/swapfile.h>
|
||||
#include <linux/swapops.h>
|
||||
|
||||
#include <asm/cacheflush.h>
|
||||
#include <asm/e820.h>
|
||||
|
@ -767,3 +769,26 @@ void update_cache_mode_entry(unsigned entry, enum page_cache_mode cache)
|
|||
__cachemode2pte_tbl[cache] = __cm_idx2pte(entry);
|
||||
__pte2cachemode_tbl[entry] = cache;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_SWAP
|
||||
unsigned long max_swapfile_size(void)
|
||||
{
|
||||
unsigned long pages;
|
||||
|
||||
pages = generic_max_swapfile_size();
|
||||
|
||||
if (boot_cpu_has_bug(X86_BUG_L1TF)) {
|
||||
/* Limit the swap file size to MAX_PA/2 for L1TF workaround */
|
||||
unsigned long l1tf_limit = l1tf_pfn_limit() + 1;
|
||||
/*
|
||||
* We encode swap offsets also with 3 bits below those for pfn
|
||||
* which makes the usable limit higher.
|
||||
*/
|
||||
#if CONFIG_PGTABLE_LEVELS > 2
|
||||
l1tf_limit <<= PAGE_SHIFT - SWP_OFFSET_FIRST_BIT;
|
||||
#endif
|
||||
pages = min_t(unsigned long, l1tf_limit, pages);
|
||||
}
|
||||
return pages;
|
||||
}
|
||||
#endif
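Back-of-the-envelope check (not part of the patch) of the clamp above, assuming a 64-bit host, an example x86_phys_bits of 46 and 4 KiB pages; the "3 bits below those for pfn" shift corresponds to PAGE_SHIFT - SWP_OFFSET_FIRST_BIT = 12 - 9.

#include <stdio.h>

int main(void)
{
	unsigned long phys_bits = 46, page_shift = 12;	/* example values only */
	/* l1tf_pfn_limit(): BIT(x86_phys_bits - 1 - PAGE_SHIFT) - 1 */
	unsigned long pfn_limit = (1UL << (phys_bits - 1 - page_shift)) - 1;
	/* On >2-level page tables the swap offset sits 3 bits below the pfn,
	 * so the usable swap limit is shifted up by that amount. */
	unsigned long swap_pages = (pfn_limit + 1) << 3;

	printf("pfn limit      : %lu pages (~%lu GiB, i.e. MAX_PA/2)\n",
	       pfn_limit, (pfn_limit + 1) >> (30 - page_shift));
	printf("max swap pages : %lu (~%lu GiB of swap)\n",
	       swap_pages, swap_pages >> (30 - page_shift));
	return 0;
}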
|
||||
|
|
|
@ -125,24 +125,29 @@ static struct kmmio_fault_page *get_kmmio_fault_page(unsigned long addr)
|
|||
|
||||
static void clear_pmd_presence(pmd_t *pmd, bool clear, pmdval_t *old)
|
||||
{
|
||||
pmd_t new_pmd;
|
||||
pmdval_t v = pmd_val(*pmd);
|
||||
if (clear) {
|
||||
*old = v & _PAGE_PRESENT;
|
||||
v &= ~_PAGE_PRESENT;
|
||||
} else /* presume this has been called with clear==true previously */
|
||||
v |= *old;
|
||||
set_pmd(pmd, __pmd(v));
|
||||
*old = v;
|
||||
new_pmd = pmd_mknotpresent(*pmd);
|
||||
} else {
|
||||
/* Presume this has been called with clear==true previously */
|
||||
new_pmd = __pmd(*old);
|
||||
}
|
||||
set_pmd(pmd, new_pmd);
|
||||
}
|
||||
|
||||
static void clear_pte_presence(pte_t *pte, bool clear, pteval_t *old)
|
||||
{
|
||||
pteval_t v = pte_val(*pte);
|
||||
if (clear) {
|
||||
*old = v & _PAGE_PRESENT;
|
||||
v &= ~_PAGE_PRESENT;
|
||||
} else /* presume this has been called with clear==true previously */
|
||||
v |= *old;
|
||||
set_pte_atomic(pte, __pte(v));
|
||||
*old = v;
|
||||
/* Nothing should care about address */
|
||||
pte_clear(&init_mm, 0, pte);
|
||||
} else {
|
||||
/* Presume this has been called with clear==true previously */
|
||||
set_pte_atomic(pte, __pte(*old));
|
||||
}
|
||||
}
|
||||
|
||||
static int clear_page_presence(struct kmmio_fault_page *f, bool clear)
|
||||
|
|
|
@ -121,3 +121,24 @@ const char *arch_vma_name(struct vm_area_struct *vma)
|
|||
return "[mpx]";
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/*
|
||||
* Only allow root to set high MMIO mappings to PROT_NONE.
|
||||
* This prevents an unpriv. user to set them to PROT_NONE and invert
|
||||
* them, then pointing to valid memory for L1TF speculation.
|
||||
*
|
||||
* Note: for locked down kernels may want to disable the root override.
|
||||
*/
|
||||
bool pfn_modify_allowed(unsigned long pfn, pgprot_t prot)
|
||||
{
|
||||
if (!boot_cpu_has_bug(X86_BUG_L1TF))
|
||||
return true;
|
||||
if (!__pte_needs_invert(pgprot_val(prot)))
|
||||
return true;
|
||||
/* If it's real memory always allow */
|
||||
if (pfn_valid(pfn))
|
||||
return true;
|
||||
if (pfn > l1tf_pfn_limit() && !capable(CAP_SYS_ADMIN))
|
||||
return false;
|
||||
return true;
|
||||
}
|
||||
|
|
|
@ -1006,8 +1006,8 @@ static int populate_pmd(struct cpa_data *cpa,
|
|||
|
||||
pmd = pmd_offset(pud, start);
|
||||
|
||||
set_pmd(pmd, __pmd(cpa->pfn | _PAGE_PSE |
|
||||
massage_pgprot(pmd_pgprot)));
|
||||
set_pmd(pmd, pmd_mkhuge(pfn_pmd(cpa->pfn,
|
||||
canon_pgprot(pmd_pgprot))));
|
||||
|
||||
start += PMD_SIZE;
|
||||
cpa->pfn += PMD_SIZE;
|
||||
|
@ -1079,8 +1079,8 @@ static int populate_pud(struct cpa_data *cpa, unsigned long start, pgd_t *pgd,
|
|||
* Map everything starting from the Gb boundary, possibly with 1G pages
|
||||
*/
|
||||
while (end - start >= PUD_SIZE) {
|
||||
set_pud(pud, __pud(cpa->pfn | _PAGE_PSE |
|
||||
massage_pgprot(pud_pgprot)));
|
||||
set_pud(pud, pud_mkhuge(pfn_pud(cpa->pfn,
|
||||
canon_pgprot(pud_pgprot))));
|
||||
|
||||
start += PUD_SIZE;
|
||||
cpa->pfn += PUD_SIZE;
|
||||
|
|
|
@ -154,10 +154,12 @@ static const struct lpss_device_desc lpt_sdio_dev_desc = {
|
|||
|
||||
static const struct lpss_device_desc byt_pwm_dev_desc = {
|
||||
.flags = LPSS_SAVE_CTX,
|
||||
.prv_offset = 0x800,
|
||||
};
|
||||
|
||||
static const struct lpss_device_desc bsw_pwm_dev_desc = {
|
||||
.flags = LPSS_SAVE_CTX | LPSS_NO_D3_DELAY,
|
||||
.prv_offset = 0x800,
|
||||
};
|
||||
|
||||
static const struct lpss_device_desc byt_uart_dev_desc = {
|
||||
|
|
|
@ -699,16 +699,24 @@ ssize_t __weak cpu_show_spec_store_bypass(struct device *dev,
|
|||
return sprintf(buf, "Not affected\n");
|
||||
}
|
||||
|
||||
ssize_t __weak cpu_show_l1tf(struct device *dev,
|
||||
struct device_attribute *attr, char *buf)
|
||||
{
|
||||
return sprintf(buf, "Not affected\n");
|
||||
}
|
||||
|
||||
static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL);
|
||||
static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL);
|
||||
static DEVICE_ATTR(spectre_v2, 0444, cpu_show_spectre_v2, NULL);
|
||||
static DEVICE_ATTR(spec_store_bypass, 0444, cpu_show_spec_store_bypass, NULL);
|
||||
static DEVICE_ATTR(l1tf, 0444, cpu_show_l1tf, NULL);
|
||||
|
||||
static struct attribute *cpu_root_vulnerabilities_attrs[] = {
|
||||
&dev_attr_meltdown.attr,
|
||||
&dev_attr_spectre_v1.attr,
|
||||
&dev_attr_spectre_v2.attr,
|
||||
&dev_attr_spec_store_bypass.attr,
|
||||
&dev_attr_l1tf.attr,
|
||||
NULL
|
||||
};
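Usage note (not part of the patch): once the attribute above is registered, the mitigation state is readable from sysfs. A trivial userspace check might look like the following; the path is the standard cpu vulnerabilities directory, and the reported string depends on the running kernel.

#include <stdio.h>

int main(void)
{
	char buf[128];
	FILE *f = fopen("/sys/devices/system/cpu/vulnerabilities/l1tf", "r");

	if (!f) {
		perror("l1tf");
		return 1;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("l1tf: %s", buf);  /* e.g. "Mitigation: Page Table Inversion" */
	fclose(f);
	return 0;
}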
|
||||
|
||||
|
|
|
@ -25,7 +25,7 @@ struct file_priv {
|
|||
struct tpm_chip *chip;
|
||||
|
||||
/* Data passed to and from the tpm via the read/write calls */
|
||||
atomic_t data_pending;
|
||||
size_t data_pending;
|
||||
struct mutex buffer_mutex;
|
||||
|
||||
struct timer_list user_read_timer; /* user needs to claim result */
|
||||
|
@ -46,7 +46,7 @@ static void timeout_work(struct work_struct *work)
|
|||
struct file_priv *priv = container_of(work, struct file_priv, work);
|
||||
|
||||
mutex_lock(&priv->buffer_mutex);
|
||||
atomic_set(&priv->data_pending, 0);
|
||||
priv->data_pending = 0;
|
||||
memset(priv->data_buffer, 0, sizeof(priv->data_buffer));
|
||||
mutex_unlock(&priv->buffer_mutex);
|
||||
}
|
||||
|
@ -72,7 +72,6 @@ static int tpm_open(struct inode *inode, struct file *file)
|
|||
}
|
||||
|
||||
priv->chip = chip;
|
||||
atomic_set(&priv->data_pending, 0);
|
||||
mutex_init(&priv->buffer_mutex);
|
||||
setup_timer(&priv->user_read_timer, user_reader_timeout,
|
||||
(unsigned long)priv);
|
||||
|
@ -86,28 +85,24 @@ static ssize_t tpm_read(struct file *file, char __user *buf,
|
|||
size_t size, loff_t *off)
|
||||
{
|
||||
struct file_priv *priv = file->private_data;
|
||||
ssize_t ret_size;
|
||||
ssize_t ret_size = 0;
|
||||
int rc;
|
||||
|
||||
del_singleshot_timer_sync(&priv->user_read_timer);
|
||||
flush_work(&priv->work);
|
||||
ret_size = atomic_read(&priv->data_pending);
|
||||
if (ret_size > 0) { /* relay data */
|
||||
ssize_t orig_ret_size = ret_size;
|
||||
if (size < ret_size)
|
||||
ret_size = size;
|
||||
mutex_lock(&priv->buffer_mutex);
|
||||
|
||||
mutex_lock(&priv->buffer_mutex);
|
||||
if (priv->data_pending) {
|
||||
ret_size = min_t(ssize_t, size, priv->data_pending);
|
||||
rc = copy_to_user(buf, priv->data_buffer, ret_size);
|
||||
memset(priv->data_buffer, 0, orig_ret_size);
|
||||
memset(priv->data_buffer, 0, priv->data_pending);
|
||||
if (rc)
|
||||
ret_size = -EFAULT;
|
||||
|
||||
mutex_unlock(&priv->buffer_mutex);
|
||||
priv->data_pending = 0;
|
||||
}
|
||||
|
||||
atomic_set(&priv->data_pending, 0);
|
||||
|
||||
mutex_unlock(&priv->buffer_mutex);
|
||||
return ret_size;
|
||||
}
|
||||
|
||||
|
@ -118,18 +113,20 @@ static ssize_t tpm_write(struct file *file, const char __user *buf,
|
|||
size_t in_size = size;
|
||||
ssize_t out_size;
|
||||
|
||||
/* cannot perform a write until the read has cleared
|
||||
either via tpm_read or a user_read_timer timeout.
|
||||
This also prevents splitted buffered writes from blocking here.
|
||||
*/
|
||||
if (atomic_read(&priv->data_pending) != 0)
|
||||
return -EBUSY;
|
||||
|
||||
if (in_size > TPM_BUFSIZE)
|
||||
return -E2BIG;
|
||||
|
||||
mutex_lock(&priv->buffer_mutex);
|
||||
|
||||
/* Cannot perform a write until the read has cleared either via
|
||||
* tpm_read or a user_read_timer timeout. This also prevents split
|
||||
* buffered writes from blocking here.
|
||||
*/
|
||||
if (priv->data_pending != 0) {
|
||||
mutex_unlock(&priv->buffer_mutex);
|
||||
return -EBUSY;
|
||||
}
|
||||
|
||||
if (copy_from_user
|
||||
(priv->data_buffer, (void __user *) buf, in_size)) {
|
||||
mutex_unlock(&priv->buffer_mutex);
|
||||
|
@ -153,7 +150,7 @@ static ssize_t tpm_write(struct file *file, const char __user *buf,
|
|||
return out_size;
|
||||
}
|
||||
|
||||
atomic_set(&priv->data_pending, out_size);
|
||||
priv->data_pending = out_size;
|
||||
mutex_unlock(&priv->buffer_mutex);
|
||||
|
||||
/* Set a timeout by which the reader must come claim the result */
|
||||
|
@ -172,7 +169,7 @@ static int tpm_release(struct inode *inode, struct file *file)
|
|||
del_singleshot_timer_sync(&priv->user_read_timer);
|
||||
flush_work(&priv->work);
|
||||
file->private_data = NULL;
|
||||
atomic_set(&priv->data_pending, 0);
|
||||
priv->data_pending = 0;
|
||||
clear_bit(0, &priv->chip->is_open);
|
||||
kfree(priv);
|
||||
return 0;
|
||||
|
|
|
@ -382,6 +382,7 @@ static int i2c_imx_dma_xfer(struct imx_i2c_struct *i2c_imx,
|
|||
goto err_desc;
|
||||
}
|
||||
|
||||
reinit_completion(&dma->cmd_complete);
|
||||
txdesc->callback = i2c_imx_dma_callback;
|
||||
txdesc->callback_param = i2c_imx;
|
||||
if (dma_submit_error(dmaengine_submit(txdesc))) {
|
||||
|
@ -631,7 +632,6 @@ static int i2c_imx_dma_write(struct imx_i2c_struct *i2c_imx,
|
|||
* The first byte must be transmitted by the CPU.
|
||||
*/
|
||||
imx_i2c_write_reg(msgs->addr << 1, i2c_imx, IMX_I2C_I2DR);
|
||||
reinit_completion(&i2c_imx->dma->cmd_complete);
|
||||
time_left = wait_for_completion_timeout(
|
||||
&i2c_imx->dma->cmd_complete,
|
||||
msecs_to_jiffies(DMA_TIMEOUT));
|
||||
|
@ -690,7 +690,6 @@ static int i2c_imx_dma_read(struct imx_i2c_struct *i2c_imx,
|
|||
if (result)
|
||||
return result;
|
||||
|
||||
reinit_completion(&i2c_imx->dma->cmd_complete);
|
||||
time_left = wait_for_completion_timeout(
|
||||
&i2c_imx->dma->cmd_complete,
|
||||
msecs_to_jiffies(DMA_TIMEOUT));
|
||||
|
|
|
@ -122,16 +122,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
|
|||
umem->address = addr;
|
||||
umem->page_size = PAGE_SIZE;
|
||||
umem->pid = get_task_pid(current, PIDTYPE_PID);
|
||||
/*
|
||||
* We ask for writable memory if any of the following
|
||||
* access flags are set. "Local write" and "remote write"
|
||||
* obviously require write access. "Remote atomic" can do
|
||||
* things like fetch and add, which will modify memory, and
|
||||
* "MW bind" can change permissions by binding a window.
|
||||
*/
|
||||
umem->writable = !!(access &
|
||||
(IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE |
|
||||
IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_MW_BIND));
|
||||
umem->writable = ib_access_writable(access);
|
||||
|
||||
if (access & IB_ACCESS_ON_DEMAND) {
|
||||
put_pid(umem->pid);
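For reference (not part of this hunk): ib_access_writable(), introduced by "IB/core: Make testing MR flags for writability a static inline function", is roughly the helper below; the authoritative definition lives in include/rdma/ib_verbs.h in this series, and the IB_ACCESS_* flags come from that header.

static inline bool ib_access_writable(int access_flags)
{
	/*
	 * Any access flag that can modify memory implies a writable
	 * mapping: local/remote write, remote atomic (fetch-and-add),
	 * and MW bind (which can change permissions via a window).
	 */
	return access_flags & (IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE |
			       IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_MW_BIND);
}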
|
||||
|
|
|
@ -130,6 +130,40 @@ out:
|
|||
return err;
|
||||
}
|
||||
|
||||
static struct ib_umem *mlx4_get_umem_mr(struct ib_ucontext *context, u64 start,
|
||||
u64 length, u64 virt_addr,
|
||||
int access_flags)
|
||||
{
|
||||
/*
|
||||
* Force registering the memory as writable if the underlying pages
|
||||
* are writable. This is so rereg can change the access permissions
|
||||
* from readable to writable without having to run through ib_umem_get
|
||||
* again
|
||||
*/
|
||||
if (!ib_access_writable(access_flags)) {
|
||||
struct vm_area_struct *vma;
|
||||
|
||||
down_read(¤t->mm->mmap_sem);
|
||||
/*
|
||||
* FIXME: Ideally this would iterate over all the vmas that
|
||||
* cover the memory, but for now it requires a single vma to
|
||||
* entirely cover the MR to support RO mappings.
|
||||
*/
|
||||
vma = find_vma(current->mm, start);
|
||||
if (vma && vma->vm_end >= start + length &&
|
||||
vma->vm_start <= start) {
|
||||
if (vma->vm_flags & VM_WRITE)
|
||||
access_flags |= IB_ACCESS_LOCAL_WRITE;
|
||||
} else {
|
||||
access_flags |= IB_ACCESS_LOCAL_WRITE;
|
||||
}
|
||||
|
||||
up_read(¤t->mm->mmap_sem);
|
||||
}
|
||||
|
||||
return ib_umem_get(context, start, length, access_flags, 0);
|
||||
}
|
||||
|
||||
struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
|
||||
u64 virt_addr, int access_flags,
|
||||
struct ib_udata *udata)
|
||||
|
@ -144,10 +178,8 @@ struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
|
|||
if (!mr)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
/* Force registering the memory as writable. */
|
||||
/* Used for memory re-registeration. HCA protects the access */
|
||||
mr->umem = ib_umem_get(pd->uobject->context, start, length,
|
||||
access_flags | IB_ACCESS_LOCAL_WRITE, 0);
|
||||
mr->umem = mlx4_get_umem_mr(pd->uobject->context, start, length,
|
||||
virt_addr, access_flags);
|
||||
if (IS_ERR(mr->umem)) {
|
||||
err = PTR_ERR(mr->umem);
|
||||
goto err_free;
|
||||
|
@ -214,6 +246,9 @@ int mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags,
|
|||
}
|
||||
|
||||
if (flags & IB_MR_REREG_ACCESS) {
|
||||
if (ib_access_writable(mr_access_flags) && !mmr->umem->writable)
|
||||
return -EPERM;
|
||||
|
||||
err = mlx4_mr_hw_change_access(dev->dev, *pmpt_entry,
|
||||
convert_access(mr_access_flags));
|
||||
|
||||
|
@ -227,10 +262,9 @@ int mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags,
|
|||
|
||||
mlx4_mr_rereg_mem_cleanup(dev->dev, &mmr->mmr);
|
||||
ib_umem_release(mmr->umem);
|
||||
mmr->umem = ib_umem_get(mr->uobject->context, start, length,
|
||||
mr_access_flags |
|
||||
IB_ACCESS_LOCAL_WRITE,
|
||||
0);
|
||||
mmr->umem =
|
||||
mlx4_get_umem_mr(mr->uobject->context, start, length,
|
||||
virt_addr, mr_access_flags);
|
||||
if (IS_ERR(mmr->umem)) {
|
||||
err = PTR_ERR(mmr->umem);
|
||||
/* Prevent mlx4_ib_dereg_mr from free'ing invalid pointer */
|
||||
|
|
|
@ -643,7 +643,7 @@ static ssize_t ocrdma_dbgfs_ops_write(struct file *filp,
|
|||
struct ocrdma_stats *pstats = filp->private_data;
|
||||
struct ocrdma_dev *dev = pstats->dev;
|
||||
|
||||
if (count > 32)
|
||||
if (*ppos != 0 || count == 0 || count > sizeof(tmp_str))
|
||||
goto err;
|
||||
|
||||
if (copy_from_user(tmp_str, buffer, count))
|
||||
|
|
|
@ -879,7 +879,6 @@ static RING_IDX xennet_fill_frags(struct netfront_queue *queue,
|
|||
struct sk_buff *skb,
|
||||
struct sk_buff_head *list)
|
||||
{
|
||||
struct skb_shared_info *shinfo = skb_shinfo(skb);
|
||||
RING_IDX cons = queue->rx.rsp_cons;
|
||||
struct sk_buff *nskb;
|
||||
|
||||
|
@ -888,15 +887,16 @@ static RING_IDX xennet_fill_frags(struct netfront_queue *queue,
|
|||
RING_GET_RESPONSE(&queue->rx, ++cons);
|
||||
skb_frag_t *nfrag = &skb_shinfo(nskb)->frags[0];
|
||||
|
||||
if (shinfo->nr_frags == MAX_SKB_FRAGS) {
|
||||
if (skb_shinfo(skb)->nr_frags == MAX_SKB_FRAGS) {
|
||||
unsigned int pull_to = NETFRONT_SKB_CB(skb)->pull_to;
|
||||
|
||||
BUG_ON(pull_to <= skb_headlen(skb));
|
||||
__pskb_pull_tail(skb, pull_to - skb_headlen(skb));
|
||||
}
|
||||
BUG_ON(shinfo->nr_frags >= MAX_SKB_FRAGS);
|
||||
BUG_ON(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS);
|
||||
|
||||
skb_add_rx_frag(skb, shinfo->nr_frags, skb_frag_page(nfrag),
|
||||
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
|
||||
skb_frag_page(nfrag),
|
||||
rx->offset, rx->status, PAGE_SIZE);
|
||||
|
||||
skb_shinfo(nskb)->nr_frags = 0;
|
||||
|
|
|
@ -543,7 +543,7 @@ void acpi_pci_add_bus(struct pci_bus *bus)
|
|||
union acpi_object *obj;
|
||||
struct pci_host_bridge *bridge;
|
||||
|
||||
if (acpi_pci_disabled || !bus->bridge)
|
||||
if (acpi_pci_disabled || !bus->bridge || !ACPI_HANDLE(bus->bridge))
|
||||
return;
|
||||
|
||||
acpi_pci_slot_enumerate(bus);
|
||||
|
|
|
@ -325,11 +325,10 @@ qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun,
|
|||
|
||||
wait_for_completion(&tm_iocb->u.tmf.comp);
|
||||
|
||||
rval = tm_iocb->u.tmf.comp_status == CS_COMPLETE ?
|
||||
QLA_SUCCESS : QLA_FUNCTION_FAILED;
|
||||
rval = tm_iocb->u.tmf.data;
|
||||
|
||||
if ((rval != QLA_SUCCESS) || tm_iocb->u.tmf.data) {
|
||||
ql_dbg(ql_dbg_taskm, vha, 0x8030,
|
||||
if (rval != QLA_SUCCESS) {
|
||||
ql_log(ql_log_warn, vha, 0x8030,
|
||||
"TM IOCB failed (%x).\n", rval);
|
||||
}
|
||||
|
||||
|
|
|
@ -4938,8 +4938,9 @@ qla2x00_do_dpc(void *data)
|
|||
}
|
||||
}
|
||||
|
||||
if (test_and_clear_bit(ISP_ABORT_NEEDED,
|
||||
&base_vha->dpc_flags)) {
|
||||
if (test_and_clear_bit
|
||||
(ISP_ABORT_NEEDED, &base_vha->dpc_flags) &&
|
||||
!test_bit(UNLOADING, &base_vha->dpc_flags)) {
|
||||
|
||||
ql_dbg(ql_dbg_dpc, base_vha, 0x4007,
|
||||
"ISP abort scheduled.\n");
|
||||
|
|
|
@ -520,18 +520,26 @@ static int sr_init_command(struct scsi_cmnd *SCpnt)
|
|||
static int sr_block_open(struct block_device *bdev, fmode_t mode)
|
||||
{
|
||||
struct scsi_cd *cd;
|
||||
struct scsi_device *sdev;
|
||||
int ret = -ENXIO;
|
||||
|
||||
cd = scsi_cd_get(bdev->bd_disk);
|
||||
if (!cd)
|
||||
goto out;
|
||||
|
||||
sdev = cd->device;
|
||||
scsi_autopm_get_device(sdev);
|
||||
check_disk_change(bdev);
|
||||
|
||||
mutex_lock(&sr_mutex);
|
||||
cd = scsi_cd_get(bdev->bd_disk);
|
||||
if (cd) {
|
||||
ret = cdrom_open(&cd->cdi, bdev, mode);
|
||||
if (ret)
|
||||
scsi_cd_put(cd);
|
||||
}
|
||||
ret = cdrom_open(&cd->cdi, bdev, mode);
|
||||
mutex_unlock(&sr_mutex);
|
||||
|
||||
scsi_autopm_put_device(sdev);
|
||||
if (ret)
|
||||
scsi_cd_put(cd);
|
||||
|
||||
out:
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
@ -559,6 +567,8 @@ static int sr_block_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
|
|||
if (ret)
|
||||
goto out;
|
||||
|
||||
scsi_autopm_get_device(sdev);
|
||||
|
||||
/*
|
||||
* Send SCSI addressing ioctls directly to mid level, send other
|
||||
* ioctls to cdrom/block level.
|
||||
|
@ -567,15 +577,18 @@ static int sr_block_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
|
|||
case SCSI_IOCTL_GET_IDLUN:
|
||||
case SCSI_IOCTL_GET_BUS_NUMBER:
|
||||
ret = scsi_ioctl(sdev, cmd, argp);
|
||||
goto out;
|
||||
goto put;
|
||||
}
|
||||
|
||||
ret = cdrom_ioctl(&cd->cdi, bdev, mode, cmd, arg);
|
||||
if (ret != -ENOSYS)
|
||||
goto out;
|
||||
goto put;
|
||||
|
||||
ret = scsi_ioctl(sdev, cmd, argp);
|
||||
|
||||
put:
|
||||
scsi_autopm_put_device(sdev);
|
||||
|
||||
out:
|
||||
mutex_unlock(&sr_mutex);
|
||||
return ret;
|
||||
|
|
|
@ -1932,10 +1932,12 @@ struct dentry *d_make_root(struct inode *root_inode)
|
|||
static const struct qstr name = QSTR_INIT("/", 1);
|
||||
|
||||
res = __d_alloc(root_inode->i_sb, &name);
|
||||
if (res)
|
||||
if (res) {
|
||||
res->d_flags |= DCACHE_RCUACCESS;
|
||||
d_instantiate(res, root_inode);
|
||||
else
|
||||
} else {
|
||||
iput(root_inode);
|
||||
}
|
||||
}
|
||||
return res;
|
||||
}
|
||||
|
|
|
@ -2101,7 +2101,7 @@ static int ext4_check_descriptors(struct super_block *sb,
|
|||
struct ext4_sb_info *sbi = EXT4_SB(sb);
|
||||
ext4_fsblk_t first_block = le32_to_cpu(sbi->s_es->s_first_data_block);
|
||||
ext4_fsblk_t last_block;
|
||||
ext4_fsblk_t last_bg_block = sb_block + ext4_bg_num_gdb(sb, 0) + 1;
|
||||
ext4_fsblk_t last_bg_block = sb_block + ext4_bg_num_gdb(sb, 0);
|
||||
ext4_fsblk_t block_bitmap;
|
||||
ext4_fsblk_t inode_bitmap;
|
||||
ext4_fsblk_t inode_table;
|
||||
|
@ -3770,13 +3770,13 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
|
|||
goto failed_mount2;
|
||||
}
|
||||
}
|
||||
sbi->s_gdb_count = db_count;
|
||||
if (!ext4_check_descriptors(sb, logical_sb_block, &first_not_zeroed)) {
|
||||
ext4_msg(sb, KERN_ERR, "group descriptors corrupted!");
|
||||
ret = -EFSCORRUPTED;
|
||||
goto failed_mount2;
|
||||
}
|
||||
|
||||
sbi->s_gdb_count = db_count;
|
||||
get_random_bytes(&sbi->s_next_generation, sizeof(u32));
|
||||
spin_lock_init(&sbi->s_next_gen_lock);
|
||||
|
||||
|
|
|
fs/jfs/xattr.c

@@ -493,15 +493,17 @@ static int ea_get(struct inode *inode, struct ea_buffer *ea_buf, int min_size)
 	if (size > PSIZE) {
 		/*
 		 * To keep the rest of the code simple. Allocate a
-		 * contiguous buffer to work with
+		 * contiguous buffer to work with. Make the buffer large
+		 * enough to make use of the whole extent.
 		 */
-		ea_buf->xattr = kmalloc(size, GFP_KERNEL);
+		ea_buf->max_size = (size + sb->s_blocksize - 1) &
+			~(sb->s_blocksize - 1);
+
+		ea_buf->xattr = kmalloc(ea_buf->max_size, GFP_KERNEL);
 		if (ea_buf->xattr == NULL)
 			return -ENOMEM;
 
 		ea_buf->flag = EA_MALLOC;
-		ea_buf->max_size = (size + sb->s_blocksize - 1) &
-			~(sb->s_blocksize - 1);
 
 		if (ea_size == 0)
 			return 0;

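The fix computes ea_buf->max_size first, rounded up to a block-size multiple, and then allocates exactly that, so the recorded capacity can no longer exceed the kmalloc'd buffer. A stand-alone look at the rounding expression, assuming a 4 KiB block size for the example:

#include <stdio.h>

/* The round-up used for ea_buf->max_size, shown in isolation. */
static unsigned long round_up_to_block(unsigned long size, unsigned long blocksize)
{
	return (size + blocksize - 1) & ~(blocksize - 1);
}

int main(void)
{
	unsigned long blocksize = 4096;

	/* Old code: kmalloc(9000) but max_size recorded as 12288, so later
	 * users of ea_buf could write past the 9000-byte allocation. */
	printf("%lu -> %lu\n", 9000UL, round_up_to_block(9000UL, blocksize));	/* 12288 */
	printf("%lu -> %lu\n", 8192UL, round_up_to_block(8192UL, blocksize));	/* 8192 */
	return 0;
}
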
fs/namespace.c

@@ -605,12 +605,21 @@ int __legitimize_mnt(struct vfsmount *bastard, unsigned seq)
 		return 0;
 	mnt = real_mount(bastard);
 	mnt_add_count(mnt, 1);
+	smp_mb();			// see mntput_no_expire()
 	if (likely(!read_seqretry(&mount_lock, seq)))
 		return 0;
 	if (bastard->mnt_flags & MNT_SYNC_UMOUNT) {
 		mnt_add_count(mnt, -1);
 		return 1;
 	}
+	lock_mount_hash();
+	if (unlikely(bastard->mnt_flags & MNT_DOOMED)) {
+		mnt_add_count(mnt, -1);
+		unlock_mount_hash();
+		return 1;
+	}
+	unlock_mount_hash();
+	/* caller will mntput() */
 	return -1;
 }
 
@@ -1142,12 +1151,27 @@ static DECLARE_DELAYED_WORK(delayed_mntput_work, delayed_mntput);
 static void mntput_no_expire(struct mount *mnt)
 {
 	rcu_read_lock();
-	mnt_add_count(mnt, -1);
-	if (likely(mnt->mnt_ns)) { /* shouldn't be the last one */
+	if (likely(READ_ONCE(mnt->mnt_ns))) {
+		/*
+		 * Since we don't do lock_mount_hash() here,
+		 * ->mnt_ns can change under us. However, if it's
+		 * non-NULL, then there's a reference that won't
+		 * be dropped until after an RCU delay done after
+		 * turning ->mnt_ns NULL. So if we observe it
+		 * non-NULL under rcu_read_lock(), the reference
+		 * we are dropping is not the final one.
+		 */
+		mnt_add_count(mnt, -1);
 		rcu_read_unlock();
 		return;
 	}
 	lock_mount_hash();
+	/*
+	 * make sure that if __legitimize_mnt() has not seen us grab
+	 * mount_lock, we'll see their refcount increment here.
+	 */
+	smp_mb();
+	mnt_add_count(mnt, -1);
 	if (mnt_get_count(mnt)) {
 		rcu_read_unlock();
 		unlock_mount_hash();

include/asm-generic/pgtable.h

@@ -799,6 +799,18 @@ static inline int pmd_free_pte_page(pmd_t *pmd)
 }
 #endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */
 
+#ifndef __HAVE_ARCH_PFN_MODIFY_ALLOWED
+static inline bool pfn_modify_allowed(unsigned long pfn, pgprot_t prot)
+{
+	return true;
+}
+
+static inline bool arch_has_pfn_modify_check(void)
+{
+	return false;
+}
+#endif /* !_HAVE_ARCH_PFN_MODIFY_ALLOWED */
+
 #endif /* !__ASSEMBLY__ */
 
 #ifndef io_remap_pfn_range

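The two stubs above are the no-op defaults; an architecture that wants to veto risky PFN mappings (as x86 does for L1TF later in this series) opts in by defining __HAVE_ARCH_PFN_MODIFY_ALLOWED and supplying real helpers. A purely illustrative sketch of such an override, with an invented limit and none of the real x86 logic:

/* Hypothetical arch/<foo>/include/asm/pgtable.h excerpt (illustration only). */
#define __HAVE_ARCH_PFN_MODIFY_ALLOWED

/* Arbitrary example limit; a real port would derive this from the CPU. */
#define FOO_MAX_SAFE_PFN	(1UL << 36)

static inline bool pfn_modify_allowed(unsigned long pfn, pgprot_t prot)
{
	/*
	 * A real implementation would also inspect 'prot' (e.g. only reject
	 * not-present encodings) and the CPU bug state; this only shows where
	 * the PFN limit check plugs in.
	 */
	return pfn < FOO_MAX_SAFE_PFN;
}

static inline bool arch_has_pfn_modify_check(void)
{
	/* Tells mprotect_fixup() that the walk in mm/mprotect.c is worthwhile. */
	return true;
}
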
include/linux/cpu.h

@@ -61,6 +61,8 @@ extern ssize_t cpu_show_spectre_v2(struct device *dev,
 				    struct device_attribute *attr, char *buf);
 extern ssize_t cpu_show_spec_store_bypass(struct device *dev,
 					  struct device_attribute *attr, char *buf);
+extern ssize_t cpu_show_l1tf(struct device *dev,
+			     struct device_attribute *attr, char *buf);
 
 extern __printf(4, 5)
 struct device *cpu_device_create(struct device *parent, void *drvdata,

include/linux/mm.h

@@ -2103,6 +2103,8 @@ int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
 int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *);
 int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
 			unsigned long pfn);
+int vm_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
+			unsigned long pfn, pgprot_t pgprot);
 int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
 			unsigned long pfn);
 int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len);

include/linux/ring_buffer.h

@@ -162,6 +162,7 @@ void ring_buffer_record_enable(struct ring_buffer *buffer);
 void ring_buffer_record_off(struct ring_buffer *buffer);
 void ring_buffer_record_on(struct ring_buffer *buffer);
 int ring_buffer_record_is_on(struct ring_buffer *buffer);
+int ring_buffer_record_is_set_on(struct ring_buffer *buffer);
 void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu);
 void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu);
 

include/linux/swapfile.h

@@ -14,5 +14,7 @@ extern int try_to_unuse(unsigned int, bool, unsigned long);
 extern int swap_ratio(struct swap_info_struct **si);
 extern void setup_swap_ratio(struct swap_info_struct *p, int prio);
 extern bool is_swap_ratio_group(int prio);
+extern unsigned long generic_max_swapfile_size(void);
+extern unsigned long max_swapfile_size(void);
 
 #endif /* _LINUX_SWAPFILE_H */

include/linux/thread_info.h

@@ -26,11 +26,7 @@
 
 #ifdef __KERNEL__
 
-#ifdef CONFIG_DEBUG_STACK_USAGE
-# define THREADINFO_GFP		(GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO)
-#else
-# define THREADINFO_GFP		(GFP_KERNEL | __GFP_NOTRACK)
-#endif
+#define THREADINFO_GFP		(GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO)
 
 /*
  * flag set/clear/test wrappers

include/rdma/ib_verbs.h

@@ -3007,6 +3007,20 @@ static inline int ib_check_mr_access(int flags)
 	return 0;
 }
 
+static inline bool ib_access_writable(int access_flags)
+{
+	/*
+	 * We have writable memory backing the MR if any of the following
+	 * access flags are set. "Local write" and "remote write" obviously
+	 * require write access. "Remote atomic" can do things like fetch and
+	 * add, which will modify memory, and "MW bind" can change permissions
+	 * by binding a window.
+	 */
+	return access_flags &
+		(IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE |
+		 IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_MW_BIND);
+}
+
 /**
  * ib_check_mr_status: lightweight check of MR status.
  * This routine may provide status checks on a selected

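ib_access_writable() is a pure flag test, so its behaviour is easy to check outside the kernel. The stand-alone program below re-creates it; the IB_ACCESS_* values mirror enum ib_access_flags in include/rdma/ib_verbs.h and should be treated as an assumption of the sketch:

#include <stdio.h>

#define IB_ACCESS_LOCAL_WRITE	(1 << 0)
#define IB_ACCESS_REMOTE_WRITE	(1 << 1)
#define IB_ACCESS_REMOTE_READ	(1 << 2)
#define IB_ACCESS_REMOTE_ATOMIC	(1 << 3)
#define IB_ACCESS_MW_BIND	(1 << 4)

/* Same expression as the new helper above. */
static int ib_access_writable(int access_flags)
{
	return access_flags &
		(IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE |
		 IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_MW_BIND);
}

int main(void)
{
	/* Read-only registration: no writable backing memory required. */
	printf("REMOTE_READ only     -> %d\n",
	       !!ib_access_writable(IB_ACCESS_REMOTE_READ));		/* 0 */
	/* Remote atomics modify memory, so they count as writable. */
	printf("READ | REMOTE_ATOMIC -> %d\n",
	       !!ib_access_writable(IB_ACCESS_REMOTE_READ |
				    IB_ACCESS_REMOTE_ATOMIC));		/* 1 */
	return 0;
}
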
kernel/irq/manage.c

@@ -1030,6 +1030,13 @@ static int irq_setup_forced_threading(struct irqaction *new)
 	if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT))
 		return 0;
 
+	/*
+	 * No further action required for interrupts which are requested as
+	 * threaded interrupts already
+	 */
+	if (new->handler == irq_default_primary_handler)
+		return 0;
+
 	new->flags |= IRQF_ONESHOT;
 
 	/*
@@ -1037,7 +1044,7 @@ static int irq_setup_forced_threading(struct irqaction *new)
 	 * thread handler. We force thread them as well by creating a
 	 * secondary action.
 	 */
-	if (new->handler != irq_default_primary_handler && new->thread_fn) {
+	if (new->handler && new->thread_fn) {
 		/* Allocate the secondary action */
 		new->secondary = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
 		if (!new->secondary)

kernel/time/tick-sched.c

@@ -591,7 +591,7 @@ static void tick_nohz_restart(struct tick_sched *ts, ktime_t now)
 
 static inline bool local_timer_softirq_pending(void)
 {
-	return local_softirq_pending() & TIMER_SOFTIRQ;
+	return local_softirq_pending() & BIT(TIMER_SOFTIRQ);
 }
 
 static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,

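The fix matters because TIMER_SOFTIRQ is a softirq index, not a bit mask: masking the pending word with the index (1) really tests the HI_SOFTIRQ bit. A small stand-alone check, with the softirq indices assumed to match include/linux/interrupt.h (HI=0, TIMER=1):

#include <stdio.h>

#define BIT(nr)	(1UL << (nr))

enum { HI_SOFTIRQ = 0, TIMER_SOFTIRQ, NET_TX_SOFTIRQ };

int main(void)
{
	unsigned long pending = BIT(TIMER_SOFTIRQ);	/* only the timer softirq raised */

	/* Old check: masks with the index 1, i.e. tests the HI_SOFTIRQ bit. */
	printf("old: %lu\n", pending & TIMER_SOFTIRQ);		/* 0 -> pending timer missed */
	/* Fixed check: masks with the timer bit itself. */
	printf("new: %lu\n", pending & BIT(TIMER_SOFTIRQ));	/* 2 -> pending timer seen */
	return 0;
}
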
kernel/trace/ring_buffer.c

@@ -3141,6 +3141,22 @@ int ring_buffer_record_is_on(struct ring_buffer *buffer)
 	return !atomic_read(&buffer->record_disabled);
 }
 
+/**
+ * ring_buffer_record_is_set_on - return true if the ring buffer is set writable
+ * @buffer: The ring buffer to see if write is set enabled
+ *
+ * Returns true if the ring buffer is set writable by ring_buffer_record_on().
+ * Note that this does NOT mean it is in a writable state.
+ *
+ * It may return true when the ring buffer has been disabled by
+ * ring_buffer_record_disable(), as that is a temporary disabling of
+ * the ring buffer.
+ */
+int ring_buffer_record_is_set_on(struct ring_buffer *buffer)
+{
+	return !(atomic_read(&buffer->record_disabled) & RB_BUFFER_OFF);
+}
+
 /**
  * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
  * @buffer: The ring buffer to stop writes to.

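ring_buffer_record_is_set_on() ignores the transient disable count and only looks at the persistent RB_BUFFER_OFF flag, which is exactly what update_max_tr() needs when it inherits the setting (see the trace.c hunk below). A stand-alone model; RB_BUFFER_OFF is assumed to be the internal (1 << 20) flag from kernel/trace/ring_buffer.c:

#include <stdio.h>

#define RB_BUFFER_OFF	(1 << 20)	/* assumed value of the private flag */

static int is_on(int record_disabled)
{
	return !record_disabled;			/* ring_buffer_record_is_on() */
}

static int is_set_on(int record_disabled)
{
	return !(record_disabled & RB_BUFFER_OFF);	/* ring_buffer_record_is_set_on() */
}

int main(void)
{
	int temporarily_disabled = 2;		/* nested record_disable() calls */
	int switched_off = RB_BUFFER_OFF;	/* ring_buffer_record_off() */

	printf("temp disable: is_on=%d is_set_on=%d\n",
	       is_on(temporarily_disabled), is_set_on(temporarily_disabled));	/* 0 1 */
	printf("record_off  : is_on=%d is_set_on=%d\n",
	       is_on(switched_off), is_set_on(switched_off));			/* 0 0 */
	return 0;
}
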
kernel/trace/trace.c

@@ -1094,6 +1094,12 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
 
 	arch_spin_lock(&tr->max_lock);
 
+	/* Inherit the recordable setting from trace_buffer */
+	if (ring_buffer_record_is_set_on(tr->trace_buffer.buffer))
+		ring_buffer_record_on(tr->max_buffer.buffer);
+	else
+		ring_buffer_record_off(tr->max_buffer.buffer);
+
 	buf = tr->trace_buffer.buffer;
 	tr->trace_buffer.buffer = tr->max_buffer.buffer;
 	tr->max_buffer.buffer = buf;

mm/memory.c

@@ -1604,9 +1604,30 @@ out:
  */
 int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
 			unsigned long pfn)
+{
+	return vm_insert_pfn_prot(vma, addr, pfn, vma->vm_page_prot);
+}
+EXPORT_SYMBOL(vm_insert_pfn);
+
+/**
+ * vm_insert_pfn_prot - insert single pfn into user vma with specified pgprot
+ * @vma: user vma to map to
+ * @addr: target user address of this page
+ * @pfn: source kernel pfn
+ * @pgprot: pgprot flags for the inserted page
+ *
+ * This is exactly like vm_insert_pfn, except that it allows drivers to
+ * to override pgprot on a per-page basis.
+ *
+ * This only makes sense for IO mappings, and it makes no sense for
+ * cow mappings. In general, using multiple vmas is preferable;
+ * vm_insert_pfn_prot should only be used if using multiple VMAs is
+ * impractical.
+ */
+int vm_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
+			unsigned long pfn, pgprot_t pgprot)
 {
 	int ret;
-	pgprot_t pgprot = vma->vm_page_prot;
 	/*
 	 * Technically, architectures with pte_special can avoid all these
 	 * restrictions (same for remap_pfn_range). However we would like
@@ -1624,19 +1645,29 @@ int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
 	if (track_pfn_insert(vma, &pgprot, pfn))
 		return -EINVAL;
 
+	if (!pfn_modify_allowed(pfn, pgprot))
+		return -EACCES;
+
 	ret = insert_pfn(vma, addr, pfn, pgprot);
 
 	return ret;
 }
-EXPORT_SYMBOL(vm_insert_pfn);
+EXPORT_SYMBOL(vm_insert_pfn_prot);
 
 int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
 			unsigned long pfn)
 {
+	pgprot_t pgprot = vma->vm_page_prot;
+
 	BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
 
 	if (addr < vma->vm_start || addr >= vma->vm_end)
 		return -EFAULT;
+	if (track_pfn_insert(vma, &pgprot, pfn))
+		return -EINVAL;
 
+	if (!pfn_modify_allowed(pfn, pgprot))
+		return -EACCES;
+
 	/*
 	 * If we don't have pte special, then we have to use the pfn_valid()
@@ -1649,9 +1680,9 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
 		struct page *page;
 
 		page = pfn_to_page(pfn);
-		return insert_page(vma, addr, page, vma->vm_page_prot);
+		return insert_page(vma, addr, page, pgprot);
 	}
-	return insert_pfn(vma, addr, pfn, vma->vm_page_prot);
+	return insert_pfn(vma, addr, pfn, pgprot);
 }
 EXPORT_SYMBOL(vm_insert_mixed);
 
@@ -1666,6 +1697,7 @@ static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd,
 {
 	pte_t *pte;
 	spinlock_t *ptl;
+	int err = 0;
 
 	pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
 	if (!pte)
@@ -1673,12 +1705,16 @@ static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd,
 	arch_enter_lazy_mmu_mode();
 	do {
 		BUG_ON(!pte_none(*pte));
+		if (!pfn_modify_allowed(pfn, prot)) {
+			err = -EACCES;
+			break;
+		}
 		set_pte_at(mm, addr, pte, pte_mkspecial(pfn_pte(pfn, prot)));
 		pfn++;
 	} while (pte++, addr += PAGE_SIZE, addr != end);
 	arch_leave_lazy_mmu_mode();
 	pte_unmap_unlock(pte - 1, ptl);
-	return 0;
+	return err;
 }
 
 static inline int remap_pmd_range(struct mm_struct *mm, pud_t *pud,
@@ -1687,6 +1723,7 @@ static inline int remap_pmd_range(struct mm_struct *mm, pud_t *pud,
 {
 	pmd_t *pmd;
 	unsigned long next;
+	int err;
 
 	pfn -= addr >> PAGE_SHIFT;
 	pmd = pmd_alloc(mm, pud, addr);
@@ -1695,9 +1732,10 @@ static inline int remap_pmd_range(struct mm_struct *mm, pud_t *pud,
 	VM_BUG_ON(pmd_trans_huge(*pmd));
 	do {
 		next = pmd_addr_end(addr, end);
-		if (remap_pte_range(mm, pmd, addr, next,
-				pfn + (addr >> PAGE_SHIFT), prot))
-			return -ENOMEM;
+		err = remap_pte_range(mm, pmd, addr, next,
+				pfn + (addr >> PAGE_SHIFT), prot);
+		if (err)
+			return err;
 	} while (pmd++, addr = next, addr != end);
 	return 0;
 }
@@ -1708,6 +1746,7 @@ static inline int remap_pud_range(struct mm_struct *mm, pgd_t *pgd,
 {
 	pud_t *pud;
 	unsigned long next;
+	int err;
 
 	pfn -= addr >> PAGE_SHIFT;
 	pud = pud_alloc(mm, pgd, addr);
@@ -1715,9 +1754,10 @@ static inline int remap_pud_range(struct mm_struct *mm, pgd_t *pgd,
 		return -ENOMEM;
 	do {
 		next = pud_addr_end(addr, end);
-		if (remap_pmd_range(mm, pud, addr, next,
-				pfn + (addr >> PAGE_SHIFT), prot))
-			return -ENOMEM;
+		err = remap_pmd_range(mm, pud, addr, next,
+				pfn + (addr >> PAGE_SHIFT), prot);
+		if (err)
+			return err;
 	} while (pud++, addr = next, addr != end);
 	return 0;
 }

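A typical consumer of the new vm_insert_pfn_prot() is a driver fault handler that wants a different cacheability for one page than the VMA default. This is a hedged sketch only (4.4-era ->fault signature; foo_dev, its base_pfn field and the write-combine choice are invented, and the snippet is not buildable outside a complete driver):

#include <linux/mm.h>

struct foo_dev {
	unsigned long base_pfn;		/* first PFN of the device aperture */
};

static int foo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct foo_dev *dev = vma->vm_private_data;
	unsigned long addr = (unsigned long)vmf->virtual_address;
	int ret;

	/* Map this one page write-combined, leaving the rest of the VMA alone. */
	ret = vm_insert_pfn_prot(vma, addr, dev->base_pfn + vmf->pgoff,
				 pgprot_writecombine(vma->vm_page_prot));
	if (ret == -ENOMEM)
		return VM_FAULT_OOM;
	if (ret < 0)			/* -EINVAL, -EACCES, ... */
		return VM_FAULT_SIGBUS;
	return VM_FAULT_NOPAGE;
}
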
mm/mprotect.c

@@ -255,6 +255,42 @@ unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
 	return pages;
 }
 
+static int prot_none_pte_entry(pte_t *pte, unsigned long addr,
+			       unsigned long next, struct mm_walk *walk)
+{
+	return pfn_modify_allowed(pte_pfn(*pte), *(pgprot_t *)(walk->private)) ?
+		0 : -EACCES;
+}
+
+static int prot_none_hugetlb_entry(pte_t *pte, unsigned long hmask,
+				   unsigned long addr, unsigned long next,
+				   struct mm_walk *walk)
+{
+	return pfn_modify_allowed(pte_pfn(*pte), *(pgprot_t *)(walk->private)) ?
+		0 : -EACCES;
+}
+
+static int prot_none_test(unsigned long addr, unsigned long next,
+			  struct mm_walk *walk)
+{
+	return 0;
+}
+
+static int prot_none_walk(struct vm_area_struct *vma, unsigned long start,
+			  unsigned long end, unsigned long newflags)
+{
+	pgprot_t new_pgprot = vm_get_page_prot(newflags);
+	struct mm_walk prot_none_walk = {
+		.pte_entry = prot_none_pte_entry,
+		.hugetlb_entry = prot_none_hugetlb_entry,
+		.test_walk = prot_none_test,
+		.mm = current->mm,
+		.private = &new_pgprot,
+	};
+
+	return walk_page_range(start, end, &prot_none_walk);
+}
+
 int
 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
 	unsigned long start, unsigned long end, unsigned long newflags)
@@ -272,6 +308,19 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
 		return 0;
 	}
 
+	/*
+	 * Do PROT_NONE PFN permission checks here when we can still
+	 * bail out without undoing a lot of state. This is a rather
+	 * uncommon case, so doesn't need to be very optimized.
+	 */
+	if (arch_has_pfn_modify_check() &&
+	    (vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) &&
+	    (newflags & (VM_READ|VM_WRITE|VM_EXEC)) == 0) {
+		error = prot_none_walk(vma, start, end, newflags);
+		if (error)
+			return error;
+	}
+
 	/*
 	 * If we make a private mapping writable we increase our commit;
 	 * but (without finer accounting) cannot reduce our commit if we

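With prot_none_walk() in place, mprotect(PROT_NONE) on a PFN-mapped region can fail with EACCES instead of silently installing a not-present PTE for a disallowed PFN. A user-space sketch of that scenario; it needs root for /dev/mem, and whether the check actually triggers depends on the CPU and the architecture's pfn_modify_allowed():

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/dev/mem", O_RDONLY);	/* a typical VM_PFNMAP mapping source */
	void *p;

	if (fd < 0) {
		perror("open /dev/mem");
		return 1;
	}

	p = mmap(NULL, 4096, PROT_READ, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	/* On kernels with this series, this may now fail with EACCES if the
	 * backing PFN is one the architecture refuses to map not-present. */
	if (mprotect(p, 4096, PROT_NONE) < 0)
		printf("mprotect(PROT_NONE): %s\n", strerror(errno));
	else
		printf("mprotect(PROT_NONE): ok\n");

	munmap(p, 4096);
	close(fd);
	return 0;
}
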
mm/swapfile.c

@@ -2249,6 +2249,35 @@ static int claim_swapfile(struct swap_info_struct *p, struct inode *inode)
 	return 0;
 }
 
+
+/*
+ * Find out how many pages are allowed for a single swap device. There
+ * are two limiting factors:
+ * 1) the number of bits for the swap offset in the swp_entry_t type, and
+ * 2) the number of bits in the swap pte, as defined by the different
+ * architectures.
+ *
+ * In order to find the largest possible bit mask, a swap entry with
+ * swap type 0 and swap offset ~0UL is created, encoded to a swap pte,
+ * decoded to a swp_entry_t again, and finally the swap offset is
+ * extracted.
+ *
+ * This will mask all the bits from the initial ~0UL mask that can't
+ * be encoded in either the swp_entry_t or the architecture definition
+ * of a swap pte.
+ */
+unsigned long generic_max_swapfile_size(void)
+{
+	return swp_offset(pte_to_swp_entry(
+			swp_entry_to_pte(swp_entry(0, ~0UL)))) + 1;
+}
+
+/* Can be overridden by an architecture for additional checks. */
+__weak unsigned long max_swapfile_size(void)
+{
+	return generic_max_swapfile_size();
+}
+
 static unsigned long read_swap_header(struct swap_info_struct *p,
 					union swap_header *swap_header,
 					struct inode *inode)
@@ -2284,22 +2313,7 @@ static unsigned long read_swap_header(struct swap_info_struct *p,
 	p->cluster_next = 1;
 	p->cluster_nr = 0;
 
-	/*
-	 * Find out how many pages are allowed for a single swap
-	 * device. There are two limiting factors: 1) the number
-	 * of bits for the swap offset in the swp_entry_t type, and
-	 * 2) the number of bits in the swap pte as defined by the
-	 * different architectures. In order to find the
-	 * largest possible bit mask, a swap entry with swap type 0
-	 * and swap offset ~0UL is created, encoded to a swap pte,
-	 * decoded to a swp_entry_t again, and finally the swap
-	 * offset is extracted. This will mask all the bits from
-	 * the initial ~0UL mask that can't be encoded in either
-	 * the swp_entry_t or the architecture definition of a
-	 * swap pte.
-	 */
-	maxpages = swp_offset(pte_to_swp_entry(
-			swp_entry_to_pte(swp_entry(0, ~0UL)))) + 1;
+	maxpages = max_swapfile_size();
 	last_page = swap_header->info.last_page;
 	if (!last_page) {
 		pr_warn("Empty swap-file\n");

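generic_max_swapfile_size() finds the limit by round-tripping an all-ones offset through the architecture's swap-pte encoding and seeing how many bits survive. The stand-alone mock below uses an invented 5-bit-type/50-bit-offset layout purely to show the mechanism; real layouts live in each architecture's pgtable headers:

#include <stdio.h>

#define MOCK_TYPE_BITS		5
#define MOCK_OFFSET_BITS	50

/* Mock "encode to swap pte": offset bits that don't fit are simply dropped. */
static unsigned long long mock_swp_entry_to_pte(unsigned type, unsigned long long off)
{
	off &= (1ULL << MOCK_OFFSET_BITS) - 1;
	return (off << MOCK_TYPE_BITS) | type;
}

static unsigned long long mock_pte_to_swp_offset(unsigned long long pte)
{
	return pte >> MOCK_TYPE_BITS;
}

int main(void)
{
	/* Same trick as generic_max_swapfile_size(): type 0, offset ~0, round-trip. */
	unsigned long long max_pages =
		mock_pte_to_swp_offset(mock_swp_entry_to_pte(0, ~0ULL)) + 1;

	printf("max swapfile size: %llu pages\n", max_pages);	/* 2^50 with this layout */
	return 0;
}
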
net/ipv4/Kconfig

@@ -354,6 +354,7 @@ config INET_ESP
 	select CRYPTO_CBC
 	select CRYPTO_SHA1
 	select CRYPTO_DES
+	select CRYPTO_ECHAINIV
 	---help---
 	  Support for IPsec ESP.
 

net/ipv6/Kconfig

@@ -69,6 +69,7 @@ config INET6_ESP
 	select CRYPTO_CBC
 	select CRYPTO_SHA1
 	select CRYPTO_DES
+	select CRYPTO_ECHAINIV
 	---help---
 	  Support for IPsec ESP.
 

net/netlink/af_netlink.c

@@ -966,6 +966,11 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr,
 			return err;
 	}
 
+	if (nlk->ngroups == 0)
+		groups = 0;
+	else if (nlk->ngroups < 8*sizeof(groups))
+		groups &= (1UL << nlk->ngroups) - 1;
+
 	bound = nlk->bound;
 	if (bound) {
 		/* Ensure nlk->portid is up-to-date. */

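The added checks exist because "1UL << ngroups" is undefined when ngroups equals the word width, and ngroups can legitimately be 0 or the full width here; the mask can only be computed for values strictly in between. A stand-alone model of the guarded masking:

#include <stdio.h>

static unsigned long mask_groups(unsigned long groups, unsigned int ngroups)
{
	if (ngroups == 0)
		groups = 0;				/* no groups exist at all */
	else if (ngroups < 8 * sizeof(groups))
		groups &= (1UL << ngroups) - 1;		/* drop non-existent groups */
	/* else: ngroups covers the whole word, keep every bit (no UB shift) */
	return groups;
}

int main(void)
{
	printf("%#lx\n", mask_groups(~0UL, 0));		/* 0 */
	printf("%#lx\n", mask_groups(~0UL, 3));		/* 0x7 */
	printf("%#lx\n", mask_groups(~0UL, 64));	/* all bits kept */
	return 0;
}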