Merge tag 'v4.4.14' into linux-linaro-lsk-v4.4
This is the 4.4.14 stable release
commit ffc4aa8f52
87 changed files with 1444 additions and 1090 deletions
Makefile

@@ -1,6 +1,6 @@
 VERSION = 4
 PATCHLEVEL = 4
-SUBLEVEL = 13
+SUBLEVEL = 14
 EXTRAVERSION =
 NAME = Blurry Fish Butt
 
@@ -733,8 +733,8 @@ static int vfp_set(struct task_struct *target,
 	if (ret)
 		return ret;
 
-	vfp_flush_hwstate(thread);
 	thread->vfpstate.hard = new_vfp;
+	vfp_flush_hwstate(thread);
 
 	return 0;
 }
@@ -160,14 +160,14 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
 #define STACK_RND_MASK			(0x3ffff >> (PAGE_SHIFT - 12))
 #endif
 
-#ifdef CONFIG_COMPAT
-
 #ifdef __AARCH64EB__
 #define COMPAT_ELF_PLATFORM		("v8b")
 #else
 #define COMPAT_ELF_PLATFORM		("v8l")
 #endif
 
+#ifdef CONFIG_COMPAT
+
 #define COMPAT_ELF_ET_DYN_BASE		(2 * TASK_SIZE_32 / 3)
 
 /* AArch32 registers. */
@@ -22,6 +22,8 @@
 
 #include <linux/bitops.h>
 #include <linux/bug.h>
+#include <linux/compat.h>
+#include <linux/elf.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/personality.h>

@@ -102,6 +104,7 @@ static const char *const compat_hwcap2_str[] = {
 static int c_show(struct seq_file *m, void *v)
 {
 	int i, j;
+	bool compat = personality(current->personality) == PER_LINUX32;
 
 	for_each_online_cpu(i) {
 		struct cpuinfo_arm64 *cpuinfo = &per_cpu(cpu_data, i);

@@ -113,6 +116,9 @@ static int c_show(struct seq_file *m, void *v)
 		 * "processor".  Give glibc what it expects.
 		 */
 		seq_printf(m, "processor\t: %d\n", i);
+		if (compat)
+			seq_printf(m, "model name\t: ARMv8 Processor rev %d (%s)\n",
+				   MIDR_REVISION(midr), COMPAT_ELF_PLATFORM);
 
 		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
 			   loops_per_jiffy / (500000UL/HZ),

@@ -125,7 +131,7 @@ static int c_show(struct seq_file *m, void *v)
 		 * software which does already (at least for 32-bit).
 		 */
 		seq_puts(m, "Features\t:");
-		if (personality(current->personality) == PER_LINUX32) {
+		if (compat) {
 #ifdef CONFIG_COMPAT
 			for (j = 0; compat_hwcap_str[j]; j++)
 				if (compat_elf_hwcap & (1 << j))
@@ -109,7 +109,7 @@ int ptep_set_access_flags(struct vm_area_struct *vma,
 	 * PTE_RDONLY is cleared by default in the asm below, so set it in
 	 * back if necessary (read-only or clean PTE).
 	 */
-	if (!pte_write(entry) || !dirty)
+	if (!pte_write(entry) || !pte_sw_dirty(entry))
 		pte_val(entry) |= PTE_RDONLY;
 
 	/*
@@ -45,7 +45,7 @@ extern unsigned int vced_count, vcei_count;
  * User space process size: 2GB. This is hardcoded into a few places,
  * so don't change it unless you know what you are doing.
  */
-#define TASK_SIZE	0x7fff8000UL
+#define TASK_SIZE	0x80000000UL
 #endif
 
 #define STACK_TOP_MAX	TASK_SIZE
@@ -666,7 +666,7 @@ void handle_unaligned(struct pt_regs *regs)
 		break;
 	}
 
-	if (modify && R1(regs->iir))
+	if (ret == 0 && modify && R1(regs->iir))
 		regs->gr[R1(regs->iir)] = newbase;
 

@@ -677,6 +677,14 @@ void handle_unaligned(struct pt_regs *regs)
 
 	if (ret)
 	{
+		/*
+		 * The unaligned handler failed.
+		 * If we were called by __get_user() or __put_user() jump
+		 * to it's exception fixup handler instead of crashing.
+		 */
+		if (!user_mode(regs) && fixup_exception(regs))
+			return;
+
 		printk(KERN_CRIT "Unaligned handler failed, ret = %d\n", ret);
 		die_if_kernel("Unaligned data reference", regs, 28);
 
@@ -707,7 +707,7 @@
 #define   MMCR0_FCWAIT	0x00000002UL /* freeze counter in WAIT state */
 #define   MMCR0_FCHV	0x00000001UL /* freeze conditions in hypervisor mode */
 #define SPRN_MMCR1	798
-#define SPRN_MMCR2	769
+#define SPRN_MMCR2	785
 #define SPRN_MMCRA	0x312
 #define   MMCRA_SDSYNC	0x80000000UL /* SDAR synced with SIAR */
 #define   MMCRA_SDAR_DCACHE_MISS 0x40000000UL

@@ -744,13 +744,13 @@
 #define SPRN_PMC6	792
 #define SPRN_PMC7	793
 #define SPRN_PMC8	794
-#define SPRN_SIAR	780
-#define SPRN_SDAR	781
 #define SPRN_SIER	784
 #define   SIER_SIPR		0x2000000	/* Sampled MSR_PR */
 #define   SIER_SIHV		0x1000000	/* Sampled MSR_HV */
 #define   SIER_SIAR_VALID	0x0400000	/* SIAR contents valid */
 #define   SIER_SDAR_VALID	0x0200000	/* SDAR contents valid */
+#define SPRN_SIAR	796
+#define SPRN_SDAR	797
 #define SPRN_TACR	888
 #define SPRN_TCSCR	889
 #define SPRN_CSIGR	890
@@ -655,6 +655,7 @@ unsigned char ibm_architecture_vec[] = {
 	W(0xffff0000), W(0x003e0000),	/* POWER6 */
 	W(0xffff0000), W(0x003f0000),	/* POWER7 */
 	W(0xffff0000), W(0x004b0000),	/* POWER8E */
+	W(0xffff0000), W(0x004c0000),	/* POWER8NVL */
 	W(0xffff0000), W(0x004d0000),	/* POWER8 */
 	W(0xffffffff), W(0x0f000004),	/* all 2.07-compliant */
 	W(0xffffffff), W(0x0f000003),	/* all 2.06-compliant */
@@ -615,29 +615,50 @@ static int pseries_eeh_configure_bridge(struct eeh_pe *pe)
 {
 	int config_addr;
 	int ret;
+	/* Waiting 0.2s maximum before skipping configuration */
+	int max_wait = 200;
 
 	/* Figure out the PE address */
 	config_addr = pe->config_addr;
 	if (pe->addr)
 		config_addr = pe->addr;
 
-	/* Use new configure-pe function, if supported */
-	if (ibm_configure_pe != RTAS_UNKNOWN_SERVICE) {
-		ret = rtas_call(ibm_configure_pe, 3, 1, NULL,
-				config_addr, BUID_HI(pe->phb->buid),
-				BUID_LO(pe->phb->buid));
-	} else if (ibm_configure_bridge != RTAS_UNKNOWN_SERVICE) {
-		ret = rtas_call(ibm_configure_bridge, 3, 1, NULL,
-				config_addr, BUID_HI(pe->phb->buid),
-				BUID_LO(pe->phb->buid));
-	} else {
-		return -EFAULT;
+	while (max_wait > 0) {
+		/* Use new configure-pe function, if supported */
+		if (ibm_configure_pe != RTAS_UNKNOWN_SERVICE) {
+			ret = rtas_call(ibm_configure_pe, 3, 1, NULL,
+					config_addr, BUID_HI(pe->phb->buid),
+					BUID_LO(pe->phb->buid));
+		} else if (ibm_configure_bridge != RTAS_UNKNOWN_SERVICE) {
+			ret = rtas_call(ibm_configure_bridge, 3, 1, NULL,
+					config_addr, BUID_HI(pe->phb->buid),
+					BUID_LO(pe->phb->buid));
+		} else {
+			return -EFAULT;
+		}
+
+		if (!ret)
+			return ret;
+
+		/*
+		 * If RTAS returns a delay value that's above 100ms, cut it
+		 * down to 100ms in case firmware made a mistake.  For more
+		 * on how these delay values work see rtas_busy_delay_time
+		 */
+		if (ret > RTAS_EXTENDED_DELAY_MIN+2 &&
+		    ret <= RTAS_EXTENDED_DELAY_MAX)
+			ret = RTAS_EXTENDED_DELAY_MIN+2;
+
+		max_wait -= rtas_busy_delay_time(ret);
+
+		if (max_wait < 0)
+			break;
+
+		rtas_busy_delay(ret);
 	}
 
-	if (ret)
-		pr_warn("%s: Unable to configure bridge PHB#%d-PE#%x (%d)\n",
-			__func__, pe->phb->global_number, pe->addr, ret);
-
+	pr_warn("%s: Unable to configure bridge PHB#%d-PE#%x (%d)\n",
+		__func__, pe->phb->global_number, pe->addr, ret);
 	return ret;
 }
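The hunk above replaces a single firmware call with a bounded retry loop: keep retrying while RTAS reports busy, honour its delay hint, but cap the total wait at 200 ms. As an aside, here is a self-contained user-space sketch of that retry-with-budget pattern; try_configure() and the fixed 50 ms delay hint are invented stand-ins for the RTAS call and rtas_busy_delay_time(), not kernel code.

#include <stdio.h>
#include <unistd.h>

/* Pretend firmware call: reports "busy" (1) until the 4th attempt. */
static int try_configure(int attempt)
{
        return attempt < 3 ? 1 : 0;
}

int main(void)
{
        int max_wait = 200;     /* total budget in ms, as in the patch */
        int attempt = 0, ret = -1;

        while (max_wait > 0) {
                ret = try_configure(attempt++);
                if (!ret)
                        break;                  /* configured successfully */

                max_wait -= 50;                 /* charge the delay hint */
                if (max_wait < 0)
                        break;                  /* budget exhausted: give up */
                usleep(50 * 1000);              /* sleep out the hint */
        }
        printf("ret=%d after %d attempt(s)\n", ret, attempt);
        return ret;
}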
@@ -37,7 +37,7 @@ extern u8 sk_load_word[], sk_load_half[], sk_load_byte[];
  *	      |     |     |
  *	      +---------------+ |
  *	      | 8 byte skbp   | |
- * R15+170 -> +---------------+ |
+ * R15+176 -> +---------------+ |
  *	      | 8 byte hlen   | |
  * R15+168 -> +---------------+ |
  *	      | 4 byte align  | |

@@ -58,7 +58,7 @@ extern u8 sk_load_word[], sk_load_half[], sk_load_byte[];
 #define STK_OFF		(STK_SPACE - STK_160_UNUSED)
 #define STK_OFF_TMP	160	/* Offset of tmp buffer on stack */
 #define STK_OFF_HLEN	168	/* Offset of SKB header length on stack */
-#define STK_OFF_SKBP	170	/* Offset of SKB pointer on stack */
+#define STK_OFF_SKBP	176	/* Offset of SKB pointer on stack */
 
 #define STK_OFF_R6	(160 - 11 * 8)	/* Offset of r6 on stack */
 #define STK_OFF_TCCNT	(160 - 12 * 8)	/* Offset of tail_call_cnt on stack */
@@ -45,7 +45,7 @@ struct bpf_jit {
 	int labels[1];		/* Labels for local jumps */
 };
 
-#define BPF_SIZE_MAX	0x7ffff	/* Max size for program (20 bit signed displ) */
+#define BPF_SIZE_MAX	0xffff	/* Max size for program (16 bit branches) */
 
 #define SEEN_SKB	1	/* skb access */
 #define SEEN_MEM	2	/* use mem[] for temporary storage */

@@ -446,7 +446,7 @@ static void bpf_jit_prologue(struct bpf_jit *jit, bool is_classic)
 		emit_load_skb_data_hlen(jit);
 	if (jit->seen & SEEN_SKB_CHANGE)
 		/* stg %b1,ST_OFF_SKBP(%r0,%r15) */
-		EMIT6_DISP_LH(0xe3000000, 0x0024, REG_W1, REG_0, REG_15,
+		EMIT6_DISP_LH(0xe3000000, 0x0024, BPF_REG_1, REG_0, REG_15,
			      STK_OFF_SKBP);
 	/* Clear A (%b0) and X (%b7) registers for converted BPF programs */
 	if (is_classic) {
@@ -15,6 +15,10 @@
 
 #define	PTREGS_OFF	(STACK_BIAS + STACKFRAME_SZ)
 
+#define	RTRAP_PSTATE		(PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV|PSTATE_IE)
+#define	RTRAP_PSTATE_IRQOFF	(PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV)
+#define RTRAP_PSTATE_AG_IRQOFF	(PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV|PSTATE_AG)
+
 #define __CHEETAH_ID	0x003e0014
 #define __JALAPENO_ID	0x003e0016
 #define __SERRANO_ID	0x003e0022
@@ -375,7 +375,7 @@ static inline pgprot_t pgprot_noncached(pgprot_t prot)
 #define pgprot_noncached pgprot_noncached
 
 #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
-static inline pte_t pte_mkhuge(pte_t pte)
+static inline unsigned long __pte_huge_mask(void)
 {
 	unsigned long mask;
 

@@ -390,8 +390,19 @@ static inline pte_t pte_mkhuge(pte_t pte)
 	: "=r" (mask)
 	: "i" (_PAGE_SZHUGE_4U), "i" (_PAGE_SZHUGE_4V));
 
-	return __pte(pte_val(pte) | mask);
+	return mask;
+}
+
+static inline pte_t pte_mkhuge(pte_t pte)
+{
+	return __pte(pte_val(pte) | __pte_huge_mask());
+}
+
+static inline bool is_hugetlb_pte(pte_t pte)
+{
+	return !!(pte_val(pte) & __pte_huge_mask());
 }
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 static inline pmd_t pmd_mkhuge(pmd_t pmd)
 {

@@ -403,6 +414,11 @@ static inline pmd_t pmd_mkhuge(pmd_t pmd)
 	return __pmd(pte_val(pte));
 }
 #endif
+#else
+static inline bool is_hugetlb_pte(pte_t pte)
+{
+	return false;
+}
 #endif
 
 static inline pte_t pte_mkdirty(pte_t pte)

@@ -865,6 +881,19 @@ static inline unsigned long pud_pfn(pud_t pud)
 void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
 		   pte_t *ptep, pte_t orig, int fullmm);
 
+static void maybe_tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
+				pte_t *ptep, pte_t orig, int fullmm)
+{
+	/* It is more efficient to let flush_tlb_kernel_range()
+	 * handle init_mm tlb flushes.
+	 *
+	 * SUN4V NOTE: _PAGE_VALID is the same value in both the SUN4U
+	 *             and SUN4V pte layout, so this inline test is fine.
+	 */
+	if (likely(mm != &init_mm) && pte_accessible(mm, orig))
+		tlb_batch_add(mm, vaddr, ptep, orig, fullmm);
+}
+
 #define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
 static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
 					    unsigned long addr,

@@ -881,15 +910,7 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
 	pte_t orig = *ptep;
 
 	*ptep = pte;
-
-	/* It is more efficient to let flush_tlb_kernel_range()
-	 * handle init_mm tlb flushes.
-	 *
-	 * SUN4V NOTE: _PAGE_VALID is the same value in both the SUN4U
-	 *             and SUN4V pte layout, so this inline test is fine.
-	 */
-	if (likely(mm != &init_mm) && pte_accessible(mm, orig))
-		tlb_batch_add(mm, addr, ptep, orig, fullmm);
+	maybe_tlb_batch_add(mm, addr, ptep, orig, fullmm);
 }
 
 #define set_pte_at(mm,addr,ptep,pte)	\
@@ -8,6 +8,7 @@
 #define TLB_BATCH_NR	192
 
 struct tlb_batch {
+	bool huge;
 	struct mm_struct *mm;
 	unsigned long tlb_nr;
 	unsigned long active;

@@ -16,7 +17,7 @@ struct tlb_batch {
 
 void flush_tsb_kernel_range(unsigned long start, unsigned long end);
 void flush_tsb_user(struct tlb_batch *tb);
-void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr);
+void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr, bool huge);
 
 /* TLB flush operations. */
 
@@ -589,8 +589,8 @@ user_rtt_fill_64bit:					\
	 restored;					\
	nop; nop; nop; nop; nop; nop;			\
	nop; nop; nop; nop; nop;			\
-	ba,a,pt	%xcc, user_rtt_fill_fixup;		\
-	ba,a,pt	%xcc, user_rtt_fill_fixup;		\
+	ba,a,pt	%xcc, user_rtt_fill_fixup_dax;		\
+	ba,a,pt	%xcc, user_rtt_fill_fixup_mna;		\
	ba,a,pt	%xcc, user_rtt_fill_fixup;
 

@@ -652,8 +652,8 @@ user_rtt_fill_32bit:					\
	 restored;					\
	nop; nop; nop; nop; nop;			\
	nop; nop; nop;					\
-	ba,a,pt	%xcc, user_rtt_fill_fixup;		\
-	ba,a,pt	%xcc, user_rtt_fill_fixup;		\
+	ba,a,pt	%xcc, user_rtt_fill_fixup_dax;		\
+	ba,a,pt	%xcc, user_rtt_fill_fixup_mna;		\
	ba,a,pt	%xcc, user_rtt_fill_fixup;
 
@@ -21,6 +21,7 @@ CFLAGS_REMOVE_perf_event.o := -pg
 CFLAGS_REMOVE_pcr.o := -pg
 endif
 
+obj-$(CONFIG_SPARC64)   += urtt_fill.o
 obj-$(CONFIG_SPARC32)   += entry.o wof.o wuf.o
 obj-$(CONFIG_SPARC32)   += etrap_32.o
 obj-$(CONFIG_SPARC32)   += rtrap_32.o
@@ -214,8 +214,7 @@ do_dcpe_tl1_nonfatal:	/* Ok we may use interrupt globals safely. */
	subcc		%g1, %g2, %g1		! Next cacheline
	bge,pt		%icc, 1b
	 nop
-	ba,pt		%xcc, dcpe_icpe_tl1_common
-	 nop
+	ba,a,pt		%xcc, dcpe_icpe_tl1_common
 
 do_dcpe_tl1_fatal:
	sethi		%hi(1f), %g7

@@ -224,8 +223,7 @@ do_dcpe_tl1_fatal:
	mov		0x2, %o0
	call		cheetah_plus_parity_error
	 add		%sp, PTREGS_OFF, %o1
-	ba,pt		%xcc, rtrap
-	 nop
+	ba,a,pt		%xcc, rtrap
	.size		do_dcpe_tl1,.-do_dcpe_tl1
 
	.globl		do_icpe_tl1

@@ -259,8 +257,7 @@ do_icpe_tl1_nonfatal:	/* Ok we may use interrupt globals safely. */
	subcc		%g1, %g2, %g1
	bge,pt		%icc, 1b
	 nop
-	ba,pt		%xcc, dcpe_icpe_tl1_common
-	 nop
+	ba,a,pt		%xcc, dcpe_icpe_tl1_common
 
 do_icpe_tl1_fatal:
	sethi		%hi(1f), %g7

@@ -269,8 +266,7 @@ do_icpe_tl1_fatal:
	mov		0x3, %o0
	call		cheetah_plus_parity_error
	 add		%sp, PTREGS_OFF, %o1
-	ba,pt		%xcc, rtrap
-	 nop
+	ba,a,pt		%xcc, rtrap
	.size		do_icpe_tl1,.-do_icpe_tl1
 
	.type		dcpe_icpe_tl1_common,#function

@@ -456,7 +452,7 @@ __cheetah_log_error:
	 cmp		%g2, 0x63
	be		c_cee
	 nop
-	ba,pt		%xcc, c_deferred
+	ba,a,pt		%xcc, c_deferred
	.size		__cheetah_log_error,.-__cheetah_log_error
 
 /* Cheetah FECC trap handling, we get here from tl{0,1}_fecc
@@ -948,7 +948,24 @@ linux_syscall_trace:
	cmp	%o0, 0
	bne	3f
	 mov	-ENOSYS, %o0
+
+	/* Syscall tracing can modify the registers.  */
+	ld	[%sp + STACKFRAME_SZ + PT_G1], %g1
+	sethi	%hi(sys_call_table), %l7
+	ld	[%sp + STACKFRAME_SZ + PT_I0], %i0
+	or	%l7, %lo(sys_call_table), %l7
+	ld	[%sp + STACKFRAME_SZ + PT_I1], %i1
+	ld	[%sp + STACKFRAME_SZ + PT_I2], %i2
+	ld	[%sp + STACKFRAME_SZ + PT_I3], %i3
+	ld	[%sp + STACKFRAME_SZ + PT_I4], %i4
+	ld	[%sp + STACKFRAME_SZ + PT_I5], %i5
+
+	cmp	%g1, NR_syscalls
+	bgeu	3f
+	 mov	-ENOSYS, %o0
+
+	sll	%g1, 2, %l4
	mov	%i0, %o0
+	ld	[%l7 + %l4], %l7
	mov	%i1, %o1
	mov	%i2, %o2
	mov	%i3, %o3
|
@ -100,8 +100,8 @@ do_fpdis:
|
|||
fmuld %f0, %f2, %f26
|
||||
faddd %f0, %f2, %f28
|
||||
fmuld %f0, %f2, %f30
|
||||
b,pt %xcc, fpdis_exit
|
||||
nop
|
||||
ba,a,pt %xcc, fpdis_exit
|
||||
|
||||
2: andcc %g5, FPRS_DU, %g0
|
||||
bne,pt %icc, 3f
|
||||
fzero %f32
|
||||
|
@ -144,8 +144,8 @@ do_fpdis:
|
|||
fmuld %f32, %f34, %f58
|
||||
faddd %f32, %f34, %f60
|
||||
fmuld %f32, %f34, %f62
|
||||
ba,pt %xcc, fpdis_exit
|
||||
nop
|
||||
ba,a,pt %xcc, fpdis_exit
|
||||
|
||||
3: mov SECONDARY_CONTEXT, %g3
|
||||
add %g6, TI_FPREGS, %g1
|
||||
|
||||
|
@ -197,8 +197,7 @@ fpdis_exit2:
|
|||
fp_other_bounce:
|
||||
call do_fpother
|
||||
add %sp, PTREGS_OFF, %o0
|
||||
ba,pt %xcc, rtrap
|
||||
nop
|
||||
ba,a,pt %xcc, rtrap
|
||||
.size fp_other_bounce,.-fp_other_bounce
|
||||
|
||||
.align 32
|
||||
|
|
|
@@ -461,9 +461,8 @@ sun4v_chip_type:
	subcc	%g3, 1, %g3
	bne,pt	%xcc, 41b
	 add	%g1, 1, %g1
-	mov	SUN4V_CHIP_SPARC64X, %g4
	ba,pt	%xcc, 5f
-	 nop
+	 mov	SUN4V_CHIP_SPARC64X, %g4
 
 49:
	mov	SUN4V_CHIP_UNKNOWN, %g4

@@ -548,8 +547,7 @@ sun4u_init:
	stxa	%g0, [%g7] ASI_DMMU
	membar	#Sync
 
-	ba,pt	%xcc, sun4u_continue
-	 nop
+	ba,a,pt	%xcc, sun4u_continue
 
 sun4v_init:
	/* Set ctx 0 */

@@ -560,14 +558,12 @@ sun4v_init:
	mov	SECONDARY_CONTEXT, %g7
	stxa	%g0, [%g7] ASI_MMU
	membar	#Sync
-	ba,pt	%xcc, niagara_tlb_fixup
-	 nop
+	ba,a,pt	%xcc, niagara_tlb_fixup
 
 sun4u_continue:
	BRANCH_IF_ANY_CHEETAH(g1, g7, cheetah_tlb_fixup)
 
-	ba,pt	%xcc, spitfire_tlb_fixup
-	 nop
+	ba,a,pt	%xcc, spitfire_tlb_fixup
 
 niagara_tlb_fixup:
	mov	3, %g2	/* Set TLB type to hypervisor. */

@@ -639,8 +635,7 @@ niagara_patch:
	call	hypervisor_patch_cachetlbops
	 nop
 
-	ba,pt	%xcc, tlb_fixup_done
-	 nop
+	ba,a,pt	%xcc, tlb_fixup_done
 
 cheetah_tlb_fixup:
	mov	2, %g2	/* Set TLB type to cheetah+. */

@@ -659,8 +654,7 @@ cheetah_tlb_fixup:
	call	cheetah_patch_cachetlbops
	 nop
 
-	ba,pt	%xcc, tlb_fixup_done
-	 nop
+	ba,a,pt	%xcc, tlb_fixup_done
 
 spitfire_tlb_fixup:
	/* Set TLB type to spitfire. */

@@ -782,8 +776,7 @@ setup_trap_table:
	call	%o1
	 add	%sp, (2047 + 128), %o0
 
-	ba,pt	%xcc, 2f
-	 nop
+	ba,a,pt	%xcc, 2f
 
 1:	sethi	%hi(sparc64_ttable_tl0), %o0
	set	prom_set_trap_table_name, %g2

@@ -822,8 +815,7 @@ setup_trap_table:
 
	BRANCH_IF_ANY_CHEETAH(o2, o3, 1f)
 
-	ba,pt	%xcc, 2f
-	 nop
+	ba,a,pt	%xcc, 2f
 
	/* Disable STICK_INT interrupts. */
 1:
@@ -18,8 +18,7 @@ __do_privact:
 109:	 or		%g7, %lo(109b), %g7
	call		do_privact
	 add		%sp, PTREGS_OFF, %o0
-	ba,pt		%xcc, rtrap
-	 nop
+	ba,a,pt		%xcc, rtrap
	.size		__do_privact,.-__do_privact
 
	.type		do_mna,#function

@@ -46,8 +45,7 @@ do_mna:
	mov		%l5, %o2
	call		mem_address_unaligned
	 add		%sp, PTREGS_OFF, %o0
-	ba,pt		%xcc, rtrap
-	 nop
+	ba,a,pt		%xcc, rtrap
	.size		do_mna,.-do_mna
 
	.type		do_lddfmna,#function

@@ -65,8 +63,7 @@ do_lddfmna:
	mov		%l5, %o2
	call		handle_lddfmna
	 add		%sp, PTREGS_OFF, %o0
-	ba,pt		%xcc, rtrap
-	 nop
+	ba,a,pt		%xcc, rtrap
	.size		do_lddfmna,.-do_lddfmna
 
	.type		do_stdfmna,#function

@@ -84,8 +81,7 @@ do_stdfmna:
	mov		%l5, %o2
	call		handle_stdfmna
	 add		%sp, PTREGS_OFF, %o0
-	ba,pt		%xcc, rtrap
-	 nop
+	ba,a,pt		%xcc, rtrap
	.size		do_stdfmna,.-do_stdfmna
 
	.type		breakpoint_trap,#function
@@ -994,6 +994,23 @@ void pcibios_set_master(struct pci_dev *dev)
 	/* No special bus mastering setup handling */
 }
 
+#ifdef CONFIG_PCI_IOV
+int pcibios_add_device(struct pci_dev *dev)
+{
+	struct pci_dev *pdev;
+
+	/* Add sriov arch specific initialization here.
+	 * Copy dev_archdata from PF to VF
+	 */
+	if (dev->is_virtfn) {
+		pdev = dev->physfn;
+		memcpy(&dev->dev.archdata, &pdev->dev.archdata,
+		       sizeof(struct dev_archdata));
+	}
+	return 0;
+}
+#endif /* CONFIG_PCI_IOV */
+
 static int __init pcibios_init(void)
 {
 	pci_dfl_cache_line_size = 64 >> 2;
@@ -14,10 +14,6 @@
 #include <asm/visasm.h>
 #include <asm/processor.h>
 
-#define		RTRAP_PSTATE		(PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV|PSTATE_IE)
-#define		RTRAP_PSTATE_IRQOFF	(PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV)
-#define		RTRAP_PSTATE_AG_IRQOFF	(PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV|PSTATE_AG)
-
 #ifdef CONFIG_CONTEXT_TRACKING
 # define SCHEDULE_USER schedule_user
 #else

@@ -242,52 +238,17 @@ rt_continue:	ldx	[%sp + PTREGS_OFF + PT_V9_G1], %g1
		 wrpr			%g1, %cwp
		ba,a,pt			%xcc, user_rtt_fill_64bit
 
+user_rtt_fill_fixup_dax:
+		ba,pt	%xcc, user_rtt_fill_fixup_common
+		 mov	1, %g3
+
+user_rtt_fill_fixup_mna:
+		ba,pt	%xcc, user_rtt_fill_fixup_common
+		 mov	2, %g3
+
 user_rtt_fill_fixup:
-		rdpr	%cwp, %g1
-		add	%g1, 1, %g1
-		wrpr	%g1, 0x0, %cwp
-
-		rdpr	%wstate, %g2
-		sll	%g2, 3, %g2
-		wrpr	%g2, 0x0, %wstate
-
-		/* We know %canrestore and %otherwin are both zero.  */
-
-		sethi	%hi(sparc64_kern_pri_context), %g2
-		ldx	[%g2 + %lo(sparc64_kern_pri_context)], %g2
-		mov	PRIMARY_CONTEXT, %g1
-
-661:		stxa	%g2, [%g1] ASI_DMMU
-		.section .sun4v_1insn_patch, "ax"
-		.word	661b
-		stxa	%g2, [%g1] ASI_MMU
-		.previous
-
-		sethi	%hi(KERNBASE), %g1
-		flush	%g1
-
-		or	%g4, FAULT_CODE_WINFIXUP, %g4
-		stb	%g4, [%g6 + TI_FAULT_CODE]
-		stx	%g5, [%g6 + TI_FAULT_ADDR]
-
-		mov	%g6, %l1
-		wrpr	%g0, 0x0, %tl
-
-661:		nop
-		.section		.sun4v_1insn_patch, "ax"
-		.word			661b
-		SET_GL(0)
-		.previous
-
-		wrpr	%g0, RTRAP_PSTATE, %pstate
-
-		mov	%l1, %g6
-		ldx	[%g6 + TI_TASK], %g4
-		LOAD_PER_CPU_BASE(%g5, %g6, %g1, %g2, %g3)
-		call	do_sparc64_fault
-		 add	%sp, PTREGS_OFF, %o0
-		ba,pt	%xcc, rtrap
-		 nop
+		ba,pt	%xcc, user_rtt_fill_fixup_common
+		 clr	%g3
 
 user_rtt_pre_restore:
		add	%g1, 1, %g1
@@ -138,12 +138,24 @@ int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
 	return 0;
 }
 
+/* Checks if the fp is valid.  We always build signal frames which are
+ * 16-byte aligned, therefore we can always enforce that the restore
+ * frame has that property as well.
+ */
+static bool invalid_frame_pointer(void __user *fp, int fplen)
+{
+	if ((((unsigned long) fp) & 15) ||
+	    ((unsigned long)fp) > 0x100000000ULL - fplen)
+		return true;
+	return false;
+}
+
 void do_sigreturn32(struct pt_regs *regs)
 {
 	struct signal_frame32 __user *sf;
 	compat_uptr_t fpu_save;
 	compat_uptr_t rwin_save;
-	unsigned int psr;
+	unsigned int psr, ufp;
 	unsigned pc, npc;
 	sigset_t set;
 	compat_sigset_t seta;

@@ -158,11 +170,16 @@ void do_sigreturn32(struct pt_regs *regs)
 	sf = (struct signal_frame32 __user *) regs->u_regs[UREG_FP];
 
 	/* 1. Make sure we are not getting garbage from the user */
-	if (!access_ok(VERIFY_READ, sf, sizeof(*sf)) ||
-	    (((unsigned long) sf) & 3))
+	if (invalid_frame_pointer(sf, sizeof(*sf)))
+		goto segv;
+
+	if (get_user(ufp, &sf->info.si_regs.u_regs[UREG_FP]))
+		goto segv;
+
+	if (ufp & 0x7)
 		goto segv;
 
-	if (get_user(pc, &sf->info.si_regs.pc) ||
+	if (__get_user(pc, &sf->info.si_regs.pc) ||
 	    __get_user(npc, &sf->info.si_regs.npc))
 		goto segv;
 

@@ -227,7 +244,7 @@ segv:
 asmlinkage void do_rt_sigreturn32(struct pt_regs *regs)
 {
 	struct rt_signal_frame32 __user *sf;
-	unsigned int psr, pc, npc;
+	unsigned int psr, pc, npc, ufp;
 	compat_uptr_t fpu_save;
 	compat_uptr_t rwin_save;
 	sigset_t set;

@@ -242,11 +259,16 @@ asmlinkage void do_rt_sigreturn32(struct pt_regs *regs)
 	sf = (struct rt_signal_frame32 __user *) regs->u_regs[UREG_FP];
 
 	/* 1. Make sure we are not getting garbage from the user */
-	if (!access_ok(VERIFY_READ, sf, sizeof(*sf)) ||
-	    (((unsigned long) sf) & 3))
+	if (invalid_frame_pointer(sf, sizeof(*sf)))
+		goto segv;
+
+	if (get_user(ufp, &sf->regs.u_regs[UREG_FP]))
+		goto segv;
+
+	if (ufp & 0x7)
 		goto segv;
 
-	if (get_user(pc, &sf->regs.pc) ||
+	if (__get_user(pc, &sf->regs.pc) ||
 	    __get_user(npc, &sf->regs.npc))
 		goto segv;
 

@@ -307,14 +329,6 @@ segv:
 	force_sig(SIGSEGV, current);
 }
 
-/* Checks if the fp is valid */
-static int invalid_frame_pointer(void __user *fp, int fplen)
-{
-	if ((((unsigned long) fp) & 7) || ((unsigned long)fp) > 0x100000000ULL - fplen)
-		return 1;
-	return 0;
-}
-
 static void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs, unsigned long framesize)
 {
 	unsigned long sp;
@@ -60,10 +60,22 @@ struct rt_signal_frame {
 #define SF_ALIGNEDSZ  (((sizeof(struct signal_frame) + 7) & (~7)))
 #define RT_ALIGNEDSZ  (((sizeof(struct rt_signal_frame) + 7) & (~7)))
 
+/* Checks if the fp is valid.  We always build signal frames which are
+ * 16-byte aligned, therefore we can always enforce that the restore
+ * frame has that property as well.
+ */
+static inline bool invalid_frame_pointer(void __user *fp, int fplen)
+{
+	if ((((unsigned long) fp) & 15) || !__access_ok((unsigned long)fp, fplen))
+		return true;
+
+	return false;
+}
+
 asmlinkage void do_sigreturn(struct pt_regs *regs)
 {
+	unsigned long up_psr, pc, npc, ufp;
 	struct signal_frame __user *sf;
-	unsigned long up_psr, pc, npc;
 	sigset_t set;
 	__siginfo_fpu_t __user *fpu_save;
 	__siginfo_rwin_t __user *rwin_save;

@@ -77,10 +89,13 @@ asmlinkage void do_sigreturn(struct pt_regs *regs)
 	sf = (struct signal_frame __user *) regs->u_regs[UREG_FP];
 
 	/* 1. Make sure we are not getting garbage from the user */
-	if (!access_ok(VERIFY_READ, sf, sizeof(*sf)))
+	if (!invalid_frame_pointer(sf, sizeof(*sf)))
 		goto segv_and_exit;
 
-	if (((unsigned long) sf) & 3)
+	if (get_user(ufp, &sf->info.si_regs.u_regs[UREG_FP]))
+		goto segv_and_exit;
+
+	if (ufp & 0x7)
 		goto segv_and_exit;
 
 	err = __get_user(pc, &sf->info.si_regs.pc);

@@ -127,7 +142,7 @@ segv_and_exit:
 asmlinkage void do_rt_sigreturn(struct pt_regs *regs)
 {
 	struct rt_signal_frame __user *sf;
-	unsigned int psr, pc, npc;
+	unsigned int psr, pc, npc, ufp;
 	__siginfo_fpu_t __user *fpu_save;
 	__siginfo_rwin_t __user *rwin_save;
 	sigset_t set;

@@ -135,8 +150,13 @@ asmlinkage void do_rt_sigreturn(struct pt_regs *regs)
 
 	synchronize_user_stack();
 	sf = (struct rt_signal_frame __user *) regs->u_regs[UREG_FP];
-	if (!access_ok(VERIFY_READ, sf, sizeof(*sf)) ||
-	    (((unsigned long) sf) & 0x03))
+	if (!invalid_frame_pointer(sf, sizeof(*sf)))
+		goto segv;
+
+	if (get_user(ufp, &sf->regs.u_regs[UREG_FP]))
+		goto segv;
+
+	if (ufp & 0x7)
 		goto segv;
 
 	err = __get_user(pc, &sf->regs.pc);

@@ -178,15 +198,6 @@ segv:
 	force_sig(SIGSEGV, current);
 }
 
-/* Checks if the fp is valid */
-static inline int invalid_frame_pointer(void __user *fp, int fplen)
-{
-	if ((((unsigned long) fp) & 7) || !__access_ok((unsigned long)fp, fplen))
-		return 1;
-
-	return 0;
-}
-
 static inline void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs, unsigned long framesize)
 {
 	unsigned long sp = regs->u_regs[UREG_FP];
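All of the invalid_frame_pointer() helpers introduced in these hunks reduce to the same low-bits test: a restore frame is acceptable only if it is 16-byte aligned (plus a range check). A stand-alone sketch of that test, with illustrative addresses only, not kernel code:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* A pointer is 16-byte aligned iff its low four bits are clear. */
static bool invalid_frame_pointer(uintptr_t fp)
{
        return (fp & 15) != 0;
}

int main(void)
{
        printf("%d\n", invalid_frame_pointer(0x7ff0f0));  /* 0: aligned, accepted */
        printf("%d\n", invalid_frame_pointer(0x7ff0f4));  /* 1: misaligned, rejected */
        return 0;
}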
@@ -52,7 +52,7 @@ asmlinkage void sparc64_set_context(struct pt_regs *regs)
 	unsigned char fenab;
 	int err;
 
-	flush_user_windows();
+	synchronize_user_stack();
 	if (get_thread_wsaved() ||
 	    (((unsigned long)ucp) & (sizeof(unsigned long)-1)) ||
 	    (!__access_ok(ucp, sizeof(*ucp))))

@@ -234,6 +234,17 @@ do_sigsegv:
 	goto out;
 }
 
+/* Checks if the fp is valid.  We always build rt signal frames which
+ * are 16-byte aligned, therefore we can always enforce that the
+ * restore frame has that property as well.
+ */
+static bool invalid_frame_pointer(void __user *fp)
+{
+	if (((unsigned long) fp) & 15)
+		return true;
+	return false;
+}
+
 struct rt_signal_frame {
 	struct sparc_stackf	ss;
 	siginfo_t		info;

@@ -246,8 +257,8 @@ struct rt_signal_frame {
 
 void do_rt_sigreturn(struct pt_regs *regs)
 {
+	unsigned long tpc, tnpc, tstate, ufp;
 	struct rt_signal_frame __user *sf;
-	unsigned long tpc, tnpc, tstate;
 	__siginfo_fpu_t __user *fpu_save;
 	__siginfo_rwin_t __user *rwin_save;
 	sigset_t set;

@@ -261,10 +272,16 @@ void do_rt_sigreturn(struct pt_regs *regs)
 		(regs->u_regs [UREG_FP] + STACK_BIAS);
 
 	/* 1. Make sure we are not getting garbage from the user */
-	if (((unsigned long) sf) & 3)
+	if (invalid_frame_pointer(sf))
+		goto segv;
+
+	if (get_user(ufp, &sf->regs.u_regs[UREG_FP]))
 		goto segv;
 
-	err = get_user(tpc, &sf->regs.tpc);
+	if ((ufp + STACK_BIAS) & 0x7)
+		goto segv;
+
+	err = __get_user(tpc, &sf->regs.tpc);
 	err |= __get_user(tnpc, &sf->regs.tnpc);
 	if (test_thread_flag(TIF_32BIT)) {
 		tpc &= 0xffffffff;

@@ -308,14 +325,6 @@ segv:
 	force_sig(SIGSEGV, current);
 }
 
-/* Checks if the fp is valid */
-static int invalid_frame_pointer(void __user *fp)
-{
-	if (((unsigned long) fp) & 15)
-		return 1;
-	return 0;
-}
-
 static inline void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs, unsigned long framesize)
 {
 	unsigned long sp = regs->u_regs[UREG_FP] + STACK_BIAS;
@@ -48,6 +48,10 @@ int save_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
 int restore_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
 {
 	int err;
+
+	if (((unsigned long) fpu) & 3)
+		return -EFAULT;
+
 #ifdef CONFIG_SMP
 	if (test_tsk_thread_flag(current, TIF_USEDFPU))
 		regs->psr &= ~PSR_EF;

@@ -97,7 +101,10 @@ int restore_rwin_state(__siginfo_rwin_t __user *rp)
 	struct thread_info *t = current_thread_info();
 	int i, wsaved, err;
 
-	__get_user(wsaved, &rp->wsaved);
+	if (((unsigned long) rp) & 3)
+		return -EFAULT;
+
+	get_user(wsaved, &rp->wsaved);
 	if (wsaved > NSWINS)
 		return -EFAULT;
 
@@ -37,7 +37,10 @@ int restore_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
 	unsigned long fprs;
 	int err;
 
-	err = __get_user(fprs, &fpu->si_fprs);
+	if (((unsigned long) fpu) & 7)
+		return -EFAULT;
+
+	err = get_user(fprs, &fpu->si_fprs);
 	fprs_write(0);
 	regs->tstate &= ~TSTATE_PEF;
 	if (fprs & FPRS_DL)

@@ -72,7 +75,10 @@ int restore_rwin_state(__siginfo_rwin_t __user *rp)
 	struct thread_info *t = current_thread_info();
 	int i, wsaved, err;
 
-	__get_user(wsaved, &rp->wsaved);
+	if (((unsigned long) rp) & 7)
+		return -EFAULT;
+
+	get_user(wsaved, &rp->wsaved);
 	if (wsaved > NSWINS)
 		return -EFAULT;
 
@@ -85,8 +85,7 @@ __spitfire_cee_trap_continue:
	ba,pt	%xcc, etraptl1
	 rd	%pc, %g7
 
-	ba,pt	%xcc, 2f
-	 nop
+	ba,a,pt	%xcc, 2f
 
 1:	ba,pt	%xcc, etrap_irq
	 rd	%pc, %g7

@@ -100,8 +99,7 @@ __spitfire_cee_trap_continue:
	mov	%l5, %o2
	call	spitfire_access_error
	 add	%sp, PTREGS_OFF, %o0
-	ba,pt	%xcc, rtrap
-	 nop
+	ba,a,pt	%xcc, rtrap
	.size	__spitfire_access_error,.-__spitfire_access_error
 
 /* This is the trap handler entry point for ECC correctable

@@ -179,8 +177,7 @@ __spitfire_data_access_exception_tl1:
	mov	%l5, %o2
	call	spitfire_data_access_exception_tl1
	 add	%sp, PTREGS_OFF, %o0
-	ba,pt	%xcc, rtrap
-	 nop
+	ba,a,pt	%xcc, rtrap
	.size	__spitfire_data_access_exception_tl1,.-__spitfire_data_access_exception_tl1
 
	.type	__spitfire_data_access_exception,#function

@@ -200,8 +197,7 @@ __spitfire_data_access_exception:
	mov	%l5, %o2
	call	spitfire_data_access_exception
	 add	%sp, PTREGS_OFF, %o0
-	ba,pt	%xcc, rtrap
-	 nop
+	ba,a,pt	%xcc, rtrap
	.size	__spitfire_data_access_exception,.-__spitfire_data_access_exception
 
	.type	__spitfire_insn_access_exception_tl1,#function

@@ -220,8 +216,7 @@ __spitfire_insn_access_exception_tl1:
	mov	%l5, %o2
	call	spitfire_insn_access_exception_tl1
	 add	%sp, PTREGS_OFF, %o0
-	ba,pt	%xcc, rtrap
-	 nop
+	ba,a,pt	%xcc, rtrap
	.size	__spitfire_insn_access_exception_tl1,.-__spitfire_insn_access_exception_tl1
 
	.type	__spitfire_insn_access_exception,#function

@@ -240,6 +235,5 @@ __spitfire_insn_access_exception:
	mov	%l5, %o2
	call	spitfire_insn_access_exception
	 add	%sp, PTREGS_OFF, %o0
-	ba,pt	%xcc, rtrap
-	 nop
+	ba,a,pt	%xcc, rtrap
	.size	__spitfire_insn_access_exception,.-__spitfire_insn_access_exception
@@ -158,7 +158,25 @@ linux_syscall_trace32:
	 add	%sp, PTREGS_OFF, %o0
	brnz,pn	%o0, 3f
	 mov	-ENOSYS, %o0
+
+	/* Syscall tracing can modify the registers.  */
+	ldx	[%sp + PTREGS_OFF + PT_V9_G1], %g1
+	sethi	%hi(sys_call_table32), %l7
+	ldx	[%sp + PTREGS_OFF + PT_V9_I0], %i0
+	or	%l7, %lo(sys_call_table32), %l7
+	ldx	[%sp + PTREGS_OFF + PT_V9_I1], %i1
+	ldx	[%sp + PTREGS_OFF + PT_V9_I2], %i2
+	ldx	[%sp + PTREGS_OFF + PT_V9_I3], %i3
+	ldx	[%sp + PTREGS_OFF + PT_V9_I4], %i4
+	ldx	[%sp + PTREGS_OFF + PT_V9_I5], %i5
+
+	cmp	%g1, NR_syscalls
+	bgeu,pn	%xcc, 3f
+	 mov	-ENOSYS, %o0
+
+	sll	%g1, 2, %l4
	srl	%i0, 0, %o0
+	lduw	[%l7 + %l4], %l7
	srl	%i4, 0, %o4
	srl	%i1, 0, %o1
	srl	%i2, 0, %o2

@@ -170,7 +188,25 @@ linux_syscall_trace:
	 add	%sp, PTREGS_OFF, %o0
	brnz,pn	%o0, 3f
	 mov	-ENOSYS, %o0
+
+	/* Syscall tracing can modify the registers.  */
+	ldx	[%sp + PTREGS_OFF + PT_V9_G1], %g1
+	sethi	%hi(sys_call_table64), %l7
+	ldx	[%sp + PTREGS_OFF + PT_V9_I0], %i0
+	or	%l7, %lo(sys_call_table64), %l7
+	ldx	[%sp + PTREGS_OFF + PT_V9_I1], %i1
+	ldx	[%sp + PTREGS_OFF + PT_V9_I2], %i2
+	ldx	[%sp + PTREGS_OFF + PT_V9_I3], %i3
+	ldx	[%sp + PTREGS_OFF + PT_V9_I4], %i4
+	ldx	[%sp + PTREGS_OFF + PT_V9_I5], %i5
+
+	cmp	%g1, NR_syscalls
+	bgeu,pn	%xcc, 3f
+	 mov	-ENOSYS, %o0
+
+	sll	%g1, 2, %l4
	mov	%i0, %o0
+	lduw	[%l7 + %l4], %l7
	mov	%i1, %o1
	mov	%i2, %o2
	mov	%i3, %o3
arch/sparc/kernel/urtt_fill.S (new file, 98 lines)
@@ -0,0 +1,98 @@
+#include <asm/thread_info.h>
+#include <asm/trap_block.h>
+#include <asm/spitfire.h>
+#include <asm/ptrace.h>
+#include <asm/head.h>
+
+		.text
+		.align	8
+		.globl	user_rtt_fill_fixup_common
+user_rtt_fill_fixup_common:
+		rdpr	%cwp, %g1
+		add	%g1, 1, %g1
+		wrpr	%g1, 0x0, %cwp
+
+		rdpr	%wstate, %g2
+		sll	%g2, 3, %g2
+		wrpr	%g2, 0x0, %wstate
+
+		/* We know %canrestore and %otherwin are both zero.  */
+
+		sethi	%hi(sparc64_kern_pri_context), %g2
+		ldx	[%g2 + %lo(sparc64_kern_pri_context)], %g2
+		mov	PRIMARY_CONTEXT, %g1
+
+661:		stxa	%g2, [%g1] ASI_DMMU
+		.section .sun4v_1insn_patch, "ax"
+		.word	661b
+		stxa	%g2, [%g1] ASI_MMU
+		.previous
+
+		sethi	%hi(KERNBASE), %g1
+		flush	%g1
+
+		mov	%g4, %l4
+		mov	%g5, %l5
+		brnz,pn	%g3, 1f
+		 mov	%g3, %l3
+
+		or	%g4, FAULT_CODE_WINFIXUP, %g4
+		stb	%g4, [%g6 + TI_FAULT_CODE]
+		stx	%g5, [%g6 + TI_FAULT_ADDR]
+1:
+		mov	%g6, %l1
+		wrpr	%g0, 0x0, %tl
+
+661:		nop
+		.section		.sun4v_1insn_patch, "ax"
+		.word			661b
+		SET_GL(0)
+		.previous
+
+		wrpr	%g0, RTRAP_PSTATE, %pstate
+
+		mov	%l1, %g6
+		ldx	[%g6 + TI_TASK], %g4
+		LOAD_PER_CPU_BASE(%g5, %g6, %g1, %g2, %g3)
+
+		brnz,pn	%l3, 1f
+		 nop
+
+		call	do_sparc64_fault
+		 add	%sp, PTREGS_OFF, %o0
+		ba,pt	%xcc, rtrap
+		 nop
+
+1:		cmp	%g3, 2
+		bne,pn	%xcc, 2f
+		 nop
+
+		sethi	%hi(tlb_type), %g1
+		lduw	[%g1 + %lo(tlb_type)], %g1
+		cmp	%g1, 3
+		bne,pt	%icc, 1f
+		 add	%sp, PTREGS_OFF, %o0
+		mov	%l4, %o2
+		call	sun4v_do_mna
+		 mov	%l5, %o1
+		ba,a,pt	%xcc, rtrap
+1:		mov	%l4, %o1
+		mov	%l5, %o2
+		call	mem_address_unaligned
+		 nop
+		ba,a,pt	%xcc, rtrap
+
+2:		sethi	%hi(tlb_type), %g1
+		mov	%l4, %o1
+		lduw	[%g1 + %lo(tlb_type)], %g1
+		mov	%l5, %o2
+		cmp	%g1, 3
+		bne,pt	%icc, 1f
+		 add	%sp, PTREGS_OFF, %o0
+		call	sun4v_data_access_exception
+		 nop
+		ba,a,pt	%xcc, rtrap
+
+1:		call	spitfire_data_access_exception
+		 nop
+		ba,a,pt	%xcc, rtrap
@@ -11,8 +11,7 @@ utrap_trap:		/* %g3=handler,%g4=level */
	 mov	%l4, %o1
	call	bad_trap
	 add	%sp, PTREGS_OFF, %o0
-	ba,pt	%xcc, rtrap
-	 nop
+	ba,a,pt	%xcc, rtrap
 
 invoke_utrap:
	sllx	%g3, 3, %g3
@@ -33,6 +33,10 @@ ENTRY(_start)
 jiffies = jiffies_64;
 #endif
 
+#ifdef CONFIG_SPARC64
+ASSERT((swapper_tsb == 0x0000000000408000), "Error: sparc64 early assembler too large")
+#endif
+
 SECTIONS
 {
 #ifdef CONFIG_SPARC64
@@ -32,8 +32,7 @@ fill_fixup:
	 rd	%pc, %g7
	call	do_sparc64_fault
	 add	%sp, PTREGS_OFF, %o0
-	ba,pt	%xcc, rtrap
-	 nop
+	ba,a,pt	%xcc, rtrap
 
 /* Be very careful about usage of the trap globals here.
  * You cannot touch %g5 as that has the fault information.
@@ -176,17 +176,31 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
 		     pte_t *ptep, pte_t entry)
 {
 	int i;
+	pte_t orig[2];
+	unsigned long nptes;
 
 	if (!pte_present(*ptep) && pte_present(entry))
 		mm->context.huge_pte_count++;
 
 	addr &= HPAGE_MASK;
-	for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
-		set_pte_at(mm, addr, ptep, entry);
+
+	nptes = 1 << HUGETLB_PAGE_ORDER;
+	orig[0] = *ptep;
+	orig[1] = *(ptep + nptes / 2);
+	for (i = 0; i < nptes; i++) {
+		*ptep = entry;
 		ptep++;
 		addr += PAGE_SIZE;
 		pte_val(entry) += PAGE_SIZE;
 	}
+
+	/* Issue TLB flush at REAL_HPAGE_SIZE boundaries */
+	addr -= REAL_HPAGE_SIZE;
+	ptep -= nptes / 2;
+	maybe_tlb_batch_add(mm, addr, ptep, orig[1], 0);
+	addr -= REAL_HPAGE_SIZE;
+	ptep -= nptes / 2;
+	maybe_tlb_batch_add(mm, addr, ptep, orig[0], 0);
 }
 
 pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,

@@ -194,19 +208,28 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
 {
 	pte_t entry;
 	int i;
+	unsigned long nptes;
 
 	entry = *ptep;
 	if (pte_present(entry))
 		mm->context.huge_pte_count--;
 
 	addr &= HPAGE_MASK;
-
-	for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
-		pte_clear(mm, addr, ptep);
+	nptes = 1 << HUGETLB_PAGE_ORDER;
+	for (i = 0; i < nptes; i++) {
+		*ptep = __pte(0UL);
 		addr += PAGE_SIZE;
 		ptep++;
 	}
 
+	/* Issue TLB flush at REAL_HPAGE_SIZE boundaries */
+	addr -= REAL_HPAGE_SIZE;
+	ptep -= nptes / 2;
+	maybe_tlb_batch_add(mm, addr, ptep, entry, 0);
+	addr -= REAL_HPAGE_SIZE;
+	ptep -= nptes / 2;
+	maybe_tlb_batch_add(mm, addr, ptep, entry, 0);
+
 	return entry;
 }
 
|
|
@ -324,18 +324,6 @@ static void __update_mmu_tsb_insert(struct mm_struct *mm, unsigned long tsb_inde
|
|||
tsb_insert(tsb, tag, tte);
|
||||
}
|
||||
|
||||
#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
|
||||
static inline bool is_hugetlb_pte(pte_t pte)
|
||||
{
|
||||
if ((tlb_type == hypervisor &&
|
||||
(pte_val(pte) & _PAGE_SZALL_4V) == _PAGE_SZHUGE_4V) ||
|
||||
(tlb_type != hypervisor &&
|
||||
(pte_val(pte) & _PAGE_SZALL_4U) == _PAGE_SZHUGE_4U))
|
||||
return true;
|
||||
return false;
|
||||
}
|
||||
#endif
|
||||
|
||||
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
|
||||
{
|
||||
struct mm_struct *mm;
|
||||
|
@ -1267,13 +1255,6 @@ static int __init numa_parse_mdesc(void)
|
|||
int i, j, err, count;
|
||||
u64 node;
|
||||
|
||||
/* Some sane defaults for numa latency values */
|
||||
for (i = 0; i < MAX_NUMNODES; i++) {
|
||||
for (j = 0; j < MAX_NUMNODES; j++)
|
||||
numa_latency[i][j] = (i == j) ?
|
||||
LOCAL_DISTANCE : REMOTE_DISTANCE;
|
||||
}
|
||||
|
||||
node = mdesc_node_by_name(md, MDESC_NODE_NULL, "latency-groups");
|
||||
if (node == MDESC_NODE_NULL) {
|
||||
mdesc_release(md);
|
||||
|
@ -1369,10 +1350,18 @@ static int __init numa_parse_sun4u(void)
|
|||
|
||||
static int __init bootmem_init_numa(void)
|
||||
{
|
||||
int i, j;
|
||||
int err = -1;
|
||||
|
||||
numadbg("bootmem_init_numa()\n");
|
||||
|
||||
/* Some sane defaults for numa latency values */
|
||||
for (i = 0; i < MAX_NUMNODES; i++) {
|
||||
for (j = 0; j < MAX_NUMNODES; j++)
|
||||
numa_latency[i][j] = (i == j) ?
|
||||
LOCAL_DISTANCE : REMOTE_DISTANCE;
|
||||
}
|
||||
|
||||
if (numa_enabled) {
|
||||
if (tlb_type == hypervisor)
|
||||
err = numa_parse_mdesc();
|
||||
|
@ -2832,9 +2821,10 @@ void hugetlb_setup(struct pt_regs *regs)
|
|||
* the Data-TLB for huge pages.
|
||||
*/
|
||||
if (tlb_type == cheetah_plus) {
|
||||
bool need_context_reload = false;
|
||||
unsigned long ctx;
|
||||
|
||||
spin_lock(&ctx_alloc_lock);
|
||||
spin_lock_irq(&ctx_alloc_lock);
|
||||
ctx = mm->context.sparc64_ctx_val;
|
||||
ctx &= ~CTX_PGSZ_MASK;
|
||||
ctx |= CTX_PGSZ_BASE << CTX_PGSZ0_SHIFT;
|
||||
|
@ -2853,9 +2843,12 @@ void hugetlb_setup(struct pt_regs *regs)
|
|||
* also executing in this address space.
|
||||
*/
|
||||
mm->context.sparc64_ctx_val = ctx;
|
||||
on_each_cpu(context_reload, mm, 0);
|
||||
need_context_reload = true;
|
||||
}
|
||||
spin_unlock(&ctx_alloc_lock);
|
||||
spin_unlock_irq(&ctx_alloc_lock);
|
||||
|
||||
if (need_context_reload)
|
||||
on_each_cpu(context_reload, mm, 0);
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
|
|
@@ -67,7 +67,7 @@ void arch_leave_lazy_mmu_mode(void)
 }
 
 static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr,
-			      bool exec)
+			      bool exec, bool huge)
 {
 	struct tlb_batch *tb = &get_cpu_var(tlb_batch);
 	unsigned long nr;

@@ -84,13 +84,21 @@ static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr,
 	}
 
 	if (!tb->active) {
-		flush_tsb_user_page(mm, vaddr);
+		flush_tsb_user_page(mm, vaddr, huge);
 		global_flush_tlb_page(mm, vaddr);
 		goto out;
 	}
 
-	if (nr == 0)
+	if (nr == 0) {
 		tb->mm = mm;
+		tb->huge = huge;
+	}
+
+	if (tb->huge != huge) {
+		flush_tlb_pending();
+		tb->huge = huge;
+		nr = 0;
+	}
 
 	tb->vaddrs[nr] = vaddr;
 	tb->tlb_nr = ++nr;

@@ -104,6 +112,8 @@ out:
 void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
 		   pte_t *ptep, pte_t orig, int fullmm)
 {
+	bool huge = is_hugetlb_pte(orig);
+
 	if (tlb_type != hypervisor &&
 	    pte_dirty(orig)) {
 		unsigned long paddr, pfn = pte_pfn(orig);

@@ -129,7 +139,7 @@ void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
 
 no_cache_flush:
 	if (!fullmm)
-		tlb_batch_add_one(mm, vaddr, pte_exec(orig));
+		tlb_batch_add_one(mm, vaddr, pte_exec(orig), huge);
 }
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE

@@ -145,7 +155,7 @@ static void tlb_batch_pmd_scan(struct mm_struct *mm, unsigned long vaddr,
 		if (pte_val(*pte) & _PAGE_VALID) {
 			bool exec = pte_exec(*pte);
 
-			tlb_batch_add_one(mm, vaddr, exec);
+			tlb_batch_add_one(mm, vaddr, exec, false);
 		}
 		pte++;
 		vaddr += PAGE_SIZE;

@@ -185,8 +195,9 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr,
 			pte_t orig_pte = __pte(pmd_val(orig));
 			bool exec = pte_exec(orig_pte);
 
-			tlb_batch_add_one(mm, addr, exec);
-			tlb_batch_add_one(mm, addr + REAL_HPAGE_SIZE, exec);
+			tlb_batch_add_one(mm, addr, exec, true);
+			tlb_batch_add_one(mm, addr + REAL_HPAGE_SIZE, exec,
+					  true);
 		} else {
 			tlb_batch_pmd_scan(mm, addr, orig);
 		}
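The hunks above add the rule that one TLB batch may only hold entries of a single page-size class: if an entry of the other kind arrives, the pending batch is flushed first. A minimal user-space model of that flush-on-kind-change rule, with invented batch/flush names rather than the kernel API:

#include <stdbool.h>
#include <stdio.h>

struct batch { bool huge; int nr; };

/* Drain whatever the batch currently holds. */
static void flush(struct batch *b)
{
        if (b->nr)
                printf("flush %d %s entries\n", b->nr,
                       b->huge ? "huge" : "base");
        b->nr = 0;
}

/* Add one entry; a kind change forces a flush before restarting. */
static void add(struct batch *b, bool huge)
{
        if (b->nr == 0)
                b->huge = huge;
        if (b->huge != huge) {
                flush(b);
                b->huge = huge;
        }
        b->nr++;
}

int main(void)
{
        struct batch b = { false, 0 };

        add(&b, false); add(&b, false);   /* two base-page entries */
        add(&b, true);  add(&b, true);    /* kind change: drains the first two */
        flush(&b);
        return 0;
}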
@@ -76,14 +76,15 @@ void flush_tsb_user(struct tlb_batch *tb)
 
 	spin_lock_irqsave(&mm->context.lock, flags);
 
-	base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb;
-	nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
-	if (tlb_type == cheetah_plus || tlb_type == hypervisor)
-		base = __pa(base);
-	__flush_tsb_one(tb, PAGE_SHIFT, base, nentries);
-
+	if (!tb->huge) {
+		base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb;
+		nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
+		if (tlb_type == cheetah_plus || tlb_type == hypervisor)
+			base = __pa(base);
+		__flush_tsb_one(tb, PAGE_SHIFT, base, nentries);
+	}
 #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
-	if (mm->context.tsb_block[MM_TSB_HUGE].tsb) {
+	if (tb->huge && mm->context.tsb_block[MM_TSB_HUGE].tsb) {
 		base = (unsigned long) mm->context.tsb_block[MM_TSB_HUGE].tsb;
 		nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries;
 		if (tlb_type == cheetah_plus || tlb_type == hypervisor)

@@ -94,20 +95,21 @@ void flush_tsb_user(struct tlb_batch *tb)
 	spin_unlock_irqrestore(&mm->context.lock, flags);
 }
 
-void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr)
+void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr, bool huge)
 {
 	unsigned long nentries, base, flags;
 
 	spin_lock_irqsave(&mm->context.lock, flags);
 
-	base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb;
-	nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
-	if (tlb_type == cheetah_plus || tlb_type == hypervisor)
-		base = __pa(base);
-	__flush_tsb_one_entry(base, vaddr, PAGE_SHIFT, nentries);
-
+	if (!huge) {
+		base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb;
+		nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
+		if (tlb_type == cheetah_plus || tlb_type == hypervisor)
+			base = __pa(base);
+		__flush_tsb_one_entry(base, vaddr, PAGE_SHIFT, nentries);
+	}
 #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
-	if (mm->context.tsb_block[MM_TSB_HUGE].tsb) {
+	if (huge && mm->context.tsb_block[MM_TSB_HUGE].tsb) {
 		base = (unsigned long) mm->context.tsb_block[MM_TSB_HUGE].tsb;
 		nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries;
 		if (tlb_type == cheetah_plus || tlb_type == hypervisor)
@@ -109,6 +109,12 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
 	preempt_count_dec();
 }
 
+/*
+ * In IST context, we explicitly disable preemption.  This serves two
+ * purposes: it makes it much less likely that we would accidentally
+ * schedule in IST context and it will force a warning if we somehow
+ * manage to schedule by accident.
+ */
 void ist_enter(struct pt_regs *regs)
 {
 	if (user_mode(regs)) {

@@ -123,13 +129,7 @@ void ist_enter(struct pt_regs *regs)
 		rcu_nmi_enter();
 	}
 
-	/*
-	 * We are atomic because we're on the IST stack; or we're on
-	 * x86_32, in which case we still shouldn't schedule; or we're
-	 * on x86_64 and entered from user mode, in which case we're
-	 * still atomic unless ist_begin_non_atomic is called.
-	 */
-	preempt_count_add(HARDIRQ_OFFSET);
+	preempt_disable();
 
 	/* This code is a bit fragile.  Test it. */
 	RCU_LOCKDEP_WARN(!rcu_is_watching(), "ist_enter didn't work");

@@ -137,7 +137,7 @@ void ist_enter(struct pt_regs *regs)
 
 void ist_exit(struct pt_regs *regs)
 {
-	preempt_count_sub(HARDIRQ_OFFSET);
+	preempt_enable_no_resched();
 
 	if (!user_mode(regs))
 		rcu_nmi_exit();

@@ -168,7 +168,7 @@ void ist_begin_non_atomic(struct pt_regs *regs)
 	BUG_ON((unsigned long)(current_top_of_stack() -
 			       current_stack_pointer()) >= THREAD_SIZE);
 
-	preempt_count_sub(HARDIRQ_OFFSET);
+	preempt_enable_no_resched();
 }
 
 /**

@@ -178,7 +178,7 @@ void ist_begin_non_atomic(struct pt_regs *regs)
  */
 void ist_end_non_atomic(void)
 {
-	preempt_count_add(HARDIRQ_OFFSET);
+	preempt_disable();
 }
 
 static nokprobe_inline int
@@ -3014,6 +3014,11 @@ static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu,
 	if (dbgregs->flags)
 		return -EINVAL;
 
+	if (dbgregs->dr6 & ~0xffffffffull)
+		return -EINVAL;
+	if (dbgregs->dr7 & ~0xffffffffull)
+		return -EINVAL;
+
 	memcpy(vcpu->arch.db, dbgregs->db, sizeof(vcpu->arch.db));
 	kvm_update_dr0123(vcpu);
 	vcpu->arch.dr6 = dbgregs->dr6;
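The added checks reject any set bit above bit 31 in the user-supplied debug registers, since DR6/DR7 are architecturally 32-bit. A stand-alone sketch of that reserved-bit mask test (check_dr() is a made-up name, and -1 stands in for -EINVAL):

#include <stdint.h>
#include <stdio.h>

/* Reject values with any bit set outside the low 32 bits. */
static int check_dr(uint64_t val)
{
        return (val & ~0xffffffffull) ? -1 : 0;
}

int main(void)
{
        printf("%d\n", check_dr(0x00000000deadbeefull));  /*  0: accepted */
        printf("%d\n", check_dr(0x1000000000000000ull));  /* -1: rejected */
        return 0;
}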
@@ -14,6 +14,7 @@ config ASYMMETRIC_PUBLIC_KEY_SUBTYPE
 	select MPILIB
 	select PUBLIC_KEY_ALGO_RSA
 	select CRYPTO_HASH_INFO
+	select CRYPTO_AKCIPHER
 	help
 	  This option provides support for asymmetric public key type handling.
 	  If signature generation and/or verification are to be used,
@@ -122,6 +122,7 @@ static int ccp_aes_xts_crypt(struct ablkcipher_request *req,
 	struct ccp_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
 	struct ccp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
 	unsigned int unit;
+	u32 unit_size;
 	int ret;
 
 	if (!ctx->u.aes.key_len)

@@ -133,11 +134,17 @@ static int ccp_aes_xts_crypt(struct ablkcipher_request *req,
 	if (!req->info)
 		return -EINVAL;
 
-	for (unit = 0; unit < ARRAY_SIZE(unit_size_map); unit++)
-		if (!(req->nbytes & (unit_size_map[unit].size - 1)))
-			break;
+	unit_size = CCP_XTS_AES_UNIT_SIZE__LAST;
+	if (req->nbytes <= unit_size_map[0].size) {
+		for (unit = 0; unit < ARRAY_SIZE(unit_size_map); unit++) {
+			if (!(req->nbytes & (unit_size_map[unit].size - 1))) {
+				unit_size = unit_size_map[unit].value;
+				break;
+			}
+		}
+	}
 
-	if ((unit_size_map[unit].value == CCP_XTS_AES_UNIT_SIZE__LAST) ||
+	if ((unit_size == CCP_XTS_AES_UNIT_SIZE__LAST) ||
 	    (ctx->u.aes.key_len != AES_KEYSIZE_128)) {
 		/* Use the fallback to process the request for any
 		 * unsupported unit sizes or key sizes

@@ -158,7 +165,7 @@ static int ccp_aes_xts_crypt(struct ablkcipher_request *req,
 	rctx->cmd.engine = CCP_ENGINE_XTS_AES_128;
 	rctx->cmd.u.xts.action = (encrypt) ? CCP_AES_ACTION_ENCRYPT
 					   : CCP_AES_ACTION_DECRYPT;
-	rctx->cmd.u.xts.unit_size = unit_size_map[unit].value;
+	rctx->cmd.u.xts.unit_size = unit_size;
 	rctx->cmd.u.xts.key = &ctx->u.aes.key_sg;
 	rctx->cmd.u.xts.key_len = ctx->u.aes.key_len;
 	rctx->cmd.u.xts.iv = &rctx->iv_sg;
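The rewritten lookup above seeds unit_size with a sentinel and only overwrites it from inside the loop, so a miss can no longer index one past the end of unit_size_map. A self-contained sketch of that pattern; the table values here are invented, not the CCP driver's:

#include <stddef.h>
#include <stdio.h>

struct unit { size_t size; int value; };

static const struct unit unit_size_map[] = {
        { 4096, 4096 }, { 512, 512 }, { 16, 16 },
};
#define UNIT_LAST (-1)  /* sentinel: no supported unit size */

static int lookup_unit(size_t nbytes)
{
        int unit_size = UNIT_LAST;      /* safe default on a miss */
        size_t i;

        for (i = 0; i < sizeof(unit_size_map) / sizeof(unit_size_map[0]); i++) {
                if (!(nbytes & (unit_size_map[i].size - 1))) {
                        unit_size = unit_size_map[i].value;
                        break;
                }
        }
        return unit_size;       /* never reads past the table */
}

int main(void)
{
        printf("%d\n", lookup_unit(8192));   /* 4096 */
        printf("%d\n", lookup_unit(24));     /* -1: unsupported size */
        return 0;
}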
@@ -145,8 +145,6 @@
 void adf_disable_aer(struct adf_accel_dev *accel_dev);
 int adf_init_aer(void);
 void adf_exit_aer(void);
-int adf_init_pf_wq(void);
-void adf_exit_pf_wq(void);
 int adf_init_admin_comms(struct adf_accel_dev *accel_dev);
 void adf_exit_admin_comms(struct adf_accel_dev *accel_dev);
 int adf_send_admin_init(struct adf_accel_dev *accel_dev);

@@ -229,6 +227,8 @@
 			      uint32_t vf_mask);
 void adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev,
 				 uint32_t vf_mask);
+int adf_init_pf_wq(void);
+void adf_exit_pf_wq(void);
 #else
 static inline int adf_sriov_configure(struct pci_dev *pdev, int numvfs)
 {

@@ -238,5 +238,14 @@ static inline int adf_sriov_configure(struct pci_dev *pdev, int numvfs)
 static inline void adf_disable_sriov(struct adf_accel_dev *accel_dev)
 {
 }
+
+static inline int adf_init_pf_wq(void)
+{
+	return 0;
+}
+
+static inline void adf_exit_pf_wq(void)
+{
+}
 #endif
 #endif
@@ -551,11 +551,11 @@ static void bcm_kona_gpio_reset(struct bcm_kona_gpio *kona_gpio)
 	/* disable interrupts and clear status */
 	for (i = 0; i < kona_gpio->num_bank; i++) {
 		/* Unlock the entire bank first */
-		bcm_kona_gpio_write_lock_regs(kona_gpio, i, UNLOCK_CODE);
+		bcm_kona_gpio_write_lock_regs(reg_base, i, UNLOCK_CODE);
 		writel(0xffffffff, reg_base + GPIO_INT_MASK(i));
 		writel(0xffffffff, reg_base + GPIO_INT_STATUS(i));
 		/* Now re-lock the bank */
-		bcm_kona_gpio_write_lock_regs(kona_gpio, i, LOCK_CODE);
+		bcm_kona_gpio_write_lock_regs(reg_base, i, LOCK_CODE);
 	}
 }
 
@@ -3316,6 +3316,24 @@ int drm_mode_addfb2(struct drm_device *dev,
return 0;
}

struct drm_mode_rmfb_work {
struct work_struct work;
struct list_head fbs;
};

static void drm_mode_rmfb_work_fn(struct work_struct *w)
{
struct drm_mode_rmfb_work *arg = container_of(w, typeof(*arg), work);

while (!list_empty(&arg->fbs)) {
struct drm_framebuffer *fb =
list_first_entry(&arg->fbs, typeof(*fb), filp_head);

list_del_init(&fb->filp_head);
drm_framebuffer_remove(fb);
}
}

/**
* drm_mode_rmfb - remove an FB from the configuration
* @dev: drm device for the ioctl
@@ -3356,7 +3374,25 @@ int drm_mode_rmfb(struct drm_device *dev,
mutex_unlock(&dev->mode_config.fb_lock);
mutex_unlock(&file_priv->fbs_lock);

drm_framebuffer_unreference(fb);
/*
* we now own the reference that was stored in the fbs list
*
* drm_framebuffer_remove may fail with -EINTR on pending signals,
* so run this in a separate stack as there's no way to correctly
* handle this after the fb is already removed from the lookup table.
*/
if (atomic_read(&fb->refcount.refcount) > 1) {
struct drm_mode_rmfb_work arg;

INIT_WORK_ONSTACK(&arg.work, drm_mode_rmfb_work_fn);
INIT_LIST_HEAD(&arg.fbs);
list_add_tail(&fb->filp_head, &arg.fbs);

schedule_work(&arg.work);
flush_work(&arg.work);
destroy_work_on_stack(&arg.work);
} else
drm_framebuffer_unreference(fb);

return 0;

@@ -3509,7 +3545,6 @@ out_err1:
return ret;
}


/**
* drm_fb_release - remove and free the FBs on this file
* @priv: drm file for the ioctl
@@ -3524,6 +3559,9 @@ out_err1:
void drm_fb_release(struct drm_file *priv)
{
struct drm_framebuffer *fb, *tfb;
struct drm_mode_rmfb_work arg;

INIT_LIST_HEAD(&arg.fbs);

/*
* When the file gets released that means no one else can access the fb
@@ -3536,10 +3574,22 @@ void drm_fb_release(struct drm_file *priv)
* at it any more.
*/
list_for_each_entry_safe(fb, tfb, &priv->fbs, filp_head) {
list_del_init(&fb->filp_head);
if (atomic_read(&fb->refcount.refcount) > 1) {
list_move_tail(&fb->filp_head, &arg.fbs);
} else {
list_del_init(&fb->filp_head);

/* This drops the fpriv->fbs reference. */
drm_framebuffer_unreference(fb);
/* This drops the fpriv->fbs reference. */
drm_framebuffer_unreference(fb);
}
}

if (!list_empty(&arg.fbs)) {
INIT_WORK_ONSTACK(&arg.work, drm_mode_rmfb_work_fn);

schedule_work(&arg.work);
flush_work(&arg.work);
destroy_work_on_stack(&arg.work);
}
}

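The hunks above hand the final drm_framebuffer_remove() to a work item allocated on the caller's stack and then wait for it, because the removal may fail with -EINTR once the fb is already out of the lookup table. A minimal userspace analogue of that hand-off-and-wait shape, sketched with a plain thread standing in for the kernel workqueue (all names here are illustrative, none are DRM API):

#include <pthread.h>
#include <stdio.h>

/* Stand-in for the on-stack work item: the argument block lives on the
 * caller's stack and is only valid until the wait below returns. */
struct rmfb_work {
	pthread_t thread;
	int fb_id;		/* hypothetical payload */
};

static void *rmfb_work_fn(void *p)
{
	struct rmfb_work *arg = p;
	/* The worker runs on its own stack, so a signal delivered to the
	 * original caller cannot abort this teardown midway. */
	printf("removing fb %d\n", arg->fb_id);
	return NULL;
}

int main(void)
{
	struct rmfb_work arg = { .fb_id = 42 };

	/* schedule_work() + flush_work() analogue: start, then wait. */
	pthread_create(&arg.thread, NULL, rmfb_work_fn, &arg);
	pthread_join(&arg.thread, NULL);
	return 0;
}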
@@ -4475,7 +4475,7 @@ static int rocker_port_obj_add(struct net_device *dev,
fib4 = SWITCHDEV_OBJ_IPV4_FIB(obj);
err = rocker_port_fib_ipv4(rocker_port, trans,
htonl(fib4->dst), fib4->dst_len,
&fib4->fi, fib4->tb_id, 0);
fib4->fi, fib4->tb_id, 0);
break;
case SWITCHDEV_OBJ_ID_PORT_FDB:
err = rocker_port_fdb_add(rocker_port, trans,
@@ -4547,7 +4547,7 @@ static int rocker_port_obj_del(struct net_device *dev,
fib4 = SWITCHDEV_OBJ_IPV4_FIB(obj);
err = rocker_port_fib_ipv4(rocker_port, NULL,
htonl(fib4->dst), fib4->dst_len,
&fib4->fi, fib4->tb_id,
fib4->fi, fib4->tb_id,
ROCKER_OP_FLAG_REMOVE);
break;
case SWITCHDEV_OBJ_ID_PORT_FDB:

@@ -619,6 +619,17 @@ fail:
return rc;
}

static void efx_ef10_forget_old_piobufs(struct efx_nic *efx)
{
struct efx_channel *channel;
struct efx_tx_queue *tx_queue;

/* All our existing PIO buffers went away */
efx_for_each_channel(channel, efx)
efx_for_each_channel_tx_queue(tx_queue, channel)
tx_queue->piobuf = NULL;
}

#else /* !EFX_USE_PIO */

static int efx_ef10_alloc_piobufs(struct efx_nic *efx, unsigned int n)
@@ -635,6 +646,10 @@ static void efx_ef10_free_piobufs(struct efx_nic *efx)
{
}

static void efx_ef10_forget_old_piobufs(struct efx_nic *efx)
{
}

#endif /* EFX_USE_PIO */

static void efx_ef10_remove(struct efx_nic *efx)
@@ -1018,6 +1033,7 @@ static void efx_ef10_reset_mc_allocations(struct efx_nic *efx)
nic_data->must_realloc_vis = true;
nic_data->must_restore_filters = true;
nic_data->must_restore_piobufs = true;
efx_ef10_forget_old_piobufs(efx);
nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID;

/* Driver-created vswitches and vports must be re-created */

@@ -310,15 +310,15 @@ static int geneve_udp_encap_recv(struct sock *sk, struct sk_buff *skb)

/* Need Geneve and inner Ethernet header to be present */
if (unlikely(!pskb_may_pull(skb, GENEVE_BASE_HLEN)))
goto error;
goto drop;

/* Return packets with reserved bits set */
geneveh = geneve_hdr(skb);
if (unlikely(geneveh->ver != GENEVE_VER))
goto error;
goto drop;

if (unlikely(geneveh->proto_type != htons(ETH_P_TEB)))
goto error;
goto drop;

opts_len = geneveh->opt_len * 4;
if (iptunnel_pull_header(skb, GENEVE_BASE_HLEN + opts_len,
@@ -336,10 +336,6 @@ drop:
/* Consume bad packet */
kfree_skb(skb);
return 0;

error:
/* Let the UDP layer deal with the skb */
return 1;
}

static struct socket *geneve_create_sock(struct net *net, bool ipv6,
@@ -998,6 +994,17 @@ static netdev_tx_t geneve_xmit(struct sk_buff *skb, struct net_device *dev)
return geneve_xmit_skb(skb, dev, info);
}

static int geneve_change_mtu(struct net_device *dev, int new_mtu)
{
/* GENEVE overhead is not fixed, so we can't enforce a more
* precise max MTU.
*/
if (new_mtu < 68 || new_mtu > IP_MAX_MTU)
return -EINVAL;
dev->mtu = new_mtu;
return 0;
}

static int geneve_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
{
struct ip_tunnel_info *info = skb_tunnel_info(skb);
@@ -1042,7 +1049,7 @@ static const struct net_device_ops geneve_netdev_ops = {
.ndo_stop = geneve_stop,
.ndo_start_xmit = geneve_xmit,
.ndo_get_stats64 = ip_tunnel_get_stats64,
.ndo_change_mtu = eth_change_mtu,
.ndo_change_mtu = geneve_change_mtu,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
.ndo_fill_metadata_dst = geneve_fill_metadata_dst,
@@ -1349,11 +1356,21 @@ struct net_device *geneve_dev_create_fb(struct net *net, const char *name,

err = geneve_configure(net, dev, &geneve_remote_unspec,
0, 0, 0, htons(dst_port), true);
if (err) {
free_netdev(dev);
return ERR_PTR(err);
}
if (err)
goto err;

/* openvswitch users expect packet sizes to be unrestricted,
* so set the largest MTU we can.
*/
err = geneve_change_mtu(dev, IP_MAX_MTU);
if (err)
goto err;

return dev;

err:
free_netdev(dev);
return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(geneve_dev_create_fb);

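geneve_change_mtu() accepts any value between the 68-byte IPv4 minimum and the largest IP MTU, since variable-length GENEVE options make a tighter upper bound impossible. A compilable sketch of that bounds check, with IP_MAX_MTU re-declared locally as a stand-in for the kernel constant:

#include <errno.h>
#include <stdio.h>

#define IP_MAX_MTU 0xFFF0U	/* illustrative stand-in for the kernel value */

/* Minimal sketch of the check geneve_change_mtu() applies: anything in
 * [68, IP_MAX_MTU] is valid because the tunnel overhead is variable. */
static int change_mtu(unsigned int *dev_mtu, unsigned int new_mtu)
{
	if (new_mtu < 68 || new_mtu > IP_MAX_MTU)
		return -EINVAL;
	*dev_mtu = new_mtu;
	return 0;
}

int main(void)
{
	unsigned int mtu = 1500;

	printf("set 9000 -> %d, mtu=%u\n", change_mtu(&mtu, 9000), mtu);
	printf("set 40   -> %d, mtu=%u\n", change_mtu(&mtu, 40), mtu);
	return 0;
}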
@@ -969,7 +969,7 @@ static void team_port_disable(struct team *team,
NETIF_F_FRAGLIST | NETIF_F_ALL_TSO | \
NETIF_F_HIGHDMA | NETIF_F_LRO)

static void __team_compute_features(struct team *team)
static void ___team_compute_features(struct team *team)
{
struct team_port *port;
u32 vlan_features = TEAM_VLAN_FEATURES & NETIF_F_ALL_FOR_ALL;
@@ -993,15 +993,20 @@ static void __team_compute_features(struct team *team)
team->dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
if (dst_release_flag == (IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM))
team->dev->priv_flags |= IFF_XMIT_DST_RELEASE;
}

static void __team_compute_features(struct team *team)
{
___team_compute_features(team);
netdev_change_features(team->dev);
}

static void team_compute_features(struct team *team)
{
mutex_lock(&team->lock);
__team_compute_features(team);
___team_compute_features(team);
mutex_unlock(&team->lock);
netdev_change_features(team->dev);
}

static int team_port_enter(struct team *team, struct team_port *port)

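The split into ___team_compute_features()/__team_compute_features() exists so that netdev_change_features() is only invoked after team->lock has been dropped. A small sketch of that compute-under-lock, notify-outside-lock pattern using a pthread mutex (names are illustrative, not the team driver's API):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t team_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int team_features;

/* ___team_compute_features() analogue: pure computation, lock held. */
static void recompute_features_locked(void)
{
	team_features = 0x5;	/* placeholder for the real feature merge */
}

/* netdev_change_features() analogue: may re-enter other subsystems,
 * so it must run with no locks held. */
static void notify_features_changed(void)
{
	printf("features now %#x\n", team_features);
}

static void team_compute_features(void)
{
	pthread_mutex_lock(&team_lock);
	recompute_features_locked();
	pthread_mutex_unlock(&team_lock);
	notify_features_changed();	/* deliberately outside the lock */
}

int main(void)
{
	team_compute_features();
	return 0;
}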
@@ -567,11 +567,13 @@ static void tun_detach_all(struct net_device *dev)
for (i = 0; i < n; i++) {
tfile = rtnl_dereference(tun->tfiles[i]);
BUG_ON(!tfile);
tfile->socket.sk->sk_shutdown = RCV_SHUTDOWN;
tfile->socket.sk->sk_data_ready(tfile->socket.sk);
RCU_INIT_POINTER(tfile->tun, NULL);
--tun->numqueues;
}
list_for_each_entry(tfile, &tun->disabled, next) {
tfile->socket.sk->sk_shutdown = RCV_SHUTDOWN;
tfile->socket.sk->sk_data_ready(tfile->socket.sk);
RCU_INIT_POINTER(tfile->tun, NULL);
}
@@ -627,6 +629,7 @@ static int tun_attach(struct tun_struct *tun, struct file *file, bool skip_filte
goto out;
}
tfile->queue_index = tun->numqueues;
tfile->socket.sk->sk_shutdown &= ~RCV_SHUTDOWN;
rcu_assign_pointer(tfile->tun, tun);
rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile);
tun->numqueues++;
@@ -1408,9 +1411,6 @@ static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile,
if (!iov_iter_count(to))
return 0;

if (tun->dev->reg_state != NETREG_REGISTERED)
return -EIO;

/* Read frames from queue */
skb = __skb_recv_datagram(tfile->socket.sk, noblock ? MSG_DONTWAIT : 0,
&peeked, &off, &err);

@@ -1254,7 +1254,7 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)

/* Need Vxlan and inner Ethernet header to be present */
if (!pskb_may_pull(skb, VXLAN_HLEN))
goto error;
goto drop;

vxh = (struct vxlanhdr *)(udp_hdr(skb) + 1);
flags = ntohl(vxh->vx_flags);
@@ -1344,13 +1344,7 @@ drop:
bad_flags:
netdev_dbg(skb->dev, "invalid vxlan flags=%#x vni=%#x\n",
ntohl(vxh->vx_flags), ntohl(vxh->vx_vni));

error:
if (tun_dst)
dst_release((struct dst_entry *)tun_dst);

/* Return non vxlan pkt */
return 1;
goto drop;
}

static int arp_reduce(struct net_device *dev, struct sk_buff *skb)
@@ -2370,27 +2364,41 @@ static void vxlan_set_multicast_list(struct net_device *dev)
{
}

static int __vxlan_change_mtu(struct net_device *dev,
struct net_device *lowerdev,
struct vxlan_rdst *dst, int new_mtu, bool strict)
{
int max_mtu = IP_MAX_MTU;

if (lowerdev)
max_mtu = lowerdev->mtu;

if (dst->remote_ip.sa.sa_family == AF_INET6)
max_mtu -= VXLAN6_HEADROOM;
else
max_mtu -= VXLAN_HEADROOM;

if (new_mtu < 68)
return -EINVAL;

if (new_mtu > max_mtu) {
if (strict)
return -EINVAL;

new_mtu = max_mtu;
}

dev->mtu = new_mtu;
return 0;
}

static int vxlan_change_mtu(struct net_device *dev, int new_mtu)
{
struct vxlan_dev *vxlan = netdev_priv(dev);
struct vxlan_rdst *dst = &vxlan->default_dst;
struct net_device *lowerdev;
int max_mtu;

lowerdev = __dev_get_by_index(vxlan->net, dst->remote_ifindex);
if (lowerdev == NULL)
return eth_change_mtu(dev, new_mtu);

if (dst->remote_ip.sa.sa_family == AF_INET6)
max_mtu = lowerdev->mtu - VXLAN6_HEADROOM;
else
max_mtu = lowerdev->mtu - VXLAN_HEADROOM;

if (new_mtu < 68 || new_mtu > max_mtu)
return -EINVAL;

dev->mtu = new_mtu;
return 0;
struct net_device *lowerdev = __dev_get_by_index(vxlan->net,
dst->remote_ifindex);
return __vxlan_change_mtu(dev, lowerdev, dst, new_mtu, true);
}

static int egress_ipv4_tun_info(struct net_device *dev, struct sk_buff *skb,
@@ -2768,6 +2776,7 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
int err;
bool use_ipv6 = false;
__be16 default_port = vxlan->cfg.dst_port;
struct net_device *lowerdev = NULL;

vxlan->net = src_net;

@@ -2788,9 +2797,7 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
}

if (conf->remote_ifindex) {
struct net_device *lowerdev
= __dev_get_by_index(src_net, conf->remote_ifindex);

lowerdev = __dev_get_by_index(src_net, conf->remote_ifindex);
dst->remote_ifindex = conf->remote_ifindex;

if (!lowerdev) {
@@ -2814,6 +2821,12 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
needed_headroom = lowerdev->hard_header_len;
}

if (conf->mtu) {
err = __vxlan_change_mtu(dev, lowerdev, dst, conf->mtu, false);
if (err)
return err;
}

if (use_ipv6 || conf->flags & VXLAN_F_COLLECT_METADATA)
needed_headroom += VXLAN6_HEADROOM;
else
@@ -2991,6 +3004,9 @@ static int vxlan_newlink(struct net *src_net, struct net_device *dev,
if (data[IFLA_VXLAN_REMCSUM_NOPARTIAL])
conf.flags |= VXLAN_F_REMCSUM_NOPARTIAL;

if (tb[IFLA_MTU])
conf.mtu = nla_get_u32(tb[IFLA_MTU]);

err = vxlan_dev_configure(src_net, dev, &conf);
switch (err) {
case -ENODEV:

@@ -1191,9 +1191,10 @@ static void mtk_eint_irq_handler(struct irq_desc *desc)
const struct mtk_desc_pin *pin;

chained_irq_enter(chip, desc);
for (eint_num = 0; eint_num < pctl->devdata->ap_num; eint_num += 32) {
for (eint_num = 0;
eint_num < pctl->devdata->ap_num;
eint_num += 32, reg += 4) {
status = readl(reg);
reg += 4;
while (status) {
offset = __ffs(status);
index = eint_num + offset;

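The reworked handler advances reg together with eint_num in the for-header (so the status register is read before reg moves on) and then walks each set status bit with __ffs(). A userspace sketch of the inner bit-walk, using GCC's __builtin_ctz as a stand-in for the kernel's __ffs:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t status = 0x00008801;	/* sample interrupt status word */

	/* Walk every set bit, lowest first, clearing each as it is
	 * handled -- the same shape as the while (status) loop above. */
	while (status) {
		unsigned int offset = (unsigned int)__builtin_ctz(status);

		printf("pending EINT %u\n", offset);
		status &= status - 1;	/* clear the bit just handled */
	}
	return 0;
}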
@@ -227,6 +227,7 @@ static struct {
{"PIONEER", "CD-ROM DRM-624X", NULL, BLIST_FORCELUN | BLIST_SINGLELUN},
{"Promise", "VTrak E610f", NULL, BLIST_SPARSELUN | BLIST_NO_RSOC},
{"Promise", "", NULL, BLIST_SPARSELUN},
{"QEMU", "QEMU CD-ROM", NULL, BLIST_SKIP_VPD_PAGES},
{"QNAP", "iSCSI Storage", NULL, BLIST_MAX_1024},
{"SYNOLOGY", "iSCSI Storage", NULL, BLIST_MAX_1024},
{"QUANTUM", "XP34301", "1071", BLIST_NOTQ},

@@ -910,9 +910,12 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
}

/*
* If we finished all bytes in the request we are done now.
* special case: failed zero length commands always need to
* drop down into the retry code. Otherwise, if we finished
* all bytes in the request we are done now.
*/
if (!scsi_end_request(req, error, good_bytes, 0))
if (!(blk_rq_bytes(req) == 0 && error) &&
!scsi_end_request(req, error, good_bytes, 0))
return;

/*

@@ -1618,7 +1618,7 @@ struct dentry *d_alloc(struct dentry * parent, const struct qstr *name)
struct dentry *dentry = __d_alloc(parent->d_sb, name);
if (!dentry)
return NULL;

dentry->d_flags |= DCACHE_RCUACCESS;
spin_lock(&parent->d_lock);
/*
* don't need child lock because it is not subject
@@ -2413,7 +2413,6 @@ static void __d_rehash(struct dentry * entry, struct hlist_bl_head *b)
{
BUG_ON(!d_unhashed(entry));
hlist_bl_lock(b);
entry->d_flags |= DCACHE_RCUACCESS;
hlist_bl_add_head_rcu(&entry->d_hash, b);
hlist_bl_unlock(b);
}
@@ -2632,6 +2631,7 @@ static void __d_move(struct dentry *dentry, struct dentry *target,
/* ... and switch them in the tree */
if (IS_ROOT(dentry)) {
/* splicing a tree */
dentry->d_flags |= DCACHE_RCUACCESS;
dentry->d_parent = target->d_parent;
target->d_parent = target;
list_del_init(&target->d_child);

@@ -25,6 +25,7 @@
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/mount.h>
#include <linux/file.h>
#include "ecryptfs_kernel.h"

struct ecryptfs_open_req {
@@ -147,7 +148,7 @@ int ecryptfs_privileged_open(struct file **lower_file,
flags |= IS_RDONLY(d_inode(lower_dentry)) ? O_RDONLY : O_RDWR;
(*lower_file) = dentry_open(&req.path, flags, cred);
if (!IS_ERR(*lower_file))
goto out;
goto have_file;
if ((flags & O_ACCMODE) == O_RDONLY) {
rc = PTR_ERR((*lower_file));
goto out;
@@ -165,8 +166,16 @@ int ecryptfs_privileged_open(struct file **lower_file,
mutex_unlock(&ecryptfs_kthread_ctl.mux);
wake_up(&ecryptfs_kthread_ctl.wait);
wait_for_completion(&req.done);
if (IS_ERR(*lower_file))
if (IS_ERR(*lower_file)) {
rc = PTR_ERR(*lower_file);
goto out;
}
have_file:
if ((*lower_file)->f_op->mmap == NULL) {
fput(*lower_file);
*lower_file = NULL;
rc = -EMEDIUMTYPE;
}
out:
return rc;
}

@@ -121,6 +121,13 @@ static struct dentry *proc_mount(struct file_system_type *fs_type,
if (IS_ERR(sb))
return ERR_CAST(sb);

/*
* procfs isn't actually a stacking filesystem; however, there is
* too much magic going on inside it to permit stacking things on
* top of it
*/
sb->s_stack_depth = FILESYSTEM_MAX_STACK_DEPTH;

if (!proc_parse_options(options, ns)) {
deactivate_locked_super(sb);
return ERR_PTR(-EINVAL);

@@ -301,7 +301,7 @@
#define ICC_SGI1R_AFFINITY_1_SHIFT 16
#define ICC_SGI1R_AFFINITY_1_MASK (0xff << ICC_SGI1R_AFFINITY_1_SHIFT)
#define ICC_SGI1R_SGI_ID_SHIFT 24
#define ICC_SGI1R_SGI_ID_MASK (0xff << ICC_SGI1R_SGI_ID_SHIFT)
#define ICC_SGI1R_SGI_ID_MASK (0xfULL << ICC_SGI1R_SGI_ID_SHIFT)
#define ICC_SGI1R_AFFINITY_2_SHIFT 32
#define ICC_SGI1R_AFFINITY_2_MASK (0xffULL << ICC_SGI1R_AFFINITY_1_SHIFT)
#define ICC_SGI1R_IRQ_ROUTING_MODE_BIT 40

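The fix narrows ICC_SGI1R_SGI_ID_MASK to the 4 bits the SGI ID field actually occupies and gives the constant a 64-bit type, matching the other masks in this register that shift past bit 31. A quick compile-and-print check of the before/after masks (values only, using the shifts defined above):

#include <stdio.h>

int main(void)
{
	/* SGI IDs are 0..15, so the field at bit 24 is 4 bits wide. */
	unsigned long long old_mask = 0xffULL << 24;	/* too wide: bits 24-31 */
	unsigned long long new_mask = 0xfULL << 24;	/* correct:  bits 24-27 */

	/* Without a ULL suffix, shifting a plain int by 32 or more is
	 * undefined, which is why the 64-bit masks need it. */
	unsigned long long aff2 = 0xffULL << 32;

	printf("old %#llx new %#llx aff2 %#llx\n", old_mask, new_mask, aff2);
	return 0;
}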
@@ -239,11 +239,18 @@ void xt_unregister_match(struct xt_match *target);
int xt_register_matches(struct xt_match *match, unsigned int n);
void xt_unregister_matches(struct xt_match *match, unsigned int n);

int xt_check_entry_offsets(const void *base, const char *elems,
unsigned int target_offset,
unsigned int next_offset);

int xt_check_match(struct xt_mtchk_param *, unsigned int size, u_int8_t proto,
bool inv_proto);
int xt_check_target(struct xt_tgchk_param *, unsigned int size, u_int8_t proto,
bool inv_proto);

void *xt_copy_counters_from_user(const void __user *user, unsigned int len,
struct xt_counters_info *info, bool compat);

struct xt_table *xt_register_table(struct net *net,
const struct xt_table *table,
struct xt_table_info *bootstrap,
@@ -478,7 +485,7 @@ void xt_compat_init_offsets(u_int8_t af, unsigned int number);
int xt_compat_calc_jump(u_int8_t af, unsigned int offset);

int xt_compat_match_offset(const struct xt_match *match);
int xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr,
void xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr,
unsigned int *size);
int xt_compat_match_to_user(const struct xt_entry_match *m,
void __user **dstptr, unsigned int *size);
@@ -488,6 +495,9 @@ void xt_compat_target_from_user(struct xt_entry_target *t, void **dstptr,
unsigned int *size);
int xt_compat_target_to_user(const struct xt_entry_target *t,
void __user **dstptr, unsigned int *size);
int xt_compat_check_entry_offsets(const void *base, const char *elems,
unsigned int target_offset,
unsigned int next_offset);

#endif /* CONFIG_COMPAT */
#endif /* _X_TABLES_H */

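The new xt_check_entry_offsets()/xt_compat_check_entry_offsets() helpers centralize the bounds checks that each table previously open-coded in its private check_entry(). A simplified, self-contained sketch of the invariants being enforced on a rule blob; the struct and values here are stand-ins, not the real xtables ABI:

#include <errno.h>
#include <stddef.h>
#include <stdio.h>

/* Simplified stand-in for an xtables target header. */
struct entry_target { unsigned int target_size; };

/* The invariants: the target must leave room for its own header before
 * the next entry begins, and its declared size must not overrun the
 * next entry either. */
static int check_entry_offsets(unsigned int target_offset,
			       unsigned int next_offset,
			       unsigned int target_size)
{
	if (target_offset + sizeof(struct entry_target) > next_offset)
		return -EINVAL;
	if (target_offset + target_size > next_offset)
		return -EINVAL;
	return 0;
}

int main(void)
{
	printf("ok:  %d\n", check_entry_offsets(112, 152, 40));
	printf("bad: %d\n", check_entry_offsets(112, 120, 40)); /* overruns */
	return 0;
}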
@@ -230,6 +230,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
int ip_tunnel_ioctl(struct net_device *dev, struct ip_tunnel_parm *p, int cmd);
int ip_tunnel_encap(struct sk_buff *skb, struct ip_tunnel *t,
u8 *protocol, struct flowi4 *fl4);
int __ip_tunnel_change_mtu(struct net_device *dev, int new_mtu, bool strict);
int ip_tunnel_change_mtu(struct net_device *dev, int new_mtu);

struct rtnl_link_stats64 *ip_tunnel_get_stats64(struct net_device *dev,

@@ -88,7 +88,7 @@ struct switchdev_obj_ipv4_fib {
struct switchdev_obj obj;
u32 dst;
int dst_len;
struct fib_info fi;
struct fib_info *fi;
u8 tos;
u8 type;
u32 nlflags;

@@ -52,7 +52,7 @@
#if defined(__GLIBC__)

/* Coordinate with glibc net/if.h header. */
#if defined(_NET_IF_H)
#if defined(_NET_IF_H) && defined(__USE_MISC)

/* GLIBC headers included first so don't define anything
* that would already be defined. */

@@ -358,7 +358,7 @@ static int bpf_fill_super(struct super_block *sb, void *data, int silent)
static struct dentry *bpf_mount(struct file_system_type *type, int flags,
const char *dev_name, void *data)
{
return mount_ns(type, flags, current->nsproxy->mnt_ns, bpf_fill_super);
return mount_nodev(type, flags, data, bpf_fill_super);
}

static struct file_system_type bpf_fs_type = {
@@ -366,7 +366,6 @@ static struct file_system_type bpf_fs_type = {
.name = "bpf",
.mount = bpf_mount,
.kill_sb = kill_litter_super,
.fs_flags = FS_USERNS_MOUNT,
};

MODULE_ALIAS_FS("bpf");

@@ -3008,7 +3008,8 @@ static noinline void __schedule_bug(struct task_struct *prev)
static inline void schedule_debug(struct task_struct *prev)
{
#ifdef CONFIG_SCHED_STACK_END_CHECK
BUG_ON(task_stack_end_corrupted(prev));
if (task_stack_end_corrupted(prev))
panic("corrupted stack end detected inside scheduler\n");
#endif

if (unlikely(in_atomic_preempt_off())) {

@@ -3661,6 +3661,7 @@ static void memcg_deactivate_kmem(struct mem_cgroup *memcg)
* ordering is imposed by list_lru_node->lock taken by
* memcg_drain_all_list_lrus().
*/
rcu_read_lock(); /* can be called from css_free w/o cgroup_mutex */
css_for_each_descendant_pre(css, &memcg->css) {
child = mem_cgroup_from_css(css);
BUG_ON(child->kmemcg_id != kmemcg_id);
@@ -3668,6 +3669,8 @@ static void memcg_deactivate_kmem(struct mem_cgroup *memcg)
if (!memcg->use_hierarchy)
break;
}
rcu_read_unlock();

memcg_drain_all_list_lrus(kmemcg_id, parent->kmemcg_id);

memcg_free_cache_id(kmemcg_id);

@@ -278,6 +278,8 @@ void br_fdb_change_mac_address(struct net_bridge *br, const u8 *newaddr)
* change from under us.
*/
list_for_each_entry(v, &vg->vlan_list, vlist) {
if (!br_vlan_should_use(v))
continue;
f = __br_fdb_get(br, br->dev->dev_addr, v->vid);
if (f && f->is_local && !f->dst)
fdb_delete_local(br, NULL, f);

@@ -1247,6 +1247,14 @@ struct net_device *gretap_fb_dev_create(struct net *net, const char *name,
err = ipgre_newlink(net, dev, tb, NULL);
if (err < 0)
goto out;

/* openvswitch users expect packet sizes to be unrestricted,
* so set the largest MTU we can.
*/
err = __ip_tunnel_change_mtu(dev, IP_MAX_MTU, false);
if (err)
goto out;

return dev;
out:
free_netdev(dev);

@@ -948,17 +948,31 @@ done:
}
EXPORT_SYMBOL_GPL(ip_tunnel_ioctl);

int ip_tunnel_change_mtu(struct net_device *dev, int new_mtu)
int __ip_tunnel_change_mtu(struct net_device *dev, int new_mtu, bool strict)
{
struct ip_tunnel *tunnel = netdev_priv(dev);
int t_hlen = tunnel->hlen + sizeof(struct iphdr);
int max_mtu = 0xFFF8 - dev->hard_header_len - t_hlen;

if (new_mtu < 68 ||
new_mtu > 0xFFF8 - dev->hard_header_len - t_hlen)
if (new_mtu < 68)
return -EINVAL;

if (new_mtu > max_mtu) {
if (strict)
return -EINVAL;

new_mtu = max_mtu;
}

dev->mtu = new_mtu;
return 0;
}
EXPORT_SYMBOL_GPL(__ip_tunnel_change_mtu);

int ip_tunnel_change_mtu(struct net_device *dev, int new_mtu)
{
return __ip_tunnel_change_mtu(dev, new_mtu, true);
}
EXPORT_SYMBOL_GPL(ip_tunnel_change_mtu);

static void ip_tunnel_dev_free(struct net_device *dev)

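__ip_tunnel_change_mtu() either rejects an out-of-range MTU (strict == true, the normal ndo_change_mtu path) or clamps it to the device maximum (the gretap_fb_dev_create()/openvswitch path above). A compilable sketch of that strict-versus-clamp split, with max_mtu passed in as a stand-in for 0xFFF8 - hard_header_len - t_hlen computed from a real device:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static int change_mtu(unsigned int *dev_mtu, unsigned int new_mtu,
		      unsigned int max_mtu, bool strict)
{
	if (new_mtu < 68)
		return -EINVAL;

	if (new_mtu > max_mtu) {
		if (strict)
			return -EINVAL;
		new_mtu = max_mtu;	/* non-strict callers get the cap */
	}

	*dev_mtu = new_mtu;
	return 0;
}

int main(void)
{
	unsigned int mtu = 1500;

	/* ioctl/ndo path: oversized request is rejected outright */
	printf("strict:  %d mtu=%u\n", change_mtu(&mtu, 70000, 65467, true), mtu);
	/* fallback-device path: oversized request is clamped instead */
	printf("clamped: %d mtu=%u\n", change_mtu(&mtu, 70000, 65467, false), mtu);
	return 0;
}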
@@ -359,11 +359,24 @@ unsigned int arpt_do_table(struct sk_buff *skb,
}

/* All zeroes == unconditional rule. */
static inline bool unconditional(const struct arpt_arp *arp)
static inline bool unconditional(const struct arpt_entry *e)
{
static const struct arpt_arp uncond;

return memcmp(arp, &uncond, sizeof(uncond)) == 0;
return e->target_offset == sizeof(struct arpt_entry) &&
memcmp(&e->arp, &uncond, sizeof(uncond)) == 0;
}

static bool find_jump_target(const struct xt_table_info *t,
const struct arpt_entry *target)
{
struct arpt_entry *iter;

xt_entry_foreach(iter, t->entries, t->size) {
if (iter == target)
return true;
}
return false;
}

/* Figures out from what hook each rule can be called: returns 0 if
@@ -402,11 +415,10 @@ static int mark_source_chains(const struct xt_table_info *newinfo,
|= ((1 << hook) | (1 << NF_ARP_NUMHOOKS));

/* Unconditional return/END. */
if ((e->target_offset == sizeof(struct arpt_entry) &&
if ((unconditional(e) &&
(strcmp(t->target.u.user.name,
XT_STANDARD_TARGET) == 0) &&
t->verdict < 0 && unconditional(&e->arp)) ||
visited) {
t->verdict < 0) || visited) {
unsigned int oldpos, size;

if ((strcmp(t->target.u.user.name,
@@ -439,6 +451,8 @@ static int mark_source_chains(const struct xt_table_info *newinfo,
size = e->next_offset;
e = (struct arpt_entry *)
(entry0 + pos + size);
if (pos + size >= newinfo->size)
return 0;
e->counters.pcnt = pos;
pos += size;
} else {
@@ -458,9 +472,15 @@ static int mark_source_chains(const struct xt_table_info *newinfo,
/* This a jump; chase it. */
duprintf("Jump rule %u -> %u\n",
pos, newpos);
e = (struct arpt_entry *)
(entry0 + newpos);
if (!find_jump_target(newinfo, e))
return 0;
} else {
/* ... this is a fallthru */
newpos = pos + e->next_offset;
if (newpos >= newinfo->size)
return 0;
}
e = (struct arpt_entry *)
(entry0 + newpos);
@@ -474,25 +494,6 @@ next:
return 1;
}

static inline int check_entry(const struct arpt_entry *e, const char *name)
{
const struct xt_entry_target *t;

if (!arp_checkentry(&e->arp)) {
duprintf("arp_tables: arp check failed %p %s.\n", e, name);
return -EINVAL;
}

if (e->target_offset + sizeof(struct xt_entry_target) > e->next_offset)
return -EINVAL;

t = arpt_get_target_c(e);
if (e->target_offset + t->u.target_size > e->next_offset)
return -EINVAL;

return 0;
}

static inline int check_target(struct arpt_entry *e, const char *name)
{
struct xt_entry_target *t = arpt_get_target(e);
@@ -522,10 +523,6 @@ find_check_entry(struct arpt_entry *e, const char *name, unsigned int size)
struct xt_target *target;
int ret;

ret = check_entry(e, name);
if (ret)
return ret;

e->counters.pcnt = xt_percpu_counter_alloc();
if (IS_ERR_VALUE(e->counters.pcnt))
return -ENOMEM;
@@ -557,7 +554,7 @@ static bool check_underflow(const struct arpt_entry *e)
const struct xt_entry_target *t;
unsigned int verdict;

if (!unconditional(&e->arp))
if (!unconditional(e))
return false;
t = arpt_get_target_c(e);
if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
@@ -576,9 +573,11 @@ static inline int check_entry_size_and_hooks(struct arpt_entry *e,
unsigned int valid_hooks)
{
unsigned int h;
int err;

if ((unsigned long)e % __alignof__(struct arpt_entry) != 0 ||
(unsigned char *)e + sizeof(struct arpt_entry) >= limit) {
(unsigned char *)e + sizeof(struct arpt_entry) >= limit ||
(unsigned char *)e + e->next_offset > limit) {
duprintf("Bad offset %p\n", e);
return -EINVAL;
}
@@ -590,6 +589,14 @@ static inline int check_entry_size_and_hooks(struct arpt_entry *e,
return -EINVAL;
}

if (!arp_checkentry(&e->arp))
return -EINVAL;

err = xt_check_entry_offsets(e, e->elems, e->target_offset,
e->next_offset);
if (err)
return err;

/* Check hooks & underflows */
for (h = 0; h < NF_ARP_NUMHOOKS; h++) {
if (!(valid_hooks & (1 << h)))
@@ -598,9 +605,9 @@ static inline int check_entry_size_and_hooks(struct arpt_entry *e,
newinfo->hook_entry[h] = hook_entries[h];
if ((unsigned char *)e - base == underflows[h]) {
if (!check_underflow(e)) {
pr_err("Underflows must be unconditional and "
"use the STANDARD target with "
"ACCEPT/DROP\n");
pr_debug("Underflows must be unconditional and "
"use the STANDARD target with "
"ACCEPT/DROP\n");
return -EINVAL;
}
newinfo->underflow[h] = underflows[h];
@@ -691,10 +698,8 @@ static int translate_table(struct xt_table_info *newinfo, void *entry0,
}
}

if (!mark_source_chains(newinfo, repl->valid_hooks, entry0)) {
duprintf("Looping hook\n");
if (!mark_source_chains(newinfo, repl->valid_hooks, entry0))
return -ELOOP;
}

/* Finally, each sanity check must pass */
i = 0;
@@ -1125,55 +1130,17 @@ static int do_add_counters(struct net *net, const void __user *user,
unsigned int i;
struct xt_counters_info tmp;
struct xt_counters *paddc;
unsigned int num_counters;
const char *name;
int size;
void *ptmp;
struct xt_table *t;
const struct xt_table_info *private;
int ret = 0;
struct arpt_entry *iter;
unsigned int addend;
#ifdef CONFIG_COMPAT
struct compat_xt_counters_info compat_tmp;

if (compat) {
ptmp = &compat_tmp;
size = sizeof(struct compat_xt_counters_info);
} else
#endif
{
ptmp = &tmp;
size = sizeof(struct xt_counters_info);
}
paddc = xt_copy_counters_from_user(user, len, &tmp, compat);
if (IS_ERR(paddc))
return PTR_ERR(paddc);

if (copy_from_user(ptmp, user, size) != 0)
return -EFAULT;

#ifdef CONFIG_COMPAT
if (compat) {
num_counters = compat_tmp.num_counters;
name = compat_tmp.name;
} else
#endif
{
num_counters = tmp.num_counters;
name = tmp.name;
}

if (len != size + num_counters * sizeof(struct xt_counters))
return -EINVAL;

paddc = vmalloc(len - size);
if (!paddc)
return -ENOMEM;

if (copy_from_user(paddc, user + size, len - size) != 0) {
ret = -EFAULT;
goto free;
}

t = xt_find_table_lock(net, NFPROTO_ARP, name);
t = xt_find_table_lock(net, NFPROTO_ARP, tmp.name);
if (IS_ERR_OR_NULL(t)) {
ret = t ? PTR_ERR(t) : -ENOENT;
goto free;
@@ -1181,7 +1148,7 @@ static int do_add_counters(struct net *net, const void __user *user,

local_bh_disable();
private = t->private;
if (private->number != num_counters) {
if (private->number != tmp.num_counters) {
ret = -EINVAL;
goto unlock_up_free;
}
@@ -1208,6 +1175,18 @@ static int do_add_counters(struct net *net, const void __user *user,
}

#ifdef CONFIG_COMPAT
struct compat_arpt_replace {
char name[XT_TABLE_MAXNAMELEN];
u32 valid_hooks;
u32 num_entries;
u32 size;
u32 hook_entry[NF_ARP_NUMHOOKS];
u32 underflow[NF_ARP_NUMHOOKS];
u32 num_counters;
compat_uptr_t counters;
struct compat_arpt_entry entries[0];
};

static inline void compat_release_entry(struct compat_arpt_entry *e)
{
struct xt_entry_target *t;
@@ -1216,24 +1195,22 @@ static inline void compat_release_entry(struct compat_arpt_entry *e)
module_put(t->u.kernel.target->me);
}

static inline int
static int
check_compat_entry_size_and_hooks(struct compat_arpt_entry *e,
struct xt_table_info *newinfo,
unsigned int *size,
const unsigned char *base,
const unsigned char *limit,
const unsigned int *hook_entries,
const unsigned int *underflows,
const char *name)
const unsigned char *limit)
{
struct xt_entry_target *t;
struct xt_target *target;
unsigned int entry_offset;
int ret, off, h;
int ret, off;

duprintf("check_compat_entry_size_and_hooks %p\n", e);
if ((unsigned long)e % __alignof__(struct compat_arpt_entry) != 0 ||
(unsigned char *)e + sizeof(struct compat_arpt_entry) >= limit) {
(unsigned char *)e + sizeof(struct compat_arpt_entry) >= limit ||
(unsigned char *)e + e->next_offset > limit) {
duprintf("Bad offset %p, limit = %p\n", e, limit);
return -EINVAL;
}
@@ -1245,8 +1222,11 @@ check_compat_entry_size_and_hooks(struct compat_arpt_entry *e,
return -EINVAL;
}

/* For purposes of check_entry casting the compat entry is fine */
ret = check_entry((struct arpt_entry *)e, name);
if (!arp_checkentry(&e->arp))
return -EINVAL;

ret = xt_compat_check_entry_offsets(e, e->elems, e->target_offset,
e->next_offset);
if (ret)
return ret;

@@ -1270,17 +1250,6 @@ check_compat_entry_size_and_hooks(struct compat_arpt_entry *e,
if (ret)
goto release_target;

/* Check hooks & underflows */
for (h = 0; h < NF_ARP_NUMHOOKS; h++) {
if ((unsigned char *)e - base == hook_entries[h])
newinfo->hook_entry[h] = hook_entries[h];
if ((unsigned char *)e - base == underflows[h])
newinfo->underflow[h] = underflows[h];
}

/* Clear counters and comefrom */
memset(&e->counters, 0, sizeof(e->counters));
e->comefrom = 0;
return 0;

release_target:
@@ -1289,18 +1258,17 @@ out:
return ret;
}

static int
static void
compat_copy_entry_from_user(struct compat_arpt_entry *e, void **dstptr,
unsigned int *size, const char *name,
unsigned int *size,
struct xt_table_info *newinfo, unsigned char *base)
{
struct xt_entry_target *t;
struct xt_target *target;
struct arpt_entry *de;
unsigned int origsize;
int ret, h;
int h;

ret = 0;
origsize = *size;
de = (struct arpt_entry *)*dstptr;
memcpy(de, e, sizeof(struct arpt_entry));
@@ -1321,148 +1289,82 @@ compat_copy_entry_from_user(struct compat_arpt_entry *e, void **dstptr,
if ((unsigned char *)de - base < newinfo->underflow[h])
newinfo->underflow[h] -= origsize - *size;
}
return ret;
}

static int translate_compat_table(const char *name,
unsigned int valid_hooks,
struct xt_table_info **pinfo,
static int translate_compat_table(struct xt_table_info **pinfo,
void **pentry0,
unsigned int total_size,
unsigned int number,
unsigned int *hook_entries,
unsigned int *underflows)
const struct compat_arpt_replace *compatr)
{
unsigned int i, j;
struct xt_table_info *newinfo, *info;
void *pos, *entry0, *entry1;
struct compat_arpt_entry *iter0;
struct arpt_entry *iter1;
struct arpt_replace repl;
unsigned int size;
int ret = 0;

info = *pinfo;
entry0 = *pentry0;
size = total_size;
info->number = number;

/* Init all hooks to impossible value. */
for (i = 0; i < NF_ARP_NUMHOOKS; i++) {
info->hook_entry[i] = 0xFFFFFFFF;
info->underflow[i] = 0xFFFFFFFF;
}
size = compatr->size;
info->number = compatr->num_entries;

duprintf("translate_compat_table: size %u\n", info->size);
j = 0;
xt_compat_lock(NFPROTO_ARP);
xt_compat_init_offsets(NFPROTO_ARP, number);
xt_compat_init_offsets(NFPROTO_ARP, compatr->num_entries);
/* Walk through entries, checking offsets. */
xt_entry_foreach(iter0, entry0, total_size) {
xt_entry_foreach(iter0, entry0, compatr->size) {
ret = check_compat_entry_size_and_hooks(iter0, info, &size,
entry0,
entry0 + total_size,
hook_entries,
underflows,
name);
entry0 + compatr->size);
if (ret != 0)
goto out_unlock;
++j;
}

ret = -EINVAL;
if (j != number) {
if (j != compatr->num_entries) {
duprintf("translate_compat_table: %u not %u entries\n",
j, number);
j, compatr->num_entries);
goto out_unlock;
}

/* Check hooks all assigned */
for (i = 0; i < NF_ARP_NUMHOOKS; i++) {
/* Only hooks which are valid */
if (!(valid_hooks & (1 << i)))
continue;
if (info->hook_entry[i] == 0xFFFFFFFF) {
duprintf("Invalid hook entry %u %u\n",
i, hook_entries[i]);
goto out_unlock;
}
if (info->underflow[i] == 0xFFFFFFFF) {
duprintf("Invalid underflow %u %u\n",
i, underflows[i]);
goto out_unlock;
}
}

ret = -ENOMEM;
newinfo = xt_alloc_table_info(size);
if (!newinfo)
goto out_unlock;

newinfo->number = number;
newinfo->number = compatr->num_entries;
for (i = 0; i < NF_ARP_NUMHOOKS; i++) {
newinfo->hook_entry[i] = info->hook_entry[i];
newinfo->underflow[i] = info->underflow[i];
}
entry1 = newinfo->entries;
pos = entry1;
size = total_size;
xt_entry_foreach(iter0, entry0, total_size) {
ret = compat_copy_entry_from_user(iter0, &pos, &size,
name, newinfo, entry1);
if (ret != 0)
break;
}
size = compatr->size;
xt_entry_foreach(iter0, entry0, compatr->size)
compat_copy_entry_from_user(iter0, &pos, &size,
newinfo, entry1);

/* all module references in entry0 are now gone */

xt_compat_flush_offsets(NFPROTO_ARP);
xt_compat_unlock(NFPROTO_ARP);

memcpy(&repl, compatr, sizeof(*compatr));

for (i = 0; i < NF_ARP_NUMHOOKS; i++) {
repl.hook_entry[i] = newinfo->hook_entry[i];
repl.underflow[i] = newinfo->underflow[i];
}

repl.num_counters = 0;
repl.counters = NULL;
repl.size = newinfo->size;
ret = translate_table(newinfo, entry1, &repl);
if (ret)
goto free_newinfo;

ret = -ELOOP;
if (!mark_source_chains(newinfo, valid_hooks, entry1))
goto free_newinfo;

i = 0;
xt_entry_foreach(iter1, entry1, newinfo->size) {
iter1->counters.pcnt = xt_percpu_counter_alloc();
if (IS_ERR_VALUE(iter1->counters.pcnt)) {
ret = -ENOMEM;
break;
}

ret = check_target(iter1, name);
if (ret != 0) {
xt_percpu_counter_free(iter1->counters.pcnt);
break;
}
++i;
if (strcmp(arpt_get_target(iter1)->u.user.name,
XT_ERROR_TARGET) == 0)
++newinfo->stacksize;
}
if (ret) {
/*
* The first i matches need cleanup_entry (calls ->destroy)
* because they had called ->check already. The other j-i
* entries need only release.
*/
int skip = i;
j -= i;
xt_entry_foreach(iter0, entry0, newinfo->size) {
if (skip-- > 0)
continue;
if (j-- == 0)
break;
compat_release_entry(iter0);
}
xt_entry_foreach(iter1, entry1, newinfo->size) {
if (i-- == 0)
break;
cleanup_entry(iter1);
}
xt_free_table_info(newinfo);
return ret;
}

*pinfo = newinfo;
*pentry0 = entry1;
xt_free_table_info(info);
@@ -1470,31 +1372,18 @@ static int translate_compat_table(const char *name,

free_newinfo:
xt_free_table_info(newinfo);
out:
xt_entry_foreach(iter0, entry0, total_size) {
return ret;
out_unlock:
xt_compat_flush_offsets(NFPROTO_ARP);
xt_compat_unlock(NFPROTO_ARP);
xt_entry_foreach(iter0, entry0, compatr->size) {
if (j-- == 0)
break;
compat_release_entry(iter0);
}
return ret;
out_unlock:
xt_compat_flush_offsets(NFPROTO_ARP);
xt_compat_unlock(NFPROTO_ARP);
goto out;
}

struct compat_arpt_replace {
char name[XT_TABLE_MAXNAMELEN];
u32 valid_hooks;
u32 num_entries;
u32 size;
u32 hook_entry[NF_ARP_NUMHOOKS];
u32 underflow[NF_ARP_NUMHOOKS];
u32 num_counters;
compat_uptr_t counters;
struct compat_arpt_entry entries[0];
};

static int compat_do_replace(struct net *net, void __user *user,
unsigned int len)
{
@@ -1527,10 +1416,7 @@ static int compat_do_replace(struct net *net, void __user *user,
goto free_newinfo;
}

ret = translate_compat_table(tmp.name, tmp.valid_hooks,
&newinfo, &loc_cpu_entry, tmp.size,
tmp.num_entries, tmp.hook_entry,
tmp.underflow);
ret = translate_compat_table(&newinfo, &loc_cpu_entry, &tmp);
if (ret != 0)
goto free_newinfo;

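The rewritten unconditional() only accepts a rule whose match payload is all zeroes and whose target starts immediately after the fixed entry header, closing the hole where extra match data could hide behind a nominally "unconditional" rule. A self-contained sketch of the check with simplified stand-in structs (the real arpt_entry layout differs):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Simplified stand-ins for struct arpt_entry / struct arpt_arp. */
struct arp_match { unsigned char bytes[16]; };
struct arp_entry {
	struct arp_match arp;
	unsigned int target_offset;
};

/* Unconditional == no match criteria at all (memcmp against a static
 * zeroed template) AND no extra matches between header and target. */
static bool unconditional(const struct arp_entry *e)
{
	static const struct arp_match uncond;	/* zero-initialized */

	return e->target_offset == sizeof(struct arp_entry) &&
	       memcmp(&e->arp, &uncond, sizeof(uncond)) == 0;
}

int main(void)
{
	struct arp_entry e = { .target_offset = sizeof(struct arp_entry) };

	printf("empty rule unconditional: %d\n", unconditional(&e));
	e.arp.bytes[0] = 1;	/* any match byte makes it conditional */
	printf("with match:               %d\n", unconditional(&e));
	return 0;
}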
@@ -168,11 +168,12 @@ get_entry(const void *base, unsigned int offset)

/* All zeroes == unconditional rule. */
/* Mildly perf critical (only if packet tracing is on) */
static inline bool unconditional(const struct ipt_ip *ip)
static inline bool unconditional(const struct ipt_entry *e)
{
static const struct ipt_ip uncond;

return memcmp(ip, &uncond, sizeof(uncond)) == 0;
return e->target_offset == sizeof(struct ipt_entry) &&
memcmp(&e->ip, &uncond, sizeof(uncond)) == 0;
#undef FWINV
}

@@ -229,11 +230,10 @@ get_chainname_rulenum(const struct ipt_entry *s, const struct ipt_entry *e,
} else if (s == e) {
(*rulenum)++;

if (s->target_offset == sizeof(struct ipt_entry) &&
if (unconditional(s) &&
strcmp(t->target.u.kernel.target->name,
XT_STANDARD_TARGET) == 0 &&
t->verdict < 0 &&
unconditional(&s->ip)) {
t->verdict < 0) {
/* Tail of chains: STANDARD target (return/policy) */
*comment = *chainname == hookname
? comments[NF_IP_TRACE_COMMENT_POLICY]
@@ -443,6 +443,18 @@ ipt_do_table(struct sk_buff *skb,
#endif
}

static bool find_jump_target(const struct xt_table_info *t,
const struct ipt_entry *target)
{
struct ipt_entry *iter;

xt_entry_foreach(iter, t->entries, t->size) {
if (iter == target)
return true;
}
return false;
}

/* Figures out from what hook each rule can be called: returns 0 if
there are loops. Puts hook bitmask in comefrom. */
static int
@@ -476,11 +488,10 @@ mark_source_chains(const struct xt_table_info *newinfo,
e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));

/* Unconditional return/END. */
if ((e->target_offset == sizeof(struct ipt_entry) &&
if ((unconditional(e) &&
(strcmp(t->target.u.user.name,
XT_STANDARD_TARGET) == 0) &&
t->verdict < 0 && unconditional(&e->ip)) ||
visited) {
t->verdict < 0) || visited) {
unsigned int oldpos, size;

if ((strcmp(t->target.u.user.name,
@@ -521,6 +532,8 @@ mark_source_chains(const struct xt_table_info *newinfo,
size = e->next_offset;
e = (struct ipt_entry *)
(entry0 + pos + size);
if (pos + size >= newinfo->size)
return 0;
e->counters.pcnt = pos;
pos += size;
} else {
@@ -539,9 +552,15 @@ mark_source_chains(const struct xt_table_info *newinfo,
/* This a jump; chase it. */
duprintf("Jump rule %u -> %u\n",
pos, newpos);
e = (struct ipt_entry *)
(entry0 + newpos);
if (!find_jump_target(newinfo, e))
return 0;
} else {
/* ... this is a fallthru */
newpos = pos + e->next_offset;
if (newpos >= newinfo->size)
return 0;
}
e = (struct ipt_entry *)
(entry0 + newpos);
@@ -568,27 +587,6 @@ static void cleanup_match(struct xt_entry_match *m, struct net *net)
module_put(par.match->me);
}

static int
check_entry(const struct ipt_entry *e, const char *name)
{
const struct xt_entry_target *t;

if (!ip_checkentry(&e->ip)) {
duprintf("ip check failed %p %s.\n", e, name);
return -EINVAL;
}

if (e->target_offset + sizeof(struct xt_entry_target) >
e->next_offset)
return -EINVAL;

t = ipt_get_target_c(e);
if (e->target_offset + t->u.target_size > e->next_offset)
return -EINVAL;

return 0;
}

static int
check_match(struct xt_entry_match *m, struct xt_mtchk_param *par)
{
@@ -666,10 +664,6 @@ find_check_entry(struct ipt_entry *e, struct net *net, const char *name,
struct xt_mtchk_param mtpar;
struct xt_entry_match *ematch;

ret = check_entry(e, name);
if (ret)
return ret;

e->counters.pcnt = xt_percpu_counter_alloc();
if (IS_ERR_VALUE(e->counters.pcnt))
return -ENOMEM;
@@ -721,7 +715,7 @@ static bool check_underflow(const struct ipt_entry *e)
const struct xt_entry_target *t;
unsigned int verdict;

if (!unconditional(&e->ip))
if (!unconditional(e))
return false;
t = ipt_get_target_c(e);
if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
@@ -741,9 +735,11 @@ check_entry_size_and_hooks(struct ipt_entry *e,
unsigned int valid_hooks)
{
unsigned int h;
int err;

if ((unsigned long)e % __alignof__(struct ipt_entry) != 0 ||
(unsigned char *)e + sizeof(struct ipt_entry) >= limit) {
(unsigned char *)e + sizeof(struct ipt_entry) >= limit ||
(unsigned char *)e + e->next_offset > limit) {
duprintf("Bad offset %p\n", e);
return -EINVAL;
}
@@ -755,6 +751,14 @@ check_entry_size_and_hooks(struct ipt_entry *e,
return -EINVAL;
}

if (!ip_checkentry(&e->ip))
return -EINVAL;

err = xt_check_entry_offsets(e, e->elems, e->target_offset,
e->next_offset);
if (err)
return err;

/* Check hooks & underflows */
for (h = 0; h < NF_INET_NUMHOOKS; h++) {
if (!(valid_hooks & (1 << h)))
@@ -763,9 +767,9 @@ check_entry_size_and_hooks(struct ipt_entry *e,
newinfo->hook_entry[h] = hook_entries[h];
if ((unsigned char *)e - base == underflows[h]) {
if (!check_underflow(e)) {
pr_err("Underflows must be unconditional and "
"use the STANDARD target with "
"ACCEPT/DROP\n");
pr_debug("Underflows must be unconditional and "
"use the STANDARD target with "
"ACCEPT/DROP\n");
return -EINVAL;
}
newinfo->underflow[h] = underflows[h];
@@ -1309,55 +1313,17 @@ do_add_counters(struct net *net, const void __user *user,
unsigned int i;
struct xt_counters_info tmp;
struct xt_counters *paddc;
unsigned int num_counters;
const char *name;
int size;
void *ptmp;
struct xt_table *t;
const struct xt_table_info *private;
int ret = 0;
struct ipt_entry *iter;
unsigned int addend;
#ifdef CONFIG_COMPAT
struct compat_xt_counters_info compat_tmp;

if (compat) {
ptmp = &compat_tmp;
size = sizeof(struct compat_xt_counters_info);
} else
#endif
{
ptmp = &tmp;
size = sizeof(struct xt_counters_info);
}
paddc = xt_copy_counters_from_user(user, len, &tmp, compat);
if (IS_ERR(paddc))
return PTR_ERR(paddc);

if (copy_from_user(ptmp, user, size) != 0)
return -EFAULT;

#ifdef CONFIG_COMPAT
if (compat) {
num_counters = compat_tmp.num_counters;
name = compat_tmp.name;
} else
#endif
{
num_counters = tmp.num_counters;
name = tmp.name;
}

if (len != size + num_counters * sizeof(struct xt_counters))
return -EINVAL;

paddc = vmalloc(len - size);
if (!paddc)
return -ENOMEM;

if (copy_from_user(paddc, user + size, len - size) != 0) {
ret = -EFAULT;
goto free;
}

t = xt_find_table_lock(net, AF_INET, name);
t = xt_find_table_lock(net, AF_INET, tmp.name);
if (IS_ERR_OR_NULL(t)) {
ret = t ? PTR_ERR(t) : -ENOENT;
goto free;
@@ -1365,7 +1331,7 @@ do_add_counters(struct net *net, const void __user *user,

local_bh_disable();
private = t->private;
if (private->number != num_counters) {
if (private->number != tmp.num_counters) {
ret = -EINVAL;
goto unlock_up_free;
}
@@ -1444,7 +1410,6 @@ compat_copy_entry_to_user(struct ipt_entry *e, void __user **dstptr,

static int
compat_find_calc_match(struct xt_entry_match *m,
const char *name,
const struct ipt_ip *ip,
int *size)
{
@@ -1479,21 +1444,19 @@ check_compat_entry_size_and_hooks(struct compat_ipt_entry *e,
struct xt_table_info *newinfo,
unsigned int *size,
const unsigned char *base,
const unsigned char *limit,
const unsigned int *hook_entries,
const unsigned int *underflows,
const char *name)
const unsigned char *limit)
{
struct xt_entry_match *ematch;
struct xt_entry_target *t;
struct xt_target *target;
unsigned int entry_offset;
unsigned int j;
int ret, off, h;
int ret, off;

duprintf("check_compat_entry_size_and_hooks %p\n", e);
if ((unsigned long)e % __alignof__(struct compat_ipt_entry) != 0 ||
(unsigned char *)e + sizeof(struct compat_ipt_entry) >= limit) {
(unsigned char *)e + sizeof(struct compat_ipt_entry) >= limit ||
(unsigned char *)e + e->next_offset > limit) {
duprintf("Bad offset %p, limit = %p\n", e, limit);
return -EINVAL;
}
@@ -1505,8 +1468,11 @@ check_compat_entry_size_and_hooks(struct compat_ipt_entry *e,
return -EINVAL;
}

/* For purposes of check_entry casting the compat entry is fine */
ret = check_entry((struct ipt_entry *)e, name);
if (!ip_checkentry(&e->ip))
return -EINVAL;

ret = xt_compat_check_entry_offsets(e, e->elems,
e->target_offset, e->next_offset);
if (ret)
return ret;

@@ -1514,7 +1480,7 @@ check_compat_entry_size_and_hooks(struct compat_ipt_entry *e,
entry_offset = (void *)e - (void *)base;
j = 0;
xt_ematch_foreach(ematch, e) {
ret = compat_find_calc_match(ematch, name, &e->ip, &off);
ret = compat_find_calc_match(ematch, &e->ip, &off);
if (ret != 0)
goto release_matches;
++j;
@@ -1537,17 +1503,6 @@ check_compat_entry_size_and_hooks(struct compat_ipt_entry *e,
if (ret)
goto out;

/* Check hooks & underflows */
for (h = 0; h < NF_INET_NUMHOOKS; h++) {
if ((unsigned char *)e - base == hook_entries[h])
newinfo->hook_entry[h] = hook_entries[h];
if ((unsigned char *)e - base == underflows[h])
newinfo->underflow[h] = underflows[h];
}

/* Clear counters and comefrom */
memset(&e->counters, 0, sizeof(e->counters));
e->comefrom = 0;
return 0;

out:
@@ -1561,19 +1516,18 @@ release_matches:
return ret;
}

static int
static void
compat_copy_entry_from_user(struct compat_ipt_entry *e, void **dstptr,
unsigned int *size, const char *name,
unsigned int *size,
struct xt_table_info *newinfo, unsigned char *base)
{
struct xt_entry_target *t;
struct xt_target *target;
struct ipt_entry *de;
unsigned int origsize;
int ret, h;
int h;
struct xt_entry_match *ematch;

ret = 0;
origsize = *size;
de = (struct ipt_entry *)*dstptr;
memcpy(de, e, sizeof(struct ipt_entry));
@@ -1582,201 +1536,105 @@ compat_copy_entry_from_user(struct compat_ipt_entry *e, void **dstptr,
*dstptr += sizeof(struct ipt_entry);
*size += sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);

xt_ematch_foreach(ematch, e) {
ret = xt_compat_match_from_user(ematch, dstptr, size);
if (ret != 0)
return ret;
}
xt_ematch_foreach(ematch, e)
xt_compat_match_from_user(ematch, dstptr, size);

de->target_offset = e->target_offset - (origsize - *size);
t = compat_ipt_get_target(e);
target = t->u.kernel.target;
xt_compat_target_from_user(t, dstptr, size);

de->next_offset = e->next_offset - (origsize - *size);

for (h = 0; h < NF_INET_NUMHOOKS; h++) {
if ((unsigned char *)de - base < newinfo->hook_entry[h])
newinfo->hook_entry[h] -= origsize - *size;
if ((unsigned char *)de - base < newinfo->underflow[h])
newinfo->underflow[h] -= origsize - *size;
}
return ret;
}

static int
compat_check_entry(struct ipt_entry *e, struct net *net, const char *name)
{
struct xt_entry_match *ematch;
struct xt_mtchk_param mtpar;
unsigned int j;
int ret = 0;

e->counters.pcnt = xt_percpu_counter_alloc();
if (IS_ERR_VALUE(e->counters.pcnt))
return -ENOMEM;

j = 0;
mtpar.net = net;
mtpar.table = name;
mtpar.entryinfo = &e->ip;
mtpar.hook_mask = e->comefrom;
mtpar.family = NFPROTO_IPV4;
xt_ematch_foreach(ematch, e) {
ret = check_match(ematch, &mtpar);
if (ret != 0)
goto cleanup_matches;
++j;
}

ret = check_target(e, net, name);
if (ret)
goto cleanup_matches;
return 0;

cleanup_matches:
xt_ematch_foreach(ematch, e) {
if (j-- == 0)
break;
cleanup_match(ematch, net);
}

xt_percpu_counter_free(e->counters.pcnt);

return ret;
}

static int
translate_compat_table(struct net *net,
const char *name,
unsigned int valid_hooks,
struct xt_table_info **pinfo,
void **pentry0,
unsigned int total_size,
unsigned int number,
unsigned int *hook_entries,
unsigned int *underflows)
const struct compat_ipt_replace *compatr)
{
unsigned int i, j;
struct xt_table_info *newinfo, *info;
void *pos, *entry0, *entry1;
struct compat_ipt_entry *iter0;
struct ipt_entry *iter1;
struct ipt_replace repl;
unsigned int size;
int ret;

info = *pinfo;
entry0 = *pentry0;
size = total_size;
info->number = number;

/* Init all hooks to impossible value. */
for (i = 0; i < NF_INET_NUMHOOKS; i++) {
info->hook_entry[i] = 0xFFFFFFFF;
info->underflow[i] = 0xFFFFFFFF;
}
size = compatr->size;
info->number = compatr->num_entries;

duprintf("translate_compat_table: size %u\n", info->size);
j = 0;
xt_compat_lock(AF_INET);
xt_compat_init_offsets(AF_INET, number);
xt_compat_init_offsets(AF_INET, compatr->num_entries);
/* Walk through entries, checking offsets. */
xt_entry_foreach(iter0, entry0, total_size) {
xt_entry_foreach(iter0, entry0, compatr->size) {
ret = check_compat_entry_size_and_hooks(iter0, info, &size,
entry0,
entry0 + total_size,
hook_entries,
underflows,
name);
entry0 + compatr->size);
if (ret != 0)
goto out_unlock;
++j;
}

ret = -EINVAL;
if (j != number) {
if (j != compatr->num_entries) {
duprintf("translate_compat_table: %u not %u entries\n",
j, number);
j, compatr->num_entries);
goto out_unlock;
}

/* Check hooks all assigned */
for (i = 0; i < NF_INET_NUMHOOKS; i++) {
/* Only hooks which are valid */
if (!(valid_hooks & (1 << i)))
continue;
if (info->hook_entry[i] == 0xFFFFFFFF) {
duprintf("Invalid hook entry %u %u\n",
i, hook_entries[i]);
goto out_unlock;
}
if (info->underflow[i] == 0xFFFFFFFF) {
duprintf("Invalid underflow %u %u\n",
i, underflows[i]);
goto out_unlock;
}
}

ret = -ENOMEM;
newinfo = xt_alloc_table_info(size);
if (!newinfo)
goto out_unlock;

newinfo->number = number;
newinfo->number = compatr->num_entries;
for (i = 0; i < NF_INET_NUMHOOKS; i++) {
newinfo->hook_entry[i] = info->hook_entry[i];
newinfo->underflow[i] = info->underflow[i];
newinfo->hook_entry[i] = compatr->hook_entry[i];
newinfo->underflow[i] = compatr->underflow[i];
}
entry1 = newinfo->entries;
pos = entry1;
size = total_size;
xt_entry_foreach(iter0, entry0, total_size) {
ret = compat_copy_entry_from_user(iter0, &pos, &size,
name, newinfo, entry1);
if (ret != 0)
break;
}
size = compatr->size;
xt_entry_foreach(iter0, entry0, compatr->size)
compat_copy_entry_from_user(iter0, &pos, &size,
newinfo, entry1);

/* all module references in entry0 are now gone.
* entry1/newinfo contains a 64bit ruleset that looks exactly as
* generated by 64bit userspace.
*
* Call standard translate_table() to validate all hook_entrys,
* underflows, check for loops, etc.
*/
xt_compat_flush_offsets(AF_INET);
xt_compat_unlock(AF_INET);

memcpy(&repl, compatr, sizeof(*compatr));

for (i = 0; i < NF_INET_NUMHOOKS; i++) {
repl.hook_entry[i] = newinfo->hook_entry[i];
repl.underflow[i] = newinfo->underflow[i];
}

repl.num_counters = 0;
repl.counters = NULL;
repl.size = newinfo->size;
ret = translate_table(net, newinfo, entry1, &repl);
if (ret)
goto free_newinfo;

ret = -ELOOP;
if (!mark_source_chains(newinfo, valid_hooks, entry1))
goto free_newinfo;

i = 0;
xt_entry_foreach(iter1, entry1, newinfo->size) {
ret = compat_check_entry(iter1, net, name);
if (ret != 0)
break;
||||
++i;
|
||||
if (strcmp(ipt_get_target(iter1)->u.user.name,
|
||||
XT_ERROR_TARGET) == 0)
|
||||
++newinfo->stacksize;
|
||||
}
|
||||
if (ret) {
|
||||
/*
|
||||
* The first i matches need cleanup_entry (calls ->destroy)
|
||||
* because they had called ->check already. The other j-i
|
||||
* entries need only release.
|
||||
*/
|
||||
int skip = i;
|
||||
j -= i;
|
||||
xt_entry_foreach(iter0, entry0, newinfo->size) {
|
||||
if (skip-- > 0)
|
||||
continue;
|
||||
if (j-- == 0)
|
||||
break;
|
||||
compat_release_entry(iter0);
|
||||
}
|
||||
xt_entry_foreach(iter1, entry1, newinfo->size) {
|
||||
if (i-- == 0)
|
||||
break;
|
||||
cleanup_entry(iter1, net);
|
||||
}
|
||||
xt_free_table_info(newinfo);
|
||||
return ret;
|
||||
}
|
||||
|
||||
*pinfo = newinfo;
|
||||
*pentry0 = entry1;
|
||||
xt_free_table_info(info);
|
||||
|
@ -1784,17 +1642,16 @@ translate_compat_table(struct net *net,
|
|||
|
||||
free_newinfo:
|
||||
xt_free_table_info(newinfo);
|
||||
out:
|
||||
xt_entry_foreach(iter0, entry0, total_size) {
|
||||
return ret;
|
||||
out_unlock:
|
||||
xt_compat_flush_offsets(AF_INET);
|
||||
xt_compat_unlock(AF_INET);
|
||||
xt_entry_foreach(iter0, entry0, compatr->size) {
|
||||
if (j-- == 0)
|
||||
break;
|
||||
compat_release_entry(iter0);
|
||||
}
|
||||
return ret;
|
||||
out_unlock:
|
||||
xt_compat_flush_offsets(AF_INET);
|
||||
xt_compat_unlock(AF_INET);
|
||||
goto out;
|
||||
}
|
||||
|
||||
static int
|
||||
|
@ -1830,10 +1687,7 @@ compat_do_replace(struct net *net, void __user *user, unsigned int len)
|
|||
goto free_newinfo;
|
||||
}
|
||||
|
||||
ret = translate_compat_table(net, tmp.name, tmp.valid_hooks,
|
||||
&newinfo, &loc_cpu_entry, tmp.size,
|
||||
tmp.num_entries, tmp.hook_entry,
|
||||
tmp.underflow);
|
||||
ret = translate_compat_table(net, &newinfo, &loc_cpu_entry, &tmp);
|
||||
if (ret != 0)
|
||||
goto free_newinfo;
|
||||
|
||||
|
|
|
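The bookkeeping above — every entry changes size during compat translation, so any hook or underflow offset that points past the current entry must shift by the accumulated size delta — can be shown with a standalone sketch. All names and sizes below are illustrative, not kernel code:

#include <stdio.h>

#define NUM_HOOKS 5

/* delta is (origsize - newsize); it is negative when entries grow. */
static void adjust_hooks(unsigned int hook_entry[NUM_HOOKS],
			 unsigned int entry_off, int delta)
{
	/* Offsets at or before this entry are unaffected; later ones shift. */
	for (int h = 0; h < NUM_HOOKS; h++)
		if (entry_off < hook_entry[h])
			hook_entry[h] -= delta;
}

int main(void)
{
	unsigned int hooks[NUM_HOOKS] = { 0, 112, 224, 336, 448 };

	/* The entry at offset 112 grew by 8 bytes during translation. */
	adjust_hooks(hooks, 112, -8);
	for (int h = 0; h < NUM_HOOKS; h++)
		printf("hook %d -> %u\n", h, hooks[h]);
	return 0;
}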
net/ipv4/udp.c

@@ -1531,7 +1531,7 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)

	/* if we're overly short, let UDP handle it */
	encap_rcv = ACCESS_ONCE(up->encap_rcv);
	if (skb->len > sizeof(struct udphdr) && encap_rcv) {
	if (encap_rcv) {
		int ret;

		/* Verify checksum before giving to encap */
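The surrounding code snapshots up->encap_rcv exactly once with ACCESS_ONCE() and then tests and calls only that snapshot, so a writer that concurrently clears the pointer cannot slip in between the NULL check and the call. A userspace sketch of the same read-once pattern, with C11 atomics standing in for ACCESS_ONCE() (illustrative names only):

#include <stdatomic.h>
#include <stddef.h>

typedef int (*encap_fn)(void *sk, void *skb);

struct sock_stub {
	_Atomic(encap_fn) encap_rcv;	/* may be cleared concurrently */
};

static int deliver(struct sock_stub *up, void *sk, void *skb)
{
	/* One load; the kernel's ACCESS_ONCE() serves the same role. */
	encap_fn handler = atomic_load_explicit(&up->encap_rcv,
						memory_order_relaxed);
	if (handler)
		return handler(sk, skb);	/* use the snapshot */
	return -1;				/* fall back to plain UDP */
}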
net/ipv6/ip6_output.c

@@ -1072,17 +1072,12 @@ struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
					 const struct in6_addr *final_dst)
{
	struct dst_entry *dst = sk_dst_check(sk, inet6_sk(sk)->dst_cookie);
	int err;

	dst = ip6_sk_dst_check(sk, dst, fl6);
	if (!dst)
		dst = ip6_dst_lookup_flow(sk, fl6, final_dst);

	err = ip6_dst_lookup_tail(sock_net(sk), sk, &dst, fl6);
	if (err)
		return ERR_PTR(err);
	if (final_dst)
		fl6->daddr = *final_dst;

	return xfrm_lookup_route(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0);
	return dst;
}
EXPORT_SYMBOL_GPL(ip6_sk_dst_lookup_flow);
net/ipv6/netfilter/ip6_tables.c

@@ -198,11 +198,12 @@ get_entry(const void *base, unsigned int offset)

/* All zeroes == unconditional rule. */
/* Mildly perf critical (only if packet tracing is on) */
static inline bool unconditional(const struct ip6t_ip6 *ipv6)
static inline bool unconditional(const struct ip6t_entry *e)
{
	static const struct ip6t_ip6 uncond;

	return memcmp(ipv6, &uncond, sizeof(uncond)) == 0;
	return e->target_offset == sizeof(struct ip6t_entry) &&
	       memcmp(&e->ipv6, &uncond, sizeof(uncond)) == 0;
}

static inline const struct xt_entry_target *
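The tightened helper now encodes both requirements in one place: a rule is unconditional only if it carries no matches at all (the target begins right after the base struct) and its selector fields are entirely zero. The same test on a self-contained toy type (not the kernel structs):

#include <string.h>
#include <stdbool.h>

struct toy_ip { unsigned char bits[16]; };

struct toy_entry {
	struct toy_ip ip;
	unsigned short target_offset;	/* where the target begins */
	char elems[];			/* matches, then target */
};

static bool toy_unconditional(const struct toy_entry *e)
{
	static const struct toy_ip uncond;	/* all zeroes */

	return e->target_offset == sizeof(struct toy_entry) &&
	       memcmp(&e->ip, &uncond, sizeof(uncond)) == 0;
}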
@@ -258,11 +259,10 @@ get_chainname_rulenum(const struct ip6t_entry *s, const struct ip6t_entry *e,
	} else if (s == e) {
		(*rulenum)++;

		if (s->target_offset == sizeof(struct ip6t_entry) &&
		if (unconditional(s) &&
		    strcmp(t->target.u.kernel.target->name,
			   XT_STANDARD_TARGET) == 0 &&
		    t->verdict < 0 &&
		    unconditional(&s->ipv6)) {
		    t->verdict < 0) {
			/* Tail of chains: STANDARD target (return/policy) */
			*comment = *chainname == hookname
				? comments[NF_IP6_TRACE_COMMENT_POLICY]

@@ -455,6 +455,18 @@ ip6t_do_table(struct sk_buff *skb,
#endif
}

static bool find_jump_target(const struct xt_table_info *t,
			     const struct ip6t_entry *target)
{
	struct ip6t_entry *iter;

	xt_entry_foreach(iter, t->entries, t->size) {
		if (iter == target)
			return true;
	}
	return false;
}

/* Figures out from what hook each rule can be called: returns 0 if
   there are loops. Puts hook bitmask in comefrom. */
static int
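find_jump_target() only trusts a user-supplied jump offset if it coincides exactly with the start of an entry reached by the normal next_offset walk. The same idea as a standalone sketch (toy rule type, illustrative only):

#include <stdbool.h>

struct toy_rule { unsigned short next_offset; };

static bool toy_find_jump_target(const char *blob, unsigned int blob_size,
				 unsigned int newpos)
{
	unsigned int pos = 0;

	while (pos < blob_size) {
		const struct toy_rule *r = (const void *)(blob + pos);

		if (pos == newpos)	/* jump hits a real rule boundary */
			return true;
		if (r->next_offset == 0)	/* malformed; stop walking */
			break;
		pos += r->next_offset;
	}
	return false;
}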
@@ -488,11 +500,10 @@ mark_source_chains(const struct xt_table_info *newinfo,
		e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));

		/* Unconditional return/END. */
		if ((e->target_offset == sizeof(struct ip6t_entry) &&
		if ((unconditional(e) &&
		     (strcmp(t->target.u.user.name,
			     XT_STANDARD_TARGET) == 0) &&
		     t->verdict < 0 &&
		     unconditional(&e->ipv6)) || visited) {
		     t->verdict < 0) || visited) {
			unsigned int oldpos, size;

			if ((strcmp(t->target.u.user.name,

@@ -533,6 +544,8 @@ mark_source_chains(const struct xt_table_info *newinfo,
				size = e->next_offset;
				e = (struct ip6t_entry *)
					(entry0 + pos + size);
				if (pos + size >= newinfo->size)
					return 0;
				e->counters.pcnt = pos;
				pos += size;
			} else {

@@ -551,9 +564,15 @@ mark_source_chains(const struct xt_table_info *newinfo,
					/* This a jump; chase it. */
					duprintf("Jump rule %u -> %u\n",
						 pos, newpos);
					e = (struct ip6t_entry *)
						(entry0 + newpos);
					if (!find_jump_target(newinfo, e))
						return 0;
				} else {
					/* ... this is a fallthru */
					newpos = pos + e->next_offset;
					if (newpos >= newinfo->size)
						return 0;
				}
				e = (struct ip6t_entry *)
					(entry0 + newpos);
@@ -580,27 +599,6 @@ static void cleanup_match(struct xt_entry_match *m, struct net *net)
	module_put(par.match->me);
}

static int
check_entry(const struct ip6t_entry *e, const char *name)
{
	const struct xt_entry_target *t;

	if (!ip6_checkentry(&e->ipv6)) {
		duprintf("ip_tables: ip check failed %p %s.\n", e, name);
		return -EINVAL;
	}

	if (e->target_offset + sizeof(struct xt_entry_target) >
	    e->next_offset)
		return -EINVAL;

	t = ip6t_get_target_c(e);
	if (e->target_offset + t->u.target_size > e->next_offset)
		return -EINVAL;

	return 0;
}

static int check_match(struct xt_entry_match *m, struct xt_mtchk_param *par)
{
	const struct ip6t_ip6 *ipv6 = par->entryinfo;

@@ -679,10 +677,6 @@ find_check_entry(struct ip6t_entry *e, struct net *net, const char *name,
	struct xt_mtchk_param mtpar;
	struct xt_entry_match *ematch;

	ret = check_entry(e, name);
	if (ret)
		return ret;

	e->counters.pcnt = xt_percpu_counter_alloc();
	if (IS_ERR_VALUE(e->counters.pcnt))
		return -ENOMEM;

@@ -733,7 +727,7 @@ static bool check_underflow(const struct ip6t_entry *e)
	const struct xt_entry_target *t;
	unsigned int verdict;

	if (!unconditional(&e->ipv6))
	if (!unconditional(e))
		return false;
	t = ip6t_get_target_c(e);
	if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
@@ -753,9 +747,11 @@ check_entry_size_and_hooks(struct ip6t_entry *e,
			   unsigned int valid_hooks)
{
	unsigned int h;
	int err;

	if ((unsigned long)e % __alignof__(struct ip6t_entry) != 0 ||
	    (unsigned char *)e + sizeof(struct ip6t_entry) >= limit) {
	    (unsigned char *)e + sizeof(struct ip6t_entry) >= limit ||
	    (unsigned char *)e + e->next_offset > limit) {
		duprintf("Bad offset %p\n", e);
		return -EINVAL;
	}

@@ -767,6 +763,14 @@ check_entry_size_and_hooks(struct ip6t_entry *e,
		return -EINVAL;
	}

	if (!ip6_checkentry(&e->ipv6))
		return -EINVAL;

	err = xt_check_entry_offsets(e, e->elems, e->target_offset,
				     e->next_offset);
	if (err)
		return err;

	/* Check hooks & underflows */
	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
		if (!(valid_hooks & (1 << h)))

@@ -775,9 +779,9 @@ check_entry_size_and_hooks(struct ip6t_entry *e,
			newinfo->hook_entry[h] = hook_entries[h];
		if ((unsigned char *)e - base == underflows[h]) {
			if (!check_underflow(e)) {
				pr_err("Underflows must be unconditional and "
				       "use the STANDARD target with "
				       "ACCEPT/DROP\n");
				pr_debug("Underflows must be unconditional and "
					 "use the STANDARD target with "
					 "ACCEPT/DROP\n");
				return -EINVAL;
			}
			newinfo->underflow[h] = underflows[h];
@@ -1321,55 +1325,16 @@ do_add_counters(struct net *net, const void __user *user, unsigned int len,
	unsigned int i;
	struct xt_counters_info tmp;
	struct xt_counters *paddc;
	unsigned int num_counters;
	char *name;
	int size;
	void *ptmp;
	struct xt_table *t;
	const struct xt_table_info *private;
	int ret = 0;
	struct ip6t_entry *iter;
	unsigned int addend;
#ifdef CONFIG_COMPAT
	struct compat_xt_counters_info compat_tmp;

	if (compat) {
		ptmp = &compat_tmp;
		size = sizeof(struct compat_xt_counters_info);
	} else
#endif
	{
		ptmp = &tmp;
		size = sizeof(struct xt_counters_info);
	}

	if (copy_from_user(ptmp, user, size) != 0)
		return -EFAULT;

#ifdef CONFIG_COMPAT
	if (compat) {
		num_counters = compat_tmp.num_counters;
		name = compat_tmp.name;
	} else
#endif
	{
		num_counters = tmp.num_counters;
		name = tmp.name;
	}

	if (len != size + num_counters * sizeof(struct xt_counters))
		return -EINVAL;

	paddc = vmalloc(len - size);
	if (!paddc)
		return -ENOMEM;

	if (copy_from_user(paddc, user + size, len - size) != 0) {
		ret = -EFAULT;
		goto free;
	}

	t = xt_find_table_lock(net, AF_INET6, name);
	paddc = xt_copy_counters_from_user(user, len, &tmp, compat);
	if (IS_ERR(paddc))
		return PTR_ERR(paddc);
	t = xt_find_table_lock(net, AF_INET6, tmp.name);
	if (IS_ERR_OR_NULL(t)) {
		ret = t ? PTR_ERR(t) : -ENOENT;
		goto free;

@@ -1377,7 +1342,7 @@ do_add_counters(struct net *net, const void __user *user, unsigned int len,

	local_bh_disable();
	private = t->private;
	if (private->number != num_counters) {
	if (private->number != tmp.num_counters) {
		ret = -EINVAL;
		goto unlock_up_free;
	}
@@ -1456,7 +1421,6 @@ compat_copy_entry_to_user(struct ip6t_entry *e, void __user **dstptr,

static int
compat_find_calc_match(struct xt_entry_match *m,
		       const char *name,
		       const struct ip6t_ip6 *ipv6,
		       int *size)
{

@@ -1491,21 +1455,19 @@ check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
				  struct xt_table_info *newinfo,
				  unsigned int *size,
				  const unsigned char *base,
				  const unsigned char *limit,
				  const unsigned int *hook_entries,
				  const unsigned int *underflows,
				  const char *name)
				  const unsigned char *limit)
{
	struct xt_entry_match *ematch;
	struct xt_entry_target *t;
	struct xt_target *target;
	unsigned int entry_offset;
	unsigned int j;
	int ret, off, h;
	int ret, off;

	duprintf("check_compat_entry_size_and_hooks %p\n", e);
	if ((unsigned long)e % __alignof__(struct compat_ip6t_entry) != 0 ||
	    (unsigned char *)e + sizeof(struct compat_ip6t_entry) >= limit) {
	    (unsigned char *)e + sizeof(struct compat_ip6t_entry) >= limit ||
	    (unsigned char *)e + e->next_offset > limit) {
		duprintf("Bad offset %p, limit = %p\n", e, limit);
		return -EINVAL;
	}

@@ -1517,8 +1479,11 @@ check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
		return -EINVAL;
	}

	/* For purposes of check_entry casting the compat entry is fine */
	ret = check_entry((struct ip6t_entry *)e, name);
	if (!ip6_checkentry(&e->ipv6))
		return -EINVAL;

	ret = xt_compat_check_entry_offsets(e, e->elems,
					    e->target_offset, e->next_offset);
	if (ret)
		return ret;

@@ -1526,7 +1491,7 @@ check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
	entry_offset = (void *)e - (void *)base;
	j = 0;
	xt_ematch_foreach(ematch, e) {
		ret = compat_find_calc_match(ematch, name, &e->ipv6, &off);
		ret = compat_find_calc_match(ematch, &e->ipv6, &off);
		if (ret != 0)
			goto release_matches;
		++j;
@@ -1549,17 +1514,6 @@ check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
	if (ret)
		goto out;

	/* Check hooks & underflows */
	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
		if ((unsigned char *)e - base == hook_entries[h])
			newinfo->hook_entry[h] = hook_entries[h];
		if ((unsigned char *)e - base == underflows[h])
			newinfo->underflow[h] = underflows[h];
	}

	/* Clear counters and comefrom */
	memset(&e->counters, 0, sizeof(e->counters));
	e->comefrom = 0;
	return 0;

out:

@@ -1573,18 +1527,17 @@ release_matches:
	return ret;
}
static int
static void
compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr,
			    unsigned int *size, const char *name,
			    unsigned int *size,
			    struct xt_table_info *newinfo, unsigned char *base)
{
	struct xt_entry_target *t;
	struct ip6t_entry *de;
	unsigned int origsize;
	int ret, h;
	int h;
	struct xt_entry_match *ematch;

	ret = 0;
	origsize = *size;
	de = (struct ip6t_entry *)*dstptr;
	memcpy(de, e, sizeof(struct ip6t_entry));

@@ -1593,11 +1546,9 @@ compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr,
	*dstptr += sizeof(struct ip6t_entry);
	*size += sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);

	xt_ematch_foreach(ematch, e) {
		ret = xt_compat_match_from_user(ematch, dstptr, size);
		if (ret != 0)
			return ret;
	}
	xt_ematch_foreach(ematch, e)
		xt_compat_match_from_user(ematch, dstptr, size);

	de->target_offset = e->target_offset - (origsize - *size);
	t = compat_ip6t_get_target(e);
	xt_compat_target_from_user(t, dstptr, size);

@@ -1609,183 +1560,83 @@ compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr,
		if ((unsigned char *)de - base < newinfo->underflow[h])
			newinfo->underflow[h] -= origsize - *size;
	}
	return ret;
}

static int compat_check_entry(struct ip6t_entry *e, struct net *net,
			      const char *name)
{
	unsigned int j;
	int ret = 0;
	struct xt_mtchk_param mtpar;
	struct xt_entry_match *ematch;

	e->counters.pcnt = xt_percpu_counter_alloc();
	if (IS_ERR_VALUE(e->counters.pcnt))
		return -ENOMEM;
	j = 0;
	mtpar.net = net;
	mtpar.table = name;
	mtpar.entryinfo = &e->ipv6;
	mtpar.hook_mask = e->comefrom;
	mtpar.family = NFPROTO_IPV6;
	xt_ematch_foreach(ematch, e) {
		ret = check_match(ematch, &mtpar);
		if (ret != 0)
			goto cleanup_matches;
		++j;
	}

	ret = check_target(e, net, name);
	if (ret)
		goto cleanup_matches;
	return 0;

 cleanup_matches:
	xt_ematch_foreach(ematch, e) {
		if (j-- == 0)
			break;
		cleanup_match(ematch, net);
	}

	xt_percpu_counter_free(e->counters.pcnt);

	return ret;
}

static int
translate_compat_table(struct net *net,
		       const char *name,
		       unsigned int valid_hooks,
		       struct xt_table_info **pinfo,
		       void **pentry0,
		       unsigned int total_size,
		       unsigned int number,
		       unsigned int *hook_entries,
		       unsigned int *underflows)
		       const struct compat_ip6t_replace *compatr)
{
	unsigned int i, j;
	struct xt_table_info *newinfo, *info;
	void *pos, *entry0, *entry1;
	struct compat_ip6t_entry *iter0;
	struct ip6t_entry *iter1;
	struct ip6t_replace repl;
	unsigned int size;
	int ret = 0;

	info = *pinfo;
	entry0 = *pentry0;
	size = total_size;
	info->number = number;

	/* Init all hooks to impossible value. */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		info->hook_entry[i] = 0xFFFFFFFF;
		info->underflow[i] = 0xFFFFFFFF;
	}
	size = compatr->size;
	info->number = compatr->num_entries;

	duprintf("translate_compat_table: size %u\n", info->size);
	j = 0;
	xt_compat_lock(AF_INET6);
	xt_compat_init_offsets(AF_INET6, number);
	xt_compat_init_offsets(AF_INET6, compatr->num_entries);
	/* Walk through entries, checking offsets. */
	xt_entry_foreach(iter0, entry0, total_size) {
	xt_entry_foreach(iter0, entry0, compatr->size) {
		ret = check_compat_entry_size_and_hooks(iter0, info, &size,
							entry0,
							entry0 + total_size,
							hook_entries,
							underflows,
							name);
							entry0 + compatr->size);
		if (ret != 0)
			goto out_unlock;
		++j;
	}

	ret = -EINVAL;
	if (j != number) {
	if (j != compatr->num_entries) {
		duprintf("translate_compat_table: %u not %u entries\n",
			 j, number);
			 j, compatr->num_entries);
		goto out_unlock;
	}

	/* Check hooks all assigned */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		/* Only hooks which are valid */
		if (!(valid_hooks & (1 << i)))
			continue;
		if (info->hook_entry[i] == 0xFFFFFFFF) {
			duprintf("Invalid hook entry %u %u\n",
				 i, hook_entries[i]);
			goto out_unlock;
		}
		if (info->underflow[i] == 0xFFFFFFFF) {
			duprintf("Invalid underflow %u %u\n",
				 i, underflows[i]);
			goto out_unlock;
		}
	}

	ret = -ENOMEM;
	newinfo = xt_alloc_table_info(size);
	if (!newinfo)
		goto out_unlock;

	newinfo->number = number;
	newinfo->number = compatr->num_entries;
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = info->hook_entry[i];
		newinfo->underflow[i] = info->underflow[i];
		newinfo->hook_entry[i] = compatr->hook_entry[i];
		newinfo->underflow[i] = compatr->underflow[i];
	}
	entry1 = newinfo->entries;
	pos = entry1;
	size = total_size;
	xt_entry_foreach(iter0, entry0, total_size) {
		ret = compat_copy_entry_from_user(iter0, &pos, &size,
						  name, newinfo, entry1);
		if (ret != 0)
			break;
	}
	size = compatr->size;
	xt_entry_foreach(iter0, entry0, compatr->size)
		compat_copy_entry_from_user(iter0, &pos, &size,
					    newinfo, entry1);

	/* all module references in entry0 are now gone. */
	xt_compat_flush_offsets(AF_INET6);
	xt_compat_unlock(AF_INET6);

	memcpy(&repl, compatr, sizeof(*compatr));

	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		repl.hook_entry[i] = newinfo->hook_entry[i];
		repl.underflow[i] = newinfo->underflow[i];
	}

	repl.num_counters = 0;
	repl.counters = NULL;
	repl.size = newinfo->size;
	ret = translate_table(net, newinfo, entry1, &repl);
	if (ret)
		goto free_newinfo;

	ret = -ELOOP;
	if (!mark_source_chains(newinfo, valid_hooks, entry1))
		goto free_newinfo;

	i = 0;
	xt_entry_foreach(iter1, entry1, newinfo->size) {
		ret = compat_check_entry(iter1, net, name);
		if (ret != 0)
			break;
		++i;
		if (strcmp(ip6t_get_target(iter1)->u.user.name,
		    XT_ERROR_TARGET) == 0)
			++newinfo->stacksize;
	}
	if (ret) {
		/*
		 * The first i matches need cleanup_entry (calls ->destroy)
		 * because they had called ->check already. The other j-i
		 * entries need only release.
		 */
		int skip = i;
		j -= i;
		xt_entry_foreach(iter0, entry0, newinfo->size) {
			if (skip-- > 0)
				continue;
			if (j-- == 0)
				break;
			compat_release_entry(iter0);
		}
		xt_entry_foreach(iter1, entry1, newinfo->size) {
			if (i-- == 0)
				break;
			cleanup_entry(iter1, net);
		}
		xt_free_table_info(newinfo);
		return ret;
	}

	*pinfo = newinfo;
	*pentry0 = entry1;
	xt_free_table_info(info);

@@ -1793,17 +1644,16 @@ translate_compat_table(struct net *net,

free_newinfo:
	xt_free_table_info(newinfo);
out:
	xt_entry_foreach(iter0, entry0, total_size) {
	return ret;
out_unlock:
	xt_compat_flush_offsets(AF_INET6);
	xt_compat_unlock(AF_INET6);
	xt_entry_foreach(iter0, entry0, compatr->size) {
		if (j-- == 0)
			break;
		compat_release_entry(iter0);
	}
	return ret;
out_unlock:
	xt_compat_flush_offsets(AF_INET6);
	xt_compat_unlock(AF_INET6);
	goto out;
}

static int

@@ -1839,10 +1689,7 @@ compat_do_replace(struct net *net, void __user *user, unsigned int len)
		goto free_newinfo;
	}

	ret = translate_compat_table(net, tmp.name, tmp.valid_hooks,
				     &newinfo, &loc_cpu_entry, tmp.size,
				     tmp.num_entries, tmp.hook_entry,
				     tmp.underflow);
	ret = translate_compat_table(net, &newinfo, &loc_cpu_entry, &tmp);
	if (ret != 0)
		goto free_newinfo;
net/ipv6/tcp_ipv6.c

@@ -1706,7 +1706,9 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
	destp = ntohs(inet->inet_dport);
	srcp  = ntohs(inet->inet_sport);

	if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
	if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
	    icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
	    icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
		timer_active = 1;
		timer_expires = icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
net/ipv6/udp.c

@@ -647,7 +647,7 @@ int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)

	/* if we're overly short, let UDP handle it */
	encap_rcv = ACCESS_ONCE(up->encap_rcv);
	if (skb->len > sizeof(struct udphdr) && encap_rcv) {
	if (encap_rcv) {
		int ret;

		/* Verify checksum before giving to encap */
net/l2tp/l2tp_core.c

@@ -1581,7 +1581,7 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32
	/* Mark socket as an encapsulation socket. See net/ipv4/udp.c */
	tunnel->encap = encap;
	if (encap == L2TP_ENCAPTYPE_UDP) {
		struct udp_tunnel_sock_cfg udp_cfg;
		struct udp_tunnel_sock_cfg udp_cfg = { };

		udp_cfg.sk_user_data = tunnel;
		udp_cfg.encap_type = UDP_ENCAP_L2TPINUDP;
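The one-character change above matters because the config struct has more members than the caller sets explicitly; "= { }" zero-initializes the remainder instead of leaving stack garbage for the consumer to read. A minimal standalone demonstration (hypothetical config struct, not the kernel type):

#include <stdio.h>

struct tunnel_cfg {
	void *sk_user_data;
	int encap_type;
	int use_checksums;	/* a field the caller never sets */
};

int main(void)
{
	struct tunnel_cfg cfg = { };	/* every member starts at zero */

	cfg.sk_user_data = NULL;
	cfg.encap_type = 1;
	/* use_checksums is guaranteed 0, not whatever was on the stack */
	printf("%d\n", cfg.use_checksums);
	return 0;
}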
net/netfilter/x_tables.c

@@ -415,6 +415,47 @@ int xt_check_match(struct xt_mtchk_param *par,
}
EXPORT_SYMBOL_GPL(xt_check_match);

/**
 * xt_check_entry_match - check that matches end before start of target
 *
 * @match: beginning of xt_entry_match
 * @target: beginning of this rules target (alleged end of matches)
 * @alignment: alignment requirement of match structures
 *
 * Validates that all matches add up to the beginning of the target,
 * and that each match covers at least the base structure size.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int xt_check_entry_match(const char *match, const char *target,
				const size_t alignment)
{
	const struct xt_entry_match *pos;
	int length = target - match;

	if (length == 0) /* no matches */
		return 0;

	pos = (struct xt_entry_match *)match;
	do {
		if ((unsigned long)pos % alignment)
			return -EINVAL;

		if (length < (int)sizeof(struct xt_entry_match))
			return -EINVAL;

		if (pos->u.match_size < sizeof(struct xt_entry_match))
			return -EINVAL;

		if (pos->u.match_size > length)
			return -EINVAL;

		length -= pos->u.match_size;
		pos = ((void *)((char *)(pos) + (pos)->u.match_size));
	} while (length > 0);

	return 0;
}
#ifdef CONFIG_COMPAT
int xt_compat_add_offset(u_int8_t af, unsigned int offset, int delta)
{

@@ -484,13 +525,14 @@ int xt_compat_match_offset(const struct xt_match *match)
}
EXPORT_SYMBOL_GPL(xt_compat_match_offset);

int xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr,
			      unsigned int *size)
void xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr,
			       unsigned int *size)
{
	const struct xt_match *match = m->u.kernel.match;
	struct compat_xt_entry_match *cm = (struct compat_xt_entry_match *)m;
	int pad, off = xt_compat_match_offset(match);
	u_int16_t msize = cm->u.user.match_size;
	char name[sizeof(m->u.user.name)];

	m = *dstptr;
	memcpy(m, cm, sizeof(*cm));

@@ -504,10 +546,12 @@ int xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr,

	msize += off;
	m->u.user.match_size = msize;
	strlcpy(name, match->name, sizeof(name));
	module_put(match->me);
	strncpy(m->u.user.name, name, sizeof(m->u.user.name));

	*size += off;
	*dstptr += msize;
	return 0;
}
EXPORT_SYMBOL_GPL(xt_compat_match_from_user);
@@ -538,8 +582,125 @@ int xt_compat_match_to_user(const struct xt_entry_match *m,
	return 0;
}
EXPORT_SYMBOL_GPL(xt_compat_match_to_user);

/* non-compat version may have padding after verdict */
struct compat_xt_standard_target {
	struct compat_xt_entry_target t;
	compat_uint_t verdict;
};

int xt_compat_check_entry_offsets(const void *base, const char *elems,
				  unsigned int target_offset,
				  unsigned int next_offset)
{
	long size_of_base_struct = elems - (const char *)base;
	const struct compat_xt_entry_target *t;
	const char *e = base;

	if (target_offset < size_of_base_struct)
		return -EINVAL;

	if (target_offset + sizeof(*t) > next_offset)
		return -EINVAL;

	t = (void *)(e + target_offset);
	if (t->u.target_size < sizeof(*t))
		return -EINVAL;

	if (target_offset + t->u.target_size > next_offset)
		return -EINVAL;

	if (strcmp(t->u.user.name, XT_STANDARD_TARGET) == 0 &&
	    COMPAT_XT_ALIGN(target_offset + sizeof(struct compat_xt_standard_target)) != next_offset)
		return -EINVAL;

	/* compat_xt_entry_match has less strict alignment requirements,
	 * otherwise they are identical. In case of padding differences
	 * we need to add a compat version of xt_check_entry_match.
	 */
	BUILD_BUG_ON(sizeof(struct compat_xt_entry_match) != sizeof(struct xt_entry_match));

	return xt_check_entry_match(elems, base + target_offset,
				    __alignof__(struct compat_xt_entry_match));
}
EXPORT_SYMBOL(xt_compat_check_entry_offsets);
#endif /* CONFIG_COMPAT */
/**
 * xt_check_entry_offsets - validate arp/ip/ip6t_entry
 *
 * @base: pointer to arp/ip/ip6t_entry
 * @elems: pointer to first xt_entry_match, i.e. ip(6)t_entry->elems
 * @target_offset: the arp/ip/ip6_t->target_offset
 * @next_offset: the arp/ip/ip6_t->next_offset
 *
 * validates that target_offset and next_offset are sane and that all
 * match sizes (if any) align with the target offset.
 *
 * This function does not validate the targets or matches themselves, it
 * only tests that all the offsets and sizes are correct, that all
 * match structures are aligned, and that the last structure ends where
 * the target structure begins.
 *
 * Also see xt_compat_check_entry_offsets for CONFIG_COMPAT version.
 *
 * The arp/ip/ip6t_entry structure @base must have passed following tests:
 * - it must point to a valid memory location
 * - base to base + next_offset must be accessible, i.e. not exceed allocated
 *   length.
 *
 * A well-formed entry looks like this:
 *
 * ip(6)t_entry   match [mtdata]  match [mtdata]  target [tgdata]  ip(6)t_entry
 * e->elems[]-----'                               |               |
 *                matchsize                       |               |
 *                                matchsize       |               |
 *                                                |               |
 * target_offset---------------------------------'                |
 * next_offset---------------------------------------------------'
 *
 * elems[]: flexible array member at end of ip(6)/arpt_entry struct.
 *          This is where matches (if any) and the target reside.
 * target_offset: beginning of target.
 * next_offset: start of the next rule; also: size of this rule.
 * Since targets have a minimum size, target_offset + minlen <= next_offset.
 *
 * Every match stores its size, sum of sizes must not exceed target_offset.
 *
 * Return: 0 on success, negative errno on failure.
 */
int xt_check_entry_offsets(const void *base,
			   const char *elems,
			   unsigned int target_offset,
			   unsigned int next_offset)
{
	long size_of_base_struct = elems - (const char *)base;
	const struct xt_entry_target *t;
	const char *e = base;

	/* target start is within the ip/ip6/arpt_entry struct */
	if (target_offset < size_of_base_struct)
		return -EINVAL;

	if (target_offset + sizeof(*t) > next_offset)
		return -EINVAL;

	t = (void *)(e + target_offset);
	if (t->u.target_size < sizeof(*t))
		return -EINVAL;

	if (target_offset + t->u.target_size > next_offset)
		return -EINVAL;

	if (strcmp(t->u.user.name, XT_STANDARD_TARGET) == 0 &&
	    XT_ALIGN(target_offset + sizeof(struct xt_standard_target)) != next_offset)
		return -EINVAL;

	return xt_check_entry_match(elems, base + target_offset,
				    __alignof__(struct xt_entry_match));
}
EXPORT_SYMBOL(xt_check_entry_offsets);
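The layout rules in the comment above reduce to simple arithmetic. A standalone worked example with made-up sizes (purely illustrative):

#include <assert.h>

int main(void)
{
	unsigned int base_struct = 112;		/* base entry struct, say */
	unsigned int match1 = 48, match2 = 32;	/* each >= minimum header */
	unsigned int target = 40;

	unsigned int target_offset = base_struct + match1 + match2;
	unsigned int next_offset = target_offset + target;

	assert(target_offset >= base_struct);	/* target after the base */
	assert(match1 + match2 == target_offset - base_struct);
	assert(target_offset + target <= next_offset);	/* target fits */
	return 0;
}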
int xt_check_target(struct xt_tgchk_param *par,
		    unsigned int size, u_int8_t proto, bool inv_proto)
{

@@ -590,6 +751,80 @@ int xt_check_target(struct xt_tgchk_param *par,
}
EXPORT_SYMBOL_GPL(xt_check_target);

/**
 * xt_copy_counters_from_user - copy counters and metadata from userspace
 *
 * @user: src pointer to userspace memory
 * @len: alleged size of userspace memory
 * @info: where to store the xt_counters_info metadata
 * @compat: true if the setsockopt call was made by a 32bit task on a 64bit kernel
 *
 * Copies counter meta data from @user and stores it in @info.
 *
 * vmallocs memory to hold the counters, then copies the counter data
 * from @user to the new memory and returns a pointer to it.
 *
 * If @compat is true, @info gets converted automatically to the 64bit
 * representation.
 *
 * The metadata associated with the counters is stored in @info.
 *
 * Return: returns pointer that caller has to test via IS_ERR().
 * If IS_ERR is false, caller has to vfree the pointer.
 */
void *xt_copy_counters_from_user(const void __user *user, unsigned int len,
				 struct xt_counters_info *info, bool compat)
{
	void *mem;
	u64 size;

#ifdef CONFIG_COMPAT
	if (compat) {
		/* structures only differ in size due to alignment */
		struct compat_xt_counters_info compat_tmp;

		if (len <= sizeof(compat_tmp))
			return ERR_PTR(-EINVAL);

		len -= sizeof(compat_tmp);
		if (copy_from_user(&compat_tmp, user, sizeof(compat_tmp)) != 0)
			return ERR_PTR(-EFAULT);

		strlcpy(info->name, compat_tmp.name, sizeof(info->name));
		info->num_counters = compat_tmp.num_counters;
		user += sizeof(compat_tmp);
	} else
#endif
	{
		if (len <= sizeof(*info))
			return ERR_PTR(-EINVAL);

		len -= sizeof(*info);
		if (copy_from_user(info, user, sizeof(*info)) != 0)
			return ERR_PTR(-EFAULT);

		info->name[sizeof(info->name) - 1] = '\0';
		user += sizeof(*info);
	}

	size = sizeof(struct xt_counters);
	size *= info->num_counters;

	if (size != (u64)len)
		return ERR_PTR(-EINVAL);

	mem = vmalloc(len);
	if (!mem)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(mem, user, len) == 0)
		return mem;

	vfree(mem);
	return ERR_PTR(-EFAULT);
}
EXPORT_SYMBOL_GPL(xt_copy_counters_from_user);
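The Return: contract above uses the kernel's ERR_PTR()/IS_ERR() convention of encoding an errno in the pointer value itself. A userspace imitation of that contract, purely illustrative:

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

/* Userspace stand-ins for the kernel's ERR_PTR()/IS_ERR() helpers. */
static inline void *ERR_PTR(long err) { return (void *)err; }
static inline long PTR_ERR(const void *p) { return (long)p; }
static inline int IS_ERR(const void *p)
{
	return (unsigned long)p >= (unsigned long)-4095;
}

static void *copy_counters(size_t len)
{
	if (len == 0)
		return ERR_PTR(-EINVAL);	/* error encoded in pointer */
	void *mem = malloc(len);
	if (!mem)
		return ERR_PTR(-ENOMEM);
	return mem;	/* caller must free() when done */
}

int main(void)
{
	void *p = copy_counters(0);
	if (IS_ERR(p))
		printf("failed: %ld\n", PTR_ERR(p));	/* prints -22 */
	return 0;
}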
#ifdef CONFIG_COMPAT
int xt_compat_target_offset(const struct xt_target *target)
{

@@ -605,6 +840,7 @@ void xt_compat_target_from_user(struct xt_entry_target *t, void **dstptr,
	struct compat_xt_entry_target *ct = (struct compat_xt_entry_target *)t;
	int pad, off = xt_compat_target_offset(target);
	u_int16_t tsize = ct->u.user.target_size;
	char name[sizeof(t->u.user.name)];

	t = *dstptr;
	memcpy(t, ct, sizeof(*ct));

@@ -618,6 +854,9 @@ void xt_compat_target_from_user(struct xt_entry_target *t, void **dstptr,

	tsize += off;
	t->u.user.target_size = tsize;
	strlcpy(name, target->name, sizeof(name));
	module_put(target->me);
	strncpy(t->u.user.name, name, sizeof(t->u.user.name));

	*size += off;
	*dstptr += tsize;
net/netlink/af_netlink.c

@@ -2784,6 +2784,7 @@ static int netlink_dump(struct sock *sk)
	struct netlink_callback *cb;
	struct sk_buff *skb = NULL;
	struct nlmsghdr *nlh;
	struct module *module;
	int len, err = -ENOBUFS;
	int alloc_min_size;
	int alloc_size;

@@ -2863,9 +2864,11 @@ static int netlink_dump(struct sock *sk)
		cb->done(cb);

	nlk->cb_running = false;
	module = cb->module;
	skb = cb->skb;
	mutex_unlock(nlk->cb_mutex);
	module_put(cb->module);
	consume_skb(cb->skb);
	module_put(module);
	consume_skb(skb);
	return 0;

errout_skb:
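The reordering above is a general use-after-free discipline: once the mutex drops, cb may be recycled by another thread, so every field needed afterwards is snapshotted to locals while the lock is still held. Distilled into a standalone sketch (illustrative types and callbacks, not the netlink code):

struct cb_stub { void *module; void *skb; };

static void finish_dump(struct cb_stub *cb,
			void (*unlock)(void),
			void (*put_module)(void *),
			void (*free_skb)(void *))
{
	void *module = cb->module;	/* snapshot under the lock */
	void *skb = cb->skb;

	unlock();			/* cb may now be reused or freed */
	put_module(module);		/* safe: uses the snapshots */
	free_skb(skb);
}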
net/openvswitch/vport-vxlan.c

@@ -91,6 +91,8 @@ static struct vport *vxlan_tnl_create(const struct vport_parms *parms)
	struct vxlan_config conf = {
		.no_share = true,
		.flags = VXLAN_F_COLLECT_METADATA | VXLAN_F_UDP_ZERO_CSUM6_RX,
		/* Don't restrict the packets that can be sent by MTU */
		.mtu = IP_MAX_MTU,
	};

	if (!options) {
net/switchdev/switchdev.c

@@ -1169,6 +1169,7 @@ int switchdev_fib_ipv4_add(u32 dst, int dst_len, struct fib_info *fi,
		.obj.id = SWITCHDEV_OBJ_ID_IPV4_FIB,
		.dst = dst,
		.dst_len = dst_len,
		.fi = fi,
		.tos = tos,
		.type = type,
		.nlflags = nlflags,

@@ -1177,8 +1178,6 @@ int switchdev_fib_ipv4_add(u32 dst, int dst_len, struct fib_info *fi,
	struct net_device *dev;
	int err = 0;

	memcpy(&ipv4_fib.fi, fi, sizeof(ipv4_fib.fi));

	/* Don't offload route if using custom ip rules or if
	 * IPv4 FIB offloading has been disabled completely.
	 */

@@ -1222,6 +1221,7 @@ int switchdev_fib_ipv4_del(u32 dst, int dst_len, struct fib_info *fi,
		.obj.id = SWITCHDEV_OBJ_ID_IPV4_FIB,
		.dst = dst,
		.dst_len = dst_len,
		.fi = fi,
		.tos = tos,
		.type = type,
		.nlflags = 0,

@@ -1230,8 +1230,6 @@ int switchdev_fib_ipv4_del(u32 dst, int dst_len, struct fib_info *fi,
	struct net_device *dev;
	int err = 0;

	memcpy(&ipv4_fib.fi, fi, sizeof(ipv4_fib.fi));

	if (!(fi->fib_flags & RTNH_F_OFFLOAD))
		return 0;
net/tipc/netlink_compat.c

@@ -802,7 +802,7 @@ static int tipc_nl_compat_name_table_dump(struct tipc_nl_compat_msg *msg,
		goto out;

	tipc_tlv_sprintf(msg->rep, "%-10u %s",
			 nla_get_u32(publ[TIPC_NLA_PUBL_REF]),
			 nla_get_u32(publ[TIPC_NLA_PUBL_KEY]),
			 scope_str[nla_get_u32(publ[TIPC_NLA_PUBL_SCOPE])]);
out:
	tipc_tlv_sprintf(msg->rep, "\n");
net/tipc/socket.c

@@ -2814,6 +2814,9 @@ int tipc_nl_publ_dump(struct sk_buff *skb, struct netlink_callback *cb)
		if (err)
			return err;

		if (!attrs[TIPC_NLA_SOCK])
			return -EINVAL;

		err = nla_parse_nested(sock, TIPC_NLA_SOCK_MAX,
				       attrs[TIPC_NLA_SOCK],
				       tipc_nl_sock_policy);
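The guard added here reflects a general rule for netlink dump handlers: a nested attribute slot may legitimately be NULL, and parsing must not begin until presence is verified. The pattern in a distilled standalone form (toy types, illustrative only):

#include <stddef.h>

struct toy_attr { int type; int len; };

/* Parse a nested attribute only after confirming the slot is populated. */
static int toy_parse_nested(const struct toy_attr *slot)
{
	if (!slot)		/* absent attribute: reject, don't deref */
		return -22;	/* -EINVAL */

	/* ... walk slot's payload here ... */
	return 0;
}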
net/wireless/wext-core.c

@@ -955,8 +955,29 @@ static int wireless_process_ioctl(struct net *net, struct ifreq *ifr,
			return private(dev, iwr, cmd, info, handler);
	}
	/* Old driver API : call driver ioctl handler */
	if (dev->netdev_ops->ndo_do_ioctl)
		return dev->netdev_ops->ndo_do_ioctl(dev, ifr, cmd);
	if (dev->netdev_ops->ndo_do_ioctl) {
#ifdef CONFIG_COMPAT
		if (info->flags & IW_REQUEST_FLAG_COMPAT) {
			int ret = 0;
			struct iwreq iwr_lcl;
			struct compat_iw_point *iwp_compat = (void *) &iwr->u.data;

			memcpy(&iwr_lcl, iwr, sizeof(struct iwreq));
			iwr_lcl.u.data.pointer = compat_ptr(iwp_compat->pointer);
			iwr_lcl.u.data.length = iwp_compat->length;
			iwr_lcl.u.data.flags = iwp_compat->flags;

			ret = dev->netdev_ops->ndo_do_ioctl(dev, (void *) &iwr_lcl, cmd);

			iwp_compat->pointer = ptr_to_compat(iwr_lcl.u.data.pointer);
			iwp_compat->length = iwr_lcl.u.data.length;
			iwp_compat->flags = iwr_lcl.u.data.flags;

			return ret;
		} else
#endif
			return dev->netdev_ops->ndo_do_ioctl(dev, ifr, cmd);
	}
	return -EOPNOTSUPP;
}
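For background, the compat branch above exists because a 32-bit task's iw_point carries a 32-bit pointer, so the 64-bit kernel must rebuild a native iw_point before calling the driver and pack the result back afterwards. A sketch of the widening step with stand-in struct definitions (field names mirror the kernel's compat_iw_point; layouts simplified and assumed):

struct compat_iw_point_sketch {
	unsigned int pointer;	/* compat_caddr_t: 32-bit user pointer */
	unsigned short length;
	unsigned short flags;
};

struct iw_point_sketch {
	void *pointer;		/* native 64-bit kernel pointer */
	unsigned short length;
	unsigned short flags;
};

static void widen(struct iw_point_sketch *dst,
		  const struct compat_iw_point_sketch *src)
{
	/* the cast plays the role of the kernel's compat_ptr() */
	dst->pointer = (void *)(unsigned long)src->pointer;
	dst->length = src->length;
	dst->flags = src->flags;
}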
sound/pci/hda/hda_intel.c

@@ -359,8 +359,11 @@ enum {

#define IS_SKL(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0xa170)
#define IS_SKL_LP(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x9d70)
#define IS_KBL(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0xa171)
#define IS_KBL_LP(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x9d71)
#define IS_BXT(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x5a98)
#define IS_SKL_PLUS(pci) (IS_SKL(pci) || IS_SKL_LP(pci) || IS_BXT(pci))
#define IS_SKL_PLUS(pci) (IS_SKL(pci) || IS_SKL_LP(pci) || IS_BXT(pci) || \
			  IS_KBL(pci) || IS_KBL_LP(pci))

static char *driver_short_names[] = {
	[AZX_DRIVER_ICH] = "HDA Intel",

@@ -2204,6 +2207,12 @@ static const struct pci_device_id azx_ids[] = {
	/* Sunrise Point-LP */
	{ PCI_DEVICE(0x8086, 0x9d70),
	  .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE },
	/* Kabylake */
	{ PCI_DEVICE(0x8086, 0xa171),
	  .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE },
	/* Kabylake-LP */
	{ PCI_DEVICE(0x8086, 0x9d71),
	  .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE },
	/* Broxton-P(Apollolake) */
	{ PCI_DEVICE(0x8086, 0x5a98),
	  .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_BROXTON },
sound/pci/hda/patch_realtek.c

@@ -346,6 +346,9 @@ static void alc_fill_eapd_coef(struct hda_codec *codec)
	case 0x10ec0234:
	case 0x10ec0274:
	case 0x10ec0294:
	case 0x10ec0700:
	case 0x10ec0701:
	case 0x10ec0703:
		alc_update_coef_idx(codec, 0x10, 1<<15, 0);
		break;
	case 0x10ec0662:

@@ -2655,6 +2658,7 @@ enum {
	ALC269_TYPE_ALC256,
	ALC269_TYPE_ALC225,
	ALC269_TYPE_ALC294,
	ALC269_TYPE_ALC700,
};

/*

@@ -2686,6 +2690,7 @@ static int alc269_parse_auto_config(struct hda_codec *codec)
	case ALC269_TYPE_ALC256:
	case ALC269_TYPE_ALC225:
	case ALC269_TYPE_ALC294:
	case ALC269_TYPE_ALC700:
		ssids = alc269_ssids;
		break;
	default:

@@ -3618,13 +3623,20 @@ static void alc269_fixup_hp_line1_mic1_led(struct hda_codec *codec,
static void alc_headset_mode_unplugged(struct hda_codec *codec)
{
	static struct coef_fw coef0255[] = {
		WRITE_COEF(0x1b, 0x0c0b), /* LDO and MISC control */
		WRITE_COEF(0x45, 0xd089), /* UAJ function set to manual mode */
		UPDATE_COEFEX(0x57, 0x05, 1<<14, 0), /* Direct Drive HP Amp control(Set to verb control)*/
		WRITE_COEF(0x06, 0x6104), /* Set MIC2 Vref gate with HP */
		WRITE_COEFEX(0x57, 0x03, 0x8aa6), /* Direct Drive HP Amp control */
		{}
	};
	static struct coef_fw coef0255_1[] = {
		WRITE_COEF(0x1b, 0x0c0b), /* LDO and MISC control */
		{}
	};
	static struct coef_fw coef0256[] = {
		WRITE_COEF(0x1b, 0x0c4b), /* LDO and MISC control */
		{}
	};
	static struct coef_fw coef0233[] = {
		WRITE_COEF(0x1b, 0x0c0b),
		WRITE_COEF(0x45, 0xc429),

@@ -3677,7 +3689,11 @@ static void alc_headset_mode_unplugged(struct hda_codec *codec)

	switch (codec->core.vendor_id) {
	case 0x10ec0255:
		alc_process_coef_fw(codec, coef0255_1);
		alc_process_coef_fw(codec, coef0255);
		break;
	case 0x10ec0256:
		alc_process_coef_fw(codec, coef0256);
		alc_process_coef_fw(codec, coef0255);
		break;
	case 0x10ec0233:

@@ -3896,6 +3912,12 @@ static void alc_headset_mode_ctia(struct hda_codec *codec)
		WRITE_COEFEX(0x57, 0x03, 0x8ea6),
		{}
	};
	static struct coef_fw coef0256[] = {
		WRITE_COEF(0x45, 0xd489), /* Set to CTIA type */
		WRITE_COEF(0x1b, 0x0c6b),
		WRITE_COEFEX(0x57, 0x03, 0x8ea6),
		{}
	};
	static struct coef_fw coef0233[] = {
		WRITE_COEF(0x45, 0xd429),
		WRITE_COEF(0x1b, 0x0c2b),

@@ -3936,9 +3958,11 @@ static void alc_headset_mode_ctia(struct hda_codec *codec)

	switch (codec->core.vendor_id) {
	case 0x10ec0255:
	case 0x10ec0256:
		alc_process_coef_fw(codec, coef0255);
		break;
	case 0x10ec0256:
		alc_process_coef_fw(codec, coef0256);
		break;
	case 0x10ec0233:
	case 0x10ec0283:
		alc_process_coef_fw(codec, coef0233);

@@ -3978,6 +4002,12 @@ static void alc_headset_mode_omtp(struct hda_codec *codec)
		WRITE_COEFEX(0x57, 0x03, 0x8ea6),
		{}
	};
	static struct coef_fw coef0256[] = {
		WRITE_COEF(0x45, 0xe489), /* Set to OMTP Type */
		WRITE_COEF(0x1b, 0x0c6b),
		WRITE_COEFEX(0x57, 0x03, 0x8ea6),
		{}
	};
	static struct coef_fw coef0233[] = {
		WRITE_COEF(0x45, 0xe429),
		WRITE_COEF(0x1b, 0x0c2b),

@@ -4018,9 +4048,11 @@ static void alc_headset_mode_omtp(struct hda_codec *codec)

	switch (codec->core.vendor_id) {
	case 0x10ec0255:
	case 0x10ec0256:
		alc_process_coef_fw(codec, coef0255);
		break;
	case 0x10ec0256:
		alc_process_coef_fw(codec, coef0256);
		break;
	case 0x10ec0233:
	case 0x10ec0283:
		alc_process_coef_fw(codec, coef0233);

@@ -4266,7 +4298,7 @@ static void alc_fixup_headset_mode_no_hp_mic(struct hda_codec *codec,
static void alc255_set_default_jack_type(struct hda_codec *codec)
{
	/* Set to iphone type */
	static struct coef_fw fw[] = {
	static struct coef_fw alc255fw[] = {
		WRITE_COEF(0x1b, 0x880b),
		WRITE_COEF(0x45, 0xd089),
		WRITE_COEF(0x1b, 0x080b),

@@ -4274,7 +4306,22 @@ static void alc255_set_default_jack_type(struct hda_codec *codec)
		WRITE_COEF(0x1b, 0x0c0b),
		{}
	};
	alc_process_coef_fw(codec, fw);
	static struct coef_fw alc256fw[] = {
		WRITE_COEF(0x1b, 0x884b),
		WRITE_COEF(0x45, 0xd089),
		WRITE_COEF(0x1b, 0x084b),
		WRITE_COEF(0x46, 0x0004),
		WRITE_COEF(0x1b, 0x0c4b),
		{}
	};
	switch (codec->core.vendor_id) {
	case 0x10ec0255:
		alc_process_coef_fw(codec, alc255fw);
		break;
	case 0x10ec0256:
		alc_process_coef_fw(codec, alc256fw);
		break;
	}
	msleep(30);
}

@@ -5587,6 +5634,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
	SND_PCI_QUIRK(0x17aa, 0x2218, "Thinkpad X1 Carbon 2nd", ALC292_FIXUP_TPT440_DOCK),
	SND_PCI_QUIRK(0x17aa, 0x2223, "ThinkPad T550", ALC292_FIXUP_TPT440_DOCK),
	SND_PCI_QUIRK(0x17aa, 0x2226, "ThinkPad X250", ALC292_FIXUP_TPT440_DOCK),
	SND_PCI_QUIRK(0x17aa, 0x2231, "Thinkpad T560", ALC292_FIXUP_TPT460),
	SND_PCI_QUIRK(0x17aa, 0x2233, "Thinkpad", ALC292_FIXUP_TPT460),
	SND_PCI_QUIRK(0x17aa, 0x30bb, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
	SND_PCI_QUIRK(0x17aa, 0x30e2, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),

@@ -5775,6 +5823,10 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
		{0x12, 0x90a60180},
		{0x14, 0x90170130},
		{0x21, 0x02211040}),
	SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell Inspiron 5565", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
		{0x12, 0x90a60180},
		{0x14, 0x90170120},
		{0x21, 0x02211030}),
	SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
		{0x12, 0x90a60160},
		{0x14, 0x90170120},

@@ -6053,6 +6105,14 @@ static int patch_alc269(struct hda_codec *codec)
	case 0x10ec0294:
		spec->codec_variant = ALC269_TYPE_ALC294;
		break;
	case 0x10ec0700:
	case 0x10ec0701:
	case 0x10ec0703:
		spec->codec_variant = ALC269_TYPE_ALC700;
		spec->gen.mixer_nid = 0; /* ALC700 does not have any loopback mixer path */
		alc_update_coef_idx(codec, 0x4a, 0, 1 << 15); /* Combo jack auto trigger control */
		break;

	}

	if (snd_hda_codec_read(codec, 0x51, 0, AC_VERB_PARAMETERS, 0) == 0x10ec5505) {

@@ -7008,6 +7068,9 @@ static const struct hda_device_id snd_hda_id_realtek[] = {
	HDA_CODEC_ENTRY(0x10ec0670, "ALC670", patch_alc662),
	HDA_CODEC_ENTRY(0x10ec0671, "ALC671", patch_alc662),
	HDA_CODEC_ENTRY(0x10ec0680, "ALC680", patch_alc680),
	HDA_CODEC_ENTRY(0x10ec0700, "ALC700", patch_alc269),
	HDA_CODEC_ENTRY(0x10ec0701, "ALC701", patch_alc269),
	HDA_CODEC_ENTRY(0x10ec0703, "ALC703", patch_alc269),
	HDA_CODEC_ENTRY(0x10ec0867, "ALC891", patch_alc882),
	HDA_CODEC_ENTRY(0x10ec0880, "ALC880", patch_alc880),
	HDA_CODEC_ENTRY(0x10ec0882, "ALC882", patch_alc882),
virt/kvm/irqchip.c

@@ -40,7 +40,7 @@ int kvm_irq_map_gsi(struct kvm *kvm,

	irq_rt = srcu_dereference_check(kvm->irq_routing, &kvm->irq_srcu,
					lockdep_is_held(&kvm->irq_lock));
	if (gsi < irq_rt->nr_rt_entries) {
	if (irq_rt && gsi < irq_rt->nr_rt_entries) {
		hlist_for_each_entry(e, &irq_rt->map[gsi], link) {
			entries[n] = *e;
			++n;