Merge "Merge branch 'android-4.4@77ddb50' (v4.4.74) into 'msm-4.4'"

Linux Build Service Account 2017-07-03 07:57:57 -07:00 committed by Gerrit - the friendly Code Review server
commit 74b5a0f867
111 changed files with 895 additions and 582 deletions

View file

@@ -3605,6 +3605,13 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 	spia_pedr=
 	spia_peddr=
 
+	stack_guard_gap=	[MM]
+			override the default stack gap protection. The value
+			is in page units and it defines how many pages prior
+			to (for stacks growing down) resp. after (for stacks
+			growing up) the main stack are reserved for no other
+			mapping. Default value is 256 pages.
+
 	stacktrace	[FTRACE]
 			Enabled the stack tracer on boot up.
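As a worked example (assuming 4 KiB pages): the default of 256 pages keeps a 1 MiB gap free below a downward-growing stack. Booting with, say,

    stack_guard_gap=512

on the kernel command line would widen the reserved gap to 2 MiB; the value 512 here is purely illustrative.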

View file

@@ -1,6 +1,6 @@
 VERSION = 4
 PATCHLEVEL = 4
-SUBLEVEL = 72
+SUBLEVEL = 74
 EXTRAVERSION =
 NAME = Blurry Fish Butt
@@ -793,7 +793,7 @@ KBUILD_CFLAGS += $(call cc-option,-Werror=date-time)
 KBUILD_ARFLAGS := $(call ar-option,D)
 
 # check for 'asm goto'
-ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(CC)), y)
+ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(CC) $(KBUILD_CFLAGS)), y)
 	KBUILD_CFLAGS += -DCC_HAVE_ASM_GOTO
 	KBUILD_AFLAGS += -DCC_HAVE_ASM_GOTO
 endif

View file

@@ -0,0 +1,5 @@
+# KEEP ALPHABETICALLY SORTED
+CONFIG_ARMV8_DEPRECATED=y
+CONFIG_CP15_BARRIER_EMULATION=y
+CONFIG_SETEND_EMULATION=y
+CONFIG_SWP_EMULATION=y

View file

@@ -12,7 +12,6 @@ CONFIG_ANDROID=y
 CONFIG_ANDROID_BINDER_IPC=y
 CONFIG_ANDROID_BINDER_DEVICES=binder,hwbinder,vndbinder
 CONFIG_ANDROID_LOW_MEMORY_KILLER=y
-CONFIG_ARMV8_DEPRECATED=y
 CONFIG_ASHMEM=y
 CONFIG_AUDIT=y
 CONFIG_BLK_DEV_INITRD=y
@@ -21,7 +20,6 @@ CONFIG_CGROUP_CPUACCT=y
 CONFIG_CGROUP_DEBUG=y
 CONFIG_CGROUP_FREEZER=y
 CONFIG_CGROUP_SCHED=y
-CONFIG_CP15_BARRIER_EMULATION=y
 CONFIG_DEFAULT_SECURITY_SELINUX=y
 CONFIG_EMBEDDED=y
 CONFIG_FB=y
@@ -155,9 +153,7 @@ CONFIG_SECURITY=y
 CONFIG_SECURITY_NETWORK=y
 CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
 CONFIG_SECURITY_SELINUX=y
-CONFIG_SETEND_EMULATION=y
 CONFIG_STAGING=y
-CONFIG_SWP_EMULATION=y
 CONFIG_SYNC=y
 CONFIG_TUN=y
 CONFIG_UID_SYS_STATS=y

View file

@@ -64,7 +64,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 		vma = find_vma(mm, addr);
 		if (TASK_SIZE - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)))
 			return addr;
 	}
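Every arch_get_unmapped_area() hunk in this merge makes the same one-line substitution, replacing the raw vma->vm_start bound with vm_start_gap(). The helper itself is added to include/linux/mm.h by the same stack-guard-gap series; a sketch of its shape, paraphrased from the upstream patch rather than quoted from this diff:

    /* Report the VMA start pulled down by the guard gap when the VMA is a
     * downward-growing stack, so address-space searches keep new mappings
     * out of the gap.  stack_guard_gap holds the distance configured by the
     * boot parameter documented above; a vm_end_gap() twin does the
     * mirror-image adjustment for upward-growing (VM_GROWSUP) stacks. */
    static inline unsigned long vm_start_gap(struct vm_area_struct *vma)
    {
            unsigned long vm_start = vma->vm_start;

            if (vma->vm_flags & VM_GROWSDOWN) {
                    vm_start -= stack_guard_gap;
                    if (vm_start > vma->vm_start)   /* guard against underflow */
                            vm_start = 0;
            }
            return vm_start;
    }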

View file

@@ -30,7 +30,7 @@
 			/* kHz    uV */
 			996000  1250000
 			792000  1175000
-			396000  1075000
+			396000  1150000
 		>;
 		fsl,soc-operating-points = <
 			/* ARM kHz  SOC-PU uV */

View file

@@ -89,7 +89,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 		vma = find_vma(mm, addr);
 		if (TASK_SIZE - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)))
 			return addr;
 	}
@@ -140,7 +140,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 		addr = PAGE_ALIGN(addr);
 		vma = find_vma(mm, addr);
 		if (TASK_SIZE - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)))
 			return addr;
 	}

View file

@@ -74,7 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
 		addr = PAGE_ALIGN(addr);
 		vma = find_vma(current->mm, addr);
 		if (TASK_SIZE - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)))
 			goto success;
 	}

View file

@@ -816,8 +816,10 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
 			break;
 		}
 		/* Compact branch: BNEZC || JIALC */
-		if (insn.i_format.rs)
+		if (!insn.i_format.rs) {
+			/* JIALC: set $31/ra */
 			regs->regs[31] = epc + 4;
+		}
 		regs->cp0_epc += 8;
 		break;
 #endif
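Background for this fix: in the MIPS r6 instruction set, BNEZC and JIALC share a single major opcode and are told apart only by the rs register field; only the rs == 0 form (JIALC) writes the link register, so the old `if (insn.i_format.rs)` linked for exactly the wrong instruction. A self-contained sketch of the disambiguation; the bit positions follow the MIPS32 r6 manual and the sample instruction words are illustrative only:

    #include <stdint.h>
    #include <stdio.h>

    /* Both instructions carry the same major opcode in bits 31..26;
     * bits 25..21 (rs) select the variant. */
    static const char *decode_pop76(uint32_t insn)
    {
            uint32_t rs = (insn >> 21) & 0x1f;

            return rs ? "BNEZC: compact branch, no link"
                      : "JIALC: must set $31 (ra) to epc + 4";
    }

    int main(void)
    {
            printf("%s\n", decode_pop76(0xf8000000u));  /* rs = 0 */
            printf("%s\n", decode_pop76(0xf8200000u));  /* rs = 1 */
            return 0;
    }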

View file

@@ -92,7 +92,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
 		vma = find_vma(mm, addr);
 		if (TASK_SIZE - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)))
 			return addr;
 	}

View file

@@ -88,7 +88,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
 		unsigned long len, unsigned long pgoff, unsigned long flags)
 {
 	struct mm_struct *mm = current->mm;
-	struct vm_area_struct *vma;
+	struct vm_area_struct *vma, *prev;
 	unsigned long task_size = TASK_SIZE;
 	int do_color_align, last_mmap;
 	struct vm_unmapped_area_info info;
@@ -115,9 +115,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
 		else
 			addr = PAGE_ALIGN(addr);
-		vma = find_vma(mm, addr);
+		vma = find_vma_prev(mm, addr, &prev);
 		if (task_size - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)) &&
+		    (!prev || addr >= vm_end_gap(prev)))
 			goto found_addr;
 	}
@@ -141,7 +142,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 			const unsigned long len, const unsigned long pgoff,
 			const unsigned long flags)
 {
-	struct vm_area_struct *vma;
+	struct vm_area_struct *vma, *prev;
 	struct mm_struct *mm = current->mm;
 	unsigned long addr = addr0;
 	int do_color_align, last_mmap;
@@ -175,9 +176,11 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 			addr = COLOR_ALIGN(addr, last_mmap, pgoff);
 		else
 			addr = PAGE_ALIGN(addr);
-		vma = find_vma(mm, addr);
+
+		vma = find_vma_prev(mm, addr, &prev);
 		if (TASK_SIZE - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)) &&
+		    (!prev || addr >= vm_end_gap(prev)))
 			goto found_addr;
 	}

View file

@@ -105,7 +105,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
 	if ((mm->task_size - len) < addr)
 		return 0;
 	vma = find_vma(mm, addr);
-	return (!vma || (addr + len) <= vma->vm_start);
+	return (!vma || (addr + len) <= vm_start_gap(vma));
 }
 
 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)

View file

@@ -229,12 +229,17 @@ ENTRY(sie64a)
 	lctlg	%c1,%c1,__LC_USER_ASCE		# load primary asce
 .Lsie_done:
 # some program checks are suppressing. C code (e.g. do_protection_exception)
-# will rewind the PSW by the ILC, which is 4 bytes in case of SIE. Other
-# instructions between sie64a and .Lsie_done should not cause program
-# interrupts. So lets use a nop (47 00 00 00) as a landing pad.
+# will rewind the PSW by the ILC, which is often 4 bytes in case of SIE. There
+# are some corner cases (e.g. runtime instrumentation) where ILC is unpredictable.
+# Other instructions between sie64a and .Lsie_done should not cause program
+# interrupts. So lets use 3 nops as a landing pad for all possible rewinds.
 # See also .Lcleanup_sie
-.Lrewind_pad:
-	nop	0
+.Lrewind_pad6:
+	nopr	7
+.Lrewind_pad4:
+	nopr	7
+.Lrewind_pad2:
+	nopr	7
 	.globl sie_exit
 sie_exit:
 	lg	%r14,__SF_EMPTY+8(%r15)		# load guest register save area
@@ -247,7 +252,9 @@ sie_exit:
 	stg	%r14,__SF_EMPTY+16(%r15)	# set exit reason code
 	j	sie_exit
 
-	EX_TABLE(.Lrewind_pad,.Lsie_fault)
+	EX_TABLE(.Lrewind_pad6,.Lsie_fault)
+	EX_TABLE(.Lrewind_pad4,.Lsie_fault)
+	EX_TABLE(.Lrewind_pad2,.Lsie_fault)
 	EX_TABLE(sie_exit,.Lsie_fault)
 #endif

View file

@@ -97,7 +97,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 		addr = PAGE_ALIGN(addr);
 		vma = find_vma(mm, addr);
 		if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)))
 			return addr;
 	}
@@ -135,7 +135,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 		addr = PAGE_ALIGN(addr);
 		vma = find_vma(mm, addr);
 		if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)))
 			return addr;
 	}

View file

@@ -372,7 +372,7 @@ void __init vmem_map_init(void)
 	ro_end = (unsigned long)&_eshared & PAGE_MASK;
 	for_each_memblock(memory, reg) {
 		start = reg->base;
-		end = reg->base + reg->size - 1;
+		end = reg->base + reg->size;
 		if (start >= ro_end || end <= ro_start)
 			vmem_add_mem(start, end - start, 0);
 		else if (start >= ro_start && end <= ro_end)

View file

@@ -63,7 +63,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
 		vma = find_vma(mm, addr);
 		if (TASK_SIZE - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)))
 			return addr;
 	}
@@ -113,7 +113,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 		vma = find_vma(mm, addr);
 		if (TASK_SIZE - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)))
 			return addr;
 	}

View file

@@ -118,7 +118,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
 		vma = find_vma(mm, addr);
 		if (task_size - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)))
 			return addr;
 	}
@@ -181,7 +181,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 		vma = find_vma(mm, addr);
 		if (task_size - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)))
 			return addr;
 	}

View file

@@ -85,7 +85,7 @@ static void dump_tl1_traplog(struct tl1_traplog *p)
 
 void bad_trap(struct pt_regs *regs, long lvl)
 {
-	char buffer[32];
+	char buffer[36];
 	siginfo_t info;
 
 	if (notify_die(DIE_TRAP, "bad trap", regs,
@@ -116,7 +116,7 @@ void bad_trap(struct pt_regs *regs, long lvl)
 
 void bad_trap_tl1(struct pt_regs *regs, long lvl)
 {
-	char buffer[32];
+	char buffer[36];
 
 	if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
 		       0, lvl, SIGTRAP) == NOTIFY_STOP)

View file

@@ -115,7 +115,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
 		addr = ALIGN(addr, HPAGE_SIZE);
 		vma = find_vma(mm, addr);
 		if (task_size - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)))
 			return addr;
 	}
 	if (mm->get_unmapped_area == arch_get_unmapped_area)

View file

@@ -232,7 +232,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
 		addr = ALIGN(addr, huge_page_size(h));
 		vma = find_vma(mm, addr);
 		if (TASK_SIZE - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)))
 			return addr;
 	}
 	if (current->mm->get_unmapped_area == arch_get_unmapped_area)

View file

@@ -143,7 +143,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 		addr = PAGE_ALIGN(addr);
 		vma = find_vma(mm, addr);
 		if (end - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)))
 			return addr;
 	}
@@ -186,7 +186,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 		addr = PAGE_ALIGN(addr);
 		vma = find_vma(mm, addr);
 		if (TASK_SIZE - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)))
 			return addr;
 	}

View file

@@ -144,7 +144,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
 		addr = ALIGN(addr, huge_page_size(h));
 		vma = find_vma(mm, addr);
 		if (TASK_SIZE - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)))
 			return addr;
 	}
 	if (mm->get_unmapped_area == arch_get_unmapped_area)

View file

@@ -100,5 +100,6 @@ void __init initmem_init(void)
 	printk(KERN_DEBUG "High memory starts at vaddr %08lx\n",
 			(ulong) pfn_to_kaddr(highstart_pfn));
 
+	__vmalloc_start_set = true;
 	setup_bootmem_allocator();
 }

View file

@@ -29,7 +29,8 @@ static inline void variant_irq_disable(unsigned int irq) { }
 # define PLATFORM_NR_IRQS 0
 #endif
 #define XTENSA_NR_IRQS XCHAL_NUM_INTERRUPTS
-#define NR_IRQS (XTENSA_NR_IRQS + VARIANT_NR_IRQS + PLATFORM_NR_IRQS)
+#define NR_IRQS (XTENSA_NR_IRQS + VARIANT_NR_IRQS + PLATFORM_NR_IRQS + 1)
+#define XTENSA_PIC_LINUX_IRQ(hwirq) ((hwirq) + 1)
 
 #if VARIANT_NR_IRQS == 0
 static inline void variant_init_irq(void) { }

View file

@@ -34,11 +34,6 @@ asmlinkage void do_IRQ(int hwirq, struct pt_regs *regs)
 {
 	int irq = irq_find_mapping(NULL, hwirq);
 
-	if (hwirq >= NR_IRQS) {
-		printk(KERN_EMERG "%s: cannot handle IRQ %d\n",
-				__func__, hwirq);
-	}
-
 #ifdef CONFIG_DEBUG_STACKOVERFLOW
 	/* Debugging check for stack overflow: is there less than 1KB free? */
 	{

View file

@@ -87,7 +87,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
 		/* At this point:  (!vmm || addr < vmm->vm_end). */
 		if (TASK_SIZE - len < addr)
 			return -ENOMEM;
-		if (!vmm || addr + len <= vmm->vm_start)
+		if (!vmm || addr + len <= vm_start_gap(vmm))
 			return addr;
 		addr = vmm->vm_end;
 		if (flags & MAP_SHARED)

View file

@@ -24,16 +24,18 @@
 
 /* Interrupt configuration. */
 
-#define PLATFORM_NR_IRQS	10
+#define PLATFORM_NR_IRQS	0
 
 /* Default assignment of LX60 devices to external interrupts. */
 
 #ifdef CONFIG_XTENSA_MX
 #define DUART16552_INTNUM	XCHAL_EXTINT3_NUM
 #define OETH_IRQ		XCHAL_EXTINT4_NUM
+#define C67X00_IRQ		XCHAL_EXTINT8_NUM
 #else
 #define DUART16552_INTNUM	XCHAL_EXTINT0_NUM
 #define OETH_IRQ		XCHAL_EXTINT1_NUM
+#define C67X00_IRQ		XCHAL_EXTINT5_NUM
 #endif
 
 /*
@@ -63,5 +65,5 @@
 
 #define C67X00_PADDR		(XCHAL_KIO_PADDR + 0x0D0D0000)
 #define C67X00_SIZE		0x10
-#define C67X00_IRQ		5
+
 #endif /* __XTENSA_XTAVNET_HARDWARE_H */

View file

@@ -209,8 +209,8 @@ static struct resource ethoc_res[] = {
 		.flags = IORESOURCE_MEM,
 	},
 	[2] = { /* IRQ number */
-		.start = OETH_IRQ,
-		.end   = OETH_IRQ,
+		.start = XTENSA_PIC_LINUX_IRQ(OETH_IRQ),
+		.end   = XTENSA_PIC_LINUX_IRQ(OETH_IRQ),
 		.flags = IORESOURCE_IRQ,
 	},
 };
@@ -246,8 +246,8 @@ static struct resource c67x00_res[] = {
 		.flags = IORESOURCE_MEM,
 	},
 	[1] = { /* IRQ number */
-		.start = C67X00_IRQ,
-		.end   = C67X00_IRQ,
+		.start = XTENSA_PIC_LINUX_IRQ(C67X00_IRQ),
+		.end   = XTENSA_PIC_LINUX_IRQ(C67X00_IRQ),
 		.flags = IORESOURCE_IRQ,
 	},
 };
@@ -280,7 +280,7 @@ static struct resource serial_resource = {
 static struct plat_serial8250_port serial_platform_data[] = {
 	[0] = {
 		.mapbase	= DUART16552_PADDR,
-		.irq		= DUART16552_INTNUM,
+		.irq		= XTENSA_PIC_LINUX_IRQ(DUART16552_INTNUM),
 		.flags		= UPF_BOOT_AUTOCONF | UPF_SKIP_TEST |
 				  UPF_IOREMAP,
 		.iotype		= UPIO_MEM32,

View file

@@ -300,6 +300,8 @@ static void parse_bsd(struct parsed_partitions *state,
 			continue;
 		bsd_start = le32_to_cpu(p->p_offset);
 		bsd_size = le32_to_cpu(p->p_size);
+		if (memcmp(flavour, "bsd\0", 4) == 0)
+			bsd_start += offset;
 		if (offset == bsd_start && size == bsd_size)
 			/* full parent partition, we have it already */
 			continue;
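The added adjustment reflects a flavour difference: a FreeBSD ("bsd") disklabel records p_offset relative to the start of its enclosing slice, while this function compares against absolute device offsets, so without adding the slice offset the full-slice entry would not be recognized as the parent partition that is already registered.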

View file

@@ -889,13 +889,13 @@ int __pm_runtime_idle(struct device *dev, int rpmflags)
 	unsigned long flags;
 	int retval;
 
-	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
-
 	if (rpmflags & RPM_GET_PUT) {
 		if (!atomic_dec_and_test(&dev->power.usage_count))
 			return 0;
 	}
 
+	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
+
 	spin_lock_irqsave(&dev->power.lock, flags);
 	retval = rpm_idle(dev, rpmflags);
 	spin_unlock_irqrestore(&dev->power.lock, flags);
@@ -921,13 +921,13 @@ int __pm_runtime_suspend(struct device *dev, int rpmflags)
 	unsigned long flags;
 	int retval;
 
-	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
-
 	if (rpmflags & RPM_GET_PUT) {
 		if (!atomic_dec_and_test(&dev->power.usage_count))
 			return 0;
 	}
 
+	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
+
 	spin_lock_irqsave(&dev->power.lock, flags);
 	retval = rpm_suspend(dev, rpmflags);
 	spin_unlock_irqrestore(&dev->power.lock, flags);
@@ -952,7 +952,8 @@ int __pm_runtime_resume(struct device *dev, int rpmflags)
 	unsigned long flags;
 	int retval;
 
-	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
+	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe &&
+			dev->power.runtime_status != RPM_ACTIVE);
 
 	if (rpmflags & RPM_GET_PUT)
 		atomic_inc(&dev->power.usage_count);
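Both reorderings have the same aim: a put that merely drops the usage count returns before any power-management work is attempted, so it should not warn about sleeping in atomic context, and resuming an already-active device is likewise a no-op. A hypothetical caller that the old ordering flagged spuriously:

    spin_lock_irq(&priv->lock);     /* atomic context (priv is illustrative) */
    pm_runtime_put_sync(dev);       /* usage count 2 -> 1: returns early,
                                       nothing can sleep                     */
    spin_unlock_irq(&priv->lock);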

View file

@@ -212,8 +212,8 @@ static ssize_t store_down_threshold(struct dbs_data *dbs_data, const char *buf,
 	int ret;
 	ret = sscanf(buf, "%u", &input);
 
-	/* cannot be lower than 11 otherwise freq will not fall */
-	if (ret != 1 || input < 11 || input > 100 ||
+	/* cannot be lower than 1 otherwise freq will not fall */
+	if (ret != 1 || input < 1 || input > 100 ||
 			input >= cs_tuners->up_threshold)
 		return -EINVAL;

View file

@@ -113,6 +113,7 @@ struct ast_private {
 	struct ttm_bo_kmap_obj cache_kmap;
 	int next_cursor;
 	bool support_wide_screen;
+	bool DisableP2A;
 
 	enum ast_tx_chip tx_chip_type;
 	u8 dp501_maxclk;

View file

@@ -124,6 +124,12 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post)
 	} else
 		*need_post = false;
 
+	/* Check P2A Access */
+	ast->DisableP2A = true;
+	data = ast_read32(ast, 0xf004);
+	if (data != 0xFFFFFFFF)
+		ast->DisableP2A = false;
+
 	/* Check if we support wide screen */
 	switch (ast->chip) {
 	case AST1180:
@@ -140,15 +146,17 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post)
 			ast->support_wide_screen = true;
 		else {
 			ast->support_wide_screen = false;
-			/* Read SCU7c (silicon revision register) */
-			ast_write32(ast, 0xf004, 0x1e6e0000);
-			ast_write32(ast, 0xf000, 0x1);
-			data = ast_read32(ast, 0x1207c);
-			data &= 0x300;
-			if (ast->chip == AST2300 && data == 0x0) /* ast1300 */
-				ast->support_wide_screen = true;
-			if (ast->chip == AST2400 && data == 0x100) /* ast1400 */
-				ast->support_wide_screen = true;
+			if (ast->DisableP2A == false) {
+				/* Read SCU7c (silicon revision register) */
+				ast_write32(ast, 0xf004, 0x1e6e0000);
+				ast_write32(ast, 0xf000, 0x1);
+				data = ast_read32(ast, 0x1207c);
+				data &= 0x300;
+				if (ast->chip == AST2300 && data == 0x0) /* ast1300 */
+					ast->support_wide_screen = true;
+				if (ast->chip == AST2400 && data == 0x100) /* ast1400 */
+					ast->support_wide_screen = true;
+			}
 		}
 		break;
 	}
@@ -216,80 +224,81 @@ static int ast_get_dram_info(struct drm_device *dev)
 	uint32_t data, data2;
 	uint32_t denum, num, div, ref_pll;
 
-	ast_write32(ast, 0xf004, 0x1e6e0000);
-	ast_write32(ast, 0xf000, 0x1);
-
-	ast_write32(ast, 0x10000, 0xfc600309);
-
-	do {
-		if (pci_channel_offline(dev->pdev))
-			return -EIO;
-	} while (ast_read32(ast, 0x10000) != 0x01);
-	data = ast_read32(ast, 0x10004);
-
-	if (data & 0x40)
+	if (ast->DisableP2A)
+	{
 		ast->dram_bus_width = 16;
+		ast->dram_type = AST_DRAM_1Gx16;
+		ast->mclk = 396;
+	}
 	else
-		ast->dram_bus_width = 32;
+	{
+		ast_write32(ast, 0xf004, 0x1e6e0000);
+		ast_write32(ast, 0xf000, 0x1);
+		data = ast_read32(ast, 0x10004);
 
-	if (ast->chip == AST2300 || ast->chip == AST2400) {
-		switch (data & 0x03) {
-		case 0:
-			ast->dram_type = AST_DRAM_512Mx16;
-			break;
-		default:
-		case 1:
-			ast->dram_type = AST_DRAM_1Gx16;
-			break;
-		case 2:
-			ast->dram_type = AST_DRAM_2Gx16;
-			break;
-		case 3:
-			ast->dram_type = AST_DRAM_4Gx16;
-			break;
-		}
-	} else {
-		switch (data & 0x0c) {
-		case 0:
-		case 4:
-			ast->dram_type = AST_DRAM_512Mx16;
-			break;
-		case 8:
-			if (data & 0x40)
-				ast->dram_type = AST_DRAM_1Gx16;
-			else
-				ast->dram_type = AST_DRAM_512Mx32;
-			break;
-		case 0xc:
-			ast->dram_type = AST_DRAM_1Gx32;
-			break;
-		}
-	}
+		if (data & 0x40)
+			ast->dram_bus_width = 16;
+		else
+			ast->dram_bus_width = 32;
 
-	data = ast_read32(ast, 0x10120);
-	data2 = ast_read32(ast, 0x10170);
-	if (data2 & 0x2000)
-		ref_pll = 14318;
-	else
-		ref_pll = 12000;
-
-	denum = data & 0x1f;
-	num = (data & 0x3fe0) >> 5;
-	data = (data & 0xc000) >> 14;
-	switch (data) {
-	case 3:
-		div = 0x4;
-		break;
-	case 2:
-	case 1:
-		div = 0x2;
-		break;
-	default:
-		div = 0x1;
-		break;
-	}
-	ast->mclk = ref_pll * (num + 2) / (denum + 2) * (div * 1000);
+		if (ast->chip == AST2300 || ast->chip == AST2400) {
+			switch (data & 0x03) {
+			case 0:
+				ast->dram_type = AST_DRAM_512Mx16;
+				break;
+			default:
+			case 1:
+				ast->dram_type = AST_DRAM_1Gx16;
+				break;
+			case 2:
+				ast->dram_type = AST_DRAM_2Gx16;
+				break;
+			case 3:
+				ast->dram_type = AST_DRAM_4Gx16;
+				break;
+			}
+		} else {
+			switch (data & 0x0c) {
+			case 0:
+			case 4:
+				ast->dram_type = AST_DRAM_512Mx16;
+				break;
+			case 8:
+				if (data & 0x40)
+					ast->dram_type = AST_DRAM_1Gx16;
+				else
+					ast->dram_type = AST_DRAM_512Mx32;
+				break;
+			case 0xc:
+				ast->dram_type = AST_DRAM_1Gx32;
+				break;
+			}
+		}
+
+		data = ast_read32(ast, 0x10120);
+		data2 = ast_read32(ast, 0x10170);
+		if (data2 & 0x2000)
+			ref_pll = 14318;
+		else
+			ref_pll = 12000;
+
+		denum = data & 0x1f;
+		num = (data & 0x3fe0) >> 5;
+		data = (data & 0xc000) >> 14;
+		switch (data) {
+		case 3:
+			div = 0x4;
+			break;
+		case 2:
+		case 1:
+			div = 0x2;
+			break;
+		default:
+			div = 0x1;
+			break;
+		}
+		ast->mclk = ref_pll * (num + 2) / (denum + 2) * (div * 1000);
+	}
 	return 0;
 }

View file

@@ -375,12 +375,20 @@ void ast_post_gpu(struct drm_device *dev)
 	ast_enable_mmio(dev);
 	ast_set_def_ext_reg(dev);
 
-	if (ast->chip == AST2300 || ast->chip == AST2400)
-		ast_init_dram_2300(dev);
-	else
-		ast_init_dram_reg(dev);
+	if (ast->DisableP2A == false)
+	{
+		if (ast->chip == AST2300 || ast->chip == AST2400)
+			ast_init_dram_2300(dev);
+		else
+			ast_init_dram_reg(dev);
 
-	ast_init_3rdtx(dev);
+		ast_init_3rdtx(dev);
+	}
+	else
+	{
+		if (ast->tx_chip_type != AST_TX_NONE)
+			ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa3, 0xcf, 0x80);	/* Enable DVO */
+	}
 }
 
 /* AST 2300 DRAM settings */

View file

@@ -370,7 +370,8 @@ nouveau_display_init(struct drm_device *dev)
 		return ret;
 
 	/* enable polling for external displays */
-	drm_kms_helper_poll_enable(dev);
+	if (!dev->mode_config.poll_enabled)
+		drm_kms_helper_poll_enable(dev);
 
 	/* enable hotplug interrupts */
 	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
View file

@@ -743,7 +743,10 @@ nouveau_pmops_runtime_resume(struct device *dev)
 	pci_set_master(pdev);
 
 	ret = nouveau_do_resume(drm_dev, true);
-	drm_kms_helper_poll_enable(drm_dev);
+
+	if (!drm_dev->mode_config.poll_enabled)
+		drm_kms_helper_poll_enable(drm_dev);
+
 	/* do magic */
 	nvif_mask(&device->object, 0x088488, (1 << 25), (1 << 25));
 	vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_ON);

View file

@@ -99,6 +99,7 @@ struct nv84_fence_priv {
 	struct nouveau_bo *bo;
 	struct nouveau_bo *bo_gart;
 	u32 *suspend;
+	struct mutex mutex;
 };
 
 u64 nv84_fence_crtc(struct nouveau_channel *, int);

View file

@@ -313,7 +313,8 @@ usif_ioctl(struct drm_file *filp, void __user *user, u32 argc)
 	if (nvif_unpack(argv->v0, 0, 0, true)) {
 		/* block access to objects not created via this interface */
 		owner = argv->v0.owner;
-		if (argv->v0.object == 0ULL)
+		if (argv->v0.object == 0ULL &&
+		    argv->v0.type != NVIF_IOCTL_V0_DEL)
 			argv->v0.owner = NVDRM_OBJECT_ANY; /* except client */
 		else
 			argv->v0.owner = NVDRM_OBJECT_USIF;

View file

@@ -121,8 +121,10 @@ nv84_fence_context_del(struct nouveau_channel *chan)
 	}
 
 	nouveau_bo_wr32(priv->bo, chan->chid * 16 / 4, fctx->base.sequence);
+	mutex_lock(&priv->mutex);
 	nouveau_bo_vma_del(priv->bo, &fctx->vma_gart);
 	nouveau_bo_vma_del(priv->bo, &fctx->vma);
+	mutex_unlock(&priv->mutex);
 	nouveau_fence_context_del(&fctx->base);
 	chan->fence = NULL;
 	nouveau_fence_context_free(&fctx->base);
@@ -148,11 +150,13 @@ nv84_fence_context_new(struct nouveau_channel *chan)
 	fctx->base.sync32 = nv84_fence_sync32;
 	fctx->base.sequence = nv84_fence_read(chan);
 
+	mutex_lock(&priv->mutex);
 	ret = nouveau_bo_vma_add(priv->bo, cli->vm, &fctx->vma);
 	if (ret == 0) {
 		ret = nouveau_bo_vma_add(priv->bo_gart, cli->vm,
 					 &fctx->vma_gart);
 	}
+	mutex_unlock(&priv->mutex);
 
 	/* map display semaphore buffers into channel's vm */
 	for (i = 0; !ret && i < chan->drm->dev->mode_config.num_crtc; i++) {
@@ -232,6 +236,8 @@ nv84_fence_create(struct nouveau_drm *drm)
 	priv->base.context_base = fence_context_alloc(priv->base.contexts);
 	priv->base.uevent = true;
 
+	mutex_init(&priv->mutex);
+
 	/* Use VRAM if there is any ; otherwise fallback to system memory */
 	domain = drm->device.info.ram_size != 0 ? TTM_PL_FLAG_VRAM :
 	/*

View file

@@ -54,7 +54,7 @@
 #define SMBSLVDAT	(0xC + piix4_smba)
 
 /* count for request_region */
-#define SMBIOSIZE	8
+#define SMBIOSIZE	9
 
 /* PCI Address Constants */
 #define SMBBA		0x090

View file

@@ -263,8 +263,6 @@ static irqreturn_t as3935_interrupt_handler(int irq, void *private)
 
 static void calibrate_as3935(struct as3935_state *st)
 {
-	mutex_lock(&st->lock);
-
 	/* mask disturber interrupt bit */
 	as3935_write(st, AS3935_INT, BIT(5));
 
@@ -274,8 +272,6 @@ static void calibrate_as3935(struct as3935_state *st)
 	mdelay(2);
 	as3935_write(st, AS3935_TUNE_CAP, (st->tune_cap / TUNE_CAP_DIV));
-
-	mutex_unlock(&st->lock);
 }
 
 #ifdef CONFIG_PM_SLEEP
@@ -312,6 +308,8 @@ static int as3935_resume(struct device *dev)
 	val &= ~AS3935_AFE_PWR_BIT;
 	ret = as3935_write(st, AS3935_AFE_GAIN, val);
 
+	calibrate_as3935(st);
+
 err_resume:
 	mutex_unlock(&st->lock);

View file

@@ -142,7 +142,7 @@ static struct irq_chip xtensa_mx_irq_chip = {
 int __init xtensa_mx_init_legacy(struct device_node *interrupt_parent)
 {
 	struct irq_domain *root_domain =
-		irq_domain_add_legacy(NULL, NR_IRQS, 0, 0,
+		irq_domain_add_legacy(NULL, NR_IRQS - 1, 1, 0,
 				&xtensa_mx_irq_domain_ops,
 				&xtensa_mx_irq_chip);
 	irq_set_default_host(root_domain);

View file

@@ -89,7 +89,7 @@ static struct irq_chip xtensa_irq_chip = {
 int __init xtensa_pic_init_legacy(struct device_node *interrupt_parent)
 {
 	struct irq_domain *root_domain =
-		irq_domain_add_legacy(NULL, NR_IRQS, 0, 0,
+		irq_domain_add_legacy(NULL, NR_IRQS - 1, 1, 0,
 				&xtensa_irq_domain_ops, &xtensa_irq_chip);
 	irq_set_default_host(root_domain);
 	return 0;

View file

@@ -123,15 +123,10 @@ int pvr2_eeprom_analyze(struct pvr2_hdw *hdw)
 	memset(&tvdata,0,sizeof(tvdata));
 
 	eeprom = pvr2_eeprom_fetch(hdw);
-	if (!eeprom) return -EINVAL;
-
-	{
-		struct i2c_client fake_client;
-		/* Newer version expects a useless client interface */
-		fake_client.addr = hdw->eeprom_addr;
-		fake_client.adapter = &hdw->i2c_adap;
-		tveeprom_hauppauge_analog(&fake_client,&tvdata,eeprom);
-	}
+	if (!eeprom)
+		return -EINVAL;
+
+	tveeprom_hauppauge_analog(NULL, &tvdata, eeprom);
 
 	trace_eeprom("eeprom assumed v4l tveeprom module");
 	trace_eeprom("eeprom direct call results:");

View file

@@ -793,7 +793,7 @@ EXPORT_SYMBOL_GPL(vb2_core_create_bufs);
  */
 void *vb2_plane_vaddr(struct vb2_buffer *vb, unsigned int plane_no)
 {
-	if (plane_no > vb->num_planes || !vb->planes[plane_no].mem_priv)
+	if (plane_no >= vb->num_planes || !vb->planes[plane_no].mem_priv)
 		return NULL;
 
 	return call_ptr_memop(vb, vaddr, vb->planes[plane_no].mem_priv);
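The arithmetic behind the one-character fix: with vb->num_planes == 2 the valid plane indices are 0 and 1, but the old `>` comparison accepted plane_no == 2 and indexed one element past the end of the planes[] array before the mem_priv check could reject it; hence `>=`.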

View file

@@ -375,8 +375,8 @@ int omap_tll_init(struct usbhs_omap_platform_data *pdata)
 			 * and use SDR Mode
 			 */
 			reg &= ~(OMAP_TLL_CHANNEL_CONF_UTMIAUTOIDLE
-				| OMAP_TLL_CHANNEL_CONF_ULPINOBITSTUFF
 				| OMAP_TLL_CHANNEL_CONF_ULPIDDRMODE);
+			reg |= OMAP_TLL_CHANNEL_CONF_ULPINOBITSTUFF;
 		} else if (pdata->port_mode[i] ==
 				OMAP_EHCI_PORT_MODE_HSIC) {
 			/*

View file

@@ -129,8 +129,8 @@ static int __init duramar2150_c2port_init(void)
 
 	duramar2150_c2port_dev = c2port_device_register("uc",
 					&duramar2150_c2port_ops, NULL);
-	if (!duramar2150_c2port_dev) {
-		ret = -ENODEV;
+	if (IS_ERR(duramar2150_c2port_dev)) {
+		ret = PTR_ERR(duramar2150_c2port_dev);
 		goto free_region;
 	}
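c2port_device_register() reports failure as an ERR_PTR()-encoded errno rather than NULL, so the old test could never fire and the error path was dead. A minimal userspace model of the kernel's convention (the real macros live in include/linux/err.h):

    #include <stdio.h>

    #define MAX_ERRNO 4095

    /* Error codes are encoded as pointers into the topmost page of the
     * address space, which no valid kernel pointer ever occupies. */
    static inline void *ERR_PTR(long error)    { return (void *)error; }
    static inline long PTR_ERR(const void *p)  { return (long)p; }
    static inline int IS_ERR(const void *p)
    {
            return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
    }

    int main(void)
    {
            void *dev = ERR_PTR(-19);       /* -ENODEV */

            if (IS_ERR(dev))                /* a NULL check would miss this */
                    printf("register failed: %ld\n", PTR_ERR(dev));
            return 0;
    }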

View file

@@ -246,6 +246,8 @@ static int gs_cmd_reset(struct gs_usb *gsusb, struct gs_can *gsdev)
 			     sizeof(*dm),
 			     1000);
 
+	kfree(dm);
+
 	return rc;
 }

View file

@@ -1153,6 +1153,12 @@ static void init_ring(struct net_device *dev)
 		if (skb == NULL)
 			break;
 		np->rx_info[i].mapping = pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
+		if (pci_dma_mapping_error(np->pci_dev,
+					  np->rx_info[i].mapping)) {
+			dev_kfree_skb(skb);
+			np->rx_info[i].skb = NULL;
+			break;
+		}
 		/* Grrr, we cannot offset to correctly align the IP header. */
 		np->rx_ring[i].rxaddr = cpu_to_dma(np->rx_info[i].mapping | RxDescValid);
 	}
@@ -1183,8 +1189,9 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
 {
 	struct netdev_private *np = netdev_priv(dev);
 	unsigned int entry;
+	unsigned int prev_tx;
 	u32 status;
-	int i;
+	int i, j;
 
 	/*
 	 * be cautious here, wrapping the queue has weird semantics
@@ -1202,6 +1209,7 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
 	}
 #endif /* ZEROCOPY && HAS_BROKEN_FIRMWARE */
 
+	prev_tx = np->cur_tx;
 	entry = np->cur_tx % TX_RING_SIZE;
 	for (i = 0; i < skb_num_frags(skb); i++) {
 		int wrap_ring = 0;
@@ -1235,6 +1243,11 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
 					       skb_frag_size(this_frag),
 					       PCI_DMA_TODEVICE);
 		}
+		if (pci_dma_mapping_error(np->pci_dev,
+					  np->tx_info[entry].mapping)) {
+			dev->stats.tx_dropped++;
+			goto err_out;
+		}
 
 		np->tx_ring[entry].addr = cpu_to_dma(np->tx_info[entry].mapping);
 		np->tx_ring[entry].status = cpu_to_le32(status);
@@ -1269,8 +1282,30 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
 		netif_stop_queue(dev);
 
 	return NETDEV_TX_OK;
-}
 
+err_out:
+	entry = prev_tx % TX_RING_SIZE;
+	np->tx_info[entry].skb = NULL;
+	if (i > 0) {
+		pci_unmap_single(np->pci_dev,
+				 np->tx_info[entry].mapping,
+				 skb_first_frag_len(skb),
+				 PCI_DMA_TODEVICE);
+		np->tx_info[entry].mapping = 0;
+		entry = (entry + np->tx_info[entry].used_slots) % TX_RING_SIZE;
+		for (j = 1; j < i; j++) {
+			pci_unmap_single(np->pci_dev,
+					 np->tx_info[entry].mapping,
+					 skb_frag_size(
+						&skb_shinfo(skb)->frags[j-1]),
+					 PCI_DMA_TODEVICE);
+			entry++;
+		}
+	}
+	dev_kfree_skb_any(skb);
+	np->cur_tx = prev_tx;
+	return NETDEV_TX_OK;
+}
 
 /* The interrupt handler does all of the Rx thread work and cleans up
    after the Tx thread. */
@@ -1570,6 +1605,12 @@ static void refill_rx_ring(struct net_device *dev)
 			break;	/* Better luck next round. */
 		np->rx_info[entry].mapping =
 			pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
+		if (pci_dma_mapping_error(np->pci_dev,
+					  np->rx_info[entry].mapping)) {
+			dev_kfree_skb(skb);
+			np->rx_info[entry].skb = NULL;
+			break;
+		}
 		np->rx_ring[entry].rxaddr =
 			cpu_to_dma(np->rx_info[entry].mapping | RxDescValid);
 	}

View file

@@ -1999,8 +1999,8 @@ static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
 		if (!rxb->page)
 			continue;
 
-		dma_unmap_single(rx_queue->dev, rxb->dma,
-				 PAGE_SIZE, DMA_FROM_DEVICE);
+		dma_unmap_page(rx_queue->dev, rxb->dma,
+			       PAGE_SIZE, DMA_FROM_DEVICE);
 		__free_page(rxb->page);
 
 		rxb->page = NULL;

View file

@@ -105,8 +105,8 @@ int hns_nic_net_xmit_hw(struct net_device *ndev,
 			struct hns_nic_ring_data *ring_data)
 {
 	struct hns_nic_priv *priv = netdev_priv(ndev);
-	struct device *dev = priv->dev;
 	struct hnae_ring *ring = ring_data->ring;
+	struct device *dev = ring_to_dev(ring);
 	struct netdev_queue *dev_queue;
 	struct skb_frag_struct *frag;
 	int buf_num;

View file

@@ -158,7 +158,7 @@ static int mlx4_reset_slave(struct mlx4_dev *dev)
 	return -ETIMEDOUT;
 }
 
-static int mlx4_comm_internal_err(u32 slave_read)
+int mlx4_comm_internal_err(u32 slave_read)
 {
 	return (u32)COMM_CHAN_EVENT_INTERNAL_ERR ==
 		(slave_read & (u32)COMM_CHAN_EVENT_INTERNAL_ERR) ? 1 : 0;

View file

@@ -218,6 +218,18 @@ void mlx4_unregister_device(struct mlx4_dev *dev)
 	struct mlx4_interface *intf;
 
 	mlx4_stop_catas_poll(dev);
+	if (dev->persist->interface_state & MLX4_INTERFACE_STATE_DELETION &&
+	    mlx4_is_slave(dev)) {
+		/* In mlx4_remove_one on a VF */
+		u32 slave_read =
+			swab32(readl(&mlx4_priv(dev)->mfunc.comm->slave_read));
+
+		if (mlx4_comm_internal_err(slave_read)) {
+			mlx4_dbg(dev, "%s: comm channel is down, entering error state.\n",
+				 __func__);
+			mlx4_enter_error_state(dev->persist);
+		}
+	}
 	mutex_lock(&intf_mutex);
 
 	list_for_each_entry(intf, &intf_list, list)

View file

@@ -1205,6 +1205,7 @@ void mlx4_qp_event(struct mlx4_dev *dev, u32 qpn, int event_type);
 void mlx4_srq_event(struct mlx4_dev *dev, u32 srqn, int event_type);
 
 void mlx4_enter_error_state(struct mlx4_dev_persistent *persist);
+int mlx4_comm_internal_err(u32 slave_read);
 
 int mlx4_SENSE_PORT(struct mlx4_dev *dev, int port,
 		    enum mlx4_port_type *type);

View file

@@ -171,6 +171,49 @@ static struct mdiobb_ops bb_ops = {
 	.get_mdio_data = ravb_get_mdio_data,
 };
 
+/* Free TX skb function for AVB-IP */
+static int ravb_tx_free(struct net_device *ndev, int q, bool free_txed_only)
+{
+	struct ravb_private *priv = netdev_priv(ndev);
+	struct net_device_stats *stats = &priv->stats[q];
+	struct ravb_tx_desc *desc;
+	int free_num = 0;
+	int entry;
+	u32 size;
+
+	for (; priv->cur_tx[q] - priv->dirty_tx[q] > 0; priv->dirty_tx[q]++) {
+		bool txed;
+
+		entry = priv->dirty_tx[q] % (priv->num_tx_ring[q] *
+					     NUM_TX_DESC);
+		desc = &priv->tx_ring[q][entry];
+		txed = desc->die_dt == DT_FEMPTY;
+		if (free_txed_only && !txed)
+			break;
+		/* Descriptor type must be checked before all other reads */
+		dma_rmb();
+		size = le16_to_cpu(desc->ds_tagl) & TX_DS;
+		/* Free the original skb. */
+		if (priv->tx_skb[q][entry / NUM_TX_DESC]) {
+			dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
+					 size, DMA_TO_DEVICE);
+			/* Last packet descriptor? */
+			if (entry % NUM_TX_DESC == NUM_TX_DESC - 1) {
+				entry /= NUM_TX_DESC;
+				dev_kfree_skb_any(priv->tx_skb[q][entry]);
+				priv->tx_skb[q][entry] = NULL;
+				if (txed)
+					stats->tx_packets++;
+			}
+			free_num++;
+		}
+		if (txed)
+			stats->tx_bytes += size;
+		desc->die_dt = DT_EEMPTY;
+	}
+	return free_num;
+}
+
 /* Free skb's and DMA buffers for Ethernet AVB */
 static void ravb_ring_free(struct net_device *ndev, int q)
 {
@@ -186,19 +229,21 @@ static void ravb_ring_free(struct net_device *ndev, int q)
 	kfree(priv->rx_skb[q]);
 	priv->rx_skb[q] = NULL;
 
-	/* Free TX skb ringbuffer */
-	if (priv->tx_skb[q]) {
-		for (i = 0; i < priv->num_tx_ring[q]; i++)
-			dev_kfree_skb(priv->tx_skb[q][i]);
-	}
-	kfree(priv->tx_skb[q]);
-	priv->tx_skb[q] = NULL;
-
 	/* Free aligned TX buffers */
 	kfree(priv->tx_align[q]);
 	priv->tx_align[q] = NULL;
 
 	if (priv->rx_ring[q]) {
+		for (i = 0; i < priv->num_rx_ring[q]; i++) {
+			struct ravb_ex_rx_desc *desc = &priv->rx_ring[q][i];
+
+			if (!dma_mapping_error(ndev->dev.parent,
+					       le32_to_cpu(desc->dptr)))
+				dma_unmap_single(ndev->dev.parent,
+						 le32_to_cpu(desc->dptr),
+						 PKT_BUF_SZ,
+						 DMA_FROM_DEVICE);
+		}
 		ring_size = sizeof(struct ravb_ex_rx_desc) *
 			    (priv->num_rx_ring[q] + 1);
 		dma_free_coherent(ndev->dev.parent, ring_size, priv->rx_ring[q],
@@ -207,12 +252,20 @@ static void ravb_ring_free(struct net_device *ndev, int q)
 	}
 
 	if (priv->tx_ring[q]) {
+		ravb_tx_free(ndev, q, false);
+
 		ring_size = sizeof(struct ravb_tx_desc) *
 			    (priv->num_tx_ring[q] * NUM_TX_DESC + 1);
 		dma_free_coherent(ndev->dev.parent, ring_size, priv->tx_ring[q],
 				  priv->tx_desc_dma[q]);
 		priv->tx_ring[q] = NULL;
 	}
+
+	/* Free TX skb ringbuffer.
+	 * SKBs are freed by ravb_tx_free() call above.
+	 */
+	kfree(priv->tx_skb[q]);
+	priv->tx_skb[q] = NULL;
 }
 
 /* Format skb and descriptor buffer for Ethernet AVB */
@@ -420,44 +473,6 @@ static int ravb_dmac_init(struct net_device *ndev)
 	return 0;
 }
 
-/* Free TX skb function for AVB-IP */
-static int ravb_tx_free(struct net_device *ndev, int q)
-{
-	struct ravb_private *priv = netdev_priv(ndev);
-	struct net_device_stats *stats = &priv->stats[q];
-	struct ravb_tx_desc *desc;
-	int free_num = 0;
-	int entry;
-	u32 size;
-
-	for (; priv->cur_tx[q] - priv->dirty_tx[q] > 0; priv->dirty_tx[q]++) {
-		entry = priv->dirty_tx[q] % (priv->num_tx_ring[q] *
-					     NUM_TX_DESC);
-		desc = &priv->tx_ring[q][entry];
-		if (desc->die_dt != DT_FEMPTY)
-			break;
-		/* Descriptor type must be checked before all other reads */
-		dma_rmb();
-		size = le16_to_cpu(desc->ds_tagl) & TX_DS;
-		/* Free the original skb. */
-		if (priv->tx_skb[q][entry / NUM_TX_DESC]) {
-			dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
-					 size, DMA_TO_DEVICE);
-			/* Last packet descriptor? */
-			if (entry % NUM_TX_DESC == NUM_TX_DESC - 1) {
-				entry /= NUM_TX_DESC;
-				dev_kfree_skb_any(priv->tx_skb[q][entry]);
-				priv->tx_skb[q][entry] = NULL;
-				stats->tx_packets++;
-			}
-			free_num++;
-		}
-		stats->tx_bytes += size;
-		desc->die_dt = DT_EEMPTY;
-	}
-	return free_num;
-}
-
 static void ravb_get_tx_tstamp(struct net_device *ndev)
 {
 	struct ravb_private *priv = netdev_priv(ndev);
@@ -797,7 +812,7 @@ static int ravb_poll(struct napi_struct *napi, int budget)
 			spin_lock_irqsave(&priv->lock, flags);
 			/* Clear TX interrupt */
 			ravb_write(ndev, ~mask, TIS);
-			ravb_tx_free(ndev, q);
+			ravb_tx_free(ndev, q, true);
 			netif_wake_subqueue(ndev, q);
 			mmiowb();
 			spin_unlock_irqrestore(&priv->lock, flags);
@@ -1393,7 +1408,8 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 	priv->cur_tx[q] += NUM_TX_DESC;
 	if (priv->cur_tx[q] - priv->dirty_tx[q] >
-	    (priv->num_tx_ring[q] - 1) * NUM_TX_DESC && !ravb_tx_free(ndev, q))
+	    (priv->num_tx_ring[q] - 1) * NUM_TX_DESC &&
+	    !ravb_tx_free(ndev, q, true))
 		netif_stop_subqueue(ndev, q);
 
 exit:
View file

@@ -100,6 +100,14 @@
 /* BUFFER_ALIGN(adr) calculates the number of bytes to the next alignment. */
 #define BUFFER_ALIGN(adr) ((ALIGNMENT - ((u32) adr)) % ALIGNMENT)
 
+#ifdef __BIG_ENDIAN
+#define xemaclite_readl		ioread32be
+#define xemaclite_writel	iowrite32be
+#else
+#define xemaclite_readl		ioread32
+#define xemaclite_writel	iowrite32
+#endif
+
 /**
  * struct net_local - Our private per device data
  * @ndev:		instance of the network device
@@ -158,15 +166,15 @@ static void xemaclite_enable_interrupts(struct net_local *drvdata)
 	u32 reg_data;
 
 	/* Enable the Tx interrupts for the first Buffer */
-	reg_data = __raw_readl(drvdata->base_addr + XEL_TSR_OFFSET);
-	__raw_writel(reg_data | XEL_TSR_XMIT_IE_MASK,
-		     drvdata->base_addr + XEL_TSR_OFFSET);
+	reg_data = xemaclite_readl(drvdata->base_addr + XEL_TSR_OFFSET);
+	xemaclite_writel(reg_data | XEL_TSR_XMIT_IE_MASK,
+			 drvdata->base_addr + XEL_TSR_OFFSET);
 
 	/* Enable the Rx interrupts for the first buffer */
-	__raw_writel(XEL_RSR_RECV_IE_MASK, drvdata->base_addr + XEL_RSR_OFFSET);
+	xemaclite_writel(XEL_RSR_RECV_IE_MASK, drvdata->base_addr + XEL_RSR_OFFSET);
 
 	/* Enable the Global Interrupt Enable */
-	__raw_writel(XEL_GIER_GIE_MASK, drvdata->base_addr + XEL_GIER_OFFSET);
+	xemaclite_writel(XEL_GIER_GIE_MASK, drvdata->base_addr + XEL_GIER_OFFSET);
 }
 
 /**
@@ -181,17 +189,17 @@ static void xemaclite_disable_interrupts(struct net_local *drvdata)
 	u32 reg_data;
 
 	/* Disable the Global Interrupt Enable */
-	__raw_writel(XEL_GIER_GIE_MASK, drvdata->base_addr + XEL_GIER_OFFSET);
+	xemaclite_writel(XEL_GIER_GIE_MASK, drvdata->base_addr + XEL_GIER_OFFSET);
 
 	/* Disable the Tx interrupts for the first buffer */
-	reg_data = __raw_readl(drvdata->base_addr + XEL_TSR_OFFSET);
-	__raw_writel(reg_data & (~XEL_TSR_XMIT_IE_MASK),
-		     drvdata->base_addr + XEL_TSR_OFFSET);
+	reg_data = xemaclite_readl(drvdata->base_addr + XEL_TSR_OFFSET);
+	xemaclite_writel(reg_data & (~XEL_TSR_XMIT_IE_MASK),
+			 drvdata->base_addr + XEL_TSR_OFFSET);
 
 	/* Disable the Rx interrupts for the first buffer */
-	reg_data = __raw_readl(drvdata->base_addr + XEL_RSR_OFFSET);
-	__raw_writel(reg_data & (~XEL_RSR_RECV_IE_MASK),
-		     drvdata->base_addr + XEL_RSR_OFFSET);
+	reg_data = xemaclite_readl(drvdata->base_addr + XEL_RSR_OFFSET);
+	xemaclite_writel(reg_data & (~XEL_RSR_RECV_IE_MASK),
+			 drvdata->base_addr + XEL_RSR_OFFSET);
 }
 
 /**
@@ -323,7 +331,7 @@ static int xemaclite_send_data(struct net_local *drvdata, u8 *data,
 		byte_count = ETH_FRAME_LEN;
 
 	/* Check if the expected buffer is available */
-	reg_data = __raw_readl(addr + XEL_TSR_OFFSET);
+	reg_data = xemaclite_readl(addr + XEL_TSR_OFFSET);
 	if ((reg_data & (XEL_TSR_XMIT_BUSY_MASK |
 	     XEL_TSR_XMIT_ACTIVE_MASK)) == 0) {
@@ -336,7 +344,7 @@ static int xemaclite_send_data(struct net_local *drvdata, u8 *data,
 		addr = (void __iomem __force *)((u32 __force)addr ^
 						 XEL_BUFFER_OFFSET);
-		reg_data = __raw_readl(addr + XEL_TSR_OFFSET);
+		reg_data = xemaclite_readl(addr + XEL_TSR_OFFSET);
 
 		if ((reg_data & (XEL_TSR_XMIT_BUSY_MASK |
 		     XEL_TSR_XMIT_ACTIVE_MASK)) != 0)
@@ -347,16 +355,16 @@ static int xemaclite_send_data(struct net_local *drvdata, u8 *data,
 	/* Write the frame to the buffer */
 	xemaclite_aligned_write(data, (u32 __force *) addr, byte_count);
 
-	__raw_writel((byte_count & XEL_TPLR_LENGTH_MASK),
-		     addr + XEL_TPLR_OFFSET);
+	xemaclite_writel((byte_count & XEL_TPLR_LENGTH_MASK),
+			 addr + XEL_TPLR_OFFSET);
 
 	/* Update the Tx Status Register to indicate that there is a
 	 * frame to send. Set the XEL_TSR_XMIT_ACTIVE_MASK flag which
 	 * is used by the interrupt handler to check whether a frame
 	 * has been transmitted */
-	reg_data = __raw_readl(addr + XEL_TSR_OFFSET);
+	reg_data = xemaclite_readl(addr + XEL_TSR_OFFSET);
 	reg_data |= (XEL_TSR_XMIT_BUSY_MASK | XEL_TSR_XMIT_ACTIVE_MASK);
-	__raw_writel(reg_data, addr + XEL_TSR_OFFSET);
+	xemaclite_writel(reg_data, addr + XEL_TSR_OFFSET);
 
 	return 0;
 }
@@ -371,7 +379,7 @@ static int xemaclite_send_data(struct net_local *drvdata, u8 *data,
  *
  * Return:	Total number of bytes received
  */
-static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data)
+static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data, int maxlen)
 {
 	void __iomem *addr;
 	u16 length, proto_type;
@@ -381,7 +389,7 @@ static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data)
 	addr = (drvdata->base_addr + drvdata->next_rx_buf_to_use);
 
 	/* Verify which buffer has valid data */
-	reg_data = __raw_readl(addr + XEL_RSR_OFFSET);
+	reg_data = xemaclite_readl(addr + XEL_RSR_OFFSET);
 
 	if ((reg_data & XEL_RSR_RECV_DONE_MASK) == XEL_RSR_RECV_DONE_MASK) {
 		if (drvdata->rx_ping_pong != 0)
@@ -398,27 +406,28 @@ static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data)
 			return 0;	/* No data was available */
 
 		/* Verify that buffer has valid data */
-		reg_data = __raw_readl(addr + XEL_RSR_OFFSET);
+		reg_data = xemaclite_readl(addr + XEL_RSR_OFFSET);
 		if ((reg_data & XEL_RSR_RECV_DONE_MASK) !=
 		     XEL_RSR_RECV_DONE_MASK)
 			return 0;	/* No data was available */
 	}
 
 	/* Get the protocol type of the ethernet frame that arrived */
-	proto_type = ((ntohl(__raw_readl(addr + XEL_HEADER_OFFSET +
+	proto_type = ((ntohl(xemaclite_readl(addr + XEL_HEADER_OFFSET +
 			XEL_RXBUFF_OFFSET)) >> XEL_HEADER_SHIFT) &
 			XEL_RPLR_LENGTH_MASK);
 
 	/* Check if received ethernet frame is a raw ethernet frame
 	 * or an IP packet or an ARP packet */
-	if (proto_type > (ETH_FRAME_LEN + ETH_FCS_LEN)) {
+	if (proto_type > ETH_DATA_LEN) {
 
 		if (proto_type == ETH_P_IP) {
-			length = ((ntohl(__raw_readl(addr +
+			length = ((ntohl(xemaclite_readl(addr +
 					XEL_HEADER_IP_LENGTH_OFFSET +
 					XEL_RXBUFF_OFFSET)) >>
 					XEL_HEADER_SHIFT) &
 					XEL_RPLR_LENGTH_MASK);
+			length = min_t(u16, length, ETH_DATA_LEN);
 			length += ETH_HLEN + ETH_FCS_LEN;
 
 		} else if (proto_type == ETH_P_ARP)
@@ -431,14 +440,17 @@ static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data)
 		/* Use the length in the frame, plus the header and trailer */
 		length = proto_type + ETH_HLEN + ETH_FCS_LEN;
 
+	if (WARN_ON(length > maxlen))
+		length = maxlen;
+
 	/* Read from the EmacLite device */
 	xemaclite_aligned_read((u32 __force *) (addr + XEL_RXBUFF_OFFSET),
 				data, length);
 
 	/* Acknowledge the frame */
-	reg_data = __raw_readl(addr + XEL_RSR_OFFSET);
+	reg_data = xemaclite_readl(addr + XEL_RSR_OFFSET);
 	reg_data &= ~XEL_RSR_RECV_DONE_MASK;
-	__raw_writel(reg_data, addr + XEL_RSR_OFFSET);
+	xemaclite_writel(reg_data, addr + XEL_RSR_OFFSET);
 
 	return length;
 }
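Two separate bounds are being enforced in the hunk above. First, the 802.3 type/length field is a length when it is at most ETH_DATA_LEN (1500) and an EtherType such as ETH_P_IP above that, so a length pulled out of an IP header must itself be clamped to ETH_DATA_LEN. Second, the new maxlen argument, passed down from the skb sizing in xemaclite_rx_handler(), caps the copy so that a corrupt length field cannot overrun the destination buffer.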
@@ -465,14 +477,14 @@ static void xemaclite_update_address(struct net_local *drvdata,
 
 	xemaclite_aligned_write(address_ptr, (u32 __force *) addr, ETH_ALEN);
 
-	__raw_writel(ETH_ALEN, addr + XEL_TPLR_OFFSET);
+	xemaclite_writel(ETH_ALEN, addr + XEL_TPLR_OFFSET);
 
 	/* Update the MAC address in the EmacLite */
-	reg_data = __raw_readl(addr + XEL_TSR_OFFSET);
-	__raw_writel(reg_data | XEL_TSR_PROG_MAC_ADDR, addr + XEL_TSR_OFFSET);
+	reg_data = xemaclite_readl(addr + XEL_TSR_OFFSET);
+	xemaclite_writel(reg_data | XEL_TSR_PROG_MAC_ADDR, addr + XEL_TSR_OFFSET);
 
 	/* Wait for EmacLite to finish with the MAC address update */
-	while ((__raw_readl(addr + XEL_TSR_OFFSET) &
+	while ((xemaclite_readl(addr + XEL_TSR_OFFSET) &
 		XEL_TSR_PROG_MAC_ADDR) != 0)
 		;
 }
@@ -605,7 +617,7 @@ static void xemaclite_rx_handler(struct net_device *dev)
 
 	skb_reserve(skb, 2);
 
-	len = xemaclite_recv_data(lp, (u8 *) skb->data);
+	len = xemaclite_recv_data(lp, (u8 *) skb->data, len);
 
 	if (!len) {
 		dev->stats.rx_errors++;
@@ -642,32 +654,32 @@ static irqreturn_t xemaclite_interrupt(int irq, void *dev_id)
 	u32 tx_status;
 
 	/* Check if there is Rx Data available */
-	if ((__raw_readl(base_addr + XEL_RSR_OFFSET) &
+	if ((xemaclite_readl(base_addr + XEL_RSR_OFFSET) &
 			 XEL_RSR_RECV_DONE_MASK) ||
-	    (__raw_readl(base_addr + XEL_BUFFER_OFFSET + XEL_RSR_OFFSET)
+	    (xemaclite_readl(base_addr + XEL_BUFFER_OFFSET + XEL_RSR_OFFSET)
 			 & XEL_RSR_RECV_DONE_MASK))
 
 		xemaclite_rx_handler(dev);
 
 	/* Check if the Transmission for the first buffer is completed */
-	tx_status = __raw_readl(base_addr + XEL_TSR_OFFSET);
+	tx_status = xemaclite_readl(base_addr + XEL_TSR_OFFSET);
 	if (((tx_status & XEL_TSR_XMIT_BUSY_MASK) == 0) &&
 		(tx_status & XEL_TSR_XMIT_ACTIVE_MASK) != 0) {
 
 		tx_status &= ~XEL_TSR_XMIT_ACTIVE_MASK;
-		__raw_writel(tx_status, base_addr + XEL_TSR_OFFSET);
+		xemaclite_writel(tx_status, base_addr + XEL_TSR_OFFSET);
 
 		tx_complete = true;
 	}
 
 	/* Check if the Transmission for the second buffer is completed */
-	tx_status = __raw_readl(base_addr + XEL_BUFFER_OFFSET + XEL_TSR_OFFSET);
+	tx_status = xemaclite_readl(base_addr + XEL_BUFFER_OFFSET + XEL_TSR_OFFSET);
 	if (((tx_status & XEL_TSR_XMIT_BUSY_MASK) == 0) &&
 		(tx_status & XEL_TSR_XMIT_ACTIVE_MASK) != 0) {
 
 		tx_status &= ~XEL_TSR_XMIT_ACTIVE_MASK;
-		__raw_writel(tx_status, base_addr + XEL_BUFFER_OFFSET +
+		xemaclite_writel(tx_status, base_addr + XEL_BUFFER_OFFSET +
 			     XEL_TSR_OFFSET);
 
 		tx_complete = true;
 	}
@@ -700,7 +712,7 @@ static int xemaclite_mdio_wait(struct net_local *lp)
 	/* wait for the MDIO interface to not be busy or timeout
 	   after some time.
 	*/
-	while (__raw_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET) &
+	while (xemaclite_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET) &
 			XEL_MDIOCTRL_MDIOSTS_MASK) {
 		if (time_before_eq(end, jiffies)) {
 			WARN_ON(1);
@@ -736,17 +748,17 @@ static int xemaclite_mdio_read(struct mii_bus *bus, int phy_id, int reg)
 	 * MDIO Address register. Set the Status bit in the MDIO Control
 	 * register to start a MDIO read transaction.
 	 */
-	ctrl_reg = __raw_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET);
+	ctrl_reg = xemaclite_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET);
__raw_writel(XEL_MDIOADDR_OP_MASK | xemaclite_writel(XEL_MDIOADDR_OP_MASK |
((phy_id << XEL_MDIOADDR_PHYADR_SHIFT) | reg), ((phy_id << XEL_MDIOADDR_PHYADR_SHIFT) | reg),
lp->base_addr + XEL_MDIOADDR_OFFSET); lp->base_addr + XEL_MDIOADDR_OFFSET);
__raw_writel(ctrl_reg | XEL_MDIOCTRL_MDIOSTS_MASK, xemaclite_writel(ctrl_reg | XEL_MDIOCTRL_MDIOSTS_MASK,
lp->base_addr + XEL_MDIOCTRL_OFFSET); lp->base_addr + XEL_MDIOCTRL_OFFSET);
if (xemaclite_mdio_wait(lp)) if (xemaclite_mdio_wait(lp))
return -ETIMEDOUT; return -ETIMEDOUT;
rc = __raw_readl(lp->base_addr + XEL_MDIORD_OFFSET); rc = xemaclite_readl(lp->base_addr + XEL_MDIORD_OFFSET);
dev_dbg(&lp->ndev->dev, dev_dbg(&lp->ndev->dev,
"xemaclite_mdio_read(phy_id=%i, reg=%x) == %x\n", "xemaclite_mdio_read(phy_id=%i, reg=%x) == %x\n",
@ -783,13 +795,13 @@ static int xemaclite_mdio_write(struct mii_bus *bus, int phy_id, int reg,
* Data register. Finally, set the Status bit in the MDIO Control * Data register. Finally, set the Status bit in the MDIO Control
* register to start a MDIO write transaction. * register to start a MDIO write transaction.
*/ */
ctrl_reg = __raw_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET); ctrl_reg = xemaclite_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET);
__raw_writel(~XEL_MDIOADDR_OP_MASK & xemaclite_writel(~XEL_MDIOADDR_OP_MASK &
((phy_id << XEL_MDIOADDR_PHYADR_SHIFT) | reg), ((phy_id << XEL_MDIOADDR_PHYADR_SHIFT) | reg),
lp->base_addr + XEL_MDIOADDR_OFFSET); lp->base_addr + XEL_MDIOADDR_OFFSET);
__raw_writel(val, lp->base_addr + XEL_MDIOWR_OFFSET); xemaclite_writel(val, lp->base_addr + XEL_MDIOWR_OFFSET);
__raw_writel(ctrl_reg | XEL_MDIOCTRL_MDIOSTS_MASK, xemaclite_writel(ctrl_reg | XEL_MDIOCTRL_MDIOSTS_MASK,
lp->base_addr + XEL_MDIOCTRL_OFFSET); lp->base_addr + XEL_MDIOCTRL_OFFSET);
return 0; return 0;
} }
@ -836,8 +848,8 @@ static int xemaclite_mdio_setup(struct net_local *lp, struct device *dev)
/* Enable the MDIO bus by asserting the enable bit in MDIO Control /* Enable the MDIO bus by asserting the enable bit in MDIO Control
* register. * register.
*/ */
__raw_writel(XEL_MDIOCTRL_MDIOEN_MASK, xemaclite_writel(XEL_MDIOCTRL_MDIOEN_MASK,
lp->base_addr + XEL_MDIOCTRL_OFFSET); lp->base_addr + XEL_MDIOCTRL_OFFSET);
bus = mdiobus_alloc(); bus = mdiobus_alloc();
if (!bus) { if (!bus) {
@ -1141,8 +1153,8 @@ static int xemaclite_of_probe(struct platform_device *ofdev)
dev_warn(dev, "No MAC address found\n"); dev_warn(dev, "No MAC address found\n");
/* Clear the Tx CSR's in case this is a restart */ /* Clear the Tx CSR's in case this is a restart */
__raw_writel(0, lp->base_addr + XEL_TSR_OFFSET); xemaclite_writel(0, lp->base_addr + XEL_TSR_OFFSET);
__raw_writel(0, lp->base_addr + XEL_BUFFER_OFFSET + XEL_TSR_OFFSET); xemaclite_writel(0, lp->base_addr + XEL_BUFFER_OFFSET + XEL_TSR_OFFSET);
/* Set the MAC address in the EmacLite device */ /* Set the MAC address in the EmacLite device */
xemaclite_update_address(lp, ndev->dev_addr); xemaclite_update_address(lp, ndev->dev_addr);
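
Every __raw_readl/__raw_writel in this driver is converted to xemaclite_readl/xemaclite_writel. The wrapper definitions fall outside the hunks shown, so the bodies below are an assumption based on the usual form of this conversion: ioread32/iowrite32 perform ordered accesses, where the raw variants may be reordered against each other, which is the freeze this series is curing.

#include <linux/io.h>

/* Assumed wrapper bodies -- not part of the hunks above. */
static inline u32 xemaclite_readl(void __iomem *addr)
{
	return ioread32(addr);
}

static inline void xemaclite_writel(u32 val, void __iomem *addr)
{
	iowrite32(val, addr);
}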

View file

@ -648,8 +648,8 @@ static void ax_setup(struct net_device *dev)
{ {
/* Finish setting up the DEVICE info. */ /* Finish setting up the DEVICE info. */
dev->mtu = AX_MTU; dev->mtu = AX_MTU;
dev->hard_header_len = 0; dev->hard_header_len = AX25_MAX_HEADER_LEN;
dev->addr_len = 0; dev->addr_len = AX25_ADDR_LEN;
dev->type = ARPHRD_AX25; dev->type = ARPHRD_AX25;
dev->tx_queue_len = 10; dev->tx_queue_len = 10;
dev->header_ops = &ax25_header_ops; dev->header_ops = &ax25_header_ops;

View file

@ -1851,6 +1851,9 @@ static int r8152_poll(struct napi_struct *napi, int budget)
napi_complete(napi); napi_complete(napi);
if (!list_empty(&tp->rx_done)) if (!list_empty(&tp->rx_done))
napi_schedule(napi); napi_schedule(napi);
else if (!skb_queue_empty(&tp->tx_queue) &&
!list_empty(&tp->tx_free))
napi_schedule(napi);
} }
return work_done; return work_done;
@ -2990,10 +2993,13 @@ static void set_carrier(struct r8152 *tp)
if (!netif_carrier_ok(netdev)) { if (!netif_carrier_ok(netdev)) {
tp->rtl_ops.enable(tp); tp->rtl_ops.enable(tp);
set_bit(RTL8152_SET_RX_MODE, &tp->flags); set_bit(RTL8152_SET_RX_MODE, &tp->flags);
netif_stop_queue(netdev);
napi_disable(&tp->napi); napi_disable(&tp->napi);
netif_carrier_on(netdev); netif_carrier_on(netdev);
rtl_start_rx(tp); rtl_start_rx(tp);
napi_enable(&tp->napi); napi_enable(&tp->napi);
netif_wake_queue(netdev);
netif_info(tp, link, netdev, "carrier on\n");
} }
} else { } else {
if (netif_carrier_ok(netdev)) { if (netif_carrier_ok(netdev)) {
@ -3001,6 +3007,7 @@ static void set_carrier(struct r8152 *tp)
napi_disable(&tp->napi); napi_disable(&tp->napi);
tp->rtl_ops.disable(tp); tp->rtl_ops.disable(tp);
napi_enable(&tp->napi); napi_enable(&tp->napi);
netif_info(tp, link, netdev, "carrier off\n");
} }
} }
} }
@ -3385,12 +3392,12 @@ static int rtl8152_pre_reset(struct usb_interface *intf)
if (!netif_running(netdev)) if (!netif_running(netdev))
return 0; return 0;
netif_stop_queue(netdev);
napi_disable(&tp->napi); napi_disable(&tp->napi);
clear_bit(WORK_ENABLE, &tp->flags); clear_bit(WORK_ENABLE, &tp->flags);
usb_kill_urb(tp->intr_urb); usb_kill_urb(tp->intr_urb);
cancel_delayed_work_sync(&tp->schedule); cancel_delayed_work_sync(&tp->schedule);
if (netif_carrier_ok(netdev)) { if (netif_carrier_ok(netdev)) {
netif_stop_queue(netdev);
mutex_lock(&tp->control); mutex_lock(&tp->control);
tp->rtl_ops.disable(tp); tp->rtl_ops.disable(tp);
mutex_unlock(&tp->control); mutex_unlock(&tp->control);
@ -3415,12 +3422,14 @@ static int rtl8152_post_reset(struct usb_interface *intf)
if (netif_carrier_ok(netdev)) { if (netif_carrier_ok(netdev)) {
mutex_lock(&tp->control); mutex_lock(&tp->control);
tp->rtl_ops.enable(tp); tp->rtl_ops.enable(tp);
rtl_start_rx(tp);
rtl8152_set_rx_mode(netdev); rtl8152_set_rx_mode(netdev);
mutex_unlock(&tp->control); mutex_unlock(&tp->control);
netif_wake_queue(netdev);
} }
napi_enable(&tp->napi); napi_enable(&tp->napi);
netif_wake_queue(netdev);
usb_submit_urb(tp->intr_urb, GFP_KERNEL);
return 0; return 0;
} }
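
Taken together, the r8152 hunks enforce one ordering rule across carrier changes, pre-reset and post-reset: stop the TX queue before disabling NAPI, and wake it only after RX and NAPI are running again, so no transmit can race a half-torn-down datapath; the poll handler likewise reschedules itself when TX work remains after its budget is spent. The carrier-on sequence, condensed from the hunk above (sketch, log call omitted):

/* carrier came up: bring the datapath back in a safe order */
tp->rtl_ops.enable(tp);
set_bit(RTL8152_SET_RX_MODE, &tp->flags);
netif_stop_queue(netdev);	/* 1. no new TX while NAPI is down    */
napi_disable(&tp->napi);
netif_carrier_on(netdev);
rtl_start_rx(tp);		/* 2. RX ready before polling resumes */
napi_enable(&tp->napi);
netif_wake_queue(netdev);	/* 3. TX last, once polling is live   */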

View file

@ -73,8 +73,6 @@ static atomic_t iface_counter = ATOMIC_INIT(0);
/* Private data structure */ /* Private data structure */
struct sierra_net_data { struct sierra_net_data {
u8 ethr_hdr_tmpl[ETH_HLEN]; /* ethernet header template for rx'd pkts */
u16 link_up; /* air link up or down */ u16 link_up; /* air link up or down */
u8 tx_hdr_template[4]; /* part of HIP hdr for tx'd packets */ u8 tx_hdr_template[4]; /* part of HIP hdr for tx'd packets */
@ -122,6 +120,7 @@ struct param {
/* LSI Protocol types */ /* LSI Protocol types */
#define SIERRA_NET_PROTOCOL_UMTS 0x01 #define SIERRA_NET_PROTOCOL_UMTS 0x01
#define SIERRA_NET_PROTOCOL_UMTS_DS 0x04
/* LSI Coverage */ /* LSI Coverage */
#define SIERRA_NET_COVERAGE_NONE 0x00 #define SIERRA_NET_COVERAGE_NONE 0x00
#define SIERRA_NET_COVERAGE_NOPACKET 0x01 #define SIERRA_NET_COVERAGE_NOPACKET 0x01
@ -129,7 +128,8 @@ struct param {
/* LSI Session */ /* LSI Session */
#define SIERRA_NET_SESSION_IDLE 0x00 #define SIERRA_NET_SESSION_IDLE 0x00
/* LSI Link types */ /* LSI Link types */
#define SIERRA_NET_AS_LINK_TYPE_IPv4 0x00 #define SIERRA_NET_AS_LINK_TYPE_IPV4 0x00
#define SIERRA_NET_AS_LINK_TYPE_IPV6 0x02
struct lsi_umts { struct lsi_umts {
u8 protocol; u8 protocol;
@ -137,9 +137,14 @@ struct lsi_umts {
__be16 length; __be16 length;
/* eventually use a union for the rest - assume umts for now */ /* eventually use a union for the rest - assume umts for now */
u8 coverage; u8 coverage;
u8 unused2[41]; u8 network_len; /* network name len */
u8 network[40]; /* network name (UCS2, bigendian) */
u8 session_state; u8 session_state;
u8 unused3[33]; u8 unused3[33];
} __packed;
struct lsi_umts_single {
struct lsi_umts lsi;
u8 link_type; u8 link_type;
u8 pdp_addr_len; /* NW-supplied PDP address len */ u8 pdp_addr_len; /* NW-supplied PDP address len */
u8 pdp_addr[16]; /* NW-supplied PDP address (bigendian)) */ u8 pdp_addr[16]; /* NW-supplied PDP address (bigendian)) */
@ -158,10 +163,31 @@ struct lsi_umts {
u8 reserved[8]; u8 reserved[8];
} __packed; } __packed;
struct lsi_umts_dual {
struct lsi_umts lsi;
u8 pdp_addr4_len; /* NW-supplied PDP IPv4 address len */
u8 pdp_addr4[4]; /* NW-supplied PDP IPv4 address (bigendian)) */
u8 pdp_addr6_len; /* NW-supplied PDP IPv6 address len */
u8 pdp_addr6[16]; /* NW-supplied PDP IPv6 address (bigendian)) */
u8 unused4[23];
u8 dns1_addr4_len; /* NW-supplied 1st DNS v4 address len (bigendian) */
u8 dns1_addr4[4]; /* NW-supplied 1st DNS v4 address */
u8 dns1_addr6_len; /* NW-supplied 1st DNS v6 address len */
u8 dns1_addr6[16]; /* NW-supplied 1st DNS v6 address (bigendian)*/
u8 dns2_addr4_len; /* NW-supplied 2nd DNS v4 address len (bigendian) */
u8 dns2_addr4[4]; /* NW-supplied 2nd DNS v4 address */
u8 dns2_addr6_len; /* NW-supplied 2nd DNS v6 address len */
u8 dns2_addr6[16]; /* NW-supplied 2nd DNS v6 address (bigendian)*/
u8 unused5[68];
} __packed;
#define SIERRA_NET_LSI_COMMON_LEN 4 #define SIERRA_NET_LSI_COMMON_LEN 4
#define SIERRA_NET_LSI_UMTS_LEN (sizeof(struct lsi_umts)) #define SIERRA_NET_LSI_UMTS_LEN (sizeof(struct lsi_umts_single))
#define SIERRA_NET_LSI_UMTS_STATUS_LEN \ #define SIERRA_NET_LSI_UMTS_STATUS_LEN \
(SIERRA_NET_LSI_UMTS_LEN - SIERRA_NET_LSI_COMMON_LEN) (SIERRA_NET_LSI_UMTS_LEN - SIERRA_NET_LSI_COMMON_LEN)
#define SIERRA_NET_LSI_UMTS_DS_LEN (sizeof(struct lsi_umts_dual))
#define SIERRA_NET_LSI_UMTS_DS_STATUS_LEN \
(SIERRA_NET_LSI_UMTS_DS_LEN - SIERRA_NET_LSI_COMMON_LEN)
/* Forward definitions */ /* Forward definitions */
static void sierra_sync_timer(unsigned long syncdata); static void sierra_sync_timer(unsigned long syncdata);
@ -191,10 +217,11 @@ static inline void sierra_net_set_private(struct usbnet *dev,
dev->data[0] = (unsigned long)priv; dev->data[0] = (unsigned long)priv;
} }
/* is packet IPv4 */ /* is packet IPv4/IPv6 */
static inline int is_ip(struct sk_buff *skb) static inline int is_ip(struct sk_buff *skb)
{ {
return skb->protocol == cpu_to_be16(ETH_P_IP); return skb->protocol == cpu_to_be16(ETH_P_IP) ||
skb->protocol == cpu_to_be16(ETH_P_IPV6);
} }
/* /*
@ -350,46 +377,51 @@ static inline int sierra_net_is_valid_addrlen(u8 len)
static int sierra_net_parse_lsi(struct usbnet *dev, char *data, int datalen) static int sierra_net_parse_lsi(struct usbnet *dev, char *data, int datalen)
{ {
struct lsi_umts *lsi = (struct lsi_umts *)data; struct lsi_umts *lsi = (struct lsi_umts *)data;
u32 expected_length;
if (datalen < sizeof(struct lsi_umts)) { if (datalen < sizeof(struct lsi_umts_single)) {
netdev_err(dev->net, "%s: Data length %d, exp %Zu\n", netdev_err(dev->net, "%s: Data length %d, exp >= %Zu\n",
__func__, datalen, __func__, datalen, sizeof(struct lsi_umts_single));
sizeof(struct lsi_umts));
return -1; return -1;
} }
if (lsi->length != cpu_to_be16(SIERRA_NET_LSI_UMTS_STATUS_LEN)) {
netdev_err(dev->net, "%s: LSI_UMTS_STATUS_LEN %d, exp %u\n",
__func__, be16_to_cpu(lsi->length),
(u32)SIERRA_NET_LSI_UMTS_STATUS_LEN);
return -1;
}
/* Validate the protocol - only support UMTS for now */
if (lsi->protocol != SIERRA_NET_PROTOCOL_UMTS) {
netdev_err(dev->net, "Protocol unsupported, 0x%02x\n",
lsi->protocol);
return -1;
}
/* Validate the link type */
if (lsi->link_type != SIERRA_NET_AS_LINK_TYPE_IPv4) {
netdev_err(dev->net, "Link type unsupported: 0x%02x\n",
lsi->link_type);
return -1;
}
/* Validate the coverage */
if (lsi->coverage == SIERRA_NET_COVERAGE_NONE
|| lsi->coverage == SIERRA_NET_COVERAGE_NOPACKET) {
netdev_err(dev->net, "No coverage, 0x%02x\n", lsi->coverage);
return 0;
}
/* Validate the session state */ /* Validate the session state */
if (lsi->session_state == SIERRA_NET_SESSION_IDLE) { if (lsi->session_state == SIERRA_NET_SESSION_IDLE) {
netdev_err(dev->net, "Session idle, 0x%02x\n", netdev_err(dev->net, "Session idle, 0x%02x\n",
lsi->session_state); lsi->session_state);
return 0;
}
/* Validate the protocol - only support UMTS for now */
if (lsi->protocol == SIERRA_NET_PROTOCOL_UMTS) {
struct lsi_umts_single *single = (struct lsi_umts_single *)lsi;
/* Validate the link type */
if (single->link_type != SIERRA_NET_AS_LINK_TYPE_IPV4 &&
single->link_type != SIERRA_NET_AS_LINK_TYPE_IPV6) {
netdev_err(dev->net, "Link type unsupported: 0x%02x\n",
single->link_type);
return -1;
}
expected_length = SIERRA_NET_LSI_UMTS_STATUS_LEN;
} else if (lsi->protocol == SIERRA_NET_PROTOCOL_UMTS_DS) {
expected_length = SIERRA_NET_LSI_UMTS_DS_STATUS_LEN;
} else {
netdev_err(dev->net, "Protocol unsupported, 0x%02x\n",
lsi->protocol);
return -1;
}
if (be16_to_cpu(lsi->length) != expected_length) {
netdev_err(dev->net, "%s: LSI_UMTS_STATUS_LEN %d, exp %u\n",
__func__, be16_to_cpu(lsi->length), expected_length);
return -1;
}
/* Validate the coverage */
if (lsi->coverage == SIERRA_NET_COVERAGE_NONE ||
lsi->coverage == SIERRA_NET_COVERAGE_NOPACKET) {
netdev_err(dev->net, "No coverage, 0x%02x\n", lsi->coverage);
return 0; return 0;
} }
@ -662,7 +694,6 @@ static int sierra_net_bind(struct usbnet *dev, struct usb_interface *intf)
u8 numendpoints; u8 numendpoints;
u16 fwattr = 0; u16 fwattr = 0;
int status; int status;
struct ethhdr *eth;
struct sierra_net_data *priv; struct sierra_net_data *priv;
static const u8 sync_tmplate[sizeof(priv->sync_msg)] = { static const u8 sync_tmplate[sizeof(priv->sync_msg)] = {
0x00, 0x00, SIERRA_NET_HIP_MSYNC_ID, 0x00}; 0x00, 0x00, SIERRA_NET_HIP_MSYNC_ID, 0x00};
@ -700,11 +731,6 @@ static int sierra_net_bind(struct usbnet *dev, struct usb_interface *intf)
dev->net->dev_addr[ETH_ALEN-2] = atomic_inc_return(&iface_counter); dev->net->dev_addr[ETH_ALEN-2] = atomic_inc_return(&iface_counter);
dev->net->dev_addr[ETH_ALEN-1] = ifacenum; dev->net->dev_addr[ETH_ALEN-1] = ifacenum;
/* we will have to manufacture ethernet headers, prepare template */
eth = (struct ethhdr *)priv->ethr_hdr_tmpl;
memcpy(&eth->h_dest, dev->net->dev_addr, ETH_ALEN);
eth->h_proto = cpu_to_be16(ETH_P_IP);
/* prepare shutdown message template */ /* prepare shutdown message template */
memcpy(priv->shdwn_msg, shdwn_tmplate, sizeof(priv->shdwn_msg)); memcpy(priv->shdwn_msg, shdwn_tmplate, sizeof(priv->shdwn_msg));
/* set context index initially to 0 - prepares tx hdr template */ /* set context index initially to 0 - prepares tx hdr template */
@ -833,9 +859,14 @@ static int sierra_net_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
skb_pull(skb, hh.hdrlen); skb_pull(skb, hh.hdrlen);
/* We are going to accept this packet, prepare it */ /* We are going to accept this packet, prepare it.
memcpy(skb->data, sierra_net_get_private(dev)->ethr_hdr_tmpl, * In case protocol is IPv6, keep it, otherwise force IPv4.
ETH_HLEN); */
skb_reset_mac_header(skb);
if (eth_hdr(skb)->h_proto != cpu_to_be16(ETH_P_IPV6))
eth_hdr(skb)->h_proto = cpu_to_be16(ETH_P_IP);
eth_zero_addr(eth_hdr(skb)->h_source);
memcpy(eth_hdr(skb)->h_dest, dev->net->dev_addr, ETH_ALEN);
/* Last packet in batch handled by usbnet */ /* Last packet in batch handled by usbnet */
if (hh.payload_len.word == skb->len) if (hh.payload_len.word == skb->len)

View file

@ -293,7 +293,7 @@ struct parport *parport_gsc_probe_port(unsigned long base,
p->irq = PARPORT_IRQ_NONE; p->irq = PARPORT_IRQ_NONE;
} }
if (p->irq != PARPORT_IRQ_NONE) { if (p->irq != PARPORT_IRQ_NONE) {
printk(", irq %d", p->irq); pr_cont(", irq %d", p->irq);
if (p->dma == PARPORT_DMA_AUTO) { if (p->dma == PARPORT_DMA_AUTO) {
p->dma = PARPORT_DMA_NONE; p->dma = PARPORT_DMA_NONE;
@ -303,8 +303,8 @@ struct parport *parport_gsc_probe_port(unsigned long base,
is mandatory (see above) */ is mandatory (see above) */
p->dma = PARPORT_DMA_NONE; p->dma = PARPORT_DMA_NONE;
printk(" ["); pr_cont(" [");
#define printmode(x) {if(p->modes&PARPORT_MODE_##x){printk("%s%s",f?",":"",#x);f++;}} #define printmode(x) {if(p->modes&PARPORT_MODE_##x){pr_cont("%s%s",f?",":"",#x);f++;}}
{ {
int f = 0; int f = 0;
printmode(PCSPP); printmode(PCSPP);
@ -315,7 +315,7 @@ struct parport *parport_gsc_probe_port(unsigned long base,
// printmode(DMA); // printmode(DMA);
} }
#undef printmode #undef printmode
printk("]\n"); pr_cont("]\n");
if (p->irq != PARPORT_IRQ_NONE) { if (p->irq != PARPORT_IRQ_NONE) {
if (request_irq (p->irq, parport_irq_handler, if (request_irq (p->irq, parport_irq_handler,

View file

@ -217,7 +217,7 @@ static const struct berlin_desc_group berlin4ct_soc_pinctrl_groups[] = {
BERLIN_PINCTRL_GROUP("SCRD0_CRD_PRES", 0xc, 0x3, 0x15, BERLIN_PINCTRL_GROUP("SCRD0_CRD_PRES", 0xc, 0x3, 0x15,
BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO20 */ BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO20 */
BERLIN_PINCTRL_FUNCTION(0x1, "scrd0"), /* crd pres */ BERLIN_PINCTRL_FUNCTION(0x1, "scrd0"), /* crd pres */
BERLIN_PINCTRL_FUNCTION(0x1, "sd1a")), /* DAT3 */ BERLIN_PINCTRL_FUNCTION(0x3, "sd1a")), /* DAT3 */
BERLIN_PINCTRL_GROUP("SPI1_SS0n", 0xc, 0x3, 0x18, BERLIN_PINCTRL_GROUP("SPI1_SS0n", 0xc, 0x3, 0x18,
BERLIN_PINCTRL_FUNCTION(0x0, "spi1"), /* SS0n */ BERLIN_PINCTRL_FUNCTION(0x0, "spi1"), /* SS0n */
BERLIN_PINCTRL_FUNCTION(0x1, "gpio"), /* GPIO37 */ BERLIN_PINCTRL_FUNCTION(0x1, "gpio"), /* GPIO37 */

View file

@ -894,7 +894,7 @@ int rtw_check_beacon_data(struct adapter *padapter, u8 *pbuf, int len)
return _FAIL; return _FAIL;
if (len > MAX_IE_SZ) if (len < 0 || len > MAX_IE_SZ)
return _FAIL; return _FAIL;
pbss_network->IELength = len; pbss_network->IELength = len;

View file

@ -1185,8 +1185,7 @@ void rtl92e_fill_tx_desc(struct net_device *dev, struct tx_desc *pdesc,
struct cb_desc *cb_desc, struct sk_buff *skb) struct cb_desc *cb_desc, struct sk_buff *skb)
{ {
struct r8192_priv *priv = rtllib_priv(dev); struct r8192_priv *priv = rtllib_priv(dev);
dma_addr_t mapping = pci_map_single(priv->pdev, skb->data, skb->len, dma_addr_t mapping;
PCI_DMA_TODEVICE);
struct tx_fwinfo_8190pci *pTxFwInfo = NULL; struct tx_fwinfo_8190pci *pTxFwInfo = NULL;
pTxFwInfo = (struct tx_fwinfo_8190pci *)skb->data; pTxFwInfo = (struct tx_fwinfo_8190pci *)skb->data;
@ -1197,8 +1196,6 @@ void rtl92e_fill_tx_desc(struct net_device *dev, struct tx_desc *pdesc,
pTxFwInfo->Short = _rtl92e_query_is_short(pTxFwInfo->TxHT, pTxFwInfo->Short = _rtl92e_query_is_short(pTxFwInfo->TxHT,
pTxFwInfo->TxRate, cb_desc); pTxFwInfo->TxRate, cb_desc);
if (pci_dma_mapping_error(priv->pdev, mapping))
netdev_err(dev, "%s(): DMA Mapping error\n", __func__);
if (cb_desc->bAMPDUEnable) { if (cb_desc->bAMPDUEnable) {
pTxFwInfo->AllowAggregation = 1; pTxFwInfo->AllowAggregation = 1;
pTxFwInfo->RxMF = cb_desc->ampdu_factor; pTxFwInfo->RxMF = cb_desc->ampdu_factor;
@ -1233,6 +1230,14 @@ void rtl92e_fill_tx_desc(struct net_device *dev, struct tx_desc *pdesc,
} }
memset((u8 *)pdesc, 0, 12); memset((u8 *)pdesc, 0, 12);
mapping = pci_map_single(priv->pdev, skb->data, skb->len,
PCI_DMA_TODEVICE);
if (pci_dma_mapping_error(priv->pdev, mapping)) {
netdev_err(dev, "%s(): DMA Mapping error\n", __func__);
return;
}
pdesc->LINIP = 0; pdesc->LINIP = 0;
pdesc->CmdInit = 1; pdesc->CmdInit = 1;
pdesc->Offset = sizeof(struct tx_fwinfo_8190pci) + 8; pdesc->Offset = sizeof(struct tx_fwinfo_8190pci) + 8;

View file

@ -27,6 +27,7 @@
#define UARTn_FRAME 0x04 #define UARTn_FRAME 0x04
#define UARTn_FRAME_DATABITS__MASK 0x000f #define UARTn_FRAME_DATABITS__MASK 0x000f
#define UARTn_FRAME_DATABITS(n) ((n) - 3) #define UARTn_FRAME_DATABITS(n) ((n) - 3)
#define UARTn_FRAME_PARITY__MASK 0x0300
#define UARTn_FRAME_PARITY_NONE 0x0000 #define UARTn_FRAME_PARITY_NONE 0x0000
#define UARTn_FRAME_PARITY_EVEN 0x0200 #define UARTn_FRAME_PARITY_EVEN 0x0200
#define UARTn_FRAME_PARITY_ODD 0x0300 #define UARTn_FRAME_PARITY_ODD 0x0300
@ -572,12 +573,16 @@ static void efm32_uart_console_get_options(struct efm32_uart_port *efm_port,
16 * (4 + (clkdiv >> 6))); 16 * (4 + (clkdiv >> 6)));
frame = efm32_uart_read32(efm_port, UARTn_FRAME); frame = efm32_uart_read32(efm_port, UARTn_FRAME);
if (frame & UARTn_FRAME_PARITY_ODD) switch (frame & UARTn_FRAME_PARITY__MASK) {
case UARTn_FRAME_PARITY_ODD:
*parity = 'o'; *parity = 'o';
else if (frame & UARTn_FRAME_PARITY_EVEN) break;
case UARTn_FRAME_PARITY_EVEN:
*parity = 'e'; *parity = 'e';
else break;
default:
*parity = 'n'; *parity = 'n';
}
*bits = (frame & UARTn_FRAME_DATABITS__MASK) - *bits = (frame & UARTn_FRAME_DATABITS__MASK) -
UARTn_FRAME_DATABITS(4) + 4; UARTn_FRAME_DATABITS(4) + 4;
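
The old chain of bit tests could never report even parity: UARTn_FRAME_PARITY_ODD (0x0300) is a two-bit field value that contains the UARTn_FRAME_PARITY_EVEN bit (0x0200), so frame & UARTn_FRAME_PARITY_ODD is also nonzero for even parity. Masking the field and comparing for equality fixes the decode. A compilable before/after demonstration:

#include <stdio.h>

#define UARTn_FRAME_PARITY__MASK 0x0300
#define UARTn_FRAME_PARITY_EVEN  0x0200
#define UARTn_FRAME_PARITY_ODD   0x0300

static char old_parity(unsigned int frame)
{
	if (frame & UARTn_FRAME_PARITY_ODD)	/* bug: 0x0200 & 0x0300 != 0 */
		return 'o';
	else if (frame & UARTn_FRAME_PARITY_EVEN)
		return 'e';
	return 'n';
}

static char new_parity(unsigned int frame)
{
	switch (frame & UARTn_FRAME_PARITY__MASK) {
	case UARTn_FRAME_PARITY_ODD:
		return 'o';
	case UARTn_FRAME_PARITY_EVEN:
		return 'e';
	default:
		return 'n';
	}
}

int main(void)
{
	unsigned int frame = UARTn_FRAME_PARITY_EVEN;

	/* old=o new=e: even parity used to be misreported as odd */
	printf("old=%c new=%c\n", old_parity(frame), new_parity(frame));
	return 0;
}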

View file

@ -2569,6 +2569,7 @@ struct usb_hcd *usb_create_shared_hcd(const struct hc_driver *driver,
hcd->bandwidth_mutex = kmalloc(sizeof(*hcd->bandwidth_mutex), hcd->bandwidth_mutex = kmalloc(sizeof(*hcd->bandwidth_mutex),
GFP_KERNEL); GFP_KERNEL);
if (!hcd->bandwidth_mutex) { if (!hcd->bandwidth_mutex) {
kfree(hcd->address0_mutex);
kfree(hcd); kfree(hcd);
dev_dbg(dev, "hcd bandwidth mutex alloc failed\n"); dev_dbg(dev, "hcd bandwidth mutex alloc failed\n");
return NULL; return NULL;

View file

@ -1329,7 +1329,13 @@ static int hub_configure(struct usb_hub *hub,
if (ret < 0) { if (ret < 0) {
message = "can't read hub descriptor"; message = "can't read hub descriptor";
goto fail; goto fail;
} else if (hub->descriptor->bNbrPorts > USB_MAXCHILDREN) { }
maxchild = USB_MAXCHILDREN;
if (hub_is_superspeed(hdev))
maxchild = min_t(unsigned, maxchild, USB_SS_MAXPORTS);
if (hub->descriptor->bNbrPorts > maxchild) {
message = "hub has too many ports!"; message = "hub has too many ports!";
ret = -ENODEV; ret = -ENODEV;
goto fail; goto fail;
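
The reworked check bounds bNbrPorts by a speed-dependent limit: USB_MAXCHILDREN for ordinary hubs, but only USB_SS_MAXPORTS (15, per USB 3.1 Table 10-5) for SuperSpeed hubs, whose route strings allot four bits per hop. A descriptor claiming more ports is rejected up front instead of propagating an unencodable port number. Reduced to a user-space predicate:

#include <stdio.h>

#define USB_MAXCHILDREN 31
#define USB_SS_MAXPORTS 15	/* USB 3.1 spec Table 10-5 */

/* Return 0 if a hub descriptor's port count is acceptable (sketch). */
static int check_nbr_ports(unsigned int bNbrPorts, int is_superspeed)
{
	unsigned int maxchild = USB_MAXCHILDREN;

	if (is_superspeed && maxchild > USB_SS_MAXPORTS)
		maxchild = USB_SS_MAXPORTS;
	return bNbrPorts > maxchild ? -1 : 0;
}

int main(void)
{
	/* a SuperSpeed hub claiming 20 ports is now rejected */
	printf("%d\n", check_nbr_ports(20, 1));		/* prints -1 */
	return 0;
}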

View file

@ -148,7 +148,8 @@ static int dwc3_exynos_probe(struct platform_device *pdev)
exynos->axius_clk = devm_clk_get(dev, "usbdrd30_axius_clk"); exynos->axius_clk = devm_clk_get(dev, "usbdrd30_axius_clk");
if (IS_ERR(exynos->axius_clk)) { if (IS_ERR(exynos->axius_clk)) {
dev_err(dev, "no AXI UpScaler clk specified\n"); dev_err(dev, "no AXI UpScaler clk specified\n");
return -ENODEV; ret = -ENODEV;
goto axius_clk_err;
} }
clk_prepare_enable(exynos->axius_clk); clk_prepare_enable(exynos->axius_clk);
} else { } else {
@ -206,6 +207,7 @@ err3:
regulator_disable(exynos->vdd33); regulator_disable(exynos->vdd33);
err2: err2:
clk_disable_unprepare(exynos->axius_clk); clk_disable_unprepare(exynos->axius_clk);
axius_clk_err:
clk_disable_unprepare(exynos->susp_clk); clk_disable_unprepare(exynos->susp_clk);
clk_disable_unprepare(exynos->clk); clk_disable_unprepare(exynos->clk);
return ret; return ret;

View file

@ -1676,9 +1676,10 @@ static void
gadgetfs_suspend (struct usb_gadget *gadget) gadgetfs_suspend (struct usb_gadget *gadget)
{ {
struct dev_data *dev = get_gadget_data (gadget); struct dev_data *dev = get_gadget_data (gadget);
unsigned long flags;
INFO (dev, "suspended from state %d\n", dev->state); INFO (dev, "suspended from state %d\n", dev->state);
spin_lock (&dev->lock); spin_lock_irqsave(&dev->lock, flags);
switch (dev->state) { switch (dev->state) {
case STATE_DEV_SETUP: // VERY odd... host died?? case STATE_DEV_SETUP: // VERY odd... host died??
case STATE_DEV_CONNECTED: case STATE_DEV_CONNECTED:
@ -1689,7 +1690,7 @@ gadgetfs_suspend (struct usb_gadget *gadget)
default: default:
break; break;
} }
spin_unlock (&dev->lock); spin_unlock_irqrestore(&dev->lock, flags);
} }
static struct usb_gadget_driver gadgetfs_driver = { static struct usb_gadget_driver gadgetfs_driver = {

View file

@ -442,23 +442,16 @@ static void set_link_state(struct dummy_hcd *dum_hcd)
/* Report reset and disconnect events to the driver */ /* Report reset and disconnect events to the driver */
if (dum->driver && (disconnect || reset)) { if (dum->driver && (disconnect || reset)) {
stop_activity(dum); stop_activity(dum);
spin_unlock(&dum->lock);
if (reset) if (reset)
usb_gadget_udc_reset(&dum->gadget, dum->driver); usb_gadget_udc_reset(&dum->gadget, dum->driver);
else else
dum->driver->disconnect(&dum->gadget); dum->driver->disconnect(&dum->gadget);
spin_lock(&dum->lock);
} }
} else if (dum_hcd->active != dum_hcd->old_active) { } else if (dum_hcd->active != dum_hcd->old_active) {
if (dum_hcd->old_active && dum->driver->suspend) { if (dum_hcd->old_active && dum->driver->suspend)
spin_unlock(&dum->lock);
dum->driver->suspend(&dum->gadget); dum->driver->suspend(&dum->gadget);
spin_lock(&dum->lock); else if (!dum_hcd->old_active && dum->driver->resume)
} else if (!dum_hcd->old_active && dum->driver->resume) {
spin_unlock(&dum->lock);
dum->driver->resume(&dum->gadget); dum->driver->resume(&dum->gadget);
spin_lock(&dum->lock);
}
} }
dum_hcd->old_status = dum_hcd->port_status; dum_hcd->old_status = dum_hcd->port_status;
@ -985,7 +978,9 @@ static int dummy_udc_stop(struct usb_gadget *g)
struct dummy_hcd *dum_hcd = gadget_to_dummy_hcd(g); struct dummy_hcd *dum_hcd = gadget_to_dummy_hcd(g);
struct dummy *dum = dum_hcd->dum; struct dummy *dum = dum_hcd->dum;
spin_lock_irq(&dum->lock);
dum->driver = NULL; dum->driver = NULL;
spin_unlock_irq(&dum->lock);
return 0; return 0;
} }
@ -2011,7 +2006,7 @@ ss_hub_descriptor(struct usb_hub_descriptor *desc)
HUB_CHAR_COMMON_OCPM); HUB_CHAR_COMMON_OCPM);
desc->bNbrPorts = 1; desc->bNbrPorts = 1;
desc->u.ss.bHubHdrDecLat = 0x04; /* Worst case: 0.4 micro sec*/ desc->u.ss.bHubHdrDecLat = 0x04; /* Worst case: 0.4 micro sec*/
desc->u.ss.DeviceRemovable = 0xffff; desc->u.ss.DeviceRemovable = 0;
} }
static inline void hub_descriptor(struct usb_hub_descriptor *desc) static inline void hub_descriptor(struct usb_hub_descriptor *desc)
@ -2023,8 +2018,8 @@ static inline void hub_descriptor(struct usb_hub_descriptor *desc)
HUB_CHAR_INDV_PORT_LPSM | HUB_CHAR_INDV_PORT_LPSM |
HUB_CHAR_COMMON_OCPM); HUB_CHAR_COMMON_OCPM);
desc->bNbrPorts = 1; desc->bNbrPorts = 1;
desc->u.hs.DeviceRemovable[0] = 0xff; desc->u.hs.DeviceRemovable[0] = 0;
desc->u.hs.DeviceRemovable[1] = 0xff; desc->u.hs.DeviceRemovable[1] = 0xff; /* PortPwrCtrlMask */
} }
static int dummy_hub_control( static int dummy_hub_control(

View file

@ -2425,11 +2425,8 @@ static void stop_activity(struct net2280 *dev, struct usb_gadget_driver *driver)
nuke(&dev->ep[i]); nuke(&dev->ep[i]);
/* report disconnect; the driver is already quiesced */ /* report disconnect; the driver is already quiesced */
if (driver) { if (driver)
spin_unlock(&dev->lock);
driver->disconnect(&dev->gadget); driver->disconnect(&dev->gadget);
spin_lock(&dev->lock);
}
usb_reinit(dev); usb_reinit(dev);
} }
@ -3275,8 +3272,6 @@ next_endpoints:
BIT(PCI_RETRY_ABORT_INTERRUPT)) BIT(PCI_RETRY_ABORT_INTERRUPT))
static void handle_stat1_irqs(struct net2280 *dev, u32 stat) static void handle_stat1_irqs(struct net2280 *dev, u32 stat)
__releases(dev->lock)
__acquires(dev->lock)
{ {
struct net2280_ep *ep; struct net2280_ep *ep;
u32 tmp, num, mask, scratch; u32 tmp, num, mask, scratch;
@ -3317,14 +3312,12 @@ __acquires(dev->lock)
if (disconnect || reset) { if (disconnect || reset) {
stop_activity(dev, dev->driver); stop_activity(dev, dev->driver);
ep0_start(dev); ep0_start(dev);
spin_unlock(&dev->lock);
if (reset) if (reset)
usb_gadget_udc_reset usb_gadget_udc_reset
(&dev->gadget, dev->driver); (&dev->gadget, dev->driver);
else else
(dev->driver->disconnect) (dev->driver->disconnect)
(&dev->gadget); (&dev->gadget);
spin_lock(&dev->lock);
return; return;
} }
} }

View file

@ -1269,7 +1269,7 @@ static void set_td_timer(struct r8a66597 *r8a66597, struct r8a66597_td *td)
time = 30; time = 30;
break; break;
default: default:
time = 300; time = 50;
break; break;
} }
@ -1785,6 +1785,7 @@ static void r8a66597_td_timer(unsigned long _r8a66597)
pipe = td->pipe; pipe = td->pipe;
pipe_stop(r8a66597, pipe); pipe_stop(r8a66597, pipe);
/* Select a different address or endpoint */
new_td = td; new_td = td;
do { do {
list_move_tail(&new_td->queue, list_move_tail(&new_td->queue,
@ -1794,7 +1795,8 @@ static void r8a66597_td_timer(unsigned long _r8a66597)
new_td = td; new_td = td;
break; break;
} }
} while (td != new_td && td->address == new_td->address); } while (td != new_td && td->address == new_td->address &&
td->pipe->info.epnum == new_td->pipe->info.epnum);
start_transfer(r8a66597, new_td); start_transfer(r8a66597, new_td);

View file

@ -198,6 +198,9 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA && if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA &&
pdev->device == 0x1042) pdev->device == 0x1042)
xhci->quirks |= XHCI_BROKEN_STREAMS; xhci->quirks |= XHCI_BROKEN_STREAMS;
if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA &&
pdev->device == 0x1142)
xhci->quirks |= XHCI_TRUST_TX_LENGTH;
if (xhci->quirks & XHCI_RESET_ON_RESUME) if (xhci->quirks & XHCI_RESET_ON_RESUME)
xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,

View file

@ -412,6 +412,9 @@ cifs_reconnect(struct TCP_Server_Info *server)
} }
} while (server->tcpStatus == CifsNeedReconnect); } while (server->tcpStatus == CifsNeedReconnect);
if (server->tcpStatus == CifsNeedNegotiate)
mod_delayed_work(cifsiod_wq, &server->echo, 0);
return rc; return rc;
} }
@ -421,18 +424,27 @@ cifs_echo_request(struct work_struct *work)
int rc; int rc;
struct TCP_Server_Info *server = container_of(work, struct TCP_Server_Info *server = container_of(work,
struct TCP_Server_Info, echo.work); struct TCP_Server_Info, echo.work);
unsigned long echo_interval;
/* /*
* We cannot send an echo if it is disabled or until the * If we need to renegotiate, set echo interval to zero to
* NEGOTIATE_PROTOCOL request is done, which is indicated by * immediately call echo service where we can renegotiate.
* server->ops->need_neg() == true. Also, no need to ping if */
* we got a response recently. if (server->tcpStatus == CifsNeedNegotiate)
echo_interval = 0;
else
echo_interval = SMB_ECHO_INTERVAL;
/*
* We cannot send an echo if it is disabled.
* Also, no need to ping if we got a response recently.
*/ */
if (server->tcpStatus == CifsNeedReconnect || if (server->tcpStatus == CifsNeedReconnect ||
server->tcpStatus == CifsExiting || server->tcpStatus == CifsNew || server->tcpStatus == CifsExiting ||
server->tcpStatus == CifsNew ||
(server->ops->can_echo && !server->ops->can_echo(server)) || (server->ops->can_echo && !server->ops->can_echo(server)) ||
time_before(jiffies, server->lstrp + SMB_ECHO_INTERVAL - HZ)) time_before(jiffies, server->lstrp + echo_interval - HZ))
goto requeue_echo; goto requeue_echo;
rc = server->ops->echo ? server->ops->echo(server) : -ENOSYS; rc = server->ops->echo ? server->ops->echo(server) : -ENOSYS;

View file

@ -83,14 +83,13 @@ static int create_link(struct config_item *parent_item,
ret = -ENOMEM; ret = -ENOMEM;
sl = kmalloc(sizeof(struct configfs_symlink), GFP_KERNEL); sl = kmalloc(sizeof(struct configfs_symlink), GFP_KERNEL);
if (sl) { if (sl) {
sl->sl_target = config_item_get(item);
spin_lock(&configfs_dirent_lock); spin_lock(&configfs_dirent_lock);
if (target_sd->s_type & CONFIGFS_USET_DROPPING) { if (target_sd->s_type & CONFIGFS_USET_DROPPING) {
spin_unlock(&configfs_dirent_lock); spin_unlock(&configfs_dirent_lock);
config_item_put(item);
kfree(sl); kfree(sl);
return -ENOENT; return -ENOENT;
} }
sl->sl_target = config_item_get(item);
list_add(&sl->sl_list, &target_sd->s_links); list_add(&sl->sl_list, &target_sd->s_links);
spin_unlock(&configfs_dirent_lock); spin_unlock(&configfs_dirent_lock);
ret = configfs_create_link(sl, parent_item->ci_dentry, ret = configfs_create_link(sl, parent_item->ci_dentry,

View file

@ -660,6 +660,20 @@ has_zeroout:
ret = check_block_validity(inode, map); ret = check_block_validity(inode, map);
if (ret != 0) if (ret != 0)
return ret; return ret;
/*
* Inodes with freshly allocated blocks where contents will be
* visible after transaction commit must be on transaction's
* ordered data list.
*/
if (map->m_flags & EXT4_MAP_NEW &&
!(map->m_flags & EXT4_MAP_UNWRITTEN) &&
!IS_NOQUOTA(inode) &&
ext4_should_order_data(inode)) {
ret = ext4_jbd2_file_inode(handle, inode);
if (ret)
return ret;
}
} }
return retval; return retval;
} }
@ -1166,15 +1180,6 @@ static int ext4_write_end(struct file *file,
trace_android_fs_datawrite_end(inode, pos, len); trace_android_fs_datawrite_end(inode, pos, len);
trace_ext4_write_end(inode, pos, len, copied); trace_ext4_write_end(inode, pos, len, copied);
if (ext4_test_inode_state(inode, EXT4_STATE_ORDERED_MODE)) {
ret = ext4_jbd2_file_inode(handle, inode);
if (ret) {
unlock_page(page);
page_cache_release(page);
goto errout;
}
}
if (ext4_has_inline_data(inode)) { if (ext4_has_inline_data(inode)) {
ret = ext4_write_inline_data_end(inode, pos, len, ret = ext4_write_inline_data_end(inode, pos, len,
copied, page); copied, page);

View file

@ -542,6 +542,7 @@ void __fscache_disable_cookie(struct fscache_cookie *cookie, bool invalidate)
hlist_for_each_entry(object, &cookie->backing_objects, cookie_link) { hlist_for_each_entry(object, &cookie->backing_objects, cookie_link) {
if (invalidate) if (invalidate)
set_bit(FSCACHE_OBJECT_RETIRED, &object->flags); set_bit(FSCACHE_OBJECT_RETIRED, &object->flags);
clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags);
fscache_raise_event(object, FSCACHE_OBJECT_EV_KILL); fscache_raise_event(object, FSCACHE_OBJECT_EV_KILL);
} }
} else { } else {
@ -560,6 +561,10 @@ void __fscache_disable_cookie(struct fscache_cookie *cookie, bool invalidate)
wait_on_atomic_t(&cookie->n_active, fscache_wait_atomic_t, wait_on_atomic_t(&cookie->n_active, fscache_wait_atomic_t,
TASK_UNINTERRUPTIBLE); TASK_UNINTERRUPTIBLE);
/* Make sure any pending writes are cancelled. */
if (cookie->def->type != FSCACHE_COOKIE_TYPE_INDEX)
fscache_invalidate_writes(cookie);
/* Reset the cookie state if it wasn't relinquished */ /* Reset the cookie state if it wasn't relinquished */
if (!test_bit(FSCACHE_COOKIE_RELINQUISHED, &cookie->flags)) { if (!test_bit(FSCACHE_COOKIE_RELINQUISHED, &cookie->flags)) {
atomic_inc(&cookie->n_active); atomic_inc(&cookie->n_active);

View file

@ -48,6 +48,7 @@ int __fscache_register_netfs(struct fscache_netfs *netfs)
cookie->flags = 1 << FSCACHE_COOKIE_ENABLED; cookie->flags = 1 << FSCACHE_COOKIE_ENABLED;
spin_lock_init(&cookie->lock); spin_lock_init(&cookie->lock);
spin_lock_init(&cookie->stores_lock);
INIT_HLIST_HEAD(&cookie->backing_objects); INIT_HLIST_HEAD(&cookie->backing_objects);
/* check the netfs type is not already present */ /* check the netfs type is not already present */

View file

@ -30,6 +30,7 @@ static const struct fscache_state *fscache_look_up_object(struct fscache_object
static const struct fscache_state *fscache_object_available(struct fscache_object *, int); static const struct fscache_state *fscache_object_available(struct fscache_object *, int);
static const struct fscache_state *fscache_parent_ready(struct fscache_object *, int); static const struct fscache_state *fscache_parent_ready(struct fscache_object *, int);
static const struct fscache_state *fscache_update_object(struct fscache_object *, int); static const struct fscache_state *fscache_update_object(struct fscache_object *, int);
static const struct fscache_state *fscache_object_dead(struct fscache_object *, int);
#define __STATE_NAME(n) fscache_osm_##n #define __STATE_NAME(n) fscache_osm_##n
#define STATE(n) (&__STATE_NAME(n)) #define STATE(n) (&__STATE_NAME(n))
@ -91,7 +92,7 @@ static WORK_STATE(LOOKUP_FAILURE, "LCFL", fscache_lookup_failure);
static WORK_STATE(KILL_OBJECT, "KILL", fscache_kill_object); static WORK_STATE(KILL_OBJECT, "KILL", fscache_kill_object);
static WORK_STATE(KILL_DEPENDENTS, "KDEP", fscache_kill_dependents); static WORK_STATE(KILL_DEPENDENTS, "KDEP", fscache_kill_dependents);
static WORK_STATE(DROP_OBJECT, "DROP", fscache_drop_object); static WORK_STATE(DROP_OBJECT, "DROP", fscache_drop_object);
static WORK_STATE(OBJECT_DEAD, "DEAD", (void*)2UL); static WORK_STATE(OBJECT_DEAD, "DEAD", fscache_object_dead);
static WAIT_STATE(WAIT_FOR_INIT, "?INI", static WAIT_STATE(WAIT_FOR_INIT, "?INI",
TRANSIT_TO(INIT_OBJECT, 1 << FSCACHE_OBJECT_EV_NEW_CHILD)); TRANSIT_TO(INIT_OBJECT, 1 << FSCACHE_OBJECT_EV_NEW_CHILD));
@ -229,6 +230,10 @@ execute_work_state:
event = -1; event = -1;
if (new_state == NO_TRANSIT) { if (new_state == NO_TRANSIT) {
_debug("{OBJ%x} %s notrans", object->debug_id, state->name); _debug("{OBJ%x} %s notrans", object->debug_id, state->name);
if (unlikely(state == STATE(OBJECT_DEAD))) {
_leave(" [dead]");
return;
}
fscache_enqueue_object(object); fscache_enqueue_object(object);
event_mask = object->oob_event_mask; event_mask = object->oob_event_mask;
goto unmask_events; goto unmask_events;
@ -239,7 +244,7 @@ execute_work_state:
object->state = state = new_state; object->state = state = new_state;
if (state->work) { if (state->work) {
if (unlikely(state->work == ((void *)2UL))) { if (unlikely(state == STATE(OBJECT_DEAD))) {
_leave(" [dead]"); _leave(" [dead]");
return; return;
} }
@ -645,6 +650,12 @@ static const struct fscache_state *fscache_kill_object(struct fscache_object *ob
fscache_mark_object_dead(object); fscache_mark_object_dead(object);
object->oob_event_mask = 0; object->oob_event_mask = 0;
if (test_bit(FSCACHE_OBJECT_RETIRED, &object->flags)) {
/* Reject any new read/write ops and abort any that are pending. */
clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags);
fscache_cancel_all_ops(object);
}
if (list_empty(&object->dependents) && if (list_empty(&object->dependents) &&
object->n_ops == 0 && object->n_ops == 0 &&
object->n_children == 0) object->n_children == 0)
@ -1077,3 +1088,20 @@ void fscache_object_mark_killed(struct fscache_object *object,
} }
} }
EXPORT_SYMBOL(fscache_object_mark_killed); EXPORT_SYMBOL(fscache_object_mark_killed);
/*
* The object is dead. We can get here if an object gets queued by an event
* that would lead to its death (such as EV_KILL) when the dispatcher is
* already running (and so can be requeued) but hasn't yet cleared the event
* mask.
*/
static const struct fscache_state *fscache_object_dead(struct fscache_object *object,
int event)
{
if (!test_and_set_bit(FSCACHE_OBJECT_RUN_AFTER_DEAD,
&object->flags))
return NO_TRANSIT;
WARN(true, "FS-Cache object redispatched after death");
return NO_TRANSIT;
}

View file

@ -191,7 +191,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
addr = ALIGN(addr, huge_page_size(h)); addr = ALIGN(addr, huge_page_size(h));
vma = find_vma(mm, addr); vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr && if (TASK_SIZE - len >= addr &&
(!vma || addr + len <= vma->vm_start)) (!vma || addr + len <= vm_start_gap(vma)))
return addr; return addr;
} }

View file

@ -1072,6 +1072,7 @@ static void nfs_increment_seqid(int status, struct nfs_seqid *seqid)
case -NFS4ERR_BADXDR: case -NFS4ERR_BADXDR:
case -NFS4ERR_RESOURCE: case -NFS4ERR_RESOURCE:
case -NFS4ERR_NOFILEHANDLE: case -NFS4ERR_NOFILEHANDLE:
case -NFS4ERR_MOVED:
/* Non-seqid mutating errors */ /* Non-seqid mutating errors */
return; return;
}; };

View file

@ -3358,6 +3358,8 @@ int proc_pid_readdir(struct file *file, struct dir_context *ctx)
iter.tgid += 1, iter = next_tgid(ns, iter)) { iter.tgid += 1, iter = next_tgid(ns, iter)) {
char name[PROC_NUMBUF]; char name[PROC_NUMBUF];
int len; int len;
cond_resched();
if (!has_pid_permissions(ns, iter.task, 2)) if (!has_pid_permissions(ns, iter.task, 2))
continue; continue;

View file

@ -347,11 +347,7 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
/* We don't show the stack guard page in /proc/maps */ /* We don't show the stack guard page in /proc/maps */
start = vma->vm_start; start = vma->vm_start;
if (stack_guard_page_start(vma, start))
start += PAGE_SIZE;
end = vma->vm_end; end = vma->vm_end;
if (stack_guard_page_end(vma, end))
end -= PAGE_SIZE;
seq_setwidth(m, 25 + sizeof(void *) * 6 - 1); seq_setwidth(m, 25 + sizeof(void *) * 6 - 1);
seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu ", seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu ",

View file

@ -74,6 +74,7 @@
#include <linux/highmem.h> #include <linux/highmem.h>
#include <linux/pagemap.h> #include <linux/pagemap.h>
#include <linux/uaccess.h> #include <linux/uaccess.h>
#include <linux/major.h>
#include "internal.h" #include "internal.h"
static struct kmem_cache *romfs_inode_cachep; static struct kmem_cache *romfs_inode_cachep;
@ -415,7 +416,22 @@ static void romfs_destroy_inode(struct inode *inode)
static int romfs_statfs(struct dentry *dentry, struct kstatfs *buf) static int romfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{ {
struct super_block *sb = dentry->d_sb; struct super_block *sb = dentry->d_sb;
u64 id = huge_encode_dev(sb->s_bdev->bd_dev); u64 id = 0;
/* When calling huge_encode_dev(),
* use sb->s_bdev->bd_dev when,
* - CONFIG_ROMFS_ON_BLOCK defined
* use sb->s_dev when,
* - CONFIG_ROMFS_ON_BLOCK undefined and
* - CONFIG_ROMFS_ON_MTD defined
* leave id as 0 when,
* - CONFIG_ROMFS_ON_BLOCK undefined and
* - CONFIG_ROMFS_ON_MTD undefined
*/
if (sb->s_bdev)
id = huge_encode_dev(sb->s_bdev->bd_dev);
else if (sb->s_dev)
id = huge_encode_dev(sb->s_dev);
buf->f_type = ROMFS_MAGIC; buf->f_type = ROMFS_MAGIC;
buf->f_namelen = ROMFS_MAXFN; buf->f_namelen = ROMFS_MAXFN;
@ -488,6 +504,11 @@ static int romfs_fill_super(struct super_block *sb, void *data, int silent)
sb->s_flags |= MS_RDONLY | MS_NOATIME; sb->s_flags |= MS_RDONLY | MS_NOATIME;
sb->s_op = &romfs_super_ops; sb->s_op = &romfs_super_ops;
#ifdef CONFIG_ROMFS_ON_MTD
/* Use same dev ID from the underlying mtdblock device */
if (sb->s_mtd)
sb->s_dev = MKDEV(MTD_BLOCK_MAJOR, sb->s_mtd->index);
#endif
/* read the image superblock and check it */ /* read the image superblock and check it */
rsb = kmalloc(512, GFP_KERNEL); rsb = kmalloc(512, GFP_KERNEL);
if (!rsb) if (!rsb)

View file

@ -174,19 +174,6 @@ int check_caller_access_to_name(struct inode *parent_node, const struct qstr *na
return 1; return 1;
} }
/* This function is used when file opening. The open flags must be
* checked before calling check_caller_access_to_name()
*/
int open_flags_to_access_mode(int open_flags)
{
if ((open_flags & O_ACCMODE) == O_RDONLY)
return 0; /* R_OK */
if ((open_flags & O_ACCMODE) == O_WRONLY)
return 1; /* W_OK */
/* Probably O_RDRW, but treat as default to be safe */
return 1; /* R_OK | W_OK */
}
static struct hashtable_entry *alloc_hashtable_entry(const struct qstr *key, static struct hashtable_entry *alloc_hashtable_entry(const struct qstr *key,
appid_t value) appid_t value)
{ {

View file

@ -499,7 +499,6 @@ extern appid_t get_appid(const char *app_name);
extern appid_t get_ext_gid(const char *app_name); extern appid_t get_ext_gid(const char *app_name);
extern appid_t is_excluded(const char *app_name, userid_t userid); extern appid_t is_excluded(const char *app_name, userid_t userid);
extern int check_caller_access_to_name(struct inode *parent_node, const struct qstr *name); extern int check_caller_access_to_name(struct inode *parent_node, const struct qstr *name);
extern int open_flags_to_access_mode(int open_flags);
extern int packagelist_init(void); extern int packagelist_init(void);
extern void packagelist_exit(void); extern void packagelist_exit(void);

View file

@ -360,6 +360,7 @@ struct fscache_object {
#define FSCACHE_OBJECT_IS_AVAILABLE 5 /* T if object has become active */ #define FSCACHE_OBJECT_IS_AVAILABLE 5 /* T if object has become active */
#define FSCACHE_OBJECT_RETIRED 6 /* T if object was retired on relinquishment */ #define FSCACHE_OBJECT_RETIRED 6 /* T if object was retired on relinquishment */
#define FSCACHE_OBJECT_KILLED_BY_CACHE 7 /* T if object was killed by the cache */ #define FSCACHE_OBJECT_KILLED_BY_CACHE 7 /* T if object was killed by the cache */
#define FSCACHE_OBJECT_RUN_AFTER_DEAD 8 /* T if object has been dispatched after death */
struct list_head cache_link; /* link in cache->object_list */ struct list_head cache_link; /* link in cache->object_list */
struct hlist_node cookie_link; /* link in cookie->backing_objects */ struct hlist_node cookie_link; /* link in cookie->backing_objects */

View file

@ -194,6 +194,17 @@ unsigned long __rounddown_pow_of_two(unsigned long n)
* ... and so on. * ... and so on.
*/ */
#define order_base_2(n) ilog2(roundup_pow_of_two(n)) static inline __attribute_const__
int __order_base_2(unsigned long n)
{
return n > 1 ? ilog2(n - 1) + 1 : 0;
}
#define order_base_2(n) \
( \
__builtin_constant_p(n) ? ( \
((n) == 0 || (n) == 1) ? 0 : \
ilog2((n) - 1) + 1) : \
__order_base_2(n) \
)
#endif /* _LINUX_LOG2_H */ #endif /* _LINUX_LOG2_H */
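
roundup_pow_of_two(n) is undefined for n == 0, so the old one-line order_base_2() misbehaved on a constant zero; the replacement returns 0 for n <= 1 and ilog2(n - 1) + 1 otherwise, with a __builtin_constant_p() split so constant arguments still fold at compile time. A user-space model of the runtime helper (local ilog2 stand-in, since the kernel macro is unavailable here):

#include <stdio.h>

/* floor(log2(n)) for n > 0; stand-in for the kernel's ilog2() */
static int ilog2_ul(unsigned long n)
{
	int log = -1;

	while (n) {
		n >>= 1;
		log++;
	}
	return log;
}

/* mirrors the new __order_base_2(): smallest order with 2^order >= n */
static int order_base_2(unsigned long n)
{
	return n > 1 ? ilog2_ul(n - 1) + 1 : 0;
}

int main(void)
{
	printf("%d %d %d %d\n",
	       order_base_2(0),		/* 0: no longer undefined     */
	       order_base_2(1),		/* 0                          */
	       order_base_2(5),		/* 3: rounds up to 8          */
	       order_base_2(8));	/* 3: exact powers stay exact */
	return 0;
}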

View file

@ -1292,39 +1292,11 @@ int clear_page_dirty_for_io(struct page *page);
int get_cmdline(struct task_struct *task, char *buffer, int buflen); int get_cmdline(struct task_struct *task, char *buffer, int buflen);
/* Is the vma a continuation of the stack vma above it? */
static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
{
return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
}
static inline bool vma_is_anonymous(struct vm_area_struct *vma) static inline bool vma_is_anonymous(struct vm_area_struct *vma)
{ {
return !vma->vm_ops; return !vma->vm_ops;
} }
static inline int stack_guard_page_start(struct vm_area_struct *vma,
unsigned long addr)
{
return (vma->vm_flags & VM_GROWSDOWN) &&
(vma->vm_start == addr) &&
!vma_growsdown(vma->vm_prev, addr);
}
/* Is the vma a continuation of the stack vma below it? */
static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
{
return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
}
static inline int stack_guard_page_end(struct vm_area_struct *vma,
unsigned long addr)
{
return (vma->vm_flags & VM_GROWSUP) &&
(vma->vm_end == addr) &&
!vma_growsup(vma->vm_next, addr);
}
int vma_is_stack_for_task(struct vm_area_struct *vma, struct task_struct *t); int vma_is_stack_for_task(struct vm_area_struct *vma, struct task_struct *t);
extern unsigned long move_page_tables(struct vm_area_struct *vma, extern unsigned long move_page_tables(struct vm_area_struct *vma,
@ -2029,6 +2001,7 @@ void page_cache_async_readahead(struct address_space *mapping,
pgoff_t offset, pgoff_t offset,
unsigned long size); unsigned long size);
extern unsigned long stack_guard_gap;
/* Generic expand stack which grows the stack according to GROWS{UP,DOWN} */ /* Generic expand stack which grows the stack according to GROWS{UP,DOWN} */
extern int expand_stack(struct vm_area_struct *vma, unsigned long address); extern int expand_stack(struct vm_area_struct *vma, unsigned long address);
@ -2057,6 +2030,30 @@ static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * m
return vma; return vma;
} }
static inline unsigned long vm_start_gap(struct vm_area_struct *vma)
{
unsigned long vm_start = vma->vm_start;
if (vma->vm_flags & VM_GROWSDOWN) {
vm_start -= stack_guard_gap;
if (vm_start > vma->vm_start)
vm_start = 0;
}
return vm_start;
}
static inline unsigned long vm_end_gap(struct vm_area_struct *vma)
{
unsigned long vm_end = vma->vm_end;
if (vma->vm_flags & VM_GROWSUP) {
vm_end += stack_guard_gap;
if (vm_end < vma->vm_end)
vm_end = -PAGE_SIZE;
}
return vm_end;
}
static inline unsigned long vma_pages(struct vm_area_struct *vma) static inline unsigned long vma_pages(struct vm_area_struct *vma)
{ {
return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
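
These helpers are the core of the stack-guard-gap rework: rather than reserving a single guard page inside the stack VMA, every placement decision now treats a stack VMA as if it extended stack_guard_gap bytes (256 pages by default) beyond its true start or end, with explicit clamping so the unsigned arithmetic cannot wrap past address 0 or the top of the address space. A compilable model of the grows-down case:

#include <stdio.h>

#define PAGE_SHIFT   12
#define VM_GROWSDOWN 0x0100UL

static unsigned long stack_guard_gap = 256UL << PAGE_SHIFT;

struct vm_area_struct {
	unsigned long vm_start, vm_end, vm_flags;
};

/* mirrors vm_start_gap(): extend a stack VMA downward by the gap,
 * clamping to 0 when the unsigned subtraction wraps */
static unsigned long vm_start_gap(const struct vm_area_struct *vma)
{
	unsigned long vm_start = vma->vm_start;

	if (vma->vm_flags & VM_GROWSDOWN) {
		vm_start -= stack_guard_gap;
		if (vm_start > vma->vm_start)	/* wrapped below 0 */
			vm_start = 0;
	}
	return vm_start;
}

int main(void)
{
	struct vm_area_struct stack = {
		.vm_start = 0x7f0000000000UL - (8UL << PAGE_SHIFT),
		.vm_end   = 0x7f0000000000UL,
		.vm_flags = VM_GROWSDOWN,
	};
	struct vm_area_struct low = {
		.vm_start = 0x10000, .vm_end = 0x20000,
		.vm_flags = VM_GROWSDOWN,
	};

	/* mappings must stay below the first value; the second clamps to 0 */
	printf("%#lx %#lx\n", vm_start_gap(&stack), vm_start_gap(&low));
	return 0;
}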

View file

@ -744,6 +744,11 @@ static inline __be32 ip6_make_flowlabel(struct net *net, struct sk_buff *skb,
{ {
u32 hash; u32 hash;
/* @flowlabel may include more than a flow label, eg, the traffic class.
* Here we want only the flow label value.
*/
flowlabel &= IPV6_FLOWLABEL_MASK;
if (flowlabel || if (flowlabel ||
net->ipv6.sysctl.auto_flowlabels == IP6_AUTO_FLOW_LABEL_OFF || net->ipv6.sysctl.auto_flowlabels == IP6_AUTO_FLOW_LABEL_OFF ||
(!autolabel && (!autolabel &&
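
ip6_make_flowlabel() can be handed a full flowinfo word, in which the 20-bit flow label shares the dword with the traffic class; without the mask, a nonzero traffic class alone made the flowlabel test true and suppressed auto-generation even though the label itself was zero. The masking, modeled in host byte order (the kernel constant is the big-endian equivalent):

#include <stdio.h>
#include <stdint.h>

#define IPV6_FLOWLABEL_MASK 0x000FFFFFu	/* host-order model of the __be32 constant */

int main(void)
{
	uint32_t flowinfo = 0x0FF00000u;	/* traffic class set, flow label zero */
	uint32_t flowlabel = flowinfo & IPV6_FLOWLABEL_MASK;

	/* before the fix this tested flowinfo (nonzero) and skipped autolabel */
	printf("label=%#x -> %s\n", flowlabel,
	       flowlabel ? "keep" : "auto-generate");
	return 0;
}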

View file

@ -22,6 +22,9 @@
*/ */
#define USB_MAXCHILDREN 31 #define USB_MAXCHILDREN 31
/* See USB 3.1 spec Table 10-5 */
#define USB_SS_MAXPORTS 15
/* /*
* Hub request types * Hub request types
*/ */

View file

@ -313,7 +313,8 @@ static const char *const bpf_jmp_string[16] = {
[BPF_EXIT >> 4] = "exit", [BPF_EXIT >> 4] = "exit",
}; };
static void print_bpf_insn(struct bpf_insn *insn) static void print_bpf_insn(const struct verifier_env *env,
const struct bpf_insn *insn)
{ {
u8 class = BPF_CLASS(insn->code); u8 class = BPF_CLASS(insn->code);
@ -377,9 +378,19 @@ static void print_bpf_insn(struct bpf_insn *insn)
insn->code, insn->code,
bpf_ldst_string[BPF_SIZE(insn->code) >> 3], bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
insn->src_reg, insn->imm); insn->src_reg, insn->imm);
} else if (BPF_MODE(insn->code) == BPF_IMM) { } else if (BPF_MODE(insn->code) == BPF_IMM &&
verbose("(%02x) r%d = 0x%x\n", BPF_SIZE(insn->code) == BPF_DW) {
insn->code, insn->dst_reg, insn->imm); /* At this point, we already made sure that the second
* part of the ldimm64 insn is accessible.
*/
u64 imm = ((u64)(insn + 1)->imm << 32) | (u32)insn->imm;
bool map_ptr = insn->src_reg == BPF_PSEUDO_MAP_FD;
if (map_ptr && !env->allow_ptr_leaks)
imm = 0;
verbose("(%02x) r%d = 0x%llx\n", insn->code,
insn->dst_reg, (unsigned long long)imm);
} else { } else {
verbose("BUG_ld_%02x\n", insn->code); verbose("BUG_ld_%02x\n", insn->code);
return; return;
@ -1758,7 +1769,7 @@ static int do_check(struct verifier_env *env)
if (log_level) { if (log_level) {
verbose("%d: ", insn_idx); verbose("%d: ", insn_idx);
print_bpf_insn(insn); print_bpf_insn(env, insn);
} }
if (class == BPF_ALU || class == BPF_ALU64) { if (class == BPF_ALU || class == BPF_ALU64) {
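
A BPF_LD | BPF_IMM | BPF_DW instruction spans two consecutive insns, each carrying 32 bits of the immediate; the verifier log previously printed only the low half as a 32-bit value. The new branch splices both halves and, for map pointers, prints 0 unless the caller has allow_ptr_leaks, so kernel addresses stop leaking into the log. The splice on its own:

#include <stdio.h>
#include <stdint.h>

/* just the field the splice needs; the real struct bpf_insn has more */
struct bpf_insn {
	int32_t imm;
};

int main(void)
{
	/* ldimm64 spans two insns: low 32 bits first, high 32 bits second */
	struct bpf_insn insn[2] = { { .imm = (int32_t)0x89abcdef },
				    { .imm = 0x01234567 } };
	uint64_t imm = ((uint64_t)(insn + 1)->imm << 32) | (uint32_t)insn[0].imm;

	printf("r1 = %#llx\n", (unsigned long long)imm);
	/* prints r1 = 0x123456789abcdef */
	return 0;
}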

View file

@ -1305,8 +1305,10 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
ret = __irq_set_trigger(desc, ret = __irq_set_trigger(desc,
new->flags & IRQF_TRIGGER_MASK); new->flags & IRQF_TRIGGER_MASK);
if (ret) if (ret) {
irq_release_resources(desc);
goto out_mask; goto out_mask;
}
} }
desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \ desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \

View file

@ -569,7 +569,7 @@ void alarm_start_relative(struct alarm *alarm, ktime_t start)
{ {
struct alarm_base *base = &alarm_bases[alarm->type]; struct alarm_base *base = &alarm_bases[alarm->type];
start = ktime_add(start, base->gettime()); start = ktime_add_safe(start, base->gettime());
alarm_start(alarm, start); alarm_start(alarm, start);
} }
EXPORT_SYMBOL_GPL(alarm_start_relative); EXPORT_SYMBOL_GPL(alarm_start_relative);
@ -655,7 +655,7 @@ u64 alarm_forward(struct alarm *alarm, ktime_t now, ktime_t interval)
overrun++; overrun++;
} }
alarm->node.expires = ktime_add(alarm->node.expires, interval); alarm->node.expires = ktime_add_safe(alarm->node.expires, interval);
return overrun; return overrun;
} }
EXPORT_SYMBOL_GPL(alarm_forward); EXPORT_SYMBOL_GPL(alarm_forward);
@ -843,13 +843,21 @@ static int alarm_timer_set(struct k_itimer *timr, int flags,
/* start the timer */ /* start the timer */
timr->it.alarm.interval = timespec_to_ktime(new_setting->it_interval); timr->it.alarm.interval = timespec_to_ktime(new_setting->it_interval);
/*
* Rate limit to the tick as a hot fix to prevent DOS. Will be
* mopped up later.
*/
if (ktime_to_ns(timr->it.alarm.interval) < TICK_NSEC)
timr->it.alarm.interval = ktime_set(0, TICK_NSEC);
exp = timespec_to_ktime(new_setting->it_value); exp = timespec_to_ktime(new_setting->it_value);
/* Convert (if necessary) to absolute time */ /* Convert (if necessary) to absolute time */
if (flags != TIMER_ABSTIME) { if (flags != TIMER_ABSTIME) {
ktime_t now; ktime_t now;
now = alarm_bases[timr->it.alarm.alarmtimer.type].gettime(); now = alarm_bases[timr->it.alarm.alarmtimer.type].gettime();
exp = ktime_add(now, exp); exp = ktime_add_safe(now, exp);
} }
alarm_start(&timr->it.alarm.alarmtimer, exp); alarm_start(&timr->it.alarm.alarmtimer, exp);
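
Relative expiries and intervals arrive from user space, so now + exp could overflow signed 64-bit nanoseconds and yield an already-expired timer that refires in a tight loop; switching to ktime_add_safe() saturates at the maximum representable time instead, and the TICK_NSEC clamp above rate-limits tiny intervals against the same DoS. A user-space model of the saturating add:

#include <stdio.h>
#include <stdint.h>

#define KTIME_MAX INT64_MAX

/* model of ktime_add_safe(): saturate instead of wrapping negative */
static int64_t ktime_add_safe(int64_t lhs, int64_t rhs)
{
	/* add in unsigned arithmetic; two's-complement wrap assumed,
	 * as the kernel itself relies on */
	int64_t res = (int64_t)((uint64_t)lhs + (uint64_t)rhs);

	if (res < 0 || res < lhs || res < rhs)
		res = KTIME_MAX;
	return res;
}

int main(void)
{
	int64_t now = KTIME_MAX - 1000;		/* nanoseconds */
	int64_t exp = 1000000;			/* user-supplied offset */

	/* prints 9223372036854775807 (KTIME_MAX), not a wrapped negative */
	printf("%lld\n", (long long)ktime_add_safe(now, exp));
	return 0;
}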

View file

@ -312,11 +312,6 @@ static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma,
/* mlock all present pages, but do not fault in new pages */ /* mlock all present pages, but do not fault in new pages */
if ((*flags & (FOLL_POPULATE | FOLL_MLOCK)) == FOLL_MLOCK) if ((*flags & (FOLL_POPULATE | FOLL_MLOCK)) == FOLL_MLOCK)
return -ENOENT; return -ENOENT;
/* For mm_populate(), just skip the stack guard page. */
if ((*flags & FOLL_POPULATE) &&
(stack_guard_page_start(vma, address) ||
stack_guard_page_end(vma, address + PAGE_SIZE)))
return -ENOENT;
if (*flags & FOLL_WRITE) if (*flags & FOLL_WRITE)
fault_flags |= FAULT_FLAG_WRITE; fault_flags |= FAULT_FLAG_WRITE;
if (nonblocking) if (nonblocking)

View file

@ -13,6 +13,7 @@
* *
*/ */
#include <linux/ftrace.h>
#include <linux/kernel.h> #include <linux/kernel.h>
#include <linux/mm.h> #include <linux/mm.h>
#include <linux/printk.h> #include <linux/printk.h>
@ -251,6 +252,8 @@ void kasan_report(unsigned long addr, size_t size,
if (likely(!kasan_report_enabled())) if (likely(!kasan_report_enabled()))
return; return;
disable_trace_on_warning();
info.access_addr = (void *)addr; info.access_addr = (void *)addr;
info.access_size = size; info.access_size = size;
info.is_write = is_write; info.is_write = is_write;

View file

@ -1208,7 +1208,10 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
* page_remove_rmap() in try_to_unmap_one(). So to determine page status * page_remove_rmap() in try_to_unmap_one(). So to determine page status
* correctly, we save a copy of the page flags at this time. * correctly, we save a copy of the page flags at this time.
*/ */
page_flags = p->flags; if (PageHuge(p))
page_flags = hpage->flags;
else
page_flags = p->flags;
/* /*
* unpoison always clear PG_hwpoison inside page lock * unpoison always clear PG_hwpoison inside page lock

View file

@@ -2662,40 +2662,6 @@ out_release:
 	return ret;
 }
 
-/*
- * This is like a special single-page "expand_{down|up}wards()",
- * except we must first make sure that 'address{-|+}PAGE_SIZE'
- * doesn't hit another vma.
- */
-static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
-{
-	address &= PAGE_MASK;
-	if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
-		struct vm_area_struct *prev = vma->vm_prev;
-
-		/*
-		 * Is there a mapping abutting this one below?
-		 *
-		 * That's only ok if it's the same stack mapping
-		 * that has gotten split..
-		 */
-		if (prev && prev->vm_end == address)
-			return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
-
-		return expand_downwards(vma, address - PAGE_SIZE);
-	}
-	if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
-		struct vm_area_struct *next = vma->vm_next;
-
-		/* As VM_GROWSDOWN but s/below/above/ */
-		if (next && next->vm_start == address + PAGE_SIZE)
-			return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
-
-		return expand_upwards(vma, address + PAGE_SIZE);
-	}
-	return 0;
-}
-
 /*
  * We enter with non-exclusive mmap_sem (to exclude vma changes,
  * but allow concurrent faults), and pte mapped but not yet locked.
@@ -2716,10 +2682,6 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	if (vma->vm_flags & VM_SHARED)
 		return VM_FAULT_SIGBUS;
 
-	/* Check if we need to add a guard page to the stack */
-	if (check_stack_guard_page(vma, address) < 0)
-		return VM_FAULT_SIGSEGV;
-
 	/* Use the zero-page for reads */
 	if (!(flags & FAULT_FLAG_WRITE) && !mm_forbids_zeropage(mm)) {
 		entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),

mm/mmap.c

@@ -304,6 +304,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
 	unsigned long retval;
 	unsigned long newbrk, oldbrk;
 	struct mm_struct *mm = current->mm;
+	struct vm_area_struct *next;
 	unsigned long min_brk;
 	bool populate;
 
@@ -348,7 +349,8 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
 	}
 
 	/* Check against existing mmap mappings. */
-	if (find_vma_intersection(mm, oldbrk, newbrk+PAGE_SIZE))
+	next = find_vma(mm, oldbrk);
+	if (next && newbrk + PAGE_SIZE > vm_start_gap(next))
 		goto out;
 
 	/* Ok, looks good - let it rip. */
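
The vm_start_gap()/vm_end_gap() helpers used by the new brk check (and
throughout the rest of this file) are added to include/linux/mm.h by the
same upstream change and are not shown in this excerpt. Approximately,
they extend a stack VMA's footprint by stack_guard_gap on its growing
side, clamping if the arithmetic wraps:

static inline unsigned long vm_start_gap(struct vm_area_struct *vma)
{
	unsigned long vm_start = vma->vm_start;

	if (vma->vm_flags & VM_GROWSDOWN) {
		vm_start -= stack_guard_gap;
		if (vm_start > vma->vm_start)	/* underflow wrapped around */
			vm_start = 0;
	}
	return vm_start;
}

static inline unsigned long vm_end_gap(struct vm_area_struct *vma)
{
	unsigned long vm_end = vma->vm_end;

	if (vma->vm_flags & VM_GROWSUP) {
		vm_end += stack_guard_gap;
		if (vm_end < vma->vm_end)	/* overflow wrapped around */
			vm_end = 0;
	}
	return vm_end;
}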
@@ -371,10 +373,22 @@ out:
 static long vma_compute_subtree_gap(struct vm_area_struct *vma)
 {
-	unsigned long max, subtree_gap;
-	max = vma->vm_start;
-	if (vma->vm_prev)
-		max -= vma->vm_prev->vm_end;
+	unsigned long max, prev_end, subtree_gap;
+
+	/*
+	 * Note: in the rare case of a VM_GROWSDOWN above a VM_GROWSUP, we
+	 * allow two stack_guard_gaps between them here, and when choosing
+	 * an unmapped area; whereas when expanding we only require one.
+	 * That's a little inconsistent, but keeps the code here simpler.
+	 */
+	max = vm_start_gap(vma);
+	if (vma->vm_prev) {
+		prev_end = vm_end_gap(vma->vm_prev);
+		if (max > prev_end)
+			max -= prev_end;
+		else
+			max = 0;
+	}
 	if (vma->vm_rb.rb_left) {
 		subtree_gap = rb_entry(vma->vm_rb.rb_left,
 				struct vm_area_struct, vm_rb)->rb_subtree_gap;
@@ -467,7 +481,7 @@ static void validate_mm(struct mm_struct *mm)
 			anon_vma_unlock_read(anon_vma);
 		}
 
-		highest_address = vma->vm_end;
+		highest_address = vm_end_gap(vma);
 		vma = vma->vm_next;
 		i++;
 	}
@@ -636,7 +650,7 @@ void __vma_link_rb(struct mm_struct *mm, struct vm_area_struct *vma,
 	if (vma->vm_next)
 		vma_gap_update(vma->vm_next);
 	else
-		mm->highest_vm_end = vma->vm_end;
+		mm->highest_vm_end = vm_end_gap(vma);
 
 	/*
 	 * vma->vm_prev wasn't known when we followed the rbtree to find the
@@ -882,7 +896,7 @@ again:			remove_next = 1 + (end > next->vm_end);
 			vma_gap_update(vma);
 		if (end_changed) {
 			if (!next)
-				mm->highest_vm_end = end;
+				mm->highest_vm_end = vm_end_gap(vma);
 			else if (!adjust_next)
 				vma_gap_update(next);
 		}
@@ -925,7 +939,7 @@ again:			remove_next = 1 + (end > next->vm_end);
 		else if (next)
 			vma_gap_update(next);
 		else
-			mm->highest_vm_end = end;
+			VM_WARN_ON(mm->highest_vm_end != vm_end_gap(vma));
 	}
 	if (insert && file)
 		uprobe_mmap(insert);
@@ -1771,7 +1785,7 @@ unsigned long unmapped_area(struct vm_unmapped_area_info *info)
 
 	while (true) {
 		/* Visit left subtree if it looks promising */
-		gap_end = vma->vm_start;
+		gap_end = vm_start_gap(vma);
 		if (gap_end >= low_limit && vma->vm_rb.rb_left) {
 			struct vm_area_struct *left =
 				rb_entry(vma->vm_rb.rb_left,
@@ -1782,12 +1796,13 @@ unsigned long unmapped_area(struct vm_unmapped_area_info *info)
 			}
 		}
 
-		gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0;
+		gap_start = vma->vm_prev ? vm_end_gap(vma->vm_prev) : 0;
 check_current:
 		/* Check if current node has a suitable gap */
 		if (gap_start > high_limit)
 			return -ENOMEM;
-		if (gap_end >= low_limit && gap_end - gap_start >= length)
+		if (gap_end >= low_limit &&
+		    gap_end > gap_start && gap_end - gap_start >= length)
 			goto found;
 
 		/* Visit right subtree if it looks promising */
@@ -1809,8 +1824,8 @@ check_current:
 			vma = rb_entry(rb_parent(prev),
 				       struct vm_area_struct, vm_rb);
 			if (prev == vma->vm_rb.rb_left) {
-				gap_start = vma->vm_prev->vm_end;
-				gap_end = vma->vm_start;
+				gap_start = vm_end_gap(vma->vm_prev);
+				gap_end = vm_start_gap(vma);
 				goto check_current;
 			}
 		}
@@ -1874,7 +1889,7 @@ unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
 
 	while (true) {
 		/* Visit right subtree if it looks promising */
-		gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0;
+		gap_start = vma->vm_prev ? vm_end_gap(vma->vm_prev) : 0;
 		if (gap_start <= high_limit && vma->vm_rb.rb_right) {
 			struct vm_area_struct *right =
 				rb_entry(vma->vm_rb.rb_right,
@@ -1887,10 +1902,11 @@ unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
 
 check_current:
 		/* Check if current node has a suitable gap */
-		gap_end = vma->vm_start;
+		gap_end = vm_start_gap(vma);
 		if (gap_end < low_limit)
 			return -ENOMEM;
-		if (gap_start <= high_limit && gap_end - gap_start >= length)
+		if (gap_start <= high_limit &&
+		    gap_end > gap_start && gap_end - gap_start >= length)
 			goto found;
 
 		/* Visit left subtree if it looks promising */
@@ -1913,7 +1929,7 @@ check_current:
 				       struct vm_area_struct, vm_rb);
 			if (prev == vma->vm_rb.rb_right) {
 				gap_start = vma->vm_prev ?
-					vma->vm_prev->vm_end : 0;
+					vm_end_gap(vma->vm_prev) : 0;
 				goto check_current;
 			}
 		}
@@ -1951,7 +1967,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 		unsigned long len, unsigned long pgoff, unsigned long flags)
 {
 	struct mm_struct *mm = current->mm;
-	struct vm_area_struct *vma;
+	struct vm_area_struct *vma, *prev;
 	struct vm_unmapped_area_info info;
 
 	if (len > TASK_SIZE - mmap_min_addr)
@@ -1962,9 +1978,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 
 	if (addr) {
 		addr = PAGE_ALIGN(addr);
-		vma = find_vma(mm, addr);
+		vma = find_vma_prev(mm, addr, &prev);
 		if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)) &&
+		    (!prev || addr >= vm_end_gap(prev)))
 			return addr;
 	}
 
@@ -1987,7 +2004,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 			  const unsigned long len, const unsigned long pgoff,
 			  const unsigned long flags)
 {
-	struct vm_area_struct *vma;
+	struct vm_area_struct *vma, *prev;
 	struct mm_struct *mm = current->mm;
 	unsigned long addr = addr0;
 	struct vm_unmapped_area_info info;
@@ -2002,9 +2019,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 	/* requesting a specific address */
 	if (addr) {
 		addr = PAGE_ALIGN(addr);
-		vma = find_vma(mm, addr);
+		vma = find_vma_prev(mm, addr, &prev);
 		if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)) &&
+		    (!prev || addr >= vm_end_gap(prev)))
 			return addr;
 	}
@@ -2129,21 +2147,19 @@ find_vma_prev(struct mm_struct *mm, unsigned long addr,
  * update accounting. This is shared with both the
  * grow-up and grow-down cases.
  */
-static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, unsigned long grow)
+static int acct_stack_growth(struct vm_area_struct *vma,
+			     unsigned long size, unsigned long grow)
 {
 	struct mm_struct *mm = vma->vm_mm;
 	struct rlimit *rlim = current->signal->rlim;
-	unsigned long new_start, actual_size;
+	unsigned long new_start;
 
 	/* address space limit tests */
 	if (!may_expand_vm(mm, grow))
 		return -ENOMEM;
 
 	/* Stack limit test */
-	actual_size = size;
-	if (size && (vma->vm_flags & (VM_GROWSUP | VM_GROWSDOWN)))
-		actual_size -= PAGE_SIZE;
-	if (actual_size > READ_ONCE(rlim[RLIMIT_STACK].rlim_cur))
+	if (size > READ_ONCE(rlim[RLIMIT_STACK].rlim_cur))
 		return -ENOMEM;
 
 	/* mlock limit tests */
@@ -2181,16 +2197,32 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, unsigned long grow)
 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
 {
 	struct mm_struct *mm = vma->vm_mm;
+	struct vm_area_struct *next;
+	unsigned long gap_addr;
 	int error = 0;
 
 	if (!(vma->vm_flags & VM_GROWSUP))
 		return -EFAULT;
 
-	/* Guard against wrapping around to address 0. */
-	if (address < PAGE_ALIGN(address+4))
-		address = PAGE_ALIGN(address+4);
-	else
+	/* Guard against exceeding limits of the address space. */
+	address &= PAGE_MASK;
+	if (address >= TASK_SIZE)
 		return -ENOMEM;
+	address += PAGE_SIZE;
+
+	/* Enforce stack_guard_gap */
+	gap_addr = address + stack_guard_gap;
+
+	/* Guard against overflow */
+	if (gap_addr < address || gap_addr > TASK_SIZE)
+		gap_addr = TASK_SIZE;
+
+	next = vma->vm_next;
+	if (next && next->vm_start < gap_addr) {
+		if (!(next->vm_flags & VM_GROWSUP))
+			return -ENOMEM;
+		/* Check that both stack segments have the same anon_vma? */
+	}
 
 	/* We must make sure the anon_vma is allocated. */
 	if (unlikely(anon_vma_prepare(vma)))
@@ -2236,7 +2268,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
 		if (vma->vm_next)
 			vma_gap_update(vma->vm_next);
 		else
-			mm->highest_vm_end = address;
+			mm->highest_vm_end = vm_end_gap(vma);
 		spin_unlock(&mm->page_table_lock);
 
 	perf_event_mmap(vma);
@@ -2257,6 +2289,8 @@ int expand_downwards(struct vm_area_struct *vma,
 				   unsigned long address)
 {
 	struct mm_struct *mm = vma->vm_mm;
+	struct vm_area_struct *prev;
+	unsigned long gap_addr;
 	int error;
 
 	address &= PAGE_MASK;
@@ -2264,6 +2298,17 @@ int expand_downwards(struct vm_area_struct *vma,
 	if (error)
 		return error;
 
+	/* Enforce stack_guard_gap */
+	gap_addr = address - stack_guard_gap;
+	if (gap_addr > address)
+		return -ENOMEM;
+	prev = vma->vm_prev;
+	if (prev && prev->vm_end > gap_addr) {
+		if (!(prev->vm_flags & VM_GROWSDOWN))
+			return -ENOMEM;
+		/* Check that both stack segments have the same anon_vma? */
+	}
+
 	/* We must make sure the anon_vma is allocated. */
 	if (unlikely(anon_vma_prepare(vma)))
 		return -ENOMEM;
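
A worked example of the VM_GROWSDOWN check above, with illustrative
numbers (not taken from the patch), assuming 4 KiB pages and the default
gap of 256 pages:

/*
 * stack_guard_gap = 256 << 12          = 1 MiB
 * faulting address            0x7f7f00100000
 * gap_addr = address - gap    0x7f7f00000000
 *
 * The stack may grow down to 0x7f7f00100000 only if no non-stack VMA
 * ends above 0x7f7f00000000; the "gap_addr > address" test catches a
 * subtraction that wrapped below zero for stacks near address 0.
 */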
@@ -2319,28 +2364,25 @@ int expand_downwards(struct vm_area_struct *vma,
 		return error;
 }
 
-/*
- * Note how expand_stack() refuses to expand the stack all the way to
- * abut the next virtual mapping, *unless* that mapping itself is also
- * a stack mapping. We want to leave room for a guard page, after all
- * (the guard page itself is not added here, that is done by the
- * actual page faulting logic)
- *
- * This matches the behavior of the guard page logic (see mm/memory.c:
- * check_stack_guard_page()), which only allows the guard page to be
- * removed under these circumstances.
- */
+/* enforced gap between the expanding stack and other mappings. */
+unsigned long stack_guard_gap = 256UL<<PAGE_SHIFT;
+
+static int __init cmdline_parse_stack_guard_gap(char *p)
+{
+	unsigned long val;
+	char *endptr;
+
+	val = simple_strtoul(p, &endptr, 10);
+	if (!*endptr)
+		stack_guard_gap = val << PAGE_SHIFT;
+
+	return 0;
+}
+__setup("stack_guard_gap=", cmdline_parse_stack_guard_gap);
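
Usage note: the parameter value is in pages (val << PAGE_SHIFT), so the
default 256UL << PAGE_SHIFT is 1 MiB with 4 KiB pages, and booting with
stack_guard_gap=1 restores the old single-page spacing. A user-space
sketch of the parser's acceptance rule, with strtoul() standing in for
the kernel's simple_strtoul() (names and values here are illustrative):

#include <stdio.h>
#include <stdlib.h>

static unsigned long parse_gap(const char *p, unsigned long fallback)
{
	char *endptr;
	unsigned long val = strtoul(p, &endptr, 10);

	/* Only a fully numeric string takes effect, as above. */
	return *endptr ? fallback : val;
}

int main(void)
{
	printf("%lu\n", parse_gap("256", 256));	/* 256 */
	printf("%lu\n", parse_gap("1", 256));	/* 1 */
	printf("%lu\n", parse_gap("1M", 256));	/* 256: trailing junk is rejected */
	return 0;
}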
 #ifdef CONFIG_STACK_GROWSUP
 int expand_stack(struct vm_area_struct *vma, unsigned long address)
 {
-	struct vm_area_struct *next;
-
-	address &= PAGE_MASK;
-	next = vma->vm_next;
-	if (next && next->vm_start == address + PAGE_SIZE) {
-		if (!(next->vm_flags & VM_GROWSUP))
-			return -ENOMEM;
-	}
 	return expand_upwards(vma, address);
 }
 
@@ -2362,14 +2404,6 @@ find_extend_vma(struct mm_struct *mm, unsigned long addr)
 #else
 int expand_stack(struct vm_area_struct *vma, unsigned long address)
 {
-	struct vm_area_struct *prev;
-
-	address &= PAGE_MASK;
-	prev = vma->vm_prev;
-	if (prev && prev->vm_end == address) {
-		if (!(prev->vm_flags & VM_GROWSDOWN))
-			return -ENOMEM;
-	}
 	return expand_downwards(vma, address);
 }
 
@@ -2467,7 +2501,7 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
 		vma->vm_prev = prev;
 		vma_gap_update(vma);
 	} else
-		mm->highest_vm_end = prev ? prev->vm_end : 0;
+		mm->highest_vm_end = prev ? vm_end_gap(prev) : 0;
 	tail_vma->vm_next = NULL;
 
 	/* Kill the cache */

mm/swap_cgroup.c

@@ -48,6 +48,9 @@ static int swap_cgroup_prepare(int type)
 		if (!page)
 			goto not_enough_page;
 		ctrl->map[idx] = page;
+
+		if (!(idx % SWAP_CLUSTER_MAX))
+			cond_resched();
 	}
 	return 0;
 not_enough_page:
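
The cond_resched() above yields the CPU every SWAP_CLUSTER_MAX page
allocations (32UL in v4.4) so that preparing a very large swap area
cannot stall other runnable tasks on a non-preemptible kernel. A
self-contained user-space analogue of the same batching idiom (names
and sizes here are illustrative, not the mm code):

#include <sched.h>
#include <stdlib.h>

#define NSLOTS	(1 << 18)
#define BATCH	32	/* stands in for SWAP_CLUSTER_MAX */

int main(void)
{
	static void *slots[NSLOTS];

	for (size_t i = 0; i < NSLOTS; i++) {
		slots[i] = calloc(1, 4096);
		if (!slots[i])
			return 1;
		if (!(i % BATCH))
			sched_yield();	/* plays the role of cond_resched() */
	}
	return 0;
}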

net/core/ethtool.c

@@ -886,9 +886,12 @@ static int ethtool_get_regs(struct net_device *dev, char __user *useraddr)
 	if (regs.len > reglen)
 		regs.len = reglen;
 
-	regbuf = vzalloc(reglen);
-	if (reglen && !regbuf)
-		return -ENOMEM;
+	regbuf = NULL;
+	if (reglen) {
+		regbuf = vzalloc(reglen);
+		if (!regbuf)
+			return -ENOMEM;
+	}
 
 	ops->get_regs(dev, &regs, regbuf);

Some files were not shown because too many files have changed in this diff.