Merge "Merge branch 'android-4.4@77ddb50' (v4.4.74) into 'msm-4.4'"
commit 74b5a0f867
111 changed files with 895 additions and 582 deletions

@@ -3605,6 +3605,13 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 	spia_pedr=
 	spia_peddr=
 
+	stack_guard_gap=	[MM]
+			override the default stack gap protection. The value
+			is in page units and it defines how many pages prior
+			to (for stacks growing down) resp. after (for stacks
+			growing up) the main stack are reserved for no other
+			mapping. Default value is 256 pages.
+
 	stacktrace	[FTRACE]
 			Enabled the stack tracer on boot up.

Makefile
@@ -1,6 +1,6 @@
 VERSION = 4
 PATCHLEVEL = 4
-SUBLEVEL = 72
+SUBLEVEL = 74
 EXTRAVERSION =
 NAME = Blurry Fish Butt
 
@@ -793,7 +793,7 @@ KBUILD_CFLAGS += $(call cc-option,-Werror=date-time)
 KBUILD_ARFLAGS := $(call ar-option,D)
 
 # check for 'asm goto'
-ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(CC)), y)
+ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(CC) $(KBUILD_CFLAGS)), y)
 	KBUILD_CFLAGS += -DCC_HAVE_ASM_GOTO
 	KBUILD_AFLAGS += -DCC_HAVE_ASM_GOTO
 endif

android/configs/android-base-arm64.cfg (new file)
@@ -0,0 +1,5 @@
+# KEEP ALPHABETICALLY SORTED
+CONFIG_ARMV8_DEPRECATED=y
+CONFIG_CP15_BARRIER_EMULATION=y
+CONFIG_SETEND_EMULATION=y
+CONFIG_SWP_EMULATION=y

@@ -12,7 +12,6 @@ CONFIG_ANDROID=y
 CONFIG_ANDROID_BINDER_IPC=y
 CONFIG_ANDROID_BINDER_DEVICES=binder,hwbinder,vndbinder
 CONFIG_ANDROID_LOW_MEMORY_KILLER=y
-CONFIG_ARMV8_DEPRECATED=y
 CONFIG_ASHMEM=y
 CONFIG_AUDIT=y
 CONFIG_BLK_DEV_INITRD=y
@@ -21,7 +20,6 @@ CONFIG_CGROUP_CPUACCT=y
 CONFIG_CGROUP_DEBUG=y
 CONFIG_CGROUP_FREEZER=y
 CONFIG_CGROUP_SCHED=y
-CONFIG_CP15_BARRIER_EMULATION=y
 CONFIG_DEFAULT_SECURITY_SELINUX=y
 CONFIG_EMBEDDED=y
 CONFIG_FB=y
@@ -155,9 +153,7 @@ CONFIG_SECURITY=y
 CONFIG_SECURITY_NETWORK=y
 CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
 CONFIG_SECURITY_SELINUX=y
-CONFIG_SETEND_EMULATION=y
 CONFIG_STAGING=y
-CONFIG_SWP_EMULATION=y
 CONFIG_SYNC=y
 CONFIG_TUN=y
 CONFIG_UID_SYS_STATS=y

@@ -64,7 +64,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 
 		vma = find_vma(mm, addr);
 		if (TASK_SIZE - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)))
 			return addr;
 	}
 

@@ -30,7 +30,7 @@
 			/* kHz	uV */
 			996000	1250000
 			792000	1175000
-			396000	1075000
+			396000	1150000
 		>;
 		fsl,soc-operating-points = <
 			/* ARM kHz	SOC-PU uV */

@@ -89,7 +89,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 
 		vma = find_vma(mm, addr);
 		if (TASK_SIZE - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)))
 			return addr;
 	}
 
@@ -140,7 +140,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 		addr = PAGE_ALIGN(addr);
 		vma = find_vma(mm, addr);
 		if (TASK_SIZE - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)))
 			return addr;
 	}
 

@@ -74,7 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
 		addr = PAGE_ALIGN(addr);
 		vma = find_vma(current->mm, addr);
 		if (TASK_SIZE - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)))
 			goto success;
 	}
 

@@ -816,8 +816,10 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
 			break;
 		}
 		/* Compact branch: BNEZC || JIALC */
-		if (insn.i_format.rs)
+		if (!insn.i_format.rs) {
+			/* JIALC: set $31/ra */
 			regs->regs[31] = epc + 4;
+		}
 		regs->cp0_epc += 8;
 		break;
 #endif

@@ -92,7 +92,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
 
 		vma = find_vma(mm, addr);
 		if (TASK_SIZE - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)))
 			return addr;
 	}
 

@@ -88,7 +88,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
 		unsigned long len, unsigned long pgoff, unsigned long flags)
 {
 	struct mm_struct *mm = current->mm;
-	struct vm_area_struct *vma;
+	struct vm_area_struct *vma, *prev;
 	unsigned long task_size = TASK_SIZE;
 	int do_color_align, last_mmap;
 	struct vm_unmapped_area_info info;
@@ -115,9 +115,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
 		else
 			addr = PAGE_ALIGN(addr);
 
-		vma = find_vma(mm, addr);
+		vma = find_vma_prev(mm, addr, &prev);
 		if (task_size - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)) &&
+		    (!prev || addr >= vm_end_gap(prev)))
 			goto found_addr;
 	}
 
@@ -141,7 +142,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 			  const unsigned long len, const unsigned long pgoff,
 			  const unsigned long flags)
 {
-	struct vm_area_struct *vma;
+	struct vm_area_struct *vma, *prev;
 	struct mm_struct *mm = current->mm;
 	unsigned long addr = addr0;
 	int do_color_align, last_mmap;
@@ -175,9 +176,11 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 			addr = COLOR_ALIGN(addr, last_mmap, pgoff);
 		else
 			addr = PAGE_ALIGN(addr);
-		vma = find_vma(mm, addr);
+
+		vma = find_vma_prev(mm, addr, &prev);
 		if (TASK_SIZE - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)) &&
+		    (!prev || addr >= vm_end_gap(prev)))
 			goto found_addr;
 	}
 

@@ -105,7 +105,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
 	if ((mm->task_size - len) < addr)
 		return 0;
 	vma = find_vma(mm, addr);
-	return (!vma || (addr + len) <= vma->vm_start);
+	return (!vma || (addr + len) <= vm_start_gap(vma));
 }
 
 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)

@@ -229,12 +229,17 @@ ENTRY(sie64a)
 	lctlg	%c1,%c1,__LC_USER_ASCE		# load primary asce
 .Lsie_done:
 # some program checks are suppressing. C code (e.g. do_protection_exception)
-# will rewind the PSW by the ILC, which is 4 bytes in case of SIE. Other
-# instructions between sie64a and .Lsie_done should not cause program
-# interrupts. So lets use a nop (47 00 00 00) as a landing pad.
-.Lrewind_pad:
-	nop	0
+# will rewind the PSW by the ILC, which is often 4 bytes in case of SIE. There
+# are some corner cases (e.g. runtime instrumentation) where ILC is unpredictable.
+# Other instructions between sie64a and .Lsie_done should not cause program
+# interrupts. So lets use 3 nops as a landing pad for all possible rewinds.
+# See also .Lcleanup_sie
+.Lrewind_pad6:
+	nopr	7
+.Lrewind_pad4:
+	nopr	7
+.Lrewind_pad2:
+	nopr	7
 	.globl sie_exit
 sie_exit:
 	lg	%r14,__SF_EMPTY+8(%r15)		# load guest register save area
@@ -247,7 +252,9 @@ sie_exit:
 	stg	%r14,__SF_EMPTY+16(%r15)	# set exit reason code
 	j	sie_exit
 
-	EX_TABLE(.Lrewind_pad,.Lsie_fault)
+	EX_TABLE(.Lrewind_pad6,.Lsie_fault)
+	EX_TABLE(.Lrewind_pad4,.Lsie_fault)
+	EX_TABLE(.Lrewind_pad2,.Lsie_fault)
 	EX_TABLE(sie_exit,.Lsie_fault)
 #endif
 

@@ -97,7 +97,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 		addr = PAGE_ALIGN(addr);
 		vma = find_vma(mm, addr);
 		if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)))
 			return addr;
 	}
 
@@ -135,7 +135,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 		addr = PAGE_ALIGN(addr);
 		vma = find_vma(mm, addr);
 		if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)))
 			return addr;
 	}
 

@@ -372,7 +372,7 @@ void __init vmem_map_init(void)
 	ro_end = (unsigned long)&_eshared & PAGE_MASK;
 	for_each_memblock(memory, reg) {
 		start = reg->base;
-		end = reg->base + reg->size - 1;
+		end = reg->base + reg->size;
 		if (start >= ro_end || end <= ro_start)
 			vmem_add_mem(start, end - start, 0);
 		else if (start >= ro_start && end <= ro_end)

@@ -63,7 +63,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
 
 		vma = find_vma(mm, addr);
 		if (TASK_SIZE - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)))
 			return addr;
 	}
 
@@ -113,7 +113,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 
 		vma = find_vma(mm, addr);
 		if (TASK_SIZE - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)))
 			return addr;
 	}
 

@@ -118,7 +118,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
 
 		vma = find_vma(mm, addr);
 		if (task_size - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)))
 			return addr;
 	}
 
@@ -181,7 +181,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 
 		vma = find_vma(mm, addr);
 		if (task_size - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)))
 			return addr;
 	}
 

@@ -85,7 +85,7 @@ static void dump_tl1_traplog(struct tl1_traplog *p)
 
 void bad_trap(struct pt_regs *regs, long lvl)
 {
-	char buffer[32];
+	char buffer[36];
 	siginfo_t info;
 
 	if (notify_die(DIE_TRAP, "bad trap", regs,
@@ -116,7 +116,7 @@ void bad_trap(struct pt_regs *regs, long lvl)
 
 void bad_trap_tl1(struct pt_regs *regs, long lvl)
 {
-	char buffer[32];
+	char buffer[36];
 
 	if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
 		       0, lvl, SIGTRAP) == NOTIFY_STOP)

@@ -115,7 +115,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
 		addr = ALIGN(addr, HPAGE_SIZE);
 		vma = find_vma(mm, addr);
 		if (task_size - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)))
 			return addr;
 	}
 	if (mm->get_unmapped_area == arch_get_unmapped_area)

@@ -232,7 +232,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
 		addr = ALIGN(addr, huge_page_size(h));
 		vma = find_vma(mm, addr);
 		if (TASK_SIZE - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)))
 			return addr;
 	}
 	if (current->mm->get_unmapped_area == arch_get_unmapped_area)

@@ -143,7 +143,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 		addr = PAGE_ALIGN(addr);
 		vma = find_vma(mm, addr);
 		if (end - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)))
 			return addr;
 	}
 
@@ -186,7 +186,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 		addr = PAGE_ALIGN(addr);
 		vma = find_vma(mm, addr);
 		if (TASK_SIZE - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)))
 			return addr;
 	}
 

@@ -144,7 +144,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
 		addr = ALIGN(addr, huge_page_size(h));
 		vma = find_vma(mm, addr);
 		if (TASK_SIZE - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)))
 			return addr;
 	}
 	if (mm->get_unmapped_area == arch_get_unmapped_area)

@@ -100,5 +100,6 @@ void __init initmem_init(void)
 	printk(KERN_DEBUG "High memory starts at vaddr %08lx\n",
 			(ulong) pfn_to_kaddr(highstart_pfn));
 
+	__vmalloc_start_set = true;
 	setup_bootmem_allocator();
 }

@@ -29,7 +29,8 @@ static inline void variant_irq_disable(unsigned int irq) { }
 # define PLATFORM_NR_IRQS 0
 #endif
 #define XTENSA_NR_IRQS XCHAL_NUM_INTERRUPTS
-#define NR_IRQS (XTENSA_NR_IRQS + VARIANT_NR_IRQS + PLATFORM_NR_IRQS)
+#define NR_IRQS (XTENSA_NR_IRQS + VARIANT_NR_IRQS + PLATFORM_NR_IRQS + 1)
+#define XTENSA_PIC_LINUX_IRQ(hwirq) ((hwirq) + 1)
 
 #if VARIANT_NR_IRQS == 0
 static inline void variant_init_irq(void) { }

@@ -34,11 +34,6 @@ asmlinkage void do_IRQ(int hwirq, struct pt_regs *regs)
 {
 	int irq = irq_find_mapping(NULL, hwirq);
 
-	if (hwirq >= NR_IRQS) {
-		printk(KERN_EMERG "%s: cannot handle IRQ %d\n",
-				__func__, hwirq);
-	}
-
 #ifdef CONFIG_DEBUG_STACKOVERFLOW
 	/* Debugging check for stack overflow: is there less than 1KB free? */
 	{

@@ -87,7 +87,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
 		/* At this point:  (!vmm || addr < vmm->vm_end). */
 		if (TASK_SIZE - len < addr)
 			return -ENOMEM;
-		if (!vmm || addr + len <= vmm->vm_start)
+		if (!vmm || addr + len <= vm_start_gap(vmm))
 			return addr;
 		addr = vmm->vm_end;
 		if (flags & MAP_SHARED)

@@ -24,16 +24,18 @@
 
 /* Interrupt configuration. */
 
-#define PLATFORM_NR_IRQS	10
+#define PLATFORM_NR_IRQS	0
 
 /* Default assignment of LX60 devices to external interrupts. */
 
 #ifdef CONFIG_XTENSA_MX
 #define DUART16552_INTNUM	XCHAL_EXTINT3_NUM
 #define OETH_IRQ		XCHAL_EXTINT4_NUM
+#define C67X00_IRQ		XCHAL_EXTINT8_NUM
 #else
 #define DUART16552_INTNUM	XCHAL_EXTINT0_NUM
 #define OETH_IRQ		XCHAL_EXTINT1_NUM
+#define C67X00_IRQ		XCHAL_EXTINT5_NUM
 #endif
 
 /*
@@ -63,5 +65,5 @@
 
 #define C67X00_PADDR		(XCHAL_KIO_PADDR + 0x0D0D0000)
 #define C67X00_SIZE		0x10
-#define C67X00_IRQ		5
 
 #endif /* __XTENSA_XTAVNET_HARDWARE_H */

@@ -209,8 +209,8 @@ static struct resource ethoc_res[] = {
 		.flags = IORESOURCE_MEM,
 	},
 	[2] = { /* IRQ number */
-		.start = OETH_IRQ,
-		.end   = OETH_IRQ,
+		.start = XTENSA_PIC_LINUX_IRQ(OETH_IRQ),
+		.end   = XTENSA_PIC_LINUX_IRQ(OETH_IRQ),
 		.flags = IORESOURCE_IRQ,
 	},
 };
@@ -246,8 +246,8 @@ static struct resource c67x00_res[] = {
 		.flags = IORESOURCE_MEM,
 	},
 	[1] = { /* IRQ number */
-		.start = C67X00_IRQ,
-		.end   = C67X00_IRQ,
+		.start = XTENSA_PIC_LINUX_IRQ(C67X00_IRQ),
+		.end   = XTENSA_PIC_LINUX_IRQ(C67X00_IRQ),
 		.flags = IORESOURCE_IRQ,
 	},
 };
@@ -280,7 +280,7 @@ static struct resource serial_resource = {
 static struct plat_serial8250_port serial_platform_data[] = {
 	[0] = {
 		.mapbase  = DUART16552_PADDR,
-		.irq      = DUART16552_INTNUM,
+		.irq      = XTENSA_PIC_LINUX_IRQ(DUART16552_INTNUM),
 		.flags    = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST |
 			    UPF_IOREMAP,
 		.iotype   = UPIO_MEM32,

@@ -300,6 +300,8 @@ static void parse_bsd(struct parsed_partitions *state,
 			continue;
 		bsd_start = le32_to_cpu(p->p_offset);
 		bsd_size = le32_to_cpu(p->p_size);
+		if (memcmp(flavour, "bsd\0", 4) == 0)
+			bsd_start += offset;
 		if (offset == bsd_start && size == bsd_size)
 			/* full parent partition, we have it already */
 			continue;

@@ -889,13 +889,13 @@ int __pm_runtime_idle(struct device *dev, int rpmflags)
 	unsigned long flags;
 	int retval;
 
-	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
-
 	if (rpmflags & RPM_GET_PUT) {
 		if (!atomic_dec_and_test(&dev->power.usage_count))
 			return 0;
 	}
 
+	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
+
 	spin_lock_irqsave(&dev->power.lock, flags);
 	retval = rpm_idle(dev, rpmflags);
 	spin_unlock_irqrestore(&dev->power.lock, flags);
@@ -921,13 +921,13 @@ int __pm_runtime_suspend(struct device *dev, int rpmflags)
 	unsigned long flags;
 	int retval;
 
-	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
-
 	if (rpmflags & RPM_GET_PUT) {
 		if (!atomic_dec_and_test(&dev->power.usage_count))
 			return 0;
 	}
 
+	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
+
 	spin_lock_irqsave(&dev->power.lock, flags);
 	retval = rpm_suspend(dev, rpmflags);
 	spin_unlock_irqrestore(&dev->power.lock, flags);
@@ -952,7 +952,8 @@ int __pm_runtime_resume(struct device *dev, int rpmflags)
 	unsigned long flags;
 	int retval;
 
-	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
+	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe &&
+			dev->power.runtime_status != RPM_ACTIVE);
 
 	if (rpmflags & RPM_GET_PUT)
 		atomic_inc(&dev->power.usage_count);

@@ -212,8 +212,8 @@ static ssize_t store_down_threshold(struct dbs_data *dbs_data, const char *buf,
 	int ret;
 	ret = sscanf(buf, "%u", &input);
 
-	/* cannot be lower than 11 otherwise freq will not fall */
-	if (ret != 1 || input < 11 || input > 100 ||
+	/* cannot be lower than 1 otherwise freq will not fall */
+	if (ret != 1 || input < 1 || input > 100 ||
 			input >= cs_tuners->up_threshold)
 		return -EINVAL;
 

@@ -113,6 +113,7 @@ struct ast_private {
 	struct ttm_bo_kmap_obj cache_kmap;
 	int next_cursor;
 	bool support_wide_screen;
+	bool DisableP2A;
 
 	enum ast_tx_chip tx_chip_type;
 	u8 dp501_maxclk;

@@ -124,6 +124,12 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post)
 	} else
 		*need_post = false;
 
+	/* Check P2A Access */
+	ast->DisableP2A = true;
+	data = ast_read32(ast, 0xf004);
+	if (data != 0xFFFFFFFF)
+		ast->DisableP2A = false;
+
 	/* Check if we support wide screen */
 	switch (ast->chip) {
 	case AST1180:
@@ -140,6 +146,7 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post)
 			ast->support_wide_screen = true;
 		else {
 			ast->support_wide_screen = false;
+			if (ast->DisableP2A == false) {
 			/* Read SCU7c (silicon revision register) */
 			ast_write32(ast, 0xf004, 0x1e6e0000);
 			ast_write32(ast, 0xf000, 0x1);
@@ -150,6 +157,7 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post)
 			if (ast->chip == AST2400 && data == 0x100) /* ast1400 */
 				ast->support_wide_screen = true;
+			}
 		}
 		break;
 	}
 
@@ -216,16 +224,16 @@ static int ast_get_dram_info(struct drm_device *dev)
 	uint32_t data, data2;
 	uint32_t denum, num, div, ref_pll;
 
+	if (ast->DisableP2A)
+	{
+		ast->dram_bus_width = 16;
+		ast->dram_type = AST_DRAM_1Gx16;
+		ast->mclk = 396;
+	}
+	else
+	{
 		ast_write32(ast, 0xf004, 0x1e6e0000);
 		ast_write32(ast, 0xf000, 0x1);
 
 		ast_write32(ast, 0x10000, 0xfc600309);
 
 		do {
 			if (pci_channel_offline(dev->pdev))
 				return -EIO;
 		} while (ast_read32(ast, 0x10000) != 0x01);
 		data = ast_read32(ast, 0x10004);
 
 	if (data & 0x40)
@@ -290,6 +298,7 @@ static int ast_get_dram_info(struct drm_device *dev)
 			break;
 		}
 		ast->mclk = ref_pll * (num + 2) / (denum + 2) * (div * 1000);
+	}
 	return 0;
 }
 

@@ -375,12 +375,20 @@ void ast_post_gpu(struct drm_device *dev)
 	ast_enable_mmio(dev);
 	ast_set_def_ext_reg(dev);
 
+	if (ast->DisableP2A == false)
+	{
 	if (ast->chip == AST2300 || ast->chip == AST2400)
 		ast_init_dram_2300(dev);
 	else
 		ast_init_dram_reg(dev);
 
 	ast_init_3rdtx(dev);
+	}
+	else
+	{
+		if (ast->tx_chip_type != AST_TX_NONE)
+			ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa3, 0xcf, 0x80);	/* Enable DVO */
+	}
 }
 
 /* AST 2300 DRAM settings */

@@ -370,6 +370,7 @@ nouveau_display_init(struct drm_device *dev)
 		return ret;
 
 	/* enable polling for external displays */
+	if (!dev->mode_config.poll_enabled)
 		drm_kms_helper_poll_enable(dev);
 
 	/* enable hotplug interrupts */

@@ -743,7 +743,10 @@ nouveau_pmops_runtime_resume(struct device *dev)
 	pci_set_master(pdev);
 
 	ret = nouveau_do_resume(drm_dev, true);
+
+	if (!drm_dev->mode_config.poll_enabled)
+		drm_kms_helper_poll_enable(drm_dev);
+
 	/* do magic */
 	nvif_mask(&device->object, 0x088488, (1 << 25), (1 << 25));
 	vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_ON);

@@ -99,6 +99,7 @@ struct nv84_fence_priv {
 	struct nouveau_bo *bo;
 	struct nouveau_bo *bo_gart;
 	u32 *suspend;
+	struct mutex mutex;
 };
 
 u64  nv84_fence_crtc(struct nouveau_channel *, int);

@@ -313,7 +313,8 @@ usif_ioctl(struct drm_file *filp, void __user *user, u32 argc)
 	if (nvif_unpack(argv->v0, 0, 0, true)) {
 		/* block access to objects not created via this interface */
 		owner = argv->v0.owner;
-		if (argv->v0.object == 0ULL)
+		if (argv->v0.object == 0ULL &&
+		    argv->v0.type != NVIF_IOCTL_V0_DEL)
 			argv->v0.owner = NVDRM_OBJECT_ANY; /* except client */
 		else
 			argv->v0.owner = NVDRM_OBJECT_USIF;

@@ -121,8 +121,10 @@ nv84_fence_context_del(struct nouveau_channel *chan)
 	}
 
 	nouveau_bo_wr32(priv->bo, chan->chid * 16 / 4, fctx->base.sequence);
+	mutex_lock(&priv->mutex);
 	nouveau_bo_vma_del(priv->bo, &fctx->vma_gart);
 	nouveau_bo_vma_del(priv->bo, &fctx->vma);
+	mutex_unlock(&priv->mutex);
 	nouveau_fence_context_del(&fctx->base);
 	chan->fence = NULL;
 	nouveau_fence_context_free(&fctx->base);
@@ -148,11 +150,13 @@ nv84_fence_context_new(struct nouveau_channel *chan)
 	fctx->base.sync32 = nv84_fence_sync32;
 	fctx->base.sequence = nv84_fence_read(chan);
 
+	mutex_lock(&priv->mutex);
 	ret = nouveau_bo_vma_add(priv->bo, cli->vm, &fctx->vma);
 	if (ret == 0) {
 		ret = nouveau_bo_vma_add(priv->bo_gart, cli->vm,
 					&fctx->vma_gart);
 	}
+	mutex_unlock(&priv->mutex);
 
 	/* map display semaphore buffers into channel's vm */
 	for (i = 0; !ret && i < chan->drm->dev->mode_config.num_crtc; i++) {
@@ -232,6 +236,8 @@ nv84_fence_create(struct nouveau_drm *drm)
 	priv->base.context_base = fence_context_alloc(priv->base.contexts);
 	priv->base.uevent = true;
 
+	mutex_init(&priv->mutex);
+
 	/* Use VRAM if there is any ; otherwise fallback to system memory */
 	domain = drm->device.info.ram_size != 0 ? TTM_PL_FLAG_VRAM :
 			/*

@@ -54,7 +54,7 @@
 #define SMBSLVDAT	(0xC + piix4_smba)
 
 /* count for request_region */
-#define SMBIOSIZE	8
+#define SMBIOSIZE	9
 
 /* PCI Address Constants */
 #define SMBBA		0x090

@@ -263,8 +263,6 @@ static irqreturn_t as3935_interrupt_handler(int irq, void *private)
 
 static void calibrate_as3935(struct as3935_state *st)
 {
-	mutex_lock(&st->lock);
-
 	/* mask disturber interrupt bit */
 	as3935_write(st, AS3935_INT, BIT(5));
 
@@ -274,8 +272,6 @@ static void calibrate_as3935(struct as3935_state *st)
 
 	mdelay(2);
 	as3935_write(st, AS3935_TUNE_CAP, (st->tune_cap / TUNE_CAP_DIV));
-
-	mutex_unlock(&st->lock);
 }
 
 #ifdef CONFIG_PM_SLEEP
@@ -312,6 +308,8 @@ static int as3935_resume(struct device *dev)
 	val &= ~AS3935_AFE_PWR_BIT;
 	ret = as3935_write(st, AS3935_AFE_GAIN, val);
 
+	calibrate_as3935(st);
+
 err_resume:
 	mutex_unlock(&st->lock);
 

@@ -142,7 +142,7 @@ static struct irq_chip xtensa_mx_irq_chip = {
 int __init xtensa_mx_init_legacy(struct device_node *interrupt_parent)
 {
 	struct irq_domain *root_domain =
-		irq_domain_add_legacy(NULL, NR_IRQS, 0, 0,
+		irq_domain_add_legacy(NULL, NR_IRQS - 1, 1, 0,
 				&xtensa_mx_irq_domain_ops,
 				&xtensa_mx_irq_chip);
 	irq_set_default_host(root_domain);

@@ -89,7 +89,7 @@ static struct irq_chip xtensa_irq_chip = {
 int __init xtensa_pic_init_legacy(struct device_node *interrupt_parent)
 {
 	struct irq_domain *root_domain =
-		irq_domain_add_legacy(NULL, NR_IRQS, 0, 0,
+		irq_domain_add_legacy(NULL, NR_IRQS - 1, 1, 0,
 			&xtensa_irq_domain_ops, &xtensa_irq_chip);
 	irq_set_default_host(root_domain);
 	return 0;

@@ -123,15 +123,10 @@ int pvr2_eeprom_analyze(struct pvr2_hdw *hdw)
 	memset(&tvdata,0,sizeof(tvdata));
 
 	eeprom = pvr2_eeprom_fetch(hdw);
-	if (!eeprom) return -EINVAL;
-
-	{
-		struct i2c_client fake_client;
-		/* Newer version expects a useless client interface */
-		fake_client.addr = hdw->eeprom_addr;
-		fake_client.adapter = &hdw->i2c_adap;
-		tveeprom_hauppauge_analog(&fake_client,&tvdata,eeprom);
-	}
+	if (!eeprom)
+		return -EINVAL;
+
+	tveeprom_hauppauge_analog(NULL, &tvdata, eeprom);
 
 	trace_eeprom("eeprom assumed v4l tveeprom module");
 	trace_eeprom("eeprom direct call results:");

@@ -793,7 +793,7 @@ EXPORT_SYMBOL_GPL(vb2_core_create_bufs);
  */
 void *vb2_plane_vaddr(struct vb2_buffer *vb, unsigned int plane_no)
 {
-	if (plane_no > vb->num_planes || !vb->planes[plane_no].mem_priv)
+	if (plane_no >= vb->num_planes || !vb->planes[plane_no].mem_priv)
 		return NULL;
 
 	return call_ptr_memop(vb, vaddr, vb->planes[plane_no].mem_priv);

@@ -375,8 +375,8 @@ int omap_tll_init(struct usbhs_omap_platform_data *pdata)
 				 * and use SDR Mode
 				 */
 				reg &= ~(OMAP_TLL_CHANNEL_CONF_UTMIAUTOIDLE
-					| OMAP_TLL_CHANNEL_CONF_ULPINOBITSTUFF
 					| OMAP_TLL_CHANNEL_CONF_ULPIDDRMODE);
+				reg |= OMAP_TLL_CHANNEL_CONF_ULPINOBITSTUFF;
 			} else if (pdata->port_mode[i] ==
 					OMAP_EHCI_PORT_MODE_HSIC) {
 				/*

@@ -129,8 +129,8 @@ static int __init duramar2150_c2port_init(void)
 
 	duramar2150_c2port_dev = c2port_device_register("uc",
 					&duramar2150_c2port_ops, NULL);
-	if (!duramar2150_c2port_dev) {
-		ret = -ENODEV;
+	if (IS_ERR(duramar2150_c2port_dev)) {
+		ret = PTR_ERR(duramar2150_c2port_dev);
 		goto free_region;
 	}
 

@@ -246,6 +246,8 @@ static int gs_cmd_reset(struct gs_usb *gsusb, struct gs_can *gsdev)
 			     sizeof(*dm),
 			     1000);
 
+	kfree(dm);
+
 	return rc;
 }
 

@@ -1153,6 +1153,12 @@ static void init_ring(struct net_device *dev)
 		if (skb == NULL)
 			break;
 		np->rx_info[i].mapping = pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
+		if (pci_dma_mapping_error(np->pci_dev,
+					  np->rx_info[i].mapping)) {
+			dev_kfree_skb(skb);
+			np->rx_info[i].skb = NULL;
+			break;
+		}
 		/* Grrr, we cannot offset to correctly align the IP header. */
 		np->rx_ring[i].rxaddr = cpu_to_dma(np->rx_info[i].mapping | RxDescValid);
 	}
@@ -1183,8 +1189,9 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
 {
 	struct netdev_private *np = netdev_priv(dev);
 	unsigned int entry;
+	unsigned int prev_tx;
 	u32 status;
-	int i;
+	int i, j;
 
 	/*
 	 * be cautious here, wrapping the queue has weird semantics
@@ -1202,6 +1209,7 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
 	}
 #endif /* ZEROCOPY && HAS_BROKEN_FIRMWARE */
 
+	prev_tx = np->cur_tx;
 	entry = np->cur_tx % TX_RING_SIZE;
 	for (i = 0; i < skb_num_frags(skb); i++) {
 		int wrap_ring = 0;
@@ -1235,6 +1243,11 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
 					       skb_frag_size(this_frag),
 					       PCI_DMA_TODEVICE);
 		}
+		if (pci_dma_mapping_error(np->pci_dev,
+					  np->tx_info[entry].mapping)) {
+			dev->stats.tx_dropped++;
+			goto err_out;
+		}
 
 		np->tx_ring[entry].addr = cpu_to_dma(np->tx_info[entry].mapping);
 		np->tx_ring[entry].status = cpu_to_le32(status);
@@ -1269,8 +1282,30 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
 		netif_stop_queue(dev);
 
 	return NETDEV_TX_OK;
-}
 
+err_out:
+	entry = prev_tx % TX_RING_SIZE;
+	np->tx_info[entry].skb = NULL;
+	if (i > 0) {
+		pci_unmap_single(np->pci_dev,
+				 np->tx_info[entry].mapping,
+				 skb_first_frag_len(skb),
+				 PCI_DMA_TODEVICE);
+		np->tx_info[entry].mapping = 0;
+		entry = (entry + np->tx_info[entry].used_slots) % TX_RING_SIZE;
+		for (j = 1; j < i; j++) {
+			pci_unmap_single(np->pci_dev,
+					 np->tx_info[entry].mapping,
+					 skb_frag_size(
+						&skb_shinfo(skb)->frags[j-1]),
+					 PCI_DMA_TODEVICE);
+			entry++;
+		}
+	}
+	dev_kfree_skb_any(skb);
+	np->cur_tx = prev_tx;
+	return NETDEV_TX_OK;
+}
 
 /* The interrupt handler does all of the Rx thread work and cleans up
    after the Tx thread. */
@@ -1570,6 +1605,12 @@ static void refill_rx_ring(struct net_device *dev)
 			break;	/* Better luck next round. */
 		np->rx_info[entry].mapping =
 			pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
+		if (pci_dma_mapping_error(np->pci_dev,
+					  np->rx_info[entry].mapping)) {
+			dev_kfree_skb(skb);
+			np->rx_info[entry].skb = NULL;
+			break;
+		}
 		np->rx_ring[entry].rxaddr =
 			cpu_to_dma(np->rx_info[entry].mapping | RxDescValid);
 	}

@@ -1999,7 +1999,7 @@ static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
 		if (!rxb->page)
 			continue;
 
-		dma_unmap_single(rx_queue->dev, rxb->dma,
+		dma_unmap_page(rx_queue->dev, rxb->dma,
 			       PAGE_SIZE, DMA_FROM_DEVICE);
 		__free_page(rxb->page);
 

@@ -105,8 +105,8 @@ int hns_nic_net_xmit_hw(struct net_device *ndev,
 			struct hns_nic_ring_data *ring_data)
 {
 	struct hns_nic_priv *priv = netdev_priv(ndev);
-	struct device *dev = priv->dev;
 	struct hnae_ring *ring = ring_data->ring;
+	struct device *dev = ring_to_dev(ring);
 	struct netdev_queue *dev_queue;
 	struct skb_frag_struct *frag;
 	int buf_num;

@@ -158,7 +158,7 @@ static int mlx4_reset_slave(struct mlx4_dev *dev)
 	return -ETIMEDOUT;
 }
 
-static int mlx4_comm_internal_err(u32 slave_read)
+int mlx4_comm_internal_err(u32 slave_read)
 {
 	return (u32)COMM_CHAN_EVENT_INTERNAL_ERR ==
 		(slave_read & (u32)COMM_CHAN_EVENT_INTERNAL_ERR) ? 1 : 0;

@@ -218,6 +218,18 @@ void mlx4_unregister_device(struct mlx4_dev *dev)
 	struct mlx4_interface *intf;
 
 	mlx4_stop_catas_poll(dev);
+	if (dev->persist->interface_state & MLX4_INTERFACE_STATE_DELETION &&
+	    mlx4_is_slave(dev)) {
+		/* In mlx4_remove_one on a VF */
+		u32 slave_read =
+			swab32(readl(&mlx4_priv(dev)->mfunc.comm->slave_read));
+
+		if (mlx4_comm_internal_err(slave_read)) {
+			mlx4_dbg(dev, "%s: comm channel is down, entering error state.\n",
+				 __func__);
+			mlx4_enter_error_state(dev->persist);
+		}
+	}
 	mutex_lock(&intf_mutex);
 
 	list_for_each_entry(intf, &intf_list, list)

@@ -1205,6 +1205,7 @@ void mlx4_qp_event(struct mlx4_dev *dev, u32 qpn, int event_type);
 void mlx4_srq_event(struct mlx4_dev *dev, u32 srqn, int event_type);
 
 void mlx4_enter_error_state(struct mlx4_dev_persistent *persist);
+int mlx4_comm_internal_err(u32 slave_read);
 
 int mlx4_SENSE_PORT(struct mlx4_dev *dev, int port,
 		    enum mlx4_port_type *type);

@@ -171,6 +171,49 @@ static struct mdiobb_ops bb_ops = {
 	.get_mdio_data = ravb_get_mdio_data,
 };
 
+/* Free TX skb function for AVB-IP */
+static int ravb_tx_free(struct net_device *ndev, int q, bool free_txed_only)
+{
+	struct ravb_private *priv = netdev_priv(ndev);
+	struct net_device_stats *stats = &priv->stats[q];
+	struct ravb_tx_desc *desc;
+	int free_num = 0;
+	int entry;
+	u32 size;
+
+	for (; priv->cur_tx[q] - priv->dirty_tx[q] > 0; priv->dirty_tx[q]++) {
+		bool txed;
+
+		entry = priv->dirty_tx[q] % (priv->num_tx_ring[q] *
+					     NUM_TX_DESC);
+		desc = &priv->tx_ring[q][entry];
+		txed = desc->die_dt == DT_FEMPTY;
+		if (free_txed_only && !txed)
+			break;
+		/* Descriptor type must be checked before all other reads */
+		dma_rmb();
+		size = le16_to_cpu(desc->ds_tagl) & TX_DS;
+		/* Free the original skb. */
+		if (priv->tx_skb[q][entry / NUM_TX_DESC]) {
+			dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
+					 size, DMA_TO_DEVICE);
+			/* Last packet descriptor? */
+			if (entry % NUM_TX_DESC == NUM_TX_DESC - 1) {
+				entry /= NUM_TX_DESC;
+				dev_kfree_skb_any(priv->tx_skb[q][entry]);
+				priv->tx_skb[q][entry] = NULL;
+				if (txed)
+					stats->tx_packets++;
+			}
+			free_num++;
+		}
+		if (txed)
+			stats->tx_bytes += size;
+		desc->die_dt = DT_EEMPTY;
+	}
+	return free_num;
+}
+
 /* Free skb's and DMA buffers for Ethernet AVB */
 static void ravb_ring_free(struct net_device *ndev, int q)
 {
@@ -186,19 +229,21 @@ static void ravb_ring_free(struct net_device *ndev, int q)
 	kfree(priv->rx_skb[q]);
 	priv->rx_skb[q] = NULL;
 
-	/* Free TX skb ringbuffer */
-	if (priv->tx_skb[q]) {
-		for (i = 0; i < priv->num_tx_ring[q]; i++)
-			dev_kfree_skb(priv->tx_skb[q][i]);
-	}
-	kfree(priv->tx_skb[q]);
-	priv->tx_skb[q] = NULL;
-
 	/* Free aligned TX buffers */
 	kfree(priv->tx_align[q]);
 	priv->tx_align[q] = NULL;
 
 	if (priv->rx_ring[q]) {
+		for (i = 0; i < priv->num_rx_ring[q]; i++) {
+			struct ravb_ex_rx_desc *desc = &priv->rx_ring[q][i];
+
+			if (!dma_mapping_error(ndev->dev.parent,
+					       le32_to_cpu(desc->dptr)))
+				dma_unmap_single(ndev->dev.parent,
+						 le32_to_cpu(desc->dptr),
+						 PKT_BUF_SZ,
+						 DMA_FROM_DEVICE);
+		}
 		ring_size = sizeof(struct ravb_ex_rx_desc) *
 			    (priv->num_rx_ring[q] + 1);
 		dma_free_coherent(ndev->dev.parent, ring_size, priv->rx_ring[q],
@@ -207,12 +252,20 @@ static void ravb_ring_free(struct net_device *ndev, int q)
 	}
 
 	if (priv->tx_ring[q]) {
+		ravb_tx_free(ndev, q, false);
+
 		ring_size = sizeof(struct ravb_tx_desc) *
 			    (priv->num_tx_ring[q] * NUM_TX_DESC + 1);
 		dma_free_coherent(ndev->dev.parent, ring_size, priv->tx_ring[q],
 				  priv->tx_desc_dma[q]);
 		priv->tx_ring[q] = NULL;
 	}
+
+	/* Free TX skb ringbuffer.
+	 * SKBs are freed by ravb_tx_free() call above.
+	 */
+	kfree(priv->tx_skb[q]);
+	priv->tx_skb[q] = NULL;
 }
 
 /* Format skb and descriptor buffer for Ethernet AVB */
@@ -420,44 +473,6 @@ static int ravb_dmac_init(struct net_device *ndev)
 	return 0;
 }
 
-/* Free TX skb function for AVB-IP */
-static int ravb_tx_free(struct net_device *ndev, int q)
-{
-	struct ravb_private *priv = netdev_priv(ndev);
-	struct net_device_stats *stats = &priv->stats[q];
-	struct ravb_tx_desc *desc;
-	int free_num = 0;
-	int entry;
-	u32 size;
-
-	for (; priv->cur_tx[q] - priv->dirty_tx[q] > 0; priv->dirty_tx[q]++) {
-		entry = priv->dirty_tx[q] % (priv->num_tx_ring[q] *
-					     NUM_TX_DESC);
-		desc = &priv->tx_ring[q][entry];
-		if (desc->die_dt != DT_FEMPTY)
-			break;
-		/* Descriptor type must be checked before all other reads */
-		dma_rmb();
-		size = le16_to_cpu(desc->ds_tagl) & TX_DS;
-		/* Free the original skb. */
-		if (priv->tx_skb[q][entry / NUM_TX_DESC]) {
-			dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
-					 size, DMA_TO_DEVICE);
-			/* Last packet descriptor? */
-			if (entry % NUM_TX_DESC == NUM_TX_DESC - 1) {
-				entry /= NUM_TX_DESC;
-				dev_kfree_skb_any(priv->tx_skb[q][entry]);
-				priv->tx_skb[q][entry] = NULL;
-				stats->tx_packets++;
-			}
-			free_num++;
-		}
-		stats->tx_bytes += size;
-		desc->die_dt = DT_EEMPTY;
-	}
-	return free_num;
-}
-
 static void ravb_get_tx_tstamp(struct net_device *ndev)
 {
 	struct ravb_private *priv = netdev_priv(ndev);
@@ -797,7 +812,7 @@ static int ravb_poll(struct napi_struct *napi, int budget)
 			spin_lock_irqsave(&priv->lock, flags);
 			/* Clear TX interrupt */
 			ravb_write(ndev, ~mask, TIS);
-			ravb_tx_free(ndev, q);
+			ravb_tx_free(ndev, q, true);
 			netif_wake_subqueue(ndev, q);
 			mmiowb();
 			spin_unlock_irqrestore(&priv->lock, flags);
@@ -1393,7 +1408,8 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 
 	priv->cur_tx[q] += NUM_TX_DESC;
 	if (priv->cur_tx[q] - priv->dirty_tx[q] >
-	    (priv->num_tx_ring[q] - 1) * NUM_TX_DESC && !ravb_tx_free(ndev, q))
+	    (priv->num_tx_ring[q] - 1) * NUM_TX_DESC &&
+	    !ravb_tx_free(ndev, q, true))
 		netif_stop_subqueue(ndev, q);
 
 exit:

@@ -100,6 +100,14 @@
 /* BUFFER_ALIGN(adr) calculates the number of bytes to the next alignment. */
 #define BUFFER_ALIGN(adr) ((ALIGNMENT - ((u32) adr)) % ALIGNMENT)
 
+#ifdef __BIG_ENDIAN
+#define xemaclite_readl		ioread32be
+#define xemaclite_writel	iowrite32be
+#else
+#define xemaclite_readl		ioread32
+#define xemaclite_writel	iowrite32
+#endif
+
 /**
  * struct net_local - Our private per device data
  * @ndev:		instance of the network device
@@ -158,15 +166,15 @@ static void xemaclite_enable_interrupts(struct net_local *drvdata)
 	u32 reg_data;
 
 	/* Enable the Tx interrupts for the first Buffer */
-	reg_data = __raw_readl(drvdata->base_addr + XEL_TSR_OFFSET);
-	__raw_writel(reg_data | XEL_TSR_XMIT_IE_MASK,
+	reg_data = xemaclite_readl(drvdata->base_addr + XEL_TSR_OFFSET);
+	xemaclite_writel(reg_data | XEL_TSR_XMIT_IE_MASK,
 		     drvdata->base_addr + XEL_TSR_OFFSET);
 
 	/* Enable the Rx interrupts for the first buffer */
-	__raw_writel(XEL_RSR_RECV_IE_MASK, drvdata->base_addr + XEL_RSR_OFFSET);
+	xemaclite_writel(XEL_RSR_RECV_IE_MASK, drvdata->base_addr + XEL_RSR_OFFSET);
 
 	/* Enable the Global Interrupt Enable */
-	__raw_writel(XEL_GIER_GIE_MASK, drvdata->base_addr + XEL_GIER_OFFSET);
+	xemaclite_writel(XEL_GIER_GIE_MASK, drvdata->base_addr + XEL_GIER_OFFSET);
 }
 
 /**
@@ -181,16 +189,16 @@ static void xemaclite_disable_interrupts(struct net_local *drvdata)
 	u32 reg_data;
 
 	/* Disable the Global Interrupt Enable */
-	__raw_writel(XEL_GIER_GIE_MASK, drvdata->base_addr + XEL_GIER_OFFSET);
+	xemaclite_writel(XEL_GIER_GIE_MASK, drvdata->base_addr + XEL_GIER_OFFSET);
 
 	/* Disable the Tx interrupts for the first buffer */
-	reg_data = __raw_readl(drvdata->base_addr + XEL_TSR_OFFSET);
-	__raw_writel(reg_data & (~XEL_TSR_XMIT_IE_MASK),
+	reg_data = xemaclite_readl(drvdata->base_addr + XEL_TSR_OFFSET);
+	xemaclite_writel(reg_data & (~XEL_TSR_XMIT_IE_MASK),
 		     drvdata->base_addr + XEL_TSR_OFFSET);
 
 	/* Disable the Rx interrupts for the first buffer */
-	reg_data = __raw_readl(drvdata->base_addr + XEL_RSR_OFFSET);
-	__raw_writel(reg_data & (~XEL_RSR_RECV_IE_MASK),
+	reg_data = xemaclite_readl(drvdata->base_addr + XEL_RSR_OFFSET);
+	xemaclite_writel(reg_data & (~XEL_RSR_RECV_IE_MASK),
 		     drvdata->base_addr + XEL_RSR_OFFSET);
 }
 
@@ -323,7 +331,7 @@ static int xemaclite_send_data(struct net_local *drvdata, u8 *data,
 		byte_count = ETH_FRAME_LEN;
 
 	/* Check if the expected buffer is available */
-	reg_data = __raw_readl(addr + XEL_TSR_OFFSET);
+	reg_data = xemaclite_readl(addr + XEL_TSR_OFFSET);
 	if ((reg_data & (XEL_TSR_XMIT_BUSY_MASK |
 	     XEL_TSR_XMIT_ACTIVE_MASK)) == 0) {
 
@@ -336,7 +344,7 @@ static int xemaclite_send_data(struct net_local *drvdata, u8 *data,
 
 		addr = (void __iomem __force *)((u32 __force)addr ^
 						 XEL_BUFFER_OFFSET);
-		reg_data = __raw_readl(addr + XEL_TSR_OFFSET);
+		reg_data = xemaclite_readl(addr + XEL_TSR_OFFSET);
 
 		if ((reg_data & (XEL_TSR_XMIT_BUSY_MASK |
 		     XEL_TSR_XMIT_ACTIVE_MASK)) != 0)
@@ -347,16 +355,16 @@ static int xemaclite_send_data(struct net_local *drvdata, u8 *data,
 	/* Write the frame to the buffer */
 	xemaclite_aligned_write(data, (u32 __force *) addr, byte_count);
 
-	__raw_writel((byte_count & XEL_TPLR_LENGTH_MASK),
+	xemaclite_writel((byte_count & XEL_TPLR_LENGTH_MASK),
 		     addr + XEL_TPLR_OFFSET);
 
 	/* Update the Tx Status Register to indicate that there is a
 	 * frame to send. Set the XEL_TSR_XMIT_ACTIVE_MASK flag which
 	 * is used by the interrupt handler to check whether a frame
 	 * has been transmitted */
-	reg_data = __raw_readl(addr + XEL_TSR_OFFSET);
+	reg_data = xemaclite_readl(addr + XEL_TSR_OFFSET);
 	reg_data |= (XEL_TSR_XMIT_BUSY_MASK | XEL_TSR_XMIT_ACTIVE_MASK);
-	__raw_writel(reg_data, addr + XEL_TSR_OFFSET);
+	xemaclite_writel(reg_data, addr + XEL_TSR_OFFSET);
 
 	return 0;
 }
@@ -371,7 +379,7 @@ static int xemaclite_send_data(struct net_local *drvdata, u8 *data,
 *
 * Return:	Total number of bytes received
 */
-static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data)
+static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data, int maxlen)
 {
	void __iomem *addr;
	u16 length, proto_type;
@@ -381,7 +389,7 @@ static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data)
 	addr = (drvdata->base_addr + drvdata->next_rx_buf_to_use);
 
 	/* Verify which buffer has valid data */
-	reg_data = __raw_readl(addr + XEL_RSR_OFFSET);
+	reg_data = xemaclite_readl(addr + XEL_RSR_OFFSET);
 
 	if ((reg_data & XEL_RSR_RECV_DONE_MASK) == XEL_RSR_RECV_DONE_MASK) {
 		if (drvdata->rx_ping_pong != 0)
@@ -398,27 +406,28 @@ static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data)
 			return 0;	/* No data was available */
 
 		/* Verify that buffer has valid data */
-		reg_data = __raw_readl(addr + XEL_RSR_OFFSET);
+		reg_data = xemaclite_readl(addr + XEL_RSR_OFFSET);
 		if ((reg_data & XEL_RSR_RECV_DONE_MASK) !=
 		     XEL_RSR_RECV_DONE_MASK)
 			return 0;	/* No data was available */
 	}
 
 	/* Get the protocol type of the ethernet frame that arrived */
-	proto_type = ((ntohl(__raw_readl(addr + XEL_HEADER_OFFSET +
+	proto_type = ((ntohl(xemaclite_readl(addr + XEL_HEADER_OFFSET +
 			XEL_RXBUFF_OFFSET)) >> XEL_HEADER_SHIFT) &
 			XEL_RPLR_LENGTH_MASK);
 
 	/* Check if received ethernet frame is a raw ethernet frame
 	 * or an IP packet or an ARP packet */
-	if (proto_type > (ETH_FRAME_LEN + ETH_FCS_LEN)) {
+	if (proto_type > ETH_DATA_LEN) {
 
 		if (proto_type == ETH_P_IP) {
-			length = ((ntohl(__raw_readl(addr +
+			length = ((ntohl(xemaclite_readl(addr +
 					XEL_HEADER_IP_LENGTH_OFFSET +
 					XEL_RXBUFF_OFFSET)) >>
 					XEL_HEADER_SHIFT) &
 					XEL_RPLR_LENGTH_MASK);
+			length = min_t(u16, length, ETH_DATA_LEN);
 			length += ETH_HLEN + ETH_FCS_LEN;
 
 		} else if (proto_type == ETH_P_ARP)
@@ -431,14 +440,17 @@ static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data)
 		/* Use the length in the frame, plus the header and trailer */
 		length = proto_type + ETH_HLEN + ETH_FCS_LEN;
 
+	if (WARN_ON(length > maxlen))
+		length = maxlen;
+
 	/* Read from the EmacLite device */
 	xemaclite_aligned_read((u32 __force *) (addr + XEL_RXBUFF_OFFSET),
 				data, length);
 
 	/* Acknowledge the frame */
-	reg_data = __raw_readl(addr + XEL_RSR_OFFSET);
+	reg_data = xemaclite_readl(addr + XEL_RSR_OFFSET);
 	reg_data &= ~XEL_RSR_RECV_DONE_MASK;
-	__raw_writel(reg_data, addr + XEL_RSR_OFFSET);
+	xemaclite_writel(reg_data, addr + XEL_RSR_OFFSET);
 
 	return length;
 }
@@ -465,14 +477,14 @@ static void xemaclite_update_address(struct net_local *drvdata,
 
 	xemaclite_aligned_write(address_ptr, (u32 __force *) addr, ETH_ALEN);
 
-	__raw_writel(ETH_ALEN, addr + XEL_TPLR_OFFSET);
+	xemaclite_writel(ETH_ALEN, addr + XEL_TPLR_OFFSET);
 
 	/* Update the MAC address in the EmacLite */
-	reg_data = __raw_readl(addr + XEL_TSR_OFFSET);
-	__raw_writel(reg_data | XEL_TSR_PROG_MAC_ADDR, addr + XEL_TSR_OFFSET);
+	reg_data = xemaclite_readl(addr + XEL_TSR_OFFSET);
+	xemaclite_writel(reg_data | XEL_TSR_PROG_MAC_ADDR, addr + XEL_TSR_OFFSET);
 
 	/* Wait for EmacLite to finish with the MAC address update */
-	while ((__raw_readl(addr + XEL_TSR_OFFSET) &
+	while ((xemaclite_readl(addr + XEL_TSR_OFFSET) &
 		XEL_TSR_PROG_MAC_ADDR) != 0)
 		;
 }
@@ -605,7 +617,7 @@ static void xemaclite_rx_handler(struct net_device *dev)
 
 	skb_reserve(skb, 2);
 
-	len = xemaclite_recv_data(lp, (u8 *) skb->data);
+	len = xemaclite_recv_data(lp, (u8 *) skb->data, len);
 
 	if (!len) {
 		dev->stats.rx_errors++;
@@ -642,31 +654,31 @@ static irqreturn_t xemaclite_interrupt(int irq, void *dev_id)
 	u32 tx_status;
 
 	/* Check if there is Rx Data available */
-	if ((__raw_readl(base_addr + XEL_RSR_OFFSET) &
+	if ((xemaclite_readl(base_addr + XEL_RSR_OFFSET) &
 			XEL_RSR_RECV_DONE_MASK) ||
-	    (__raw_readl(base_addr + XEL_BUFFER_OFFSET + XEL_RSR_OFFSET)
+	    (xemaclite_readl(base_addr + XEL_BUFFER_OFFSET + XEL_RSR_OFFSET)
 			& XEL_RSR_RECV_DONE_MASK))
 
 		xemaclite_rx_handler(dev);
 
 	/* Check if the Transmission for the first buffer is completed */
-	tx_status = __raw_readl(base_addr + XEL_TSR_OFFSET);
+	tx_status = xemaclite_readl(base_addr + XEL_TSR_OFFSET);
 	if (((tx_status & XEL_TSR_XMIT_BUSY_MASK) == 0) &&
 		(tx_status & XEL_TSR_XMIT_ACTIVE_MASK) != 0) {
 
 		tx_status &= ~XEL_TSR_XMIT_ACTIVE_MASK;
-		__raw_writel(tx_status, base_addr + XEL_TSR_OFFSET);
+		xemaclite_writel(tx_status, base_addr + XEL_TSR_OFFSET);
 
 		tx_complete = true;
 	}
 
 	/* Check if the Transmission for the second buffer is completed */
-	tx_status = __raw_readl(base_addr + XEL_BUFFER_OFFSET + XEL_TSR_OFFSET);
+	tx_status = xemaclite_readl(base_addr + XEL_BUFFER_OFFSET + XEL_TSR_OFFSET);
 	if (((tx_status & XEL_TSR_XMIT_BUSY_MASK) == 0) &&
 		(tx_status & XEL_TSR_XMIT_ACTIVE_MASK) != 0) {
 
 		tx_status &= ~XEL_TSR_XMIT_ACTIVE_MASK;
-		__raw_writel(tx_status, base_addr + XEL_BUFFER_OFFSET +
+		xemaclite_writel(tx_status, base_addr + XEL_BUFFER_OFFSET +
 			     XEL_TSR_OFFSET);
 
 		tx_complete = true;
@@ -700,7 +712,7 @@ static int xemaclite_mdio_wait(struct net_local *lp)
 	/* wait for the MDIO interface to not be busy or timeout
 	   after some time.
 	*/
-	while (__raw_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET) &
+	while (xemaclite_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET) &
 			XEL_MDIOCTRL_MDIOSTS_MASK) {
 		if (time_before_eq(end, jiffies)) {
 			WARN_ON(1);
@@ -736,17 +748,17 @@ static int xemaclite_mdio_read(struct mii_bus *bus, int phy_id, int reg)
 	 * MDIO Address register. Set the Status bit in the MDIO Control
 	 * register to start a MDIO read transaction.
 	 */
-	ctrl_reg = __raw_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET);
-	__raw_writel(XEL_MDIOADDR_OP_MASK |
+	ctrl_reg = xemaclite_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET);
+	xemaclite_writel(XEL_MDIOADDR_OP_MASK |
 		     ((phy_id << XEL_MDIOADDR_PHYADR_SHIFT) | reg),
 		     lp->base_addr + XEL_MDIOADDR_OFFSET);
-	__raw_writel(ctrl_reg | XEL_MDIOCTRL_MDIOSTS_MASK,
+	xemaclite_writel(ctrl_reg | XEL_MDIOCTRL_MDIOSTS_MASK,
 		     lp->base_addr + XEL_MDIOCTRL_OFFSET);
 
 	if (xemaclite_mdio_wait(lp))
 		return -ETIMEDOUT;
 
-	rc = __raw_readl(lp->base_addr + XEL_MDIORD_OFFSET);
+	rc = xemaclite_readl(lp->base_addr + XEL_MDIORD_OFFSET);
 
 	dev_dbg(&lp->ndev->dev,
 		"xemaclite_mdio_read(phy_id=%i, reg=%x) == %x\n",
@@ -783,12 +795,12 @@ static int xemaclite_mdio_write(struct mii_bus *bus, int phy_id, int reg,
 	 * Data register. Finally, set the Status bit in the MDIO Control
 	 * register to start a MDIO write transaction.
 	 */
-	ctrl_reg = __raw_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET);
-	__raw_writel(~XEL_MDIOADDR_OP_MASK &
+	ctrl_reg = xemaclite_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET);
+	xemaclite_writel(~XEL_MDIOADDR_OP_MASK &
 		     ((phy_id << XEL_MDIOADDR_PHYADR_SHIFT) | reg),
 		     lp->base_addr + XEL_MDIOADDR_OFFSET);
-	__raw_writel(val, lp->base_addr + XEL_MDIOWR_OFFSET);
-	__raw_writel(ctrl_reg | XEL_MDIOCTRL_MDIOSTS_MASK,
+	xemaclite_writel(val, lp->base_addr + XEL_MDIOWR_OFFSET);
+	xemaclite_writel(ctrl_reg | XEL_MDIOCTRL_MDIOSTS_MASK,
 		     lp->base_addr + XEL_MDIOCTRL_OFFSET);
 
 	return 0;
@@ -836,7 +848,7 @@ static int xemaclite_mdio_setup(struct net_local *lp, struct device *dev)
 	/* Enable the MDIO bus by asserting the enable bit in MDIO Control
 	 * register.
 	 */
-	__raw_writel(XEL_MDIOCTRL_MDIOEN_MASK,
+	xemaclite_writel(XEL_MDIOCTRL_MDIOEN_MASK,
 		     lp->base_addr + XEL_MDIOCTRL_OFFSET);
 
 	bus = mdiobus_alloc();
@@ -1141,8 +1153,8 @@ static int xemaclite_of_probe(struct platform_device *ofdev)
 		dev_warn(dev, "No MAC address found\n");
 
 	/* Clear the Tx CSR's in case this is a restart */
-	__raw_writel(0, lp->base_addr + XEL_TSR_OFFSET);
-	__raw_writel(0, lp->base_addr + XEL_BUFFER_OFFSET + XEL_TSR_OFFSET);
+	xemaclite_writel(0, lp->base_addr + XEL_TSR_OFFSET);
+	xemaclite_writel(0, lp->base_addr + XEL_BUFFER_OFFSET + XEL_TSR_OFFSET);
 
 	/* Set the MAC address in the EmacLite device */
 	xemaclite_update_address(lp, ndev->dev_addr);

@@ -648,8 +648,8 @@ static void ax_setup(struct net_device *dev)
 {
 	/* Finish setting up the DEVICE info. */
 	dev->mtu             = AX_MTU;
-	dev->hard_header_len = 0;
-	dev->addr_len        = 0;
+	dev->hard_header_len = AX25_MAX_HEADER_LEN;
+	dev->addr_len        = AX25_ADDR_LEN;
 	dev->type            = ARPHRD_AX25;
	dev->tx_queue_len    = 10;
	dev->header_ops      = &ax25_header_ops;

@@ -1851,6 +1851,9 @@ static int r8152_poll(struct napi_struct *napi, int budget)
 		napi_complete(napi);
 		if (!list_empty(&tp->rx_done))
 			napi_schedule(napi);
+		else if (!skb_queue_empty(&tp->tx_queue) &&
+			 !list_empty(&tp->tx_free))
+			napi_schedule(napi);
 	}
 
 	return work_done;
@@ -2990,10 +2993,13 @@ static void set_carrier(struct r8152 *tp)
 		if (!netif_carrier_ok(netdev)) {
 			tp->rtl_ops.enable(tp);
 			set_bit(RTL8152_SET_RX_MODE, &tp->flags);
+			netif_stop_queue(netdev);
 			napi_disable(&tp->napi);
 			netif_carrier_on(netdev);
 			rtl_start_rx(tp);
 			napi_enable(&tp->napi);
+			netif_wake_queue(netdev);
+			netif_info(tp, link, netdev, "carrier on\n");
 		}
 	} else {
 		if (netif_carrier_ok(netdev)) {
@@ -3001,6 +3007,7 @@ static void set_carrier(struct r8152 *tp)
 			napi_disable(&tp->napi);
 			tp->rtl_ops.disable(tp);
 			napi_enable(&tp->napi);
+			netif_info(tp, link, netdev, "carrier off\n");
 		}
 	}
 }
@@ -3385,12 +3392,12 @@ static int rtl8152_pre_reset(struct usb_interface *intf)
 	if (!netif_running(netdev))
 		return 0;
 
+	netif_stop_queue(netdev);
 	napi_disable(&tp->napi);
 	clear_bit(WORK_ENABLE, &tp->flags);
 	usb_kill_urb(tp->intr_urb);
 	cancel_delayed_work_sync(&tp->schedule);
 	if (netif_carrier_ok(netdev)) {
-		netif_stop_queue(netdev);
 		mutex_lock(&tp->control);
 		tp->rtl_ops.disable(tp);
 		mutex_unlock(&tp->control);
@@ -3415,12 +3422,14 @@ static int rtl8152_post_reset(struct usb_interface *intf)
 	if (netif_carrier_ok(netdev)) {
 		mutex_lock(&tp->control);
 		tp->rtl_ops.enable(tp);
+		rtl_start_rx(tp);
 		rtl8152_set_rx_mode(netdev);
 		mutex_unlock(&tp->control);
-		netif_wake_queue(netdev);
 	}
 
 	napi_enable(&tp->napi);
+	netif_wake_queue(netdev);
 	usb_submit_urb(tp->intr_urb, GFP_KERNEL);
 
 	return 0;
 }
|
|
@@ -73,8 +73,6 @@ static atomic_t iface_counter = ATOMIC_INIT(0);
/* Private data structure */
struct sierra_net_data {

	u8 ethr_hdr_tmpl[ETH_HLEN]; /* ethernet header template for rx'd pkts */

	u16 link_up;		/* air link up or down */
	u8 tx_hdr_template[4];	/* part of HIP hdr for tx'd packets */

@@ -122,6 +120,7 @@ struct param {

/* LSI Protocol types */
#define SIERRA_NET_PROTOCOL_UMTS      0x01
#define SIERRA_NET_PROTOCOL_UMTS_DS   0x04
/* LSI Coverage */
#define SIERRA_NET_COVERAGE_NONE      0x00
#define SIERRA_NET_COVERAGE_NOPACKET  0x01

@@ -129,7 +128,8 @@ struct param {
/* LSI Session */
#define SIERRA_NET_SESSION_IDLE       0x00
/* LSI Link types */
#define SIERRA_NET_AS_LINK_TYPE_IPv4  0x00
#define SIERRA_NET_AS_LINK_TYPE_IPV4  0x00
#define SIERRA_NET_AS_LINK_TYPE_IPV6  0x02

struct lsi_umts {
	u8 protocol;

@@ -137,9 +137,14 @@ struct lsi_umts {
	__be16 length;
	/* eventually use a union for the rest - assume umts for now */
	u8 coverage;
	u8 unused2[41];
	u8 network_len;   /* network name len */
	u8 network[40];   /* network name (UCS2, bigendian) */
	u8 session_state;
	u8 unused3[33];
} __packed;

struct lsi_umts_single {
	struct lsi_umts lsi;
	u8 link_type;
	u8 pdp_addr_len;  /* NW-supplied PDP address len */
	u8 pdp_addr[16];  /* NW-supplied PDP address (bigendian)) */

@@ -158,10 +163,31 @@ struct lsi_umts {
	u8 reserved[8];
} __packed;

struct lsi_umts_dual {
	struct lsi_umts lsi;
	u8 pdp_addr4_len;  /* NW-supplied PDP IPv4 address len */
	u8 pdp_addr4[4];   /* NW-supplied PDP IPv4 address (bigendian)) */
	u8 pdp_addr6_len;  /* NW-supplied PDP IPv6 address len */
	u8 pdp_addr6[16];  /* NW-supplied PDP IPv6 address (bigendian)) */
	u8 unused4[23];
	u8 dns1_addr4_len; /* NW-supplied 1st DNS v4 address len (bigendian) */
	u8 dns1_addr4[4];  /* NW-supplied 1st DNS v4 address */
	u8 dns1_addr6_len; /* NW-supplied 1st DNS v6 address len */
	u8 dns1_addr6[16]; /* NW-supplied 1st DNS v6 address (bigendian)*/
	u8 dns2_addr4_len; /* NW-supplied 2nd DNS v4 address len (bigendian) */
	u8 dns2_addr4[4];  /* NW-supplied 2nd DNS v4 address */
	u8 dns2_addr6_len; /* NW-supplied 2nd DNS v6 address len */
	u8 dns2_addr6[16]; /* NW-supplied 2nd DNS v6 address (bigendian)*/
	u8 unused5[68];
} __packed;

#define SIERRA_NET_LSI_COMMON_LEN	4
#define SIERRA_NET_LSI_UMTS_LEN        (sizeof(struct lsi_umts))
#define SIERRA_NET_LSI_UMTS_LEN        (sizeof(struct lsi_umts_single))
#define SIERRA_NET_LSI_UMTS_STATUS_LEN \
	(SIERRA_NET_LSI_UMTS_LEN - SIERRA_NET_LSI_COMMON_LEN)
#define SIERRA_NET_LSI_UMTS_DS_LEN     (sizeof(struct lsi_umts_dual))
#define SIERRA_NET_LSI_UMTS_DS_STATUS_LEN \
	(SIERRA_NET_LSI_UMTS_DS_LEN - SIERRA_NET_LSI_COMMON_LEN)

/* Forward definitions */
static void sierra_sync_timer(unsigned long syncdata);

@@ -191,10 +217,11 @@ static inline void sierra_net_set_private(struct usbnet *dev,
	dev->data[0] = (unsigned long)priv;
}

/* is packet IPv4 */
/* is packet IPv4/IPv6 */
static inline int is_ip(struct sk_buff *skb)
{
	return skb->protocol == cpu_to_be16(ETH_P_IP);
	return skb->protocol == cpu_to_be16(ETH_P_IP) ||
	       skb->protocol == cpu_to_be16(ETH_P_IPV6);
}

/*

@@ -350,42 +377,14 @@ static inline int sierra_net_is_valid_addrlen(u8 len)
static int sierra_net_parse_lsi(struct usbnet *dev, char *data, int datalen)
{
	struct lsi_umts *lsi = (struct lsi_umts *)data;
	u32 expected_length;

	if (datalen < sizeof(struct lsi_umts)) {
		netdev_err(dev->net, "%s: Data length %d, exp %Zu\n",
				__func__, datalen,
				sizeof(struct lsi_umts));
	if (datalen < sizeof(struct lsi_umts_single)) {
		netdev_err(dev->net, "%s: Data length %d, exp >= %Zu\n",
			   __func__, datalen, sizeof(struct lsi_umts_single));
		return -1;
	}

	if (lsi->length != cpu_to_be16(SIERRA_NET_LSI_UMTS_STATUS_LEN)) {
		netdev_err(dev->net, "%s: LSI_UMTS_STATUS_LEN %d, exp %u\n",
				__func__, be16_to_cpu(lsi->length),
				(u32)SIERRA_NET_LSI_UMTS_STATUS_LEN);
		return -1;
	}

	/* Validate the protocol - only support UMTS for now */
	if (lsi->protocol != SIERRA_NET_PROTOCOL_UMTS) {
		netdev_err(dev->net, "Protocol unsupported, 0x%02x\n",
			   lsi->protocol);
		return -1;
	}

	/* Validate the link type */
	if (lsi->link_type != SIERRA_NET_AS_LINK_TYPE_IPv4) {
		netdev_err(dev->net, "Link type unsupported: 0x%02x\n",
			   lsi->link_type);
		return -1;
	}

	/* Validate the coverage */
	if (lsi->coverage == SIERRA_NET_COVERAGE_NONE
	    || lsi->coverage == SIERRA_NET_COVERAGE_NOPACKET) {
		netdev_err(dev->net, "No coverage, 0x%02x\n", lsi->coverage);
		return 0;
	}

	/* Validate the session state */
	if (lsi->session_state == SIERRA_NET_SESSION_IDLE) {
		netdev_err(dev->net, "Session idle, 0x%02x\n",

@@ -393,6 +392,39 @@ static int sierra_net_parse_lsi(struct usbnet *dev, char *data, int datalen)
		return 0;
	}

	/* Validate the protocol - only support UMTS for now */
	if (lsi->protocol == SIERRA_NET_PROTOCOL_UMTS) {
		struct lsi_umts_single *single = (struct lsi_umts_single *)lsi;

		/* Validate the link type */
		if (single->link_type != SIERRA_NET_AS_LINK_TYPE_IPV4 &&
		    single->link_type != SIERRA_NET_AS_LINK_TYPE_IPV6) {
			netdev_err(dev->net, "Link type unsupported: 0x%02x\n",
				   single->link_type);
			return -1;
		}
		expected_length = SIERRA_NET_LSI_UMTS_STATUS_LEN;
	} else if (lsi->protocol == SIERRA_NET_PROTOCOL_UMTS_DS) {
		expected_length = SIERRA_NET_LSI_UMTS_DS_STATUS_LEN;
	} else {
		netdev_err(dev->net, "Protocol unsupported, 0x%02x\n",
			   lsi->protocol);
		return -1;
	}

	if (be16_to_cpu(lsi->length) != expected_length) {
		netdev_err(dev->net, "%s: LSI_UMTS_STATUS_LEN %d, exp %u\n",
			   __func__, be16_to_cpu(lsi->length), expected_length);
		return -1;
	}

	/* Validate the coverage */
	if (lsi->coverage == SIERRA_NET_COVERAGE_NONE ||
	    lsi->coverage == SIERRA_NET_COVERAGE_NOPACKET) {
		netdev_err(dev->net, "No coverage, 0x%02x\n", lsi->coverage);
		return 0;
	}

	/* Set link_sense true */
	return 1;
}

@@ -662,7 +694,6 @@ static int sierra_net_bind(struct usbnet *dev, struct usb_interface *intf)
	u8	numendpoints;
	u16	fwattr = 0;
	int	status;
	struct ethhdr *eth;
	struct sierra_net_data *priv;
	static const u8 sync_tmplate[sizeof(priv->sync_msg)] = {
		0x00, 0x00, SIERRA_NET_HIP_MSYNC_ID, 0x00};

@@ -700,11 +731,6 @@ static int sierra_net_bind(struct usbnet *dev, struct usb_interface *intf)
	dev->net->dev_addr[ETH_ALEN-2] = atomic_inc_return(&iface_counter);
	dev->net->dev_addr[ETH_ALEN-1] = ifacenum;

	/* we will have to manufacture ethernet headers, prepare template */
	eth = (struct ethhdr *)priv->ethr_hdr_tmpl;
	memcpy(&eth->h_dest, dev->net->dev_addr, ETH_ALEN);
	eth->h_proto = cpu_to_be16(ETH_P_IP);

	/* prepare shutdown message template */
	memcpy(priv->shdwn_msg, shdwn_tmplate, sizeof(priv->shdwn_msg));
	/* set context index initially to 0 - prepares tx hdr template */

@@ -833,9 +859,14 @@ static int sierra_net_rx_fixup(struct usbnet *dev, struct sk_buff *skb)

		skb_pull(skb, hh.hdrlen);

		/* We are going to accept this packet, prepare it */
		memcpy(skb->data, sierra_net_get_private(dev)->ethr_hdr_tmpl,
			ETH_HLEN);
		/* We are going to accept this packet, prepare it.
		 * In case protocol is IPv6, keep it, otherwise force IPv4.
		 */
		skb_reset_mac_header(skb);
		if (eth_hdr(skb)->h_proto != cpu_to_be16(ETH_P_IPV6))
			eth_hdr(skb)->h_proto = cpu_to_be16(ETH_P_IP);
		eth_zero_addr(eth_hdr(skb)->h_source);
		memcpy(eth_hdr(skb)->h_dest, dev->net->dev_addr, ETH_ALEN);

		/* Last packet in batch handled by usbnet */
		if (hh.payload_len.word == skb->len)

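The sierra_net change stops trusting a single hard-coded LSI length: it reads the protocol byte first, derives the expected length for that protocol, and only then compares it against the wire value. A minimal user-space sketch of that ordering, with a hypothetical header struct standing in for struct lsi_umts (lengths and names illustrative, not the driver's exact definitions):

	#include <stdint.h>
	#include <stdio.h>
	#include <arpa/inet.h>	/* ntohs()/htons() stand in for be16_to_cpu()/cpu_to_be16() */

	#define PROTO_UMTS	0x01
	#define PROTO_UMTS_DS	0x04

	struct lsi_hdr {		/* hypothetical, mirrors the idea of struct lsi_umts */
		uint8_t  protocol;
		uint8_t  unused;
		uint16_t length;	/* big-endian on the wire */
	};

	/* Pick the expected length from the protocol, then validate once. */
	static int lsi_check_len(const struct lsi_hdr *lsi,
				 uint16_t umts_len, uint16_t umts_ds_len)
	{
		uint16_t expected;

		switch (lsi->protocol) {
		case PROTO_UMTS:
			expected = umts_len;
			break;
		case PROTO_UMTS_DS:
			expected = umts_ds_len;
			break;
		default:
			return -1;	/* unsupported protocol */
		}
		return ntohs(lsi->length) == expected ? 0 : -1;
	}

	int main(void)
	{
		struct lsi_hdr h = { .protocol = PROTO_UMTS, .length = htons(42) };

		printf("%d\n", lsi_check_len(&h, 42, 90));	/* 0: length matches */
		return 0;
	}
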
@@ -293,7 +293,7 @@ struct parport *parport_gsc_probe_port(unsigned long base,
		p->irq = PARPORT_IRQ_NONE;
	}
	if (p->irq != PARPORT_IRQ_NONE) {
		printk(", irq %d", p->irq);
		pr_cont(", irq %d", p->irq);

	if (p->dma == PARPORT_DMA_AUTO) {
		p->dma = PARPORT_DMA_NONE;

@@ -303,8 +303,8 @@ struct parport *parport_gsc_probe_port(unsigned long base,
	   is mandatory (see above) */
	p->dma = PARPORT_DMA_NONE;

	printk(" [");
#define printmode(x) {if(p->modes&PARPORT_MODE_##x){printk("%s%s",f?",":"",#x);f++;}}
	pr_cont(" [");
#define printmode(x) {if(p->modes&PARPORT_MODE_##x){pr_cont("%s%s",f?",":"",#x);f++;}}
	{
		int f = 0;
		printmode(PCSPP);

@@ -315,7 +315,7 @@ struct parport *parport_gsc_probe_port(unsigned long base,
//		printmode(DMA);
	}
#undef printmode
	printk("]\n");
	pr_cont("]\n");

	if (p->irq != PARPORT_IRQ_NONE) {
		if (request_irq (p->irq, parport_irq_handler,

@@ -217,7 +217,7 @@ static const struct berlin_desc_group berlin4ct_soc_pinctrl_groups[] = {
	BERLIN_PINCTRL_GROUP("SCRD0_CRD_PRES", 0xc, 0x3, 0x15,
			BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO20 */
			BERLIN_PINCTRL_FUNCTION(0x1, "scrd0"), /* crd pres */
			BERLIN_PINCTRL_FUNCTION(0x1, "sd1a")), /* DAT3 */
			BERLIN_PINCTRL_FUNCTION(0x3, "sd1a")), /* DAT3 */
	BERLIN_PINCTRL_GROUP("SPI1_SS0n", 0xc, 0x3, 0x18,
			BERLIN_PINCTRL_FUNCTION(0x0, "spi1"), /* SS0n */
			BERLIN_PINCTRL_FUNCTION(0x1, "gpio"), /* GPIO37 */

@@ -894,7 +894,7 @@ int rtw_check_beacon_data(struct adapter *padapter, u8 *pbuf, int len)
		return _FAIL;


	if (len > MAX_IE_SZ)
	if (len < 0 || len > MAX_IE_SZ)
		return _FAIL;

	pbss_network->IELength = len;

@@ -1185,8 +1185,7 @@ void rtl92e_fill_tx_desc(struct net_device *dev, struct tx_desc *pdesc,
			 struct cb_desc *cb_desc, struct sk_buff *skb)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	dma_addr_t mapping = pci_map_single(priv->pdev, skb->data, skb->len,
					    PCI_DMA_TODEVICE);
	dma_addr_t mapping;
	struct tx_fwinfo_8190pci *pTxFwInfo = NULL;

	pTxFwInfo = (struct tx_fwinfo_8190pci *)skb->data;

@@ -1197,8 +1196,6 @@ void rtl92e_fill_tx_desc(struct net_device *dev, struct tx_desc *pdesc,
	pTxFwInfo->Short = _rtl92e_query_is_short(pTxFwInfo->TxHT,
						  pTxFwInfo->TxRate, cb_desc);

	if (pci_dma_mapping_error(priv->pdev, mapping))
		netdev_err(dev, "%s(): DMA Mapping error\n", __func__);
	if (cb_desc->bAMPDUEnable) {
		pTxFwInfo->AllowAggregation = 1;
		pTxFwInfo->RxMF = cb_desc->ampdu_factor;

@@ -1233,6 +1230,14 @@ void rtl92e_fill_tx_desc(struct net_device *dev, struct tx_desc *pdesc,
	}

	memset((u8 *)pdesc, 0, 12);

	mapping = pci_map_single(priv->pdev, skb->data, skb->len,
				 PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(priv->pdev, mapping)) {
		netdev_err(dev, "%s(): DMA Mapping error\n", __func__);
		return;
	}

	pdesc->LINIP = 0;
	pdesc->CmdInit = 1;
	pdesc->Offset = sizeof(struct tx_fwinfo_8190pci) + 8;

@@ -27,6 +27,7 @@
#define UARTn_FRAME		0x04
#define UARTn_FRAME_DATABITS__MASK	0x000f
#define UARTn_FRAME_DATABITS(n)		((n) - 3)
#define UARTn_FRAME_PARITY__MASK	0x0300
#define UARTn_FRAME_PARITY_NONE	0x0000
#define UARTn_FRAME_PARITY_EVEN	0x0200
#define UARTn_FRAME_PARITY_ODD	0x0300

@@ -572,12 +573,16 @@ static void efm32_uart_console_get_options(struct efm32_uart_port *efm_port,
			16 * (4 + (clkdiv >> 6)));

	frame = efm32_uart_read32(efm_port, UARTn_FRAME);
	if (frame & UARTn_FRAME_PARITY_ODD)
	switch (frame & UARTn_FRAME_PARITY__MASK) {
	case UARTn_FRAME_PARITY_ODD:
		*parity = 'o';
	else if (frame & UARTn_FRAME_PARITY_EVEN)
		break;
	case UARTn_FRAME_PARITY_EVEN:
		*parity = 'e';
	else
		break;
	default:
		*parity = 'n';
	}

	*bits = (frame & UARTn_FRAME_DATABITS__MASK) -
			UARTn_FRAME_DATABITS(4) + 4;

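The bug the efm32 hunk fixes is worth seeing in isolation: PARITY_ODD (0x0300) shares its 0x0200 bit with PARITY_EVEN, so a bitwise test against the ODD value also fires for even parity. Masking the field and comparing exact values is the fix. A self-contained check with the same constants:

	#include <stdint.h>
	#include <stdio.h>

	#define PARITY_MASK 0x0300
	#define PARITY_EVEN 0x0200
	#define PARITY_ODD  0x0300

	/* Buggy: EVEN (0x0200) shares a bit with ODD (0x0300), so the
	 * first test also fires for even parity. */
	static char parity_buggy(uint32_t frame)
	{
		if (frame & PARITY_ODD)
			return 'o';
		else if (frame & PARITY_EVEN)
			return 'e';
		return 'n';
	}

	/* Fixed: mask the field, then compare exact values. */
	static char parity_fixed(uint32_t frame)
	{
		switch (frame & PARITY_MASK) {
		case PARITY_ODD:  return 'o';
		case PARITY_EVEN: return 'e';
		default:          return 'n';
		}
	}

	int main(void)
	{
		uint32_t frame = PARITY_EVEN;

		/* prints "buggy=o fixed=e" */
		printf("buggy=%c fixed=%c\n", parity_buggy(frame), parity_fixed(frame));
		return 0;
	}
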
@@ -2569,6 +2569,7 @@ struct usb_hcd *usb_create_shared_hcd(const struct hc_driver *driver,
	hcd->bandwidth_mutex = kmalloc(sizeof(*hcd->bandwidth_mutex),
			GFP_KERNEL);
	if (!hcd->bandwidth_mutex) {
		kfree(hcd->address0_mutex);
		kfree(hcd);
		dev_dbg(dev, "hcd bandwidth mutex alloc failed\n");
		return NULL;

@@ -1329,7 +1329,13 @@ static int hub_configure(struct usb_hub *hub,
	if (ret < 0) {
		message = "can't read hub descriptor";
		goto fail;
	} else if (hub->descriptor->bNbrPorts > USB_MAXCHILDREN) {
	}

	maxchild = USB_MAXCHILDREN;
	if (hub_is_superspeed(hdev))
		maxchild = min_t(unsigned, maxchild, USB_SS_MAXPORTS);

	if (hub->descriptor->bNbrPorts > maxchild) {
		message = "hub has too many ports!";
		ret = -ENODEV;
		goto fail;

@@ -148,7 +148,8 @@ static int dwc3_exynos_probe(struct platform_device *pdev)
		exynos->axius_clk = devm_clk_get(dev, "usbdrd30_axius_clk");
		if (IS_ERR(exynos->axius_clk)) {
			dev_err(dev, "no AXI UpScaler clk specified\n");
			return -ENODEV;
			ret = -ENODEV;
			goto axius_clk_err;
		}
		clk_prepare_enable(exynos->axius_clk);
	} else {

@@ -206,6 +207,7 @@ err3:
	regulator_disable(exynos->vdd33);
err2:
	clk_disable_unprepare(exynos->axius_clk);
axius_clk_err:
	clk_disable_unprepare(exynos->susp_clk);
	clk_disable_unprepare(exynos->clk);
	return ret;

@@ -1676,9 +1676,10 @@ static void
gadgetfs_suspend (struct usb_gadget *gadget)
{
	struct dev_data *dev = get_gadget_data (gadget);
	unsigned long flags;

	INFO (dev, "suspended from state %d\n", dev->state);
	spin_lock (&dev->lock);
	spin_lock_irqsave(&dev->lock, flags);
	switch (dev->state) {
	case STATE_DEV_SETUP:		// VERY odd... host died??
	case STATE_DEV_CONNECTED:

@@ -1689,7 +1690,7 @@ gadgetfs_suspend (struct usb_gadget *gadget)
	default:
		break;
	}
	spin_unlock (&dev->lock);
	spin_unlock_irqrestore(&dev->lock, flags);
}

static struct usb_gadget_driver gadgetfs_driver = {

@@ -442,23 +442,16 @@ static void set_link_state(struct dummy_hcd *dum_hcd)
		/* Report reset and disconnect events to the driver */
		if (dum->driver && (disconnect || reset)) {
			stop_activity(dum);
			spin_unlock(&dum->lock);
			if (reset)
				usb_gadget_udc_reset(&dum->gadget, dum->driver);
			else
				dum->driver->disconnect(&dum->gadget);
			spin_lock(&dum->lock);
		}
	} else if (dum_hcd->active != dum_hcd->old_active) {
		if (dum_hcd->old_active && dum->driver->suspend) {
			spin_unlock(&dum->lock);
		if (dum_hcd->old_active && dum->driver->suspend)
			dum->driver->suspend(&dum->gadget);
			spin_lock(&dum->lock);
		} else if (!dum_hcd->old_active && dum->driver->resume) {
			spin_unlock(&dum->lock);
		else if (!dum_hcd->old_active && dum->driver->resume)
			dum->driver->resume(&dum->gadget);
			spin_lock(&dum->lock);
		}
	}

	dum_hcd->old_status = dum_hcd->port_status;

@@ -985,7 +978,9 @@ static int dummy_udc_stop(struct usb_gadget *g)
	struct dummy_hcd *dum_hcd = gadget_to_dummy_hcd(g);
	struct dummy *dum = dum_hcd->dum;

	spin_lock_irq(&dum->lock);
	dum->driver = NULL;
	spin_unlock_irq(&dum->lock);

	return 0;
}

@@ -2011,7 +2006,7 @@ ss_hub_descriptor(struct usb_hub_descriptor *desc)
			HUB_CHAR_COMMON_OCPM);
	desc->bNbrPorts = 1;
	desc->u.ss.bHubHdrDecLat = 0x04; /* Worst case: 0.4 micro sec*/
	desc->u.ss.DeviceRemovable = 0xffff;
	desc->u.ss.DeviceRemovable = 0;
}

static inline void hub_descriptor(struct usb_hub_descriptor *desc)

@@ -2023,8 +2018,8 @@ static inline void hub_descriptor(struct usb_hub_descriptor *desc)
			HUB_CHAR_INDV_PORT_LPSM |
			HUB_CHAR_COMMON_OCPM);
	desc->bNbrPorts = 1;
	desc->u.hs.DeviceRemovable[0] = 0xff;
	desc->u.hs.DeviceRemovable[1] = 0xff;
	desc->u.hs.DeviceRemovable[0] = 0;
	desc->u.hs.DeviceRemovable[1] = 0xff; /* PortPwrCtrlMask */
}

static int dummy_hub_control(

@@ -2425,11 +2425,8 @@ static void stop_activity(struct net2280 *dev, struct usb_gadget_driver *driver)
		nuke(&dev->ep[i]);

	/* report disconnect; the driver is already quiesced */
	if (driver) {
		spin_unlock(&dev->lock);
	if (driver)
		driver->disconnect(&dev->gadget);
		spin_lock(&dev->lock);
	}

	usb_reinit(dev);
}

@@ -3275,8 +3272,6 @@ next_endpoints:
		BIT(PCI_RETRY_ABORT_INTERRUPT))

static void handle_stat1_irqs(struct net2280 *dev, u32 stat)
__releases(dev->lock)
__acquires(dev->lock)
{
	struct net2280_ep *ep;
	u32 tmp, num, mask, scratch;

@@ -3317,14 +3312,12 @@ __acquires(dev->lock)
			if (disconnect || reset) {
				stop_activity(dev, dev->driver);
				ep0_start(dev);
				spin_unlock(&dev->lock);
				if (reset)
					usb_gadget_udc_reset
						(&dev->gadget, dev->driver);
				else
					(dev->driver->disconnect)
						(&dev->gadget);
				spin_lock(&dev->lock);
				return;
			}
		}

@@ -1269,7 +1269,7 @@ static void set_td_timer(struct r8a66597 *r8a66597, struct r8a66597_td *td)
		time = 30;
		break;
	default:
		time = 300;
		time = 50;
		break;
	}

@@ -1785,6 +1785,7 @@ static void r8a66597_td_timer(unsigned long _r8a66597)
		pipe = td->pipe;
		pipe_stop(r8a66597, pipe);

		/* Select a different address or endpoint */
		new_td = td;
		do {
			list_move_tail(&new_td->queue,

@@ -1794,7 +1795,8 @@ static void r8a66597_td_timer(unsigned long _r8a66597)
				new_td = td;
				break;
			}
		} while (td != new_td && td->address == new_td->address);
		} while (td != new_td && td->address == new_td->address &&
			 td->pipe->info.epnum == new_td->pipe->info.epnum);

		start_transfer(r8a66597, new_td);

@@ -198,6 +198,9 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
	if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA &&
			pdev->device == 0x1042)
		xhci->quirks |= XHCI_BROKEN_STREAMS;
	if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA &&
			pdev->device == 0x1142)
		xhci->quirks |= XHCI_TRUST_TX_LENGTH;

	if (xhci->quirks & XHCI_RESET_ON_RESUME)
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,

@@ -412,6 +412,9 @@ cifs_reconnect(struct TCP_Server_Info *server)
		}
	} while (server->tcpStatus == CifsNeedReconnect);

	if (server->tcpStatus == CifsNeedNegotiate)
		mod_delayed_work(cifsiod_wq, &server->echo, 0);

	return rc;
}

@@ -421,18 +424,27 @@ cifs_echo_request(struct work_struct *work)
	int rc;
	struct TCP_Server_Info *server = container_of(work,
					struct TCP_Server_Info, echo.work);
	unsigned long echo_interval;

	/*
	 * We cannot send an echo if it is disabled or until the
	 * NEGOTIATE_PROTOCOL request is done, which is indicated by
	 * server->ops->need_neg() == true. Also, no need to ping if
	 * we got a response recently.
	 * If we need to renegotiate, set echo interval to zero to
	 * immediately call echo service where we can renegotiate.
	 */
	if (server->tcpStatus == CifsNeedNegotiate)
		echo_interval = 0;
	else
		echo_interval = SMB_ECHO_INTERVAL;

	/*
	 * We cannot send an echo if it is disabled.
	 * Also, no need to ping if we got a response recently.
	 */

	if (server->tcpStatus == CifsNeedReconnect ||
	    server->tcpStatus == CifsExiting || server->tcpStatus == CifsNew ||
	    server->tcpStatus == CifsExiting ||
	    server->tcpStatus == CifsNew ||
	    (server->ops->can_echo && !server->ops->can_echo(server)) ||
	    time_before(jiffies, server->lstrp + SMB_ECHO_INTERVAL - HZ))
	    time_before(jiffies, server->lstrp + echo_interval - HZ))
		goto requeue_echo;

	rc = server->ops->echo ? server->ops->echo(server) : -ENOSYS;

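The "did we hear from the server recently" test in the cifs hunk relies on time_before() being safe across jiffies wrap-around: the subtraction happens in unsigned arithmetic and the result is reinterpreted as signed. A stand-alone illustration of that idiom (values made up to sit just across a wrap):

	#include <stdio.h>

	/* Wrap-safe "a is before b", as the kernel's time_before() does
	 * for jiffies: unsigned subtraction, then signed comparison. */
	#define time_before(a, b) ((long)((a) - (b)) < 0)

	int main(void)
	{
		unsigned long jiffies = 10;			/* just wrapped past 0 */
		unsigned long lstrp = (unsigned long)-50;	/* shortly before the wrap */
		unsigned long echo_interval = 100;

		/* lstrp + echo_interval wraps to 50; 10 is still "before" it */
		if (time_before(jiffies, lstrp + echo_interval))
			printf("response was recent, skip the echo\n");
		return 0;
	}
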
@@ -83,14 +83,13 @@ static int create_link(struct config_item *parent_item,
	ret = -ENOMEM;
	sl = kmalloc(sizeof(struct configfs_symlink), GFP_KERNEL);
	if (sl) {
		sl->sl_target = config_item_get(item);
		spin_lock(&configfs_dirent_lock);
		if (target_sd->s_type & CONFIGFS_USET_DROPPING) {
			spin_unlock(&configfs_dirent_lock);
			config_item_put(item);
			kfree(sl);
			return -ENOENT;
		}
		sl->sl_target = config_item_get(item);
		list_add(&sl->sl_list, &target_sd->s_links);
		spin_unlock(&configfs_dirent_lock);
		ret = configfs_create_link(sl, parent_item->ci_dentry,

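The configfs fix reorders "take a reference" after "check, under the lock, that the target is not being dropped", so the failure path has nothing to undo. A small sketch of that ordering with a pthread spinlock; the struct, flag, and function names here are hypothetical:

	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdbool.h>

	struct item {
		atomic_int refcount;
		bool dropping;		/* stands in for CONFIGFS_USET_DROPPING */
	};

	static pthread_spinlock_t lock;

	/* Take the reference only once we know, under the lock, that the
	 * target is not being torn down; no config_item_put() needed on
	 * the error path. */
	static int link_item(struct item *it)
	{
		pthread_spin_lock(&lock);
		if (it->dropping) {
			pthread_spin_unlock(&lock);
			return -1;	/* -ENOENT in the kernel code */
		}
		atomic_fetch_add(&it->refcount, 1);
		/* ...add to the links list here, still under the lock... */
		pthread_spin_unlock(&lock);
		return 0;
	}

	int main(void)
	{
		struct item it = { .refcount = 0, .dropping = false };

		pthread_spin_init(&lock, PTHREAD_PROCESS_PRIVATE);
		return link_item(&it) == 0 ? 0 : 1;
	}
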
@@ -660,6 +660,20 @@ has_zeroout:
		ret = check_block_validity(inode, map);
		if (ret != 0)
			return ret;

		/*
		 * Inodes with freshly allocated blocks where contents will be
		 * visible after transaction commit must be on transaction's
		 * ordered data list.
		 */
		if (map->m_flags & EXT4_MAP_NEW &&
		    !(map->m_flags & EXT4_MAP_UNWRITTEN) &&
		    !IS_NOQUOTA(inode) &&
		    ext4_should_order_data(inode)) {
			ret = ext4_jbd2_file_inode(handle, inode);
			if (ret)
				return ret;
		}
	}
	return retval;
}

@@ -1166,15 +1180,6 @@ static int ext4_write_end(struct file *file,

	trace_android_fs_datawrite_end(inode, pos, len);
	trace_ext4_write_end(inode, pos, len, copied);
	if (ext4_test_inode_state(inode, EXT4_STATE_ORDERED_MODE)) {
		ret = ext4_jbd2_file_inode(handle, inode);
		if (ret) {
			unlock_page(page);
			page_cache_release(page);
			goto errout;
		}
	}

	if (ext4_has_inline_data(inode)) {
		ret = ext4_write_inline_data_end(inode, pos, len,
						 copied, page);

@@ -542,6 +542,7 @@ void __fscache_disable_cookie(struct fscache_cookie *cookie, bool invalidate)
		hlist_for_each_entry(object, &cookie->backing_objects, cookie_link) {
			if (invalidate)
				set_bit(FSCACHE_OBJECT_RETIRED, &object->flags);
			clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags);
			fscache_raise_event(object, FSCACHE_OBJECT_EV_KILL);
		}
	} else {

@@ -560,6 +561,10 @@ void __fscache_disable_cookie(struct fscache_cookie *cookie, bool invalidate)
		wait_on_atomic_t(&cookie->n_active, fscache_wait_atomic_t,
				 TASK_UNINTERRUPTIBLE);

	/* Make sure any pending writes are cancelled. */
	if (cookie->def->type != FSCACHE_COOKIE_TYPE_INDEX)
		fscache_invalidate_writes(cookie);

	/* Reset the cookie state if it wasn't relinquished */
	if (!test_bit(FSCACHE_COOKIE_RELINQUISHED, &cookie->flags)) {
		atomic_inc(&cookie->n_active);

@@ -48,6 +48,7 @@ int __fscache_register_netfs(struct fscache_netfs *netfs)
	cookie->flags		= 1 << FSCACHE_COOKIE_ENABLED;

	spin_lock_init(&cookie->lock);
	spin_lock_init(&cookie->stores_lock);
	INIT_HLIST_HEAD(&cookie->backing_objects);

	/* check the netfs type is not already present */

@@ -30,6 +30,7 @@ static const struct fscache_state *fscache_look_up_object(struct fscache_object
static const struct fscache_state *fscache_object_available(struct fscache_object *, int);
static const struct fscache_state *fscache_parent_ready(struct fscache_object *, int);
static const struct fscache_state *fscache_update_object(struct fscache_object *, int);
static const struct fscache_state *fscache_object_dead(struct fscache_object *, int);

#define __STATE_NAME(n) fscache_osm_##n
#define STATE(n) (&__STATE_NAME(n))

@@ -91,7 +92,7 @@ static WORK_STATE(LOOKUP_FAILURE, "LCFL", fscache_lookup_failure);
static WORK_STATE(KILL_OBJECT, "KILL", fscache_kill_object);
static WORK_STATE(KILL_DEPENDENTS, "KDEP", fscache_kill_dependents);
static WORK_STATE(DROP_OBJECT, "DROP", fscache_drop_object);
static WORK_STATE(OBJECT_DEAD, "DEAD", (void*)2UL);
static WORK_STATE(OBJECT_DEAD, "DEAD", fscache_object_dead);

static WAIT_STATE(WAIT_FOR_INIT, "?INI",
		  TRANSIT_TO(INIT_OBJECT, 1 << FSCACHE_OBJECT_EV_NEW_CHILD));

@@ -229,6 +230,10 @@ execute_work_state:
	event = -1;
	if (new_state == NO_TRANSIT) {
		_debug("{OBJ%x} %s notrans", object->debug_id, state->name);
		if (unlikely(state == STATE(OBJECT_DEAD))) {
			_leave(" [dead]");
			return;
		}
		fscache_enqueue_object(object);
		event_mask = object->oob_event_mask;
		goto unmask_events;

@@ -239,7 +244,7 @@ execute_work_state:
	object->state = state = new_state;

	if (state->work) {
		if (unlikely(state->work == ((void *)2UL))) {
		if (unlikely(state == STATE(OBJECT_DEAD))) {
			_leave(" [dead]");
			return;
		}

@@ -645,6 +650,12 @@ static const struct fscache_state *fscache_kill_object(struct fscache_object *ob
	fscache_mark_object_dead(object);
	object->oob_event_mask = 0;

	if (test_bit(FSCACHE_OBJECT_RETIRED, &object->flags)) {
		/* Reject any new read/write ops and abort any that are pending. */
		clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags);
		fscache_cancel_all_ops(object);
	}

	if (list_empty(&object->dependents) &&
	    object->n_ops == 0 &&
	    object->n_children == 0)

@@ -1077,3 +1088,20 @@ void fscache_object_mark_killed(struct fscache_object *object,
	}
}
EXPORT_SYMBOL(fscache_object_mark_killed);

/*
 * The object is dead. We can get here if an object gets queued by an event
 * that would lead to its death (such as EV_KILL) when the dispatcher is
 * already running (and so can be requeued) but hasn't yet cleared the event
 * mask.
 */
static const struct fscache_state *fscache_object_dead(struct fscache_object *object,
						       int event)
{
	if (!test_and_set_bit(FSCACHE_OBJECT_RUN_AFTER_DEAD,
			      &object->flags))
		return NO_TRANSIT;

	WARN(true, "FS-Cache object redispatched after death");
	return NO_TRANSIT;
}

@@ -191,7 +191,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
		addr = ALIGN(addr, huge_page_size(h));
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}

@@ -1072,6 +1072,7 @@ static void nfs_increment_seqid(int status, struct nfs_seqid *seqid)
		case -NFS4ERR_BADXDR:
		case -NFS4ERR_RESOURCE:
		case -NFS4ERR_NOFILEHANDLE:
		case -NFS4ERR_MOVED:
			/* Non-seqid mutating errors */
			return;
	};

@@ -3358,6 +3358,8 @@ int proc_pid_readdir(struct file *file, struct dir_context *ctx)
	     iter.tgid += 1, iter = next_tgid(ns, iter)) {
		char name[PROC_NUMBUF];
		int len;

		cond_resched();
		if (!has_pid_permissions(ns, iter.task, 2))
			continue;

@@ -347,11 +347,7 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)

	/* We don't show the stack guard page in /proc/maps */
	start = vma->vm_start;
	if (stack_guard_page_start(vma, start))
		start += PAGE_SIZE;
	end = vma->vm_end;
	if (stack_guard_page_end(vma, end))
		end -= PAGE_SIZE;

	seq_setwidth(m, 25 + sizeof(void *) * 6 - 1);
	seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu ",

@@ -74,6 +74,7 @@
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/uaccess.h>
#include <linux/major.h>
#include "internal.h"

static struct kmem_cache *romfs_inode_cachep;

@@ -415,7 +416,22 @@ static void romfs_destroy_inode(struct inode *inode)
static int romfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct super_block *sb = dentry->d_sb;
	u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
	u64 id = 0;

	/* When calling huge_encode_dev(),
	 * use sb->s_bdev->bd_dev when,
	 *   - CONFIG_ROMFS_ON_BLOCK defined
	 * use sb->s_dev when,
	 *   - CONFIG_ROMFS_ON_BLOCK undefined and
	 *   - CONFIG_ROMFS_ON_MTD defined
	 * leave id as 0 when,
	 *   - CONFIG_ROMFS_ON_BLOCK undefined and
	 *   - CONFIG_ROMFS_ON_MTD undefined
	 */
	if (sb->s_bdev)
		id = huge_encode_dev(sb->s_bdev->bd_dev);
	else if (sb->s_dev)
		id = huge_encode_dev(sb->s_dev);

	buf->f_type = ROMFS_MAGIC;
	buf->f_namelen = ROMFS_MAXFN;

@@ -488,6 +504,11 @@ static int romfs_fill_super(struct super_block *sb, void *data, int silent)
	sb->s_flags |= MS_RDONLY | MS_NOATIME;
	sb->s_op = &romfs_super_ops;

#ifdef CONFIG_ROMFS_ON_MTD
	/* Use same dev ID from the underlying mtdblock device */
	if (sb->s_mtd)
		sb->s_dev = MKDEV(MTD_BLOCK_MAJOR, sb->s_mtd->index);
#endif
	/* read the image superblock and check it */
	rsb = kmalloc(512, GFP_KERNEL);
	if (!rsb)

@@ -174,19 +174,6 @@ int check_caller_access_to_name(struct inode *parent_node, const struct qstr *na
	return 1;
}

/* This function is used when file opening. The open flags must be
 * checked before calling check_caller_access_to_name()
 */
int open_flags_to_access_mode(int open_flags)
{
	if ((open_flags & O_ACCMODE) == O_RDONLY)
		return 0; /* R_OK */
	if ((open_flags & O_ACCMODE) == O_WRONLY)
		return 1; /* W_OK */
	/* Probably O_RDRW, but treat as default to be safe */
	return 1; /* R_OK | W_OK */
}

static struct hashtable_entry *alloc_hashtable_entry(const struct qstr *key,
						     appid_t value)
{

@@ -499,7 +499,6 @@ extern appid_t get_appid(const char *app_name);
extern appid_t get_ext_gid(const char *app_name);
extern appid_t is_excluded(const char *app_name, userid_t userid);
extern int check_caller_access_to_name(struct inode *parent_node, const struct qstr *name);
extern int open_flags_to_access_mode(int open_flags);
extern int packagelist_init(void);
extern void packagelist_exit(void);

@@ -360,6 +360,7 @@ struct fscache_object {
#define FSCACHE_OBJECT_IS_AVAILABLE	5	/* T if object has become active */
#define FSCACHE_OBJECT_RETIRED		6	/* T if object was retired on relinquishment */
#define FSCACHE_OBJECT_KILLED_BY_CACHE	7	/* T if object was killed by the cache */
#define FSCACHE_OBJECT_RUN_AFTER_DEAD	8	/* T if object has been dispatched after death */

	struct list_head	cache_link;	/* link in cache->object_list */
	struct hlist_node	cookie_link;	/* link in cookie->backing_objects */

@@ -194,6 +194,17 @@ unsigned long __rounddown_pow_of_two(unsigned long n)
 * ... and so on.
 */

#define order_base_2(n) ilog2(roundup_pow_of_two(n))
static inline __attribute_const__
int __order_base_2(unsigned long n)
{
	return n > 1 ? ilog2(n - 1) + 1 : 0;
}

#define order_base_2(n)				\
(						\
	__builtin_constant_p(n) ? (		\
		((n) == 0 || (n) == 1) ? 0 :	\
		ilog2((n) - 1) + 1) :		\
	__order_base_2(n)			\
)
#endif /* _LINUX_LOG2_H */

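order_base_2(n) answers "how many doublings of 1 do I need so that 2^order >= n". The old expansion, ilog2(roundup_pow_of_two(n)), is undefined for n == 0; the replacement handles 0 and 1 explicitly. The fixed semantics are easy to verify in user space; ilog2 here is built from __builtin_clzl, which assumes a GCC-style compiler:

	#include <stdio.h>

	static int ilog2(unsigned long n)
	{
		/* index of the highest set bit; caller guarantees n != 0 */
		return (int)(sizeof(n) * 8 - 1) - __builtin_clzl(n);
	}

	static int order_base_2(unsigned long n)
	{
		return n > 1 ? ilog2(n - 1) + 1 : 0;	/* 0 and 1 both map to 0 */
	}

	int main(void)
	{
		unsigned long v[] = { 0, 1, 2, 3, 4, 5, 8, 9 };

		for (unsigned i = 0; i < sizeof(v) / sizeof(v[0]); i++)
			printf("order_base_2(%lu) = %d\n", v[i], order_base_2(v[i]));
		/* 0->0, 1->0, 2->1, 3->2, 4->2, 5->3, 8->3, 9->4 */
		return 0;
	}
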
@@ -1292,39 +1292,11 @@ int clear_page_dirty_for_io(struct page *page);

int get_cmdline(struct task_struct *task, char *buffer, int buflen);

/* Is the vma a continuation of the stack vma above it? */
static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
{
	return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
}

static inline bool vma_is_anonymous(struct vm_area_struct *vma)
{
	return !vma->vm_ops;
}

static inline int stack_guard_page_start(struct vm_area_struct *vma,
					 unsigned long addr)
{
	return (vma->vm_flags & VM_GROWSDOWN) &&
		(vma->vm_start == addr) &&
		!vma_growsdown(vma->vm_prev, addr);
}

/* Is the vma a continuation of the stack vma below it? */
static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
{
	return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
}

static inline int stack_guard_page_end(struct vm_area_struct *vma,
				       unsigned long addr)
{
	return (vma->vm_flags & VM_GROWSUP) &&
		(vma->vm_end == addr) &&
		!vma_growsup(vma->vm_next, addr);
}

int vma_is_stack_for_task(struct vm_area_struct *vma, struct task_struct *t);

extern unsigned long move_page_tables(struct vm_area_struct *vma,

@@ -2029,6 +2001,7 @@ void page_cache_async_readahead(struct address_space *mapping,
				pgoff_t offset,
				unsigned long size);

extern unsigned long stack_guard_gap;
/* Generic expand stack which grows the stack according to GROWS{UP,DOWN} */
extern int expand_stack(struct vm_area_struct *vma, unsigned long address);

@@ -2057,6 +2030,30 @@ static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * m
	return vma;
}

static inline unsigned long vm_start_gap(struct vm_area_struct *vma)
{
	unsigned long vm_start = vma->vm_start;

	if (vma->vm_flags & VM_GROWSDOWN) {
		vm_start -= stack_guard_gap;
		if (vm_start > vma->vm_start)
			vm_start = 0;
	}
	return vm_start;
}

static inline unsigned long vm_end_gap(struct vm_area_struct *vma)
{
	unsigned long vm_end = vma->vm_end;

	if (vma->vm_flags & VM_GROWSUP) {
		vm_end += stack_guard_gap;
		if (vm_end < vma->vm_end)
			vm_end = -PAGE_SIZE;
	}
	return vm_end;
}

static inline unsigned long vma_pages(struct vm_area_struct *vma)
{
	return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;

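vm_start_gap() and vm_end_gap() are the heart of the stack_guard_gap series: every placement decision treats a stack VMA as stack_guard_gap bytes wider than it really is, clamping when the arithmetic would wrap. The clamping is easy to check in isolation; a self-contained sketch with made-up addresses:

	#include <stdio.h>

	#define PAGE_SIZE	4096UL
	#define PAGE_SHIFT	12
	#define VM_GROWSDOWN	0x1UL
	#define VM_GROWSUP	0x2UL

	static unsigned long stack_guard_gap = 256UL << PAGE_SHIFT;	/* 1 MB default */

	struct vma { unsigned long vm_start, vm_end, vm_flags; };

	static unsigned long vm_start_gap(const struct vma *vma)
	{
		unsigned long vm_start = vma->vm_start;

		if (vma->vm_flags & VM_GROWSDOWN) {
			vm_start -= stack_guard_gap;
			if (vm_start > vma->vm_start)	/* subtraction wrapped */
				vm_start = 0;
		}
		return vm_start;
	}

	static unsigned long vm_end_gap(const struct vma *vma)
	{
		unsigned long vm_end = vma->vm_end;

		if (vma->vm_flags & VM_GROWSUP) {
			vm_end += stack_guard_gap;
			if (vm_end < vma->vm_end)	/* addition wrapped */
				vm_end = -PAGE_SIZE;
		}
		return vm_end;
	}

	int main(void)
	{
		struct vma stack = { 0x7f0000000000UL, 0x7f0000100000UL, VM_GROWSDOWN };
		struct vma low   = { 0x8000UL, 0x9000UL, VM_GROWSDOWN };

		printf("%#lx\n", vm_start_gap(&stack));	/* start minus the 1 MB gap */
		printf("%#lx\n", vm_start_gap(&low));	/* would underflow: clamped to 0 */
		return 0;
	}
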
@@ -744,6 +744,11 @@ static inline __be32 ip6_make_flowlabel(struct net *net, struct sk_buff *skb,
{
	u32 hash;

	/* @flowlabel may include more than a flow label, eg, the traffic class.
	 * Here we want only the flow label value.
	 */
	flowlabel &= IPV6_FLOWLABEL_MASK;

	if (flowlabel ||
	    net->ipv6.sysctl.auto_flowlabels == IP6_AUTO_FLOW_LABEL_OFF ||
	    (!autolabel &&

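The IPv6 flow label occupies the low 20 bits of the flow-info word; the byte above it carries the traffic class. Masking before the "was a label requested?" test keeps a caller-supplied traffic class from being mistaken for a label. A host-order sketch (the kernel constant is big-endian; plain 0xFFFFF is used here for readability):

	#include <stdio.h>
	#include <stdint.h>

	#define FLOWLABEL_MASK 0x000FFFFFu	/* low 20 bits, host order */

	int main(void)
	{
		uint32_t flowinfo = 0x04500000u;	/* traffic class set, label 0 */
		uint32_t flowlabel = flowinfo & FLOWLABEL_MASK;

		/* Without the mask, flowinfo != 0 would skip auto-labelling
		 * even though no actual label was supplied. */
		printf("label=%#x -> %s\n", flowlabel,
		       flowlabel ? "use caller's label" : "auto-generate");
		return 0;
	}
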
@@ -22,6 +22,9 @@
 */
#define USB_MAXCHILDREN		31

/* See USB 3.1 spec Table 10-5 */
#define USB_SS_MAXPORTS		15

/*
 * Hub request types
 */

@@ -313,7 +313,8 @@ static const char *const bpf_jmp_string[16] = {
	[BPF_EXIT >> 4] = "exit",
};

static void print_bpf_insn(struct bpf_insn *insn)
static void print_bpf_insn(const struct verifier_env *env,
			   const struct bpf_insn *insn)
{
	u8 class = BPF_CLASS(insn->code);

@@ -377,9 +378,19 @@ static void print_bpf_insn(struct bpf_insn *insn)
			insn->code,
			bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
			insn->src_reg, insn->imm);
	} else if (BPF_MODE(insn->code) == BPF_IMM) {
		verbose("(%02x) r%d = 0x%x\n",
			insn->code, insn->dst_reg, insn->imm);
	} else if (BPF_MODE(insn->code) == BPF_IMM &&
		   BPF_SIZE(insn->code) == BPF_DW) {
		/* At this point, we already made sure that the second
		 * part of the ldimm64 insn is accessible.
		 */
		u64 imm = ((u64)(insn + 1)->imm << 32) | (u32)insn->imm;
		bool map_ptr = insn->src_reg == BPF_PSEUDO_MAP_FD;

		if (map_ptr && !env->allow_ptr_leaks)
			imm = 0;

		verbose("(%02x) r%d = 0x%llx\n", insn->code,
			insn->dst_reg, (unsigned long long)imm);
	} else {
		verbose("BUG_ld_%02x\n", insn->code);
		return;

@@ -1758,7 +1769,7 @@ static int do_check(struct verifier_env *env)

		if (log_level) {
			verbose("%d: ", insn_idx);
			print_bpf_insn(insn);
			print_bpf_insn(env, insn);
		}

		if (class == BPF_ALU || class == BPF_ALU64) {

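A BPF_LD_IMM64 spreads its 64-bit constant over two instruction slots, 32 bits each, and the verifier change reassembles it for printing (zeroing it for map pointers unless pointer leaks are allowed). The (u32) cast on the low half matters: without it, a low half with the sign bit set would sign-extend and corrupt the high half. A stand-alone check of that detail:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		int32_t imm_lo = -2147483647;	/* bit pattern 0x80000001, from insn->imm */
		int32_t imm_hi = 2;		/* from (insn + 1)->imm */

		/* Correct: zero-extend the low half before widening. */
		uint64_t ok  = ((uint64_t)imm_hi << 32) | (uint32_t)imm_lo;
		/* Wrong: implicit sign extension smears 1s over the high half. */
		uint64_t bad = ((uint64_t)imm_hi << 32) | (uint64_t)imm_lo;

		printf("ok  = 0x%016llx\n", (unsigned long long)ok);  /* 0x0000000280000001 */
		printf("bad = 0x%016llx\n", (unsigned long long)bad); /* 0xffffffff80000001 */
		return 0;
	}
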
@@ -1305,9 +1305,11 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
			ret = __irq_set_trigger(desc,
						new->flags & IRQF_TRIGGER_MASK);

			if (ret)
			if (ret) {
				irq_release_resources(desc);
				goto out_mask;
			}
		}

		desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \
				  IRQS_ONESHOT | IRQS_WAITING);

@@ -569,7 +569,7 @@ void alarm_start_relative(struct alarm *alarm, ktime_t start)
{
	struct alarm_base *base = &alarm_bases[alarm->type];

	start = ktime_add(start, base->gettime());
	start = ktime_add_safe(start, base->gettime());
	alarm_start(alarm, start);
}
EXPORT_SYMBOL_GPL(alarm_start_relative);

@@ -655,7 +655,7 @@ u64 alarm_forward(struct alarm *alarm, ktime_t now, ktime_t interval)
		overrun++;
	}

	alarm->node.expires = ktime_add(alarm->node.expires, interval);
	alarm->node.expires = ktime_add_safe(alarm->node.expires, interval);
	return overrun;
}
EXPORT_SYMBOL_GPL(alarm_forward);

@@ -843,13 +843,21 @@ static int alarm_timer_set(struct k_itimer *timr, int flags,

	/* start the timer */
	timr->it.alarm.interval = timespec_to_ktime(new_setting->it_interval);

	/*
	 * Rate limit to the tick as a hot fix to prevent DOS. Will be
	 * mopped up later.
	 */
	if (ktime_to_ns(timr->it.alarm.interval) < TICK_NSEC)
		timr->it.alarm.interval = ktime_set(0, TICK_NSEC);

	exp = timespec_to_ktime(new_setting->it_value);
	/* Convert (if necessary) to absolute time */
	if (flags != TIMER_ABSTIME) {
		ktime_t now;

		now = alarm_bases[timr->it.alarm.alarmtimer.type].gettime();
		exp = ktime_add(now, exp);
		exp = ktime_add_safe(now, exp);
	}

	alarm_start(&timr->it.alarm.alarmtimer, exp);

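The point of switching to ktime_add_safe() is that a huge user-supplied relative timeout must saturate at the maximum representable time instead of wrapping into the past and firing immediately. The guts are a few lines of 64-bit arithmetic; a hedged user-space equivalent modeled on the kernel helper (both operands assumed non-negative, as they are for timestamps):

	#include <stdio.h>
	#include <stdint.h>

	#define KTIME_MAX INT64_MAX

	/* Add two nanosecond timestamps, clamping on overflow instead of
	 * wrapping, in the spirit of the kernel's ktime_add_safe(). */
	static int64_t ktime_add_safe(int64_t a, int64_t b)
	{
		int64_t res = (int64_t)((uint64_t)a + (uint64_t)b);	/* wrap is harmless here */

		/* with non-negative inputs, a wrap shows up as a result
		 * that is negative or smaller than either operand */
		if (res < 0 || res < a || res < b)
			res = KTIME_MAX;
		return res;
	}

	int main(void)
	{
		int64_t now = 1500000000LL * 1000000000LL;	/* some epoch time in ns */

		printf("%lld\n", (long long)ktime_add_safe(now, 1000));		/* normal add */
		printf("%lld\n", (long long)ktime_add_safe(now, INT64_MAX - 10));	/* clamped */
		return 0;
	}
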
mm/gup.c
@@ -312,11 +312,6 @@ static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma,
		/* mlock all present pages, but do not fault in new pages */
		if ((*flags & (FOLL_POPULATE | FOLL_MLOCK)) == FOLL_MLOCK)
			return -ENOENT;
	/* For mm_populate(), just skip the stack guard page. */
	if ((*flags & FOLL_POPULATE) &&
			(stack_guard_page_start(vma, address) ||
			 stack_guard_page_end(vma, address + PAGE_SIZE)))
		return -ENOENT;
	if (*flags & FOLL_WRITE)
		fault_flags |= FAULT_FLAG_WRITE;
	if (nonblocking)

@@ -13,6 +13,7 @@
 *
 */

#include <linux/ftrace.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/printk.h>

@@ -251,6 +252,8 @@ void kasan_report(unsigned long addr, size_t size,
	if (likely(!kasan_report_enabled()))
		return;

	disable_trace_on_warning();

	info.access_addr = (void *)addr;
	info.access_size = size;
	info.is_write = is_write;

@@ -1208,6 +1208,9 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
	 * page_remove_rmap() in try_to_unmap_one(). So to determine page status
	 * correctly, we save a copy of the page flags at this time.
	 */
	if (PageHuge(p))
		page_flags = hpage->flags;
	else
		page_flags = p->flags;

	/*

mm/memory.c
@@ -2662,40 +2662,6 @@ out_release:
	return ret;
}

/*
 * This is like a special single-page "expand_{down|up}wards()",
 * except we must first make sure that 'address{-|+}PAGE_SIZE'
 * doesn't hit another vma.
 */
static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
{
	address &= PAGE_MASK;
	if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
		struct vm_area_struct *prev = vma->vm_prev;

		/*
		 * Is there a mapping abutting this one below?
		 *
		 * That's only ok if it's the same stack mapping
		 * that has gotten split..
		 */
		if (prev && prev->vm_end == address)
			return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;

		return expand_downwards(vma, address - PAGE_SIZE);
	}
	if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
		struct vm_area_struct *next = vma->vm_next;

		/* As VM_GROWSDOWN but s/below/above/ */
		if (next && next->vm_start == address + PAGE_SIZE)
			return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;

		return expand_upwards(vma, address + PAGE_SIZE);
	}
	return 0;
}

/*
 * We enter with non-exclusive mmap_sem (to exclude vma changes,
 * but allow concurrent faults), and pte mapped but not yet locked.

@@ -2716,10 +2682,6 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
	if (vma->vm_flags & VM_SHARED)
		return VM_FAULT_SIGBUS;

	/* Check if we need to add a guard page to the stack */
	if (check_stack_guard_page(vma, address) < 0)
		return VM_FAULT_SIGSEGV;

	/* Use the zero-page for reads */
	if (!(flags & FAULT_FLAG_WRITE) && !mm_forbids_zeropage(mm)) {
		entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),

mm/mmap.c
@@ -304,6 +304,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
	unsigned long retval;
	unsigned long newbrk, oldbrk;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *next;
	unsigned long min_brk;
	bool populate;

@@ -348,7 +349,8 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
	}

	/* Check against existing mmap mappings. */
	if (find_vma_intersection(mm, oldbrk, newbrk+PAGE_SIZE))
	next = find_vma(mm, oldbrk);
	if (next && newbrk + PAGE_SIZE > vm_start_gap(next))
		goto out;

	/* Ok, looks good - let it rip. */

@@ -371,10 +373,22 @@ out:

static long vma_compute_subtree_gap(struct vm_area_struct *vma)
{
	unsigned long max, subtree_gap;
	max = vma->vm_start;
	if (vma->vm_prev)
		max -= vma->vm_prev->vm_end;
	unsigned long max, prev_end, subtree_gap;

	/*
	 * Note: in the rare case of a VM_GROWSDOWN above a VM_GROWSUP, we
	 * allow two stack_guard_gaps between them here, and when choosing
	 * an unmapped area; whereas when expanding we only require one.
	 * That's a little inconsistent, but keeps the code here simpler.
	 */
	max = vm_start_gap(vma);
	if (vma->vm_prev) {
		prev_end = vm_end_gap(vma->vm_prev);
		if (max > prev_end)
			max -= prev_end;
		else
			max = 0;
	}
	if (vma->vm_rb.rb_left) {
		subtree_gap = rb_entry(vma->vm_rb.rb_left,
				struct vm_area_struct, vm_rb)->rb_subtree_gap;

@@ -467,7 +481,7 @@ static void validate_mm(struct mm_struct *mm)
			anon_vma_unlock_read(anon_vma);
		}

		highest_address = vma->vm_end;
		highest_address = vm_end_gap(vma);
		vma = vma->vm_next;
		i++;
	}

@@ -636,7 +650,7 @@ void __vma_link_rb(struct mm_struct *mm, struct vm_area_struct *vma,
	if (vma->vm_next)
		vma_gap_update(vma->vm_next);
	else
		mm->highest_vm_end = vma->vm_end;
		mm->highest_vm_end = vm_end_gap(vma);

	/*
	 * vma->vm_prev wasn't known when we followed the rbtree to find the

@@ -882,7 +896,7 @@ again: remove_next = 1 + (end > next->vm_end);
		vma_gap_update(vma);
	if (end_changed) {
		if (!next)
			mm->highest_vm_end = end;
			mm->highest_vm_end = vm_end_gap(vma);
		else if (!adjust_next)
			vma_gap_update(next);
	}

@@ -925,7 +939,7 @@ again: remove_next = 1 + (end > next->vm_end);
		else if (next)
			vma_gap_update(next);
		else
			mm->highest_vm_end = end;
			VM_WARN_ON(mm->highest_vm_end != vm_end_gap(vma));
	}
	if (insert && file)
		uprobe_mmap(insert);

@@ -1771,7 +1785,7 @@ unsigned long unmapped_area(struct vm_unmapped_area_info *info)

	while (true) {
		/* Visit left subtree if it looks promising */
		gap_end = vma->vm_start;
		gap_end = vm_start_gap(vma);
		if (gap_end >= low_limit && vma->vm_rb.rb_left) {
			struct vm_area_struct *left =
				rb_entry(vma->vm_rb.rb_left,

@@ -1782,12 +1796,13 @@ unsigned long unmapped_area(struct vm_unmapped_area_info *info)
			}
		}

		gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0;
		gap_start = vma->vm_prev ? vm_end_gap(vma->vm_prev) : 0;
check_current:
		/* Check if current node has a suitable gap */
		if (gap_start > high_limit)
			return -ENOMEM;
		if (gap_end >= low_limit && gap_end - gap_start >= length)
		if (gap_end >= low_limit &&
		    gap_end > gap_start && gap_end - gap_start >= length)
			goto found;

		/* Visit right subtree if it looks promising */

@@ -1809,8 +1824,8 @@ check_current:
			vma = rb_entry(rb_parent(prev),
				       struct vm_area_struct, vm_rb);
			if (prev == vma->vm_rb.rb_left) {
				gap_start = vma->vm_prev->vm_end;
				gap_end = vma->vm_start;
				gap_start = vm_end_gap(vma->vm_prev);
				gap_end = vm_start_gap(vma);
				goto check_current;
			}
		}

@@ -1874,7 +1889,7 @@ unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)

	while (true) {
		/* Visit right subtree if it looks promising */
		gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0;
		gap_start = vma->vm_prev ? vm_end_gap(vma->vm_prev) : 0;
		if (gap_start <= high_limit && vma->vm_rb.rb_right) {
			struct vm_area_struct *right =
				rb_entry(vma->vm_rb.rb_right,

@@ -1887,10 +1902,11 @@ unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)

check_current:
		/* Check if current node has a suitable gap */
		gap_end = vma->vm_start;
		gap_end = vm_start_gap(vma);
		if (gap_end < low_limit)
			return -ENOMEM;
		if (gap_start <= high_limit && gap_end - gap_start >= length)
		if (gap_start <= high_limit &&
		    gap_end > gap_start && gap_end - gap_start >= length)
			goto found;

		/* Visit left subtree if it looks promising */

@@ -1913,7 +1929,7 @@ check_current:
				       struct vm_area_struct, vm_rb);
			if (prev == vma->vm_rb.rb_right) {
				gap_start = vma->vm_prev ?
					vma->vm_prev->vm_end : 0;
					vm_end_gap(vma->vm_prev) : 0;
				goto check_current;
			}
		}

@@ -1951,7 +1967,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct vm_area_struct *vma, *prev;
	struct vm_unmapped_area_info info;

	if (len > TASK_SIZE - mmap_min_addr)

@@ -1962,9 +1978,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,

	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		vma = find_vma_prev(mm, addr, &prev);
		if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
		    (!vma || addr + len <= vma->vm_start))
		    (!vma || addr + len <= vm_start_gap(vma)) &&
		    (!prev || addr >= vm_end_gap(prev)))
			return addr;
	}

@@ -1987,7 +2004,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
			  const unsigned long len, const unsigned long pgoff,
			  const unsigned long flags)
{
	struct vm_area_struct *vma;
	struct vm_area_struct *vma, *prev;
	struct mm_struct *mm = current->mm;
	unsigned long addr = addr0;
	struct vm_unmapped_area_info info;

@@ -2002,9 +2019,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
	/* requesting a specific address */
	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		vma = find_vma_prev(mm, addr, &prev);
		if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
		    (!vma || addr + len <= vma->vm_start))
		    (!vma || addr + len <= vm_start_gap(vma)) &&
		    (!prev || addr >= vm_end_gap(prev)))
			return addr;
	}

@@ -2129,21 +2147,19 @@ find_vma_prev(struct mm_struct *mm, unsigned long addr,
 * update accounting. This is shared with both the
 * grow-up and grow-down cases.
 */
static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, unsigned long grow)
static int acct_stack_growth(struct vm_area_struct *vma,
			     unsigned long size, unsigned long grow)
{
	struct mm_struct *mm = vma->vm_mm;
	struct rlimit *rlim = current->signal->rlim;
	unsigned long new_start, actual_size;
	unsigned long new_start;

	/* address space limit tests */
	if (!may_expand_vm(mm, grow))
		return -ENOMEM;

	/* Stack limit test */
	actual_size = size;
	if (size && (vma->vm_flags & (VM_GROWSUP | VM_GROWSDOWN)))
		actual_size -= PAGE_SIZE;
	if (actual_size > READ_ONCE(rlim[RLIMIT_STACK].rlim_cur))
	if (size > READ_ONCE(rlim[RLIMIT_STACK].rlim_cur))
		return -ENOMEM;

	/* mlock limit tests */

@@ -2181,16 +2197,32 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
int expand_upwards(struct vm_area_struct *vma, unsigned long address)
{
	struct mm_struct *mm = vma->vm_mm;
	struct vm_area_struct *next;
	unsigned long gap_addr;
	int error = 0;

	if (!(vma->vm_flags & VM_GROWSUP))
		return -EFAULT;

	/* Guard against wrapping around to address 0. */
	if (address < PAGE_ALIGN(address+4))
		address = PAGE_ALIGN(address+4);
	else
	/* Guard against exceeding limits of the address space. */
	address &= PAGE_MASK;
	if (address >= TASK_SIZE)
		return -ENOMEM;
	address += PAGE_SIZE;

	/* Enforce stack_guard_gap */
	gap_addr = address + stack_guard_gap;

	/* Guard against overflow */
	if (gap_addr < address || gap_addr > TASK_SIZE)
		gap_addr = TASK_SIZE;

	next = vma->vm_next;
	if (next && next->vm_start < gap_addr) {
		if (!(next->vm_flags & VM_GROWSUP))
			return -ENOMEM;
		/* Check that both stack segments have the same anon_vma? */
	}

	/* We must make sure the anon_vma is allocated. */
	if (unlikely(anon_vma_prepare(vma)))

@@ -2236,7 +2268,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
				if (vma->vm_next)
					vma_gap_update(vma->vm_next);
				else
					mm->highest_vm_end = address;
					mm->highest_vm_end = vm_end_gap(vma);
				spin_unlock(&mm->page_table_lock);

				perf_event_mmap(vma);

@@ -2257,6 +2289,8 @@ int expand_downwards(struct vm_area_struct *vma,
				   unsigned long address)
{
	struct mm_struct *mm = vma->vm_mm;
	struct vm_area_struct *prev;
	unsigned long gap_addr;
	int error;

	address &= PAGE_MASK;

@@ -2264,6 +2298,17 @@ int expand_downwards(struct vm_area_struct *vma,
	if (error)
		return error;

	/* Enforce stack_guard_gap */
	gap_addr = address - stack_guard_gap;
	if (gap_addr > address)
		return -ENOMEM;
	prev = vma->vm_prev;
	if (prev && prev->vm_end > gap_addr) {
		if (!(prev->vm_flags & VM_GROWSDOWN))
			return -ENOMEM;
		/* Check that both stack segments have the same anon_vma? */
	}

	/* We must make sure the anon_vma is allocated. */
	if (unlikely(anon_vma_prepare(vma)))
		return -ENOMEM;

@@ -2319,28 +2364,25 @@ int expand_downwards(struct vm_area_struct *vma,
	return error;
}

/*
 * Note how expand_stack() refuses to expand the stack all the way to
 * abut the next virtual mapping, *unless* that mapping itself is also
 * a stack mapping. We want to leave room for a guard page, after all
 * (the guard page itself is not added here, that is done by the
 * actual page faulting logic)
 *
 * This matches the behavior of the guard page logic (see mm/memory.c:
 * check_stack_guard_page()), which only allows the guard page to be
 * removed under these circumstances.
 */
/* enforced gap between the expanding stack and other mappings. */
unsigned long stack_guard_gap = 256UL<<PAGE_SHIFT;

static int __init cmdline_parse_stack_guard_gap(char *p)
{
	unsigned long val;
	char *endptr;

	val = simple_strtoul(p, &endptr, 10);
	if (!*endptr)
		stack_guard_gap = val << PAGE_SHIFT;

	return 0;
}
__setup("stack_guard_gap=", cmdline_parse_stack_guard_gap);

#ifdef CONFIG_STACK_GROWSUP
int expand_stack(struct vm_area_struct *vma, unsigned long address)
{
	struct vm_area_struct *next;

	address &= PAGE_MASK;
	next = vma->vm_next;
	if (next && next->vm_start == address + PAGE_SIZE) {
		if (!(next->vm_flags & VM_GROWSUP))
			return -ENOMEM;
	}
	return expand_upwards(vma, address);
}

@@ -2362,14 +2404,6 @@ find_extend_vma(struct mm_struct *mm, unsigned long addr)
#else
int expand_stack(struct vm_area_struct *vma, unsigned long address)
{
	struct vm_area_struct *prev;

	address &= PAGE_MASK;
	prev = vma->vm_prev;
	if (prev && prev->vm_end == address) {
		if (!(prev->vm_flags & VM_GROWSDOWN))
			return -ENOMEM;
	}
	return expand_downwards(vma, address);
}

@@ -2467,7 +2501,7 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
		vma->vm_prev = prev;
		vma_gap_update(vma);
	} else
		mm->highest_vm_end = prev ? prev->vm_end : 0;
		mm->highest_vm_end = prev ? vm_end_gap(prev) : 0;
	tail_vma->vm_next = NULL;

	/* Kill the cache */

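The stack_guard_gap= boot parameter (documented at the top of this merge) is parsed in pages and shifted into bytes, and input with trailing junk leaves the default untouched. The parse itself is easy to model with strtoul; the __setup() registration is kernel-only plumbing and is omitted here:

	#include <stdio.h>
	#include <stdlib.h>

	#define PAGE_SHIFT 12

	static unsigned long stack_guard_gap = 256UL << PAGE_SHIFT;	/* default: 256 pages */

	static int parse_stack_guard_gap(const char *p)
	{
		char *endptr;
		unsigned long val = strtoul(p, &endptr, 10);

		if (!*endptr)			/* whole string was a number */
			stack_guard_gap = val << PAGE_SHIFT;
		return 0;
	}

	int main(void)
	{
		parse_stack_guard_gap("512");	/* stack_guard_gap=512 -> 2 MB */
		printf("gap = %lu bytes\n", stack_guard_gap);

		parse_stack_guard_gap("bogus");	/* rejected, value unchanged */
		printf("gap = %lu bytes\n", stack_guard_gap);
		return 0;
	}
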
@@ -48,6 +48,9 @@ static int swap_cgroup_prepare(int type)
		if (!page)
			goto not_enough_page;
		ctrl->map[idx] = page;

		if (!(idx % SWAP_CLUSTER_MAX))
			cond_resched();
	}
	return 0;
not_enough_page:

@@ -886,9 +886,12 @@ static int ethtool_get_regs(struct net_device *dev, char __user *useraddr)
	if (regs.len > reglen)
		regs.len = reglen;

	regbuf = NULL;
	if (reglen) {
		regbuf = vzalloc(reglen);
	if (reglen && !regbuf)
		if (!regbuf)
			return -ENOMEM;
	}

	ops->get_regs(dev, &regs, regbuf);

Some files were not shown because too many files have changed in this diff.