Merge branch 'misc' into for-linus

Conflicts:
	arch/arm/kernel/ptrace.c

commit 4175160b06
37 changed files with 268 additions and 173 deletions
@@ -11,6 +11,7 @@ config ARM
 	select HAVE_OPROFILE if (HAVE_PERF_EVENTS)
 	select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL
 	select HAVE_ARCH_KGDB
+	select HAVE_ARCH_TRACEHOOK
 	select HAVE_KPROBES if !XIP_KERNEL
 	select HAVE_KRETPROBES if (HAVE_KPROBES)
 	select HAVE_FUNCTION_TRACER if (!XIP_KERNEL)
@@ -30,6 +31,8 @@ config ARM
 	select HAVE_HW_BREAKPOINT if (PERF_EVENTS && (CPU_V6 || CPU_V6K || CPU_V7))
 	select HAVE_C_RECORDMCOUNT
 	select HAVE_GENERIC_HARDIRQS
+	select HARDIRQS_SW_RESEND
+	select GENERIC_IRQ_PROBE
 	select GENERIC_IRQ_SHOW
 	select CPU_PM if (SUSPEND || CPU_IDLE)
 	select GENERIC_PCI_IOMAP
@@ -126,14 +129,6 @@ config TRACE_IRQFLAGS_SUPPORT
 	bool
 	default y
 
-config HARDIRQS_SW_RESEND
-	bool
-	default y
-
-config GENERIC_IRQ_PROBE
-	bool
-	default y
-
 config GENERIC_LOCKBREAK
 	bool
 	default y
@@ -633,7 +628,6 @@ config ARCH_MMP
 	select CLKDEV_LOOKUP
 	select GENERIC_CLOCKEVENTS
 	select GPIO_PXA
-	select TICK_ONESHOT
 	select PLAT_PXA
 	select SPARSE_IRQ
 	select GENERIC_ALLOCATOR
@@ -717,7 +711,6 @@ config ARCH_PXA
 	select ARCH_REQUIRE_GPIOLIB
 	select GENERIC_CLOCKEVENTS
 	select GPIO_PXA
-	select TICK_ONESHOT
 	select PLAT_PXA
 	select SPARSE_IRQ
 	select AUTO_ZRELADDR
@@ -784,7 +777,6 @@ config ARCH_SA1100
 	select CPU_FREQ
 	select GENERIC_CLOCKEVENTS
 	select CLKDEV_LOOKUP
-	select TICK_ONESHOT
 	select ARCH_REQUIRE_GPIOLIB
 	select HAVE_IDE
 	select NEED_MACH_MEMORY_H
@@ -1562,7 +1554,6 @@ config ARM_ARCH_TIMER
 config HAVE_ARM_TWD
 	bool
 	depends on SMP
-	select TICK_ONESHOT
 	help
 	  This options enables support for the ARM timer and watchdog unit
 
@@ -567,6 +567,12 @@ __armv3_mpu_cache_on:
 		mcr	p15, 0, r0, c7, c0, 0	@ invalidate whole cache v3
 		mov	pc, lr
 
+#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
+#define CB_BITS 0x08
+#else
+#define CB_BITS 0x0c
+#endif
+
 __setup_mmu:	sub	r3, r4, #16384		@ Page directory size
 		bic	r3, r3, #0xff		@ Align the pointer
 		bic	r3, r3, #0x3f00
@@ -578,17 +584,14 @@ __setup_mmu:	sub	r3, r4, #16384		@ Page directory size
 		mov	r9, r0, lsr #18
 		mov	r9, r9, lsl #18		@ start of RAM
 		add	r10, r9, #0x10000000	@ a reasonable RAM size
-		mov	r1, #0x12
-		orr	r1, r1, #3 << 10
+		mov	r1, #0x12		@ XN|U + section mapping
+		orr	r1, r1, #3 << 10	@ AP=11
 		add	r2, r3, #16384
 1:		cmp	r1, r9			@ if virt > start of RAM
-#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
-		orrhs	r1, r1, #0x08		@ set cacheable
-#else
-		orrhs	r1, r1, #0x0c		@ set cacheable, bufferable
-#endif
-		cmp	r1, r10			@ if virt > end of RAM
-		bichs	r1, r1, #0x0c		@ clear cacheable, bufferable
+		cmphs	r10, r1			@   && end of RAM > virt
+		bic	r1, r1, #0x1c		@ clear XN|U + C + B
+		orrlo	r1, r1, #0x10		@ Set XN|U for non-RAM
+		orrhs	r1, r1, r6		@ set RAM section settings
 		str	r1, [r0], #4		@ 1:1 mapping
 		add	r1, r1, #1048576
 		teq	r0, r2
@@ -599,7 +602,7 @@ __setup_mmu:	sub	r3, r4, #16384		@ Page directory size
  * so there is no map overlap problem for up to 1 MB compressed kernel.
  * If the execution is in RAM then we would only be duplicating the above.
  */
-		mov	r1, #0x1e
+		orr	r1, r6, #0x04		@ ensure B is set for this
 		orr	r1, r1, #3 << 10
 		mov	r2, pc
 		mov	r2, r2, lsr #20
@@ -620,6 +623,7 @@ __arm926ejs_mmu_cache_on:
 __armv4_mmu_cache_on:
 		mov	r12, lr
 #ifdef CONFIG_MMU
+		mov	r6, #CB_BITS | 0x12	@ U
 		bl	__setup_mmu
 		mov	r0, #0
 		mcr	p15, 0, r0, c7, c10, 4	@ drain write buffer
@@ -641,6 +645,7 @@ __armv7_mmu_cache_on:
 #ifdef CONFIG_MMU
 		mrc	p15, 0, r11, c0, c1, 4	@ read ID_MMFR0
 		tst	r11, #0xf		@ VMSA
+		movne	r6, #CB_BITS | 0x02	@ !XN
 		blne	__setup_mmu
 		mov	r0, #0
 		mcr	p15, 0, r0, c7, c10, 4	@ drain write buffer
@@ -655,7 +660,7 @@ __armv7_mmu_cache_on:
 		orr	r0, r0, #1 << 25	@ big-endian page tables
 #endif
 		orrne	r0, r0, #1		@ MMU enabled
-		movne	r1, #-1
+		movne	r1, #0xfffffffd		@ domain 0 = client
 		mcrne	p15, 0, r3, c2, c0, 0	@ load page table pointer
 		mcrne	p15, 0, r1, c3, c0, 0	@ load domain access control
 #endif
@@ -668,6 +673,7 @@ __armv7_mmu_cache_on:
 
 __fa526_cache_on:
 		mov	r12, lr
+		mov	r6, #CB_BITS | 0x12	@ U
 		bl	__setup_mmu
 		mov	r0, #0
 		mcr	p15, 0, r0, c7, c7, 0	@ Invalidate whole cache
@@ -682,6 +688,7 @@ __fa526_cache_on:
 
 __arm6_mmu_cache_on:
 		mov	r12, lr
+		mov	r6, #CB_BITS | 0x12	@ U
 		bl	__setup_mmu
 		mov	r0, #0
 		mcr	p15, 0, r0, c7, c0, 0	@ invalidate whole cache v3
@@ -39,6 +39,7 @@
  * struct vic_device - VIC PM device
  * @irq: The IRQ number for the base of the VIC.
  * @base: The register base for the VIC.
+ * @valid_sources: A bitmask of valid interrupts
  * @resume_sources: A bitmask of interrupts for resume.
  * @resume_irqs: The IRQs enabled for resume.
  * @int_select: Save for VIC_INT_SELECT.
@@ -50,6 +51,7 @@
 struct vic_device {
 	void __iomem	*base;
 	int		irq;
+	u32		valid_sources;
 	u32		resume_sources;
 	u32		resume_irqs;
 	u32		int_select;
@@ -164,10 +166,32 @@ static int __init vic_pm_init(void)
 late_initcall(vic_pm_init);
 #endif /* CONFIG_PM */
 
+static struct irq_chip vic_chip;
+
+static int vic_irqdomain_map(struct irq_domain *d, unsigned int irq,
+			     irq_hw_number_t hwirq)
+{
+	struct vic_device *v = d->host_data;
+
+	/* Skip invalid IRQs, only register handlers for the real ones */
+	if (!(v->valid_sources & (1 << hwirq)))
+		return -ENOTSUPP;
+	irq_set_chip_and_handler(irq, &vic_chip, handle_level_irq);
+	irq_set_chip_data(irq, v->base);
+	set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
+	return 0;
+}
+
+static struct irq_domain_ops vic_irqdomain_ops = {
+	.map = vic_irqdomain_map,
+	.xlate = irq_domain_xlate_onetwocell,
+};
+
 /**
  * vic_register() - Register a VIC.
  * @base: The base address of the VIC.
  * @irq: The base IRQ for the VIC.
+ * @valid_sources: bitmask of valid interrupts
  * @resume_sources: bitmask of interrupts allowed for resume sources.
  * @node: The device tree node associated with the VIC.
  *
@@ -178,7 +202,8 @@ late_initcall(vic_pm_init);
  * This also configures the IRQ domain for the VIC.
  */
 static void __init vic_register(void __iomem *base, unsigned int irq,
-				u32 resume_sources, struct device_node *node)
+				u32 valid_sources, u32 resume_sources,
+				struct device_node *node)
 {
 	struct vic_device *v;
 
@@ -189,11 +214,12 @@ static void __init vic_register(void __iomem *base, unsigned int irq,
 
 	v = &vic_devices[vic_id];
 	v->base = base;
+	v->valid_sources = valid_sources;
 	v->resume_sources = resume_sources;
 	v->irq = irq;
 	vic_id++;
-	v->domain = irq_domain_add_legacy(node, 32, irq, 0,
-					  &irq_domain_simple_ops, v);
+	v->domain = irq_domain_add_legacy(node, fls(valid_sources), irq, 0,
+					  &vic_irqdomain_ops, v);
 }
 
 static void vic_ack_irq(struct irq_data *d)
@@ -287,23 +313,6 @@ static void __init vic_clear_interrupts(void __iomem *base)
 	}
 }
 
-static void __init vic_set_irq_sources(void __iomem *base,
-				unsigned int irq_start, u32 vic_sources)
-{
-	unsigned int i;
-
-	for (i = 0; i < 32; i++) {
-		if (vic_sources & (1 << i)) {
-			unsigned int irq = irq_start + i;
-
-			irq_set_chip_and_handler(irq, &vic_chip,
-						 handle_level_irq);
-			irq_set_chip_data(irq, base);
-			set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
-		}
-	}
-}
-
 /*
  * The PL190 cell from ARM has been modified by ST to handle 64 interrupts.
  * The original cell has 32 interrupts, while the modified one has 64,
@@ -338,8 +347,7 @@ static void __init vic_init_st(void __iomem *base, unsigned int irq_start,
 		writel(32, base + VIC_PL190_DEF_VECT_ADDR);
 	}
 
-	vic_set_irq_sources(base, irq_start, vic_sources);
-	vic_register(base, irq_start, 0, node);
+	vic_register(base, irq_start, vic_sources, 0, node);
 }
 
 void __init __vic_init(void __iomem *base, unsigned int irq_start,
@@ -379,9 +387,7 @@ void __init __vic_init(void __iomem *base, unsigned int irq_start,
 
 	vic_init2(base);
 
-	vic_set_irq_sources(base, irq_start, vic_sources);
-
-	vic_register(base, irq_start, resume_sources, node);
+	vic_register(base, irq_start, vic_sources, resume_sources, node);
 }
 
 /**
@@ -101,7 +101,7 @@ struct cpu_cache_fns {
 	void (*flush_user_range)(unsigned long, unsigned long, unsigned int);
 
 	void (*coherent_kern_range)(unsigned long, unsigned long);
-	void (*coherent_user_range)(unsigned long, unsigned long);
+	int (*coherent_user_range)(unsigned long, unsigned long);
 	void (*flush_kern_dcache_area)(void *, size_t);
 
 	void (*dma_map_area)(const void *, size_t, int);
@@ -142,7 +142,7 @@ extern void __cpuc_flush_kern_all(void);
 extern void __cpuc_flush_user_all(void);
 extern void __cpuc_flush_user_range(unsigned long, unsigned long, unsigned int);
 extern void __cpuc_coherent_kern_range(unsigned long, unsigned long);
-extern void __cpuc_coherent_user_range(unsigned long, unsigned long);
+extern int __cpuc_coherent_user_range(unsigned long, unsigned long);
 extern void __cpuc_flush_dcache_area(void *, size_t);
 
 /*
@@ -249,7 +249,7 @@ extern void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr
  * Harvard caches are synchronised for the user space address range.
  * This is used for the ARM private sys_cacheflush system call.
  */
-#define flush_cache_user_range(vma,start,end) \
+#define flush_cache_user_range(start,end) \
 	__cpuc_coherent_user_range((start) & PAGE_MASK, PAGE_ALIGN(end))
 
 /*
@@ -229,66 +229,19 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
 				       (unsigned long)(n),		\
 				       sizeof(*(ptr))))
 
-#ifndef CONFIG_CPU_V6	/* min ARCH >= ARMv6K */
+#define cmpxchg64(ptr, o, n)						\
+	((__typeof__(*(ptr)))atomic64_cmpxchg(container_of((ptr),	\
+						atomic64_t,		\
+						counter),		\
+					      (unsigned long)(o),	\
+					      (unsigned long)(n)))
 
-/*
- * Note : ARMv7-M (currently unsupported by Linux) does not support
- * ldrexd/strexd. If ARMv7-M is ever supported by the Linux kernel, it should
- * not be allowed to use __cmpxchg64.
- */
-static inline unsigned long long __cmpxchg64(volatile void *ptr,
-					     unsigned long long old,
-					     unsigned long long new)
-{
-	register unsigned long long oldval asm("r0");
-	register unsigned long long __old asm("r2") = old;
-	register unsigned long long __new asm("r4") = new;
-	unsigned long res;
-
-	do {
-		asm volatile(
-		"	@ __cmpxchg8\n"
-		"	ldrexd	%1, %H1, [%2]\n"
-		"	mov	%0, #0\n"
-		"	teq	%1, %3\n"
-		"	teqeq	%H1, %H3\n"
-		"	strexdeq %0, %4, %H4, [%2]\n"
-		: "=&r" (res), "=&r" (oldval)
-		: "r" (ptr), "Ir" (__old), "r" (__new)
-		: "memory", "cc");
-	} while (res);
-
-	return oldval;
-}
-
-static inline unsigned long long __cmpxchg64_mb(volatile void *ptr,
-						unsigned long long old,
-						unsigned long long new)
-{
-	unsigned long long ret;
-
-	smp_mb();
-	ret = __cmpxchg64(ptr, old, new);
-	smp_mb();
-
-	return ret;
-}
-
-#define cmpxchg64(ptr,o,n)						\
-	((__typeof__(*(ptr)))__cmpxchg64_mb((ptr),			\
-					(unsigned long long)(o),	\
-					(unsigned long long)(n)))
-
-#define cmpxchg64_local(ptr,o,n)					\
-	((__typeof__(*(ptr)))__cmpxchg64((ptr),				\
-					(unsigned long long)(o),	\
-					(unsigned long long)(n)))
-
-#else /* min ARCH = ARMv6 */
-
-#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
-
-#endif
+#define cmpxchg64_local(ptr, o, n)					\
+	((__typeof__(*(ptr)))local64_cmpxchg(container_of((ptr),	\
+					     local64_t,			\
+					     a),			\
+					     (unsigned long)(o),	\
+					     (unsigned long)(n)))
 
 #endif	/* __LINUX_ARM_ARCH__ >= 6 */
 
@@ -42,4 +42,9 @@ struct sys_timer {
 
 extern void timer_tick(void);
 
+struct timespec;
+typedef void (*clock_access_fn)(struct timespec *);
+extern int register_persistent_clock(clock_access_fn read_boot,
+				     clock_access_fn read_persistent);
+
 #endif
@@ -69,8 +69,6 @@
  */
 #define L_PTE_PRESENT		(_AT(pteval_t, 3) << 0)		/* Valid */
 #define L_PTE_FILE		(_AT(pteval_t, 1) << 2)		/* only when !PRESENT */
-#define L_PTE_BUFFERABLE	(_AT(pteval_t, 1) << 2)		/* AttrIndx[0] */
-#define L_PTE_CACHEABLE		(_AT(pteval_t, 1) << 3)		/* AttrIndx[1] */
 #define L_PTE_USER		(_AT(pteval_t, 1) << 6)		/* AP[1] */
 #define L_PTE_RDONLY		(_AT(pteval_t, 1) << 7)		/* AP[2] */
 #define L_PTE_SHARED		(_AT(pteval_t, 3) << 8)		/* SH[1:0], inner shareable */
@@ -249,6 +249,11 @@ static inline unsigned long kernel_stack_pointer(struct pt_regs *regs)
 	return regs->ARM_sp;
 }
 
+static inline unsigned long user_stack_pointer(struct pt_regs *regs)
+{
+	return regs->ARM_sp;
+}
+
 #endif /* __KERNEL__ */
 
 #endif /* __ASSEMBLY__ */
arch/arm/include/asm/syscall.h (new file, 93 lines)
@@ -0,0 +1,93 @@
+/*
+ * Access to user system call parameters and results
+ *
+ * See asm-generic/syscall.h for descriptions of what we must do here.
+ */
+
+#ifndef _ASM_ARM_SYSCALL_H
+#define _ASM_ARM_SYSCALL_H
+
+#include <linux/err.h>
+
+extern const unsigned long sys_call_table[];
+
+static inline int syscall_get_nr(struct task_struct *task,
+				 struct pt_regs *regs)
+{
+	return task_thread_info(task)->syscall;
+}
+
+static inline void syscall_rollback(struct task_struct *task,
+				    struct pt_regs *regs)
+{
+	regs->ARM_r0 = regs->ARM_ORIG_r0;
+}
+
+static inline long syscall_get_error(struct task_struct *task,
+				     struct pt_regs *regs)
+{
+	unsigned long error = regs->ARM_r0;
+	return IS_ERR_VALUE(error) ? error : 0;
+}
+
+static inline long syscall_get_return_value(struct task_struct *task,
+					    struct pt_regs *regs)
+{
+	return regs->ARM_r0;
+}
+
+static inline void syscall_set_return_value(struct task_struct *task,
+					    struct pt_regs *regs,
+					    int error, long val)
+{
+	regs->ARM_r0 = (long) error ? error : val;
+}
+
+#define SYSCALL_MAX_ARGS 7
+
+static inline void syscall_get_arguments(struct task_struct *task,
+					 struct pt_regs *regs,
+					 unsigned int i, unsigned int n,
+					 unsigned long *args)
+{
+	if (i + n > SYSCALL_MAX_ARGS) {
+		unsigned long *args_bad = args + SYSCALL_MAX_ARGS - i;
+		unsigned int n_bad = n + i - SYSCALL_MAX_ARGS;
+		pr_warning("%s called with max args %d, handling only %d\n",
+			   __func__, i + n, SYSCALL_MAX_ARGS);
+		memset(args_bad, 0, n_bad * sizeof(args[0]));
+		n = SYSCALL_MAX_ARGS - i;
+	}
+
+	if (i == 0) {
+		args[0] = regs->ARM_ORIG_r0;
+		args++;
+		i++;
+		n--;
+	}
+
+	memcpy(args, &regs->ARM_r0 + i, n * sizeof(args[0]));
+}
+
+static inline void syscall_set_arguments(struct task_struct *task,
+					 struct pt_regs *regs,
+					 unsigned int i, unsigned int n,
+					 const unsigned long *args)
+{
+	if (i + n > SYSCALL_MAX_ARGS) {
+		pr_warning("%s called with max args %d, handling only %d\n",
+			   __func__, i + n, SYSCALL_MAX_ARGS);
+		n = SYSCALL_MAX_ARGS - i;
+	}
+
+	if (i == 0) {
+		regs->ARM_ORIG_r0 = args[0];
+		args++;
+		i++;
+		n--;
+	}
+
+	memcpy(&regs->ARM_r0 + i, args, n * sizeof(args[0]));
+}
+
+#endif /* _ASM_ARM_SYSCALL_H */
@@ -277,10 +277,6 @@ __create_page_tables:
 	mov	r3, r3, lsl #PMD_ORDER
 
 	add	r0, r4, r3
-	rsb	r3, r3, #0x4000			@ PTRS_PER_PGD*sizeof(long)
-	cmp	r3, #0x0800			@ limit to 512MB
-	movhi	r3, #0x0800
-	add	r6, r0, r3
 	mov	r3, r7, lsr #SECTION_SHIFT
 	ldr	r7, [r10, #PROCINFO_IO_MMUFLAGS] @ io_mmuflags
 	orr	r3, r7, r3, lsl #SECTION_SHIFT
@@ -289,13 +285,10 @@ __create_page_tables:
 #else
 	orr	r3, r3, #PMD_SECT_XN
 #endif
-1:	str	r3, [r0], #4
+	str	r3, [r0], #4
 #ifdef CONFIG_ARM_LPAE
 	str	r7, [r0], #4
 #endif
-	add	r3, r3, #1 << SECTION_SHIFT
-	cmp	r0, r6
-	blo	1b
 
 #else /* CONFIG_DEBUG_ICEDCC || CONFIG_DEBUG_SEMIHOSTING */
 	/* we don't need any serial debugging mappings */
@@ -24,6 +24,7 @@
 #include <linux/hw_breakpoint.h>
 #include <linux/regset.h>
 #include <linux/audit.h>
+#include <linux/tracehook.h>
 
 #include <asm/pgtable.h>
 #include <asm/traps.h>
@@ -918,8 +919,6 @@ asmlinkage int syscall_trace(int why, struct pt_regs *regs, int scno)
 
 	if (!test_thread_flag(TIF_SYSCALL_TRACE))
 		return scno;
-	if (!(current->ptrace & PT_PTRACED))
-		return scno;
 
 	current_thread_info()->syscall = scno;
 
@@ -930,19 +929,11 @@ asmlinkage int syscall_trace(int why, struct pt_regs *regs, int scno)
 	ip = regs->ARM_ip;
 	regs->ARM_ip = why;
 
-	/* the 0x80 provides a way for the tracing parent to distinguish
-	   between a syscall stop and SIGTRAP delivery */
-	ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
-				 ? 0x80 : 0));
-	/*
-	 * this isn't the same as continuing with a signal, but it will do
-	 * for normal use. strace only continues with a signal if the
-	 * stopping signal is not SIGTRAP. -brl
-	 */
-	if (current->exit_code) {
-		send_sig(current->exit_code, current, 1);
-		current->exit_code = 0;
-	}
+	if (why)
+		tracehook_report_syscall_exit(regs, 0);
+	else if (tracehook_report_syscall_entry(regs))
+		current_thread_info()->syscall = -1;
+
 	regs->ARM_ip = ip;
 
 	return current_thread_info()->syscall;
@@ -589,6 +589,8 @@ handle_signal(unsigned long sig, struct k_sigaction *ka,
 	 */
 	block_sigmask(ka, sig);
 
+	tracehook_signal_handler(sig, info, ka, regs, 0);
+
 	return 0;
 }
 
@@ -11,6 +11,7 @@
 #include <linux/init.h>
 #include <linux/io.h>
 
+#include <asm/smp_plat.h>
 #include <asm/smp_scu.h>
 #include <asm/cacheflush.h>
 #include <asm/cputype.h>
@@ -74,7 +75,7 @@ void scu_enable(void __iomem *scu_base)
 int scu_power_mode(void __iomem *scu_base, unsigned int mode)
 {
 	unsigned int val;
-	int cpu = smp_processor_id();
+	int cpu = cpu_logical_map(smp_processor_id());
 
 	if (mode > 3 || mode == 1 || cpu > 3)
 		return -EINVAL;
@@ -20,6 +20,7 @@
 #include <linux/kernel.h>
 #include <linux/init.h>
 
+#include <asm/cputype.h>
 #include <asm/system_info.h>
 #include <asm/thread_notify.h>
 
@@ -67,8 +68,7 @@ static int __init thumbee_init(void)
 	if (cpu_arch < CPU_ARCH_ARMv7)
 		return 0;
 
-	/* processor feature register 0 */
-	asm("mrc p15, 0, %0, c0, c1, 0\n" : "=r" (pfr0));
+	pfr0 = read_cpuid_ext(CPUID_EXT_PFR0);
 	if ((pfr0 & 0x0000f000) != 0x00001000)
 		return 0;
 
@@ -110,6 +110,42 @@ void timer_tick(void)
 }
 #endif
 
+static void dummy_clock_access(struct timespec *ts)
+{
+	ts->tv_sec = 0;
+	ts->tv_nsec = 0;
+}
+
+static clock_access_fn __read_persistent_clock = dummy_clock_access;
+static clock_access_fn __read_boot_clock = dummy_clock_access;;
+
+void read_persistent_clock(struct timespec *ts)
+{
+	__read_persistent_clock(ts);
+}
+
+void read_boot_clock(struct timespec *ts)
+{
+	__read_boot_clock(ts);
+}
+
+int __init register_persistent_clock(clock_access_fn read_boot,
+				     clock_access_fn read_persistent)
+{
+	/* Only allow the clockaccess functions to be registered once */
+	if (__read_persistent_clock == dummy_clock_access &&
+	    __read_boot_clock == dummy_clock_access) {
+		if (read_boot)
+			__read_boot_clock = read_boot;
+		if (read_persistent)
+			__read_persistent_clock = read_persistent;
+
+		return 0;
+	}
+
+	return -EINVAL;
+}
+
 #if defined(CONFIG_PM) && !defined(CONFIG_GENERIC_CLOCKEVENTS)
 static int timer_suspend(void)
 {
@@ -479,14 +479,14 @@ static int bad_syscall(int n, struct pt_regs *regs)
 	return regs->ARM_r0;
 }
 
-static inline void
+static inline int
 do_cache_op(unsigned long start, unsigned long end, int flags)
 {
 	struct mm_struct *mm = current->active_mm;
 	struct vm_area_struct *vma;
 
 	if (end < start || flags)
-		return;
+		return -EINVAL;
 
 	down_read(&mm->mmap_sem);
 	vma = find_vma(mm, start);
@@ -496,9 +496,11 @@ do_cache_op(unsigned long start, unsigned long end, int flags)
 		if (end > vma->vm_end)
 			end = vma->vm_end;
 
-		flush_cache_user_range(vma, start, end);
+		up_read(&mm->mmap_sem);
+		return flush_cache_user_range(start, end);
 	}
 	up_read(&mm->mmap_sem);
+	return -EINVAL;
 }
 
 /*
@@ -544,8 +546,7 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs)
 	 * the specified region).
 	 */
 	case NR(cacheflush):
-		do_cache_op(regs->ARM_r0, regs->ARM_r1, regs->ARM_r2);
-		return 0;
+		return do_cache_op(regs->ARM_r0, regs->ARM_r1, regs->ARM_r2);
 
 	case NR(usr26):
 		if (!(elf_hwcap & HWCAP_26BIT))
@@ -124,7 +124,7 @@ static u64 tegra_rtc_read_ms(void)
 }
 
 /*
- * read_persistent_clock - Return time from a persistent clock.
+ * tegra_read_persistent_clock - Return time from a persistent clock.
  *
  * Reads the time from a source which isn't disabled during PM, the
  * 32k sync timer. Convert the cycles elapsed since last read into
@@ -133,7 +133,7 @@ static u64 tegra_rtc_read_ms(void)
  * tegra_rtc driver could be executing to avoid race conditions
  * on the RTC shadow register
  */
-void read_persistent_clock(struct timespec *ts)
+static void tegra_read_persistent_clock(struct timespec *ts)
 {
 	u64 delta;
 	struct timespec *tsp = &persistent_ts;
@@ -243,6 +243,7 @@ static void __init tegra_init_timer(void)
 	tegra_clockevent.irq = tegra_timer_irq.irq;
 	clockevents_register_device(&tegra_clockevent);
 	tegra_twd_init();
+	register_persistent_clock(NULL, tegra_read_persistent_clock);
 }
 
 struct sys_timer tegra_timer = {
@@ -78,6 +78,7 @@ ENTRY(v3_coherent_kern_range)
  *	- end	 - virtual end address
  */
 ENTRY(v3_coherent_user_range)
+	mov	r0, #0
 	mov	pc, lr
 
 /*
@@ -88,6 +88,7 @@ ENTRY(v4_coherent_kern_range)
  *	- end	 - virtual end address
  */
 ENTRY(v4_coherent_user_range)
+	mov	r0, #0
 	mov	pc, lr
 
 /*
@@ -167,9 +167,9 @@ ENTRY(v4wb_coherent_user_range)
 	add	r0, r0, #CACHE_DLINESIZE
 	cmp	r0, r1
 	blo	1b
-	mov	ip, #0
-	mcr	p15, 0, ip, c7, c5, 0		@ invalidate I cache
-	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
+	mov	r0, #0
+	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
+	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
 	mov	pc, lr
 
 
@@ -125,6 +125,7 @@ ENTRY(v4wt_coherent_user_range)
 	add	r0, r0, #CACHE_DLINESIZE
 	cmp	r0, r1
 	blo	1b
+	mov	r0, #0
 	mov	pc, lr
 
 /*
@@ -12,6 +12,7 @@
 #include <linux/linkage.h>
 #include <linux/init.h>
 #include <asm/assembler.h>
+#include <asm/errno.h>
 #include <asm/unwind.h>
 
 #include "proc-macros.S"
@@ -135,7 +136,6 @@ ENTRY(v6_coherent_user_range)
 1:
 USER(	mcr	p15, 0, r0, c7, c10, 1	)	@ clean D line
 	add	r0, r0, #CACHE_LINE_SIZE
-2:
 	cmp	r0, r1
 	blo	1b
 #endif
@@ -154,13 +154,11 @@ ENTRY(v6_coherent_user_range)
 
 /*
  * Fault handling for the cache operation above. If the virtual address in r0
- * isn't mapped, just try the next page.
+ * isn't mapped, fail with -EFAULT.
  */
 9001:
-	mov	r0, r0, lsr #12
-	mov	r0, r0, lsl #12
-	add	r0, r0, #4096
-	b	2b
+	mov	r0, #-EFAULT
+	mov	pc, lr
 UNWIND(	.fnend		)
 ENDPROC(v6_coherent_user_range)
 ENDPROC(v6_coherent_kern_range)
@@ -13,6 +13,7 @@
 #include <linux/linkage.h>
 #include <linux/init.h>
 #include <asm/assembler.h>
+#include <asm/errno.h>
 #include <asm/unwind.h>
 
 #include "proc-macros.S"
@@ -198,7 +199,6 @@ ENTRY(v7_coherent_user_range)
 	add	r12, r12, r2
 	cmp	r12, r1
 	blo	2b
-3:
 	mov	r0, #0
 	ALT_SMP(mcr	p15, 0, r0, c7, c1, 6)	@ invalidate BTB Inner Shareable
 	ALT_UP(mcr	p15, 0, r0, c7, c5, 6)	@ invalidate BTB
@@ -208,13 +208,11 @@ ENTRY(v7_coherent_user_range)
 
 /*
  * Fault handling for the cache operation above. If the virtual address in r0
- * isn't mapped, just try the next page.
+ * isn't mapped, fail with -EFAULT.
 */
 9001:
-	mov	r12, r12, lsr #12
-	mov	r12, r12, lsl #12
-	add	r12, r12, #4096
-	b	3b
+	mov	r0, #-EFAULT
+	mov	pc, lr
 UNWIND(	.fnend		)
 ENDPROC(v7_coherent_kern_range)
 ENDPROC(v7_coherent_user_range)
@@ -241,6 +241,7 @@ ENTRY(arm1020_coherent_user_range)
 	cmp	r0, r1
 	blo	1b
 	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
+	mov	r0, #0
 	mov	pc, lr
 
 /*
@@ -235,6 +235,7 @@ ENTRY(arm1020e_coherent_user_range)
 	cmp	r0, r1
 	blo	1b
 	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
+	mov	r0, #0
 	mov	pc, lr
 
 /*
@@ -224,6 +224,7 @@ ENTRY(arm1022_coherent_user_range)
 	cmp	r0, r1
 	blo	1b
 	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
+	mov	r0, #0
 	mov	pc, lr
 
 /*
@@ -218,6 +218,7 @@ ENTRY(arm1026_coherent_user_range)
 	cmp	r0, r1
 	blo	1b
 	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
+	mov	r0, #0
 	mov	pc, lr
 
 /*
@@ -210,6 +210,7 @@ ENTRY(arm920_coherent_user_range)
 	cmp	r0, r1
 	blo	1b
 	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
+	mov	r0, #0
 	mov	pc, lr
 
 /*
@@ -212,6 +212,7 @@ ENTRY(arm922_coherent_user_range)
 	cmp	r0, r1
 	blo	1b
 	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
+	mov	r0, #0
 	mov	pc, lr
 
 /*
@@ -258,6 +258,7 @@ ENTRY(arm925_coherent_user_range)
 	cmp	r0, r1
 	blo	1b
 	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
+	mov	r0, #0
 	mov	pc, lr
 
 /*
@@ -221,6 +221,7 @@ ENTRY(arm926_coherent_user_range)
 	cmp	r0, r1
 	blo	1b
 	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
+	mov	r0, #0
 	mov	pc, lr
 
 /*
@@ -160,7 +160,7 @@ ENTRY(arm940_coherent_user_range)
  *	- size	- region size
  */
 ENTRY(arm940_flush_kern_dcache_area)
-	mov	ip, #0
+	mov	r0, #0
 	mov	r1, #(CACHE_DSEGMENTS - 1) << 4	@ 4 segments
 1:	orr	r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
 2:	mcr	p15, 0, r3, c7, c14, 2		@ clean/flush D index
@@ -168,8 +168,8 @@ ENTRY(arm940_flush_kern_dcache_area)
 	bcs	2b				@ entries 63 to 0
 	subs	r1, r1, #1 << 4
 	bcs	1b				@ segments 7 to 0
-	mcr	p15, 0, ip, c7, c5, 0		@ invalidate I cache
-	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
+	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
+	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
 	mov	pc, lr
 
 /*
@@ -190,6 +190,7 @@ ENTRY(arm946_coherent_user_range)
 	cmp	r0, r1
 	blo	1b
 	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
+	mov	r0, #0
 	mov	pc, lr
 
 /*
@@ -232,6 +232,7 @@ ENTRY(feroceon_coherent_user_range)
 	cmp	r0, r1
 	blo	1b
 	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
+	mov	r0, #0
 	mov	pc, lr
 
 /*
@@ -193,6 +193,7 @@ ENTRY(mohawk_coherent_user_range)
 	cmp	r0, r1
 	blo	1b
 	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
+	mov	r0, #0
 	mov	pc, lr
 
 /*
@@ -19,6 +19,7 @@
 #include <linux/io.h>
 #include <linux/clocksource.h>
 
+#include <asm/mach/time.h>
 #include <asm/sched_clock.h>
 
 #include <plat/hardware.h>
@@ -43,7 +44,7 @@ static u32 notrace omap_32k_read_sched_clock(void)
 }
 
 /**
- * read_persistent_clock - Return time from a persistent clock.
+ * omap_read_persistent_clock - Return time from a persistent clock.
 *
 * Reads the time from a source which isn't disabled during PM, the
 * 32k sync timer. Convert the cycles elapsed since last read into
@@ -52,7 +53,7 @@ static u32 notrace omap_32k_read_sched_clock(void)
 static struct timespec persistent_ts;
 static cycles_t cycles, last_cycles;
 static unsigned int persistent_mult, persistent_shift;
-void read_persistent_clock(struct timespec *ts)
+static void omap_read_persistent_clock(struct timespec *ts)
 {
 	unsigned long long nsecs;
 	cycles_t delta;
@@ -116,6 +117,7 @@ int __init omap_init_clocksource_32k(void)
 			printk(err, "32k_counter");
 
 		setup_sched_clock(omap_32k_read_sched_clock, 32, 32768);
+		register_persistent_clock(NULL, omap_read_persistent_clock);
 	}
 	return 0;
 }
@@ -241,11 +241,11 @@ static void vfp_panic(char *reason, u32 inst)
 {
 	int i;
 
-	printk(KERN_ERR "VFP: Error: %s\n", reason);
-	printk(KERN_ERR "VFP: EXC 0x%08x SCR 0x%08x INST 0x%08x\n",
+	pr_err("VFP: Error: %s\n", reason);
+	pr_err("VFP: EXC 0x%08x SCR 0x%08x INST 0x%08x\n",
 		fmrx(FPEXC), fmrx(FPSCR), inst);
 	for (i = 0; i < 32; i += 2)
-		printk(KERN_ERR "VFP: s%2u: 0x%08x s%2u: 0x%08x\n",
+		pr_err("VFP: s%2u: 0x%08x s%2u: 0x%08x\n",
 		       i, vfp_get_float(i), i+1, vfp_get_float(i+1));
 }
 
@@ -452,7 +452,7 @@ static int vfp_pm_suspend(void)
 
 	/* if vfp is on, then save state for resumption */
 	if (fpexc & FPEXC_EN) {
-		printk(KERN_DEBUG "%s: saving vfp state\n", __func__);
+		pr_debug("%s: saving vfp state\n", __func__);
 		vfp_save_state(&ti->vfpstate, fpexc);
 
 		/* disable, just in case */
@@ -664,16 +664,16 @@ static int __init vfp_init(void)
 	barrier();
 	vfp_vector = vfp_null_entry;
 
-	printk(KERN_INFO "VFP support v0.3: ");
+	pr_info("VFP support v0.3: ");
 	if (VFP_arch)
-		printk("not present\n");
+		pr_cont("not present\n");
 	else if (vfpsid & FPSID_NODOUBLE) {
-		printk("no double precision support\n");
+		pr_cont("no double precision support\n");
 	} else {
 		hotcpu_notifier(vfp_hotplug, 0);
 
 		VFP_arch = (vfpsid & FPSID_ARCH_MASK) >> FPSID_ARCH_BIT;  /* Extract the architecture version */
-		printk("implementor %02x architecture %d part %02x variant %x rev %x\n",
+		pr_cont("implementor %02x architecture %d part %02x variant %x rev %x\n",
 			(vfpsid & FPSID_IMPLEMENTER_MASK) >> FPSID_IMPLEMENTER_BIT,
 			(vfpsid & FPSID_ARCH_MASK) >> FPSID_ARCH_BIT,
 			(vfpsid & FPSID_PART_MASK) >> FPSID_PART_BIT,