Merge branch 'for-rmk' of git://linux-arm.org/linux-2.6-wd into devel-stable
Conflicts:
	arch/arm/mach-imx/mach-cpuimx27.c
commit 4722cd7741
106 changed files with 1507 additions and 1090 deletions
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2649,11 +2649,11 @@ F:	drivers/net/wan/dlci.c
 F:	drivers/net/wan/sdla.c
 
 FRAMEBUFFER LAYER
-M:	Paul Mundt <lethal@linux-sh.org>
+M:	Florian Tobias Schandinat <FlorianSchandinat@gmx.de>
 L:	linux-fbdev@vger.kernel.org
 W:	http://linux-fbdev.sourceforge.net/
 Q:	http://patchwork.kernel.org/project/linux-fbdev/list/
-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/lethal/fbdev-2.6.git
+T:	git git://github.com/schandinat/linux-2.6.git fbdev-next
 S:	Maintained
 F:	Documentation/fb/
 F:	Documentation/devicetree/bindings/fb/
@@ -4450,8 +4450,8 @@ M:	"David S. Miller" <davem@davemloft.net>
 L:	netdev@vger.kernel.org
 W:	http://www.linuxfoundation.org/en/Net
 W:	http://patchwork.ozlabs.org/project/netdev/list/
-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6.git
-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next-2.6.git
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net.git
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next.git
 S:	Maintained
 F:	net/
 F:	include/net/
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1271,6 +1271,18 @@ config ARM_ERRATA_754327
	  This workaround defines cpu_relax() as smp_mb(), preventing correctly
	  written polling loops from denying visibility of updates to memory.
 
+config ARM_ERRATA_364296
+	bool "ARM errata: Possible cache data corruption with hit-under-miss enabled"
+	depends on CPU_V6 && !SMP
+	help
+	  This options enables the workaround for the 364296 ARM1136
+	  r0p2 erratum (possible cache data corruption with
+	  hit-under-miss enabled). It sets the undocumented bit 31 in
+	  the auxiliary control register and the FI bit in the control
+	  register, thus disabling hit-under-miss without putting the
+	  processor into full low interrupt latency mode. ARM11MPCore
+	  is not affected.
+
 endmenu
 
 source "arch/arm/common/Kconfig"
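Note on the new errata option above: its help text names the whole mechanism — set the undocumented bit 31 of the ARM1136 auxiliary control register and the FI bit of the system control register. The kernel applies this during early CPU setup in assembly; the C sketch below only illustrates that read-modify-write sequence, assuming the usual cp15 encodings and the kernel's CR_FI bit position (bit 21):

	/* Sketch only -- not code from this commit. */
	static void arm1136_erratum_364296_sketch(void)
	{
		unsigned long aux, ctrl;

		asm volatile("mrc p15, 0, %0, c1, c0, 1" : "=r" (aux));
		aux |= 1UL << 31;	/* undocumented bit 31: disable hit-under-miss */
		asm volatile("mcr p15, 0, %0, c1, c0, 1" : : "r" (aux));

		asm volatile("mrc p15, 0, %0, c1, c0, 0" : "=r" (ctrl));
		ctrl |= 1UL << 21;	/* FI bit (CR_FI): low interrupt latency */
		asm volatile("mcr p15, 0, %0, c1, c0, 0" : : "r" (ctrl));
	}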
--- a/arch/arm/boot/compressed/mmcif-sh7372.c
+++ b/arch/arm/boot/compressed/mmcif-sh7372.c
@@ -82,7 +82,7 @@ asmlinkage void mmc_loader(unsigned char *buf, unsigned long len)
 
 
	/* Disable clock to MMC hardware block */
-	__raw_writel(__raw_readl(SMSTPCR3) & (1 << 12), SMSTPCR3);
+	__raw_writel(__raw_readl(SMSTPCR3) | (1 << 12), SMSTPCR3);
 
	mmc_update_progress(MMC_PROGRESS_DONE);
 }
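The one-character change above is a genuine fix, not a cleanup. SMSTPCR3 is a module-stop register: a 1 in bit 12 gates the MMCIF clock off. The old code and-ed the register value with (1 << 12), which zeroes every other bit of the register instead of setting bit 12. A minimal sketch of the difference, reusing the same accessors:

	u32 v = __raw_readl(SMSTPCR3);
	__raw_writel(v & (1 << 12), SMSTPCR3);	/* bug: clobbers bits 0-11 and 13-31 */
	__raw_writel(v | (1 << 12), SMSTPCR3);	/* fix: sets only bit 12 */

The next file gets the identical fix for bit 13 (the SDHI1 clock).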
--- a/arch/arm/boot/compressed/sdhi-sh7372.c
+++ b/arch/arm/boot/compressed/sdhi-sh7372.c
@@ -85,7 +85,7 @@ asmlinkage void mmc_loader(unsigned short *buf, unsigned long len)
		goto err;
 
	/* Disable clock to SDHI1 hardware block */
-	__raw_writel(__raw_readl(SMSTPCR3) & (1 << 13), SMSTPCR3);
+	__raw_writel(__raw_readl(SMSTPCR3) | (1 << 13), SMSTPCR3);
 
	mmc_update_progress(MMC_PROGRESS_DONE);
 
--- a/arch/arm/include/asm/hardware/cache-l2x0.h
+++ b/arch/arm/include/asm/hardware/cache-l2x0.h
@@ -64,7 +64,7 @@
 #define L2X0_AUX_CTRL_MASK			0xc0000fff
 #define L2X0_AUX_CTRL_ASSOCIATIVITY_SHIFT	16
 #define L2X0_AUX_CTRL_WAY_SIZE_SHIFT		17
-#define L2X0_AUX_CTRL_WAY_SIZE_MASK		(0x3 << 17)
+#define L2X0_AUX_CTRL_WAY_SIZE_MASK		(0x7 << 17)
 #define L2X0_AUX_CTRL_SHARE_OVERRIDE_SHIFT	22
 #define L2X0_AUX_CTRL_NS_LOCKDOWN_SHIFT		26
 #define L2X0_AUX_CTRL_NS_INT_CTRL_SHIFT		27
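Why the mask change above matters: the way-size field in the L2X0/PL310 auxiliary control register is three bits wide (bits 19:17), so masking with 0x3 << 17 silently truncated the larger way-size encodings. With the corrected define, decoding the field looks like this (sketch; l2x0_base and L2X0_AUX_CTRL are the driver's existing names):

	u32 aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
	u32 way_size = (aux & L2X0_AUX_CTRL_WAY_SIZE_MASK)
			>> L2X0_AUX_CTRL_WAY_SIZE_SHIFT;	/* 3-bit encoding, 0-7 */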
--- a/arch/arm/include/asm/hw_breakpoint.h
+++ b/arch/arm/include/asm/hw_breakpoint.h
@@ -50,6 +50,7 @@ static inline void decode_ctrl_reg(u32 reg,
 #define ARM_DEBUG_ARCH_V6_1	2
 #define ARM_DEBUG_ARCH_V7_ECP14	3
 #define ARM_DEBUG_ARCH_V7_MM	4
+#define ARM_DEBUG_ARCH_V7_1	5
 
 /* Breakpoint */
 #define ARM_BREAKPOINT_EXECUTE	0
@@ -57,6 +58,7 @@ static inline void decode_ctrl_reg(u32 reg,
 /* Watchpoints */
 #define ARM_BREAKPOINT_LOAD	1
 #define ARM_BREAKPOINT_STORE	2
+#define ARM_FSR_ACCESS_MASK	(1 << 11)
 
 /* Privilege Levels */
 #define ARM_BREAKPOINT_PRIV	1
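ARM_FSR_ACCESS_MASK above picks out bit 11 of the fault status register, the write-not-read (WnR) bit. The new watchpoint handler later in this merge uses it to tell loads from stores; the test reduces to this sketch:

	static int wp_access_type(unsigned int fsr)
	{
		return (fsr & ARM_FSR_ACCESS_MASK) ? HW_BREAKPOINT_W
						   : HW_BREAKPOINT_R;
	}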
--- a/arch/arm/include/asm/pmu.h
+++ b/arch/arm/include/asm/pmu.h
@@ -13,7 +13,12 @@
 #define __ARM_PMU_H__
 
 #include <linux/interrupt.h>
+#include <linux/perf_event.h>
 
+/*
+ * Types of PMUs that can be accessed directly and require mutual
+ * exclusion between profiling tools.
+ */
 enum arm_pmu_type {
	ARM_PMU_DEVICE_CPU	= 0,
	ARM_NUM_PMU_DEVICES,
@@ -37,21 +42,17 @@ struct arm_pmu_platdata {
  * reserve_pmu() - reserve the hardware performance counters
  *
  * Reserve the hardware performance counters in the system for exclusive use.
- * The platform_device for the system is returned on success, ERR_PTR()
- * encoded error on failure.
+ * Returns 0 on success or -EBUSY if the lock is already held.
  */
-extern struct platform_device *
-reserve_pmu(enum arm_pmu_type device);
+extern int
+reserve_pmu(enum arm_pmu_type type);
 
 /**
  * release_pmu() - Relinquish control of the performance counters
  *
  * Release the performance counters and allow someone else to use them.
- * Callers must have disabled the counters and released IRQs before calling
- * this. The platform_device returned from reserve_pmu() must be passed as
- * a cookie.
  */
-extern int
+extern void
 release_pmu(enum arm_pmu_type type);
 
 /**
@@ -62,30 +63,84 @@ release_pmu(enum arm_pmu_type type);
  * the actual hardware initialisation.
  */
 extern int
-init_pmu(enum arm_pmu_type device);
+init_pmu(enum arm_pmu_type type);
 
 #else /* CONFIG_CPU_HAS_PMU */
 
 #include <linux/err.h>
 
-static inline struct platform_device *
-reserve_pmu(enum arm_pmu_type device)
-{
-	return ERR_PTR(-ENODEV);
-}
-
 static inline int
-release_pmu(struct platform_device *pdev)
+reserve_pmu(enum arm_pmu_type type)
 {
	return -ENODEV;
 }
 
-static inline int
-init_pmu(enum arm_pmu_type device)
-{
-	return -ENODEV;
-}
+static inline void
+release_pmu(enum arm_pmu_type type)	{ }
 
 #endif /* CONFIG_CPU_HAS_PMU */
 
+#ifdef CONFIG_HW_PERF_EVENTS
+
+/* The events for a given PMU register set. */
+struct pmu_hw_events {
+	/*
+	 * The events that are active on the PMU for the given index.
+	 */
+	struct perf_event	**events;
+
+	/*
+	 * A 1 bit for an index indicates that the counter is being used for
+	 * an event. A 0 means that the counter can be used.
+	 */
+	unsigned long		*used_mask;
+
+	/*
+	 * Hardware lock to serialize accesses to PMU registers. Needed for the
+	 * read/modify/write sequences.
+	 */
+	raw_spinlock_t		pmu_lock;
+};
+
+struct arm_pmu {
+	struct pmu	pmu;
+	enum arm_perf_pmu_ids id;
+	enum arm_pmu_type type;
+	cpumask_t	active_irqs;
+	const char	*name;
+	irqreturn_t	(*handle_irq)(int irq_num, void *dev);
+	void		(*enable)(struct hw_perf_event *evt, int idx);
+	void		(*disable)(struct hw_perf_event *evt, int idx);
+	int		(*get_event_idx)(struct pmu_hw_events *hw_events,
+					 struct hw_perf_event *hwc);
+	int		(*set_event_filter)(struct hw_perf_event *evt,
+					    struct perf_event_attr *attr);
+	u32		(*read_counter)(int idx);
+	void		(*write_counter)(int idx, u32 val);
+	void		(*start)(void);
+	void		(*stop)(void);
+	void		(*reset)(void *);
+	int		(*map_event)(struct perf_event *event);
+	int		num_events;
+	atomic_t	active_events;
+	struct mutex	reserve_mutex;
+	u64		max_period;
+	struct platform_device	*plat_device;
+	struct pmu_hw_events	*(*get_hw_events)(void);
+};
+
+#define to_arm_pmu(p) (container_of(p, struct arm_pmu, pmu))
+
+int __init armpmu_register(struct arm_pmu *armpmu, char *name, int type);
+
+u64 armpmu_event_update(struct perf_event *event,
+			struct hw_perf_event *hwc,
+			int idx, int overflow);
+
+int armpmu_event_set_period(struct perf_event *event,
+			    struct hw_perf_event *hwc,
+			    int idx);
+
+#endif /* CONFIG_HW_PERF_EVENTS */
+
 #endif /* __ARM_PMU_H__ */
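The API change above inverts ownership of the platform device: reserve_pmu() now returns 0 or -EBUSY instead of a platform_device pointer, because the device now travels in struct arm_pmu (plat_device). A caller under the new convention looks roughly like this (sketch mirroring armpmu_reserve_hardware() later in this merge):

	int err = reserve_pmu(armpmu->type);	/* 0 on success, -EBUSY if held */
	if (err)
		return err;
	/* ... request IRQs via armpmu->plat_device ... */
	release_pmu(armpmu->type);		/* teardown; now returns void */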
--- a/arch/arm/kernel/hw_breakpoint.c
+++ b/arch/arm/kernel/hw_breakpoint.c
@@ -45,7 +45,6 @@ static DEFINE_PER_CPU(struct perf_event *, wp_on_reg[ARM_MAX_WRP]);
 
 /* Number of BRP/WRP registers on this CPU. */
 static int core_num_brps;
-static int core_num_reserved_brps;
 static int core_num_wrps;
 
 /* Debug architecture version. */
@@ -137,10 +136,11 @@ static u8 get_debug_arch(void)
	u32 didr;
 
	/* Do we implement the extended CPUID interface? */
-	if (WARN_ONCE((((read_cpuid_id() >> 16) & 0xf) != 0xf),
-	    "CPUID feature registers not supported. "
-	    "Assuming v6 debug is present.\n"))
+	if (((read_cpuid_id() >> 16) & 0xf) != 0xf) {
+		pr_warning("CPUID feature registers not supported. "
+			   "Assuming v6 debug is present.\n");
		return ARM_DEBUG_ARCH_V6;
+	}
 
	ARM_DBG_READ(c0, 0, didr);
	return (didr >> 16) & 0xf;
@@ -154,10 +154,21 @@ u8 arch_get_debug_arch(void)
 static int debug_arch_supported(void)
 {
	u8 arch = get_debug_arch();
-	return arch >= ARM_DEBUG_ARCH_V6 && arch <= ARM_DEBUG_ARCH_V7_ECP14;
+
+	/* We don't support the memory-mapped interface. */
+	return (arch >= ARM_DEBUG_ARCH_V6 && arch <= ARM_DEBUG_ARCH_V7_ECP14) ||
+		arch >= ARM_DEBUG_ARCH_V7_1;
 }
 
-/* Determine number of BRP register available. */
+/* Determine number of WRP registers available. */
+static int get_num_wrp_resources(void)
+{
+	u32 didr;
+	ARM_DBG_READ(c0, 0, didr);
+	return ((didr >> 28) & 0xf) + 1;
+}
+
+/* Determine number of BRP registers available. */
 static int get_num_brp_resources(void)
 {
	u32 didr;
@@ -176,9 +187,10 @@ static int core_has_mismatch_brps(void)
 static int get_num_wrps(void)
 {
	/*
-	 * FIXME: When a watchpoint fires, the only way to work out which
-	 * watchpoint it was is by disassembling the faulting instruction
-	 * and working out the address of the memory access.
+	 * On debug architectures prior to 7.1, when a watchpoint fires, the
+	 * only way to work out which watchpoint it was is by disassembling
+	 * the faulting instruction and working out the address of the memory
+	 * access.
	 *
	 * Furthermore, we can only do this if the watchpoint was precise
	 * since imprecise watchpoints prevent us from calculating register
@@ -192,36 +204,17 @@ static int get_num_wrps(void)
	 * [the ARM ARM states that the DFAR is UNKNOWN, but experience shows
	 * that it is set on some implementations].
	 */
-#if 0
-	int wrps;
-	u32 didr;
-	ARM_DBG_READ(c0, 0, didr);
-	wrps = ((didr >> 28) & 0xf) + 1;
-#endif
-	int wrps = 1;
-
-	if (core_has_mismatch_brps() && wrps >= get_num_brp_resources())
-		wrps = get_num_brp_resources() - 1;
-
-	return wrps;
-}
-
-/* We reserve one breakpoint for each watchpoint. */
-static int get_num_reserved_brps(void)
-{
-	if (core_has_mismatch_brps())
-		return get_num_wrps();
-	return 0;
+	if (get_debug_arch() < ARM_DEBUG_ARCH_V7_1)
+		return 1;
+
+	return get_num_wrp_resources();
 }
 
 /* Determine number of usable BRPs available. */
 static int get_num_brps(void)
 {
	int brps = get_num_brp_resources();
-	if (core_has_mismatch_brps())
-		brps -= get_num_reserved_brps();
-	return brps;
+	return core_has_mismatch_brps() ? brps - 1 : brps;
 }
 
 /*
@@ -255,6 +248,7 @@ static int enable_monitor_mode(void)
		ARM_DBG_WRITE(c1, 0, (dscr | ARM_DSCR_MDBGEN));
		break;
	case ARM_DEBUG_ARCH_V7_ECP14:
+	case ARM_DEBUG_ARCH_V7_1:
		ARM_DBG_WRITE(c2, 2, (dscr | ARM_DSCR_MDBGEN));
		break;
	default:
@@ -346,24 +340,10 @@ int arch_install_hw_breakpoint(struct perf_event *bp)
		val_base = ARM_BASE_BVR;
		slots = (struct perf_event **)__get_cpu_var(bp_on_reg);
		max_slots = core_num_brps;
-		if (info->step_ctrl.enabled) {
-			/* Override the breakpoint data with the step data. */
-			addr = info->trigger & ~0x3;
-			ctrl = encode_ctrl_reg(info->step_ctrl);
-		}
	} else {
		/* Watchpoint */
-		if (info->step_ctrl.enabled) {
-			/* Install into the reserved breakpoint region. */
-			ctrl_base = ARM_BASE_BCR + core_num_brps;
-			val_base = ARM_BASE_BVR + core_num_brps;
-			/* Override the watchpoint data with the step data. */
-			addr = info->trigger & ~0x3;
-			ctrl = encode_ctrl_reg(info->step_ctrl);
-		} else {
		ctrl_base = ARM_BASE_WCR;
		val_base = ARM_BASE_WVR;
-		}
		slots = (struct perf_event **)__get_cpu_var(wp_on_reg);
		max_slots = core_num_wrps;
	}
@@ -382,6 +362,17 @@ int arch_install_hw_breakpoint(struct perf_event *bp)
		goto out;
	}
 
+	/* Override the breakpoint data with the step data. */
+	if (info->step_ctrl.enabled) {
+		addr = info->trigger & ~0x3;
+		ctrl = encode_ctrl_reg(info->step_ctrl);
+		if (info->ctrl.type != ARM_BREAKPOINT_EXECUTE) {
+			i = 0;
+			ctrl_base = ARM_BASE_BCR + core_num_brps;
+			val_base = ARM_BASE_BVR + core_num_brps;
+		}
+	}
+
	/* Setup the address register. */
	write_wb_reg(val_base + i, addr);
 
@@ -405,9 +396,6 @@ void arch_uninstall_hw_breakpoint(struct perf_event *bp)
		max_slots = core_num_brps;
	} else {
		/* Watchpoint */
-		if (info->step_ctrl.enabled)
-			base = ARM_BASE_BCR + core_num_brps;
-		else
		base = ARM_BASE_WCR;
		slots = (struct perf_event **)__get_cpu_var(wp_on_reg);
		max_slots = core_num_wrps;
@@ -426,6 +414,13 @@ void arch_uninstall_hw_breakpoint(struct perf_event *bp)
	if (WARN_ONCE(i == max_slots, "Can't find any breakpoint slot\n"))
		return;
 
+	/* Ensure that we disable the mismatch breakpoint. */
+	if (info->ctrl.type != ARM_BREAKPOINT_EXECUTE &&
+	    info->step_ctrl.enabled) {
+		i = 0;
+		base = ARM_BASE_BCR + core_num_brps;
+	}
+
	/* Reset the control register. */
	write_wb_reg(base + i, 0);
 }
@@ -632,10 +627,9 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
	 * we can use the mismatch feature as a poor-man's hardware
	 * single-step, but this only works for per-task breakpoints.
	 */
-	if (WARN_ONCE(!bp->overflow_handler &&
-		(arch_check_bp_in_kernelspace(bp) || !core_has_mismatch_brps()
-		 || !bp->hw.bp_target),
-			"overflow handler required but none found\n")) {
+	if (!bp->overflow_handler && (arch_check_bp_in_kernelspace(bp) ||
+	    !core_has_mismatch_brps() || !bp->hw.bp_target)) {
+		pr_warning("overflow handler required but none found\n");
		ret = -EINVAL;
	}
 out:
@@ -666,34 +660,62 @@ static void disable_single_step(struct perf_event *bp)
	arch_install_hw_breakpoint(bp);
 }
 
-static void watchpoint_handler(unsigned long unknown, struct pt_regs *regs)
+static void watchpoint_handler(unsigned long addr, unsigned int fsr,
+			       struct pt_regs *regs)
 {
-	int i;
+	int i, access;
+	u32 val, ctrl_reg, alignment_mask;
	struct perf_event *wp, **slots;
	struct arch_hw_breakpoint *info;
+	struct arch_hw_breakpoint_ctrl ctrl;
 
	slots = (struct perf_event **)__get_cpu_var(wp_on_reg);
 
-	/* Without a disassembler, we can only handle 1 watchpoint. */
-	BUG_ON(core_num_wrps > 1);
-
	for (i = 0; i < core_num_wrps; ++i) {
		rcu_read_lock();
 
		wp = slots[i];
 
-		if (wp == NULL) {
-			rcu_read_unlock();
-			continue;
+		if (wp == NULL)
+			goto unlock;
+
+		info = counter_arch_bp(wp);
+		/*
+		 * The DFAR is an unknown value on debug architectures prior
+		 * to 7.1. Since we only allow a single watchpoint on these
+		 * older CPUs, we can set the trigger to the lowest possible
+		 * faulting address.
+		 */
+		if (debug_arch < ARM_DEBUG_ARCH_V7_1) {
+			BUG_ON(i > 0);
+			info->trigger = wp->attr.bp_addr;
+		} else {
+			if (info->ctrl.len == ARM_BREAKPOINT_LEN_8)
+				alignment_mask = 0x7;
+			else
+				alignment_mask = 0x3;
+
+			/* Check if the watchpoint value matches. */
+			val = read_wb_reg(ARM_BASE_WVR + i);
+			if (val != (addr & ~alignment_mask))
+				goto unlock;
+
+			/* Possible match, check the byte address select. */
+			ctrl_reg = read_wb_reg(ARM_BASE_WCR + i);
+			decode_ctrl_reg(ctrl_reg, &ctrl);
+			if (!((1 << (addr & alignment_mask)) & ctrl.len))
+				goto unlock;
+
+			/* Check that the access type matches. */
+			access = (fsr & ARM_FSR_ACCESS_MASK) ? HW_BREAKPOINT_W :
+				 HW_BREAKPOINT_R;
+			if (!(access & hw_breakpoint_type(wp)))
+				goto unlock;
+
+			/* We have a winner. */
+			info->trigger = addr;
		}
 
-		/*
-		 * The DFAR is an unknown value. Since we only allow a
-		 * single watchpoint, we can set the trigger to the lowest
-		 * possible faulting address.
-		 */
-		info = counter_arch_bp(wp);
-		info->trigger = wp->attr.bp_addr;
		pr_debug("watchpoint fired: address = 0x%x\n", info->trigger);
		perf_bp_event(wp, regs);
 
@@ -705,6 +727,7 @@ static void watchpoint_handler(unsigned long unknown, struct pt_regs *regs)
		if (!wp->overflow_handler)
			enable_single_step(wp, instruction_pointer(regs));
 
+unlock:
		rcu_read_unlock();
	}
 }
@@ -717,7 +740,7 @@ static void watchpoint_single_step_handler(unsigned long pc)
 
	slots = (struct perf_event **)__get_cpu_var(wp_on_reg);
 
-	for (i = 0; i < core_num_reserved_brps; ++i) {
+	for (i = 0; i < core_num_wrps; ++i) {
		rcu_read_lock();
 
		wp = slots[i];
@@ -820,7 +843,7 @@ static int hw_breakpoint_pending(unsigned long addr, unsigned int fsr,
	case ARM_ENTRY_ASYNC_WATCHPOINT:
		WARN(1, "Asynchronous watchpoint exception taken. Debugging results may be unreliable\n");
	case ARM_ENTRY_SYNC_WATCHPOINT:
-		watchpoint_handler(addr, regs);
+		watchpoint_handler(addr, fsr, regs);
		break;
	default:
		ret = 1; /* Unhandled fault. */
@@ -834,11 +857,31 @@ static int hw_breakpoint_pending(unsigned long addr, unsigned int fsr,
 /*
  * One-time initialisation.
  */
-static void reset_ctrl_regs(void *info)
+static cpumask_t debug_err_mask;
+
+static int debug_reg_trap(struct pt_regs *regs, unsigned int instr)
 {
-	int i, cpu = smp_processor_id();
+	int cpu = smp_processor_id();
+
+	pr_warning("Debug register access (0x%x) caused undefined instruction on CPU %d\n",
+		   instr, cpu);
+
+	/* Set the error flag for this CPU and skip the faulting instruction. */
+	cpumask_set_cpu(cpu, &debug_err_mask);
+	instruction_pointer(regs) += 4;
+	return 0;
+}
+
+static struct undef_hook debug_reg_hook = {
+	.instr_mask	= 0x0fe80f10,
+	.instr_val	= 0x0e000e10,
+	.fn		= debug_reg_trap,
+};
+
+static void reset_ctrl_regs(void *unused)
+{
+	int i, raw_num_brps, err = 0, cpu = smp_processor_id();
	u32 dbg_power;
-	cpumask_t *cpumask = info;
 
	/*
	 * v7 debug contains save and restore registers so that debug state
@@ -848,15 +891,29 @@ static void reset_ctrl_regs(void *info)
	 * Access Register to avoid taking undefined instruction exceptions
	 * later on.
	 */
-	if (debug_arch >= ARM_DEBUG_ARCH_V7_ECP14) {
+	switch (debug_arch) {
+	case ARM_DEBUG_ARCH_V7_ECP14:
		/*
		 * Ensure sticky power-down is clear (i.e. debug logic is
		 * powered up).
		 */
		asm volatile("mrc p14, 0, %0, c1, c5, 4" : "=r" (dbg_power));
-		if ((dbg_power & 0x1) == 0) {
+		if ((dbg_power & 0x1) == 0)
+			err = -EPERM;
+		break;
+	case ARM_DEBUG_ARCH_V7_1:
+		/*
+		 * Ensure the OS double lock is clear.
+		 */
+		asm volatile("mrc p14, 0, %0, c1, c3, 4" : "=r" (dbg_power));
+		if ((dbg_power & 0x1) == 1)
+			err = -EPERM;
+		break;
+	}
+
+	if (err) {
		pr_warning("CPU %d debug is powered down!\n", cpu);
-		cpumask_or(cpumask, cpumask, cpumask_of(cpu));
+		cpumask_or(&debug_err_mask, &debug_err_mask, cpumask_of(cpu));
		return;
	}
 
@@ -873,13 +930,13 @@ static void reset_ctrl_regs(void *info)
	 */
	asm volatile("mcr p14, 0, %0, c0, c7, 0" : : "r" (0));
	isb();
-	}
 
	if (enable_monitor_mode())
		return;
 
	/* We must also reset any reserved registers. */
-	for (i = 0; i < core_num_brps + core_num_reserved_brps; ++i) {
+	raw_num_brps = get_num_brp_resources();
+	for (i = 0; i < raw_num_brps; ++i) {
		write_wb_reg(ARM_BASE_BCR + i, 0UL);
		write_wb_reg(ARM_BASE_BVR + i, 0UL);
	}
@@ -895,6 +952,7 @@ static int __cpuinit dbg_reset_notify(struct notifier_block *self,
 {
	if (action == CPU_ONLINE)
		smp_call_function_single((int)cpu, reset_ctrl_regs, NULL, 1);
+
	return NOTIFY_OK;
 }
 
@@ -905,7 +963,6 @@ static struct notifier_block __cpuinitdata dbg_reset_nb = {
 static int __init arch_hw_breakpoint_init(void)
 {
	u32 dscr;
-	cpumask_t cpumask = { CPU_BITS_NONE };
 
	debug_arch = get_debug_arch();
 
@@ -916,28 +973,31 @@ static int __init arch_hw_breakpoint_init(void)
 
	/* Determine how many BRPs/WRPs are available. */
	core_num_brps = get_num_brps();
-	core_num_reserved_brps = get_num_reserved_brps();
	core_num_wrps = get_num_wrps();
 
-	pr_info("found %d breakpoint and %d watchpoint registers.\n",
-		core_num_brps + core_num_reserved_brps, core_num_wrps);
-
-	if (core_num_reserved_brps)
-		pr_info("%d breakpoint(s) reserved for watchpoint "
-				"single-step.\n", core_num_reserved_brps);
+	/*
+	 * We need to tread carefully here because DBGSWENABLE may be
+	 * driven low on this core and there isn't an architected way to
+	 * determine that.
+	 */
+	register_undef_hook(&debug_reg_hook);
 
	/*
	 * Reset the breakpoint resources. We assume that a halting
	 * debugger will leave the world in a nice state for us.
	 */
-	on_each_cpu(reset_ctrl_regs, &cpumask, 1);
-	if (!cpumask_empty(&cpumask)) {
+	on_each_cpu(reset_ctrl_regs, NULL, 1);
+	unregister_undef_hook(&debug_reg_hook);
+	if (!cpumask_empty(&debug_err_mask)) {
		core_num_brps = 0;
-		core_num_reserved_brps = 0;
		core_num_wrps = 0;
		return 0;
	}
 
+	pr_info("found %d " "%s" "breakpoint and %d watchpoint registers.\n",
+		core_num_brps, core_has_mismatch_brps() ? "(+1 reserved) " :
+		"", core_num_wrps);
+
	ARM_DBG_READ(c1, 0, dscr);
	if (dscr & ARM_DSCR_HDBGEN) {
		max_watchpoint_len = 4;
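A worked example of the new matching logic in watchpoint_handler() above: WVR holds the word-aligned base address and ctrl.len is the byte-address-select bitmap, one bit per byte of the watched word. For a one-byte watchpoint on 0x1002 (WVR = 0x1000, BAS = 0x4), a faulting access at 0x1003 passes the value check but fails the byte select, so the slot is skipped:

	u32 addr = 0x1003;		/* faulting address (DFAR) */
	u32 val  = 0x1000;		/* WVR: word-aligned base */
	u32 bas  = 0x4;			/* ctrl.len: byte at offset 2 */
	u32 alignment_mask = 0x3;	/* 4-byte watchpoint */

	/* value check: 0x1003 & ~3 == 0x1000 -> same word, possible match */
	/* byte select: 1 << (0x1003 & 3) == 0x8; 0x8 & 0x4 == 0 -> no match */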
@ -12,6 +12,7 @@
|
||||||
*/
|
*/
|
||||||
#define pr_fmt(fmt) "hw perfevents: " fmt
|
#define pr_fmt(fmt) "hw perfevents: " fmt
|
||||||
|
|
||||||
|
#include <linux/bitmap.h>
|
||||||
#include <linux/interrupt.h>
|
#include <linux/interrupt.h>
|
||||||
#include <linux/kernel.h>
|
#include <linux/kernel.h>
|
||||||
#include <linux/module.h>
|
#include <linux/module.h>
|
||||||
|
@ -26,16 +27,8 @@
|
||||||
#include <asm/pmu.h>
|
#include <asm/pmu.h>
|
||||||
#include <asm/stacktrace.h>
|
#include <asm/stacktrace.h>
|
||||||
|
|
||||||
static struct platform_device *pmu_device;
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Hardware lock to serialize accesses to PMU registers. Needed for the
|
* ARMv6 supports a maximum of 3 events, starting from index 0. If we add
|
||||||
* read/modify/write sequences.
|
|
||||||
*/
|
|
||||||
static DEFINE_RAW_SPINLOCK(pmu_lock);
|
|
||||||
|
|
||||||
/*
|
|
||||||
* ARMv6 supports a maximum of 3 events, starting from index 1. If we add
|
|
||||||
* another platform that supports more, we need to increase this to be the
|
* another platform that supports more, we need to increase this to be the
|
||||||
* largest of all platforms.
|
* largest of all platforms.
|
||||||
*
|
*
|
||||||
|
@ -43,62 +36,24 @@ static DEFINE_RAW_SPINLOCK(pmu_lock);
|
||||||
* cycle counter CCNT + 31 events counters CNT0..30.
|
* cycle counter CCNT + 31 events counters CNT0..30.
|
||||||
* Cortex-A8 has 1+4 counters, Cortex-A9 has 1+6 counters.
|
* Cortex-A8 has 1+4 counters, Cortex-A9 has 1+6 counters.
|
||||||
*/
|
*/
|
||||||
#define ARMPMU_MAX_HWEVENTS 33
|
#define ARMPMU_MAX_HWEVENTS 32
|
||||||
|
|
||||||
/* The events for a given CPU. */
|
static DEFINE_PER_CPU(struct perf_event * [ARMPMU_MAX_HWEVENTS], hw_events);
|
||||||
struct cpu_hw_events {
|
static DEFINE_PER_CPU(unsigned long [BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS)], used_mask);
|
||||||
/*
|
static DEFINE_PER_CPU(struct pmu_hw_events, cpu_hw_events);
|
||||||
* The events that are active on the CPU for the given index. Index 0
|
|
||||||
* is reserved.
|
|
||||||
*/
|
|
||||||
struct perf_event *events[ARMPMU_MAX_HWEVENTS];
|
|
||||||
|
|
||||||
/*
|
#define to_arm_pmu(p) (container_of(p, struct arm_pmu, pmu))
|
||||||
* A 1 bit for an index indicates that the counter is being used for
|
|
||||||
* an event. A 0 means that the counter can be used.
|
|
||||||
*/
|
|
||||||
unsigned long used_mask[BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS)];
|
|
||||||
|
|
||||||
/*
|
|
||||||
* A 1 bit for an index indicates that the counter is actively being
|
|
||||||
* used.
|
|
||||||
*/
|
|
||||||
unsigned long active_mask[BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS)];
|
|
||||||
};
|
|
||||||
static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);
|
|
||||||
|
|
||||||
struct arm_pmu {
|
|
||||||
enum arm_perf_pmu_ids id;
|
|
||||||
const char *name;
|
|
||||||
irqreturn_t (*handle_irq)(int irq_num, void *dev);
|
|
||||||
void (*enable)(struct hw_perf_event *evt, int idx);
|
|
||||||
void (*disable)(struct hw_perf_event *evt, int idx);
|
|
||||||
int (*get_event_idx)(struct cpu_hw_events *cpuc,
|
|
||||||
struct hw_perf_event *hwc);
|
|
||||||
u32 (*read_counter)(int idx);
|
|
||||||
void (*write_counter)(int idx, u32 val);
|
|
||||||
void (*start)(void);
|
|
||||||
void (*stop)(void);
|
|
||||||
void (*reset)(void *);
|
|
||||||
const unsigned (*cache_map)[PERF_COUNT_HW_CACHE_MAX]
|
|
||||||
[PERF_COUNT_HW_CACHE_OP_MAX]
|
|
||||||
[PERF_COUNT_HW_CACHE_RESULT_MAX];
|
|
||||||
const unsigned (*event_map)[PERF_COUNT_HW_MAX];
|
|
||||||
u32 raw_event_mask;
|
|
||||||
int num_events;
|
|
||||||
u64 max_period;
|
|
||||||
};
|
|
||||||
|
|
||||||
/* Set at runtime when we know what CPU type we are. */
|
/* Set at runtime when we know what CPU type we are. */
|
||||||
static const struct arm_pmu *armpmu;
|
static struct arm_pmu *cpu_pmu;
|
||||||
|
|
||||||
enum arm_perf_pmu_ids
|
enum arm_perf_pmu_ids
|
||||||
armpmu_get_pmu_id(void)
|
armpmu_get_pmu_id(void)
|
||||||
{
|
{
|
||||||
int id = -ENODEV;
|
int id = -ENODEV;
|
||||||
|
|
||||||
if (armpmu != NULL)
|
if (cpu_pmu != NULL)
|
||||||
id = armpmu->id;
|
id = cpu_pmu->id;
|
||||||
|
|
||||||
return id;
|
return id;
|
||||||
}
|
}
|
||||||
|
@ -109,8 +64,8 @@ armpmu_get_max_events(void)
|
||||||
{
|
{
|
||||||
int max_events = 0;
|
int max_events = 0;
|
||||||
|
|
||||||
if (armpmu != NULL)
|
if (cpu_pmu != NULL)
|
||||||
max_events = armpmu->num_events;
|
max_events = cpu_pmu->num_events;
|
||||||
|
|
||||||
return max_events;
|
return max_events;
|
||||||
}
|
}
|
||||||
|
@ -130,7 +85,11 @@ EXPORT_SYMBOL_GPL(perf_num_counters);
|
||||||
#define CACHE_OP_UNSUPPORTED 0xFFFF
|
#define CACHE_OP_UNSUPPORTED 0xFFFF
|
||||||
|
|
||||||
static int
|
static int
|
||||||
armpmu_map_cache_event(u64 config)
|
armpmu_map_cache_event(const unsigned (*cache_map)
|
||||||
|
[PERF_COUNT_HW_CACHE_MAX]
|
||||||
|
[PERF_COUNT_HW_CACHE_OP_MAX]
|
||||||
|
[PERF_COUNT_HW_CACHE_RESULT_MAX],
|
||||||
|
u64 config)
|
||||||
{
|
{
|
||||||
unsigned int cache_type, cache_op, cache_result, ret;
|
unsigned int cache_type, cache_op, cache_result, ret;
|
||||||
|
|
||||||
|
@ -146,7 +105,7 @@ armpmu_map_cache_event(u64 config)
|
||||||
if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
|
if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
|
|
||||||
ret = (int)(*armpmu->cache_map)[cache_type][cache_op][cache_result];
|
ret = (int)(*cache_map)[cache_type][cache_op][cache_result];
|
||||||
|
|
||||||
if (ret == CACHE_OP_UNSUPPORTED)
|
if (ret == CACHE_OP_UNSUPPORTED)
|
||||||
return -ENOENT;
|
return -ENOENT;
|
||||||
|
@ -155,23 +114,46 @@ armpmu_map_cache_event(u64 config)
|
||||||
}
|
}
|
||||||
|
|
||||||
static int
|
static int
|
||||||
armpmu_map_event(u64 config)
|
armpmu_map_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config)
|
||||||
{
|
{
|
||||||
int mapping = (*armpmu->event_map)[config];
|
int mapping = (*event_map)[config];
|
||||||
return mapping == HW_OP_UNSUPPORTED ? -EOPNOTSUPP : mapping;
|
return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int
|
static int
|
||||||
armpmu_map_raw_event(u64 config)
|
armpmu_map_raw_event(u32 raw_event_mask, u64 config)
|
||||||
{
|
{
|
||||||
return (int)(config & armpmu->raw_event_mask);
|
return (int)(config & raw_event_mask);
|
||||||
}
|
}
|
||||||
|
|
||||||
static int
|
static int map_cpu_event(struct perf_event *event,
|
||||||
|
const unsigned (*event_map)[PERF_COUNT_HW_MAX],
|
||||||
|
const unsigned (*cache_map)
|
||||||
|
[PERF_COUNT_HW_CACHE_MAX]
|
||||||
|
[PERF_COUNT_HW_CACHE_OP_MAX]
|
||||||
|
[PERF_COUNT_HW_CACHE_RESULT_MAX],
|
||||||
|
u32 raw_event_mask)
|
||||||
|
{
|
||||||
|
u64 config = event->attr.config;
|
||||||
|
|
||||||
|
switch (event->attr.type) {
|
||||||
|
case PERF_TYPE_HARDWARE:
|
||||||
|
return armpmu_map_event(event_map, config);
|
||||||
|
case PERF_TYPE_HW_CACHE:
|
||||||
|
return armpmu_map_cache_event(cache_map, config);
|
||||||
|
case PERF_TYPE_RAW:
|
||||||
|
return armpmu_map_raw_event(raw_event_mask, config);
|
||||||
|
}
|
||||||
|
|
||||||
|
return -ENOENT;
|
||||||
|
}
|
||||||
|
|
||||||
|
int
|
||||||
armpmu_event_set_period(struct perf_event *event,
|
armpmu_event_set_period(struct perf_event *event,
|
||||||
struct hw_perf_event *hwc,
|
struct hw_perf_event *hwc,
|
||||||
int idx)
|
int idx)
|
||||||
{
|
{
|
||||||
|
struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
|
||||||
s64 left = local64_read(&hwc->period_left);
|
s64 left = local64_read(&hwc->period_left);
|
||||||
s64 period = hwc->sample_period;
|
s64 period = hwc->sample_period;
|
||||||
int ret = 0;
|
int ret = 0;
|
||||||
|
@ -202,11 +184,12 @@ armpmu_event_set_period(struct perf_event *event,
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
static u64
|
u64
|
||||||
armpmu_event_update(struct perf_event *event,
|
armpmu_event_update(struct perf_event *event,
|
||||||
struct hw_perf_event *hwc,
|
struct hw_perf_event *hwc,
|
||||||
int idx, int overflow)
|
int idx, int overflow)
|
||||||
{
|
{
|
||||||
|
struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
|
||||||
u64 delta, prev_raw_count, new_raw_count;
|
u64 delta, prev_raw_count, new_raw_count;
|
||||||
|
|
||||||
again:
|
again:
|
||||||
|
@ -246,11 +229,9 @@ armpmu_read(struct perf_event *event)
|
||||||
static void
|
static void
|
||||||
armpmu_stop(struct perf_event *event, int flags)
|
armpmu_stop(struct perf_event *event, int flags)
|
||||||
{
|
{
|
||||||
|
struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
|
||||||
struct hw_perf_event *hwc = &event->hw;
|
struct hw_perf_event *hwc = &event->hw;
|
||||||
|
|
||||||
if (!armpmu)
|
|
||||||
return;
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* ARM pmu always has to update the counter, so ignore
|
* ARM pmu always has to update the counter, so ignore
|
||||||
* PERF_EF_UPDATE, see comments in armpmu_start().
|
* PERF_EF_UPDATE, see comments in armpmu_start().
|
||||||
|
@ -266,11 +247,9 @@ armpmu_stop(struct perf_event *event, int flags)
|
||||||
static void
|
static void
|
||||||
armpmu_start(struct perf_event *event, int flags)
|
armpmu_start(struct perf_event *event, int flags)
|
||||||
{
|
{
|
||||||
|
struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
|
||||||
struct hw_perf_event *hwc = &event->hw;
|
struct hw_perf_event *hwc = &event->hw;
|
||||||
|
|
||||||
if (!armpmu)
|
|
||||||
return;
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* ARM pmu always has to reprogram the period, so ignore
|
* ARM pmu always has to reprogram the period, so ignore
|
||||||
* PERF_EF_RELOAD, see the comment below.
|
* PERF_EF_RELOAD, see the comment below.
|
||||||
|
@ -293,16 +272,16 @@ armpmu_start(struct perf_event *event, int flags)
|
||||||
static void
|
static void
|
||||||
armpmu_del(struct perf_event *event, int flags)
|
armpmu_del(struct perf_event *event, int flags)
|
||||||
{
|
{
|
||||||
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
|
struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
|
||||||
|
struct pmu_hw_events *hw_events = armpmu->get_hw_events();
|
||||||
struct hw_perf_event *hwc = &event->hw;
|
struct hw_perf_event *hwc = &event->hw;
|
||||||
int idx = hwc->idx;
|
int idx = hwc->idx;
|
||||||
|
|
||||||
WARN_ON(idx < 0);
|
WARN_ON(idx < 0);
|
||||||
|
|
||||||
clear_bit(idx, cpuc->active_mask);
|
|
||||||
armpmu_stop(event, PERF_EF_UPDATE);
|
armpmu_stop(event, PERF_EF_UPDATE);
|
||||||
cpuc->events[idx] = NULL;
|
hw_events->events[idx] = NULL;
|
||||||
clear_bit(idx, cpuc->used_mask);
|
clear_bit(idx, hw_events->used_mask);
|
||||||
|
|
||||||
perf_event_update_userpage(event);
|
perf_event_update_userpage(event);
|
||||||
}
|
}
|
||||||
|
@ -310,7 +289,8 @@ armpmu_del(struct perf_event *event, int flags)
|
||||||
static int
|
static int
|
||||||
armpmu_add(struct perf_event *event, int flags)
|
armpmu_add(struct perf_event *event, int flags)
|
||||||
{
|
{
|
||||||
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
|
struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
|
||||||
|
struct pmu_hw_events *hw_events = armpmu->get_hw_events();
|
||||||
struct hw_perf_event *hwc = &event->hw;
|
struct hw_perf_event *hwc = &event->hw;
|
||||||
int idx;
|
int idx;
|
||||||
int err = 0;
|
int err = 0;
|
||||||
|
@ -318,7 +298,7 @@ armpmu_add(struct perf_event *event, int flags)
|
||||||
perf_pmu_disable(event->pmu);
|
perf_pmu_disable(event->pmu);
|
||||||
|
|
||||||
/* If we don't have a space for the counter then finish early. */
|
/* If we don't have a space for the counter then finish early. */
|
||||||
idx = armpmu->get_event_idx(cpuc, hwc);
|
idx = armpmu->get_event_idx(hw_events, hwc);
|
||||||
if (idx < 0) {
|
if (idx < 0) {
|
||||||
err = idx;
|
err = idx;
|
||||||
goto out;
|
goto out;
|
||||||
|
@ -330,8 +310,7 @@ armpmu_add(struct perf_event *event, int flags)
|
||||||
*/
|
*/
|
||||||
event->hw.idx = idx;
|
event->hw.idx = idx;
|
||||||
armpmu->disable(hwc, idx);
|
armpmu->disable(hwc, idx);
|
||||||
cpuc->events[idx] = event;
|
hw_events->events[idx] = event;
|
||||||
set_bit(idx, cpuc->active_mask);
|
|
||||||
|
|
||||||
hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
|
hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
|
||||||
if (flags & PERF_EF_START)
|
if (flags & PERF_EF_START)
|
||||||
|
@ -345,25 +324,25 @@ out:
|
||||||
return err;
|
return err;
|
||||||
}
|
}
|
||||||
|
|
||||||
static struct pmu pmu;
|
|
||||||
|
|
||||||
static int
|
static int
|
||||||
validate_event(struct cpu_hw_events *cpuc,
|
validate_event(struct pmu_hw_events *hw_events,
|
||||||
struct perf_event *event)
|
struct perf_event *event)
|
||||||
{
|
{
|
||||||
|
struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
|
||||||
struct hw_perf_event fake_event = event->hw;
|
struct hw_perf_event fake_event = event->hw;
|
||||||
|
struct pmu *leader_pmu = event->group_leader->pmu;
|
||||||
|
|
||||||
if (event->pmu != &pmu || event->state <= PERF_EVENT_STATE_OFF)
|
if (event->pmu != leader_pmu || event->state <= PERF_EVENT_STATE_OFF)
|
||||||
return 1;
|
return 1;
|
||||||
|
|
||||||
return armpmu->get_event_idx(cpuc, &fake_event) >= 0;
|
return armpmu->get_event_idx(hw_events, &fake_event) >= 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int
|
static int
|
||||||
validate_group(struct perf_event *event)
|
validate_group(struct perf_event *event)
|
||||||
{
|
{
|
||||||
struct perf_event *sibling, *leader = event->group_leader;
|
struct perf_event *sibling, *leader = event->group_leader;
|
||||||
struct cpu_hw_events fake_pmu;
|
struct pmu_hw_events fake_pmu;
|
||||||
|
|
||||||
memset(&fake_pmu, 0, sizeof(fake_pmu));
|
memset(&fake_pmu, 0, sizeof(fake_pmu));
|
||||||
|
|
||||||
|
@ -383,110 +362,119 @@ validate_group(struct perf_event *event)
|
||||||
|
|
||||||
static irqreturn_t armpmu_platform_irq(int irq, void *dev)
|
static irqreturn_t armpmu_platform_irq(int irq, void *dev)
|
||||||
{
|
{
|
||||||
struct arm_pmu_platdata *plat = dev_get_platdata(&pmu_device->dev);
|
struct arm_pmu *armpmu = (struct arm_pmu *) dev;
|
||||||
|
struct platform_device *plat_device = armpmu->plat_device;
|
||||||
|
struct arm_pmu_platdata *plat = dev_get_platdata(&plat_device->dev);
|
||||||
|
|
||||||
return plat->handle_irq(irq, dev, armpmu->handle_irq);
|
return plat->handle_irq(irq, dev, armpmu->handle_irq);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static void
|
||||||
|
armpmu_release_hardware(struct arm_pmu *armpmu)
|
||||||
|
{
|
||||||
|
int i, irq, irqs;
|
||||||
|
struct platform_device *pmu_device = armpmu->plat_device;
|
||||||
|
|
||||||
|
irqs = min(pmu_device->num_resources, num_possible_cpus());
|
||||||
|
|
||||||
|
for (i = 0; i < irqs; ++i) {
|
||||||
|
if (!cpumask_test_and_clear_cpu(i, &armpmu->active_irqs))
|
||||||
|
continue;
|
||||||
|
irq = platform_get_irq(pmu_device, i);
|
||||||
|
if (irq >= 0)
|
||||||
|
free_irq(irq, armpmu);
|
||||||
|
}
|
||||||
|
|
||||||
|
release_pmu(armpmu->type);
|
||||||
|
}
|
||||||
|
|
||||||
static int
|
static int
|
||||||
armpmu_reserve_hardware(void)
|
armpmu_reserve_hardware(struct arm_pmu *armpmu)
|
||||||
{
|
{
|
||||||
struct arm_pmu_platdata *plat;
|
struct arm_pmu_platdata *plat;
|
||||||
irq_handler_t handle_irq;
|
irq_handler_t handle_irq;
|
||||||
int i, err = -ENODEV, irq;
|
int i, err, irq, irqs;
|
||||||
|
struct platform_device *pmu_device = armpmu->plat_device;
|
||||||
|
|
||||||
pmu_device = reserve_pmu(ARM_PMU_DEVICE_CPU);
|
err = reserve_pmu(armpmu->type);
|
||||||
if (IS_ERR(pmu_device)) {
|
if (err) {
|
||||||
pr_warning("unable to reserve pmu\n");
|
pr_warning("unable to reserve pmu\n");
|
||||||
return PTR_ERR(pmu_device);
|
return err;
|
||||||
}
|
}
|
||||||
|
|
||||||
init_pmu(ARM_PMU_DEVICE_CPU);
|
|
||||||
|
|
||||||
plat = dev_get_platdata(&pmu_device->dev);
|
plat = dev_get_platdata(&pmu_device->dev);
|
||||||
if (plat && plat->handle_irq)
|
if (plat && plat->handle_irq)
|
||||||
handle_irq = armpmu_platform_irq;
|
handle_irq = armpmu_platform_irq;
|
||||||
else
|
else
|
||||||
handle_irq = armpmu->handle_irq;
|
handle_irq = armpmu->handle_irq;
|
||||||
|
|
||||||
if (pmu_device->num_resources < 1) {
|
irqs = min(pmu_device->num_resources, num_possible_cpus());
|
||||||
|
if (irqs < 1) {
|
||||||
pr_err("no irqs for PMUs defined\n");
|
pr_err("no irqs for PMUs defined\n");
|
||||||
return -ENODEV;
|
return -ENODEV;
|
||||||
}
|
}
|
||||||
|
|
||||||
for (i = 0; i < pmu_device->num_resources; ++i) {
|
for (i = 0; i < irqs; ++i) {
|
||||||
|
err = 0;
|
||||||
irq = platform_get_irq(pmu_device, i);
|
irq = platform_get_irq(pmu_device, i);
|
||||||
if (irq < 0)
|
if (irq < 0)
|
||||||
continue;
|
continue;
|
||||||
|
|
||||||
|
/*
|
||||||
|
* If we have a single PMU interrupt that we can't shift,
|
||||||
|
* assume that we're running on a uniprocessor machine and
|
||||||
|
* continue. Otherwise, continue without this interrupt.
|
||||||
|
*/
|
||||||
|
if (irq_set_affinity(irq, cpumask_of(i)) && irqs > 1) {
|
||||||
|
pr_warning("unable to set irq affinity (irq=%d, cpu=%u)\n",
|
||||||
|
irq, i);
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
|
||||||
err = request_irq(irq, handle_irq,
|
err = request_irq(irq, handle_irq,
|
||||||
IRQF_DISABLED | IRQF_NOBALANCING,
|
IRQF_DISABLED | IRQF_NOBALANCING,
|
||||||
"armpmu", NULL);
|
"arm-pmu", armpmu);
|
||||||
if (err) {
|
if (err) {
|
||||||
pr_warning("unable to request IRQ%d for ARM perf "
|
pr_err("unable to request IRQ%d for ARM PMU counters\n",
|
||||||
"counters\n", irq);
|
irq);
|
||||||
break;
|
armpmu_release_hardware(armpmu);
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if (err) {
|
|
||||||
for (i = i - 1; i >= 0; --i) {
|
|
||||||
irq = platform_get_irq(pmu_device, i);
|
|
||||||
if (irq >= 0)
|
|
||||||
free_irq(irq, NULL);
|
|
||||||
}
|
|
||||||
release_pmu(ARM_PMU_DEVICE_CPU);
|
|
||||||
pmu_device = NULL;
|
|
||||||
}
|
|
||||||
|
|
||||||
return err;
|
return err;
|
||||||
}
|
}
|
||||||
|
|
||||||
static void
|
cpumask_set_cpu(i, &armpmu->active_irqs);
|
||||||
armpmu_release_hardware(void)
|
|
||||||
{
|
|
||||||
int i, irq;
|
|
||||||
|
|
||||||
for (i = pmu_device->num_resources - 1; i >= 0; --i) {
|
|
||||||
irq = platform_get_irq(pmu_device, i);
|
|
||||||
if (irq >= 0)
|
|
||||||
free_irq(irq, NULL);
|
|
||||||
}
|
|
||||||
armpmu->stop();
|
|
||||||
|
|
||||||
release_pmu(ARM_PMU_DEVICE_CPU);
|
|
||||||
pmu_device = NULL;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
static atomic_t active_events = ATOMIC_INIT(0);
|
return 0;
|
||||||
static DEFINE_MUTEX(pmu_reserve_mutex);
|
}
|
||||||
|
|
||||||
static void
|
static void
|
||||||
hw_perf_event_destroy(struct perf_event *event)
|
hw_perf_event_destroy(struct perf_event *event)
|
||||||
{
|
{
|
||||||
if (atomic_dec_and_mutex_lock(&active_events, &pmu_reserve_mutex)) {
|
struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
|
||||||
armpmu_release_hardware();
|
atomic_t *active_events = &armpmu->active_events;
|
||||||
mutex_unlock(&pmu_reserve_mutex);
|
struct mutex *pmu_reserve_mutex = &armpmu->reserve_mutex;
|
||||||
|
|
||||||
|
if (atomic_dec_and_mutex_lock(active_events, pmu_reserve_mutex)) {
|
||||||
|
armpmu_release_hardware(armpmu);
|
||||||
|
mutex_unlock(pmu_reserve_mutex);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static int
|
||||||
|
event_requires_mode_exclusion(struct perf_event_attr *attr)
|
||||||
|
{
|
||||||
|
return attr->exclude_idle || attr->exclude_user ||
|
||||||
|
attr->exclude_kernel || attr->exclude_hv;
|
||||||
|
}
|
||||||
|
|
||||||
static int
|
static int
|
||||||
__hw_perf_event_init(struct perf_event *event)
|
__hw_perf_event_init(struct perf_event *event)
|
||||||
{
|
{
|
||||||
|
struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
|
||||||
struct hw_perf_event *hwc = &event->hw;
|
struct hw_perf_event *hwc = &event->hw;
|
||||||
int mapping, err;
|
int mapping, err;
|
||||||
|
|
||||||
/* Decode the generic type into an ARM event identifier. */
|
mapping = armpmu->map_event(event);
|
||||||
if (PERF_TYPE_HARDWARE == event->attr.type) {
|
|
||||||
mapping = armpmu_map_event(event->attr.config);
|
|
||||||
} else if (PERF_TYPE_HW_CACHE == event->attr.type) {
|
|
||||||
mapping = armpmu_map_cache_event(event->attr.config);
|
|
||||||
} else if (PERF_TYPE_RAW == event->attr.type) {
|
|
||||||
mapping = armpmu_map_raw_event(event->attr.config);
|
|
||||||
} else {
|
|
||||||
pr_debug("event type %x not supported\n", event->attr.type);
|
|
||||||
return -EOPNOTSUPP;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (mapping < 0) {
|
if (mapping < 0) {
|
||||||
pr_debug("event %x:%llx not supported\n", event->attr.type,
|
pr_debug("event %x:%llx not supported\n", event->attr.type,
|
||||||
|
@ -494,18 +482,6 @@ __hw_perf_event_init(struct perf_event *event)
|
||||||
return mapping;
|
return mapping;
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
|
||||||
* Check whether we need to exclude the counter from certain modes.
|
|
||||||
* The ARM performance counters are on all of the time so if someone
|
|
||||||
* has asked us for some excludes then we have to fail.
|
|
||||||
*/
|
|
||||||
if (event->attr.exclude_kernel || event->attr.exclude_user ||
|
|
||||||
event->attr.exclude_hv || event->attr.exclude_idle) {
|
|
||||||
pr_debug("ARM performance counters do not support "
|
|
||||||
"mode exclusion\n");
|
|
||||||
return -EPERM;
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* We don't assign an index until we actually place the event onto
|
* We don't assign an index until we actually place the event onto
|
||||||
* hardware. Use -1 to signify that we haven't decided where to put it
|
* hardware. Use -1 to signify that we haven't decided where to put it
|
||||||
|
@ -513,17 +489,26 @@ __hw_perf_event_init(struct perf_event *event)
|
||||||
* clever allocation or constraints checking at this point.
|
* clever allocation or constraints checking at this point.
|
||||||
*/
|
*/
|
||||||
hwc->idx = -1;
|
hwc->idx = -1;
|
||||||
|
hwc->config_base = 0;
|
||||||
/*
|
|
||||||
* Store the event encoding into the config_base field. config and
|
|
||||||
* event_base are unused as the only 2 things we need to know are
|
|
||||||
* the event mapping and the counter to use. The counter to use is
|
|
||||||
* also the indx and the config_base is the event type.
|
|
||||||
*/
|
|
||||||
hwc->config_base = (unsigned long)mapping;
|
|
||||||
hwc->config = 0;
|
hwc->config = 0;
|
||||||
hwc->event_base = 0;
|
hwc->event_base = 0;
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Check whether we need to exclude the counter from certain modes.
|
||||||
|
*/
|
||||||
|
if ((!armpmu->set_event_filter ||
|
||||||
|
armpmu->set_event_filter(hwc, &event->attr)) &&
|
||||||
|
event_requires_mode_exclusion(&event->attr)) {
|
||||||
|
pr_debug("ARM performance counters do not support "
|
||||||
|
"mode exclusion\n");
|
||||||
|
return -EPERM;
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Store the event encoding into the config_base field.
|
||||||
|
*/
|
||||||
|
hwc->config_base |= (unsigned long)mapping;
|
||||||
|
|
||||||
if (!hwc->sample_period) {
|
if (!hwc->sample_period) {
|
||||||
hwc->sample_period = armpmu->max_period;
|
hwc->sample_period = armpmu->max_period;
|
||||||
hwc->last_period = hwc->sample_period;
|
hwc->last_period = hwc->sample_period;
|
||||||
|
@ -542,32 +527,23 @@ __hw_perf_event_init(struct perf_event *event)
|
||||||
|
|
||||||
static int armpmu_event_init(struct perf_event *event)
|
static int armpmu_event_init(struct perf_event *event)
|
||||||
{
|
{
|
||||||
|
struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
|
||||||
int err = 0;
|
int err = 0;
|
||||||
|
atomic_t *active_events = &armpmu->active_events;
|
||||||
|
|
||||||
switch (event->attr.type) {
|
if (armpmu->map_event(event) == -ENOENT)
|
||||||
case PERF_TYPE_RAW:
|
|
||||||
case PERF_TYPE_HARDWARE:
|
|
||||||
case PERF_TYPE_HW_CACHE:
|
|
||||||
break;
|
|
||||||
|
|
||||||
default:
|
|
||||||
return -ENOENT;
|
return -ENOENT;
|
||||||
}
|
|
||||||
|
|
||||||
if (!armpmu)
|
|
||||||
return -ENODEV;
|
|
||||||
|
|
||||||
event->destroy = hw_perf_event_destroy;
|
event->destroy = hw_perf_event_destroy;
|
||||||
|
|
||||||
if (!atomic_inc_not_zero(&active_events)) {
|
if (!atomic_inc_not_zero(active_events)) {
|
||||||
mutex_lock(&pmu_reserve_mutex);
|
mutex_lock(&armpmu->reserve_mutex);
|
||||||
if (atomic_read(&active_events) == 0) {
|
if (atomic_read(active_events) == 0)
|
||||||
err = armpmu_reserve_hardware();
|
err = armpmu_reserve_hardware(armpmu);
|
||||||
}
|
|
||||||
|
|
||||||
if (!err)
|
if (!err)
|
||||||
atomic_inc(&active_events);
|
atomic_inc(active_events);
|
||||||
mutex_unlock(&pmu_reserve_mutex);
|
mutex_unlock(&armpmu->reserve_mutex);
|
||||||
}
|
}
|
||||||
|
|
||||||
if (err)
|
if (err)
|
||||||
|
@ -582,22 +558,9 @@ static int armpmu_event_init(struct perf_event *event)
|
||||||
|
|
||||||
@@ -582,22 +558,9 @@ static int armpmu_event_init(struct perf_event *event)
 
 static void armpmu_enable(struct pmu *pmu)
 {
-	/* Enable all of the perf events on hardware. */
-	int idx, enabled = 0;
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
-
-	if (!armpmu)
-		return;
-
-	for (idx = 0; idx <= armpmu->num_events; ++idx) {
-		struct perf_event *event = cpuc->events[idx];
-
-		if (!event)
-			continue;
-
-		armpmu->enable(&event->hw, idx);
-		enabled = 1;
-	}
+	struct arm_pmu *armpmu = to_arm_pmu(pmu);
+	struct pmu_hw_events *hw_events = armpmu->get_hw_events();
+	int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events);
 
 	if (enabled)
 		armpmu->start();
@@ -605,11 +568,16 @@ static void armpmu_enable(struct pmu *pmu)
 
 static void armpmu_disable(struct pmu *pmu)
 {
-	if (armpmu)
-		armpmu->stop();
+	struct arm_pmu *armpmu = to_arm_pmu(pmu);
 	armpmu->stop();
 }
 
-static struct pmu pmu = {
+static void __init armpmu_init(struct arm_pmu *armpmu)
+{
+	atomic_set(&armpmu->active_events, 0);
+	mutex_init(&armpmu->reserve_mutex);
+
+	armpmu->pmu = (struct pmu) {
 		.pmu_enable	= armpmu_enable,
 		.pmu_disable	= armpmu_disable,
 		.event_init	= armpmu_event_init,
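With the struct pmu now embedded inside each struct arm_pmu, callbacks like armpmu_enable() recover the wrapper from the pointer perf hands them, which is what to_arm_pmu() does. A self-contained sketch of that container_of idiom, with stand-in type names:

#include <stddef.h>

struct pmu_example { int dummy; };		/* stands in for struct pmu */

struct arm_pmu_example {
	struct pmu_example	pmu;		/* embedded generic PMU */
	int			num_events;	/* driver-private state */
};

/* recover the wrapper from a pointer to its embedded member */
#define example_to_arm_pmu(p) \
	((struct arm_pmu_example *)((char *)(p) - \
		offsetof(struct arm_pmu_example, pmu)))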
@@ -619,6 +587,13 @@ static struct pmu pmu = {
 	.stop		= armpmu_stop,
 	.read		= armpmu_read,
 	};
+}
+
+int __init armpmu_register(struct arm_pmu *armpmu, char *name, int type)
+{
+	armpmu_init(armpmu);
+	return perf_pmu_register(&armpmu->pmu, name, type);
+}
 
 /* Include the PMU-specific implementations. */
 #include "perf_event_xscale.c"
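armpmu_register() condenses the two-step dance every backend now performs: initialise the per-PMU state, then hand the embedded struct pmu to the core. A sketch of a caller, condensing what init_hw_perf_events() does later in this diff; the error handling here is an assumption:

static struct arm_pmu *cpu_pmu;	/* the probed CPU PMU, as in the diff */

static int __init example_register_cpu_pmu(void)
{
	cpu_pmu = armv7_a9_pmu_init();	/* from perf_event_v7.c */
	if (!cpu_pmu)
		return -ENODEV;

	cpu_pmu_init(cpu_pmu);
	/* wires up armpmu_init() and perf_pmu_register() in one call */
	return armpmu_register(cpu_pmu, "cpu", PERF_TYPE_RAW);
}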
@@ -630,14 +605,72 @@ static struct pmu pmu = {
  * This requires SMP to be available, so exists as a separate initcall.
  */
 static int __init
-armpmu_reset(void)
+cpu_pmu_reset(void)
 {
-	if (armpmu && armpmu->reset)
-		return on_each_cpu(armpmu->reset, NULL, 1);
+	if (cpu_pmu && cpu_pmu->reset)
+		return on_each_cpu(cpu_pmu->reset, NULL, 1);
 	return 0;
 }
-arch_initcall(armpmu_reset);
+arch_initcall(cpu_pmu_reset);
 
+/*
+ * PMU platform driver and devicetree bindings.
+ */
+static struct of_device_id armpmu_of_device_ids[] = {
+	{.compatible = "arm,cortex-a9-pmu"},
+	{.compatible = "arm,cortex-a8-pmu"},
+	{.compatible = "arm,arm1136-pmu"},
+	{.compatible = "arm,arm1176-pmu"},
+	{},
+};
+
+static struct platform_device_id armpmu_plat_device_ids[] = {
+	{.name = "arm-pmu"},
+	{},
+};
+
+static int __devinit armpmu_device_probe(struct platform_device *pdev)
+{
+	cpu_pmu->plat_device = pdev;
+	return 0;
+}
+
+static struct platform_driver armpmu_driver = {
+	.driver		= {
+		.name	= "arm-pmu",
+		.of_match_table = armpmu_of_device_ids,
+	},
+	.probe		= armpmu_device_probe,
+	.id_table	= armpmu_plat_device_ids,
+};
+
+static int __init register_pmu_driver(void)
+{
+	return platform_driver_register(&armpmu_driver);
+}
+device_initcall(register_pmu_driver);
+
+static struct pmu_hw_events *armpmu_get_cpu_events(void)
+{
+	return &__get_cpu_var(cpu_hw_events);
+}
+
+static void __init cpu_pmu_init(struct arm_pmu *armpmu)
+{
+	int cpu;
+	for_each_possible_cpu(cpu) {
+		struct pmu_hw_events *events = &per_cpu(cpu_hw_events, cpu);
+		events->events = per_cpu(hw_events, cpu);
+		events->used_mask = per_cpu(used_mask, cpu);
+		raw_spin_lock_init(&events->pmu_lock);
+	}
+	armpmu->get_hw_events = armpmu_get_cpu_events;
+	armpmu->type = ARM_PMU_DEVICE_CPU;
+}
+
+/*
+ * CPU PMU identification and registration.
+ */
 static int __init
 init_hw_perf_events(void)
 {
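The new "arm-pmu" platform driver binds either by device-tree compatible string or by platform device name. A sketch of how a non-DT board file would describe its PMU so the driver above can pick up the interrupt; the resource values are placeholders, not taken from this diff:

#include <linux/ioport.h>
#include <linux/platform_device.h>

static struct resource example_pmu_resource = {
	.start	= 100,			/* placeholder PMU IRQ number */
	.end	= 100,
	.flags	= IORESOURCE_IRQ,
};

static struct platform_device example_pmu_device = {
	.name		= "arm-pmu",	/* matches armpmu_plat_device_ids */
	.id		= -1,
	.resource	= &example_pmu_resource,
	.num_resources	= 1,
};

static void __init example_board_init(void)
{
	platform_device_register(&example_pmu_device);
}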
@@ -651,22 +684,22 @@ init_hw_perf_events(void)
 		case 0xB360:	/* ARM1136 */
 		case 0xB560:	/* ARM1156 */
 		case 0xB760:	/* ARM1176 */
-			armpmu = armv6pmu_init();
+			cpu_pmu = armv6pmu_init();
 			break;
 		case 0xB020:	/* ARM11mpcore */
-			armpmu = armv6mpcore_pmu_init();
+			cpu_pmu = armv6mpcore_pmu_init();
 			break;
 		case 0xC080:	/* Cortex-A8 */
-			armpmu = armv7_a8_pmu_init();
+			cpu_pmu = armv7_a8_pmu_init();
 			break;
 		case 0xC090:	/* Cortex-A9 */
-			armpmu = armv7_a9_pmu_init();
+			cpu_pmu = armv7_a9_pmu_init();
 			break;
 		case 0xC050:	/* Cortex-A5 */
-			armpmu = armv7_a5_pmu_init();
+			cpu_pmu = armv7_a5_pmu_init();
 			break;
 		case 0xC0F0:	/* Cortex-A15 */
-			armpmu = armv7_a15_pmu_init();
+			cpu_pmu = armv7_a15_pmu_init();
 			break;
 		}
 	/* Intel CPUs [xscale]. */
@@ -674,23 +707,23 @@ init_hw_perf_events(void)
 		part_number = (cpuid >> 13) & 0x7;
 		switch (part_number) {
 		case 1:
-			armpmu = xscale1pmu_init();
+			cpu_pmu = xscale1pmu_init();
 			break;
 		case 2:
-			armpmu = xscale2pmu_init();
+			cpu_pmu = xscale2pmu_init();
 			break;
 		}
 	}
 
-	if (armpmu) {
+	if (cpu_pmu) {
 		pr_info("enabled with %s PMU driver, %d counters available\n",
-			armpmu->name, armpmu->num_events);
+			cpu_pmu->name, cpu_pmu->num_events);
+		cpu_pmu_init(cpu_pmu);
+		armpmu_register(cpu_pmu, "cpu", PERF_TYPE_RAW);
 	} else {
 		pr_info("no hardware support available\n");
 	}
 
-	perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
-
 	return 0;
 }
 early_initcall(init_hw_perf_events);

--- a/arch/arm/kernel/perf_event_v6.c
+++ b/arch/arm/kernel/perf_event_v6.c
@@ -54,7 +54,7 @@ enum armv6_perf_types {
 };
 
 enum armv6_counters {
-	ARMV6_CYCLE_COUNTER = 1,
+	ARMV6_CYCLE_COUNTER = 0,
 	ARMV6_COUNTER0,
 	ARMV6_COUNTER1,
 };
@@ -433,6 +433,7 @@ armv6pmu_enable_event(struct hw_perf_event *hwc,
 		      int idx)
 {
 	unsigned long val, mask, evt, flags;
+	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
 
 	if (ARMV6_CYCLE_COUNTER == idx) {
 		mask	= 0;
@@ -454,12 +455,29 @@ armv6pmu_enable_event(struct hw_perf_event *hwc,
 	 * Mask out the current event and set the counter to count the event
 	 * that we're interested in.
 	 */
-	raw_spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 	val = armv6_pmcr_read();
 	val &= ~mask;
 	val |= evt;
 	armv6_pmcr_write(val);
-	raw_spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
+}
+
+static int counter_is_active(unsigned long pmcr, int idx)
+{
+	unsigned long mask = 0;
+	if (idx == ARMV6_CYCLE_COUNTER)
+		mask = ARMV6_PMCR_CCOUNT_IEN;
+	else if (idx == ARMV6_COUNTER0)
+		mask = ARMV6_PMCR_COUNT0_IEN;
+	else if (idx == ARMV6_COUNTER1)
+		mask = ARMV6_PMCR_COUNT1_IEN;
+
+	if (mask)
+		return pmcr & mask;
+
+	WARN_ONCE(1, "invalid counter number (%d)\n", idx);
+	return 0;
 }
 
 static irqreturn_t
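The new counter_is_active() helper removes the need for a separate active_mask bitmap: whether a counter is in use can be read straight from the interrupt-enable bits in the PMCR snapshot. A sketch of how an overflow scan uses it, with the loop bounds assumed from the three-counter ARMv6 layout above:

static void example_scan_active(unsigned long pmcr)
{
	int idx;

	/* walk cycle counter plus the two event counters */
	for (idx = ARMV6_CYCLE_COUNTER; idx <= ARMV6_COUNTER1; ++idx) {
		if (!counter_is_active(pmcr, idx))
			continue;
		/* overflow handling for this counter would go here */
	}
}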
@@ -468,7 +486,7 @@ armv6pmu_handle_irq(int irq_num,
 {
 	unsigned long pmcr = armv6_pmcr_read();
 	struct perf_sample_data data;
-	struct cpu_hw_events *cpuc;
+	struct pmu_hw_events *cpuc;
 	struct pt_regs *regs;
 	int idx;
 
@@ -487,11 +505,11 @@ armv6pmu_handle_irq(int irq_num,
 	perf_sample_data_init(&data, 0);
 
 	cpuc = &__get_cpu_var(cpu_hw_events);
-	for (idx = 0; idx <= armpmu->num_events; ++idx) {
+	for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
 		struct perf_event *event = cpuc->events[idx];
 		struct hw_perf_event *hwc;
 
-		if (!test_bit(idx, cpuc->active_mask))
+		if (!counter_is_active(pmcr, idx))
 			continue;
 
 		/*
@@ -508,7 +526,7 @@ armv6pmu_handle_irq(int irq_num,
 			continue;
 
 		if (perf_event_overflow(event, &data, regs))
-			armpmu->disable(hwc, idx);
+			cpu_pmu->disable(hwc, idx);
 	}
 
 	/*
@@ -527,28 +545,30 @@ static void
 armv6pmu_start(void)
 {
 	unsigned long flags, val;
+	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
 
-	raw_spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 	val = armv6_pmcr_read();
 	val |= ARMV6_PMCR_ENABLE;
 	armv6_pmcr_write(val);
-	raw_spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 
 static void
 armv6pmu_stop(void)
 {
 	unsigned long flags, val;
+	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
 
-	raw_spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 	val = armv6_pmcr_read();
 	val &= ~ARMV6_PMCR_ENABLE;
 	armv6_pmcr_write(val);
-	raw_spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 
 static int
-armv6pmu_get_event_idx(struct cpu_hw_events *cpuc,
+armv6pmu_get_event_idx(struct pmu_hw_events *cpuc,
 		       struct hw_perf_event *event)
 {
 	/* Always place a cycle counter into the cycle counter. */
@@ -578,6 +598,7 @@ armv6pmu_disable_event(struct hw_perf_event *hwc,
 		       int idx)
 {
 	unsigned long val, mask, evt, flags;
+	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
 
 	if (ARMV6_CYCLE_COUNTER == idx) {
 		mask	= ARMV6_PMCR_CCOUNT_IEN;
@@ -598,12 +619,12 @@ armv6pmu_disable_event(struct hw_perf_event *hwc,
 	 * of ETM bus signal assertion cycles. The external reporting should
 	 * be disabled and so this should never increment.
 	 */
-	raw_spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 	val = armv6_pmcr_read();
 	val &= ~mask;
 	val |= evt;
 	armv6_pmcr_write(val);
-	raw_spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 
 static void
@@ -611,6 +632,7 @@ armv6mpcore_pmu_disable_event(struct hw_perf_event *hwc,
 			      int idx)
 {
 	unsigned long val, mask, flags, evt = 0;
+	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
 
 	if (ARMV6_CYCLE_COUNTER == idx) {
 		mask	= ARMV6_PMCR_CCOUNT_IEN;
@@ -627,15 +649,21 @@ armv6mpcore_pmu_disable_event(struct hw_perf_event *hwc,
 	 * Unlike UP ARMv6, we don't have a way of stopping the counters. We
 	 * simply disable the interrupt reporting.
 	 */
-	raw_spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 	val = armv6_pmcr_read();
 	val &= ~mask;
 	val |= evt;
 	armv6_pmcr_write(val);
-	raw_spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 
-static const struct arm_pmu armv6pmu = {
+static int armv6_map_event(struct perf_event *event)
+{
+	return map_cpu_event(event, &armv6_perf_map,
+			     &armv6_perf_cache_map, 0xFF);
+}
+
+static struct arm_pmu armv6pmu = {
 	.id			= ARM_PERF_PMU_ID_V6,
 	.name			= "v6",
 	.handle_irq		= armv6pmu_handle_irq,
@@ -646,14 +674,12 @@ static const struct arm_pmu armv6pmu = {
 	.get_event_idx		= armv6pmu_get_event_idx,
 	.start			= armv6pmu_start,
 	.stop			= armv6pmu_stop,
-	.cache_map		= &armv6_perf_cache_map,
-	.event_map		= &armv6_perf_map,
-	.raw_event_mask		= 0xFF,
+	.map_event		= armv6_map_event,
 	.num_events		= 3,
 	.max_period		= (1LLU << 32) - 1,
 };
 
-static const struct arm_pmu *__init armv6pmu_init(void)
+static struct arm_pmu *__init armv6pmu_init(void)
 {
 	return &armv6pmu;
 }
@@ -665,7 +691,14 @@ static const struct arm_pmu *__init armv6pmu_init(void)
  * disable the interrupt reporting and update the event. When unthrottling we
  * reset the period and enable the interrupt reporting.
  */
-static const struct arm_pmu armv6mpcore_pmu = {
+
+static int armv6mpcore_map_event(struct perf_event *event)
+{
+	return map_cpu_event(event, &armv6mpcore_perf_map,
+			     &armv6mpcore_perf_cache_map, 0xFF);
+}
+
+static struct arm_pmu armv6mpcore_pmu = {
 	.id			= ARM_PERF_PMU_ID_V6MP,
 	.name			= "v6mpcore",
 	.handle_irq		= armv6pmu_handle_irq,
@@ -676,24 +709,22 @@ static const struct arm_pmu armv6mpcore_pmu = {
 	.get_event_idx		= armv6pmu_get_event_idx,
 	.start			= armv6pmu_start,
 	.stop			= armv6pmu_stop,
-	.cache_map		= &armv6mpcore_perf_cache_map,
-	.event_map		= &armv6mpcore_perf_map,
-	.raw_event_mask		= 0xFF,
+	.map_event		= armv6mpcore_map_event,
 	.num_events		= 3,
 	.max_period		= (1LLU << 32) - 1,
 };
 
-static const struct arm_pmu *__init armv6mpcore_pmu_init(void)
+static struct arm_pmu *__init armv6mpcore_pmu_init(void)
 {
 	return &armv6mpcore_pmu;
 }
 #else
-static const struct arm_pmu *__init armv6pmu_init(void)
+static struct arm_pmu *__init armv6pmu_init(void)
 {
 	return NULL;
 }
 
-static const struct arm_pmu *__init armv6mpcore_pmu_init(void)
+static struct arm_pmu *__init armv6mpcore_pmu_init(void)
 {
 	return NULL;
 }
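The map_event refactor replaces the per-PMU cache_map/event_map/raw_event_mask struct fields with a single callback, so the core no longer needs to know about the tables at all. A sketch of the dispatch a map_cpu_event()-style helper plausibly performs with the tables armv6_map_event() passes in; the cache-map lookup is omitted for brevity and the exact unsupported-value handling is an assumption, only the overall shape is taken from the diff:

static int example_map_cpu_event(struct perf_event *event,
				 const unsigned (*event_map)[PERF_COUNT_HW_MAX],
				 u32 raw_event_mask)
{
	u64 config = event->attr.config;

	switch (event->attr.type) {
	case PERF_TYPE_HARDWARE:
		if (config >= PERF_COUNT_HW_MAX)
			return -ENOENT;
		return (*event_map)[config];
	case PERF_TYPE_RAW:
		return raw_event_mask & config;
	default:
		return -ENOENT;	/* PERF_TYPE_HW_CACHE omitted here */
	}
}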
--- a/arch/arm/kernel/perf_event_v7.c
+++ b/arch/arm/kernel/perf_event_v7.c
@@ -17,6 +17,9 @@
  */
 
 #ifdef CONFIG_CPU_V7
+
+static struct arm_pmu armv7pmu;
+
 /*
  * Common ARMv7 event types
  *
@@ -676,24 +679,25 @@ static const unsigned armv7_a15_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
 };
 
 /*
- * Perf Events counters
+ * Perf Events' indices
  */
-enum armv7_counters {
-	ARMV7_CYCLE_COUNTER	= 1,	/* Cycle counter */
-	ARMV7_COUNTER0		= 2,	/* First event counter */
-};
-
-/*
- * The cycle counter is ARMV7_CYCLE_COUNTER.
- * The first event counter is ARMV7_COUNTER0.
- * The last event counter is (ARMV7_COUNTER0 + armpmu->num_events - 1).
- */
-#define	ARMV7_COUNTER_LAST	(ARMV7_COUNTER0 + armpmu->num_events - 1)
+#define	ARMV7_IDX_CYCLE_COUNTER	0
+#define	ARMV7_IDX_COUNTER0	1
+#define	ARMV7_IDX_COUNTER_LAST	(ARMV7_IDX_CYCLE_COUNTER + cpu_pmu->num_events - 1)
+
+#define	ARMV7_MAX_COUNTERS	32
+#define	ARMV7_COUNTER_MASK	(ARMV7_MAX_COUNTERS - 1)
 
 /*
  * ARMv7 low level PMNC access
  */
 
+/*
+ * Perf Event to low level counters mapping
+ */
+#define	ARMV7_IDX_TO_COUNTER(x)	\
+	(((x) - ARMV7_IDX_COUNTER0) & ARMV7_COUNTER_MASK)
+
 /*
  * Per-CPU PMNC: config reg
  */
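A worked example of the index mapping just introduced, assuming a PMU with four event counters plus the cycle counter (num_events == 5); the arithmetic follows directly from the macros above:

/*
 *   perf idx 0 -> the cycle counter (selected specially, no PMSELR)
 *   perf idx 1 -> ARMV7_IDX_TO_COUNTER(1) == 0 -> hardware counter 0
 *   perf idx 4 -> ARMV7_IDX_TO_COUNTER(4) == 3 -> hardware counter 3
 *
 * and ARMV7_IDX_COUNTER_LAST == 0 + 5 - 1 == 4, so the dump and
 * overflow loops can iterate the same dense 0..num_events-1 range
 * the generic code uses for used_mask.
 */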
@@ -707,104 +711,77 @@ enum armv7_counters {
 #define	ARMV7_PMNC_N_MASK	0x1f
 #define	ARMV7_PMNC_MASK		0x3f	 /* Mask for writable bits */
 
-/*
- * Available counters
- */
-#define ARMV7_CNT0		0	/* First event counter */
-#define ARMV7_CCNT		31	/* Cycle counter */
-
-/* Perf Event to low level counters mapping */
-#define ARMV7_EVENT_CNT_TO_CNTx	(ARMV7_COUNTER0 - ARMV7_CNT0)
-
-/*
- * CNTENS: counters enable reg
- */
-#define ARMV7_CNTENS_P(idx)	(1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
-#define ARMV7_CNTENS_C		(1 << ARMV7_CCNT)
-
-/*
- * CNTENC: counters disable reg
- */
-#define ARMV7_CNTENC_P(idx)	(1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
-#define ARMV7_CNTENC_C		(1 << ARMV7_CCNT)
-
-/*
- * INTENS: counters overflow interrupt enable reg
- */
-#define ARMV7_INTENS_P(idx)	(1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
-#define ARMV7_INTENS_C		(1 << ARMV7_CCNT)
-
-/*
- * INTENC: counters overflow interrupt disable reg
- */
-#define ARMV7_INTENC_P(idx)	(1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
-#define ARMV7_INTENC_C		(1 << ARMV7_CCNT)
-
-/*
- * EVTSEL: Event selection reg
- */
-#define	ARMV7_EVTSEL_MASK	0xff	/* Mask for writable bits */
-
-/*
- * SELECT: Counter selection reg
- */
-#define	ARMV7_SELECT_MASK	0x1f	/* Mask for writable bits */
-
 /*
  * FLAG: counters overflow flag status reg
  */
-#define ARMV7_FLAG_P(idx)	(1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
-#define ARMV7_FLAG_C		(1 << ARMV7_CCNT)
 #define	ARMV7_FLAG_MASK		0xffffffff	/* Mask for writable bits */
 #define	ARMV7_OVERFLOWED_MASK	ARMV7_FLAG_MASK
 
-static inline unsigned long armv7_pmnc_read(void)
+/*
+ * PMXEVTYPER: Event selection reg
+ */
+#define	ARMV7_EVTYPE_MASK	0xc00000ff	/* Mask for writable bits */
+#define	ARMV7_EVTYPE_EVENT	0xff		/* Mask for EVENT bits */
+
+/*
+ * Event filters for PMUv2
+ */
+#define	ARMV7_EXCLUDE_PL1	(1 << 31)
+#define	ARMV7_EXCLUDE_USER	(1 << 30)
+#define	ARMV7_INCLUDE_HYP	(1 << 27)
+
+static inline u32 armv7_pmnc_read(void)
 {
 	u32 val;
 	asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r"(val));
 	return val;
 }
 
-static inline void armv7_pmnc_write(unsigned long val)
+static inline void armv7_pmnc_write(u32 val)
 {
 	val &= ARMV7_PMNC_MASK;
 	isb();
 	asm volatile("mcr p15, 0, %0, c9, c12, 0" : : "r"(val));
 }
 
-static inline int armv7_pmnc_has_overflowed(unsigned long pmnc)
+static inline int armv7_pmnc_has_overflowed(u32 pmnc)
 {
 	return pmnc & ARMV7_OVERFLOWED_MASK;
 }
 
-static inline int armv7_pmnc_counter_has_overflowed(unsigned long pmnc,
-					enum armv7_counters counter)
+static inline int armv7_pmnc_counter_valid(int idx)
+{
+	return idx >= ARMV7_IDX_CYCLE_COUNTER && idx <= ARMV7_IDX_COUNTER_LAST;
+}
+
+static inline int armv7_pmnc_counter_has_overflowed(u32 pmnc, int idx)
 {
 	int ret = 0;
+	u32 counter;
 
-	if (counter == ARMV7_CYCLE_COUNTER)
-		ret = pmnc & ARMV7_FLAG_C;
-	else if ((counter >= ARMV7_COUNTER0) && (counter <= ARMV7_COUNTER_LAST))
-		ret = pmnc & ARMV7_FLAG_P(counter);
-	else
+	if (!armv7_pmnc_counter_valid(idx)) {
 		pr_err("CPU%u checking wrong counter %d overflow status\n",
-			smp_processor_id(), counter);
+			smp_processor_id(), idx);
+	} else {
+		counter = ARMV7_IDX_TO_COUNTER(idx);
+		ret = pmnc & BIT(counter);
+	}
 
 	return ret;
 }
 
-static inline int armv7_pmnc_select_counter(unsigned int idx)
+static inline int armv7_pmnc_select_counter(int idx)
 {
-	u32 val;
+	u32 counter;
 
-	if ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST)) {
-		pr_err("CPU%u selecting wrong PMNC counter"
-			" %d\n", smp_processor_id(), idx);
-		return -1;
+	if (!armv7_pmnc_counter_valid(idx)) {
+		pr_err("CPU%u selecting wrong PMNC counter %d\n",
+			smp_processor_id(), idx);
+		return -EINVAL;
 	}
 
-	val = (idx - ARMV7_EVENT_CNT_TO_CNTx) & ARMV7_SELECT_MASK;
-	asm volatile("mcr p15, 0, %0, c9, c12, 5" : : "r" (val));
+	counter = ARMV7_IDX_TO_COUNTER(idx);
+	asm volatile("mcr p15, 0, %0, c9, c12, 5" : : "r" (counter));
 	isb();
 
 	return idx;
@@ -812,124 +789,95 @@ static inline int armv7_pmnc_select_counter(int idx)
 
 static inline u32 armv7pmu_read_counter(int idx)
 {
-	unsigned long value = 0;
+	u32 value = 0;
 
-	if (idx == ARMV7_CYCLE_COUNTER)
-		asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (value));
-	else if ((idx >= ARMV7_COUNTER0) && (idx <= ARMV7_COUNTER_LAST)) {
-		if (armv7_pmnc_select_counter(idx) == idx)
-			asm volatile("mrc p15, 0, %0, c9, c13, 2"
-				     : "=r" (value));
-	} else
+	if (!armv7_pmnc_counter_valid(idx))
 		pr_err("CPU%u reading wrong counter %d\n",
 			smp_processor_id(), idx);
+	else if (idx == ARMV7_IDX_CYCLE_COUNTER)
+		asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (value));
+	else if (armv7_pmnc_select_counter(idx) == idx)
+		asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (value));
 
 	return value;
 }
 
 static inline void armv7pmu_write_counter(int idx, u32 value)
 {
-	if (idx == ARMV7_CYCLE_COUNTER)
-		asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" (value));
-	else if ((idx >= ARMV7_COUNTER0) && (idx <= ARMV7_COUNTER_LAST)) {
-		if (armv7_pmnc_select_counter(idx) == idx)
-			asm volatile("mcr p15, 0, %0, c9, c13, 2"
-				     : : "r" (value));
-	} else
+	if (!armv7_pmnc_counter_valid(idx))
 		pr_err("CPU%u writing wrong counter %d\n",
 			smp_processor_id(), idx);
+	else if (idx == ARMV7_IDX_CYCLE_COUNTER)
+		asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" (value));
+	else if (armv7_pmnc_select_counter(idx) == idx)
+		asm volatile("mcr p15, 0, %0, c9, c13, 2" : : "r" (value));
 }
 
-static inline void armv7_pmnc_write_evtsel(unsigned int idx, u32 val)
+static inline void armv7_pmnc_write_evtsel(int idx, u32 val)
 {
 	if (armv7_pmnc_select_counter(idx) == idx) {
-		val &= ARMV7_EVTSEL_MASK;
+		val &= ARMV7_EVTYPE_MASK;
 		asm volatile("mcr p15, 0, %0, c9, c13, 1" : : "r" (val));
 	}
 }
 
-static inline u32 armv7_pmnc_enable_counter(unsigned int idx)
+static inline int armv7_pmnc_enable_counter(int idx)
 {
-	u32 val;
+	u32 counter;
 
-	if ((idx != ARMV7_CYCLE_COUNTER) &&
-	    ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) {
-		pr_err("CPU%u enabling wrong PMNC counter"
-			" %d\n", smp_processor_id(), idx);
-		return -1;
+	if (!armv7_pmnc_counter_valid(idx)) {
+		pr_err("CPU%u enabling wrong PMNC counter %d\n",
+			smp_processor_id(), idx);
+		return -EINVAL;
 	}
 
-	if (idx == ARMV7_CYCLE_COUNTER)
-		val = ARMV7_CNTENS_C;
-	else
-		val = ARMV7_CNTENS_P(idx);
-
-	asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (val));
-
+	counter = ARMV7_IDX_TO_COUNTER(idx);
+	asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (BIT(counter)));
 	return idx;
 }
 
-static inline u32 armv7_pmnc_disable_counter(unsigned int idx)
+static inline int armv7_pmnc_disable_counter(int idx)
 {
-	u32 val;
+	u32 counter;
 
-	if ((idx != ARMV7_CYCLE_COUNTER) &&
-	    ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) {
-		pr_err("CPU%u disabling wrong PMNC counter"
-			" %d\n", smp_processor_id(), idx);
-		return -1;
+	if (!armv7_pmnc_counter_valid(idx)) {
+		pr_err("CPU%u disabling wrong PMNC counter %d\n",
+			smp_processor_id(), idx);
+		return -EINVAL;
 	}
 
-	if (idx == ARMV7_CYCLE_COUNTER)
-		val = ARMV7_CNTENC_C;
-	else
-		val = ARMV7_CNTENC_P(idx);
-
-	asm volatile("mcr p15, 0, %0, c9, c12, 2" : : "r" (val));
-
+	counter = ARMV7_IDX_TO_COUNTER(idx);
+	asm volatile("mcr p15, 0, %0, c9, c12, 2" : : "r" (BIT(counter)));
 	return idx;
 }
 
-static inline u32 armv7_pmnc_enable_intens(unsigned int idx)
+static inline int armv7_pmnc_enable_intens(int idx)
 {
-	u32 val;
+	u32 counter;
 
-	if ((idx != ARMV7_CYCLE_COUNTER) &&
-	    ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) {
-		pr_err("CPU%u enabling wrong PMNC counter"
-			" interrupt enable %d\n", smp_processor_id(), idx);
-		return -1;
+	if (!armv7_pmnc_counter_valid(idx)) {
+		pr_err("CPU%u enabling wrong PMNC counter IRQ enable %d\n",
+			smp_processor_id(), idx);
+		return -EINVAL;
 	}
 
-	if (idx == ARMV7_CYCLE_COUNTER)
-		val = ARMV7_INTENS_C;
-	else
-		val = ARMV7_INTENS_P(idx);
-
-	asm volatile("mcr p15, 0, %0, c9, c14, 1" : : "r" (val));
-
+	counter = ARMV7_IDX_TO_COUNTER(idx);
	asm volatile("mcr p15, 0, %0, c9, c14, 1" : : "r" (BIT(counter)));
 	return idx;
 }
 
-static inline u32 armv7_pmnc_disable_intens(unsigned int idx)
+static inline int armv7_pmnc_disable_intens(int idx)
 {
-	u32 val;
+	u32 counter;
 
-	if ((idx != ARMV7_CYCLE_COUNTER) &&
-	    ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) {
-		pr_err("CPU%u disabling wrong PMNC counter"
-			" interrupt enable %d\n", smp_processor_id(), idx);
-		return -1;
+	if (!armv7_pmnc_counter_valid(idx)) {
+		pr_err("CPU%u disabling wrong PMNC counter IRQ enable %d\n",
+			smp_processor_id(), idx);
+		return -EINVAL;
 	}
 
-	if (idx == ARMV7_CYCLE_COUNTER)
-		val = ARMV7_INTENC_C;
-	else
-		val = ARMV7_INTENC_P(idx);
-
-	asm volatile("mcr p15, 0, %0, c9, c14, 2" : : "r" (val));
-
+	counter = ARMV7_IDX_TO_COUNTER(idx);
+	asm volatile("mcr p15, 0, %0, c9, c14, 2" : : "r" (BIT(counter)));
 	return idx;
 }
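The rework above collapses the four C/P macro pairs into one pattern: translate the perf index to a hardware counter number and write a single set bit. The cycle counter needs no special case because the index arithmetic wraps it onto bit 31, which is exactly the C bit in CNTENSET/CNTENCLR/INTENSET/INTENCLR. A self-contained check of that arithmetic, restating the macros from the diff in plain C:

#include <assert.h>

#define IDX_CYCLE_COUNTER	0
#define IDX_COUNTER0		1
#define COUNTER_MASK		(32 - 1)
#define IDX_TO_COUNTER(x)	(((x) - IDX_COUNTER0) & COUNTER_MASK)

int main(void)
{
	/* (0 - 1) & 0x1f == 31: the cycle counter's C bit */
	assert(IDX_TO_COUNTER(IDX_CYCLE_COUNTER) == 31);
	/* first event counter lands on hardware counter 0 */
	assert(IDX_TO_COUNTER(IDX_COUNTER0) == 0);
	return 0;
}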
asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (val));
|
asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (val));
|
||||||
printk(KERN_INFO "CCNT =0x%08x\n", val);
|
printk(KERN_INFO "CCNT =0x%08x\n", val);
|
||||||
|
|
||||||
for (cnt = ARMV7_COUNTER0; cnt < ARMV7_COUNTER_LAST; cnt++) {
|
for (cnt = ARMV7_IDX_COUNTER0; cnt <= ARMV7_IDX_COUNTER_LAST; cnt++) {
|
||||||
armv7_pmnc_select_counter(cnt);
|
armv7_pmnc_select_counter(cnt);
|
||||||
asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (val));
|
asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (val));
|
||||||
printk(KERN_INFO "CNT[%d] count =0x%08x\n",
|
printk(KERN_INFO "CNT[%d] count =0x%08x\n",
|
||||||
cnt-ARMV7_EVENT_CNT_TO_CNTx, val);
|
ARMV7_IDX_TO_COUNTER(cnt), val);
|
||||||
asm volatile("mrc p15, 0, %0, c9, c13, 1" : "=r" (val));
|
asm volatile("mrc p15, 0, %0, c9, c13, 1" : "=r" (val));
|
||||||
printk(KERN_INFO "CNT[%d] evtsel=0x%08x\n",
|
printk(KERN_INFO "CNT[%d] evtsel=0x%08x\n",
|
||||||
cnt-ARMV7_EVENT_CNT_TO_CNTx, val);
|
ARMV7_IDX_TO_COUNTER(cnt), val);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
#endif
|
#endif
|
||||||
|
@ -988,12 +936,13 @@ static void armv7_pmnc_dump_regs(void)
|
||||||
static void armv7pmu_enable_event(struct hw_perf_event *hwc, int idx)
|
static void armv7pmu_enable_event(struct hw_perf_event *hwc, int idx)
|
||||||
{
|
{
|
||||||
unsigned long flags;
|
unsigned long flags;
|
||||||
|
struct pmu_hw_events *events = cpu_pmu->get_hw_events();
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Enable counter and interrupt, and set the counter to count
|
* Enable counter and interrupt, and set the counter to count
|
||||||
* the event that we're interested in.
|
* the event that we're interested in.
|
||||||
*/
|
*/
|
||||||
raw_spin_lock_irqsave(&pmu_lock, flags);
|
raw_spin_lock_irqsave(&events->pmu_lock, flags);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Disable counter
|
* Disable counter
|
||||||
|
@ -1002,9 +951,10 @@ static void armv7pmu_enable_event(struct hw_perf_event *hwc, int idx)
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Set event (if destined for PMNx counters)
|
* Set event (if destined for PMNx counters)
|
||||||
* We don't need to set the event if it's a cycle count
|
* We only need to set the event for the cycle counter if we
|
||||||
|
* have the ability to perform event filtering.
|
||||||
*/
|
*/
|
||||||
if (idx != ARMV7_CYCLE_COUNTER)
|
if (armv7pmu.set_event_filter || idx != ARMV7_IDX_CYCLE_COUNTER)
|
||||||
armv7_pmnc_write_evtsel(idx, hwc->config_base);
|
armv7_pmnc_write_evtsel(idx, hwc->config_base);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
@ -1017,17 +967,18 @@ static void armv7pmu_enable_event(struct hw_perf_event *hwc, int idx)
|
||||||
*/
|
*/
|
||||||
armv7_pmnc_enable_counter(idx);
|
armv7_pmnc_enable_counter(idx);
|
||||||
|
|
||||||
raw_spin_unlock_irqrestore(&pmu_lock, flags);
|
raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
|
||||||
}
|
}
|
||||||
|
|
||||||
static void armv7pmu_disable_event(struct hw_perf_event *hwc, int idx)
|
static void armv7pmu_disable_event(struct hw_perf_event *hwc, int idx)
|
||||||
{
|
{
|
||||||
unsigned long flags;
|
unsigned long flags;
|
||||||
|
struct pmu_hw_events *events = cpu_pmu->get_hw_events();
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Disable counter and interrupt
|
* Disable counter and interrupt
|
||||||
*/
|
*/
|
||||||
raw_spin_lock_irqsave(&pmu_lock, flags);
|
raw_spin_lock_irqsave(&events->pmu_lock, flags);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Disable counter
|
* Disable counter
|
||||||
|
@ -1039,14 +990,14 @@ static void armv7pmu_disable_event(struct hw_perf_event *hwc, int idx)
|
||||||
*/
|
*/
|
||||||
armv7_pmnc_disable_intens(idx);
|
armv7_pmnc_disable_intens(idx);
|
||||||
|
|
||||||
raw_spin_unlock_irqrestore(&pmu_lock, flags);
|
raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
|
||||||
}
|
}
|
||||||
|
|
||||||
static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
|
static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
|
||||||
{
|
{
|
||||||
unsigned long pmnc;
|
u32 pmnc;
|
||||||
struct perf_sample_data data;
|
struct perf_sample_data data;
|
||||||
struct cpu_hw_events *cpuc;
|
struct pmu_hw_events *cpuc;
|
||||||
struct pt_regs *regs;
|
struct pt_regs *regs;
|
||||||
int idx;
|
int idx;
|
||||||
|
|
||||||
|
@ -1069,13 +1020,10 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
|
||||||
perf_sample_data_init(&data, 0);
|
perf_sample_data_init(&data, 0);
|
||||||
|
|
||||||
cpuc = &__get_cpu_var(cpu_hw_events);
|
cpuc = &__get_cpu_var(cpu_hw_events);
|
||||||
for (idx = 0; idx <= armpmu->num_events; ++idx) {
|
for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
|
||||||
struct perf_event *event = cpuc->events[idx];
|
struct perf_event *event = cpuc->events[idx];
|
||||||
struct hw_perf_event *hwc;
|
struct hw_perf_event *hwc;
|
||||||
|
|
||||||
if (!test_bit(idx, cpuc->active_mask))
|
|
||||||
continue;
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* We have a single interrupt for all counters. Check that
|
* We have a single interrupt for all counters. Check that
|
||||||
* each counter has overflowed before we process it.
|
* each counter has overflowed before we process it.
|
||||||
|
@ -1090,7 +1038,7 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
|
||||||
continue;
|
continue;
|
||||||
|
|
||||||
if (perf_event_overflow(event, &data, regs))
|
if (perf_event_overflow(event, &data, regs))
|
||||||
armpmu->disable(hwc, idx);
|
cpu_pmu->disable(hwc, idx);
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
@ -1108,40 +1056,44 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
|
||||||
static void armv7pmu_start(void)
|
static void armv7pmu_start(void)
|
||||||
{
|
{
|
||||||
unsigned long flags;
|
unsigned long flags;
|
||||||
|
struct pmu_hw_events *events = cpu_pmu->get_hw_events();
|
||||||
|
|
||||||
raw_spin_lock_irqsave(&pmu_lock, flags);
|
raw_spin_lock_irqsave(&events->pmu_lock, flags);
|
||||||
/* Enable all counters */
|
/* Enable all counters */
|
||||||
armv7_pmnc_write(armv7_pmnc_read() | ARMV7_PMNC_E);
|
armv7_pmnc_write(armv7_pmnc_read() | ARMV7_PMNC_E);
|
||||||
raw_spin_unlock_irqrestore(&pmu_lock, flags);
|
raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
|
||||||
}
|
}
|
||||||
|
|
||||||
static void armv7pmu_stop(void)
|
static void armv7pmu_stop(void)
|
||||||
{
|
{
|
||||||
unsigned long flags;
|
unsigned long flags;
|
||||||
|
struct pmu_hw_events *events = cpu_pmu->get_hw_events();
|
||||||
|
|
||||||
raw_spin_lock_irqsave(&pmu_lock, flags);
|
raw_spin_lock_irqsave(&events->pmu_lock, flags);
|
||||||
/* Disable all counters */
|
/* Disable all counters */
|
||||||
armv7_pmnc_write(armv7_pmnc_read() & ~ARMV7_PMNC_E);
|
armv7_pmnc_write(armv7_pmnc_read() & ~ARMV7_PMNC_E);
|
||||||
raw_spin_unlock_irqrestore(&pmu_lock, flags);
|
raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
|
||||||
}
|
}
|
||||||
|
|
||||||
static int armv7pmu_get_event_idx(struct cpu_hw_events *cpuc,
|
static int armv7pmu_get_event_idx(struct pmu_hw_events *cpuc,
|
||||||
struct hw_perf_event *event)
|
struct hw_perf_event *event)
|
||||||
{
|
{
|
||||||
int idx;
|
int idx;
|
||||||
|
unsigned long evtype = event->config_base & ARMV7_EVTYPE_EVENT;
|
||||||
|
|
||||||
/* Always place a cycle counter into the cycle counter. */
|
/* Always place a cycle counter into the cycle counter. */
|
||||||
if (event->config_base == ARMV7_PERFCTR_CPU_CYCLES) {
|
if (evtype == ARMV7_PERFCTR_CPU_CYCLES) {
|
||||||
if (test_and_set_bit(ARMV7_CYCLE_COUNTER, cpuc->used_mask))
|
if (test_and_set_bit(ARMV7_IDX_CYCLE_COUNTER, cpuc->used_mask))
|
||||||
return -EAGAIN;
|
return -EAGAIN;
|
||||||
|
|
||||||
return ARMV7_CYCLE_COUNTER;
|
return ARMV7_IDX_CYCLE_COUNTER;
|
||||||
} else {
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* For anything other than a cycle counter, try and use
|
* For anything other than a cycle counter, try and use
|
||||||
* the events counters
|
* the events counters
|
||||||
*/
|
*/
|
||||||
for (idx = ARMV7_COUNTER0; idx <= armpmu->num_events; ++idx) {
|
for (idx = ARMV7_IDX_COUNTER0; idx < cpu_pmu->num_events; ++idx) {
|
||||||
if (!test_and_set_bit(idx, cpuc->used_mask))
|
if (!test_and_set_bit(idx, cpuc->used_mask))
|
||||||
return idx;
|
return idx;
|
||||||
}
|
}
|
||||||
|
@ -1149,20 +1101,69 @@ static int armv7pmu_get_event_idx(struct cpu_hw_events *cpuc,
|
||||||
/* The counters are all in use. */
|
/* The counters are all in use. */
|
||||||
return -EAGAIN;
|
return -EAGAIN;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Add an event filter to a given event. This will only work for PMUv2 PMUs.
|
||||||
|
*/
|
||||||
|
static int armv7pmu_set_event_filter(struct hw_perf_event *event,
|
||||||
|
struct perf_event_attr *attr)
|
||||||
|
{
|
||||||
|
unsigned long config_base = 0;
|
||||||
|
|
||||||
|
if (attr->exclude_idle)
|
||||||
|
return -EPERM;
|
||||||
|
if (attr->exclude_user)
|
||||||
|
config_base |= ARMV7_EXCLUDE_USER;
|
||||||
|
if (attr->exclude_kernel)
|
||||||
|
config_base |= ARMV7_EXCLUDE_PL1;
|
||||||
|
if (!attr->exclude_hv)
|
||||||
|
config_base |= ARMV7_INCLUDE_HYP;
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Install the filter into config_base as this is used to
|
||||||
|
* construct the event type.
|
||||||
|
*/
|
||||||
|
event->config_base = config_base;
|
||||||
|
|
||||||
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
static void armv7pmu_reset(void *info)
|
static void armv7pmu_reset(void *info)
|
||||||
{
|
{
|
||||||
u32 idx, nb_cnt = armpmu->num_events;
|
u32 idx, nb_cnt = cpu_pmu->num_events;
|
||||||
|
|
||||||
/* The counter and interrupt enable registers are unknown at reset. */
|
/* The counter and interrupt enable registers are unknown at reset. */
|
||||||
for (idx = 1; idx < nb_cnt; ++idx)
|
for (idx = ARMV7_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx)
|
||||||
armv7pmu_disable_event(NULL, idx);
|
armv7pmu_disable_event(NULL, idx);
|
||||||
|
|
||||||
/* Initialize & Reset PMNC: C and P bits */
|
/* Initialize & Reset PMNC: C and P bits */
|
||||||
armv7_pmnc_write(ARMV7_PMNC_P | ARMV7_PMNC_C);
|
armv7_pmnc_write(ARMV7_PMNC_P | ARMV7_PMNC_C);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static int armv7_a8_map_event(struct perf_event *event)
|
||||||
|
{
|
||||||
|
return map_cpu_event(event, &armv7_a8_perf_map,
|
||||||
|
&armv7_a8_perf_cache_map, 0xFF);
|
||||||
|
}
|
||||||
|
|
||||||
|
static int armv7_a9_map_event(struct perf_event *event)
|
||||||
|
{
|
||||||
|
return map_cpu_event(event, &armv7_a9_perf_map,
|
||||||
|
&armv7_a9_perf_cache_map, 0xFF);
|
||||||
|
}
|
||||||
|
|
||||||
|
static int armv7_a5_map_event(struct perf_event *event)
|
||||||
|
{
|
||||||
|
return map_cpu_event(event, &armv7_a5_perf_map,
|
||||||
|
&armv7_a5_perf_cache_map, 0xFF);
|
||||||
|
}
|
||||||
|
|
||||||
|
static int armv7_a15_map_event(struct perf_event *event)
|
||||||
|
{
|
||||||
|
return map_cpu_event(event, &armv7_a15_perf_map,
|
||||||
|
&armv7_a15_perf_cache_map, 0xFF);
|
||||||
|
}
|
||||||
|
|
||||||
static struct arm_pmu armv7pmu = {
|
static struct arm_pmu armv7pmu = {
|
||||||
.handle_irq = armv7pmu_handle_irq,
|
.handle_irq = armv7pmu_handle_irq,
|
||||||
.enable = armv7pmu_enable_event,
|
.enable = armv7pmu_enable_event,
|
||||||
|
@ -1173,7 +1174,6 @@ static struct arm_pmu armv7pmu = {
|
||||||
.start = armv7pmu_start,
|
.start = armv7pmu_start,
|
||||||
.stop = armv7pmu_stop,
|
.stop = armv7pmu_stop,
|
||||||
.reset = armv7pmu_reset,
|
.reset = armv7pmu_reset,
|
||||||
.raw_event_mask = 0xFF,
|
|
||||||
.max_period = (1LLU << 32) - 1,
|
.max_period = (1LLU << 32) - 1,
|
||||||
};
|
};
|
||||||
|
|
||||||
|
@ -1188,62 +1188,59 @@ static u32 __init armv7_read_num_pmnc_events(void)
|
||||||
return nb_cnt + 1;
|
return nb_cnt + 1;
|
||||||
}
|
}
|
||||||
|
|
||||||
static const struct arm_pmu *__init armv7_a8_pmu_init(void)
|
static struct arm_pmu *__init armv7_a8_pmu_init(void)
|
||||||
{
|
{
|
||||||
armv7pmu.id = ARM_PERF_PMU_ID_CA8;
|
armv7pmu.id = ARM_PERF_PMU_ID_CA8;
|
||||||
armv7pmu.name = "ARMv7 Cortex-A8";
|
armv7pmu.name = "ARMv7 Cortex-A8";
|
||||||
armv7pmu.cache_map = &armv7_a8_perf_cache_map;
|
armv7pmu.map_event = armv7_a8_map_event;
|
||||||
armv7pmu.event_map = &armv7_a8_perf_map;
|
|
||||||
armv7pmu.num_events = armv7_read_num_pmnc_events();
|
armv7pmu.num_events = armv7_read_num_pmnc_events();
|
||||||
return &armv7pmu;
|
return &armv7pmu;
|
||||||
}
|
}
|
||||||
|
|
||||||
static const struct arm_pmu *__init armv7_a9_pmu_init(void)
|
static struct arm_pmu *__init armv7_a9_pmu_init(void)
|
||||||
{
|
{
|
||||||
armv7pmu.id = ARM_PERF_PMU_ID_CA9;
|
armv7pmu.id = ARM_PERF_PMU_ID_CA9;
|
||||||
armv7pmu.name = "ARMv7 Cortex-A9";
|
armv7pmu.name = "ARMv7 Cortex-A9";
|
||||||
armv7pmu.cache_map = &armv7_a9_perf_cache_map;
|
armv7pmu.map_event = armv7_a9_map_event;
|
||||||
armv7pmu.event_map = &armv7_a9_perf_map;
|
|
||||||
armv7pmu.num_events = armv7_read_num_pmnc_events();
|
armv7pmu.num_events = armv7_read_num_pmnc_events();
|
||||||
return &armv7pmu;
|
return &armv7pmu;
|
||||||
}
|
}
|
||||||
|
|
||||||
static const struct arm_pmu *__init armv7_a5_pmu_init(void)
|
static struct arm_pmu *__init armv7_a5_pmu_init(void)
|
||||||
{
|
{
|
||||||
armv7pmu.id = ARM_PERF_PMU_ID_CA5;
|
armv7pmu.id = ARM_PERF_PMU_ID_CA5;
|
||||||
armv7pmu.name = "ARMv7 Cortex-A5";
|
armv7pmu.name = "ARMv7 Cortex-A5";
|
||||||
armv7pmu.cache_map = &armv7_a5_perf_cache_map;
|
armv7pmu.map_event = armv7_a5_map_event;
|
||||||
armv7pmu.event_map = &armv7_a5_perf_map;
|
|
||||||
armv7pmu.num_events = armv7_read_num_pmnc_events();
|
armv7pmu.num_events = armv7_read_num_pmnc_events();
|
||||||
return &armv7pmu;
|
return &armv7pmu;
|
||||||
}
|
}
|
||||||
|
|
||||||
static const struct arm_pmu *__init armv7_a15_pmu_init(void)
|
static struct arm_pmu *__init armv7_a15_pmu_init(void)
|
||||||
{
|
{
|
||||||
armv7pmu.id = ARM_PERF_PMU_ID_CA15;
|
armv7pmu.id = ARM_PERF_PMU_ID_CA15;
|
||||||
armv7pmu.name = "ARMv7 Cortex-A15";
|
armv7pmu.name = "ARMv7 Cortex-A15";
|
||||||
armv7pmu.cache_map = &armv7_a15_perf_cache_map;
|
armv7pmu.map_event = armv7_a15_map_event;
|
||||||
armv7pmu.event_map = &armv7_a15_perf_map;
|
|
||||||
armv7pmu.num_events = armv7_read_num_pmnc_events();
|
armv7pmu.num_events = armv7_read_num_pmnc_events();
|
||||||
|
armv7pmu.set_event_filter = armv7pmu_set_event_filter;
|
||||||
return &armv7pmu;
|
return &armv7pmu;
|
||||||
}
|
}
|
||||||
#else
|
#else
|
||||||
static const struct arm_pmu *__init armv7_a8_pmu_init(void)
|
static struct arm_pmu *__init armv7_a8_pmu_init(void)
|
||||||
{
|
{
|
||||||
return NULL;
|
return NULL;
|
||||||
}
|
}
|
||||||
|
|
||||||
static const struct arm_pmu *__init armv7_a9_pmu_init(void)
|
static struct arm_pmu *__init armv7_a9_pmu_init(void)
|
||||||
{
|
{
|
||||||
return NULL;
|
return NULL;
|
||||||
}
|
}
|
||||||
|
|
||||||
static const struct arm_pmu *__init armv7_a5_pmu_init(void)
|
static struct arm_pmu *__init armv7_a5_pmu_init(void)
|
||||||
{
|
{
|
||||||
return NULL;
|
return NULL;
|
||||||
}
|
}
|
||||||
|
|
||||||
static const struct arm_pmu *__init armv7_a15_pmu_init(void)
|
static struct arm_pmu *__init armv7_a15_pmu_init(void)
|
||||||
{
|
{
|
||||||
return NULL;
|
return NULL;
|
||||||
}
|
}
|
||||||
|
|
|
@ -40,7 +40,7 @@ enum xscale_perf_types {
|
||||||
};
|
};
|
||||||
|
|
||||||
enum xscale_counters {
|
enum xscale_counters {
|
||||||
XSCALE_CYCLE_COUNTER = 1,
|
XSCALE_CYCLE_COUNTER = 0,
|
||||||
XSCALE_COUNTER0,
|
XSCALE_COUNTER0,
|
||||||
XSCALE_COUNTER1,
|
XSCALE_COUNTER1,
|
||||||
XSCALE_COUNTER2,
|
XSCALE_COUNTER2,
|
||||||
|
@ -222,7 +222,7 @@ xscale1pmu_handle_irq(int irq_num, void *dev)
|
||||||
{
|
{
|
||||||
unsigned long pmnc;
|
unsigned long pmnc;
|
||||||
struct perf_sample_data data;
|
struct perf_sample_data data;
|
||||||
struct cpu_hw_events *cpuc;
|
struct pmu_hw_events *cpuc;
|
||||||
struct pt_regs *regs;
|
struct pt_regs *regs;
|
||||||
int idx;
|
int idx;
|
||||||
|
|
||||||
|
@ -249,13 +249,10 @@ xscale1pmu_handle_irq(int irq_num, void *dev)
|
||||||
perf_sample_data_init(&data, 0);
|
perf_sample_data_init(&data, 0);
|
||||||
|
|
||||||
cpuc = &__get_cpu_var(cpu_hw_events);
|
cpuc = &__get_cpu_var(cpu_hw_events);
|
||||||
for (idx = 0; idx <= armpmu->num_events; ++idx) {
|
for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
|
||||||
struct perf_event *event = cpuc->events[idx];
|
struct perf_event *event = cpuc->events[idx];
|
||||||
struct hw_perf_event *hwc;
|
struct hw_perf_event *hwc;
|
||||||
|
|
||||||
if (!test_bit(idx, cpuc->active_mask))
|
|
||||||
continue;
|
|
||||||
|
|
||||||
if (!xscale1_pmnc_counter_has_overflowed(pmnc, idx))
|
if (!xscale1_pmnc_counter_has_overflowed(pmnc, idx))
|
||||||
continue;
|
continue;
|
||||||
|
|
||||||
|
@ -266,7 +263,7 @@ xscale1pmu_handle_irq(int irq_num, void *dev)
|
||||||
continue;
|
continue;
|
||||||
|
|
||||||
if (perf_event_overflow(event, &data, regs))
|
if (perf_event_overflow(event, &data, regs))
|
||||||
armpmu->disable(hwc, idx);
|
cpu_pmu->disable(hwc, idx);
|
||||||
}
|
}
|
||||||
|
|
||||||
irq_work_run();
|
irq_work_run();
|
||||||
|
@ -284,6 +281,7 @@ static void
|
||||||
xscale1pmu_enable_event(struct hw_perf_event *hwc, int idx)
|
xscale1pmu_enable_event(struct hw_perf_event *hwc, int idx)
|
||||||
{
|
{
|
||||||
unsigned long val, mask, evt, flags;
|
unsigned long val, mask, evt, flags;
|
||||||
|
struct pmu_hw_events *events = cpu_pmu->get_hw_events();
|
||||||
|
|
||||||
switch (idx) {
|
switch (idx) {
|
||||||
case XSCALE_CYCLE_COUNTER:
|
case XSCALE_CYCLE_COUNTER:
|
||||||
|
@ -305,18 +303,19 @@ xscale1pmu_enable_event(struct hw_perf_event *hwc, int idx)
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
raw_spin_lock_irqsave(&pmu_lock, flags);
|
raw_spin_lock_irqsave(&events->pmu_lock, flags);
|
||||||
val = xscale1pmu_read_pmnc();
|
val = xscale1pmu_read_pmnc();
|
||||||
val &= ~mask;
|
val &= ~mask;
|
||||||
val |= evt;
|
val |= evt;
|
||||||
xscale1pmu_write_pmnc(val);
|
xscale1pmu_write_pmnc(val);
|
||||||
raw_spin_unlock_irqrestore(&pmu_lock, flags);
|
raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
|
||||||
}
|
}
|
||||||
|
|
||||||
static void
|
static void
|
||||||
xscale1pmu_disable_event(struct hw_perf_event *hwc, int idx)
|
xscale1pmu_disable_event(struct hw_perf_event *hwc, int idx)
|
||||||
{
|
{
|
||||||
unsigned long val, mask, evt, flags;
|
unsigned long val, mask, evt, flags;
|
||||||
|
struct pmu_hw_events *events = cpu_pmu->get_hw_events();
|
||||||
|
|
||||||
switch (idx) {
|
switch (idx) {
|
||||||
case XSCALE_CYCLE_COUNTER:
|
case XSCALE_CYCLE_COUNTER:
|
||||||
|
@ -336,16 +335,16 @@ xscale1pmu_disable_event(struct hw_perf_event *hwc, int idx)
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
raw_spin_lock_irqsave(&pmu_lock, flags);
|
raw_spin_lock_irqsave(&events->pmu_lock, flags);
|
||||||
val = xscale1pmu_read_pmnc();
|
val = xscale1pmu_read_pmnc();
|
||||||
val &= ~mask;
|
val &= ~mask;
|
||||||
val |= evt;
|
val |= evt;
|
||||||
xscale1pmu_write_pmnc(val);
|
xscale1pmu_write_pmnc(val);
|
||||||
raw_spin_unlock_irqrestore(&pmu_lock, flags);
|
raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
|
||||||
}
|
}
|
||||||
|
|
||||||
static int
|
static int
|
||||||
xscale1pmu_get_event_idx(struct cpu_hw_events *cpuc,
|
xscale1pmu_get_event_idx(struct pmu_hw_events *cpuc,
|
||||||
struct hw_perf_event *event)
|
struct hw_perf_event *event)
|
||||||
{
|
{
|
||||||
if (XSCALE_PERFCTR_CCNT == event->config_base) {
|
if (XSCALE_PERFCTR_CCNT == event->config_base) {
|
||||||
|
@ -368,24 +367,26 @@ static void
|
||||||
xscale1pmu_start(void)
|
xscale1pmu_start(void)
|
||||||
{
|
{
|
||||||
unsigned long flags, val;
|
unsigned long flags, val;
|
||||||
|
struct pmu_hw_events *events = cpu_pmu->get_hw_events();
|
||||||
|
|
||||||
raw_spin_lock_irqsave(&pmu_lock, flags);
|
raw_spin_lock_irqsave(&events->pmu_lock, flags);
|
||||||
val = xscale1pmu_read_pmnc();
|
val = xscale1pmu_read_pmnc();
|
||||||
val |= XSCALE_PMU_ENABLE;
|
val |= XSCALE_PMU_ENABLE;
|
||||||
xscale1pmu_write_pmnc(val);
|
xscale1pmu_write_pmnc(val);
|
||||||
raw_spin_unlock_irqrestore(&pmu_lock, flags);
|
raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
|
||||||
}
|
}
|
||||||
|
|
||||||
static void
|
static void
|
||||||
xscale1pmu_stop(void)
|
xscale1pmu_stop(void)
|
||||||
{
|
{
|
||||||
unsigned long flags, val;
|
unsigned long flags, val;
|
||||||
|
struct pmu_hw_events *events = cpu_pmu->get_hw_events();
|
||||||
|
|
||||||
raw_spin_lock_irqsave(&pmu_lock, flags);
|
raw_spin_lock_irqsave(&events->pmu_lock, flags);
|
||||||
val = xscale1pmu_read_pmnc();
|
val = xscale1pmu_read_pmnc();
|
||||||
val &= ~XSCALE_PMU_ENABLE;
|
val &= ~XSCALE_PMU_ENABLE;
|
||||||
xscale1pmu_write_pmnc(val);
|
xscale1pmu_write_pmnc(val);
|
||||||
raw_spin_unlock_irqrestore(&pmu_lock, flags);
|
raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline u32
|
static inline u32
|
||||||
|
@ -424,7 +425,13 @@ xscale1pmu_write_counter(int counter, u32 val)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
static const struct arm_pmu xscale1pmu = {
|
static int xscale_map_event(struct perf_event *event)
|
||||||
|
{
|
||||||
|
return map_cpu_event(event, &xscale_perf_map,
|
||||||
|
&xscale_perf_cache_map, 0xFF);
|
||||||
|
}
|
||||||
|
|
||||||
|
static struct arm_pmu xscale1pmu = {
|
||||||
.id = ARM_PERF_PMU_ID_XSCALE1,
|
.id = ARM_PERF_PMU_ID_XSCALE1,
|
||||||
.name = "xscale1",
|
.name = "xscale1",
|
||||||
.handle_irq = xscale1pmu_handle_irq,
|
.handle_irq = xscale1pmu_handle_irq,
|
||||||
|
@ -435,14 +442,12 @@ static const struct arm_pmu xscale1pmu = {
|
||||||
.get_event_idx = xscale1pmu_get_event_idx,
|
.get_event_idx = xscale1pmu_get_event_idx,
|
||||||
.start = xscale1pmu_start,
|
.start = xscale1pmu_start,
|
||||||
.stop = xscale1pmu_stop,
|
.stop = xscale1pmu_stop,
|
||||||
.cache_map = &xscale_perf_cache_map,
|
.map_event = xscale_map_event,
|
||||||
.event_map = &xscale_perf_map,
|
|
||||||
.raw_event_mask = 0xFF,
|
|
||||||
.num_events = 3,
|
.num_events = 3,
|
||||||
.max_period = (1LLU << 32) - 1,
|
.max_period = (1LLU << 32) - 1,
|
||||||
};
|
};
|
||||||
|
|
||||||
static const struct arm_pmu *__init xscale1pmu_init(void)
|
static struct arm_pmu *__init xscale1pmu_init(void)
|
||||||
{
|
{
|
||||||
return &xscale1pmu;
|
return &xscale1pmu;
|
||||||
}
|
}
|
||||||
|
@ -560,7 +565,7 @@ xscale2pmu_handle_irq(int irq_num, void *dev)
|
||||||
{
|
{
|
||||||
unsigned long pmnc, of_flags;
|
unsigned long pmnc, of_flags;
|
||||||
struct perf_sample_data data;
|
struct perf_sample_data data;
|
||||||
struct cpu_hw_events *cpuc;
|
struct pmu_hw_events *cpuc;
|
||||||
struct pt_regs *regs;
|
struct pt_regs *regs;
|
||||||
int idx;
|
int idx;
|
||||||
|
|
||||||
|
@ -581,13 +586,10 @@ xscale2pmu_handle_irq(int irq_num, void *dev)
|
||||||
perf_sample_data_init(&data, 0);
|
perf_sample_data_init(&data, 0);
|
||||||
|
|
||||||
cpuc = &__get_cpu_var(cpu_hw_events);
|
cpuc = &__get_cpu_var(cpu_hw_events);
|
||||||
for (idx = 0; idx <= armpmu->num_events; ++idx) {
|
for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
|
||||||
struct perf_event *event = cpuc->events[idx];
|
struct perf_event *event = cpuc->events[idx];
|
||||||
struct hw_perf_event *hwc;
|
struct hw_perf_event *hwc;
|
||||||
|
|
||||||
if (!test_bit(idx, cpuc->active_mask))
|
|
||||||
continue;
|
|
||||||
|
|
||||||
if (!xscale2_pmnc_counter_has_overflowed(pmnc, idx))
|
if (!xscale2_pmnc_counter_has_overflowed(pmnc, idx))
|
||||||
continue;
|
continue;
|
||||||
|
|
||||||
|
@ -598,7 +600,7 @@ xscale2pmu_handle_irq(int irq_num, void *dev)
|
||||||
continue;
|
continue;
|
||||||
|
|
||||||
if (perf_event_overflow(event, &data, regs))
|
if (perf_event_overflow(event, &data, regs))
|
||||||
armpmu->disable(hwc, idx);
|
cpu_pmu->disable(hwc, idx);
|
||||||
}
|
}
|
||||||
|
|
||||||
irq_work_run();
|
irq_work_run();
|
||||||
|
@ -616,6 +618,7 @@ static void
|
||||||
xscale2pmu_enable_event(struct hw_perf_event *hwc, int idx)
|
xscale2pmu_enable_event(struct hw_perf_event *hwc, int idx)
|
||||||
{
|
{
|
||||||
unsigned long flags, ien, evtsel;
|
unsigned long flags, ien, evtsel;
|
||||||
|
struct pmu_hw_events *events = cpu_pmu->get_hw_events();
|
||||||
|
|
||||||
ien = xscale2pmu_read_int_enable();
|
ien = xscale2pmu_read_int_enable();
|
||||||
evtsel = xscale2pmu_read_event_select();
|
evtsel = xscale2pmu_read_event_select();
|
||||||
|
@ -649,16 +652,17 @@ xscale2pmu_enable_event(struct hw_perf_event *hwc, int idx)
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
raw_spin_lock_irqsave(&pmu_lock, flags);
|
raw_spin_lock_irqsave(&events->pmu_lock, flags);
|
||||||
xscale2pmu_write_event_select(evtsel);
|
xscale2pmu_write_event_select(evtsel);
|
||||||
xscale2pmu_write_int_enable(ien);
|
xscale2pmu_write_int_enable(ien);
|
||||||
raw_spin_unlock_irqrestore(&pmu_lock, flags);
|
raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
|
||||||
}
|
}
|
||||||
|
|
||||||
static void
|
static void
|
||||||
xscale2pmu_disable_event(struct hw_perf_event *hwc, int idx)
|
xscale2pmu_disable_event(struct hw_perf_event *hwc, int idx)
|
||||||
{
|
{
|
||||||
unsigned long flags, ien, evtsel;
|
unsigned long flags, ien, evtsel;
|
||||||
|
struct pmu_hw_events *events = cpu_pmu->get_hw_events();
|
||||||
|
|
||||||
ien = xscale2pmu_read_int_enable();
|
ien = xscale2pmu_read_int_enable();
|
||||||
evtsel = xscale2pmu_read_event_select();
|
evtsel = xscale2pmu_read_event_select();
|
||||||
|
@ -692,14 +696,14 @@ xscale2pmu_disable_event(struct hw_perf_event *hwc, int idx)
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
raw_spin_lock_irqsave(&pmu_lock, flags);
|
raw_spin_lock_irqsave(&events->pmu_lock, flags);
|
||||||
xscale2pmu_write_event_select(evtsel);
|
xscale2pmu_write_event_select(evtsel);
|
||||||
xscale2pmu_write_int_enable(ien);
|
xscale2pmu_write_int_enable(ien);
|
||||||
raw_spin_unlock_irqrestore(&pmu_lock, flags);
|
raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
|
||||||
}
|
}
|
||||||
|
|
||||||
static int
|
static int
|
||||||
xscale2pmu_get_event_idx(struct cpu_hw_events *cpuc,
|
xscale2pmu_get_event_idx(struct pmu_hw_events *cpuc,
|
||||||
struct hw_perf_event *event)
|
struct hw_perf_event *event)
|
||||||
{
|
{
|
||||||
int idx = xscale1pmu_get_event_idx(cpuc, event);
|
int idx = xscale1pmu_get_event_idx(cpuc, event);
|
||||||
|
@ -718,24 +722,26 @@ static void
|
||||||
xscale2pmu_start(void)
|
xscale2pmu_start(void)
|
||||||
{
|
{
|
||||||
unsigned long flags, val;
|
unsigned long flags, val;
|
||||||
|
struct pmu_hw_events *events = cpu_pmu->get_hw_events();
|
||||||
|
|
||||||
raw_spin_lock_irqsave(&pmu_lock, flags);
|
raw_spin_lock_irqsave(&events->pmu_lock, flags);
|
||||||
val = xscale2pmu_read_pmnc() & ~XSCALE_PMU_CNT64;
|
val = xscale2pmu_read_pmnc() & ~XSCALE_PMU_CNT64;
|
||||||
val |= XSCALE_PMU_ENABLE;
|
val |= XSCALE_PMU_ENABLE;
|
||||||
xscale2pmu_write_pmnc(val);
|
xscale2pmu_write_pmnc(val);
|
||||||
raw_spin_unlock_irqrestore(&pmu_lock, flags);
|
raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
|
||||||
}
|
}
|
||||||
|
|
||||||
static void
|
static void
|
||||||
xscale2pmu_stop(void)
|
xscale2pmu_stop(void)
|
||||||
{
|
{
|
||||||
unsigned long flags, val;
|
unsigned long flags, val;
|
||||||
|
struct pmu_hw_events *events = cpu_pmu->get_hw_events();
|
||||||
|
|
||||||
raw_spin_lock_irqsave(&pmu_lock, flags);
|
raw_spin_lock_irqsave(&events->pmu_lock, flags);
|
||||||
val = xscale2pmu_read_pmnc();
|
val = xscale2pmu_read_pmnc();
|
||||||
val &= ~XSCALE_PMU_ENABLE;
|
val &= ~XSCALE_PMU_ENABLE;
|
||||||
xscale2pmu_write_pmnc(val);
|
xscale2pmu_write_pmnc(val);
|
||||||
raw_spin_unlock_irqrestore(&pmu_lock, flags);
|
raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline u32
|
static inline u32
|
||||||
|
@ -786,7 +792,7 @@ xscale2pmu_write_counter(int counter, u32 val)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
static const struct arm_pmu xscale2pmu = {
|
static struct arm_pmu xscale2pmu = {
|
||||||
.id = ARM_PERF_PMU_ID_XSCALE2,
|
.id = ARM_PERF_PMU_ID_XSCALE2,
|
||||||
.name = "xscale2",
|
.name = "xscale2",
|
||||||
.handle_irq = xscale2pmu_handle_irq,
|
.handle_irq = xscale2pmu_handle_irq,
|
||||||
|
@ -797,24 +803,22 @@ static const struct arm_pmu xscale2pmu = {
|
||||||
.get_event_idx = xscale2pmu_get_event_idx,
|
.get_event_idx = xscale2pmu_get_event_idx,
|
||||||
.start = xscale2pmu_start,
|
.start = xscale2pmu_start,
|
||||||
.stop = xscale2pmu_stop,
|
.stop = xscale2pmu_stop,
|
||||||
.cache_map = &xscale_perf_cache_map,
|
.map_event = xscale_map_event,
|
||||||
.event_map = &xscale_perf_map,
|
|
||||||
.raw_event_mask = 0xFF,
|
|
||||||
.num_events = 5,
|
.num_events = 5,
|
||||||
.max_period = (1LLU << 32) - 1,
|
.max_period = (1LLU << 32) - 1,
|
||||||
};
|
};
|
||||||
|
|
||||||
static const struct arm_pmu *__init xscale2pmu_init(void)
|
static struct arm_pmu *__init xscale2pmu_init(void)
|
||||||
{
|
{
|
||||||
return &xscale2pmu;
|
return &xscale2pmu;
|
||||||
}
|
}
|
||||||
#else
|
#else
|
||||||
static const struct arm_pmu *__init xscale1pmu_init(void)
|
static struct arm_pmu *__init xscale1pmu_init(void)
|
||||||
{
|
{
|
||||||
return NULL;
|
return NULL;
|
||||||
}
|
}
|
||||||
|
|
||||||
static const struct arm_pmu *__init xscale2pmu_init(void)
|
static struct arm_pmu *__init xscale2pmu_init(void)
|
||||||
{
|
{
|
||||||
return NULL;
|
return NULL;
|
||||||
}
|
}
|
||||||
|
|
|
@@ -10,192 +10,26 @@
  *
  */

-#define pr_fmt(fmt) "PMU: " fmt
-
-#include <linux/cpumask.h>
 #include <linux/err.h>
-#include <linux/interrupt.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
-#include <linux/of_device.h>
-#include <linux/platform_device.h>

 #include <asm/pmu.h>

-static volatile long pmu_lock;
+/*
+ * PMU locking to ensure mutual exclusion between different subsystems.
+ */
+static unsigned long pmu_lock[BITS_TO_LONGS(ARM_NUM_PMU_DEVICES)];

-static struct platform_device *pmu_devices[ARM_NUM_PMU_DEVICES];
-
-static int __devinit pmu_register(struct platform_device *pdev,
-					enum arm_pmu_type type)
+int
+reserve_pmu(enum arm_pmu_type type)
 {
-	if (type < 0 || type >= ARM_NUM_PMU_DEVICES) {
-		pr_warning("received registration request for unknown "
-				"device %d\n", type);
-		return -EINVAL;
-	}
-
-	if (pmu_devices[type]) {
-		pr_warning("rejecting duplicate registration of PMU device "
-			"type %d.", type);
-		return -ENOSPC;
-	}
-
-	pr_info("registered new PMU device of type %d\n", type);
-	pmu_devices[type] = pdev;
-	return 0;
-}
-
-#define OF_MATCH_PMU(_name, _type) {	\
-	.compatible = _name,		\
-	.data = (void *)_type,		\
-}
-
-#define OF_MATCH_CPU(name)	OF_MATCH_PMU(name, ARM_PMU_DEVICE_CPU)
-
-static struct of_device_id armpmu_of_device_ids[] = {
-	OF_MATCH_CPU("arm,cortex-a9-pmu"),
-	OF_MATCH_CPU("arm,cortex-a8-pmu"),
-	OF_MATCH_CPU("arm,arm1136-pmu"),
-	OF_MATCH_CPU("arm,arm1176-pmu"),
-	{},
-};
-
-#define PLAT_MATCH_PMU(_name, _type) {	\
-	.name		= _name,	\
-	.driver_data	= _type,	\
-}
-
-#define PLAT_MATCH_CPU(_name)	PLAT_MATCH_PMU(_name, ARM_PMU_DEVICE_CPU)
-
-static struct platform_device_id armpmu_plat_device_ids[] = {
-	PLAT_MATCH_CPU("arm-pmu"),
-	{},
-};
-
-enum arm_pmu_type armpmu_device_type(struct platform_device *pdev)
-{
-	const struct of_device_id	*of_id;
-	const struct platform_device_id *pdev_id;
-
-	/* provided by of_device_id table */
-	if (pdev->dev.of_node) {
-		of_id = of_match_device(armpmu_of_device_ids, &pdev->dev);
-		BUG_ON(!of_id);
-		return (enum arm_pmu_type)of_id->data;
-	}
-
-	/* Provided by platform_device_id table */
-	pdev_id = platform_get_device_id(pdev);
-	BUG_ON(!pdev_id);
-	return pdev_id->driver_data;
-}
-
-static int __devinit armpmu_device_probe(struct platform_device *pdev)
-{
-	return pmu_register(pdev, armpmu_device_type(pdev));
-}
-
-static struct platform_driver armpmu_driver = {
-	.driver		= {
-		.name	= "arm-pmu",
-		.of_match_table = armpmu_of_device_ids,
-	},
-	.probe		= armpmu_device_probe,
-	.id_table	= armpmu_plat_device_ids,
-};
-
-static int __init register_pmu_driver(void)
-{
-	return platform_driver_register(&armpmu_driver);
-}
-device_initcall(register_pmu_driver);
-
-struct platform_device *
-reserve_pmu(enum arm_pmu_type device)
-{
-	struct platform_device *pdev;
-
-	if (test_and_set_bit_lock(device, &pmu_lock)) {
-		pdev = ERR_PTR(-EBUSY);
-	} else if (pmu_devices[device] == NULL) {
-		clear_bit_unlock(device, &pmu_lock);
-		pdev = ERR_PTR(-ENODEV);
-	} else {
-		pdev = pmu_devices[device];
-	}
-
-	return pdev;
+	return test_and_set_bit_lock(type, pmu_lock) ? -EBUSY : 0;
 }
 EXPORT_SYMBOL_GPL(reserve_pmu);

-int
-release_pmu(enum arm_pmu_type device)
+void
+release_pmu(enum arm_pmu_type type)
 {
-	if (WARN_ON(!pmu_devices[device]))
-		return -EINVAL;
-	clear_bit_unlock(device, &pmu_lock);
-	return 0;
+	clear_bit_unlock(type, pmu_lock);
 }
 EXPORT_SYMBOL_GPL(release_pmu);
-
-static int
-set_irq_affinity(int irq,
-		 unsigned int cpu)
-{
-#ifdef CONFIG_SMP
-	int err = irq_set_affinity(irq, cpumask_of(cpu));
-	if (err)
-		pr_warning("unable to set irq affinity (irq=%d, cpu=%u)\n",
-			   irq, cpu);
-	return err;
-#else
-	return -EINVAL;
-#endif
-}
-
-static int
-init_cpu_pmu(void)
-{
-	int i, irqs, err = 0;
-	struct platform_device *pdev = pmu_devices[ARM_PMU_DEVICE_CPU];
-
-	if (!pdev)
-		return -ENODEV;
-
-	irqs = pdev->num_resources;
-
-	/*
-	 * If we have a single PMU interrupt that we can't shift, assume that
-	 * we're running on a uniprocessor machine and continue.
-	 */
-	if (irqs == 1 && !irq_can_set_affinity(platform_get_irq(pdev, 0)))
-		return 0;
-
-	for (i = 0; i < irqs; ++i) {
-		err = set_irq_affinity(platform_get_irq(pdev, i), i);
-		if (err)
-			break;
-	}
-
-	return err;
-}
-
-int
-init_pmu(enum arm_pmu_type device)
-{
-	int err = 0;
-
-	switch (device) {
-	case ARM_PMU_DEVICE_CPU:
-		err = init_cpu_pmu();
-		break;
-	default:
-		pr_warning("attempt to initialise unknown device %d\n",
-			   device);
-		err = -EINVAL;
-	}
-
-	return err;
-}
-EXPORT_SYMBOL_GPL(init_pmu);
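The pmu.c rewrite above collapses PMU reservation to one lock bit per PMU type. As a usage illustration only — a minimal, hypothetical caller sketch; reserve_pmu(), release_pmu() and ARM_PMU_DEVICE_CPU come from the diff above, everything else is invented for the example:

/* Hypothetical sketch: claiming and releasing the CPU PMU with the new API. */
static int example_claim_cpu_pmu(void)
{
	int err = reserve_pmu(ARM_PMU_DEVICE_CPU);	/* test_and_set_bit_lock() */

	if (err)
		return err;	/* -EBUSY: another subsystem already holds the PMU */

	/* ... program counters, handle overflows, collect samples ... */

	release_pmu(ARM_PMU_DEVICE_CPU);	/* clear_bit_unlock() pairs with it */
	return 0;
}

With the bit array sized by BITS_TO_LONGS(ARM_NUM_PMU_DEVICES), contention shows up as an immediate -EBUSY rather than blocking, which is all the mutual exclusion the old pmu_devices[] bookkeeping was really providing.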
@@ -57,7 +57,8 @@ relocate_new_kernel:
 	mov r0,#0
 	ldr r1,kexec_mach_type
 	ldr r2,kexec_boot_atags
-	mov pc,lr
+ ARM(	mov pc, lr	)
+ THUMB(	bx lr		)

 	.align

@@ -280,18 +280,19 @@ static void __init cacheid_init(void)
 	if (arch >= CPU_ARCH_ARMv6) {
 		if ((cachetype & (7 << 29)) == 4 << 29) {
 			/* ARMv7 register format */
+			arch = CPU_ARCH_ARMv7;
 			cacheid = CACHEID_VIPT_NONALIASING;
 			if ((cachetype & (3 << 14)) == 1 << 14)
 				cacheid |= CACHEID_ASID_TAGGED;
-			else if (cpu_has_aliasing_icache(CPU_ARCH_ARMv7))
-				cacheid |= CACHEID_VIPT_I_ALIASING;
-		} else if (cachetype & (1 << 23)) {
-			cacheid = CACHEID_VIPT_ALIASING;
 		} else {
-			cacheid = CACHEID_VIPT_NONALIASING;
-			if (cpu_has_aliasing_icache(CPU_ARCH_ARMv6))
-				cacheid |= CACHEID_VIPT_I_ALIASING;
+			arch = CPU_ARCH_ARMv6;
+			if (cachetype & (1 << 23))
+				cacheid = CACHEID_VIPT_ALIASING;
+			else
+				cacheid = CACHEID_VIPT_NONALIASING;
 		}
+		if (cpu_has_aliasing_icache(arch))
+			cacheid |= CACHEID_VIPT_I_ALIASING;
 	} else {
 		cacheid = CACHEID_VIVT;
 	}

@@ -137,8 +137,8 @@ void __cpuinit twd_timer_setup(struct clock_event_device *clk)
 	clk->max_delta_ns = clockevent_delta2ns(0xffffffff, clk);
 	clk->min_delta_ns = clockevent_delta2ns(0xf, clk);

+	clockevents_register_device(clk);
+
 	/* Make sure our local interrupt controller has this enabled */
 	gic_enable_ppi(clk->irq);
-
-	clockevents_register_device(clk);
 }

@@ -157,7 +157,7 @@ static struct clk_lookup periph_clocks_lookups[] = {
 	CLKDEV_CON_DEV_ID("spi_clk", "atmel_spi.1", &spi1_clk),
 	CLKDEV_CON_DEV_ID("t0_clk", "atmel_tcb.0", &tc0_clk),
 	CLKDEV_CON_DEV_ID("t1_clk", "atmel_tcb.0", &tc1_clk),
-	CLKDEV_CON_DEV_ID("t2_clk", "atmel_tcb.0", &tc1_clk),
+	CLKDEV_CON_DEV_ID("t2_clk", "atmel_tcb.0", &tc2_clk),
 	CLKDEV_CON_DEV_ID("pclk", "ssc.0", &ssc0_clk),
 	CLKDEV_CON_DEV_ID("pclk", "ssc.1", &ssc1_clk),
 	CLKDEV_CON_DEV_ID("pclk", "ssc.2", &ssc2_clk),

@@ -6,7 +6,7 @@
  * TS72xx memory map:
  *
  * virt		phys		size
- * febff000	22000000	4K	model number register
+ * febff000	22000000	4K	model number register (bits 0-2)
 * febfe000	22400000	4K	options register
 * febfd000	22800000	4K	options register #2
 * febf9000	10800000	4K	TS-5620 RTC index register

@@ -20,6 +20,9 @@
 #define TS72XX_MODEL_TS7200		0x00
 #define TS72XX_MODEL_TS7250		0x01
 #define TS72XX_MODEL_TS7260		0x02
+#define TS72XX_MODEL_TS7300		0x03
+#define TS72XX_MODEL_TS7400		0x04
+#define TS72XX_MODEL_MASK		0x07

 #define TS72XX_OPTIONS_PHYS_BASE	0x22400000

@@ -51,19 +54,34 @@

 #ifndef __ASSEMBLY__

+static inline int ts72xx_model(void)
+{
+	return __raw_readb(TS72XX_MODEL_VIRT_BASE) & TS72XX_MODEL_MASK;
+}
+
 static inline int board_is_ts7200(void)
 {
-	return __raw_readb(TS72XX_MODEL_VIRT_BASE) == TS72XX_MODEL_TS7200;
+	return ts72xx_model() == TS72XX_MODEL_TS7200;
 }

 static inline int board_is_ts7250(void)
 {
-	return __raw_readb(TS72XX_MODEL_VIRT_BASE) == TS72XX_MODEL_TS7250;
+	return ts72xx_model() == TS72XX_MODEL_TS7250;
 }

 static inline int board_is_ts7260(void)
 {
-	return __raw_readb(TS72XX_MODEL_VIRT_BASE) == TS72XX_MODEL_TS7260;
+	return ts72xx_model() == TS72XX_MODEL_TS7260;
+}
+
+static inline int board_is_ts7300(void)
+{
+	return ts72xx_model() == TS72XX_MODEL_TS7300;
+}
+
+static inline int board_is_ts7400(void)
+{
+	return ts72xx_model() == TS72XX_MODEL_TS7400;
 }

 static inline int is_max197_installed(void)

@@ -520,7 +520,7 @@ static struct clk init_clocks_off[] = {
 		.ctrlbit	= (1 << 21),
 	}, {
 		.name		= "ac97",
-		.id		= -1,
+		.devname	= "samsung-ac97",
 		.enable		= exynos4_clk_ip_peril_ctrl,
 		.ctrlbit	= (1 << 27),
 	}, {

@@ -24,12 +24,13 @@
 #include <plat/exynos4.h>
 #include <plat/adc-core.h>
 #include <plat/sdhci.h>
-#include <plat/devs.h>
 #include <plat/fb-core.h>
 #include <plat/fimc-core.h>
 #include <plat/iic-core.h>
+#include <plat/reset.h>

 #include <mach/regs-irq.h>
+#include <mach/regs-pmu.h>

 extern int combiner_init(unsigned int combiner_nr, void __iomem *base,
 			 unsigned int irq_start);

@@ -128,6 +129,11 @@ static void exynos4_idle(void)
 	local_irq_enable();
 }

+static void exynos4_sw_reset(void)
+{
+	__raw_writel(0x1, S5P_SWRESET);
+}
+
 /*
  * exynos4_map_io
  *

@@ -241,5 +247,8 @@ int __init exynos4_init(void)
 	/* set idle function */
 	pm_idle = exynos4_idle;

+	/* set sw_reset function */
+	s5p_reset_hook = exynos4_sw_reset;
+
 	return sysdev_register(&exynos4_sysdev);
 }

@@ -80,9 +80,8 @@
 #define IRQ_HSMMC3		IRQ_SPI(76)
 #define IRQ_DWMCI		IRQ_SPI(77)

-#define IRQ_MIPICSI0		IRQ_SPI(78)
-
-#define IRQ_MIPICSI1		IRQ_SPI(80)
+#define IRQ_MIPI_CSIS0		IRQ_SPI(78)
+#define IRQ_MIPI_CSIS1		IRQ_SPI(80)

 #define IRQ_ONENAND_AUDI	IRQ_SPI(82)
 #define IRQ_ROTATOR		IRQ_SPI(83)

@@ -29,6 +29,8 @@
 #define S5P_USE_STANDBY_WFE1		(1 << 25)
 #define S5P_USE_MASK			((0x3 << 16) | (0x3 << 24))

+#define S5P_SWRESET			S5P_PMUREG(0x0400)
+
 #define S5P_WAKEUP_STAT			S5P_PMUREG(0x0600)
 #define S5P_EINT_WAKEUP_MASK		S5P_PMUREG(0x0604)
 #define S5P_WAKEUP_MASK			S5P_PMUREG(0x0608)

@@ -23,6 +23,8 @@

 #include <mach/regs-gpio.h>

+#include <asm/mach/irq.h>
+
 static DEFINE_SPINLOCK(eint_lock);

 static unsigned int eint0_15_data[16];

@@ -184,8 +186,11 @@ static inline void exynos4_irq_demux_eint(unsigned int start)

 static void exynos4_irq_demux_eint16_31(unsigned int irq, struct irq_desc *desc)
 {
+	struct irq_chip *chip = irq_get_chip(irq);
+	chained_irq_enter(chip, desc);
 	exynos4_irq_demux_eint(IRQ_EINT(16));
 	exynos4_irq_demux_eint(IRQ_EINT(24));
+	chained_irq_exit(chip, desc);
 }

 static void exynos4_irq_eint0_15(unsigned int irq, struct irq_desc *desc)

@@ -193,6 +198,7 @@ static void exynos4_irq_eint0_15(unsigned int irq, struct irq_desc *desc)
 	u32 *irq_data = irq_get_handler_data(irq);
 	struct irq_chip *chip = irq_get_chip(irq);

+	chained_irq_enter(chip, desc);
 	chip->irq_mask(&desc->irq_data);

 	if (chip->irq_ack)

@@ -201,6 +207,7 @@ static void exynos4_irq_eint0_15(unsigned int irq, struct irq_desc *desc)
 	generic_handle_irq(*irq_data);

 	chip->irq_unmask(&desc->irq_data);
+	chained_irq_exit(chip, desc);
 }

 int __init exynos4_init_irq_eint(void)
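The exynos4 demux handlers above are bracketed with chained_irq_enter()/chained_irq_exit() so the parent interrupt is masked/acked on entry and unmasked/eoi'd on exit, whatever flow type the parent chip uses. A minimal sketch of that general pattern — the handler name, pending-read helper and IRQ base are hypothetical; chained_irq_enter()/chained_irq_exit(), irq_get_chip() and generic_handle_irq() are the kernel helpers the diff itself uses:

#include <linux/irq.h>
#include <asm/mach/irq.h>	/* chained_irq_enter()/chained_irq_exit() */

#define EXAMPLE_IRQ_BASE	64	/* hypothetical first demuxed IRQ number */

static unsigned long example_read_pending(void)
{
	return 0;	/* placeholder: read the cascaded controller's pending register */
}

/* Hypothetical chained (cascaded) demux handler following the pattern above. */
static void example_demux_handler(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = irq_get_chip(irq);
	unsigned long pending;

	chained_irq_enter(chip, desc);		/* ack/mask the parent as its chip requires */
	pending = example_read_pending();
	while (pending) {
		unsigned int bit = __ffs(pending);
		generic_handle_irq(EXAMPLE_IRQ_BASE + bit);
		pending &= ~(1UL << bit);
	}
	chained_irq_exit(chip, desc);		/* unmask/eoi the parent */
}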
@@ -79,7 +79,7 @@ static struct s3c2410_uartcfg universal_uartcfgs[] __initdata = {
 };

 static struct regulator_consumer_supply max8952_consumer =
-	REGULATOR_SUPPLY("vddarm", NULL);
+	REGULATOR_SUPPLY("vdd_arm", NULL);

 static struct max8952_platform_data universal_max8952_pdata __initdata = {
 	.gpio_vid0	= EXYNOS4_GPX0(3),

@@ -105,7 +105,7 @@ static struct max8952_platform_data universal_max8952_pdata __initdata = {
 };

 static struct regulator_consumer_supply lp3974_buck1_consumer =
-	REGULATOR_SUPPLY("vddint", NULL);
+	REGULATOR_SUPPLY("vdd_int", NULL);

 static struct regulator_consumer_supply lp3974_buck2_consumer =
 	REGULATOR_SUPPLY("vddg3d", NULL);

@@ -82,7 +82,7 @@ static int exynos4_usb_phy1_init(struct platform_device *pdev)

 	rstcon &= ~(HOST_LINK_PORT_SWRST_MASK | PHY1_SWRST_MASK);
 	writel(rstcon, EXYNOS4_RSTCON);
-	udelay(50);
+	udelay(80);

 	clk_disable(otg_clk);
 	clk_put(otg_clk);

@@ -62,6 +62,7 @@ config ARCH_EBSA285_HOST
 config ARCH_NETWINDER
 	bool "NetWinder"
 	select CLKSRC_I8253
+	select CLKEVT_I8253
 	select FOOTBRIDGE_HOST
 	select ISA
 	select ISA_DMA

@@ -18,6 +18,7 @@
 #include <linux/irq.h>
 #include <linux/io.h>
 #include <linux/spinlock.h>
+#include <video/vga.h>

 #include <asm/irq.h>
 #include <asm/system.h>

@@ -310,7 +310,7 @@ static struct sys_timer eukrea_cpuimx27_timer = {
 	.init = eukrea_cpuimx27_timer_init,
 };

-MACHINE_START(CPUIMX27, "EUKREA CPUIMX27")
+MACHINE_START(EUKREA_CPUIMX27, "EUKREA CPUIMX27")
 	.atag_offset = 0x100,
 	.map_io = mx27_map_io,
 	.init_early = imx27_init_early,

@@ -192,7 +192,7 @@ struct sys_timer eukrea_cpuimx35_timer = {
 	.init	= eukrea_cpuimx35_timer_init,
 };

-MACHINE_START(EUKREA_CPUIMX35, "Eukrea CPUIMX35")
+MACHINE_START(EUKREA_CPUIMX35SD, "Eukrea CPUIMX35")
 	/* Maintainer: Eukrea Electromatique */
 	.atag_offset = 0x100,
 	.map_io = mx35_map_io,

@@ -161,7 +161,7 @@ static struct sys_timer eukrea_cpuimx25_timer = {
 	.init   = eukrea_cpuimx25_timer_init,
 };

-MACHINE_START(EUKREA_CPUIMX25, "Eukrea CPUIMX25")
+MACHINE_START(EUKREA_CPUIMX25SD, "Eukrea CPUIMX25")
 	/* Maintainer: Eukrea Electromatique */
 	.atag_offset = 0x100,
 	.map_io = mx25_map_io,

@@ -77,7 +77,7 @@ static int __init dns323_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
 	/*
 	 * Check for devices with hard-wired IRQs.
 	 */
-	irq = orion5x_pci_map_irq(const dev, slot, pin);
+	irq = orion5x_pci_map_irq(dev, slot, pin);
 	if (irq != -1)
 		return irq;

@@ -14,6 +14,7 @@
 #include <linux/pci.h>
 #include <linux/slab.h>
 #include <linux/mbus.h>
+#include <video/vga.h>
 #include <asm/irq.h>
 #include <asm/mach/pci.h>
 #include <plat/pcie.h>

@@ -44,6 +44,7 @@ static inline void arch_reset(char mode, const char *cmd)
 	 */
 	if (realview_reset)
 		realview_reset(mode);
+	dsb();
 }

 #endif

@@ -16,6 +16,7 @@
 #include <linux/suspend.h>
 #include <linux/serial_core.h>
 #include <linux/io.h>
+#include <linux/gpio.h>

 #include <mach/map.h>
 #include <mach/irqs.h>

@@ -129,7 +129,7 @@ static int s5p64x0_alloc_gc(void)
 	}

 	ct = gc->chip_types;
-	ct->chip.irq_ack = irq_gc_ack;
+	ct->chip.irq_ack = irq_gc_ack_set_bit;
 	ct->chip.irq_mask = irq_gc_mask_set_bit;
 	ct->chip.irq_unmask = irq_gc_mask_clr_bit;
 	ct->chip.irq_set_type = s5p64x0_irq_eint_set_type;

@@ -88,7 +88,7 @@ static struct sleep_save s5pv210_core_save[] = {
 	SAVE_ITEM(S3C2410_TCNTO(0)),
 };

-void s5pv210_cpu_suspend(unsigned long arg)
+static int s5pv210_cpu_suspend(unsigned long arg)
 {
 	unsigned long tmp;

@@ -342,6 +342,7 @@ static struct platform_device mipidsi0_device = {
 static struct sh_mobile_sdhi_info sdhi0_info = {
 	.dma_slave_tx	= SHDMA_SLAVE_SDHI0_TX,
 	.dma_slave_rx	= SHDMA_SLAVE_SDHI0_RX,
+	.tmio_flags	= TMIO_MMC_HAS_IDLE_WAIT,
 	.tmio_caps	= MMC_CAP_SD_HIGHSPEED,
 	.tmio_ocr_mask	= MMC_VDD_27_28 | MMC_VDD_28_29,
 };

@@ -383,7 +384,7 @@ void ag5evm_sdhi1_set_pwr(struct platform_device *pdev, int state)
 }

 static struct sh_mobile_sdhi_info sh_sdhi1_info = {
-	.tmio_flags	= TMIO_MMC_WRPROTECT_DISABLE,
+	.tmio_flags	= TMIO_MMC_WRPROTECT_DISABLE | TMIO_MMC_HAS_IDLE_WAIT,
 	.tmio_caps	= MMC_CAP_NONREMOVABLE | MMC_CAP_SDIO_IRQ,
 	.tmio_ocr_mask	= MMC_VDD_32_33 | MMC_VDD_33_34,
 	.set_pwr	= ag5evm_sdhi1_set_pwr,

@@ -642,6 +642,8 @@ static struct usbhs_private usbhs0_private = {
 	},
 	.driver_param = {
 		.buswait_bwait = 4,
+		.d0_tx_id = SHDMA_SLAVE_USB0_TX,
+		.d1_rx_id = SHDMA_SLAVE_USB0_RX,
 	},
 },
 };

@@ -811,6 +813,8 @@ static struct usbhs_private usbhs1_private = {
 		.buswait_bwait = 4,
 		.pipe_type = usbhs1_pipe_cfg,
 		.pipe_size = ARRAY_SIZE(usbhs1_pipe_cfg),
+		.d0_tx_id = SHDMA_SLAVE_USB1_TX,
+		.d1_rx_id = SHDMA_SLAVE_USB1_RX,
 	},
 },
 };

@@ -503,16 +503,17 @@ static struct clk *late_main_clks[] = {
 	&sh7372_fsidivb_clk,
 };

-enum { MSTP001,
+enum { MSTP001, MSTP000,
 	MSTP131, MSTP130,
 	MSTP129, MSTP128, MSTP127, MSTP126, MSTP125,
 	MSTP118, MSTP117, MSTP116, MSTP113,
 	MSTP106, MSTP101, MSTP100,
 	MSTP223,
-	MSTP218, MSTP217, MSTP216,
-	MSTP207, MSTP206, MSTP204, MSTP203, MSTP202, MSTP201, MSTP200,
-	MSTP329, MSTP328, MSTP323, MSTP322, MSTP314, MSTP313, MSTP312,
-	MSTP423, MSTP415, MSTP413, MSTP411, MSTP410, MSTP406, MSTP403,
+	MSTP218, MSTP217, MSTP216, MSTP214, MSTP208, MSTP207,
+	MSTP206, MSTP205, MSTP204, MSTP203, MSTP202, MSTP201, MSTP200,
+	MSTP328, MSTP323, MSTP322, MSTP314, MSTP313, MSTP312,
+	MSTP423, MSTP415, MSTP413, MSTP411, MSTP410, MSTP407, MSTP406,
+	MSTP405, MSTP404, MSTP403, MSTP400,
 	MSTP_NR };

 #define MSTP(_parent, _reg, _bit, _flags) \

@@ -520,6 +521,7 @@ enum { MSTP001,

 static struct clk mstp_clks[MSTP_NR] = {
 	[MSTP001] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR0, 1, 0), /* IIC2 */
+	[MSTP000] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR0, 0, 0), /* MSIOF0 */
 	[MSTP131] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 31, 0), /* VEU3 */
 	[MSTP130] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 30, 0), /* VEU2 */
 	[MSTP129] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 29, 0), /* VEU1 */

@@ -538,14 +540,16 @@ static struct clk mstp_clks[MSTP_NR] = {
 	[MSTP218] = MSTP(&div4_clks[DIV4_HP], SMSTPCR2, 18, 0), /* DMAC1 */
 	[MSTP217] = MSTP(&div4_clks[DIV4_HP], SMSTPCR2, 17, 0), /* DMAC2 */
 	[MSTP216] = MSTP(&div4_clks[DIV4_HP], SMSTPCR2, 16, 0), /* DMAC3 */
+	[MSTP214] = MSTP(&div4_clks[DIV4_HP], SMSTPCR2, 14, 0), /* USBDMAC */
+	[MSTP208] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 8, 0), /* MSIOF1 */
 	[MSTP207] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 7, 0), /* SCIFA5 */
 	[MSTP206] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 6, 0), /* SCIFB */
+	[MSTP205] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 5, 0), /* MSIOF2 */
 	[MSTP204] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 4, 0), /* SCIFA0 */
 	[MSTP203] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 3, 0), /* SCIFA1 */
 	[MSTP202] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 2, 0), /* SCIFA2 */
 	[MSTP201] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 1, 0), /* SCIFA3 */
 	[MSTP200] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 0, 0), /* SCIFA4 */
-	[MSTP329] = MSTP(&r_clk, SMSTPCR3, 29, 0), /* CMT10 */
 	[MSTP328] = MSTP(&div6_clks[DIV6_SPU], SMSTPCR3, 28, 0), /* FSI2 */
 	[MSTP323] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR3, 23, 0), /* IIC1 */
 	[MSTP322] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR3, 22, 0), /* USB0 */

@@ -557,8 +561,12 @@ static struct clk mstp_clks[MSTP_NR] = {
 	[MSTP413] = MSTP(&pllc1_div2_clk, SMSTPCR4, 13, 0), /* HDMI */
 	[MSTP411] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR4, 11, 0), /* IIC3 */
 	[MSTP410] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR4, 10, 0), /* IIC4 */
+	[MSTP407] = MSTP(&div4_clks[DIV4_HP], SMSTPCR4, 7, 0), /* USB-DMAC1 */
 	[MSTP406] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR4, 6, 0), /* USB1 */
+	[MSTP405] = MSTP(&r_clk, SMSTPCR4, 5, 0), /* CMT4 */
+	[MSTP404] = MSTP(&r_clk, SMSTPCR4, 4, 0), /* CMT3 */
 	[MSTP403] = MSTP(&r_clk, SMSTPCR4, 3, 0), /* KEYSC */
+	[MSTP400] = MSTP(&r_clk, SMSTPCR4, 0, 0), /* CMT2 */
 };

 static struct clk_lookup lookups[] = {

@@ -609,6 +617,7 @@ static struct clk_lookup lookups[] = {

 	/* MSTP32 clocks */
 	CLKDEV_DEV_ID("i2c-sh_mobile.2", &mstp_clks[MSTP001]), /* IIC2 */
+	CLKDEV_DEV_ID("spi_sh_msiof.0", &mstp_clks[MSTP000]), /* MSIOF0 */
 	CLKDEV_DEV_ID("uio_pdrv_genirq.4", &mstp_clks[MSTP131]), /* VEU3 */
 	CLKDEV_DEV_ID("uio_pdrv_genirq.3", &mstp_clks[MSTP130]), /* VEU2 */
 	CLKDEV_DEV_ID("uio_pdrv_genirq.2", &mstp_clks[MSTP129]), /* VEU1 */

@@ -629,14 +638,16 @@ static struct clk_lookup lookups[] = {
 	CLKDEV_DEV_ID("sh-dma-engine.0", &mstp_clks[MSTP218]), /* DMAC1 */
 	CLKDEV_DEV_ID("sh-dma-engine.1", &mstp_clks[MSTP217]), /* DMAC2 */
 	CLKDEV_DEV_ID("sh-dma-engine.2", &mstp_clks[MSTP216]), /* DMAC3 */
+	CLKDEV_DEV_ID("sh-dma-engine.3", &mstp_clks[MSTP214]), /* USB-DMAC0 */
+	CLKDEV_DEV_ID("spi_sh_msiof.1", &mstp_clks[MSTP208]), /* MSIOF1 */
 	CLKDEV_DEV_ID("sh-sci.5", &mstp_clks[MSTP207]), /* SCIFA5 */
 	CLKDEV_DEV_ID("sh-sci.6", &mstp_clks[MSTP206]), /* SCIFB */
+	CLKDEV_DEV_ID("spi_sh_msiof.2", &mstp_clks[MSTP205]), /* MSIOF2 */
 	CLKDEV_DEV_ID("sh-sci.0", &mstp_clks[MSTP204]), /* SCIFA0 */
 	CLKDEV_DEV_ID("sh-sci.1", &mstp_clks[MSTP203]), /* SCIFA1 */
 	CLKDEV_DEV_ID("sh-sci.2", &mstp_clks[MSTP202]), /* SCIFA2 */
 	CLKDEV_DEV_ID("sh-sci.3", &mstp_clks[MSTP201]), /* SCIFA3 */
 	CLKDEV_DEV_ID("sh-sci.4", &mstp_clks[MSTP200]), /* SCIFA4 */
-	CLKDEV_DEV_ID("sh_cmt.10", &mstp_clks[MSTP329]), /* CMT10 */
 	CLKDEV_DEV_ID("sh_fsi2", &mstp_clks[MSTP328]), /* FSI2 */
 	CLKDEV_DEV_ID("i2c-sh_mobile.1", &mstp_clks[MSTP323]), /* IIC1 */
 	CLKDEV_DEV_ID("r8a66597_hcd.0", &mstp_clks[MSTP322]), /* USB0 */

@@ -650,10 +661,14 @@ static struct clk_lookup lookups[] = {
 	CLKDEV_DEV_ID("sh-mobile-hdmi", &mstp_clks[MSTP413]), /* HDMI */
 	CLKDEV_DEV_ID("i2c-sh_mobile.3", &mstp_clks[MSTP411]), /* IIC3 */
 	CLKDEV_DEV_ID("i2c-sh_mobile.4", &mstp_clks[MSTP410]), /* IIC4 */
+	CLKDEV_DEV_ID("sh-dma-engine.4", &mstp_clks[MSTP407]), /* USB-DMAC1 */
 	CLKDEV_DEV_ID("r8a66597_hcd.1", &mstp_clks[MSTP406]), /* USB1 */
 	CLKDEV_DEV_ID("r8a66597_udc.1", &mstp_clks[MSTP406]), /* USB1 */
 	CLKDEV_DEV_ID("renesas_usbhs.1", &mstp_clks[MSTP406]), /* USB1 */
+	CLKDEV_DEV_ID("sh_cmt.4", &mstp_clks[MSTP405]), /* CMT4 */
+	CLKDEV_DEV_ID("sh_cmt.3", &mstp_clks[MSTP404]), /* CMT3 */
 	CLKDEV_DEV_ID("sh_keysc.0", &mstp_clks[MSTP403]), /* KEYSC */
+	CLKDEV_DEV_ID("sh_cmt.2", &mstp_clks[MSTP400]), /* CMT2 */

 	CLKDEV_ICK_ID("hdmi", "sh_mobile_lcdc_fb.1",
 		      &div6_reparent_clks[DIV6_HDMI]),

@@ -365,7 +365,7 @@ void __init sh73a0_clock_init(void)
 	__raw_writel(0x108, SD2CKCR);

 	/* detect main clock parent */
-	switch ((__raw_readl(CKSCR) >> 24) & 0x03) {
+	switch ((__raw_readl(CKSCR) >> 28) & 0x03) {
 	case 0:
 		main_clk.parent = &sh73a0_extal1_clk;
 		break;

@@ -459,6 +459,10 @@ enum {
 	SHDMA_SLAVE_SDHI2_TX,
 	SHDMA_SLAVE_MMCIF_RX,
 	SHDMA_SLAVE_MMCIF_TX,
+	SHDMA_SLAVE_USB0_TX,
+	SHDMA_SLAVE_USB0_RX,
+	SHDMA_SLAVE_USB1_TX,
+	SHDMA_SLAVE_USB1_RX,
 };

 extern struct clk sh7372_extal1_clk;

@@ -379,7 +379,7 @@ enum {
 	/* BBIF2 */
 	VPU,
 	TSIF1,
-	_3DG_SGX530,
+	/* 3DG */
 	_2DDMAC,
 	IIC2_ALI2, IIC2_TACKI2, IIC2_WAITI2, IIC2_DTEI2,
 	IPMMU_IPMMUR, IPMMU_IPMMUR2,

@@ -436,7 +436,7 @@ static struct intc_vect intcs_vectors[] = {
 	/* BBIF2 */
 	INTCS_VECT(VPU, 0x980),
 	INTCS_VECT(TSIF1, 0x9a0),
-	INTCS_VECT(_3DG_SGX530, 0x9e0),
+	/* 3DG */
 	INTCS_VECT(_2DDMAC, 0xa00),
 	INTCS_VECT(IIC2_ALI2, 0xa80), INTCS_VECT(IIC2_TACKI2, 0xaa0),
 	INTCS_VECT(IIC2_WAITI2, 0xac0), INTCS_VECT(IIC2_DTEI2, 0xae0),

@@ -521,7 +521,7 @@ static struct intc_mask_reg intcs_mask_registers[] = {
 	    RTDMAC_1_DEI3, RTDMAC_1_DEI2, RTDMAC_1_DEI1, RTDMAC_1_DEI0 } },
 	{ 0xffd20198, 0xffd201d8, 8, /* IMR6SA / IMCR6SA */
 	  { 0, 0, MSIOF, 0,
-	    _3DG_SGX530, 0, 0, 0 } },
+	    0, 0, 0, 0 } },
 	{ 0xffd2019c, 0xffd201dc, 8, /* IMR7SA / IMCR7SA */
 	  { 0, TMU_TUNI2, TMU_TUNI1, TMU_TUNI0,
 	    0, 0, 0, 0 } },

@@ -561,7 +561,6 @@ static struct intc_prio_reg intcs_prio_registers[] = {
 					 TMU_TUNI2, TSIF1 } },
 	{ 0xffd2001c, 0, 16, 4, /* IPRHS */ { 0, 0, VEU, BEU } },
 	{ 0xffd20020, 0, 16, 4, /* IPRIS */ { 0, MSIOF, TSIF0, IIC0 } },
-	{ 0xffd20024, 0, 16, 4, /* IPRJS */ { 0, _3DG_SGX530, 0, 0 } },
 	{ 0xffd20028, 0, 16, 4, /* IPRKS */ { 0, 0, LMB, 0 } },
 	{ 0xffd2002c, 0, 16, 4, /* IPRLS */ { IPMMU, 0, 0, 0 } },
 	{ 0xffd20030, 0, 16, 4, /* IPRMS */ { IIC2, 0, 0, 0 } },

@@ -169,35 +169,35 @@ static struct platform_device scif6_device = {
 };

 /* CMT */
-static struct sh_timer_config cmt10_platform_data = {
-	.name = "CMT10",
-	.channel_offset = 0x10,
-	.timer_bit = 0,
+static struct sh_timer_config cmt2_platform_data = {
+	.name = "CMT2",
+	.channel_offset = 0x40,
+	.timer_bit = 5,
 	.clockevent_rating = 125,
 	.clocksource_rating = 125,
 };

-static struct resource cmt10_resources[] = {
+static struct resource cmt2_resources[] = {
 	[0] = {
-		.name	= "CMT10",
-		.start	= 0xe6138010,
-		.end	= 0xe613801b,
+		.name	= "CMT2",
+		.start	= 0xe6130040,
+		.end	= 0xe613004b,
 		.flags	= IORESOURCE_MEM,
 	},
 	[1] = {
-		.start	= evt2irq(0x0b00), /* CMT1_CMT10 */
+		.start	= evt2irq(0x0b80), /* CMT2 */
 		.flags	= IORESOURCE_IRQ,
 	},
 };

-static struct platform_device cmt10_device = {
+static struct platform_device cmt2_device = {
 	.name		= "sh_cmt",
-	.id		= 10,
+	.id		= 2,
 	.dev = {
-		.platform_data	= &cmt10_platform_data,
+		.platform_data	= &cmt2_platform_data,
 	},
-	.resource	= cmt10_resources,
-	.num_resources	= ARRAY_SIZE(cmt10_resources),
+	.resource	= cmt2_resources,
+	.num_resources	= ARRAY_SIZE(cmt2_resources),
 };

 /* TMU */

@@ -602,6 +602,150 @@ static struct platform_device dma2_device = {
 	},
 };

+/*
+ * USB-DMAC
+ */
+
+unsigned int usbts_shift[] = {3, 4, 5};
+
+enum {
+	XMIT_SZ_8BYTE		= 0,
+	XMIT_SZ_16BYTE		= 1,
+	XMIT_SZ_32BYTE		= 2,
+};
+
+#define USBTS_INDEX2VAL(i) (((i) & 3) << 6)
+
+static const struct sh_dmae_channel sh7372_usb_dmae_channels[] = {
+	{
+		.offset = 0,
+	}, {
+		.offset = 0x20,
+	},
+};
+
+/* USB DMAC0 */
+static const struct sh_dmae_slave_config sh7372_usb_dmae0_slaves[] = {
+	{
+		.slave_id	= SHDMA_SLAVE_USB0_TX,
+		.chcr		= USBTS_INDEX2VAL(XMIT_SZ_8BYTE),
+	}, {
+		.slave_id	= SHDMA_SLAVE_USB0_RX,
+		.chcr		= USBTS_INDEX2VAL(XMIT_SZ_8BYTE),
+	},
+};
+
+static struct sh_dmae_pdata usb_dma0_platform_data = {
+	.slave		= sh7372_usb_dmae0_slaves,
+	.slave_num	= ARRAY_SIZE(sh7372_usb_dmae0_slaves),
+	.channel	= sh7372_usb_dmae_channels,
+	.channel_num	= ARRAY_SIZE(sh7372_usb_dmae_channels),
+	.ts_low_shift	= 6,
+	.ts_low_mask	= 0xc0,
+	.ts_high_shift	= 0,
+	.ts_high_mask	= 0,
+	.ts_shift	= usbts_shift,
+	.ts_shift_num	= ARRAY_SIZE(usbts_shift),
+	.dmaor_init	= DMAOR_DME,
+	.chcr_offset	= 0x14,
+	.chcr_ie_bit	= 1 << 5,
+	.dmaor_is_32bit	= 1,
+	.needs_tend_set	= 1,
+	.no_dmars	= 1,
+};
+
+static struct resource sh7372_usb_dmae0_resources[] = {
+	{
+		/* Channel registers and DMAOR */
+		.start	= 0xe68a0020,
+		.end	= 0xe68a0064 - 1,
+		.flags	= IORESOURCE_MEM,
+	},
+	{
+		/* VCR/SWR/DMICR */
+		.start	= 0xe68a0000,
+		.end	= 0xe68a0014 - 1,
+		.flags	= IORESOURCE_MEM,
+	},
+	{
+		/* IRQ for channels */
+		.start	= evt2irq(0x0a00),
+		.end	= evt2irq(0x0a00),
+		.flags	= IORESOURCE_IRQ,
+	},
+};
+
+static struct platform_device usb_dma0_device = {
+	.name		= "sh-dma-engine",
+	.id		= 3,
+	.resource	= sh7372_usb_dmae0_resources,
+	.num_resources	= ARRAY_SIZE(sh7372_usb_dmae0_resources),
+	.dev		= {
+		.platform_data	= &usb_dma0_platform_data,
+	},
+};
+
+/* USB DMAC1 */
+static const struct sh_dmae_slave_config sh7372_usb_dmae1_slaves[] = {
+	{
+		.slave_id	= SHDMA_SLAVE_USB1_TX,
+		.chcr		= USBTS_INDEX2VAL(XMIT_SZ_8BYTE),
+	}, {
+		.slave_id	= SHDMA_SLAVE_USB1_RX,
+		.chcr		= USBTS_INDEX2VAL(XMIT_SZ_8BYTE),
+	},
+};
+
+static struct sh_dmae_pdata usb_dma1_platform_data = {
+	.slave		= sh7372_usb_dmae1_slaves,
+	.slave_num	= ARRAY_SIZE(sh7372_usb_dmae1_slaves),
+	.channel	= sh7372_usb_dmae_channels,
+	.channel_num	= ARRAY_SIZE(sh7372_usb_dmae_channels),
+	.ts_low_shift	= 6,
+	.ts_low_mask	= 0xc0,
+	.ts_high_shift	= 0,
+	.ts_high_mask	= 0,
+	.ts_shift	= usbts_shift,
+	.ts_shift_num	= ARRAY_SIZE(usbts_shift),
+	.dmaor_init	= DMAOR_DME,
+	.chcr_offset	= 0x14,
+	.chcr_ie_bit	= 1 << 5,
+	.dmaor_is_32bit	= 1,
+	.needs_tend_set	= 1,
+	.no_dmars	= 1,
+};
+
+static struct resource sh7372_usb_dmae1_resources[] = {
+	{
+		/* Channel registers and DMAOR */
+		.start	= 0xe68c0020,
+		.end	= 0xe68c0064 - 1,
+		.flags	= IORESOURCE_MEM,
+	},
+	{
+		/* VCR/SWR/DMICR */
+		.start	= 0xe68c0000,
+		.end	= 0xe68c0014 - 1,
+		.flags	= IORESOURCE_MEM,
+	},
+	{
+		/* IRQ for channels */
+		.start	= evt2irq(0x1d00),
+		.end	= evt2irq(0x1d00),
+		.flags	= IORESOURCE_IRQ,
+	},
+};
+
+static struct platform_device usb_dma1_device = {
+	.name		= "sh-dma-engine",
+	.id		= 4,
+	.resource	= sh7372_usb_dmae1_resources,
+	.num_resources	= ARRAY_SIZE(sh7372_usb_dmae1_resources),
+	.dev		= {
+		.platform_data	= &usb_dma1_platform_data,
+	},
+};
+
 /* VPU */
 static struct uio_info vpu_platform_data = {
 	.name = "VPU5HG",

@@ -818,7 +962,7 @@ static struct platform_device *sh7372_early_devices[] __initdata = {
 	&scif4_device,
 	&scif5_device,
 	&scif6_device,
-	&cmt10_device,
+	&cmt2_device,
 	&tmu00_device,
 	&tmu01_device,
 };

@@ -829,6 +973,8 @@ static struct platform_device *sh7372_late_devices[] __initdata = {
 	&dma0_device,
 	&dma1_device,
 	&dma2_device,
+	&usb_dma0_device,
+	&usb_dma1_device,
 	&vpu_device,
 	&veu0_device,
 	&veu1_device,
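In the USB-DMAC platform data above, each slave's CHCR value encodes a transfer size as an index into usbts_shift[]: USBTS_INDEX2VAL() places the 2-bit index in CHCR bits 7:6 (which is what .ts_low_shift = 6 / .ts_low_mask = 0xc0 describe), and index i corresponds to a transfer of 1 << usbts_shift[i] bytes. A standalone arithmetic check of that encoding — a user-space sketch; only the macro and the table values come from the diff above:

#include <stdio.h>

#define USBTS_INDEX2VAL(i) (((i) & 3) << 6)	/* index lives in CHCR bits 7:6 */

static const unsigned int usbts_shift[] = {3, 4, 5};	/* log2(bytes) per index */

int main(void)
{
	for (unsigned int i = 0; i < 3; i++)
		printf("index %u -> chcr 0x%02x -> %u-byte transfer\n",
		       i, USBTS_INDEX2VAL(i), 1u << usbts_shift[i]);
	return 0;	/* XMIT_SZ_8BYTE (0) -> 0x00 -> 8 bytes, up to 0x80 -> 32 bytes */
}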
@ -318,6 +318,10 @@ static struct clk v2m_sp804_clk = {
|
||||||
.rate = 1000000,
|
.rate = 1000000,
|
||||||
};
|
};
|
||||||
|
|
||||||
|
static struct clk v2m_ref_clk = {
|
||||||
|
.rate = 32768,
|
||||||
|
};
|
||||||
|
|
||||||
static struct clk dummy_apb_pclk;
|
static struct clk dummy_apb_pclk;
|
||||||
|
|
||||||
static struct clk_lookup v2m_lookups[] = {
|
static struct clk_lookup v2m_lookups[] = {
|
||||||
|
@ -348,6 +352,9 @@ static struct clk_lookup v2m_lookups[] = {
|
||||||
}, { /* CLCD */
|
}, { /* CLCD */
|
||||||
.dev_id = "mb:clcd",
|
.dev_id = "mb:clcd",
|
||||||
.clk = &osc1_clk,
|
.clk = &osc1_clk,
|
||||||
|
}, { /* SP805 WDT */
|
||||||
|
.dev_id = "mb:wdt",
|
||||||
|
.clk = &v2m_ref_clk,
|
||||||
}, { /* SP804 timers */
|
}, { /* SP804 timers */
|
||||||
.dev_id = "sp804",
|
.dev_id = "sp804",
|
||||||
.con_id = "v2m-timer0",
|
.con_id = "v2m-timer0",
|
||||||
|
|
|
@ -379,7 +379,7 @@ ENTRY(cpu_arm920_set_pte_ext)
|
||||||
|
|
||||||
/* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */
|
/* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */
|
||||||
.globl cpu_arm920_suspend_size
|
.globl cpu_arm920_suspend_size
|
||||||
.equ cpu_arm920_suspend_size, 4 * 3
|
.equ cpu_arm920_suspend_size, 4 * 4
|
||||||
#ifdef CONFIG_PM_SLEEP
|
#ifdef CONFIG_PM_SLEEP
|
||||||
ENTRY(cpu_arm920_do_suspend)
|
ENTRY(cpu_arm920_do_suspend)
|
||||||
stmfd sp!, {r4 - r7, lr}
|
stmfd sp!, {r4 - r7, lr}
|
||||||
|
|
|
@ -394,7 +394,7 @@ ENTRY(cpu_arm926_set_pte_ext)
|
||||||
|
|
||||||
/* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */
|
/* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */
|
||||||
.globl cpu_arm926_suspend_size
|
.globl cpu_arm926_suspend_size
|
||||||
.equ cpu_arm926_suspend_size, 4 * 3
|
.equ cpu_arm926_suspend_size, 4 * 4
|
||||||
#ifdef CONFIG_PM_SLEEP
|
#ifdef CONFIG_PM_SLEEP
|
||||||
ENTRY(cpu_arm926_do_suspend)
|
ENTRY(cpu_arm926_do_suspend)
|
||||||
stmfd sp!, {r4 - r7, lr}
|
stmfd sp!, {r4 - r7, lr}
|
||||||
|
|
|
@ -182,11 +182,11 @@ ENDPROC(cpu_sa1100_do_suspend)
|
||||||
|
|
||||||
ENTRY(cpu_sa1100_do_resume)
|
ENTRY(cpu_sa1100_do_resume)
|
||||||
ldmia r0, {r4 - r7} @ load cp regs
|
ldmia r0, {r4 - r7} @ load cp regs
|
||||||
mov r1, #0
|
mov ip, #0
|
||||||
mcr p15, 0, r1, c8, c7, 0 @ flush I+D TLBs
|
mcr p15, 0, ip, c8, c7, 0 @ flush I+D TLBs
|
||||||
mcr p15, 0, r1, c7, c7, 0 @ flush I&D cache
|
mcr p15, 0, ip, c7, c7, 0 @ flush I&D cache
|
||||||
mcr p15, 0, r1, c9, c0, 0 @ invalidate RB
|
mcr p15, 0, ip, c9, c0, 0 @ invalidate RB
|
||||||
mcr p15, 0, r1, c9, c0, 5 @ allow user space to use RB
|
mcr p15, 0, ip, c9, c0, 5 @ allow user space to use RB
|
||||||
|
|
||||||
mcr p15, 0, r4, c3, c0, 0 @ domain ID
|
mcr p15, 0, r4, c3, c0, 0 @ domain ID
|
||||||
mcr p15, 0, r5, c2, c0, 0 @ translation table base addr
|
mcr p15, 0, r5, c2, c0, 0 @ translation table base addr
|
||||||
|
|
|
@ -223,6 +223,22 @@ __v6_setup:
|
||||||
mrc p15, 0, r0, c1, c0, 0 @ read control register
|
mrc p15, 0, r0, c1, c0, 0 @ read control register
|
||||||
bic r0, r0, r5 @ clear bits them
|
bic r0, r0, r5 @ clear bits them
|
||||||
orr r0, r0, r6 @ set them
|
orr r0, r0, r6 @ set them
|
||||||
|
#ifdef CONFIG_ARM_ERRATA_364296
|
||||||
|
/*
|
||||||
|
* Workaround for the 364296 ARM1136 r0p2 erratum (possible cache data
|
||||||
|
* corruption with hit-under-miss enabled). The conditional code below
|
||||||
|
* (setting the undocumented bit 31 in the auxiliary control register
|
||||||
|
* and the FI bit in the control register) disables hit-under-miss
|
||||||
|
* without putting the processor into full low interrupt latency mode.
|
||||||
|
*/
|
||||||
|
ldr r6, =0x4107b362 @ id for ARM1136 r0p2
|
||||||
|
mrc p15, 0, r5, c0, c0, 0 @ get processor id
|
||||||
|
teq r5, r6 @ check for the faulty core
|
||||||
|
mrceq p15, 0, r5, c1, c0, 1 @ load aux control reg
|
||||||
|
orreq r5, r5, #(1 << 31) @ set the undocumented bit 31
|
||||||
|
mcreq p15, 0, r5, c1, c0, 1 @ write aux control reg
|
||||||
|
orreq r0, r0, #(1 << 21) @ low interrupt latency configuration
|
||||||
|
#endif
|
||||||
mov pc, lr @ return to head.S:__ret
|
mov pc, lr @ return to head.S:__ret
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
|
|
@ -66,6 +66,7 @@ ENDPROC(cpu_v7_proc_fin)
|
||||||
ENTRY(cpu_v7_reset)
|
ENTRY(cpu_v7_reset)
|
||||||
mrc p15, 0, r1, c1, c0, 0 @ ctrl register
|
mrc p15, 0, r1, c1, c0, 0 @ ctrl register
|
||||||
bic r1, r1, #0x1 @ ...............m
|
bic r1, r1, #0x1 @ ...............m
|
||||||
|
THUMB( bic r1, r1, #1 << 30 ) @ SCTLR.TE (Thumb exceptions)
|
||||||
mcr p15, 0, r1, c1, c0, 0 @ disable MMU
|
mcr p15, 0, r1, c1, c0, 0 @ disable MMU
|
||||||
isb
|
isb
|
||||||
mov pc, r0
|
mov pc, r0
|
||||||
|
@ -247,13 +248,16 @@ ENTRY(cpu_v7_do_resume)
|
||||||
mcr p15, 0, r7, c2, c0, 0 @ TTB 0
|
mcr p15, 0, r7, c2, c0, 0 @ TTB 0
|
||||||
mcr p15, 0, r8, c2, c0, 1 @ TTB 1
|
mcr p15, 0, r8, c2, c0, 1 @ TTB 1
|
||||||
mcr p15, 0, ip, c2, c0, 2 @ TTB control register
|
mcr p15, 0, ip, c2, c0, 2 @ TTB control register
|
||||||
mcr p15, 0, r10, c1, c0, 1 @ Auxiliary control register
|
mrc p15, 0, r4, c1, c0, 1 @ Read Auxiliary control register
|
||||||
|
teq r4, r10 @ Is it already set?
|
||||||
|
mcrne p15, 0, r10, c1, c0, 1 @ No, so write it
|
||||||
mcr p15, 0, r11, c1, c0, 2 @ Co-processor access control
|
mcr p15, 0, r11, c1, c0, 2 @ Co-processor access control
|
||||||
ldr r4, =PRRR @ PRRR
|
ldr r4, =PRRR @ PRRR
|
||||||
ldr r5, =NMRR @ NMRR
|
ldr r5, =NMRR @ NMRR
|
||||||
mcr p15, 0, r4, c10, c2, 0 @ write PRRR
|
mcr p15, 0, r4, c10, c2, 0 @ write PRRR
|
||||||
mcr p15, 0, r5, c10, c2, 1 @ write NMRR
|
mcr p15, 0, r5, c10, c2, 1 @ write NMRR
|
||||||
isb
|
isb
|
||||||
|
dsb
|
||||||
mov r0, r9 @ control register
|
mov r0, r9 @ control register
|
||||||
mov r2, r7, lsr #14 @ get TTB0 base
|
mov r2, r7, lsr #14 @ get TTB0 base
|
||||||
mov r2, r2, lsl #14
|
mov r2, r2, lsl #14
|
||||||
|
|
|
@@ -406,7 +406,7 @@ ENTRY(cpu_xsc3_set_pte_ext)
 	.align
 
 .globl	cpu_xsc3_suspend_size
-.equ	cpu_xsc3_suspend_size, 4 * 8
+.equ	cpu_xsc3_suspend_size, 4 * 7
 #ifdef CONFIG_PM_SLEEP
 ENTRY(cpu_xsc3_do_suspend)
 	stmfd	sp!, {r4 - r10, lr}

@@ -418,12 +418,12 @@ ENTRY(cpu_xsc3_do_suspend)
 	mrc	p15, 0, r9, c1, c0, 1	@ auxiliary control reg
 	mrc	p15, 0, r10, c1, c0, 0	@ control reg
 	bic	r4, r4, #2		@ clear frequency change bit
-	stmia	r0, {r1, r4 - r10}	@ store v:p offset + cp regs
+	stmia	r0, {r4 - r10}		@ store cp regs
 	ldmia	sp!, {r4 - r10, pc}
 ENDPROC(cpu_xsc3_do_suspend)
 
 ENTRY(cpu_xsc3_do_resume)
-	ldmia	r0, {r1, r4 - r10}	@ load v:p offset + cp regs
+	ldmia	r0, {r4 - r10}		@ load cp regs
 	mov	ip, #0
 	mcr	p15, 0, ip, c7, c7, 0	@ invalidate I & D caches, BTB
 	mcr	p15, 0, ip, c7, c10, 4	@ drain write (&fill) buffer

@@ -192,7 +192,7 @@ unsigned long s5p_spdif_get_rate(struct clk *clk)
 	if (IS_ERR(pclk))
 		return -EINVAL;
 
-	rate = pclk->ops->get_rate(clk);
+	rate = pclk->ops->get_rate(pclk);
 	clk_put(pclk);
 
 	return rate;

@@ -23,6 +23,8 @@
 #include <plat/gpio-core.h>
 #include <plat/gpio-cfg.h>
 
+#include <asm/mach/irq.h>
+
 #define GPIO_BASE(chip)		(((unsigned long)(chip)->base) & 0xFFFFF000u)
 
 #define CON_OFFSET		0x700

@@ -81,6 +83,9 @@ static void s5p_gpioint_handler(unsigned int irq, struct irq_desc *desc)
 	int group, pend_offset, mask_offset;
 	unsigned int pend, mask;
 
+	struct irq_chip *chip = irq_get_chip(irq);
+	chained_irq_enter(chip, desc);
+
 	for (group = 0; group < bank->nr_groups; group++) {
 		struct s3c_gpio_chip *chip = bank->chips[group];
 		if (!chip)

@@ -102,6 +107,7 @@ static void s5p_gpioint_handler(unsigned int irq, struct irq_desc *desc)
 			pend &= ~BIT(offset);
 		}
 	}
+	chained_irq_exit(chip, desc);
 }
 
 static __init int s5p_gpioint_add(struct s3c_gpio_chip *chip)

@@ -20,7 +20,7 @@ struct samsung_bl_gpio_info {
 	int func;
 };
 
-extern void samsung_bl_set(struct samsung_bl_gpio_info *gpio_info,
+extern void __init samsung_bl_set(struct samsung_bl_gpio_info *gpio_info,
 	struct platform_pwm_backlight_data *bl_data);
 
 #endif /* __ASM_PLAT_BACKLIGHT_H */

@@ -22,9 +22,14 @@
 #include <plat/irq-vic-timer.h>
 #include <plat/regs-timer.h>
 
+#include <asm/mach/irq.h>
+
 static void s3c_irq_demux_vic_timer(unsigned int irq, struct irq_desc *desc)
 {
+	struct irq_chip *chip = irq_get_chip(irq);
+	chained_irq_enter(chip, desc);
 	generic_handle_irq((int)desc->irq_data.handler_data);
+	chained_irq_exit(chip, desc);
 }
 
 /* We assume the IRQ_TIMER0..IRQ_TIMER4 range is continuous. */
 
@@ -351,7 +351,7 @@ centro			MACH_CENTRO		CENTRO			1944
 nokia_rx51		MACH_NOKIA_RX51		NOKIA_RX51		1955
 omap_zoom2		MACH_OMAP_ZOOM2		OMAP_ZOOM2		1967
 cpuat9260		MACH_CPUAT9260		CPUAT9260		1973
-eukrea_cpuimx27		MACH_CPUIMX27		CPUIMX27		1975
+eukrea_cpuimx27		MACH_EUKREA_CPUIMX27	EUKREA_CPUIMX27		1975
 acs5k			MACH_ACS5K		ACS5K			1982
 snapper_9260		MACH_SNAPPER_9260	SNAPPER_9260		1987
 dsm320			MACH_DSM320		DSM320			1988

@@ -476,8 +476,8 @@ cns3420vb		MACH_CNS3420VB		CNS3420VB		2776
 omap4_panda		MACH_OMAP4_PANDA	OMAP4_PANDA		2791
 ti8168evm		MACH_TI8168EVM		TI8168EVM		2800
 teton_bga		MACH_TETON_BGA		TETON_BGA		2816
-eukrea_cpuimx25sd	MACH_EUKREA_CPUIMX25	EUKREA_CPUIMX25		2820
-eukrea_cpuimx35sd	MACH_EUKREA_CPUIMX35	EUKREA_CPUIMX35		2821
+eukrea_cpuimx25sd	MACH_EUKREA_CPUIMX25SD	EUKREA_CPUIMX25SD	2820
+eukrea_cpuimx35sd	MACH_EUKREA_CPUIMX35SD	EUKREA_CPUIMX35SD	2821
 eukrea_cpuimx51sd	MACH_EUKREA_CPUIMX51SD	EUKREA_CPUIMX51SD	2822
 eukrea_cpuimx51		MACH_EUKREA_CPUIMX51	EUKREA_CPUIMX51		2823
 smdkc210		MACH_SMDKC210		SMDKC210		2838

@@ -259,7 +259,7 @@
 	ENTRY_SAME(ni_syscall)		/* query_module */
 	ENTRY_SAME(poll)
 	/* structs contain pointers and an in_addr... */
-	ENTRY_COMP(nfsservctl)
+	ENTRY_SAME(ni_syscall)		/* was nfsservctl */
 	ENTRY_SAME(setresgid)	/* 170 */
 	ENTRY_SAME(getresgid)
 	ENTRY_SAME(prctl)

@@ -171,7 +171,7 @@ SYSCALL_SPU(setresuid)
 SYSCALL_SPU(getresuid)
 SYSCALL(ni_syscall)
 SYSCALL_SPU(poll)
-COMPAT_SYS(nfsservctl)
+SYSCALL(ni_syscall)
 SYSCALL_SPU(setresgid)
 SYSCALL_SPU(getresgid)
 COMPAT_SYS_SPU(prctl)

@@ -123,7 +123,7 @@ static inline unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs,
 struct perf_event;
 struct perf_sample_data;
 
-extern void ptrace_triggered(struct perf_event *bp, int nmi,
+extern void ptrace_triggered(struct perf_event *bp,
 		      struct perf_sample_data *data, struct pt_regs *regs);
 
 #define task_pt_regs(task) \

@@ -15,6 +15,7 @@
 #include <linux/serial_sci.h>
 #include <linux/io.h>
 #include <linux/mm.h>
+#include <linux/dma-mapping.h>
 #include <linux/sh_timer.h>
 #include <linux/sh_dma.h>
 

@@ -22,7 +22,7 @@
 #include <linux/atomic.h>
 #include <asm/smp.h>
 
-static void (*pm_idle)(void);
+void (*pm_idle)(void);
 
 static int hlt_counter;
 
@@ -316,6 +316,35 @@ static int handle_unaligned_ins(insn_size_t instruction, struct pt_regs *regs,
 			break;
 		}
 		break;
+
+	case 9: /* mov.w @(disp,PC),Rn */
+		srcu = (unsigned char __user *)regs->pc;
+		srcu += 4;
+		srcu += (instruction & 0x00FF) << 1;
+		dst = (unsigned char *)rn;
+		*(unsigned long *)dst = 0;
+
+#if !defined(__LITTLE_ENDIAN__)
+		dst += 2;
+#endif
+
+		if (ma->from(dst, srcu, 2))
+			goto fetch_fault;
+		sign_extend(2, dst);
+		ret = 0;
+		break;
+
+	case 0xd: /* mov.l @(disp,PC),Rn */
+		srcu = (unsigned char __user *)(regs->pc & ~0x3);
+		srcu += 4;
+		srcu += (instruction & 0x00FF) << 2;
+		dst = (unsigned char *)rn;
+		*(unsigned long *)dst = 0;
+
+		if (ma->from(dst, srcu, 4))
+			goto fetch_fault;
+		ret = 0;
+		break;
 	}
 	return ret;
 

@@ -466,6 +495,7 @@ int handle_unaligned_access(insn_size_t instruction, struct pt_regs *regs,
 	case 0x0500: /* mov.w @(disp,Rm),R0 */
 		goto simple;
 	case 0x0B00: /* bf   lab - no delayslot*/
+		ret = 0;
 		break;
 	case 0x0F00: /* bf/s lab */
 		ret = handle_delayslot(regs, instruction, ma);

@@ -479,6 +509,7 @@ int handle_unaligned_access(insn_size_t instruction, struct pt_regs *regs,
 		}
 		break;
 	case 0x0900: /* bt   lab - no delayslot */
+		ret = 0;
 		break;
 	case 0x0D00: /* bt/s lab */
 		ret = handle_delayslot(regs, instruction, ma);

@@ -494,6 +525,9 @@ int handle_unaligned_access(insn_size_t instruction, struct pt_regs *regs,
 		}
 		break;
 
+	case 0x9000: /* mov.w @(disp,Rm),Rn */
+		goto simple;
+
 	case 0xA000: /* bra label */
 		ret = handle_delayslot(regs, instruction, ma);
 		if (ret==0)

@@ -507,6 +541,9 @@ int handle_unaligned_access(insn_size_t instruction, struct pt_regs *regs,
 			regs->pc += SH_PC_12BIT_OFFSET(instruction);
 		}
 		break;
+
+	case 0xD000: /* mov.l @(disp,Rm),Rn */
+		goto simple;
 	}
 	return ret;
 
@@ -88,7 +88,7 @@ BTFIXUPDEF_CALL(void, set_irq_udt, int)
 #define set_irq_udt(cpu) BTFIXUP_CALL(set_irq_udt)(cpu)
 
 /* All SUN4D IPIs are sent on this IRQ, may be shared with hard IRQs */
-#define SUN4D_IPI_IRQ 14
+#define SUN4D_IPI_IRQ 13
 
 extern void sun4d_ipi_interrupt(void);
 

@@ -68,7 +68,7 @@ sys_call_table32:
 	.word compat_sys_fstatfs64, sys_llseek, sys_mlock, sys_munlock, sys32_mlockall
/*240*/	.word sys_munlockall, sys32_sched_setparam, sys32_sched_getparam, sys32_sched_setscheduler, sys32_sched_getscheduler
 	.word sys_sched_yield, sys32_sched_get_priority_max, sys32_sched_get_priority_min, sys32_sched_rr_get_interval, compat_sys_nanosleep
-/*250*/	.word sys_mremap, compat_sys_sysctl, sys32_getsid, sys_fdatasync, sys32_nfsservctl
+/*250*/	.word sys_mremap, compat_sys_sysctl, sys32_getsid, sys_fdatasync, sys_nis_syscall
 	.word sys32_sync_file_range, compat_sys_clock_settime, compat_sys_clock_gettime, compat_sys_clock_getres, sys32_clock_nanosleep
/*260*/	.word compat_sys_sched_getaffinity, compat_sys_sched_setaffinity, sys32_timer_settime, compat_sys_timer_gettime, sys_timer_getoverrun
 	.word sys_timer_delete, compat_sys_timer_create, sys_ni_syscall, compat_sys_io_setup, sys_io_destroy
 
@@ -15,6 +15,7 @@ MODULE_LICENSE("GPL");
 static int bcma_bus_match(struct device *dev, struct device_driver *drv);
 static int bcma_device_probe(struct device *dev);
 static int bcma_device_remove(struct device *dev);
+static int bcma_device_uevent(struct device *dev, struct kobj_uevent_env *env);
 
 static ssize_t manuf_show(struct device *dev, struct device_attribute *attr, char *buf)
 {

@@ -49,6 +50,7 @@ static struct bus_type bcma_bus_type = {
 	.match		= bcma_bus_match,
 	.probe		= bcma_device_probe,
 	.remove		= bcma_device_remove,
+	.uevent		= bcma_device_uevent,
 	.dev_attrs	= bcma_device_attrs,
 };
 

@@ -227,6 +229,16 @@ static int bcma_device_remove(struct device *dev)
 	return 0;
 }
 
+static int bcma_device_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+	struct bcma_device *core = container_of(dev, struct bcma_device, dev);
+
+	return add_uevent_var(env,
+			      "MODALIAS=bcma:m%04Xid%04Xrev%02Xcl%02X",
+			      core->id.manuf, core->id.id,
+			      core->id.rev, core->id.class);
+}
+
 static int __init bcma_modinit(void)
 {
 	int err;
 
@@ -63,6 +63,7 @@ static struct usb_device_id ath3k_table[] = {
 	/* Atheros AR3011 with sflash firmware*/
 	{ USB_DEVICE(0x0CF3, 0x3002) },
 	{ USB_DEVICE(0x13d3, 0x3304) },
+	{ USB_DEVICE(0x0930, 0x0215) },
 
 	/* Atheros AR9285 Malbec with sflash firmware */
 	{ USB_DEVICE(0x03F0, 0x311D) },

@@ -106,6 +106,7 @@ static struct usb_device_id blacklist_table[] = {
 	/* Atheros 3011 with sflash firmware */
 	{ USB_DEVICE(0x0cf3, 0x3002), .driver_info = BTUSB_IGNORE },
 	{ USB_DEVICE(0x13d3, 0x3304), .driver_info = BTUSB_IGNORE },
+	{ USB_DEVICE(0x0930, 0x0215), .driver_info = BTUSB_IGNORE },
 
 	/* Atheros AR9285 Malbec with sflash firmware */
 	{ USB_DEVICE(0x03f0, 0x311d), .driver_info = BTUSB_IGNORE },

@@ -256,7 +257,9 @@ static void btusb_intr_complete(struct urb *urb)
 
 	err = usb_submit_urb(urb, GFP_ATOMIC);
 	if (err < 0) {
-		if (err != -EPERM)
+		/* -EPERM: urb is being killed;
+		 * -ENODEV: device got disconnected */
+		if (err != -EPERM && err != -ENODEV)
 			BT_ERR("%s urb %p failed to resubmit (%d)",
 						hdev->name, urb, -err);
 		usb_unanchor_urb(urb);

@@ -341,7 +344,9 @@ static void btusb_bulk_complete(struct urb *urb)
 
 	err = usb_submit_urb(urb, GFP_ATOMIC);
 	if (err < 0) {
-		if (err != -EPERM)
+		/* -EPERM: urb is being killed;
+		 * -ENODEV: device got disconnected */
+		if (err != -EPERM && err != -ENODEV)
 			BT_ERR("%s urb %p failed to resubmit (%d)",
 						hdev->name, urb, -err);
 		usb_unanchor_urb(urb);

@@ -431,7 +436,9 @@ static void btusb_isoc_complete(struct urb *urb)
 
 	err = usb_submit_urb(urb, GFP_ATOMIC);
 	if (err < 0) {
-		if (err != -EPERM)
+		/* -EPERM: urb is being killed;
+		 * -ENODEV: device got disconnected */
+		if (err != -EPERM && err != -ENODEV)
 			BT_ERR("%s urb %p failed to resubmit (%d)",
 						hdev->name, urb, -err);
 		usb_unanchor_urb(urb);
 
@@ -26,6 +26,7 @@
 #include <linux/clk.h>
 #include <linux/irq.h>
 #include <linux/err.h>
+#include <linux/delay.h>
 #include <linux/clocksource.h>
 #include <linux/clockchips.h>
 #include <linux/sh_timer.h>

@@ -150,13 +151,13 @@ static void sh_cmt_start_stop_ch(struct sh_cmt_priv *p, int start)
 
 static int sh_cmt_enable(struct sh_cmt_priv *p, unsigned long *rate)
 {
-	int ret;
+	int k, ret;
 
 	/* enable clock */
 	ret = clk_enable(p->clk);
 	if (ret) {
 		dev_err(&p->pdev->dev, "cannot enable clock\n");
-		return ret;
+		goto err0;
 	}
 
 	/* make sure channel is disabled */

@@ -174,9 +175,38 @@ static int sh_cmt_enable(struct sh_cmt_priv *p, unsigned long *rate)
 	sh_cmt_write(p, CMCOR, 0xffffffff);
 	sh_cmt_write(p, CMCNT, 0);
 
+	/*
+	 * According to the sh73a0 user's manual, as CMCNT can be operated
+	 * only by the RCLK (Pseudo 32 KHz), there's one restriction on
+	 * modifying CMCNT register; two RCLK cycles are necessary before
+	 * this register is either read or any modification of the value
+	 * it holds is reflected in the LSI's actual operation.
+	 *
+	 * While at it, we're supposed to clear out the CMCNT as of this
+	 * moment, so make sure it's processed properly here.  This will
+	 * take RCLKx2 at maximum.
+	 */
+	for (k = 0; k < 100; k++) {
+		if (!sh_cmt_read(p, CMCNT))
+			break;
+		udelay(1);
+	}
+
+	if (sh_cmt_read(p, CMCNT)) {
+		dev_err(&p->pdev->dev, "cannot clear CMCNT\n");
+		ret = -ETIMEDOUT;
+		goto err1;
+	}
+
 	/* enable channel */
 	sh_cmt_start_stop_ch(p, 1);
 	return 0;
+ err1:
+	/* stop clock */
+	clk_disable(p->clk);
+
+ err0:
+	return ret;
 }
 
 static void sh_cmt_disable(struct sh_cmt_priv *p)
 
@@ -503,9 +503,9 @@ static netdev_tx_t ti_hecc_xmit(struct sk_buff *skb, struct net_device *ndev)
 	spin_unlock_irqrestore(&priv->mbx_lock, flags);
 
 	/* Prepare mailbox for transmission */
+	data = cf->can_dlc | (get_tx_head_prio(priv) << 8);
 	if (cf->can_id & CAN_RTR_FLAG) /* Remote transmission request */
 		data |= HECC_CANMCF_RTR;
-	data |= get_tx_head_prio(priv) << 8;
 	hecc_write_mbx(priv, mbxno, HECC_CANMCF, data);
 
 	if (cf->can_id & CAN_EFF_FLAG) /* Extended frame format */

@@ -923,6 +923,7 @@ static int ti_hecc_probe(struct platform_device *pdev)
 	priv->can.do_get_state = ti_hecc_get_state;
 	priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES;
 
+	spin_lock_init(&priv->mbx_lock);
 	ndev->irq = irq->start;
 	ndev->flags |= IFF_ECHO;
 	platform_set_drvdata(pdev, ndev);
 
@@ -2452,14 +2452,13 @@ static irqreturn_t cas_interruptN(int irq, void *dev_id)
 	struct net_device *dev = dev_id;
 	struct cas *cp = netdev_priv(dev);
 	unsigned long flags;
-	int ring;
+	int ring = (irq == cp->pci_irq_INTC) ? 2 : 3;
 	u32 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(ring));
 
 	/* check for shared irq */
 	if (status == 0)
 		return IRQ_NONE;
 
-	ring = (irq == cp->pci_irq_INTC) ? 2 : 3;
 	spin_lock_irqsave(&cp->lock, flags);
 	if (status & INTR_RX_DONE_ALT) { /* handle rx separately */
 #ifdef USE_NAPI
 
@@ -395,7 +395,7 @@ static inline struct sk_buff *ibmveth_rxq_get_buffer(struct ibmveth_adapter *ada
 }
 
 /* recycle the current buffer on the rx queue */
-static void ibmveth_rxq_recycle_buffer(struct ibmveth_adapter *adapter)
+static int ibmveth_rxq_recycle_buffer(struct ibmveth_adapter *adapter)
 {
 	u32 q_index = adapter->rx_queue.index;
 	u64 correlator = adapter->rx_queue.queue_addr[q_index].correlator;

@@ -403,6 +403,7 @@ static void ibmveth_rxq_recycle_buffer(struct ibmveth_adapter *adapter)
 	unsigned int index = correlator & 0xffffffffUL;
 	union ibmveth_buf_desc desc;
 	unsigned long lpar_rc;
+	int ret = 1;
 
 	BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS);
 	BUG_ON(index >= adapter->rx_buff_pool[pool].size);

@@ -410,7 +411,7 @@ static void ibmveth_rxq_recycle_buffer(struct ibmveth_adapter *adapter)
 	if (!adapter->rx_buff_pool[pool].active) {
 		ibmveth_rxq_harvest_buffer(adapter);
 		ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[pool]);
-		return;
+		goto out;
 	}
 
 	desc.fields.flags_len = IBMVETH_BUF_VALID |

@@ -423,12 +424,16 @@ static void ibmveth_rxq_recycle_buffer(struct ibmveth_adapter *adapter)
 		netdev_dbg(adapter->netdev, "h_add_logical_lan_buffer failed "
 			   "during recycle rc=%ld", lpar_rc);
 		ibmveth_remove_buffer_from_pool(adapter, adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator);
+		ret = 0;
 	}
 
 	if (++adapter->rx_queue.index == adapter->rx_queue.num_slots) {
 		adapter->rx_queue.index = 0;
 		adapter->rx_queue.toggle = !adapter->rx_queue.toggle;
 	}
 
+out:
+	return ret;
 }
 
 static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter)

@@ -1084,8 +1089,9 @@ restart_poll:
 				if (rx_flush)
 					ibmveth_flush_buffer(skb->data,
 						length + offset);
+				if (!ibmveth_rxq_recycle_buffer(adapter))
+					kfree_skb(skb);
 				skb = new_skb;
-				ibmveth_rxq_recycle_buffer(adapter);
 			} else {
 				ibmveth_rxq_harvest_buffer(adapter);
 				skb_reserve(skb, offset);
 
@@ -25,8 +25,9 @@
 /* DP83865 phy identifier values */
 #define DP83865_PHY_ID	0x20005c7a
 
-#define DP83865_INT_MASK_REG 0x15
-#define DP83865_INT_MASK_STATUS 0x14
+#define DP83865_INT_STATUS	0x14
+#define DP83865_INT_MASK	0x15
+#define DP83865_INT_CLEAR	0x17
 
 #define DP83865_INT_REMOTE_FAULT 0x0008
 #define DP83865_INT_ANE_COMPLETED 0x0010

@@ -68,21 +69,25 @@ static int ns_config_intr(struct phy_device *phydev)
 	int err;
 
 	if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
-		err = phy_write(phydev, DP83865_INT_MASK_REG,
+		err = phy_write(phydev, DP83865_INT_MASK,
 				DP83865_INT_MASK_DEFAULT);
 	else
-		err = phy_write(phydev, DP83865_INT_MASK_REG, 0);
+		err = phy_write(phydev, DP83865_INT_MASK, 0);
 
 	return err;
 }
 
 static int ns_ack_interrupt(struct phy_device *phydev)
 {
-	int ret = phy_read(phydev, DP83865_INT_MASK_STATUS);
+	int ret = phy_read(phydev, DP83865_INT_STATUS);
 	if (ret < 0)
 		return ret;
 
-	return 0;
+	/* Clear the interrupt status bit by writing a "1"
+	 * to the corresponding bit in INT_CLEAR (2:0 are reserved) */
+	ret = phy_write(phydev, DP83865_INT_CLEAR, ret & ~0x7);
+
+	return ret;
 }
 
 static void ns_giga_speed_fallback(struct phy_device *phydev, int mode)
 
@@ -31,6 +31,7 @@
 #include <linux/phy.h>
 #include <linux/cache.h>
 #include <linux/io.h>
+#include <linux/interrupt.h>
 #include <linux/pm_runtime.h>
 #include <linux/slab.h>
 #include <linux/ethtool.h>

@@ -69,7 +69,7 @@ static int ar9003_hw_power_interpolate(int32_t x,
 static const struct ar9300_eeprom ar9300_default = {
 	.eepromVersion = 2,
 	.templateVersion = 2,
-	.macAddr = {1, 2, 3, 4, 5, 6},
+	.macAddr = {0, 2, 3, 4, 5, 6},
 	.custData = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
 		     0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
 	.baseEepHeader = {
 
@@ -2283,7 +2283,11 @@ static void ath9k_set_coverage_class(struct ieee80211_hw *hw, u8 coverage_class)
 
 	mutex_lock(&sc->mutex);
 	ah->coverage_class = coverage_class;
+
+	ath9k_ps_wakeup(sc);
 	ath9k_hw_init_global_settings(ah);
+	ath9k_ps_restore(sc);
+
 	mutex_unlock(&sc->mutex);
 }
 

@@ -1066,8 +1066,10 @@ static int carl9170_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
 	 * the high througput speed in 802.11n networks.
 	 */
 
-	if (!is_main_vif(ar, vif))
+	if (!is_main_vif(ar, vif)) {
+		mutex_lock(&ar->mutex);
 		goto err_softw;
+	}
 
 	/*
 	 * While the hardware supports *catch-all* key, for offloading
 
@@ -871,18 +871,8 @@ int rt2x00usb_suspend(struct usb_interface *usb_intf, pm_message_t state)
 {
 	struct ieee80211_hw *hw = usb_get_intfdata(usb_intf);
 	struct rt2x00_dev *rt2x00dev = hw->priv;
-	int retval;
-
-	retval = rt2x00lib_suspend(rt2x00dev, state);
-	if (retval)
-		return retval;
 
-	/*
-	 * Decrease usbdev refcount.
-	 */
-	usb_put_dev(interface_to_usbdev(usb_intf));
-
-	return 0;
+	return rt2x00lib_suspend(rt2x00dev, state);
 }
 EXPORT_SYMBOL_GPL(rt2x00usb_suspend);
 

@@ -891,8 +881,6 @@ int rt2x00usb_resume(struct usb_interface *usb_intf)
 	struct ieee80211_hw *hw = usb_get_intfdata(usb_intf);
 	struct rt2x00_dev *rt2x00dev = hw->priv;
 
-	usb_get_dev(interface_to_usbdev(usb_intf));
-
 	return rt2x00lib_resume(rt2x00dev);
 }
 EXPORT_SYMBOL_GPL(rt2x00usb_resume);
 
@@ -4283,6 +4283,7 @@ int wl1271_init_ieee80211(struct wl1271 *wl)
 	wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
 		BIT(NL80211_IFTYPE_ADHOC) | BIT(NL80211_IFTYPE_AP);
 	wl->hw->wiphy->max_scan_ssids = 1;
+	wl->hw->wiphy->max_sched_scan_ssids = 1;
 	/*
 	 * Maximum length of elements in scanning probe request templates
 	 * should be the maximum length possible for a template, without

@@ -164,7 +164,7 @@ static int wl1271_sdio_power_on(struct wl1271 *wl)
 	/* If enabled, tell runtime PM not to power off the card */
 	if (pm_runtime_enabled(&func->dev)) {
 		ret = pm_runtime_get_sync(&func->dev);
-		if (ret)
+		if (ret < 0)
 			goto out;
 	} else {
 		/* Runtime PM is disabled: power up the card manually */
 
@@ -36,7 +36,6 @@ enum wl1271_tm_commands {
 	WL1271_TM_CMD_TEST,
 	WL1271_TM_CMD_INTERROGATE,
 	WL1271_TM_CMD_CONFIGURE,
-	WL1271_TM_CMD_NVS_PUSH,
 	WL1271_TM_CMD_SET_PLT_MODE,
 	WL1271_TM_CMD_RECOVER,
 

@@ -190,48 +189,6 @@ static int wl1271_tm_cmd_configure(struct wl1271 *wl, struct nlattr *tb[])
 	return 0;
 }
 
-static int wl1271_tm_cmd_nvs_push(struct wl1271 *wl, struct nlattr *tb[])
-{
-	int ret = 0;
-	size_t len;
-	void *buf;
-
-	wl1271_debug(DEBUG_TESTMODE, "testmode cmd nvs push");
-
-	if (!tb[WL1271_TM_ATTR_DATA])
-		return -EINVAL;
-
-	buf = nla_data(tb[WL1271_TM_ATTR_DATA]);
-	len = nla_len(tb[WL1271_TM_ATTR_DATA]);
-
-	mutex_lock(&wl->mutex);
-
-	kfree(wl->nvs);
-
-	if ((wl->chip.id == CHIP_ID_1283_PG20) &&
-	    (len != sizeof(struct wl128x_nvs_file)))
-		return -EINVAL;
-	else if (len != sizeof(struct wl1271_nvs_file))
-		return -EINVAL;
-
-	wl->nvs = kzalloc(len, GFP_KERNEL);
-	if (!wl->nvs) {
-		wl1271_error("could not allocate memory for the nvs file");
-		ret = -ENOMEM;
-		goto out;
-	}
-
-	memcpy(wl->nvs, buf, len);
-	wl->nvs_len = len;
-
-	wl1271_debug(DEBUG_TESTMODE, "testmode pushed nvs");
-
-out:
-	mutex_unlock(&wl->mutex);
-
-	return ret;
-}
-
 static int wl1271_tm_cmd_set_plt_mode(struct wl1271 *wl, struct nlattr *tb[])
 {
 	u32 val;

@@ -288,8 +245,6 @@ int wl1271_tm_cmd(struct ieee80211_hw *hw, void *data, int len)
 		return wl1271_tm_cmd_interrogate(wl, tb);
 	case WL1271_TM_CMD_CONFIGURE:
 		return wl1271_tm_cmd_configure(wl, tb);
-	case WL1271_TM_CMD_NVS_PUSH:
-		return wl1271_tm_cmd_nvs_push(wl, tb);
 	case WL1271_TM_CMD_SET_PLT_MODE:
 		return wl1271_tm_cmd_set_plt_mode(wl, tb);
 	case WL1271_TM_CMD_RECOVER:
 
@@ -186,6 +186,9 @@ static unsigned char intc_irq_sense_table[IRQ_TYPE_SENSE_MASK + 1] = {
     !defined(CONFIG_CPU_SUBTYPE_SH7709)
 	[IRQ_TYPE_LEVEL_HIGH] = VALID(3),
 #endif
+#if defined(CONFIG_ARCH_SH7372)
+	[IRQ_TYPE_EDGE_BOTH] = VALID(4),
+#endif
 };
 
 static int intc_set_type(struct irq_data *data, unsigned int type)
 
@@ -47,6 +47,7 @@
 #include <linux/ctype.h>
 #include <linux/err.h>
 #include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
 #include <linux/scatterlist.h>
 #include <linux/slab.h>
 

@@ -95,6 +96,12 @@ struct sci_port {
 #endif
 
 	struct notifier_block		freq_transition;
+
+#ifdef CONFIG_SERIAL_SH_SCI_CONSOLE
+	unsigned short saved_smr;
+	unsigned short saved_fcr;
+	unsigned char saved_brr;
+#endif
 };
 
 /* Function prototypes */

@@ -1076,7 +1083,7 @@ static unsigned int sci_get_mctrl(struct uart_port *port)
 	/* This routine is used for getting signals of: DTR, DCD, DSR, RI,
 	   and CTS/RTS */
 
-	return TIOCM_DTR | TIOCM_RTS | TIOCM_DSR;
+	return TIOCM_DTR | TIOCM_RTS | TIOCM_CTS | TIOCM_DSR;
 }
 
 #ifdef CONFIG_SERIAL_SH_SCI_DMA

@@ -1633,11 +1640,25 @@ static unsigned int sci_scbrr_calc(unsigned int algo_id, unsigned int bps,
 		return ((freq + 16 * bps) / (32 * bps) - 1);
 }
 
+static void sci_reset(struct uart_port *port)
+{
+	unsigned int status;
+
+	do {
+		status = sci_in(port, SCxSR);
+	} while (!(status & SCxSR_TEND(port)));
+
+	sci_out(port, SCSCR, 0x00);	/* TE=0, RE=0, CKE1=0 */
+
+	if (port->type != PORT_SCI)
+		sci_out(port, SCFCR, SCFCR_RFRST | SCFCR_TFRST);
+}
+
 static void sci_set_termios(struct uart_port *port, struct ktermios *termios,
 			    struct ktermios *old)
 {
 	struct sci_port *s = to_sci_port(port);
-	unsigned int status, baud, smr_val, max_baud;
+	unsigned int baud, smr_val, max_baud;
 	int t = -1;
 	u16 scfcr = 0;
 

@@ -1657,14 +1678,7 @@ static void sci_set_termios(struct uart_port *port, struct ktermios *termios,
 
 	sci_port_enable(s);
 
-	do {
-		status = sci_in(port, SCxSR);
-	} while (!(status & SCxSR_TEND(port)));
-
-	sci_out(port, SCSCR, 0x00);	/* TE=0, RE=0, CKE1=0 */
-
-	if (port->type != PORT_SCI)
-		sci_out(port, SCFCR, scfcr | SCFCR_RFRST | SCFCR_TFRST);
+	sci_reset(port);
 
 	smr_val = sci_in(port, SCSMR) & 3;
 

@@ -2037,7 +2051,8 @@ static int __devinit serial_console_setup(struct console *co, char *options)
 	if (options)
 		uart_parse_options(options, &baud, &parity, &bits, &flow);
 
-	/* TODO: disable clock */
+	sci_port_disable(sci_port);
+
 	return uart_set_options(port, co, baud, parity, bits, flow);
 }
 

@@ -2080,6 +2095,36 @@ static int __devinit sci_probe_earlyprintk(struct platform_device *pdev)
 	return 0;
 }
 
+#define uart_console(port)	((port)->cons->index == (port)->line)
+
+static int sci_runtime_suspend(struct device *dev)
+{
+	struct sci_port *sci_port = dev_get_drvdata(dev);
+	struct uart_port *port = &sci_port->port;
+
+	if (uart_console(port)) {
+		sci_port->saved_smr = sci_in(port, SCSMR);
+		sci_port->saved_brr = sci_in(port, SCBRR);
+		sci_port->saved_fcr = sci_in(port, SCFCR);
+	}
+	return 0;
+}
+
+static int sci_runtime_resume(struct device *dev)
+{
+	struct sci_port *sci_port = dev_get_drvdata(dev);
+	struct uart_port *port = &sci_port->port;
+
+	if (uart_console(port)) {
+		sci_reset(port);
+		sci_out(port, SCSMR, sci_port->saved_smr);
+		sci_out(port, SCBRR, sci_port->saved_brr);
+		sci_out(port, SCFCR, sci_port->saved_fcr);
+		sci_out(port, SCSCR, sci_port->cfg->scscr);
+	}
+	return 0;
+}
+
 #define SCI_CONSOLE	(&serial_console)
 
 #else

@@ -2089,6 +2134,8 @@ static inline int __devinit sci_probe_earlyprintk(struct platform_device *pdev)
 }
 
 #define SCI_CONSOLE	NULL
+#define sci_runtime_suspend	NULL
+#define sci_runtime_resume	NULL
 
 #endif /* CONFIG_SERIAL_SH_SCI_CONSOLE */
 

@@ -2204,6 +2251,8 @@ static int sci_resume(struct device *dev)
 }
 
 static const struct dev_pm_ops sci_dev_pm_ops = {
+	.runtime_suspend = sci_runtime_suspend,
+	.runtime_resume = sci_runtime_resume,
 	.suspend	= sci_suspend,
 	.resume		= sci_resume,
 };
 
|
||||||
* you need use set_wiphy_dev() (see below) */
|
* you need use set_wiphy_dev() (see below) */
|
||||||
struct device dev;
|
struct device dev;
|
||||||
|
|
||||||
|
/* protects ->resume, ->suspend sysfs callbacks against unregister hw */
|
||||||
|
bool registered;
|
||||||
|
|
||||||
/* dir in debugfs: ieee80211/<wiphyname> */
|
/* dir in debugfs: ieee80211/<wiphyname> */
|
||||||
struct dentry *debugfsdir;
|
struct dentry *debugfsdir;
|
||||||
|
|
||||||
|
|
|
@ -5715,6 +5715,7 @@ struct pmu *perf_init_event(struct perf_event *event)
|
||||||
pmu = idr_find(&pmu_idr, event->attr.type);
|
pmu = idr_find(&pmu_idr, event->attr.type);
|
||||||
rcu_read_unlock();
|
rcu_read_unlock();
|
||||||
if (pmu) {
|
if (pmu) {
|
||||||
|
event->pmu = pmu;
|
||||||
ret = pmu->event_init(event);
|
ret = pmu->event_init(event);
|
||||||
if (ret)
|
if (ret)
|
||||||
pmu = ERR_PTR(ret);
|
pmu = ERR_PTR(ret);
|
||||||
|
@ -5722,6 +5723,7 @@ struct pmu *perf_init_event(struct perf_event *event)
|
||||||
}
|
}
|
||||||
|
|
||||||
list_for_each_entry_rcu(pmu, &pmus, entry) {
|
list_for_each_entry_rcu(pmu, &pmus, entry) {
|
||||||
|
event->pmu = pmu;
|
||||||
ret = pmu->event_init(event);
|
ret = pmu->event_init(event);
|
||||||
if (!ret)
|
if (!ret)
|
||||||
goto unlock;
|
goto unlock;
|
||||||
|
@ -5848,8 +5850,6 @@ done:
|
||||||
return ERR_PTR(err);
|
return ERR_PTR(err);
|
||||||
}
|
}
|
||||||
|
|
||||||
event->pmu = pmu;
|
|
||||||
|
|
||||||
if (!event->parent) {
|
if (!event->parent) {
|
||||||
if (event->attach_state & PERF_ATTACH_TASK)
|
if (event->attach_state & PERF_ATTACH_TASK)
|
||||||
jump_label_inc(&perf_sched_events);
|
jump_label_inc(&perf_sched_events);
|
||||||
|
|
|
@ -494,9 +494,8 @@ int bt_sock_wait_state(struct sock *sk, int state, unsigned long timeo)
|
||||||
BT_DBG("sk %p", sk);
|
BT_DBG("sk %p", sk);
|
||||||
|
|
||||||
add_wait_queue(sk_sleep(sk), &wait);
|
add_wait_queue(sk_sleep(sk), &wait);
|
||||||
while (sk->sk_state != state) {
|
|
||||||
set_current_state(TASK_INTERRUPTIBLE);
|
set_current_state(TASK_INTERRUPTIBLE);
|
||||||
|
while (sk->sk_state != state) {
|
||||||
if (!timeo) {
|
if (!timeo) {
|
||||||
err = -EINPROGRESS;
|
err = -EINPROGRESS;
|
||||||
break;
|
break;
|
||||||
|
@ -510,12 +509,13 @@ int bt_sock_wait_state(struct sock *sk, int state, unsigned long timeo)
|
||||||
release_sock(sk);
|
release_sock(sk);
|
||||||
timeo = schedule_timeout(timeo);
|
timeo = schedule_timeout(timeo);
|
||||||
lock_sock(sk);
|
lock_sock(sk);
|
||||||
|
set_current_state(TASK_INTERRUPTIBLE);
|
||||||
|
|
||||||
err = sock_error(sk);
|
err = sock_error(sk);
|
||||||
if (err)
|
if (err)
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
set_current_state(TASK_RUNNING);
|
__set_current_state(TASK_RUNNING);
|
||||||
remove_wait_queue(sk_sleep(sk), &wait);
|
remove_wait_queue(sk_sleep(sk), &wait);
|
||||||
return err;
|
return err;
|
||||||
}
|
}
|
||||||
|
|
|
@@ -155,6 +155,7 @@ struct bnep_session {
 	unsigned int  role;
 	unsigned long state;
 	unsigned long flags;
+	atomic_t      terminate;
 	struct task_struct *task;
 
 	struct ethhdr eh;

@@ -484,9 +484,11 @@ static int bnep_session(void *arg)
 
 	init_waitqueue_entry(&wait, current);
 	add_wait_queue(sk_sleep(sk), &wait);
-	while (!kthread_should_stop()) {
+	while (1) {
 		set_current_state(TASK_INTERRUPTIBLE);
 
+		if (atomic_read(&s->terminate))
+			break;
 		/* RX */
 		while ((skb = skb_dequeue(&sk->sk_receive_queue))) {
 			skb_orphan(skb);

@@ -504,7 +506,7 @@ static int bnep_session(void *arg)
 
 		schedule();
 	}
-	set_current_state(TASK_RUNNING);
+	__set_current_state(TASK_RUNNING);
 	remove_wait_queue(sk_sleep(sk), &wait);
 
 	/* Cleanup session */

@@ -640,9 +642,10 @@ int bnep_del_connection(struct bnep_conndel_req *req)
 	down_read(&bnep_session_sem);
 
 	s = __bnep_get_session(req->dst);
-	if (s)
-		kthread_stop(s->task);
-	else
+	if (s) {
+		atomic_inc(&s->terminate);
+		wake_up_process(s->task);
+	} else
 		err = -ENOENT;
 
 	up_read(&bnep_session_sem);
 
@@ -386,7 +386,8 @@ static void cmtp_reset_ctr(struct capi_ctr *ctrl)
 
 	capi_ctr_down(ctrl);
 
-	kthread_stop(session->task);
+	atomic_inc(&session->terminate);
+	wake_up_process(session->task);
 }
 
 static void cmtp_register_appl(struct capi_ctr *ctrl, __u16 appl, capi_register_params *rp)

@@ -81,6 +81,7 @@ struct cmtp_session {
 
 	char name[BTNAMSIZ];
 
+	atomic_t terminate;
 	struct task_struct *task;
 
 	wait_queue_head_t wait;

@@ -292,9 +292,11 @@ static int cmtp_session(void *arg)
 
 	init_waitqueue_entry(&wait, current);
 	add_wait_queue(sk_sleep(sk), &wait);
-	while (!kthread_should_stop()) {
+	while (1) {
 		set_current_state(TASK_INTERRUPTIBLE);
 
+		if (atomic_read(&session->terminate))
+			break;
 		if (sk->sk_state != BT_CONNECTED)
 			break;
 

@@ -307,7 +309,7 @@ static int cmtp_session(void *arg)
 
 		schedule();
 	}
-	set_current_state(TASK_RUNNING);
+	__set_current_state(TASK_RUNNING);
 	remove_wait_queue(sk_sleep(sk), &wait);
 
 	down_write(&cmtp_session_sem);

@@ -380,16 +382,17 @@ int cmtp_add_connection(struct cmtp_connadd_req *req, struct socket *sock)
 
 	if (!(session->flags & (1 << CMTP_LOOPBACK))) {
 		err = cmtp_attach_device(session);
-		if (err < 0)
-			goto detach;
+		if (err < 0) {
+			atomic_inc(&session->terminate);
+			wake_up_process(session->task);
+			up_write(&cmtp_session_sem);
+			return err;
+		}
 	}
 
 	up_write(&cmtp_session_sem);
 	return 0;
 
-detach:
-	cmtp_detach_device(session);
-
 unlink:
 	__cmtp_unlink_session(session);
 

@@ -414,7 +417,8 @@ int cmtp_del_connection(struct cmtp_conndel_req *req)
 		skb_queue_purge(&session->transmit);
 
 		/* Stop session thread */
-		kthread_stop(session->task);
+		atomic_inc(&session->terminate);
+		wake_up_process(session->task);
 	} else
 		err = -ENOENT;
 
||||||
|
|
||||||
BT_ERR("%s command tx timeout", hdev->name);
|
BT_ERR("%s command tx timeout", hdev->name);
|
||||||
atomic_set(&hdev->cmd_cnt, 1);
|
atomic_set(&hdev->cmd_cnt, 1);
|
||||||
clear_bit(HCI_RESET, &hdev->flags);
|
|
||||||
tasklet_schedule(&hdev->cmd_task);
|
tasklet_schedule(&hdev->cmd_task);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1327,7 +1326,7 @@ int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr)
|
||||||
|
|
||||||
entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
|
entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
|
||||||
if (!entry) {
|
if (!entry) {
|
||||||
return -ENOMEM;
|
err = -ENOMEM;
|
||||||
goto err;
|
goto err;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -2408,6 +2407,9 @@ static void hci_cmd_task(unsigned long arg)
|
||||||
if (hdev->sent_cmd) {
|
if (hdev->sent_cmd) {
|
||||||
atomic_dec(&hdev->cmd_cnt);
|
atomic_dec(&hdev->cmd_cnt);
|
||||||
hci_send_frame(skb);
|
hci_send_frame(skb);
|
||||||
|
if (test_bit(HCI_RESET, &hdev->flags))
|
||||||
|
del_timer(&hdev->cmd_timer);
|
||||||
|
else
|
||||||
mod_timer(&hdev->cmd_timer,
|
mod_timer(&hdev->cmd_timer,
|
||||||
jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
|
jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
|
||||||
} else {
|
} else {
|
||||||
|
|
|
@ -764,6 +764,7 @@ static int hidp_session(void *arg)
|
||||||
|
|
||||||
up_write(&hidp_session_sem);
|
up_write(&hidp_session_sem);
|
||||||
|
|
||||||
|
kfree(session->rd_data);
|
||||||
kfree(session);
|
kfree(session);
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
@ -841,7 +842,8 @@ static int hidp_setup_input(struct hidp_session *session,
|
||||||
|
|
||||||
err = input_register_device(input);
|
err = input_register_device(input);
|
||||||
if (err < 0) {
|
if (err < 0) {
|
||||||
hci_conn_put_device(session->conn);
|
input_free_device(input);
|
||||||
|
session->input = NULL;
|
||||||
return err;
|
return err;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1044,8 +1046,12 @@ int hidp_add_connection(struct hidp_connadd_req *req, struct socket *ctrl_sock,
|
||||||
}
|
}
|
||||||
|
|
||||||
err = hid_add_device(session->hid);
|
err = hid_add_device(session->hid);
|
||||||
if (err < 0)
|
if (err < 0) {
|
||||||
goto err_add_device;
|
atomic_inc(&session->terminate);
|
||||||
|
wake_up_process(session->task);
|
||||||
|
up_write(&hidp_session_sem);
|
||||||
|
return err;
|
||||||
|
}
|
||||||
|
|
||||||
if (session->input) {
|
if (session->input) {
|
||||||
hidp_send_ctrl_message(session,
|
hidp_send_ctrl_message(session,
|
||||||
|
@ -1059,12 +1065,6 @@ int hidp_add_connection(struct hidp_connadd_req *req, struct socket *ctrl_sock,
|
||||||
up_write(&hidp_session_sem);
|
up_write(&hidp_session_sem);
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
err_add_device:
|
|
||||||
hid_destroy_device(session->hid);
|
|
||||||
session->hid = NULL;
|
|
||||||
atomic_inc(&session->terminate);
|
|
||||||
wake_up_process(session->task);
|
|
||||||
|
|
||||||
unlink:
|
unlink:
|
||||||
hidp_del_timer(session);
|
hidp_del_timer(session);
|
||||||
|
|
||||||
|
@ -1090,7 +1090,6 @@ purge:
|
||||||
failed:
|
failed:
|
||||||
up_write(&hidp_session_sem);
|
up_write(&hidp_session_sem);
|
||||||
|
|
||||||
input_free_device(session->input);
|
|
||||||
kfree(session);
|
kfree(session);
|
||||||
return err;
|
return err;
|
||||||
}
|
}
|
||||||
|
|
|
@ -1159,9 +1159,8 @@ int __l2cap_wait_ack(struct sock *sk)
|
||||||
int timeo = HZ/5;
|
int timeo = HZ/5;
|
||||||
|
|
||||||
add_wait_queue(sk_sleep(sk), &wait);
|
add_wait_queue(sk_sleep(sk), &wait);
|
||||||
while ((chan->unacked_frames > 0 && chan->conn)) {
|
|
||||||
set_current_state(TASK_INTERRUPTIBLE);
|
set_current_state(TASK_INTERRUPTIBLE);
|
||||||
|
while (chan->unacked_frames > 0 && chan->conn) {
|
||||||
if (!timeo)
|
if (!timeo)
|
||||||
timeo = HZ/5;
|
timeo = HZ/5;
|
||||||
|
|
||||||
|
@ -1173,6 +1172,7 @@ int __l2cap_wait_ack(struct sock *sk)
|
||||||
release_sock(sk);
|
release_sock(sk);
|
||||||
timeo = schedule_timeout(timeo);
|
timeo = schedule_timeout(timeo);
|
||||||
lock_sock(sk);
|
lock_sock(sk);
|
||||||
|
set_current_state(TASK_INTERRUPTIBLE);
|
||||||
|
|
||||||
err = sock_error(sk);
|
err = sock_error(sk);
|
||||||
if (err)
|
if (err)
|
||||||
|
|
|
@ -235,39 +235,39 @@ static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int fl
|
||||||
|
|
||||||
lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
|
lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
|
||||||
|
|
||||||
if (sk->sk_state != BT_LISTEN) {
|
|
||||||
err = -EBADFD;
|
|
||||||
goto done;
|
|
||||||
}
|
|
||||||
|
|
||||||
timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
|
timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
|
||||||
|
|
||||||
BT_DBG("sk %p timeo %ld", sk, timeo);
|
BT_DBG("sk %p timeo %ld", sk, timeo);
|
||||||
|
|
||||||
/* Wait for an incoming connection. (wake-one). */
|
/* Wait for an incoming connection. (wake-one). */
|
||||||
add_wait_queue_exclusive(sk_sleep(sk), &wait);
|
add_wait_queue_exclusive(sk_sleep(sk), &wait);
|
||||||
while (!(nsk = bt_accept_dequeue(sk, newsock))) {
|
while (1) {
|
||||||
set_current_state(TASK_INTERRUPTIBLE);
|
set_current_state(TASK_INTERRUPTIBLE);
|
||||||
if (!timeo) {
|
|
||||||
err = -EAGAIN;
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
|
|
||||||
release_sock(sk);
|
|
||||||
timeo = schedule_timeout(timeo);
|
|
||||||
lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
|
|
||||||
|
|
||||||
if (sk->sk_state != BT_LISTEN) {
|
if (sk->sk_state != BT_LISTEN) {
|
||||||
err = -EBADFD;
|
err = -EBADFD;
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
nsk = bt_accept_dequeue(sk, newsock);
|
||||||
|
if (nsk)
|
||||||
|
break;
|
||||||
|
|
||||||
|
if (!timeo) {
|
||||||
|
err = -EAGAIN;
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
if (signal_pending(current)) {
|
if (signal_pending(current)) {
|
||||||
err = sock_intr_errno(timeo);
|
err = sock_intr_errno(timeo);
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
release_sock(sk);
|
||||||
|
timeo = schedule_timeout(timeo);
|
||||||
|
lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
|
||||||
}
|
}
|
||||||
set_current_state(TASK_RUNNING);
|
__set_current_state(TASK_RUNNING);
|
||||||
remove_wait_queue(sk_sleep(sk), &wait);
|
remove_wait_queue(sk_sleep(sk), &wait);
|
||||||
|
|
||||||
if (err)
|
if (err)
|
||||||
|
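The RFCOMM and SCO accept paths below receive the identical restructuring. Beyond the task-state ordering, the BT_LISTEN test moves from a single check before the loop to a per-iteration check: the socket lock is dropped across schedule_timeout(), so another thread can tear the listener down while accept() sleeps. Roughly the interleaving the per-iteration check guards against (illustrative comment, not code from this diff):

	/*
	 * accept() thread                    closing thread
	 * ---------------                    --------------
	 * sk_state == BT_LISTEN: ok
	 * release_sock(sk)
	 * schedule_timeout(...)              lock_sock(sk)
	 *                                    sk->sk_state = BT_CLOSED
	 *                                    release_sock(sk)
	 * lock_sock(sk)
	 * must re-check sk_state here before
	 * touching the accept queue
	 */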
@@ -993,7 +993,7 @@ static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int p
 	INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
 
 	sk->sk_destruct = l2cap_sock_destruct;
-	sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT);
+	sk->sk_sndtimeo = L2CAP_CONN_TIMEOUT;
 
 	sock_reset_flag(sk, SOCK_ZAPPED);
@@ -62,7 +62,6 @@ static DEFINE_MUTEX(rfcomm_mutex);
 #define rfcomm_lock()	mutex_lock(&rfcomm_mutex)
 #define rfcomm_unlock()	mutex_unlock(&rfcomm_mutex)
 
-static unsigned long rfcomm_event;
 
 static LIST_HEAD(session_list);
 
@@ -120,7 +119,6 @@ static inline void rfcomm_schedule(void)
 {
 	if (!rfcomm_thread)
 		return;
-	set_bit(RFCOMM_SCHED_WAKEUP, &rfcomm_event);
 	wake_up_process(rfcomm_thread);
 }
 
@@ -2038,19 +2036,18 @@ static int rfcomm_run(void *unused)
 
 	rfcomm_add_listener(BDADDR_ANY);
 
-	while (!kthread_should_stop()) {
+	while (1) {
 		set_current_state(TASK_INTERRUPTIBLE);
-		if (!test_bit(RFCOMM_SCHED_WAKEUP, &rfcomm_event)) {
-			/* No pending events. Let's sleep.
-			 * Incoming connections and data will wake us up. */
-			schedule();
-		}
-		set_current_state(TASK_RUNNING);
+		if (kthread_should_stop())
+			break;
 
 		/* Process stuff */
-		clear_bit(RFCOMM_SCHED_WAKEUP, &rfcomm_event);
 		rfcomm_process_sessions();
 
+		schedule();
 	}
+	__set_current_state(TASK_RUNNING);
 
 	rfcomm_kill_listener();
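With the RFCOMM_SCHED_WAKEUP bit and rfcomm_event gone, krfcommd is woken purely by wake_up_process() and follows the standard kthread loop, checking kthread_should_stop() only after setting the task state. A generic sketch of that idiom (worker_run and process_pending_work are hypothetical names, not the rfcomm code):

	static int worker_run(void *unused)
	{
		while (1) {
			set_current_state(TASK_INTERRUPTIBLE);
			if (kthread_should_stop())
				break;

			process_pending_work();

			/* returns immediately if wake_up_process() ran
			 * while we were working, since that resets us
			 * to TASK_RUNNING */
			schedule();
		}
		__set_current_state(TASK_RUNNING);
		return 0;
	}

Doing the work with the state still TASK_INTERRUPTIBLE is the point: any wakeup delivered meanwhile flips the state back to TASK_RUNNING, so the trailing schedule() cannot sleep through it.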
@@ -485,11 +485,6 @@ static int rfcomm_sock_accept(struct socket *sock, struct socket *newsock, int f
 
 	lock_sock(sk);
 
-	if (sk->sk_state != BT_LISTEN) {
-		err = -EBADFD;
-		goto done;
-	}
-
 	if (sk->sk_type != SOCK_STREAM) {
 		err = -EINVAL;
 		goto done;
 
@@ -501,28 +496,33 @@ static int rfcomm_sock_accept(struct socket *sock, struct socket *newsock, int f
 
 	/* Wait for an incoming connection. (wake-one). */
 	add_wait_queue_exclusive(sk_sleep(sk), &wait);
-	while (!(nsk = bt_accept_dequeue(sk, newsock))) {
+	while (1) {
 		set_current_state(TASK_INTERRUPTIBLE);
-		if (!timeo) {
-			err = -EAGAIN;
-			break;
-		}
-
-		release_sock(sk);
-		timeo = schedule_timeout(timeo);
-		lock_sock(sk);
 
 		if (sk->sk_state != BT_LISTEN) {
 			err = -EBADFD;
 			break;
 		}
 
+		nsk = bt_accept_dequeue(sk, newsock);
+		if (nsk)
+			break;
+
+		if (!timeo) {
+			err = -EAGAIN;
+			break;
+		}
+
 		if (signal_pending(current)) {
 			err = sock_intr_errno(timeo);
 			break;
 		}
+
+		release_sock(sk);
+		timeo = schedule_timeout(timeo);
+		lock_sock(sk);
 	}
-	set_current_state(TASK_RUNNING);
+	__set_current_state(TASK_RUNNING);
 	remove_wait_queue(sk_sleep(sk), &wait);
 
 	if (err)
@@ -564,39 +564,39 @@ static int sco_sock_accept(struct socket *sock, struct socket *newsock, int flag
 
 	lock_sock(sk);
 
-	if (sk->sk_state != BT_LISTEN) {
-		err = -EBADFD;
-		goto done;
-	}
-
 	timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
 
 	BT_DBG("sk %p timeo %ld", sk, timeo);
 
 	/* Wait for an incoming connection. (wake-one). */
 	add_wait_queue_exclusive(sk_sleep(sk), &wait);
-	while (!(ch = bt_accept_dequeue(sk, newsock))) {
+	while (1) {
 		set_current_state(TASK_INTERRUPTIBLE);
-		if (!timeo) {
-			err = -EAGAIN;
-			break;
-		}
-
-		release_sock(sk);
-		timeo = schedule_timeout(timeo);
-		lock_sock(sk);
 
 		if (sk->sk_state != BT_LISTEN) {
 			err = -EBADFD;
 			break;
 		}
 
+		ch = bt_accept_dequeue(sk, newsock);
+		if (ch)
+			break;
+
+		if (!timeo) {
+			err = -EAGAIN;
+			break;
+		}
+
 		if (signal_pending(current)) {
 			err = sock_intr_errno(timeo);
 			break;
 		}
+
+		release_sock(sk);
+		timeo = schedule_timeout(timeo);
+		lock_sock(sk);
 	}
-	set_current_state(TASK_RUNNING);
+	__set_current_state(TASK_RUNNING);
 	remove_wait_queue(sk_sleep(sk), &wait);
 
 	if (err)
@@ -1456,7 +1456,7 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br,
 {
 	struct sk_buff *skb2;
 	const struct ipv6hdr *ip6h;
-	struct icmp6hdr *icmp6h;
+	u8 icmp6_type;
 	u8 nexthdr;
 	unsigned len;
 	int offset;
 
@@ -1502,9 +1502,9 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br,
 	__skb_pull(skb2, offset);
 	skb_reset_transport_header(skb2);
 
-	icmp6h = icmp6_hdr(skb2);
+	icmp6_type = icmp6_hdr(skb2)->icmp6_type;
 
-	switch (icmp6h->icmp6_type) {
+	switch (icmp6_type) {
 	case ICMPV6_MGM_QUERY:
 	case ICMPV6_MGM_REPORT:
 	case ICMPV6_MGM_REDUCTION:
 
@@ -1520,16 +1520,23 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br,
 		err = pskb_trim_rcsum(skb2, len);
 		if (err)
 			goto out;
+		err = -EINVAL;
 	}
 
+	ip6h = ipv6_hdr(skb2);
+
 	switch (skb2->ip_summed) {
 	case CHECKSUM_COMPLETE:
-		if (!csum_fold(skb2->csum))
+		if (!csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, skb2->len,
+				     IPPROTO_ICMPV6, skb2->csum))
 			break;
 		/*FALLTHROUGH*/
 	case CHECKSUM_NONE:
-		skb2->csum = 0;
-		if (skb_checksum_complete(skb2))
+		skb2->csum = ~csum_unfold(csum_ipv6_magic(&ip6h->saddr,
+							  &ip6h->daddr,
+							  skb2->len,
+							  IPPROTO_ICMPV6, 0));
+		if (__skb_checksum_complete(skb2))
 			goto out;
 	}
 
@@ -1537,7 +1544,7 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br,
 
 	BR_INPUT_SKB_CB(skb)->igmp = 1;
 
-	switch (icmp6h->icmp6_type) {
+	switch (icmp6_type) {
 	case ICMPV6_MGM_REPORT:
 	{
 		struct mld_msg *mld;
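Background for the checksum change: the ICMPv6 checksum covers a pseudo-header as well as the message itself (RFC 2460, section 8.1), which is why a bare csum_fold(skb2->csum) was wrong for CHECKSUM_COMPLETE, and why the CHECKSUM_NONE path now seeds skb->csum with the pseudo-header sum before __skb_checksum_complete() verifies the rest. The pseudo-header, roughly as a C layout (illustrative only; the kernel never declares this struct here):

	struct ipv6_pseudo_hdr {	/* fields folded in by csum_ipv6_magic() */
		struct in6_addr	saddr;		/* source address */
		struct in6_addr	daddr;		/* destination address */
		__be32		payload_len;	/* upper-layer packet length */
		__u8		zero[3];	/* always zero */
		__u8		nexthdr;	/* IPPROTO_ICMPV6 here */
	};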
@@ -1319,11 +1319,15 @@ static void neigh_proxy_process(unsigned long arg)
 
 		if (tdif <= 0) {
 			struct net_device *dev = skb->dev;
 
 			__skb_unlink(skb, &tbl->proxy_queue);
-			if (tbl->proxy_redo && netif_running(dev))
+			if (tbl->proxy_redo && netif_running(dev)) {
+				rcu_read_lock();
 				tbl->proxy_redo(skb);
-			else
+				rcu_read_unlock();
+			} else {
 				kfree_skb(skb);
+			}
 
 			dev_put(dev);
 		} else if (!sched_next || tdif < sched_next)
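Rationale for the bracketed rcu_read_lock(): the proxy_redo callback re-enters the protocol receive path (for ARP, parp_redo() ends up in arp_process()), and that path assumes the RCU read-side protection it normally gets from NAPI; a timer callback offers no such guarantee, so the section must be opened explicitly. Sketched in isolation:

	rcu_read_lock();	/* RX-path callees expect a read-side
				 * critical section, as under NAPI */
	tbl->proxy_redo(skb);	/* e.g. parp_redo() -> arp_process() */
	rcu_read_unlock();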
@@ -558,13 +558,14 @@ int __netpoll_rx(struct sk_buff *skb)
 	if (skb_shared(skb))
 		goto out;
 
-	iph = (struct iphdr *)skb->data;
 	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
 		goto out;
+	iph = (struct iphdr *)skb->data;
 	if (iph->ihl < 5 || iph->version != 4)
 		goto out;
 	if (!pskb_may_pull(skb, iph->ihl*4))
 		goto out;
+	iph = (struct iphdr *)skb->data;
 	if (ip_fast_csum((u8 *)iph, iph->ihl) != 0)
 		goto out;
 
@@ -579,6 +580,7 @@ int __netpoll_rx(struct sk_buff *skb)
 	if (pskb_trim_rcsum(skb, len))
 		goto out;
 
+	iph = (struct iphdr *)skb->data;
 	if (iph->protocol != IPPROTO_UDP)
 		goto out;
 
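Background for the reordering above: pskb_may_pull() may relocate the packet headers (via __pskb_pull_tail()) when the requested bytes are not yet linear, so any pointer derived from skb->data beforehand can be left dangling; hence the header pointer is re-derived after each pull and after pskb_trim_rcsum(). A minimal sketch of the rule (generic skb code, not the netpoll function itself):

	struct iphdr *iph;

	/* wrong: skb->data may move during the pull */
	iph = (struct iphdr *)skb->data;
	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
		goto out;

	/* right: pull first, then (re)derive the pointer */
	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
		goto out;
	iph = (struct iphdr *)skb->data;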
@@ -767,7 +767,7 @@ static int igmp_xmarksources(struct ip_mc_list *pmc, int nsrcs, __be32 *srcs)
 			break;
 		for (i=0; i<nsrcs; i++) {
 			/* skip inactive filters */
-			if (pmc->sfcount[MCAST_INCLUDE] ||
+			if (psf->sf_count[MCAST_INCLUDE] ||
 			    pmc->sfcount[MCAST_EXCLUDE] !=
 			    psf->sf_count[MCAST_EXCLUDE])
 				continue;
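The one-line fix above: igmp_xmarksources() walks per-source filters (psf), and the "inactive" test must use the source's own include count; pmc->sfcount[] is the group-wide total, so the old test misclassified sources whenever the group had any include-mode listener at all. The corrected condition with the distinction spelled out (comments added for illustration):

	/* pmc->sfcount[]  - listeners per filter mode, whole group
	 * psf->sf_count[] - listeners per filter mode, this source */
	if (psf->sf_count[MCAST_INCLUDE] ||
	    pmc->sfcount[MCAST_EXCLUDE] != psf->sf_count[MCAST_EXCLUDE])
		continue;	/* inactive source filter */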
Some files were not shown because too many files have changed in this diff.