ARM: Improve the L2 cache performance when PL310 is used
With this L2 cache controller, the cache maintenance operations by PA and the sync operations are atomic and do not require a "wait" loop. This patch conditionally defines the cache_wait() function.

Since L2x0 cache controllers do not work with ARMv7 CPUs, the patch automatically enables CACHE_PL310 when only CPU_V7 is defined.

Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
This commit is contained in:
parent
899611ee7d
commit
9a6655e49f
2 changed files with 20 additions and 3 deletions
|
@ -779,6 +779,14 @@ config CACHE_L2X0
|
||||||
help
|
help
|
||||||
This option enables the L2x0 PrimeCell.
|
This option enables the L2x0 PrimeCell.
|
||||||
|
|
||||||
|
config CACHE_PL310
	bool
	depends on CACHE_L2X0
	default y if CPU_V7 && !CPU_V6
	help
	  This option enables optimisations for the PL310 cache
	  controller.
config CACHE_TAUROS2
|
config CACHE_TAUROS2
|
||||||
bool "Enable the Tauros2 L2 cache controller"
|
bool "Enable the Tauros2 L2 cache controller"
|
||||||
depends on (ARCH_DOVE || ARCH_MMP)
|
depends on (ARCH_DOVE || ARCH_MMP)
|
||||||
|
|
|
@ -29,13 +29,22 @@ static void __iomem *l2x0_base;
|
||||||
static DEFINE_SPINLOCK(l2x0_lock);
|
static DEFINE_SPINLOCK(l2x0_lock);
|
||||||
static uint32_t l2x0_way_mask; /* Bitmask of active ways */
|
static uint32_t l2x0_way_mask; /* Bitmask of active ways */
|
||||||
|
|
||||||
/*
 * Poll @reg until every bit in @mask has cleared.
 *
 * Used to wait for background cache maintenance operations (by line or
 * by way) to complete before issuing further commands to the controller.
 */
static inline void cache_wait_way(void __iomem *reg, unsigned long mask)
{
	/* wait for cache operation by line or way to complete */
	while (readl_relaxed(reg) & mask)
		;
}
|
|
||||||
|
#ifdef CONFIG_CACHE_PL310
/*
 * On PL310, cache maintenance operations by line are atomic: the write
 * to the maintenance register completes the operation, so no polling
 * loop is needed.
 */
static inline void cache_wait(void __iomem *reg, unsigned long mask)
{
	/* cache operations by line are atomic on PL310 */
}
#else
/* Other L2x0 controllers must poll for line-operation completion too. */
#define cache_wait	cache_wait_way
#endif
|
||||||
|
|
||||||
static inline void cache_sync(void)
|
static inline void cache_sync(void)
|
||||||
{
|
{
|
||||||
void __iomem *base = l2x0_base;
|
void __iomem *base = l2x0_base;
|
||||||
|
@ -110,7 +119,7 @@ static inline void l2x0_inv_all(void)
|
||||||
/* invalidate all ways */
|
/* invalidate all ways */
|
||||||
spin_lock_irqsave(&l2x0_lock, flags);
|
spin_lock_irqsave(&l2x0_lock, flags);
|
||||||
writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_INV_WAY);
|
writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_INV_WAY);
|
||||||
cache_wait(l2x0_base + L2X0_INV_WAY, l2x0_way_mask);
|
cache_wait_way(l2x0_base + L2X0_INV_WAY, l2x0_way_mask);
|
||||||
cache_sync();
|
cache_sync();
|
||||||
spin_unlock_irqrestore(&l2x0_lock, flags);
|
spin_unlock_irqrestore(&l2x0_lock, flags);
|
||||||
}
|
}
|
||||||
|
|
Loading…
Add table
Reference in a new issue