mmc: fix a race between card-detect rescan and clock-gate work instances
Currently there is a race in the MMC core between a card-detect rescan work and the clock-gating work, scheduled from a command completion. Fix it by removing the dedicated clock-gating mutex and using the MMC standard locking mechanism instead.

Signed-off-by: Guennadi Liakhovetski <g.liakhovetski@gmx.de>
Cc: Simon Horman <horms@verge.net.au>
Cc: Magnus Damm <damm@opensource.se>
Acked-by: Linus Walleij <linus.walleij@linaro.org>
Cc: <stable@kernel.org>
Signed-off-by: Chris Ball <cjb@laptop.org>
This commit is contained in:

parent f694751421
commit 26fc8775b5

2 changed files with 4 additions and 6 deletions
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -94,7 +94,7 @@ static void mmc_host_clk_gate_delayed(struct mmc_host *host)
 		spin_unlock_irqrestore(&host->clk_lock, flags);
 		return;
 	}
-	mutex_lock(&host->clk_gate_mutex);
+	mmc_claim_host(host);
 	spin_lock_irqsave(&host->clk_lock, flags);
 	if (!host->clk_requests) {
 		spin_unlock_irqrestore(&host->clk_lock, flags);
@@ -104,7 +104,7 @@ static void mmc_host_clk_gate_delayed(struct mmc_host *host)
 		pr_debug("%s: gated MCI clock\n", mmc_hostname(host));
 	}
 	spin_unlock_irqrestore(&host->clk_lock, flags);
-	mutex_unlock(&host->clk_gate_mutex);
+	mmc_release_host(host);
 }
 
 /*
@@ -130,7 +130,7 @@ void mmc_host_clk_ungate(struct mmc_host *host)
 {
 	unsigned long flags;
 
-	mutex_lock(&host->clk_gate_mutex);
+	mmc_claim_host(host);
 	spin_lock_irqsave(&host->clk_lock, flags);
 	if (host->clk_gated) {
 		spin_unlock_irqrestore(&host->clk_lock, flags);
@@ -140,7 +140,7 @@ void mmc_host_clk_ungate(struct mmc_host *host)
 	}
 	host->clk_requests++;
 	spin_unlock_irqrestore(&host->clk_lock, flags);
-	mutex_unlock(&host->clk_gate_mutex);
+	mmc_release_host(host);
 }
 
 /**
@@ -215,7 +215,6 @@ static inline void mmc_host_clk_init(struct mmc_host *host)
 	host->clk_gated = false;
 	INIT_WORK(&host->clk_gate_work, mmc_host_clk_gate_work);
 	spin_lock_init(&host->clk_lock);
-	mutex_init(&host->clk_gate_mutex);
 }
 
 /**

--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -183,7 +183,6 @@ struct mmc_host {
 	struct work_struct clk_gate_work;	/* delayed clock gate */
 	unsigned int clk_old;	/* old clock value cache */
 	spinlock_t clk_lock;	/* lock for clk fields */
-	struct mutex clk_gate_mutex;	/* mutex for clock gating */
 #endif
 
 	/* host specific block data */
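For readers skimming the diff, here is a minimal sketch of the idea behind the change. It is illustrative only and not part of the patch; the function names rescan_sketch and clk_gate_work_sketch are hypothetical, and the header locations are assumed from the MMC core of that era. The point is that both the card-detect rescan and the delayed clock-gate work now serialize on the same host claim, so the gating work can no longer run while a rescan holds the host.

/*
 * Illustrative sketch only -- not the actual kernel code.
 * Assumes mmc_claim_host()/mmc_release_host() are visible via the
 * MMC core headers, as they were at the time of this patch.
 */
#include <linux/mmc/core.h>
#include <linux/mmc/host.h>

/* Card-detect rescan path: runs with the host claimed (hypothetical name). */
static void rescan_sketch(struct mmc_host *host)
{
	mmc_claim_host(host);	/* excludes the clock-gate work below */
	/* ... probe the card, issue commands, possibly reprogram the clock ... */
	mmc_release_host(host);
}

/* Delayed clock-gate work: previously guarded only by host->clk_gate_mutex. */
static void clk_gate_work_sketch(struct mmc_host *host)
{
	mmc_claim_host(host);	/* waits until no rescan is in progress */
	/* ... gate the MCI clock if host->clk_requests has dropped to zero ... */
	mmc_release_host(host);
}

Because the rescan work already runs under the host claim, replacing the private mutex with mmc_claim_host()/mmc_release_host() closes the window in which both works could manipulate the clock state concurrently.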