mmc: sdhci: remove support for pm_qos

pm_qos causes race conditions with power management when command
queueing (CQ) mode is enabled. Remove the feature so that power
management can be enabled safely.

Change-Id: I340cd784829f389f18df6bff664337aca0f3c867
Signed-off-by: Dov Levenglick <dovl@codeaurora.org>
Signed-off-by: Konstantin Dorfman <kdorfman@codeaurora.org>
This commit is contained in:
Konstantin Dorfman 2015-06-22 17:44:46 +03:00 committed by Subhash Jadavani
parent 505869b6c2
commit fd6903e1eb
5 changed files with 0 additions and 152 deletions

View file

@ -71,18 +71,6 @@ In the following, <supply> can be vdd (flash core voltage) or vdd-io (I/O voltag
- pinctrl-names
- pinctrl-0, pinctrl-1,.. pinctrl-n
- qcom,cpu-affinity: this is a string that specifies the pm QoS request
type. The supported cpu affinity modes are :
"all_cores" - PM_QOS_REQ_ALL_CORES applies to all CPU cores that
are online; this can have a power impact when there are many CPUs.
"affine_irq" - PM_QOS_REQ_AFFINE_IRQ updates/applies the vote only
to the CPU to which this IRQ's affinity is set.
"affine_cores" - PM_QOS_REQ_AFFINE_CORES is used for targets that
have a LITTLE cluster and updates/applies the vote to all the cores
in the LITTLE cluster.
The default CPU affinity mode is PM_QOS_REQ_AFFINE_IRQ to maintain
backward compatibility.
- qcom,large-address-bus - specifies whether the SoC supports an
address bus wider than 32 bits.

View file

@ -176,8 +176,6 @@
#define CORE_DDR_CONFIG_2 0x1BC
#define DDR_CONFIG_2_POR_VAL 0x80040873
#define MSM_MMC_DEFAULT_CPU_DMA_LATENCY 200 /* usecs */
/* 512 descriptors */
#define SDHCI_MSM_MAX_SEGMENTS (1 << 9)
#define SDHCI_MSM_MMC_CLK_GATE_DELAY 200 /* msecs */
@ -1426,30 +1424,6 @@ out:
return ret;
}
#ifdef CONFIG_SMP
/*
 * Parse the optional "qcom,cpu-affinity" DT property and record the
 * matching PM QoS request type in pdata->cpu_affinity_type.
 * Recognized strings: "all_cores", "affine_cores", "affine_irq".
 * Defaults to PM_QOS_REQ_AFFINE_IRQ when the property is absent or
 * holds an unrecognized value (keeps backward compatibility).
 */
static void sdhci_msm_populate_affinity_type(struct sdhci_msm_pltfm_data *pdata,
struct device_node *np)
{
const char *cpu_affinity = NULL;
/* Default stands if the property is missing or matches nothing below. */
pdata->cpu_affinity_type = PM_QOS_REQ_AFFINE_IRQ;
if (!of_property_read_string(np, "qcom,cpu-affinity",
&cpu_affinity)) {
if (!strcmp(cpu_affinity, "all_cores"))
pdata->cpu_affinity_type = PM_QOS_REQ_ALL_CORES;
else if (!strcmp(cpu_affinity, "affine_cores"))
pdata->cpu_affinity_type = PM_QOS_REQ_AFFINE_CORES;
else if (!strcmp(cpu_affinity, "affine_irq"))
pdata->cpu_affinity_type = PM_QOS_REQ_AFFINE_IRQ;
}
}
#else
/* !CONFIG_SMP: CPU affinity for PM QoS is meaningless; no-op stub. */
static void sdhci_msm_populate_affinity_type(struct sdhci_msm_pltfm_data *pdata,
struct device_node *np)
{
}
#endif
/* Parse platform data */
static
struct sdhci_msm_pltfm_data *sdhci_msm_populate_pdata(struct device *dev,
@ -1458,7 +1432,6 @@ struct sdhci_msm_pltfm_data *sdhci_msm_populate_pdata(struct device *dev,
struct sdhci_msm_pltfm_data *pdata = NULL;
struct device_node *np = dev->of_node;
u32 bus_width = 0;
u32 cpu_dma_latency;
int len, i;
int clk_table_len;
u32 *clk_table = NULL;
@ -1486,12 +1459,6 @@ struct sdhci_msm_pltfm_data *sdhci_msm_populate_pdata(struct device *dev,
pdata->mmc_bus_width = 0;
}
if (!of_property_read_u32(np, "qcom,cpu-dma-latency-us",
&cpu_dma_latency))
pdata->cpu_dma_latency_us = cpu_dma_latency;
else
pdata->cpu_dma_latency_us = MSM_MMC_DEFAULT_CPU_DMA_LATENCY;
if (sdhci_msm_dt_get_array(dev, "qcom,devfreq,freq-table",
&msm_host->mmc->clk_scaling.freq_table,
&msm_host->mmc->clk_scaling.freq_table_sz, 0))
@ -1595,8 +1562,6 @@ struct sdhci_msm_pltfm_data *sdhci_msm_populate_pdata(struct device *dev,
pdata->largeaddressbus =
of_property_read_bool(np, "qcom,large-address-bus");
sdhci_msm_populate_affinity_type(pdata, np);
if (of_property_read_bool(np, "qcom,wakeup-on-idle"))
msm_host->mmc->wakeup_on_idle = true;
@ -3493,8 +3458,6 @@ static int sdhci_msm_probe(struct platform_device *pdev)
if (msm_host->pdata->nonhotplug)
msm_host->mmc->caps2 |= MMC_CAP2_NONHOTPLUG;
host->cpu_dma_latency_us = msm_host->pdata->cpu_dma_latency_us;
host->pm_qos_req_dma.type = msm_host->pdata->cpu_affinity_type;
/* Initialize ICE if present */
if (msm_host->ice.pdev) {

View file

@ -98,12 +98,10 @@ struct sdhci_msm_pltfm_data {
bool pin_cfg_sts;
struct sdhci_msm_pin_data *pin_data;
struct sdhci_pinctrl_data *pctrl_data;
u32 cpu_dma_latency_us;
int status_gpio; /* card detection GPIO that is configured as IRQ */
struct sdhci_msm_bus_voting_data *voting_data;
u32 *sup_clk_table;
unsigned char sup_clk_cnt;
enum pm_qos_req_type cpu_affinity_type;
u32 *sup_ice_clk_table;
unsigned char sup_ice_clk_cnt;
u32 ice_clk_max;

View file

@ -172,33 +172,6 @@ static void sdhci_dumpregs(struct sdhci_host *host)
pr_info(DRIVER_NAME ": ===========================================\n");
}
/* Upper bound accepted for the sysfs-tunable PM QoS unvote delay. */
#define MAX_PM_QOS_TIMEOUT_VALUE 100000 /* 100 ms */
/*
 * sysfs "show" handler: prints the current PM QoS unvote timeout
 * (host->pm_qos_timeout_us) in microseconds.
 */
static ssize_t
show_sdhci_pm_qos_tout(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct sdhci_host *host = dev_get_drvdata(dev);
return snprintf(buf, PAGE_SIZE, "%d us\n", host->pm_qos_timeout_us);
}
/*
 * sysfs "store" handler: updates host->pm_qos_timeout_us from user
 * input. Unparsable input and values above MAX_PM_QOS_TIMEOUT_VALUE
 * are silently ignored; count is returned either way, so the write
 * always appears to succeed. The update is done under host->lock.
 */
static ssize_t
store_sdhci_pm_qos_tout(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct sdhci_host *host = dev_get_drvdata(dev);
uint32_t value;
unsigned long flags;
if (!kstrtou32(buf, 0, &value)) {
spin_lock_irqsave(&host->lock, flags);
if (value <= MAX_PM_QOS_TIMEOUT_VALUE)
host->pm_qos_timeout_us = value;
spin_unlock_irqrestore(&host->lock, flags);
}
return count;
}
/*****************************************************************************\
* *
* Low level functions *
@ -1527,12 +1500,6 @@ static int sdhci_enable(struct mmc_host *mmc)
{
struct sdhci_host *host = mmc_priv(mmc);
if (mmc->card && !mmc_card_cmdq(mmc->card) &&
(host->cpu_dma_latency_us)) {
pm_qos_update_request(&host->pm_qos_req_dma,
host->cpu_dma_latency_us);
}
if (host->ops->platform_bus_voting)
host->ops->platform_bus_voting(host, 1);
@ -1543,24 +1510,6 @@ static int sdhci_disable(struct mmc_host *mmc)
{
struct sdhci_host *host = mmc_priv(mmc);
if (mmc->card && !mmc_card_cmdq(mmc->card) &&
(host->cpu_dma_latency_us)) {
/*
* In performance mode, release QoS vote after a timeout to
* make sure back-to-back requests don't suffer from latencies
* that are involved to wake CPU from low power modes in cases
* where the CPU goes into low power mode as soon as QoS vote is
* released.
*/
if (host->power_policy == SDHCI_PERFORMANCE_MODE)
pm_qos_update_request_timeout(&host->pm_qos_req_dma,
host->cpu_dma_latency_us,
host->pm_qos_timeout_us);
else
pm_qos_update_request(&host->pm_qos_req_dma,
PM_QOS_DEFAULT_VALUE);
}
if (host->ops->platform_bus_voting)
host->ops->platform_bus_voting(host, 0);
@ -3436,31 +3385,6 @@ struct sdhci_host *sdhci_alloc_host(struct device *dev,
EXPORT_SYMBOL_GPL(sdhci_alloc_host);
#ifdef CONFIG_SMP
/*
 * Finish configuring the PM QoS DMA request according to the affinity
 * type previously chosen from DT (host->pm_qos_req_dma.type):
 * - PM_QOS_REQ_AFFINE_CORES: the vote is applied to a fixed CPU mask;
 *   0x0F presumably selects CPUs 0-3 (the LITTLE cluster on these
 *   targets) - TODO confirm against the SoC topology.
 * - PM_QOS_REQ_AFFINE_IRQ: the vote follows the affinity of the host
 *   controller's IRQ.
 * - PM_QOS_REQ_ALL_CORES (default): nothing to set up here; the vote
 *   applies to all online CPUs, which costs power on many-CPU systems.
 */
static void sdhci_set_pmqos_req_type(struct sdhci_host *host)
{
if (host->pm_qos_req_dma.type == PM_QOS_REQ_AFFINE_CORES)
host->pm_qos_req_dma.cpus_affine.bits[0] = 0x0F;
else if (host->pm_qos_req_dma.type == PM_QOS_REQ_AFFINE_IRQ)
host->pm_qos_req_dma.irq = host->irq;
}
#else
/* !CONFIG_SMP: single CPU, so affinity setup is a no-op. */
static void sdhci_set_pmqos_req_type(struct sdhci_host *host)
{
}
#endif
#ifdef CONFIG_MMC_CQ_HCI
static void sdhci_cmdq_clear_set_irqs(struct mmc_host *mmc, bool clear)
{
@ -4156,23 +4080,6 @@ int sdhci_add_host(struct sdhci_host *host)
mmiowb();
if (host->cpu_dma_latency_us) {
host->pm_qos_timeout_us = 10000; /* default value */
sdhci_set_pmqos_req_type(host);
pm_qos_add_request(&host->pm_qos_req_dma,
PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
host->pm_qos_tout.show = show_sdhci_pm_qos_tout;
host->pm_qos_tout.store = store_sdhci_pm_qos_tout;
sysfs_attr_init(&host->pm_qos_tout.attr);
host->pm_qos_tout.attr.name = "pm_qos_unvote_delay";
host->pm_qos_tout.attr.mode = S_IRUGO | S_IWUSR;
ret = device_create_file(mmc_dev(mmc), &host->pm_qos_tout);
if (ret)
pr_err("%s: cannot create pm_qos_unvote_delay %d\n",
mmc_hostname(mmc), ret);
}
if (host->quirks2 & SDHCI_QUIRK2_IGN_DATA_END_BIT_ERROR) {
host->ier = (host->ier & ~SDHCI_INT_DATA_END_BIT);
sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
@ -4242,8 +4149,6 @@ void sdhci_remove_host(struct sdhci_host *host, int dead)
sdhci_disable_card_detection(host);
if (host->cpu_dma_latency_us)
pm_qos_remove_request(&host->pm_qos_req_dma);
mmc_remove_host(host->mmc);
#ifdef SDHCI_USE_LEDS_CLASS

View file

@ -17,7 +17,6 @@
#include <linux/compiler.h>
#include <linux/types.h>
#include <linux/io.h>
#include <linux/pm_qos.h>
#include <linux/ratelimit.h>
#include <linux/mmc/host.h>
@ -605,13 +604,8 @@ struct sdhci_host {
unsigned int tuning_count; /* Timer count for re-tuning */
unsigned int tuning_mode; /* Re-tuning mode supported by host */
#define SDHCI_TUNING_MODE_1 0
unsigned int cpu_dma_latency_us;
struct pm_qos_request pm_qos_req_dma;
ktime_t data_start_time;
unsigned int pm_qos_timeout_us; /* timeout for PM QoS request */
struct device_attribute pm_qos_tout;
enum sdhci_power_policy power_policy;
bool is_crypto_en;