scsi: ufs: define cpu affinity mask for PM QoS voting

The PM QoS request type PM_QOS_REQ_AFFINE_CORES uses a CPU affinity mask
to specify which CPU cores the vote applies to.
This change makes that CPU mask configurable through the device tree node
so it can be customized for each target.

Change-Id: I004dea47b42eaf3cdf0489427b2bb894c9982f22
Signed-off-by: Gilad Broner <gbroner@codeaurora.org>
Author: Gilad Broner, 2014-11-17 15:00:19 +02:00 (committed by David Keitel)
commit ebfaba8c84, parent f54f471c44
3 changed files with 24 additions and 17 deletions

File 1 of 3: devicetree bindings documentation

@@ -126,6 +126,10 @@ bus vectors in the same order as qcom,msm-bus,vectors-KBps property.
   have little cluster and will update/apply the vote to all the cores in the
   little cluster.
   The default CPU affinity mode is PM_QOS_REQ_AFFINE_IRQ.
+- qcom,cpu-affinity-mask: this property is taken into consideration only if
+  "affine_cores" is specified for qcom,cpu-affinity. It specifies which cores the
+  PM QoS voting should apply to. In practice, for systems with a big/little cluster
+  configuration, this should specify the cores of the little cluster.
 
 Example:
 	ufshc@0xfc598000 {
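
The Example node above is cut off by the hunk. Purely as an illustration, a
target whose little cluster is CPUs 0-3 could describe the PM QoS affinity as
in the sketch below; the latency value and the 0x0F mask are hypothetical
(though 0x0F matches the value this patch removes from ufshcd_pm_qos_init
further down), and only the two qcom,cpu-affinity* properties come from this
patch:

	ufshc@0xfc598000 {
		...
		qcom,cpu-dma-latency-us = <200>;
		qcom,cpu-affinity = "affine_cores";
		/* bit n selects CPU n: 0x0F covers CPUs 0-3 (the little cluster) */
		qcom,cpu-affinity-mask = <0x0F>;
	};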

File 2 of 3: UFS platform driver

@@ -236,9 +236,10 @@ static void ufshcd_parse_pm_levels(struct ufs_hba *hba)
 }
 
 #ifdef CONFIG_SMP
-static void ufshcd_parse_pm_qos(struct ufs_hba *hba)
+static void ufshcd_parse_pm_qos(struct ufs_hba *hba, int irq)
 {
 	const char *cpu_affinity = NULL;
+	u32 cpu_mask;
 
 	hba->pm_qos.cpu_dma_latency_us = UFS_DEFAULT_CPU_DMA_LATENCY_US;
 	of_property_read_u32(hba->dev->of_node, "qcom,cpu-dma-latency-us",
@@ -246,18 +247,30 @@ static void ufshcd_parse_pm_qos(struct ufs_hba *hba)
 	dev_dbg(hba->dev, "cpu_dma_latency_us = %u\n",
 		hba->pm_qos.cpu_dma_latency_us);
 
+	/* Default to affine irq in case parsing fails */
+	hba->pm_qos.req.type = PM_QOS_REQ_AFFINE_IRQ;
+	hba->pm_qos.req.irq = irq;
 	if (!of_property_read_string(hba->dev->of_node, "qcom,cpu-affinity",
 			&cpu_affinity)) {
 		if (!strcmp(cpu_affinity, "all_cores"))
 			hba->pm_qos.req.type = PM_QOS_REQ_ALL_CORES;
 		else if (!strcmp(cpu_affinity, "affine_cores"))
-			hba->pm_qos.req.type = PM_QOS_REQ_AFFINE_CORES;
-		else if (!strcmp(cpu_affinity, "affine_irq"))
-			hba->pm_qos.req.type = PM_QOS_REQ_AFFINE_IRQ;
+			/*
+			 * PM_QOS_REQ_AFFINE_CORES request type is used for
+			 * targets that have little cluster and will apply
+			 * the vote to all the cores in the little cluster.
+			 */
+			if (!of_property_read_u32(hba->dev->of_node,
+				"qcom,cpu-affinity-mask", &cpu_mask)) {
+				hba->pm_qos.req.type = PM_QOS_REQ_AFFINE_CORES;
+				/* Convert u32 to cpu bit mask */
+				cpumask_bits(&hba->pm_qos.req.cpus_affine)[0] =
+					cpu_mask;
+			}
 	}
 
-	dev_dbg(hba->dev, "hba->pm_qos.pm_qos_req.type = %u\n",
-		hba->pm_qos.req.type);
+	dev_dbg(hba->dev, "hba->pm_qos.pm_qos_req.type = %u, cpu_mask=0x%lx\n",
+		hba->pm_qos.req.type, hba->pm_qos.req.cpus_affine.bits[0]);
 }
 
 /**
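
As a side note (not part of the patch): qcom,cpu-affinity-mask is treated as a
plain bit mask in which bit n selects CPU n, and the cpumask_bits() assignment
above simply copies that u32 into the low word of the request's cpumask. A
minimal standalone sketch of the same bit-to-CPU mapping, using a hypothetical
mask value, would be:

	/* Illustration only: which CPUs a given qcom,cpu-affinity-mask selects. */
	#include <stdio.h>

	int main(void)
	{
		unsigned int cpu_mask = 0x0F;	/* hypothetical DT value: CPUs 0-3 */
		int cpu;

		for (cpu = 0; cpu < 32; cpu++)
			if (cpu_mask & (1u << cpu))
				printf("PM QoS vote applies to CPU%d\n", cpu);
		return 0;
	}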
@@ -363,7 +376,7 @@ int ufshcd_pltfrm_init(struct platform_device *pdev,
 		goto dealloc_host;
 	}
 
-	ufshcd_parse_pm_qos(hba);
+	ufshcd_parse_pm_qos(hba, irq);
 	ufshcd_parse_pm_levels(hba);
 	pm_runtime_set_active(&pdev->dev);
 	pm_runtime_enable(&pdev->dev);

File 3 of 3: UFS host controller driver

@@ -1744,16 +1744,6 @@ static void ufshcd_pm_qos_unvote_work(struct work_struct *work)
 
 static int ufshcd_pm_qos_init(struct ufs_hba *hba)
 {
-	/*
-	 * PM_QOS_REQ_AFFINE_CORES request type is used for targets that have
-	 * little cluster and will update/apply the vote to all the cores in
-	 * the little cluster.
-	 */
-	if (hba->pm_qos.req.type == PM_QOS_REQ_AFFINE_CORES)
-		hba->pm_qos.req.cpus_affine.bits[0] = 0x0F;
-	else if (hba->pm_qos.req.type == PM_QOS_REQ_AFFINE_IRQ)
-		hba->pm_qos.req.irq = hba->irq;
-
 	if (hba->pm_qos.cpu_dma_latency_us)
 		pm_qos_add_request(&hba->pm_qos.req,
 			PM_QOS_CPU_DMA_LATENCY, hba->pm_qos.cpu_dma_latency_us);