drm/msm: gpu: Add support for the GPMU
Most 5XX targets have a GPMU (Graphics Power Management Unit) that handles a lot of the heavy lifting for power management, including thermal and limits management and dynamic power collapse. While the GPMU itself is optional, it is usually necessary to hit aggressive power targets.

If the GPMU is to be used, a filename and minimum version are defined in the device tree. The GPMU firmware needs to be loaded into the GPMU at init time via a shared hardware block of registers. Using the GPU to write the microcode is more efficient than using the CPU, so on first load create an indirect buffer that can be executed during subsequent initialization sequences.

After loading, the GPMU is initialized through a shared register interface and then we mostly get out of its way and let it do its thing.

Change-Id: Ic0dedbad8d899177919b71500f2e944b187e87c0
Signed-off-by: Jordan Crouse <jcrouse@codeaurora.org>
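As a rough illustration of the device tree interface described above, the sketch below shows the properties the new code reads (qcom,gpmu-firmware and qcom,gpmu-version for the firmware, plus the optional limits-management tunables consumed by a5xx_power.c). The node name and all of the values are placeholders, not taken from this commit:

	gpu@fdb00000 {
		/* Illustrative values only; the property names match what the driver reads */
		qcom,gpmu-firmware = "a5xx_gpmu.fw";	/* placeholder firmware file name */
		qcom,gpmu-version = <1 0>;		/* minimum acceptable major, minor */

		/* Optional limits-management tuning, also placeholders */
		qcom,gpmu-tsens = <0x0>;
		qcom,lm-limit = <6000>;
		qcom,lm-max-power = <5000>;
	};

If qcom,gpmu-firmware is absent, the GPMU is simply left uninitialized for that target.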
parent 045b6f0aab
commit 0a7274232b
5 changed files with 576 additions and 2 deletions
drivers/gpu/drm/msm/Makefile

@@ -53,7 +53,8 @@ msm_drm-y += adreno/adreno_device.o \
	adreno/adreno_gpu.o \
	adreno/a3xx_gpu.o \
	adreno/a4xx_gpu.o \
	adreno/a5xx_gpu.o
	adreno/a5xx_gpu.o \
	adreno/a5xx_power.o
endif

msm_drm-$(CONFIG_DRM_MSM_MDP4) += mdp/mdp4/mdp4_crtc.o \
drivers/gpu/drm/msm/adreno/a5xx_gpu.c

@@ -463,6 +463,9 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
		REG_A5XX_RBBM_SECVID_TSB_TRUSTED_BASE_HI, 0x00000000);
	gpu_write(gpu, REG_A5XX_RBBM_SECVID_TSB_TRUSTED_SIZE, 0x00000000);

	/* Load the GPMU firmware before starting the HW init */
	a5xx_gpmu_ucode_init(gpu);

	ret = adreno_hw_init(gpu);
	if (ret)
		return ret;

@@ -480,6 +483,10 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
	if (ret)
		return ret;

	ret = a5xx_power_init(gpu);
	if (ret)
		return ret;

	/*
	 * Send a pipeline event stat to get misbehaving counters to start
	 * ticking correctly

@@ -535,6 +542,12 @@ static void a5xx_destroy(struct msm_gpu *gpu)
		drm_gem_object_unreference_unlocked(a5xx_gpu->pfp_bo);
	}

	if (a5xx_gpu->gpmu_bo) {
		if (a5xx_gpu->gpmu_iova)
			msm_gem_put_iova(a5xx_gpu->gpmu_bo, gpu->aspace);
		drm_gem_object_unreference_unlocked(a5xx_gpu->gpmu_bo);
	}

	adreno_gpu_cleanup(adreno_gpu);
	kfree(a5xx_gpu);
}

@@ -777,11 +790,55 @@ static void a5xx_dump(struct msm_gpu *gpu)

static int a5xx_pm_resume(struct msm_gpu *gpu)
{
	return msm_gpu_pm_resume(gpu);
	int ret;

	/* Turn on the core power */
	ret = msm_gpu_pm_resume(gpu);
	if (ret)
		return ret;

	/* Turn the RBCCU domain first to limit the chances of voltage droop */
	gpu_write(gpu, REG_A5XX_GPMU_RBCCU_POWER_CNTL, 0x778000);

	/* Wait 3 usecs before polling */
	udelay(3);

	ret = spin_usecs(gpu, 20, REG_A5XX_GPMU_RBCCU_PWR_CLK_STATUS,
		(1 << 20), (1 << 20));
	if (ret) {
		DRM_ERROR("%s: timeout waiting for RBCCU GDSC enable: %X\n",
			gpu->name,
			gpu_read(gpu, REG_A5XX_GPMU_RBCCU_PWR_CLK_STATUS));
		return ret;
	}

	/* Turn on the SP domain */
	gpu_write(gpu, REG_A5XX_GPMU_SP_POWER_CNTL, 0x778000);
	ret = spin_usecs(gpu, 20, REG_A5XX_GPMU_SP_PWR_CLK_STATUS,
		(1 << 20), (1 << 20));
	if (ret)
		DRM_ERROR("%s: timeout waiting for SP GDSC enable\n",
			gpu->name);

	return ret;
}

static int a5xx_pm_suspend(struct msm_gpu *gpu)
{
	/* Clear the VBIF pipe before shutting down */

	gpu_write(gpu, REG_A5XX_VBIF_XIN_HALT_CTRL0, 0xF);
	spin_until((gpu_read(gpu, REG_A5XX_VBIF_XIN_HALT_CTRL1) & 0xF) == 0xF);

	gpu_write(gpu, REG_A5XX_VBIF_XIN_HALT_CTRL0, 0);

	/*
	 * Reset the VBIF before power collapse to avoid issue with FIFO
	 * entries
	 */
	gpu_write(gpu, REG_A5XX_RBBM_BLOCK_SW_RESET_CMD, 0x003C0000);
	gpu_write(gpu, REG_A5XX_RBBM_BLOCK_SW_RESET_CMD, 0x00000000);

	return msm_gpu_pm_suspend(gpu);
}

@@ -826,6 +883,65 @@ static const struct adreno_gpu_funcs funcs = {
	.get_timestamp = a5xx_get_timestamp,
};

/* Read the limits management leakage from the efuses */
static void a530_efuse_leakage(struct platform_device *pdev,
		struct adreno_gpu *adreno_gpu, void *base,
		size_t size)
{
	struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
	unsigned int row0, row2;
	unsigned int leakage_pwr_on, coeff;

	if (size < 0x148)
		return;

	/* Leakage */
	row0 = readl_relaxed(base + 0x134);
	row2 = readl_relaxed(base + 0x144);

	/* Read barrier to get the previous two reads */
	rmb();

	/* Get the leakage coefficient from device tree */
	if (of_property_read_u32(pdev->dev.of_node,
		"qcom,base-leakage-coefficent", &coeff))
		return;

	leakage_pwr_on = ((row2 >> 2) & 0xFF) * (1 << (row0 >> 1) & 0x03);
	a5xx_gpu->lm_leakage = (leakage_pwr_on << 16) |
		((leakage_pwr_on * coeff) / 100);
}

/* Read target specific configuration from the efuses */
static void a5xx_efuses_read(struct platform_device *pdev,
		struct adreno_gpu *adreno_gpu)
{
	struct adreno_platform_config *config = pdev->dev.platform_data;
	const struct adreno_info *info = adreno_info(config->rev);
	struct resource *res;
	void *base;

	/*
	 * The adreno_gpu->revn mechanism isn't set up yet so we need to check
	 * it directly here
	 */
	if (info->revn != 530)
		return;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
		"qfprom_memory");
	if (!res)
		return;

	base = ioremap(res->start, resource_size(res));
	if (!base)
		return;

	a530_efuse_leakage(pdev, adreno_gpu, base, resource_size(res));

	iounmap(base);
}

struct msm_gpu *a5xx_gpu_init(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;

@@ -851,6 +967,11 @@ struct msm_gpu *a5xx_gpu_init(struct drm_device *dev)
	adreno_gpu->registers = a5xx_registers;
	adreno_gpu->reg_offsets = a5xx_register_offsets;

	a5xx_gpu->lm_leakage = 0x4E001A;

	/* Check the efuses for some configuration */
	a5xx_efuses_read(pdev, adreno_gpu);

	ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs);
	if (ret) {
		a5xx_destroy(&(a5xx_gpu->base.base));
drivers/gpu/drm/msm/adreno/a5xx_gpu.h

@@ -30,8 +30,31 @@ struct a5xx_gpu {

	struct drm_gem_object *pfp_bo;
	uint64_t pfp_iova;

	struct drm_gem_object *gpmu_bo;
	uint64_t gpmu_iova;
	uint32_t gpmu_dwords;

	uint32_t lm_leakage;
};

#define to_a5xx_gpu(x) container_of(x, struct a5xx_gpu, base)

int a5xx_power_init(struct msm_gpu *gpu);
void a5xx_gpmu_ucode_init(struct msm_gpu *gpu);

static inline int spin_usecs(struct msm_gpu *gpu, uint32_t usecs,
		uint32_t reg, uint32_t mask, uint32_t value)
{
	while (usecs--) {
		udelay(1);
		if ((gpu_read(gpu, reg) & mask) == value)
			return 0;
		cpu_relax();
	}

	return -ETIMEDOUT;
}

#endif /* __A5XX_GPU_H__ */
drivers/gpu/drm/msm/adreno/a5xx_power.c (new file, 426 lines)
@@ -0,0 +1,426 @@
/* Copyright (c) 2016 The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/pm_opp.h>
#include "a5xx_gpu.h"

/*
 * The GPMU data block is a block of shared registers that can be used to
 * communicate back and forth. These "registers" are by convention with the
 * GPMU firmware and not bound to any specific hardware design
 */

#define AGC_INIT_BASE REG_A5XX_GPMU_DATA_RAM_BASE
#define AGC_INIT_MSG_MAGIC (AGC_INIT_BASE + 5)
#define AGC_MSG_BASE (AGC_INIT_BASE + 7)

#define AGC_MSG_STATE (AGC_MSG_BASE + 0)
#define AGC_MSG_COMMAND (AGC_MSG_BASE + 1)
#define AGC_MSG_PAYLOAD_SIZE (AGC_MSG_BASE + 3)
#define AGC_MSG_PAYLOAD(_o) ((AGC_MSG_BASE + 5) + (_o))

#define AGC_POWER_CONFIG_PRODUCTION_ID 1
#define AGC_INIT_MSG_VALUE 0xBABEFACE

static struct {
	uint32_t reg;
	uint32_t value;
} a5xx_sequence_regs[] = {
	{ 0xB9A1, 0x00010303 },
	{ 0xB9A2, 0x13000000 },
	{ 0xB9A3, 0x00460020 },
	{ 0xB9A4, 0x10000000 },
	{ 0xB9A5, 0x040A1707 },
	{ 0xB9A6, 0x00010000 },
	{ 0xB9A7, 0x0E000904 },
	{ 0xB9A8, 0x10000000 },
	{ 0xB9A9, 0x01165000 },
	{ 0xB9AA, 0x000E0002 },
	{ 0xB9AB, 0x03884141 },
	{ 0xB9AC, 0x10000840 },
	{ 0xB9AD, 0x572A5000 },
	{ 0xB9AE, 0x00000003 },
	{ 0xB9AF, 0x00000000 },
	{ 0xB9B0, 0x10000000 },
	{ 0xB828, 0x6C204010 },
	{ 0xB829, 0x6C204011 },
	{ 0xB82A, 0x6C204012 },
	{ 0xB82B, 0x6C204013 },
	{ 0xB82C, 0x6C204014 },
	{ 0xB90F, 0x00000004 },
	{ 0xB910, 0x00000002 },
	{ 0xB911, 0x00000002 },
	{ 0xB912, 0x00000002 },
	{ 0xB913, 0x00000002 },
	{ 0xB92F, 0x00000004 },
	{ 0xB930, 0x00000005 },
	{ 0xB931, 0x00000005 },
	{ 0xB932, 0x00000005 },
	{ 0xB933, 0x00000005 },
	{ 0xB96F, 0x00000001 },
	{ 0xB970, 0x00000003 },
	{ 0xB94F, 0x00000004 },
	{ 0xB950, 0x0000000B },
	{ 0xB951, 0x0000000B },
	{ 0xB952, 0x0000000B },
	{ 0xB953, 0x0000000B },
	{ 0xB907, 0x00000019 },
	{ 0xB927, 0x00000019 },
	{ 0xB947, 0x00000019 },
	{ 0xB967, 0x00000019 },
	{ 0xB987, 0x00000019 },
	{ 0xB906, 0x00220001 },
	{ 0xB926, 0x00220001 },
	{ 0xB946, 0x00220001 },
	{ 0xB966, 0x00220001 },
	{ 0xB986, 0x00300000 },
	{ 0xAC40, 0x0340FF41 },
	{ 0xAC41, 0x03BEFED0 },
	{ 0xAC42, 0x00331FED },
	{ 0xAC43, 0x021FFDD3 },
	{ 0xAC44, 0x5555AAAA },
	{ 0xAC45, 0x5555AAAA },
	{ 0xB9BA, 0x00000008 },
};

/*
 * Get the actual voltage value for the operating point at the specified
 * frequency
 */
static inline uint32_t _get_mvolts(struct msm_gpu *gpu, uint32_t freq)
{
	struct drm_device *dev = gpu->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct platform_device *pdev = priv->gpu_pdev;
	struct dev_pm_opp *opp;

	opp = dev_pm_opp_find_freq_exact(&pdev->dev, freq, true);

	return (!IS_ERR(opp)) ? dev_pm_opp_get_voltage(opp) / 1000 : 0;
}

/* Setup thermal limit management */
static void a5xx_lm_setup(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
	uint32_t tsens = 0;
	uint32_t lm_limit = 6000;
	uint32_t max_power = 0;
	unsigned int i;

	/* Write the block of sequence registers */
	for (i = 0; i < ARRAY_SIZE(a5xx_sequence_regs); i++)
		gpu_write(gpu, a5xx_sequence_regs[i].reg,
			a5xx_sequence_regs[i].value);

	of_property_read_u32(GPU_OF_NODE(gpu), "qcom,gpmu-tsens", &tsens);

	gpu_write(gpu, REG_A5XX_GPMU_TEMP_SENSOR_ID, tsens);
	gpu_write(gpu, REG_A5XX_GPMU_DELTA_TEMP_THRESHOLD, 0x01);
	gpu_write(gpu, REG_A5XX_GPMU_TEMP_SENSOR_CONFIG, 0x01);

	/* Until we get clock scaling 0 is always the active power level */
	gpu_write(gpu, REG_A5XX_GPMU_GPMU_VOLTAGE, 0x80000000 | 0);

	gpu_write(gpu, REG_A5XX_GPMU_BASE_LEAKAGE, a5xx_gpu->lm_leakage);

	of_property_read_u32(GPU_OF_NODE(gpu), "qcom,lm-limit", &lm_limit);

	gpu_write(gpu, REG_A5XX_GPMU_GPMU_PWR_THRESHOLD, 0x80000000 | lm_limit);

	gpu_write(gpu, REG_A5XX_GPMU_BEC_ENABLE, 0x10001FFF);
	gpu_write(gpu, REG_A5XX_GDPM_CONFIG1, 0x00201FF1);

	/* Write the voltage table */

	/* Get the max-power from the device tree */
	of_property_read_u32(GPU_OF_NODE(gpu), "qcom,lm-max-power", &max_power);

	gpu_write(gpu, REG_A5XX_GPMU_BEC_ENABLE, 0x10001FFF);
	gpu_write(gpu, REG_A5XX_GDPM_CONFIG1, 0x201FF1);

	gpu_write(gpu, AGC_MSG_STATE, 1);
	gpu_write(gpu, AGC_MSG_COMMAND, AGC_POWER_CONFIG_PRODUCTION_ID);

	/*
	 * For now just write the one voltage level - we will do more when we
	 * can do scaling
	 */
	gpu_write(gpu, AGC_MSG_PAYLOAD(0), max_power);
	gpu_write(gpu, AGC_MSG_PAYLOAD(1), 1);

	gpu_write(gpu, AGC_MSG_PAYLOAD(2), _get_mvolts(gpu, gpu->fast_rate));
	gpu_write(gpu, AGC_MSG_PAYLOAD(3), gpu->fast_rate / 1000000);

	gpu_write(gpu, AGC_MSG_PAYLOAD_SIZE, 4 * sizeof(uint32_t));
	gpu_write(gpu, AGC_INIT_MSG_MAGIC, AGC_INIT_MSG_VALUE);
}

/* Enable SP/TP power collapse */
static void a5xx_pc_init(struct msm_gpu *gpu)
{
	gpu_write(gpu, REG_A5XX_GPMU_PWR_COL_INTER_FRAME_CTRL, 0x7F);
	gpu_write(gpu, REG_A5XX_GPMU_PWR_COL_BINNING_CTRL, 0);
	gpu_write(gpu, REG_A5XX_GPMU_PWR_COL_INTER_FRAME_HYST, 0xA0080);
	gpu_write(gpu, REG_A5XX_GPMU_PWR_COL_STAGGER_DELAY, 0x600040);
}

/* Enable the GPMU microcontroller */
static int a5xx_gpmu_init(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
	struct msm_ringbuffer *ring = gpu->rb;

	if (!a5xx_gpu->gpmu_dwords)
		return 0;

	/* Turn off protected mode for this operation */
	OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
	OUT_RING(ring, 0);

	/* Kick off the IB to load the GPMU microcode */
	OUT_PKT7(ring, CP_INDIRECT_BUFFER_PFE, 3);
	OUT_RING(ring, lower_32_bits(a5xx_gpu->gpmu_iova));
	OUT_RING(ring, upper_32_bits(a5xx_gpu->gpmu_iova));
	OUT_RING(ring, a5xx_gpu->gpmu_dwords);

	/* Turn back on protected mode */
	OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
	OUT_RING(ring, 1);

	gpu->funcs->flush(gpu);

	if (!gpu->funcs->idle(gpu)) {
		DRM_ERROR("%s: Unable to load GPMU firmware. GPMU will not be active\n",
			gpu->name);
		return -EINVAL;
	}

	gpu_write(gpu, REG_A5XX_GPMU_WFI_CONFIG, 0x4014);

	/* Kick off the GPMU */
	gpu_write(gpu, REG_A5XX_GPMU_CM3_SYSRESET, 0x0);

	/*
	 * Wait for the GPMU to respond. It isn't fatal if it doesn't, we just
	 * won't have advanced power collapse.
	 */
	if (spin_usecs(gpu, 25, REG_A5XX_GPMU_GENERAL_0, 0xFFFFFFFF,
		0xBABEFACE))
		DRM_ERROR("%s: GPMU firmware initialization timed out\n",
			gpu->name);

	return 0;
}

/* Enable limits management */
static void a5xx_lm_enable(struct msm_gpu *gpu)
{
	gpu_write(gpu, REG_A5XX_GDPM_INT_MASK, 0x0);
	gpu_write(gpu, REG_A5XX_GDPM_INT_EN, 0x0A);
	gpu_write(gpu, REG_A5XX_GPMU_GPMU_VOLTAGE_INTR_EN_MASK, 0x01);
	gpu_write(gpu, REG_A5XX_GPMU_TEMP_THRESHOLD_INTR_EN_MASK, 0x50000);
	gpu_write(gpu, REG_A5XX_GPMU_THROTTLE_UNMASK_FORCE_CTRL, 0x30000);

	gpu_write(gpu, REG_A5XX_GPMU_CLOCK_THROTTLE_CTRL, 0x011);
}

int a5xx_power_init(struct msm_gpu *gpu)
{
	int ret;

	/* Set up the limits management */
	a5xx_lm_setup(gpu);

	/* Set up SP/TP power collapse */
	a5xx_pc_init(gpu);

	/* Start the GPMU */
	ret = a5xx_gpmu_init(gpu);
	if (ret)
		return ret;

	/* Start the limits management */
	a5xx_lm_enable(gpu);

	return 0;
}

static int _read_header(unsigned int *data, uint32_t fwsize,
		unsigned int *major, unsigned int *minor)
{
	uint32_t size;
	unsigned int i;

	/* First dword of the header is the header size */
	if (fwsize < 4)
		return -EINVAL;

	size = data[0];

	/* Make sure the header isn't too big and is a multiple of two */
	if ((size % 2) || (size > 10) || size > (fwsize >> 2))
		return -EINVAL;

	/* Read the values in pairs */
	for (i = 1; i < size; i += 2) {
		switch (data[i]) {
		case 1:
			*major = data[i + 1];
			break;
		case 2:
			*minor = data[i + 1];
			break;
		default:
			/* Invalid values are non fatal */
			break;
		}
	}

	return 0;
}

/*
 * Make sure cur_major and cur_minor are greater than or equal to the minimum
 * allowable major/minor
 */
static inline bool _check_gpmu_version(uint32_t cur_major, uint32_t cur_minor,
		uint32_t min_major, uint32_t min_minor)
{
	return ((cur_major > min_major) ||
		((cur_major == min_major) && (cur_minor >= min_minor)));
}

void a5xx_gpmu_ucode_init(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
	struct drm_device *drm = gpu->dev;
	const char *name;
	const struct firmware *fw;
	uint32_t version[2] = { 0, 0 };
	uint32_t dwords = 0, offset = 0;
	uint32_t major = 0, minor = 0, bosize;
	unsigned int *data, *ptr, *cmds;
	unsigned int cmds_size;

	if (a5xx_gpu->gpmu_bo)
		return;

	/*
	 * Read the firmware name from the device tree - if it doesn't exist
	 * then don't initialize the GPMU for this target
	 */
	if (of_property_read_string(GPU_OF_NODE(gpu), "qcom,gpmu-firmware",
		&name))
		return;

	/*
	 * The version isn't mandatory, but if it exists, we need to enforce
	 * that the version of the GPMU firmware matches or is newer than the
	 * value
	 */
	of_property_read_u32_array(GPU_OF_NODE(gpu), "qcom,gpmu-version",
		version, 2);

	/* Get the firmware */
	if (request_firmware(&fw, name, drm->dev)) {
		DRM_ERROR("%s: Could not get GPMU firmware. GPMU will not be active\n",
			gpu->name);
		return;
	}

	data = (unsigned int *) fw->data;

	/*
	 * The first dword is the size of the remaining data in dwords. Use it
	 * as a checksum of sorts and make sure it matches the actual size of
	 * the firmware that we read
	 */

	if (fw->size < 8 || (data[0] < 2) || (data[0] >= (fw->size >> 2)))
		goto out;

	/* The second dword is an ID - look for 2 (GPMU_FIRMWARE_ID) */
	if (data[1] != 2)
		goto out;

	/* Read the header and get the major/minor of the read firmware */
	if (_read_header(&data[2], fw->size - 8, &major, &minor))
		goto out;

	if (!_check_gpmu_version(major, minor, version[0], version[1])) {
		DRM_ERROR("%s: Loaded GPMU version %d.%d is too old\n",
			gpu->name, major, minor);
		goto out;
	}

	cmds = data + data[2] + 3;
	cmds_size = data[0] - data[2] - 2;

	/*
	 * A single type4 opcode can only have so many values attached so
	 * add enough opcodes to load all the commands
	 */
	bosize = (cmds_size + (cmds_size / TYPE4_MAX_PAYLOAD) + 1) << 2;

	mutex_lock(&drm->struct_mutex);
	a5xx_gpu->gpmu_bo = msm_gem_new(drm, bosize, MSM_BO_UNCACHED);
	mutex_unlock(&drm->struct_mutex);

	if (IS_ERR(a5xx_gpu->gpmu_bo))
		goto err;

	if (msm_gem_get_iova(a5xx_gpu->gpmu_bo, gpu->aspace,
		&a5xx_gpu->gpmu_iova))
		goto err;

	ptr = msm_gem_vaddr(a5xx_gpu->gpmu_bo);
	if (!ptr)
		goto err;

	while (cmds_size > 0) {
		int i;
		uint32_t _size = cmds_size > TYPE4_MAX_PAYLOAD ?
			TYPE4_MAX_PAYLOAD : cmds_size;

		ptr[dwords++] = PKT4(REG_A5XX_GPMU_INST_RAM_BASE + offset,
			_size);

		for (i = 0; i < _size; i++)
			ptr[dwords++] = *cmds++;

		offset += _size;
		cmds_size -= _size;
	}

	a5xx_gpu->gpmu_dwords = dwords;

	goto out;

err:
	if (a5xx_gpu->gpmu_iova)
		msm_gem_put_iova(a5xx_gpu->gpmu_bo, gpu->aspace);
	if (a5xx_gpu->gpmu_bo)
		drm_gem_object_unreference_unlocked(a5xx_gpu->gpmu_bo);

	a5xx_gpu->gpmu_bo = NULL;
	a5xx_gpu->gpmu_iova = 0;
	a5xx_gpu->gpmu_dwords = 0;

out:
	/* No need to keep that firmware laying around anymore */
	release_firmware(fw);
}
drivers/gpu/drm/msm/adreno/adreno_gpu.h

@@ -148,6 +148,9 @@ struct adreno_platform_config {
	__ret; \
})

#define GPU_OF_NODE(_g) \
	(((struct msm_drm_private *) \
	((_g)->dev->dev_private))->gpu_pdev->dev.of_node)

static inline bool adreno_is_a3xx(struct adreno_gpu *gpu)
{