msm: kgsl: Streamline ringbuffer initialization

Move device-specific initialization into the target-specific code
rather than trying to handle it in the common initialization code.

Change-Id: I812db29a2eae90ca532755c265aaa2e52db972d7
Signed-off-by: Carter Cooper <ccooper@codeaurora.org>
Author: Carter Cooper <ccooper@codeaurora.org>
Date:   2015-11-24 11:46:22 -07:00
Committer: David Keitel
parent 8e9072d633
commit 3ad47c7df0
7 changed files with 316 additions and 290 deletions
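
In outline: one-time, target-specific setup moves behind the gpudev
->init() hook, and per-start CP bring-up moves behind a new ->rb_start()
hook, so the common ringbuffer code only dispatches. Below is a
standalone sketch of that callback split (illustrative stand-in types
only, not the driver's real structures; the actual hunks follow):

    /* Standalone sketch of the callback split (stand-in types). */
    #include <stdio.h>

    struct adreno_device;   /* opaque stand-in for the real struct */

    struct gpudev_ops {
            void (*init)(struct adreno_device *);   /* once, at init */
            int (*rb_start)(struct adreno_device *, unsigned int start_type);
    };

    static void a3xx_init_sketch(struct adreno_device *dev)
    {
            (void)dev;      /* target-only work: pwron fixup, snapshot sizes */
            puts("a3xx: one-time init");
    }

    static int a3xx_rb_start_sketch(struct adreno_device *dev,
                    unsigned int start_type)
    {
            (void)dev;      /* program RB regs, load microcode, send ME init */
            printf("a3xx: rb_start, type %u\n", start_type);
            return 0;
    }

    static const struct gpudev_ops a3xx_ops = {
            .init = a3xx_init_sketch,
            .rb_start = a3xx_rb_start_sketch,
    };

    int main(void)
    {
            /* common code dispatches; no per-target if-ladders remain */
            a3xx_ops.init(NULL);
            return a3xx_ops.rb_start(NULL, 0);
    }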

drivers/gpu/msm/adreno.c

@@ -945,7 +945,7 @@ static int adreno_probe(struct platform_device *pdev)
if (!ADRENO_FEATURE(adreno_dev, ADRENO_CONTENT_PROTECTION))
device->mmu.secured = false;
-status = adreno_ringbuffer_init(adreno_dev, nopreempt);
+status = adreno_ringbuffer_probe(adreno_dev, nopreempt);
if (status)
goto out;
@@ -1132,15 +1132,6 @@ static int adreno_init(struct kgsl_device *device)
/* Power down the device */
kgsl_pwrctrl_change_state(device, KGSL_STATE_INIT);
-/*
-* Enable the power on shader corruption fix
-* This is only applicable for 28nm targets
-*/
-if (adreno_is_a3xx(adreno_dev))
-adreno_a3xx_pwron_fixup_init(adreno_dev);
-else if ((adreno_is_a405(adreno_dev)) || (adreno_is_a420(adreno_dev)))
-adreno_a4xx_pwron_fixup_init(adreno_dev);
if (gpudev->init != NULL)
gpudev->init(adreno_dev);
@@ -1163,16 +1154,6 @@ static int adreno_init(struct kgsl_device *device)
}
}
-/* Adjust snapshot section sizes according to core */
-if ((adreno_is_a330(adreno_dev) || adreno_is_a305b(adreno_dev))) {
-gpudev->snapshot_data->sect_sizes->cp_pfp =
-A320_SNAPSHOT_CP_STATE_SECTION_SIZE;
-gpudev->snapshot_data->sect_sizes->roq =
-A320_SNAPSHOT_ROQ_SECTION_SIZE;
-gpudev->snapshot_data->sect_sizes->cp_merciu =
-A320_SNAPSHOT_CP_MERCIU_SECTION_SIZE;
-}
/*
* Allocate a small chunk of memory for precise cmdbatch profiling for
* those targets that have the always on timer
@@ -1458,12 +1439,6 @@ static int _adreno_start(struct adreno_device *adreno_dev)
if (status)
goto error_mmu_off;
-if (gpudev->hw_init) {
-status = gpudev->hw_init(adreno_dev);
-if (status)
-goto error_mmu_off;
-}
/* Start the dispatcher */
adreno_dispatcher_start(device);
@@ -2121,19 +2096,11 @@ static int adreno_soft_reset(struct kgsl_device *device)
ret = adreno_ringbuffer_start(adreno_dev, ADRENO_START_WARM);
else
ret = adreno_ringbuffer_start(adreno_dev, ADRENO_START_COLD);
-if (ret)
-goto done;
+if (ret == 0) {
+device->reset_counter++;
+set_bit(ADRENO_DEVICE_STARTED, &adreno_dev->priv);
+}
-if (gpudev->hw_init)
-ret = gpudev->hw_init(adreno_dev);
-if (ret)
-goto done;
-device->reset_counter++;
-/* device is back online */
-set_bit(ADRENO_DEVICE_STARTED, &adreno_dev->priv);
done:
return ret;
}

drivers/gpu/msm/adreno.h

@@ -706,10 +706,8 @@ struct adreno_gpudev {
void (*platform_setup)(struct adreno_device *);
void (*init)(struct adreno_device *);
void (*remove)(struct adreno_device *);
-int (*rb_init)(struct adreno_device *, struct adreno_ringbuffer *);
-int (*hw_init)(struct adreno_device *);
+int (*rb_start)(struct adreno_device *, unsigned int start_type);
int (*microcode_read)(struct adreno_device *);
-int (*microcode_load)(struct adreno_device *, unsigned int start_type);
void (*perfcounter_init)(struct adreno_device *);
void (*perfcounter_close)(struct adreno_device *);
void (*start)(struct adreno_device *);
@@ -854,9 +852,6 @@ void adreno_fault_skipcmd_detached(struct adreno_device *adreno_dev,
struct adreno_context *drawctxt,
struct kgsl_cmdbatch *cmdbatch);
-int adreno_a3xx_pwron_fixup_init(struct adreno_device *adreno_dev);
-int adreno_a4xx_pwron_fixup_init(struct adreno_device *adreno_dev);
int adreno_coresight_init(struct adreno_device *adreno_dev);
void adreno_coresight_start(struct adreno_device *adreno_dev);

drivers/gpu/msm/adreno_a3xx.c

@@ -27,6 +27,7 @@
#include "adreno_trace.h"
#include "adreno_pm4types.h"
#include "adreno_perfcounter.h"
+#include "adreno_snapshot.h"
/*
* Define registers for a3xx that contain addresses used by the
@@ -151,7 +152,7 @@ static const unsigned int _a3xx_pwron_fixup_fs_instructions[] = {
};
/**
-* adreno_a3xx_pwron_fixup_init() - Initalize a special command buffer to run a
+* _a3xx_pwron_fixup() - Initialize a special command buffer to run a
* post-power collapse shader workaround
* @adreno_dev: Pointer to a adreno_device struct
*
@@ -161,7 +162,7 @@ static const unsigned int _a3xx_pwron_fixup_fs_instructions[] = {
*
* Returns: 0 on success or negative on error
*/
-int adreno_a3xx_pwron_fixup_init(struct adreno_device *adreno_dev)
+static int _a3xx_pwron_fixup(struct adreno_device *adreno_dev)
{
unsigned int *cmds;
int count = ARRAY_SIZE(_a3xx_pwron_fixup_fs_instructions);
@@ -605,15 +606,7 @@ static void a3xx_platform_setup(struct adreno_device *adreno_dev)
}
}
-/*
-* a3xx_rb_init() - Initialize ringbuffer
-* @adreno_dev: Pointer to adreno device
-* @rb: Pointer to the ringbuffer of device
-*
-* Submit commands for ME initialization, common function shared between
-* a3xx devices
-*/
-static int a3xx_rb_init(struct adreno_device *adreno_dev,
+static int a3xx_send_me_init(struct adreno_device *adreno_dev,
struct adreno_ringbuffer *rb)
{
unsigned int *cmds;
@@ -659,6 +652,58 @@ static int a3xx_rb_init(struct adreno_device *adreno_dev,
return ret;
}
+static int a3xx_rb_start(struct adreno_device *adreno_dev,
+unsigned int start_type)
+{
+struct adreno_ringbuffer *rb = ADRENO_CURRENT_RINGBUFFER(adreno_dev);
+int ret;
+/*
+* The size of the ringbuffer in the hardware is the log2
+* representation of the size in quadwords (sizedwords / 2).
+* Also disable the host RPTR shadow register as it might be unreliable
+* in certain circumstances.
+*/
+adreno_writereg(adreno_dev, ADRENO_REG_CP_RB_CNTL,
+(ilog2(KGSL_RB_DWORDS >> 1) & 0x3F) |
+(1 << 27));
+adreno_writereg(adreno_dev, ADRENO_REG_CP_RB_BASE,
+rb->buffer_desc.gpuaddr);
+ret = a3xx_microcode_load(adreno_dev, start_type);
+if (ret == 0) {
+/* clear ME_HALT to start micro engine */
+adreno_writereg(adreno_dev, ADRENO_REG_CP_ME_CNTL, 0);
+ret = a3xx_send_me_init(adreno_dev, rb);
+}
+return ret;
+}
+/*
+* a3xx_init() - Initialize gpu specific data
+* @adreno_dev: Pointer to adreno device
+*/
+static void a3xx_init(struct adreno_device *adreno_dev)
+{
+struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
+_a3xx_pwron_fixup(adreno_dev);
+/* Adjust snapshot section sizes according to core */
+if ((adreno_is_a330(adreno_dev) || adreno_is_a305b(adreno_dev))) {
+gpudev->snapshot_data->sect_sizes->cp_pfp =
+A320_SNAPSHOT_CP_STATE_SECTION_SIZE;
+gpudev->snapshot_data->sect_sizes->roq =
+A320_SNAPSHOT_ROQ_SECTION_SIZE;
+gpudev->snapshot_data->sect_sizes->cp_merciu =
+A320_SNAPSHOT_CP_MERCIU_SECTION_SIZE;
+}
+}
/*
* a3xx_err_callback() - Call back for a3xx error interrupts
* @adreno_dev: Pointer to device
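
As an aside, here is a worked example of the CP_RB_CNTL value written in
a3xx_rb_start() above; the same computation recurs in the a4xx and a5xx
rb_start hooks below. The 32 KB ringbuffer size is an assumption for
illustration only; check KGSL_RB_SIZE and KGSL_RB_DWORDS in
adreno_ringbuffer.h for the tree's real values:

    #include <stdio.h>

    /* Assumed for the example; see adreno_ringbuffer.h for real values. */
    #define KGSL_RB_SIZE   (32 * 1024)          /* bytes */
    #define KGSL_RB_DWORDS (KGSL_RB_SIZE >> 2)  /* 8192 dwords */

    int main(void)
    {
            unsigned int qw = KGSL_RB_DWORDS >> 1;  /* 4096 quadwords */
            unsigned int log2qw = 0;

            while (qw >>= 1)
                    log2qw++;                       /* ilog2(4096) == 12 */

            /* low 6 bits: log2 size; bit 27: disable host RPTR shadow */
            printf("CP_RB_CNTL = 0x%08X\n",
                   (log2qw & 0x3F) | (1u << 27));   /* prints 0x0800000C */
            return 0;
    }
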
@@ -1817,9 +1862,9 @@ struct adreno_gpudev adreno_a3xx_gpudev = {
.num_prio_levels = 1,
.vbif_xin_halt_ctrl0_mask = A3XX_VBIF_XIN_HALT_CTRL0_MASK,
.platform_setup = a3xx_platform_setup,
-.rb_init = a3xx_rb_init,
+.rb_start = a3xx_rb_start,
+.init = a3xx_init,
.microcode_read = a3xx_microcode_read,
-.microcode_load = a3xx_microcode_load,
.perfcounter_init = a3xx_perfcounter_init,
.perfcounter_close = a3xx_perfcounter_close,
.start = a3xx_start,

drivers/gpu/msm/adreno_a4xx.c

@@ -1439,7 +1439,7 @@
};
/**
-* adreno_a4xx_pwron_fixup_init() - Initalize a special command buffer to run a
+* _a4xx_pwron_fixup() - Initialize a special command buffer to run a
* post-power collapse shader workaround
* @adreno_dev: Pointer to a adreno_device struct
*
@@ -1449,7 +1449,7 @@ static const unsigned int _a4xx_pwron_fixup_fs_instructions[] = {
*
* Returns: 0 on success or negative on error
*/
-int adreno_a4xx_pwron_fixup_init(struct adreno_device *adreno_dev)
+static int _a4xx_pwron_fixup(struct adreno_device *adreno_dev)
{
unsigned int *cmds;
unsigned int count = ARRAY_SIZE(_a4xx_pwron_fixup_fs_instructions);
@@ -1560,23 +1560,17 @@ int adreno_a4xx_pwron_fixup_init(struct adreno_device *adreno_dev)
return 0;
}
-static int a4xx_hw_init(struct adreno_device *adreno_dev)
+/*
+* a4xx_init() - Initialize gpu specific data
+* @adreno_dev: Pointer to adreno device
+*/
+static void a4xx_init(struct adreno_device *adreno_dev)
{
-a4xx_enable_pc(adreno_dev);
-a4xx_enable_ppd(adreno_dev);
-return 0;
+if ((adreno_is_a405(adreno_dev)) || (adreno_is_a420(adreno_dev)))
+_a4xx_pwron_fixup(adreno_dev);
}
-/*
-* a4xx_rb_init() - Initialize ringbuffer
-* @adreno_dev: Pointer to adreno device
-* @rb: Pointer to the ringbuffer of device
-*
-* Submit commands for ME initialization, common function shared between
-* a4xx devices
-*/
-static int a4xx_rb_init(struct adreno_device *adreno_dev,
+static int a4xx_send_me_init(struct adreno_device *adreno_dev,
struct adreno_ringbuffer *rb)
{
unsigned int *cmds;
@@ -1631,6 +1625,47 @@ static int a4xx_rb_init(struct adreno_device *adreno_dev,
return ret;
}
+/*
+* a4xx_rb_start() - Start the ringbuffer
+* @adreno_dev: Pointer to adreno device
+* @start_type: Warm or cold start
+*/
+static int a4xx_rb_start(struct adreno_device *adreno_dev,
+unsigned int start_type)
+{
+struct adreno_ringbuffer *rb = ADRENO_CURRENT_RINGBUFFER(adreno_dev);
+int ret;
+/*
+* The size of the ringbuffer in the hardware is the log2
+* representation of the size in quadwords (sizedwords / 2).
+* Also disable the host RPTR shadow register as it might be unreliable
+* in certain circumstances.
+*/
+adreno_writereg(adreno_dev, ADRENO_REG_CP_RB_CNTL,
+(ilog2(KGSL_RB_DWORDS >> 1) & 0x3F) |
+(1 << 27));
+adreno_writereg(adreno_dev, ADRENO_REG_CP_RB_BASE,
+rb->buffer_desc.gpuaddr);
+ret = a3xx_microcode_load(adreno_dev, start_type);
+if (ret)
+return ret;
+/* clear ME_HALT to start micro engine */
+adreno_writereg(adreno_dev, ADRENO_REG_CP_ME_CNTL, 0);
+ret = a4xx_send_me_init(adreno_dev, rb);
+if (ret == 0) {
+a4xx_enable_pc(adreno_dev);
+a4xx_enable_ppd(adreno_dev);
+}
+return ret;
+}
static ADRENO_CORESIGHT_ATTR(cfg_debbus_ctrlt, &a4xx_coresight_registers[0]);
static ADRENO_CORESIGHT_ATTR(cfg_debbus_sela, &a4xx_coresight_registers[1]);
static ADRENO_CORESIGHT_ATTR(cfg_debbus_selb, &a4xx_coresight_registers[2]);
@@ -2238,10 +2273,9 @@ struct adreno_gpudev adreno_a4xx_gpudev = {
.perfcounter_init = a4xx_perfcounter_init,
.perfcounter_close = a4xx_perfcounter_close,
-.rb_init = a4xx_rb_init,
-.hw_init = a4xx_hw_init,
+.rb_start = a4xx_rb_start,
+.init = a4xx_init,
.microcode_read = a3xx_microcode_read,
-.microcode_load = a3xx_microcode_load,
.coresight = &a4xx_coresight,
.start = a4xx_start,
.snapshot = a4xx_snapshot,

drivers/gpu/msm/adreno_a5xx.c

@@ -88,6 +88,33 @@ static int a5xx_gpmu_init(struct adreno_device *adreno_dev);
#define A530_QFPROM_RAW_PTE_ROW0_MSB 0x134
#define A530_QFPROM_RAW_PTE_ROW2_MSB 0x144
+/* Print some key registers if a spin-for-idle times out */
+static void spin_idle_debug(struct kgsl_device *device,
+const char *str)
+{
+struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+unsigned int rptr, wptr;
+unsigned int status, status3, intstatus;
+unsigned int hwfault;
+dev_err(device->dev, str);
+adreno_readreg(adreno_dev, ADRENO_REG_CP_RB_RPTR, &rptr);
+adreno_readreg(adreno_dev, ADRENO_REG_CP_RB_WPTR, &wptr);
+kgsl_regread(device, A5XX_RBBM_STATUS, &status);
+kgsl_regread(device, A5XX_RBBM_STATUS3, &status3);
+kgsl_regread(device, A5XX_RBBM_INT_0_STATUS, &intstatus);
+kgsl_regread(device, A5XX_CP_HW_FAULT, &hwfault);
+dev_err(device->dev,
+" rb=%X/%X rbbm_status=%8.8X/%8.8X int_0_status=%8.8X\n",
+rptr, wptr, status, status3, intstatus);
+dev_err(device->dev, " hwfault=%8.8X\n", hwfault);
+kgsl_device_snapshot(device, NULL);
+}
static void a530_efuse_leakage(struct adreno_device *adreno_dev)
{
struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
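
The spin_idle_debug() helper added above centralizes the pattern this
patch applies at every spin-for-idle call site: submit, spin for idle,
and on timeout print a caller-supplied message, dump key registers, and
take a snapshot. A rough standalone mock of the call shape (mock names
only; the real functions touch hardware):

    #include <errno.h>
    #include <stdio.h>

    /* Mocks of the call shape only; the real helpers touch hardware. */
    static int submit_spin_mock(unsigned int timeout_ms)
    {
            return timeout_ms ? 0 : -ETIMEDOUT;  /* pretend we idled */
    }

    static void spin_idle_debug_mock(const char *str)
    {
            fputs(str, stderr);  /* real one also dumps regs + snapshot */
    }

    int main(void)
    {
            int ret = submit_spin_mock(2000);  /* 2000 ms, as used here */

            if (ret)
                    spin_idle_debug_mock("gpmu initialization failed to idle\n");
            return ret;
    }
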
@@ -916,6 +943,7 @@ static int _gpmu_send_init_cmds(struct adreno_device *adreno_dev)
struct adreno_ringbuffer *rb = adreno_dev->cur_rb;
uint32_t *cmds;
uint32_t size = adreno_dev->gpmu_cmds_size;
+int ret;
if (size == 0 || adreno_dev->gpmu_cmds == NULL)
return -EINVAL;
@@ -928,7 +956,13 @@ static int _gpmu_send_init_cmds(struct adreno_device *adreno_dev)
/* Copy to the RB the predefined fw sequence cmds */
memcpy(cmds, adreno_dev->gpmu_cmds, size << 2);
-return adreno_ringbuffer_submit_spin(rb, NULL, 2000);
+ret = adreno_ringbuffer_submit_spin(rb, NULL, 2000);
+if (ret != 0)
+spin_idle_debug(&adreno_dev->dev,
+"gpmu initialization failed to idle\n");
+return ret;
}
/*
@@ -948,10 +982,8 @@ static int a5xx_gpmu_start(struct adreno_device *adreno_dev)
return 0;
ret = _gpmu_send_init_cmds(adreno_dev);
-if (ret) {
-KGSL_CORE_ERR("Failed to program the GPMU: %d\n", ret);
+if (ret)
return ret;
-}
if (adreno_is_a530(adreno_dev)) {
/* GPMU clock gating setup */
@@ -2305,28 +2337,6 @@ static int _preemption_init(
return cmds - cmds_orig;
}
-/* Print some key registers if a spin-for-idle times out */
-static void spin_idle_debug(struct kgsl_device *device)
-{
-struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
-unsigned int rptr, wptr;
-unsigned int status, status3, intstatus;
-unsigned int hwfault;
-adreno_readreg(adreno_dev, ADRENO_REG_CP_RB_RPTR, &rptr);
-adreno_readreg(adreno_dev, ADRENO_REG_CP_RB_WPTR, &wptr);
-kgsl_regread(device, A5XX_RBBM_STATUS, &status);
-kgsl_regread(device, A5XX_RBBM_STATUS3, &status3);
-kgsl_regread(device, A5XX_RBBM_INT_0_STATUS, &intstatus);
-kgsl_regread(device, A5XX_CP_HW_FAULT, &hwfault);
-dev_err(device->dev,
-" rb=%X/%X rbbm_status=%8.8X/%8.8X int_0_status=%8.8X\n",
-rptr, wptr, status, status3, intstatus);
-dev_err(device->dev, " hwfault=%8.8X\n", hwfault);
-}
static int a5xx_post_start(struct adreno_device *adreno_dev)
{
int ret;
@@ -2360,18 +2370,12 @@ static int a5xx_post_start(struct adreno_device *adreno_dev)
rb->wptr = rb->wptr - (42 - (cmds - start));
if (cmds == start)
return 0;
ret = adreno_ringbuffer_submit_spin(rb, NULL, 2000);
-if (ret) {
-struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
+if (ret)
+spin_idle_debug(KGSL_DEVICE(adreno_dev),
+"hw initialization failed to idle\n");
-KGSL_DRV_ERR(device, "hw initialization failed to idle\n");
-kgsl_device_snapshot(device, NULL);
-return ret;
-}
-return 0;
+return ret;
}
static int a5xx_gpmu_init(struct adreno_device *adreno_dev)
@@ -2398,23 +2402,6 @@ static int a5xx_gpmu_init(struct adreno_device *adreno_dev)
return 0;
}
-/*
-* a5xx_hw_init() - Initialize GPU HW using PM4 cmds
-* @adreno_dev: Pointer to adreno device
-*
-* Submit PM4 commands for HW initialization,
-*/
-static int a5xx_hw_init(struct adreno_device *adreno_dev)
-{
-int ret = a5xx_gpmu_init(adreno_dev);
-if (!ret)
-ret = a5xx_post_start(adreno_dev);
-return ret;
-}
static int a5xx_switch_to_unsecure_mode(struct adreno_device *adreno_dev,
struct adreno_ringbuffer *rb)
{
@@ -2430,17 +2417,71 @@ static int a5xx_switch_to_unsecure_mode(struct adreno_device *adreno_dev,
cmds += cp_secure_mode(adreno_dev, cmds, 0);
ret = adreno_ringbuffer_submit_spin(rb, NULL, 2000);
-if (ret != 0) {
-struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
-dev_err(device->dev, "Switch to unsecure failed to idle\n");
-spin_idle_debug(device);
-kgsl_device_snapshot(device, NULL);
-}
+if (ret)
+spin_idle_debug(KGSL_DEVICE(adreno_dev),
+"Switch to unsecure failed to idle\n");
return ret;
}
+/*
+* a5xx_microcode_load() - Load microcode
+* @adreno_dev: Pointer to adreno device
+*/
+static int a5xx_microcode_load(struct adreno_device *adreno_dev)
+{
+void *ptr;
+struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
+uint64_t gpuaddr;
+gpuaddr = adreno_dev->pm4.gpuaddr;
+kgsl_regwrite(device, A5XX_CP_PM4_INSTR_BASE_LO,
+lower_32_bits(gpuaddr));
+kgsl_regwrite(device, A5XX_CP_PM4_INSTR_BASE_HI,
+upper_32_bits(gpuaddr));
+gpuaddr = adreno_dev->pfp.gpuaddr;
+kgsl_regwrite(device, A5XX_CP_PFP_INSTR_BASE_LO,
+lower_32_bits(gpuaddr));
+kgsl_regwrite(device, A5XX_CP_PFP_INSTR_BASE_HI,
+upper_32_bits(gpuaddr));
+/*
+* Resume call to write the zap shader base address into the
+* appropriate register,
+* skip if retention is supported for the CPZ register
+*/
+if (zap_ucode_loaded && !(ADRENO_FEATURE(adreno_dev,
+ADRENO_CPZ_RETENTION))) {
+int ret;
+struct scm_desc desc = {0};
+desc.args[0] = 0;
+desc.args[1] = 13;
+desc.arginfo = SCM_ARGS(2);
+ret = scm_call2(SCM_SIP_FNID(SCM_SVC_BOOT, 0xA), &desc);
+if (ret) {
+pr_err("SCM resume call failed with error %d\n", ret);
+return ret;
+}
+}
+/* Load the zap shader firmware through PIL if its available */
+if (adreno_dev->gpucore->zap_name && !zap_ucode_loaded) {
+ptr = subsystem_get(adreno_dev->gpucore->zap_name);
+/* Return error if the zap shader cannot be loaded */
+if (IS_ERR_OR_NULL(ptr))
+return (ptr == NULL) ? -ENODEV : PTR_ERR(ptr);
+zap_ucode_loaded = 1;
+}
+return 0;
+}
static int _me_init_ucode_workarounds(struct adreno_device *adreno_dev)
{
switch (ADRENO_GPUREV(adreno_dev)) {
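
a5xx_microcode_load() above programs each 64-bit microcode GPU address
into a LO/HI pair of 32-bit registers via lower_32_bits() and
upper_32_bits(). A self-contained sketch of that split, with stand-in
helpers and a made-up address:

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-ins for the kernel's lower_32_bits()/upper_32_bits(). */
    static uint32_t lo32(uint64_t v) { return (uint32_t)v; }
    static uint32_t hi32(uint64_t v) { return (uint32_t)(v >> 32); }

    int main(void)
    {
            uint64_t gpuaddr = 0x00000001fc000000ULL;  /* made-up address */

            /* written to *_INSTR_BASE_LO / *_INSTR_BASE_HI respectively */
            printf("LO=0x%08x HI=0x%08x\n", lo32(gpuaddr), hi32(gpuaddr));
            return 0;
    }
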
@@ -2546,26 +2587,21 @@ static int a5xx_critical_packet_submit(struct adreno_device *adreno_dev,
*cmds++ = crit_pkts_dwords;
ret = adreno_ringbuffer_submit_spin(rb, NULL, 20);
-if (ret != 0) {
-struct kgsl_device *device = &adreno_dev->dev;
-dev_err(device->dev,
+if (ret)
+spin_idle_debug(KGSL_DEVICE(adreno_dev),
"Critical packet submission failed to idle\n");
-spin_idle_debug(device);
-kgsl_device_snapshot(device, NULL);
-}
return ret;
}
/*
-* a5xx_rb_init() - Initialize ringbuffer
+* a5xx_send_me_init() - Initialize ringbuffer
* @adreno_dev: Pointer to adreno device
* @rb: Pointer to the ringbuffer of device
*
* Submit commands for ME initialization,
*/
-static int a5xx_rb_init(struct adreno_device *adreno_dev,
+static int a5xx_send_me_init(struct adreno_device *adreno_dev,
struct adreno_ringbuffer *rb)
{
unsigned int *cmds;
@@ -2582,13 +2618,17 @@ static int a5xx_rb_init(struct adreno_device *adreno_dev,
_set_ordinals(adreno_dev, cmds, 8);
ret = adreno_ringbuffer_submit_spin(rb, NULL, 2000);
-if (ret != 0) {
-struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
+if (ret)
+spin_idle_debug(KGSL_DEVICE(adreno_dev),
+"CP initialization failed to idle\n");
-dev_err(device->dev, "CP initialization failed to idle\n");
-spin_idle_debug(device);
-kgsl_device_snapshot(device, NULL);
-}
return ret;
}
+static int a5xx_set_unsecured_mode(struct adreno_device *adreno_dev,
+struct adreno_ringbuffer *rb)
+{
+int ret = 0;
if (ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_CRITICAL_PACKETS)) {
ret = a5xx_critical_packet_submit(adreno_dev, rb);
@@ -2599,10 +2639,73 @@ static int a5xx_rb_init(struct adreno_device *adreno_dev,
/* GPU comes up in secured mode, make it unsecured by default */
if (ADRENO_FEATURE(adreno_dev, ADRENO_CONTENT_PROTECTION))
ret = a5xx_switch_to_unsecure_mode(adreno_dev, rb);
else
kgsl_regwrite(&adreno_dev->dev,
A5XX_RBBM_SECVID_TRUST_CNTL, 0x0);
return ret;
}
+/*
+* a5xx_rb_start() - Start the ringbuffer
+* @adreno_dev: Pointer to adreno device
+* @start_type: Warm or cold start
+*/
+static int a5xx_rb_start(struct adreno_device *adreno_dev,
+unsigned int start_type)
+{
+struct adreno_ringbuffer *rb = ADRENO_CURRENT_RINGBUFFER(adreno_dev);
+int ret;
+/*
+* The size of the ringbuffer in the hardware is the log2
+* representation of the size in quadwords (sizedwords / 2).
+* Also disable the host RPTR shadow register as it might be unreliable
+* in certain circumstances.
+*/
+adreno_writereg(adreno_dev, ADRENO_REG_CP_RB_CNTL,
+(ilog2(KGSL_RB_DWORDS >> 1) & 0x3F) |
+(1 << 27));
+adreno_writereg(adreno_dev, ADRENO_REG_CP_RB_BASE,
+rb->buffer_desc.gpuaddr);
+ret = a5xx_microcode_load(adreno_dev);
+if (ret)
+return ret;
+/* clear ME_HALT to start micro engine */
+adreno_writereg(adreno_dev, ADRENO_REG_CP_ME_CNTL, 0);
+ret = a5xx_send_me_init(adreno_dev, rb);
+if (ret)
+return ret;
+/* GPU comes up in secured mode, make it unsecured by default */
+ret = a5xx_set_unsecured_mode(adreno_dev, rb);
+if (ret)
+return ret;
+/* Set up LM before initializing the GPMU */
+a5xx_lm_init(adreno_dev);
+/* Enable SPTP based power collapse before enabling GPMU */
+a5xx_enable_pc(adreno_dev);
+/* Program the GPMU */
+ret = a5xx_gpmu_start(adreno_dev);
+if (ret)
+return ret;
+/* Enable limits management */
+a5xx_lm_enable(adreno_dev);
+a5xx_post_start(adreno_dev);
+return 0;
+}
static int _load_firmware(struct kgsl_device *device, const char *fwfile,
struct kgsl_memdesc *ucode, size_t *ucode_size,
unsigned int *ucode_version)
@@ -2663,66 +2766,6 @@ static int a5xx_microcode_read(struct adreno_device *adreno_dev)
return ret;
}
-/*
-* a5xx_microcode_load() - Load microcode
-* @adreno_dev: Pointer to adreno device
-* @start_type: type of device start cold/warm
-*/
-static int a5xx_microcode_load(struct adreno_device *adreno_dev,
-unsigned int start_type)
-{
-void *ptr;
-struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
-uint64_t gpuaddr;
-gpuaddr = adreno_dev->pm4.gpuaddr;
-kgsl_regwrite(device, A5XX_CP_PM4_INSTR_BASE_LO,
-lower_32_bits(gpuaddr));
-kgsl_regwrite(device, A5XX_CP_PM4_INSTR_BASE_HI,
-upper_32_bits(gpuaddr));
-gpuaddr = adreno_dev->pfp.gpuaddr;
-kgsl_regwrite(device, A5XX_CP_PFP_INSTR_BASE_LO,
-lower_32_bits(gpuaddr));
-kgsl_regwrite(device, A5XX_CP_PFP_INSTR_BASE_HI,
-upper_32_bits(gpuaddr));
-/*
-* Resume call to write the zap shader base address into the
-* appropriate register,
-* skip if retention is supported for the CPZ register
-*/
-if (zap_ucode_loaded && !(ADRENO_FEATURE(adreno_dev,
-ADRENO_CPZ_RETENTION))) {
-int ret;
-struct scm_desc desc = {0};
-desc.args[0] = 0;
-desc.args[1] = 13;
-desc.arginfo = SCM_ARGS(2);
-ret = scm_call2(SCM_SIP_FNID(SCM_SVC_BOOT, 0xA), &desc);
-if (ret) {
-pr_err("SCM resume call failed with error %d\n", ret);
-return ret;
-}
-}
-/* Load the zap shader firmware through PIL if its available */
-if (adreno_dev->gpucore->zap_name && !zap_ucode_loaded) {
-ptr = subsystem_get(adreno_dev->gpucore->zap_name);
-/* Return error if the zap shader cannot be loaded */
-if (IS_ERR_OR_NULL(ptr))
-return (ptr == NULL) ? -ENODEV : PTR_ERR(ptr);
-zap_ucode_loaded = 1;
-}
-return 0;
-}
static struct adreno_perfcount_register a5xx_perfcounters_cp[] = {
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A5XX_RBBM_PERFCTR_CP_0_LO,
A5XX_RBBM_PERFCTR_CP_0_HI, 0, A5XX_CP_PERFCTR_CP_SEL_0 },
@@ -4120,10 +4163,8 @@ struct adreno_gpudev adreno_a5xx_gpudev = {
.platform_setup = a5xx_platform_setup,
.init = a5xx_init,
.remove = a5xx_remove,
-.rb_init = a5xx_rb_init,
-.hw_init = a5xx_hw_init,
+.rb_start = a5xx_rb_start,
.microcode_read = a5xx_microcode_read,
-.microcode_load = a5xx_microcode_load,
.perfcounters = &a5xx_perfcounters,
.vbif_xin_halt_ctrl0_mask = A5XX_VBIF_XIN_HALT_CTRL0_MASK,
.is_sptp_idle = a5xx_is_sptp_idle,

drivers/gpu/msm/adreno_ringbuffer.c

@@ -137,7 +137,7 @@ int adreno_ringbuffer_submit_spin(struct adreno_ringbuffer *rb,
{
struct adreno_device *adreno_dev = ADRENO_RB_DEVICE(rb);
-adreno_ringbuffer_submit(rb, NULL);
+adreno_ringbuffer_submit(rb, time);
return adreno_spin_idle(adreno_dev, timeout);
}
@@ -262,70 +262,6 @@ unsigned int *adreno_ringbuffer_allocspace(struct adreno_ringbuffer *rb,
return ptr;
}
-/**
-* _ringbuffer_setup_common() - Ringbuffer start
-* @adreno_dev: Pointer to an adreno_device
-*
-* Setup ringbuffer for GPU.
-*/
-static void _ringbuffer_setup_common(struct adreno_device *adreno_dev)
-{
-struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
-struct adreno_ringbuffer *rb;
-int i;
-/* Initialize all of the ringbuffers */
-FOR_EACH_RINGBUFFER(adreno_dev, rb, i) {
-kgsl_sharedmem_set(device, &(rb->buffer_desc), 0,
-0xAA, KGSL_RB_SIZE);
-rb->wptr = 0;
-rb->rptr = 0;
-rb->wptr_preempt_end = 0xFFFFFFFF;
-rb->starve_timer_state =
-ADRENO_DISPATCHER_RB_STARVE_TIMER_UNINIT;
-}
-/* Continue setting up the current ringbuffer */
-rb = ADRENO_CURRENT_RINGBUFFER(adreno_dev);
-/*
-* The size of the ringbuffer in the hardware is the log2
-* representation of the size in quadwords (sizedwords / 2).
-* Also disable the host RPTR shadow register as it might be unreliable
-* in certain circumstances.
-*/
-adreno_writereg(adreno_dev, ADRENO_REG_CP_RB_CNTL,
-(ilog2(KGSL_RB_DWORDS >> 1) & 0x3F) |
-(1 << 27));
-adreno_writereg64(adreno_dev, ADRENO_REG_CP_RB_BASE,
-ADRENO_REG_CP_RB_BASE_HI, rb->buffer_desc.gpuaddr);
-}
-/**
-* _ringbuffer_start_common() - Ringbuffer start
-* @adreno_dev: Pointer to an adreno device
-*
-* Start ringbuffer for GPU.
-*/
-static int _ringbuffer_start_common(struct adreno_device *adreno_dev)
-{
-int status;
-struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
-struct adreno_ringbuffer *rb = ADRENO_CURRENT_RINGBUFFER(adreno_dev);
-/* clear ME_HALT to start micro engine */
-adreno_writereg(adreno_dev, ADRENO_REG_CP_ME_CNTL, 0);
-/* ME init is GPU specific, so jump into the sub-function */
-status = gpudev->rb_init(adreno_dev, rb);
-if (status)
-return status;
-return status;
-}
/**
* adreno_ringbuffer_start() - Ringbuffer start
* @adreno_dev: Pointer to adreno device
@@ -334,16 +270,24 @@ static int _ringbuffer_start_common(struct adreno_device *adreno_dev)
int adreno_ringbuffer_start(struct adreno_device *adreno_dev,
unsigned int start_type)
{
-int status;
struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
+struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
+struct adreno_ringbuffer *rb;
+int i;
-_ringbuffer_setup_common(adreno_dev);
+/* Setup the ringbuffers state before we start */
+FOR_EACH_RINGBUFFER(adreno_dev, rb, i) {
+kgsl_sharedmem_set(device, &(rb->buffer_desc),
+0, 0xAA, KGSL_RB_SIZE);
+rb->wptr = 0;
+rb->rptr = 0;
+rb->wptr_preempt_end = 0xFFFFFFFF;
+rb->starve_timer_state =
+ADRENO_DISPATCHER_RB_STARVE_TIMER_UNINIT;
+}
-status = gpudev->microcode_load(adreno_dev, start_type);
-if (status)
-return status;
-return _ringbuffer_start_common(adreno_dev);
+/* start is specific GPU rb */
+return gpudev->rb_start(adreno_dev, start_type);
}
void adreno_ringbuffer_stop(struct adreno_device *adreno_dev)
@@ -363,7 +307,7 @@ static int _rb_readtimestamp(struct kgsl_device *device,
timestamp);
}
-static int _adreno_ringbuffer_init(struct adreno_device *adreno_dev,
+static int _adreno_ringbuffer_probe(struct adreno_device *adreno_dev,
int id)
{
struct adreno_ringbuffer *rb = &adreno_dev->ringbuffers[id];
@@ -391,7 +335,7 @@ static int _adreno_ringbuffer_init(struct adreno_device *adreno_dev,
KGSL_RB_SIZE, KGSL_MEMFLAGS_GPUREADONLY, 0);
}
-int adreno_ringbuffer_init(struct adreno_device *adreno_dev, bool nopreempt)
+int adreno_ringbuffer_probe(struct adreno_device *adreno_dev, bool nopreempt)
{
int status = 0;
struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
@@ -403,7 +347,7 @@ int adreno_ringbuffer_init(struct adreno_device *adreno_dev, bool nopreempt)
adreno_dev->num_ringbuffers = 1;
for (i = 0; i < adreno_dev->num_ringbuffers; i++) {
-status = _adreno_ringbuffer_init(adreno_dev, i);
+status = _adreno_ringbuffer_probe(adreno_dev, i);
if (status != 0)
break;
}

drivers/gpu/msm/adreno_ringbuffer.h

@@ -152,7 +152,7 @@ int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev,
struct kgsl_cmdbatch *cmdbatch,
struct adreno_submit_time *time);
-int adreno_ringbuffer_init(struct adreno_device *adreno_dev, bool nopreempt);
+int adreno_ringbuffer_probe(struct adreno_device *adreno_dev, bool nopreempt);
int adreno_ringbuffer_start(struct adreno_device *adreno_dev,
unsigned int start_type);