Merge "drm/msm: Add hint to DRM_IOCTL_MSM_GEM_INFO to return an object IOVA"
This commit is contained in: commit fb98e68c1c
50 changed files with 5959 additions and 638 deletions
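The merge subject is the only description this page gives of the new GEM_INFO behavior, so here is a minimal userspace sketch of how such a hint is typically used. Everything API-specific in it is an assumption based on later upstream kernels, not something this page confirms: the flag name MSM_INFO_IOVA, the presence of a flags field in struct drm_msm_gem_info, and the address coming back through the existing offset field.

	/*
	 * Hypothetical sketch only: MSM_INFO_IOVA, the flags field, and the
	 * reuse of the offset field for the returned address are assumptions.
	 */
	#include <stdint.h>
	#include <string.h>
	#include <xf86drm.h>
	#include "msm_drm.h"

	static int msm_bo_get_iova(int fd, uint32_t handle, uint64_t *iova)
	{
		struct drm_msm_gem_info req;

		memset(&req, 0, sizeof(req));
		req.handle = handle;
		req.flags = MSM_INFO_IOVA;	/* hint: return the GPU address, not the mmap offset */

		if (drmCommandWriteRead(fd, DRM_MSM_GEM_INFO, &req, sizeof(req)))
			return -1;

		*iova = req.offset;		/* the iova is returned in the offset field */
		return 0;
	}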
@@ -52,7 +52,9 @@ ifneq ($(CONFIG_QCOM_KGSL),y)
 msm_drm-y += adreno/adreno_device.o \
 	adreno/adreno_gpu.o \
 	adreno/a3xx_gpu.o \
-	adreno/a4xx_gpu.o
+	adreno/a4xx_gpu.o \
+	adreno/a5xx_gpu.o \
+	adreno/a5xx_power.o
 endif
 
 msm_drm-$(CONFIG_DRM_MSM_MDP4) += mdp/mdp4/mdp4_crtc.o \
@@ -8,16 +8,17 @@ http://github.com/freedreno/envytools/
 git clone https://github.com/freedreno/envytools.git
 
 The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 398 bytes, from 2015-09-24 17:25:31)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-02-10 17:07:21)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2015-05-20 20:03:14)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 11518 bytes, from 2016-02-10 21:03:25)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 16166 bytes, from 2016-02-11 21:20:31)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 83967 bytes, from 2016-02-10 17:07:21)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 109916 bytes, from 2016-02-20 18:44:48)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2015-09-24 17:30:00)
+- ./adreno.xml ( 431 bytes, from 2016-10-24 21:12:27)
+- ./freedreno_copyright.xml ( 1572 bytes, from 2016-10-24 21:12:27)
+- ./adreno/a2xx.xml ( 32901 bytes, from 2016-10-24 21:12:27)
+- ./adreno/adreno_common.xml ( 12025 bytes, from 2016-10-24 21:12:27)
+- ./adreno/adreno_pm4.xml ( 19684 bytes, from 2016-10-24 21:12:27)
+- ./adreno/a3xx.xml ( 83840 bytes, from 2016-10-24 21:12:27)
+- ./adreno/a4xx.xml ( 110708 bytes, from 2016-10-24 21:12:27)
+- ./adreno/a5xx.xml ( 81207 bytes, from 2016-10-26 19:36:59)
+- ./adreno/ocmem.xml ( 1773 bytes, from 2016-10-24 21:12:27)
 
-Copyright (C) 2013-2015 by the following authors:
+Copyright (C) 2013-2016 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
 - Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
 
@@ -8,14 +8,15 @@ http://github.com/freedreno/envytools/
 git clone https://github.com/freedreno/envytools.git
 
 The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 398 bytes, from 2015-09-24 17:25:31)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-02-10 17:07:21)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2015-05-20 20:03:14)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 11518 bytes, from 2016-02-10 21:03:25)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 16166 bytes, from 2016-02-11 21:20:31)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 83967 bytes, from 2016-02-10 17:07:21)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 109916 bytes, from 2016-02-20 18:44:48)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2015-09-24 17:30:00)
+- ./adreno.xml ( 431 bytes, from 2016-10-24 21:12:27)
+- ./freedreno_copyright.xml ( 1572 bytes, from 2016-10-24 21:12:27)
+- ./adreno/a2xx.xml ( 32901 bytes, from 2016-10-24 21:12:27)
+- ./adreno/adreno_common.xml ( 12025 bytes, from 2016-10-24 21:12:27)
+- ./adreno/adreno_pm4.xml ( 19684 bytes, from 2016-10-24 21:12:27)
+- ./adreno/a3xx.xml ( 83840 bytes, from 2016-10-24 21:12:27)
+- ./adreno/a4xx.xml ( 110708 bytes, from 2016-10-24 21:12:27)
+- ./adreno/a5xx.xml ( 81207 bytes, from 2016-10-26 19:36:59)
+- ./adreno/ocmem.xml ( 1773 bytes, from 2016-10-24 21:12:27)
 
 Copyright (C) 2013-2016 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
@@ -129,10 +130,14 @@ enum a3xx_tex_fmt {
 	TFMT_Z16_UNORM = 9,
 	TFMT_X8Z24_UNORM = 10,
 	TFMT_Z32_FLOAT = 11,
-	TFMT_NV12_UV_TILED = 17,
-	TFMT_NV12_Y_TILED = 19,
-	TFMT_NV12_UV = 21,
-	TFMT_NV12_Y = 23,
+	TFMT_UV_64X32 = 16,
+	TFMT_VU_64X32 = 17,
+	TFMT_Y_64X32 = 18,
+	TFMT_NV12_64X32 = 19,
+	TFMT_UV_LINEAR = 20,
+	TFMT_VU_LINEAR = 21,
+	TFMT_Y_LINEAR = 22,
+	TFMT_NV12_LINEAR = 23,
 	TFMT_I420_Y = 24,
 	TFMT_I420_U = 26,
 	TFMT_I420_V = 27,
@@ -525,14 +530,6 @@ enum a3xx_uche_perfcounter_select {
 	UCHE_UCHEPERF_ACTIVE_CYCLES = 20,
 };
 
-enum a3xx_rb_blend_opcode {
-	BLEND_DST_PLUS_SRC = 0,
-	BLEND_SRC_MINUS_DST = 1,
-	BLEND_DST_MINUS_SRC = 2,
-	BLEND_MIN_DST_SRC = 3,
-	BLEND_MAX_DST_SRC = 4,
-};
-
 enum a3xx_intp_mode {
 	SMOOTH = 0,
 	FLAT = 1,
@@ -1393,13 +1390,14 @@ static inline uint32_t A3XX_RB_COPY_CONTROL_MODE(enum adreno_rb_copy_control_mode val)
 {
 	return ((val) << A3XX_RB_COPY_CONTROL_MODE__SHIFT) & A3XX_RB_COPY_CONTROL_MODE__MASK;
 }
+#define A3XX_RB_COPY_CONTROL_MSAA_SRGB_DOWNSAMPLE 0x00000080
 #define A3XX_RB_COPY_CONTROL_FASTCLEAR__MASK 0x00000f00
 #define A3XX_RB_COPY_CONTROL_FASTCLEAR__SHIFT 8
 static inline uint32_t A3XX_RB_COPY_CONTROL_FASTCLEAR(uint32_t val)
 {
 	return ((val) << A3XX_RB_COPY_CONTROL_FASTCLEAR__SHIFT) & A3XX_RB_COPY_CONTROL_FASTCLEAR__MASK;
 }
-#define A3XX_RB_COPY_CONTROL_UNK12 0x00001000
+#define A3XX_RB_COPY_CONTROL_DEPTH32_RESOLVE 0x00001000
 #define A3XX_RB_COPY_CONTROL_GMEM_BASE__MASK 0xffffc000
 #define A3XX_RB_COPY_CONTROL_GMEM_BASE__SHIFT 14
 static inline uint32_t A3XX_RB_COPY_CONTROL_GMEM_BASE(uint32_t val)
@@ -1472,7 +1470,7 @@ static inline uint32_t A3XX_RB_DEPTH_CONTROL_ZFUNC(enum adreno_compare_func val)
 {
 	return ((val) << A3XX_RB_DEPTH_CONTROL_ZFUNC__SHIFT) & A3XX_RB_DEPTH_CONTROL_ZFUNC__MASK;
 }
-#define A3XX_RB_DEPTH_CONTROL_BF_ENABLE 0x00000080
+#define A3XX_RB_DEPTH_CONTROL_Z_CLAMP_ENABLE 0x00000080
 #define A3XX_RB_DEPTH_CONTROL_Z_TEST_ENABLE 0x80000000
 
 #define REG_A3XX_RB_DEPTH_CLEAR 0x00002101
@@ -422,91 +422,13 @@ static void a3xx_dump(struct msm_gpu *gpu)
 }
 
-/* Register offset defines for A3XX */
-static const unsigned int a3xx_register_offsets[REG_ADRENO_REGISTER_MAX] = {
-	REG_ADRENO_DEFINE(REG_ADRENO_CP_DEBUG, REG_AXXX_CP_DEBUG),
-	REG_ADRENO_DEFINE(REG_ADRENO_CP_ME_RAM_WADDR, REG_AXXX_CP_ME_RAM_WADDR),
-	REG_ADRENO_DEFINE(REG_ADRENO_CP_ME_RAM_DATA, REG_AXXX_CP_ME_RAM_DATA),
-	REG_ADRENO_DEFINE(REG_ADRENO_CP_PFP_UCODE_DATA,
-			REG_A3XX_CP_PFP_UCODE_DATA),
-	REG_ADRENO_DEFINE(REG_ADRENO_CP_PFP_UCODE_ADDR,
-			REG_A3XX_CP_PFP_UCODE_ADDR),
-	REG_ADRENO_DEFINE(REG_ADRENO_CP_WFI_PEND_CTR, REG_A3XX_CP_WFI_PEND_CTR),
-	REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE, REG_AXXX_CP_RB_BASE),
-	REG_ADRENO_SKIP(REG_ADRENO_CP_RB_BASE_HI),
-	REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR_ADDR, REG_AXXX_CP_RB_RPTR_ADDR),
-	REG_ADRENO_SKIP(REG_ADRENO_CP_RB_RPTR_ADDR_HI),
-	REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR, REG_AXXX_CP_RB_RPTR),
-	REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_WPTR, REG_AXXX_CP_RB_WPTR),
-	REG_ADRENO_DEFINE(REG_ADRENO_CP_PROTECT_CTRL, REG_A3XX_CP_PROTECT_CTRL),
-	REG_ADRENO_DEFINE(REG_ADRENO_CP_ME_CNTL, REG_AXXX_CP_ME_CNTL),
-	REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_CNTL, REG_AXXX_CP_RB_CNTL),
-	REG_ADRENO_DEFINE(REG_ADRENO_CP_IB1_BASE, REG_AXXX_CP_IB1_BASE),
-	REG_ADRENO_DEFINE(REG_ADRENO_CP_IB1_BUFSZ, REG_AXXX_CP_IB1_BUFSZ),
-	REG_ADRENO_DEFINE(REG_ADRENO_CP_IB2_BASE, REG_AXXX_CP_IB2_BASE),
-	REG_ADRENO_DEFINE(REG_ADRENO_CP_IB2_BUFSZ, REG_AXXX_CP_IB2_BUFSZ),
-	REG_ADRENO_DEFINE(REG_ADRENO_CP_TIMESTAMP, REG_AXXX_CP_SCRATCH_REG0),
-	REG_ADRENO_DEFINE(REG_ADRENO_CP_ME_RAM_RADDR, REG_AXXX_CP_ME_RAM_RADDR),
-	REG_ADRENO_DEFINE(REG_ADRENO_SCRATCH_ADDR, REG_AXXX_SCRATCH_ADDR),
-	REG_ADRENO_DEFINE(REG_ADRENO_SCRATCH_UMSK, REG_AXXX_SCRATCH_UMSK),
-	REG_ADRENO_DEFINE(REG_ADRENO_CP_ROQ_ADDR, REG_A3XX_CP_ROQ_ADDR),
-	REG_ADRENO_DEFINE(REG_ADRENO_CP_ROQ_DATA, REG_A3XX_CP_ROQ_DATA),
-	REG_ADRENO_DEFINE(REG_ADRENO_CP_MERCIU_ADDR, REG_A3XX_CP_MERCIU_ADDR),
-	REG_ADRENO_DEFINE(REG_ADRENO_CP_MERCIU_DATA, REG_A3XX_CP_MERCIU_DATA),
-	REG_ADRENO_DEFINE(REG_ADRENO_CP_MERCIU_DATA2, REG_A3XX_CP_MERCIU_DATA2),
-	REG_ADRENO_DEFINE(REG_ADRENO_CP_MEQ_ADDR, REG_A3XX_CP_MEQ_ADDR),
-	REG_ADRENO_DEFINE(REG_ADRENO_CP_MEQ_DATA, REG_A3XX_CP_MEQ_DATA),
-	REG_ADRENO_DEFINE(REG_ADRENO_CP_HW_FAULT, REG_A3XX_CP_HW_FAULT),
-	REG_ADRENO_DEFINE(REG_ADRENO_CP_PROTECT_STATUS,
-			REG_A3XX_CP_PROTECT_STATUS),
-	REG_ADRENO_DEFINE(REG_ADRENO_RBBM_STATUS, REG_A3XX_RBBM_STATUS),
-	REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_CTL,
-			REG_A3XX_RBBM_PERFCTR_CTL),
-	REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_CMD0,
-			REG_A3XX_RBBM_PERFCTR_LOAD_CMD0),
-	REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_CMD1,
-			REG_A3XX_RBBM_PERFCTR_LOAD_CMD1),
-	REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_PWR_1_LO,
-			REG_A3XX_RBBM_PERFCTR_PWR_1_LO),
-	REG_ADRENO_DEFINE(REG_ADRENO_RBBM_INT_0_MASK, REG_A3XX_RBBM_INT_0_MASK),
-	REG_ADRENO_DEFINE(REG_ADRENO_RBBM_INT_0_STATUS,
-			REG_A3XX_RBBM_INT_0_STATUS),
-	REG_ADRENO_DEFINE(REG_ADRENO_RBBM_AHB_ERROR_STATUS,
-			REG_A3XX_RBBM_AHB_ERROR_STATUS),
-	REG_ADRENO_DEFINE(REG_ADRENO_RBBM_AHB_CMD, REG_A3XX_RBBM_AHB_CMD),
-	REG_ADRENO_DEFINE(REG_ADRENO_RBBM_INT_CLEAR_CMD,
-			REG_A3XX_RBBM_INT_CLEAR_CMD),
-	REG_ADRENO_DEFINE(REG_ADRENO_RBBM_CLOCK_CTL, REG_A3XX_RBBM_CLOCK_CTL),
-	REG_ADRENO_DEFINE(REG_ADRENO_VPC_DEBUG_RAM_SEL,
-			REG_A3XX_VPC_VPC_DEBUG_RAM_SEL),
-	REG_ADRENO_DEFINE(REG_ADRENO_VPC_DEBUG_RAM_READ,
-			REG_A3XX_VPC_VPC_DEBUG_RAM_READ),
-	REG_ADRENO_DEFINE(REG_ADRENO_VSC_SIZE_ADDRESS,
-			REG_A3XX_VSC_SIZE_ADDRESS),
-	REG_ADRENO_DEFINE(REG_ADRENO_VFD_CONTROL_0, REG_A3XX_VFD_CONTROL_0),
-	REG_ADRENO_DEFINE(REG_ADRENO_VFD_INDEX_MAX, REG_A3XX_VFD_INDEX_MAX),
-	REG_ADRENO_DEFINE(REG_ADRENO_SP_VS_PVT_MEM_ADDR_REG,
-			REG_A3XX_SP_VS_PVT_MEM_ADDR_REG),
-	REG_ADRENO_DEFINE(REG_ADRENO_SP_FS_PVT_MEM_ADDR_REG,
-			REG_A3XX_SP_FS_PVT_MEM_ADDR_REG),
-	REG_ADRENO_DEFINE(REG_ADRENO_SP_VS_OBJ_START_REG,
-			REG_A3XX_SP_VS_OBJ_START_REG),
-	REG_ADRENO_DEFINE(REG_ADRENO_SP_FS_OBJ_START_REG,
-			REG_A3XX_SP_FS_OBJ_START_REG),
-	REG_ADRENO_DEFINE(REG_ADRENO_PA_SC_AA_CONFIG, REG_A3XX_PA_SC_AA_CONFIG),
-	REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PM_OVERRIDE2,
-			REG_A3XX_RBBM_PM_OVERRIDE2),
-	REG_ADRENO_DEFINE(REG_ADRENO_SCRATCH_REG2, REG_AXXX_CP_SCRATCH_REG2),
-	REG_ADRENO_DEFINE(REG_ADRENO_SQ_GPR_MANAGEMENT,
-			REG_A3XX_SQ_GPR_MANAGEMENT),
-	REG_ADRENO_DEFINE(REG_ADRENO_SQ_INST_STORE_MANAGMENT,
-			REG_A3XX_SQ_INST_STORE_MANAGMENT),
-	REG_ADRENO_DEFINE(REG_ADRENO_TP0_CHICKEN, REG_A3XX_TP0_CHICKEN),
-	REG_ADRENO_DEFINE(REG_ADRENO_RBBM_RBBM_CTL, REG_A3XX_RBBM_RBBM_CTL),
-	REG_ADRENO_DEFINE(REG_ADRENO_RBBM_SW_RESET_CMD,
-			REG_A3XX_RBBM_SW_RESET_CMD),
-	REG_ADRENO_DEFINE(REG_ADRENO_UCHE_INVALIDATE0,
-			REG_A3XX_UCHE_CACHE_INVALIDATE0_REG),
-	REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_VALUE_LO,
-			REG_A3XX_RBBM_PERFCTR_LOAD_VALUE_LO),
-	REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_VALUE_HI,
-			REG_A3XX_RBBM_PERFCTR_LOAD_VALUE_HI),
-};
-
 static const struct adreno_gpu_funcs funcs = {
@@ -8,14 +8,15 @@ http://github.com/freedreno/envytools/
 git clone https://github.com/freedreno/envytools.git
 
 The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 398 bytes, from 2015-09-24 17:25:31)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-02-10 17:07:21)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2015-05-20 20:03:14)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 11518 bytes, from 2016-02-10 21:03:25)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 16166 bytes, from 2016-02-11 21:20:31)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 83967 bytes, from 2016-02-10 17:07:21)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 109916 bytes, from 2016-02-20 18:44:48)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2015-09-24 17:30:00)
+- ./adreno.xml ( 431 bytes, from 2016-10-24 21:12:27)
+- ./freedreno_copyright.xml ( 1572 bytes, from 2016-10-24 21:12:27)
+- ./adreno/a2xx.xml ( 32901 bytes, from 2016-10-24 21:12:27)
+- ./adreno/adreno_common.xml ( 12025 bytes, from 2016-10-24 21:12:27)
+- ./adreno/adreno_pm4.xml ( 19684 bytes, from 2016-10-24 21:12:27)
+- ./adreno/a3xx.xml ( 83840 bytes, from 2016-10-24 21:12:27)
+- ./adreno/a4xx.xml ( 110708 bytes, from 2016-10-24 21:12:27)
+- ./adreno/a5xx.xml ( 81207 bytes, from 2016-10-26 19:36:59)
+- ./adreno/ocmem.xml ( 1773 bytes, from 2016-10-24 21:12:27)
 
 Copyright (C) 2013-2016 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
@@ -46,6 +47,9 @@ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 enum a4xx_color_fmt {
 	RB4_A8_UNORM = 1,
 	RB4_R8_UNORM = 2,
+	RB4_R8_SNORM = 3,
+	RB4_R8_UINT = 4,
+	RB4_R8_SINT = 5,
 	RB4_R4G4B4A4_UNORM = 8,
 	RB4_R5G5B5A1_UNORM = 10,
 	RB4_R5G6B5_UNORM = 14,
@@ -89,17 +93,10 @@ enum a4xx_color_fmt {
 enum a4xx_tile_mode {
 	TILE4_LINEAR = 0,
 	TILE4_2 = 2,
 	TILE4_3 = 3,
 };
 
-enum a4xx_rb_blend_opcode {
-	BLEND_DST_PLUS_SRC = 0,
-	BLEND_SRC_MINUS_DST = 1,
-	BLEND_DST_MINUS_SRC = 2,
-	BLEND_MIN_DST_SRC = 3,
-	BLEND_MAX_DST_SRC = 4,
-};
-
 enum a4xx_vtx_fmt {
 	VFMT4_32_FLOAT = 1,
 	VFMT4_32_32_FLOAT = 2,
@@ -940,6 +937,7 @@ static inline uint32_t A4XX_RB_MODE_CONTROL_HEIGHT(uint32_t val)
 {
 	return ((val >> 5) << A4XX_RB_MODE_CONTROL_HEIGHT__SHIFT) & A4XX_RB_MODE_CONTROL_HEIGHT__MASK;
 }
+#define A4XX_RB_MODE_CONTROL_ENABLE_GMEM 0x00010000
 
 #define REG_A4XX_RB_RENDER_CONTROL 0x000020a1
 #define A4XX_RB_RENDER_CONTROL_BINNING_PASS 0x00000001
@@ -1043,7 +1041,7 @@ static inline uint32_t A4XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR(enum adreno_rb_blend_factor val)
 }
 #define A4XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__MASK 0x000000e0
 #define A4XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__SHIFT 5
-static inline uint32_t A4XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE(enum a4xx_rb_blend_opcode val)
+static inline uint32_t A4XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE(enum a3xx_rb_blend_opcode val)
 {
 	return ((val) << A4XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__SHIFT) & A4XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__MASK;
 }
@@ -1061,7 +1059,7 @@ static inline uint32_t A4XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR(enum adreno_rb_blend_factor val)
 }
 #define A4XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__MASK 0x00e00000
 #define A4XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__SHIFT 21
-static inline uint32_t A4XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE(enum a4xx_rb_blend_opcode val)
+static inline uint32_t A4XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE(enum a3xx_rb_blend_opcode val)
 {
 	return ((val) << A4XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__SHIFT) & A4XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__MASK;
 }
@@ -1073,12 +1071,18 @@ static inline uint32_t A4XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR(enum adreno_rb_blend_factor val)
 }
 
 #define REG_A4XX_RB_BLEND_RED 0x000020f0
-#define A4XX_RB_BLEND_RED_UINT__MASK 0x0000ffff
+#define A4XX_RB_BLEND_RED_UINT__MASK 0x000000ff
 #define A4XX_RB_BLEND_RED_UINT__SHIFT 0
 static inline uint32_t A4XX_RB_BLEND_RED_UINT(uint32_t val)
 {
 	return ((val) << A4XX_RB_BLEND_RED_UINT__SHIFT) & A4XX_RB_BLEND_RED_UINT__MASK;
 }
+#define A4XX_RB_BLEND_RED_SINT__MASK 0x0000ff00
+#define A4XX_RB_BLEND_RED_SINT__SHIFT 8
+static inline uint32_t A4XX_RB_BLEND_RED_SINT(uint32_t val)
+{
+	return ((val) << A4XX_RB_BLEND_RED_SINT__SHIFT) & A4XX_RB_BLEND_RED_SINT__MASK;
+}
 #define A4XX_RB_BLEND_RED_FLOAT__MASK 0xffff0000
 #define A4XX_RB_BLEND_RED_FLOAT__SHIFT 16
 static inline uint32_t A4XX_RB_BLEND_RED_FLOAT(float val)
@@ -1095,12 +1099,18 @@ static inline uint32_t A4XX_RB_BLEND_RED_F32(float val)
 }
 
 #define REG_A4XX_RB_BLEND_GREEN 0x000020f2
-#define A4XX_RB_BLEND_GREEN_UINT__MASK 0x0000ffff
+#define A4XX_RB_BLEND_GREEN_UINT__MASK 0x000000ff
 #define A4XX_RB_BLEND_GREEN_UINT__SHIFT 0
 static inline uint32_t A4XX_RB_BLEND_GREEN_UINT(uint32_t val)
 {
 	return ((val) << A4XX_RB_BLEND_GREEN_UINT__SHIFT) & A4XX_RB_BLEND_GREEN_UINT__MASK;
 }
+#define A4XX_RB_BLEND_GREEN_SINT__MASK 0x0000ff00
+#define A4XX_RB_BLEND_GREEN_SINT__SHIFT 8
+static inline uint32_t A4XX_RB_BLEND_GREEN_SINT(uint32_t val)
+{
+	return ((val) << A4XX_RB_BLEND_GREEN_SINT__SHIFT) & A4XX_RB_BLEND_GREEN_SINT__MASK;
+}
 #define A4XX_RB_BLEND_GREEN_FLOAT__MASK 0xffff0000
 #define A4XX_RB_BLEND_GREEN_FLOAT__SHIFT 16
 static inline uint32_t A4XX_RB_BLEND_GREEN_FLOAT(float val)
@@ -1117,12 +1127,18 @@ static inline uint32_t A4XX_RB_BLEND_GREEN_F32(float val)
 }
 
 #define REG_A4XX_RB_BLEND_BLUE 0x000020f4
-#define A4XX_RB_BLEND_BLUE_UINT__MASK 0x0000ffff
+#define A4XX_RB_BLEND_BLUE_UINT__MASK 0x000000ff
 #define A4XX_RB_BLEND_BLUE_UINT__SHIFT 0
 static inline uint32_t A4XX_RB_BLEND_BLUE_UINT(uint32_t val)
 {
 	return ((val) << A4XX_RB_BLEND_BLUE_UINT__SHIFT) & A4XX_RB_BLEND_BLUE_UINT__MASK;
 }
+#define A4XX_RB_BLEND_BLUE_SINT__MASK 0x0000ff00
+#define A4XX_RB_BLEND_BLUE_SINT__SHIFT 8
+static inline uint32_t A4XX_RB_BLEND_BLUE_SINT(uint32_t val)
+{
+	return ((val) << A4XX_RB_BLEND_BLUE_SINT__SHIFT) & A4XX_RB_BLEND_BLUE_SINT__MASK;
+}
 #define A4XX_RB_BLEND_BLUE_FLOAT__MASK 0xffff0000
 #define A4XX_RB_BLEND_BLUE_FLOAT__SHIFT 16
 static inline uint32_t A4XX_RB_BLEND_BLUE_FLOAT(float val)
@@ -1139,12 +1155,18 @@ static inline uint32_t A4XX_RB_BLEND_BLUE_F32(float val)
 }
 
 #define REG_A4XX_RB_BLEND_ALPHA 0x000020f6
-#define A4XX_RB_BLEND_ALPHA_UINT__MASK 0x0000ffff
+#define A4XX_RB_BLEND_ALPHA_UINT__MASK 0x000000ff
 #define A4XX_RB_BLEND_ALPHA_UINT__SHIFT 0
 static inline uint32_t A4XX_RB_BLEND_ALPHA_UINT(uint32_t val)
 {
 	return ((val) << A4XX_RB_BLEND_ALPHA_UINT__SHIFT) & A4XX_RB_BLEND_ALPHA_UINT__MASK;
 }
+#define A4XX_RB_BLEND_ALPHA_SINT__MASK 0x0000ff00
+#define A4XX_RB_BLEND_ALPHA_SINT__SHIFT 8
+static inline uint32_t A4XX_RB_BLEND_ALPHA_SINT(uint32_t val)
+{
+	return ((val) << A4XX_RB_BLEND_ALPHA_SINT__SHIFT) & A4XX_RB_BLEND_ALPHA_SINT__MASK;
+}
 #define A4XX_RB_BLEND_ALPHA_FLOAT__MASK 0xffff0000
 #define A4XX_RB_BLEND_ALPHA_FLOAT__SHIFT 16
 static inline uint32_t A4XX_RB_BLEND_ALPHA_FLOAT(float val)
@@ -1348,7 +1370,7 @@ static inline uint32_t A4XX_RB_DEPTH_CONTROL_ZFUNC(enum adreno_compare_func val)
 {
 	return ((val) << A4XX_RB_DEPTH_CONTROL_ZFUNC__SHIFT) & A4XX_RB_DEPTH_CONTROL_ZFUNC__MASK;
 }
-#define A4XX_RB_DEPTH_CONTROL_BF_ENABLE 0x00000080
+#define A4XX_RB_DEPTH_CONTROL_Z_CLAMP_ENABLE 0x00000080
 #define A4XX_RB_DEPTH_CONTROL_EARLY_Z_DISABLE 0x00010000
 #define A4XX_RB_DEPTH_CONTROL_FORCE_FRAGZ_TO_FS 0x00020000
 #define A4XX_RB_DEPTH_CONTROL_Z_TEST_ENABLE 0x80000000
@@ -2177,11 +2199,23 @@ static inline uint32_t REG_A4XX_RBBM_CLOCK_DELAY_RB_MARB_CCU_L1_REG(uint32_t i0)
 
 #define REG_A4XX_CP_DRAW_STATE_ADDR 0x00000232
 
 #define REG_A4XX_CP_PROTECT_REG_0 0x00000240
 
 static inline uint32_t REG_A4XX_CP_PROTECT(uint32_t i0) { return 0x00000240 + 0x1*i0; }
 
 static inline uint32_t REG_A4XX_CP_PROTECT_REG(uint32_t i0) { return 0x00000240 + 0x1*i0; }
+#define A4XX_CP_PROTECT_REG_BASE_ADDR__MASK 0x0001ffff
+#define A4XX_CP_PROTECT_REG_BASE_ADDR__SHIFT 0
+static inline uint32_t A4XX_CP_PROTECT_REG_BASE_ADDR(uint32_t val)
+{
+	return ((val) << A4XX_CP_PROTECT_REG_BASE_ADDR__SHIFT) & A4XX_CP_PROTECT_REG_BASE_ADDR__MASK;
+}
+#define A4XX_CP_PROTECT_REG_MASK_LEN__MASK 0x1f000000
+#define A4XX_CP_PROTECT_REG_MASK_LEN__SHIFT 24
+static inline uint32_t A4XX_CP_PROTECT_REG_MASK_LEN(uint32_t val)
+{
+	return ((val) << A4XX_CP_PROTECT_REG_MASK_LEN__SHIFT) & A4XX_CP_PROTECT_REG_MASK_LEN__MASK;
+}
+#define A4XX_CP_PROTECT_REG_TRAP_WRITE 0x20000000
+#define A4XX_CP_PROTECT_REG_TRAP_READ 0x40000000
 
 #define REG_A4XX_CP_PROTECT_CTRL 0x00000250
 
@@ -2272,7 +2306,7 @@ static inline uint32_t A4XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
 {
 	return ((val) << A4XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A4XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
 }
-#define A4XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0003fc00
+#define A4XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0000fc00
 #define A4XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 10
 static inline uint32_t A4XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
 {
@@ -2420,7 +2454,7 @@ static inline uint32_t A4XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
 {
 	return ((val) << A4XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A4XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
 }
-#define A4XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0003fc00
+#define A4XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0000fc00
 #define A4XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 10
 static inline uint32_t A4XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
 {
@@ -3117,6 +3151,8 @@ static inline uint32_t A4XX_TPL1_TP_TEX_COUNT_GS(uint32_t val)
 
 #define REG_A4XX_GRAS_CL_CLIP_CNTL 0x00002000
 #define A4XX_GRAS_CL_CLIP_CNTL_CLIP_DISABLE 0x00008000
+#define A4XX_GRAS_CL_CLIP_CNTL_ZNEAR_CLIP_DISABLE 0x00010000
+#define A4XX_GRAS_CL_CLIP_CNTL_ZFAR_CLIP_DISABLE 0x00020000
 #define A4XX_GRAS_CL_CLIP_CNTL_ZERO_GB_SCALE_Z 0x00400000
 
 #define REG_A4XX_GRAS_CLEAR_CNTL 0x00002003
@@ -3670,6 +3706,8 @@ static inline uint32_t A4XX_HLSQ_GS_CONTROL_REG_INSTRLENGTH(uint32_t val)
 #define REG_A4XX_PC_BINNING_COMMAND 0x00000d00
 #define A4XX_PC_BINNING_COMMAND_BINNING_ENABLE 0x00000001
 
+#define REG_A4XX_PC_TESSFACTOR_ADDR 0x00000d08
+
 #define REG_A4XX_PC_DRAWCALL_SETUP_OVERRIDE 0x00000d0c
 
 #define REG_A4XX_PC_PERFCTR_PC_SEL_0 0x00000d10
@@ -3690,6 +3728,20 @@ static inline uint32_t A4XX_HLSQ_GS_CONTROL_REG_INSTRLENGTH(uint32_t val)
 
 #define REG_A4XX_PC_BIN_BASE 0x000021c0
 
+#define REG_A4XX_PC_VSTREAM_CONTROL 0x000021c2
+#define A4XX_PC_VSTREAM_CONTROL_SIZE__MASK 0x003f0000
+#define A4XX_PC_VSTREAM_CONTROL_SIZE__SHIFT 16
+static inline uint32_t A4XX_PC_VSTREAM_CONTROL_SIZE(uint32_t val)
+{
+	return ((val) << A4XX_PC_VSTREAM_CONTROL_SIZE__SHIFT) & A4XX_PC_VSTREAM_CONTROL_SIZE__MASK;
+}
+#define A4XX_PC_VSTREAM_CONTROL_N__MASK 0x07c00000
+#define A4XX_PC_VSTREAM_CONTROL_N__SHIFT 22
+static inline uint32_t A4XX_PC_VSTREAM_CONTROL_N(uint32_t val)
+{
+	return ((val) << A4XX_PC_VSTREAM_CONTROL_N__SHIFT) & A4XX_PC_VSTREAM_CONTROL_N__MASK;
+}
+
 #define REG_A4XX_PC_PRIM_VTX_CNTL 0x000021c4
 #define A4XX_PC_PRIM_VTX_CNTL_VAROUT__MASK 0x0000000f
 #define A4XX_PC_PRIM_VTX_CNTL_VAROUT__SHIFT 0
@@ -3752,12 +3804,8 @@ static inline uint32_t A4XX_PC_HS_PARAM_SPACING(enum a4xx_tess_spacing val)
 {
 	return ((val) << A4XX_PC_HS_PARAM_SPACING__SHIFT) & A4XX_PC_HS_PARAM_SPACING__MASK;
 }
-#define A4XX_PC_HS_PARAM_PRIMTYPE__MASK 0x01800000
-#define A4XX_PC_HS_PARAM_PRIMTYPE__SHIFT 23
-static inline uint32_t A4XX_PC_HS_PARAM_PRIMTYPE(enum adreno_pa_su_sc_draw val)
-{
-	return ((val) << A4XX_PC_HS_PARAM_PRIMTYPE__SHIFT) & A4XX_PC_HS_PARAM_PRIMTYPE__MASK;
-}
+#define A4XX_PC_HS_PARAM_CW 0x00800000
+#define A4XX_PC_HS_PARAM_CONNECTED 0x01000000
 
 #define REG_A4XX_VBIF_VERSION 0x00003000
 
@@ -455,87 +455,13 @@ static void a4xx_show(struct msm_gpu *gpu, struct seq_file *m)
 
-/* Register offset defines for A4XX, in order of enum adreno_regs */
-static const unsigned int a4xx_register_offsets[REG_ADRENO_REGISTER_MAX] = {
-	REG_ADRENO_DEFINE(REG_ADRENO_CP_DEBUG, REG_A4XX_CP_DEBUG),
-	REG_ADRENO_DEFINE(REG_ADRENO_CP_ME_RAM_WADDR, REG_A4XX_CP_ME_RAM_WADDR),
-	REG_ADRENO_DEFINE(REG_ADRENO_CP_ME_RAM_DATA, REG_A4XX_CP_ME_RAM_DATA),
-	REG_ADRENO_DEFINE(REG_ADRENO_CP_PFP_UCODE_DATA,
-			REG_A4XX_CP_PFP_UCODE_DATA),
-	REG_ADRENO_DEFINE(REG_ADRENO_CP_PFP_UCODE_ADDR,
-			REG_A4XX_CP_PFP_UCODE_ADDR),
-	REG_ADRENO_DEFINE(REG_ADRENO_CP_WFI_PEND_CTR, REG_A4XX_CP_WFI_PEND_CTR),
-	REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE, REG_A4XX_CP_RB_BASE),
-	REG_ADRENO_SKIP(REG_ADRENO_CP_RB_BASE_HI),
-	REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR_ADDR, REG_A4XX_CP_RB_RPTR_ADDR),
-	REG_ADRENO_SKIP(REG_ADRENO_CP_RB_RPTR_ADDR_HI),
-	REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR, REG_A4XX_CP_RB_RPTR),
-	REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_WPTR, REG_A4XX_CP_RB_WPTR),
-	REG_ADRENO_DEFINE(REG_ADRENO_CP_PROTECT_CTRL, REG_A4XX_CP_PROTECT_CTRL),
-	REG_ADRENO_DEFINE(REG_ADRENO_CP_ME_CNTL, REG_A4XX_CP_ME_CNTL),
-	REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_CNTL, REG_A4XX_CP_RB_CNTL),
-	REG_ADRENO_DEFINE(REG_ADRENO_CP_IB1_BASE, REG_A4XX_CP_IB1_BASE),
-	REG_ADRENO_DEFINE(REG_ADRENO_CP_IB1_BUFSZ, REG_A4XX_CP_IB1_BUFSZ),
-	REG_ADRENO_DEFINE(REG_ADRENO_CP_IB2_BASE, REG_A4XX_CP_IB2_BASE),
-	REG_ADRENO_DEFINE(REG_ADRENO_CP_IB2_BUFSZ, REG_A4XX_CP_IB2_BUFSZ),
-	REG_ADRENO_DEFINE(REG_ADRENO_CP_TIMESTAMP, REG_AXXX_CP_SCRATCH_REG0),
-	REG_ADRENO_DEFINE(REG_ADRENO_CP_ME_RAM_RADDR, REG_A4XX_CP_ME_RAM_RADDR),
-	REG_ADRENO_DEFINE(REG_ADRENO_CP_ROQ_ADDR, REG_A4XX_CP_ROQ_ADDR),
-	REG_ADRENO_DEFINE(REG_ADRENO_CP_ROQ_DATA, REG_A4XX_CP_ROQ_DATA),
-	REG_ADRENO_DEFINE(REG_ADRENO_CP_MERCIU_ADDR, REG_A4XX_CP_MERCIU_ADDR),
-	REG_ADRENO_DEFINE(REG_ADRENO_CP_MERCIU_DATA, REG_A4XX_CP_MERCIU_DATA),
-	REG_ADRENO_DEFINE(REG_ADRENO_CP_MERCIU_DATA2, REG_A4XX_CP_MERCIU_DATA2),
-	REG_ADRENO_DEFINE(REG_ADRENO_CP_MEQ_ADDR, REG_A4XX_CP_MEQ_ADDR),
-	REG_ADRENO_DEFINE(REG_ADRENO_CP_MEQ_DATA, REG_A4XX_CP_MEQ_DATA),
-	REG_ADRENO_DEFINE(REG_ADRENO_CP_HW_FAULT, REG_A4XX_CP_HW_FAULT),
-	REG_ADRENO_DEFINE(REG_ADRENO_CP_PROTECT_STATUS,
-			REG_A4XX_CP_PROTECT_STATUS),
-	REG_ADRENO_DEFINE(REG_ADRENO_SCRATCH_ADDR, REG_A4XX_CP_SCRATCH_ADDR),
-	REG_ADRENO_DEFINE(REG_ADRENO_SCRATCH_UMSK, REG_A4XX_CP_SCRATCH_UMASK),
-	REG_ADRENO_DEFINE(REG_ADRENO_RBBM_STATUS, REG_A4XX_RBBM_STATUS),
-	REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_CTL,
-			REG_A4XX_RBBM_PERFCTR_CTL),
-	REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_CMD0,
-			REG_A4XX_RBBM_PERFCTR_LOAD_CMD0),
-	REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_CMD1,
-			REG_A4XX_RBBM_PERFCTR_LOAD_CMD1),
-	REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_CMD2,
-			REG_A4XX_RBBM_PERFCTR_LOAD_CMD2),
-	REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_PWR_1_LO,
-			REG_A4XX_RBBM_PERFCTR_PWR_1_LO),
-	REG_ADRENO_DEFINE(REG_ADRENO_RBBM_INT_0_MASK, REG_A4XX_RBBM_INT_0_MASK),
-	REG_ADRENO_DEFINE(REG_ADRENO_RBBM_INT_0_STATUS,
-			REG_A4XX_RBBM_INT_0_STATUS),
-	REG_ADRENO_DEFINE(REG_ADRENO_RBBM_AHB_ERROR_STATUS,
-			REG_A4XX_RBBM_AHB_ERROR_STATUS),
-	REG_ADRENO_DEFINE(REG_ADRENO_RBBM_AHB_CMD, REG_A4XX_RBBM_AHB_CMD),
-	REG_ADRENO_DEFINE(REG_ADRENO_RBBM_CLOCK_CTL, REG_A4XX_RBBM_CLOCK_CTL),
-	REG_ADRENO_DEFINE(REG_ADRENO_RBBM_AHB_ME_SPLIT_STATUS,
-			REG_A4XX_RBBM_AHB_ME_SPLIT_STATUS),
-	REG_ADRENO_DEFINE(REG_ADRENO_RBBM_AHB_PFP_SPLIT_STATUS,
-			REG_A4XX_RBBM_AHB_PFP_SPLIT_STATUS),
-	REG_ADRENO_DEFINE(REG_ADRENO_VPC_DEBUG_RAM_SEL,
-			REG_A4XX_VPC_DEBUG_RAM_SEL),
-	REG_ADRENO_DEFINE(REG_ADRENO_VPC_DEBUG_RAM_READ,
-			REG_A4XX_VPC_DEBUG_RAM_READ),
-	REG_ADRENO_DEFINE(REG_ADRENO_RBBM_INT_CLEAR_CMD,
-			REG_A4XX_RBBM_INT_CLEAR_CMD),
-	REG_ADRENO_DEFINE(REG_ADRENO_VSC_SIZE_ADDRESS,
-			REG_A4XX_VSC_SIZE_ADDRESS),
-	REG_ADRENO_DEFINE(REG_ADRENO_VFD_CONTROL_0, REG_A4XX_VFD_CONTROL_0),
-	REG_ADRENO_DEFINE(REG_ADRENO_SP_VS_PVT_MEM_ADDR_REG,
-			REG_A4XX_SP_VS_PVT_MEM_ADDR),
-	REG_ADRENO_DEFINE(REG_ADRENO_SP_FS_PVT_MEM_ADDR_REG,
-			REG_A4XX_SP_FS_PVT_MEM_ADDR),
-	REG_ADRENO_DEFINE(REG_ADRENO_SP_VS_OBJ_START_REG,
-			REG_A4XX_SP_VS_OBJ_START),
-	REG_ADRENO_DEFINE(REG_ADRENO_SP_FS_OBJ_START_REG,
-			REG_A4XX_SP_FS_OBJ_START),
-	REG_ADRENO_DEFINE(REG_ADRENO_RBBM_RBBM_CTL, REG_A4XX_RBBM_RBBM_CTL),
-	REG_ADRENO_DEFINE(REG_ADRENO_RBBM_SW_RESET_CMD,
-			REG_A4XX_RBBM_SW_RESET_CMD),
-	REG_ADRENO_DEFINE(REG_ADRENO_UCHE_INVALIDATE0,
-			REG_A4XX_UCHE_INVALIDATE0),
-	REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_VALUE_LO,
-			REG_A4XX_RBBM_PERFCTR_LOAD_VALUE_LO),
-	REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_VALUE_HI,
-			REG_A4XX_RBBM_PERFCTR_LOAD_VALUE_HI),
-};
-
 static void a4xx_dump(struct msm_gpu *gpu)
@@ -582,16 +508,8 @@ static int a4xx_pm_suspend(struct msm_gpu *gpu) {
 
 static int a4xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
 {
-	uint32_t hi, lo, tmp;
-
-	tmp = gpu_read(gpu, REG_A4XX_RBBM_PERFCTR_CP_0_HI);
-	do {
-		hi = tmp;
-		lo = gpu_read(gpu, REG_A4XX_RBBM_PERFCTR_CP_0_LO);
-		tmp = gpu_read(gpu, REG_A4XX_RBBM_PERFCTR_CP_0_HI);
-	} while (tmp != hi);
-
-	*value = (((uint64_t)hi) << 32) | lo;
+	*value = gpu_read64(gpu, REG_A4XX_RBBM_PERFCTR_CP_0_LO,
+		REG_A4XX_RBBM_PERFCTR_CP_0_HI);
 
 	return 0;
 }
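The a4xx_get_timestamp() hunk above replaces an open-coded two-register read, whose loop guarded against the counter carrying from the low to the high word between the two reads, with a shared gpu_read64() helper. The helper's body is not part of this diff; a plausible minimal form, patterned on the msm driver's other MMIO accessors, is sketched below. Note that this simple form does not retry on a carry the way the removed loop did, which is an acceptable trade for a helper used on mostly-idle counters; treat the sketch as an assumption, not the committed implementation.

	/*
	 * Sketch only: the real gpu_read64() is assumed to live in msm_gpu.h
	 * and may differ. Register offsets are dword indices, so they are
	 * shifted left by 2 to form a byte offset into the MMIO mapping.
	 */
	static inline u64 gpu_read64(struct msm_gpu *gpu, u32 lo, u32 hi)
	{
		u64 val;

		/* low word first, then fold in the high word */
		val  = (u64) msm_readl(gpu->mmio + (lo << 2));
		val |= ((u64) msm_readl(gpu->mmio + (hi << 2))) << 32;

		return val;
	}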
drivers/gpu/drm/msm/adreno/a5xx.xml.h (new file, 3350 lines; diff suppressed because it is too large)
drivers/gpu/drm/msm/adreno/a5xx_gpu.c (new file, 1083 lines; diff suppressed because it is too large)
drivers/gpu/drm/msm/adreno/a5xx_gpu.h (new file, 66 lines):
@@ -0,0 +1,66 @@
+/* Copyright (c) 2016 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __A5XX_GPU_H__
+#define __A5XX_GPU_H__
+
+#include "adreno_gpu.h"
+
+/* Bringing over the hack from the previous targets */
+#undef ROP_COPY
+#undef ROP_XOR
+
+#include "a5xx.xml.h"
+
+enum {
+	A5XX_ZAP_SHADER_LOADED = 1,
+};
+
+struct a5xx_gpu {
+	unsigned long flags;
+
+	struct adreno_gpu base;
+	struct platform_device *pdev;
+
+	struct drm_gem_object *pm4_bo;
+	uint64_t pm4_iova;
+
+	struct drm_gem_object *pfp_bo;
+	uint64_t pfp_iova;
+
+	struct drm_gem_object *gpmu_bo;
+	uint64_t gpmu_iova;
+	uint32_t gpmu_dwords;
+
+	uint32_t lm_leakage;
+};
+
+#define to_a5xx_gpu(x) container_of(x, struct a5xx_gpu, base)
+
+int a5xx_power_init(struct msm_gpu *gpu);
+void a5xx_gpmu_ucode_init(struct msm_gpu *gpu);
+
+static inline int spin_usecs(struct msm_gpu *gpu, uint32_t usecs,
+		uint32_t reg, uint32_t mask, uint32_t value)
+{
+	while (usecs--) {
+		udelay(1);
+		if ((gpu_read(gpu, reg) & mask) == value)
+			return 0;
+		cpu_relax();
+	}
+
+	return -ETIMEDOUT;
+}
+
+
+#endif /* __A5XX_GPU_H__ */
drivers/gpu/drm/msm/adreno/a5xx_power.c (new file, 507 lines):
@@ -0,0 +1,507 @@
+/* Copyright (c) 2016 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/pm_opp.h>
+#include "a5xx_gpu.h"
+
+/*
+ * The GPMU data block is a block of shared registers that can be used to
+ * communicate back and forth. These "registers" are by convention with the
+ * GPMU firmware and not bound to any specific hardware design
+ */
+
+#define AGC_INIT_BASE REG_A5XX_GPMU_DATA_RAM_BASE
+#define AGC_INIT_MSG_MAGIC (AGC_INIT_BASE + 5)
+#define AGC_MSG_BASE (AGC_INIT_BASE + 7)
+
+#define AGC_MSG_STATE (AGC_MSG_BASE + 0)
+#define AGC_MSG_COMMAND (AGC_MSG_BASE + 1)
+#define AGC_MSG_PAYLOAD_SIZE (AGC_MSG_BASE + 3)
+#define AGC_MSG_PAYLOAD(_o) ((AGC_MSG_BASE + 5) + (_o))
+
+#define AGC_POWER_CONFIG_PRODUCTION_ID 1
+#define AGC_INIT_MSG_VALUE 0xBABEFACE
+
+/* AGC_LM_CONFIG (A540+) */
+#define AGC_LM_CONFIG (136/4)
+#define AGC_LM_CONFIG_GPU_VERSION_SHIFT 17
+#define AGC_LM_CONFIG_ENABLE_GPMU_ADAPTIVE 1
+#define AGC_LM_CONFIG_THROTTLE_DISABLE (2 << 8)
+#define AGC_LM_CONFIG_ISENSE_ENABLE (1 << 4)
+#define AGC_LM_CONFIG_ENABLE_ERROR (3 << 4)
+#define AGC_LM_CONFIG_LLM_ENABLED (1 << 16)
+#define AGC_LM_CONFIG_BCL_DISABLED (1 << 24)
+
+#define AGC_LEVEL_CONFIG (140/4)
+
+static struct {
+	uint32_t reg;
+	uint32_t value;
+} a5xx_sequence_regs[] = {
+	{ 0xB9A1, 0x00010303 },
+	{ 0xB9A2, 0x13000000 },
+	{ 0xB9A3, 0x00460020 },
+	{ 0xB9A4, 0x10000000 },
+	{ 0xB9A5, 0x040A1707 },
+	{ 0xB9A6, 0x00010000 },
+	{ 0xB9A7, 0x0E000904 },
+	{ 0xB9A8, 0x10000000 },
+	{ 0xB9A9, 0x01165000 },
+	{ 0xB9AA, 0x000E0002 },
+	{ 0xB9AB, 0x03884141 },
+	{ 0xB9AC, 0x10000840 },
+	{ 0xB9AD, 0x572A5000 },
+	{ 0xB9AE, 0x00000003 },
+	{ 0xB9AF, 0x00000000 },
+	{ 0xB9B0, 0x10000000 },
+	{ 0xB828, 0x6C204010 },
+	{ 0xB829, 0x6C204011 },
+	{ 0xB82A, 0x6C204012 },
+	{ 0xB82B, 0x6C204013 },
+	{ 0xB82C, 0x6C204014 },
+	{ 0xB90F, 0x00000004 },
+	{ 0xB910, 0x00000002 },
+	{ 0xB911, 0x00000002 },
+	{ 0xB912, 0x00000002 },
+	{ 0xB913, 0x00000002 },
+	{ 0xB92F, 0x00000004 },
+	{ 0xB930, 0x00000005 },
+	{ 0xB931, 0x00000005 },
+	{ 0xB932, 0x00000005 },
+	{ 0xB933, 0x00000005 },
+	{ 0xB96F, 0x00000001 },
+	{ 0xB970, 0x00000003 },
+	{ 0xB94F, 0x00000004 },
+	{ 0xB950, 0x0000000B },
+	{ 0xB951, 0x0000000B },
+	{ 0xB952, 0x0000000B },
+	{ 0xB953, 0x0000000B },
+	{ 0xB907, 0x00000019 },
+	{ 0xB927, 0x00000019 },
+	{ 0xB947, 0x00000019 },
+	{ 0xB967, 0x00000019 },
+	{ 0xB987, 0x00000019 },
+	{ 0xB906, 0x00220001 },
+	{ 0xB926, 0x00220001 },
+	{ 0xB946, 0x00220001 },
+	{ 0xB966, 0x00220001 },
+	{ 0xB986, 0x00300000 },
+	{ 0xAC40, 0x0340FF41 },
+	{ 0xAC41, 0x03BEFED0 },
+	{ 0xAC42, 0x00331FED },
+	{ 0xAC43, 0x021FFDD3 },
+	{ 0xAC44, 0x5555AAAA },
+	{ 0xAC45, 0x5555AAAA },
+	{ 0xB9BA, 0x00000008 },
+};
+
+/*
+ * Get the actual voltage value for the operating point at the specified
+ * frequency
+ */
+static inline uint32_t _get_mvolts(struct msm_gpu *gpu, uint32_t freq)
+{
+	struct drm_device *dev = gpu->dev;
+	struct msm_drm_private *priv = dev->dev_private;
+	struct platform_device *pdev = priv->gpu_pdev;
+	struct dev_pm_opp *opp;
+
+	opp = dev_pm_opp_find_freq_exact(&pdev->dev, freq, true);
+
+	return (!IS_ERR(opp)) ? dev_pm_opp_get_voltage(opp) / 1000 : 0;
+}
+
+#define PAYLOAD_SIZE(_size) ((_size) * sizeof(u32))
+#define LM_DCVS_LIMIT 1
+#define LEVEL_CONFIG ~(0x303)
+
+/* Setup thermal limit management for A540 */
+static void a540_lm_setup(struct msm_gpu *gpu)
+{
+	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+	u32 max_power = 0;
+	u32 rate = gpu->gpufreq[gpu->active_level];
+	u32 config;
+
+	/* The battery current limiter isn't enabled for A540 */
+	config = AGC_LM_CONFIG_BCL_DISABLED;
+	config |= adreno_gpu->rev.patchid << AGC_LM_CONFIG_GPU_VERSION_SHIFT;
+
+	/* For now disable GPMU side throttling */
+	config |= AGC_LM_CONFIG_THROTTLE_DISABLE;
+
+	/* Get the max-power from the device tree */
+	of_property_read_u32(GPU_OF_NODE(gpu), "qcom,lm-max-power", &max_power);
+
+	gpu_write(gpu, AGC_MSG_STATE, 0x80000001);
+	gpu_write(gpu, AGC_MSG_COMMAND, AGC_POWER_CONFIG_PRODUCTION_ID);
+
+	/*
+	 * For now just write the one voltage level - we will do more when we
+	 * can do scaling
+	 */
+	gpu_write(gpu, AGC_MSG_PAYLOAD(0), max_power);
+	gpu_write(gpu, AGC_MSG_PAYLOAD(1), 1);
+
+	gpu_write(gpu, AGC_MSG_PAYLOAD(2), _get_mvolts(gpu, rate));
+	gpu_write(gpu, AGC_MSG_PAYLOAD(3), rate / 1000000);
+
+	gpu_write(gpu, AGC_MSG_PAYLOAD(AGC_LM_CONFIG), config);
+	gpu_write(gpu, AGC_MSG_PAYLOAD(AGC_LEVEL_CONFIG), LEVEL_CONFIG);
+	gpu_write(gpu, AGC_MSG_PAYLOAD_SIZE,
+		PAYLOAD_SIZE(AGC_LEVEL_CONFIG + 1));
+
+	gpu_write(gpu, AGC_INIT_MSG_MAGIC, AGC_INIT_MSG_VALUE);
+}
+
+/* Setup thermal limit management for A530 */
+static void a530_lm_setup(struct msm_gpu *gpu)
+{
+	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+	struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+	uint32_t rate = gpu->gpufreq[gpu->active_level];
+	uint32_t tsens = 0;
+	uint32_t max_power = 0;
+	unsigned int i;
+
+	/* Write the block of sequence registers */
+	for (i = 0; i < ARRAY_SIZE(a5xx_sequence_regs); i++)
+		gpu_write(gpu, a5xx_sequence_regs[i].reg,
+			a5xx_sequence_regs[i].value);
+
+	of_property_read_u32(GPU_OF_NODE(gpu), "qcom,gpmu-tsens", &tsens);
+
+	gpu_write(gpu, REG_A5XX_GPMU_TEMP_SENSOR_ID, tsens);
+	gpu_write(gpu, REG_A5XX_GPMU_DELTA_TEMP_THRESHOLD, 0x01);
+	gpu_write(gpu, REG_A5XX_GPMU_TEMP_SENSOR_CONFIG, 0x01);
+
+	gpu_write(gpu, REG_A5XX_GPMU_BASE_LEAKAGE, a5xx_gpu->lm_leakage);
+
+	gpu_write(gpu, REG_A5XX_GPMU_BEC_ENABLE, 0x10001FFF);
+	gpu_write(gpu, REG_A5XX_GDPM_CONFIG1, 0x00201FF1);
+
+	/* Write the voltage table */
+
+	/* Get the max-power from the device tree */
+	of_property_read_u32(GPU_OF_NODE(gpu), "qcom,lm-max-power", &max_power);
+
+	gpu_write(gpu, REG_A5XX_GPMU_BEC_ENABLE, 0x10001FFF);
+	gpu_write(gpu, REG_A5XX_GDPM_CONFIG1, 0x201FF1);
+
+	gpu_write(gpu, AGC_MSG_STATE, 1);
+	gpu_write(gpu, AGC_MSG_COMMAND, AGC_POWER_CONFIG_PRODUCTION_ID);
+
+	/*
+	 * For now just write the one voltage level - we will do more when we
+	 * can do scaling
+	 */
+	gpu_write(gpu, AGC_MSG_PAYLOAD(0), max_power);
+	gpu_write(gpu, AGC_MSG_PAYLOAD(1), 1);
+
+	gpu_write(gpu, AGC_MSG_PAYLOAD(2), _get_mvolts(gpu, rate));
+	gpu_write(gpu, AGC_MSG_PAYLOAD(3), rate / 1000000);
+
+	gpu_write(gpu, AGC_MSG_PAYLOAD_SIZE, 4 * sizeof(uint32_t));
+	gpu_write(gpu, AGC_INIT_MSG_MAGIC, AGC_INIT_MSG_VALUE);
+}
+
+/* Enable SP/TP power collapse */
+static void a5xx_pc_init(struct msm_gpu *gpu)
+{
+	gpu_write(gpu, REG_A5XX_GPMU_PWR_COL_INTER_FRAME_CTRL, 0x7F);
+	gpu_write(gpu, REG_A5XX_GPMU_PWR_COL_BINNING_CTRL, 0);
+	gpu_write(gpu, REG_A5XX_GPMU_PWR_COL_INTER_FRAME_HYST, 0xA0080);
+	gpu_write(gpu, REG_A5XX_GPMU_PWR_COL_STAGGER_DELAY, 0x600040);
+}
+
+/* Enable the GPMU microcontroller */
+static int a5xx_gpmu_init(struct msm_gpu *gpu)
+{
+	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+	struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+	struct msm_ringbuffer *ring = gpu->rb;
+
+	if (!a5xx_gpu->gpmu_dwords)
+		return 0;
+
+	/* Turn off protected mode for this operation */
+	OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
+	OUT_RING(ring, 0);
+
+	/* Kick off the IB to load the GPMU microcode */
+	OUT_PKT7(ring, CP_INDIRECT_BUFFER_PFE, 3);
+	OUT_RING(ring, lower_32_bits(a5xx_gpu->gpmu_iova));
+	OUT_RING(ring, upper_32_bits(a5xx_gpu->gpmu_iova));
+	OUT_RING(ring, a5xx_gpu->gpmu_dwords);
+
+	/* Turn back on protected mode */
+	OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
+	OUT_RING(ring, 1);
+
+	gpu->funcs->flush(gpu);
+
+	if (!gpu->funcs->idle(gpu)) {
+		DRM_ERROR("%s: Unable to load GPMU firmware. GPMU will not be active\n",
+			gpu->name);
+		return -EINVAL;
+	}
+
+	/* Clock gating setup for A530 targets */
+	if (adreno_is_a530(adreno_gpu))
+		gpu_write(gpu, REG_A5XX_GPMU_WFI_CONFIG, 0x4014);
+
+	/* Kick off the GPMU */
+	gpu_write(gpu, REG_A5XX_GPMU_CM3_SYSRESET, 0x0);
+
+	/*
+	 * Wait for the GPMU to respond. It isn't fatal if it doesn't, we just
+	 * won't have advanced power collapse.
+	 */
+	if (spin_usecs(gpu, 25, REG_A5XX_GPMU_GENERAL_0, 0xFFFFFFFF,
+		0xBABEFACE))
+		DRM_ERROR("%s: GPMU firmware initialization timed out\n",
+			gpu->name);
+
+	if (!adreno_is_a530(adreno_gpu)) {
+		u32 val = gpu_read(gpu, REG_A5XX_GPMU_GENERAL_1);
+
+		if (val) {
+			DRM_ERROR("%s: GPMU firmware initialization failed: %d\n",
+				gpu->name, val);
+			return -EIO;
+		}
+	}
+
+	/* FIXME: Clear GPMU interrupts? */
+	return 0;
+}
+
+/* Enable limits management */
+static void a5xx_lm_enable(struct msm_gpu *gpu)
+{
+	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+
+	/* This init sequence only applies to A530 */
+	if (!adreno_is_a530(adreno_gpu))
+		return;
+
+	gpu_write(gpu, REG_A5XX_GDPM_INT_MASK, 0x0);
+	gpu_write(gpu, REG_A5XX_GDPM_INT_EN, 0x0A);
+	gpu_write(gpu, REG_A5XX_GPMU_GPMU_VOLTAGE_INTR_EN_MASK, 0x01);
+	gpu_write(gpu, REG_A5XX_GPMU_TEMP_THRESHOLD_INTR_EN_MASK, 0x50000);
+	gpu_write(gpu, REG_A5XX_GPMU_THROTTLE_UNMASK_FORCE_CTRL, 0x30000);
+
+	gpu_write(gpu, REG_A5XX_GPMU_CLOCK_THROTTLE_CTRL, 0x011);
+}
+
+int a5xx_power_init(struct msm_gpu *gpu)
+{
+	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+	int ret;
+	u32 lm_limit = 6000;
+
+	/*
+	 * Set up the limit management
+	 * first, do some generic setup:
+	 */
+	gpu_write(gpu, REG_A5XX_GPMU_GPMU_VOLTAGE, 0x80000000 | 0);
+
+	of_property_read_u32(GPU_OF_NODE(gpu), "qcom,lm-limit", &lm_limit);
+	gpu_write(gpu, REG_A5XX_GPMU_GPMU_PWR_THRESHOLD, 0x80000000 | lm_limit);
+
+	/* Now do the target specific setup */
+	if (adreno_is_a530(adreno_gpu))
+		a530_lm_setup(gpu);
+	else
+		a540_lm_setup(gpu);
+
+	/* Set up SP/TP power collapse */
+	a5xx_pc_init(gpu);
+
+	/* Start the GPMU */
+	ret = a5xx_gpmu_init(gpu);
+	if (ret)
+		return ret;
+
+	/* Start the limits management */
+	a5xx_lm_enable(gpu);
+
+	return 0;
+}
+
+static int _read_header(unsigned int *data, uint32_t fwsize,
+		unsigned int *major, unsigned int *minor)
+{
+	uint32_t size;
+	unsigned int i;
+
+	/* First dword of the header is the header size */
+	if (fwsize < 4)
+		return -EINVAL;
+
+	size = data[0];
+
+	/* Make sure the header isn't too big and is a multiple of two */
+	if ((size % 2) || (size > 10) || size > (fwsize >> 2))
+		return -EINVAL;
+
+	/* Read the values in pairs */
+	for (i = 1; i < size; i += 2) {
+		switch (data[i]) {
+		case 1:
+			*major = data[i + 1];
+			break;
+		case 2:
+			*minor = data[i + 1];
+			break;
+		default:
+			/* Invalid values are non fatal */
+			break;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Make sure cur_major and cur_minor are greater than or equal to the minimum
+ * allowable major/minor
+ */
+static inline bool _check_gpmu_version(uint32_t cur_major, uint32_t cur_minor,
+		uint32_t min_major, uint32_t min_minor)
+{
+	return ((cur_major > min_major) ||
+		((cur_major == min_major) && (cur_minor >= min_minor)));
+}
+
+void a5xx_gpmu_ucode_init(struct msm_gpu *gpu)
+{
+	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+	struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+	struct drm_device *drm = gpu->dev;
+	const char *name;
+	const struct firmware *fw;
+	uint32_t version[2] = { 0, 0 };
+	uint32_t dwords = 0, offset = 0;
+	uint32_t major = 0, minor = 0, bosize;
+	unsigned int *data, *ptr, *cmds;
+	unsigned int cmds_size;
+
+	if (a5xx_gpu->gpmu_bo)
+		return;
+
+	/*
+	 * Read the firmware name from the device tree - if it doesn't exist
+	 * then don't initialize the GPMU for this target
+	 */
+	if (of_property_read_string(GPU_OF_NODE(gpu), "qcom,gpmu-firmware",
+		&name))
+		return;
+
+	/*
+	 * The version isn't mandatory, but if it exists, we need to enforce
+	 * that the version of the GPMU firmware matches or is newer than the
+	 * value
+	 */
+	of_property_read_u32_array(GPU_OF_NODE(gpu), "qcom,gpmu-version",
+		version, 2);
+
+	/* Get the firmware */
+	if (request_firmware(&fw, name, drm->dev)) {
+		DRM_ERROR("%s: Could not get GPMU firmware. GPMU will not be active\n",
+			gpu->name);
+		return;
+	}
+
+	data = (unsigned int *) fw->data;
+
+	/*
+	 * The first dword is the size of the remaining data in dwords. Use it
+	 * as a checksum of sorts and make sure it matches the actual size of
+	 * the firmware that we read
+	 */
+
+	if (fw->size < 8 || (data[0] < 2) || (data[0] >= (fw->size >> 2)))
+		goto out;
+
+	/* The second dword is an ID - look for 2 (GPMU_FIRMWARE_ID) */
+	if (data[1] != 2)
+		goto out;
+
+	/* Read the header and get the major/minor of the read firmware */
+	if (_read_header(&data[2], fw->size - 8, &major, &minor))
+		goto out;
+
+	if (!_check_gpmu_version(major, minor, version[0], version[1])) {
+		DRM_ERROR("%s: Loaded GPMU version %d.%d is too old\n",
+			gpu->name, major, minor);
+		goto out;
+	}
+
+	cmds = data + data[2] + 3;
+	cmds_size = data[0] - data[2] - 2;
+
+	/*
+	 * A single type4 opcode can only have so many values attached so
+	 * add enough opcodes to load all the commands
+	 */
+	bosize = (cmds_size + (cmds_size / TYPE4_MAX_PAYLOAD) + 1) << 2;
+
+	mutex_lock(&drm->struct_mutex);
+	a5xx_gpu->gpmu_bo = msm_gem_new(drm, bosize, MSM_BO_UNCACHED);
+	mutex_unlock(&drm->struct_mutex);
+
+	if (IS_ERR(a5xx_gpu->gpmu_bo))
+		goto err;
+
+	if (msm_gem_get_iova(a5xx_gpu->gpmu_bo, gpu->aspace,
+		&a5xx_gpu->gpmu_iova))
+		goto err;
+
+	ptr = msm_gem_vaddr(a5xx_gpu->gpmu_bo);
+	if (!ptr)
+		goto err;
+
+	while (cmds_size > 0) {
+		int i;
+		uint32_t _size = cmds_size > TYPE4_MAX_PAYLOAD ?
+			TYPE4_MAX_PAYLOAD : cmds_size;
+
+		ptr[dwords++] = PKT4(REG_A5XX_GPMU_INST_RAM_BASE + offset,
+			_size);
+
+		for (i = 0; i < _size; i++)
+			ptr[dwords++] = *cmds++;
+
+		offset += _size;
+		cmds_size -= _size;
+	}
+
+	a5xx_gpu->gpmu_dwords = dwords;
+
+	goto out;
+
+err:
+	if (a5xx_gpu->gpmu_iova)
+		msm_gem_put_iova(a5xx_gpu->gpmu_bo, gpu->aspace);
+	if (a5xx_gpu->gpmu_bo)
+		drm_gem_object_unreference_unlocked(a5xx_gpu->gpmu_bo);
+
+	a5xx_gpu->gpmu_bo = NULL;
+	a5xx_gpu->gpmu_iova = 0;
+	a5xx_gpu->gpmu_dwords = 0;
+
+out:
+	/* No need to keep that firmware laying around anymore */
+	release_firmware(fw);
+}
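One detail in a5xx_gpmu_ucode_init() above is the buffer sizing: each run of up to TYPE4_MAX_PAYLOAD payload dwords costs one extra PKT4 header dword, and the formula adds one header per full chunk plus one for the remainder. A worked example, assuming purely for illustration that TYPE4_MAX_PAYLOAD is 127 (its real value is defined outside this diff):

	/* 300 payload dwords split into chunks of 127 + 127 + 46, so three
	 * PKT4 headers are needed. Applying the formula from the code above:
	 */
	uint32_t cmds_size = 300;				/* payload dwords */
	uint32_t bosize = (cmds_size + (cmds_size / 127) + 1) << 2;
	/* = (300 + 2 + 1) dwords * 4 bytes = 1212 bytes of BO space */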
@@ -8,14 +8,15 @@ http://github.com/freedreno/envytools/
 git clone https://github.com/freedreno/envytools.git
 
 The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 398 bytes, from 2015-09-24 17:25:31)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-02-10 17:07:21)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2015-05-20 20:03:14)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 11518 bytes, from 2016-02-10 21:03:25)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 16166 bytes, from 2016-02-11 21:20:31)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 83967 bytes, from 2016-02-10 17:07:21)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 109916 bytes, from 2016-02-20 18:44:48)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2015-09-24 17:30:00)
+- ./adreno.xml ( 431 bytes, from 2016-10-24 21:12:27)
+- ./freedreno_copyright.xml ( 1572 bytes, from 2016-10-24 21:12:27)
+- ./adreno/a2xx.xml ( 32901 bytes, from 2016-10-24 21:12:27)
+- ./adreno/adreno_common.xml ( 12025 bytes, from 2016-10-24 21:12:27)
+- ./adreno/adreno_pm4.xml ( 19684 bytes, from 2016-10-24 21:12:27)
+- ./adreno/a3xx.xml ( 83840 bytes, from 2016-10-24 21:12:27)
+- ./adreno/a4xx.xml ( 110708 bytes, from 2016-10-24 21:12:27)
+- ./adreno/a5xx.xml ( 81207 bytes, from 2016-10-26 19:36:59)
+- ./adreno/ocmem.xml ( 1773 bytes, from 2016-10-24 21:12:27)
 
 Copyright (C) 2013-2016 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
@@ -127,11 +128,13 @@ enum a3xx_rop_code {
 	ROP_COPY_INVERTED = 3,
 	ROP_AND_REVERSE = 4,
 	ROP_INVERT = 5,
+	ROP_XOR = 6,
 	ROP_NAND = 7,
 	ROP_AND = 8,
 	ROP_EQUIV = 9,
 	ROP_NOOP = 10,
 	ROP_OR_INVERTED = 11,
+	ROP_COPY = 12,
 	ROP_OR_REVERSE = 13,
 	ROP_OR = 14,
 	ROP_SET = 15,
@@ -172,6 +175,14 @@ enum a3xx_color_swap {
 	XYZW = 3,
 };
 
+enum a3xx_rb_blend_opcode {
+	BLEND_DST_PLUS_SRC = 0,
+	BLEND_SRC_MINUS_DST = 1,
+	BLEND_DST_MINUS_SRC = 2,
+	BLEND_MIN_DST_SRC = 3,
+	BLEND_MAX_DST_SRC = 4,
+};
+
 #define REG_AXXX_CP_RB_BASE 0x000001c0
 
 #define REG_AXXX_CP_RB_CNTL 0x000001c1
@@ -27,6 +27,7 @@ module_param_named(hang_debug, hang_debug, bool, 0600);
 
 struct msm_gpu *a3xx_gpu_init(struct drm_device *dev);
 struct msm_gpu *a4xx_gpu_init(struct drm_device *dev);
+struct msm_gpu *a5xx_gpu_init(struct drm_device *dev);
 
 static const struct adreno_info gpulist[] = {
 	{
@@ -77,6 +78,22 @@ static const struct adreno_info gpulist[] = {
 		.pfpfw = "a420_pfp.fw",
 		.gmem = (SZ_1M + SZ_512K),
 		.init = a4xx_gpu_init,
-	},
+	}, {
+		.rev = ADRENO_REV(5, 3, 0, ANY_ID),
+		.revn = 530,
+		.name = "A530",
+		.pm4fw = "a530_pm4.fw",
+		.pfpfw = "a530_pfp.fw",
+		.gmem = SZ_1M,
+		.init = a5xx_gpu_init,
+	}, {
+		.rev = ADRENO_REV(5, 4, 0, ANY_ID),
+		.revn = 540,
+		.name = "A540",
+		.pm4fw = "a530_pm4.fw",
+		.pfpfw = "a530_pfp.fw",
+		.gmem = SZ_1M,
+		.init = a5xx_gpu_init,
+	},
 };
 
@@ -86,6 +103,8 @@ MODULE_FIRMWARE("a330_pm4.fw");
 MODULE_FIRMWARE("a330_pfp.fw");
 MODULE_FIRMWARE("a420_pm4.fw");
 MODULE_FIRMWARE("a420_pfp.fw");
+MODULE_FIRMWARE("a530_fm4.fw");
+MODULE_FIRMWARE("a530_pfp.fw");
 
 static inline bool _rev_match(uint8_t entry, uint8_t id)
 {
@@ -148,12 +167,16 @@ struct msm_gpu *adreno_load_gpu(struct drm_device *dev)
 		mutex_lock(&dev->struct_mutex);
 		gpu->funcs->pm_resume(gpu);
 		mutex_unlock(&dev->struct_mutex);
 
+		disable_irq(gpu->irq);
+
 		ret = gpu->funcs->hw_init(gpu);
 		if (ret) {
 			dev_err(dev->dev, "gpu hw init failed: %d\n", ret);
 			gpu->funcs->destroy(gpu);
 			gpu = NULL;
 		} else {
+			enable_irq(gpu->irq);
 			/* give inactive pm a chance to kick in: */
 			msm_gpu_retire(gpu);
 		}
@@ -172,11 +195,16 @@ static void set_gpu_pdev(struct drm_device *dev,
 static int adreno_bind(struct device *dev, struct device *master, void *data)
 {
 	static struct adreno_platform_config config = {};
-	struct device_node *child, *node = dev->of_node;
-	u32 val;
+	uint32_t val = 0;
 	int ret;
 
-	ret = of_property_read_u32(node, "qcom,chipid", &val);
+	/*
+	 * Read the chip ID from the device tree at bind time - we use this
+	 * information to load the correct functions. All the rest of the
+	 * (extensive) device tree probing should happen in the GPU specific
+	 * code
+	 */
+	ret = of_property_read_u32(dev->of_node, "qcom,chipid", &val);
 	if (ret) {
 		dev_err(dev, "could not find chipid: %d\n", ret);
 		return ret;
@ -185,29 +213,6 @@ static int adreno_bind(struct device *dev, struct device *master, void *data)
|
|||
config.rev = ADRENO_REV((val >> 24) & 0xff,
|
||||
(val >> 16) & 0xff, (val >> 8) & 0xff, val & 0xff);
|
||||
|
||||
/* find clock rates: */
|
||||
config.fast_rate = 0;
|
||||
config.slow_rate = ~0;
|
||||
for_each_child_of_node(node, child) {
|
||||
if (of_device_is_compatible(child, "qcom,gpu-pwrlevels")) {
|
||||
struct device_node *pwrlvl;
|
||||
for_each_child_of_node(child, pwrlvl) {
|
||||
ret = of_property_read_u32(pwrlvl, "qcom,gpu-freq", &val);
|
||||
if (ret) {
|
||||
dev_err(dev, "could not find gpu-freq: %d\n", ret);
|
||||
return ret;
|
||||
}
|
||||
config.fast_rate = max(config.fast_rate, val);
|
||||
config.slow_rate = min(config.slow_rate, val);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (!config.fast_rate) {
|
||||
dev_err(dev, "could not find clk rates\n");
|
||||
return -ENXIO;
|
||||
}
|
||||
|
||||
dev->platform_data = &config;
|
||||
set_gpu_pdev(dev_get_drvdata(master), to_platform_device(dev));
|
||||
return 0;
|
||||
|
|
|
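The qcom,chipid value read above packs the whole revision into one u32, which ADRENO_REV() then unpacks byte by byte. A minimal sketch of the decode, with an assumed example value (not taken from this patch):

	/* e.g. qcom,chipid = <0x05030000> decodes as: */
	uint8_t core  = (0x05030000 >> 24) & 0xff;	/* 5 */
	uint8_t major = (0x05030000 >> 16) & 0xff;	/* 3 */
	uint8_t minor = (0x05030000 >> 8) & 0xff;	/* 0 */
	uint8_t patch = 0x05030000 & 0xff;		/* 0 -> ADRENO_REV(5, 3, 0, 0), matched as an A530 */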
@@ -2,7 +2,7 @@
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
* Copyright (c) 2014 The Linux Foundation. All rights reserved.
* Copyright (c) 2014,2016 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by

@@ -22,7 +22,7 @@
#include "msm_mmu.h"

#define RB_SIZE SZ_32K
#define RB_BLKSIZE 16
#define RB_BLKSIZE 32

int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value)
{

@@ -42,7 +42,7 @@ int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value)
(adreno_gpu->rev.core << 24);
return 0;
case MSM_PARAM_MAX_FREQ:
*value = adreno_gpu->base.fast_rate;
*value = gpu->gpufreq[gpu->active_level];
return 0;
case MSM_PARAM_TIMESTAMP:
if (adreno_gpu->funcs->get_timestamp)

@@ -54,9 +54,6 @@ int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value)
}
}

#define rbmemptr(adreno_gpu, member) \
((adreno_gpu)->memptrs_iova + offsetof(struct adreno_rbmemptrs, member))

int adreno_hw_init(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);

@@ -64,7 +61,7 @@ int adreno_hw_init(struct msm_gpu *gpu)

DBG("%s", gpu->name);

ret = msm_gem_get_iova(gpu->rb->bo, gpu->id, &gpu->rb_iova);
ret = msm_gem_get_iova(gpu->rb->bo, gpu->aspace, &gpu->rb_iova);
if (ret) {
gpu->rb_iova = 0;
dev_err(gpu->dev->dev, "could not map ringbuffer: %d\n", ret);

@@ -78,12 +75,12 @@ int adreno_hw_init(struct msm_gpu *gpu)
AXXX_CP_RB_CNTL_BLKSZ(ilog2(RB_BLKSIZE / 8)) |
(adreno_is_a430(adreno_gpu) ? AXXX_CP_RB_CNTL_NO_UPDATE : 0));

/* Setup ringbuffer address: */
adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_BASE, gpu->rb_iova);
/* Setup ringbuffer address */
adreno_gpu_write64(adreno_gpu, REG_ADRENO_CP_RB_BASE,
REG_ADRENO_CP_RB_BASE_HI, gpu->rb_iova);

if (!adreno_is_a430(adreno_gpu))
adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_RPTR_ADDR,
rbmemptr(adreno_gpu, rptr));
adreno_gpu_write64(adreno_gpu, REG_ADRENO_CP_RB_RPTR_ADDR,
REG_ADRENO_CP_RB_RPTR_ADDR_HI, rbmemptr(adreno_gpu, rptr));

return 0;
}

@@ -126,11 +123,14 @@ void adreno_recover(struct msm_gpu *gpu)
adreno_gpu->memptrs->wptr = 0;

gpu->funcs->pm_resume(gpu);

disable_irq(gpu->irq);
ret = gpu->funcs->hw_init(gpu);
if (ret) {
dev_err(dev->dev, "gpu hw init failed: %d\n", ret);
/* hmm, oh well? */
}
enable_irq(gpu->irq);
}

int adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,

@@ -153,7 +153,7 @@ int adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
case MSM_SUBMIT_CMD_BUF:
OUT_PKT3(ring, adreno_is_a430(adreno_gpu) ?
CP_INDIRECT_BUFFER_PFE : CP_INDIRECT_BUFFER_PFD, 2);
OUT_RING(ring, submit->cmd[i].iova);
OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
OUT_RING(ring, submit->cmd[i].size);
ibs++;
break;

@@ -219,7 +219,14 @@ int adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
void adreno_flush(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
uint32_t wptr = get_wptr(gpu->rb);
uint32_t wptr;

/*
 * Mask the wptr value that we calculate to fit in the HW range. This is
 * to account for the possibility that the last command fit exactly into
 * the ringbuffer and rb->next hasn't wrapped to zero yet
 */
wptr = get_wptr(gpu->rb) & ((gpu->rb->size / 4) - 1);

/* ensure writes to ringbuffer have hit system memory: */
mb();
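A quick worked example of the masking above, assuming the 32K RB_SIZE set earlier in this file:

	/* SZ_32K bytes = 8192 dwords, so the mask is 8191. A wptr of exactly
	 * 8192 (the last command fit flush against the end of the ring and
	 * rb->next has not wrapped yet) masks back to 0, which is the value
	 * the hardware expects. */
	uint32_t wptr = 8192 & ((SZ_32K / 4) - 1);	/* == 0 */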
@@ -341,10 +348,122 @@ void adreno_wait_ring(struct msm_gpu *gpu, uint32_t ndwords)
}

static const char *iommu_ports[] = {
"gfx3d_user", "gfx3d_priv",
"gfx3d1_user", "gfx3d1_priv",
"gfx3d_user",
};

/* Read the set of powerlevels */
static int _adreno_get_pwrlevels(struct msm_gpu *gpu, struct device_node *node)
{
struct device_node *child;

gpu->active_level = 1;

/* The device tree will tell us the best clock to initialize with */
of_property_read_u32(node, "qcom,initial-pwrlevel", &gpu->active_level);

if (gpu->active_level >= ARRAY_SIZE(gpu->gpufreq))
gpu->active_level = 1;

for_each_child_of_node(node, child) {
unsigned int index;

if (of_property_read_u32(child, "reg", &index))
return -EINVAL;

if (index >= ARRAY_SIZE(gpu->gpufreq))
continue;

gpu->nr_pwrlevels = max(gpu->nr_pwrlevels, index + 1);

of_property_read_u32(child, "qcom,gpu-freq",
&gpu->gpufreq[index]);
of_property_read_u32(child, "qcom,bus-freq",
&gpu->busfreq[index]);
}

DBG("fast_rate=%u, slow_rate=%u, bus_freq=%u",
gpu->gpufreq[gpu->active_level],
gpu->gpufreq[gpu->nr_pwrlevels - 1],
gpu->busfreq[gpu->active_level]);

return 0;
}
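For orientation, a sketch of the device-tree shape this parser walks. The child node names here are illustrative assumptions; the reg, qcom,gpu-freq, qcom,bus-freq and qcom,initial-pwrlevel properties are the ones actually read above:

	/*
	 * Illustrative only:
	 *
	 *	qcom,gpu-pwrlevels {
	 *		qcom,initial-pwrlevel = <2>;
	 *		qcom,gpu-pwrlevel@0 {
	 *			reg = <0>;
	 *			qcom,gpu-freq = <600000000>;
	 *			qcom,bus-freq = <12>;
	 *		};
	 *	};
	 */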
/*
* Escape valve for targets that don't define the binning nodes. Get the
* first powerlevel node and parse it
*/
static int adreno_get_legacy_pwrlevels(struct msm_gpu *gpu,
struct device_node *parent)
{
struct device_node *child;

child = of_find_node_by_name(parent, "qcom,gpu-pwrlevels");
if (child)
return _adreno_get_pwrlevels(gpu, child);

dev_err(gpu->dev->dev, "Unable to parse any powerlevels\n");
return -EINVAL;
}

/* Get the powerlevels for the target */
static int adreno_get_pwrlevels(struct msm_gpu *gpu, struct device_node *parent)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct device_node *node, *child;

/* See if the target has defined a number of power bins */
node = of_find_node_by_name(parent, "qcom,gpu-pwrlevel-bins");
if (!node) {
/* If not look for the qcom,gpu-pwrlevels node */
return adreno_get_legacy_pwrlevels(gpu, parent);
}

for_each_child_of_node(node, child) {
unsigned int bin;

if (of_property_read_u32(child, "qcom,speed-bin", &bin))
continue;

/*
* If the bin matches the bin specified by the fuses, then we
* have a winner - parse it
*/
if (adreno_gpu->speed_bin == bin)
return _adreno_get_pwrlevels(gpu, child);
}

return -ENODEV;
}

static const struct {
const char *str;
uint32_t flag;
} quirks[] = {
{ "qcom,gpu-quirk-two-pass-use-wfi", ADRENO_QUIRK_TWO_PASS_USE_WFI },
{ "qcom,gpu-quirk-fault-detect-mask", ADRENO_QUIRK_FAULT_DETECT_MASK },
};

/* Parse the statistics from the device tree */
static int adreno_of_parse(struct platform_device *pdev, struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct device_node *node = pdev->dev.of_node;
int i, ret;

/* Probe the powerlevels */
ret = adreno_get_pwrlevels(gpu, node);
if (ret)
return ret;

/* Check to see if any quirks were specified in the device tree */
for (i = 0; i < ARRAY_SIZE(quirks); i++)
if (of_property_read_bool(node, quirks[i].str))
adreno_gpu->quirks |= quirks[i].flag;

return 0;
}

int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
struct adreno_gpu *adreno_gpu, const struct adreno_gpu_funcs *funcs)
{

@@ -359,15 +478,8 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
adreno_gpu->revn = adreno_gpu->info->revn;
adreno_gpu->rev = config->rev;

gpu->fast_rate = config->fast_rate;
gpu->slow_rate = config->slow_rate;
gpu->bus_freq = config->bus_freq;
#ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING
gpu->bus_scale_table = config->bus_scale_table;
#endif

DBG("fast_rate=%u, slow_rate=%u, bus_freq=%u",
gpu->fast_rate, gpu->slow_rate, gpu->bus_freq);
/* Get the rest of the target configuration from the device tree */
adreno_of_parse(pdev, gpu);

ret = msm_gpu_init(drm, pdev, &adreno_gpu->base, &funcs->base,
adreno_gpu->info->name, "kgsl_3d0_reg_memory", "kgsl_3d0_irq",

@@ -414,7 +526,7 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
return -ENOMEM;
}

ret = msm_gem_get_iova(adreno_gpu->memptrs_bo, gpu->id,
ret = msm_gem_get_iova(adreno_gpu->memptrs_bo, gpu->aspace,
&adreno_gpu->memptrs_iova);
if (ret) {
dev_err(drm->dev, "could not map memptrs: %d\n", ret);

@@ -430,7 +542,7 @@ void adreno_gpu_cleanup(struct adreno_gpu *gpu)

if (gpu->memptrs_bo) {
if (gpu->memptrs_iova)
msm_gem_put_iova(gpu->memptrs_bo, gpu->base.id);
msm_gem_put_iova(gpu->memptrs_bo, gpu->base.aspace);
drm_gem_object_unreference_unlocked(gpu->memptrs_bo);
}
release_firmware(gpu->pm4);
@@ -2,7 +2,7 @@
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
* Copyright (c) 2014 The Linux Foundation. All rights reserved.
* Copyright (c) 2014,2016 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by

@@ -24,10 +24,17 @@

#include "msm_gpu.h"

/* arrg, somehow fb.h is getting pulled in: */
#undef ROP_COPY
#undef ROP_XOR

#include "adreno_common.xml.h"
#include "adreno_pm4.xml.h"

#define REG_ADRENO_DEFINE(_offset, _reg) [_offset] = (_reg) + 1
#define REG_SKIP ~0
#define REG_ADRENO_SKIP(_offset) [_offset] = REG_SKIP

/**
* adreno_regs: List of registers that are used across all
* 3D devices. Each device type has different offset value for the same

@@ -35,73 +42,21 @@
* and are indexed by the enumeration values defined in this enum
*/
enum adreno_regs {
REG_ADRENO_CP_DEBUG,
REG_ADRENO_CP_ME_RAM_WADDR,
REG_ADRENO_CP_ME_RAM_DATA,
REG_ADRENO_CP_PFP_UCODE_DATA,
REG_ADRENO_CP_PFP_UCODE_ADDR,
REG_ADRENO_CP_WFI_PEND_CTR,
REG_ADRENO_CP_RB_BASE,
REG_ADRENO_CP_RB_BASE_HI,
REG_ADRENO_CP_RB_RPTR_ADDR,
REG_ADRENO_CP_RB_RPTR_ADDR_HI,
REG_ADRENO_CP_RB_RPTR,
REG_ADRENO_CP_RB_WPTR,
REG_ADRENO_CP_PROTECT_CTRL,
REG_ADRENO_CP_ME_CNTL,
REG_ADRENO_CP_RB_CNTL,
REG_ADRENO_CP_IB1_BASE,
REG_ADRENO_CP_IB1_BUFSZ,
REG_ADRENO_CP_IB2_BASE,
REG_ADRENO_CP_IB2_BUFSZ,
REG_ADRENO_CP_TIMESTAMP,
REG_ADRENO_CP_ME_RAM_RADDR,
REG_ADRENO_CP_ROQ_ADDR,
REG_ADRENO_CP_ROQ_DATA,
REG_ADRENO_CP_MERCIU_ADDR,
REG_ADRENO_CP_MERCIU_DATA,
REG_ADRENO_CP_MERCIU_DATA2,
REG_ADRENO_CP_MEQ_ADDR,
REG_ADRENO_CP_MEQ_DATA,
REG_ADRENO_CP_HW_FAULT,
REG_ADRENO_CP_PROTECT_STATUS,
REG_ADRENO_SCRATCH_ADDR,
REG_ADRENO_SCRATCH_UMSK,
REG_ADRENO_SCRATCH_REG2,
REG_ADRENO_RBBM_STATUS,
REG_ADRENO_RBBM_PERFCTR_CTL,
REG_ADRENO_RBBM_PERFCTR_LOAD_CMD0,
REG_ADRENO_RBBM_PERFCTR_LOAD_CMD1,
REG_ADRENO_RBBM_PERFCTR_LOAD_CMD2,
REG_ADRENO_RBBM_PERFCTR_PWR_1_LO,
REG_ADRENO_RBBM_INT_0_MASK,
REG_ADRENO_RBBM_INT_0_STATUS,
REG_ADRENO_RBBM_AHB_ERROR_STATUS,
REG_ADRENO_RBBM_PM_OVERRIDE2,
REG_ADRENO_RBBM_AHB_CMD,
REG_ADRENO_RBBM_INT_CLEAR_CMD,
REG_ADRENO_RBBM_SW_RESET_CMD,
REG_ADRENO_RBBM_CLOCK_CTL,
REG_ADRENO_RBBM_AHB_ME_SPLIT_STATUS,
REG_ADRENO_RBBM_AHB_PFP_SPLIT_STATUS,
REG_ADRENO_VPC_DEBUG_RAM_SEL,
REG_ADRENO_VPC_DEBUG_RAM_READ,
REG_ADRENO_VSC_SIZE_ADDRESS,
REG_ADRENO_VFD_CONTROL_0,
REG_ADRENO_VFD_INDEX_MAX,
REG_ADRENO_SP_VS_PVT_MEM_ADDR_REG,
REG_ADRENO_SP_FS_PVT_MEM_ADDR_REG,
REG_ADRENO_SP_VS_OBJ_START_REG,
REG_ADRENO_SP_FS_OBJ_START_REG,
REG_ADRENO_PA_SC_AA_CONFIG,
REG_ADRENO_SQ_GPR_MANAGEMENT,
REG_ADRENO_SQ_INST_STORE_MANAGMENT,
REG_ADRENO_TP0_CHICKEN,
REG_ADRENO_RBBM_RBBM_CTL,
REG_ADRENO_UCHE_INVALIDATE0,
REG_ADRENO_RBBM_PERFCTR_LOAD_VALUE_LO,
REG_ADRENO_RBBM_PERFCTR_LOAD_VALUE_HI,
REG_ADRENO_REGISTER_MAX,
};

enum adreno_quirks {
ADRENO_QUIRK_TWO_PASS_USE_WFI = 1,
ADRENO_QUIRK_FAULT_DETECT_MASK = 2,
};

struct adreno_rev {
uint8_t core;
uint8_t major;

@@ -128,6 +83,9 @@ struct adreno_info {

const struct adreno_info *adreno_info(struct adreno_rev rev);

#define rbmemptr(adreno_gpu, member) \
((adreno_gpu)->memptrs_iova + offsetof(struct adreno_rbmemptrs, member))
struct adreno_rbmemptrs {
volatile uint32_t rptr;
volatile uint32_t wptr;

@@ -153,7 +111,7 @@ struct adreno_gpu {
// different for z180..
struct adreno_rbmemptrs *memptrs;
struct drm_gem_object *memptrs_bo;
uint32_t memptrs_iova;
uint64_t memptrs_iova;

/*
* Register offsets are different between some GPUs.

@@ -161,16 +119,15 @@ struct adreno_gpu {
* code (a3xx_gpu.c) and stored in this common location.
*/
const unsigned int *reg_offsets;

uint32_t quirks;
uint32_t speed_bin;
};
#define to_adreno_gpu(x) container_of(x, struct adreno_gpu, base)

/* platform config data (ie. from DT, or pdata) */
struct adreno_platform_config {
struct adreno_rev rev;
uint32_t fast_rate, slow_rate, bus_freq;
#ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING
struct msm_bus_scale_pdata *bus_scale_table;
#endif
};

#define ADRENO_IDLE_TIMEOUT msecs_to_jiffies(1000)

@@ -187,6 +144,9 @@ struct adreno_platform_config {
__ret; \
})

#define GPU_OF_NODE(_g) \
(((struct msm_drm_private *) \
((_g)->dev->dev_private))->gpu_pdev->dev.of_node)

static inline bool adreno_is_a3xx(struct adreno_gpu *gpu)
{

@@ -234,6 +194,16 @@ static inline int adreno_is_a430(struct adreno_gpu *gpu)
return gpu->revn == 430;
}

static inline int adreno_is_a530(struct adreno_gpu *gpu)
{
return gpu->revn == 530;
}

static inline int adreno_is_a540(struct adreno_gpu *gpu)
{
return gpu->revn == 540;
}

int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value);
int adreno_hw_init(struct msm_gpu *gpu);
uint32_t adreno_last_fence(struct msm_gpu *gpu);

@@ -278,8 +248,38 @@ OUT_PKT3(struct msm_ringbuffer *ring, uint8_t opcode, uint16_t cnt)
OUT_RING(ring, CP_TYPE3_PKT | ((cnt-1) << 16) | ((opcode & 0xFF) << 8));
}

static inline u32 PM4_PARITY(u32 val)
{
return (0x9669 >> (0xF & (val ^
(val >> 4) ^ (val >> 8) ^ (val >> 12) ^
(val >> 16) ^ ((val) >> 20) ^ (val >> 24) ^
(val >> 28)))) & 1;
}

/* Maximum number of values that can be executed for one opcode */
#define TYPE4_MAX_PAYLOAD 127

#define PKT4(_reg, _cnt) \
(CP_TYPE4_PKT | ((_cnt) << 0) | (PM4_PARITY((_cnt)) << 7) | \
(((_reg) & 0x3FFFF) << 8) | (PM4_PARITY((_reg)) << 27))

static inline void
OUT_PKT4(struct msm_ringbuffer *ring, uint16_t regindx, uint16_t cnt)
{
adreno_wait_ring(ring->gpu, cnt + 1);
OUT_RING(ring, PKT4(regindx, cnt));
}
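A usage sketch for the new type-4 helper; the register offset here is a made-up example, and PKT4() folds the parity bits into the header automatically:

	OUT_PKT4(ring, 0x0200, 1);	/* header: write 1 dword at reg 0x0200 */
	OUT_RING(ring, 0x00000001);	/* the payload dword for that register */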
static inline void
OUT_PKT7(struct msm_ringbuffer *ring, uint8_t opcode, uint16_t cnt)
{
adreno_wait_ring(ring->gpu, cnt + 1);
OUT_RING(ring, CP_TYPE7_PKT | (cnt << 0) | (PM4_PARITY(cnt) << 15) |
((opcode & 0x7F) << 16) | (PM4_PARITY(opcode) << 23));
}

/*
* adreno_checkreg_off() - Checks the validity of a register enum
* adreno_reg_check() - Checks the validity of a register enum
* @gpu: Pointer to struct adreno_gpu
* @offset_name: The register enum that is checked
*/

@@ -290,6 +290,16 @@ static inline bool adreno_reg_check(struct adreno_gpu *gpu,
!gpu->reg_offsets[offset_name]) {
BUG();
}

/*
* REG_SKIP is a special value that tells us that the register in
* question isn't implemented on the target, but shouldn't trigger a
* BUG(). This is used to cleanly implement adreno_gpu_write64() and
* adreno_gpu_read64() in a generic fashion
*/
if (gpu->reg_offsets[offset_name] == REG_SKIP)
return false;

return true;
}

@@ -311,4 +321,35 @@ static inline void adreno_gpu_write(struct adreno_gpu *gpu,
gpu_write(&gpu->base, reg - 1, data);
}

static inline void adreno_gpu_write64(struct adreno_gpu *gpu,
enum adreno_regs lo, enum adreno_regs hi, u64 data)
{
adreno_gpu_write(gpu, lo, lower_32_bits(data));
adreno_gpu_write(gpu, hi, upper_32_bits(data));
}
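This is the helper the 64-bit paths above rely on; the ringbuffer setup earlier in this diff uses it as:

	adreno_gpu_write64(adreno_gpu, REG_ADRENO_CP_RB_BASE,
		REG_ADRENO_CP_RB_BASE_HI, gpu->rb_iova);

On older targets where the _HI half is declared with REG_ADRENO_SKIP(), adreno_reg_check() returns false for it and the high write is silently dropped, so the same code path serves 32-bit and 64-bit GPUs.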
/*
* Given a register and a count, return a value to program into
* REG_CP_PROTECT_REG(n) - this will block both reads and writes for _len
* registers starting at _reg.
*
* The register base needs to be a multiple of the length. If it is not, the
* hardware will quietly mask off the bits for you and shift the size. For
* example, if you intend the protection to start at 0x07 for a length of 4
* (0x07-0x0A) the hardware will actually protect (0x04-0x07) which might
* expose registers you intended to protect!
*/
#define ADRENO_PROTECT_RW(_reg, _len) \
((1 << 30) | (1 << 29) | \
((ilog2((_len)) & 0x1F) << 24) | (((_reg) << 2) & 0xFFFFF))

/*
* Same as above, but allow reads over the range. For areas of mixed use (such
* as performance counters) this allows us to protect a much larger range with a
* single register
*/
#define ADRENO_PROTECT_RDONLY(_reg, _len) \
((1 << 29) | \
((ilog2((_len)) & 0x1F) << 24) | (((_reg) << 2) & 0xFFFFF))
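A worked example of the encoding, with the register base chosen to satisfy the alignment rule described above:

	/* Protect 4 registers starting at 0x300 (0x300 is a multiple of 4):
	 *   (1 << 30) | (1 << 29)   = 0x60000000  (block both reads and writes)
	 *   (ilog2(4) & 0x1F) << 24 = 0x02000000
	 *   (0x300 << 2) & 0xFFFFF  = 0x00000c00
	 * so ADRENO_PROTECT_RW(0x300, 4) == 0x62000c00
	 */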
#endif /* __ADRENO_GPU_H__ */

@@ -8,14 +8,15 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git

The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 398 bytes, from 2015-09-24 17:25:31)
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-02-10 17:07:21)
- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2015-05-20 20:03:14)
- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 11518 bytes, from 2016-02-10 21:03:25)
- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 16166 bytes, from 2016-02-11 21:20:31)
- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 83967 bytes, from 2016-02-10 17:07:21)
- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 109916 bytes, from 2016-02-20 18:44:48)
- /home/robclark/src/freedreno/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2015-09-24 17:30:00)
- ./adreno.xml ( 431 bytes, from 2016-10-24 21:12:27)
- ./freedreno_copyright.xml ( 1572 bytes, from 2016-10-24 21:12:27)
- ./adreno/a2xx.xml ( 32901 bytes, from 2016-10-24 21:12:27)
- ./adreno/adreno_common.xml ( 12025 bytes, from 2016-10-24 21:12:27)
- ./adreno/adreno_pm4.xml ( 19684 bytes, from 2016-10-24 21:12:27)
- ./adreno/a3xx.xml ( 83840 bytes, from 2016-10-24 21:12:27)
- ./adreno/a4xx.xml ( 110708 bytes, from 2016-10-24 21:12:27)
- ./adreno/a5xx.xml ( 81207 bytes, from 2016-10-26 19:36:59)
- ./adreno/ocmem.xml ( 1773 bytes, from 2016-10-24 21:12:27)

Copyright (C) 2013-2016 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)

@@ -58,6 +59,7 @@ enum vgt_event_type {
RST_PIX_CNT = 13,
RST_VTX_CNT = 14,
TILE_FLUSH = 15,
STAT_EVENT = 16,
CACHE_FLUSH_AND_INV_TS_EVENT = 20,
ZPASS_DONE = 21,
CACHE_FLUSH_AND_INV_EVENT = 22,

@@ -82,7 +84,6 @@ enum pc_di_primtype {
DI_PT_LINESTRIP_ADJ = 11,
DI_PT_TRI_ADJ = 12,
DI_PT_TRISTRIP_ADJ = 13,
DI_PT_PATCHES = 34,
};

enum pc_di_src_sel {

@@ -110,11 +111,15 @@ enum adreno_pm4_packet_type {
CP_TYPE1_PKT = 0x40000000,
CP_TYPE2_PKT = 0x80000000,
CP_TYPE3_PKT = 0xc0000000,
CP_TYPE4_PKT = 0x40000000,
CP_TYPE7_PKT = 0x70000000,
};

enum adreno_pm4_type3_packets {
CP_ME_INIT = 72,
CP_NOP = 16,
CP_PREEMPT_ENABLE = 28,
CP_PREEMPT_TOKEN = 30,
CP_INDIRECT_BUFFER = 63,
CP_INDIRECT_BUFFER_PFD = 55,
CP_WAIT_FOR_IDLE = 38,

@@ -163,6 +168,7 @@ enum adreno_pm4_type3_packets {
CP_TEST_TWO_MEMS = 113,
CP_REG_WR_NO_CTXT = 120,
CP_RECORD_PFP_TIMESTAMP = 17,
CP_SET_SECURE_MODE = 102,
CP_WAIT_FOR_ME = 19,
CP_SET_DRAW_STATE = 67,
CP_DRAW_INDX_OFFSET = 56,

@@ -178,6 +184,21 @@ enum adreno_pm4_type3_packets {
CP_WAIT_MEM_WRITES = 18,
CP_COND_REG_EXEC = 71,
CP_MEM_TO_REG = 66,
CP_EXEC_CS = 51,
CP_PERFCOUNTER_ACTION = 80,
CP_SMMU_TABLE_UPDATE = 83,
CP_CONTEXT_REG_BUNCH = 92,
CP_YIELD_ENABLE = 28,
CP_SKIP_IB2_ENABLE_GLOBAL = 29,
CP_SKIP_IB2_ENABLE_LOCAL = 35,
CP_SET_SUBDRAW_SIZE = 53,
CP_SET_VISIBILITY_OVERRIDE = 100,
CP_PREEMPT_ENABLE_GLOBAL = 105,
CP_PREEMPT_ENABLE_LOCAL = 106,
CP_CONTEXT_SWITCH_YIELD = 107,
CP_SET_RENDER_MODE = 108,
CP_COMPUTE_CHECKPOINT = 110,
CP_MEM_TO_MEM = 115,
IN_IB_PREFETCH_END = 23,
IN_SUBBLK_PREFETCH = 31,
IN_INSTR_PREFETCH = 32,

@@ -196,6 +217,7 @@ enum adreno_state_block {
SB_VERT_SHADER = 4,
SB_GEOM_SHADER = 5,
SB_FRAG_SHADER = 6,
SB_COMPUTE_SHADER = 7,
};

enum adreno_state_type {

@@ -389,7 +411,12 @@ static inline uint32_t CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT(enum pc_di_src_sel va
{
return ((val) << CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT__SHIFT) & CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT__MASK;
}
#define CP_DRAW_INDX_OFFSET_0_TESSELLATE 0x00000100
#define CP_DRAW_INDX_OFFSET_0_VIS_CULL__MASK 0x00000300
#define CP_DRAW_INDX_OFFSET_0_VIS_CULL__SHIFT 8
static inline uint32_t CP_DRAW_INDX_OFFSET_0_VIS_CULL(enum pc_di_vis_cull_mode val)
{
return ((val) << CP_DRAW_INDX_OFFSET_0_VIS_CULL__SHIFT) & CP_DRAW_INDX_OFFSET_0_VIS_CULL__MASK;
}
#define CP_DRAW_INDX_OFFSET_0_INDEX_SIZE__MASK 0x00000c00
#define CP_DRAW_INDX_OFFSET_0_INDEX_SIZE__SHIFT 10
static inline uint32_t CP_DRAW_INDX_OFFSET_0_INDEX_SIZE(enum a4xx_index_size val)

@@ -533,5 +560,78 @@ static inline uint32_t CP_REG_TO_MEM_1_DEST(uint32_t val)
return ((val) << CP_REG_TO_MEM_1_DEST__SHIFT) & CP_REG_TO_MEM_1_DEST__MASK;
}

#define REG_CP_DISPATCH_COMPUTE_0 0x00000000

#define REG_CP_DISPATCH_COMPUTE_1 0x00000001
#define CP_DISPATCH_COMPUTE_1_X__MASK 0xffffffff
#define CP_DISPATCH_COMPUTE_1_X__SHIFT 0
static inline uint32_t CP_DISPATCH_COMPUTE_1_X(uint32_t val)
{
return ((val) << CP_DISPATCH_COMPUTE_1_X__SHIFT) & CP_DISPATCH_COMPUTE_1_X__MASK;
}

#define REG_CP_DISPATCH_COMPUTE_2 0x00000002
#define CP_DISPATCH_COMPUTE_2_Y__MASK 0xffffffff
#define CP_DISPATCH_COMPUTE_2_Y__SHIFT 0
static inline uint32_t CP_DISPATCH_COMPUTE_2_Y(uint32_t val)
{
return ((val) << CP_DISPATCH_COMPUTE_2_Y__SHIFT) & CP_DISPATCH_COMPUTE_2_Y__MASK;
}

#define REG_CP_DISPATCH_COMPUTE_3 0x00000003
#define CP_DISPATCH_COMPUTE_3_Z__MASK 0xffffffff
#define CP_DISPATCH_COMPUTE_3_Z__SHIFT 0
static inline uint32_t CP_DISPATCH_COMPUTE_3_Z(uint32_t val)
{
return ((val) << CP_DISPATCH_COMPUTE_3_Z__SHIFT) & CP_DISPATCH_COMPUTE_3_Z__MASK;
}

#define REG_CP_SET_RENDER_MODE_0 0x00000000

#define REG_CP_SET_RENDER_MODE_1 0x00000001
#define CP_SET_RENDER_MODE_1_ADDR_0_LO__MASK 0xffffffff
#define CP_SET_RENDER_MODE_1_ADDR_0_LO__SHIFT 0
static inline uint32_t CP_SET_RENDER_MODE_1_ADDR_0_LO(uint32_t val)
{
return ((val) << CP_SET_RENDER_MODE_1_ADDR_0_LO__SHIFT) & CP_SET_RENDER_MODE_1_ADDR_0_LO__MASK;
}

#define REG_CP_SET_RENDER_MODE_2 0x00000002
#define CP_SET_RENDER_MODE_2_ADDR_0_HI__MASK 0xffffffff
#define CP_SET_RENDER_MODE_2_ADDR_0_HI__SHIFT 0
static inline uint32_t CP_SET_RENDER_MODE_2_ADDR_0_HI(uint32_t val)
{
return ((val) << CP_SET_RENDER_MODE_2_ADDR_0_HI__SHIFT) & CP_SET_RENDER_MODE_2_ADDR_0_HI__MASK;
}

#define REG_CP_SET_RENDER_MODE_3 0x00000003
#define CP_SET_RENDER_MODE_3_GMEM_ENABLE 0x00000010

#define REG_CP_SET_RENDER_MODE_4 0x00000004

#define REG_CP_SET_RENDER_MODE_5 0x00000005
#define CP_SET_RENDER_MODE_5_ADDR_1_LEN__MASK 0xffffffff
#define CP_SET_RENDER_MODE_5_ADDR_1_LEN__SHIFT 0
static inline uint32_t CP_SET_RENDER_MODE_5_ADDR_1_LEN(uint32_t val)
{
return ((val) << CP_SET_RENDER_MODE_5_ADDR_1_LEN__SHIFT) & CP_SET_RENDER_MODE_5_ADDR_1_LEN__MASK;
}

#define REG_CP_SET_RENDER_MODE_6 0x00000006
#define CP_SET_RENDER_MODE_6_ADDR_1_LO__MASK 0xffffffff
#define CP_SET_RENDER_MODE_6_ADDR_1_LO__SHIFT 0
static inline uint32_t CP_SET_RENDER_MODE_6_ADDR_1_LO(uint32_t val)
{
return ((val) << CP_SET_RENDER_MODE_6_ADDR_1_LO__SHIFT) & CP_SET_RENDER_MODE_6_ADDR_1_LO__MASK;
}

#define REG_CP_SET_RENDER_MODE_7 0x00000007
#define CP_SET_RENDER_MODE_7_ADDR_1_HI__MASK 0xffffffff
#define CP_SET_RENDER_MODE_7_ADDR_1_HI__SHIFT 0
static inline uint32_t CP_SET_RENDER_MODE_7_ADDR_1_HI(uint32_t val)
{
return ((val) << CP_SET_RENDER_MODE_7_ADDR_1_HI__SHIFT) & CP_SET_RENDER_MODE_7_ADDR_1_HI__MASK;
}

#endif /* ADRENO_PM4_XML */
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, The Linux Foundation. All rights reserved.
* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and

@@ -1186,7 +1186,7 @@ static int dsi_ctrl_buffer_deinit(struct dsi_ctrl *dsi_ctrl)
int dsi_ctrl_buffer_init(struct dsi_ctrl *dsi_ctrl)
{
int rc = 0;
u32 iova = 0;
u64 iova = 0;

dsi_ctrl->tx_cmd_buf = msm_gem_new(dsi_ctrl->drm_dev,
SZ_4K,

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, The Linux Foundation. All rights reserved.
* Copyright (c) 2015,2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and

@@ -89,7 +89,7 @@ int msm_dsi_manager_phy_enable(int id,
u32 *clk_pre, u32 *clk_post);
void msm_dsi_manager_phy_disable(int id);
int msm_dsi_manager_cmd_xfer(int id, const struct mipi_dsi_msg *msg);
bool msm_dsi_manager_cmd_xfer_trigger(int id, u32 iova, u32 len);
bool msm_dsi_manager_cmd_xfer_trigger(int id, u64 iova, u32 len);
int msm_dsi_manager_register(struct msm_dsi *msm_dsi);
void msm_dsi_manager_unregister(struct msm_dsi *msm_dsi);

@@ -143,7 +143,7 @@ int msm_dsi_host_cmd_tx(struct mipi_dsi_host *host,
int msm_dsi_host_cmd_rx(struct mipi_dsi_host *host,
const struct mipi_dsi_msg *msg);
void msm_dsi_host_cmd_xfer_commit(struct mipi_dsi_host *host,
u32 iova, u32 len);
u64 iova, u32 len);
int msm_dsi_host_enable(struct mipi_dsi_host *host);
int msm_dsi_host_disable(struct mipi_dsi_host *host);
int msm_dsi_host_power_on(struct mipi_dsi_host *host);

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, The Linux Foundation. All rights reserved.
* Copyright (c) 2015,2017 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and

@@ -836,7 +836,7 @@ static int dsi_tx_buf_alloc(struct msm_dsi_host *msm_host, int size)
{
struct drm_device *dev = msm_host->dev;
int ret;
u32 iova;
u64 iova;

mutex_lock(&dev->struct_mutex);
msm_host->tx_gem_obj = msm_gem_new(dev, size, MSM_BO_UNCACHED);

@@ -974,7 +974,7 @@ static int dsi_long_read_resp(u8 *buf, const struct mipi_dsi_msg *msg)
static int dsi_cmd_dma_tx(struct msm_dsi_host *msm_host, int len)
{
int ret;
u32 iova;
uint64_t iova;
bool triggered;

ret = msm_gem_get_iova(msm_host->tx_gem_obj, 0, &iova);

@@ -1750,11 +1750,12 @@ int msm_dsi_host_cmd_rx(struct mipi_dsi_host *host,
return ret;
}

void msm_dsi_host_cmd_xfer_commit(struct mipi_dsi_host *host, u32 iova, u32 len)
void msm_dsi_host_cmd_xfer_commit(struct mipi_dsi_host *host, u64 iova, u32 len)
{
struct msm_dsi_host *msm_host = to_msm_dsi_host(host);

dsi_write(msm_host, REG_DSI_DMA_BASE, iova);
/* FIXME: Verify that the iova < 32 bits? */
dsi_write(msm_host, REG_DSI_DMA_BASE, lower_32_bits(iova));
dsi_write(msm_host, REG_DSI_DMA_LEN, len);
dsi_write(msm_host, REG_DSI_TRIG_DMA, 1);
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, The Linux Foundation. All rights reserved.
* Copyright (c) 2015,2017 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and

@@ -774,7 +774,7 @@ restore_host0:
return ret;
}

bool msm_dsi_manager_cmd_xfer_trigger(int id, u32 iova, u32 len)
bool msm_dsi_manager_cmd_xfer_trigger(int id, u64 iova, u32 len)
{
struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
struct msm_dsi *msm_dsi0 = dsi_mgr_get_dsi(DSI_0);

@@ -40,7 +40,7 @@ struct mdp4_crtc {
uint32_t x, y;

/* next cursor to scan-out: */
uint32_t next_iova;
uint64_t next_iova;
struct drm_gem_object *next_bo;

/* current cursor being scanned out: */

@@ -133,7 +133,7 @@ static void unref_cursor_worker(struct drm_flip_work *work, void *val)
container_of(work, struct mdp4_crtc, unref_cursor_work);
struct mdp4_kms *mdp4_kms = get_kms(&mdp4_crtc->base);

msm_gem_put_iova(val, mdp4_kms->id);
msm_gem_put_iova(val, mdp4_kms->aspace);
drm_gem_object_unreference_unlocked(val);
}

@@ -387,25 +387,28 @@ static void update_cursor(struct drm_crtc *crtc)
if (mdp4_crtc->cursor.stale) {
struct drm_gem_object *next_bo = mdp4_crtc->cursor.next_bo;
struct drm_gem_object *prev_bo = mdp4_crtc->cursor.scanout_bo;
uint32_t iova = mdp4_crtc->cursor.next_iova;
uint64_t iova = mdp4_crtc->cursor.next_iova;

if (next_bo) {
/* take a obj ref + iova ref when we start scanning out: */
drm_gem_object_reference(next_bo);
msm_gem_get_iova_locked(next_bo, mdp4_kms->id, &iova);
msm_gem_get_iova_locked(next_bo, mdp4_kms->aspace,
&iova);

/* enable cursor: */
mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_SIZE(dma),
MDP4_DMA_CURSOR_SIZE_WIDTH(mdp4_crtc->cursor.width) |
MDP4_DMA_CURSOR_SIZE_HEIGHT(mdp4_crtc->cursor.height));
mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BASE(dma), iova);
/* FIXME: Make sure iova < 32 bits */
mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BASE(dma),
lower_32_bits(iova));
mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BLEND_CONFIG(dma),
MDP4_DMA_CURSOR_BLEND_CONFIG_FORMAT(CURSOR_ARGB) |
MDP4_DMA_CURSOR_BLEND_CONFIG_CURSOR_EN);
} else {
/* disable cursor: */
mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BASE(dma),
mdp4_kms->blank_cursor_iova);
lower_32_bits(mdp4_kms->blank_cursor_iova));
}

/* and drop the iova ref + obj rev when done scanning out: */

@@ -432,7 +435,7 @@ static int mdp4_crtc_cursor_set(struct drm_crtc *crtc,
struct drm_device *dev = crtc->dev;
struct drm_gem_object *cursor_bo, *old_bo;
unsigned long flags;
uint32_t iova;
uint64_t iova;
int ret;

if ((width > CURSOR_WIDTH) || (height > CURSOR_HEIGHT)) {

@@ -449,7 +452,7 @@ static int mdp4_crtc_cursor_set(struct drm_crtc *crtc,
}

if (cursor_bo) {
ret = msm_gem_get_iova(cursor_bo, mdp4_kms->id, &iova);
ret = msm_gem_get_iova(cursor_bo, mdp4_kms->aspace, &iova);
if (ret)
goto fail;
} else {

@@ -197,7 +197,7 @@ static void mdp4_destroy(struct msm_kms *kms)

struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
if (mdp4_kms->blank_cursor_iova)
msm_gem_put_iova(mdp4_kms->blank_cursor_bo, mdp4_kms->id);
msm_gem_put_iova(mdp4_kms->blank_cursor_bo, mdp4_kms->aspace);
if (mdp4_kms->blank_cursor_bo)
drm_gem_object_unreference_unlocked(mdp4_kms->blank_cursor_bo);

@@ -541,13 +541,6 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
aspace = NULL;
}

mdp4_kms->id = msm_register_address_space(dev, aspace);
if (mdp4_kms->id < 0) {
ret = mdp4_kms->id;
dev_err(dev->dev, "failed to register mdp4 iommu: %d\n", ret);
goto fail;
}

ret = modeset_init(mdp4_kms);
if (ret) {
dev_err(dev->dev, "modeset_init failed: %d\n", ret);

@@ -564,7 +557,7 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
goto fail;
}

ret = msm_gem_get_iova(mdp4_kms->blank_cursor_bo, mdp4_kms->id,
ret = msm_gem_get_iova(mdp4_kms->blank_cursor_bo, mdp4_kms->aspace,
&mdp4_kms->blank_cursor_iova);
if (ret) {
dev_err(dev->dev, "could not pin blank-cursor bo: %d\n", ret);

@@ -33,8 +33,6 @@ struct mdp4_kms {
int rev;

/* mapper-id used to request GEM buffer mapped for scanout: */
int id;

void __iomem *mmio;

struct regulator *dsi_pll_vdda;

@@ -51,7 +49,7 @@ struct mdp4_kms {

/* empty/blank cursor bo to use when cursor is "disabled" */
struct drm_gem_object *blank_cursor_bo;
uint32_t blank_cursor_iova;
uint64_t blank_cursor_iova;
};
#define to_mdp4_kms(x) container_of(x, struct mdp4_kms, base)

@@ -109,7 +109,7 @@ static int mdp4_plane_prepare_fb(struct drm_plane *plane,
return 0;

DBG("%s: prepare: FB[%u]", mdp4_plane->name, fb->base.id);
return msm_framebuffer_prepare(fb, mdp4_kms->id);
return msm_framebuffer_prepare(fb, mdp4_kms->aspace);
}

static void mdp4_plane_cleanup_fb(struct drm_plane *plane,

@@ -123,7 +123,7 @@ static void mdp4_plane_cleanup_fb(struct drm_plane *plane,
return;

DBG("%s: cleanup: FB[%u]", mdp4_plane->name, fb->base.id);
msm_framebuffer_cleanup(fb, mdp4_kms->id);
msm_framebuffer_cleanup(fb, mdp4_kms->aspace);
}

@@ -172,13 +172,13 @@ static void mdp4_plane_set_scanout(struct drm_plane *plane,
MDP4_PIPE_SRC_STRIDE_B_P3(fb->pitches[3]));

mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP0_BASE(pipe),
msm_framebuffer_iova(fb, mdp4_kms->id, 0));
msm_framebuffer_iova(fb, mdp4_kms->aspace, 0));
mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP1_BASE(pipe),
msm_framebuffer_iova(fb, mdp4_kms->id, 1));
msm_framebuffer_iova(fb, mdp4_kms->aspace, 1));
mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP2_BASE(pipe),
msm_framebuffer_iova(fb, mdp4_kms->id, 2));
msm_framebuffer_iova(fb, mdp4_kms->aspace, 2));
mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP3_BASE(pipe),
msm_framebuffer_iova(fb, mdp4_kms->id, 3));
msm_framebuffer_iova(fb, mdp4_kms->aspace, 3));

plane->fb = fb;
}
@@ -171,7 +171,7 @@ static void unref_cursor_worker(struct drm_flip_work *work, void *val)
container_of(work, struct mdp5_crtc, unref_cursor_work);
struct mdp5_kms *mdp5_kms = get_kms(&mdp5_crtc->base);

msm_gem_put_iova(val, mdp5_kms->id);
msm_gem_put_iova(val, mdp5_kms->aspace);
drm_gem_object_unreference_unlocked(val);
}

@@ -509,7 +509,8 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
struct drm_device *dev = crtc->dev;
struct mdp5_kms *mdp5_kms = get_kms(crtc);
struct drm_gem_object *cursor_bo, *old_bo = NULL;
uint32_t blendcfg, cursor_addr, stride;
uint32_t blendcfg, stride;
uint64_t cursor_addr;
int ret, bpp, lm;
unsigned int depth;
enum mdp5_cursor_alpha cur_alpha = CURSOR_ALPHA_PER_PIXEL;

@@ -536,7 +537,7 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
if (!cursor_bo)
return -ENOENT;

ret = msm_gem_get_iova(cursor_bo, mdp5_kms->id, &cursor_addr);
ret = msm_gem_get_iova(cursor_bo, mdp5_kms->aspace, &cursor_addr);
if (ret)
return -EINVAL;

@@ -627,13 +627,6 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
aspace = NULL;
}

mdp5_kms->id = msm_register_address_space(dev, aspace);
if (mdp5_kms->id < 0) {
ret = mdp5_kms->id;
dev_err(dev->dev, "failed to register mdp5 iommu: %d\n", ret);
goto fail;
}

ret = modeset_init(mdp5_kms);
if (ret) {
dev_err(dev->dev, "modeset_init failed: %d\n", ret);

@@ -36,7 +36,6 @@ struct mdp5_kms {

/* mapper-id used to request GEM buffer mapped for scanout: */
int id;
struct msm_gem_address_space *aspace;

struct mdp5_smp *smp;

@@ -260,7 +260,7 @@ static int mdp5_plane_prepare_fb(struct drm_plane *plane,
return 0;

DBG("%s: prepare: FB[%u]", mdp5_plane->name, fb->base.id);
return msm_framebuffer_prepare(fb, mdp5_kms->id);
return msm_framebuffer_prepare(fb, mdp5_kms->aspace);
}

static void mdp5_plane_cleanup_fb(struct drm_plane *plane,

@@ -274,7 +274,7 @@ static void mdp5_plane_cleanup_fb(struct drm_plane *plane,
return;

DBG("%s: cleanup: FB[%u]", mdp5_plane->name, fb->base.id);
msm_framebuffer_cleanup(fb, mdp5_kms->id);
msm_framebuffer_cleanup(fb, mdp5_kms->aspace);
}

static int mdp5_plane_atomic_check(struct drm_plane *plane,

@@ -400,13 +400,13 @@ static void set_scanout_locked(struct drm_plane *plane,
MDP5_PIPE_SRC_STRIDE_B_P3(fb->pitches[3]));

mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC0_ADDR(pipe),
msm_framebuffer_iova(fb, mdp5_kms->id, 0));
msm_framebuffer_iova(fb, mdp5_kms->aspace, 0));
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC1_ADDR(pipe),
msm_framebuffer_iova(fb, mdp5_kms->id, 1));
msm_framebuffer_iova(fb, mdp5_kms->aspace, 1));
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC2_ADDR(pipe),
msm_framebuffer_iova(fb, mdp5_kms->id, 2));
msm_framebuffer_iova(fb, mdp5_kms->aspace, 2));
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC3_ADDR(pipe),
msm_framebuffer_iova(fb, mdp5_kms->id, 3));
msm_framebuffer_iova(fb, mdp5_kms->aspace, 3));

plane->fb = fb;
}
@@ -38,20 +38,6 @@ static const struct drm_mode_config_funcs mode_config_funcs = {
.atomic_commit = msm_atomic_commit,
};

int msm_register_address_space(struct drm_device *dev,
struct msm_gem_address_space *aspace)
{
struct msm_drm_private *priv = dev->dev_private;
int idx = priv->num_aspaces++;

if (WARN_ON(idx >= ARRAY_SIZE(priv->aspace)))
return -EINVAL;

priv->aspace[idx] = aspace;

return idx;
}

#ifdef CONFIG_DRM_MSM_REGISTER_LOGGING
static bool reglog = false;
MODULE_PARM_DESC(reglog, "Enable register read/write logging");

@@ -956,7 +942,7 @@ int msm_wait_fence(struct drm_device *dev, uint32_t fence,
int ret;

if (!priv->gpu)
return 0;
return -ENXIO;

if (fence > priv->gpu->submitted_fence) {
DRM_ERROR("waiting on invalid fence: %u (of %u)\n",

@@ -1138,6 +1124,17 @@ static int msm_ioctl_gem_cpu_fini(struct drm_device *dev, void *data,
return ret;
}

static int msm_ioctl_gem_info_iova(struct drm_device *dev,
struct drm_gem_object *obj, uint64_t *iova)
{
struct msm_drm_private *priv = dev->dev_private;

if (!priv->gpu)
return -EINVAL;

return msm_gem_get_iova(obj, priv->gpu->aspace, iova);
}

static int msm_ioctl_gem_info(struct drm_device *dev, void *data,
struct drm_file *file)
{

@@ -1145,14 +1142,22 @@ static int msm_ioctl_gem_info(struct drm_device *dev, void *data,
struct drm_gem_object *obj;
int ret = 0;

if (args->pad)
if (args->flags & ~MSM_INFO_FLAGS)
return -EINVAL;

obj = drm_gem_object_lookup(dev, file, args->handle);
if (!obj)
return -ENOENT;

args->offset = msm_gem_mmap_offset(obj);
if (args->flags & MSM_INFO_IOVA) {
uint64_t iova;

ret = msm_ioctl_gem_info_iova(dev, obj, &iova);
if (!ret)
args->offset = iova;
} else {
args->offset = msm_gem_mmap_offset(obj);
}

drm_gem_object_unreference_unlocked(obj);
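For context, the userspace side of the new hint looks roughly like this. It is a sketch: it assumes the matching uapi change that renames the pad field to flags and defines MSM_INFO_IOVA, which is not part of this excerpt; drmIoctl() is the libdrm wrapper:

	struct drm_msm_gem_info req = {
		.handle = handle,		/* GEM handle from allocation */
		.flags = MSM_INFO_IOVA,		/* ask for the iova, not the mmap offset */
	};

	if (!drmIoctl(fd, DRM_IOCTL_MSM_GEM_INFO, &req))
		printf("iova: 0x%llx\n", (unsigned long long)req.offset);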
@@ -374,8 +374,6 @@ int msm_queue_fence_cb(struct drm_device *dev,
struct msm_fence_cb *cb, uint32_t fence);
void msm_update_fence(struct drm_device *dev, uint32_t fence);

int msm_register_address_space(struct drm_device *dev,
struct msm_gem_address_space *aspace);
void msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
struct msm_gem_vma *vma, struct sg_table *sgt,
void *priv);

@@ -402,13 +400,16 @@ int msm_gem_mmap_obj(struct drm_gem_object *obj,
int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
int msm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj);
int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
uint32_t *iova);
int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint32_t *iova);
uint32_t msm_gem_iova(struct drm_gem_object *obj, int id);
int msm_gem_get_iova_locked(struct drm_gem_object *obj,
struct msm_gem_address_space *aspace, uint64_t *iova);
int msm_gem_get_iova(struct drm_gem_object *obj,
struct msm_gem_address_space *aspace, uint64_t *iova);
uint64_t msm_gem_iova(struct drm_gem_object *obj,
struct msm_gem_address_space *aspace);
struct page **msm_gem_get_pages(struct drm_gem_object *obj);
void msm_gem_put_pages(struct drm_gem_object *obj);
void msm_gem_put_iova(struct drm_gem_object *obj, int id);
void msm_gem_put_iova(struct drm_gem_object *obj,
struct msm_gem_address_space *aspace);
int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
struct drm_mode_create_dumb *args);
int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,

@@ -439,9 +440,12 @@ struct drm_gem_object *msm_gem_new(struct drm_device *dev,
struct drm_gem_object *msm_gem_import(struct drm_device *dev,
uint32_t size, struct sg_table *sgt);

int msm_framebuffer_prepare(struct drm_framebuffer *fb, int id);
void msm_framebuffer_cleanup(struct drm_framebuffer *fb, int id);
uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb, int id, int plane);
int msm_framebuffer_prepare(struct drm_framebuffer *fb,
struct msm_gem_address_space *aspace);
void msm_framebuffer_cleanup(struct drm_framebuffer *fb,
struct msm_gem_address_space *aspace);
uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb,
struct msm_gem_address_space *aspace, int plane);
struct drm_gem_object *msm_framebuffer_bo(struct drm_framebuffer *fb, int plane);
const struct msm_format *msm_framebuffer_format(struct drm_framebuffer *fb);
struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,

@@ -92,15 +92,16 @@ void msm_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m)
* should be fine, since only the scanout (mdpN) side of things needs
* this, the gpu doesn't care about fb's.
*/
int msm_framebuffer_prepare(struct drm_framebuffer *fb, int id)
int msm_framebuffer_prepare(struct drm_framebuffer *fb,
struct msm_gem_address_space *aspace)
{
struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
int ret, i, n = drm_format_num_planes(fb->pixel_format);
uint32_t iova;
uint64_t iova;

for (i = 0; i < n; i++) {
ret = msm_gem_get_iova(msm_fb->planes[i], id, &iova);
DBG("FB[%u]: iova[%d]: %08x (%d)", fb->base.id, i, iova, ret);
ret = msm_gem_get_iova(msm_fb->planes[i], aspace, &iova);
DBG("FB[%u]: iova[%d]: %08llx (%d)", fb->base.id, i, iova, ret);
if (ret)
return ret;
}

@@ -108,21 +109,30 @@ int msm_framebuffer_prepare(struct drm_framebuffer *fb, int id)
return 0;
}

void msm_framebuffer_cleanup(struct drm_framebuffer *fb, int id)
void msm_framebuffer_cleanup(struct drm_framebuffer *fb,
struct msm_gem_address_space *aspace)
{
struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
int i, n = drm_format_num_planes(fb->pixel_format);

for (i = 0; i < n; i++)
msm_gem_put_iova(msm_fb->planes[i], id);
msm_gem_put_iova(msm_fb->planes[i], aspace);
}

uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb, int id, int plane)
/* FIXME: Leave this as a uint32_t and just return the lower 32 bits? */
uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb,
struct msm_gem_address_space *aspace, int plane)
{
struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
uint64_t iova;

if (!msm_fb->planes[plane])
return 0;
return msm_gem_iova(msm_fb->planes[plane], id) + fb->offsets[plane];

iova = msm_gem_iova(msm_fb->planes[plane], aspace) + fb->offsets[plane];

/* FIXME: Make sure it is < 32 bits */
return lower_32_bits(iova);
}

struct drm_gem_object *msm_framebuffer_bo(struct drm_framebuffer *fb, int plane)

@@ -85,7 +85,7 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
struct drm_framebuffer *fb = NULL;
struct fb_info *fbi = NULL;
struct drm_mode_fb_cmd2 mode_cmd = {0};
uint32_t paddr;
uint64_t paddr;
int ret, size;

DBG("create fbdev: %dx%d@%d (%dx%d)", sizes->surface_width,

@@ -160,11 +160,12 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->depth);
drm_fb_helper_fill_var(fbi, helper, sizes->fb_width, sizes->fb_height);

dev->mode_config.fb_base = paddr;
/* FIXME: Verify paddr < 32 bits? */
dev->mode_config.fb_base = lower_32_bits(paddr);

fbi->screen_base = msm_gem_vaddr_locked(fbdev->bo);
fbi->screen_size = fbdev->bo->size;
fbi->fix.smem_start = paddr;
fbi->fix.smem_start = lower_32_bits(paddr);
fbi->fix.smem_len = fbdev->bo->size;

DBG("par=%p, %dx%d", fbi->par, fbi->var.xres, fbi->var.yres);
@@ -273,22 +273,63 @@ uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
return offset;
}

static void obj_remove_domain(struct msm_gem_vma *domain)
{
if (domain) {
list_del(&domain->list);
kfree(domain);
}
}

static void
put_iova(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
struct msm_drm_private *priv = obj->dev->dev_private;
struct msm_gem_object *msm_obj = to_msm_bo(obj);
int id;
struct msm_gem_vma *domain, *tmp;

WARN_ON(!mutex_is_locked(&dev->struct_mutex));

for (id = 0; id < ARRAY_SIZE(msm_obj->domain); id++) {
msm_gem_unmap_vma(priv->aspace[id], &msm_obj->domain[id],
msm_obj->sgt, get_dmabuf_ptr(obj));
list_for_each_entry_safe(domain, tmp, &msm_obj->domains, list) {
if (iommu_present(&platform_bus_type)) {
msm_gem_unmap_vma(domain->aspace, domain,
msm_obj->sgt, get_dmabuf_ptr(obj));
}

obj_remove_domain(domain);
}
}

static struct msm_gem_vma *obj_add_domain(struct drm_gem_object *obj,
struct msm_gem_address_space *aspace)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
struct msm_gem_vma *domain = kzalloc(sizeof(*domain), GFP_KERNEL);

if (!domain)
return ERR_PTR(-ENOMEM);

domain->aspace = aspace;

list_add_tail(&domain->list, &msm_obj->domains);

return domain;
}

static struct msm_gem_vma *obj_get_domain(struct drm_gem_object *obj,
struct msm_gem_address_space *aspace)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
struct msm_gem_vma *domain;

list_for_each_entry(domain, &msm_obj->domains, list) {
if (domain->aspace == aspace)
return domain;
}

return NULL;
}

/* should be called under struct_mutex.. although it can be called
* from atomic context without struct_mutex to acquire an extra
* iova ref if you know one is already held.

@@ -296,49 +337,64 @@ put_iova(struct drm_gem_object *obj)
* That means when I do eventually need to add support for unpinning
* the refcnt counter needs to be atomic_t.
*/
int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
uint32_t *iova)
int msm_gem_get_iova_locked(struct drm_gem_object *obj,
struct msm_gem_address_space *aspace, uint64_t *iova)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
struct page **pages;
struct msm_gem_vma *domain;
int ret = 0;

if (!msm_obj->domain[id].iova) {
struct msm_drm_private *priv = obj->dev->dev_private;
struct page **pages = get_pages(obj);
if (!iommu_present(&platform_bus_type)) {
pages = get_pages(obj);

if (IS_ERR(pages))
return PTR_ERR(pages);

if (iommu_present(&platform_bus_type)) {
ret = msm_gem_map_vma(priv->aspace[id],
&msm_obj->domain[id], msm_obj->sgt,
get_dmabuf_ptr(obj));
} else
msm_obj->domain[id].iova = physaddr(obj);
*iova = (uint64_t) physaddr(obj);
return 0;
}

domain = obj_get_domain(obj, aspace);

if (!domain) {
domain = obj_add_domain(obj, aspace);
if (IS_ERR(domain))
return PTR_ERR(domain);

pages = get_pages(obj);
if (IS_ERR(pages)) {
obj_remove_domain(domain);
return PTR_ERR(pages);
}

ret = msm_gem_map_vma(aspace, domain, msm_obj->sgt,
get_dmabuf_ptr(obj));
}

if (!ret)
*iova = msm_obj->domain[id].iova;
*iova = domain->iova;
else
obj_remove_domain(domain);

return ret;
}

/* get iova, taking a reference. Should have a matching put */
int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint32_t *iova)
int msm_gem_get_iova(struct drm_gem_object *obj,
struct msm_gem_address_space *aspace, uint64_t *iova)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
struct msm_gem_vma *domain;
int ret;

/* this is safe right now because we don't unmap until the
* bo is deleted:
*/
if (msm_obj->domain[id].iova) {
*iova = msm_obj->domain[id].iova;
domain = obj_get_domain(obj, aspace);
if (domain) {
*iova = domain->iova;
return 0;
}

mutex_lock(&obj->dev->struct_mutex);
ret = msm_gem_get_iova_locked(obj, id, iova);
ret = msm_gem_get_iova_locked(obj, aspace, iova);
mutex_unlock(&obj->dev->struct_mutex);
return ret;
}
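Callers now pin a BO against an address space object instead of an integer mapper id; a minimal sketch of the new calling convention:

	uint64_t iova;
	int ret = msm_gem_get_iova(obj, gpu->aspace, &iova);	/* takes an iova ref */
	/* ... use iova ... */
	msm_gem_put_iova(obj, gpu->aspace);			/* matching put */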
|
||||
|
@ -346,14 +402,18 @@ int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint32_t *iova)
|
|||
/* get iova without taking a reference, used in places where you have
|
||||
* already done a 'msm_gem_get_iova()'.
|
||||
*/
|
||||
uint32_t msm_gem_iova(struct drm_gem_object *obj, int id)
|
||||
uint64_t msm_gem_iova(struct drm_gem_object *obj,
|
||||
struct msm_gem_address_space *aspace)
|
||||
{
|
||||
struct msm_gem_object *msm_obj = to_msm_bo(obj);
|
||||
WARN_ON(!msm_obj->domain[id].iova);
|
||||
return msm_obj->domain[id].iova;
|
||||
struct msm_gem_vma *domain = obj_get_domain(obj, aspace);
|
||||
|
||||
WARN_ON(!domain);
|
||||
|
||||
return domain ? domain->iova : 0;
|
||||
}
|
||||
|
||||
void msm_gem_put_iova(struct drm_gem_object *obj, int id)
|
||||
void msm_gem_put_iova(struct drm_gem_object *obj,
|
||||
struct msm_gem_address_space *aspace)
|
||||
{
|
||||
// XXX TODO ..
|
||||
// NOTE: probably don't need a _locked() version.. we wouldn't
|
||||
|
@@ -487,9 +547,8 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	struct msm_gem_vma *domain;
	uint64_t off = drm_vma_node_start(&obj->vma_node);
	int id;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
	seq_printf(m, "%08x: %c(r=%u,w=%u) %2d (%2d) %08llx %p\t",
@@ -498,8 +557,9 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
			obj->name, obj->refcount.refcount.counter,
			off, msm_obj->vaddr);

	for (id = 0; id < priv->num_aspaces; id++)
		seq_printf(m, " %08llx", msm_obj->domain[id].iova);
	/* FIXME: we need to print the address space here too */
	list_for_each_entry(domain, &msm_obj->domains, list)
		seq_printf(m, " %08llx", domain->iova);

	seq_puts(m, "\n");
}

@@ -618,8 +678,12 @@ static int msm_gem_new_impl(struct drm_device *dev,
	if (!msm_obj)
		return -ENOMEM;

	if (use_vram)
		msm_obj->vram_node = &msm_obj->domain[0].node;
	if (use_vram) {
		struct msm_gem_vma *domain = obj_add_domain(&msm_obj->base, 0);

		if (!IS_ERR(domain))
			msm_obj->vram_node = &domain->node;
	}

	msm_obj->flags = flags;

@@ -627,6 +691,8 @@ static int msm_gem_new_impl(struct drm_device *dev,
		reservation_object_init(msm_obj->resv);

	INIT_LIST_HEAD(&msm_obj->submit_entry);
	INIT_LIST_HEAD(&msm_obj->domains);

	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);

	*obj = &msm_obj->base;

@@ -43,7 +43,9 @@ struct msm_gem_address_space {
struct msm_gem_vma {
	/* Node used by the GPU address space, but not the SDE address space */
	struct drm_mm_node node;
	struct msm_gem_address_space *aspace;
	uint64_t iova;
	struct list_head list;
};

struct msm_gem_object {
@@ -74,7 +76,7 @@ struct msm_gem_object {
	struct sg_table *sgt;
	void *vaddr;

	struct msm_gem_vma domain[NUM_DOMAINS];
	struct list_head domains;

	/* normally (resv == &_resv) except for imported bo's */
	struct reservation_object *resv;
@@ -125,13 +127,13 @@ struct msm_gem_submit {
	struct {
		uint32_t type;
		uint32_t size;  /* in dwords */
		uint32_t iova;
		uint64_t iova;
		uint32_t idx;   /* cmdstream buffer idx in bos[] */
	} cmd[MAX_CMDS];
	struct {
		uint32_t flags;
		struct msm_gem_object *obj;
		uint32_t iova;
		uint64_t iova;
	} bos[0];
};

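The obj_get_domain()/obj_add_domain() helpers used in the msm_gem.c hunks are outside the quoted context; a plausible lookup consistent with the list-based msm_gem_vma layout above (a sketch only, not the commit's actual helper) would be:

/* Sketch: walk the per-object domain list for a matching address
 * space, as implied by the msm_gem_vma/domains members above.
 */
static struct msm_gem_vma *obj_get_domain(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *domain;

	list_for_each_entry(domain, &msm_obj->domains, list) {
		if (domain->aspace == aspace)
			return domain;
	}

	return NULL;
}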
@@ -90,7 +90,8 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
			pagefault_disable();
		}

		if (submit_bo.flags & ~MSM_SUBMIT_BO_FLAGS) {
		if ((submit_bo.flags & ~MSM_SUBMIT_BO_FLAGS) ||
				!(submit_bo.flags & MSM_SUBMIT_BO_FLAGS)) {
			DRM_ERROR("invalid flags: %x\n", submit_bo.flags);
			ret = -EINVAL;
			goto out_unlock;
@@ -141,7 +142,7 @@ static void submit_unlock_unpin_bo(struct msm_gem_submit *submit, int i)
	struct msm_gem_object *msm_obj = submit->bos[i].obj;

	if (submit->bos[i].flags & BO_PINNED)
		msm_gem_put_iova(&msm_obj->base, submit->gpu->id);
		msm_gem_put_iova(&msm_obj->base, submit->gpu->aspace);

	if (submit->bos[i].flags & BO_LOCKED)
		ww_mutex_unlock(&msm_obj->resv->lock);
@@ -162,7 +163,7 @@ retry:

	for (i = 0; i < submit->nr_bos; i++) {
		struct msm_gem_object *msm_obj = submit->bos[i].obj;
		uint32_t iova;
		uint64_t iova;

		if (slow_locked == i)
			slow_locked = -1;
@@ -180,7 +181,7 @@ retry:

		/* if locking succeeded, pin bo: */
		ret = msm_gem_get_iova_locked(&msm_obj->base,
				submit->gpu->id, &iova);
				submit->gpu->aspace, &iova);

		/* this would break the logic in the fail path.. there is no
		 * reason for this to happen, but just to be on the safe side
@@ -229,7 +230,7 @@ fail:
}

static int submit_bo(struct msm_gem_submit *submit, uint32_t idx,
		struct msm_gem_object **obj, uint32_t *iova, bool *valid)
		struct msm_gem_object **obj, uint64_t *iova, bool *valid)
{
	if (idx >= submit->nr_bos) {
		DRM_ERROR("invalid buffer index: %u (out of %u)\n",
@@ -275,7 +276,8 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob
		struct drm_msm_gem_submit_reloc submit_reloc;
		void __user *userptr =
			to_user_ptr(relocs + (i * sizeof(submit_reloc)));
		uint32_t iova, off;
		uint64_t iova;
		uint32_t off;
		bool valid;

		ret = copy_from_user(&submit_reloc, userptr, sizeof(submit_reloc));
@@ -351,6 +353,8 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
		return -EINVAL;

	gpu = priv->gpu;
	if (!gpu)
		return -ENXIO;

	if (args->nr_cmds > MAX_CMDS)
		return -EINVAL;
@@ -376,7 +380,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
		void __user *userptr =
			to_user_ptr(args->cmds + (i * sizeof(submit_cmd)));
		struct msm_gem_object *msm_obj;
		uint32_t iova;
		uint64_t iova;

		ret = copy_from_user(&submit_cmd, userptr, sizeof(submit_cmd));
		if (ret) {

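One behavioral note on the submit_lookup_objects() hunk: a submit BO must now set at least one of the read/write flags, or the ioctl fails with -EINVAL. An illustrative userspace fill-in, using field names from the existing uapi (the handle value is assumed):

/* Userspace sketch: every submit BO must request READ and/or WRITE;
 * a zero-flags entry now fails the stricter check with -EINVAL.
 */
static void fill_submit_bo(struct drm_msm_gem_submit_bo *bo,
		uint32_t handle)
{
	bo->flags = MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE;
	bo->handle = handle;	/* assumed: from an earlier GEM_NEW */
	bo->presumed = 0;	/* kernel fills/validates the iova */
}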
@@ -90,19 +90,18 @@ static int disable_pwrrail(struct msm_gpu *gpu)

static int enable_clk(struct msm_gpu *gpu)
{
	struct clk *rate_clk = NULL;
	uint32_t rate = gpu->gpufreq[gpu->active_level];
	int i;

	/* NOTE: kgsl_pwrctrl_clk() ignores grp_clks[0].. */
	for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i > 0; i--) {
		if (gpu->grp_clks[i]) {
			clk_prepare(gpu->grp_clks[i]);
			rate_clk = gpu->grp_clks[i];
		}
	}
	clk_set_rate(gpu->grp_clks[0], rate);

	if (rate_clk && gpu->fast_rate)
		clk_set_rate(rate_clk, gpu->fast_rate);
	if (gpu->grp_clks[3])
		clk_set_rate(gpu->grp_clks[3], 19200000);

	/* NOTE: kgsl_pwrctrl_clk() ignores grp_clks[0].. */
	for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i > 0; i--)
		if (gpu->grp_clks[i])
			clk_prepare(gpu->grp_clks[i]);

	for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i > 0; i--)
		if (gpu->grp_clks[i])
@@ -113,24 +112,19 @@ static int enable_clk(struct msm_gpu *gpu)

static int disable_clk(struct msm_gpu *gpu)
{
	struct clk *rate_clk = NULL;
	uint32_t rate = gpu->gpufreq[gpu->nr_pwrlevels - 1];
	int i;

	/* NOTE: kgsl_pwrctrl_clk() ignores grp_clks[0].. */
	for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i > 0; i--) {
		if (gpu->grp_clks[i]) {
	for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i > 0; i--)
		if (gpu->grp_clks[i])
			clk_disable(gpu->grp_clks[i]);
			rate_clk = gpu->grp_clks[i];
		}
	}

	if (rate_clk && gpu->slow_rate)
		clk_set_rate(rate_clk, gpu->slow_rate);

	for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i > 0; i--)
		if (gpu->grp_clks[i])
			clk_unprepare(gpu->grp_clks[i]);

	clk_set_rate(gpu->grp_clks[0], rate);
	return 0;
}

@@ -138,8 +132,9 @@ static int enable_axi(struct msm_gpu *gpu)
{
	if (gpu->ebi1_clk)
		clk_prepare_enable(gpu->ebi1_clk);
	if (gpu->bus_freq)
		bs_set(gpu, gpu->bus_freq);

	if (gpu->busfreq[gpu->active_level])
		bs_set(gpu, gpu->busfreq[gpu->active_level]);
	return 0;
}

@@ -147,7 +142,8 @@ static int disable_axi(struct msm_gpu *gpu)
{
	if (gpu->ebi1_clk)
		clk_disable_unprepare(gpu->ebi1_clk);
	if (gpu->bus_freq)

	if (gpu->busfreq[gpu->active_level])
		bs_set(gpu, 0);
	return 0;
}
@@ -474,7 +470,7 @@ static void retire_worker(struct work_struct *work)
				(obj->write_fence <= fence)) {
			/* move to inactive: */
			msm_gem_move_to_inactive(&obj->base);
			msm_gem_put_iova(&obj->base, gpu->id);
			msm_gem_put_iova(&obj->base, gpu->aspace);
			drm_gem_object_unreference(&obj->base);
		} else {
			break;
@@ -528,12 +524,12 @@ int msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
		WARN_ON(is_active(msm_obj) && (msm_obj->gpu != gpu));

		if (!is_active(msm_obj)) {
			uint32_t iova;
			uint64_t iova;

			/* ring takes a reference to the bo and iova: */
			drm_gem_object_reference(&msm_obj->base);
			msm_gem_get_iova_locked(&msm_obj->base,
					submit->gpu->id, &iova);
					submit->gpu->aspace, &iova);
		}

		if (submit->bos[i].flags & MSM_SUBMIT_BO_READ)
@@ -562,8 +558,8 @@ static irqreturn_t irq_handler(int irq, void *data)
}

static const char *clk_names[] = {
		"src_clk", "core_clk", "iface_clk", "mem_clk", "mem_iface_clk",
		"alt_mem_iface_clk",
		"src_clk", "core_clk", "iface_clk", "rbbmtimer_clk",
		"mem_clk", "mem_iface_clk", "alt_mem_iface_clk",
};

int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
@@ -627,6 +623,8 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
			gpu->grp_clks[i] = NULL;
	}

	gpu->grp_clks[0] = gpu->grp_clks[1];

	gpu->ebi1_clk = devm_clk_get(&pdev->dev, "bus_clk");
	DBG("ebi1_clk: %p", gpu->ebi1_clk);
	if (IS_ERR(gpu->ebi1_clk))
@@ -667,8 +665,6 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
	} else {
		dev_info(drm->dev, "%s: no IOMMU, fallback to VRAM carveout!\n", name);
	}
	gpu->id = msm_register_address_space(drm, gpu->aspace);

	/* Create ringbuffer: */
	mutex_lock(&drm->struct_mutex);
@@ -681,6 +677,14 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
		goto fail;
	}

#ifdef CONFIG_SMP
	gpu->pm_qos_req_dma.type = PM_QOS_REQ_AFFINE_IRQ;
	gpu->pm_qos_req_dma.irq = gpu->irq;
#endif

	pm_qos_add_request(&gpu->pm_qos_req_dma, PM_QOS_CPU_DMA_LATENCY,
			PM_QOS_DEFAULT_VALUE);

	bs_init(gpu);

	return 0;
@@ -699,7 +703,7 @@ void msm_gpu_cleanup(struct msm_gpu *gpu)

	if (gpu->rb) {
		if (gpu->rb_iova)
			msm_gem_put_iova(gpu->rb->bo, gpu->id);
			msm_gem_put_iova(gpu->rb->bo, gpu->aspace);
		msm_ringbuffer_destroy(gpu->rb);
	}
}

@@ -19,6 +19,7 @@
#define __MSM_GPU_H__

#include <linux/clk.h>
#include <linux/pm_qos.h>
#include <linux/regulator/consumer.h>

#include "msm_drv.h"
@@ -78,7 +79,7 @@ struct msm_gpu {
	uint32_t num_perfcntrs;

	struct msm_ringbuffer *rb;
	uint32_t rb_iova;
	uint64_t rb_iova;

	/* list of GEM active objects: */
	struct list_head active_list;
@@ -96,12 +97,17 @@ struct msm_gpu {
	int irq;

	struct msm_gem_address_space *aspace;
	int id;

	/* Power Control: */
	struct regulator *gpu_reg, *gpu_cx;
	struct clk *ebi1_clk, *grp_clks[6];
	uint32_t fast_rate, slow_rate, bus_freq;
	struct clk *ebi1_clk, *grp_clks[7];

	uint32_t gpufreq[10];
	uint32_t busfreq[10];
	uint32_t nr_pwrlevels;
	uint32_t active_level;

	struct pm_qos_request pm_qos_req_dma;

#ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING
	struct msm_bus_scale_pdata *bus_scale_table;
@@ -151,6 +157,45 @@ static inline u32 gpu_read(struct msm_gpu *gpu, u32 reg)
	return msm_readl(gpu->mmio + (reg << 2));
}

static inline void gpu_rmw(struct msm_gpu *gpu, u32 reg, u32 mask, u32 or)
{
	uint32_t val = gpu_read(gpu, reg);

	val &= ~mask;
	gpu_write(gpu, reg, val | or);
}

static inline u64 gpu_read64(struct msm_gpu *gpu, u32 lo, u32 hi)
{
	u64 val;

	/*
	 * Why not a readq here? Two reasons: 1) many of the LO registers are
	 * not quad word aligned and 2) the GPU hardware designers have a bit
	 * of a history of putting registers where they fit, especially in
	 * spins. The longer a GPU family goes the higher the chance that
	 * we'll get burned. We could do a series of validity checks if we
	 * wanted to, but really is a readq() that much better? Nah.
	 */

	/*
	 * For some lo/hi registers (like perfcounters), the hi value is latched
	 * when the lo is read, so make sure to read the lo first to trigger
	 * that
	 */
	val = (u64) msm_readl(gpu->mmio + (lo << 2));
	val |= ((u64) msm_readl(gpu->mmio + (hi << 2)) << 32);

	return val;
}

static inline void gpu_write64(struct msm_gpu *gpu, u32 lo, u32 hi, u64 val)
{
	/* Why not a writeq here? Read the screed above */
	msm_writel(lower_32_bits(val), gpu->mmio + (lo << 2));
	msm_writel(upper_32_bits(val), gpu->mmio + (hi << 2));
}
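A usage sketch for the new 64-bit accessors; the register names are hypothetical stand-ins for the LO/HI pairs defined in the generated a5xx headers:

/* Hypothetical LO/HI pair: gpu_read64() reads LO first, so a HI
 * half that latches on the LO read (e.g. perfcounters) stays
 * coherent with it.
 */
static inline u64 example_read_counter(struct msm_gpu *gpu)
{
	return gpu_read64(gpu, REG_EXAMPLE_CNT_LO, REG_EXAMPLE_CNT_HI);
}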

int msm_gpu_pm_suspend(struct msm_gpu *gpu);
int msm_gpu_pm_resume(struct msm_gpu *gpu);

@@ -15,6 +15,7 @@
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/of_platform.h>
#include "msm_drv.h"
#include "msm_mmu.h"

@@ -27,14 +28,49 @@ struct msm_iommu {
static int msm_fault_handler(struct iommu_domain *iommu, struct device *dev,
		unsigned long iova, int flags, void *arg)
{
	pr_warn_ratelimited("*** fault: iova=%08lx, flags=%d\n", iova, flags);
	pr_warn_ratelimited("*** fault: iova=%16llX, flags=%d\n", (u64) iova, flags);
	return 0;
}

static int msm_iommu_attach(struct msm_mmu *mmu, const char **names, int cnt)
{
	struct msm_iommu *iommu = to_msm_iommu(mmu);
	return iommu_attach_device(iommu->domain, mmu->dev);
	int i;

	/* See if there is an iommus member in the current device. If not,
	 * look through the names and see if there is one in there.
	 */

	if (of_find_property(mmu->dev->of_node, "iommus", NULL))
		return iommu_attach_device(iommu->domain, mmu->dev);

	/* Look through the list of names for a target */
	for (i = 0; i < cnt; i++) {
		struct device_node *node =
			of_find_node_by_name(mmu->dev->of_node, names[i]);

		if (!node)
			continue;

		if (of_find_property(node, "iommus", NULL)) {
			struct platform_device *pdev;

			/* Get the platform device for the node */
			of_platform_populate(node->parent, NULL, NULL,
					mmu->dev);

			pdev = of_find_device_by_node(node);

			if (!pdev)
				continue;

			mmu->dev = &pdev->dev;
			return iommu_attach_device(iommu->domain, mmu->dev);
		}
	}

	dev_err(mmu->dev, "Couldn't find an IOMMU device\n");
	return -ENODEV;
}

static void msm_iommu_detach(struct msm_mmu *mmu, const char **names, int cnt)
@@ -43,13 +79,13 @@ static void msm_iommu_detach(struct msm_mmu *mmu, const char **names, int cnt)
	iommu_detach_device(iommu->domain, mmu->dev);
}

static int msm_iommu_map(struct msm_mmu *mmu, uint32_t iova,
static int msm_iommu_map(struct msm_mmu *mmu, uint64_t iova,
		struct sg_table *sgt, int prot)
{
	struct msm_iommu *iommu = to_msm_iommu(mmu);
	struct iommu_domain *domain = iommu->domain;
	struct scatterlist *sg;
	unsigned int da = iova;
	uint64_t da = iova;
	unsigned int i, j;
	int ret;

@@ -60,7 +96,7 @@ static int msm_iommu_map(struct msm_mmu *mmu, uint32_t iova,
		phys_addr_t pa = sg_phys(sg) - sg->offset;
		size_t bytes = sg->length + sg->offset;

		VERB("map[%d]: %08x %pa(%zx)", i, iova, &pa, bytes);
		VERB("map[%d]: %016llx %pa(%zx)", i, iova, &pa, bytes);

		ret = iommu_map(domain, da, pa, bytes, prot);
		if (ret)
@@ -82,13 +118,13 @@ fail:
	return ret;
}

static int msm_iommu_unmap(struct msm_mmu *mmu, uint32_t iova,
static int msm_iommu_unmap(struct msm_mmu *mmu, uint64_t iova,
		struct sg_table *sgt)
{
	struct msm_iommu *iommu = to_msm_iommu(mmu);
	struct iommu_domain *domain = iommu->domain;
	struct scatterlist *sg;
	unsigned int da = iova;
	uint64_t da = iova;
	int i;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
@@ -99,7 +135,7 @@ static int msm_iommu_unmap(struct msm_mmu *mmu, uint32_t iova,
		if (unmapped < bytes)
			return unmapped;

		VERB("unmap[%d]: %08x(%zx)", i, iova, bytes);
		VERB("unmap[%d]: %016llx(%zx)", i, iova, bytes);

		BUG_ON(!PAGE_ALIGNED(bytes));

@@ -33,9 +33,9 @@ enum msm_mmu_domain_type {
struct msm_mmu_funcs {
	int (*attach)(struct msm_mmu *mmu, const char **names, int cnt);
	void (*detach)(struct msm_mmu *mmu, const char **names, int cnt);
	int (*map)(struct msm_mmu *mmu, uint32_t iova, struct sg_table *sgt,
	int (*map)(struct msm_mmu *mmu, uint64_t iova, struct sg_table *sgt,
			int prot);
	int (*unmap)(struct msm_mmu *mmu, uint32_t iova, struct sg_table *sgt);
	int (*unmap)(struct msm_mmu *mmu, uint64_t iova, struct sg_table *sgt);
	int (*map_sg)(struct msm_mmu *mmu, struct sg_table *sgt,
			enum dma_data_direction dir);
	void (*unmap_sg)(struct msm_mmu *mmu, struct sg_table *sgt,

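For reference, the attach hook above is driven through this ops table with a name list, the same way the SDE code later in this change invokes detach with an iommu_ports[] array; the port name here is illustrative only:

static const char *example_ports[] = { "gfx3d_user" }; /* name assumed */

/* Sketch: drive the attach hook the same way sde_kms drives detach. */
static int example_attach(struct msm_mmu *mmu)
{
	return mmu->funcs->attach(mmu, example_ports,
			ARRAY_SIZE(example_ports));
}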
@@ -307,7 +307,7 @@ void msm_rd_dump_submit(struct msm_gem_submit *submit)

	for (i = 0; i < submit->nr_cmds; i++) {
		uint32_t idx = submit->cmd[i].idx;
		uint32_t iova = submit->cmd[i].iova;
		uint64_t iova = submit->cmd[i].iova;
		uint32_t szd = submit->cmd[i].size; /* in dwords */
		struct msm_gem_object *obj = submit->bos[idx].obj;
		const char *buf = msm_gem_vaddr_locked(&obj->base);
@@ -315,7 +315,7 @@ void msm_rd_dump_submit(struct msm_gem_submit *submit)
		buf += iova - submit->bos[idx].iova;

		rd_write_section(rd, RD_GPUADDR,
				(uint32_t[2]){ iova, szd * 4 }, 8);
				(uint64_t[2]) { iova, szd * 4 }, 16);
		rd_write_section(rd, RD_BUFFER_CONTENTS,
				buf, szd * 4);

@@ -329,7 +329,7 @@ void msm_rd_dump_submit(struct msm_gem_submit *submit)
		case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
		case MSM_SUBMIT_CMD_BUF:
			rd_write_section(rd, RD_CMDSTREAM_ADDR,
					(uint32_t[2]){ iova, szd }, 8);
					(uint64_t[2]) { iova, szd }, 16);
			break;
		}
	}

@@ -104,14 +104,14 @@ static void msm_smmu_detach(struct msm_mmu *mmu, const char **names, int cnt)
	dev_dbg(client->dev, "iommu domain detached\n");
}

static int msm_smmu_map(struct msm_mmu *mmu, uint32_t iova,
static int msm_smmu_map(struct msm_mmu *mmu, uint64_t iova,
		struct sg_table *sgt, int prot)
{
	struct msm_smmu *smmu = to_msm_smmu(mmu);
	struct msm_smmu_client *client = msm_smmu_to_client(smmu);
	struct iommu_domain *domain;
	struct scatterlist *sg;
	unsigned int da = iova;
	uint64_t da = iova;
	unsigned int i, j;
	int ret;

@@ -126,7 +126,7 @@ static int msm_smmu_map(struct msm_mmu *mmu, uint32_t iova,
		u32 pa = sg_phys(sg) - sg->offset;
		size_t bytes = sg->length + sg->offset;

		VERB("map[%d]: %08x %08x(%zx)", i, iova, pa, bytes);
		VERB("map[%d]: %16llx %08x(%zx)", i, iova, pa, bytes);

		ret = iommu_map(domain, da, pa, bytes, prot);
		if (ret)
@@ -172,14 +172,14 @@ static void msm_smmu_unmap_sg(struct msm_mmu *mmu, struct sg_table *sgt,
	dma_unmap_sg(client->dev, sgt->sgl, sgt->nents, dir);
}

static int msm_smmu_unmap(struct msm_mmu *mmu, uint32_t iova,
static int msm_smmu_unmap(struct msm_mmu *mmu, uint64_t iova,
		struct sg_table *sgt)
{
	struct msm_smmu *smmu = to_msm_smmu(mmu);
	struct msm_smmu_client *client = msm_smmu_to_client(smmu);
	struct iommu_domain *domain;
	struct scatterlist *sg;
	unsigned int da = iova;
	uint64_t da = iova;
	int i;

	if (!client)
@@ -197,7 +197,7 @@ static int msm_smmu_unmap(struct msm_mmu *mmu, uint32_t iova,
		if (unmapped < bytes)
			return unmapped;

		VERB("unmap[%d]: %08x(%zx)", i, iova, bytes);
		VERB("unmap[%d]: %16llx(%zx)", i, iova, bytes);

		WARN_ON(!PAGE_ALIGNED(bytes));

@@ -1,4 +1,4 @@
/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
@@ -88,8 +88,7 @@ static void _sde_connector_destroy_fb(struct sde_connector *c_conn,
		return;
	}

	msm_framebuffer_cleanup(c_state->out_fb,
			c_state->mmu_id);
	msm_framebuffer_cleanup(c_state->out_fb, c_state->aspace);
	drm_framebuffer_unreference(c_state->out_fb);
	c_state->out_fb = NULL;

@@ -193,7 +192,7 @@ sde_connector_atomic_duplicate_state(struct drm_connector *connector)
	if (c_state->out_fb) {
		drm_framebuffer_reference(c_state->out_fb);
		rc = msm_framebuffer_prepare(c_state->out_fb,
				c_state->mmu_id);
				c_state->aspace);
		if (rc)
			SDE_ERROR("failed to prepare fb, %d\n", rc);
	}
@@ -241,14 +240,14 @@ static int sde_connector_atomic_set_property(struct drm_connector *connector,
			rc = -EFAULT;
		} else {
			if (c_state->out_fb->flags & DRM_MODE_FB_SECURE)
				c_state->mmu_id =
					c_conn->mmu_id[SDE_IOMMU_DOMAIN_SECURE];
				c_state->aspace =
					c_conn->aspace[SDE_IOMMU_DOMAIN_SECURE];
			else
				c_state->mmu_id =
					c_conn->mmu_id[SDE_IOMMU_DOMAIN_UNSECURE];
				c_state->aspace =
					c_conn->aspace[SDE_IOMMU_DOMAIN_UNSECURE];

			rc = msm_framebuffer_prepare(c_state->out_fb,
					c_state->mmu_id);
					c_state->aspace);
			if (rc)
				SDE_ERROR("prep fb failed, %d\n", rc);
		}
@@ -492,18 +491,17 @@ struct drm_connector *sde_connector_init(struct drm_device *dev,
	c_conn->panel = panel;
	c_conn->display = display;

	/* cache mmu_id's for later */
	sde_kms = to_sde_kms(priv->kms);
	if (sde_kms->vbif[VBIF_NRT]) {
		c_conn->mmu_id[SDE_IOMMU_DOMAIN_UNSECURE] =
			sde_kms->mmu_id[MSM_SMMU_DOMAIN_NRT_UNSECURE];
		c_conn->mmu_id[SDE_IOMMU_DOMAIN_SECURE] =
			sde_kms->mmu_id[MSM_SMMU_DOMAIN_NRT_SECURE];
		c_conn->aspace[SDE_IOMMU_DOMAIN_UNSECURE] =
			sde_kms->aspace[MSM_SMMU_DOMAIN_NRT_UNSECURE];
		c_conn->aspace[SDE_IOMMU_DOMAIN_SECURE] =
			sde_kms->aspace[MSM_SMMU_DOMAIN_NRT_SECURE];
	} else {
		c_conn->mmu_id[SDE_IOMMU_DOMAIN_UNSECURE] =
			sde_kms->mmu_id[MSM_SMMU_DOMAIN_UNSECURE];
		c_conn->mmu_id[SDE_IOMMU_DOMAIN_SECURE] =
			sde_kms->mmu_id[MSM_SMMU_DOMAIN_SECURE];
		c_conn->aspace[SDE_IOMMU_DOMAIN_UNSECURE] =
			sde_kms->aspace[MSM_SMMU_DOMAIN_UNSECURE];
		c_conn->aspace[SDE_IOMMU_DOMAIN_SECURE] =
			sde_kms->aspace[MSM_SMMU_DOMAIN_SECURE];
	}

	if (ops)

@@ -1,4 +1,4 @@
/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
@@ -140,7 +140,7 @@ struct sde_connector {
	struct drm_panel *panel;
	void *display;

	int mmu_id[SDE_IOMMU_DOMAIN_MAX];
	struct msm_gem_address_space *aspace[SDE_IOMMU_DOMAIN_MAX];

	char name[SDE_CONNECTOR_NAME_SIZE];

@@ -195,13 +195,13 @@ struct sde_connector {
 * struct sde_connector_state - private connector status structure
 * @base: Base drm connector structure
 * @out_fb: Pointer to output frame buffer, if applicable
 * @mmu_id: MMU ID for accessing frame buffer objects, if applicable
 * @aspace: Address space for accessing frame buffer objects, if applicable
 * @property_values: Local cache of current connector property values
 */
struct sde_connector_state {
	struct drm_connector_state base;
	struct drm_framebuffer *out_fb;
	int mmu_id;
	struct msm_gem_address_space *aspace;
	uint64_t property_values[CONNECTOR_PROP_COUNT];
};

@@ -264,7 +264,7 @@ struct sde_encoder_phys_cmd {
 * @wb_fmt: Writeback pixel format
 * @frame_count: Counter of completed writeback operations
 * @kickoff_count: Counter of issued writeback operations
 * @mmu_id: mmu identifier for non-secure/secure domain
 * @aspace: address space identifier for non-secure/secure domain
 * @wb_dev: Pointer to writeback device
 * @start_time: Start time of writeback latest request
 * @end_time: End time of writeback latest request
@@ -285,7 +285,7 @@ struct sde_encoder_phys_wb {
	const struct sde_format *wb_fmt;
	u32 frame_count;
	u32 kickoff_count;
	int mmu_id[SDE_IOMMU_DOMAIN_MAX];
	struct msm_gem_address_space *aspace[SDE_IOMMU_DOMAIN_MAX];
	struct sde_wb_device *wb_dev;
	ktime_t start_time;
	ktime_t end_time;

@@ -180,7 +180,8 @@ static void sde_encoder_phys_wb_setup_fb(struct sde_encoder_phys *phys_enc,
	struct sde_hw_wb *hw_wb;
	struct sde_hw_wb_cfg *wb_cfg;
	const struct msm_format *format;
	int ret, mmu_id;
	int ret;
	struct msm_gem_address_space *aspace;

	if (!phys_enc) {
		SDE_ERROR("invalid encoder\n");
@@ -193,9 +194,9 @@ static void sde_encoder_phys_wb_setup_fb(struct sde_encoder_phys *phys_enc,

	wb_cfg->intf_mode = phys_enc->intf_mode;
	wb_cfg->is_secure = (fb->flags & DRM_MODE_FB_SECURE) ? true : false;
	mmu_id = (wb_cfg->is_secure) ?
			wb_enc->mmu_id[SDE_IOMMU_DOMAIN_SECURE] :
			wb_enc->mmu_id[SDE_IOMMU_DOMAIN_UNSECURE];
	aspace = (wb_cfg->is_secure) ?
			wb_enc->aspace[SDE_IOMMU_DOMAIN_SECURE] :
			wb_enc->aspace[SDE_IOMMU_DOMAIN_UNSECURE];

	SDE_DEBUG("[fb_secure:%d]\n", wb_cfg->is_secure);

@@ -217,7 +218,7 @@ static void sde_encoder_phys_wb_setup_fb(struct sde_encoder_phys *phys_enc,
		wb_cfg->roi = *wb_roi;

		if (hw_wb->caps->features & BIT(SDE_WB_XY_ROI_OFFSET)) {
			ret = sde_format_populate_layout(mmu_id, fb, &wb_cfg->dest);
			ret = sde_format_populate_layout(aspace, fb, &wb_cfg->dest);
			if (ret) {
				SDE_DEBUG("failed to populate layout %d\n", ret);
				return;
@@ -226,7 +227,7 @@ static void sde_encoder_phys_wb_setup_fb(struct sde_encoder_phys *phys_enc,
		wb_cfg->dest.height = fb->height;
		wb_cfg->dest.num_planes = wb_cfg->dest.format->num_planes;
	} else {
		ret = sde_format_populate_layout_with_roi(mmu_id, fb, wb_roi,
		ret = sde_format_populate_layout_with_roi(aspace, fb, wb_roi,
				&wb_cfg->dest);
		if (ret) {
			/* this error should be detected during atomic_check */
@@ -1017,15 +1018,15 @@ struct sde_encoder_phys *sde_encoder_phys_wb_init(
	phys_enc = &wb_enc->base;

	if (p->sde_kms->vbif[VBIF_NRT]) {
		wb_enc->mmu_id[SDE_IOMMU_DOMAIN_UNSECURE] =
			p->sde_kms->mmu_id[MSM_SMMU_DOMAIN_NRT_UNSECURE];
		wb_enc->mmu_id[SDE_IOMMU_DOMAIN_SECURE] =
			p->sde_kms->mmu_id[MSM_SMMU_DOMAIN_NRT_SECURE];
		wb_enc->aspace[SDE_IOMMU_DOMAIN_UNSECURE] =
			p->sde_kms->aspace[MSM_SMMU_DOMAIN_NRT_UNSECURE];
		wb_enc->aspace[SDE_IOMMU_DOMAIN_SECURE] =
			p->sde_kms->aspace[MSM_SMMU_DOMAIN_NRT_SECURE];
	} else {
		wb_enc->mmu_id[SDE_IOMMU_DOMAIN_UNSECURE] =
			p->sde_kms->mmu_id[MSM_SMMU_DOMAIN_UNSECURE];
		wb_enc->mmu_id[SDE_IOMMU_DOMAIN_SECURE] =
			p->sde_kms->mmu_id[MSM_SMMU_DOMAIN_SECURE];
		wb_enc->aspace[SDE_IOMMU_DOMAIN_UNSECURE] =
			p->sde_kms->aspace[MSM_SMMU_DOMAIN_UNSECURE];
		wb_enc->aspace[SDE_IOMMU_DOMAIN_SECURE] =
			p->sde_kms->aspace[MSM_SMMU_DOMAIN_SECURE];
	}

	hw_mdp = sde_rm_get_mdp(&p->sde_kms->rm);

@@ -1,4 +1,4 @@
/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
@@ -630,7 +630,7 @@ static int _sde_format_get_plane_sizes(
}

static int _sde_format_populate_addrs_ubwc(
		int mmu_id,
		struct msm_gem_address_space *aspace,
		struct drm_framebuffer *fb,
		struct sde_hw_fmt_layout *layout)
{
@@ -641,7 +641,7 @@ static int _sde_format_populate_addrs_ubwc(
		return -EINVAL;
	}

	base_addr = msm_framebuffer_iova(fb, mmu_id, 0);
	base_addr = msm_framebuffer_iova(fb, aspace, 0);
	if (!base_addr) {
		DRM_ERROR("failed to retrieve base addr\n");
		return -EFAULT;
@@ -711,7 +711,7 @@ static int _sde_format_populate_addrs_ubwc(
}

static int _sde_format_populate_addrs_linear(
		int mmu_id,
		struct msm_gem_address_space *aspace,
		struct drm_framebuffer *fb,
		struct sde_hw_fmt_layout *layout)
{
@@ -728,7 +728,7 @@ static int _sde_format_populate_addrs_linear(

	/* Populate addresses for simple formats here */
	for (i = 0; i < layout->num_planes; ++i) {
		layout->plane_addr[i] = msm_framebuffer_iova(fb, mmu_id, i);
		layout->plane_addr[i] = msm_framebuffer_iova(fb, aspace, i);
		if (!layout->plane_addr[i]) {
			DRM_ERROR("failed to retrieve base addr\n");
			return -EFAULT;
@@ -739,7 +739,7 @@ static int _sde_format_populate_addrs_linear(
}

int sde_format_populate_layout(
		int mmu_id,
		struct msm_gem_address_space *aspace,
		struct drm_framebuffer *fb,
		struct sde_hw_fmt_layout *layout)
{
@@ -770,9 +770,9 @@ int sde_format_populate_layout(

	/* Populate the addresses given the fb */
	if (SDE_FORMAT_IS_UBWC(layout->format))
		ret = _sde_format_populate_addrs_ubwc(mmu_id, fb, layout);
		ret = _sde_format_populate_addrs_ubwc(aspace, fb, layout);
	else
		ret = _sde_format_populate_addrs_linear(mmu_id, fb, layout);
		ret = _sde_format_populate_addrs_linear(aspace, fb, layout);

	/* check if anything changed */
	if (!ret && !memcmp(plane_addr, layout->plane_addr, sizeof(plane_addr)))
@@ -814,14 +814,14 @@ static void _sde_format_calc_offset_linear(struct sde_hw_fmt_layout *source,
}

int sde_format_populate_layout_with_roi(
		int mmu_id,
		struct msm_gem_address_space *aspace,
		struct drm_framebuffer *fb,
		struct sde_rect *roi,
		struct sde_hw_fmt_layout *layout)
{
	int ret;

	ret = sde_format_populate_layout(mmu_id, fb, layout);
	ret = sde_format_populate_layout(aspace, fb, layout);
	if (ret || !roi)
		return ret;

@@ -1,4 +1,4 @@
/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
@@ -14,6 +14,7 @@
#define _SDE_FORMATS_H

#include <drm/drm_fourcc.h>
#include "msm_gem.h"
#include "sde_hw_mdss.h"

/**
@@ -76,7 +77,7 @@ int sde_format_check_modified_format(
/**
 * sde_format_populate_layout - populate the given format layout based on
 *                     mmu, fb, and format found in the fb
 * @mmu_id: mmu id handle
 * @aspace: address space pointer
 * @fb: framebuffer pointer
 * @fmtl: format layout structure to populate
 *
@@ -84,14 +85,14 @@ int sde_format_check_modified_format(
 * are the same as before or 0 if new addresses were populated
 */
int sde_format_populate_layout(
		int mmu_id,
		struct msm_gem_address_space *aspace,
		struct drm_framebuffer *fb,
		struct sde_hw_fmt_layout *fmtl);

/**
 * sde_format_populate_layout_with_roi - populate the given format layout
 *                     based on mmu, fb, roi, and format found in the fb
 * @mmu_id: mmu id handle
 * @aspace: address space pointer
 * @fb: framebuffer pointer
 * @roi: region of interest (optional)
 * @fmtl: format layout structure to populate
@@ -99,7 +100,7 @@ int sde_format_populate_layout(
 * Return: error code on failure, 0 on success
 */
int sde_format_populate_layout_with_roi(
		int mmu_id,
		struct msm_gem_address_space *aspace,
		struct drm_framebuffer *fb,
		struct sde_rect *roi,
		struct sde_hw_fmt_layout *fmtl);

@@ -940,17 +940,17 @@ static int _sde_kms_mmu_destroy(struct sde_kms *sde_kms)
	struct msm_mmu *mmu;
	int i;

	for (i = ARRAY_SIZE(sde_kms->mmu_id) - 1; i >= 0; i--) {
		mmu = sde_kms->aspace[i]->mmu;

		if (!mmu)
	for (i = ARRAY_SIZE(sde_kms->aspace) - 1; i >= 0; i--) {
		if (!sde_kms->aspace[i])
			continue;

		mmu = sde_kms->aspace[i]->mmu;

		mmu->funcs->detach(mmu, (const char **)iommu_ports,
				ARRAY_SIZE(iommu_ports));
		msm_gem_address_space_destroy(sde_kms->aspace[i]);

		sde_kms->mmu_id[i] = 0;
		sde_kms->aspace[i] = NULL;
	}

	return 0;
@@ -991,17 +991,6 @@ static int _sde_kms_mmu_init(struct sde_kms *sde_kms)
			goto fail;
		}

		sde_kms->mmu_id[i] = msm_register_address_space(sde_kms->dev,
				aspace);
		if (sde_kms->mmu_id[i] < 0) {
			ret = sde_kms->mmu_id[i];
			SDE_ERROR("failed to register sde iommu %d: %d\n",
					i, ret);
			mmu->funcs->detach(mmu, (const char **)iommu_ports,
					ARRAY_SIZE(iommu_ports));
			msm_gem_address_space_destroy(aspace);
			goto fail;
		}
	}

	return 0;

@@ -123,7 +123,6 @@ struct sde_kms {
	struct sde_mdss_cfg *catalog;

	struct msm_gem_address_space *aspace[MSM_SMMU_DOMAIN_MAX];
	int mmu_id[MSM_SMMU_DOMAIN_MAX];
	struct sde_power_client *core_client;

	/* directory entry for debugfs */

@@ -86,7 +86,7 @@ enum sde_plane_qos {
struct sde_plane {
	struct drm_plane base;

	int mmu_id;
	struct msm_gem_address_space *aspace;

	struct mutex lock;

@@ -580,7 +580,7 @@ static inline void _sde_plane_set_scanout(struct drm_plane *plane,
		return;
	}

	ret = sde_format_populate_layout(psde->mmu_id, fb, &pipe_cfg->layout);
	ret = sde_format_populate_layout(psde->aspace, fb, &pipe_cfg->layout);
	if (ret == -EAGAIN)
		SDE_DEBUG_PLANE(psde, "not updating same src addrs\n");
	else if (ret)
@@ -1285,7 +1285,7 @@ static int sde_plane_prepare_fb(struct drm_plane *plane,
		return 0;

	SDE_DEBUG_PLANE(psde, "FB[%u]\n", fb->base.id);
	return msm_framebuffer_prepare(fb, psde->mmu_id);
	return msm_framebuffer_prepare(fb, psde->aspace);
}

static void sde_plane_cleanup_fb(struct drm_plane *plane,
@@ -1294,11 +1294,11 @@ static void sde_plane_cleanup_fb(struct drm_plane *plane,
	struct drm_framebuffer *fb = old_state ? old_state->fb : NULL;
	struct sde_plane *psde = plane ? to_sde_plane(plane) : NULL;

	if (!fb)
	if (!fb || !psde)
		return;

	SDE_DEBUG_PLANE(psde, "FB[%u]\n", fb->base.id);
	msm_framebuffer_cleanup(fb, psde->mmu_id);
	msm_framebuffer_cleanup(fb, psde->aspace);
}

static void _sde_plane_atomic_check_mode_changed(struct sde_plane *psde,
@@ -2384,7 +2384,7 @@ struct drm_plane *sde_plane_init(struct drm_device *dev,
	/* cache local stuff for later */
	plane = &psde->base;
	psde->pipe = pipe;
	psde->mmu_id = kms->mmu_id[MSM_SMMU_DOMAIN_UNSECURE];
	psde->aspace = kms->aspace[MSM_SMMU_DOMAIN_UNSECURE];

	/* initialize underlying h/w driver */
	psde->pipe_hw = sde_hw_sspp_init(pipe, kms->mmio, kms->catalog);

@@ -85,10 +85,14 @@ struct drm_msm_gem_new {
	__u32 handle;         /* out */
};

#define MSM_INFO_IOVA 0x01

#define MSM_INFO_FLAGS (MSM_INFO_IOVA)

struct drm_msm_gem_info {
	__u32 handle;         /* in */
	__u32 pad;
	__u64 offset;         /* out, offset to pass to mmap() */
	__u32 flags;          /* in - combination of MSM_INFO_* flags */
	__u64 offset;         /* out, mmap() offset or iova */
};

#define MSM_PREP_READ 0x01
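Tying the uapi change back to the commit subject: with MSM_INFO_IOVA set, offset carries the object's GPU address rather than an mmap() token. A userspace sketch, assuming libdrm's drmIoctl() and a valid GEM handle:

/* Userspace sketch: query a BO's GPU iova via the new hint. */
#include <stdint.h>
#include <stdio.h>
#include <xf86drm.h>
#include <drm/msm_drm.h>

static int print_iova(int fd, uint32_t handle)
{
	struct drm_msm_gem_info req = {
		.handle = handle,	/* assumed: valid GEM handle */
		.flags = MSM_INFO_IOVA,
	};

	if (drmIoctl(fd, DRM_IOCTL_MSM_GEM_INFO, &req))
		return -1;

	/* offset now carries the iova, not an mmap() offset */
	printf("iova: 0x%llx\n", (unsigned long long)req.offset);
	return 0;
}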