Promotion of kernel.lnx.4.4-161219.

CRs      Change ID                                   Subject
--------------------------------------------------------------------------------------------------------------
1101906   Idc51070e2bb36d1a757d2714d2875a99901321a7   clk: qcom: add common clock framework support for MDSS P
1098508   Ib0a2361f854ae01d0d8090cdd48cfa96308daf93   msm: kgsl: Add Bind objects to dispatcher draw queue
1099255   Ib32d40351179a687eca38228c4503e4a9a88c28d   ARM: dts: msm: Move boot_rom_ahb_clk to proxy voted for
1095243   I18f549102e626dc788e8fa56d6bb1ea28efe4f88   soc: qcom: pil: Initialize variable to avoid invalid acc
1101152   I9f2cb058a7e59b573fc64662ee7b5bff49b18ea7   soc: qcom: pil-q6v5: Update the reset sequence for qdspv
1101152   Id5b8aaae0783893290e95626b394841a7d3808a3   ARM: dts: msm: Remove write of acc register for MSMFalco

Change-Id: If07ab3394784f1dcb31098e5d4737265f136883f
CRs-Fixed: 1099255, 1101906, 1098508, 1101152, 1095243
This commit is contained in:
Linux Build Service Account 2016-12-19 01:57:00 -07:00
commit 2011e141ad
28 changed files with 1399 additions and 922 deletions

View file

@ -2037,10 +2037,9 @@
clock-names = "xo", "iface_clk", "bus_clk",
"mem_clk", "gpll0_mss_clk", "snoc_axi_clk",
"mnoc_axi_clk", "qdss_clk";
qcom,proxy-clock-names = "xo", "qdss_clk";
qcom,active-clock-names = "iface_clk", "bus_clk", "mem_clk",
"gpll0_mss_clk", "snoc_axi_clk",
"mnoc_axi_clk";
qcom,proxy-clock-names = "xo", "qdss_clk", "mem_clk";
qcom,active-clock-names = "iface_clk", "bus_clk",
"gpll0_mss_clk", "snoc_axi_clk", "mnoc_axi_clk";
interrupts = <0 448 1>;
vdd_cx-supply = <&pm8998_s1_level>;
@ -2051,7 +2050,6 @@
qcom,pil-self-auth;
qcom,sysmon-id = <0>;
qcom,ssctl-instance-id = <0x12>;
qcom,override-acc;
qcom,qdsp6v62-1-2;
status = "ok";
memory-region = <&modem_mem>;

View file

@ -1372,7 +1372,6 @@
qcom,pil-self-auth;
qcom,sysmon-id = <0>;
qcom,ssctl-instance-id = <0x12>;
qcom,override-acc;
qcom,qdsp6v62-1-5;
memory-region = <&modem_fw_mem>;
qcom,mem-protect-id = <0xF>;

View file

@ -902,7 +902,6 @@
qcom,pil-self-auth;
qcom,sysmon-id = <0>;
qcom,ssctl-instance-id = <0x12>;
qcom,override-acc;
qcom,qdsp6v62-1-5;
memory-region = <&modem_fw_mem>;
qcom,mem-protect-id = <0xF>;

View file

@ -218,3 +218,5 @@ config QCOM_A53
Support for the A53 clock controller on MSM devices.
Say Y if you want to support CPU frequency scaling on devices
such as MSM8916.
source "drivers/clk/qcom/mdss/Kconfig"

View file

@ -37,3 +37,5 @@ obj-$(CONFIG_KRAITCC) += krait-cc.o
obj-$(CONFIG_QCOM_A53) += clk-a53.o
obj-$(CONFIG_QCOM_CLK_RPM) += clk-rpm.o
obj-$(CONFIG_QCOM_CLK_SMD_RPM) += clk-smd-rpm.o
obj-y += mdss/

View file

@ -1,5 +1,6 @@
config MSM_MDSS_PLL
config QCOM_MDSS_PLL
bool "MDSS pll programming"
depends on COMMON_CLK_QCOM
---help---
It provides support for DSI, eDP and HDMI interface pll programming on MDSS
hardware. It also handles the pll specific resources and turn them on/off when

View file

@ -1,5 +1,4 @@
obj-$(CONFIG_MSM_MDSS_PLL) += mdss-pll-util.o
obj-$(CONFIG_MSM_MDSS_PLL) += mdss-pll.o
obj-$(CONFIG_MSM_MDSS_PLL) += mdss-dsi-pll-8996.o
obj-$(CONFIG_MSM_MDSS_PLL) += mdss-dsi-pll-8996-util.o
obj-$(CONFIG_MSM_MDSS_PLL) += mdss-hdmi-pll-8996.o
obj-$(CONFIG_QCOM_MDSS_PLL) += mdss-pll-util.o
obj-$(CONFIG_QCOM_MDSS_PLL) += mdss-pll.o
obj-$(CONFIG_QCOM_MDSS_PLL) += mdss-dsi-pll-14nm.o
obj-$(CONFIG_QCOM_MDSS_PLL) += mdss-dsi-pll-14nm-util.o

View file

@ -16,38 +16,19 @@
#include <linux/err.h>
#include <linux/iopoll.h>
#include <linux/delay.h>
#include <linux/clk/msm-clock-generic.h>
#include "mdss-pll.h"
#include "mdss-dsi-pll.h"
#include "mdss-dsi-pll-8996.h"
#include "mdss-dsi-pll-14nm.h"
#define DSI_PLL_POLL_MAX_READS 15
#define DSI_PLL_POLL_TIMEOUT_US 1000
#define MSM8996_DSI_PLL_REVISION_2 2
#define VCO_REF_CLK_RATE 19200000
#define CEIL(x, y) (((x) + ((y)-1)) / (y))
int set_mdss_byte_mux_sel_8996(struct mux_clk *clk, int sel)
{
return 0;
}
int get_mdss_byte_mux_sel_8996(struct mux_clk *clk)
{
return 0;
}
int set_mdss_pixel_mux_sel_8996(struct mux_clk *clk, int sel)
{
return 0;
}
int get_mdss_pixel_mux_sel_8996(struct mux_clk *clk)
{
return 0;
}
static int mdss_pll_read_stored_trim_codes(
struct mdss_pll_resources *dsi_pll_res, s64 vco_clk_rate)
{
@ -94,9 +75,9 @@ end_read:
return rc;
}
int post_n1_div_set_div(struct div_clk *clk, int div)
int post_n1_div_set_div(void *context, unsigned int reg, unsigned int div)
{
struct mdss_pll_resources *pll = clk->priv;
struct mdss_pll_resources *pll = context;
struct dsi_pll_db *pdb;
struct dsi_pll_output *pout;
int rc;
@ -108,6 +89,9 @@ int post_n1_div_set_div(struct div_clk *clk, int div)
return rc;
}
/* in common clock framework the divider value provided is one less */
div++;
pdb = (struct dsi_pll_db *)pll->priv;
pout = &pdb->out;
@ -120,8 +104,6 @@ int post_n1_div_set_div(struct div_clk *clk, int div)
* support bit_clk above 86.67Mhz
*/
/* this is for vco/bit clock */
pout->pll_postdiv = 1; /* fixed, divided by 1 */
pout->pll_n1div = div;
n1div = MDSS_PLL_REG_R(pll->pll_base, DSIPHY_CMN_CLK_CFG0);
@ -138,11 +120,15 @@ int post_n1_div_set_div(struct div_clk *clk, int div)
return 0;
}
int post_n1_div_get_div(struct div_clk *clk)
int post_n1_div_get_div(void *context, unsigned int reg, unsigned int *div)
{
u32 div;
int rc;
struct mdss_pll_resources *pll = clk->priv;
struct mdss_pll_resources *pll = context;
struct dsi_pll_db *pdb;
struct dsi_pll_output *pout;
pdb = (struct dsi_pll_db *)pll->priv;
pout = &pdb->out;
if (is_gdsc_disabled(pll))
return 0;
@ -159,20 +145,33 @@ int post_n1_div_get_div(struct div_clk *clk)
* fot the time being, assume postdiv = 1
*/
div = MDSS_PLL_REG_R(pll->pll_base, DSIPHY_CMN_CLK_CFG0);
div &= 0xF;
pr_debug("n1 div = %d\n", div);
*div = MDSS_PLL_REG_R(pll->pll_base, DSIPHY_CMN_CLK_CFG0);
*div &= 0xF;
/*
* initialize n1div here, it will get updated when
* corresponding set_div is called.
*/
pout->pll_n1div = *div;
/* common clock framework will add one to the divider value sent */
if (*div == 0)
*div = 1; /* value of zero means div is 2 as per SWI */
else
*div -= 1;
pr_debug("post n1 get div = %d\n", *div);
mdss_pll_resource_enable(pll, false);
return div;
return rc;
}
int n2_div_set_div(struct div_clk *clk, int div)
int n2_div_set_div(void *context, unsigned int reg, unsigned int div)
{
int rc;
u32 n2div;
struct mdss_pll_resources *pll = clk->priv;
struct mdss_pll_resources *pll = context;
struct dsi_pll_db *pdb;
struct dsi_pll_output *pout;
struct mdss_pll_resources *slave;
@ -183,6 +182,12 @@ int n2_div_set_div(struct div_clk *clk, int div)
return rc;
}
/*
* in common clock framework the actual divider value
* provided is one less.
*/
div++;
pdb = (struct dsi_pll_db *)pll->priv;
pout = &pdb->out;
@ -208,9 +213,9 @@ int n2_div_set_div(struct div_clk *clk, int div)
return rc;
}
int shadow_n2_div_set_div(struct div_clk *clk, int div)
int shadow_n2_div_set_div(void *context, unsigned int reg, unsigned int div)
{
struct mdss_pll_resources *pll = clk->priv;
struct mdss_pll_resources *pll = context;
struct dsi_pll_db *pdb;
struct dsi_pll_output *pout;
u32 data;
@ -218,6 +223,12 @@ int shadow_n2_div_set_div(struct div_clk *clk, int div)
pdb = pll->priv;
pout = &pdb->out;
/*
* in common clock framework the actual divider value
* provided is one less.
*/
div++;
pout->pll_n2div = div;
data = (pout->pll_n1div | (pout->pll_n2div << 4));
@ -228,15 +239,20 @@ int shadow_n2_div_set_div(struct div_clk *clk, int div)
return 0;
}
int n2_div_get_div(struct div_clk *clk)
int n2_div_get_div(void *context, unsigned int reg, unsigned int *div)
{
int rc;
u32 n2div;
struct mdss_pll_resources *pll = clk->priv;
struct mdss_pll_resources *pll = context;
struct dsi_pll_db *pdb;
struct dsi_pll_output *pout;
if (is_gdsc_disabled(pll))
return 0;
pdb = (struct dsi_pll_db *)pll->priv;
pout = &pdb->out;
rc = mdss_pll_resource_enable(pll, true);
if (rc) {
pr_err("Failed to enable mdss dsi pll=%d resources\n",
@ -247,15 +263,27 @@ int n2_div_get_div(struct div_clk *clk)
n2div = MDSS_PLL_REG_R(pll->pll_base, DSIPHY_CMN_CLK_CFG0);
n2div >>= 4;
n2div &= 0x0f;
/*
* initialize n2div here, it will get updated when
* corresponding set_div is called.
*/
pout->pll_n2div = n2div;
mdss_pll_resource_enable(pll, false);
pr_debug("ndx=%d div=%d\n", pll->index, n2div);
*div = n2div;
return n2div;
/* common clock framework will add one to the divider value sent */
if (*div == 0)
*div = 1; /* value of zero means div is 2 as per SWI */
else
*div -= 1;
pr_debug("ndx=%d div=%d\n", pll->index, *div);
return rc;
}
static bool pll_is_pll_locked_8996(struct mdss_pll_resources *pll)
static bool pll_is_pll_locked_14nm(struct mdss_pll_resources *pll)
{
u32 status;
bool pll_locked;
@ -286,7 +314,7 @@ static bool pll_is_pll_locked_8996(struct mdss_pll_resources *pll)
return pll_locked;
}
static void dsi_pll_start_8996(void __iomem *pll_base)
static void dsi_pll_start_14nm(void __iomem *pll_base)
{
pr_debug("start PLL at base=%p\n", pll_base);
@ -294,14 +322,14 @@ static void dsi_pll_start_8996(void __iomem *pll_base)
MDSS_PLL_REG_W(pll_base, DSIPHY_CMN_PLL_CNTRL, 1);
}
static void dsi_pll_stop_8996(void __iomem *pll_base)
static void dsi_pll_stop_14nm(void __iomem *pll_base)
{
pr_debug("stop PLL at base=%p\n", pll_base);
MDSS_PLL_REG_W(pll_base, DSIPHY_CMN_PLL_CNTRL, 0);
}
int dsi_pll_enable_seq_8996(struct mdss_pll_resources *pll)
int dsi_pll_enable_seq_14nm(struct mdss_pll_resources *pll)
{
int rc = 0;
@ -310,14 +338,14 @@ int dsi_pll_enable_seq_8996(struct mdss_pll_resources *pll)
return -EINVAL;
}
dsi_pll_start_8996(pll->pll_base);
dsi_pll_start_14nm(pll->pll_base);
/*
* both DSIPHY_PLL_CLKBUFLR_EN and DSIPHY_CMN_GLBL_TEST_CTRL
* enabled at mdss_dsi_8996_phy_config()
* enabled at mdss_dsi_14nm_phy_config()
*/
if (!pll_is_pll_locked_8996(pll)) {
if (!pll_is_pll_locked_14nm(pll)) {
pr_err("DSI PLL ndx=%d lock failed\n", pll->index);
rc = -EINVAL;
goto init_lock_err;
@ -329,10 +357,10 @@ init_lock_err:
return rc;
}
static int dsi_pll_enable(struct clk *c)
static int dsi_pll_enable(struct clk_hw *hw)
{
int i, rc = 0;
struct dsi_pll_vco_clk *vco = to_vco_clk(c);
struct dsi_pll_vco_clk *vco = to_vco_hw(hw);
struct mdss_pll_resources *pll = vco->priv;
/* Try all enable sequences until one succeeds */
@ -352,9 +380,9 @@ static int dsi_pll_enable(struct clk *c)
return rc;
}
static void dsi_pll_disable(struct clk *c)
static void dsi_pll_disable(struct clk_hw *hw)
{
struct dsi_pll_vco_clk *vco = to_vco_clk(c);
struct dsi_pll_vco_clk *vco = to_vco_hw(hw);
struct mdss_pll_resources *pll = vco->priv;
struct mdss_pll_resources *slave;
@ -367,7 +395,7 @@ static void dsi_pll_disable(struct clk *c)
pll->handoff_resources = false;
slave = pll->slave;
dsi_pll_stop_8996(pll->pll_base);
dsi_pll_stop_14nm(pll->pll_base);
mdss_pll_resource_enable(pll, false);
@ -376,7 +404,7 @@ static void dsi_pll_disable(struct clk *c)
pr_debug("DSI PLL ndx=%d Disabled\n", pll->index);
}
static void mdss_dsi_pll_8996_input_init(struct mdss_pll_resources *pll,
static void mdss_dsi_pll_14nm_input_init(struct mdss_pll_resources *pll,
struct dsi_pll_db *pdb)
{
pdb->in.fref = 19200000; /* 19.2 Mhz*/
@ -414,9 +442,10 @@ static void mdss_dsi_pll_8996_input_init(struct mdss_pll_resources *pll,
pdb->in.pll_iptat_trim = 7;
pdb->in.pll_c3ctrl = 2; /* 2 */
pdb->in.pll_r3ctrl = 1; /* 1 */
pdb->out.pll_postdiv = 1;
}
static void pll_8996_ssc_calc(struct mdss_pll_resources *pll,
static void pll_14nm_ssc_calc(struct mdss_pll_resources *pll,
struct dsi_pll_db *pdb)
{
u32 period, ssc_period;
@ -457,7 +486,7 @@ static void pll_8996_ssc_calc(struct mdss_pll_resources *pll,
pdb->out.ssc_step_size = step_size;
}
static void pll_8996_dec_frac_calc(struct mdss_pll_resources *pll,
static void pll_14nm_dec_frac_calc(struct mdss_pll_resources *pll,
struct dsi_pll_db *pdb)
{
struct dsi_pll_input *pin = &pdb->in;
@ -501,7 +530,7 @@ static void pll_8996_dec_frac_calc(struct mdss_pll_resources *pll,
pout->cmn_ldo_cntrl = 0x1c;
}
static u32 pll_8996_kvco_slop(u32 vrate)
static u32 pll_14nm_kvco_slop(u32 vrate)
{
u32 slop = 0;
@ -515,7 +544,7 @@ static u32 pll_8996_kvco_slop(u32 vrate)
return slop;
}
static void pll_8996_calc_vco_count(struct dsi_pll_db *pdb,
static void pll_14nm_calc_vco_count(struct dsi_pll_db *pdb,
s64 vco_clk_rate, s64 fref)
{
struct dsi_pll_input *pin = &pdb->in;
@ -540,7 +569,7 @@ static void pll_8996_calc_vco_count(struct dsi_pll_db *pdb,
data -= 1;
pout->pll_kvco_div_ref = data;
cnt = pll_8996_kvco_slop(vco_clk_rate);
cnt = pll_14nm_kvco_slop(vco_clk_rate);
cnt *= 2;
do_div(cnt, 100);
cnt *= pin->kvco_measure_time;
@ -659,7 +688,7 @@ static void pll_db_commit_common(struct mdss_pll_resources *pll,
MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_PLL_CRCTRL, data);
}
static void pll_db_commit_8996(struct mdss_pll_resources *pll,
static void pll_db_commit_14nm(struct mdss_pll_resources *pll,
struct dsi_pll_db *pdb)
{
void __iomem *pll_base = pll->pll_base;
@ -753,7 +782,7 @@ static void pll_db_commit_8996(struct mdss_pll_resources *pll,
/*
* pll_source_finding:
* Both GLBL_TEST_CTRL and CLKBUFLR_EN are configured
* at mdss_dsi_8996_phy_config()
* at mdss_dsi_14nm_phy_config()
*/
static int pll_source_finding(struct mdss_pll_resources *pll)
{
@ -820,10 +849,59 @@ static void pll_source_setup(struct mdss_pll_resources *pll)
other->slave = pll;
}
int pll_vco_set_rate_8996(struct clk *c, unsigned long rate)
unsigned long pll_vco_recalc_rate_14nm(struct clk_hw *hw,
unsigned long parent_rate)
{
struct dsi_pll_vco_clk *vco = to_vco_hw(hw);
struct mdss_pll_resources *pll = vco->priv;
u64 vco_rate, multiplier = BIT(20);
s32 div_frac_start;
u32 dec_start;
u64 ref_clk = vco->ref_clk_rate;
int rc;
if (pll->vco_current_rate)
return (unsigned long)pll->vco_current_rate;
if (is_gdsc_disabled(pll))
return 0;
rc = mdss_pll_resource_enable(pll, true);
if (rc) {
pr_err("Failed to enable mdss dsi pll=%d\n", pll->index);
return rc;
}
dec_start = MDSS_PLL_REG_R(pll->pll_base,
DSIPHY_PLL_DEC_START);
dec_start &= 0x0ff;
pr_debug("dec_start = 0x%x\n", dec_start);
div_frac_start = (MDSS_PLL_REG_R(pll->pll_base,
DSIPHY_PLL_DIV_FRAC_START3) & 0x0f) << 16;
div_frac_start |= (MDSS_PLL_REG_R(pll->pll_base,
DSIPHY_PLL_DIV_FRAC_START2) & 0x0ff) << 8;
div_frac_start |= MDSS_PLL_REG_R(pll->pll_base,
DSIPHY_PLL_DIV_FRAC_START1) & 0x0ff;
pr_debug("div_frac_start = 0x%x\n", div_frac_start);
vco_rate = ref_clk * dec_start;
vco_rate += ((ref_clk * div_frac_start) / multiplier);
pr_debug("returning vco rate = %lu\n", (unsigned long)vco_rate);
mdss_pll_resource_enable(pll, false);
pr_debug("%s: returning vco rate as %lu\n",
__func__, (unsigned long)vco_rate);
return (unsigned long)vco_rate;
}
int pll_vco_set_rate_14nm(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
int rc;
struct dsi_pll_vco_clk *vco = to_vco_clk(c);
struct dsi_pll_vco_clk *vco = to_vco_hw(hw);
struct mdss_pll_resources *pll = vco->priv;
struct mdss_pll_resources *slave;
struct dsi_pll_db *pdb;
@ -848,30 +926,30 @@ int pll_vco_set_rate_8996(struct clk *c, unsigned long rate)
pll->vco_current_rate = rate;
pll->vco_ref_clk_rate = vco->ref_clk_rate;
mdss_dsi_pll_8996_input_init(pll, pdb);
mdss_dsi_pll_14nm_input_init(pll, pdb);
pll_8996_dec_frac_calc(pll, pdb);
pll_14nm_dec_frac_calc(pll, pdb);
if (pll->ssc_en)
pll_8996_ssc_calc(pll, pdb);
pll_14nm_ssc_calc(pll, pdb);
pll_8996_calc_vco_count(pdb, pll->vco_current_rate,
pll_14nm_calc_vco_count(pdb, pll->vco_current_rate,
pll->vco_ref_clk_rate);
/* commit slave if split display is enabled */
slave = pll->slave;
if (slave)
pll_db_commit_8996(slave, pdb);
pll_db_commit_14nm(slave, pdb);
/* commit master itself */
pll_db_commit_8996(pll, pdb);
pll_db_commit_14nm(pll, pdb);
mdss_pll_resource_enable(pll, false);
return rc;
}
static void shadow_pll_dynamic_refresh_8996(struct mdss_pll_resources *pll,
static void shadow_pll_dynamic_refresh_14nm(struct mdss_pll_resources *pll,
struct dsi_pll_db *pdb)
{
struct dsi_pll_output *pout = &pdb->out;
@ -931,10 +1009,11 @@ static void shadow_pll_dynamic_refresh_8996(struct mdss_pll_resources *pll,
wmb();
}
int shadow_pll_vco_set_rate_8996(struct clk *c, unsigned long rate)
int shadow_pll_vco_set_rate_14nm(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
int rc;
struct dsi_pll_vco_clk *vco = to_vco_clk(c);
struct dsi_pll_vco_clk *vco = to_vco_hw(hw);
struct mdss_pll_resources *pll = vco->priv;
struct dsi_pll_db *pdb;
s64 vco_clk_rate = (s64)rate;
@ -968,14 +1047,14 @@ int shadow_pll_vco_set_rate_8996(struct clk *c, unsigned long rate)
pll->vco_current_rate = rate;
pll->vco_ref_clk_rate = vco->ref_clk_rate;
mdss_dsi_pll_8996_input_init(pll, pdb);
mdss_dsi_pll_14nm_input_init(pll, pdb);
pll_8996_dec_frac_calc(pll, pdb);
pll_14nm_dec_frac_calc(pll, pdb);
pll_8996_calc_vco_count(pdb, pll->vco_current_rate,
pll_14nm_calc_vco_count(pdb, pll->vco_current_rate,
pll->vco_ref_clk_rate);
shadow_pll_dynamic_refresh_8996(pll, pdb);
shadow_pll_dynamic_refresh_14nm(pll, pdb);
rc = mdss_pll_resource_enable(pll, false);
if (rc) {
@ -986,53 +1065,12 @@ int shadow_pll_vco_set_rate_8996(struct clk *c, unsigned long rate)
return rc;
}
unsigned long pll_vco_get_rate_8996(struct clk *c)
{
u64 vco_rate, multiplier = BIT(20);
s32 div_frac_start;
u32 dec_start;
struct dsi_pll_vco_clk *vco = to_vco_clk(c);
u64 ref_clk = vco->ref_clk_rate;
int rc;
struct mdss_pll_resources *pll = vco->priv;
if (is_gdsc_disabled(pll))
return 0;
rc = mdss_pll_resource_enable(pll, true);
if (rc) {
pr_err("Failed to enable mdss dsi pll=%d\n", pll->index);
return rc;
}
dec_start = MDSS_PLL_REG_R(pll->pll_base,
DSIPHY_PLL_DEC_START);
dec_start &= 0x0ff;
pr_debug("dec_start = 0x%x\n", dec_start);
div_frac_start = (MDSS_PLL_REG_R(pll->pll_base,
DSIPHY_PLL_DIV_FRAC_START3) & 0x0f) << 16;
div_frac_start |= (MDSS_PLL_REG_R(pll->pll_base,
DSIPHY_PLL_DIV_FRAC_START2) & 0x0ff) << 8;
div_frac_start |= MDSS_PLL_REG_R(pll->pll_base,
DSIPHY_PLL_DIV_FRAC_START1) & 0x0ff;
pr_debug("div_frac_start = 0x%x\n", div_frac_start);
vco_rate = ref_clk * dec_start;
vco_rate += ((ref_clk * div_frac_start) / multiplier);
pr_debug("returning vco rate = %lu\n", (unsigned long)vco_rate);
mdss_pll_resource_enable(pll, false);
return (unsigned long)vco_rate;
}
long pll_vco_round_rate_8996(struct clk *c, unsigned long rate)
long pll_vco_round_rate_14nm(struct clk_hw *hw, unsigned long rate,
unsigned long *parent_rate)
{
unsigned long rrate = rate;
u32 div;
struct dsi_pll_vco_clk *vco = to_vco_clk(c);
struct dsi_pll_vco_clk *vco = to_vco_hw(hw);
div = vco->min_rate / rate;
if (div > 15) {
@ -1046,46 +1084,14 @@ long pll_vco_round_rate_8996(struct clk *c, unsigned long rate)
if (rate > vco->max_rate)
rrate = vco->max_rate;
*parent_rate = rrate;
return rrate;
}
enum handoff pll_vco_handoff_8996(struct clk *c)
{
int rc;
enum handoff ret = HANDOFF_DISABLED_CLK;
struct dsi_pll_vco_clk *vco = to_vco_clk(c);
struct mdss_pll_resources *pll = vco->priv;
if (is_gdsc_disabled(pll))
return HANDOFF_DISABLED_CLK;
rc = mdss_pll_resource_enable(pll, true);
if (rc) {
pr_err("Failed to enable mdss dsi pll=%d\n", pll->index);
return ret;
}
if (pll_is_pll_locked_8996(pll)) {
pll->handoff_resources = true;
pll->pll_on = true;
c->rate = pll_vco_get_rate_8996(c);
ret = HANDOFF_ENABLED_CLK;
} else {
mdss_pll_resource_enable(pll, false);
}
return ret;
}
enum handoff shadow_pll_vco_handoff_8996(struct clk *c)
{
return HANDOFF_DISABLED_CLK;
}
int pll_vco_prepare_8996(struct clk *c)
int pll_vco_prepare_14nm(struct clk_hw *hw)
{
int rc = 0;
struct dsi_pll_vco_clk *vco = to_vco_clk(c);
struct dsi_pll_vco_clk *vco = to_vco_hw(hw);
struct mdss_pll_resources *pll = vco->priv;
if (!pll) {
@ -1101,8 +1107,9 @@ int pll_vco_prepare_8996(struct clk *c)
}
if ((pll->vco_cached_rate != 0)
&& (pll->vco_cached_rate == c->rate)) {
rc = c->ops->set_rate(c, pll->vco_cached_rate);
&& (pll->vco_cached_rate == clk_hw_get_rate(hw))) {
rc = hw->init->ops->set_rate(hw, pll->vco_cached_rate,
pll->vco_cached_rate);
if (rc) {
pr_err("index=%d vco_set_rate failed. rc=%d\n",
rc, pll->index);
@ -1111,7 +1118,7 @@ int pll_vco_prepare_8996(struct clk *c)
}
}
rc = dsi_pll_enable(c);
rc = dsi_pll_enable(hw);
if (rc) {
mdss_pll_resource_enable(pll, false);
@ -1122,9 +1129,9 @@ error:
return rc;
}
void pll_vco_unprepare_8996(struct clk *c)
void pll_vco_unprepare_14nm(struct clk_hw *hw)
{
struct dsi_pll_vco_clk *vco = to_vco_clk(c);
struct dsi_pll_vco_clk *vco = to_vco_hw(hw);
struct mdss_pll_resources *pll = vco->priv;
if (!pll) {
@ -1132,6 +1139,17 @@ void pll_vco_unprepare_8996(struct clk *c)
return;
}
pll->vco_cached_rate = c->rate;
dsi_pll_disable(c);
pll->vco_cached_rate = clk_hw_get_rate(hw);
dsi_pll_disable(hw);
}
/*
 * Stub regmap write callback used by the DSI byte/pixel clock muxes.
 * Parent selection is not programmed through this path, so the write is
 * accepted and ignored. Always returns 0.
 */
int dsi_mux_set_parent_14nm(void *context, unsigned int reg, unsigned int val)
{
return 0;
}
/*
 * Stub regmap read callback for the DSI clock muxes: always reports
 * parent index 0 and succeeds.
 */
int dsi_mux_get_parent_14nm(void *context, unsigned int reg, unsigned int *val)
{
*val = 0;
return 0;
}

View file

@ -0,0 +1,614 @@
/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#define pr_fmt(fmt) "%s: " fmt, __func__
#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/workqueue.h>
#include "mdss-pll.h"
#include "mdss-dsi-pll.h"
#include "mdss-dsi-pll-14nm.h"
#define VCO_DELAY_USEC 1
/* Per-instance PLL settings database; one entry per DSI PLL (DSI_PLL_NUM). */
static struct dsi_pll_db pll_db[DSI_PLL_NUM];
/*
 * regmap layout shared by all the custom regmap buses below:
 * 32-bit registers, 4-byte stride.
 */
static struct regmap_config dsi_pll_14nm_config = {
.reg_bits = 32,
.reg_stride = 4,
.val_bits = 32,
.max_register = 0x588,
};
/*
 * Custom regmap buses: instead of raw MMIO, the common clock framework's
 * regmap div/mux ops are routed to the driver's own divider/mux accessors,
 * which handle PLL resource voting and the one-less divider encoding.
 */
static struct regmap_bus post_n1_div_regmap_bus = {
.reg_write = post_n1_div_set_div,
.reg_read = post_n1_div_get_div,
};
static struct regmap_bus n2_div_regmap_bus = {
.reg_write = n2_div_set_div,
.reg_read = n2_div_get_div,
};
/* Shadow N2 divider (dynamic refresh): writes go to the shadow registers,
 * reads share the normal n2 accessor. */
static struct regmap_bus shadow_n2_div_regmap_bus = {
.reg_write = shadow_n2_div_set_div,
.reg_read = n2_div_get_div,
};
/* Byte/pixel mux bus: both callbacks are no-op stubs (see above). */
static struct regmap_bus dsi_mux_regmap_bus = {
.reg_write = dsi_mux_set_parent_14nm,
.reg_read = dsi_mux_get_parent_14nm,
};
/* Op structures */
/* VCO clock ops for normal operation (rate control plus prepare/unprepare). */
static struct clk_ops clk_ops_dsi_vco = {
.recalc_rate = pll_vco_recalc_rate_14nm,
.set_rate = pll_vco_set_rate_14nm,
.round_rate = pll_vco_round_rate_14nm,
.prepare = pll_vco_prepare_14nm,
.unprepare = pll_vco_unprepare_14nm,
};
/* Shadow ops for dynamic refresh */
/* Note: no prepare/unprepare — the shadow path only programs the rate. */
static struct clk_ops clk_ops_shadow_dsi_vco = {
.recalc_rate = pll_vco_recalc_rate_14nm,
.set_rate = shadow_pll_vco_set_rate_14nm,
.round_rate = pll_vco_round_rate_14nm,
};
/*
 * VCO clocks for DSI PLL 0/1, plus their shadow (dynamic refresh) variants.
 * All run from the 19.2 MHz board XO and are limited to 1.3-2.6 GHz.
 */
static struct dsi_pll_vco_clk dsi0pll_vco_clk = {
.ref_clk_rate = 19200000UL,
.min_rate = 1300000000UL,
.max_rate = 2600000000UL,
.pll_en_seq_cnt = 1,
.pll_enable_seqs[0] = dsi_pll_enable_seq_14nm,
.hw.init = &(struct clk_init_data){
.name = "dsi0pll_vco_clk_14nm",
.parent_names = (const char *[]){ "xo_board" },
.num_parents = 1,
.ops = &clk_ops_dsi_vco,
},
};
/*
 * NOTE(review): unlike dsi1pll_shadow_vco_clk below, this shadow clock sets
 * no pll_en_seq_cnt/pll_enable_seqs[0]. The shadow ops have no .prepare, so
 * the enable sequence may be unused either way — confirm which form is
 * intended and make the two consistent.
 */
static struct dsi_pll_vco_clk dsi0pll_shadow_vco_clk = {
.ref_clk_rate = 19200000u,
.min_rate = 1300000000u,
.max_rate = 2600000000u,
.hw.init = &(struct clk_init_data){
.name = "dsi0pll_shadow_vco_clk_14nm",
.parent_names = (const char *[]){ "xo_board" },
.num_parents = 1,
.ops = &clk_ops_shadow_dsi_vco,
},
};
static struct dsi_pll_vco_clk dsi1pll_vco_clk = {
.ref_clk_rate = 19200000UL,
.min_rate = 1300000000UL,
.max_rate = 2600000000UL,
.pll_en_seq_cnt = 1,
.pll_enable_seqs[0] = dsi_pll_enable_seq_14nm,
.hw.init = &(struct clk_init_data){
.name = "dsi1pll_vco_clk_14nm",
.parent_names = (const char *[]){ "xo_board" },
.num_parents = 1,
.ops = &clk_ops_dsi_vco,
},
};
static struct dsi_pll_vco_clk dsi1pll_shadow_vco_clk = {
.ref_clk_rate = 19200000u,
.min_rate = 1300000000u,
.max_rate = 2600000000u,
.pll_en_seq_cnt = 1,
.pll_enable_seqs[0] = dsi_pll_enable_seq_14nm,
.hw.init = &(struct clk_init_data){
.name = "dsi1pll_shadow_vco_clk_14nm",
.parent_names = (const char *[]){ "xo_board" },
.num_parents = 1,
.ops = &clk_ops_shadow_dsi_vco,
},
};
/*
 * POST_N1 divider clocks (normal and shadow, per PLL). The regmap "register"
 * 0x48 is serviced by post_n1_div_regmap_bus rather than raw MMIO, so
 * reads/writes go through post_n1_div_get_div()/post_n1_div_set_div().
 */
static struct clk_regmap_div dsi0pll_post_n1_div_clk = {
.reg = 0x48,
.shift = 0,
.width = 4,
.clkr = {
.hw.init = &(struct clk_init_data){
.name = "dsi0pll_post_n1_div_clk",
.parent_names =
(const char *[]){ "dsi0pll_vco_clk_14nm" },
.num_parents = 1,
.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
.ops = &clk_regmap_div_ops,
},
},
};
static struct clk_regmap_div dsi0pll_shadow_post_n1_div_clk = {
.reg = 0x48,
.shift = 0,
.width = 4,
.clkr = {
.hw.init = &(struct clk_init_data){
.name = "dsi0pll_shadow_post_n1_div_clk",
.parent_names =
(const char *[]){"dsi0pll_shadow_vco_clk_14nm"},
.num_parents = 1,
.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
.ops = &clk_regmap_div_ops,
},
},
};
static struct clk_regmap_div dsi1pll_post_n1_div_clk = {
.reg = 0x48,
.shift = 0,
.width = 4,
.clkr = {
.hw.init = &(struct clk_init_data){
.name = "dsi1pll_post_n1_div_clk",
.parent_names =
(const char *[]){ "dsi1pll_vco_clk_14nm" },
.num_parents = 1,
.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
.ops = &clk_regmap_div_ops,
},
},
};
static struct clk_regmap_div dsi1pll_shadow_post_n1_div_clk = {
.reg = 0x48,
.shift = 0,
.width = 4,
.clkr = {
.hw.init = &(struct clk_init_data){
.name = "dsi1pll_shadow_post_n1_div_clk",
.parent_names =
(const char *[]){"dsi1pll_shadow_vco_clk_14nm"},
.num_parents = 1,
.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
.ops = &clk_regmap_div_ops,
},
},
};
/*
 * N2 divider clocks, children of the POST_N1 dividers. Serviced by
 * n2_div_regmap_bus (shadow variants by shadow_n2_div_regmap_bus).
 * No CLK_SET_RATE_PARENT here: an n2 rate change does not propagate up.
 */
static struct clk_regmap_div dsi0pll_n2_div_clk = {
.reg = 0x48,
.shift = 0,
.width = 4,
.clkr = {
.hw.init = &(struct clk_init_data){
.name = "dsi0pll_n2_div_clk",
.parent_names =
(const char *[]){ "dsi0pll_post_n1_div_clk" },
.num_parents = 1,
.flags = CLK_GET_RATE_NOCACHE,
.ops = &clk_regmap_div_ops,
},
},
};
static struct clk_regmap_div dsi0pll_shadow_n2_div_clk = {
.reg = 0x48,
.shift = 0,
.width = 4,
.clkr = {
.hw.init = &(struct clk_init_data){
.name = "dsi0pll_shadow_n2_div_clk",
.parent_names =
(const char *[]){ "dsi0pll_shadow_post_n1_div_clk" },
.num_parents = 1,
.flags = CLK_GET_RATE_NOCACHE,
.ops = &clk_regmap_div_ops,
},
},
};
static struct clk_regmap_div dsi1pll_n2_div_clk = {
.reg = 0x48,
.shift = 0,
.width = 4,
.clkr = {
.hw.init = &(struct clk_init_data){
.name = "dsi1pll_n2_div_clk",
.parent_names =
(const char *[]){ "dsi1pll_post_n1_div_clk" },
.num_parents = 1,
.flags = CLK_GET_RATE_NOCACHE,
.ops = &clk_regmap_div_ops,
},
},
};
static struct clk_regmap_div dsi1pll_shadow_n2_div_clk = {
.reg = 0x48,
.shift = 0,
.width = 4,
.clkr = {
.hw.init = &(struct clk_init_data){
.name = "dsi1pll_shadow_n2_div_clk",
.parent_names =
(const char *[]){ "dsi1pll_shadow_post_n1_div_clk" },
.num_parents = 1,
.flags = CLK_GET_RATE_NOCACHE,
.ops = &clk_regmap_div_ops,
},
},
};
/* Pixel clock sources: fixed divide-by-2 off the N2 divider output. */
static struct clk_fixed_factor dsi0pll_pixel_clk_src = {
.div = 2,
.mult = 1,
.hw.init = &(struct clk_init_data){
.name = "dsi0pll_pixel_clk_src",
.parent_names = (const char *[]){ "dsi0pll_n2_div_clk" },
.num_parents = 1,
.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
.ops = &clk_fixed_factor_ops,
},
};
static struct clk_fixed_factor dsi0pll_shadow_pixel_clk_src = {
.div = 2,
.mult = 1,
.hw.init = &(struct clk_init_data){
.name = "dsi0pll_shadow_pixel_clk_src",
.parent_names = (const char *[]){ "dsi0pll_shadow_n2_div_clk" },
.num_parents = 1,
.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
.ops = &clk_fixed_factor_ops,
},
};
static struct clk_fixed_factor dsi1pll_pixel_clk_src = {
.div = 2,
.mult = 1,
.hw.init = &(struct clk_init_data){
.name = "dsi1pll_pixel_clk_src",
.parent_names = (const char *[]){ "dsi1pll_n2_div_clk" },
.num_parents = 1,
.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
.ops = &clk_fixed_factor_ops,
},
};
static struct clk_fixed_factor dsi1pll_shadow_pixel_clk_src = {
.div = 2,
.mult = 1,
.hw.init = &(struct clk_init_data){
.name = "dsi1pll_shadow_pixel_clk_src",
.parent_names = (const char *[]){ "dsi1pll_shadow_n2_div_clk" },
.num_parents = 1,
.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
.ops = &clk_fixed_factor_ops,
},
};
/*
 * Pixel clock muxes: select between the normal and shadow pixel sources.
 * Backed by dsi_mux_regmap_bus, whose callbacks are no-op stubs.
 */
static struct clk_regmap_mux dsi0pll_pixel_clk_mux = {
.reg = 0x48,
.shift = 0,
.width = 1,
.clkr = {
.hw.init = &(struct clk_init_data){
.name = "dsi0pll_pixel_clk_mux",
.parent_names =
(const char *[]){ "dsi0pll_pixel_clk_src",
"dsi0pll_shadow_pixel_clk_src"},
.num_parents = 2,
.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
.ops = &clk_regmap_mux_closest_ops,
},
},
};
static struct clk_regmap_mux dsi1pll_pixel_clk_mux = {
.reg = 0x48,
.shift = 0,
.width = 1,
.clkr = {
.hw.init = &(struct clk_init_data){
.name = "dsi1pll_pixel_clk_mux",
.parent_names =
(const char *[]){ "dsi1pll_pixel_clk_src",
"dsi1pll_shadow_pixel_clk_src"},
.num_parents = 2,
.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
.ops = &clk_regmap_mux_closest_ops,
},
},
};
/* Byte clock sources: fixed divide-by-8 off the POST_N1 divider output. */
static struct clk_fixed_factor dsi0pll_byte_clk_src = {
.div = 8,
.mult = 1,
.hw.init = &(struct clk_init_data){
.name = "dsi0pll_byte_clk_src",
.parent_names = (const char *[]){ "dsi0pll_post_n1_div_clk" },
.num_parents = 1,
.flags = (CLK_SET_RATE_PARENT),
.ops = &clk_fixed_factor_ops,
},
};
static struct clk_fixed_factor dsi0pll_shadow_byte_clk_src = {
.div = 8,
.mult = 1,
.hw.init = &(struct clk_init_data){
.name = "dsi0pll_shadow_byte_clk_src",
.parent_names =
(const char *[]){ "dsi0pll_shadow_post_n1_div_clk" },
.num_parents = 1,
.flags = (CLK_SET_RATE_PARENT),
.ops = &clk_fixed_factor_ops,
},
};
static struct clk_fixed_factor dsi1pll_byte_clk_src = {
.div = 8,
.mult = 1,
.hw.init = &(struct clk_init_data){
.name = "dsi1pll_byte_clk_src",
.parent_names = (const char *[]){ "dsi1pll_post_n1_div_clk" },
.num_parents = 1,
.flags = (CLK_SET_RATE_PARENT),
.ops = &clk_fixed_factor_ops,
},
};
static struct clk_fixed_factor dsi1pll_shadow_byte_clk_src = {
.div = 8,
.mult = 1,
.hw.init = &(struct clk_init_data){
.name = "dsi1pll_shadow_byte_clk_src",
.parent_names =
(const char *[]){ "dsi1pll_shadow_post_n1_div_clk" },
.num_parents = 1,
.flags = (CLK_SET_RATE_PARENT),
.ops = &clk_fixed_factor_ops,
},
};
/*
 * Byte clock muxes: select between the normal and shadow byte sources.
 * Backed by dsi_mux_regmap_bus, whose callbacks are no-op stubs.
 */
static struct clk_regmap_mux dsi0pll_byte_clk_mux = {
.reg = 0x48,
.shift = 0,
.width = 1,
.clkr = {
.hw.init = &(struct clk_init_data){
.name = "dsi0pll_byte_clk_mux",
.parent_names =
(const char *[]){"dsi0pll_byte_clk_src",
"dsi0pll_shadow_byte_clk_src"},
.num_parents = 2,
.ops = &clk_regmap_mux_closest_ops,
.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
},
},
};
static struct clk_regmap_mux dsi1pll_byte_clk_mux = {
.reg = 0x48,
.shift = 0,
.width = 1,
.clkr = {
.hw.init = &(struct clk_init_data){
.name = "dsi1pll_byte_clk_mux",
.parent_names =
(const char *[]){"dsi1pll_byte_clk_src",
"dsi1pll_shadow_byte_clk_src"},
.num_parents = 2,
.ops = &clk_regmap_mux_closest_ops,
.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
},
},
};
/*
 * Lookup table of all clk_hw pointers exposed by this driver, indexed by
 * the provider's clock IDs (used when registering the clocks below).
 */
static struct clk_hw *mdss_dsi_pllcc_14nm[] = {
[BYTE0_MUX_CLK] = &dsi0pll_byte_clk_mux.clkr.hw,
[BYTE0_SRC_CLK] = &dsi0pll_byte_clk_src.hw,
[PIX0_MUX_CLK] = &dsi0pll_pixel_clk_mux.clkr.hw,
[PIX0_SRC_CLK] = &dsi0pll_pixel_clk_src.hw,
[N2_DIV_0_CLK] = &dsi0pll_n2_div_clk.clkr.hw,
[POST_N1_DIV_0_CLK] = &dsi0pll_post_n1_div_clk.clkr.hw,
[VCO_CLK_0_CLK] = &dsi0pll_vco_clk.hw,
[SHADOW_BYTE0_SRC_CLK] = &dsi0pll_shadow_byte_clk_src.hw,
[SHADOW_PIX0_SRC_CLK] = &dsi0pll_shadow_pixel_clk_src.hw,
[SHADOW_N2_DIV_0_CLK] = &dsi0pll_shadow_n2_div_clk.clkr.hw,
[SHADOW_POST_N1_DIV_0_CLK] = &dsi0pll_shadow_post_n1_div_clk.clkr.hw,
[SHADOW_VCO_CLK_0_CLK] = &dsi0pll_shadow_vco_clk.hw,
[BYTE1_MUX_CLK] = &dsi1pll_byte_clk_mux.clkr.hw,
[BYTE1_SRC_CLK] = &dsi1pll_byte_clk_src.hw,
[PIX1_MUX_CLK] = &dsi1pll_pixel_clk_mux.clkr.hw,
[PIX1_SRC_CLK] = &dsi1pll_pixel_clk_src.hw,
[N2_DIV_1_CLK] = &dsi1pll_n2_div_clk.clkr.hw,
[POST_N1_DIV_1_CLK] = &dsi1pll_post_n1_div_clk.clkr.hw,
[VCO_CLK_1_CLK] = &dsi1pll_vco_clk.hw,
[SHADOW_BYTE1_SRC_CLK] = &dsi1pll_shadow_byte_clk_src.hw,
[SHADOW_PIX1_SRC_CLK] = &dsi1pll_shadow_pixel_clk_src.hw,
[SHADOW_N2_DIV_1_CLK] = &dsi1pll_shadow_n2_div_clk.clkr.hw,
[SHADOW_POST_N1_DIV_1_CLK] = &dsi1pll_shadow_post_n1_div_clk.clkr.hw,
[SHADOW_VCO_CLK_1_CLK] = &dsi1pll_shadow_vco_clk.hw,
};
int dsi_pll_clock_register_14nm(struct platform_device *pdev,
struct mdss_pll_resources *pll_res)
{
int rc = 0, ndx, i;
int const ssc_freq_default = 31500; /* default h/w recommended value */
int const ssc_ppm_default = 5000; /* default h/w recommended value */
struct dsi_pll_db *pdb;
struct clk_onecell_data *clk_data;
struct clk *clk;
struct regmap *regmap;
int num_clks = ARRAY_SIZE(mdss_dsi_pllcc_14nm);
if (!pdev || !pdev->dev.of_node) {
pr_err("Invalid input parameters\n");
return -EINVAL;
}
if (!pll_res || !pll_res->pll_base) {
pr_err("Invalid PLL resources\n");
return -EPROBE_DEFER;
}
if (pll_res->index >= DSI_PLL_NUM) {
pr_err("pll ndx=%d is NOT supported\n", pll_res->index);
return -EINVAL;
}
ndx = pll_res->index;
pdb = &pll_db[ndx];
pll_res->priv = pdb;
pdb->pll = pll_res;
ndx++;
ndx %= DSI_PLL_NUM;
pdb->next = &pll_db[ndx];
if (pll_res->ssc_en) {
if (!pll_res->ssc_freq)
pll_res->ssc_freq = ssc_freq_default;
if (!pll_res->ssc_ppm)
pll_res->ssc_ppm = ssc_ppm_default;
}
clk_data = devm_kzalloc(&pdev->dev, sizeof(struct clk_onecell_data),
GFP_KERNEL);
if (!clk_data)
return -ENOMEM;
clk_data->clks = devm_kzalloc(&pdev->dev, (num_clks *
sizeof(struct clk *)), GFP_KERNEL);
if (!clk_data->clks) {
devm_kfree(&pdev->dev, clk_data);
return -ENOMEM;
}
clk_data->clk_num = num_clks;
/* Set client data to mux, div and vco clocks. */
if (pll_res->index == DSI_PLL_1) {
regmap = devm_regmap_init(&pdev->dev, &post_n1_div_regmap_bus,
pll_res, &dsi_pll_14nm_config);
dsi1pll_post_n1_div_clk.clkr.regmap = regmap;
dsi1pll_shadow_post_n1_div_clk.clkr.regmap = regmap;
regmap = devm_regmap_init(&pdev->dev, &n2_div_regmap_bus,
pll_res, &dsi_pll_14nm_config);
dsi1pll_n2_div_clk.clkr.regmap = regmap;
regmap = devm_regmap_init(&pdev->dev, &shadow_n2_div_regmap_bus,
pll_res, &dsi_pll_14nm_config);
dsi1pll_shadow_n2_div_clk.clkr.regmap = regmap;
regmap = devm_regmap_init(&pdev->dev, &dsi_mux_regmap_bus,
pll_res, &dsi_pll_14nm_config);
dsi1pll_byte_clk_mux.clkr.regmap = regmap;
dsi1pll_pixel_clk_mux.clkr.regmap = regmap;
dsi1pll_vco_clk.priv = pll_res;
dsi1pll_shadow_vco_clk.priv = pll_res;
pll_res->vco_delay = VCO_DELAY_USEC;
for (i = BYTE1_MUX_CLK; i <= SHADOW_VCO_CLK_1_CLK; i++) {
pr_debug("register clk: %d index: %d\n",
i, pll_res->index);
clk = devm_clk_register(&pdev->dev,
mdss_dsi_pllcc_14nm[i]);
if (IS_ERR(clk)) {
pr_err("clk registration failed for DSI: %d\n",
pll_res->index);
rc = -EINVAL;
goto clk_reg_fail;
}
clk_data->clks[i] = clk;
}
rc = of_clk_add_provider(pdev->dev.of_node,
of_clk_src_onecell_get, clk_data);
} else {
regmap = devm_regmap_init(&pdev->dev, &post_n1_div_regmap_bus,
pll_res, &dsi_pll_14nm_config);
dsi0pll_post_n1_div_clk.clkr.regmap = regmap;
dsi0pll_shadow_post_n1_div_clk.clkr.regmap = regmap;
regmap = devm_regmap_init(&pdev->dev, &n2_div_regmap_bus,
pll_res, &dsi_pll_14nm_config);
dsi0pll_n2_div_clk.clkr.regmap = regmap;
regmap = devm_regmap_init(&pdev->dev, &shadow_n2_div_regmap_bus,
pll_res, &dsi_pll_14nm_config);
dsi0pll_shadow_n2_div_clk.clkr.regmap = regmap;
regmap = devm_regmap_init(&pdev->dev, &dsi_mux_regmap_bus,
pll_res, &dsi_pll_14nm_config);
dsi0pll_byte_clk_mux.clkr.regmap = regmap;
dsi0pll_pixel_clk_mux.clkr.regmap = regmap;
dsi0pll_vco_clk.priv = pll_res;
dsi0pll_shadow_vco_clk.priv = pll_res;
pll_res->vco_delay = VCO_DELAY_USEC;
for (i = BYTE0_MUX_CLK; i <= SHADOW_VCO_CLK_0_CLK; i++) {
pr_debug("reg clk: %d index: %d\n", i, pll_res->index);
clk = devm_clk_register(&pdev->dev,
mdss_dsi_pllcc_14nm[i]);
if (IS_ERR(clk)) {
pr_err("clk registration failed for DSI: %d\n",
pll_res->index);
rc = -EINVAL;
goto clk_reg_fail;
}
clk_data->clks[i] = clk;
}
rc = of_clk_add_provider(pdev->dev.of_node,
of_clk_src_onecell_get, clk_data);
}
if (!rc) {
pr_info("Registered DSI PLL ndx=%d clocks successfully\n",
pll_res->index);
return rc;
}
clk_reg_fail:
devm_kfree(&pdev->dev, clk_data->clks);
devm_kfree(&pdev->dev, clk_data);
return rc;
}

View file

@ -10,8 +10,8 @@
* GNU General Public License for more details.
*/
#ifndef MDSS_DSI_PLL_8996_H
#define MDSS_DSI_PLL_8996_H
#ifndef MDSS_DSI_PLL_14NM_H
#define MDSS_DSI_PLL_14NM_H
#define DSIPHY_CMN_CLK_CFG0 0x0010
#define DSIPHY_CMN_CLK_CFG1 0x0014
@ -197,25 +197,31 @@ enum {
PLL_MASTER
};
int pll_vco_set_rate_8996(struct clk *c, unsigned long rate);
long pll_vco_round_rate_8996(struct clk *c, unsigned long rate);
enum handoff pll_vco_handoff_8996(struct clk *c);
enum handoff shadow_pll_vco_handoff_8996(struct clk *c);
int shadow_post_n1_div_set_div(struct div_clk *clk, int div);
int shadow_post_n1_div_get_div(struct div_clk *clk);
int shadow_n2_div_set_div(struct div_clk *clk, int div);
int shadow_n2_div_get_div(struct div_clk *clk);
int shadow_pll_vco_set_rate_8996(struct clk *c, unsigned long rate);
int pll_vco_prepare_8996(struct clk *c);
void pll_vco_unprepare_8996(struct clk *c);
int set_mdss_byte_mux_sel_8996(struct mux_clk *clk, int sel);
int get_mdss_byte_mux_sel_8996(struct mux_clk *clk);
int set_mdss_pixel_mux_sel_8996(struct mux_clk *clk, int sel);
int get_mdss_pixel_mux_sel_8996(struct mux_clk *clk);
int post_n1_div_set_div(struct div_clk *clk, int div);
int post_n1_div_get_div(struct div_clk *clk);
int n2_div_set_div(struct div_clk *clk, int div);
int n2_div_get_div(struct div_clk *clk);
int dsi_pll_enable_seq_8996(struct mdss_pll_resources *pll);
int pll_vco_set_rate_14nm(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate);
int shadow_pll_vco_set_rate_14nm(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate);
long pll_vco_round_rate_14nm(struct clk_hw *hw, unsigned long rate,
unsigned long *parent_rate);
unsigned long pll_vco_recalc_rate_14nm(struct clk_hw *hw,
unsigned long parent_rate);
#endif /* MDSS_DSI_PLL_8996_H */
int pll_vco_prepare_14nm(struct clk_hw *hw);
void pll_vco_unprepare_14nm(struct clk_hw *hw);
int shadow_post_n1_div_set_div(void *context,
unsigned int reg, unsigned int div);
int shadow_post_n1_div_get_div(void *context,
unsigned int reg, unsigned int *div);
int shadow_n2_div_set_div(void *context, unsigned int reg, unsigned int div);
int shadow_n2_div_get_div(void *context, unsigned int reg, unsigned int *div);
int post_n1_div_set_div(void *context, unsigned int reg, unsigned int div);
int post_n1_div_get_div(void *context, unsigned int reg, unsigned int *div);
int n2_div_set_div(void *context, unsigned int reg, unsigned int div);
int n2_div_get_div(void *context, unsigned int reg, unsigned int *div);
int dsi_pll_enable_seq_14nm(struct mdss_pll_resources *pll);
int dsi_mux_set_parent_14nm(void *context, unsigned int reg, unsigned int val);
int dsi_mux_get_parent_14nm(void *context, unsigned int reg, unsigned int *val);
#endif /* MDSS_DSI_PLL_14NM_H */

View file

@ -1,566 +0,0 @@
/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#define pr_fmt(fmt) "%s: " fmt, __func__
#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/clk/msm-clk-provider.h>
#include <linux/clk/msm-clk.h>
#include <linux/workqueue.h>
#include <linux/clk/msm-clock-generic.h>
#include <dt-bindings/clock/msm-clocks-8996.h>
#include "mdss-pll.h"
#include "mdss-dsi-pll.h"
#include "mdss-dsi-pll-8996.h"
#define VCO_DELAY_USEC 1
static struct dsi_pll_db pll_db[DSI_PLL_NUM];
static struct clk_ops n2_clk_src_ops;
static struct clk_ops shadow_n2_clk_src_ops;
static struct clk_ops byte_clk_src_ops;
static struct clk_ops post_n1_div_clk_src_ops;
static struct clk_ops shadow_post_n1_div_clk_src_ops;
static struct clk_ops clk_ops_gen_mux_dsi;
/* Op structures */
static struct clk_ops clk_ops_dsi_vco = {
.set_rate = pll_vco_set_rate_8996,
.round_rate = pll_vco_round_rate_8996,
.handoff = pll_vco_handoff_8996,
.prepare = pll_vco_prepare_8996,
.unprepare = pll_vco_unprepare_8996,
};
static struct clk_div_ops post_n1_div_ops = {
.set_div = post_n1_div_set_div,
.get_div = post_n1_div_get_div,
};
static struct clk_div_ops n2_div_ops = { /* hr_oclk3 */
.set_div = n2_div_set_div,
.get_div = n2_div_get_div,
};
static struct clk_mux_ops mdss_byte_mux_ops = {
.set_mux_sel = set_mdss_byte_mux_sel_8996,
.get_mux_sel = get_mdss_byte_mux_sel_8996,
};
static struct clk_mux_ops mdss_pixel_mux_ops = {
.set_mux_sel = set_mdss_pixel_mux_sel_8996,
.get_mux_sel = get_mdss_pixel_mux_sel_8996,
};
/* Shadow ops for dynamic refresh */
static struct clk_ops clk_ops_shadow_dsi_vco = {
.set_rate = shadow_pll_vco_set_rate_8996,
.round_rate = pll_vco_round_rate_8996,
.handoff = shadow_pll_vco_handoff_8996,
};
static struct clk_div_ops shadow_post_n1_div_ops = {
.set_div = post_n1_div_set_div,
};
static struct clk_div_ops shadow_n2_div_ops = {
.set_div = shadow_n2_div_set_div,
};
static struct dsi_pll_vco_clk dsi0pll_vco_clk = {
.ref_clk_rate = 19200000UL,
.min_rate = 1300000000UL,
.max_rate = 2600000000UL,
.pll_en_seq_cnt = 1,
.pll_enable_seqs[0] = dsi_pll_enable_seq_8996,
.c = {
.dbg_name = "dsi0pll_vco_clk_8996",
.ops = &clk_ops_dsi_vco,
CLK_INIT(dsi0pll_vco_clk.c),
},
};
static struct dsi_pll_vco_clk dsi0pll_shadow_vco_clk = {
.ref_clk_rate = 19200000u,
.min_rate = 1300000000u,
.max_rate = 2600000000u,
.c = {
.dbg_name = "dsi0pll_shadow_vco_clk",
.ops = &clk_ops_shadow_dsi_vco,
CLK_INIT(dsi0pll_shadow_vco_clk.c),
},
};
static struct dsi_pll_vco_clk dsi1pll_vco_clk = {
.ref_clk_rate = 19200000UL,
.min_rate = 1300000000UL,
.max_rate = 2600000000UL,
.pll_en_seq_cnt = 1,
.pll_enable_seqs[0] = dsi_pll_enable_seq_8996,
.c = {
.dbg_name = "dsi1pll_vco_clk_8996",
.ops = &clk_ops_dsi_vco,
CLK_INIT(dsi1pll_vco_clk.c),
},
};
static struct dsi_pll_vco_clk dsi1pll_shadow_vco_clk = {
.ref_clk_rate = 19200000u,
.min_rate = 1300000000u,
.max_rate = 2600000000u,
.pll_en_seq_cnt = 1,
.pll_enable_seqs[0] = dsi_pll_enable_seq_8996,
.c = {
.dbg_name = "dsi1pll_shadow_vco_clk",
.ops = &clk_ops_shadow_dsi_vco,
CLK_INIT(dsi1pll_shadow_vco_clk.c),
},
};
static struct div_clk dsi0pll_post_n1_div_clk = {
.data = {
.max_div = 15,
.min_div = 1,
},
.ops = &post_n1_div_ops,
.c = {
.parent = &dsi0pll_vco_clk.c,
.dbg_name = "dsi0pll_post_n1_div_clk",
.ops = &post_n1_div_clk_src_ops,
.flags = CLKFLAG_NO_RATE_CACHE,
CLK_INIT(dsi0pll_post_n1_div_clk.c),
},
};
static struct div_clk dsi0pll_shadow_post_n1_div_clk = {
.data = {
.max_div = 15,
.min_div = 1,
},
.ops = &shadow_post_n1_div_ops,
.c = {
.parent = &dsi0pll_shadow_vco_clk.c,
.dbg_name = "dsi0pll_shadow_post_n1_div_clk",
.ops = &shadow_post_n1_div_clk_src_ops,
.flags = CLKFLAG_NO_RATE_CACHE,
CLK_INIT(dsi0pll_shadow_post_n1_div_clk.c),
},
};
static struct div_clk dsi1pll_post_n1_div_clk = {
.data = {
.max_div = 15,
.min_div = 1,
},
.ops = &post_n1_div_ops,
.c = {
.parent = &dsi1pll_vco_clk.c,
.dbg_name = "dsi1pll_post_n1_div_clk",
.ops = &post_n1_div_clk_src_ops,
.flags = CLKFLAG_NO_RATE_CACHE,
CLK_INIT(dsi1pll_post_n1_div_clk.c),
},
};
static struct div_clk dsi1pll_shadow_post_n1_div_clk = {
.data = {
.max_div = 15,
.min_div = 1,
},
.ops = &shadow_post_n1_div_ops,
.c = {
.parent = &dsi1pll_shadow_vco_clk.c,
.dbg_name = "dsi1pll_shadow_post_n1_div_clk",
.ops = &shadow_post_n1_div_clk_src_ops,
.flags = CLKFLAG_NO_RATE_CACHE,
CLK_INIT(dsi1pll_shadow_post_n1_div_clk.c),
},
};
static struct div_clk dsi0pll_n2_div_clk = {
.data = {
.max_div = 15,
.min_div = 1,
},
.ops = &n2_div_ops,
.c = {
.parent = &dsi0pll_post_n1_div_clk.c,
.dbg_name = "dsi0pll_n2_div_clk",
.ops = &n2_clk_src_ops,
.flags = CLKFLAG_NO_RATE_CACHE,
CLK_INIT(dsi0pll_n2_div_clk.c),
},
};
static struct div_clk dsi0pll_shadow_n2_div_clk = {
.data = {
.max_div = 15,
.min_div = 1,
},
.ops = &shadow_n2_div_ops,
.c = {
.parent = &dsi0pll_shadow_post_n1_div_clk.c,
.dbg_name = "dsi0pll_shadow_n2_div_clk",
.ops = &shadow_n2_clk_src_ops,
.flags = CLKFLAG_NO_RATE_CACHE,
CLK_INIT(dsi0pll_shadow_n2_div_clk.c),
},
};
static struct div_clk dsi1pll_n2_div_clk = {
.data = {
.max_div = 15,
.min_div = 1,
},
.ops = &n2_div_ops,
.c = {
.parent = &dsi1pll_post_n1_div_clk.c,
.dbg_name = "dsi1pll_n2_div_clk",
.ops = &n2_clk_src_ops,
.flags = CLKFLAG_NO_RATE_CACHE,
CLK_INIT(dsi1pll_n2_div_clk.c),
},
};
static struct div_clk dsi1pll_shadow_n2_div_clk = {
.data = {
.max_div = 15,
.min_div = 1,
},
.ops = &shadow_n2_div_ops,
.c = {
.parent = &dsi1pll_shadow_post_n1_div_clk.c,
.dbg_name = "dsi1pll_shadow_n2_div_clk",
.ops = &shadow_n2_clk_src_ops,
.flags = CLKFLAG_NO_RATE_CACHE,
CLK_INIT(dsi1pll_shadow_n2_div_clk.c),
},
};
static struct div_clk dsi0pll_pixel_clk_src = {
.data = {
.div = 2,
.min_div = 2,
.max_div = 2,
},
.c = {
.parent = &dsi0pll_n2_div_clk.c,
.dbg_name = "dsi0pll_pixel_clk_src",
.ops = &clk_ops_div,
.flags = CLKFLAG_NO_RATE_CACHE,
CLK_INIT(dsi0pll_pixel_clk_src.c),
},
};
static struct div_clk dsi0pll_shadow_pixel_clk_src = {
.data = {
.div = 2,
.min_div = 2,
.max_div = 2,
},
.c = {
.parent = &dsi0pll_shadow_n2_div_clk.c,
.dbg_name = "dsi0pll_shadow_pixel_clk_src",
.ops = &clk_ops_div,
.flags = CLKFLAG_NO_RATE_CACHE,
CLK_INIT(dsi0pll_shadow_pixel_clk_src.c),
},
};
static struct div_clk dsi1pll_pixel_clk_src = {
.data = {
.div = 2,
.min_div = 2,
.max_div = 2,
},
.c = {
.parent = &dsi1pll_n2_div_clk.c,
.dbg_name = "dsi1pll_pixel_clk_src",
.ops = &clk_ops_div,
.flags = CLKFLAG_NO_RATE_CACHE,
CLK_INIT(dsi1pll_pixel_clk_src.c),
},
};
static struct div_clk dsi1pll_shadow_pixel_clk_src = {
.data = {
.div = 2,
.min_div = 2,
.max_div = 2,
},
.c = {
.parent = &dsi1pll_shadow_n2_div_clk.c,
.dbg_name = "dsi1pll_shadow_pixel_clk_src",
.ops = &clk_ops_div,
.flags = CLKFLAG_NO_RATE_CACHE,
CLK_INIT(dsi1pll_shadow_pixel_clk_src.c),
},
};
static struct mux_clk dsi0pll_pixel_clk_mux = {
.num_parents = 2,
.parents = (struct clk_src[]) {
{&dsi0pll_pixel_clk_src.c, 0},
{&dsi0pll_shadow_pixel_clk_src.c, 1},
},
.ops = &mdss_pixel_mux_ops,
.c = {
.parent = &dsi0pll_pixel_clk_src.c,
.dbg_name = "dsi0pll_pixel_clk_mux",
.ops = &clk_ops_gen_mux_dsi,
.flags = CLKFLAG_NO_RATE_CACHE,
CLK_INIT(dsi0pll_pixel_clk_mux.c),
}
};
static struct mux_clk dsi1pll_pixel_clk_mux = {
.num_parents = 2,
.parents = (struct clk_src[]) {
{&dsi1pll_pixel_clk_src.c, 0},
{&dsi1pll_shadow_pixel_clk_src.c, 1},
},
.ops = &mdss_pixel_mux_ops,
.c = {
.parent = &dsi1pll_pixel_clk_src.c,
.dbg_name = "dsi1pll_pixel_clk_mux",
.ops = &clk_ops_gen_mux_dsi,
.flags = CLKFLAG_NO_RATE_CACHE,
CLK_INIT(dsi1pll_pixel_clk_mux.c),
}
};
static struct div_clk dsi0pll_byte_clk_src = {
.data = {
.div = 8,
.min_div = 8,
.max_div = 8,
},
.c = {
.parent = &dsi0pll_post_n1_div_clk.c,
.dbg_name = "dsi0pll_byte_clk_src",
.ops = &clk_ops_div,
CLK_INIT(dsi0pll_byte_clk_src.c),
},
};
static struct div_clk dsi0pll_shadow_byte_clk_src = {
.data = {
.div = 8,
.min_div = 8,
.max_div = 8,
},
.c = {
.parent = &dsi0pll_shadow_post_n1_div_clk.c,
.dbg_name = "dsi0pll_shadow_byte_clk_src",
.ops = &clk_ops_div,
CLK_INIT(dsi0pll_shadow_byte_clk_src.c),
},
};
static struct div_clk dsi1pll_byte_clk_src = {
.data = {
.div = 8,
.min_div = 8,
.max_div = 8,
},
.c = {
.parent = &dsi1pll_post_n1_div_clk.c,
.dbg_name = "dsi1pll_byte_clk_src",
.ops = &clk_ops_div,
CLK_INIT(dsi1pll_byte_clk_src.c),
},
};
static struct div_clk dsi1pll_shadow_byte_clk_src = {
.data = {
.div = 8,
.min_div = 8,
.max_div = 8,
},
.c = {
.parent = &dsi1pll_shadow_post_n1_div_clk.c,
.dbg_name = "dsi1pll_shadow_byte_clk_src",
.ops = &clk_ops_div,
CLK_INIT(dsi1pll_shadow_byte_clk_src.c),
},
};
static struct mux_clk dsi0pll_byte_clk_mux = {
.num_parents = 2,
.parents = (struct clk_src[]) {
{&dsi0pll_byte_clk_src.c, 0},
{&dsi0pll_shadow_byte_clk_src.c, 1},
},
.ops = &mdss_byte_mux_ops,
.c = {
.parent = &dsi0pll_byte_clk_src.c,
.dbg_name = "dsi0pll_byte_clk_mux",
.ops = &clk_ops_gen_mux_dsi,
.flags = CLKFLAG_NO_RATE_CACHE,
CLK_INIT(dsi0pll_byte_clk_mux.c),
}
};
static struct mux_clk dsi1pll_byte_clk_mux = {
.num_parents = 2,
.parents = (struct clk_src[]) {
{&dsi1pll_byte_clk_src.c, 0},
{&dsi1pll_shadow_byte_clk_src.c, 1},
},
.ops = &mdss_byte_mux_ops,
.c = {
.parent = &dsi1pll_byte_clk_src.c,
.dbg_name = "dsi1pll_byte_clk_mux",
.ops = &clk_ops_gen_mux_dsi,
.flags = CLKFLAG_NO_RATE_CACHE,
CLK_INIT(dsi1pll_byte_clk_mux.c),
}
};
static struct clk_lookup mdss_dsi_pllcc_8996[] = {
CLK_LIST(dsi0pll_byte_clk_mux),
CLK_LIST(dsi0pll_byte_clk_src),
CLK_LIST(dsi0pll_pixel_clk_mux),
CLK_LIST(dsi0pll_pixel_clk_src),
CLK_LIST(dsi0pll_n2_div_clk),
CLK_LIST(dsi0pll_post_n1_div_clk),
CLK_LIST(dsi0pll_vco_clk),
CLK_LIST(dsi0pll_shadow_byte_clk_src),
CLK_LIST(dsi0pll_shadow_pixel_clk_src),
CLK_LIST(dsi0pll_shadow_n2_div_clk),
CLK_LIST(dsi0pll_shadow_post_n1_div_clk),
CLK_LIST(dsi0pll_shadow_vco_clk),
};
static struct clk_lookup mdss_dsi_pllcc_8996_1[] = {
CLK_LIST(dsi1pll_byte_clk_mux),
CLK_LIST(dsi1pll_byte_clk_src),
CLK_LIST(dsi1pll_pixel_clk_mux),
CLK_LIST(dsi1pll_pixel_clk_src),
CLK_LIST(dsi1pll_n2_div_clk),
CLK_LIST(dsi1pll_post_n1_div_clk),
CLK_LIST(dsi1pll_vco_clk),
CLK_LIST(dsi1pll_shadow_byte_clk_src),
CLK_LIST(dsi1pll_shadow_pixel_clk_src),
CLK_LIST(dsi1pll_shadow_n2_div_clk),
CLK_LIST(dsi1pll_shadow_post_n1_div_clk),
CLK_LIST(dsi1pll_shadow_vco_clk),
};
int dsi_pll_clock_register_8996(struct platform_device *pdev,
struct mdss_pll_resources *pll_res)
{
int rc = 0, ndx;
int const ssc_freq_default = 31500; /* default h/w recommended value */
int const ssc_ppm_default = 5000; /* default h/w recommended value */
struct dsi_pll_db *pdb;
if (!pdev || !pdev->dev.of_node) {
pr_err("Invalid input parameters\n");
return -EINVAL;
}
if (!pll_res || !pll_res->pll_base) {
pr_err("Invalid PLL resources\n");
return -EPROBE_DEFER;
}
if (pll_res->index >= DSI_PLL_NUM) {
pr_err("pll ndx=%d is NOT supported\n", pll_res->index);
return -EINVAL;
}
ndx = pll_res->index;
pdb = &pll_db[ndx];
pll_res->priv = pdb;
pdb->pll = pll_res;
ndx++;
ndx %= DSI_PLL_NUM;
pdb->next = &pll_db[ndx];
/* Set clock source operations */
/* hr_oclk3, pixel */
n2_clk_src_ops = clk_ops_slave_div;
n2_clk_src_ops.prepare = mdss_pll_div_prepare;
shadow_n2_clk_src_ops = clk_ops_slave_div;
/* hr_ockl2, byte, vco pll */
post_n1_div_clk_src_ops = clk_ops_div;
post_n1_div_clk_src_ops.prepare = mdss_pll_div_prepare;
shadow_post_n1_div_clk_src_ops = clk_ops_div;
byte_clk_src_ops = clk_ops_div;
byte_clk_src_ops.prepare = mdss_pll_div_prepare;
clk_ops_gen_mux_dsi = clk_ops_gen_mux;
clk_ops_gen_mux_dsi.round_rate = parent_round_rate;
clk_ops_gen_mux_dsi.set_rate = parent_set_rate;
if (pll_res->ssc_en) {
if (!pll_res->ssc_freq)
pll_res->ssc_freq = ssc_freq_default;
if (!pll_res->ssc_ppm)
pll_res->ssc_ppm = ssc_ppm_default;
}
/* Set client data to mux, div and vco clocks. */
if (pll_res->index == DSI_PLL_1) {
dsi1pll_byte_clk_src.priv = pll_res;
dsi1pll_pixel_clk_src.priv = pll_res;
dsi1pll_post_n1_div_clk.priv = pll_res;
dsi1pll_n2_div_clk.priv = pll_res;
dsi1pll_vco_clk.priv = pll_res;
dsi1pll_shadow_byte_clk_src.priv = pll_res;
dsi1pll_shadow_pixel_clk_src.priv = pll_res;
dsi1pll_shadow_post_n1_div_clk.priv = pll_res;
dsi1pll_shadow_n2_div_clk.priv = pll_res;
dsi1pll_shadow_vco_clk.priv = pll_res;
pll_res->vco_delay = VCO_DELAY_USEC;
rc = of_msm_clock_register(pdev->dev.of_node,
mdss_dsi_pllcc_8996_1,
ARRAY_SIZE(mdss_dsi_pllcc_8996_1));
} else {
dsi0pll_byte_clk_src.priv = pll_res;
dsi0pll_pixel_clk_src.priv = pll_res;
dsi0pll_post_n1_div_clk.priv = pll_res;
dsi0pll_n2_div_clk.priv = pll_res;
dsi0pll_vco_clk.priv = pll_res;
dsi0pll_shadow_byte_clk_src.priv = pll_res;
dsi0pll_shadow_pixel_clk_src.priv = pll_res;
dsi0pll_shadow_post_n1_div_clk.priv = pll_res;
dsi0pll_shadow_n2_div_clk.priv = pll_res;
dsi0pll_shadow_vco_clk.priv = pll_res;
pll_res->vco_delay = VCO_DELAY_USEC;
rc = of_msm_clock_register(pdev->dev.of_node,
mdss_dsi_pllcc_8996,
ARRAY_SIZE(mdss_dsi_pllcc_8996));
}
if (!rc) {
pr_info("Registered DSI PLL ndx=%d clocks successfully\n",
pll_res->index);
}
return rc;
}

View file

@ -31,6 +31,8 @@ struct lpfr_cfg {
};
struct dsi_pll_vco_clk {
struct clk_hw hw;
unsigned long ref_clk_rate;
unsigned long min_rate;
unsigned long max_rate;
@ -39,72 +41,15 @@ struct dsi_pll_vco_clk {
u32 lpfr_lut_size;
void *priv;
struct clk c;
int (*pll_enable_seqs[MAX_DSI_PLL_EN_SEQS])
(struct mdss_pll_resources *dsi_pll_Res);
};
static inline struct dsi_pll_vco_clk *to_vco_clk(struct clk *clk)
static inline struct dsi_pll_vco_clk *to_vco_hw(struct clk_hw *hw)
{
return container_of(clk, struct dsi_pll_vco_clk, c);
return container_of(hw, struct dsi_pll_vco_clk, hw);
}
int dsi_pll_clock_register_hpm(struct platform_device *pdev,
int dsi_pll_clock_register_14nm(struct platform_device *pdev,
struct mdss_pll_resources *pll_res);
int dsi_pll_clock_register_20nm(struct platform_device *pdev,
struct mdss_pll_resources *pll_res);
int dsi_pll_clock_register_lpm(struct platform_device *pdev,
struct mdss_pll_resources *pll_res);
int dsi_pll_clock_register_8996(struct platform_device *pdev,
struct mdss_pll_resources *pll_res);
int dsi_pll_clock_register_8998(struct platform_device *pdev,
struct mdss_pll_resources *pll_res);
int set_byte_mux_sel(struct mux_clk *clk, int sel);
int get_byte_mux_sel(struct mux_clk *clk);
int dsi_pll_mux_prepare(struct clk *c);
int fixed_4div_set_div(struct div_clk *clk, int div);
int fixed_4div_get_div(struct div_clk *clk);
int digital_set_div(struct div_clk *clk, int div);
int digital_get_div(struct div_clk *clk);
int analog_set_div(struct div_clk *clk, int div);
int analog_get_div(struct div_clk *clk);
int dsi_pll_lock_status(struct mdss_pll_resources *dsi_pll_res);
int vco_set_rate(struct dsi_pll_vco_clk *vco, unsigned long rate);
unsigned long vco_get_rate(struct clk *c);
long vco_round_rate(struct clk *c, unsigned long rate);
enum handoff vco_handoff(struct clk *c);
int vco_prepare(struct clk *c);
void vco_unprepare(struct clk *c);
/* APIs for 20nm PHY PLL */
int pll_20nm_vco_set_rate(struct dsi_pll_vco_clk *vco, unsigned long rate);
int shadow_pll_20nm_vco_set_rate(struct dsi_pll_vco_clk *vco,
unsigned long rate);
long pll_20nm_vco_round_rate(struct clk *c, unsigned long rate);
enum handoff pll_20nm_vco_handoff(struct clk *c);
int pll_20nm_vco_prepare(struct clk *c);
void pll_20nm_vco_unprepare(struct clk *c);
int pll_20nm_vco_enable_seq(struct mdss_pll_resources *dsi_pll_res);
int set_bypass_lp_div_mux_sel(struct mux_clk *clk, int sel);
int set_shadow_bypass_lp_div_mux_sel(struct mux_clk *clk, int sel);
int get_bypass_lp_div_mux_sel(struct mux_clk *clk);
int fixed_hr_oclk2_set_div(struct div_clk *clk, int div);
int shadow_fixed_hr_oclk2_set_div(struct div_clk *clk, int div);
int fixed_hr_oclk2_get_div(struct div_clk *clk);
int hr_oclk3_set_div(struct div_clk *clk, int div);
int shadow_hr_oclk3_set_div(struct div_clk *clk, int div);
int hr_oclk3_get_div(struct div_clk *clk);
int ndiv_set_div(struct div_clk *clk, int div);
int shadow_ndiv_set_div(struct div_clk *clk, int div);
int ndiv_get_div(struct div_clk *clk);
void __dsi_pll_disable(void __iomem *pll_base);
int set_mdss_pixel_mux_sel(struct mux_clk *clk, int sel);
int get_mdss_pixel_mux_sel(struct mux_clk *clk);
int set_mdss_byte_mux_sel(struct mux_clk *clk, int sel);
int get_mdss_byte_mux_sel(struct mux_clk *clk);
#endif

View file

@ -16,7 +16,6 @@
#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/clk/msm-clock-generic.h>
#include <linux/of_address.h>
#include <linux/dma-mapping.h>
#include <linux/vmalloc.h>

View file

@ -19,12 +19,9 @@
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/iopoll.h>
#include <linux/clk/msm-clock-generic.h>
#include "mdss-pll.h"
#include "mdss-dsi-pll.h"
#include "mdss-hdmi-pll.h"
#include "mdss-dp-pll.h"
int mdss_pll_resource_enable(struct mdss_pll_resources *pll_res, bool enable)
{
@ -175,27 +172,7 @@ static int mdss_pll_clock_register(struct platform_device *pdev,
switch (pll_res->pll_interface_type) {
case MDSS_DSI_PLL_8996:
rc = dsi_pll_clock_register_8996(pdev, pll_res);
break;
case MDSS_DSI_PLL_8998:
rc = dsi_pll_clock_register_8998(pdev, pll_res);
case MDSS_DP_PLL_8998:
rc = dp_pll_clock_register_8998(pdev, pll_res);
break;
case MDSS_HDMI_PLL_8996:
rc = hdmi_8996_v1_pll_clock_register(pdev, pll_res);
break;
case MDSS_HDMI_PLL_8996_V2:
rc = hdmi_8996_v2_pll_clock_register(pdev, pll_res);
break;
case MDSS_HDMI_PLL_8996_V3:
rc = hdmi_8996_v3_pll_clock_register(pdev, pll_res);
break;
case MDSS_HDMI_PLL_8996_V3_1_8:
rc = hdmi_8996_v3_1p8_pll_clock_register(pdev, pll_res);
break;
case MDSS_HDMI_PLL_8998:
rc = hdmi_8998_pll_clock_register(pdev, pll_res);
rc = dsi_pll_clock_register_14nm(pdev, pll_res);
break;
case MDSS_UNKNOWN_PLL:
default:

View file

@ -14,8 +14,16 @@
#define __MDSS_PLL_H
#include <linux/mdss_io_util.h>
#include <linux/clk/msm-clock-generic.h>
#include <linux/clk-provider.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/clkdev.h>
#include <linux/regmap.h>
#include "../clk-regmap.h"
#include "../clk-regmap-divider.h"
#include "../clk-regmap-mux.h"
#include <dt-bindings/clock/mdss-pll-clk.h>
#define MDSS_PLL_REG_W(base, offset, data) \
writel_relaxed((data), (base) + (offset))
@ -200,21 +208,12 @@ static inline bool is_gdsc_disabled(struct mdss_pll_resources *pll_res)
(!(readl_relaxed(pll_res->gdsc_base) & BIT(0)))) ? false : true;
}
static inline int mdss_pll_div_prepare(struct clk *c)
static inline int mdss_pll_div_prepare(struct clk_hw *hw)
{
struct div_clk *div = to_div_clk(c);
struct clk_hw *parent_hw = clk_hw_get_parent(hw);
/* Restore the divider's value */
return div->ops->set_div(div, div->data.div);
}
static inline int mdss_set_mux_sel(struct mux_clk *clk, int sel)
{
return 0;
}
static inline int mdss_get_mux_sel(struct mux_clk *clk)
{
return 0;
return hw->init->ops->set_rate(hw, clk_hw_get_rate(hw),
clk_hw_get_rate(parent_hw));
}
int mdss_pll_resource_enable(struct mdss_pll_resources *pll_res, bool enable);

View file

@ -359,6 +359,13 @@ static inline void _pop_drawobj(struct adreno_context *drawctxt)
drawctxt->queued--;
}
/*
 * _retire_sparseobj() - execute and retire a sparse bind object
 * @sparseobj: the sparse draw object to process
 * @drawctxt:  context the object was queued on
 *
 * Applies the sparse bind for the owning process via kgsl_sparse_bind()
 * and then retires the object's timestamp. Callers in this file invoke it
 * after dropping drawctxt->lock.
 */
static void _retire_sparseobj(struct kgsl_drawobj_sparse *sparseobj,
		struct adreno_context *drawctxt)
{
	kgsl_sparse_bind(drawctxt->base.proc_priv, sparseobj);
	_retire_timestamp(DRAWOBJ(sparseobj));
}
static int _retire_markerobj(struct kgsl_drawobj_cmd *cmdobj,
struct adreno_context *drawctxt)
{
@ -436,6 +443,8 @@ static struct kgsl_drawobj *_process_drawqueue_get_next_drawobj(
return drawobj;
} else if (drawobj->type == SYNCOBJ_TYPE)
ret = _retire_syncobj(SYNCOBJ(drawobj), drawctxt);
else
return ERR_PTR(-EINVAL);
if (ret == -EAGAIN)
return ERR_PTR(-EAGAIN);
@ -670,6 +679,76 @@ static int sendcmd(struct adreno_device *adreno_dev,
return 0;
}
/*
* Retires all sync objs from the sparse context
* queue and returns one of the below
* a) next sparseobj
* b) -EAGAIN for syncobj with syncpoints pending
* c) -EINVAL for unexpected drawobj
* d) NULL for no sparseobj
*/
/*
 * _get_next_sparseobj() - find the next sparse object on a context queue
 * @drawctxt: context whose drawqueue is scanned; the caller holds
 *            drawctxt->lock
 *
 * Retires completed sync objects encountered while walking the queue and
 * returns one of:
 *  a) the next sparseobj on the queue (not popped)
 *  b) ERR_PTR(-EAGAIN) for a syncobj with syncpoints still pending
 *  c) ERR_PTR(-EINVAL) for an unexpected drawobj type
 *  d) NULL when no sparseobj is queued
 *
 * Cleanups vs. the previous version (behavior unchanged): the dead
 * initializer of @i, the redundant head==tail pre-check (the loop
 * condition already covers the empty queue) and the trailing "continue"
 * are gone.
 */
static struct kgsl_drawobj_sparse *_get_next_sparseobj(
		struct adreno_context *drawctxt)
{
	struct kgsl_drawobj *drawobj;
	unsigned int i;
	int ret = 0;

	for (i = drawctxt->drawqueue_head; i != drawctxt->drawqueue_tail;
			i = DRAWQUEUE_NEXT(i, ADRENO_CONTEXT_DRAWQUEUE_SIZE)) {
		drawobj = drawctxt->drawqueue[i];

		if (drawobj == NULL)
			return NULL;

		if (drawobj->type == SYNCOBJ_TYPE)
			ret = _retire_syncobj(SYNCOBJ(drawobj), drawctxt);
		else if (drawobj->type == SPARSEOBJ_TYPE)
			return SPARSEOBJ(drawobj);
		else
			return ERR_PTR(-EINVAL);

		/* Syncobj still has pending syncpoints - stop here */
		if (ret == -EAGAIN)
			return ERR_PTR(-EAGAIN);
	}

	return NULL;
}
/*
 * _process_drawqueue_sparse() - drain a sparse context's draw queue
 * @drawctxt: the sparse context to process
 *
 * Pops and executes sparse objects one at a time, dropping drawctxt->lock
 * around each bind so _retire_sparseobj() runs unlocked. Stops on the
 * first pending syncobj (-EAGAIN), on an unexpected object type (-EINVAL)
 * or when the queue is empty (0). The loop is bounded by the queue size,
 * so it cannot iterate more times than the queue can hold entries.
 */
static int _process_drawqueue_sparse(
		struct adreno_context *drawctxt)
{
	struct kgsl_drawobj_sparse *sparseobj;
	int ret = 0;
	unsigned int i;

	for (i = 0; i < ADRENO_CONTEXT_DRAWQUEUE_SIZE; i++) {
		spin_lock(&drawctxt->lock);
		sparseobj = _get_next_sparseobj(drawctxt);
		if (IS_ERR_OR_NULL(sparseobj)) {
			/* NULL (empty queue) maps to ret == 0 */
			if (IS_ERR(sparseobj))
				ret = PTR_ERR(sparseobj);
			spin_unlock(&drawctxt->lock);
			return ret;
		}

		/* Remove the object from the queue before unlocking */
		_pop_drawobj(drawctxt);
		spin_unlock(&drawctxt->lock);

		_retire_sparseobj(sparseobj, drawctxt);
	}

	return 0;
}
/**
* dispatcher_context_sendcmds() - Send commands from a context to the GPU
* @adreno_dev: Pointer to the adreno device struct
@ -689,6 +768,9 @@ static int dispatcher_context_sendcmds(struct adreno_device *adreno_dev,
int inflight = _drawqueue_inflight(dispatch_q);
unsigned int timestamp;
if (drawctxt->base.flags & KGSL_CONTEXT_SPARSE)
return _process_drawqueue_sparse(drawctxt);
if (dispatch_q->inflight >= inflight) {
spin_lock(&drawctxt->lock);
_process_drawqueue_get_next_drawobj(drawctxt);
@ -1124,6 +1206,31 @@ static void _queue_drawobj(struct adreno_context *drawctxt,
trace_adreno_cmdbatch_queued(drawobj, drawctxt->queued);
}
/*
 * _queue_sparseobj() - queue a sparse object on a context
 * @adreno_dev: adreno device (unused in the body; kept for symmetry with
 *              the other _queue_* helpers)
 * @drawctxt:   context to queue on; the caller holds drawctxt->lock
 * @sparseobj:  sparse object being queued
 * @timestamp:  out parameter - timestamp assigned to the object
 * @user_ts:    user-requested timestamp
 *
 * Return: 0 when the object was queued, 1 when the caller should fastpath
 * the bind itself (nothing was queued), or a negative error code from
 * get_timestamp().
 */
static int _queue_sparseobj(struct adreno_device *adreno_dev,
	struct adreno_context *drawctxt, struct kgsl_drawobj_sparse *sparseobj,
	uint32_t *timestamp, unsigned int user_ts)
{
	struct kgsl_drawobj *drawobj = DRAWOBJ(sparseobj);
	int ret;

	ret = get_timestamp(drawctxt, drawobj, timestamp, user_ts);
	if (ret)
		return ret;

	/*
	 * See if we can fastpath this thing - if nothing is
	 * queued bind/unbind without queueing the context
	 */
	if (!drawctxt->queued)
		return 1;

	drawctxt->queued_timestamp = *timestamp;
	_queue_drawobj(drawctxt, drawobj);

	return 0;
}
static int _queue_markerobj(struct adreno_device *adreno_dev,
struct adreno_context *drawctxt, struct kgsl_drawobj_cmd *markerobj,
uint32_t *timestamp, unsigned int user_ts)
@ -1141,7 +1248,6 @@ static int _queue_markerobj(struct adreno_device *adreno_dev,
*/
if (!drawctxt->queued && kgsl_check_timestamp(drawobj->device,
drawobj->context, drawctxt->queued_timestamp)) {
trace_adreno_cmdbatch_queued(drawobj, drawctxt->queued);
_retire_timestamp(drawobj);
return 1;
}
@ -1212,7 +1318,7 @@ static void _queue_syncobj(struct adreno_context *drawctxt,
}
/**
* adreno_dispactcher_queue_drawobj() - Queue a new draw object in the context
 * adreno_dispatcher_queue_cmds() - Queue a new draw object in the context
* @dev_priv: Pointer to the device private struct
* @context: Pointer to the kgsl draw context
* @drawobj: Pointer to the array of drawobj's being submitted
@ -1234,6 +1340,9 @@ int adreno_dispatcher_queue_cmds(struct kgsl_device_private *dev_priv,
int ret;
unsigned int i, user_ts;
if (!count)
return -EINVAL;
ret = _check_context_state(&drawctxt->base);
if (ret)
return ret;
@ -1283,6 +1392,20 @@ int adreno_dispatcher_queue_cmds(struct kgsl_device_private *dev_priv,
_queue_syncobj(drawctxt, SYNCOBJ(drawobj[i]),
timestamp);
break;
case SPARSEOBJ_TYPE:
ret = _queue_sparseobj(adreno_dev, drawctxt,
SPARSEOBJ(drawobj[i]),
timestamp, user_ts);
if (ret == 1) {
spin_unlock(&drawctxt->lock);
_retire_sparseobj(SPARSEOBJ(drawobj[i]),
drawctxt);
return 0;
} else if (ret) {
spin_unlock(&drawctxt->lock);
return ret;
}
break;
default:
spin_unlock(&drawctxt->lock);
return -EINVAL;

View file

@ -351,7 +351,8 @@ adreno_drawctxt_create(struct kgsl_device_private *dev_priv,
KGSL_CONTEXT_IFH_NOP |
KGSL_CONTEXT_SECURE |
KGSL_CONTEXT_PREEMPT_STYLE_MASK |
KGSL_CONTEXT_NO_SNAPSHOT);
KGSL_CONTEXT_NO_SNAPSHOT |
KGSL_CONTEXT_SPARSE);
/* Check for errors before trying to initialize */

View file

@ -1439,6 +1439,17 @@ long kgsl_ioctl_device_waittimestamp_ctxtid(
return result;
}
/*
 * _check_context_is_sparse() - test whether a submission involves sparse
 * (bind) handling
 * @context: the kgsl context of the submission
 * @flags:   the submission flags supplied by userspace
 *
 * Used by the regular command ioctls to reject submissions on sparse
 * contexts or with the sparse flag set.
 *
 * Return: true when either the context was created with
 * KGSL_CONTEXT_SPARSE or the submission carries KGSL_DRAWOBJ_SPARSE.
 */
static inline bool _check_context_is_sparse(struct kgsl_context *context,
	uint64_t flags)
{
	/* Collapse the if/return-true/return-false idiom into one expression */
	return (context->flags & KGSL_CONTEXT_SPARSE) ||
			(flags & KGSL_DRAWOBJ_SPARSE);
}
long kgsl_ioctl_rb_issueibcmds(struct kgsl_device_private *dev_priv,
unsigned int cmd, void *data)
{
@ -1463,6 +1474,11 @@ long kgsl_ioctl_rb_issueibcmds(struct kgsl_device_private *dev_priv,
if (context == NULL)
return -EINVAL;
if (_check_context_is_sparse(context, param->flags)) {
kgsl_context_put(context);
return -EINVAL;
}
cmdobj = kgsl_drawobj_cmd_create(device, context, param->flags,
CMDOBJ_TYPE);
if (IS_ERR(cmdobj)) {
@ -1558,6 +1574,11 @@ long kgsl_ioctl_submit_commands(struct kgsl_device_private *dev_priv,
if (context == NULL)
return -EINVAL;
if (_check_context_is_sparse(context, param->flags)) {
kgsl_context_put(context);
return -EINVAL;
}
if (type & SYNCOBJ_TYPE) {
struct kgsl_drawobj_sync *syncobj =
kgsl_drawobj_sync_create(device, context);
@ -1632,6 +1653,11 @@ long kgsl_ioctl_gpu_command(struct kgsl_device_private *dev_priv,
if (context == NULL)
return -EINVAL;
if (_check_context_is_sparse(context, param->flags)) {
kgsl_context_put(context);
return -EINVAL;
}
if (type & SYNCOBJ_TYPE) {
struct kgsl_drawobj_sync *syncobj =
kgsl_drawobj_sync_create(device, context);
@ -3742,6 +3768,128 @@ long kgsl_ioctl_sparse_bind(struct kgsl_device_private *dev_priv,
return ret;
}
/*
 * kgsl_ioctl_gpu_sparse_command() - Handler for IOCTL_KGSL_GPU_SPARSE_COMMAND.
 * Builds up to two draw objects (an optional sync object followed by an
 * optional sparse bind/unbind object) from the user supplied lists and queues
 * them on the target context.
 *
 * Returns 0 on success or a negative error code. On failure any draw objects
 * created so far are destroyed before returning (except for -EPROTO, see
 * below).
 */
long kgsl_ioctl_gpu_sparse_command(struct kgsl_device_private *dev_priv,
		unsigned int cmd, void *data)
{
	struct kgsl_gpu_sparse_command *param = data;
	struct kgsl_device *device = dev_priv->device;
	struct kgsl_context *context;
	/* At most one syncobj and one sparseobj are ever queued together */
	struct kgsl_drawobj *drawobj[2];
	struct kgsl_drawobj_sparse *sparseobj;
	long result;
	unsigned int i = 0;

	/* Make sure sparse and syncpoint count isn't too big */
	if (param->numsparse > KGSL_MAX_SPARSE ||
		param->numsyncs > KGSL_MAX_SYNCPOINTS)
		return -EINVAL;

	/* Make sure there is atleast one sparse or sync */
	if (param->numsparse == 0 && param->numsyncs == 0)
		return -EINVAL;

	/* Only Sparse commands are supported in this ioctl */
	if (!(param->flags & KGSL_DRAWOBJ_SPARSE) || (param->flags &
			(KGSL_DRAWOBJ_SUBMIT_IB_LIST | KGSL_DRAWOBJ_MARKER
			| KGSL_DRAWOBJ_SYNC)))
		return -EINVAL;

	/* Takes a reference on the context; released before every return */
	context = kgsl_context_get_owner(dev_priv, param->context_id);
	if (context == NULL)
		return -EINVAL;

	/* Restrict bind commands to bind context */
	if (!(context->flags & KGSL_CONTEXT_SPARSE)) {
		kgsl_context_put(context);
		return -EINVAL;
	}

	/*
	 * Note: at least one of the two branches below runs (the zero/zero
	 * case was rejected above), so 'result' is always assigned before
	 * it is tested at 'done'.
	 */
	if (param->numsyncs) {
		struct kgsl_drawobj_sync *syncobj = kgsl_drawobj_sync_create(
				device, context);
		if (IS_ERR(syncobj)) {
			result = PTR_ERR(syncobj);
			goto done;
		}

		drawobj[i++] = DRAWOBJ(syncobj);
		result = kgsl_drawobj_sync_add_synclist(device, syncobj,
				to_user_ptr(param->synclist),
				param->syncsize, param->numsyncs);
		if (result)
			goto done;
	}

	if (param->numsparse) {
		sparseobj = kgsl_drawobj_sparse_create(device, context,
				param->flags);
		if (IS_ERR(sparseobj)) {
			result = PTR_ERR(sparseobj);
			goto done;
		}

		sparseobj->id = param->id;
		drawobj[i++] = DRAWOBJ(sparseobj);
		result = kgsl_drawobj_sparse_add_sparselist(device, sparseobj,
				param->id, to_user_ptr(param->sparselist),
				param->sparsesize, param->numsparse);
		if (result)
			goto done;
	}

	/* Hand both objects to the device-specific dispatcher in one call */
	result = dev_priv->device->ftbl->queue_cmds(dev_priv, context,
			drawobj, i, &param->timestamp);

done:
	/*
	 * -EPROTO is a "success" error - it just tells the user that the
	 * context had previously faulted
	 */
	if (result && result != -EPROTO)
		while (i--)
			kgsl_drawobj_destroy(drawobj[i]);

	kgsl_context_put(context);
	return result;
}
void kgsl_sparse_bind(struct kgsl_process_private *private,
struct kgsl_drawobj_sparse *sparseobj)
{
struct kgsl_sparseobj_node *sparse_node;
struct kgsl_mem_entry *virt_entry = NULL;
long ret = 0;
char *name;
virt_entry = kgsl_sharedmem_find_id_flags(private, sparseobj->id,
KGSL_MEMFLAGS_SPARSE_VIRT);
if (virt_entry == NULL)
return;
list_for_each_entry(sparse_node, &sparseobj->sparselist, node) {
if (sparse_node->obj.flags & KGSL_SPARSE_BIND) {
ret = sparse_bind_range(private, &sparse_node->obj,
virt_entry);
name = "bind";
} else {
ret = sparse_unbind_range(&sparse_node->obj,
virt_entry);
name = "unbind";
}
if (ret)
KGSL_CORE_ERR("kgsl: Unable to '%s' ret %ld virt_id %d, phys_id %d, virt_offset %16.16llX, phys_offset %16.16llX, size %16.16llX, flags %16.16llX\n",
name, ret, sparse_node->virt_id,
sparse_node->obj.id,
sparse_node->obj.virtoffset,
sparse_node->obj.physoffset,
sparse_node->obj.size, sparse_node->obj.flags);
}
kgsl_mem_entry_put(virt_entry);
}
EXPORT_SYMBOL(kgsl_sparse_bind);
long kgsl_ioctl_gpuobj_info(struct kgsl_device_private *dev_priv,
unsigned int cmd, void *data)
{
@ -4656,7 +4804,7 @@ static void kgsl_core_exit(void)
kgsl_driver.class = NULL;
}
kgsl_drawobj_exit();
kgsl_drawobjs_cache_exit();
kgsl_memfree_exit();
unregister_chrdev_region(kgsl_driver.major, KGSL_DEVICE_MAX);
@ -4732,7 +4880,7 @@ static int __init kgsl_core_init(void)
kgsl_events_init();
result = kgsl_drawobj_init();
result = kgsl_drawobjs_cache_init();
if (result)
goto err;

View file

@ -100,6 +100,7 @@ static inline void KGSL_STATS_ADD(uint64_t size, atomic_long_t *stat,
#define KGSL_MAX_NUMIBS 100000
#define KGSL_MAX_SYNCPOINTS 32
#define KGSL_MAX_SPARSE 1000
struct kgsl_device;
struct kgsl_context;
@ -432,6 +433,8 @@ long kgsl_ioctl_sparse_bind(struct kgsl_device_private *dev_priv,
unsigned int cmd, void *data);
long kgsl_ioctl_sparse_unbind(struct kgsl_device_private *dev_priv,
unsigned int cmd, void *data);
long kgsl_ioctl_gpu_sparse_command(struct kgsl_device_private *dev_priv,
unsigned int cmd, void *data);
void kgsl_mem_entry_destroy(struct kref *kref);

View file

@ -382,6 +382,8 @@ static const struct kgsl_ioctl kgsl_compat_ioctl_funcs[] = {
kgsl_ioctl_sparse_virt_free),
KGSL_IOCTL_FUNC(IOCTL_KGSL_SPARSE_BIND,
kgsl_ioctl_sparse_bind),
KGSL_IOCTL_FUNC(IOCTL_KGSL_GPU_SPARSE_COMMAND,
kgsl_ioctl_gpu_sparse_command),
};
long kgsl_compat_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)

View file

@ -203,6 +203,18 @@ struct kgsl_memobj_node {
unsigned long priv;
};
/**
 * struct kgsl_sparseobj_node - Sparse object descriptor
 * @node: Local list node for the sparse cmdbatch
 * @virt_id: Virtual ID to bind/unbind
 * @obj: struct kgsl_sparse_binding_object copied from userspace
 *
 * Nodes are allocated from the sparseobjs_cache kmem cache and linked on
 * the sparselist of a struct kgsl_drawobj_sparse.
 */
struct kgsl_sparseobj_node {
	struct list_head node;
	unsigned int virt_id;
	struct kgsl_sparse_binding_object obj;
};
struct kgsl_device {
struct device *dev;
const char *name;
@ -639,6 +651,9 @@ long kgsl_ioctl_copy_in(unsigned int kernel_cmd, unsigned int user_cmd,
long kgsl_ioctl_copy_out(unsigned int kernel_cmd, unsigned int user_cmd,
unsigned long, unsigned char *ptr);
void kgsl_sparse_bind(struct kgsl_process_private *private,
struct kgsl_drawobj_sparse *sparse);
/**
* kgsl_context_put() - Release context reference count
* @context: Pointer to the KGSL context to be released

View file

@ -37,10 +37,12 @@
#include "kgsl_compat.h"
/*
* Define an kmem cache for the memobj structures since we allocate and free
* them so frequently
* Define an kmem cache for the memobj & sparseobj structures since we
* allocate and free them so frequently
*/
static struct kmem_cache *memobjs_cache;
static struct kmem_cache *sparseobjs_cache;
static void drawobj_destroy_object(struct kref *kref)
{
@ -60,6 +62,9 @@ static void drawobj_destroy_object(struct kref *kref)
case MARKEROBJ_TYPE:
kfree(CMDOBJ(drawobj));
break;
case SPARSEOBJ_TYPE:
kfree(SPARSEOBJ(drawobj));
break;
}
}
@ -211,6 +216,18 @@ static inline void memobj_list_free(struct list_head *list)
}
}
static void drawobj_destroy_sparse(struct kgsl_drawobj *drawobj)
{
struct kgsl_sparseobj_node *mem, *tmpmem;
struct list_head *list = &SPARSEOBJ(drawobj)->sparselist;
/* Free the sparse mem here */
list_for_each_entry_safe(mem, tmpmem, list, node) {
list_del_init(&mem->node);
kmem_cache_free(sparseobjs_cache, mem);
}
}
static void drawobj_destroy_sync(struct kgsl_drawobj *drawobj)
{
struct kgsl_drawobj_sync *syncobj = SYNCOBJ(drawobj);
@ -297,6 +314,8 @@ void kgsl_drawobj_destroy(struct kgsl_drawobj *drawobj)
drawobj_destroy_sync(drawobj);
else if (drawobj->type & (CMDOBJ_TYPE | MARKEROBJ_TYPE))
drawobj_destroy_cmd(drawobj);
else if (drawobj->type == SPARSEOBJ_TYPE)
drawobj_destroy_sparse(drawobj);
else
return;
@ -610,16 +629,26 @@ int kgsl_drawobj_cmd_add_ibdesc(struct kgsl_device *device,
return 0;
}
static inline int drawobj_init(struct kgsl_device *device,
struct kgsl_context *context, struct kgsl_drawobj *drawobj,
static void *_drawobj_create(struct kgsl_device *device,
struct kgsl_context *context, unsigned int size,
unsigned int type)
{
void *obj = kzalloc(size, GFP_KERNEL);
struct kgsl_drawobj *drawobj;
if (obj == NULL)
return ERR_PTR(-ENOMEM);
/*
* Increase the reference count on the context so it doesn't disappear
* during the lifetime of this object
*/
if (!_kgsl_context_get(context))
return -ENOENT;
if (!_kgsl_context_get(context)) {
kfree(obj);
return ERR_PTR(-ENOENT);
}
drawobj = obj;
kref_init(&drawobj->refcount);
@ -627,7 +656,28 @@ static inline int drawobj_init(struct kgsl_device *device,
drawobj->context = context;
drawobj->type = type;
return 0;
return obj;
}
/**
 * kgsl_drawobj_sparse_create() - Create a new sparse obj structure
 * @device: Pointer to a KGSL device struct
 * @context: Pointer to a KGSL context struct
 * @flags: Flags for the sparse obj
 *
 * Allocate a new kgsl_drawobj_sparse structure with an empty sparselist.
 * Returns the new object or an ERR_PTR value on failure.
 */
struct kgsl_drawobj_sparse *kgsl_drawobj_sparse_create(
		struct kgsl_device *device,
		struct kgsl_context *context, unsigned int flags)
{
	struct kgsl_drawobj_sparse *obj;

	obj = _drawobj_create(device, context, sizeof(*obj), SPARSEOBJ_TYPE);
	if (IS_ERR(obj))
		return obj;

	INIT_LIST_HEAD(&obj->sparselist);
	return obj;
}
/**
@ -641,18 +691,13 @@ static inline int drawobj_init(struct kgsl_device *device,
/*
 * kgsl_drawobj_sync_create() - Allocate a new sync draw object for @context.
 * Returns the new object or an ERR_PTR value on failure.
 *
 * NOTE(review): this span was a rendered diff that interleaved the removed
 * kzalloc/drawobj_init path with the added _drawobj_create path, leaving two
 * conflicting initializations of 'syncobj'; this is the reconstructed
 * post-change function.
 */
struct kgsl_drawobj_sync *kgsl_drawobj_sync_create(struct kgsl_device *device,
		struct kgsl_context *context)
{
	struct kgsl_drawobj_sync *syncobj = _drawobj_create(device,
		context, sizeof(*syncobj), SYNCOBJ_TYPE);

	/* Add a timer to help debug sync deadlocks */
	if (!IS_ERR(syncobj))
		setup_timer(&syncobj->timer, syncobj_timer,
				(unsigned long) syncobj);

	return syncobj;
}
@ -671,27 +716,13 @@ struct kgsl_drawobj_cmd *kgsl_drawobj_cmd_create(struct kgsl_device *device,
struct kgsl_context *context, unsigned int flags,
unsigned int type)
{
struct kgsl_drawobj_cmd *cmdobj = kzalloc(sizeof(*cmdobj), GFP_KERNEL);
struct kgsl_drawobj *drawobj;
struct kgsl_drawobj_cmd *cmdobj = _drawobj_create(device,
context, sizeof(*cmdobj),
(type & (CMDOBJ_TYPE | MARKEROBJ_TYPE)));
if (cmdobj == NULL)
return ERR_PTR(-ENOMEM);
type &= CMDOBJ_TYPE | MARKEROBJ_TYPE;
if (type == 0) {
kfree(cmdobj);
return ERR_PTR(-EINVAL);
}
drawobj = DRAWOBJ(cmdobj);
if (drawobj_init(device, context, drawobj, type)) {
kfree(cmdobj);
return ERR_PTR(-ENOENT);
}
/* sanitize our flags for drawobj's */
drawobj->flags = flags & (KGSL_DRAWOBJ_CTX_SWITCH
if (!IS_ERR(cmdobj)) {
/* sanitize our flags for drawobj's */
cmdobj->base.flags = flags & (KGSL_DRAWOBJ_CTX_SWITCH
| KGSL_DRAWOBJ_MARKER
| KGSL_DRAWOBJ_END_OF_FRAME
| KGSL_DRAWOBJ_PWR_CONSTRAINT
@ -699,8 +730,9 @@ struct kgsl_drawobj_cmd *kgsl_drawobj_cmd_create(struct kgsl_device *device,
| KGSL_DRAWOBJ_PROFILING
| KGSL_DRAWOBJ_PROFILING_KTIME);
INIT_LIST_HEAD(&cmdobj->cmdlist);
INIT_LIST_HEAD(&cmdobj->memlist);
INIT_LIST_HEAD(&cmdobj->cmdlist);
INIT_LIST_HEAD(&cmdobj->memlist);
}
return cmdobj;
}
@ -864,7 +896,7 @@ int kgsl_drawobj_sync_add_syncpoints(struct kgsl_device *device,
return 0;
}
static int drawobj_add_object(struct list_head *head,
static int kgsl_drawobj_add_memobject(struct list_head *head,
struct kgsl_command_object *obj)
{
struct kgsl_memobj_node *mem;
@ -884,6 +916,62 @@ static int drawobj_add_object(struct list_head *head,
return 0;
}
/*
 * Allocate a sparseobj node from the cache, copy one bind/unbind descriptor
 * into it and append it to @head. Returns 0 on success or -ENOMEM.
 */
static int kgsl_drawobj_add_sparseobject(struct list_head *head,
		struct kgsl_sparse_binding_object *obj, unsigned int virt_id)
{
	struct kgsl_sparseobj_node *sp_node =
		kmem_cache_alloc(sparseobjs_cache, GFP_KERNEL);

	if (!sp_node)
		return -ENOMEM;

	sp_node->virt_id = virt_id;
	sp_node->obj.id = obj->id;
	sp_node->obj.virtoffset = obj->virtoffset;
	sp_node->obj.physoffset = obj->physoffset;
	sp_node->obj.size = obj->size;
	sp_node->obj.flags = obj->flags;

	list_add_tail(&sp_node->node, head);
	return 0;
}
/*
 * kgsl_drawobj_sparse_add_sparselist() - Copy a user supplied array of
 * bind/unbind descriptors onto @sparseobj->sparselist.
 * @device: KGSL device (unused here, kept for API symmetry)
 * @sparseobj: Sparse draw object receiving the list nodes
 * @id: Virtual ID recorded in every node
 * @ptr: Userspace pointer to the first descriptor
 * @size: Userspace size of one descriptor element
 * @count: Number of descriptors to copy
 *
 * Returns 0 on success or a negative error code; each entry must request
 * either a bind or an unbind.
 */
int kgsl_drawobj_sparse_add_sparselist(struct kgsl_device *device,
		struct kgsl_drawobj_sparse *sparseobj, unsigned int id,
		void __user *ptr, unsigned int size, unsigned int count)
{
	struct kgsl_sparse_binding_object obj;
	int i, ret = 0;

	/*
	 * NOTE(review): presumably _verify_input_list() returns <= 0 for an
	 * empty/invalid list, so an empty list returns 0 here without
	 * touching sparseobj - confirm against its definition.
	 */
	ret = _verify_input_list(count, ptr, size);
	if (ret <= 0)
		return ret;

	for (i = 0; i < count; i++) {
		/* Zero first so a short user element leaves sane defaults */
		memset(&obj, 0, sizeof(obj));

		ret = _copy_from_user(&obj, ptr, sizeof(obj), size);
		if (ret)
			return ret;

		/* Every entry must be an explicit bind or unbind request */
		if (!(obj.flags & (KGSL_SPARSE_BIND | KGSL_SPARSE_UNBIND)))
			return -EINVAL;

		ret = kgsl_drawobj_add_sparseobject(&sparseobj->sparselist,
			&obj, id);
		if (ret)
			return ret;

		/* Advance by the kernel element size, not the user size */
		ptr += sizeof(obj);
	}

	sparseobj->size = size;
	sparseobj->count = count;

	return 0;
}
#define CMDLIST_FLAGS \
(KGSL_CMDLIST_IB | \
KGSL_CMDLIST_CTXTSWITCH_PREAMBLE | \
@ -922,7 +1010,7 @@ int kgsl_drawobj_cmd_add_cmdlist(struct kgsl_device *device,
return -EINVAL;
}
ret = drawobj_add_object(&cmdobj->cmdlist, &obj);
ret = kgsl_drawobj_add_memobject(&cmdobj->cmdlist, &obj);
if (ret)
return ret;
@ -967,7 +1055,8 @@ int kgsl_drawobj_cmd_add_memlist(struct kgsl_device *device,
add_profiling_buffer(device, cmdobj, obj.gpuaddr,
obj.size, obj.id, obj.offset);
else {
ret = drawobj_add_object(&cmdobj->memlist, &obj);
ret = kgsl_drawobj_add_memobject(&cmdobj->memlist,
&obj);
if (ret)
return ret;
}
@ -1018,19 +1107,19 @@ int kgsl_drawobj_sync_add_synclist(struct kgsl_device *device,
return 0;
}
void kgsl_drawobj_exit(void)
void kgsl_drawobjs_cache_exit(void)
{
if (memobjs_cache != NULL)
kmem_cache_destroy(memobjs_cache);
kmem_cache_destroy(memobjs_cache);
kmem_cache_destroy(sparseobjs_cache);
}
int kgsl_drawobj_init(void)
int kgsl_drawobjs_cache_init(void)
{
memobjs_cache = KMEM_CACHE(kgsl_memobj_node, 0);
if (memobjs_cache == NULL) {
KGSL_CORE_ERR("failed to create memobjs_cache");
sparseobjs_cache = KMEM_CACHE(kgsl_sparseobj_node, 0);
if (!memobjs_cache || !sparseobjs_cache)
return -ENOMEM;
}
return 0;
}

View file

@ -18,10 +18,13 @@
container_of(obj, struct kgsl_drawobj_sync, base)
#define CMDOBJ(obj) \
container_of(obj, struct kgsl_drawobj_cmd, base)
#define SPARSEOBJ(obj) \
container_of(obj, struct kgsl_drawobj_sparse, base)
#define CMDOBJ_TYPE BIT(0)
#define MARKEROBJ_TYPE BIT(1)
#define SYNCOBJ_TYPE BIT(2)
#define SPARSEOBJ_TYPE BIT(3)
/**
* struct kgsl_drawobj - KGSL drawobj descriptor
@ -45,7 +48,7 @@ struct kgsl_drawobj {
* struct kgsl_drawobj_cmd - KGSL command obj, This covers marker
* cmds also since markers are special form of cmds that do not
* need their cmds to be executed.
* @base: Base kgsl_drawobj
* @base: Base kgsl_drawobj, this needs to be the first entry
* @priv: Internal flags
* @global_ts: The ringbuffer timestamp corresponding to this
* command obj
@ -123,6 +126,22 @@ struct kgsl_drawobj_sync_event {
struct kgsl_device *device;
};
/**
 * struct kgsl_drawobj_sparse - KGSL sparse obj descriptor
 * @base: Base kgsl_drawobj, this needs to be the first entry
 * @id: virtual id of the bind/unbind
 * @sparselist: list of binds/unbinds (struct kgsl_sparseobj_node entries)
 * @size: Size of each user-supplied kgsl_sparse_binding_object element
 * @count: Number of elements on @sparselist
 */
struct kgsl_drawobj_sparse {
	struct kgsl_drawobj base;
	unsigned int id;
	struct list_head sparselist;
	unsigned int size;
	unsigned int count;
};
#define KGSL_DRAWOBJ_FLAGS \
{ KGSL_DRAWOBJ_MARKER, "MARKER" }, \
{ KGSL_DRAWOBJ_CTX_SWITCH, "CTX_SWITCH" }, \
@ -172,9 +191,15 @@ int kgsl_drawobj_sync_add_synclist(struct kgsl_device *device,
int kgsl_drawobj_sync_add_sync(struct kgsl_device *device,
struct kgsl_drawobj_sync *syncobj,
struct kgsl_cmd_syncpoint *sync);
struct kgsl_drawobj_sparse *kgsl_drawobj_sparse_create(
struct kgsl_device *device,
struct kgsl_context *context, unsigned int flags);
int kgsl_drawobj_sparse_add_sparselist(struct kgsl_device *device,
struct kgsl_drawobj_sparse *sparseobj, unsigned int id,
void __user *ptr, unsigned int size, unsigned int count);
int kgsl_drawobj_init(void);
void kgsl_drawobj_exit(void);
int kgsl_drawobjs_cache_init(void);
void kgsl_drawobjs_cache_exit(void);
void kgsl_dump_syncpoints(struct kgsl_device *device,
struct kgsl_drawobj_sync *syncobj);

View file

@ -100,6 +100,8 @@ static const struct kgsl_ioctl kgsl_ioctl_funcs[] = {
kgsl_ioctl_sparse_virt_free),
KGSL_IOCTL_FUNC(IOCTL_KGSL_SPARSE_BIND,
kgsl_ioctl_sparse_bind),
KGSL_IOCTL_FUNC(IOCTL_KGSL_GPU_SPARSE_COMMAND,
kgsl_ioctl_gpu_sparse_command),
};
long kgsl_ioctl_copy_in(unsigned int kernel_cmd, unsigned int user_cmd,

View file

@ -278,6 +278,7 @@ int pil_mss_shutdown(struct pil_desc *pil)
struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);
int ret = 0;
dev_info(pil->dev, "MSS is shutting down\n");
if (drv->axi_halt_base) {
pil_q6v5_halt_axi_port(pil,
drv->axi_halt_base + MSS_Q6_HALT_BASE);
@ -542,7 +543,7 @@ int pil_mss_reset_load_mba(struct pil_desc *pil)
{
struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);
struct modem_data *md = dev_get_drvdata(pil->dev);
const struct firmware *fw, *dp_fw;
const struct firmware *fw, *dp_fw = NULL;
char fw_name_legacy[10] = "mba.b00";
char fw_name[10] = "mba.mbn";
char *dp_name = "msadp";

View file

@ -512,6 +512,8 @@ static int __pil_q6v55_reset(struct pil_desc *pil)
val |= BIT(i);
writel_relaxed(val, drv->reg_base +
QDSP6V6SS_MEM_PWR_CTL);
val = readl_relaxed(drv->reg_base +
QDSP6V6SS_MEM_PWR_CTL);
/*
* Wait for 1us for both memory peripheral and
* data array to turn on.

View file

@ -0,0 +1,42 @@
/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

/* Clock IDs exposed by the MDSS PLL common clock framework provider */

#ifndef __MDSS_PLL_CLK_H
#define __MDSS_PLL_CLK_H

/* DSI PLL clocks */
/* DSI PLL 0 clock IDs (including shadow variants for seamless switch) */
#define BYTE0_MUX_CLK 0
#define BYTE0_SRC_CLK 1
#define PIX0_MUX_CLK 2
#define PIX0_SRC_CLK 3
#define N2_DIV_0_CLK 4
#define POST_N1_DIV_0_CLK 5
#define VCO_CLK_0_CLK 6
#define SHADOW_BYTE0_SRC_CLK 7
#define SHADOW_PIX0_SRC_CLK 8
#define SHADOW_N2_DIV_0_CLK 9
#define SHADOW_POST_N1_DIV_0_CLK 10
#define SHADOW_VCO_CLK_0_CLK 11
/* DSI PLL 1 clock IDs (including shadow variants) */
#define BYTE1_MUX_CLK 12
#define BYTE1_SRC_CLK 13
#define PIX1_MUX_CLK 14
#define PIX1_SRC_CLK 15
#define N2_DIV_1_CLK 16
#define POST_N1_DIV_1_CLK 17
#define VCO_CLK_1_CLK 18
#define SHADOW_BYTE1_SRC_CLK 19
#define SHADOW_PIX1_SRC_CLK 20
#define SHADOW_N2_DIV_1_CLK 21
#define SHADOW_POST_N1_DIV_1_CLK 22
#define SHADOW_VCO_CLK_1_CLK 23

#endif

View file

@ -50,6 +50,7 @@
#define KGSL_CONTEXT_IFH_NOP 0x00010000
#define KGSL_CONTEXT_SECURE 0x00020000
#define KGSL_CONTEXT_NO_SNAPSHOT 0x00040000
#define KGSL_CONTEXT_SPARSE 0x00080000
#define KGSL_CONTEXT_PREEMPT_STYLE_MASK 0x0E000000
#define KGSL_CONTEXT_PREEMPT_STYLE_SHIFT 25
@ -89,6 +90,7 @@
#define KGSL_CMDBATCH_END_OF_FRAME KGSL_CONTEXT_END_OF_FRAME /* 0x100 */
#define KGSL_CMDBATCH_SYNC KGSL_CONTEXT_SYNC /* 0x400 */
#define KGSL_CMDBATCH_PWR_CONSTRAINT KGSL_CONTEXT_PWR_CONSTRAINT /* 0x800 */
#define KGSL_CMDBATCH_SPARSE 0x1000 /* 0x1000 */
/*
* Reserve bits [16:19] and bits [28:31] for possible bits shared between
@ -1556,4 +1558,34 @@ struct kgsl_sparse_bind {
#define IOCTL_KGSL_SPARSE_BIND \
_IOW(KGSL_IOC_TYPE, 0x54, struct kgsl_sparse_bind)
/**
 * struct kgsl_gpu_sparse_command - Argument for
 * IOCTL_KGSL_GPU_SPARSE_COMMAND
 * @flags: Current flags for the object
 * @sparselist: List of kgsl_sparse_binding_object to bind/unbind
 * @synclist: List of kgsl_command_syncpoints
 * @sparsesize: Size of kgsl_sparse_binding_object
 * @numsparse: Number of elements in list
 * @syncsize: Size of kgsl_command_syncpoint structure
 * @numsyncs: Number of kgsl_command_syncpoints in syncpoint list
 * @context_id: Context ID submitting the kgsl_gpu_command
 * @timestamp: Timestamp for the submitted commands
 * @id: Virtual ID to bind/unbind
 */
struct kgsl_gpu_sparse_command {
	uint64_t flags;
	uint64_t __user sparselist;
	uint64_t __user synclist;
	unsigned int sparsesize;
	unsigned int numsparse;
	unsigned int syncsize;
	unsigned int numsyncs;
	unsigned int context_id;
	unsigned int timestamp;
	unsigned int id;
};
#define IOCTL_KGSL_GPU_SPARSE_COMMAND \
_IOWR(KGSL_IOC_TYPE, 0x55, struct kgsl_gpu_sparse_command)
#endif /* _UAPI_MSM_KGSL_H */