Promotion of kernel.lnx.4.4-161119.

CRs      Change ID                                   Subject
--------------------------------------------------------------------------------------------------------------
1088658   I2f994ae0250ffc8f740ea633324815ae429c74be   msm: ipa3: linearize large skbs
1077102   I09359b528b4742f72a76690930f3d0ed90bb2caa   msm: mdss: move warnings and errors out of mdss spinlock
1089895   I84185558fa6e80b13d7d0078bda9d75143680941   tcp: take care of truncations done by sk_filter()
1091511   Ia151b2dd5229f07790ac961af298305b24e098fb   msm: wlan: update regulatory database
1081957   I24820bd6254002f8a8db9604d230dcbce59b1beb   clk: qcom: Add support to be able to slew PLL
1081738   I10a788726358c56df9bfe11f2332e3823d7cd332   ARM: dts: msm: Enable auto GM for WLED in pmicobalt
1077726   I031ca48f0e0c39f1b2cb51081ecd55b086fb4c9b   msm: mdss: fix pp timeout during transition from LP1 to
1074985   Ib2268181a617c23d62b5b6f857be5327113b2a67   soc: qcom: smem: Redesign smem memory architecture
1090708   I9cda84d1c199b72ce8b9e2997601bcc7430ddbf3   ARM: dts: msm: Update the console uart gpios for msmfalc
1080245   I3b4cf83e776750d993d53331142223109bf0862e   clk: qcom: Add support for debugfs support
1087110   I3694952289c76394af8d40cd89fd2175f49ac127   msm: mdss: Add systrace for readptr_done
1089865   Ia73ab1ba51df7b501d246bb45141018409496d01   ARM: dts: msm: ensure contiguous MSI for PCIe on msmcoba
941978   Idee8691d769218d7e732c9b7f936a2c40946b239   Revert "scsi: ufs: stub UFS shutdown handler"
1091072   I7e9ada5de1f619c6a34a4b2e1764f5e908564ce5   iio: rradc: Update reading USBIN_V channel
1075082   I971e555ec8d02ccf4382e83132a696b065a8ff12   qseecom: improve error checks in qseecom_probe()
1080245   Ib67b3a3409c9e7d8adb710bb524f54f543abf712   clk: add/modify debugfs support for clocks
941978   Id499abc27303bfed72fab4d61abb872bad7d9043   scsi: ufs: error out all issued requests after shutdown
1083537   I73fc02b812f2e6694e2a6aa8bdad2381a5f19406   ASoC: msm: Fix sound card registration failure
1085331   I92e98ab46107fbcfd843898423b41716a204c2ae   ARM: dts: msm: Correct interrupt assignments for msmcoba
1073250   Idc9ca896b3fe6c1c6a72a066a6e453d27a3173e8   Asoc: clean up bootup errors
1091147   I30b8488a1c19815601e6a1c5bcbdeed53715f8fa   usb: phy: qusb: Make sure QUSB PHY is into proper state
1086292   I6482dc3d21fdc3e570fd53022e2fb9427668d939   msm: mdss: add null check before dereferencing src_fmt
1086292   I4812330453dedacd16dad1d920a2bacc3f67042b   msm: mdss: fix race condition in dsi clk off request
1088709   I21e1c029e6b245cfa26a187b35bb1f6845302484   clk: msm: Add the CLKFLAG_NO_RATE_CACHE flag for MM cloc
1082112   I171c91e700c24ecc213ccda705bbe6188d22a43a   scsi: ufs: fix sleep in atomic context
1091354   I9f928f0aad6af346de43965755beb039e422047a   Revert "defconfig: msm: avoid compilation of MDSS DP dri
1090727   I78d2c27743d30b90a96e3d8df60859f67db7ddb8   ARM: dts: msm: Add ufs regulators for msmfalcon interpos
1090029   I66f6de42b106fa2027285e7393b6f9fc143d00d8   leds: qpnp-flash: Fix the mask in the flash prepare API
1089181   I4a382915a6c3a6b9d445ec1f5d57fb499a011f1a   driver: thermal: msm_thermal: Enable Reliability algorit
1079438   Ib14c5b9121190dded5071ff60ecf0be8e5e5c232   ARM: dts: msm: Add physical dimensions for NT35597 panel
1060212   Iabe79bae5f9471c3c6128ed21efd04de00739daa   leds: qpnp-flash-v2: Add support for thermal derate feat
1091127   I7220ad565212c325514301e4c59415b807deb99a   ARM: dts: msm: Add gladiator support on msmfalcon and ms
1091440   I0eb8b9a357f172984612175d1b03dd872df91b6f   diag: Call diagmem_exit only if the mempool is initializ
1090076   Ia85688854f26fe871d5c1253c2d51d75d84deb8f   ARM: dts: msm: Add dummy regulator for LCDB bias
1064071   Ic0dedbad372fd9029b932dd99633a650049751ed   msm: kgsl: Fix pagetable member of struct kgsl_memdesc
1083537   I3d2765535793d6ef9153cfcab4b44a9adad67e15   ASoC: msm: Add support for USB/WCN/TDM Audio
1091141   I6ce48512df5973bf8a2a3081a3a6f8759aeb499f   ARM: dts: msm: Set USB core clock rate for USB2/USB3 for
1060212   Ie7a94f59e58b8f1b0816afda2496449694629205   leds: qpnp-flash-v2: add support to read pmic revid
1080701   If08ff46e72d537254e90707f28c849a86f262853   ARM: dts: msm: specify I2C configuration for msmfalcon
1079442   I822d6280b301b2db6194c845098c935e612ca61c   ASoC: wcd934x: Fix adie loopback through sidetone src pa
1089895   Idc52737bc96097a9220dfe47bb76e94ff1026a05   rose: limit sk_filter trim to payload
1091147   Ibfecfe1846d02b959bd249acac3fe4c57b88aaf0   USB: phy: qusb: Turn on vdd along with 1p8/3p3 LDOs when
1090701   I0e06be169edc2eb1d35ef7fc6c41ff1809aebd03   pinctrl: qcom: msmfalcon: Update gpios as per latest gpi
1086292   I422d53d008223a9b0520f499e629f681bb6afa05   mdss: mdp: avoid panic if recovery handler is uninitiali
1060212   I42503ccd2b2dcc62c5c868132d202b9698c9d216   leds: qpnp-flash-v2: change from dev_*() to pr_*() for l
1090076   Ie828c8568ef09c89cff157d16d3cb322647b6f6e   ARM: dts: msm: enable mdss power supplies for falcon tra
1074879   I8d224a70cbef162f27078b62b73acaa22670861d   sched/hmp: Enhance co-location and scheduler boost featu
1087471   I15323e3ef91401142d3841db59c18fd8fee753fd   sched: Remove thread group iteration from colocation
1085170   Ie23d473302d7fbda9b243a150e5c52d025007e4f   usb: pd: Stop processing SVDM if handler found
1091540   I61523188f45daca026b90943c845b43a8327f51e   qcom-charger: smb2: Disable try.SINK mode in the probe
1081738   Iee99e9d1b999c84ece075d2f17e9cdf6aef9a2ac   leds: qpnp-wled: Add support to configure AUTO_GM settin
1081922   I9aa7a000e75b50c6b26970deaba2131c87087b8c   msm: mdss: fix autorefresh disable during handoff
1075694   I9cf2f94892bdeb83fab0068902419b1603520364   msm: kgsl: preserve ISENSE registers across GPU power co
1085321 1085649   I3c9422f3a790c0c1633ab64d4213a088faaeb9e5   diag: Set the diag write buffers to busy state on channe
1090311   I96cdcb9e3642906b4afa08d9bde07e123d9b3977   USB: Allow skipping device resume during system resume
1074879   I470bcd0588e038b4a540d337fe6a412f2fa74920   sched: revise boost logic when boost_type is SCHED_BOOST
1087020   I6f9b7a630158355a7f920dcf9cfffe537b1c6a85   ASoC: msm: q6dspv2: fix potentional information leak
1089062   Icb04f6175b66fa46405e77d10fddf06b0051ee5f   phy: qcom-ufs: update ufs phy 1-lane settings
1082590   I4cdcbd31b5fa5ceac0eea7c743ea9286f231b80b   scsi: ufs: handle LINERESET during hibern8
1081738   I964b3452d0cdb3618b4ab446655ae75fa3a1049d   leds: qpnp-wled: Add support to configure auto PFM for p
1080245   I936496e553bc958c10e743fd8a225ffc7fbc0f79   clk: Add support to allow client to print all enabled cl
1079373   Ifd7b2b88e7ab4c952b743fede6e24795069d653a   qcom-charger: WA for legacy bit set on hard reboot
1090518   I7f1c0d9d84607821893a1e5d17934dae5acef5f4   clk: qcom: Add support for RCGs with dynamic and fixed s
1089865   I1e74f1b03c3e15880efdac7ff07aca2f628de99d   ARM: dts: msm: enable QGIC MSI for PCIe on msmcobalt
1088059   I66cbe48b7f4910228a6af57610a8427fea7fd1f2   msm: mdss: fix incorrect mutex unlocking during NOTIFY_U
1087418   Ia3fb69dca00654dacd8d1faae34715e40e097480   scsi: ufs: enable auto hibern8 only after device initial
1088216   I326eceeddff8e77d346c3365fa46cd539324451f   ARM: dts: msm: Add support for USB device for msmfalcon
1060212   Iafb7915e196a18b5f8076dda8fb06a4bd71a8e6e   leds: qpnp-flash-v2: Add support for configuring OTST th
1086372   Ia03380dfa4852c80fedb38f3c79f55d8d1a9a7f6   icnss: Reset mpm_wcssaon_config bits before top level re
1080245   I0a202af6f46c7cf164036d65487db5c40aab4063   clk: Add support for list_rates ops for clocks
1091477   I7435f05f20e12a7704ae5d9597b5cdc9b5a61d00   qcom-charger: Change usb_icl votable for PD vote
1089062   Ief5df61d91fbd765c595533b3380a602a2540e5e   scsi: ufs-qcom: update clock scaling sequence
1085217   I62de66e9b0bb1eeeac3c94d1ac1037285811b631   msm: ipa3: header file change for wdi-stats
1080674   I15ef73049cee76c6ea5b3916d9281bbd9fdfc563   ARM: dts: msm: specify UART configuration on msmfalcon.
1090525   I48c50bc320425c0db40cd4865e05c6b7a7fb5da3   msm: sde: remove secure camera ctrl_id definition
1061507   Iad71abbed72aa40b5c839260f5c297a885f7d128   ASoC: wcd-mbhc: correct cross connection check
1085064   Ib53902459646e590df4dc7fcb00f833d5e8f41ed   usb: pd: Don't suspend charging unless changing voltages
1064071   Ic0dedbad661143977a226d50263c26b5af579ce3   msm: kgsl: Make sure USE_CPU_MAP + MAP_USER_MEM work tog
1090862 987021   I0d1797a4df9ff67f3b162a1b5d26320ca989f54a   msm: mdss: hide additional kernel addresses from unprivi

Change-Id: Ic6272ada932975c2562cb87d4a617520002db3d3
CRs-Fixed: 1082112, 1075694, 1091440, 1085331, 1089062, 1081922, 1089895, 1077726, 1090029, 1061507, 1091354, 1074879, 987021, 1086292, 1085217, 1087020, 1080245, 1088709, 1089181, 1085064, 1087471, 1088059, 1080674, 1090862, 1079442, 1087418, 1090727, 1085649, 1064071, 1081738, 1086372, 941978, 1090518, 1090708, 1077102, 1090076, 1085321, 1091477, 1090701, 1090311, 1091511, 1091141, 1074985, 1079438, 1091147, 1075082, 1091127, 1087110, 1082590, 1081957, 1090525, 1085170, 1088658, 1080701, 1083537, 1091540, 1088216, 1079373, 1060212, 1073250, 1089865, 1091072
This commit is contained in:
Linux Build Service Account 2016-11-19 05:39:11 -07:00
commit 3c45c2a8a2
123 changed files with 7255 additions and 1706 deletions

View file

@ -11,6 +11,8 @@ Main node:
Required properties:
- compatible : Should be "qcom,qpnp-flash-led-v2"
- reg : Base address and size for flash LED modules
- qcom,pmic-revid : phandle of PMIC revid module. This is used to
identify the PMIC subtype.
Optional properties:
- interrupts : Specifies the interrupts associated with flash-led.
@ -76,6 +78,39 @@ Optional properties:
- qcom,thermal-derate-current : Array of current limits for thermal mitigation. Required if
qcom,thermal-derate-en is specified. Unit is mA. Format is
qcom,thermal-derate-current = <OTST1_LIMIT, OTST2_LIMIT, OTST3_LIMIT>.
- qcom,otst-ramp-back-up-dis : Boolean property to disable current ramp
backup after thermal derate trigger is
deasserted.
- qcom,thermal-derate-slow : Integer property to specify slow ramping
down thermal rate. Unit is in uS. Allowed
values are: 128, 256, 512, 1024, 2048, 4096,
8192 and 314592.
- qcom,thermal-derate-fast : Integer property to specify fast ramping
down thermal rate. Unit is in uS. Allowed
values are: 32, 64, 96, 128, 256, 384 and
512.
- qcom,thermal-debounce : Integer property to specify thermal debounce
time. It is only used if qcom,thermal-derate-en
is specified. Unit is in uS. Allowed values
are: 0, 16, 32, 64.
- qcom,thermal-hysteresis : Integer property to specify thermal derating
hysteresis. Unit is in deciDegC. It is only
used if qcom,thermal-derate-en is specified.
Allowed values are:
0, 15, 30, 45 for pmicobalt.
0, 20, 40, 60 for pm2falcon.
- qcom,thermal-thrsh1 : Integer property to specify OTST1 threshold
for thermal mitigation. Unit is in Celsius.
Accepted values are:
85, 79, 73, 67, 109, 103, 97, 91.
- qcom,thermal-thrsh2 : Integer property to specify OTST2 threshold
for thermal mitigation. Unit is in Celsius.
Accepted values are:
110, 104, 98, 92, 134, 128, 122, 116.
- qcom,thermal-thrsh3 : Integer property to specify OTST3 threshold
for thermal mitigation. Unit is in Celsius.
Accepted values are:
125, 119, 113, 107, 149, 143, 137, 131.
- qcom,hw-strobe-option : Integer type to specify hardware strobe option. Based on the specified
value, additional GPIO configuration may be required to provide strobing
support. Supported values are:

View file

@ -68,9 +68,15 @@ Optional properties for WLED:
- qcom,cons-sync-write-delay-us : Specify in 'us' the duration of delay between two consecutive writes to
SYNC register.
- qcom,sc-deb-cycles : debounce time for short circuit detection
- qcom,loop-ea-gm : control the gm for gm stage in control loop. default is 3.
- qcom,loop-auto-gm-en : A boolean property to specify if auto gm is enabled.
- qcom,loop-auto-gm-thresh : Specify auto gm threshold if "loop-auto-gm-en" is defined.
Supported values are: 0 - 3.
- qcom,lcd-auto-pfm-thresh : Specify the auto-pfm threshold, if the headroom voltage level
falls below this threshold and auto PFM is enabled, boost
controller will enter into PFM mode automatically.
Optional properties if 'qcom,disp-type-amoled' is mentioned in DT:
- qcom,loop-ea-gm : control the gm for gm stage in control loop. default is 3.
- qcom,loop-comp-res-kohm : control to select the compensation resistor in kohm. default is 320.
- qcom,vref-psm-mv : reference psm voltage in mv. default for amoled is 450.
- qcom,avdd-mode-spmi: Boolean property to enable AMOLED_VOUT programming via SPMI. If not specified,

View file

@ -44,6 +44,8 @@
qcom,mdss-dsi-dma-trigger = "trigger_sw";
qcom,mdss-dsi-mdp-trigger = "none";
qcom,mdss-dsi-reset-sequence = <1 10>, <0 10>, <1 10>;
qcom,mdss-pan-physical-width-dimension = <74>;
qcom,mdss-pan-physical-height-dimension = <131>;
qcom,mdss-dsi-te-pin-select = <1>;
qcom,mdss-dsi-wr-mem-start = <0x2c>;
qcom,mdss-dsi-wr-mem-continue = <0x3c>;

View file

@ -77,6 +77,8 @@
qcom,mdss-dsi-dma-trigger = "trigger_sw";
qcom,mdss-dsi-mdp-trigger = "none";
qcom,mdss-dsi-reset-sequence = <1 10>, <0 10>, <1 10>;
qcom,mdss-pan-physical-width-dimension = <74>;
qcom,mdss-pan-physical-height-dimension = <131>;
qcom,compression-mode = "dsc";
qcom,config-select = <&dsi_nt35597_dsc_video_config0>;

View file

@ -47,6 +47,8 @@
04 00];
qcom,adjust-timer-wakeup-ms = <1>;
qcom,mdss-dsi-reset-sequence = <1 10>, <0 10>, <1 10>;
qcom,mdss-pan-physical-width-dimension = <74>;
qcom,mdss-pan-physical-height-dimension = <131>;
qcom,mdss-dsi-t-clk-post = <0x0d>;
qcom,mdss-dsi-t-clk-pre = <0x2d>;
qcom,mdss-dsi-bl-max-level = <4095>;

View file

@ -70,6 +70,8 @@
qcom,mdss-dsi-dma-trigger = "trigger_sw";
qcom,mdss-dsi-mdp-trigger = "none";
qcom,mdss-dsi-reset-sequence = <1 10>, <0 10>, <1 10>;
qcom,mdss-pan-physical-width-dimension = <74>;
qcom,mdss-pan-physical-height-dimension = <131>;
qcom,mdss-dsi-min-refresh-rate = <55>;
qcom,mdss-dsi-max-refresh-rate = <60>;
qcom,mdss-dsi-pan-enable-dynamic-fps;

View file

@ -22,17 +22,13 @@
#iommu-cells = <0>;
qcom,register-save;
qcom,skip-init;
#global-interrupts = <2>;
interrupts = <GIC_SPI 229 IRQ_TYPE_EDGE_RISING>,
<GIC_SPI 231 IRQ_TYPE_EDGE_RISING>,
<GIC_SPI 364 IRQ_TYPE_EDGE_RISING>,
#global-interrupts = <0>;
interrupts = <GIC_SPI 364 IRQ_TYPE_EDGE_RISING>,
<GIC_SPI 365 IRQ_TYPE_EDGE_RISING>,
<GIC_SPI 366 IRQ_TYPE_EDGE_RISING>,
<GIC_SPI 367 IRQ_TYPE_EDGE_RISING>,
<GIC_SPI 368 IRQ_TYPE_EDGE_RISING>,
<GIC_SPI 369 IRQ_TYPE_EDGE_RISING>,
<GIC_SPI 370 IRQ_TYPE_EDGE_RISING>,
<GIC_SPI 431 IRQ_TYPE_EDGE_RISING>;
<GIC_SPI 369 IRQ_TYPE_EDGE_RISING>;
clocks = <&clock_gcc clk_aggre1_noc_clk>;
clock-names = "smmu_aggre1_noc_clk";
#clock-cells = <1>;
@ -45,10 +41,8 @@
#iommu-cells = <1>;
qcom,register-save;
qcom,skip-init;
#global-interrupts = <2>;
interrupts = <GIC_SPI 229 IRQ_TYPE_EDGE_RISING>,
<GIC_SPI 231 IRQ_TYPE_EDGE_RISING>,
<GIC_SPI 373 IRQ_TYPE_EDGE_RISING>,
#global-interrupts = <0>;
interrupts = <GIC_SPI 373 IRQ_TYPE_EDGE_RISING>,
<GIC_SPI 374 IRQ_TYPE_EDGE_RISING>,
<GIC_SPI 375 IRQ_TYPE_EDGE_RISING>,
<GIC_SPI 376 IRQ_TYPE_EDGE_RISING>,
@ -57,17 +51,7 @@
<GIC_SPI 462 IRQ_TYPE_EDGE_RISING>,
<GIC_SPI 463 IRQ_TYPE_EDGE_RISING>,
<GIC_SPI 464 IRQ_TYPE_EDGE_RISING>,
<GIC_SPI 465 IRQ_TYPE_EDGE_RISING>,
<GIC_SPI 466 IRQ_TYPE_EDGE_RISING>,
<GIC_SPI 467 IRQ_TYPE_EDGE_RISING>,
<GIC_SPI 353 IRQ_TYPE_EDGE_RISING>,
<GIC_SPI 354 IRQ_TYPE_EDGE_RISING>,
<GIC_SPI 355 IRQ_TYPE_EDGE_RISING>,
<GIC_SPI 356 IRQ_TYPE_EDGE_RISING>,
<GIC_SPI 357 IRQ_TYPE_EDGE_RISING>,
<GIC_SPI 358 IRQ_TYPE_EDGE_RISING>,
<GIC_SPI 359 IRQ_TYPE_EDGE_RISING>,
<GIC_SPI 360 IRQ_TYPE_EDGE_RISING>;
<GIC_SPI 465 IRQ_TYPE_EDGE_RISING>;
clocks = <&clock_gcc clk_aggre2_noc_clk>;
clock-names = "smmu_aggre2_noc_clk";
#clock-cells = <1>;
@ -81,10 +65,8 @@
qcom,tz-device-id = "LPASS";
qcom,register-save;
qcom,skip-init;
#global-interrupts = <2>;
interrupts = <GIC_SPI 229 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 231 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 226 IRQ_TYPE_LEVEL_HIGH>,
#global-interrupts = <0>;
interrupts = <GIC_SPI 226 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 393 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 394 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 395 IRQ_TYPE_LEVEL_HIGH>,
@ -96,11 +78,7 @@
<GIC_SPI 401 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 402 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 403 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 137 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 224 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 225 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 310 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 404 IRQ_TYPE_LEVEL_HIGH>;
<GIC_SPI 137 IRQ_TYPE_LEVEL_HIGH>;
vdd-supply = <&gdsc_hlos1_vote_lpass_adsp>;
clocks = <&clock_gcc clk_hlos1_vote_lpass_adsp_smmu_clk>;
clock-names = "lpass_q6_smmu_clk";
@ -115,10 +93,8 @@
qcom,register-save;
qcom,no-smr-check;
qcom,skip-init;
#global-interrupts = <2>;
interrupts = <GIC_SPI 229 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 231 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 263 IRQ_TYPE_LEVEL_HIGH>,
#global-interrupts = <0>;
interrupts = <GIC_SPI 263 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 266 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 267 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 268 IRQ_TYPE_LEVEL_HIGH>,
@ -137,11 +113,7 @@
<GIC_SPI 260 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 261 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 262 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 272 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 273 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 274 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 275 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 276 IRQ_TYPE_LEVEL_HIGH>;
<GIC_SPI 272 IRQ_TYPE_LEVEL_HIGH>;
vdd-supply = <&gdsc_bimc_smmu>;
clocks = <&clock_mmss clk_mmss_mnoc_ahb_clk>,
<&clock_gcc clk_mmssnoc_axi_clk>,
@ -164,15 +136,10 @@
qcom,dynamic;
qcom,register-save;
qcom,skip-init;
#global-interrupts = <2>;
interrupts = <GIC_SPI 229 IRQ_TYPE_EDGE_RISING>,
<GIC_SPI 231 IRQ_TYPE_EDGE_RISING>,
<GIC_SPI 329 IRQ_TYPE_EDGE_RISING>,
<GIC_SPI 330 IRQ_TYPE_EDGE_RISING>,
<GIC_SPI 331 IRQ_TYPE_EDGE_RISING>,
<GIC_SPI 332 IRQ_TYPE_EDGE_RISING>,
<GIC_SPI 116 IRQ_TYPE_EDGE_RISING>,
<GIC_SPI 117 IRQ_TYPE_EDGE_RISING>;
#global-interrupts = <0>;
interrupts = <GIC_SPI 329 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 330 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 331 IRQ_TYPE_EDGE_RISING>;
vdd-supply = <&gdsc_gpu_cx>;
clocks = <&clock_gcc clk_gcc_gpu_cfg_ahb_clk>,
<&clock_gcc clk_gcc_bimc_gfx_clk>,

View file

@ -269,6 +269,7 @@
qcom,thermal-derate-en;
qcom,thermal-derate-current = <200 500 1000>;
qcom,isc-delay = <192>;
qcom,pmic-revid = <&pm2falcon_revid>;
status = "disabled";
pm2falcon_flash0: qcom,flash_0 {

View file

@ -613,6 +613,7 @@
qcom,led-strings-list = [00 01 02 03];
qcom,en-ext-pfet-sc-pro;
qcom,pmic-revid = <&pmicobalt_revid>;
qcom,loop-auto-gm-en;
};
pmicobalt_haptics: qcom,haptic@c000 {
@ -657,6 +658,7 @@
qcom,thermal-derate-en;
qcom,thermal-derate-current = <200 500 1000>;
qcom,isc-delay = <192>;
qcom,pmic-revid = <&pmicobalt_revid>;
pmicobalt_flash0: qcom,flash_0 {
label = "flash";

View file

@ -1948,6 +1948,8 @@
clock-names = "core_clk", "iface_clk", "bus_aggr_clk", "utmi_clk",
"sleep_clk", "xo", "cfg_ahb_clk";
qcom,core-clk-rate = <120000000>;
resets = <&clock_gcc USB_30_BCR>;
reset-names = "core_reset";
@ -2056,6 +2058,7 @@
<&clock_gcc clk_gcc_usb_phy_cfg_ahb2phy_clk>;
clock-names = "core_clk", "iface_clk", "utmi_clk", "sleep_clk",
"xo", "cfg_ahb_clk";
qcom,core-clk-rate = <60000000>;
resets = <&clock_gcc USB_20_BCR>;
reset-names = "core_reset";

View file

@ -44,9 +44,9 @@
};
&ufsphy1 {
vdda-phy-supply = <&pmcobalt_l1>;
vdda-pll-supply = <&pmcobalt_l2>;
vddp-ref-clk-supply = <&pmcobalt_l26>;
vdda-phy-supply = <&pm2falcon_l1>;
vdda-pll-supply = <&pmfalcon_l1>;
vddp-ref-clk-supply = <&pmfalcon_l1>;
vdda-phy-max-microamp = <51400>;
vdda-pll-max-microamp = <14600>;
vddp-ref-clk-max-microamp = <100>;
@ -57,12 +57,10 @@
&ufs1 {
vdd-hba-supply = <&gdsc_ufs>;
vdd-hba-fixed-regulator;
vcc-supply = <&pmcobalt_l20>;
vccq-supply = <&pmcobalt_l26>;
vccq2-supply = <&pmcobalt_s4>;
vcc-max-microamp = <750000>;
vccq-max-microamp = <560000>;
vccq2-max-microamp = <750000>;
vcc-supply = <&pm2falcon_l4>;
vccq2-supply = <&pmfalcon_l8>;
vcc-max-microamp = <500000>;
vccq2-max-microamp = <600000>;
status = "ok";
};
@ -275,10 +273,15 @@
&mdss_dsi {
hw-config = "split_dsi";
vdda-1p2-supply = <&pmfalcon_l1>;
vdda-0p9-supply = <&pm2falcon_l1>;
};
&mdss_dsi0 {
qcom,dsi-pref-prim-pan = <&dsi_dual_nt35597_video>;
qcom,dsi-pref-prim-pan = <&dsi_dual_nt35597_truly_video>;
wqhd-vddio-supply = <&pmfalcon_l11>;
lab-supply = <&lcdb_ldo_vreg>;
ibb-supply = <&lcdb_ncp_vreg>;
pinctrl-names = "mdss_default", "mdss_sleep";
pinctrl-0 = <&mdss_dsi_active &mdss_te_active>;
pinctrl-1 = <&mdss_dsi_suspend &mdss_te_suspend>;
@ -288,7 +291,10 @@
};
&mdss_dsi1 {
qcom,dsi-pref-prim-pan = <&dsi_dual_nt35597_video>;
qcom,dsi-pref-prim-pan = <&dsi_dual_nt35597_truly_video>;
wqhd-vddio-supply = <&pmfalcon_l11>;
lab-supply = <&lcdb_ldo_vreg>;
ibb-supply = <&lcdb_ncp_vreg>;
pinctrl-names = "mdss_default", "mdss_sleep";
pinctrl-0 = <&mdss_dsi_active &mdss_te_active>;
pinctrl-1 = <&mdss_dsi_suspend &mdss_te_suspend>;
@ -424,6 +430,8 @@
};
&mdss_dp_ctrl {
vdda-1p2-supply = <&pmfalcon_l1>;
vdda-0p9-supply = <&pm2falcon_l1>;
pinctrl-names = "mdss_dp_active", "mdss_dp_sleep";
pinctrl-0 = <&mdss_dp_aux_active &mdss_dp_usbplug_cc_active>;
pinctrl-1 = <&mdss_dp_aux_suspend &mdss_dp_usbplug_cc_suspend>;

View file

@ -45,9 +45,9 @@
};
&ufsphy1 {
vdda-phy-supply = <&pmcobalt_l1>;
vdda-pll-supply = <&pmcobalt_l2>;
vddp-ref-clk-supply = <&pmcobalt_l26>;
vdda-phy-supply = <&pm2falcon_l1>;
vdda-pll-supply = <&pmfalcon_l1>;
vddp-ref-clk-supply = <&pmfalcon_l1>;
vdda-phy-max-microamp = <51400>;
vdda-pll-max-microamp = <14600>;
vddp-ref-clk-max-microamp = <100>;
@ -58,12 +58,10 @@
&ufs1 {
vdd-hba-supply = <&gdsc_ufs>;
vdd-hba-fixed-regulator;
vcc-supply = <&pmcobalt_l20>;
vccq-supply = <&pmcobalt_l26>;
vccq2-supply = <&pmcobalt_s4>;
vcc-max-microamp = <750000>;
vccq-max-microamp = <560000>;
vccq2-max-microamp = <750000>;
vcc-supply = <&pm2falcon_l4>;
vccq2-supply = <&pmfalcon_l8>;
vcc-max-microamp = <500000>;
vccq2-max-microamp = <600000>;
status = "ok";
};
@ -314,6 +312,8 @@
};
&mdss_dp_ctrl {
vdda-1p2-supply = <&pmfalcon_l1>;
vdda-0p9-supply = <&pm2falcon_l1>;
pinctrl-names = "mdss_dp_active", "mdss_dp_sleep";
pinctrl-0 = <&mdss_dp_aux_active &mdss_dp_usbplug_cc_active>;
pinctrl-1 = <&mdss_dp_aux_suspend &mdss_dp_usbplug_cc_suspend>;
@ -328,10 +328,15 @@
&mdss_dsi {
hw-config = "split_dsi";
vdda-1p2-supply = <&pmfalcon_l1>;
vdda-0p9-supply = <&pm2falcon_l1>;
};
&mdss_dsi0 {
qcom,dsi-pref-prim-pan = <&dsi_dual_nt35597_video>;
qcom,dsi-pref-prim-pan = <&dsi_dual_nt35597_truly_video>;
wqhd-vddio-supply = <&pmfalcon_l11>;
lab-supply = <&lcdb_ldo_vreg>;
ibb-supply = <&lcdb_ncp_vreg>;
pinctrl-names = "mdss_default", "mdss_sleep";
pinctrl-0 = <&mdss_dsi_active &mdss_te_active>;
pinctrl-1 = <&mdss_dsi_suspend &mdss_te_suspend>;
@ -341,7 +346,10 @@
};
&mdss_dsi1 {
qcom,dsi-pref-prim-pan = <&dsi_dual_nt35597_video>;
qcom,dsi-pref-prim-pan = <&dsi_dual_nt35597_truly_video>;
wqhd-vddio-supply = <&pmfalcon_l11>;
lab-supply = <&lcdb_ldo_vreg>;
ibb-supply = <&lcdb_ncp_vreg>;
pinctrl-names = "mdss_default", "mdss_sleep";
pinctrl-0 = <&mdss_dsi_active &mdss_te_active>;
pinctrl-1 = <&mdss_dsi_suspend &mdss_te_suspend>;

View file

@ -81,18 +81,6 @@
/delete-property/qca,bt-chip-pwd-supply;
};
&ufsphy1 {
/delete-property/vdda-phy-supply;
/delete-property/vdda-pll-supply;
/delete-property/vddp-ref-clk-supply;
};
&ufs1 {
/delete-property/vcc-supply;
/delete-property/vccq-supply;
/delete-property/vccq2-supply;
};
&sdhc_2 {
/delete-property/vdd-supply;
/delete-property/vdd-io-supply;
@ -277,3 +265,19 @@
#include "msm-pm2falcon.dtsi"
#include "msmfalcon-regulator.dtsi"
/* dummy LCDB regulator nodes */
&soc {
lcdb_ldo_vreg: regulator-vdisp-vreg {
compatible = "qcom,stub-regulator";
regulator-name = "lcdb_ldo";
regulator-min-microvolt = <4000000>;
regulator-max-microvolt = <6000000>;
};
lcdb_ncp_vreg: regulator-vdisn-vreg {
compatible = "qcom,stub-regulator";
regulator-name = "lcdb_ncp";
regulator-min-microvolt = <4000000>;
regulator-max-microvolt = <6000000>;
};
};

View file

@ -1441,184 +1441,6 @@
};
};
pcie0: qcom,pcie@01c00000 {
compatible = "qcom,pci-msm";
cell-index = <0>;
reg = <0x1c00000 0x2000>,
<0x1c06000 0x1000>,
<0x1b000000 0xf1d>,
<0x1b000f20 0xa8>,
<0x1b100000 0x100000>,
<0x1b200000 0x100000>,
<0x1b300000 0xd00000>;
reg-names = "parf", "phy", "dm_core", "elbi",
"conf", "io", "bars";
#address-cells = <3>;
#size-cells = <2>;
ranges = <0x01000000 0x0 0x1b200000 0x1b200000 0x0 0x100000>,
<0x02000000 0x0 0x1b300000 0x1b300000 0x0 0xd00000>;
interrupt-parent = <&pcie0>;
interrupts = <0 1 2 3 4 5>;
#interrupt-cells = <1>;
interrupt-map-mask = <0 0 0 0xffffffff>;
interrupt-map = <0 0 0 0 &intc 0 0 405 0
0 0 0 1 &intc 0 0 135 0
0 0 0 2 &intc 0 0 136 0
0 0 0 3 &intc 0 0 138 0
0 0 0 4 &intc 0 0 139 0
0 0 0 5 &intc 0 0 278 0>;
interrupt-names = "int_msi", "int_a", "int_b", "int_c",
"int_d", "int_global_int";
qcom,phy-sequence = <0x804 0x01 0x00
0x034 0x14 0x00
0x138 0x30 0x00
0x048 0x0f 0x00
0x15c 0x06 0x00
0x090 0x01 0x00
0x088 0x20 0x00
0x0f0 0x00 0x00
0x0f8 0x01 0x00
0x0f4 0xc9 0x00
0x11c 0xff 0x00
0x120 0x3f 0x00
0x164 0x01 0x00
0x154 0x00 0x00
0x148 0x0a 0x00
0x05C 0x19 0x00
0x038 0x90 0x00
0x0b0 0x82 0x00
0x0c0 0x03 0x00
0x0bc 0x55 0x00
0x0b8 0x55 0x00
0x0a0 0x00 0x00
0x09c 0x0d 0x00
0x098 0x04 0x00
0x13c 0x00 0x00
0x060 0x08 0x00
0x068 0x16 0x00
0x070 0x34 0x00
0x15c 0x06 0x00
0x138 0x33 0x00
0x03c 0x02 0x00
0x040 0x0e 0x00
0x080 0x04 0x00
0x0dc 0x00 0x00
0x0d8 0x3f 0x00
0x00c 0x09 0x00
0x010 0x01 0x00
0x01c 0x40 0x00
0x020 0x01 0x00
0x014 0x02 0x00
0x018 0x00 0x00
0x024 0x7e 0x00
0x028 0x15 0x00
0x244 0x02 0x00
0x2a4 0x12 0x00
0x260 0x10 0x00
0x28c 0x06 0x00
0x504 0x03 0x00
0x500 0x1c 0x00
0x50c 0x14 0x00
0x4d4 0x0a 0x00
0x4d8 0x04 0x00
0x4dc 0x1a 0x00
0x434 0x4b 0x00
0x414 0x04 0x00
0x40c 0x04 0x00
0x4f8 0x00 0x00
0x4fc 0x80 0x00
0x51c 0x40 0x00
0x444 0x71 0x00
0x43c 0x40 0x00
0x854 0x04 0x00
0x62c 0x52 0x00
0x9ac 0x00 0x00
0x8a0 0x01 0x00
0x9e0 0x00 0x00
0x9dc 0x01 0x00
0x9a8 0x00 0x00
0x8a4 0x01 0x00
0x8a8 0x73 0x00
0x9d8 0x99 0x00
0x9b0 0x03 0x00
0x804 0x03 0x00
0x800 0x00 0x00
0x808 0x03 0x00>;
pinctrl-names = "default";
pinctrl-0 = <&pcie0_clkreq_default
&pcie0_perst_default
&pcie0_wake_default>;
perst-gpio = <&tlmm 35 0>;
wake-gpio = <&tlmm 37 0>;
gdsc-vdd-supply = <&gdsc_pcie_0>;
vreg-1.8-supply = <&pmcobalt_l2>;
vreg-0.9-supply = <&pmcobalt_l1>;
vreg-cx-supply = <&pmcobalt_s1_level>;
qcom,vreg-1.8-voltage-level = <1200000 1200000 24000>;
qcom,vreg-0.9-voltage-level = <880000 880000 24000>;
qcom,vreg-cx-voltage-level = <RPM_SMD_REGULATOR_LEVEL_BINNING
RPM_SMD_REGULATOR_LEVEL_SVS 0>;
qcom,l1-supported;
qcom,l1ss-supported;
qcom,aux-clk-sync;
qcom,ep-latency = <10>;
qcom,ep-wakeirq;
linux,pci-domain = <0>;
qcom,pcie-phy-ver = <0x20>;
qcom,use-19p2mhz-aux-clk;
iommus = <&anoc1_smmu>;
qcom,smmu-exist;
qcom,smmu-sid-base = <0x1480>;
qcom,msm-bus,name = "pcie0";
qcom,msm-bus,num-cases = <2>;
qcom,msm-bus,num-paths = <1>;
qcom,msm-bus,vectors-KBps =
<45 512 0 0>,
<45 512 500 800>;
clocks = <&clock_gcc clk_gcc_pcie_0_pipe_clk>,
<&clock_gcc clk_ln_bb_clk1>,
<&clock_gcc clk_gcc_pcie_0_aux_clk>,
<&clock_gcc clk_gcc_pcie_0_cfg_ahb_clk>,
<&clock_gcc clk_gcc_pcie_0_mstr_axi_clk>,
<&clock_gcc clk_gcc_pcie_0_slv_axi_clk>,
<&clock_gcc clk_gcc_pcie_clkref_clk>;
clock-names = "pcie_0_pipe_clk", "pcie_0_ref_clk_src",
"pcie_0_aux_clk", "pcie_0_cfg_ahb_clk",
"pcie_0_mstr_axi_clk", "pcie_0_slv_axi_clk",
"pcie_0_ldo";
max-clock-frequency-hz = <0>, <0>, <19200000>,
<0>, <0>, <0>, <0>, <0>, <0>,
<0>, <0>, <0>, <0>, <0>, <0>,
<0>, <0>;
resets = <&clock_gcc PCIE_PHY_BCR>,
<&clock_gcc PCIE_0_PHY_BCR>,
<&clock_gcc PCIE_0_PHY_BCR>;
reset-names = "pcie_phy_reset",
"pcie_0_phy_reset",
"pcie_0_phy_pipe_reset";
};
qcom,ipc_router {
compatible = "qcom,ipc_router";
qcom,node-id = <1>;
@ -2614,6 +2436,229 @@
};
};
pcie0: qcom,pcie@01c00000 {
compatible = "qcom,pci-msm";
cell-index = <0>;
reg = <0x1c00000 0x2000>,
<0x1c06000 0x1000>,
<0x1b000000 0xf1d>,
<0x1b000f20 0xa8>,
<0x1b100000 0x100000>,
<0x1b200000 0x100000>,
<0x1b300000 0xd00000>;
reg-names = "parf", "phy", "dm_core", "elbi",
"conf", "io", "bars";
#address-cells = <3>;
#size-cells = <2>;
ranges = <0x01000000 0x0 0x1b200000 0x1b200000 0x0 0x100000>,
<0x02000000 0x0 0x1b300000 0x1b300000 0x0 0xd00000>;
interrupt-parent = <&pcie0>;
interrupts = <0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19
20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35
36 37>;
#interrupt-cells = <1>;
interrupt-map-mask = <0 0 0 0xffffffff>;
interrupt-map = <0 0 0 0 &intc 0 0 405 0
0 0 0 1 &intc 0 0 135 0
0 0 0 2 &intc 0 0 136 0
0 0 0 3 &intc 0 0 138 0
0 0 0 4 &intc 0 0 139 0
0 0 0 5 &intc 0 0 278 0
0 0 0 6 &intc 0 0 576 0
0 0 0 7 &intc 0 0 577 0
0 0 0 8 &intc 0 0 578 0
0 0 0 9 &intc 0 0 579 0
0 0 0 10 &intc 0 0 580 0
0 0 0 11 &intc 0 0 581 0
0 0 0 12 &intc 0 0 582 0
0 0 0 13 &intc 0 0 583 0
0 0 0 14 &intc 0 0 584 0
0 0 0 15 &intc 0 0 585 0
0 0 0 16 &intc 0 0 586 0
0 0 0 17 &intc 0 0 587 0
0 0 0 18 &intc 0 0 588 0
0 0 0 19 &intc 0 0 589 0
0 0 0 20 &intc 0 0 590 0
0 0 0 21 &intc 0 0 591 0
0 0 0 22 &intc 0 0 592 0
0 0 0 23 &intc 0 0 593 0
0 0 0 24 &intc 0 0 594 0
0 0 0 25 &intc 0 0 595 0
0 0 0 26 &intc 0 0 596 0
0 0 0 27 &intc 0 0 597 0
0 0 0 28 &intc 0 0 598 0
0 0 0 29 &intc 0 0 599 0
0 0 0 30 &intc 0 0 600 0
0 0 0 31 &intc 0 0 601 0
0 0 0 32 &intc 0 0 602 0
0 0 0 33 &intc 0 0 603 0
0 0 0 34 &intc 0 0 604 0
0 0 0 35 &intc 0 0 605 0
0 0 0 36 &intc 0 0 606 0
0 0 0 37 &intc 0 0 607 0>;
interrupt-names = "int_msi", "int_a", "int_b", "int_c",
"int_d", "int_global_int",
"msi_0", "msi_1", "msi_2", "msi_3",
"msi_4", "msi_5", "msi_6", "msi_7",
"msi_8", "msi_9", "msi_10", "msi_11",
"msi_12", "msi_13", "msi_14", "msi_15",
"msi_16", "msi_17", "msi_18", "msi_19",
"msi_20", "msi_21", "msi_22", "msi_23",
"msi_24", "msi_25", "msi_26", "msi_27",
"msi_28", "msi_29", "msi_30", "msi_31";
qcom,phy-sequence = <0x804 0x01 0x00
0x034 0x14 0x00
0x138 0x30 0x00
0x048 0x0f 0x00
0x15c 0x06 0x00
0x090 0x01 0x00
0x088 0x20 0x00
0x0f0 0x00 0x00
0x0f8 0x01 0x00
0x0f4 0xc9 0x00
0x11c 0xff 0x00
0x120 0x3f 0x00
0x164 0x01 0x00
0x154 0x00 0x00
0x148 0x0a 0x00
0x05C 0x19 0x00
0x038 0x90 0x00
0x0b0 0x82 0x00
0x0c0 0x03 0x00
0x0bc 0x55 0x00
0x0b8 0x55 0x00
0x0a0 0x00 0x00
0x09c 0x0d 0x00
0x098 0x04 0x00
0x13c 0x00 0x00
0x060 0x08 0x00
0x068 0x16 0x00
0x070 0x34 0x00
0x15c 0x06 0x00
0x138 0x33 0x00
0x03c 0x02 0x00
0x040 0x0e 0x00
0x080 0x04 0x00
0x0dc 0x00 0x00
0x0d8 0x3f 0x00
0x00c 0x09 0x00
0x010 0x01 0x00
0x01c 0x40 0x00
0x020 0x01 0x00
0x014 0x02 0x00
0x018 0x00 0x00
0x024 0x7e 0x00
0x028 0x15 0x00
0x244 0x02 0x00
0x2a4 0x12 0x00
0x260 0x10 0x00
0x28c 0x06 0x00
0x504 0x03 0x00
0x500 0x1c 0x00
0x50c 0x14 0x00
0x4d4 0x0a 0x00
0x4d8 0x04 0x00
0x4dc 0x1a 0x00
0x434 0x4b 0x00
0x414 0x04 0x00
0x40c 0x04 0x00
0x4f8 0x00 0x00
0x4fc 0x80 0x00
0x51c 0x40 0x00
0x444 0x71 0x00
0x43c 0x40 0x00
0x854 0x04 0x00
0x62c 0x52 0x00
0x9ac 0x00 0x00
0x8a0 0x01 0x00
0x9e0 0x00 0x00
0x9dc 0x01 0x00
0x9a8 0x00 0x00
0x8a4 0x01 0x00
0x8a8 0x73 0x00
0x9d8 0x99 0x00
0x9b0 0x03 0x00
0x804 0x03 0x00
0x800 0x00 0x00
0x808 0x03 0x00>;
pinctrl-names = "default";
pinctrl-0 = <&pcie0_clkreq_default
&pcie0_perst_default
&pcie0_wake_default>;
perst-gpio = <&tlmm 35 0>;
wake-gpio = <&tlmm 37 0>;
gdsc-vdd-supply = <&gdsc_pcie_0>;
vreg-1.8-supply = <&pmcobalt_l2>;
vreg-0.9-supply = <&pmcobalt_l1>;
vreg-cx-supply = <&pmcobalt_s1_level>;
qcom,vreg-1.8-voltage-level = <1200000 1200000 24000>;
qcom,vreg-0.9-voltage-level = <880000 880000 24000>;
qcom,vreg-cx-voltage-level = <RPM_SMD_REGULATOR_LEVEL_BINNING
RPM_SMD_REGULATOR_LEVEL_SVS 0>;
qcom,l1-supported;
qcom,l1ss-supported;
qcom,aux-clk-sync;
qcom,ep-latency = <10>;
qcom,ep-wakeirq;
linux,pci-domain = <0>;
qcom,msi-gicm-addr = <0x17a00040>;
qcom,msi-gicm-base = <0x260>;
qcom,pcie-phy-ver = <0x20>;
qcom,use-19p2mhz-aux-clk;
iommus = <&anoc1_smmu>;
qcom,smmu-exist;
qcom,smmu-sid-base = <0x1480>;
qcom,msm-bus,name = "pcie0";
qcom,msm-bus,num-cases = <2>;
qcom,msm-bus,num-paths = <1>;
qcom,msm-bus,vectors-KBps =
<45 512 0 0>,
<45 512 500 800>;
clocks = <&clock_gcc clk_gcc_pcie_0_pipe_clk>,
<&clock_gcc clk_ln_bb_clk1>,
<&clock_gcc clk_gcc_pcie_0_aux_clk>,
<&clock_gcc clk_gcc_pcie_0_cfg_ahb_clk>,
<&clock_gcc clk_gcc_pcie_0_mstr_axi_clk>,
<&clock_gcc clk_gcc_pcie_0_slv_axi_clk>,
<&clock_gcc clk_gcc_pcie_clkref_clk>;
clock-names = "pcie_0_pipe_clk", "pcie_0_ref_clk_src",
"pcie_0_aux_clk", "pcie_0_cfg_ahb_clk",
"pcie_0_mstr_axi_clk", "pcie_0_slv_axi_clk",
"pcie_0_ldo";
max-clock-frequency-hz = <0>, <0>, <19200000>,
<0>, <0>, <0>, <0>, <0>, <0>,
<0>, <0>, <0>, <0>, <0>, <0>,
<0>, <0>;
resets = <&clock_gcc PCIE_PHY_BCR>,
<&clock_gcc PCIE_0_PHY_BCR>,
<&clock_gcc PCIE_0_PHY_BCR>;
reset-names = "pcie_phy_reset",
"pcie_0_phy_reset",
"pcie_0_phy_pipe_reset";
};
qcom,bcl {
compatible = "qcom,bcl";
qcom,bcl-enable;

View file

@ -22,11 +22,203 @@
spi6 = &spi_6;
spi7 = &spi_7;
spi8 = &spi_8;
i2c1 = &i2c_1;
i2c2 = &i2c_2;
i2c3 = &i2c_3;
i2c4 = &i2c_4;
i2c5 = &i2c_5;
i2c6 = &i2c_6;
i2c7 = &i2c_7;
i2c8 = &i2c_8;
};
};
&soc {
i2c_1: i2c@c175000 { /* BLSP1 QUP1 */
compatible = "qcom,i2c-msm-v2";
#address-cells = <1>;
#size-cells = <0>;
reg = <0xc175000 0x600>;
reg-names = "qup_phys_addr";
interrupt-names = "qup_irq";
interrupts = <0 95 0>;
/* BAM pipes for this QUP: pipe 4 = TX, pipe 5 = RX */
dmas = <&dma_blsp1 4 64 0x20000020 0x20>,
<&dma_blsp1 5 32 0x20000020 0x20>;
dma-names = "tx", "rx";
qcom,master-id = <86>;
/* 400 kHz bus (I2C fast mode) from a 19.2 MHz source clock */
qcom,clk-freq-out = <400000>;
qcom,clk-freq-in = <19200000>;
clock-names = "iface_clk", "core_clk";
clocks = <&clock_gcc GCC_BLSP1_AHB_CLK>,
<&clock_gcc GCC_BLSP1_QUP1_I2C_APPS_CLK>;
pinctrl-names = "i2c_active", "i2c_sleep";
pinctrl-0 = <&i2c_1_active>;
pinctrl-1 = <&i2c_1_sleep>;
/* Disabled here; board dts files enable the instances they use */
status = "disabled";
};
i2c_2: i2c@c176000 { /* BLSP1 QUP2 */
compatible = "qcom,i2c-msm-v2";
#address-cells = <1>;
#size-cells = <0>;
reg = <0xc176000 0x600>;
reg-names = "qup_phys_addr";
interrupt-names = "qup_irq";
interrupts = <0 96 0>;
dmas = <&dma_blsp1 6 64 0x20000020 0x20>,
<&dma_blsp1 7 32 0x20000020 0x20>;
dma-names = "tx", "rx";
qcom,master-id = <86>;
qcom,clk-freq-out = <400000>;
qcom,clk-freq-in = <19200000>;
clock-names = "iface_clk", "core_clk";
clocks = <&clock_gcc GCC_BLSP1_AHB_CLK>,
<&clock_gcc GCC_BLSP1_QUP2_I2C_APPS_CLK>;
pinctrl-names = "i2c_active", "i2c_sleep";
pinctrl-0 = <&i2c_2_active>;
pinctrl-1 = <&i2c_2_sleep>;
status = "disabled";
};
i2c_3: i2c@c177000 { /* BLSP1 QUP3 */
compatible = "qcom,i2c-msm-v2";
#address-cells = <1>;
#size-cells = <0>;
reg = <0xc177000 0x600>;
reg-names = "qup_phys_addr";
interrupt-names = "qup_irq";
interrupts = <0 97 0>;
dmas = <&dma_blsp1 8 64 0x20000020 0x20>,
<&dma_blsp1 9 32 0x20000020 0x20>;
dma-names = "tx", "rx";
qcom,master-id = <86>;
qcom,clk-freq-out = <400000>;
qcom,clk-freq-in = <19200000>;
clock-names = "iface_clk", "core_clk";
clocks = <&clock_gcc GCC_BLSP1_AHB_CLK>,
<&clock_gcc GCC_BLSP1_QUP3_I2C_APPS_CLK>;
pinctrl-names = "i2c_active", "i2c_sleep";
pinctrl-0 = <&i2c_3_active>;
pinctrl-1 = <&i2c_3_sleep>;
status = "disabled";
};
i2c_4: i2c@c178000 { /* BLSP1 QUP4 */
compatible = "qcom,i2c-msm-v2";
#address-cells = <1>;
#size-cells = <0>;
reg = <0xc178000 0x600>;
reg-names = "qup_phys_addr";
interrupt-names = "qup_irq";
interrupts = <0 98 0>;
dmas = <&dma_blsp1 10 64 0x20000020 0x20>,
<&dma_blsp1 11 32 0x20000020 0x20>;
dma-names = "tx", "rx";
qcom,master-id = <86>;
qcom,clk-freq-out = <400000>;
qcom,clk-freq-in = <19200000>;
clock-names = "iface_clk", "core_clk";
clocks = <&clock_gcc GCC_BLSP1_AHB_CLK>,
<&clock_gcc GCC_BLSP1_QUP4_I2C_APPS_CLK>;
pinctrl-names = "i2c_active", "i2c_sleep";
pinctrl-0 = <&i2c_4_active>;
pinctrl-1 = <&i2c_4_sleep>;
status = "disabled";
};
i2c_5: i2c@c1b5000 { /* BLSP2 QUP1 */
compatible = "qcom,i2c-msm-v2";
#address-cells = <1>;
#size-cells = <0>;
reg = <0xc1b5000 0x600>;
reg-names = "qup_phys_addr";
interrupt-names = "qup_irq";
interrupts = <0 101 0>;
dmas = <&dma_blsp2 4 64 0x20000020 0x20>,
<&dma_blsp2 5 32 0x20000020 0x20>;
dma-names = "tx", "rx";
qcom,master-id = <84>;
qcom,clk-freq-out = <400000>;
qcom,clk-freq-in = <19200000>;
clock-names = "iface_clk", "core_clk";
clocks = <&clock_gcc GCC_BLSP2_AHB_CLK>,
<&clock_gcc GCC_BLSP2_QUP1_I2C_APPS_CLK>;
pinctrl-names = "i2c_active", "i2c_sleep";
pinctrl-0 = <&i2c_5_active>;
pinctrl-1 = <&i2c_5_sleep>;
status = "disabled";
};
i2c_6: i2c@c1b6000 { /* BLSP2 QUP2 */
compatible = "qcom,i2c-msm-v2";
#address-cells = <1>;
#size-cells = <0>;
reg = <0xc1b6000 0x600>;
reg-names = "qup_phys_addr";
interrupt-names = "qup_irq";
interrupts = <0 102 0>;
dmas = <&dma_blsp2 6 64 0x20000020 0x20>,
<&dma_blsp2 7 32 0x20000020 0x20>;
dma-names = "tx", "rx";
qcom,master-id = <84>;
qcom,clk-freq-out = <400000>;
qcom,clk-freq-in = <19200000>;
clock-names = "iface_clk", "core_clk";
clocks = <&clock_gcc GCC_BLSP2_AHB_CLK>,
<&clock_gcc GCC_BLSP2_QUP2_I2C_APPS_CLK>;
pinctrl-names = "i2c_active", "i2c_sleep";
pinctrl-0 = <&i2c_6_active>;
pinctrl-1 = <&i2c_6_sleep>;
status = "disabled";
};
i2c_7: i2c@c1b7000 { /* BLSP2 QUP3 */
compatible = "qcom,i2c-msm-v2";
#address-cells = <1>;
#size-cells = <0>;
reg = <0xc1b7000 0x600>;
reg-names = "qup_phys_addr";
interrupt-names = "qup_irq";
interrupts = <0 103 0>;
dmas = <&dma_blsp2 8 64 0x20000020 0x20>,
<&dma_blsp2 9 32 0x20000020 0x20>;
dma-names = "tx", "rx";
qcom,master-id = <84>;
qcom,clk-freq-out = <400000>;
qcom,clk-freq-in = <19200000>;
clock-names = "iface_clk", "core_clk";
clocks = <&clock_gcc GCC_BLSP2_AHB_CLK>,
<&clock_gcc GCC_BLSP2_QUP3_I2C_APPS_CLK>;
pinctrl-names = "i2c_active", "i2c_sleep";
pinctrl-0 = <&i2c_7_active>;
pinctrl-1 = <&i2c_7_sleep>;
status = "disabled";
};
i2c_8: i2c@c1b8000 { /* BLSP2 QUP4 */
compatible = "qcom,i2c-msm-v2";
#address-cells = <1>;
#size-cells = <0>;
reg = <0xc1b8000 0x600>;
reg-names = "qup_phys_addr";
interrupt-names = "qup_irq";
interrupts = <0 104 0>;
dmas = <&dma_blsp2 10 64 0x20000020 0x20>,
<&dma_blsp2 11 32 0x20000020 0x20>;
dma-names = "tx", "rx";
qcom,master-id = <84>;
qcom,clk-freq-out = <400000>;
qcom,clk-freq-in = <19200000>;
clock-names = "iface_clk", "core_clk";
clocks = <&clock_gcc GCC_BLSP2_AHB_CLK>,
<&clock_gcc GCC_BLSP2_QUP4_I2C_APPS_CLK>;
pinctrl-names = "i2c_active", "i2c_sleep";
pinctrl-0 = <&i2c_8_active>;
pinctrl-1 = <&i2c_8_sleep>;
status = "disabled";
};
spi_1: spi@c175000 { /* BLSP1 QUP1 */
compatible = "qcom,spi-qup-v2";
#address-cells = <1>;
@ -226,4 +418,152 @@
<&clock_gcc GCC_BLSP2_QUP4_SPI_APPS_CLK>;
status = "disabled";
};
blsp1_uart1_hs: uart@c16f000 { /* BLSP1 UART1 */
compatible = "qcom,msm-hsuart-v14";
reg = <0xc16f000 0x200>,
<0xc144000 0x1f000>;
reg-names = "core_mem", "bam_mem";
interrupt-names = "core_irq", "bam_irq", "wakeup_irq";
#address-cells = <0>;
interrupt-parent = <&blsp1_uart1_hs>;
interrupts = <0 1 2>;
#interrupt-cells = <1>;
interrupt-map-mask = <0xffffffff>;
interrupt-map = <0 &intc 0 0 107 0
1 &intc 0 0 238 0
2 &tlmm 1 0>;
qcom,inject-rx-on-wakeup;
qcom,rx-char-to-inject = <0xfd>;
qcom,bam-tx-ep-pipe-index = <0>;
qcom,bam-rx-ep-pipe-index = <1>;
qcom,master-id = <86>;
clock-names = "core_clk", "iface_clk";
clocks = <&clock_gcc GCC_BLSP1_UART1_APPS_CLK>,
<&clock_gcc GCC_BLSP1_AHB_CLK>;
pinctrl-names = "sleep", "default";
pinctrl-0 = <&blsp1_uart1_sleep>;
pinctrl-1 = <&blsp1_uart1_active>;
qcom,msm-bus,name = "buart1";
qcom,msm-bus,num-cases = <2>;
qcom,msm-bus,num-paths = <1>;
qcom,msm-bus,vectors-KBps =
<86 512 0 0>,
<86 512 500 800>;
status = "disabled";
};
blsp1_uart2_hs: uart@c170000 { /* BLSP1 UART2 */
compatible = "qcom,msm-hsuart-v14";
reg = <0xc170000 0x200>,
<0xc144000 0x1f000>;
reg-names = "core_mem", "bam_mem";
interrupt-names = "core_irq", "bam_irq", "wakeup_irq";
#address-cells = <0>;
interrupt-parent = <&blsp1_uart2_hs>;
interrupts = <0 1 2>;
#interrupt-cells = <1>;
interrupt-map-mask = <0xffffffff>;
interrupt-map = <0 &intc 0 0 108 0
1 &intc 0 0 238 0
2 &tlmm 5 0>;
qcom,inject-rx-on-wakeup;
qcom,rx-char-to-inject = <0xfd>;
qcom,bam-tx-ep-pipe-index = <2>;
qcom,bam-rx-ep-pipe-index = <3>;
qcom,master-id = <86>;
clock-names = "core_clk", "iface_clk";
clocks = <&clock_gcc GCC_BLSP1_UART2_APPS_CLK>,
<&clock_gcc GCC_BLSP1_AHB_CLK>;
pinctrl-names = "sleep", "default";
pinctrl-0 = <&blsp1_uart2_sleep>;
pinctrl-1 = <&blsp1_uart2_active>;
qcom,msm-bus,name = "buart2";
qcom,msm-bus,num-cases = <2>;
qcom,msm-bus,num-paths = <1>;
qcom,msm-bus,vectors-KBps =
<86 512 0 0>,
<86 512 500 800>;
status = "disabled";
};
blsp2_uart1_hs: uart@c1af000 { /* BLSP2 UART1 */
compatible = "qcom,msm-hsuart-v14";
reg = <0xc1af000 0x200>,
<0xc184000 0x1f000>;
reg-names = "core_mem", "bam_mem";
interrupt-names = "core_irq", "bam_irq", "wakeup_irq";
#address-cells = <0>;
interrupt-parent = <&blsp2_uart1_hs>;
interrupts = <0 1 2>;
#interrupt-cells = <1>;
interrupt-map-mask = <0xffffffff>;
interrupt-map = <0 &intc 0 0 113 0
1 &intc 0 0 239 0
2 &tlmm 17 0>;
qcom,inject-rx-on-wakeup;
qcom,rx-char-to-inject = <0xfd>;
qcom,bam-tx-ep-pipe-index = <0>;
qcom,bam-rx-ep-pipe-index = <1>;
qcom,master-id = <84>;
clock-names = "core_clk", "iface_clk";
clocks = <&clock_gcc GCC_BLSP2_UART1_APPS_CLK>,
<&clock_gcc GCC_BLSP2_AHB_CLK>;
pinctrl-names = "sleep", "default";
pinctrl-0 = <&blsp2_uart1_sleep>;
pinctrl-1 = <&blsp2_uart1_active>;
qcom,msm-bus,name = "buart3";
qcom,msm-bus,num-cases = <2>;
qcom,msm-bus,num-paths = <1>;
qcom,msm-bus,vectors-KBps =
<84 512 0 0>,
<84 512 500 800>;
status = "disabled";
};
blsp2_uart2_hs: uart@c1b0000 { /* BLSP2 UART2 */
compatible = "qcom,msm-hsuart-v14";
reg = <0xc1b0000 0x200>,
<0xc184000 0x1f000>;
reg-names = "core_mem", "bam_mem";
interrupt-names = "core_irq", "bam_irq", "wakeup_irq";
#address-cells = <0>;
interrupt-parent = <&blsp2_uart2_hs>;
interrupts = <0 1 2>;
#interrupt-cells = <1>;
interrupt-map-mask = <0xffffffff>;
interrupt-map = <0 &intc 0 0 114 0
1 &intc 0 0 239 0
2 &tlmm 25 0>;
qcom,inject-rx-on-wakeup;
qcom,rx-char-to-inject = <0xfd>;
qcom,bam-tx-ep-pipe-index = <2>;
qcom,bam-rx-ep-pipe-index = <3>;
qcom,master-id = <84>;
clock-names = "core_clk", "iface_clk";
clocks = <&clock_gcc GCC_BLSP2_UART2_APPS_CLK>,
<&clock_gcc GCC_BLSP2_AHB_CLK>;
pinctrl-names = "sleep", "default";
pinctrl-0 = <&blsp2_uart2_sleep>;
pinctrl-1 = <&blsp2_uart2_active>;
qcom,msm-bus,name = "buart4";
qcom,msm-bus,num-cases = <2>;
qcom,msm-bus,num-paths = <1>;
qcom,msm-bus,vectors-KBps =
<84 512 0 0>,
<84 512 500 800>;
status = "disabled";
};
};

View file

@ -0,0 +1,196 @@
/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
&soc {
usb3: ssusb@a800000 {
compatible = "qcom,dwc-usb3-msm";
reg = <0x0a800000 0xfc100>,
<0x0c016000 0x400>;
reg-names = "core_base",
"ahb2phy_base";
#address-cells = <1>;
#size-cells = <1>;
ranges;
interrupts = <0 347 0>, <0 243 0>, <0 180 0>;
interrupt-names = "hs_phy_irq", "ss_phy_irq", "pwr_event_irq";
USB3_GDSC-supply = <&gdsc_usb30>;
qcom,usb-dbm = <&dbm_1p5>;
qcom,msm-bus,name = "usb3";
qcom,msm-bus,num-cases = <2>;
qcom,msm-bus,num-paths = <1>;
qcom,msm-bus,vectors-KBps =
<61 512 0 0>,
<61 512 240000 800000>;
qcom,dwc-usb3-msm-tx-fifo-size = <21288>;
clocks = <&clock_gcc GCC_USB30_MASTER_CLK>,
<&clock_gcc GCC_CFG_NOC_USB3_AXI_CLK>,
<&clock_gcc GCC_AGGRE2_USB3_AXI_CLK>,
<&clock_gcc GCC_USB30_MOCK_UTMI_CLK>,
<&clock_gcc GCC_USB30_SLEEP_CLK>,
<&clock_gcc GCC_USB_PHY_CFG_AHB2PHY_CLK>,
<&clock_rpmcc CXO_DWC3_CLK>;
clock-names = "core_clk", "iface_clk", "bus_aggr_clk",
"utmi_clk", "sleep_clk", "cfg_ahb_clk", "xo";
resets = <&clock_gcc GCC_USB_30_BCR>;
reset-names = "core_reset";
dwc3@a800000 {
compatible = "snps,dwc3";
reg = <0x0a800000 0xc8d0>;
interrupt-parent = <&intc>;
interrupts = <0 131 0>;
usb-phy = <&qusb_phy0>, <&ssphy>;
tx-fifo-resize;
snps,usb3-u1u2-disable;
snps,nominal-elastic-buffer;
snps,is-utmi-l1-suspend;
snps,hird-threshold = /bits/ 8 <0x0>;
};
qcom,usbbam@a904000 {
compatible = "qcom,usb-bam-msm";
reg = <0x0a904000 0x17000>;
interrupt-parent = <&intc>;
interrupts = <0 132 0>;
qcom,bam-type = <0>;
qcom,usb-bam-fifo-baseaddr = <0x066bb000>;
qcom,usb-bam-num-pipes = <8>;
qcom,ignore-core-reset-ack;
qcom,disable-clk-gating;
qcom,usb-bam-override-threshold = <0x4001>;
qcom,usb-bam-max-mbps-highspeed = <400>;
qcom,usb-bam-max-mbps-superspeed = <3600>;
qcom,reset-bam-on-connect;
qcom,pipe0 {
label = "ssusb-ipa-out-0";
qcom,usb-bam-mem-type = <1>;
qcom,dir = <0>;
qcom,pipe-num = <0>;
qcom,peer-bam = <1>;
qcom,src-bam-pipe-index = <1>;
qcom,data-fifo-size = <0x8000>;
qcom,descriptor-fifo-size = <0x2000>;
};
qcom,pipe1 {
label = "ssusb-ipa-in-0";
qcom,usb-bam-mem-type = <1>;
qcom,dir = <1>;
qcom,pipe-num = <0>;
qcom,peer-bam = <1>;
qcom,dst-bam-pipe-index = <0>;
qcom,data-fifo-size = <0x8000>;
qcom,descriptor-fifo-size = <0x2000>;
};
qcom,pipe2 {
label = "ssusb-qdss-in-0";
qcom,usb-bam-mem-type = <2>;
qcom,dir = <1>;
qcom,pipe-num = <0>;
qcom,peer-bam = <0>;
qcom,peer-bam-physical-address = <0x06064000>;
qcom,src-bam-pipe-index = <0>;
qcom,dst-bam-pipe-index = <2>;
qcom,data-fifo-offset = <0x0>;
qcom,data-fifo-size = <0x1800>;
qcom,descriptor-fifo-offset = <0x1800>;
qcom,descriptor-fifo-size = <0x800>;
};
qcom,pipe3 {
label = "ssusb-dpl-ipa-in-1";
qcom,usb-bam-mem-type = <1>;
qcom,dir = <1>;
qcom,pipe-num = <1>;
qcom,peer-bam = <1>;
qcom,dst-bam-pipe-index = <2>;
qcom,data-fifo-size = <0x8000>;
qcom,descriptor-fifo-size = <0x2000>;
};
};
};
qusb_phy0: qusb@c012000 {
compatible = "qcom,qusb2phy";
reg = <0x0c012000 0x180>,
<0x00188018 0x4>;
reg-names = "qusb_phy_base",
"ref_clk_addr";
vdd-supply = <&pm2falcon_l1>;
vdda18-supply = <&pmfalcon_l10>;
vdda33-supply = <&pm2falcon_l7>;
qcom,vdd-voltage-level = <0 925000 925000>;
qcom,tune2-efuse-bit-pos = <21>;
qcom,tune2-efuse-num-bits = <4>;
qcom,enable-dpdm-pulsing;
/* <value register-offset> pairs programmed at PHY init */
qcom,qusb-phy-init-seq = <0xf8 0x80
0xb3 0x84
0x83 0x88
0xc0 0x8c
0x30 0x08
0x79 0x0c
0x21 0x10
0x14 0x9c
0x9f 0x1c
0x00 0x18>;
/* fixed: add missing space around '=' for consistency with
 * every other property assignment in this file
 */
phy_type = "utmi";
clocks = <&clock_rpmcc RPM_LN_BB_CLK1>,
<&clock_gcc GCC_RX0_USB2_CLKREF_CLK>,
<&clock_gcc GCC_USB_PHY_CFG_AHB2PHY_CLK>;
clock-names = "ref_clk_src", "ref_clk", "cfg_ahb_clk";
resets = <&clock_gcc GCC_QUSB2PHY_PRIM_BCR>;
reset-names = "phy_reset";
};
ssphy: ssphy@c010000 {
compatible = "qcom,usb-ssphy-qmp-v2";
reg = <0xc010000 0x7a8>,
<0x01fcb244 0x4>,
<0x01fcb248 0x4>;
reg-names = "qmp_phy_base",
"vls_clamp_reg",
"tcsr_usb3_dp_phymode";
vdd-supply = <&pm2falcon_l1>;
core-supply = <&pmfalcon_l10>;
qcom,vdd-voltage-level = <0 925000 925000>;
qcom,vbus-valid-override;
clocks = <&clock_gcc GCC_USB3_PHY_AUX_CLK>,
<&clock_gcc GCC_USB3_PHY_PIPE_CLK>,
<&clock_gcc GCC_USB_PHY_CFG_AHB2PHY_CLK>,
<&clock_rpmcc RPM_LN_BB_CLK1>,
<&clock_gcc GCC_USB3_CLKREF_CLK>;
clock-names = "aux_clk", "pipe_clk", "cfg_ahb_clk",
"ref_clk_src", "ref_clk";
resets = <&clock_gcc GCC_USB3_PHY_BCR>,
<&clock_gcc GCC_USB3PHY_PHY_BCR>;
reset-names = "phy_reset", "phy_phy_reset";
};
dbm_1p5: dbm@a8f8000 {
compatible = "qcom,usb-dbm-1p5";
reg = <0xa8f8000 0x300>;
qcom,reset-ep-after-lpm-resume;
};
};

View file

@ -22,12 +22,12 @@
uart_console_active: uart_console_active {
mux {
pins = "gpio0", "gpio1";
function = "blsp_uart1";
pins = "gpio4", "gpio5";
function = "blsp_uart2";
};
config {
pins = "gpio0", "gpio1";
pins = "gpio4", "gpio5";
drive-strength = <2>;
bias-disable;
};
@ -113,6 +113,231 @@
};
};
/* I2C CONFIGURATION */
i2c_1 {
i2c_1_active: i2c_1_active {
mux {
pins = "gpio2", "gpio3";
function = "blsp_i2c1";
};
config {
pins = "gpio2", "gpio3";
drive-strength = <2>;
bias-disable;
};
};
i2c_1_sleep: i2c_1_sleep {
mux {
pins = "gpio2", "gpio3";
function = "blsp_i2c1";
};
config {
pins = "gpio2", "gpio3";
drive-strength = <2>;
bias-pull-up;
};
};
};
i2c_2 {
i2c_2_active: i2c_2_active {
mux {
pins = "gpio6", "gpio7";
function = "blsp_i2c2";
};
config {
pins = "gpio6", "gpio7";
drive-strength = <2>;
bias-disable;
};
};
i2c_2_sleep: i2c_2_sleep {
mux {
pins = "gpio6", "gpio7";
function = "blsp_i2c2";
};
config {
pins = "gpio6", "gpio7";
drive-strength = <2>;
bias-pull-up;
};
};
};
i2c_3 {
i2c_3_active: i2c_3_active {
mux {
pins = "gpio10", "gpio11";
function = "blsp_i2c3";
};
config {
pins = "gpio10", "gpio11";
drive-strength = <2>;
bias-disable;
};
};
i2c_3_sleep: i2c_3_sleep {
mux {
pins = "gpio10", "gpio11";
function = "blsp_i2c3";
};
config {
pins = "gpio10", "gpio11";
drive-strength = <2>;
bias-pull-up;
};
};
};
i2c_4 {
i2c_4_active: i2c_4_active {
mux {
pins = "gpio14", "gpio15";
function = "blsp_i2c4";
};
config {
pins = "gpio14", "gpio15";
drive-strength = <2>;
bias-disable;
};
};
i2c_4_sleep: i2c_4_sleep {
mux {
pins = "gpio14", "gpio15";
function = "blsp_i2c4";
};
config {
pins = "gpio14", "gpio15";
drive-strength = <2>;
bias-pull-up;
};
};
};
i2c_5 {
i2c_5_active: i2c_5_active {
mux {
pins = "gpio18", "gpio19";
function = "blsp_i2c5";
};
config {
pins = "gpio18", "gpio19";
drive-strength = <2>;
bias-disable;
};
};
i2c_5_sleep: i2c_5_sleep {
mux {
pins = "gpio18", "gpio19";
function = "blsp_i2c5";
};
config {
pins = "gpio18", "gpio19";
drive-strength = <2>;
bias-pull-up;
};
};
};
i2c_6 {
i2c_6_active: i2c_6_active {
mux {
pins = "gpio22", "gpio23";
function = "blsp_i2c6";
};
config {
pins = "gpio22", "gpio23";
drive-strength = <2>;
bias-disable;
};
};
i2c_6_sleep: i2c_6_sleep {
mux {
pins = "gpio22", "gpio23";
function = "blsp_i2c6";
};
config {
pins = "gpio22", "gpio23";
drive-strength = <2>;
bias-pull-up;
};
};
};
i2c_7 {
i2c_7_active: i2c_7_active {
mux {
pins = "gpio26", "gpio27";
function = "blsp_i2c7";
};
config {
pins = "gpio26", "gpio27";
drive-strength = <2>;
bias-disable;
};
};
i2c_7_sleep: i2c_7_sleep {
mux {
pins = "gpio26", "gpio27";
function = "blsp_i2c7";
};
config {
pins = "gpio26", "gpio27";
drive-strength = <2>;
bias-pull-up;
};
};
};
i2c_8 {
i2c_8_active: i2c_8_active {
mux {
pins = "gpio30", "gpio31";
function = "blsp_i2c8_a";
};
config {
pins = "gpio30", "gpio31";
drive-strength = <2>;
bias-disable;
};
};
i2c_8_sleep: i2c_8_sleep {
mux {
pins = "gpio30", "gpio31";
function = "blsp_i2c8_a";
};
config {
pins = "gpio30", "gpio31";
drive-strength = <2>;
bias-pull-up;
};
};
};
/* SPI CONFIGURATION */
spi_1 {
spi_1_active: spi_1_active {
@ -369,5 +594,110 @@
};
};
};
/* HS UART CONFIGURATION */
blsp1_uart1_active: blsp1_uart1_active {
mux {
pins = "gpio0", "gpio1", "gpio2", "gpio3";
function = "blsp_uart1";
};
config {
pins = "gpio0", "gpio1", "gpio2", "gpio3";
drive-strength = <2>;
bias-disable;
};
};
blsp1_uart1_sleep: blsp1_uart1_sleep {
mux {
pins = "gpio0", "gpio1", "gpio2", "gpio3";
function = "gpio";
};
config {
pins = "gpio0", "gpio1", "gpio2", "gpio3";
drive-strength = <2>;
bias-disable;
};
};
blsp1_uart2_active: blsp1_uart2_active {
mux {
pins = "gpio4", "gpio5", "gpio6", "gpio7";
/* fixed: pinmux function lookup is an exact string match
 * against the pinctrl driver's function table; the stray
 * trailing space in "blsp_uart2 " would cause the state to
 * fail to apply. Sibling nodes (blsp_uart1, blsp_uart5)
 * have no trailing space.
 */
function = "blsp_uart2";
};
config {
pins = "gpio4", "gpio5", "gpio6", "gpio7";
drive-strength = <2>;
bias-disable;
};
};
blsp1_uart2_sleep: blsp1_uart2_sleep {
mux {
pins = "gpio4", "gpio5", "gpio6", "gpio7";
function = "gpio";
};
config {
pins = "gpio4", "gpio5", "gpio6", "gpio7";
drive-strength = <2>;
bias-disable;
};
};
blsp2_uart1_active: blsp2_uart1_active {
mux {
pins = "gpio16", "gpio17", "gpio18", "gpio19";
function = "blsp_uart5";
};
config {
pins = "gpio16", "gpio17", "gpio18", "gpio19";
drive-strength = <2>;
bias-disable;
};
};
blsp2_uart1_sleep: blsp2_uart1_sleep {
mux {
pins = "gpio16", "gpio17", "gpio18", "gpio19";
function = "gpio";
};
config {
pins = "gpio16", "gpio17", "gpio18", "gpio19";
drive-strength = <2>;
bias-disable;
};
};
blsp2_uart2_active: blsp2_uart2_active {
mux {
pins = "gpio24", "gpio25", "gpio26", "gpio27";
function = "blsp_uart6_a";
};
config {
pins = "gpio24", "gpio25", "gpio26", "gpio27";
drive-strength = <2>;
bias-disable;
};
};
blsp2_uart2_sleep: blsp2_uart2_sleep {
mux {
pins = "gpio24", "gpio25", "gpio26", "gpio27";
function = "gpio";
};
config {
pins = "gpio24", "gpio25", "gpio26", "gpio27";
drive-strength = <2>;
bias-disable;
};
};
};
};

View file

@ -26,6 +26,38 @@
};
};
&usb3 {
/delete-property/ USB3_GDSC-supply;
dwc3@a800000 {
maximum-speed = "high-speed";
};
};
&ssphy {
compatible = "usb-nop-xceiv";
};
&qusb_phy0 {
reg = <0x0a928000 0x8000>,
<0x0a8f8800 0x400>,
<0x0a920000 0x100>;
reg-names = "qusb_phy_base",
"qscratch_base",
"emu_phy_base";
qcom,emulation;
qcom,qusb-phy-init-seq = <0x19 0x1404
0x20 0x1414
0x79 0x1410
0x00 0x1418
0x99 0x1404
0x04 0x1408
0xd9 0x1404>;
qcom,emu-dcm-reset-seq = <0x100000 0x20
0x0 0x20
0x1a0 0x20
0x5 0x14>;
};
&uartblsp1dm1 {
status = "ok";
pinctrl-names = "default";

View file

@ -26,6 +26,22 @@
};
};
&usb3 {
reg = <0xa800000 0xfc000>;
reg-names = "core_base";
dwc3@a800000 {
maximum-speed = "high-speed";
};
};
&ssphy {
compatible = "usb-nop-xceiv";
};
&qusb_phy0 {
compatible = "usb-nop-xceiv";
};
&uartblsp1dm1 {
status = "ok";
pinctrl-names = "default";

View file

@ -249,6 +249,22 @@
clock-frequency = <19200000>;
};
dma_blsp1: qcom,sps-dma@0xc144000{ /* BLSP1 */
#dma-cells = <4>;
compatible = "qcom,sps-dma";
reg = <0xc144000 0x1F000>;
interrupts = <0 238 0>;
qcom,summing-threshold = <0x10>;
};
dma_blsp2: qcom,sps-dma@0xc184000{ /* BLSP2 */
#dma-cells = <4>;
compatible = "qcom,sps-dma";
reg = <0xc184000 0x1F000>;
interrupts = <0 239 0>;
qcom,summing-threshold = <0x10>;
};
spmi_bus: qcom,spmi@800f000 {
compatible = "qcom,spmi-pmic-arb";
reg = <0x800f000 0x1000>,
@ -873,6 +889,23 @@
reg = <0x94c 200>;
};
};
qcom,ghd {
compatible = "qcom,gladiator-hang-detect";
qcom,threshold-arr = <0x179d141c 0x179d1420
0x179d1424 0x179d1428
0x179d142c 0x179d1430>;
qcom,config-reg = <0x179d1434>;
};
qcom,msm-gladiator-v2@17900000 {
compatible = "qcom,msm-gladiator-v2";
reg = <0x17900000 0xe000>;
reg-names = "gladiator_base";
interrupts = <0 22 0>;
clock-names = "atb_clk";
clocks = <&clock_rpmcc RPM_QDSS_CLK>;
};
};
#include "msmfalcon-ion.dtsi"
@ -967,4 +1000,5 @@
#include "msm-pm2falcon.dtsi"
#include "msm-arm-smmu-falcon.dtsi"
#include "msm-arm-smmu-impl-defs-falcon.dtsi"
#include "msmfalcon-common.dtsi"
#include "msmfalcon-blsp.dtsi"

View file

@ -22,6 +22,37 @@
qcom,board-id = <15 0>;
};
&usb3 {
dwc3@a800000 {
maximum-speed = "high-speed";
};
};
&ssphy {
compatible = "usb-nop-xceiv";
};
&qusb_phy0 {
reg = <0x0a928000 0x8000>,
<0x0a8f8800 0x400>,
<0x0a920000 0x100>;
reg-names = "qusb_phy_base",
"qscratch_base",
"emu_phy_base";
qcom,emulation;
qcom,qusb-phy-init-seq = <0x19 0x1404
0x20 0x1414
0x79 0x1410
0x00 0x1418
0x99 0x1404
0x04 0x1408
0xd9 0x1404>;
qcom,emu-dcm-reset-seq = <0x100000 0x20
0x0 0x20
0x1a0 0x20
0x5 0x14>;
};
&uartblsp1dm1 {
status = "ok";
pinctrl-names = "default";

View file

@ -710,11 +710,29 @@
reg = <0x94c 200>;
};
};
qcom,ghd {
compatible = "qcom,gladiator-hang-detect";
qcom,threshold-arr = <0x179d141c 0x179d1420
0x179d1424 0x179d1428
0x179d142c 0x179d1430>;
qcom,config-reg = <0x179d1434>;
};
qcom,msm-gladiator-v2@17900000 {
compatible = "qcom,msm-gladiator-v2";
reg = <0x17900000 0xe000>;
reg-names = "gladiator_base";
interrupts = <0 22 0>;
clock-names = "atb_clk";
clocks = <&clock_rpmcc RPM_QDSS_CLK>;
};
};
#include "msmtriton-ion.dtsi"
#include "msmtriton-regulator.dtsi"
#include "msm-gdsc-falcon.dtsi"
#include "msmfalcon-common.dtsi"
&gdsc_usb30 {
clock-names = "core_clk";

View file

@ -337,6 +337,7 @@ CONFIG_FB_MSM=y
CONFIG_FB_MSM_MDSS=y
CONFIG_FB_MSM_MDSS_WRITEBACK=y
CONFIG_FB_MSM_MDSS_HDMI_PANEL=y
CONFIG_FB_MSM_MDSS_DP_PANEL=y
CONFIG_FB_MSM_MDSS_XLOG_DEBUG=y
CONFIG_LOGO=y
# CONFIG_LOGO_LINUX_MONO is not set

View file

@ -3432,9 +3432,6 @@ static int __init diagchar_init(void)
if (ret)
goto fail;
ret = diagfwd_init();
if (ret)
goto fail;
ret = diagfwd_bridge_init();
if (ret)
goto fail;
ret = diagfwd_cntl_init();
@ -3467,6 +3464,9 @@ static int __init diagchar_init(void)
goto fail;
pr_debug("diagchar initialized now");
ret = diagfwd_bridge_init();
if (ret)
diagfwd_bridge_exit();
return 0;
fail:
@ -3482,6 +3482,7 @@ fail:
diag_masks_exit();
diag_remote_exit();
return -1;
}
static void diagchar_exit(void)

View file

@ -455,6 +455,8 @@ static void diag_glink_transport_notify_state(void *handle, const void *priv,
"%s received channel remote disconnect for periph:%d\n",
glink_info->name, glink_info->peripheral);
atomic_set(&glink_info->opened, 0);
diagfwd_channel_close(glink_info->fwd_ctxt);
atomic_set(&glink_info->tx_intent_ready, 0);
break;
default:
DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
@ -501,6 +503,7 @@ static void diag_glink_close_work_fn(struct work_struct *work)
glink_close(glink_info->hdl);
atomic_set(&glink_info->opened, 0);
atomic_set(&glink_info->tx_intent_ready, 0);
glink_info->hdl = NULL;
diagfwd_channel_close(glink_info->fwd_ctxt);
}

View file

@ -49,6 +49,7 @@ struct diag_mhi_info diag_mhi[NUM_MHI_DEV] = {
.enabled = 0,
.num_read = 0,
.mempool = POOL_TYPE_MDM,
.mempool_init = 0,
.mhi_wq = NULL,
.read_ch = {
.chan = MHI_CLIENT_DIAG_IN,
@ -68,6 +69,7 @@ struct diag_mhi_info diag_mhi[NUM_MHI_DEV] = {
.enabled = 0,
.num_read = 0,
.mempool = POOL_TYPE_MDM_DCI,
.mempool_init = 0,
.mhi_wq = NULL,
.read_ch = {
.chan = MHI_CLIENT_DCI_IN,
@ -684,6 +686,7 @@ int diag_mhi_init()
strlcpy(wq_name, "diag_mhi_", DIAG_MHI_STRING_SZ);
strlcat(wq_name, mhi_info->name, sizeof(mhi_info->name));
diagmem_init(driver, mhi_info->mempool);
mhi_info->mempool_init = 1;
mhi_info->mhi_wq = create_singlethread_workqueue(wq_name);
if (!mhi_info->mhi_wq)
goto fail;
@ -725,7 +728,8 @@ void diag_mhi_exit()
if (mhi_info->mhi_wq)
destroy_workqueue(mhi_info->mhi_wq);
mhi_close(mhi_info->id);
diagmem_exit(driver, mhi_info->mempool);
if (mhi_info->mempool_init)
diagmem_exit(driver, mhi_info->mempool);
}
}

View file

@ -1,4 +1,4 @@
/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@ -65,6 +65,7 @@ struct diag_mhi_info {
int id;
int dev_id;
int mempool;
int mempool_init;
int num_read;
uint8_t enabled;
char name[DIAG_MHI_NAME_SZ];

View file

@ -438,6 +438,7 @@ int diagfwd_peripheral_init(void)
fwd_info->read_bytes = 0;
fwd_info->write_bytes = 0;
spin_lock_init(&fwd_info->buf_lock);
spin_lock_init(&fwd_info->write_buf_lock);
mutex_init(&fwd_info->data_mutex);
}
}
@ -453,6 +454,7 @@ int diagfwd_peripheral_init(void)
fwd_info->read_bytes = 0;
fwd_info->write_bytes = 0;
spin_lock_init(&fwd_info->buf_lock);
spin_lock_init(&fwd_info->write_buf_lock);
mutex_init(&fwd_info->data_mutex);
/*
* This state shouldn't be set for Control channels
@ -686,16 +688,19 @@ void *diagfwd_request_write_buf(struct diagfwd_info *fwd_info)
{
void *buf = NULL;
int index;
unsigned long flags;
spin_lock_irqsave(&fwd_info->write_buf_lock, flags);
for (index = 0 ; index < NUM_WRITE_BUFFERS; index++) {
if (!atomic_read(&(fwd_info->buf_ptr[index]->in_busy))) {
atomic_set(&(fwd_info->buf_ptr[index]->in_busy), 1);
buf = fwd_info->buf_ptr[index]->data;
if (!buf)
return NULL;
atomic_set(&(fwd_info->buf_ptr[index]->in_busy), 1);
break;
}
}
spin_unlock_irqrestore(&fwd_info->write_buf_lock, flags);
return buf;
}
@ -760,7 +765,6 @@ int diagfwd_write(uint8_t peripheral, uint8_t type, void *buf, int len)
static void __diag_fwd_open(struct diagfwd_info *fwd_info)
{
int i;
if (!fwd_info)
return;
@ -775,10 +779,7 @@ static void __diag_fwd_open(struct diagfwd_info *fwd_info)
if (fwd_info->p_ops && fwd_info->p_ops->open)
fwd_info->p_ops->open(fwd_info->ctxt);
for (i = 0; i < NUM_WRITE_BUFFERS; i++) {
if (fwd_info->buf_ptr[i])
atomic_set(&fwd_info->buf_ptr[i]->in_busy, 0);
}
diagfwd_queue_read(fwd_info);
}
@ -839,6 +840,7 @@ void diagfwd_close(uint8_t peripheral, uint8_t type)
int diagfwd_channel_open(struct diagfwd_info *fwd_info)
{
int i;
if (!fwd_info)
return -EIO;
@ -859,6 +861,10 @@ int diagfwd_channel_open(struct diagfwd_info *fwd_info)
diagfwd_write_buffers_init(fwd_info);
if (fwd_info && fwd_info->c_ops && fwd_info->c_ops->open)
fwd_info->c_ops->open(fwd_info);
for (i = 0; i < NUM_WRITE_BUFFERS; i++) {
if (fwd_info->buf_ptr[i])
atomic_set(&fwd_info->buf_ptr[i]->in_busy, 0);
}
diagfwd_queue_read(fwd_info);
DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "p: %d t: %d considered opened\n",
fwd_info->peripheral, fwd_info->type);
@ -873,6 +879,7 @@ int diagfwd_channel_open(struct diagfwd_info *fwd_info)
int diagfwd_channel_close(struct diagfwd_info *fwd_info)
{
int i;
if (!fwd_info)
return -EIO;
@ -885,6 +892,10 @@ int diagfwd_channel_close(struct diagfwd_info *fwd_info)
if (fwd_info->buf_2 && fwd_info->buf_2->data)
atomic_set(&fwd_info->buf_2->in_busy, 0);
for (i = 0; i < NUM_WRITE_BUFFERS; i++) {
if (fwd_info->buf_ptr[i])
atomic_set(&fwd_info->buf_ptr[i]->in_busy, 1);
}
DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "p: %d t: %d considered closed\n",
fwd_info->peripheral, fwd_info->type);
@ -940,10 +951,11 @@ int diagfwd_write_buffer_done(struct diagfwd_info *fwd_info, const void *ptr)
int found = 0;
int index = 0;
unsigned long flags;
if (!fwd_info || !ptr)
return found;
spin_lock_irqsave(&fwd_info->write_buf_lock, flags);
for (index = 0; index < NUM_WRITE_BUFFERS; index++) {
if (fwd_info->buf_ptr[index]->data == ptr) {
atomic_set(&fwd_info->buf_ptr[index]->in_busy, 0);
@ -951,6 +963,7 @@ int diagfwd_write_buffer_done(struct diagfwd_info *fwd_info, const void *ptr)
break;
}
}
spin_unlock_irqrestore(&fwd_info->write_buf_lock, flags);
return found;
}
@ -1197,7 +1210,7 @@ void diagfwd_write_buffers_init(struct diagfwd_info *fwd_info)
return;
}
spin_lock_irqsave(&fwd_info->buf_lock, flags);
spin_lock_irqsave(&fwd_info->write_buf_lock, flags);
for (i = 0; i < NUM_WRITE_BUFFERS; i++) {
if (!fwd_info->buf_ptr[i])
fwd_info->buf_ptr[i] =
@ -1215,11 +1228,11 @@ void diagfwd_write_buffers_init(struct diagfwd_info *fwd_info)
kmemleak_not_leak(fwd_info->buf_ptr[i]->data);
}
}
spin_unlock_irqrestore(&fwd_info->buf_lock, flags);
spin_unlock_irqrestore(&fwd_info->write_buf_lock, flags);
return;
err:
spin_unlock_irqrestore(&fwd_info->buf_lock, flags);
spin_unlock_irqrestore(&fwd_info->write_buf_lock, flags);
pr_err("diag:unable to allocate write buffers\n");
diagfwd_write_buffers_exit(fwd_info);
@ -1233,7 +1246,7 @@ static void diagfwd_write_buffers_exit(struct diagfwd_info *fwd_info)
if (!fwd_info)
return;
spin_lock_irqsave(&fwd_info->buf_lock, flags);
spin_lock_irqsave(&fwd_info->write_buf_lock, flags);
for (i = 0; i < NUM_WRITE_BUFFERS; i++) {
if (fwd_info->buf_ptr[i]) {
kfree(fwd_info->buf_ptr[i]->data);
@ -1242,5 +1255,5 @@ static void diagfwd_write_buffers_exit(struct diagfwd_info *fwd_info)
fwd_info->buf_ptr[i] = NULL;
}
}
spin_unlock_irqrestore(&fwd_info->buf_lock, flags);
spin_unlock_irqrestore(&fwd_info->write_buf_lock, flags);
}

View file

@ -71,6 +71,7 @@ struct diagfwd_info {
unsigned long read_bytes;
unsigned long write_bytes;
spinlock_t buf_lock;
spinlock_t write_buf_lock;
struct mutex data_mutex;
void *ctxt;
struct diagfwd_buf_t *buf_1;

View file

@ -2267,6 +2267,7 @@ EXPORT_SYMBOL_GPL(clk_set_flags);
static struct dentry *rootdir;
static int inited = 0;
static u32 debug_suspend;
static DEFINE_MUTEX(clk_debug_lock);
static HLIST_HEAD(clk_debug_list);
@ -2409,6 +2410,309 @@ static const struct file_operations clk_dump_fops = {
.release = single_release,
};
/*
 * debugfs "clk_rate" write handler: request a new rate for the clock.
 * 'data' is the struct clk_core * stashed in the debugfs inode.
 * Returns 0 on success or the error from clk_set_rate().
 */
static int clock_debug_rate_set(void *data, u64 val)
{
struct clk_core *core = data;
int ret;
/* clk_set_rate() wants the struct clk consumer handle, not the core */
ret = clk_set_rate(core->hw->clk, val);
if (ret)
pr_err("clk_set_rate(%lu) failed (%d)\n",
(unsigned long)val, ret);
return ret;
}
/*
 * debugfs "clk_rate" read handler: report the clock's cached rate.
 * NOTE(review): core->hw->core should be the same object as 'core'
 * itself in the common clk framework — the round-trip looks redundant;
 * confirm before simplifying.
 */
static int clock_debug_rate_get(void *data, u64 *val)
{
struct clk_core *core = data;
*val = core->hw->core->rate;
return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(clock_rate_fops, clock_debug_rate_get,
clock_debug_rate_set, "%llu\n");
/*
 * debugfs "clk_parent" read handler: print the name of the clock's
 * current parent, or "None" for a root clock.
 */
static ssize_t clock_parent_read(struct file *filp, char __user *ubuf,
size_t cnt, loff_t *ppos)
{
char name[256] = {0};
struct clk_core *core = filp->private_data;
struct clk_core *p = core->hw->core->parent;
/*
 * Fixed: the fallback string previously embedded its own '\n'
 * ("None\n") even though the "%s\n" format already appends one,
 * producing a double newline for parentless clocks.
 */
snprintf(name, sizeof(name), "%s\n", p ? p->name : "None");
return simple_read_from_buffer(ubuf, cnt, ppos, name, strlen(name));
}
static const struct file_operations clock_parent_fops = {
.open = simple_open,
.read = clock_parent_read,
};
/*
 * debugfs "clk_enable" write handler: a non-zero write prepares and
 * enables the clock, a zero write disables and unprepares it.
 * NOTE(review): a 0 write without a matching prior 1 write would
 * unbalance the prepare/enable counts — this mirrors the debugfs
 * contract of the legacy clock driver; confirm callers are trusted.
 */
static int clock_debug_enable_set(void *data, u64 val)
{
struct clk_core *core = data;
int rc = 0;
if (val)
rc = clk_prepare_enable(core->hw->clk);
else
clk_disable_unprepare(core->hw->clk);
return rc;
}
/*
 * debugfs "clk_enable" read handler: report the clock's current
 * enable reference count (not a boolean).
 */
static int clock_debug_enable_get(void *data, u64 *val)
{
struct clk_core *core = data;
int enabled = 0;
enabled = core->enable_count;
*val = enabled;
return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(clock_enable_fops, clock_debug_enable_get,
clock_debug_enable_set, "%lld\n");
/*
 * Emit one piece of debug output to whichever sink is active:
 *  - seq_printf into 'm' when a seq_file is provided (debugfs read),
 *  - pr_cont when 'c' is set (continuation of the current kmsg line),
 *  - pr_info otherwise (start a fresh kmsg line).
 */
#define clock_debug_output(m, c, fmt, ...) \
do { \
if (m) \
seq_printf(m, fmt, ##__VA_ARGS__); \
else if (c) \
pr_cont(fmt, ##__VA_ARGS__); \
else \
pr_info(fmt, ##__VA_ARGS__); \
} while (0)
/*
 * clock_debug_print_clock() - print one prepared clock and its parent chain.
 * @c: clock to consider; skipped when NULL or not currently prepared.
 * @s: optional seq_file sink; NULL routes output to the kernel log.
 *
 * Walks from @c up through its parents printing
 * "name:prepare_count:enable_count [rate]" for each link, appending the
 * vdd level when the clock has a vdd_class.
 *
 * Return: 1 if the clock was printed, 0 if it was skipped.
 */
int clock_debug_print_clock(struct clk_core *c, struct seq_file *s)
{
	char *start = "";
	struct clk *clk;
	/* Only clocks that are currently prepared are of interest. */
	if (!c || !c->prepare_count)
		return 0;
	clk = c->hw->clk;
	clock_debug_output(s, 0, "\t");
	do {
		if (clk->core->vdd_class)
			clock_debug_output(s, 1, "%s%s:%u:%u [%ld, %d]", start,
				clk->core->name,
				clk->core->prepare_count,
				clk->core->enable_count,
				clk->core->rate,
				clk_find_vdd_level(clk->core, clk->core->rate));
		else
			clock_debug_output(s, 1, "%s%s:%u:%u [%ld]", start,
				clk->core->name,
				clk->core->prepare_count,
				clk->core->enable_count,
				clk->core->rate);
		/* Parents after the first are arrow-separated. */
		start = " -> ";
	} while ((clk = clk_get_parent(clk)));
	clock_debug_output(s, 1, "\n");
	return 1;
}
/*
* clock_debug_print_enabled_clocks() - Print names of enabled clocks
*/
static void clock_debug_print_enabled_clocks(struct seq_file *s)
{
struct clk_core *core;
int cnt = 0;
clock_debug_output(s, 0, "Enabled clocks:\n");
mutex_lock(&clk_debug_lock);
hlist_for_each_entry(core, &clk_debug_list, debug_node)
cnt += clock_debug_print_clock(core, s);
mutex_unlock(&clk_debug_lock);
if (cnt)
clock_debug_output(s, 0, "Enabled clock count: %d\n", cnt);
else
clock_debug_output(s, 0, "No clocks enabled.\n");
}
static int enabled_clocks_show(struct seq_file *s, void *unused)
{
clock_debug_print_enabled_clocks(s);
return 0;
}
static int enabled_clocks_open(struct inode *inode, struct file *file)
{
return single_open(file, enabled_clocks_show, inode->i_private);
}
static const struct file_operations clk_enabled_list_fops = {
.open = enabled_clocks_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
};
/*
 * Recursively dump the registers of @clk and all of its ancestors.
 * The recursive call precedes the printing, so the root-most parent is
 * listed first. Clocks whose ops lack a list_registers callback
 * contribute only their name.
 */
static void clk_debug_print_hw(struct clk_core *clk, struct seq_file *f)
{
	/* Recursion terminates at the root (NULL/ERR parent). */
	if (IS_ERR_OR_NULL(clk))
		return;
	clk_debug_print_hw(clk->parent, f);
	clock_debug_output(f, false, "%s\n", clk->name);
	if (!clk->ops->list_registers)
		return;
	clk->ops->list_registers(f, clk->hw);
}
static int print_hw_show(struct seq_file *m, void *unused)
{
struct clk_core *c = m->private;
clk_debug_print_hw(c, m);
return 0;
}
static int print_hw_open(struct inode *inode, struct file *file)
{
return single_open(file, print_hw_show, inode->i_private);
}
static const struct file_operations clock_print_hw_fops = {
.open = print_hw_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
};
/*
 * Show handler for "clk_list_rates": print every supported frequency that
 * is reachable within the clock's voltage constraints, one per line.
 */
static int list_rates_show(struct seq_file *s, void *unused)
{
	struct clk_core *core = s->private;
	unsigned long rate, rate_max = 0;
	int level, i;

	/* Find max frequency supported within voltage constraints. */
	if (!core->vdd_class) {
		rate_max = ULONG_MAX;
	} else {
		for (level = 0; level < core->num_rate_max; level++)
			if (core->rate_max[level])
				rate_max = core->rate_max[level];
	}

	/*
	 * List supported frequencies <= rate_max. Higher frequencies may
	 * appear in the frequency table, but are not valid and should not
	 * be listed.
	 */
	for (i = 0; ; i++) {
		rate = core->ops->list_rate(core->hw, i, rate_max);
		if (IS_ERR_VALUE(rate) || !rate)
			break;
		if (rate <= rate_max)
			seq_printf(s, "%lu\n", rate);
	}

	return 0;
}
static int list_rates_open(struct inode *inode, struct file *file)
{
return single_open(file, list_rates_show, inode->i_private);
}
static const struct file_operations list_rates_fops = {
.open = list_rates_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
};
/*
 * Print one row of the "clk_rate_max" table: the max frequency for @level
 * and the regulator voltage(s) that level requires, bracketing ("[...]")
 * the row matching the clock's current vdd level.
 * Callers guarantee core->vdd_class is non-NULL (the debugfs file is only
 * created for clocks that have a vdd_class).
 */
static void clock_print_rate_max_by_level(struct seq_file *s, int level)
{
	struct clk_core *core = s->private;
	struct clk_vdd_class *vdd_class = core->vdd_class;
	int off, i, vdd_level, nregs = vdd_class->num_regulators;
	vdd_level = clk_find_vdd_level(core, core->rate);
	seq_printf(s, "%2s%10lu", vdd_level == level ? "[" : "",
		core->rate_max[level]);
	/* vdd_uv is a flattened [level][regulator] voltage table. */
	for (i = 0; i < nregs; i++) {
		off = nregs*level + i;
		if (vdd_class->vdd_uv)
			seq_printf(s, "%10u", vdd_class->vdd_uv[off]);
	}
	if (vdd_level == level)
		seq_puts(s, "]");
	seq_puts(s, "\n");
}
static int rate_max_show(struct seq_file *s, void *unused)
{
struct clk_core *core = s->private;
struct clk_vdd_class *vdd_class = core->vdd_class;
int level = 0, i, nregs = vdd_class->num_regulators;
char reg_name[10];
int vdd_level = clk_find_vdd_level(core, core->rate);
if (vdd_level < 0) {
seq_printf(s, "could not find_vdd_level for %s, %ld\n",
core->name, core->rate);
return 0;
}
seq_printf(s, "%12s", "");
for (i = 0; i < nregs; i++) {
snprintf(reg_name, ARRAY_SIZE(reg_name), "reg %d", i);
seq_printf(s, "%10s", reg_name);
}
seq_printf(s, "\n%12s", "freq");
for (i = 0; i < nregs; i++)
seq_printf(s, "%10s", "uV");
seq_puts(s, "\n");
for (level = 0; level < core->num_rate_max; level++)
clock_print_rate_max_by_level(s, level);
return 0;
}
static int rate_max_open(struct inode *inode, struct file *file)
{
return single_open(file, rate_max_show, inode->i_private);
}
static const struct file_operations rate_max_fops = {
.open = rate_max_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
};
static int clk_debug_create_one(struct clk_core *core, struct dentry *pdentry)
{
struct dentry *d;
@ -2425,11 +2729,21 @@ static int clk_debug_create_one(struct clk_core *core, struct dentry *pdentry)
core->dentry = d;
d = debugfs_create_u32("clk_rate", S_IRUGO, core->dentry,
(u32 *)&core->rate);
d = debugfs_create_file("clk_rate", S_IRUGO, core->dentry, core,
&clock_rate_fops);
if (!d)
goto err_out;
if (core->ops->list_rate) {
if (!debugfs_create_file("clk_list_rates",
S_IRUGO, core->dentry, core, &list_rates_fops))
goto err_out;
}
if (core->vdd_class && !debugfs_create_file("clk_rate_max",
S_IRUGO, core->dentry, core, &rate_max_fops))
goto err_out;
d = debugfs_create_u32("clk_accuracy", S_IRUGO, core->dentry,
(u32 *)&core->accuracy);
if (!d)
@ -2450,8 +2764,8 @@ static int clk_debug_create_one(struct clk_core *core, struct dentry *pdentry)
if (!d)
goto err_out;
d = debugfs_create_u32("clk_enable_count", S_IRUGO, core->dentry,
(u32 *)&core->enable_count);
d = debugfs_create_file("clk_enable_count", S_IRUGO, core->dentry,
core, &clock_enable_fops);
if (!d)
goto err_out;
@ -2460,6 +2774,16 @@ static int clk_debug_create_one(struct clk_core *core, struct dentry *pdentry)
if (!d)
goto err_out;
d = debugfs_create_file("clk_parent", S_IRUGO, core->dentry, core,
&clock_parent_fops);
if (!d)
goto err_out;
d = debugfs_create_file("clk_print_regs", S_IRUGO, core->dentry,
core, &clock_print_hw_fops);
if (!d)
goto err_out;
if (core->ops->debug_init) {
ret = core->ops->debug_init(core->hw, core->dentry);
if (ret)
@ -2531,6 +2855,19 @@ struct dentry *clk_debugfs_add_file(struct clk_hw *hw, char *name, umode_t mode,
}
EXPORT_SYMBOL_GPL(clk_debugfs_add_file);
/*
 * Print the names of all enabled clocks and their parents to the kernel
 * log, but only when debug_suspend has been set via debugfs.
 */
void clock_debug_print_enabled(void)
{
	if (unlikely(debug_suspend))
		clock_debug_print_enabled_clocks(NULL);
}
EXPORT_SYMBOL_GPL(clock_debug_print_enabled);
/**
* clk_debug_init - lazily populate the debugfs clk directory
*
@ -2570,6 +2907,17 @@ static int __init clk_debug_init(void)
if (!d)
return -ENOMEM;
d = debugfs_create_file("clk_enabled_list", S_IRUGO, rootdir,
&clk_debug_list, &clk_enabled_list_fops);
if (!d)
return -ENOMEM;
d = debugfs_create_u32("debug_suspend", S_IRUGO | S_IWUSR,
rootdir, &debug_suspend);
if (!d)
return -ENOMEM;
mutex_lock(&clk_debug_lock);
hlist_for_each_entry(core, &clk_debug_list, debug_node)
clk_debug_create_one(core, rootdir);

View file

@ -20,6 +20,10 @@ struct clk *__of_clk_get_from_provider(struct of_phandle_args *clkspec,
struct clk *__clk_create_clk(struct clk_hw *hw, const char *dev_id,
const char *con_id);
void __clk_free_clk(struct clk *clk);
/* Debugfs API to print the enabled clocks */
void clock_debug_print_enabled(void);
#else
/* All these casts to avoid ifdefs in clkdev... */
static inline struct clk *

View file

@ -245,6 +245,7 @@ static struct rcg_clk ahb_clk_src = {
.c = {
.dbg_name = "ahb_clk_src",
.ops = &clk_ops_rcg,
.flags = CLKFLAG_NO_RATE_CACHE,
VDD_DIG_FMAX_MAP3(LOWER, 19200000, LOW, 40000000,
NOMINAL, 80800000),
CLK_INIT(ahb_clk_src.c),
@ -277,6 +278,7 @@ static struct rcg_clk csi0_clk_src = {
.c = {
.dbg_name = "csi0_clk_src",
.ops = &clk_ops_rcg,
.flags = CLKFLAG_NO_RATE_CACHE,
VDD_DIG_FMAX_MAP4(LOWER, 164571429, LOW, 256000000,
NOMINAL, 384000000, HIGH, 576000000),
CLK_INIT(csi0_clk_src.c),
@ -315,6 +317,7 @@ static struct rcg_clk vfe0_clk_src = {
.c = {
.dbg_name = "vfe0_clk_src",
.ops = &clk_ops_rcg,
.flags = CLKFLAG_NO_RATE_CACHE,
VDD_DIG_FMAX_MAP4(LOWER, 200000000, LOW, 384000000,
NOMINAL, 576000000, HIGH, 600000000),
CLK_INIT(vfe0_clk_src.c),
@ -330,6 +333,7 @@ static struct rcg_clk vfe1_clk_src = {
.c = {
.dbg_name = "vfe1_clk_src",
.ops = &clk_ops_rcg,
.flags = CLKFLAG_NO_RATE_CACHE,
VDD_DIG_FMAX_MAP4(LOWER, 200000000, LOW, 384000000,
NOMINAL, 576000000, HIGH, 600000000),
CLK_INIT(vfe1_clk_src.c),
@ -358,6 +362,7 @@ static struct rcg_clk mdp_clk_src = {
.c = {
.dbg_name = "mdp_clk_src",
.ops = &clk_ops_rcg,
.flags = CLKFLAG_NO_RATE_CACHE,
VDD_DIG_FMAX_MAP4(LOWER, 171430000, LOW, 275000000,
NOMINAL, 330000000, HIGH, 412500000),
CLK_INIT(mdp_clk_src.c),
@ -382,6 +387,7 @@ static struct rcg_clk maxi_clk_src = {
.c = {
.dbg_name = "maxi_clk_src",
.ops = &clk_ops_rcg,
.flags = CLKFLAG_NO_RATE_CACHE,
VDD_DIG_FMAX_MAP4(LOWER, 75000000, LOW, 171428571,
NOMINAL, 323200000, HIGH, 406000000),
CLK_INIT(maxi_clk_src.c),
@ -416,6 +422,7 @@ static struct rcg_clk cpp_clk_src = {
.c = {
.dbg_name = "cpp_clk_src",
.ops = &clk_ops_rcg,
.flags = CLKFLAG_NO_RATE_CACHE,
VDD_DIG_FMAX_MAP4(LOWER, 100000000, LOW, 200000000,
NOMINAL, 576000000, HIGH, 600000000),
CLK_INIT(cpp_clk_src.c),
@ -446,6 +453,7 @@ static struct rcg_clk jpeg0_clk_src = {
.c = {
.dbg_name = "jpeg0_clk_src",
.ops = &clk_ops_rcg,
.flags = CLKFLAG_NO_RATE_CACHE,
VDD_DIG_FMAX_MAP3(LOWER, 75000000, LOW, 150000000,
NOMINAL, 480000000),
CLK_INIT(jpeg0_clk_src.c),
@ -469,6 +477,7 @@ static struct rcg_clk rot_clk_src = {
.c = {
.dbg_name = "rot_clk_src",
.ops = &clk_ops_rcg,
.flags = CLKFLAG_NO_RATE_CACHE,
VDD_DIG_FMAX_MAP4(LOWER, 171430000, LOW, 275000000,
NOMINAL, 330000000, HIGH, 412500000),
CLK_INIT(rot_clk_src.c),
@ -501,6 +510,7 @@ static struct rcg_clk video_core_clk_src = {
.c = {
.dbg_name = "video_core_clk_src",
.ops = &clk_ops_rcg,
.flags = CLKFLAG_NO_RATE_CACHE,
VDD_DIG_FMAX_MAP4(LOWER, 100000000, LOW, 186000000,
NOMINAL, 360000000, HIGH, 465000000),
CLK_INIT(video_core_clk_src.c),
@ -531,6 +541,7 @@ static struct rcg_clk csiphy_clk_src = {
.c = {
.dbg_name = "csiphy_clk_src",
.ops = &clk_ops_rcg,
.flags = CLKFLAG_NO_RATE_CACHE,
VDD_DIG_FMAX_MAP3(LOWER, 164570000, LOW, 256000000,
NOMINAL, 384000000),
CLK_INIT(csiphy_clk_src.c),
@ -546,6 +557,7 @@ static struct rcg_clk csi1_clk_src = {
.c = {
.dbg_name = "csi1_clk_src",
.ops = &clk_ops_rcg,
.flags = CLKFLAG_NO_RATE_CACHE,
VDD_DIG_FMAX_MAP4(LOWER, 164570000, LOW, 256000000,
NOMINAL, 384000000, HIGH, 576000000),
CLK_INIT(csi1_clk_src.c),
@ -561,6 +573,7 @@ static struct rcg_clk csi2_clk_src = {
.c = {
.dbg_name = "csi2_clk_src",
.ops = &clk_ops_rcg,
.flags = CLKFLAG_NO_RATE_CACHE,
VDD_DIG_FMAX_MAP4(LOWER, 164570000, LOW, 256000000,
NOMINAL, 384000000, HIGH, 576000000),
CLK_INIT(csi2_clk_src.c),
@ -576,6 +589,7 @@ static struct rcg_clk csi3_clk_src = {
.c = {
.dbg_name = "csi3_clk_src",
.ops = &clk_ops_rcg,
.flags = CLKFLAG_NO_RATE_CACHE,
VDD_DIG_FMAX_MAP4(LOWER, 164570000, LOW, 256000000,
NOMINAL, 384000000, HIGH, 576000000),
CLK_INIT(csi3_clk_src.c),
@ -607,6 +621,7 @@ static struct rcg_clk fd_core_clk_src = {
.c = {
.dbg_name = "fd_core_clk_src",
.ops = &clk_ops_rcg,
.flags = CLKFLAG_NO_RATE_CACHE,
VDD_DIG_FMAX_MAP3(LOWER, 100000000, LOW, 200000000,
NOMINAL, 576000000),
CLK_INIT(fd_core_clk_src.c),
@ -755,6 +770,7 @@ static struct rcg_clk video_subcore0_clk_src = {
.c = {
.dbg_name = "video_subcore0_clk_src",
.ops = &clk_ops_rcg,
.flags = CLKFLAG_NO_RATE_CACHE,
VDD_DIG_FMAX_MAP4(LOWER, 100000000, LOW, 186000000,
NOMINAL, 360000000, HIGH, 465000000),
CLK_INIT(video_subcore0_clk_src.c),
@ -771,6 +787,7 @@ static struct rcg_clk video_subcore1_clk_src = {
.c = {
.dbg_name = "video_subcore1_clk_src",
.ops = &clk_ops_rcg,
.flags = CLKFLAG_NO_RATE_CACHE,
VDD_DIG_FMAX_MAP4(LOWER, 100000000, LOW, 186000000,
NOMINAL, 360000000, HIGH, 465000000),
CLK_INIT(video_subcore1_clk_src.c),
@ -793,6 +810,7 @@ static struct rcg_clk cci_clk_src = {
.c = {
.dbg_name = "cci_clk_src",
.ops = &clk_ops_rcg_mnd,
.flags = CLKFLAG_NO_RATE_CACHE,
VDD_DIG_FMAX_MAP3(LOWER, 37500000, LOW, 50000000,
NOMINAL, 100000000),
CLK_INIT(cci_clk_src.c),
@ -818,6 +836,7 @@ static struct rcg_clk camss_gp0_clk_src = {
.c = {
.dbg_name = "camss_gp0_clk_src",
.ops = &clk_ops_rcg_mnd,
.flags = CLKFLAG_NO_RATE_CACHE,
VDD_DIG_FMAX_MAP3(LOWER, 50000000, LOW, 100000000,
NOMINAL, 200000000),
CLK_INIT(camss_gp0_clk_src.c),
@ -833,6 +852,7 @@ static struct rcg_clk camss_gp1_clk_src = {
.c = {
.dbg_name = "camss_gp1_clk_src",
.ops = &clk_ops_rcg_mnd,
.flags = CLKFLAG_NO_RATE_CACHE,
VDD_DIG_FMAX_MAP3(LOWER, 50000000, LOW, 100000000,
NOMINAL, 200000000),
CLK_INIT(camss_gp1_clk_src.c),
@ -862,6 +882,7 @@ static struct rcg_clk mclk0_clk_src = {
.c = {
.dbg_name = "mclk0_clk_src",
.ops = &clk_ops_rcg_mnd,
.flags = CLKFLAG_NO_RATE_CACHE,
VDD_DIG_FMAX_MAP3(LOWER, 33333333, LOW, 66666667,
NOMINAL, 68571429),
CLK_INIT(mclk0_clk_src.c),
@ -877,6 +898,7 @@ static struct rcg_clk mclk1_clk_src = {
.c = {
.dbg_name = "mclk1_clk_src",
.ops = &clk_ops_rcg_mnd,
.flags = CLKFLAG_NO_RATE_CACHE,
VDD_DIG_FMAX_MAP3(LOWER, 33333333, LOW, 66666667,
NOMINAL, 68571429),
CLK_INIT(mclk1_clk_src.c),
@ -892,6 +914,7 @@ static struct rcg_clk mclk2_clk_src = {
.c = {
.dbg_name = "mclk2_clk_src",
.ops = &clk_ops_rcg_mnd,
.flags = CLKFLAG_NO_RATE_CACHE,
VDD_DIG_FMAX_MAP3(LOWER, 33333333, LOW, 66666667,
NOMINAL, 68571429),
CLK_INIT(mclk2_clk_src.c),
@ -907,6 +930,7 @@ static struct rcg_clk mclk3_clk_src = {
.c = {
.dbg_name = "mclk3_clk_src",
.ops = &clk_ops_rcg_mnd,
.flags = CLKFLAG_NO_RATE_CACHE,
VDD_DIG_FMAX_MAP3(LOWER, 33333333, LOW, 66666667,
NOMINAL, 68571429),
CLK_INIT(mclk3_clk_src.c),
@ -928,6 +952,7 @@ static struct rcg_clk csi0phytimer_clk_src = {
.c = {
.dbg_name = "csi0phytimer_clk_src",
.ops = &clk_ops_rcg,
.flags = CLKFLAG_NO_RATE_CACHE,
VDD_DIG_FMAX_MAP3(LOWER, 100000000, LOW, 200000000,
NOMINAL, 269333333),
CLK_INIT(csi0phytimer_clk_src.c),
@ -943,6 +968,7 @@ static struct rcg_clk csi1phytimer_clk_src = {
.c = {
.dbg_name = "csi1phytimer_clk_src",
.ops = &clk_ops_rcg,
.flags = CLKFLAG_NO_RATE_CACHE,
VDD_DIG_FMAX_MAP3(LOWER, 100000000, LOW, 200000000,
NOMINAL, 269333333),
CLK_INIT(csi1phytimer_clk_src.c),
@ -958,6 +984,7 @@ static struct rcg_clk csi2phytimer_clk_src = {
.c = {
.dbg_name = "csi2phytimer_clk_src",
.ops = &clk_ops_rcg,
.flags = CLKFLAG_NO_RATE_CACHE,
VDD_DIG_FMAX_MAP3(LOWER, 100000000, LOW, 200000000,
NOMINAL, 269333333),
CLK_INIT(csi2phytimer_clk_src.c),
@ -978,6 +1005,7 @@ static struct rcg_clk dp_gtc_clk_src = {
.c = {
.dbg_name = "dp_gtc_clk_src",
.ops = &clk_ops_rcg,
.flags = CLKFLAG_NO_RATE_CACHE,
VDD_DIG_FMAX_MAP2(LOWER, 40000000, LOW, 300000000),
CLK_INIT(dp_gtc_clk_src.c),
},
@ -997,6 +1025,7 @@ static struct rcg_clk esc0_clk_src = {
.c = {
.dbg_name = "esc0_clk_src",
.ops = &clk_ops_rcg,
.flags = CLKFLAG_NO_RATE_CACHE,
VDD_DIG_FMAX_MAP2(LOWER, 19200000, NOMINAL, 19200000),
CLK_INIT(esc0_clk_src.c),
},
@ -1011,6 +1040,7 @@ static struct rcg_clk esc1_clk_src = {
.c = {
.dbg_name = "esc1_clk_src",
.ops = &clk_ops_rcg,
.flags = CLKFLAG_NO_RATE_CACHE,
VDD_DIG_FMAX_MAP2(LOWER, 19200000, NOMINAL, 19200000),
CLK_INIT(esc1_clk_src.c),
},
@ -1033,6 +1063,7 @@ static struct rcg_clk extpclk_clk_src = {
.dbg_name = "extpclk_clk_src",
.parent = &ext_extpclk_clk_src.c,
.ops = &clk_ops_byte,
.flags = CLKFLAG_NO_RATE_CACHE,
VDD_DIG_FMAX_MAP3(LOWER, 150000000, LOW, 300000000,
NOMINAL, 600000000),
CLK_INIT(extpclk_clk_src.c),
@ -1053,6 +1084,7 @@ static struct rcg_clk hdmi_clk_src = {
.c = {
.dbg_name = "hdmi_clk_src",
.ops = &clk_ops_rcg,
.flags = CLKFLAG_NO_RATE_CACHE,
VDD_DIG_FMAX_MAP2(LOWER, 19200000, NOMINAL, 19200000),
CLK_INIT(hdmi_clk_src.c),
},
@ -1072,6 +1104,7 @@ static struct rcg_clk vsync_clk_src = {
.c = {
.dbg_name = "vsync_clk_src",
.ops = &clk_ops_rcg,
.flags = CLKFLAG_NO_RATE_CACHE,
VDD_DIG_FMAX_MAP2(LOWER, 19200000, NOMINAL, 19200000),
CLK_INIT(vsync_clk_src.c),
},
@ -1091,6 +1124,7 @@ static struct rcg_clk dp_aux_clk_src = {
.c = {
.dbg_name = "dp_aux_clk_src",
.ops = &clk_ops_rcg,
.flags = CLKFLAG_NO_RATE_CACHE,
VDD_DIG_FMAX_MAP2(LOWER, 19200000, NOMINAL, 19200000),
CLK_INIT(dp_aux_clk_src.c),
},
@ -1165,6 +1199,7 @@ static struct rcg_clk dp_crypto_clk_src = {
.c = {
.dbg_name = "dp_crypto_clk_src",
.ops = &clk_ops_rcg_mnd,
.flags = CLKFLAG_NO_RATE_CACHE,
VDD_DIG_FMAX_MAP3(LOWER, 101250, LOW, 168750,
NOMINAL, 337500),
CLK_INIT(dp_crypto_clk_src.c),
@ -1237,6 +1272,7 @@ static struct branch_clk mmss_camss_cci_clk = {
.c = {
.dbg_name = "mmss_camss_cci_clk",
.parent = &cci_clk_src.c,
.flags = CLKFLAG_NO_RATE_CACHE,
.ops = &clk_ops_branch,
CLK_INIT(mmss_camss_cci_clk.c),
},
@ -1260,6 +1296,7 @@ static struct branch_clk mmss_camss_cpp_clk = {
.c = {
.dbg_name = "mmss_camss_cpp_clk",
.parent = &cpp_clk_src.c,
.flags = CLKFLAG_NO_RATE_CACHE,
.ops = &clk_ops_branch,
CLK_INIT(mmss_camss_cpp_clk.c),
},
@ -1317,6 +1354,7 @@ static struct branch_clk mmss_camss_csi0_clk = {
.c = {
.dbg_name = "mmss_camss_csi0_clk",
.parent = &csi0_clk_src.c,
.flags = CLKFLAG_NO_RATE_CACHE,
.ops = &clk_ops_branch,
CLK_INIT(mmss_camss_csi0_clk.c),
},
@ -1376,6 +1414,7 @@ static struct branch_clk mmss_camss_csi1_clk = {
.c = {
.dbg_name = "mmss_camss_csi1_clk",
.parent = &csi1_clk_src.c,
.flags = CLKFLAG_NO_RATE_CACHE,
.ops = &clk_ops_branch,
CLK_INIT(mmss_camss_csi1_clk.c),
},
@ -1435,6 +1474,7 @@ static struct branch_clk mmss_camss_csi2_clk = {
.c = {
.dbg_name = "mmss_camss_csi2_clk",
.parent = &csi2_clk_src.c,
.flags = CLKFLAG_NO_RATE_CACHE,
.ops = &clk_ops_branch,
CLK_INIT(mmss_camss_csi2_clk.c),
},
@ -1494,6 +1534,7 @@ static struct branch_clk mmss_camss_csi3_clk = {
.c = {
.dbg_name = "mmss_camss_csi3_clk",
.parent = &csi3_clk_src.c,
.flags = CLKFLAG_NO_RATE_CACHE,
.ops = &clk_ops_branch,
CLK_INIT(mmss_camss_csi3_clk.c),
},
@ -1555,6 +1596,7 @@ static struct branch_clk mmss_camss_csiphy0_clk = {
.c = {
.dbg_name = "mmss_camss_csiphy0_clk",
.parent = &csiphy_clk_src.c,
.flags = CLKFLAG_NO_RATE_CACHE,
.ops = &clk_ops_branch,
CLK_INIT(mmss_camss_csiphy0_clk.c),
},
@ -1568,6 +1610,7 @@ static struct branch_clk mmss_camss_csiphy1_clk = {
.c = {
.dbg_name = "mmss_camss_csiphy1_clk",
.parent = &csiphy_clk_src.c,
.flags = CLKFLAG_NO_RATE_CACHE,
.ops = &clk_ops_branch,
CLK_INIT(mmss_camss_csiphy1_clk.c),
},
@ -1581,6 +1624,7 @@ static struct branch_clk mmss_camss_csiphy2_clk = {
.c = {
.dbg_name = "mmss_camss_csiphy2_clk",
.parent = &csiphy_clk_src.c,
.flags = CLKFLAG_NO_RATE_CACHE,
.ops = &clk_ops_branch,
CLK_INIT(mmss_camss_csiphy2_clk.c),
},
@ -1604,6 +1648,7 @@ static struct branch_clk mmss_fd_core_clk = {
.c = {
.dbg_name = "mmss_fd_core_clk",
.parent = &fd_core_clk_src.c,
.flags = CLKFLAG_NO_RATE_CACHE,
.ops = &clk_ops_branch,
CLK_INIT(mmss_fd_core_clk.c),
},
@ -1628,6 +1673,7 @@ static struct branch_clk mmss_camss_gp0_clk = {
.c = {
.dbg_name = "mmss_camss_gp0_clk",
.parent = &camss_gp0_clk_src.c,
.flags = CLKFLAG_NO_RATE_CACHE,
.ops = &clk_ops_branch,
CLK_INIT(mmss_camss_gp0_clk.c),
},
@ -1640,6 +1686,7 @@ static struct branch_clk mmss_camss_gp1_clk = {
.c = {
.dbg_name = "mmss_camss_gp1_clk",
.parent = &camss_gp1_clk_src.c,
.flags = CLKFLAG_NO_RATE_CACHE,
.ops = &clk_ops_branch,
CLK_INIT(mmss_camss_gp1_clk.c),
},
@ -1663,6 +1710,7 @@ static struct branch_clk mmss_camss_jpeg0_clk = {
.c = {
.dbg_name = "mmss_camss_jpeg0_clk",
.parent = &jpeg0_clk_src.c,
.flags = CLKFLAG_NO_RATE_CACHE,
.ops = &clk_ops_branch,
CLK_INIT(mmss_camss_jpeg0_clk.c),
},
@ -1701,6 +1749,7 @@ static struct branch_clk mmss_camss_mclk0_clk = {
.c = {
.dbg_name = "mmss_camss_mclk0_clk",
.parent = &mclk0_clk_src.c,
.flags = CLKFLAG_NO_RATE_CACHE,
.ops = &clk_ops_branch,
CLK_INIT(mmss_camss_mclk0_clk.c),
},
@ -1713,6 +1762,7 @@ static struct branch_clk mmss_camss_mclk1_clk = {
.c = {
.dbg_name = "mmss_camss_mclk1_clk",
.parent = &mclk1_clk_src.c,
.flags = CLKFLAG_NO_RATE_CACHE,
.ops = &clk_ops_branch,
CLK_INIT(mmss_camss_mclk1_clk.c),
},
@ -1725,6 +1775,7 @@ static struct branch_clk mmss_camss_mclk2_clk = {
.c = {
.dbg_name = "mmss_camss_mclk2_clk",
.parent = &mclk2_clk_src.c,
.flags = CLKFLAG_NO_RATE_CACHE,
.ops = &clk_ops_branch,
CLK_INIT(mmss_camss_mclk2_clk.c),
},
@ -1737,6 +1788,7 @@ static struct branch_clk mmss_camss_mclk3_clk = {
.c = {
.dbg_name = "mmss_camss_mclk3_clk",
.parent = &mclk3_clk_src.c,
.flags = CLKFLAG_NO_RATE_CACHE,
.ops = &clk_ops_branch,
CLK_INIT(mmss_camss_mclk3_clk.c),
},
@ -1760,6 +1812,7 @@ static struct branch_clk mmss_camss_csi0phytimer_clk = {
.c = {
.dbg_name = "mmss_camss_csi0phytimer_clk",
.parent = &csi0phytimer_clk_src.c,
.flags = CLKFLAG_NO_RATE_CACHE,
.ops = &clk_ops_branch,
CLK_INIT(mmss_camss_csi0phytimer_clk.c),
},
@ -1772,6 +1825,7 @@ static struct branch_clk mmss_camss_csi1phytimer_clk = {
.c = {
.dbg_name = "mmss_camss_csi1phytimer_clk",
.parent = &csi1phytimer_clk_src.c,
.flags = CLKFLAG_NO_RATE_CACHE,
.ops = &clk_ops_branch,
CLK_INIT(mmss_camss_csi1phytimer_clk.c),
},
@ -1784,6 +1838,7 @@ static struct branch_clk mmss_camss_csi2phytimer_clk = {
.c = {
.dbg_name = "mmss_camss_csi2phytimer_clk",
.parent = &csi2phytimer_clk_src.c,
.flags = CLKFLAG_NO_RATE_CACHE,
.ops = &clk_ops_branch,
CLK_INIT(mmss_camss_csi2phytimer_clk.c),
},
@ -1818,6 +1873,7 @@ static struct branch_clk mmss_camss_vfe0_clk = {
.c = {
.dbg_name = "mmss_camss_vfe0_clk",
.parent = &vfe0_clk_src.c,
.flags = CLKFLAG_NO_RATE_CACHE,
.ops = &clk_ops_branch,
CLK_INIT(mmss_camss_vfe0_clk.c),
},
@ -1853,6 +1909,7 @@ static struct branch_clk mmss_camss_vfe1_clk = {
.c = {
.dbg_name = "mmss_camss_vfe1_clk",
.parent = &vfe1_clk_src.c,
.flags = CLKFLAG_NO_RATE_CACHE,
.ops = &clk_ops_branch,
CLK_INIT(mmss_camss_vfe1_clk.c),
},
@ -1921,6 +1978,7 @@ static struct branch_clk mmss_mdss_byte0_clk = {
.c = {
.dbg_name = "mmss_mdss_byte0_clk",
.parent = &byte0_clk_src.c,
.flags = CLKFLAG_NO_RATE_CACHE,
.ops = &clk_ops_branch,
CLK_INIT(mmss_mdss_byte0_clk.c),
},
@ -1956,6 +2014,7 @@ static struct branch_clk mmss_mdss_byte0_intf_clk = {
.c = {
.dbg_name = "mmss_mdss_byte0_intf_clk",
.parent = &mmss_mdss_byte0_intf_div_clk.c,
.flags = CLKFLAG_NO_RATE_CACHE,
.ops = &clk_ops_branch,
CLK_INIT(mmss_mdss_byte0_intf_clk.c),
},
@ -1968,6 +2027,7 @@ static struct branch_clk mmss_mdss_byte1_clk = {
.c = {
.dbg_name = "mmss_mdss_byte1_clk",
.parent = &byte1_clk_src.c,
.flags = CLKFLAG_NO_RATE_CACHE,
.ops = &clk_ops_branch,
CLK_INIT(mmss_mdss_byte1_clk.c),
},
@ -2003,6 +2063,7 @@ static struct branch_clk mmss_mdss_byte1_intf_clk = {
.c = {
.dbg_name = "mmss_mdss_byte1_intf_clk",
.parent = &mmss_mdss_byte1_intf_div_clk.c,
.flags = CLKFLAG_NO_RATE_CACHE,
.ops = &clk_ops_branch,
CLK_INIT(mmss_mdss_byte1_intf_clk.c),
},
@ -2015,6 +2076,7 @@ static struct branch_clk mmss_mdss_dp_aux_clk = {
.c = {
.dbg_name = "mmss_mdss_dp_aux_clk",
.parent = &dp_aux_clk_src.c,
.flags = CLKFLAG_NO_RATE_CACHE,
.ops = &clk_ops_branch,
CLK_INIT(mmss_mdss_dp_aux_clk.c),
},
@ -2066,6 +2128,7 @@ static struct branch_clk mmss_mdss_dp_crypto_clk = {
.c = {
.dbg_name = "mmss_mdss_dp_crypto_clk",
.parent = &dp_crypto_clk_src.c,
.flags = CLKFLAG_NO_RATE_CACHE,
.ops = &clk_ops_branch,
CLK_INIT(mmss_mdss_dp_crypto_clk.c),
},
@ -2078,6 +2141,7 @@ static struct branch_clk mmss_mdss_dp_gtc_clk = {
.c = {
.dbg_name = "mmss_mdss_dp_gtc_clk",
.parent = &dp_gtc_clk_src.c,
.flags = CLKFLAG_NO_RATE_CACHE,
.ops = &clk_ops_branch,
CLK_INIT(mmss_mdss_dp_gtc_clk.c),
},
@ -2090,6 +2154,7 @@ static struct branch_clk mmss_mdss_esc0_clk = {
.c = {
.dbg_name = "mmss_mdss_esc0_clk",
.parent = &esc0_clk_src.c,
.flags = CLKFLAG_NO_RATE_CACHE,
.ops = &clk_ops_branch,
CLK_INIT(mmss_mdss_esc0_clk.c),
},
@ -2102,6 +2167,7 @@ static struct branch_clk mmss_mdss_esc1_clk = {
.c = {
.dbg_name = "mmss_mdss_esc1_clk",
.parent = &esc1_clk_src.c,
.flags = CLKFLAG_NO_RATE_CACHE,
.ops = &clk_ops_branch,
CLK_INIT(mmss_mdss_esc1_clk.c),
},
@ -2114,6 +2180,7 @@ static struct branch_clk mmss_mdss_extpclk_clk = {
.c = {
.dbg_name = "mmss_mdss_extpclk_clk",
.parent = &extpclk_clk_src.c,
.flags = CLKFLAG_NO_RATE_CACHE,
.ops = &clk_ops_branch,
CLK_INIT(mmss_mdss_extpclk_clk.c),
},
@ -2126,6 +2193,7 @@ static struct branch_clk mmss_mdss_hdmi_clk = {
.c = {
.dbg_name = "mmss_mdss_hdmi_clk",
.parent = &hdmi_clk_src.c,
.flags = CLKFLAG_NO_RATE_CACHE,
.ops = &clk_ops_branch,
CLK_INIT(mmss_mdss_hdmi_clk.c),
},
@ -2149,6 +2217,7 @@ static struct branch_clk mmss_mdss_mdp_clk = {
.c = {
.dbg_name = "mmss_mdss_mdp_clk",
.parent = &mdp_clk_src.c,
.flags = CLKFLAG_NO_RATE_CACHE,
.ops = &clk_ops_branch,
CLK_INIT(mmss_mdss_mdp_clk.c),
},
@ -2175,6 +2244,7 @@ static struct branch_clk mmss_mdss_pclk0_clk = {
.c = {
.dbg_name = "mmss_mdss_pclk0_clk",
.parent = &pclk0_clk_src.c,
.flags = CLKFLAG_NO_RATE_CACHE,
.ops = &clk_ops_branch,
CLK_INIT(mmss_mdss_pclk0_clk.c),
},
@ -2187,6 +2257,7 @@ static struct branch_clk mmss_mdss_pclk1_clk = {
.c = {
.dbg_name = "mmss_mdss_pclk1_clk",
.parent = &pclk1_clk_src.c,
.flags = CLKFLAG_NO_RATE_CACHE,
.ops = &clk_ops_branch,
CLK_INIT(mmss_mdss_pclk1_clk.c),
},
@ -2199,6 +2270,7 @@ static struct branch_clk mmss_mdss_rot_clk = {
.c = {
.dbg_name = "mmss_mdss_rot_clk",
.parent = &rot_clk_src.c,
.flags = CLKFLAG_NO_RATE_CACHE,
.ops = &clk_ops_branch,
CLK_INIT(mmss_mdss_rot_clk.c),
},
@ -2211,6 +2283,7 @@ static struct branch_clk mmss_mdss_vsync_clk = {
.c = {
.dbg_name = "mmss_mdss_vsync_clk",
.parent = &vsync_clk_src.c,
.flags = CLKFLAG_NO_RATE_CACHE,
.ops = &clk_ops_branch,
CLK_INIT(mmss_mdss_vsync_clk.c),
},
@ -2225,6 +2298,7 @@ static struct branch_clk mmss_mnoc_ahb_clk = {
.c = {
.dbg_name = "mmss_mnoc_ahb_clk",
.parent = &ahb_clk_src.c,
.flags = CLKFLAG_NO_RATE_CACHE,
.ops = &clk_ops_branch,
CLK_INIT(mmss_mnoc_ahb_clk.c),
},
@ -2260,6 +2334,7 @@ static struct branch_clk mmss_mnoc_maxi_clk = {
.c = {
.dbg_name = "mmss_mnoc_maxi_clk",
.parent = &maxi_clk_src.c,
.flags = CLKFLAG_NO_RATE_CACHE,
.ops = &clk_ops_branch,
CLK_INIT(mmss_mnoc_maxi_clk.c),
},
@ -2272,6 +2347,7 @@ static struct branch_clk mmss_video_subcore0_clk = {
.c = {
.dbg_name = "mmss_video_subcore0_clk",
.parent = &video_subcore0_clk_src.c,
.flags = CLKFLAG_NO_RATE_CACHE,
.ops = &clk_ops_branch,
CLK_INIT(mmss_video_subcore0_clk.c),
},
@ -2284,6 +2360,7 @@ static struct branch_clk mmss_video_subcore1_clk = {
.c = {
.dbg_name = "mmss_video_subcore1_clk",
.parent = &video_subcore1_clk_src.c,
.flags = CLKFLAG_NO_RATE_CACHE,
.ops = &clk_ops_branch,
CLK_INIT(mmss_video_subcore1_clk.c),
},
@ -2318,6 +2395,7 @@ static struct branch_clk mmss_video_core_clk = {
.c = {
.dbg_name = "mmss_video_core_clk",
.parent = &video_core_clk_src.c,
.flags = CLKFLAG_NO_RATE_CACHE,
.ops = &clk_ops_branch,
CLK_INIT(mmss_video_core_clk.c),
},
@ -2822,6 +2900,9 @@ int msm_mmsscc_cobalt_probe(struct platform_device *pdev)
ext_dp_phy_pll_vco.clk_id = "dp_vco_div";
ext_dp_phy_pll_vco.c.flags = CLKFLAG_NO_RATE_CACHE;
mmss_camss_jpeg0_vote_clk.c.flags = CLKFLAG_NO_RATE_CACHE;
mmss_camss_jpeg0_dma_vote_clk.c.flags = CLKFLAG_NO_RATE_CACHE;
is_vq = of_device_is_compatible(pdev->dev.of_node,
"qcom,mmsscc-hamster");
if (is_vq)

View file

@ -117,6 +117,11 @@ static int wait_for_pll_latch_ack(struct clk_alpha_pll *pll, u32 mask)
return wait_for_pll(pll, mask, 0, "latch_ack");
}
/*
 * Poll the PLL status for the @mask update bit(s).
 * NOTE(review): the third argument (1) appears to select inverted polling,
 * i.e. wait for the self-clearing PLL_UPDATE bit to deassert -- confirm
 * against wait_for_pll()'s definition.
 */
static int wait_for_pll_update(struct clk_alpha_pll *pll, u32 mask)
{
	return wait_for_pll(pll, mask, 1, "update");
}
/* alpha pll with hwfsm support */
#define PLL_OFFLINE_REQ BIT(7)
@ -562,12 +567,48 @@ static long clk_alpha_pll_round_rate(struct clk_hw *hw, unsigned long rate,
return clamp(rate, min_freq, max_freq);
}
static void clk_alpha_pll_list_registers(struct seq_file *f, struct clk_hw *hw)
{
struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
int size, i, val;
static struct clk_register_data data[] = {
{"PLL_MODE", 0x0},
{"PLL_L_VAL", 0x4},
{"PLL_ALPHA_VAL", 0x8},
{"PLL_ALPHA_VAL_U", 0xC},
{"PLL_USER_CTL", 0x10},
{"PLL_CONFIG_CTL", 0x18},
};
static struct clk_register_data data1[] = {
{"APSS_PLL_VOTE", 0x0},
};
size = ARRAY_SIZE(data);
for (i = 0; i < size; i++) {
regmap_read(pll->clkr.regmap, pll->offset + data[i].offset,
&val);
seq_printf(f, "%20s: 0x%.8x\n", data[i].name, val);
}
regmap_read(pll->clkr.regmap, pll->offset + data[0].offset, &val);
if (val & PLL_FSM_ENA) {
regmap_read(pll->clkr.regmap, pll->clkr.enable_reg +
data1[0].offset, &val);
seq_printf(f, "%20s: 0x%.8x\n", data1[0].name, val);
}
}
/* Ops for a software-controlled alpha PLL (no HW FSM voting). */
const struct clk_ops clk_alpha_pll_ops = {
	.enable = clk_alpha_pll_enable,
	.disable = clk_alpha_pll_disable,
	.recalc_rate = clk_alpha_pll_recalc_rate,
	.round_rate = clk_alpha_pll_round_rate,
	.set_rate = clk_alpha_pll_set_rate,
	.list_registers = clk_alpha_pll_list_registers,
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_ops);
@ -577,6 +618,7 @@ const struct clk_ops clk_alpha_pll_hwfsm_ops = {
.recalc_rate = clk_alpha_pll_recalc_rate,
.round_rate = clk_alpha_pll_round_rate,
.set_rate = clk_alpha_pll_set_rate,
.list_registers = clk_alpha_pll_list_registers,
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_hwfsm_ops);
@ -633,3 +675,177 @@ const struct clk_ops clk_alpha_pll_postdiv_ops = {
.set_rate = clk_alpha_pll_postdiv_set_rate,
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_postdiv_ops);
/*
 * Latch newly-programmed L/ALPHA values into a running PLL and wait for
 * it to re-lock: set PLL_UPDATE in the mode register to trigger a dynamic
 * (glitch-free) frequency update, wait for the update handshake, then poll
 * the lock-detect bit.
 *
 * Return: 0 on success, or a negative error from the polling helpers.
 */
static int clk_alpha_pll_slew_update(struct clk_alpha_pll *pll)
{
	int ret = 0;
	u32 val;
	regmap_update_bits(pll->clkr.regmap, pll->offset + PLL_MODE,
			PLL_UPDATE, PLL_UPDATE);
	/*
	 * NOTE(review): the value read here is discarded; the read-back
	 * presumably forces the preceding write to post before polling
	 * begins -- confirm against the HPG.
	 */
	regmap_read(pll->clkr.regmap, pll->offset + PLL_MODE, &val);
	ret = wait_for_pll_update(pll, PLL_UPDATE);
	if (ret)
		return ret;
	/*
	 * HPG mandates a wait of at least 570ns before polling the LOCK
	 * detect bit. Have a delay of 1us just to be safe.
	 */
	mb();
	udelay(1);
	ret = wait_for_pll_enable(pll, PLL_LOCK_DET);
	return ret;
}
/*
 * clk_alpha_pll_slew_set_rate() - set_rate callback that dynamically slews
 * a running PLL to @rate without a disable/re-enable glitch.
 * @hw:          clk_hw of the alpha PLL.
 * @rate:        target rate; must be a value round_rate() would return.
 * @parent_rate: rate of the parent (reference) clock.
 *
 * Falls back to the normal clk_alpha_pll_set_rate() when the target rate
 * lies in a different VCO range, since the dynamic update cannot change
 * vco_sel. Return: 0 on success or -EINVAL on un-rounded/out-of-range rates.
 */
static int clk_alpha_pll_slew_set_rate(struct clk_hw *hw, unsigned long rate,
			unsigned long parent_rate)
{
	struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
	unsigned long freq_hz;
	const struct pll_vco *curr_vco, *vco;
	u32 l;
	u64 a;
	freq_hz = alpha_pll_round_rate(rate, parent_rate, &l, &a);
	if (freq_hz != rate) {
		pr_err("alpha_pll: Call clk_set_rate with rounded rates!\n");
		return -EINVAL;
	}
	/* Both the current and the target rate must map to a known VCO. */
	curr_vco = alpha_pll_find_vco(pll, clk_hw_get_rate(hw));
	if (!curr_vco) {
		pr_err("alpha pll: not in a valid vco range\n");
		return -EINVAL;
	}
	vco = alpha_pll_find_vco(pll, freq_hz);
	if (!vco) {
		pr_err("alpha pll: not in a valid vco range\n");
		return -EINVAL;
	}
	/*
	 * Dynamic pll update will not support switching frequencies across
	 * vco ranges. In those cases fall back to normal alpha set rate.
	 */
	if (curr_vco->val != vco->val)
		return clk_alpha_pll_set_rate(hw, rate, parent_rate);
	/* Left-align the alpha value into the full-width register field. */
	a = a << (ALPHA_REG_BITWIDTH - ALPHA_BITWIDTH);
	regmap_write(pll->clkr.regmap, pll->offset + PLL_L_VAL, l);
	regmap_write(pll->clkr.regmap, pll->offset + PLL_ALPHA_VAL, a);
	regmap_write(pll->clkr.regmap, pll->offset + PLL_ALPHA_VAL_U, a >> 32);
	/* Ensure that the write above goes through before proceeding. */
	mb();
	/* Only a running PLL needs the dynamic-update handshake. */
	if (clk_hw_is_enabled(hw))
		clk_alpha_pll_slew_update(pll);
	return 0;
}
/*
 * Slewing plls should be bought up at frequency which is in the middle of the
 * desired VCO range. So after bringing up the pll at calibration freq, set it
 * back to desired frequency(that was set by previous clk_set_rate).
 *
 * @hw: clk_hw of the alpha PLL to calibrate and enable.
 * Return: 0 on success, or a negative error if rounding fails, the rate is
 * outside every VCO range, or the PLL fails to enable/re-lock.
 */
static int clk_alpha_pll_calibrate(struct clk_hw *hw)
{
	unsigned long calibration_freq, freq_hz;
	struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
	const struct pll_vco *vco;
	u64 a;
	u32 l;
	int rc;

	vco = alpha_pll_find_vco(pll, clk_hw_get_rate(hw));
	if (!vco) {
		pr_err("alpha pll: not in a valid vco range\n");
		return -EINVAL;
	}

	/*
	 * As during slewing plls vco_sel won't be allowed to change, vco table
	 * should have only one entry table, i.e. index = 0, find the
	 * calibration frequency.
	 */
	calibration_freq = (pll->vco_table[0].min_freq +
				pll->vco_table[0].max_freq)/2;

	freq_hz = alpha_pll_round_rate(calibration_freq,
			clk_hw_get_rate(clk_hw_get_parent(hw)), &l, &a);
	if (freq_hz != calibration_freq) {
		pr_err("alpha_pll: call clk_set_rate with rounded rates!\n");
		return -EINVAL;
	}

	/* Setup PLL for calibration frequency */
	a <<= (ALPHA_REG_BITWIDTH - ALPHA_BITWIDTH);

	regmap_write(pll->clkr.regmap, pll->offset + PLL_L_VAL, l);
	regmap_write(pll->clkr.regmap, pll->offset + PLL_ALPHA_VAL, a);
	regmap_write(pll->clkr.regmap, pll->offset + PLL_ALPHA_VAL_U, a >> 32);

	regmap_update_bits(pll->clkr.regmap, pll->offset + PLL_USER_CTL,
				PLL_VCO_MASK << PLL_VCO_SHIFT,
				vco->val << PLL_VCO_SHIFT);

	regmap_update_bits(pll->clkr.regmap, pll->offset + PLL_USER_CTL,
				PLL_ALPHA_EN, PLL_ALPHA_EN);

	/* Bringup the pll at calibration frequency */
	rc = clk_alpha_pll_enable(hw);
	if (rc) {
		pr_err("alpha pll calibration failed\n");
		return rc;
	}

	/*
	 * PLL is already running at calibration frequency.
	 * So slew pll to the previously set frequency.
	 */
	freq_hz = alpha_pll_round_rate(clk_hw_get_rate(hw),
			clk_hw_get_rate(clk_hw_get_parent(hw)), &l, &a);

	/* Fix: freq_hz is unsigned long, so use %lu rather than %ld. */
	pr_debug("pll %s: setting back to required rate %lu, freq_hz %lu\n",
			hw->init->name, clk_hw_get_rate(hw), freq_hz);

	/* Setup the PLL for the new frequency */
	a <<= (ALPHA_REG_BITWIDTH - ALPHA_BITWIDTH);

	regmap_write(pll->clkr.regmap, pll->offset + PLL_L_VAL, l);
	regmap_write(pll->clkr.regmap, pll->offset + PLL_ALPHA_VAL, a);
	regmap_write(pll->clkr.regmap, pll->offset + PLL_ALPHA_VAL_U, a >> 32);

	regmap_update_bits(pll->clkr.regmap, pll->offset + PLL_USER_CTL,
				PLL_ALPHA_EN, PLL_ALPHA_EN);

	return clk_alpha_pll_slew_update(pll);
}
/*
 * Enable a slewing alpha PLL: calibrate at the mid-VCO frequency first,
 * then perform the regular enable sequence.
 */
static int clk_alpha_pll_slew_enable(struct clk_hw *hw)
{
	int ret;

	ret = clk_alpha_pll_calibrate(hw);
	if (ret)
		return ret;

	return clk_alpha_pll_enable(hw);
}
/* Clock ops for alpha PLLs that support dynamic (slew) frequency updates. */
const struct clk_ops clk_alpha_pll_slew_ops = {
	.enable = clk_alpha_pll_slew_enable,
	.disable = clk_alpha_pll_disable,
	.recalc_rate = clk_alpha_pll_recalc_rate,
	.round_rate = clk_alpha_pll_round_rate,
	.set_rate = clk_alpha_pll_slew_set_rate,
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_slew_ops);

View file

@ -80,6 +80,7 @@ struct clk_alpha_pll_postdiv {
extern const struct clk_ops clk_alpha_pll_ops;
extern const struct clk_ops clk_alpha_pll_hwfsm_ops;
extern const struct clk_ops clk_alpha_pll_postdiv_ops;
extern const struct clk_ops clk_alpha_pll_slew_ops;
void clk_alpha_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
const struct pll_config *config);

View file

@ -16,10 +16,12 @@
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/regmap.h>
#include "clk-branch.h"
#include "clk-regmap.h"
static bool clk_branch_in_hwcg_mode(const struct clk_branch *br)
{
@ -181,6 +183,43 @@ const struct clk_ops clk_branch_ops = {
};
EXPORT_SYMBOL_GPL(clk_branch_ops);
static void clk_branch2_list_registers(struct seq_file *f, struct clk_hw *hw)
{
struct clk_branch *br = to_clk_branch(hw);
struct clk_regmap *rclk = to_clk_regmap(hw);
int size, i, val;
static struct clk_register_data data[] = {
{"CBCR", 0x0},
};
static struct clk_register_data data1[] = {
{"APSS_VOTE", 0x0},
{"APSS_SLEEP_VOTE", 0x4},
};
size = ARRAY_SIZE(data);
for (i = 0; i < size; i++) {
regmap_read(br->clkr.regmap, br->halt_reg + data[i].offset,
&val);
seq_printf(f, "%20s: 0x%.8x\n", data[i].name, val);
}
if ((br->halt_check & BRANCH_HALT_VOTED) &&
!(br->halt_check & BRANCH_VOTED)) {
if (rclk->enable_reg) {
size = ARRAY_SIZE(data1);
for (i = 0; i < size; i++) {
regmap_read(br->clkr.regmap, rclk->enable_reg +
data1[i].offset, &val);
seq_printf(f, "%20s: 0x%.8x\n",
data1[i].name, val);
}
}
}
}
static int clk_branch2_enable(struct clk_hw *hw)
{
return clk_branch_toggle(hw, true, clk_branch2_check_halt);
@ -196,6 +235,7 @@ const struct clk_ops clk_branch2_ops = {
.disable = clk_branch2_disable,
.is_enabled = clk_is_enabled_regmap,
.set_flags = clk_branch_set_flags,
.list_registers = clk_branch2_list_registers,
};
EXPORT_SYMBOL_GPL(clk_branch2_ops);
@ -228,10 +268,29 @@ static void clk_gate2_disable(struct clk_hw *hw)
clk_gate_toggle(hw, false);
}
static void clk_gate2_list_registers(struct seq_file *f, struct clk_hw *hw)
{
struct clk_gate2 *gt = to_clk_gate2(hw);
int size, i, val;
static struct clk_register_data data[] = {
{"EN_REG", 0x0},
};
size = ARRAY_SIZE(data);
for (i = 0; i < size; i++) {
regmap_read(gt->clkr.regmap, gt->clkr.enable_reg +
data[i].offset, &val);
seq_printf(f, "%20s: 0x%.8x\n", data[i].name, val);
}
}
/* Clock ops for simple gate clocks with debugfs register listing. */
const struct clk_ops clk_gate2_ops = {
	.enable = clk_gate2_enable,
	.disable = clk_gate2_disable,
	.is_enabled = clk_is_enabled_regmap,
	.list_registers = clk_gate2_list_registers,
};
EXPORT_SYMBOL_GPL(clk_gate2_ops);

View file

@ -24,6 +24,7 @@ struct freq_tbl {
u16 m;
u16 n;
unsigned long src_freq;
#define FIXED_FREQ_SRC 0
};
/**

View file

@ -232,9 +232,10 @@ static int _freq_tbl_determine_rate(struct clk_hw *hw,
const struct freq_tbl *f, struct clk_rate_request *req)
{
unsigned long clk_flags, rate = req->rate;
struct clk_rate_request parent_req = { };
struct clk_hw *p;
struct clk_rcg2 *rcg = to_clk_rcg2(hw);
int index;
int index, ret = 0;
f = qcom_find_freq(f, rate);
if (!f)
@ -265,6 +266,21 @@ static int _freq_tbl_determine_rate(struct clk_hw *hw,
req->best_parent_rate = rate;
req->rate = f->freq;
if (f->src_freq != FIXED_FREQ_SRC) {
rate = parent_req.rate = f->src_freq;
parent_req.best_parent_hw = p;
ret = __clk_determine_rate(p, &parent_req);
if (ret)
return ret;
ret = clk_set_rate(p->clk, parent_req.rate);
if (ret) {
pr_err("Failed set rate(%lu) on parent for non-fixed source\n",
parent_req.rate);
return ret;
}
}
return 0;
}
@ -317,6 +333,53 @@ static int clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f)
return update_config(rcg);
}
static void clk_rcg2_list_registers(struct seq_file *f, struct clk_hw *hw)
{
struct clk_rcg2 *rcg = to_clk_rcg2(hw);
int i = 0, size = 0, val;
static struct clk_register_data data[] = {
{"CMD_RCGR", 0x0},
{"CFG_RCGR", 0x4},
};
static struct clk_register_data data1[] = {
{"CMD_RCGR", 0x0},
{"CFG_RCGR", 0x4},
{"M_VAL", 0x8},
{"N_VAL", 0xC},
{"D_VAL", 0x10},
};
if (rcg->mnd_width) {
size = ARRAY_SIZE(data1);
for (i = 0; i < size; i++) {
regmap_read(rcg->clkr.regmap, (rcg->cmd_rcgr +
data1[i].offset), &val);
seq_printf(f, "%20s: 0x%.8x\n", data1[i].name, val);
}
} else {
size = ARRAY_SIZE(data);
for (i = 0; i < size; i++) {
regmap_read(rcg->clkr.regmap, (rcg->cmd_rcgr +
data[i].offset), &val);
seq_printf(f, "%20s: 0x%.8x\n", data[i].name, val);
}
}
}
/* Return the nth supported frequency for a given clock. */
static long clk_rcg2_list_rate(struct clk_hw *hw, unsigned n,
unsigned long fmax)
{
struct clk_rcg2 *rcg = to_clk_rcg2(hw);
if (!rcg->freq_tbl)
return -ENXIO;
return (rcg->freq_tbl + n)->freq;
}
static int __clk_rcg2_set_rate(struct clk_hw *hw, unsigned long rate)
{
struct clk_rcg2 *rcg = to_clk_rcg2(hw);
@ -351,6 +414,8 @@ const struct clk_ops clk_rcg2_ops = {
.determine_rate = clk_rcg2_determine_rate,
.set_rate = clk_rcg2_set_rate,
.set_rate_and_parent = clk_rcg2_set_rate_and_parent,
.list_rate = clk_rcg2_list_rate,
.list_registers = clk_rcg2_list_registers,
};
EXPORT_SYMBOL_GPL(clk_rcg2_ops);
@ -557,6 +622,7 @@ const struct clk_ops clk_edp_pixel_ops = {
.set_rate = clk_edp_pixel_set_rate,
.set_rate_and_parent = clk_edp_pixel_set_rate_and_parent,
.determine_rate = clk_edp_pixel_determine_rate,
.list_registers = clk_rcg2_list_registers,
};
EXPORT_SYMBOL_GPL(clk_edp_pixel_ops);
@ -615,6 +681,7 @@ const struct clk_ops clk_byte_ops = {
.set_rate = clk_byte_set_rate,
.set_rate_and_parent = clk_byte_set_rate_and_parent,
.determine_rate = clk_byte_determine_rate,
.list_registers = clk_rcg2_list_registers,
};
EXPORT_SYMBOL_GPL(clk_byte_ops);
@ -685,6 +752,7 @@ const struct clk_ops clk_byte2_ops = {
.set_rate = clk_byte2_set_rate,
.set_rate_and_parent = clk_byte2_set_rate_and_parent,
.determine_rate = clk_byte2_determine_rate,
.list_registers = clk_rcg2_list_registers,
};
EXPORT_SYMBOL_GPL(clk_byte2_ops);
@ -775,6 +843,7 @@ const struct clk_ops clk_pixel_ops = {
.set_rate = clk_pixel_set_rate,
.set_rate_and_parent = clk_pixel_set_rate_and_parent,
.determine_rate = clk_pixel_determine_rate,
.list_registers = clk_rcg2_list_registers,
};
EXPORT_SYMBOL_GPL(clk_pixel_ops);
@ -864,6 +933,7 @@ const struct clk_ops clk_gfx3d_ops = {
.set_rate = clk_gfx3d_set_rate,
.set_rate_and_parent = clk_gfx3d_set_rate_and_parent,
.determine_rate = clk_gfx3d_determine_rate,
.list_registers = clk_rcg2_list_registers,
};
EXPORT_SYMBOL_GPL(clk_gfx3d_ops);
@ -944,5 +1014,7 @@ const struct clk_ops clk_gfx3d_src_ops = {
.set_rate = clk_gfx3d_set_rate,
.set_rate_and_parent = clk_gfx3d_src_set_rate_and_parent,
.determine_rate = clk_gfx3d_src_determine_rate,
.list_rate = clk_rcg2_list_rate,
.list_registers = clk_rcg2_list_registers,
};
EXPORT_SYMBOL_GPL(clk_gfx3d_src_ops);

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, The Linux Foundation. All rights reserved.
* Copyright (c) 2014, 2016, The Linux Foundation. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@ -15,6 +15,7 @@
#define __QCOM_CLK_REGMAP_H__
#include <linux/clk-provider.h>
#include <linux/debugfs.h>
struct regmap;
@ -42,4 +43,9 @@ void clk_disable_regmap(struct clk_hw *hw);
struct clk *
devm_clk_register_regmap(struct device *dev, struct clk_regmap *rclk);
struct clk_register_data {
char *name;
u32 offset;
};
#endif

View file

@ -893,6 +893,11 @@
#define A5XX_GDPM_INT_MASK 0xB811
#define A5XX_GPMU_BEC_ENABLE 0xB9A0
/* ISENSE registers */
#define A5XX_GPU_CS_DECIMAL_ALIGN 0xC16A
#define A5XX_GPU_CS_SENSOR_PARAM_CORE_1 0xC126
#define A5XX_GPU_CS_SENSOR_PARAM_CORE_2 0xC127
#define A5XX_GPU_CS_SW_OV_FUSE_EN 0xC168
#define A5XX_GPU_CS_SENSOR_GENERAL_STATUS 0xC41A
#define A5XX_GPU_CS_AMP_CALIBRATION_STATUS1_0 0xC41D
#define A5XX_GPU_CS_AMP_CALIBRATION_STATUS1_2 0xC41F
@ -900,5 +905,6 @@
#define A5XX_GPU_CS_ENABLE_REG 0xC520
#define A5XX_GPU_CS_AMP_CALIBRATION_CONTROL1 0xC557
#define A5XX_GPU_CS_AMP_CALIBRATION_DONE 0xC565
#define A5XX_GPU_CS_ENDPOINT_CALIBRATION_DONE 0xC556
#endif /* _A5XX_REG_H */

View file

@ -254,7 +254,7 @@ static int a5xx_critical_packet_construct(struct adreno_device *adreno_dev)
return ret;
ret = kgsl_allocate_user(&adreno_dev->dev, &crit_pkts_refbuf0,
NULL, PAGE_SIZE, KGSL_MEMFLAGS_SECURE);
PAGE_SIZE, KGSL_MEMFLAGS_SECURE);
if (ret)
return ret;
@ -431,6 +431,43 @@ static int _poll_gdsc_status(struct adreno_device *adreno_dev,
return 0;
}
/*
 * Restore the A540 isense (current-sense) registers after a power cycle.
 *
 * The GPMU saves the isense register values into its data RAM and writes a
 * 0xBABEFACE signature at GPMU_ISENSE_SAVE. On the first call that sees the
 * signature, the saved values are cached in the static isense_regs[] buffer
 * and the signature is cleared; every call thereafter writes the cached
 * values back to the hardware registers. Only applies to A540 parts.
 */
static void a5xx_restore_isense_regs(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	unsigned int reg, i, ramp = GPMU_ISENSE_SAVE;
	/*
	 * isense_regs[0] == 0xFFFF marks "nothing cached yet" (only the
	 * first element is initialized to 0xFFFF; the rest are zero).
	 */
	static unsigned int isense_regs[6] = {0xFFFF}, isense_reg_addr[] = {
		A5XX_GPU_CS_DECIMAL_ALIGN,
		A5XX_GPU_CS_SENSOR_PARAM_CORE_1,
		A5XX_GPU_CS_SENSOR_PARAM_CORE_2,
		A5XX_GPU_CS_SW_OV_FUSE_EN,
		A5XX_GPU_CS_ENDPOINT_CALIBRATION_DONE,
		A5XX_GPMU_TEMP_SENSOR_CONFIG};

	if (!adreno_is_a540(adreno_dev))
		return;

	/* read signature */
	kgsl_regread(device, ramp++, &reg);

	if (reg == 0xBABEFACE) {
		/* store memory locations in buffer */
		for (i = 0; i < ARRAY_SIZE(isense_regs); i++)
			kgsl_regread(device, ramp + i, isense_regs + i);

		/* clear signature */
		kgsl_regwrite(device, GPMU_ISENSE_SAVE, 0x0);
	}

	/* if we never stored memory locations - do nothing */
	if (isense_regs[0] == 0xFFFF)
		return;

	/* restore registers from memory */
	for (i = 0; i < ARRAY_SIZE(isense_reg_addr); i++)
		kgsl_regwrite(device, isense_reg_addr[i], isense_regs[i]);
}
/*
* a5xx_regulator_enable() - Enable any necessary HW regulators
* @adreno_dev: The adreno device pointer
@ -480,6 +517,7 @@ static int a5xx_regulator_enable(struct adreno_device *adreno_dev)
kgsl_regrmw(device, A5XX_GPMU_GPMU_SP_CLOCK_CONTROL,
CNTL_IP_CLK_ENABLE, 1);
a5xx_restore_isense_regs(adreno_dev);
return 0;
}

View file

@ -228,6 +228,7 @@ void a5xx_hwcg_set(struct adreno_device *adreno_dev, bool on);
#define LM_SEQUENCE_ID 1
#define MAX_SEQUENCE_ID 3
#define GPMU_ISENSE_SAVE (A5XX_GPMU_DATA_RAM_BASE + 200/4)
/* LM defaults */
#define LM_DEFAULT_LIMIT 6000
#define A530_DEFAULT_LEAKAGE 0x004E001A

View file

@ -176,9 +176,10 @@ int kgsl_memfree_find_entry(pid_t ptname, uint64_t *gpuaddr,
return 0;
}
static void kgsl_memfree_purge(pid_t ptname, uint64_t gpuaddr,
uint64_t size)
static void kgsl_memfree_purge(struct kgsl_pagetable *pagetable,
uint64_t gpuaddr, uint64_t size)
{
pid_t ptname = pagetable ? pagetable->name : 0;
int i;
if (memfree.list == NULL)
@ -342,62 +343,26 @@ kgsl_mem_entry_destroy(struct kref *kref)
}
EXPORT_SYMBOL(kgsl_mem_entry_destroy);
/**
* kgsl_mem_entry_track_gpuaddr - Insert a mem_entry in the address tree and
* assign it with a gpu address space before insertion
* @process: the process that owns the memory
* @entry: the memory entry
*
* @returns - 0 on succcess else error code
*
* Insert the kgsl_mem_entry in to the rb_tree for searching by GPU address.
* The assignment of gpu address and insertion into list needs to
* happen with the memory lock held to avoid race conditions between
* gpu address being selected and some other thread looking through the
* rb list in search of memory based on gpuaddr
* This function should be called with processes memory spinlock held
*/
static int
kgsl_mem_entry_track_gpuaddr(struct kgsl_process_private *process,
struct kgsl_mem_entry *entry)
/* Allocate a IOVA for memory objects that don't use SVM */
static int kgsl_mem_entry_track_gpuaddr(struct kgsl_device *device,
struct kgsl_process_private *process,
struct kgsl_mem_entry *entry)
{
struct kgsl_pagetable *pagetable = process->pagetable;
struct kgsl_pagetable *pagetable;
/*
* If cpu=gpu map is used then caller needs to set the
* gpu address
* If SVM is enabled for this object then the address needs to be
* assigned elsewhere
*/
if (kgsl_memdesc_use_cpu_map(&entry->memdesc)) {
if (!entry->memdesc.gpuaddr)
return 0;
} else if (entry->memdesc.gpuaddr) {
WARN_ONCE(1, "gpuaddr assigned w/o holding memory lock\n");
return -EINVAL;
}
if (kgsl_memdesc_is_secured(&entry->memdesc))
pagetable = pagetable->mmu->securepagetable;
if (kgsl_memdesc_use_cpu_map(&entry->memdesc))
return 0;
pagetable = kgsl_memdesc_is_secured(&entry->memdesc) ?
device->mmu.securepagetable : process->pagetable;
return kgsl_mmu_get_gpuaddr(pagetable, &entry->memdesc);
}
/**
* kgsl_mem_entry_untrack_gpuaddr() - Untrack memory that is previously tracked
* process - Pointer to process private to which memory belongs
* entry - Memory entry to untrack
*
* Function just does the opposite of kgsl_mem_entry_track_gpuaddr. Needs to be
* called with processes spin lock held
*/
static void
kgsl_mem_entry_untrack_gpuaddr(struct kgsl_process_private *process,
struct kgsl_mem_entry *entry)
{
struct kgsl_pagetable *pagetable = entry->memdesc.pagetable;
if (entry->memdesc.gpuaddr)
kgsl_mmu_put_gpuaddr(pagetable, &entry->memdesc);
}
/* Commit the entry to the process so it can be accessed by other operations */
static void kgsl_mem_entry_commit_process(struct kgsl_mem_entry *entry)
{
@ -409,33 +374,25 @@ static void kgsl_mem_entry_commit_process(struct kgsl_mem_entry *entry)
spin_unlock(&entry->priv->mem_lock);
}
/**
* kgsl_mem_entry_attach_process - Attach a mem_entry to its owner process
* @entry: the memory entry
* @process: the owner process
*
* Attach a newly created mem_entry to its owner process so that
* it can be found later. The mem_entry will be added to mem_idr and have
* its 'id' field assigned.
*
* @returns - 0 on success or error code on failure.
/*
* Attach the memory object to a process by (possibly) getting a GPU address and
* (possibly) mapping it
*/
int
kgsl_mem_entry_attach_process(struct kgsl_mem_entry *entry,
struct kgsl_device_private *dev_priv)
static int kgsl_mem_entry_attach_process(struct kgsl_device *device,
struct kgsl_process_private *process,
struct kgsl_mem_entry *entry)
{
int id;
int ret;
struct kgsl_process_private *process = dev_priv->process_priv;
struct kgsl_pagetable *pagetable = NULL;
int id, ret;
ret = kgsl_process_private_get(process);
if (!ret)
return -EBADF;
ret = kgsl_mem_entry_track_gpuaddr(process, entry);
if (ret)
goto err_put_proc_priv;
ret = kgsl_mem_entry_track_gpuaddr(device, process, entry);
if (ret) {
kgsl_process_private_put(process);
return ret;
}
idr_preload(GFP_KERNEL);
spin_lock(&process->mem_lock);
@ -445,47 +402,44 @@ kgsl_mem_entry_attach_process(struct kgsl_mem_entry *entry,
idr_preload_end();
if (id < 0) {
ret = id;
kgsl_mem_entry_untrack_gpuaddr(process, entry);
goto err_put_proc_priv;
if (!kgsl_memdesc_use_cpu_map(&entry->memdesc))
kgsl_mmu_put_gpuaddr(&entry->memdesc);
kgsl_process_private_put(process);
return id;
}
entry->id = id;
entry->priv = process;
/* map the memory after unlocking if gpuaddr has been assigned */
/*
* Map the memory if a GPU address is already assigned, either through
* kgsl_mem_entry_track_gpuaddr() or via some other SVM process
*/
if (entry->memdesc.gpuaddr) {
pagetable = process->pagetable;
if (kgsl_memdesc_is_secured(&entry->memdesc))
pagetable = pagetable->mmu->securepagetable;
entry->memdesc.pagetable = pagetable;
if (entry->memdesc.flags & KGSL_MEMFLAGS_SPARSE_VIRT)
ret = kgsl_mmu_sparse_dummy_map(pagetable,
&entry->memdesc, 0, entry->memdesc.size);
ret = kgsl_mmu_sparse_dummy_map(
entry->memdesc.pagetable,
&entry->memdesc, 0,
entry->memdesc.size);
else if (entry->memdesc.gpuaddr)
ret = kgsl_mmu_map(pagetable, &entry->memdesc);
ret = kgsl_mmu_map(entry->memdesc.pagetable,
&entry->memdesc);
if (ret)
kgsl_mem_entry_detach_process(entry);
}
kgsl_memfree_purge(pagetable ? pagetable->name : 0,
entry->memdesc.gpuaddr, entry->memdesc.size);
kgsl_memfree_purge(entry->memdesc.pagetable, entry->memdesc.gpuaddr,
entry->memdesc.size);
return ret;
err_put_proc_priv:
kgsl_process_private_put(process);
return ret;
}
/* Detach a memory entry from a process and unmap it from the MMU */
static void kgsl_mem_entry_detach_process(struct kgsl_mem_entry *entry)
{
unsigned int type;
int ret;
if (entry == NULL)
return;
@ -502,14 +456,7 @@ static void kgsl_mem_entry_detach_process(struct kgsl_mem_entry *entry)
entry->priv->stats[type].cur -= entry->memdesc.size;
spin_unlock(&entry->priv->mem_lock);
ret = kgsl_mmu_unmap(entry->memdesc.pagetable, &entry->memdesc);
/*
* Do not free the gpuaddr/size if unmap fails. Because if we try
* to map this range in future, the iommu driver will throw
* a BUG_ON() because it feels we are overwriting a mapping.
*/
if (ret == 0)
kgsl_mem_entry_untrack_gpuaddr(entry->priv, entry);
kgsl_mmu_put_gpuaddr(&entry->memdesc);
kgsl_process_private_put(entry->priv);
@ -2139,10 +2086,21 @@ static int kgsl_setup_anon_useraddr(struct kgsl_pagetable *pagetable,
entry->memdesc.pagetable = pagetable;
entry->memdesc.size = (uint64_t) size;
entry->memdesc.useraddr = hostptr;
if (kgsl_memdesc_use_cpu_map(&entry->memdesc))
entry->memdesc.gpuaddr = (uint64_t) entry->memdesc.useraddr;
entry->memdesc.flags |= KGSL_MEMFLAGS_USERMEM_ADDR;
if (kgsl_memdesc_use_cpu_map(&entry->memdesc)) {
int ret;
/* Register the address in the database */
ret = kgsl_mmu_set_svm_region(pagetable,
(uint64_t) entry->memdesc.useraddr, (uint64_t) size);
if (ret)
return ret;
entry->memdesc.gpuaddr = (uint64_t) entry->memdesc.useraddr;
}
return memdesc_sg_virt(&entry->memdesc, NULL);
}
@ -2392,7 +2350,7 @@ long kgsl_ioctl_gpuobj_import(struct kgsl_device_private *dev_priv,
param->flags = entry->memdesc.flags;
ret = kgsl_mem_entry_attach_process(entry, dev_priv);
ret = kgsl_mem_entry_attach_process(dev_priv->device, private, entry);
if (ret)
goto unmap;
@ -2696,7 +2654,8 @@ long kgsl_ioctl_map_user_mem(struct kgsl_device_private *dev_priv,
/* echo back flags */
param->flags = (unsigned int) entry->memdesc.flags;
result = kgsl_mem_entry_attach_process(entry, dev_priv);
result = kgsl_mem_entry_attach_process(dev_priv->device, private,
entry);
if (result)
goto error_attach;
@ -3089,11 +3048,11 @@ static struct kgsl_mem_entry *gpumem_alloc_entry(
entry->memdesc.priv |= KGSL_MEMDESC_SECURE;
ret = kgsl_allocate_user(dev_priv->device, &entry->memdesc,
private->pagetable, size, flags);
size, flags);
if (ret != 0)
goto err;
ret = kgsl_mem_entry_attach_process(entry, dev_priv);
ret = kgsl_mem_entry_attach_process(dev_priv->device, private, entry);
if (ret != 0) {
kgsl_sharedmem_free(&entry->memdesc);
goto err;
@ -3292,7 +3251,7 @@ long kgsl_ioctl_sparse_phys_alloc(struct kgsl_device_private *dev_priv,
kgsl_memdesc_set_align(&entry->memdesc, ilog2(param->pagesize));
ret = kgsl_allocate_user(dev_priv->device, &entry->memdesc,
process->pagetable, param->size, entry->memdesc.flags);
param->size, entry->memdesc.flags);
if (ret)
goto err_remove_idr;
@ -3359,6 +3318,7 @@ long kgsl_ioctl_sparse_phys_free(struct kgsl_device_private *dev_priv,
long kgsl_ioctl_sparse_virt_alloc(struct kgsl_device_private *dev_priv,
unsigned int cmd, void *data)
{
struct kgsl_process_private *private = dev_priv->process_priv;
struct kgsl_sparse_virt_alloc *param = data;
struct kgsl_mem_entry *entry;
int ret;
@ -3379,7 +3339,7 @@ long kgsl_ioctl_sparse_virt_alloc(struct kgsl_device_private *dev_priv,
spin_lock_init(&entry->bind_lock);
entry->bind_tree = RB_ROOT;
ret = kgsl_mem_entry_attach_process(entry, dev_priv);
ret = kgsl_mem_entry_attach_process(dev_priv->device, private, entry);
if (ret) {
kfree(entry);
return ret;
@ -4057,16 +4017,16 @@ static unsigned long _gpu_set_svm_region(struct kgsl_process_private *private,
return ret;
entry->memdesc.gpuaddr = (uint64_t) addr;
entry->memdesc.pagetable = private->pagetable;
ret = kgsl_mmu_map(private->pagetable, &entry->memdesc);
if (ret) {
kgsl_mmu_put_gpuaddr(private->pagetable,
&entry->memdesc);
kgsl_mmu_put_gpuaddr(&entry->memdesc);
return ret;
}
kgsl_memfree_purge(private->pagetable ? private->pagetable->name : 0,
entry->memdesc.gpuaddr, entry->memdesc.size);
kgsl_memfree_purge(private->pagetable, entry->memdesc.gpuaddr,
entry->memdesc.size);
return addr;
}

View file

@ -1403,17 +1403,16 @@ static int _setstate_alloc(struct kgsl_device *device,
{
int ret;
ret = kgsl_sharedmem_alloc_contig(device, &iommu->setstate, NULL,
PAGE_SIZE);
if (ret)
return ret;
ret = kgsl_sharedmem_alloc_contig(device, &iommu->setstate, PAGE_SIZE);
/* Mark the setstate memory as read only */
iommu->setstate.flags |= KGSL_MEMFLAGS_GPUREADONLY;
if (!ret) {
/* Mark the setstate memory as read only */
iommu->setstate.flags |= KGSL_MEMFLAGS_GPUREADONLY;
kgsl_sharedmem_set(device, &iommu->setstate, 0, 0, PAGE_SIZE);
kgsl_sharedmem_set(device, &iommu->setstate, 0, 0, PAGE_SIZE);
}
return 0;
return ret;
}
static int kgsl_iommu_init(struct kgsl_mmu *mmu)
@ -1663,7 +1662,7 @@ static int _iommu_map_guard_page(struct kgsl_pagetable *pt,
if (!kgsl_secure_guard_page_memdesc.sgt) {
if (kgsl_allocate_user(KGSL_MMU_DEVICE(pt->mmu),
&kgsl_secure_guard_page_memdesc, pt,
&kgsl_secure_guard_page_memdesc,
sgp_size, KGSL_MEMFLAGS_SECURE)) {
KGSL_CORE_ERR(
"Secure guard page alloc failed\n");
@ -2364,23 +2363,27 @@ static int kgsl_iommu_get_gpuaddr(struct kgsl_pagetable *pagetable,
}
ret = _insert_gpuaddr(pagetable, addr, size);
if (ret == 0)
if (ret == 0) {
memdesc->gpuaddr = addr;
memdesc->pagetable = pagetable;
}
out:
spin_unlock(&pagetable->lock);
return ret;
}
static void kgsl_iommu_put_gpuaddr(struct kgsl_pagetable *pagetable,
struct kgsl_memdesc *memdesc)
static void kgsl_iommu_put_gpuaddr(struct kgsl_memdesc *memdesc)
{
spin_lock(&pagetable->lock);
if (memdesc->pagetable == NULL)
return;
if (_remove_gpuaddr(pagetable, memdesc->gpuaddr))
spin_lock(&memdesc->pagetable->lock);
if (_remove_gpuaddr(memdesc->pagetable, memdesc->gpuaddr))
BUG();
spin_unlock(&pagetable->lock);
spin_unlock(&memdesc->pagetable->lock);
}
static int kgsl_iommu_svm_range(struct kgsl_pagetable *pagetable,

View file

@ -424,17 +424,29 @@ EXPORT_SYMBOL(kgsl_mmu_map);
* @pagetable: Pagetable to release the memory from
* @memdesc: Memory descriptor containing the GPU address to free
*/
void kgsl_mmu_put_gpuaddr(struct kgsl_pagetable *pagetable,
struct kgsl_memdesc *memdesc)
void kgsl_mmu_put_gpuaddr(struct kgsl_memdesc *memdesc)
{
struct kgsl_pagetable *pagetable = memdesc->pagetable;
int unmap_fail = 0;
if (memdesc->size == 0 || memdesc->gpuaddr == 0)
return;
if (PT_OP_VALID(pagetable, put_gpuaddr))
pagetable->pt_ops->put_gpuaddr(pagetable, memdesc);
if (!kgsl_memdesc_is_global(memdesc))
unmap_fail = kgsl_mmu_unmap(pagetable, memdesc);
/*
* Do not free the gpuaddr/size if unmap fails. Because if we
* try to map this range in future, the iommu driver will throw
* a BUG_ON() because it feels we are overwriting a mapping.
*/
if (PT_OP_VALID(pagetable, put_gpuaddr) && (unmap_fail == 0))
pagetable->pt_ops->put_gpuaddr(memdesc);
if (!kgsl_memdesc_is_global(memdesc))
memdesc->gpuaddr = 0;
memdesc->pagetable = NULL;
}
EXPORT_SYMBOL(kgsl_mmu_put_gpuaddr);
@ -630,7 +642,12 @@ static int nommu_get_gpuaddr(struct kgsl_pagetable *pagetable,
memdesc->gpuaddr = (uint64_t) sg_phys(memdesc->sgt->sgl);
return memdesc->gpuaddr != 0 ? 0 : -ENOMEM;
if (memdesc->gpuaddr) {
memdesc->pagetable = pagetable;
return 0;
}
return -ENOMEM;
}
static struct kgsl_mmu_pt_ops nommu_pt_ops = {

View file

@ -92,7 +92,7 @@ struct kgsl_mmu_pt_ops {
u64 (*get_ttbr0)(struct kgsl_pagetable *);
u32 (*get_contextidr)(struct kgsl_pagetable *);
int (*get_gpuaddr)(struct kgsl_pagetable *, struct kgsl_memdesc *);
void (*put_gpuaddr)(struct kgsl_pagetable *, struct kgsl_memdesc *);
void (*put_gpuaddr)(struct kgsl_memdesc *);
uint64_t (*find_svm_region)(struct kgsl_pagetable *, uint64_t, uint64_t,
uint64_t, uint64_t);
int (*set_svm_region)(struct kgsl_pagetable *, uint64_t, uint64_t);
@ -181,8 +181,7 @@ int kgsl_mmu_map(struct kgsl_pagetable *pagetable,
struct kgsl_memdesc *memdesc);
int kgsl_mmu_unmap(struct kgsl_pagetable *pagetable,
struct kgsl_memdesc *memdesc);
void kgsl_mmu_put_gpuaddr(struct kgsl_pagetable *pagetable,
struct kgsl_memdesc *memdesc);
void kgsl_mmu_put_gpuaddr(struct kgsl_memdesc *memdesc);
unsigned int kgsl_virtaddr_to_physaddr(void *virtaddr);
unsigned int kgsl_mmu_log_fault_addr(struct kgsl_mmu *mmu,
u64 ttbr0, uint64_t addr);

View file

@ -318,12 +318,11 @@ static int kgsl_cma_alloc_secure(struct kgsl_device *device,
static int kgsl_allocate_secure(struct kgsl_device *device,
struct kgsl_memdesc *memdesc,
struct kgsl_pagetable *pagetable,
uint64_t size) {
int ret;
if (MMU_FEATURE(&device->mmu, KGSL_MMU_HYP_SECURE_ALLOC))
ret = kgsl_sharedmem_page_alloc_user(memdesc, pagetable, size);
ret = kgsl_sharedmem_page_alloc_user(memdesc, size);
else
ret = kgsl_cma_alloc_secure(device, memdesc, size);
@ -332,7 +331,6 @@ static int kgsl_allocate_secure(struct kgsl_device *device,
int kgsl_allocate_user(struct kgsl_device *device,
struct kgsl_memdesc *memdesc,
struct kgsl_pagetable *pagetable,
uint64_t size, uint64_t flags)
{
int ret;
@ -340,12 +338,11 @@ int kgsl_allocate_user(struct kgsl_device *device,
memdesc->flags = flags;
if (kgsl_mmu_get_mmutype(device) == KGSL_MMU_TYPE_NONE)
ret = kgsl_sharedmem_alloc_contig(device, memdesc,
pagetable, size);
ret = kgsl_sharedmem_alloc_contig(device, memdesc, size);
else if (flags & KGSL_MEMFLAGS_SECURE)
ret = kgsl_allocate_secure(device, memdesc, pagetable, size);
ret = kgsl_allocate_secure(device, memdesc, size);
else
ret = kgsl_sharedmem_page_alloc_user(memdesc, pagetable, size);
ret = kgsl_sharedmem_page_alloc_user(memdesc, size);
return ret;
}
@ -637,7 +634,6 @@ static inline int get_page_size(size_t size, unsigned int align)
int
kgsl_sharedmem_page_alloc_user(struct kgsl_memdesc *memdesc,
struct kgsl_pagetable *pagetable,
uint64_t size)
{
int ret = 0;
@ -671,7 +667,6 @@ kgsl_sharedmem_page_alloc_user(struct kgsl_memdesc *memdesc,
len_alloc = PAGE_ALIGN(size) >> PAGE_SHIFT;
memdesc->pagetable = pagetable;
memdesc->ops = &kgsl_page_alloc_ops;
/*
@ -805,18 +800,8 @@ void kgsl_sharedmem_free(struct kgsl_memdesc *memdesc)
if (memdesc == NULL || memdesc->size == 0)
return;
if (memdesc->gpuaddr) {
int ret = 0;
ret = kgsl_mmu_unmap(memdesc->pagetable, memdesc);
/*
* Do not free the gpuaddr/size if unmap fails. Because if we
* try to map this range in future, the iommu driver will throw
* a BUG_ON() because it feels we are overwriting a mapping.
*/
if (ret == 0)
kgsl_mmu_put_gpuaddr(memdesc->pagetable, memdesc);
}
/* Make sure the memory object has been unmapped */
kgsl_mmu_put_gpuaddr(memdesc);
if (memdesc->ops && memdesc->ops->free)
memdesc->ops->free(memdesc);
@ -996,8 +981,7 @@ void kgsl_get_memory_usage(char *name, size_t name_size, uint64_t memflags)
EXPORT_SYMBOL(kgsl_get_memory_usage);
int kgsl_sharedmem_alloc_contig(struct kgsl_device *device,
struct kgsl_memdesc *memdesc,
struct kgsl_pagetable *pagetable, uint64_t size)
struct kgsl_memdesc *memdesc, uint64_t size)
{
int result = 0;
@ -1006,7 +990,6 @@ int kgsl_sharedmem_alloc_contig(struct kgsl_device *device,
return -EINVAL;
memdesc->size = size;
memdesc->pagetable = pagetable;
memdesc->ops = &kgsl_cma_ops;
memdesc->dev = device->dev->parent;
@ -1097,7 +1080,6 @@ static int kgsl_cma_alloc_secure(struct kgsl_device *device,
{
struct kgsl_iommu *iommu = KGSL_IOMMU_PRIV(device);
int result = 0;
struct kgsl_pagetable *pagetable = device->mmu.securepagetable;
size_t aligned;
/* Align size to 1M boundaries */
@ -1117,7 +1099,6 @@ static int kgsl_cma_alloc_secure(struct kgsl_device *device,
memdesc->priv &= ~KGSL_MEMDESC_GUARD_PAGE;
memdesc->size = aligned;
memdesc->pagetable = pagetable;
memdesc->ops = &kgsl_cma_ops;
memdesc->dev = iommu->ctx[KGSL_IOMMU_CONTEXT_SECURE].dev;

View file

@ -26,7 +26,7 @@ struct kgsl_process_private;
int kgsl_sharedmem_alloc_contig(struct kgsl_device *device,
struct kgsl_memdesc *memdesc,
struct kgsl_pagetable *pagetable, uint64_t size);
uint64_t size);
void kgsl_sharedmem_free(struct kgsl_memdesc *memdesc);
@ -66,13 +66,11 @@ void kgsl_sharedmem_uninit_sysfs(void);
int kgsl_allocate_user(struct kgsl_device *device,
struct kgsl_memdesc *memdesc,
struct kgsl_pagetable *pagetable,
uint64_t size, uint64_t flags);
void kgsl_get_memory_usage(char *str, size_t len, uint64_t memflags);
int kgsl_sharedmem_page_alloc_user(struct kgsl_memdesc *memdesc,
struct kgsl_pagetable *pagetable,
uint64_t size);
#define MEMFLAGS(_flags, _mask, _shift) \
@ -287,11 +285,10 @@ static inline int kgsl_allocate_global(struct kgsl_device *device,
memdesc->priv = priv;
if ((memdesc->priv & KGSL_MEMDESC_CONTIG) != 0)
ret = kgsl_sharedmem_alloc_contig(device, memdesc, NULL,
ret = kgsl_sharedmem_alloc_contig(device, memdesc,
(size_t) size);
else {
ret = kgsl_sharedmem_page_alloc_user(memdesc, NULL,
(size_t) size);
ret = kgsl_sharedmem_page_alloc_user(memdesc, (size_t) size);
if (ret == 0)
kgsl_memdesc_map(memdesc);
}

View file

@ -20,12 +20,18 @@
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/delay.h>
#include <linux/qpnp/qpnp-revid.h>
#define FG_ADC_RR_EN_CTL 0x46
#define FG_ADC_RR_SKIN_TEMP_LSB 0x50
#define FG_ADC_RR_SKIN_TEMP_MSB 0x51
#define FG_ADC_RR_RR_ADC_CTL 0x52
#define FG_ADC_RR_ADC_CTL_CONTINUOUS_SEL_MASK 0x8
#define FG_ADC_RR_ADC_CTL_CONTINUOUS_SEL BIT(3)
#define FG_ADC_RR_ADC_LOG 0x53
#define FG_ADC_RR_ADC_LOG_CLR_CTRL_MASK 0xFE
#define FG_ADC_RR_ADC_LOG_CLR_CTRL BIT(0)
#define FG_ADC_RR_FAKE_BATT_LOW_LSB 0x58
#define FG_ADC_RR_FAKE_BATT_LOW_MSB 0x59
@ -68,6 +74,8 @@
#define FG_ADC_RR_USB_IN_V_CTRL 0x90
#define FG_ADC_RR_USB_IN_V_TRIGGER 0x91
#define FG_ADC_RR_USB_IN_V_EVERY_CYCLE_MASK 0x80
#define FG_ADC_RR_USB_IN_V_EVERY_CYCLE BIT(7)
#define FG_ADC_RR_USB_IN_V_STS 0x92
#define FG_ADC_RR_USB_IN_V_LSB 0x94
#define FG_ADC_RR_USB_IN_V_MSB 0x95
@ -175,6 +183,9 @@
#define FG_RR_ADC_MAX_CONTINUOUS_BUFFER_LEN 16
#define FG_RR_ADC_STS_CHANNEL_READING_MASK 0x3
#define FG_RR_CONV_CONTINUOUS_TIME_MIN 80000
#define FG_RR_CONV_CONTINUOUS_TIME_MAX 81000
/*
* The channel number is not a physical index in hardware,
* rather it's a list of supported channels and an index to
@ -230,6 +241,21 @@ struct rradc_chan_prop {
u16 adc_code, int *result);
};
/*
 * rradc_masked_write() - read-modify-write the masked bits of an RRADC
 * register at rr_adc->base + offset.
 *
 * Returns 0 on success or the negative error code from regmap on failure
 * (logged with the register offset).
 */
static int rradc_masked_write(struct rradc_chip *rr_adc, u16 offset, u8 mask,
		u8 val)
{
	int rc = regmap_update_bits(rr_adc->regmap, rr_adc->base + offset,
				    mask, val);

	if (rc)
		pr_err("spmi write failed: addr=%03X, rc=%d\n", offset, rc);

	return rc;
}
static int rradc_read(struct rradc_chip *rr_adc, u16 offset, u8 *data, int len)
{
int rc = 0, retry_cnt = 0, i = 0;
@ -547,7 +573,7 @@ static const struct rradc_channels rradc_chans[] = {
static int rradc_do_conversion(struct rradc_chip *chip,
struct rradc_chan_prop *prop, u16 *data)
{
int rc = 0, bytes_to_read = 0;
int rc = 0, bytes_to_read = 0, retry = 0;
u8 buf[6];
u16 offset = 0, batt_id_5 = 0, batt_id_15 = 0, batt_id_150 = 0;
u16 status = 0;
@ -558,7 +584,8 @@ static int rradc_do_conversion(struct rradc_chip *chip,
(prop->channel != RR_ADC_CHG_HOT_TEMP) &&
(prop->channel != RR_ADC_CHG_TOO_HOT_TEMP) &&
(prop->channel != RR_ADC_SKIN_HOT_TEMP) &&
(prop->channel != RR_ADC_SKIN_TOO_HOT_TEMP)) {
(prop->channel != RR_ADC_SKIN_TOO_HOT_TEMP) &&
(prop->channel != RR_ADC_USBIN_V)) {
/* BATT_ID STS bit does not get set initially */
status = rradc_chans[prop->channel].sts;
rc = rradc_read(chip, status, buf, 1);
@ -576,6 +603,85 @@ static int rradc_do_conversion(struct rradc_chip *chip,
}
}
if (prop->channel == RR_ADC_USBIN_V) {
/* Force conversion every cycle */
rc = rradc_masked_write(chip, FG_ADC_RR_USB_IN_V_TRIGGER,
FG_ADC_RR_USB_IN_V_EVERY_CYCLE_MASK,
FG_ADC_RR_USB_IN_V_EVERY_CYCLE);
if (rc < 0) {
pr_err("Force every cycle update failed:%d\n", rc);
goto fail;
}
/* Clear channel log */
rc = rradc_masked_write(chip, FG_ADC_RR_ADC_LOG,
FG_ADC_RR_ADC_LOG_CLR_CTRL_MASK,
FG_ADC_RR_ADC_LOG_CLR_CTRL);
if (rc < 0) {
pr_err("log ctrl update to clear failed:%d\n", rc);
goto fail;
}
rc = rradc_masked_write(chip, FG_ADC_RR_ADC_LOG,
FG_ADC_RR_ADC_LOG_CLR_CTRL_MASK, 0);
if (rc < 0) {
pr_err("log ctrl update to not clear failed:%d\n", rc);
goto fail;
}
/* Switch to continuous mode */
rc = rradc_masked_write(chip, FG_ADC_RR_RR_ADC_CTL,
FG_ADC_RR_ADC_CTL_CONTINUOUS_SEL_MASK,
FG_ADC_RR_ADC_CTL_CONTINUOUS_SEL);
if (rc < 0) {
pr_err("Update to continuous mode failed:%d\n", rc);
goto fail;
}
status = rradc_chans[prop->channel].sts;
rc = rradc_read(chip, status, buf, 1);
if (rc < 0) {
pr_err("status read failed:%d\n", rc);
goto fail;
}
buf[0] &= FG_RR_ADC_STS_CHANNEL_READING_MASK;
while ((buf[0] != FG_RR_ADC_STS_CHANNEL_READING_MASK) &&
(retry < 2)) {
pr_debug("%s is not ready; nothing to read\n",
rradc_chans[prop->channel].datasheet_name);
usleep_range(FG_RR_CONV_CONTINUOUS_TIME_MIN,
FG_RR_CONV_CONTINUOUS_TIME_MAX);
retry++;
rc = rradc_read(chip, status, buf, 1);
if (rc < 0) {
pr_err("status read failed:%d\n", rc);
goto fail;
}
}
/* Switch to non continuous mode */
rc = rradc_masked_write(chip, FG_ADC_RR_RR_ADC_CTL,
FG_ADC_RR_ADC_CTL_CONTINUOUS_SEL_MASK, 0);
if (rc < 0) {
pr_err("Update to continuous mode failed:%d\n", rc);
goto fail;
}
/* Restore usb_in trigger */
rc = rradc_masked_write(chip, FG_ADC_RR_USB_IN_V_TRIGGER,
FG_ADC_RR_USB_IN_V_EVERY_CYCLE_MASK, 0);
if (rc < 0) {
pr_err("Restore every cycle update failed:%d\n", rc);
goto fail;
}
if (retry >= 2) {
rc = -ENODATA;
goto fail;
}
}
offset = rradc_chans[prop->channel].lsb;
if (prop->channel == RR_ADC_BATT_ID)
bytes_to_read = 6;

File diff suppressed because it is too large Load diff

View file

@ -82,7 +82,6 @@
#define FLASH_LED_HDRM_SNS_ENABLE_MASK 0x81
#define FLASH_MASK_MODULE_CONTRL_MASK 0xE0
#define FLASH_FOLLOW_OTST2_RB_MASK 0x08
#define FLASH_PREPARE_OPTIONS_MASK 0x08
#define FLASH_LED_TRIGGER_DEFAULT "none"
#define FLASH_LED_HEADROOM_DEFAULT_MV 500
@ -1172,7 +1171,7 @@ int qpnp_flash_led_prepare(struct led_trigger *trig, int options,
flash_node = container_of(led_cdev, struct flash_node_data, cdev);
led = dev_get_drvdata(&flash_node->pdev->dev);
if (!(options & FLASH_PREPARE_OPTIONS_MASK)) {
if (!(options & FLASH_LED_PREPARE_OPTIONS_MASK)) {
dev_err(&led->pdev->dev, "Invalid options %d\n", options);
return -EINVAL;
}

View file

@ -50,6 +50,7 @@
#define QPNP_WLED_VLOOP_COMP_RES_REG(b) (b + 0x55)
#define QPNP_WLED_VLOOP_COMP_GM_REG(b) (b + 0x56)
#define QPNP_WLED_PSM_CTRL_REG(b) (b + 0x5B)
#define QPNP_WLED_LCD_AUTO_PFM_REG(b) (b + 0x5C)
#define QPNP_WLED_SC_PRO_REG(b) (b + 0x5E)
#define QPNP_WLED_SWIRE_AVDD_REG(b) (b + 0x5F)
#define QPNP_WLED_CTRL_SPARE_REG(b) (b + 0xDF)
@ -69,17 +70,29 @@
#define QPNP_WLED_LOOP_COMP_RES_STEP_KOHM 20
#define QPNP_WLED_LOOP_COMP_RES_MIN_KOHM 20
#define QPNP_WLED_LOOP_COMP_RES_MAX_KOHM 320
#define QPNP_WLED_VLOOP_COMP_GM_MASK 0xF0
#define QPNP_WLED_VLOOP_COMP_GM_MASK GENMASK(3, 0)
#define QPNP_WLED_VLOOP_COMP_GM_OVERWRITE 0x80
#define QPNP_WLED_LOOP_EA_GM_DFLT_AMOLED 0x03
#define QPNP_WLED_VLOOP_COMP_AUTO_GM_EN BIT(6)
#define QPNP_WLED_VLOOP_COMP_AUTO_GM_THRESH_MASK GENMASK(5, 4)
#define QPNP_WLED_VLOOP_COMP_AUTO_GM_THRESH_SHIFT 4
#define QPNP_WLED_LOOP_EA_GM_DFLT_AMOLED_PMI8994 0x03
#define QPNP_WLED_LOOP_GM_DFLT_AMOLED_PMICOBALT 0x09
#define QPNP_WLED_LOOP_GM_DFLT_WLED 0x09
#define QPNP_WLED_LOOP_EA_GM_MIN 0x0
#define QPNP_WLED_LOOP_EA_GM_MAX 0xF
#define QPNP_WLED_LOOP_AUTO_GM_THRESH_MAX 3
#define QPNP_WLED_LOOP_AUTO_GM_DFLT_THRESH 1
#define QPNP_WLED_VREF_PSM_MASK 0xF8
#define QPNP_WLED_VREF_PSM_STEP_MV 50
#define QPNP_WLED_VREF_PSM_MIN_MV 400
#define QPNP_WLED_VREF_PSM_MAX_MV 750
#define QPNP_WLED_VREF_PSM_DFLT_AMOLED_MV 450
#define QPNP_WLED_PSM_CTRL_OVERWRITE 0x80
#define QPNP_WLED_LCD_AUTO_PFM_DFLT_THRESH 1
#define QPNP_WLED_LCD_AUTO_PFM_THRESH_MAX 0xF
#define QPNP_WLED_LCD_AUTO_PFM_EN_SHIFT 7
#define QPNP_WLED_LCD_AUTO_PFM_EN_BIT BIT(7)
#define QPNP_WLED_LCD_AUTO_PFM_THRESH_MASK GENMASK(3, 0)
#define QPNP_WLED_ILIM_MASK GENMASK(2, 0)
#define QPNP_WLED_ILIM_OVERWRITE BIT(7)
@ -319,6 +332,10 @@ static struct wled_vref_setting vref_setting_pmicobalt = {
* @ cons_sync_write_delay_us - delay between two consecutive writes to SYNC
* @ strings - supported list of strings
* @ num_strings - number of strings
* @ loop_auto_gm_thresh - the clamping level for auto gm
* @ lcd_auto_pfm_thresh - the threshold for lcd auto pfm mode
* @ loop_auto_gm_en - select if auto gm is enabled
* @ lcd_auto_pfm_en - select if auto pfm is enabled in lcd mode
* @ avdd_mode_spmi - enable avdd programming via spmi
* @ en_9b_dim_res - enable or disable 9bit dimming
* @ en_phase_stag - enable or disable phase staggering
@ -362,6 +379,10 @@ struct qpnp_wled {
u16 cons_sync_write_delay_us;
u8 strings[QPNP_WLED_MAX_STRINGS];
u8 num_strings;
u8 loop_auto_gm_thresh;
u8 lcd_auto_pfm_thresh;
bool loop_auto_gm_en;
bool lcd_auto_pfm_en;
bool avdd_mode_spmi;
bool en_9b_dim_res;
bool en_phase_stag;
@ -987,24 +1008,6 @@ static int qpnp_wled_set_disp(struct qpnp_wled *wled, u16 base_addr)
if (rc)
return rc;
/* Configure the LOOP COMP GM register for AMOLED */
if (wled->loop_ea_gm < QPNP_WLED_LOOP_EA_GM_MIN)
wled->loop_ea_gm = QPNP_WLED_LOOP_EA_GM_MIN;
else if (wled->loop_ea_gm > QPNP_WLED_LOOP_EA_GM_MAX)
wled->loop_ea_gm = QPNP_WLED_LOOP_EA_GM_MAX;
rc = qpnp_wled_read_reg(wled, &reg,
QPNP_WLED_VLOOP_COMP_GM_REG(wled->ctrl_base));
if (rc < 0)
return rc;
reg &= QPNP_WLED_VLOOP_COMP_GM_MASK;
reg |= (wled->loop_ea_gm | QPNP_WLED_VLOOP_COMP_GM_OVERWRITE);
rc = qpnp_wled_write_reg(wled, reg,
QPNP_WLED_VLOOP_COMP_GM_REG(wled->ctrl_base));
if (rc)
return rc;
/* Configure the CTRL TEST4 register for AMOLED */
rc = qpnp_wled_read_reg(wled, &reg,
QPNP_WLED_TEST4_REG(wled->ctrl_base));
@ -1084,6 +1087,45 @@ static bool is_avdd_trim_adjustment_required(struct qpnp_wled *wled)
return !(reg & QPNP_WLED_AVDD_SET_BIT);
}
/*
 * qpnp_wled_gm_config() - program the VLOOP_COMP_GM register.
 *
 * On PMICOBALT/PM2FALCON subtypes, optionally enables auto GM and programs
 * the auto-GM threshold (clamped to QPNP_WLED_LOOP_AUTO_GM_THRESH_MAX).
 * On all parts, clamps loop_ea_gm to [QPNP_WLED_LOOP_EA_GM_MIN,
 * QPNP_WLED_LOOP_EA_GM_MAX] and writes it together with the overwrite bit
 * so the programmed value takes effect.
 *
 * Returns 0 on success or the negative error code from the register write.
 */
static int qpnp_wled_gm_config(struct qpnp_wled *wled)
{
	int rc;
	u8 mask = 0, reg = 0;

	/* Auto GM exists only on PMICOBALT and PM2FALCON */
	if (wled->pmic_rev_id->pmic_subtype == PMICOBALT_SUBTYPE ||
		wled->pmic_rev_id->pmic_subtype == PM2FALCON_SUBTYPE) {
		if (wled->loop_auto_gm_en)
			reg |= QPNP_WLED_VLOOP_COMP_AUTO_GM_EN;

		/* Clamp the auto-GM threshold to the hardware maximum */
		if (wled->loop_auto_gm_thresh >
				QPNP_WLED_LOOP_AUTO_GM_THRESH_MAX)
			wled->loop_auto_gm_thresh =
				QPNP_WLED_LOOP_AUTO_GM_THRESH_MAX;

		reg |= wled->loop_auto_gm_thresh <<
			QPNP_WLED_VLOOP_COMP_AUTO_GM_THRESH_SHIFT;
		mask |= QPNP_WLED_VLOOP_COMP_AUTO_GM_EN |
			QPNP_WLED_VLOOP_COMP_AUTO_GM_THRESH_MASK;
	}

	/* Clamp the error-amplifier GM into its valid range */
	if (wled->loop_ea_gm < QPNP_WLED_LOOP_EA_GM_MIN)
		wled->loop_ea_gm = QPNP_WLED_LOOP_EA_GM_MIN;
	else if (wled->loop_ea_gm > QPNP_WLED_LOOP_EA_GM_MAX)
		wled->loop_ea_gm = QPNP_WLED_LOOP_EA_GM_MAX;

	/* Overwrite bit must be set for the new GM value to apply */
	reg |= wled->loop_ea_gm | QPNP_WLED_VLOOP_COMP_GM_OVERWRITE;
	mask |= QPNP_WLED_VLOOP_COMP_GM_MASK |
		QPNP_WLED_VLOOP_COMP_GM_OVERWRITE;

	rc = qpnp_wled_masked_write_reg(wled, mask, &reg,
			QPNP_WLED_VLOOP_COMP_GM_REG(wled->ctrl_base));
	if (rc)
		/* Fixed: stray ']' removed from the format string */
		pr_err("write VLOOP_COMP_GM_REG failed, rc=%d\n", rc);

	return rc;
}
static int qpnp_wled_ovp_config(struct qpnp_wled *wled)
{
int rc, i, *ovp_table;
@ -1317,6 +1359,13 @@ static int qpnp_wled_config(struct qpnp_wled *wled)
return rc;
}
/* Configure VLOOP_COMP_GM register */
rc = qpnp_wled_gm_config(wled);
if (rc < 0) {
pr_err("Error in configureing wled gm, rc=%d\n", rc);
return rc;
}
/* Configure the ILIM register */
rc = qpnp_wled_ilim_config(wled);
if (rc < 0) {
@ -1324,6 +1373,24 @@ static int qpnp_wled_config(struct qpnp_wled *wled)
return rc;
}
/* Configure auto PFM mode for LCD mode only */
if ((wled->pmic_rev_id->pmic_subtype == PMICOBALT_SUBTYPE ||
wled->pmic_rev_id->pmic_subtype == PM2FALCON_SUBTYPE)
&& !wled->disp_type_amoled) {
reg = 0;
reg |= wled->lcd_auto_pfm_thresh;
reg |= wled->lcd_auto_pfm_en <<
QPNP_WLED_LCD_AUTO_PFM_EN_SHIFT;
rc = qpnp_wled_masked_write_reg(wled,
QPNP_WLED_LCD_AUTO_PFM_EN_BIT |
QPNP_WLED_LCD_AUTO_PFM_THRESH_MASK, &reg,
QPNP_WLED_LCD_AUTO_PFM_REG(wled->ctrl_base));
if (rc < 0) {
pr_err("Write LCD_AUTO_PFM failed, rc=%d\n", rc);
return rc;
}
}
/* Configure the Soft start Ramp delay: for AMOLED - 0,for LCD - 2 */
reg = (wled->disp_type_amoled) ? 0 : 2;
rc = qpnp_wled_write_reg(wled, reg,
@ -1689,16 +1756,6 @@ static int qpnp_wled_parse_dt(struct qpnp_wled *wled)
return rc;
}
wled->loop_ea_gm = QPNP_WLED_LOOP_EA_GM_DFLT_AMOLED;
rc = of_property_read_u32(pdev->dev.of_node,
"qcom,loop-ea-gm", &temp_val);
if (!rc) {
wled->loop_ea_gm = temp_val;
} else if (rc != -EINVAL) {
dev_err(&pdev->dev, "Unable to read loop-ea-gm\n");
return rc;
}
wled->avdd_mode_spmi = of_property_read_bool(pdev->dev.of_node,
"qcom,avdd-mode-spmi");
@ -1713,6 +1770,67 @@ static int qpnp_wled_parse_dt(struct qpnp_wled *wled)
}
}
if (wled->disp_type_amoled) {
if (wled->pmic_rev_id->pmic_subtype == PMICOBALT_SUBTYPE ||
wled->pmic_rev_id->pmic_subtype == PM2FALCON_SUBTYPE)
wled->loop_ea_gm =
QPNP_WLED_LOOP_GM_DFLT_AMOLED_PMICOBALT;
else
wled->loop_ea_gm =
QPNP_WLED_LOOP_EA_GM_DFLT_AMOLED_PMI8994;
} else {
wled->loop_ea_gm = QPNP_WLED_LOOP_GM_DFLT_WLED;
}
rc = of_property_read_u32(pdev->dev.of_node,
"qcom,loop-ea-gm", &temp_val);
if (!rc) {
wled->loop_ea_gm = temp_val;
} else if (rc != -EINVAL) {
dev_err(&pdev->dev, "Unable to read loop-ea-gm\n");
return rc;
}
if (wled->pmic_rev_id->pmic_subtype == PMICOBALT_SUBTYPE ||
wled->pmic_rev_id->pmic_subtype == PM2FALCON_SUBTYPE) {
wled->loop_auto_gm_en =
of_property_read_bool(pdev->dev.of_node,
"qcom,loop-auto-gm-en");
wled->loop_auto_gm_thresh = QPNP_WLED_LOOP_AUTO_GM_DFLT_THRESH;
rc = of_property_read_u8(pdev->dev.of_node,
"qcom,loop-auto-gm-thresh",
&wled->loop_auto_gm_thresh);
if (rc && rc != -EINVAL) {
dev_err(&pdev->dev,
"Unable to read loop-auto-gm-thresh\n");
return rc;
}
}
if (wled->pmic_rev_id->pmic_subtype == PMICOBALT_SUBTYPE ||
wled->pmic_rev_id->pmic_subtype == PM2FALCON_SUBTYPE) {
if (wled->pmic_rev_id->rev4 == PMICOBALT_V2P0_REV4)
wled->lcd_auto_pfm_en = false;
else
wled->lcd_auto_pfm_en = true;
wled->lcd_auto_pfm_thresh = QPNP_WLED_LCD_AUTO_PFM_DFLT_THRESH;
rc = of_property_read_u8(pdev->dev.of_node,
"qcom,lcd-auto-pfm-thresh",
&wled->lcd_auto_pfm_thresh);
if (rc && rc != -EINVAL) {
dev_err(&pdev->dev,
"Unable to read lcd-auto-pfm-thresh\n");
return rc;
}
if (wled->lcd_auto_pfm_thresh >
QPNP_WLED_LCD_AUTO_PFM_THRESH_MAX)
wled->lcd_auto_pfm_thresh =
QPNP_WLED_LCD_AUTO_PFM_THRESH_MAX;
}
wled->sc_deb_cycles = QPNP_WLED_SC_DEB_CYCLES_DFLT;
rc = of_property_read_u32(pdev->dev.of_node,
"qcom,sc-deb-cycles", &temp_val);

View file

@ -8125,11 +8125,12 @@ static int qseecom_check_whitelist_feature(void)
static int qseecom_probe(struct platform_device *pdev)
{
int rc;
int ret = 0;
int i;
uint32_t feature = 10;
struct device *class_dev;
struct msm_bus_scale_pdata *qseecom_platform_support = NULL;
struct qseecom_command_scm_resp resp;
struct qseecom_ce_info_use *pce_info_use = NULL;
qseecom.qsee_bw_count = 0;
qseecom.qsee_perf_client = 0;
@ -8171,7 +8172,7 @@ static int qseecom_probe(struct platform_device *pdev)
class_dev = device_create(driver_class, NULL, qseecom_device_no, NULL,
QSEECOM_DEV);
if (!class_dev) {
if (IS_ERR(class_dev)) {
pr_err("class_device_create failed %d\n", rc);
rc = -ENOMEM;
goto exit_destroy_class;
@ -8210,7 +8211,7 @@ static int qseecom_probe(struct platform_device *pdev)
qseecom.pdev = class_dev;
/* Create ION msm client */
qseecom.ion_clnt = msm_ion_client_create("qseecom-kernel");
if (qseecom.ion_clnt == NULL) {
if (IS_ERR_OR_NULL(qseecom.ion_clnt)) {
pr_err("Ion client cannot be created\n");
rc = -ENOMEM;
goto exit_del_cdev;
@ -8272,14 +8273,14 @@ static int qseecom_probe(struct platform_device *pdev)
pr_debug("CE operating frequency is not defined, setting to default 100MHZ\n");
qseecom.ce_opp_freq_hz = QSEE_CE_CLK_100MHZ;
}
ret = __qseecom_init_clk(CLK_QSEE);
if (ret)
rc = __qseecom_init_clk(CLK_QSEE);
if (rc)
goto exit_destroy_ion_client;
if ((qseecom.qsee.instance != qseecom.ce_drv.instance) &&
(qseecom.support_pfe || qseecom.support_fde)) {
ret = __qseecom_init_clk(CLK_CE_DRV);
if (ret) {
rc = __qseecom_init_clk(CLK_CE_DRV);
if (rc) {
__qseecom_deinit_clk(CLK_QSEE);
goto exit_destroy_ion_client;
}
@ -8333,9 +8334,14 @@ static int qseecom_probe(struct platform_device *pdev)
} else {
pr_err("Fail to get secure app region info\n");
rc = -EINVAL;
goto exit_destroy_ion_client;
goto exit_deinit_clock;
}
rc = __qseecom_enable_clk(CLK_QSEE);
if (rc) {
pr_err("CLK_QSEE enabling failed (%d)\n", rc);
rc = -EIO;
goto exit_deinit_clock;
}
__qseecom_enable_clk(CLK_QSEE);
rc = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
cmd_buf, cmd_len,
&resp, sizeof(resp));
@ -8344,7 +8350,7 @@ static int qseecom_probe(struct platform_device *pdev)
pr_err("send secapp reg fail %d resp.res %d\n",
rc, resp.result);
rc = -EINVAL;
goto exit_destroy_ion_client;
goto exit_deinit_clock;
}
}
/*
@ -8380,7 +8386,28 @@ static int qseecom_probe(struct platform_device *pdev)
atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_READY);
return 0;
exit_deinit_clock:
__qseecom_deinit_clk(CLK_QSEE);
if ((qseecom.qsee.instance != qseecom.ce_drv.instance) &&
(qseecom.support_pfe || qseecom.support_fde))
__qseecom_deinit_clk(CLK_CE_DRV);
exit_destroy_ion_client:
if (qseecom.ce_info.fde) {
pce_info_use = qseecom.ce_info.fde;
for (i = 0; i < qseecom.ce_info.num_fde; i++) {
kzfree(pce_info_use->ce_pipe_entry);
pce_info_use++;
}
kfree(qseecom.ce_info.fde);
}
if (qseecom.ce_info.pfe) {
pce_info_use = qseecom.ce_info.pfe;
for (i = 0; i < qseecom.ce_info.num_pfe; i++) {
kzfree(pce_info_use->ce_pipe_entry);
pce_info_use++;
}
kfree(qseecom.ce_info.pfe);
}
ion_client_destroy(qseecom.ion_clnt);
exit_del_cdev:
cdev_del(&qseecom.cdev);

View file

@ -225,22 +225,22 @@ static struct ufs_qcom_phy_calibration phy_cal_table_rate_A_3_0_0[] = {
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE_INITVAL1, 0xFF),
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE_INITVAL2, 0x00),
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DEC_START_MODE0, 0x82),
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CP_CTRL_MODE0, 0x06),
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CP_CTRL_MODE0, 0x08),
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_RCTRL_MODE0, 0x16),
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_CCTRL_MODE0, 0x36),
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_CCTRL_MODE0, 0x34),
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_INTEGLOOP_GAIN0_MODE0, 0x3F),
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_INTEGLOOP_GAIN1_MODE0, 0x00),
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE1_MODE0, 0xDA),
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE1_MODE0, 0xCB),
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE2_MODE0, 0x01),
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP1_MODE0, 0xFF),
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP2_MODE0, 0x0C),
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DEC_START_MODE1, 0x98),
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CP_CTRL_MODE1, 0x06),
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CP_CTRL_MODE1, 0x08),
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_RCTRL_MODE1, 0x16),
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_CCTRL_MODE1, 0x36),
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_CCTRL_MODE1, 0x34),
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_INTEGLOOP_GAIN0_MODE1, 0x3F),
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_INTEGLOOP_GAIN1_MODE1, 0x00),
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE1_MODE1, 0xC1),
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE1_MODE1, 0xB2),
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE2_MODE1, 0x00),
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP1_MODE1, 0x32),
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP2_MODE1, 0x0F),

View file

@ -748,10 +748,10 @@ static const char * const blsp_i2c5_groups[] = {
"gpio18", "gpio19",
};
static const char * const blsp_spi6_groups[] = {
"gpio20", "gpio21", "gpio22", "gpio23",
"gpio49", "gpio52", "gpio22", "gpio23",
};
static const char * const blsp_uart2_groups[] = {
"gpio20", "gpio21", "gpio22", "gpio23",
"gpio4", "gpio5", "gpio6", "gpio7",
};
static const char * const blsp_uim6_groups[] = {
"gpio20", "gpio21",
@ -1495,13 +1495,14 @@ static const struct msm_pingroup msmfalcon_groups[] = {
NA, NA),
PINGROUP(3, SOUTH, blsp_spi1, blsp_uart1, blsp_i2c1, ddr_bist, NA, NA,
atest_tsens2, atest_usb1, NA),
PINGROUP(4, NORTH, blsp_spi2, blsp_uim2, NA, phase_flag3, NA, NA, NA,
PINGROUP(4, NORTH, blsp_spi2, blsp_uim2, blsp_uart2, phase_flag3, NA,
NA, NA, NA, NA),
PINGROUP(5, SOUTH, blsp_spi2, blsp_uim2, blsp_uart2, phase_flag14, NA,
NA, NA, NA, NA),
PINGROUP(6, SOUTH, blsp_spi2, blsp_i2c2, blsp_uart2, phase_flag31, NA,
NA, NA, NA, NA),
PINGROUP(7, SOUTH, blsp_spi2, blsp_i2c2, blsp_uart2, NA, NA, NA, NA,
NA, NA),
PINGROUP(5, SOUTH, blsp_spi2, blsp_uim2, NA, phase_flag14, NA, NA, NA,
NA, NA),
PINGROUP(6, SOUTH, blsp_spi2, blsp_i2c2, NA, phase_flag31, NA, NA, NA,
NA, NA),
PINGROUP(7, SOUTH, blsp_spi2, blsp_i2c2, NA, NA, NA, NA, NA, NA, NA),
PINGROUP(8, NORTH, blsp_spi3, ddr_bist, NA, NA, NA, wlan1_adc1,
atest_usb13, bimc_dte1, NA),
PINGROUP(9, NORTH, blsp_spi3, ddr_bist, NA, NA, NA, wlan1_adc0,
@ -1526,13 +1527,13 @@ static const struct msm_pingroup msmfalcon_groups[] = {
NA, NA),
PINGROUP(19, CENTER, blsp_uart5, blsp_spi5, blsp_i2c5, NA, NA, NA, NA,
NA, NA),
PINGROUP(20, SOUTH, blsp_spi6, blsp_uart2, blsp_uim6, NA, NA, NA, NA,
PINGROUP(20, SOUTH, NA, NA, blsp_uim6, NA, NA, NA, NA,
NA, NA),
PINGROUP(21, SOUTH, blsp_spi6, blsp_uart2, blsp_uim6, NA, phase_flag11,
PINGROUP(21, SOUTH, NA, NA, blsp_uim6, NA, phase_flag11,
qdss_cti0_b, vsense_data0, NA, NA),
PINGROUP(22, CENTER, blsp_spi6, blsp_uart2, blsp_i2c6, NA,
PINGROUP(22, CENTER, blsp_spi6, NA, blsp_i2c6, NA,
phase_flag12, vsense_data1, NA, NA, NA),
PINGROUP(23, CENTER, blsp_spi6, blsp_uart2, blsp_i2c6, NA,
PINGROUP(23, CENTER, blsp_spi6, NA, blsp_i2c6, NA,
phase_flag13, vsense_mode, NA, NA, NA),
PINGROUP(24, NORTH, blsp_spi7, blsp_uart6_a, sec_mi2s, sndwire_clk, NA,
NA, phase_flag17, vsense_clkout, NA),
@ -1580,13 +1581,13 @@ static const struct msm_pingroup msmfalcon_groups[] = {
NA, NA, NA),
PINGROUP(48, SOUTH, NA, phase_flag1, qdss_gpio15, NA, NA, NA, NA, NA,
NA),
PINGROUP(49, SOUTH, NA, phase_flag2, qdss_cti0_a, NA, NA, NA, NA, NA,
NA),
PINGROUP(49, SOUTH, blsp_spi6, phase_flag2, qdss_cti0_a, NA, NA, NA,
NA, NA, NA),
PINGROUP(50, SOUTH, qspi_cs, NA, phase_flag9, qdss_cti0_a, NA, NA, NA,
NA, NA),
PINGROUP(51, SOUTH, qspi_data3, NA, phase_flag15, qdss_gpio8, NA, NA,
NA, NA, NA),
PINGROUP(52, SOUTH, CCI_TIMER2, blsp_spi8_b, blsp_i2c8_b, NA,
PINGROUP(52, SOUTH, CCI_TIMER2, blsp_spi8_b, blsp_i2c8_b, blsp_spi6,
phase_flag16, qdss_gpio, NA, NA, NA),
PINGROUP(53, NORTH, NA, phase_flag6, qdss_cti1_a, NA, NA, NA, NA, NA,
NA),

View file

@ -1723,6 +1723,7 @@ int ipa3_tx_dp(enum ipa_client_type dst, struct sk_buff *skb,
struct ipa3_sys_context *sys;
int src_ep_idx;
int num_frags, f;
struct ipa_gsi_ep_config *gsi_ep;
if (unlikely(!ipa3_ctx)) {
IPAERR("IPA3 driver was not initialized\n");
@ -1734,23 +1735,6 @@ int ipa3_tx_dp(enum ipa_client_type dst, struct sk_buff *skb,
return -EINVAL;
}
num_frags = skb_shinfo(skb)->nr_frags;
if (num_frags) {
/* 1 desc for tag to resolve status out-of-order issue;
* 1 desc is needed for the linear portion of skb;
* 1 desc may be needed for the PACKET_INIT;
* 1 desc for each frag
*/
desc = kzalloc(sizeof(*desc) * (num_frags + 3), GFP_ATOMIC);
if (!desc) {
IPAERR("failed to alloc desc array\n");
goto fail_mem;
}
} else {
memset(_desc, 0, 3 * sizeof(struct ipa3_desc));
desc = &_desc[0];
}
/*
* USB_CONS: PKT_INIT ep_idx = dst pipe
* Q6_CONS: PKT_INIT ep_idx = sender pipe
@ -1787,6 +1771,37 @@ int ipa3_tx_dp(enum ipa_client_type dst, struct sk_buff *skb,
goto fail_gen;
}
num_frags = skb_shinfo(skb)->nr_frags;
/*
* make sure TLV FIFO supports the needed frags.
* 2 descriptors are needed for IP_PACKET_INIT and TAG_STATUS.
* 1 descriptor needed for the linear portion of skb.
*/
gsi_ep = ipa3_get_gsi_ep_info(src_ep_idx);
if (gsi_ep && (num_frags + 3 > gsi_ep->ipa_if_tlv)) {
if (skb_linearize(skb)) {
IPAERR("Failed to linear skb with %d frags\n",
num_frags);
goto fail_gen;
}
num_frags = 0;
}
if (num_frags) {
/* 1 desc for tag to resolve status out-of-order issue;
* 1 desc is needed for the linear portion of skb;
* 1 desc may be needed for the PACKET_INIT;
* 1 desc for each frag
*/
desc = kzalloc(sizeof(*desc) * (num_frags + 3), GFP_ATOMIC);
if (!desc) {
IPAERR("failed to alloc desc array\n");
goto fail_gen;
}
} else {
memset(_desc, 0, 3 * sizeof(struct ipa3_desc));
desc = &_desc[0];
}
if (dst_ep_idx != -1) {
/* SW data path */
cmd.destination_pipe_index = dst_ep_idx;
@ -1794,7 +1809,7 @@ int ipa3_tx_dp(enum ipa_client_type dst, struct sk_buff *skb,
IPA_IMM_CMD_IP_PACKET_INIT, &cmd, true);
if (unlikely(!cmd_pyld)) {
IPAERR("failed to construct ip_packet_init imm cmd\n");
goto fail_gen;
goto fail_mem;
}
/* the tag field will be populated in ipa3_send() function */
@ -1863,7 +1878,7 @@ int ipa3_tx_dp(enum ipa_client_type dst, struct sk_buff *skb,
if (num_frags == 0) {
if (ipa3_send(sys, 2, desc, true)) {
IPAERR("fail to send skb %p HWP\n", skb);
goto fail_gen;
goto fail_mem;
}
} else {
for (f = 0; f < num_frags; f++) {
@ -1880,7 +1895,7 @@ int ipa3_tx_dp(enum ipa_client_type dst, struct sk_buff *skb,
if (ipa3_send(sys, num_frags + 2, desc, true)) {
IPAERR("fail to send skb %p num_frags %u HWP\n",
skb, num_frags);
goto fail_gen;
goto fail_mem;
}
}
IPA_STATS_INC_CNT(ipa3_ctx->stats.tx_hw_pkts);
@ -1894,10 +1909,10 @@ int ipa3_tx_dp(enum ipa_client_type dst, struct sk_buff *skb,
fail_send:
ipahal_destroy_imm_cmd(cmd_pyld);
fail_gen:
fail_mem:
if (num_frags)
kfree(desc);
fail_mem:
fail_gen:
return -EFAULT;
}

View file

@ -1069,6 +1069,7 @@ static int smb2_init_hw(struct smb2 *chip)
&chip->dt.dc_icl_ua);
chg->otg_cl_ua = chip->dt.otg_cl_ua;
chg->dcp_icl_ua = chip->dt.usb_icl_ua;
rc = smblib_read(chg, APSD_RESULT_STATUS_REG, &stat);
if (rc < 0) {
@ -1101,7 +1102,7 @@ static int smb2_init_hw(struct smb2 *chip)
vote(chg->fv_votable,
DEFAULT_VOTER, true, chip->dt.fv_uv);
vote(chg->usb_icl_votable,
DEFAULT_VOTER, true, chip->dt.usb_icl_ua);
DCP_VOTER, true, chip->dt.usb_icl_ua);
vote(chg->dc_icl_votable,
DEFAULT_VOTER, true, chip->dt.dc_icl_ua);
vote(chg->hvdcp_disable_votable, DEFAULT_VOTER,
@ -1186,6 +1187,13 @@ static int smb2_init_hw(struct smb2 *chip)
return rc;
}
/* disable try.SINK mode */
rc = smblib_masked_write(chg, TYPE_C_CFG_3_REG, EN_TRYSINK_MODE_BIT, 0);
if (rc < 0) {
dev_err(chg->dev, "Couldn't set TRYSINK_MODE rc=%d\n", rc);
return rc;
}
rc = smblib_masked_write(chg, QNOVO_PT_ENABLE_CMD_REG,
QNOVO_PT_ENABLE_CMD_BIT, QNOVO_PT_ENABLE_CMD_BIT);
if (rc < 0) {
@ -1280,6 +1288,13 @@ static int smb2_init_hw(struct smb2 *chip)
return rc;
}
rc = smblib_validate_initial_typec_legacy_status(chg);
if (rc < 0) {
dev_err(chg->dev, "Couldn't validate typec legacy status rc=%d\n",
rc);
return rc;
}
return rc;
}

View file

@ -16,6 +16,7 @@
#include <linux/iio/consumer.h>
#include <linux/power_supply.h>
#include <linux/regulator/driver.h>
#include <linux/qpnp/power-on.h>
#include <linux/irq.h>
#include "smb-lib.h"
#include "smb-reg.h"
@ -2015,6 +2016,13 @@ int smblib_set_prop_pd_active(struct smb_charger *chg,
return rc;
}
rc = vote(chg->usb_icl_votable, DCP_VOTER, false, 0);
if (rc < 0) {
smblib_err(chg, "Couldn't vote for USB ICL rc=%d\n",
rc);
return rc;
}
rc = smblib_masked_write(chg, USBIN_ICL_OPTIONS_REG,
USBIN_MODE_CHG_BIT, USBIN_MODE_CHG_BIT);
if (rc < 0) {
@ -2031,6 +2039,14 @@ int smblib_set_prop_pd_active(struct smb_charger *chg,
return rc;
}
} else {
rc = vote(chg->usb_icl_votable, DCP_VOTER, true,
chg->dcp_icl_ua);
if (rc < 0) {
smblib_err(chg, "Couldn't vote for USB ICL rc=%d\n",
rc);
return rc;
}
rc = smblib_masked_write(chg, CMD_APSD_REG,
ICL_OVERRIDE_BIT, 0);
if (rc < 0) {
@ -2066,13 +2082,6 @@ int smblib_set_prop_pd_active(struct smb_charger *chg,
smblib_update_usb_type(chg);
power_supply_changed(chg->usb_psy);
rc = smblib_masked_write(chg, TYPE_C_CFG_3_REG, EN_TRYSINK_MODE_BIT,
chg->pd_active ? 0 : EN_TRYSINK_MODE_BIT);
if (rc < 0) {
dev_err(chg->dev, "Couldn't set TRYSINK_MODE rc=%d\n", rc);
return rc;
}
return rc;
}
@ -3356,3 +3365,40 @@ int smblib_deinit(struct smb_charger *chg)
return 0;
}
/*
 * smblib_validate_initial_typec_legacy_status() - clear a stale legacy-cable
 * status latched at boot.
 *
 * Skipped after a warm reset (Type-C hardware state survived, so the latched
 * status is trusted). Otherwise, if TYPEC_LEGACY_CABLE_STATUS_BIT is set,
 * the Type-C block is disabled for ~150 ms and re-enabled so that cable
 * detection reruns against the live connection.
 *
 * Returns 0 on success or a negative error code from register access.
 */
int smblib_validate_initial_typec_legacy_status(struct smb_charger *chg)
{
	int rc;
	u8 stat;

	/* Warm reset preserves Type-C state; nothing to re-detect */
	if (qpnp_pon_is_warm_reset())
		return 0;

	rc = smblib_read(chg, TYPE_C_STATUS_5_REG, &stat);
	if (rc < 0) {
		smblib_err(chg, "Couldn't read TYPE_C_STATUS_5 rc=%d\n", rc);
		return rc;
	}

	/* No stale legacy-cable indication: leave the port alone */
	if ((stat & TYPEC_LEGACY_CABLE_STATUS_BIT) == 0)
		return 0;

	/* Toggle the Type-C block off to force a fresh detection cycle */
	rc = smblib_masked_write(chg, TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG,
				TYPEC_DISABLE_CMD_BIT, TYPEC_DISABLE_CMD_BIT);
	if (rc < 0) {
		smblib_err(chg, "Couldn't disable typec rc=%d\n", rc);
		return rc;
	}

	/* Hold the block disabled for ~150 ms before re-enabling */
	usleep_range(150000, 151000);

	rc = smblib_masked_write(chg, TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG,
				TYPEC_DISABLE_CMD_BIT, 0);
	if (rc < 0) {
		smblib_err(chg, "Couldn't enable typec rc=%d\n", rc);
		return rc;
	}

	return 0;
}

View file

@ -28,6 +28,7 @@ enum print_reason {
#define DEFAULT_VOTER "DEFAULT_VOTER"
#define USER_VOTER "USER_VOTER"
#define PD_VOTER "PD_VOTER"
#define DCP_VOTER "DCP_VOTER"
#define USB_PSY_VOTER "USB_PSY_VOTER"
#define PL_TAPER_WORK_RUNNING_VOTER "PL_TAPER_WORK_RUNNING_VOTER"
#define PARALLEL_PSY_VOTER "PARALLEL_PSY_VOTER"
@ -203,6 +204,7 @@ struct smb_charger {
int *thermal_mitigation;
int otg_cl_ua;
int dcp_icl_ua;
int fake_capacity;
@ -354,6 +356,8 @@ int smblib_set_prop_pd_in_hard_reset(struct smb_charger *chg,
int smblib_get_prop_slave_current_now(struct smb_charger *chg,
union power_supply_propval *val);
int smblib_validate_initial_typec_legacy_status(struct smb_charger *chg);
int smblib_init(struct smb_charger *chg);
int smblib_deinit(struct smb_charger *chg);
#endif /* __SMB2_CHARGER_H */

View file

@ -630,10 +630,12 @@ static int smb138x_init_hw(struct smb138x *chip)
vote(chg->fcc_votable,
DEFAULT_VOTER, true, chip->dt.fcc_ua);
vote(chg->usb_icl_votable,
DEFAULT_VOTER, true, chip->dt.usb_icl_ua);
DCP_VOTER, true, chip->dt.usb_icl_ua);
vote(chg->dc_icl_votable,
DEFAULT_VOTER, true, chip->dt.dc_icl_ua);
chg->dcp_icl_ua = chip->dt.usb_icl_ua;
/* configure charge enable for software control; active high */
rc = smblib_masked_write(chg, CHGR_CFG2_REG,
CHG_EN_POLARITY_BIT | CHG_EN_SRC_BIT, 0);

View file

@ -412,8 +412,9 @@ static int ufs_qcom_hce_enable_notify(struct ufs_hba *hba,
/**
* Returns zero for success and non-zero in case of a failure
*/
static int ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear,
u32 hs, u32 rate, bool update_link_startup_timer)
static int __ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear,
u32 hs, u32 rate, bool update_link_startup_timer,
bool is_pre_scale_up)
{
int ret = 0;
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
@ -460,8 +461,12 @@ static int ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear,
}
list_for_each_entry(clki, &hba->clk_list_head, list) {
if (!strcmp(clki->name, "core_clk"))
core_clk_rate = clk_get_rate(clki->clk);
if (!strcmp(clki->name, "core_clk")) {
if (is_pre_scale_up)
core_clk_rate = clki->max_freq;
else
core_clk_rate = clk_get_rate(clki->clk);
}
}
/* If frequency is smaller than 1MHz, set to 1MHz */
@ -558,6 +563,13 @@ out:
return ret;
}
/*
 * ufs_qcom_cfg_timers() - legacy entry point retained for existing callers.
 *
 * Delegates to __ufs_qcom_cfg_timers() with is_pre_scale_up = false, i.e.
 * the core clock rate is read from the clock framework rather than assumed
 * to be the maximum frequency.
 */
static int ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear,
	u32 hs, u32 rate, bool update_link_startup_timer)
{
	return __ufs_qcom_cfg_timers(hba, gear, hs, rate,
				     update_link_startup_timer, false);
}
static int ufs_qcom_link_startup_pre_change(struct ufs_hba *hba)
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
@ -2160,61 +2172,49 @@ out:
static int ufs_qcom_clk_scale_up_pre_change(struct ufs_hba *hba)
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
struct ufs_pa_layer_attr *attr = &host->dev_req_params;
int err = 0;
if (!ufs_qcom_cap_qunipro(host))
return 0;
goto out;
return ufs_qcom_configure_lpm(hba, false);
}
err = ufs_qcom_configure_lpm(hba, false);
if (err)
goto out;
static int ufs_qcom_clk_scale_up_post_change(struct ufs_hba *hba)
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
if (!ufs_qcom_cap_qunipro(host))
return 0;
if (attr)
__ufs_qcom_cfg_timers(hba, attr->gear_rx, attr->pwr_rx,
attr->hs_rate, false, true);
/* set unipro core clock cycles to 150 and clear clock divider */
return ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba, 150);
err = ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba, 150);
out:
return err;
}
static int ufs_qcom_clk_scale_down_pre_change(struct ufs_hba *hba)
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
u32 core_clk_ctrl_reg;
int err = 0;
if (!ufs_qcom_cap_qunipro(host))
goto out;
return 0;
err = ufs_qcom_configure_lpm(hba, true);
if (err)
goto out;
err = ufshcd_dme_get(hba,
UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
&core_clk_ctrl_reg);
/* make sure CORE_CLK_DIV_EN is cleared */
if (!err &&
(core_clk_ctrl_reg & DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT)) {
core_clk_ctrl_reg &= ~DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT;
err = ufshcd_dme_set(hba,
UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
core_clk_ctrl_reg);
}
out:
return err;
return ufs_qcom_configure_lpm(hba, true);
}
static int ufs_qcom_clk_scale_down_post_change(struct ufs_hba *hba)
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
struct ufs_pa_layer_attr *attr = &host->dev_req_params;
int err = 0;
if (!ufs_qcom_cap_qunipro(host))
return 0;
if (attr)
ufs_qcom_cfg_timers(hba, attr->gear_rx, attr->pwr_rx,
attr->hs_rate, false);
if (ufs_qcom_cap_svs2(host))
/*
* For SVS2 set unipro core clock cycles to 37 and
@ -2235,7 +2235,6 @@ static int ufs_qcom_clk_scale_notify(struct ufs_hba *hba,
bool scale_up, enum ufs_notify_change_status status)
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
struct ufs_pa_layer_attr *dev_req_params = &host->dev_req_params;
int err = 0;
switch (status) {
@ -2246,19 +2245,9 @@ static int ufs_qcom_clk_scale_notify(struct ufs_hba *hba,
err = ufs_qcom_clk_scale_down_pre_change(hba);
break;
case POST_CHANGE:
if (scale_up)
err = ufs_qcom_clk_scale_up_post_change(hba);
else
if (!scale_up)
err = ufs_qcom_clk_scale_down_post_change(hba);
if (err || !dev_req_params)
goto out;
ufs_qcom_cfg_timers(hba,
dev_req_params->gear_rx,
dev_req_params->pwr_rx,
dev_req_params->hs_rate,
false);
ufs_qcom_update_bus_bw_vote(host);
break;
default:
@ -2267,7 +2256,6 @@ static int ufs_qcom_clk_scale_notify(struct ufs_hba *hba,
break;
}
out:
return err;
}

View file

@ -2632,6 +2632,65 @@ static inline u16 ufshcd_upiu_wlun_to_scsi_wlun(u8 upiu_wlun_id)
return (upiu_wlun_id & ~UFS_UPIU_WLUN_ID) | SCSI_W_LUN_BASE;
}
/**
 * ufshcd_get_write_lock - synchronize between shutdown, scaling &
 * arrival of requests
 * @hba: ufs host
 *
 * Takes hba->lock exclusively (blocking) and records the current task as
 * the issuing task. Lock is predominantly held by the shutdown context,
 * ensuring that no requests from any other context may sneak through;
 * ufshcd_get_read_lock() lets the recorded task back in lock-free.
 */
static void ufshcd_get_write_lock(struct ufs_hba *hba)
{
	down_write(&hba->lock);
	/* remember the owner so re-entrant issues from this task succeed */
	hba->issuing_task = current;
}
/**
 * ufshcd_get_read_lock - synchronize between shutdown, scaling &
 * arrival of requests
 * @hba: ufs host
 *
 * Returns 1 if the shared lock was acquired, 0 when the caller already
 * owns the lock (shutdown context re-issuing), or a negative error on
 * contention: -EAGAIN while scaling holds the lock, -EPERM once shutdown
 * has begun and the caller is not the shutdown task.
 */
static int ufshcd_get_read_lock(struct ufs_hba *hba)
{
	/* down_read_trylock() returns 1 on success, 0 on contention */
	int got = down_read_trylock(&hba->lock);

	if (got > 0) {
		hba->issuing_task = current;
		return got;
	}

	/* writer holds the lock: allow only its own (shutdown) task through */
	if (hba->issuing_task == current)
		return 0;

	if (!ufshcd_is_shutdown_ongoing(hba))
		return -EAGAIN;

	return -EPERM;
}
/**
 * ufshcd_put_read_lock - synchronize between shutdown, scaling &
 * arrival of requests
 * @hba: ufs host
 *
 * Releases the shared lock taken by ufshcd_get_read_lock(). Once shutdown
 * is in progress the lock is held for write by the shutdown context, so
 * there is nothing to drop and this becomes a no-op.
 */
static inline void ufshcd_put_read_lock(struct ufs_hba *hba)
{
	if (ufshcd_is_shutdown_ongoing(hba))
		return;

	hba->issuing_task = NULL;
	up_read(&hba->lock);
}
/**
* ufshcd_queuecommand - main entry point for SCSI requests
* @cmd: command from SCSI Midlayer
@ -2657,8 +2716,16 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
BUG();
}
if (!down_read_trylock(&hba->clk_scaling_lock))
return SCSI_MLQUEUE_HOST_BUSY;
err = ufshcd_get_read_lock(hba);
if (unlikely(err < 0)) {
if (err == -EPERM) {
set_host_byte(cmd, DID_ERROR);
cmd->scsi_done(cmd);
return 0;
}
if (err == -EAGAIN)
return SCSI_MLQUEUE_HOST_BUSY;
}
spin_lock_irqsave(hba->host->host_lock, flags);
@ -2798,7 +2865,7 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
out_unlock:
spin_unlock_irqrestore(hba->host->host_lock, flags);
out:
up_read(&hba->clk_scaling_lock);
ufshcd_put_read_lock(hba);
return err;
}
@ -2990,7 +3057,12 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
struct completion wait;
unsigned long flags;
down_read(&hba->clk_scaling_lock);
/*
* May get invoked from shutdown and IOCTL contexts.
* In shutdown context, it comes in with lock acquired.
*/
if (!ufshcd_is_shutdown_ongoing(hba))
down_read(&hba->lock);
/*
* Get free slot, sleep if slots are unavailable.
@ -3023,7 +3095,8 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
out_put_tag:
ufshcd_put_dev_cmd_tag(hba, tag);
wake_up(&hba->dev_cmd.tag_wq);
up_read(&hba->clk_scaling_lock);
if (!ufshcd_is_shutdown_ongoing(hba))
up_read(&hba->lock);
return err;
}
@ -4132,7 +4205,13 @@ static int __ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
trace_ufshcd_profile_hibern8(dev_name(hba->dev), "enter",
ktime_to_us(ktime_sub(ktime_get(), start)), ret);
if (ret) {
/*
* Do full reinit if enter failed or if LINERESET was detected during
* Hibern8 operation. After LINERESET, link moves to default PWM-G1
* mode hence full reinit is required to move link to HS speeds.
*/
if (ret || hba->full_init_linereset) {
hba->full_init_linereset = false;
ufshcd_update_error_stats(hba, UFS_ERR_HIBERN8_ENTER);
dev_err(hba->dev, "%s: hibern8 enter failed. ret = %d",
__func__, ret);
@ -4175,8 +4254,13 @@ int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
trace_ufshcd_profile_hibern8(dev_name(hba->dev), "exit",
ktime_to_us(ktime_sub(ktime_get(), start)), ret);
if (ret) {
/*
* Do full reinit if exit failed or if LINERESET was detected during
* Hibern8 operation. After LINERESET, link moves to default PWM-G1
* mode hence full reinit is required to move link to HS speeds.
*/
if (ret || hba->full_init_linereset) {
hba->full_init_linereset = false;
ufshcd_update_error_stats(hba, UFS_ERR_HIBERN8_EXIT);
dev_err(hba->dev, "%s: hibern8 exit failed. ret = %d",
__func__, ret);
@ -5696,10 +5780,13 @@ static void ufshcd_err_handler(struct work_struct *work)
dev_err(hba->dev, "%s: saved_err 0x%x saved_uic_err 0x%x",
__func__, hba->saved_err, hba->saved_uic_err);
if (!hba->silence_err_logs) {
/* release lock as print host regs sleeps */
spin_unlock_irqrestore(hba->host->host_lock, flags);
ufshcd_print_host_regs(hba);
ufshcd_print_host_state(hba);
ufshcd_print_pwr_info(hba);
ufshcd_print_tmrs(hba, hba->outstanding_tasks);
spin_lock_irqsave(hba->host->host_lock, flags);
}
}
@ -5827,9 +5914,8 @@ static void ufshcd_update_uic_error(struct ufs_hba *hba)
/* PHY layer lane error */
reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER);
/* Ignore LINERESET indication, as this is not an error */
if ((reg & UIC_PHY_ADAPTER_LAYER_ERROR) &&
(reg & UIC_PHY_ADAPTER_LAYER_LANE_ERR_MASK)) {
(reg & UIC_PHY_ADAPTER_LAYER_ERROR_CODE_MASK)) {
/*
* To know whether this error is fatal or not, DB timeout
* must be checked but this error is handled separately.
@ -5837,6 +5923,20 @@ static void ufshcd_update_uic_error(struct ufs_hba *hba)
dev_dbg(hba->dev, "%s: UIC Lane error reported, reg 0x%x\n",
__func__, reg);
ufshcd_update_uic_reg_hist(&hba->ufs_stats.pa_err, reg);
/* Don't ignore LINERESET indication during hibern8 operation */
if (reg & UIC_PHY_ADAPTER_LAYER_GENERIC_ERROR) {
struct uic_command *cmd = hba->active_uic_cmd;
if (cmd) {
if ((cmd->command == UIC_CMD_DME_HIBER_ENTER)
|| (cmd->command == UIC_CMD_DME_HIBER_EXIT)) {
dev_err(hba->dev, "%s: LINERESET during hibern8, reg 0x%x\n",
__func__, reg);
hba->full_init_linereset = true;
}
}
}
}
/* PA_INIT_ERROR is fatal and needs UIC reset */
@ -6908,11 +7008,6 @@ static int ufshcd_probe_hba(struct ufs_hba *hba)
if (ret)
goto out;
/* Enable auto hibern8 if supported */
if (ufshcd_is_auto_hibern8_supported(hba))
ufshcd_set_auto_hibern8_timer(hba,
hba->hibern8_on_idle.delay_ms);
/* Debug counters initialization */
ufshcd_clear_dbg_ufs_stats(hba);
/* set the default level for urgent bkops */
@ -6979,6 +7074,13 @@ static int ufshcd_probe_hba(struct ufs_hba *hba)
if (ufshcd_scsi_add_wlus(hba))
goto out;
/* Enable auto hibern8 if supported, after full host and
* device initialization.
*/
if (ufshcd_is_auto_hibern8_supported(hba))
ufshcd_set_auto_hibern8_timer(hba,
hba->hibern8_on_idle.delay_ms);
/* Initialize devfreq after UFS device is detected */
if (ufshcd_is_clkscaling_supported(hba)) {
memcpy(&hba->clk_scaling.saved_pwr_info.info,
@ -8654,11 +8756,37 @@ static inline void ufshcd_add_sysfs_nodes(struct ufs_hba *hba)
*/
/*
 * ufshcd_shutdown - UFS power-down sequence for system shutdown
 * @hba: per-adapter instance
 *
 * Blocks further request submission, drains outstanding requests and then
 * suspends the device/link. Always returns 0 so shutdown is never aborted,
 * even when an intermediate step fails.
 *
 * NOTE(review): reconstructed from a mangled diff — the removed pre-patch
 * "TODO" comment was interleaved into this body with unbalanced comment
 * delimiters; it has been dropped.
 */
int ufshcd_shutdown(struct ufs_hba *hba)
{
	int ret = 0;

	/* Already fully powered down: nothing to do */
	if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba))
		goto out;

	pm_runtime_get_sync(hba->dev);
	ufshcd_hold_all(hba);
	/*
	 * (1) Acquire the lock to stop any more requests
	 * (2) Set state to shutting down
	 * (3) Suspend clock scaling
	 * (4) Wait for all issued requests to complete
	 */
	ufshcd_get_write_lock(hba);
	ufshcd_mark_shutdown_ongoing(hba);
	ufshcd_scsi_block_requests(hba);
	ufshcd_suspend_clkscaling(hba);
	ret = ufshcd_wait_for_doorbell_clr(hba, U64_MAX);
	if (ret)
		dev_err(hba->dev, "%s: waiting for DB clear: failed: %d\n",
			__func__, ret);
	/* Requests may have errored out above, let it be handled */
	flush_work(&hba->eh_work);
	/* reqs issued from contexts other than shutdown will fail from now */
	ufshcd_scsi_unblock_requests(hba);
	ufshcd_release_all(hba);
	ret = ufshcd_suspend(hba, UFS_SHUTDOWN_PM);
out:
	if (ret)
		dev_err(hba->dev, "%s failed, err %d\n", __func__, ret);
	/* allow force shutdown even in case of errors */
	return 0;
}
EXPORT_SYMBOL(ufshcd_shutdown);
@ -8843,10 +8971,10 @@ static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba)
* clock scaling is in progress
*/
ufshcd_scsi_block_requests(hba);
down_write(&hba->clk_scaling_lock);
down_write(&hba->lock);
if (ufshcd_wait_for_doorbell_clr(hba, DOORBELL_CLR_TOUT_US)) {
ret = -EBUSY;
up_write(&hba->clk_scaling_lock);
up_write(&hba->lock);
ufshcd_scsi_unblock_requests(hba);
}
@ -8855,7 +8983,7 @@ static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba)
/*
 * ufshcd_clock_scaling_unprepare - undo ufshcd_clock_scaling_prepare()
 * @hba: per-adapter instance
 *
 * Drops the exclusive hold on hba->lock and re-opens the SCSI request
 * queues after a clock-scaling operation completes.
 *
 * NOTE(review): the diff residue showed both the old
 * up_write(&hba->clk_scaling_lock) and the new up_write(&hba->lock);
 * only the renamed lock is released here — releasing both would be a
 * double-unlock (the prepare path, per the surrounding hunks, now takes
 * only hba->lock).
 */
static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba)
{
	up_write(&hba->lock);
	ufshcd_scsi_unblock_requests(hba);
}
@ -9038,6 +9166,10 @@ static void ufshcd_clk_scaling_resume_work(struct work_struct *work)
clk_scaling.resume_work);
unsigned long irq_flags;
/* Let's not resume scaling if shutdown is ongoing */
if (ufshcd_is_shutdown_ongoing(hba))
return;
spin_lock_irqsave(hba->host->host_lock, irq_flags);
if (!hba->clk_scaling.is_suspended) {
spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
@ -9250,7 +9382,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
/* Initialize mutex for device management commands */
mutex_init(&hba->dev_cmd.lock);
init_rwsem(&hba->clk_scaling_lock);
init_rwsem(&hba->lock);
/* Initialize device management tag acquire wait queue */
init_waitqueue_head(&hba->dev_cmd.tag_wq);

View file

@ -895,15 +895,30 @@ struct ufs_hba {
enum bkops_status urgent_bkops_lvl;
bool is_urgent_bkops_lvl_checked;
struct rw_semaphore clk_scaling_lock;
/* sync b/w diff contexts */
struct rw_semaphore lock;
struct task_struct *issuing_task;
unsigned long shutdown_in_prog;
struct reset_control *core_reset;
/* If set, don't gate device ref_clk during clock gating */
bool no_ref_clk_gating;
int scsi_block_reqs_cnt;
bool full_init_linereset;
};
/*
 * ufshcd_mark_shutdown_ongoing - flag that shutdown has started
 * @hba: per-adapter instance
 *
 * Atomically sets bit 0 of hba->shutdown_in_prog; once set it is never
 * cleared (tested via ufshcd_is_shutdown_ongoing()).
 */
static inline void ufshcd_mark_shutdown_ongoing(struct ufs_hba *hba)
{
	set_bit(0, &hba->shutdown_in_prog);
}
/*
 * ufshcd_is_shutdown_ongoing - has shutdown been initiated?
 * @hba: per-adapter instance
 *
 * Atomically tests bit 0 of hba->shutdown_in_prog, set by
 * ufshcd_mark_shutdown_ongoing(). The bool return type makes the
 * explicit !!() normalization unnecessary.
 */
static inline bool ufshcd_is_shutdown_ongoing(struct ufs_hba *hba)
{
	return test_bit(0, &hba->shutdown_in_prog);
}
/* Returns true if clocks can be gated. Otherwise false */
static inline bool ufshcd_is_clkgating_allowed(struct ufs_hba *hba)
{

View file

@ -190,6 +190,7 @@ enum {
/* UECPA - Host UIC Error Code PHY Adapter Layer 38h */
#define UIC_PHY_ADAPTER_LAYER_ERROR UFS_BIT(31)
#define UIC_PHY_ADAPTER_LAYER_GENERIC_ERROR UFS_BIT(4)
#define UIC_PHY_ADAPTER_LAYER_ERROR_CODE_MASK 0x1F
#define UIC_PHY_ADAPTER_LAYER_LANE_ERR_MASK 0xF

View file

@ -1485,8 +1485,18 @@ static int icnss_hw_reset(struct icnss_priv *priv)
icnss_hw_reset_wlan_rfactrl_power_down(priv);
ret = icnss_hw_reset_rf_reset_cmd(priv);
if (ret)
if (ret) {
icnss_hw_write_reg_field(priv->mpm_config_va,
MPM_WCSSAON_CONFIG_OFFSET,
MPM_WCSSAON_CONFIG_FORCE_ACTIVE, 0);
icnss_hw_write_reg_field(priv->mpm_config_va,
MPM_WCSSAON_CONFIG_OFFSET,
MPM_WCSSAON_CONFIG_DISCONNECT_CLR, 0);
icnss_hw_write_reg_field(priv->mpm_config_va,
MPM_WCSSAON_CONFIG_OFFSET,
MPM_WCSSAON_CONFIG_WLAN_DISABLE, 1);
goto top_level_reset;
}
icnss_hw_reset_switch_to_cxo(priv);
@ -1511,8 +1521,18 @@ static int icnss_hw_reset(struct icnss_priv *priv)
}
ret = icnss_hw_reset_xo_disable_cmd(priv);
if (ret)
if (ret) {
icnss_hw_write_reg_field(priv->mpm_config_va,
MPM_WCSSAON_CONFIG_OFFSET,
MPM_WCSSAON_CONFIG_FORCE_ACTIVE, 0);
icnss_hw_write_reg_field(priv->mpm_config_va,
MPM_WCSSAON_CONFIG_OFFSET,
MPM_WCSSAON_CONFIG_DISCONNECT_CLR, 0);
icnss_hw_write_reg_field(priv->mpm_config_va,
MPM_WCSSAON_CONFIG_OFFSET,
MPM_WCSSAON_CONFIG_WLAN_DISABLE, 1);
goto top_level_reset;
}
icnss_hw_write_reg_field(priv->mpm_config_va, MPM_WCSSAON_CONFIG_OFFSET,
MPM_WCSSAON_CONFIG_FORCE_ACTIVE, 0);

View file

@ -84,6 +84,7 @@ static int smem_module_inited;
static RAW_NOTIFIER_HEAD(smem_module_init_notifier_list);
static DEFINE_MUTEX(smem_module_init_notifier_lock);
static bool probe_done;
uint32_t smem_max_items;
/* smem security feature components */
#define SMEM_TOC_IDENTIFIER 0x434f5424 /* "$TOC" */
@ -139,6 +140,11 @@ struct smem_partition_info {
};
static struct smem_partition_info partitions[NUM_SMEM_SUBSYSTEMS];
#define SMEM_COMM_PART_VERSION 0x000C
#define SMEM_COMM_HOST 0xFFFE
static bool use_comm_partition;
static struct smem_partition_info comm_partition;
/* end smem security feature components */
/* Identifier for the SMEM target info struct. */
@ -149,6 +155,7 @@ struct smem_targ_info_type {
uint32_t identifier;
uint32_t size;
phys_addr_t phys_base_addr;
uint32_t max_items;
};
struct restart_notifier_block {
@ -312,7 +319,7 @@ static void *__smem_get_entry_nonsecure(unsigned id, unsigned *size,
if (!skip_init_check && !smem_initialized_check())
return ret;
if (id >= SMEM_NUM_ITEMS)
if (id >= smem_max_items)
return ret;
if (use_spinlocks) {
@ -374,7 +381,7 @@ static void *__smem_get_entry_secure(unsigned id,
if (!skip_init_check && !smem_initialized_check())
return NULL;
if (id >= SMEM_NUM_ITEMS) {
if (id >= smem_max_items) {
SMEM_INFO("%s: invalid id %d\n", __func__, id);
return NULL;
}
@ -385,12 +392,18 @@ static void *__smem_get_entry_secure(unsigned id,
return NULL;
}
if (flags & SMEM_ANY_HOST_FLAG || !partitions[to_proc].offset)
return __smem_get_entry_nonsecure(id, size, skip_init_check,
use_rspinlock);
partition_num = partitions[to_proc].partition_num;
hdr = smem_areas[0].virt_addr + partitions[to_proc].offset;
if (flags & SMEM_ANY_HOST_FLAG || !partitions[to_proc].offset) {
if (use_comm_partition) {
partition_num = comm_partition.partition_num;
hdr = smem_areas[0].virt_addr + comm_partition.offset;
} else {
return __smem_get_entry_nonsecure(id, size,
skip_init_check, use_rspinlock);
}
} else {
partition_num = partitions[to_proc].partition_num;
hdr = smem_areas[0].virt_addr + partitions[to_proc].offset;
}
if (unlikely(!spinlocks_initialized)) {
rc = init_smem_remote_spinlock();
if (unlikely(rc)) {
@ -613,8 +626,19 @@ static void *alloc_item_secure(unsigned id, unsigned size_in, unsigned to_proc,
uint32_t partition_num;
void *ret = NULL;
hdr = smem_base + partitions[to_proc].offset;
partition_num = partitions[to_proc].partition_num;
if (to_proc == SMEM_COMM_HOST) {
hdr = smem_base + comm_partition.offset;
partition_num = comm_partition.partition_num;
size_cacheline = comm_partition.size_cacheline;
} else if (to_proc < NUM_SMEM_SUBSYSTEMS) {
hdr = smem_base + partitions[to_proc].offset;
partition_num = partitions[to_proc].partition_num;
size_cacheline = partitions[to_proc].size_cacheline;
} else {
SMEM_INFO("%s: invalid to_proc %u for id %u\n", __func__,
to_proc, id);
return NULL;
}
if (hdr->identifier != SMEM_PART_HDR_IDENTIFIER) {
LOG_ERR(
@ -626,7 +650,6 @@ static void *alloc_item_secure(unsigned id, unsigned size_in, unsigned to_proc,
BUG();
}
size_cacheline = partitions[to_proc].size_cacheline;
free_space = hdr->offset_free_cached -
hdr->offset_free_uncached;
@ -718,7 +741,7 @@ void *smem_alloc(unsigned id, unsigned size_in, unsigned to_proc,
if (!smem_initialized_check())
return NULL;
if (id >= SMEM_NUM_ITEMS) {
if (id >= smem_max_items) {
SMEM_INFO("%s: invalid id %u\n", __func__, id);
return NULL;
}
@ -761,11 +784,16 @@ void *smem_alloc(unsigned id, unsigned size_in, unsigned to_proc,
if (id > SMEM_FIXED_ITEM_LAST) {
SMEM_INFO("%s: allocating %u size %u to_proc %u flags %u\n",
__func__, id, size_in, to_proc, flags);
if (flags & SMEM_ANY_HOST_FLAG || !partitions[to_proc].offset)
ret = alloc_item_nonsecure(id, a_size_in);
else
if (flags & SMEM_ANY_HOST_FLAG
|| !partitions[to_proc].offset) {
if (use_comm_partition)
ret = alloc_item_secure(id, size_in,
SMEM_COMM_HOST, flags);
else
ret = alloc_item_nonsecure(id, a_size_in);
} else {
ret = alloc_item_secure(id, size_in, to_proc, flags);
}
} else {
SMEM_INFO("%s: attempted to allocate non-dynamic item %u\n",
__func__, id);
@ -893,14 +921,18 @@ EXPORT_SYMBOL(smem_get_free_space);
unsigned smem_get_version(unsigned idx)
{
int *version_array;
struct smem_shared *smem = smem_ram_base;
if (idx > 32) {
pr_err("%s: invalid idx:%d\n", __func__, idx);
return 0;
}
version_array = __smem_find(SMEM_VERSION_INFO, SMEM_VERSION_INFO_SIZE,
true);
if (use_comm_partition)
version_array = smem->version;
else
version_array = __smem_find(SMEM_VERSION_INFO,
SMEM_VERSION_INFO_SIZE, true);
if (version_array == NULL)
return 0;
@ -948,6 +980,7 @@ bool smem_initialized_check(void)
static int is_inited;
unsigned long flags;
struct smem_shared *smem;
unsigned ver;
if (likely(checked)) {
if (unlikely(!is_inited))
@ -976,8 +1009,12 @@ bool smem_initialized_check(void)
* structures. Without the extra configuration data, the SMEM driver
* cannot be properly initialized.
*/
if (smem_get_version(MODEM_SBL_VERSION_INDEX) != SMEM_VERSION << 16) {
pr_err("%s: SBL version not correct\n", __func__);
ver = smem->version[MODEM_SBL_VERSION_INDEX];
if (ver == SMEM_COMM_PART_VERSION << 16) {
use_comm_partition = true;
} else if (ver != SMEM_VERSION << 16) {
pr_err("%s: SBL version not correct 0x%x\n",
__func__, smem->version[7]);
goto failed;
}
@ -1122,6 +1159,7 @@ static void smem_init_security_partition(struct smem_toc_entry *entry,
{
uint16_t remote_host;
struct smem_partition_header *hdr;
bool is_comm_partition = false;
if (!entry->offset) {
SMEM_INFO("Skipping smem partition %d - bad offset\n", num);
@ -1136,31 +1174,38 @@ static void smem_init_security_partition(struct smem_toc_entry *entry,
return;
}
if (entry->host0 == SMEM_APPS)
remote_host = entry->host1;
else
remote_host = entry->host0;
if (entry->host0 == SMEM_COMM_HOST && entry->host1 == SMEM_COMM_HOST)
is_comm_partition = true;
if (remote_host >= NUM_SMEM_SUBSYSTEMS) {
SMEM_INFO("Skipping smem partition %d - bad remote:%d\n", num,
remote_host);
return;
}
if (partitions[remote_host].offset) {
SMEM_INFO("Skipping smem partition %d - duplicate of %d\n", num,
partitions[remote_host].partition_num);
return;
if (!is_comm_partition) {
if (entry->host0 == SMEM_APPS)
remote_host = entry->host1;
else
remote_host = entry->host0;
if (remote_host >= NUM_SMEM_SUBSYSTEMS) {
SMEM_INFO(
"Skipping smem partition %d - bad remote:%d\n",
num, remote_host);
return;
}
if (partitions[remote_host].offset) {
SMEM_INFO(
"Skipping smem partition %d - duplicate of %d\n",
num, partitions[remote_host].partition_num);
return;
}
if (entry->host0 != SMEM_APPS && entry->host1 != SMEM_APPS) {
SMEM_INFO(
"Non-APSS Partition %d offset:%x host0:%d host1:%d\n",
num, entry->offset, entry->host0, entry->host1);
return;
}
}
hdr = smem_areas[0].virt_addr + entry->offset;
if (entry->host0 != SMEM_APPS && entry->host1 != SMEM_APPS) {
SMEM_INFO(
"Non-APSS Partition %d offset:%x host0:%d host1:%d\n",
num, entry->offset, entry->host0, entry->host1);
return;
}
if (hdr->identifier != SMEM_PART_HDR_IDENTIFIER) {
LOG_ERR("Smem partition %d hdr magic is bad\n", num);
BUG();
@ -1177,6 +1222,14 @@ static void smem_init_security_partition(struct smem_toc_entry *entry,
LOG_ERR("Smem partition %d cached heap exceeds size\n", num);
BUG();
}
if (hdr->host0 == SMEM_COMM_HOST && hdr->host1 == SMEM_COMM_HOST) {
comm_partition.partition_num = num;
comm_partition.offset = entry->offset;
comm_partition.size_cacheline = entry->size_cacheline;
SMEM_INFO("Common Partition %d offset:%x\n", num,
entry->offset);
return;
}
if (hdr->host0 != SMEM_APPS && hdr->host1 != SMEM_APPS) {
LOG_ERR("Smem partition %d hosts don't match TOC\n", num);
BUG();
@ -1253,6 +1306,8 @@ static int smem_init_target_info(phys_addr_t info_addr, resource_size_t size)
}
smem_ram_phys = smem_targ_info->phys_base_addr;
smem_ram_size = smem_targ_info->size;
if (smem_targ_info->max_items)
smem_max_items = smem_targ_info->max_items;
iounmap(smem_targ_info_addr);
return 0;
}
@ -1488,7 +1543,7 @@ int __init msm_smem_init(void)
return 0;
registered = true;
smem_max_items = SMEM_NUM_ITEMS;
smem_ipc_log_ctx = ipc_log_context_create(NUM_LOG_PAGES, "smem", 0);
if (!smem_ipc_log_ctx) {
pr_err("%s: unable to create logging context\n", __func__);

View file

@ -1,7 +1,7 @@
/* arch/arm/mach-msm/smem_debug.c
*
* Copyright (C) 2007 Google, Inc.
* Copyright (c) 2009-2013, The Linux Foundation. All rights reserved.
* Copyright (c) 2009-2013,2016 The Linux Foundation. All rights reserved.
* Author: Brian Swetland <swetland@google.com>
*
* This software is licensed under the terms of the GNU General Public
@ -53,7 +53,7 @@ static void debug_read_mem(struct seq_file *s)
heap_info->free_offset,
heap_info->heap_remaining);
for (n = 0; n < SMEM_NUM_ITEMS; n++) {
for (n = 0; n < smem_max_items; n++) {
if (toc[n].allocated == 0)
continue;
seq_printf(s, "%04d: offset %08x size %08x\n",
@ -67,9 +67,8 @@ static void debug_read_smem_version(struct seq_file *s)
for (n = 0; n < 32; n++) {
version = smem_get_version(n);
seq_printf(s, "entry %d: smem = %d proc_comm = %d\n", n,
version >> 16,
version & 0xffff);
seq_printf(s, "entry %d:%x smem = %d proc_comm = %d\n",
n, version, version >> 16, version & 0xffff);
}
}

View file

@ -1,4 +1,4 @@
/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
/* Copyright (c) 2013,2016 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@ -18,6 +18,7 @@
#define SMD_HEAP_SIZE 512
extern uint32_t smem_max_items;
struct smem_heap_info {
unsigned initialized;

View file

@ -60,6 +60,7 @@
#define MSM_LIMITS_NODE_DCVS 0x44435653
#define MSM_LIMITS_SUB_FN_GENERAL 0x47454E00
#define MSM_LIMITS_SUB_FN_CRNT 0x43524E54
#define MSM_LIMITS_SUB_FN_REL 0x52454C00
#define MSM_LIMITS_DOMAIN_MAX 0x444D4158
#define MSM_LIMITS_DOMAIN_MIN 0x444D494E
#define MSM_LIMITS_CLUSTER_0 0x6370302D
@ -1674,7 +1675,20 @@ static int msm_thermal_lmh_dcvs_init(struct platform_device *pdev)
*/
devm_clk_put(&pdev->dev, osm_clk);
/* Enable the CRNT algorithm. Again, we dont care if this fails */
/* Enable the CRNT and Reliability algorithm. Again, we dont
* care if this fails
*/
ret = msm_lmh_dcvs_write(MSM_LIMITS_CLUSTER_0,
MSM_LIMITS_SUB_FN_REL,
MSM_LIMITS_ALGO_MODE_ENABLE, 1);
if (ret)
pr_err("Unable to enable REL algo for cluster0\n");
ret = msm_lmh_dcvs_write(MSM_LIMITS_CLUSTER_1,
MSM_LIMITS_SUB_FN_REL,
MSM_LIMITS_ALGO_MODE_ENABLE, 1);
if (ret)
pr_err("Unable to enable REL algo for cluster1\n");
ret = msm_lmh_dcvs_write(MSM_LIMITS_CLUSTER_0,
MSM_LIMITS_SUB_FN_CRNT,
MSM_LIMITS_ALGO_MODE_ENABLE, 1);

View file

@ -1441,6 +1441,9 @@ int usb_suspend(struct device *dev, pm_message_t msg)
{
struct usb_device *udev = to_usb_device(dev);
if (udev->bus->skip_resume && udev->state == USB_STATE_SUSPENDED)
return 0;
unbind_no_pm_drivers_interfaces(udev);
/* From now on we are sure all drivers support suspend/resume
@ -1470,6 +1473,15 @@ int usb_resume(struct device *dev, pm_message_t msg)
struct usb_device *udev = to_usb_device(dev);
int status;
/*
* Some buses would like to keep their devices in suspend
* state after system resume. Their resume happen when
* a remote wakeup is detected or interface driver start
* I/O.
*/
if (udev->bus->skip_resume)
return 0;
/* For all calls, take the device back to full power and
* tell the PM core in case it was autosuspended previously.
* Unbind the interfaces that will need rebinding later,

View file

@ -167,6 +167,8 @@ static int xhci_plat_probe(struct platform_device *pdev)
if (!hcd)
return -ENOMEM;
hcd_to_bus(hcd)->skip_resume = true;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
hcd->regs = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(hcd->regs)) {
@ -221,6 +223,8 @@ static int xhci_plat_probe(struct platform_device *pdev)
goto disable_clk;
}
hcd_to_bus(xhci->shared_hcd)->skip_resume = true;
if ((node && of_property_read_bool(node, "usb3-lpm-capable")) ||
(pdata && pdata->usb3_lpm_capable))
xhci->quirks |= XHCI_LPM_SUPPORT;

View file

@ -913,7 +913,7 @@ static void usbpd_set_state(struct usbpd *pd, enum usbpd_state next_state)
case PE_PRS_SNK_SRC_TRANSITION_TO_OFF:
val.intval = pd->requested_current = 0; /* suspend charging */
power_supply_set_property(pd->usb_psy,
POWER_SUPPLY_PROP_CURRENT_MAX, &val);
POWER_SUPPLY_PROP_PD_CURRENT_MAX, &val);
pd->in_explicit_contract = false;
@ -1039,23 +1039,23 @@ static void handle_vdm_rx(struct usbpd *pd, struct rx_msg *rx_msg)
return;
}
if (handler && handler->svdm_received)
handler->svdm_received(handler, cmd, cmd_type, vdos, num_vdos);
/* if this interrupts a previous exchange, abort queued response */
if (cmd_type == SVDM_CMD_TYPE_INITIATOR && pd->vdm_tx) {
usbpd_dbg(&pd->dev, "Discarding previously queued SVDM tx (SVID:0x%04x)\n",
VDM_HDR_SVID(pd->vdm_tx->data[0]));
kfree(pd->vdm_tx);
pd->vdm_tx = NULL;
}
if (handler && handler->svdm_received) {
handler->svdm_received(handler, cmd, cmd_type, vdos, num_vdos);
return;
}
/* Standard Discovery or unhandled messages go here */
switch (cmd_type) {
case SVDM_CMD_TYPE_INITIATOR:
/*
* if this interrupts a previous exchange, abort the previous
* outgoing response
*/
if (pd->vdm_tx) {
usbpd_dbg(&pd->dev, "Discarding previously queued SVDM tx (SVID:0x%04x)\n",
VDM_HDR_SVID(pd->vdm_tx->data[0]));
kfree(pd->vdm_tx);
pd->vdm_tx = NULL;
}
if (svid == USBPD_SID && cmd == USBPD_SVDM_DISCOVER_IDENTITY) {
u32 tx_vdos[3] = {
ID_HDR_USB_HOST | ID_HDR_USB_DEVICE |
@ -1074,13 +1074,14 @@ static void handle_vdm_rx(struct usbpd *pd, struct rx_msg *rx_msg)
break;
case SVDM_CMD_TYPE_RESP_ACK:
if (svid != USBPD_SID) {
usbpd_err(&pd->dev, "unhandled ACK for SVID:0x%x\n",
svid);
break;
}
switch (cmd) {
case USBPD_SVDM_DISCOVER_IDENTITY:
if (svid != USBPD_SID) {
usbpd_err(&pd->dev, "invalid VID:0x%x\n", svid);
break;
}
kfree(pd->vdm_tx_retry);
pd->vdm_tx_retry = NULL;
@ -1091,11 +1092,6 @@ static void handle_vdm_rx(struct usbpd *pd, struct rx_msg *rx_msg)
break;
case USBPD_SVDM_DISCOVER_SVIDS:
if (svid != USBPD_SID) {
usbpd_err(&pd->dev, "invalid VID:0x%x\n", svid);
break;
}
pd->vdm_state = DISCOVERED_SVIDS;
kfree(pd->vdm_tx_retry);
@ -1181,33 +1177,15 @@ static void handle_vdm_rx(struct usbpd *pd, struct rx_msg *rx_msg)
break;
case USBPD_SVDM_DISCOVER_MODES:
usbpd_info(&pd->dev, "SVID:0x%04x VDM Modes discovered\n",
svid);
pd->vdm_state = DISCOVERED_MODES;
break;
case USBPD_SVDM_ENTER_MODE:
usbpd_info(&pd->dev, "SVID:0x%04x VDM Mode entered\n",
svid);
pd->vdm_state = MODE_ENTERED;
kobject_uevent(&pd->dev.kobj, KOBJ_CHANGE);
break;
case USBPD_SVDM_EXIT_MODE:
usbpd_info(&pd->dev, "SVID:0x%04x VDM Mode exited\n",
svid);
pd->vdm_state = MODE_EXITED;
kobject_uevent(&pd->dev.kobj, KOBJ_CHANGE);
break;
default:
usbpd_dbg(&pd->dev, "unhandled ACK for command:0x%x\n",
cmd);
break;
}
break;
case SVDM_CMD_TYPE_RESP_NAK:
usbpd_info(&pd->dev, "VDM NAK received for SVID:0x%04x command:%d\n",
usbpd_info(&pd->dev, "VDM NAK received for SVID:0x%04x command:0x%x\n",
svid, cmd);
break;
@ -1731,6 +1709,8 @@ static void usbpd_sm(struct work_struct *w)
case PE_SNK_SELECT_CAPABILITY:
if (IS_CTRL(rx_msg, MSG_ACCEPT)) {
usbpd_set_state(pd, PE_SNK_TRANSITION_SINK);
/* prepare for voltage increase/decrease */
val.intval = pd->requested_voltage;
power_supply_set_property(pd->usb_psy,
@ -1740,16 +1720,31 @@ static void usbpd_sm(struct work_struct *w)
&val);
/*
* disable charging; technically we are allowed to
* charge up to pSnkStdby (2.5 W) during this
* transition, but disable it just for simplicity.
* if we are changing voltages, we must lower input
* current to pSnkStdby (2.5W). Calculate it and set
* PD_CURRENT_MAX accordingly.
*/
val.intval = 0;
power_supply_set_property(pd->usb_psy,
if (pd->requested_voltage != pd->current_voltage) {
int mv = max(pd->requested_voltage,
pd->current_voltage) / 1000;
val.intval = (2500000 / mv) * 1000;
power_supply_set_property(pd->usb_psy,
POWER_SUPPLY_PROP_PD_CURRENT_MAX, &val);
} else {
/* decreasing current? */
ret = power_supply_get_property(pd->usb_psy,
POWER_SUPPLY_PROP_PD_CURRENT_MAX, &val);
if (!ret &&
pd->requested_current < val.intval) {
val.intval =
pd->requested_current * 1000;
power_supply_set_property(pd->usb_psy,
POWER_SUPPLY_PROP_PD_CURRENT_MAX,
&val);
}
}
pd->selected_pdo = pd->requested_pdo;
usbpd_set_state(pd, PE_SNK_TRANSITION_SINK);
} else if (IS_CTRL(rx_msg, MSG_REJECT) ||
IS_CTRL(rx_msg, MSG_WAIT)) {
if (pd->in_explicit_contract)

View file

@ -109,6 +109,7 @@ struct qusb_phy {
void __iomem *base;
void __iomem *tune2_efuse_reg;
void __iomem *ref_clk_base;
void __iomem *tcsr_clamp_dig_n;
struct clk *ref_clk_src;
struct clk *ref_clk;
@ -147,6 +148,7 @@ struct qusb_phy {
int phy_pll_reset_seq_len;
int *emu_dcm_reset_seq;
int emu_dcm_reset_seq_len;
bool put_into_high_z_state;
};
static void qusb_phy_enable_clocks(struct qusb_phy *qphy, bool on)
@ -189,15 +191,14 @@ static int qusb_phy_config_vdd(struct qusb_phy *qphy, int high)
return ret;
}
static int qusb_phy_enable_power(struct qusb_phy *qphy, bool on,
bool toggle_vdd)
static int qusb_phy_enable_power(struct qusb_phy *qphy, bool on)
{
int ret = 0;
dev_dbg(qphy->phy.dev, "%s turn %s regulators. power_enabled:%d\n",
__func__, on ? "on" : "off", qphy->power_enabled);
if (toggle_vdd && qphy->power_enabled == on) {
if (qphy->power_enabled == on) {
dev_dbg(qphy->phy.dev, "PHYs' regulators are already ON.\n");
return 0;
}
@ -205,19 +206,17 @@ static int qusb_phy_enable_power(struct qusb_phy *qphy, bool on,
if (!on)
goto disable_vdda33;
if (toggle_vdd) {
ret = qusb_phy_config_vdd(qphy, true);
if (ret) {
dev_err(qphy->phy.dev, "Unable to config VDD:%d\n",
ret);
goto err_vdd;
}
ret = qusb_phy_config_vdd(qphy, true);
if (ret) {
dev_err(qphy->phy.dev, "Unable to config VDD:%d\n",
ret);
goto err_vdd;
}
ret = regulator_enable(qphy->vdd);
if (ret) {
dev_err(qphy->phy.dev, "Unable to enable VDD\n");
goto unconfig_vdd;
}
ret = regulator_enable(qphy->vdd);
if (ret) {
dev_err(qphy->phy.dev, "Unable to enable VDD\n");
goto unconfig_vdd;
}
ret = regulator_set_load(qphy->vdda18, QUSB2PHY_1P8_HPM_LOAD);
@ -260,8 +259,7 @@ static int qusb_phy_enable_power(struct qusb_phy *qphy, bool on,
goto unset_vdd33;
}
if (toggle_vdd)
qphy->power_enabled = true;
qphy->power_enabled = true;
pr_debug("%s(): QUSB PHY's regulators are turned ON.\n", __func__);
return ret;
@ -299,21 +297,18 @@ put_vdda18_lpm:
dev_err(qphy->phy.dev, "Unable to set LPM of vdda18\n");
disable_vdd:
if (toggle_vdd) {
ret = regulator_disable(qphy->vdd);
if (ret)
dev_err(qphy->phy.dev, "Unable to disable vdd:%d\n",
ret = regulator_disable(qphy->vdd);
if (ret)
dev_err(qphy->phy.dev, "Unable to disable vdd:%d\n",
ret);
unconfig_vdd:
ret = qusb_phy_config_vdd(qphy, false);
if (ret)
dev_err(qphy->phy.dev, "Unable unconfig VDD:%d\n",
ret = qusb_phy_config_vdd(qphy, false);
if (ret)
dev_err(qphy->phy.dev, "Unable unconfig VDD:%d\n",
ret);
}
err_vdd:
if (toggle_vdd)
qphy->power_enabled = false;
qphy->power_enabled = false;
dev_dbg(qphy->phy.dev, "QUSB PHY's regulators are turned OFF.\n");
return ret;
}
@ -330,12 +325,53 @@ static int qusb_phy_update_dpdm(struct usb_phy *phy, int value)
case POWER_SUPPLY_DP_DM_DPF_DMF:
dev_dbg(phy->dev, "POWER_SUPPLY_DP_DM_DPF_DMF\n");
if (!qphy->rm_pulldown) {
ret = qusb_phy_enable_power(qphy, true, false);
ret = qusb_phy_enable_power(qphy, true);
if (ret >= 0) {
qphy->rm_pulldown = true;
dev_dbg(phy->dev, "DP_DM_F: rm_pulldown:%d\n",
qphy->rm_pulldown);
}
if (qphy->put_into_high_z_state) {
if (qphy->tcsr_clamp_dig_n)
writel_relaxed(0x1,
qphy->tcsr_clamp_dig_n);
qusb_phy_enable_clocks(qphy, true);
dev_dbg(phy->dev, "RESET QUSB PHY\n");
ret = reset_control_assert(qphy->phy_reset);
if (ret)
dev_err(phy->dev, "phyassert failed\n");
usleep_range(100, 150);
ret = reset_control_deassert(qphy->phy_reset);
if (ret)
dev_err(phy->dev, "deassert failed\n");
/*
* Phy in non-driving mode leaves Dp and Dm
* lines in high-Z state. Controller power
* collapse is not switching phy to non-driving
* mode causing charger detection failure. Bring
* phy to non-driving mode by overriding
* controller output via UTMI interface.
*/
writel_relaxed(TERM_SELECT | XCVR_SELECT_FS |
OP_MODE_NON_DRIVE,
qphy->base + QUSB2PHY_PORT_UTMI_CTRL1);
writel_relaxed(UTMI_ULPI_SEL |
UTMI_TEST_MUX_SEL,
qphy->base + QUSB2PHY_PORT_UTMI_CTRL2);
/* Disable PHY */
writel_relaxed(CLAMP_N_EN | FREEZIO_N |
POWER_DOWN,
qphy->base + QUSB2PHY_PORT_POWERDOWN);
/* Make sure that above write is completed */
wmb();
qusb_phy_enable_clocks(qphy, false);
}
}
break;
@ -343,7 +379,13 @@ static int qusb_phy_update_dpdm(struct usb_phy *phy, int value)
case POWER_SUPPLY_DP_DM_DPR_DMR:
dev_dbg(phy->dev, "POWER_SUPPLY_DP_DM_DPR_DMR\n");
if (qphy->rm_pulldown) {
ret = qusb_phy_enable_power(qphy, false, false);
if (!qphy->cable_connected) {
if (qphy->tcsr_clamp_dig_n)
writel_relaxed(0x0,
qphy->tcsr_clamp_dig_n);
dev_dbg(phy->dev, "turn off for HVDCP case\n");
ret = qusb_phy_enable_power(qphy, false);
}
if (ret >= 0) {
qphy->rm_pulldown = false;
dev_dbg(phy->dev, "DP_DM_R: rm_pulldown:%d\n",
@ -420,7 +462,7 @@ static int qusb_phy_init(struct usb_phy *phy)
dev_dbg(phy->dev, "%s\n", __func__);
ret = qusb_phy_enable_power(qphy, true, true);
ret = qusb_phy_enable_power(qphy, true);
if (ret)
return ret;
@ -651,22 +693,23 @@ static int qusb_phy_set_suspend(struct usb_phy *phy, int suspend)
/* Disable all interrupts */
writel_relaxed(0x00,
qphy->base + QUSB2PHY_PORT_INTR_CTRL);
/*
* Phy in non-driving mode leaves Dp and Dm lines in
* high-Z state. Controller power collapse is not
* switching phy to non-driving mode causing charger
* detection failure. Bring phy to non-driving mode by
* overriding controller output via UTMI interface.
*/
writel_relaxed(TERM_SELECT | XCVR_SELECT_FS |
OP_MODE_NON_DRIVE,
qphy->base + QUSB2PHY_PORT_UTMI_CTRL1);
writel_relaxed(UTMI_ULPI_SEL | UTMI_TEST_MUX_SEL,
qphy->base + QUSB2PHY_PORT_UTMI_CTRL2);
/* Make sure that above write is completed */
wmb();
qusb_phy_enable_clocks(qphy, false);
qusb_phy_enable_power(qphy, false, true);
if (qphy->tcsr_clamp_dig_n)
writel_relaxed(0x0,
qphy->tcsr_clamp_dig_n);
qusb_phy_enable_power(qphy, false);
/*
* Set put_into_high_z_state to true so next USB
* cable connect, DPF_DMF request performs PHY
* reset and put it into high-z state. For bootup
* with or without USB cable, it doesn't require
* to put QUSB PHY into high-z state.
*/
qphy->put_into_high_z_state = true;
}
qphy->suspended = true;
} else {
@ -678,7 +721,10 @@ static int qusb_phy_set_suspend(struct usb_phy *phy, int suspend)
writel_relaxed(0x00,
qphy->base + QUSB2PHY_PORT_INTR_CTRL);
} else {
qusb_phy_enable_power(qphy, true, true);
qusb_phy_enable_power(qphy, true);
if (qphy->tcsr_clamp_dig_n)
writel_relaxed(0x1,
qphy->tcsr_clamp_dig_n);
qusb_phy_enable_clocks(qphy, true);
}
qphy->suspended = false;
@ -849,6 +895,18 @@ static int qusb_phy_probe(struct platform_device *pdev)
}
}
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
"tcsr_clamp_dig_n_1p8");
if (res) {
qphy->tcsr_clamp_dig_n = devm_ioremap_nocache(dev,
res->start, resource_size(res));
if (IS_ERR(qphy->tcsr_clamp_dig_n)) {
dev_err(dev, "err reading tcsr_clamp_dig_n\n");
qphy->tcsr_clamp_dig_n = NULL;
}
}
qphy->ref_clk_src = devm_clk_get(dev, "ref_clk_src");
if (IS_ERR(qphy->ref_clk_src))
dev_dbg(dev, "clk get failed for ref_clk_src\n");
@ -1046,7 +1104,7 @@ static int qusb_phy_remove(struct platform_device *pdev)
qphy->clocks_enabled = false;
}
qusb_phy_enable_power(qphy, false, true);
qusb_phy_enable_power(qphy, false);
return 0;
}

View file

@ -2068,7 +2068,7 @@ static int mdss_retrieve_dp_ctrl_resources(struct platform_device *pdev,
"hdcp_physical"))
pr_warn("unable to remap dp hdcp resources\n");
pr_debug("DP Driver base=%p size=%x\n",
pr_debug("DP Driver base=%pK size=%x\n",
dp_drv->base, dp_drv->base_size);
mdss_debug_register_base("dp",

View file

@ -2985,6 +2985,12 @@ static int mdss_dsi_cont_splash_config(struct mdss_panel_info *pinfo,
mdss_dsi_panel_pwm_enable(ctrl_pdata);
ctrl_pdata->ctrl_state |= (CTRL_STATE_PANEL_INIT |
CTRL_STATE_MDP_ACTIVE | CTRL_STATE_DSI_ACTIVE);
/*
* MDP client removes this extra vote during splash reconfigure
* for command mode panel from interface. DSI removes the vote
* during suspend-resume for video mode panel.
*/
if (ctrl_pdata->panel_data.panel_info.type == MIPI_CMD_PANEL)
clk_handle = ctrl_pdata->mdp_clk_handle;
else

View file

@ -557,7 +557,8 @@ struct mdss_dsi_ctrl_pdata {
void *clk_mngr;
void *dsi_clk_handle;
void *mdp_clk_handle;
int m_vote_cnt;
int m_dsi_vote_cnt;
int m_mdp_vote_cnt;
/* debugfs structure */
struct mdss_dsi_debugfs_info *debugfs_info;

View file

@ -795,8 +795,29 @@ error:
return rc;
}
/*
 * is_dsi_clk_in_ecg_state() - report whether a client's core clocks are
 * in the early-gate (ECG) state.
 * @client: clock manager client handle (struct mdss_dsi_clk_client_info *).
 *
 * The client's core clock state is sampled under the manager's clk_mutex
 * so the check is consistent with concurrent state transitions.
 *
 * Return: true if the core clocks are in MDSS_DSI_CLK_EARLY_GATE,
 * false otherwise (including for a NULL client).
 */
bool is_dsi_clk_in_ecg_state(void *client)
{
	struct mdss_dsi_clk_client_info *info = client;
	struct mdss_dsi_clk_mngr *mngr;
	bool ecg;

	if (!info) {
		pr_err("Invalid client params\n");
		return false;
	}

	mngr = info->mngr;

	mutex_lock(&mngr->clk_mutex);
	ecg = (info->core_clk_state == MDSS_DSI_CLK_EARLY_GATE);
	mutex_unlock(&mngr->clk_mutex);

	return ecg;
}
int mdss_dsi_clk_req_state(void *client, enum mdss_dsi_clk_type clk,
enum mdss_dsi_clk_state state)
enum mdss_dsi_clk_state state, u32 index)
{
int rc = 0;
struct mdss_dsi_clk_client_info *c = client;
@ -817,7 +838,7 @@ int mdss_dsi_clk_req_state(void *client, enum mdss_dsi_clk_type clk,
c->name, mngr->name, clk, state, c->core_clk_state,
c->link_clk_state);
MDSS_XLOG(clk, state, c->core_clk_state, c->link_clk_state);
MDSS_XLOG(index, clk, state, c->core_clk_state, c->link_clk_state);
/*
* Refcount handling rules:
* 1. Increment refcount whenever ON is called
@ -883,7 +904,7 @@ int mdss_dsi_clk_req_state(void *client, enum mdss_dsi_clk_type clk,
pr_debug("[%s]%s: change=%d, Core (ref=%d, state=%d), Link (ref=%d, state=%d)\n",
c->name, mngr->name, changed, c->core_refcount,
c->core_clk_state, c->link_refcount, c->link_clk_state);
MDSS_XLOG(clk, state, c->core_clk_state, c->link_clk_state);
MDSS_XLOG(index, clk, state, c->core_clk_state, c->link_clk_state);
if (changed) {
rc = dsi_recheck_clk_state(mngr);

View file

@ -197,6 +197,7 @@ int mdss_dsi_clk_deregister(void *client);
* @client: client handle.
* @clk: Type of clock requested (enum mdss_dsi_clk_type).
* @state: clock state requested.
* @index: controller index.
*
* This routine is used to request a new clock state for a specific clock. If
* turning ON the clocks, this guarantees that clocks will be on before
@ -206,7 +207,7 @@ int mdss_dsi_clk_deregister(void *client);
* @return: error code.
*/
int mdss_dsi_clk_req_state(void *client, enum mdss_dsi_clk_type clk,
enum mdss_dsi_clk_state state);
enum mdss_dsi_clk_state state, u32 index);
/**
* mdss_dsi_clk_set_link_rate() - set clock rate for link clocks
@ -238,4 +239,16 @@ int mdss_dsi_clk_set_link_rate(void *client, enum mdss_dsi_link_clk_type clk,
* @return:error code.
*/
int mdss_dsi_clk_force_toggle(void *client, u32 clk);
/**
* is_dsi_clk_in_ecg_state() - Checks the current state of clocks
* @client: client handle.
*
* This routine returns checks the clocks status for client and return
* success code based on it.
*
* @return:true: if clocks are in ECG state
* false: for all other cases
*/
bool is_dsi_clk_in_ecg_state(void *client);
#endif /* _MDSS_DSI_CLK_H_ */

View file

@ -2811,12 +2811,6 @@ static int dsi_event_thread(void *data)
pr_debug("%s: Handling underflow event\n",
__func__);
__dsi_fifo_error_handler(ctrl, true);
} else {
pr_err("%s: ctrl recovery not defined\n",
__func__);
MDSS_XLOG_TOUT_HANDLER("mdp", "dsi0_ctrl",
"dsi0_phy", "dsi1_ctrl", "dsi1_phy", "vbif",
"vbif_nrt", "dbg_bus", "vbif_dbg_bus", "panic");
}
mutex_unlock(&ctrl->mutex);
}

View file

@ -233,9 +233,11 @@ static int mdss_fb_notify_update(struct msm_fb_data_type *mfd,
}
} else if (notify == NOTIFY_UPDATE_STOP) {
mutex_lock(&mfd->update.lock);
if (mfd->update.init_done)
if (mfd->update.init_done) {
mutex_unlock(&mfd->update.lock);
mutex_lock(&mfd->no_update.lock);
reinit_completion(&mfd->no_update.comp);
else {
} else {
mutex_unlock(&mfd->update.lock);
pr_err("notify update stop called without init\n");
return -EINVAL;

View file

@ -4227,9 +4227,11 @@ int mdss_mdp_ctl_start(struct mdss_mdp_ctl *ctl, bool handoff)
return 0;
}
ret = mdss_mdp_ctl_setup(ctl);
if (ret)
return ret;
if (mdss_mdp_ctl_is_power_off(ctl)) {
ret = mdss_mdp_ctl_setup(ctl);
if (ret)
return ret;
}
sctl = mdss_mdp_get_split_ctl(ctl);

View file

@ -335,6 +335,57 @@ static int mdss_mdp_cmd_tearcheck_cfg(struct mdss_mdp_mixer *mixer,
return 0;
}
/*
 * __disable_rd_ptr_from_te() - stop generating the read pointer from the
 * external panel TE signal.
 * @pingpong_base: base of the ping-pong block register space.
 *
 * Clears BIT(20) of PP_SYNC_CONFIG_VSYNC via read-modify-write.
 *
 * Return: true if rd-ptr-from-TE was enabled before this call (so the
 * caller knows whether to re-enable it later), false otherwise.
 */
static bool __disable_rd_ptr_from_te(char __iomem *pingpong_base)
{
	u32 sync_cfg;
	bool was_enabled;

	sync_cfg = mdss_mdp_pingpong_read(pingpong_base,
			MDSS_MDP_REG_PP_SYNC_CONFIG_VSYNC);

	/* BIT(20) drives the rd pointer from the external TE */
	was_enabled = !!(sync_cfg & BIT(20));

	mdss_mdp_pingpong_write(pingpong_base,
			MDSS_MDP_REG_PP_SYNC_CONFIG_VSYNC,
			sync_cfg & ~BIT(20));

	return was_enabled;
}
/*
 * __enable_rd_ptr_from_te() - resume generating the read pointer from the
 * external panel TE signal.
 * @pingpong_base: base of the ping-pong block register space.
 *
 * Sets BIT(20) of PP_SYNC_CONFIG_VSYNC via read-modify-write; counterpart
 * of __disable_rd_ptr_from_te().
 */
static inline void __enable_rd_ptr_from_te(char __iomem *pingpong_base)
{
	u32 sync_cfg;

	sync_cfg = mdss_mdp_pingpong_read(pingpong_base,
			MDSS_MDP_REG_PP_SYNC_CONFIG_VSYNC) | BIT(20);

	mdss_mdp_pingpong_write(pingpong_base,
			MDSS_MDP_REG_PP_SYNC_CONFIG_VSYNC, sync_cfg);
}
/*
* __disable_autorefresh - disables autorefresh feature in the hw.
*
* To disable autorefresh, driver needs to make sure no transactions are
* on-going; for ensuring this, driver must:
*
* 1. Disable listening to the external TE (this gives extra time before
* trigger next transaction).
* 2. Wait for any on-going transaction (wait for ping pong done interrupt).
* 3. Disable auto-refresh.
* 4. Re-enable listening to the external panel TE.
*
* So it is responsability of the caller of this function to only call to disable
* autorefresh if no hw transaction is on-going (wait for ping pong) and if
* the listening for the external TE is disabled in the tear check logic (this
* to prevent any race conditions with the hw), as mentioned in the above
* steps.
*/
/* Clear PP_AUTOREFRESH_CONFIG, turning the hw autorefresh feature off. */
static inline void __disable_autorefresh(char __iomem *pingpong_base)
{
	mdss_mdp_pingpong_write(pingpong_base,
		MDSS_MDP_REG_PP_AUTOREFRESH_CONFIG, 0x0);
}
static int mdss_mdp_cmd_tearcheck_setup(struct mdss_mdp_cmd_ctx *ctx,
bool locked)
{
@ -342,7 +393,7 @@ static int mdss_mdp_cmd_tearcheck_setup(struct mdss_mdp_cmd_ctx *ctx,
struct mdss_mdp_mixer *mixer = NULL, *mixer_right = NULL;
struct mdss_mdp_ctl *ctl = ctx->ctl;
struct mdss_data_type *mdata = mdss_mdp_get_mdata();
u32 offset = 0;
bool rd_ptr_disabled = false;
mixer = mdss_mdp_mixer_get(ctl, MDSS_MDP_MIXER_MUX_LEFT);
if (mixer) {
@ -352,21 +403,32 @@ static int mdss_mdp_cmd_tearcheck_setup(struct mdss_mdp_cmd_ctx *ctx,
*/
if (mdss_mdp_pingpong_read(mixer->pingpong_base,
MDSS_MDP_REG_PP_AUTOREFRESH_CONFIG) & BIT(31)) {
offset = MDSS_MDP_REG_PP_AUTOREFRESH_CONFIG;
/* 1. disable rd pointer from the external te */
rd_ptr_disabled =
__disable_rd_ptr_from_te(mixer->pingpong_base);
/* 2. disable autorefresh */
if (is_pingpong_split(ctl->mfd))
writel_relaxed(0x0,
(mdata->slave_pingpong_base + offset));
__disable_autorefresh(
mdata->slave_pingpong_base);
if (is_split_lm(ctl->mfd)) {
mixer_right =
mdss_mdp_mixer_get(ctl,
MDSS_MDP_MIXER_MUX_RIGHT);
mixer_right = mdss_mdp_mixer_get(ctl,
MDSS_MDP_MIXER_MUX_RIGHT);
if (mixer_right)
writel_relaxed(0x0,
(mixer_right->pingpong_base + offset));
__disable_autorefresh(
mixer_right->pingpong_base);
}
mdss_mdp_pingpong_write(mixer->pingpong_base,
MDSS_MDP_REG_PP_AUTOREFRESH_CONFIG, 0x0);
__disable_autorefresh(mixer->pingpong_base);
pr_debug("%s: disabling auto refresh\n", __func__);
/* 2. re-enable rd pointer from te (if was enabled) */
if (rd_ptr_disabled)
__enable_rd_ptr_from_te(mixer->pingpong_base);
}
rc = mdss_mdp_cmd_tearcheck_cfg(mixer, ctx, locked);
if (rc)
@ -991,6 +1053,7 @@ static void mdss_mdp_cmd_readptr_done(void *arg)
vsync_time = ktime_get();
ctl->vsync_cnt++;
MDSS_XLOG(ctl->num, atomic_read(&ctx->koff_cnt));
trace_mdp_cmd_readptr_done(ctl->num, atomic_read(&ctx->koff_cnt));
complete_all(&ctx->rdptr_done);
/* If caller is waiting for the read pointer, notify. */
@ -2364,7 +2427,6 @@ static void mdss_mdp_cmd_pre_programming(struct mdss_mdp_ctl *mctl)
struct mdss_mdp_cmd_ctx *ctx = mctl->intf_ctx[MASTER_CTX];
char __iomem *pp_base;
u32 autorefresh_state;
u32 cfg;
if (!mctl->is_master)
return;
@ -2384,11 +2446,8 @@ static void mdss_mdp_cmd_pre_programming(struct mdss_mdp_ctl *mctl)
* instruct MDP to ignore the panel TE so the next auto-refresh
* is delayed until flush bits are set.
*/
cfg = mdss_mdp_pingpong_read(pp_base,
MDSS_MDP_REG_PP_SYNC_CONFIG_VSYNC);
cfg &= ~BIT(20);
mdss_mdp_pingpong_write(pp_base,
MDSS_MDP_REG_PP_SYNC_CONFIG_VSYNC, cfg);
__disable_rd_ptr_from_te(pp_base);
ctx->ignore_external_te = true;
}
@ -2400,7 +2459,6 @@ static void mdss_mdp_cmd_post_programming(struct mdss_mdp_ctl *mctl)
{
struct mdss_mdp_cmd_ctx *ctx = mctl->intf_ctx[MASTER_CTX];
char __iomem *pp_base;
u32 cfg;
if (!mctl->is_master)
return;
@ -2419,11 +2477,8 @@ static void mdss_mdp_cmd_post_programming(struct mdss_mdp_ctl *mctl)
pp_base = mctl->mixer_left->pingpong_base;
/* enable MDP to listen to the TE */
cfg = mdss_mdp_pingpong_read(pp_base,
MDSS_MDP_REG_PP_SYNC_CONFIG_VSYNC);
cfg |= BIT(20);
mdss_mdp_pingpong_write(pp_base,
MDSS_MDP_REG_PP_SYNC_CONFIG_VSYNC, cfg);
__enable_rd_ptr_from_te(pp_base);
ctx->ignore_external_te = false;
}
}
@ -2675,11 +2730,10 @@ static int mdss_mdp_disable_autorefresh(struct mdss_mdp_ctl *ctl,
mdss_mdp_cmd_wait4_autorefresh_pp(sctl);
/* disable autorefresh */
mdss_mdp_pingpong_write(pp_base, MDSS_MDP_REG_PP_AUTOREFRESH_CONFIG, 0);
__disable_autorefresh(pp_base);
if (is_pingpong_split(ctl->mfd))
mdss_mdp_pingpong_write(mdata->slave_pingpong_base,
MDSS_MDP_REG_PP_AUTOREFRESH_CONFIG, 0);
__disable_autorefresh(mdata->slave_pingpong_base);
ctx->autorefresh_state = MDP_AUTOREFRESH_OFF;
ctx->autorefresh_frame_cnt = 0;

View file

@ -960,7 +960,7 @@ static int mdss_mdp_video_intfs_stop(struct mdss_mdp_ctl *ctl,
pr_err("Intf %d not in use\n", (inum + MDSS_MDP_INTF0));
return -ENODEV;
}
pr_debug("stop ctl=%d video Intf #%d base=%p", ctl->num, ctx->intf_num,
pr_debug("stop ctl=%d video Intf #%d base=%pK", ctl->num, ctx->intf_num,
ctx->base);
ret = mdss_mdp_video_ctx_stop(ctl, pinfo, ctx);
@ -978,7 +978,7 @@ static int mdss_mdp_video_intfs_stop(struct mdss_mdp_ctl *ctl,
pr_err("Intf %d not in use\n", (inum + MDSS_MDP_INTF0));
return -ENODEV;
}
pr_debug("stop ctl=%d video Intf #%d base=%p", ctl->num,
pr_debug("stop ctl=%d video Intf #%d base=%pK", ctl->num,
sctx->intf_num, sctx->base);
ret = mdss_mdp_video_ctx_stop(ctl, pinfo, sctx);
@ -2021,7 +2021,7 @@ static int mdss_mdp_video_intfs_setup(struct mdss_mdp_ctl *ctl,
(inum + MDSS_MDP_INTF0));
return -EBUSY;
}
pr_debug("video Intf #%d base=%p", ctx->intf_num, ctx->base);
pr_debug("video Intf #%d base=%pK", ctx->intf_num, ctx->base);
ctx->ref_cnt++;
} else {
pr_err("Invalid intf number: %d\n", (inum + MDSS_MDP_INTF0));
@ -2054,7 +2054,7 @@ static int mdss_mdp_video_intfs_setup(struct mdss_mdp_ctl *ctl,
(inum + MDSS_MDP_INTF0));
return -EBUSY;
}
pr_debug("video Intf #%d base=%p", ctx->intf_num, ctx->base);
pr_debug("video Intf #%d base=%pK", ctx->intf_num, ctx->base);
ctx->ref_cnt++;
ctl->intf_ctx[SLAVE_CTX] = ctx;

View file

@ -852,7 +852,7 @@ static int __validate_layer_reconfig(struct mdp_input_layer *layer,
*/
if (pipe->csc_coeff_set != layer->color_space) {
src_fmt = mdss_mdp_get_format_params(layer->buffer.format);
if (pipe->src_fmt->is_yuv && src_fmt->is_yuv) {
if (pipe->src_fmt->is_yuv && src_fmt && src_fmt->is_yuv) {
status = -EPERM;
pr_err("csc change is not permitted on used pipe\n");
}

View file

@ -1533,7 +1533,7 @@ static int __overlay_queue_pipes(struct msm_fb_data_type *mfd)
}
break;
default:
pr_err("invalid state of buf %p=%d\n",
pr_err("invalid state of buf %pK=%d\n",
buf, buf->state);
BUG();
break;

View file

@ -2906,7 +2906,7 @@ int mdss_mdp_pp_resume(struct msm_fb_data_type *mfd)
mfd->index);
return 0;
} else if (ret || !ad) {
pr_err("Failed to get ad info: ret = %d, ad = 0x%p.\n",
pr_err("Failed to get ad info: ret = %d, ad = 0x%pK\n",
ret, ad);
return ret;
}
@ -3250,7 +3250,7 @@ static int pp_ad_calc_bl(struct msm_fb_data_type *mfd, int bl_in, int *bl_out,
mfd->index);
return 0;
} else if (ret || !ad) {
pr_err("Failed to get ad info: ret = %d, ad = 0x%p.\n",
pr_err("Failed to get ad info: ret = %d, ad = 0x%pK\n",
ret, ad);
return ret;
}
@ -5754,7 +5754,7 @@ static int pp_ad_invalidate_input(struct msm_fb_data_type *mfd)
mfd->index);
return 0;
} else if (ret || !ad) {
pr_err("Failed to get ad info: ret = %d, ad = 0x%p.\n",
pr_err("Failed to get ad info: ret = %d, ad = 0x%pK\n",
ret, ad);
return ret;
}
@ -5789,7 +5789,7 @@ int mdss_mdp_ad_config(struct msm_fb_data_type *mfd,
mfd->index);
return ret;
} else if (ret || !ad) {
pr_err("Failed to get ad info: ret = %d, ad = 0x%p.\n",
pr_err("Failed to get ad info: ret = %d, ad = 0x%pK\n",
ret, ad);
return ret;
}
@ -5929,7 +5929,7 @@ int mdss_mdp_ad_input(struct msm_fb_data_type *mfd,
mfd->index);
return ret;
} else if (ret || !ad) {
pr_err("Failed to get ad info: ret = %d, ad = 0x%p.\n",
pr_err("Failed to get ad info: ret = %d, ad = 0x%pK\n",
ret, ad);
return ret;
}
@ -6298,7 +6298,7 @@ static int mdss_mdp_ad_ipc_reset(struct msm_fb_data_type *mfd)
mfd->index);
return 0;
} else if (ret || !ad) {
pr_err("Failed to get ad info: ret = %d, ad = 0x%p.\n",
pr_err("Failed to get ad info: ret = %d, ad = 0x%pK\n",
ret, ad);
return ret;
}
@ -6339,7 +6339,7 @@ static int mdss_mdp_ad_setup(struct msm_fb_data_type *mfd)
mfd->index);
return 0;
} else if (ret || !ad) {
pr_err("Failed to get ad info: ret = %d, ad = 0x%p.\n",
pr_err("Failed to get ad info: ret = %d, ad = 0x%pK\n",
ret, ad);
return ret;
}

View file

@ -367,7 +367,7 @@ int pp_dither_cache_params(struct mdp_dither_cfg_data *config,
{
int ret = 0;
if (!config || !mdss_pp_res) {
pr_err("invalid param config %pi pp_res %pK\n",
pr_err("invalid param config %pK pp_res %pK\n",
config, mdss_pp_res);
return -EINVAL;
}

View file

@ -372,6 +372,22 @@ TRACE_EVENT(mdp_cmd_pingpong_done,
__entry->koff_cnt)
);
/*
 * mdp_cmd_readptr_done - systrace event emitted when the read-pointer-done
 * interrupt fires for a command-mode control path; records the ctl number
 * and the outstanding kickoff count at that moment.
 */
TRACE_EVENT(mdp_cmd_readptr_done,
	TP_PROTO(u32 ctl_num, int koff_cnt),
	TP_ARGS(ctl_num, koff_cnt),
	TP_STRUCT__entry(
		__field(u32, ctl_num)
		__field(int, koff_cnt)
	),
	TP_fast_assign(
		__entry->ctl_num = ctl_num;
		__entry->koff_cnt = koff_cnt;
	),
	TP_printk("ctl num:%d kickoff:%d",
		__entry->ctl_num,
		__entry->koff_cnt)
);
TRACE_EVENT(mdp_cmd_release_bw,
TP_PROTO(u32 ctl_num),
TP_ARGS(ctl_num),

View file

@ -23,6 +23,7 @@ int mdss_register_irq(struct mdss_hw *hw)
{
unsigned long irq_flags;
u32 ndx_bit;
bool err = false;
if (!hw || hw->hw_ndx >= MDSS_MAX_HW_BLK)
return -EINVAL;
@ -33,10 +34,12 @@ int mdss_register_irq(struct mdss_hw *hw)
if (!mdss_irq_handlers[hw->hw_ndx])
mdss_irq_handlers[hw->hw_ndx] = hw;
else
pr_err("panel %d's irq at %pK is already registered\n",
hw->hw_ndx, hw->irq_handler);
err = true;
spin_unlock_irqrestore(&mdss_lock, irq_flags);
if (err)
pr_err("panel %d's irq at %pK is already registered\n",
hw->hw_ndx, hw->irq_handler);
return 0;
}
@ -76,6 +79,7 @@ void mdss_disable_irq(struct mdss_hw *hw)
{
unsigned long irq_flags;
u32 ndx_bit;
bool err = false;
if (hw->hw_ndx >= MDSS_MAX_HW_BLK)
return;
@ -87,7 +91,7 @@ void mdss_disable_irq(struct mdss_hw *hw)
spin_lock_irqsave(&mdss_lock, irq_flags);
if (!(hw->irq_info->irq_mask & ndx_bit)) {
pr_warn("MDSS HW ndx=%d is NOT set\n", hw->hw_ndx);
err = true;
} else {
hw->irq_info->irq_mask &= ~ndx_bit;
if (hw->irq_info->irq_mask == 0) {
@ -96,12 +100,16 @@ void mdss_disable_irq(struct mdss_hw *hw)
}
}
spin_unlock_irqrestore(&mdss_lock, irq_flags);
if (err)
pr_warn("MDSS HW ndx=%d is NOT set\n", hw->hw_ndx);
}
/* called from interrupt context */
void mdss_disable_irq_nosync(struct mdss_hw *hw)
{
u32 ndx_bit;
bool err = false;
if (hw->hw_ndx >= MDSS_MAX_HW_BLK)
return;
@ -113,7 +121,7 @@ void mdss_disable_irq_nosync(struct mdss_hw *hw)
spin_lock(&mdss_lock);
if (!(hw->irq_info->irq_mask & ndx_bit)) {
pr_warn("MDSS HW ndx=%d is NOT set\n", hw->hw_ndx);
err = true;
} else {
hw->irq_info->irq_mask &= ~ndx_bit;
if (hw->irq_info->irq_mask == 0) {
@ -122,6 +130,9 @@ void mdss_disable_irq_nosync(struct mdss_hw *hw)
}
}
spin_unlock(&mdss_lock);
if (err)
pr_warn("MDSS HW ndx=%d is NOT set\n", hw->hw_ndx);
}
int mdss_irq_dispatch(u32 hw_ndx, int irq, void *ptr)
@ -176,6 +187,7 @@ void mdss_disable_irq_wake(struct mdss_hw *hw)
{
unsigned long irq_flags;
u32 ndx_bit;
bool err = false;
if (hw->hw_ndx >= MDSS_MAX_HW_BLK)
return;
@ -188,7 +200,7 @@ void mdss_disable_irq_wake(struct mdss_hw *hw)
spin_lock_irqsave(&mdss_lock, irq_flags);
if (!(hw->irq_info->irq_wake_mask & ndx_bit)) {
pr_warn("MDSS HW ndx=%d is NOT set\n", hw->hw_ndx);
err = true;
} else {
hw->irq_info->irq_wake_mask &= ~ndx_bit;
if (hw->irq_info->irq_wake_ena) {
@ -197,6 +209,9 @@ void mdss_disable_irq_wake(struct mdss_hw *hw)
}
}
spin_unlock_irqrestore(&mdss_lock, irq_flags);
if (err)
pr_warn("MDSS HW ndx=%d is NOT set\n", hw->hw_ndx);
}
struct mdss_util_intf mdss_util = {

View file

@ -1118,7 +1118,7 @@ static void tx_prune_dtd_list(struct edid_3d_data_t *mhl_edid_3d_data,
if ((0 != p_desc->dtd.pixel_clock_low) ||
(0 != p_desc->dtd.pixel_clock_high)) {
MHL_TX_EDID_INFO(
"pix clock non-zero p_desc:%p", p_desc)
"pix clock non-zero p_desc:%pK", p_desc)
if ((0 == p_desc->dtd.horz_active_7_0) &&
(0 == p_desc->dtd.horz_active_blanking_high.
horz_active_11_8)) {
@ -1722,9 +1722,10 @@ static void prune_svd_list(
("\n\nInvalid extension size\n\n"));
while (pb_src < pb_limit) {
MHL_TX_EDID_INFO(
"moving data up %p(0x%02X) "
"<- %p(0x%02X)\n",
pb_dest, (uint16_t)*pb_dest,
"moving data up %pK(0x%02X) ",
pb_dest, (uint16_t)*pb_dest);
MHL_TX_EDID_INFO(
"<- %pK(0x%02X)\n",
pb_src, (uint16_t)*pb_src);
*pb_dest++ = *pb_src++;
}

View file

@ -2081,7 +2081,11 @@ int mdss_dsi_clk_ctrl(struct mdss_dsi_ctrl_pdata *ctrl, void *clk_handle,
{
int rc = 0;
struct mdss_dsi_ctrl_pdata *mctrl = NULL;
int i;
int i, *vote_cnt;
void *m_clk_handle;
bool is_ecg = false;
int state = MDSS_DSI_CLK_OFF;
if (!ctrl) {
pr_err("%s: Invalid arg\n", __func__);
@ -2112,6 +2116,18 @@ int mdss_dsi_clk_ctrl(struct mdss_dsi_ctrl_pdata *ctrl, void *clk_handle,
__func__);
}
/*
* it should add and remove extra votes based on voting clients to avoid
* removal of legitimate vote from DSI client.
*/
if (mctrl && (clk_handle == ctrl->dsi_clk_handle)) {
m_clk_handle = mctrl->dsi_clk_handle;
vote_cnt = &mctrl->m_dsi_vote_cnt;
} else if (mctrl) {
m_clk_handle = mctrl->mdp_clk_handle;
vote_cnt = &mctrl->m_mdp_vote_cnt;
}
/*
* When DSI is used in split mode, the link clock for master controller
* has to be turned on first before the link clock for slave can be
@ -2124,18 +2140,24 @@ int mdss_dsi_clk_ctrl(struct mdss_dsi_ctrl_pdata *ctrl, void *clk_handle,
__func__, ctrl->ndx, clk_type, clk_state,
__builtin_return_address(0), mctrl ? 1 : 0);
if (mctrl && (clk_type & MDSS_DSI_LINK_CLK)) {
rc = mdss_dsi_clk_req_state(mctrl->dsi_clk_handle,
MDSS_DSI_ALL_CLKS,
MDSS_DSI_CLK_ON);
if (clk_state != MDSS_DSI_CLK_ON) {
/* preserve clk state; do not turn off forcefully */
is_ecg = is_dsi_clk_in_ecg_state(m_clk_handle);
if (is_ecg)
state = MDSS_DSI_CLK_EARLY_GATE;
}
rc = mdss_dsi_clk_req_state(m_clk_handle,
MDSS_DSI_ALL_CLKS, MDSS_DSI_CLK_ON, mctrl->ndx);
if (rc) {
pr_err("%s: failed to turn on mctrl clocks, rc=%d\n",
__func__, rc);
goto error;
}
ctrl->m_vote_cnt++;
(*vote_cnt)++;
}
rc = mdss_dsi_clk_req_state(clk_handle, clk_type, clk_state);
rc = mdss_dsi_clk_req_state(clk_handle, clk_type, clk_state, ctrl->ndx);
if (rc) {
pr_err("%s: failed set clk state, rc = %d\n", __func__, rc);
goto error;
@ -2164,24 +2186,24 @@ int mdss_dsi_clk_ctrl(struct mdss_dsi_ctrl_pdata *ctrl, void *clk_handle,
* for ON, since the previous ECG state must have
* removed two votes to let clocks turn off.
*
* To satisfy the above requirement, m_vote_cnt keeps track of
* To satisfy the above requirement, vote_cnt keeps track of
* the number of ON votes for master requested by slave. For
* every OFF/ECG state request, Either 2 or m_vote_cnt number of
* every OFF/ECG state request, Either 2 or vote_cnt number of
* votes are removed depending on which is lower.
*/
for (i = 0; (i < ctrl->m_vote_cnt && i < 2); i++) {
rc = mdss_dsi_clk_req_state(mctrl->dsi_clk_handle,
MDSS_DSI_ALL_CLKS,
MDSS_DSI_CLK_OFF);
for (i = 0; (i < *vote_cnt && i < 2); i++) {
rc = mdss_dsi_clk_req_state(m_clk_handle,
MDSS_DSI_ALL_CLKS, state, mctrl->ndx);
if (rc) {
pr_err("%s: failed to set mctrl clk state, rc = %d\n",
__func__, rc);
goto error;
}
}
ctrl->m_vote_cnt -= i;
pr_debug("%s: ctrl=%d, m_vote_cnt=%d\n", __func__, ctrl->ndx,
ctrl->m_vote_cnt);
(*vote_cnt) -= i;
pr_debug("%s: ctrl=%d, vote_cnt=%d dsi_vote_cnt=%d mdp_vote_cnt:%d\n",
__func__, ctrl->ndx, *vote_cnt, mctrl->m_dsi_vote_cnt,
mctrl->m_mdp_vote_cnt);
}
error:

View file

@ -177,6 +177,14 @@ struct clk_rate_request {
* @set_flags: Set custom flags which deals with hardware specifics. Returns 0
* on success, -EEROR otherwise.
*
* @list_registers: Queries the hardware to get the current register contents.
* This callback is optional and required clocks could
* add this callback.
*
* @list_rate: Return the nth supported frequency for a given clock which is
* below rate_max on success and -ENXIO in case of no frequency
* table.
*
* The clk_enable/clk_disable and clk_prepare/clk_unprepare pairs allow
* implementations to split any work between atomic (enable) and sleepable
* (prepare) contexts. If enabling a clock requires code that might sleep,
@ -217,6 +225,10 @@ struct clk_ops {
void (*init)(struct clk_hw *hw);
int (*debug_init)(struct clk_hw *hw, struct dentry *dentry);
int (*set_flags)(struct clk_hw *hw, unsigned flags);
void (*list_registers)(struct seq_file *f,
struct clk_hw *hw);
long (*list_rate)(struct clk_hw *hw, unsigned n,
unsigned long rate_max);
};
/**

View file

@ -421,7 +421,11 @@ static inline void bpf_prog_unlock_ro(struct bpf_prog *fp)
}
#endif /* CONFIG_DEBUG_SET_MODULE_RONX */
int sk_filter(struct sock *sk, struct sk_buff *skb);
int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap);
/*
 * sk_filter() - run the socket's attached BPF filter on @skb.
 *
 * Thin wrapper over sk_filter_trim_cap() with a trim cap of 1, which
 * presumably bounds how far the filter may truncate the skb — confirm
 * against sk_filter_trim_cap()'s definition in net/core/filter.c.
 */
static inline int sk_filter(struct sock *sk, struct sk_buff *skb)
{
	return sk_filter_trim_cap(sk, skb, 1);
}
int bpf_prog_select_runtime(struct bpf_prog *fp);
void bpf_prog_free(struct bpf_prog *fp);

View file

@ -98,7 +98,7 @@ enum ipa_dp_evt_type {
};
/**
* enum hdr_total_len_or_pad_type - type vof alue held by TOTAL_LEN_OR_PAD
* enum hdr_total_len_or_pad_type - type of value held by TOTAL_LEN_OR_PAD
* field in header configuration register.
* @IPA_HDR_PAD: field is used as padding length
* @IPA_HDR_TOTAL_LEN: field is used as total length
@ -432,6 +432,55 @@ struct ipa_ep_cfg_ctrl {
typedef void (*ipa_notify_cb)(void *priv, enum ipa_dp_evt_type evt,
unsigned long data);
/**
* enum ipa_wdi_meter_evt_type - type of event client callback is
* for AP+STA mode metering
* @IPA_GET_WDI_SAP_STATS: get IPA_stats betwen SAP and STA -
* use ipa_get_wdi_sap_stats structure
* @IPA_SET_WIFI_QUOTA: set quota limit on STA -
* use ipa_set_wifi_quota structure
*/
enum ipa_wdi_meter_evt_type {
	/* query SAP<->STA stats; payload is struct ipa_get_wdi_sap_stats */
	IPA_GET_WDI_SAP_STATS,
	/* set/unset STA quota; payload is struct ipa_set_wifi_quota */
	IPA_SET_WIFI_QUOTA,
};
/**
 * struct ipa_get_wdi_sap_stats - payload for the IPA_GET_WDI_SAP_STATS
 * metering event; byte/packet counters for SAP<->STA traffic, filled in
 * by the wlan firmware side of the callback.
 */
struct ipa_get_wdi_sap_stats {
	/* indicate to reset stats after query */
	uint8_t reset_stats;
	/* indicate valid stats from wlan-fw */
	uint8_t stats_valid;
	/* Tx: SAP->STA */
	uint64_t ipv4_tx_packets;
	uint64_t ipv4_tx_bytes;
	/* Rx: STA->SAP */
	uint64_t ipv4_rx_packets;
	uint64_t ipv4_rx_bytes;
	/* IPv6 counters, same Tx/Rx directions as the IPv4 fields above */
	uint64_t ipv6_tx_packets;
	uint64_t ipv6_tx_bytes;
	uint64_t ipv6_rx_packets;
	uint64_t ipv6_rx_bytes;
};
/**
* struct ipa_set_wifi_quota - structure used for
* IPA_SET_WIFI_QUOTA.
*
* @quota_bytes: Quota (in bytes) for the STA interface.
* @set_quota: Indicate whether to set the quota (use 1) or
* unset the quota.
*
*/
struct ipa_set_wifi_quota {
	/* quota limit in bytes for the STA interface */
	uint64_t quota_bytes;
	/* non-zero to set the quota, zero to unset it */
	uint8_t set_quota;
	/* indicate valid quota set from wlan-fw */
	uint8_t set_valid;
};
typedef void (*ipa_wdi_meter_notifier_cb)(enum ipa_wdi_meter_evt_type evt,
void *data);
/**
* struct ipa_connect_params - low-level client connect input parameters. Either
* client allocates the data and desc FIFO and specifies that in data+desc OR
@ -1001,6 +1050,7 @@ struct ipa_wdi_dl_params_smmu {
* @ul_smmu: WDI_RX configuration info when WLAN uses SMMU
* @dl_smmu: WDI_TX configuration info when WLAN uses SMMU
* @smmu_enabled: true if WLAN uses SMMU
* @ipa_wdi_meter_notifier_cb: Get WDI stats and quato info
*/
struct ipa_wdi_in_params {
struct ipa_sys_connect_params sys;
@ -1011,6 +1061,9 @@ struct ipa_wdi_in_params {
struct ipa_wdi_dl_params_smmu dl_smmu;
} u;
bool smmu_enabled;
#ifdef IPA_WAN_MSG_IPv6_ADDR_GW_LEN
ipa_wdi_meter_notifier_cb wdi_notify;
#endif
};
/**
@ -1265,6 +1318,9 @@ int ipa_resume_wdi_pipe(u32 clnt_hdl);
int ipa_suspend_wdi_pipe(u32 clnt_hdl);
int ipa_get_wdi_stats(struct IpaHwStatsWDIInfoData_t *stats);
u16 ipa_get_smem_restr_bytes(void);
int ipa_broadcast_wdi_quota_reach_ind(uint32_t fid,
uint64_t num_bytes);
/*
* To retrieve doorbell physical address of
* wlan pipes
@ -1845,6 +1901,12 @@ static inline int ipa_suspend_wdi_pipe(u32 clnt_hdl)
return -EPERM;
}
static inline int ipa_broadcast_wdi_quota_reach_ind(uint32_t fid,
uint64_t num_bytes)
{
return -EPERM;
}
static inline int ipa_uc_wdi_get_dbpa(
struct ipa_wdi_db_params *out)
{

View file

@ -53,6 +53,8 @@ extern unsigned int sysctl_sched_spill_nr_run;
extern unsigned int sysctl_sched_spill_load_pct;
extern unsigned int sysctl_sched_upmigrate_pct;
extern unsigned int sysctl_sched_downmigrate_pct;
extern unsigned int sysctl_sched_group_upmigrate_pct;
extern unsigned int sysctl_sched_group_downmigrate_pct;
extern unsigned int sysctl_early_detection_duration;
extern unsigned int sysctl_sched_boost;
extern unsigned int sysctl_sched_small_wakee_task_load_pct;

View file

@ -395,6 +395,15 @@ struct usb_bus {
struct mon_bus *mon_bus; /* non-null when associated */
int monitored; /* non-zero when monitored */
#endif
unsigned skip_resume:1; /* All USB devices are brought into full
* power state after system resume. It
* is desirable for some buses to keep
* their devices in suspend state even
* after system resume. The devices
* are resumed later when a remote
* wakeup is detected or an interface
* driver starts I/O.
*/
};
struct usb_dev_state;

View file

@ -1171,6 +1171,7 @@ static inline void tcp_prequeue_init(struct tcp_sock *tp)
}
bool tcp_prequeue(struct sock *sk, struct sk_buff *skb);
int tcp_filter(struct sock *sk, struct sk_buff *skb);
#undef STATE_TRACE

View file

@ -133,6 +133,7 @@ TRACE_EVENT(sched_task_load,
__field( u32, flags )
__field( int, best_cpu )
__field( u64, latency )
__field( int, grp_id )
),
TP_fast_assign(
@ -148,12 +149,13 @@ TRACE_EVENT(sched_task_load,
__entry->latency = p->state == TASK_WAKING ?
sched_ktime_clock() -
p->ravg.mark_start : 0;
__entry->grp_id = p->grp ? p->grp->id : 0;
),
TP_printk("%d (%s): demand=%u boost=%d reason=%d sync=%d need_idle=%d flags=%x best_cpu=%d latency=%llu",
TP_printk("%d (%s): demand=%u boost=%d reason=%d sync=%d need_idle=%d flags=%x grp=%d best_cpu=%d latency=%llu",
__entry->pid, __entry->comm, __entry->demand,
__entry->boost, __entry->reason, __entry->sync,
__entry->need_idle, __entry->flags,
__entry->need_idle, __entry->flags, __entry->grp_id,
__entry->best_cpu, __entry->latency)
);
@ -164,9 +166,12 @@ TRACE_EVENT(sched_set_preferred_cluster,
TP_ARGS(grp, total_demand),
TP_STRUCT__entry(
__field( int, id )
__field( u64, demand )
__field( int, cluster_first_cpu )
__field( int, id )
__field( u64, demand )
__field( int, cluster_first_cpu )
__array( char, comm, TASK_COMM_LEN )
__field( pid_t, pid )
__field(unsigned int, task_demand )
),
TP_fast_assign(
@ -245,19 +250,19 @@ DEFINE_EVENT(sched_cpu_load, sched_cpu_load_cgroup,
TRACE_EVENT(sched_set_boost,
TP_PROTO(int ref_count),
TP_PROTO(int type),
TP_ARGS(ref_count),
TP_ARGS(type),
TP_STRUCT__entry(
__field(unsigned int, ref_count )
__field(int, type )
),
TP_fast_assign(
__entry->ref_count = ref_count;
__entry->type = type;
),
TP_printk("ref_count=%d", __entry->ref_count)
TP_printk("type %d", __entry->type)
);
#if defined(CREATE_TRACE_POINTS) && defined(CONFIG_SCHED_HMP)

View file

@ -86,10 +86,4 @@ struct msm_sde_rotator_fence {
/* SDE Rotator private control ID's */
#define V4L2_CID_SDE_ROTATOR_SECURE (V4L2_CID_USER_BASE + 0x1000)
/*
* This control Id indicates this context is associated with the
* secure camera
*/
#define V4L2_CID_SDE_ROTATOR_SECURE_CAMERA (V4L2_CID_USER_BASE + 0x2000)
#endif /* __UAPI_MSM_SDE_ROTATOR_H__ */

View file

@ -15,7 +15,7 @@ obj-y += core.o loadavg.o clock.o cputime.o
obj-y += idle_task.o fair.o rt.o deadline.o stop_task.o
obj-y += wait.o completion.o idle.o sched_avg.o
obj-$(CONFIG_SMP) += cpupri.o cpudeadline.o
obj-$(CONFIG_SCHED_HMP) += hmp.o
obj-$(CONFIG_SCHED_HMP) += hmp.o boost.o
obj-$(CONFIG_SCHED_AUTOGROUP) += auto_group.o
obj-$(CONFIG_SCHEDSTATS) += stats.o
obj-$(CONFIG_SCHED_DEBUG) += debug.o

226
kernel/sched/boost.c Normal file
View file

@ -0,0 +1,226 @@
/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include "sched.h"
#include <linux/of.h>
#include <linux/sched/core_ctl.h>
#include <trace/events/sched.h>
/*
 * Scheduler boost is a mechanism to temporarily place tasks on CPUs
 * with higher capacity than those where a task would have normally
 * ended up with their load characteristics. Any entity enabling
 * boost is responsible for disabling it as well.
 */

/* Currently active boost type; written only under boost_mutex. */
unsigned int sysctl_sched_boost;
/* Placement policy derived from the active boost type (see set_boost_policy()). */
static enum sched_boost_policy boost_policy;
/* Policy override parsed from the "/sched-hmp" device tree node, if any. */
static enum sched_boost_policy boost_policy_dt = SCHED_BOOST_NONE;
/* Serializes all boost state transitions (sysctl and in-kernel callers). */
static DEFINE_MUTEX(boost_mutex);
/* Threshold saved when entering RESTRAINED_BOOST, restored on exit. */
static unsigned int freq_aggr_threshold_backup;
/*
 * Nudge @cpu with a reschedule IPI so it re-evaluates task placement
 * under the new boost policy.  The BOOST_KICK bit in rq->hmp_flags
 * de-bounces the IPI: it is sent only if a kick is not already pending
 * (the bit is cleared again via clear_boost_kick()).
 */
static inline void boost_kick(int cpu)
{
	struct rq *rq = cpu_rq(cpu);

	if (!test_and_set_bit(BOOST_KICK, &rq->hmp_flags))
		smp_send_reschedule(cpu);
}
/*
 * Kick every online, non-isolated CPU whose capacity is below the
 * maximum.  Only meaningful when the boost policy targets the big
 * cluster (SCHED_BOOST_ON_BIG); for any other policy tasks may run
 * anywhere, so no migration nudge is needed.
 */
static void boost_kick_cpus(void)
{
	int i;
	struct cpumask kick_mask;

	if (boost_policy != SCHED_BOOST_ON_BIG)
		return;

	/* Skip isolated CPUs: they must not receive scheduling IPIs. */
	cpumask_andnot(&kick_mask, cpu_online_mask, cpu_isolated_mask);
	for_each_cpu(i, &kick_mask) {
		if (cpu_capacity(i) != max_capacity)
			boost_kick(i);
	}
}
/*
 * Report whether the current CPU has a boost kick pending, i.e. whether
 * boost_kick() set BOOST_KICK on this CPU's runqueue flags.
 */
int got_boost_kick(void)
{
	struct rq *this_rq = cpu_rq(smp_processor_id());

	return test_bit(BOOST_KICK, &this_rq->hmp_flags);
}
void clear_boost_kick(int cpu)
{
struct rq *rq = cpu_rq(cpu);
clear_bit(BOOST_KICK, &rq->hmp_flags);
}
/*
 * Scheduler boost type and boost policy might at first seem unrelated,
 * however, there exists a connection between them that will allow us
 * to use them interchangeably during placement decisions. We'll explain
 * the connection here in one possible way so that the implications are
 * clear when looking at placement policies.
 *
 * When policy = SCHED_BOOST_NONE, type is either none or RESTRAINED
 * When policy = SCHED_BOOST_ON_ALL or SCHED_BOOST_ON_BIG, type can
 * neither be none nor RESTRAINED.
 */
static void set_boost_policy(int type)
{
	if (type == SCHED_BOOST_NONE || type == RESTRAINED_BOOST) {
		/* These types do not alter placement. */
		boost_policy = SCHED_BOOST_NONE;
	} else if (boost_policy_dt) {
		/* Device tree override wins when present. */
		boost_policy = boost_policy_dt;
	} else if (min_possible_efficiency != max_possible_efficiency) {
		/* Asymmetric system: steer boosted tasks to the big cluster. */
		boost_policy = SCHED_BOOST_ON_BIG;
	} else {
		/* Symmetric system: every CPU is equally "big". */
		boost_policy = SCHED_BOOST_ON_ALL;
	}
}
/* Return the placement policy currently in force for scheduler boost. */
enum sched_boost_policy sched_boost_policy(void)
{
	return boost_policy;
}
/*
 * A boost transition is valid only when it toggles between the off
 * state and some active type.  Switching directly from one active
 * type to another, or re-requesting the current state, is rejected.
 */
static bool verify_boost_params(int old_val, int new_val)
{
	return !!old_val != !!new_val;
}
/*
 * Apply the boost transition @old_val -> @type.  Caller must hold
 * boost_mutex and must already have validated the pair with
 * verify_boost_params(), so exactly one of the two values is NO_BOOST.
 * On success this updates boost_policy, publishes the new type in
 * sysctl_sched_boost and emits a trace event.
 */
static void _sched_set_boost(int old_val, int type)
{
	switch (type) {
	case NO_BOOST:
		/* Undo whatever the previously-active boost type set up. */
		if (old_val == FULL_THROTTLE_BOOST)
			core_ctl_set_boost(false);
		else if (old_val == CONSERVATIVE_BOOST)
			restore_cgroup_boost_settings();
		else
			/* old_val == RESTRAINED_BOOST */
			update_freq_aggregate_threshold(
				freq_aggr_threshold_backup);
		break;

	case FULL_THROTTLE_BOOST:
		core_ctl_set_boost(true);
		boost_kick_cpus();
		break;

	case CONSERVATIVE_BOOST:
		update_cgroup_boost_settings();
		boost_kick_cpus();
		break;

	case RESTRAINED_BOOST:
		/* Save the current threshold so NO_BOOST can restore it. */
		freq_aggr_threshold_backup =
			update_freq_aggregate_threshold(1);
		break;

	default:
		WARN_ON(1);
		return;
	}

	set_boost_policy(type);
	sysctl_sched_boost = type;
	trace_sched_set_boost(type);
}
void sched_boost_parse_dt(void)
{
struct device_node *sn;
const char *boost_policy;
if (!sched_enable_hmp)
return;
sn = of_find_node_by_path("/sched-hmp");
if (!sn)
return;
if (!of_property_read_string(sn, "boost-policy", &boost_policy)) {
if (!strcmp(boost_policy, "boost-on-big"))
boost_policy_dt = SCHED_BOOST_ON_BIG;
else if (!strcmp(boost_policy, "boost-on-all"))
boost_policy_dt = SCHED_BOOST_ON_ALL;
}
}
/*
 * In-kernel entry point for requesting boost type @type.  Returns 0 on
 * success, -EINVAL when HMP scheduling is disabled or the requested
 * transition is not a valid on/off toggle (see verify_boost_params()).
 */
int sched_set_boost(int type)
{
	int ret;

	if (!sched_enable_hmp)
		return -EINVAL;

	mutex_lock(&boost_mutex);

	if (verify_boost_params(sysctl_sched_boost, type)) {
		_sched_set_boost(sysctl_sched_boost, type);
		ret = 0;
	} else {
		ret = -EINVAL;
	}

	mutex_unlock(&boost_mutex);
	return ret;
}
/*
 * sysctl handler for sysctl_sched_boost.  Reads report the current
 * value; writes are validated with verify_boost_params() and applied
 * via _sched_set_boost().  An invalid write restores the previous
 * value and returns -EINVAL.
 */
int sched_boost_handler(struct ctl_table *table, int write,
			void __user *buffer, size_t *lenp,
			loff_t *ppos)
{
	unsigned int *data = (unsigned int *)table->data;
	unsigned int old_val;
	int ret;

	if (!sched_enable_hmp)
		return -EINVAL;

	mutex_lock(&boost_mutex);

	/* Snapshot the value so a rejected write can be rolled back. */
	old_val = *data;
	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);

	if (!ret && write) {
		if (verify_boost_params(old_val, *data)) {
			_sched_set_boost(old_val, *data);
		} else {
			*data = old_val;
			ret = -EINVAL;
		}
	}

	mutex_unlock(&boost_mutex);
	return ret;
}
/* Return the currently-active boost type (the sysctl_sched_boost value). */
int sched_boost(void)
{
	return sysctl_sched_boost;
}

Some files were not shown because too many files have changed in this diff Show more