diff --git a/Documentation/devicetree/bindings/arm/msm/diag_mhi.txt b/Documentation/devicetree/bindings/arm/msm/diag_mhi.txt new file mode 100644 index 000000000000..02f5103d5563 --- /dev/null +++ b/Documentation/devicetree/bindings/arm/msm/diag_mhi.txt @@ -0,0 +1,12 @@ +Qualcomm Technologies, Inc. Diag MHI Driver + +Required properties: +-compatible : should be "qcom,diag-mhi". +-qcom,mhi : phandle of MHI Device to connect to. + +Example: + qcom,diag { + compatible = "qcom,diag-mhi"; + qcom,mhi = <&mhi_wlan>; + }; + diff --git a/Documentation/devicetree/bindings/arm/msm/msm.txt b/Documentation/devicetree/bindings/arm/msm/msm.txt index d442cf02a816..d58b98f9a702 100644 --- a/Documentation/devicetree/bindings/arm/msm/msm.txt +++ b/Documentation/devicetree/bindings/arm/msm/msm.txt @@ -86,6 +86,9 @@ SoCs: - MSM8998 compatible = "qcom,msm8998" +- MSM8998_9x55 + compatible = "qcom,msm8998-9x55" + - MSMHAMSTER compatible = "qcom,msmhamster" @@ -166,6 +169,9 @@ Generic board variants: - RUMI device: compatible = "qcom,rumi" +- SVR device: + compatible = "qcom,svr" + Boards (SoC type + board variant): @@ -199,6 +205,7 @@ compatible = "qcom,apqtitanium-mtp" compatible = "qcom,apq8098-cdp" compatible = "qcom,apq8098-mtp" compatible = "qcom,apq8098-qrd" +compatible = "qcom,apq8098-svr" compatible = "qcom,mdm9630-cdp" compatible = "qcom,mdm9630-mtp" compatible = "qcom,mdm9630-sim" @@ -270,6 +277,8 @@ compatible = "qcom,msm8998-rumi" compatible = "qcom,msm8998-cdp" compatible = "qcom,msm8998-mtp" compatible = "qcom,msm8998-qrd" +compatible = "qcom,msm8998-9x55-cdp" +compatible = "qcom,msm8998-9x55-mtp" compatible = "qcom,msmhamster-rumi" compatible = "qcom,msmhamster-cdp" compatible = "qcom,msmhamster-mtp" diff --git a/Documentation/devicetree/bindings/arm/msm/msm_ipc_router_mhi_xprt.txt b/Documentation/devicetree/bindings/arm/msm/msm_ipc_router_mhi_xprt.txt index de5ab2c37967..2ef119e74bda 100644 --- a/Documentation/devicetree/bindings/arm/msm/msm_ipc_router_mhi_xprt.txt +++ b/Documentation/devicetree/bindings/arm/msm/msm_ipc_router_mhi_xprt.txt @@ -1,18 +1,20 @@ Qualcomm Technologies, Inc. IPC Router MHI Transport Required properties: --compatible: should be "qcom,ipc_router_mhi_xprt" --qcom,out-chan-id: MHI Channel ID for the transmit path --qcom,in-chan-id: MHI Channel ID for the receive path --qcom,xprt-remote: string that defines the edge of the transport (PIL Name) +-compatible: should be "qcom,ipc_router_mhi_xprt". +-qcom,mhi: phandle of MHI Device to connect to. +-qcom,out-chan-id: MHI Channel ID for the transmit path. +-qcom,in-chan-id: MHI Channel ID for the receive path. +-qcom,xprt-remote: string that defines the edge of the transport(PIL Name). -qcom,xprt-linkid: unique integer to identify the tier to which the link belongs to in the network and is used to avoid the - routing loops while forwarding the broadcast messages --qcom,xprt-version: unique version ID used by MHI transport header + routing loops while forwarding the broadcast messages. +-qcom,xprt-version: unique version ID used by MHI transport header. 
Example: qcom,ipc_router_external_modem_xprt2 { compatible = "qcom,ipc_router_mhi_xprt"; + qcom,mhi = <&mhi_wlan>; qcom,out-chan-id = <34>; qcom,in-chan-id = <35>; qcom,xprt-remote = "external-modem"; diff --git a/Documentation/devicetree/bindings/arm/msm/rdbg-smp2p.txt b/Documentation/devicetree/bindings/arm/msm/rdbg-smp2p.txt index ce2d8bd54e43..1114308f9436 100644 --- a/Documentation/devicetree/bindings/arm/msm/rdbg-smp2p.txt +++ b/Documentation/devicetree/bindings/arm/msm/rdbg-smp2p.txt @@ -2,12 +2,15 @@ Qualcomm Technologies, Inc. Remote Debugger (RDBG) driver Required properties: -compatible : Should be one of - To communicate with modem + To communicate with adsp qcom,smp2pgpio_client_rdbg_2_in (inbound) qcom,smp2pgpio_client_rdbg_2_out (outbound) To communicate with modem qcom,smp2pgpio_client_rdbg_1_in (inbound) qcom,smp2pgpio_client_rdbg_1_out (outbound) + To communicate with cdsp + qcom,smp2pgpio_client_rdbg_5_in (inbound) + qcom,smp2pgpio_client_rdbg_5_out (outbound) -gpios : the relevant gpio pins of the entry. Example: diff --git a/Documentation/devicetree/bindings/cnss/cnss-wlan.txt b/Documentation/devicetree/bindings/cnss/cnss-wlan.txt index 6d63d1123f4c..6aa3bfe4b1d8 100644 --- a/Documentation/devicetree/bindings/cnss/cnss-wlan.txt +++ b/Documentation/devicetree/bindings/cnss/cnss-wlan.txt @@ -11,8 +11,9 @@ the WLAN enable GPIO, 3.3V fixed voltage regulator resources. It also provides the reserved RAM dump memory location and size. Required properties: - - compatible: "qcom,cnss" - - wlan-en-gpio: WLAN_EN GPIO signal specified by QCA6174 specifications + - compatible: "qcom,cnss" for QCA6174 device + "qcom,cnss-qca6290" for QCA6290 device + - wlan-en-gpio: WLAN_EN GPIO signal specified by the chip specifications - vdd-wlan-supply: phandle to the regulator device tree node - pinctrl-names: Names corresponding to the numbered pinctrl states - pinctrl-: Pinctrl states as described in @@ -44,6 +45,13 @@ Optional properties: which should be drived depending on platforms - qcom,is-dual-wifi-enabled: Boolean property to control wlan enable(wlan-en) gpio on dual-wifi platforms. + - vdd-wlan-en-supply: WLAN_EN fixed regulator specified by QCA6174 specifications. + - qcom,wlan-en-vreg-support: Boolean property to decide whether the WLAN_EN pin + is a gpio or a fixed regulator. + - qcom,mhi: phandle to indicate the device which needs MHI support. + - qcom,cap-tsf-gpio: WLAN_TSF_CAPTURED GPIO signal specified by the chip + specifications, should be driven depending on + products Example: @@ -60,4 +68,6 @@ Example: pinctrl-0 = <&cnss_default>; qcom,wlan-rc-num = <0>; qcom,wlan-smmu-iova-address = <0 0x10000000>; + qcom,mhi = <&mhi_wlan>; + qcom,cap-tsf-gpio = <&tlmm 126 1>; }; diff --git a/Documentation/devicetree/bindings/cnss/icnss.txt b/Documentation/devicetree/bindings/cnss/icnss.txt index c801e8486f87..700a8f7b077e 100644 --- a/Documentation/devicetree/bindings/cnss/icnss.txt +++ b/Documentation/devicetree/bindings/cnss/icnss.txt @@ -28,6 +28,7 @@ Optional properties: - qcom,icnss-vadc: VADC handle for vph_pwr read APIs. - qcom,icnss-adc_tm: VADC handle for vph_pwr notification APIs.
- qcom,smmu-s1-bypass: Boolean context flag to set SMMU to S1 bypass + - qcom,wlan-msa-fixed-region: phandle, specifier pairs to children of /reserved-memory Example: @@ -54,6 +55,7 @@ Example: <0 140 0 /* CE10 */ >, <0 141 0 /* CE11 */ >; qcom,wlan-msa-memory = <0x200000>; + qcom,wlan-msa-fixed-region = <&wlan_msa_mem>; qcom,smmu-s1-bypass; vdd-0.8-cx-mx-supply = <&pm8998_l5>; qcom,vdd-0.8-cx-mx-config = <800000 800000 2400 1000>; diff --git a/Documentation/devicetree/bindings/drm/msm/mdp.txt b/Documentation/devicetree/bindings/drm/msm/mdp.txt index 3a6db0553fe3..a76b604445bd 100644 --- a/Documentation/devicetree/bindings/drm/msm/mdp.txt +++ b/Documentation/devicetree/bindings/drm/msm/mdp.txt @@ -3,6 +3,7 @@ Qualcomm Technologies,Inc. Adreno/Snapdragon display controller Required properties: Optional properties: +- contiguous-region: reserved memory for HDMI and DSI buffer. - qcom,sde-plane-id-map: plane id mapping for virtual plane. - qcom,sde-plane-id: each virtual plane mapping node. - reg: reg property. @@ -17,6 +18,8 @@ Optional properties: Example: &mdss_mdp { + contiguous-region = <&cont_splash_mem &cont_splash_mem_hdmi>; + qcom,sde-plane-id-map { qcom,sde-plane-id@0 { reg = <0x0>; diff --git a/Documentation/devicetree/bindings/fb/mdss-dsi-panel.txt b/Documentation/devicetree/bindings/fb/mdss-dsi-panel.txt index 1f8458cd0659..cc55f6e2bfa0 100644 --- a/Documentation/devicetree/bindings/fb/mdss-dsi-panel.txt +++ b/Documentation/devicetree/bindings/fb/mdss-dsi-panel.txt @@ -187,6 +187,10 @@ Optional properties: "bl_ctrl_wled" = Backlight controlled by WLED. "bl_ctrl_dcs" = Backlight controlled by DCS commands. other: Unknown backlight control. (default) +- qcom,mdss-dsi-bl-dcs-command-state: A string that specifies the ctrl state for sending brightness + controlling commands, this is only available when backlight is controlled by DCS commands. + "dsi_lp_mode" = DSI low power mode (default). + "dsi_hs_mode" = DSI high speed mode. - qcom,mdss-dsi-bl-pwm-pmi: Boolean to indicate that PWM control is through second pmic chip. - qcom,mdss-dsi-bl-pmic-bank-select: LPG channel for backlight. Requred if blpmiccontroltype is PWM diff --git a/Documentation/devicetree/bindings/gpu/adreno.txt b/Documentation/devicetree/bindings/gpu/adreno.txt index d8c3a7c35465..80813dd1b3e5 100644 --- a/Documentation/devicetree/bindings/gpu/adreno.txt +++ b/Documentation/devicetree/bindings/gpu/adreno.txt @@ -93,6 +93,7 @@ Optional Properties: - qcom,chipid: If it exists this property is used to replace the chip identification read from the GPU hardware. This is used to override faulty hardware readings. +- qcom,disable-wake-on-touch: Boolean. Disables the GPU power up on a touch input event. - qcom,disable-busy-time-burst: Boolean. Disables the busy time burst to avoid switching of power level for large frames based on the busy time limit. @@ -141,6 +142,9 @@ Optional Properties: rendering thread is running on masked CPUs. Bit 0 is for CPU-0, bit 1 is for CPU-1... +- qcom,l2pc-update-queue: + Disables L2PC on masked CPUs at queue time when it's true. + - qcom,snapshot-size: Specify the size of snapshot in bytes. This will override snapshot size defined in the driver code. 
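Illustrative usage of the two new GPU properties documented above (the node label &msm_gpu, the mask value, and the qcom,l2pc-cpu-mask property name are assumptions for this sketch, not taken from the patch):

	&msm_gpu {
		/* keep the GPU powered down on touch input events */
		qcom,disable-wake-on-touch;
		/* mask of CPUs whose activity restricts L2PC; bit 0 = CPU-0 */
		qcom,l2pc-cpu-mask = <0x3>;
		/* also disable L2PC on the masked CPUs at command-queue time */
		qcom,l2pc-update-queue;
	};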
diff --git a/Documentation/devicetree/bindings/leds/leds-qpnp-wled.txt b/Documentation/devicetree/bindings/leds/leds-qpnp-wled.txt index 1e6aac56c44e..42e97f765bee 100644 --- a/Documentation/devicetree/bindings/leds/leds-qpnp-wled.txt +++ b/Documentation/devicetree/bindings/leds/leds-qpnp-wled.txt @@ -78,6 +78,8 @@ Optional properties for WLED: - qcom,lcd-psm-ctrl : A boolean property to specify if PSM needs to be controlled dynamically when WLED module is enabled or disabled. +- qcom,auto-calibration-enable : A boolean property which enables auto-calibration + of the WLED sink configuration. Optional properties if 'qcom,disp-type-amoled' is mentioned in DT: - qcom,loop-comp-res-kohm : control to select the compensation resistor in kohm. default is 320. diff --git a/Documentation/devicetree/bindings/media/video/msm-ba.txt b/Documentation/devicetree/bindings/media/video/msm-ba.txt new file mode 100644 index 000000000000..9a6fe4d7e8ae --- /dev/null +++ b/Documentation/devicetree/bindings/media/video/msm-ba.txt @@ -0,0 +1,41 @@ +* Qualcomm Technologies Inc MSM BA + +[Root level node] +================== +Required properties: +- compatible: Must be "qcom,msm-ba". + +[Subnode] +========== +- qcom,ba-input-profile-#: Defines child nodes for the profiles supported + by BA driver. Each profile should have properties "qcom,type", + "qcom,name", "qcom,ba-input", "qcom,ba-output", "qcom,sd-name", + "qcom,ba-node" and "qcom,user-type". +Required properties: +- qcom,type: Input type such as CVBS(0), HDMI(4) etc as defined in BA driver. + This property is of type u32. +- qcom,name: Name of the input type. This property is of type string. +- qcom,ba-input: BA input id supported by a bridge chip for this profile. + This property is of type u32. +- qcom,ba-output: BA output id for the profile. This property is of type u32. +- qcom,sd-name: Name of the sub-device driver associated with this profile. + This property is of type string. +- qcom,ba-node: Defines the ba node id. This is the avdevice node used by camera + for this profile. This property is of type u32. +- qcom,user-type: This property defines how the profile is being used. If this + profile is used by kernel it is set to 0 and if used by userspace + it is set to 1. This property is of type u32. 
+Example: + + qcom,msm-ba { + compatible = "qcom,msm-ba"; + qcom,ba-input-profile-0 { + qcom,type = <4>; /* input type */ + qcom,name = "HDMI-1"; /* input name */ + qcom,ba-input = <13>; /* ba input id */ + qcom,ba-output = <0>; /* ba output id */ + qcom,sd-name = "adv7481"; /* sd name */ + qcom,ba-node = <0>; /* ba node */ + qcom,user-type = <1>; /* user type */ + }; + }; diff --git a/Documentation/devicetree/bindings/media/video/msm-cci.txt b/Documentation/devicetree/bindings/media/video/msm-cci.txt index 9fb84020add7..bb413af4b54d 100644 --- a/Documentation/devicetree/bindings/media/video/msm-cci.txt +++ b/Documentation/devicetree/bindings/media/video/msm-cci.txt @@ -123,6 +123,9 @@ Optional properties: - qcom,gpio-vdig : should contain index to gpio used by sensors digital vreg enable - qcom,gpio-vaf : should contain index to gpio used by sensors af vreg enable - qcom,gpio-af-pwdm : should contain index to gpio used by sensors af pwdm_n +- qcom,gpio-custom1 : should contain index to gpio used by sensors specific to usecase +- qcom,gpio-custom2 : should contain index to gpio used by sensors specific to usecase +- qcom,gpio-custom3 : should contain index to gpio used by sensors specific to usecase - qcom,gpio-req-tbl-num : should contain index to gpios specific to this sensor - qcom,gpio-req-tbl-flags : should contain direction of gpios present in qcom,gpio-req-tbl-num property (in the same order) diff --git a/Documentation/devicetree/bindings/msm_hdcp/msm_hdcp.txt b/Documentation/devicetree/bindings/msm_hdcp/msm_hdcp.txt new file mode 100644 index 000000000000..8d5f55d7a8ca --- /dev/null +++ b/Documentation/devicetree/bindings/msm_hdcp/msm_hdcp.txt @@ -0,0 +1,14 @@ +MSM HDCP driver + +Standalone driver managing HDCP related communications +between TZ and HLOS for MSM chipset. + +Required properties: + +compatible = "qcom,msm-hdcp"; + +Example: + +qcom_msmhdcp: qcom,msm_hdcp { + compatible = "qcom,msm-hdcp"; +}; diff --git a/Documentation/devicetree/bindings/pci/msm_pcie.txt b/Documentation/devicetree/bindings/pci/msm_pcie.txt index fc019bda50a7..bf3ad8a71c26 100644 --- a/Documentation/devicetree/bindings/pci/msm_pcie.txt +++ b/Documentation/devicetree/bindings/pci/msm_pcie.txt @@ -97,6 +97,9 @@ Optional Properties: and assign for each endpoint. - qcom,ep-latency: The time (unit: ms) to wait for the PCIe endpoint to become stable after power on, before de-assert the PERST to the endpoint. + - qcom,switch-latency: The time (unit: ms) to wait for the PCIe endpoint's link + training with switch downstream port after the link between switch upstream + port and RC is up. - qcom,wr-halt-size: With base 2, this exponent determines the size of the data that PCIe core will halt on for each write transaction. - qcom,cpl-timeout: Completion timeout value. This value specifies the time range @@ -276,6 +279,7 @@ Example: qcom,smmu-exist; qcom,smmu-sid-base = <0x1480>; qcom,ep-latency = <100>; + qcom,switch-latency = <100>; qcom,wr-halt-size = <0xa>; /* 1KB */ qcom,cpl-timeout = <0x2>; diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,msm8998-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/qcom,msm8998-pinctrl.txt new file mode 100644 index 000000000000..8a9514153172 --- /dev/null +++ b/Documentation/devicetree/bindings/pinctrl/qcom,msm8998-pinctrl.txt @@ -0,0 +1,201 @@ +Qualcomm Technologies, Inc. MSM8998 TLMM block + +This binding describes the Top Level Mode Multiplexer block found in the +MSM8998 platform. 
With new GPIOs tiling, GPIO pins are +grouped into various cores - NORTH, WEST, EAST. TLMM_GPIO_ID_STATUSn +register's value for a GPIO pin decides the core location for it. + +- compatible: + Usage: required + Value type: + Definition: must be "qcom,msm8998-pinctrl" + +- reg: + Usage: required + Value type: + Definition: the base address and size of the TLMM register space. + +- interrupts: + Usage: required + Value type: + Definition: should specify the TLMM summary IRQ. + +- interrupt-controller: + Usage: required + Value type: + Definition: identifies this node as an interrupt controller + +- #interrupt-cells: + Usage: required + Value type: + Definition: must be 2. Specifying the pin number and flags, as defined + in + +- gpio-controller: + Usage: required + Value type: + Definition: identifies this node as a gpio controller + +- #gpio-cells: + Usage: required + Value type: + Definition: must be 2. Specifying the pin number and flags, as defined + in + +Please refer to ../gpio/gpio.txt and ../interrupt-controller/interrupts.txt for +a general description of GPIO and interrupt bindings. + +Please refer to pinctrl-bindings.txt in this directory for details of the +common pinctrl bindings used by client devices, including the meaning of the +phrase "pin configuration node". + +The pin configuration nodes act as a container for an arbitrary number of +subnodes. Each of these subnodes represents some desired configuration for a +pin, a group, or a list of pins or groups. This configuration can include the +mux function to select on those pin(s)/group(s), and various pin configuration +parameters, such as pull-up, drive strength, etc. + + +PIN CONFIGURATION NODES: + +The name of each subnode is not important; all subnodes should be enumerated +and processed purely based on their content. + +Each subnode only affects those parameters that are explicitly listed. In +other words, a subnode that lists a mux function but no pin configuration +parameters implies no information about any pin configuration parameters. +Similarly, a pin subnode that describes a pullup parameter implies no +information about e.g. the mux function. + + +The following generic properties as defined in pinctrl-bindings.txt are valid +to specify in a pin configuration subnode: + +- pins: + Usage: required + Value type: + Definition: List of gpio pins affected by the properties specified in + this subnode. Valid pins are: + gpio0-gpio149, + sdc1_clk, + sdc1_cmd, + sdc1_data + sdc2_clk, + sdc2_cmd, + sdc2_data + sdc1_rclk, + +- function: + Usage: required + Value type: + Definition: Specify the alternative function to be configured for the + specified pins. Functions are only valid for gpio pins. 
+ Valid values are: + + blsp_uart1, blsp_spi1, blsp_i2c1, blsp_uim1, atest_tsens, + bimc_dte1, dac_calib0, blsp_spi8, blsp_uart8, blsp_uim8, + qdss_cti_trig_out_b, bimc_dte0, dac_calib1, qdss_cti_trig_in_b, + dac_calib2, atest_tsens2, atest_usb1, blsp_spi10, blsp_uart10, + blsp_uim10, atest_bbrx1, atest_usb13, atest_bbrx0, atest_usb12, + mdp_vsync, edp_lcd, blsp_i2c10, atest_gpsadc1, atest_usb11, + atest_gpsadc0, edp_hot, atest_usb10, m_voc, dac_gpio, atest_char, + cam_mclk, pll_bypassnl, qdss_stm7, blsp_i2c8, qdss_tracedata_b, + pll_reset, qdss_stm6, qdss_stm5, qdss_stm4, atest_usb2, cci_i2c, + qdss_stm3, dac_calib3, atest_usb23, atest_char3, dac_calib4, + qdss_stm2, atest_usb22, atest_char2, qdss_stm1, dac_calib5, + atest_usb21, atest_char1, dbg_out, qdss_stm0, dac_calib6, + atest_usb20, atest_char0, dac_calib10, qdss_stm10, + qdss_cti_trig_in_a, cci_timer4, blsp_spi6, blsp_uart6, blsp_uim6, + blsp2_spi, qdss_stm9, qdss_cti_trig_out_a, dac_calib11, + qdss_stm8, cci_timer0, qdss_stm13, dac_calib7, cci_timer1, + qdss_stm12, dac_calib8, cci_timer2, blsp1_spi, qdss_stm11, + dac_calib9, cci_timer3, cci_async, dac_calib12, blsp_i2c6, + qdss_tracectl_a, dac_calib13, qdss_traceclk_a, dac_calib14, + dac_calib15, hdmi_rcv, dac_calib16, hdmi_cec, pwr_modem, + dac_calib17, hdmi_ddc, pwr_nav, dac_calib18, pwr_crypto, + dac_calib19, hdmi_hot, dac_calib20, dac_calib21, pci_e0, + dac_calib22, dac_calib23, dac_calib24, tsif1_sync, dac_calib25, + sd_write, tsif1_error, blsp_spi2, blsp_uart2, blsp_uim2, + qdss_cti, blsp_i2c2, blsp_spi3, blsp_uart3, blsp_uim3, blsp_i2c3, + uim3, blsp_spi9, blsp_uart9, blsp_uim9, blsp10_spi, blsp_i2c9, + blsp_spi7, blsp_uart7, blsp_uim7, qdss_tracedata_a, blsp_i2c7, + qua_mi2s, gcc_gp1_clk_a, ssc_irq, uim4, blsp_spi11, blsp_uart11, + blsp_uim11, gcc_gp2_clk_a, gcc_gp3_clk_a, blsp_i2c11, cri_trng0, + cri_trng1, cri_trng, qdss_stm18, pri_mi2s, qdss_stm17, blsp_spi4, + blsp_uart4, blsp_uim4, qdss_stm16, qdss_stm15, blsp_i2c4, + qdss_stm14, dac_calib26, spkr_i2s, audio_ref, lpass_slimbus, + isense_dbg, tsense_pwm1, tsense_pwm2, btfm_slimbus, ter_mi2s, + qdss_stm22, qdss_stm21, qdss_stm20, qdss_stm19, gcc_gp1_clk_b, + sec_mi2s, blsp_spi5, blsp_uart5, blsp_uim5, gcc_gp2_clk_b, + gcc_gp3_clk_b, blsp_i2c5, blsp_spi12, blsp_uart12, blsp_uim12, + qdss_stm25, qdss_stm31, blsp_i2c12, qdss_stm30, qdss_stm29, + tsif1_clk, qdss_stm28, tsif1_en, tsif1_data, sdc4_cmd, qdss_stm27, + qdss_traceclk_b, tsif2_error, sdc43, vfr_1, qdss_stm26, tsif2_clk, + sdc4_clk, qdss_stm24, tsif2_en, sdc42, qdss_stm23, qdss_tracectl_b, + sd_card, tsif2_data, sdc41, tsif2_sync, sdc40, mdp_vsync_p_b, + ldo_en, mdp_vsync_s_b, ldo_update, blsp11_uart_tx_b, blsp11_uart_rx_b, + blsp11_i2c_sda_b, prng_rosc, blsp11_i2c_scl_b, uim2, uim1, uim_batt, + pci_e2, pa_indicator, adsp_ext, ddr_bist, qdss_tracedata_11, + qdss_tracedata_12, modem_tsync, nav_dr, nav_pps, pci_e1, gsm_tx, + qspi_cs, ssbi2, ssbi1, mss_lte, qspi_clk, qspi0, qspi1, qspi2, qspi3, + gpio + +- bias-disable: + Usage: optional + Value type: + Definition: The specified pins should be configued as no pull. + +- bias-pull-down: + Usage: optional + Value type: + Definition: The specified pins should be configued as pull down. + +- bias-pull-up: + Usage: optional + Value type: + Definition: The specified pins should be configued as pull up. + +- output-high: + Usage: optional + Value type: + Definition: The specified pins are configured in output mode, driven + high. + Not valid for sdc pins. 
+ +- output-low: + Usage: optional + Value type: + Definition: The specified pins are configured in output mode, driven + low. + Not valid for sdc pins. + +- drive-strength: + Usage: optional + Value type: + Definition: Selects the drive strength for the specified pins, in mA. + Valid values are: 2, 4, 6, 8, 10, 12, 14 and 16 + +Example: + + tlmm: pinctrl@01010000 { + compatible = "qcom,msm8998-pinctrl"; + reg = <0x01010000 0x300000>; + interrupts = <0 208 0>; + gpio-controller; + #gpio-cells = <2>; + interrupt-controller; + #interrupt-cells = <2>; + + uart_console_active: uart_console_active { + mux { + pins = "gpio4", "gpio5"; + function = "blsp_uart8"; + }; + + config { + pins = "gpio4", "gpio5"; + drive-strength = <2>; + bias-disable; + }; + }; + }; diff --git a/Documentation/devicetree/bindings/platform/msm/qpnp-revid.txt b/Documentation/devicetree/bindings/platform/msm/qpnp-revid.txt index babc4523a29a..dd14890123e6 100644 --- a/Documentation/devicetree/bindings/platform/msm/qpnp-revid.txt +++ b/Documentation/devicetree/bindings/platform/msm/qpnp-revid.txt @@ -9,6 +9,8 @@ Required properties: Optional property: - qcom,fab-id-valid: Use this property when support to read Fab identification from REV ID peripheral is available. +- qcom,tp-rev-valid: Use this property when support to read TP + revision identification from REV ID peripheral. Example: qcom,revid@100 { diff --git a/Documentation/devicetree/bindings/power/supply/qcom/qpnp-smb2.txt b/Documentation/devicetree/bindings/power/supply/qcom/qpnp-smb2.txt index 468db388b0a6..f01eae10bf4f 100644 --- a/Documentation/devicetree/bindings/power/supply/qcom/qpnp-smb2.txt +++ b/Documentation/devicetree/bindings/power/supply/qcom/qpnp-smb2.txt @@ -85,21 +85,6 @@ Charger specific properties: maximum charge current in mA for each thermal level. -- qcom,step-soc-thresholds - Usage: optional - Value type: Array of - Definition: Array of SOC threshold values, size of 4. This should be a - flat array that denotes the percentage ranging from 0 to 100. - If the array is not present, step charging is disabled. - -- qcom,step-current-deltas - Usage: optional - Value type: Array of - Definition: Array of delta values for charging current, size of 5, with - FCC as base. This should be a flat array that denotes the - offset of charging current in uA, from -3100000 to 3200000. - If the array is not present, step charging is disabled. - - io-channels Usage: optional Value type: List of @@ -182,6 +167,22 @@ Charger specific properties: Definition: Specifies the deglitch interval for OTG detection. If the value is not present, 50 msec is used as default. +- qcom,step-charging-enable + Usage: optional + Value type: bool + Definition: Boolean flag which when present enables step-charging. + +- qcom,wd-bark-time-secs + Usage: optional + Value type: + Definition: WD bark-timeout in seconds. The possible values are + 16, 32, 64, 128. If not defined it defaults to 64. 
+ +- qcom,sw-jeita-enable + Usage: optional + Value type: bool + Definition: Boolean flag which when present enables sw compensation for jeita + ============================================= Second Level Nodes - SMB2 Charger Peripherals ============================================= @@ -217,9 +218,6 @@ pmi8998_charger: qcom,qpnp-smb2 { dpdm-supply = <&qusb_phy0>; - qcom,step-soc-thresholds = <60 70 80 90>; - qcom,step-current-deltas = <500000 250000 150000 0 (-150000)>; - qcom,chgr@1000 { reg = <0x1000 0x100>; interrupts = <0x2 0x10 0x0 IRQ_TYPE_NONE>, diff --git a/Documentation/devicetree/bindings/qdsp/msm-fastrpc.txt b/Documentation/devicetree/bindings/qdsp/msm-fastrpc.txt index f419655722d4..376af82381f2 100644 --- a/Documentation/devicetree/bindings/qdsp/msm-fastrpc.txt +++ b/Documentation/devicetree/bindings/qdsp/msm-fastrpc.txt @@ -11,6 +11,8 @@ Required properties: Optional properties: - qcom,fastrpc-glink: Flag to use glink instead of smd for IPC +- qcom,fastrpc-vmid-heap-shared: Flag for Dynamic heap feature, to + share HLOS memory buffer to ADSP Optional subnodes: - qcom,msm_fastrpc_compute_cb : Child nodes representing the compute context @@ -25,6 +27,7 @@ Example: qcom,msm_fastrpc { compatible = "qcom,msm-fastrpc-adsp"; qcom,fastrpc-glink; + qcom,fastrpc-vmid-heap-shared; qcom,msm_fastrpc_compute_cb_1 { compatible = "qcom,msm-fastrpc-compute-cb"; diff --git a/Documentation/devicetree/bindings/regulator/max20010.txt b/Documentation/devicetree/bindings/regulator/max20010.txt new file mode 100644 index 000000000000..3dd8f6d1cf19 --- /dev/null +++ b/Documentation/devicetree/bindings/regulator/max20010.txt @@ -0,0 +1,77 @@ +Binding for Maxim MAX20010 regulator + +MAX20010 is a synchronous step-down converter. It is able to deliver upto 6A +with 2 different programmable output voltages from 0.5V to 1.27V in 10mV steps +and from 0.625V to 1.5875V in 12.5mV steps. It supports synchronous +rectification and automatic PWM/PFM transitions. + +The MAX20010 interface is via I2C bus. + +======================= +Supported Properties +======================= + +- compatible + Usage: required + Value type: + Definition: should be "maxim,max20010". + +- reg + Usage: required + Value type: + Definition: The device 8-bit I2C address. + +- vin-supply + Usage: optional + Value type: + Definition: This is the phandle for the parent regulator. Typically used + for EN pin control of the buck. + +- regulator-initial-mode + Usage: optional + Value type: + Definition: The regulator operating mode. Should be either + "MAX20010_OPMODE_SYNC" or "MAX20010_OPMODE_FPWM". + These constants are defined in file + include/dt-bindings/regulator/max20010.h + +- maxim,vrange-sel + Usage: optional + Value type: + Definition: Integer value specifies the voltage range to be used. + Supported values are 0 or 1. + Value 0 supports voltage range from 0.5V to 1.27V in 10mV + steps. Value 1 supports voltage range from 0.625V to 1.5875V + in 12.5mV steps. + +- maxim,soft-start-slew-rate + Usage: optional + Value type: + Definition: An integer value specifies the slew rate in uV/uS to be used + for soft-start operation of the buck. Supported values are + 5500, 11000, 22000 and 44000. + +- maxim,dvs-slew-rate + Usage: optional + Value type: + Definition: An integer value specifies the slew rate in uV/uS to be used + for buck dynamic voltage scaling operations. Supported + values are 5500, 11000, 22000 and 44000. 
+ +======= +Example +======= + + i2c_0 { + max20010-regulator@74 { + compatible = "maxim,max20010"; + reg = <0x74>; + vin-supply = <&parent_reg>; + regulator-min-microvolt = <600000>; + regulator-max-microvolt = <1270000>; + regulator-initial-mode = ; + maxim,vrange-sel = <0>; + maxim,soft-start-slew-rate = <5500>; + maxim,dvs-slew-rate = <5500>; + }; + }; diff --git a/Documentation/devicetree/bindings/regulator/qpnp-lcdb-regulator.txt b/Documentation/devicetree/bindings/regulator/qpnp-lcdb-regulator.txt index ed383ce9ea8f..9798ac60b493 100644 --- a/Documentation/devicetree/bindings/regulator/qpnp-lcdb-regulator.txt +++ b/Documentation/devicetree/bindings/regulator/qpnp-lcdb-regulator.txt @@ -209,6 +209,12 @@ Properties below are specific to BOOST subnode only. Definition: Current limit (in mA) of the BOOST rail. Possible values are 200 to 1600mA in 200mA steps. +- qcom,bst-headroom-mv + Usage: optional + Value type: + Definition: Headroom of the boost (in mV). The minimum headroom is + 200mV and if not specified defaults to 200mV. + ======= Example ======= @@ -250,5 +256,6 @@ pm660l_lcdb: qpnp-lcdb@ec00 { qcom,bst-pd-strength = <1>; qcom,bst-ps = <1>; qcom,bst-ps-threshold-ma = <50>; + qcom,bst-headroom-mv = <200>; }; }; diff --git a/Documentation/devicetree/bindings/soc/qcom/msm-early-cam.txt b/Documentation/devicetree/bindings/soc/qcom/msm-early-cam.txt new file mode 100644 index 000000000000..388426f44524 --- /dev/null +++ b/Documentation/devicetree/bindings/soc/qcom/msm-early-cam.txt @@ -0,0 +1,110 @@ +* Qualcomm Technologies Inc MSM Early Camera + +[Root level node] +================== +Required properties: +- compatible: Must be "qcom,early-cam". + +[Subnode] +========== +- qcom,early-cam-input-profile-#: Defines child nodes for the profiles supported + by the early camera driver. Each profile should have properties + "mmagic-supply", "gdscr-supply", "vfe0-vdd-supply", + "qcom,cam-vreg-name", "clocks", "clock-names", + "qcom,clock-rates". +Required properties: +- mmagic-supply : should contain mmagic regulator used for mmagic clocks. +- gdscr-supply : should contain gdsc regulator used for cci clocks. +- vfe0-vdd-supply: phandle to vfe0 regulator. +- qcom,cam-vreg-name : name of the voltage regulators required for the device. +- clocks: List of clock handles. The parent clocks of the input clocks to the + devices in this power domain are set to oscclk before power gating + and restored back after powering on a domain. This is required for + all domains which are powered on and off and not required for unused + domains. +- clock-names: name of the clock used by the driver. +- qcom,clock-rates: clock rate in Hz.
+Example: + + qcom,early-cam { + cell-index = <0>; + compatible = "qcom,early-cam"; + status = "ok"; + mmagic-supply = <&gdsc_mmagic_camss>; + gdscr-supply = <&gdsc_camss_top>; + vfe0-vdd-supply = <&gdsc_vfe0>; + qcom,cam-vreg-name = "mmagic", "gdscr", "vfe0-vdd"; + clocks = <&clock_mmss clk_mmss_mmagic_ahb_clk>, + <&clock_mmss clk_camss_top_ahb_clk>, + <&clock_mmss clk_cci_clk_src>, + <&clock_mmss clk_camss_cci_ahb_clk>, + <&clock_mmss clk_camss_cci_clk>, + <&clock_mmss clk_camss_ahb_clk>, + <&clock_mmss clk_mmagic_camss_axi_clk>, + <&clock_mmss clk_camss_vfe_ahb_clk>, + <&clock_mmss clk_camss_vfe0_ahb_clk>, + <&clock_mmss clk_camss_vfe_axi_clk>, + <&clock_mmss clk_camss_vfe0_stream_clk>, + <&clock_mmss clk_smmu_vfe_axi_clk>, + <&clock_mmss clk_smmu_vfe_ahb_clk>, + <&clock_mmss clk_camss_csi_vfe0_clk>, + <&clock_mmss clk_vfe0_clk_src>, + <&clock_mmss clk_camss_csi_vfe0_clk>, + <&clock_mmss clk_camss_csi2_ahb_clk>, + <&clock_mmss clk_camss_csi2_clk>, + <&clock_mmss clk_camss_csi2phy_clk>, + <&clock_mmss clk_csi2phytimer_clk_src>, + <&clock_mmss clk_camss_csi2phytimer_clk>, + <&clock_mmss clk_camss_csi2rdi_clk>, + <&clock_mmss clk_camss_ispif_ahb_clk>, + <&clock_mmss clk_camss_vfe0_clk>; + clock-names = + "mmss_mmagic_ahb_clk", + "camss_top_ahb_clk", + "cci_clk_src", + "camss_cci_ahb_clk", + "camss_cci_clk", + "camss_ahb_clk", + "mmagic_camss_axi_clk", + "camss_vfe_ahb_clk", + "camss_vfe0_ahb_clk", + "camss_vfe_axi_clk", + "camss_vfe0_stream_clk", + "smmu_vfe_axi_clk", + "smmu_vfe_ahb_clk", + "camss_csi_vfe0_clk", + "vfe0_clk_src", + "camss_csi_vfe0_clk", + "camss_csi2_ahb_clk", + "camss_csi2_clk", + "camss_csi2phy_clk", + "csi2phytimer_clk_src", + "camss_csi2phytimer_clk", + "camss_csi2rdi_clk", + "camss_ispif_ahb_clk", + "clk_camss_vfe0_clk"; + + qcom,clock-rates = <19200000 + 19200000 + 19200000 + 19200000 + 19200000 + 19200000 + 0 + 0 + 0 + 320000000 + 0 + 0 + 0 + 0 + 19200000 + 0 + 0 + 200000000 + 200000000 + 200000000 + 200000000 + 200000000 + 0 + }; diff --git a/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt b/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt index 38e056cdc0ee..db21a2b58c2b 100644 --- a/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt +++ b/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt @@ -1311,6 +1311,13 @@ Optional properties: - pinctrl-x: Defines pinctrl state for each pin group. + - qcom,msm-cpudai-tdm-clk-attribute: Clock attribute for tdm. + 0 - Clk invalid attribute + 1 - Clk attribute couple no + 2 - Clk attribute couple dividend + 3 - Clk attribute couple divisor + 4 - Clk attribute invert couple no + Example: qcom,msm-dai-tdm-quat-rx { diff --git a/Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt b/Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt index 8b99dbce871b..9e6dd4905ca9 100644 --- a/Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt +++ b/Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt @@ -58,6 +58,7 @@ Optional properties: - pinctrl-names, pinctrl-0, pinctrl-1,.. 
pinctrl-n: Refer to "Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt" for these optional properties +- non-removable : defines if the connected ufs device is not removable Note: If above properties are not defined it can be assumed that the supply diff --git a/Documentation/devicetree/bindings/wcnss/wcnss-wlan.txt b/Documentation/devicetree/bindings/wcnss/wcnss-wlan.txt index 061c1d16ad24..d0855115b6d1 100644 --- a/Documentation/devicetree/bindings/wcnss/wcnss-wlan.txt +++ b/Documentation/devicetree/bindings/wcnss/wcnss-wlan.txt @@ -12,7 +12,7 @@ Required properties: "riva_ccu_base", "pronto_a2xb_base", "pronto_ccpu_base", "pronto_saw2_base", "wlan_tx_phy_aborts","wlan_brdg_err_source", "wlan_tx_status", "alarms_txctl", "alarms_tactl", - "pronto_mcu_base". + "pronto_mcu_base", "pronto_qfuse". - interupts: Pronto to Apps interrupts for tx done and rx pending. - qcom,pronto-vddmx-supply: regulator to supply pronto pll. - qcom,pronto-vddcx-supply: voltage corner regulator to supply WLAN/BT/FM @@ -29,7 +29,7 @@ Required properties: - qcom,wcnss-vadc: VADC handle for battery voltage notification APIs. - pinctrl- : Pinctrl states as described in bindings/pinctrl/pinctrl-bindings.txt - pinctrl-names : Names corresponding to the numbered pinctrl states -- clocks: from common clock binding: handle to xo and rf_clk clocks. +- clocks: from common clock binding: handle to xo, rf_clk and wcnss snoc clocks. - clock-names: Names of all the clocks that are accessed by the subsystem - qcom,vdd-voltage-level: This property represents (nominal, min, max) voltage for iris and pronto regulators in milli-volts. @@ -39,11 +39,16 @@ iris and pronto regulators in micro-amps. Optional properties: - qcom,has-autodetect-xo: boolean flag to determine whether Iris XO auto detect should be performed during boot up. +- qcom,snoc-wcnss-clock-freq: indicates the wcnss snoc clock frequency in Hz. +If wcnss_snoc clock is specified in the list of clocks, this property needs +to be set to make it functional. - qcom,wlan-rx-buff-count: WLAN RX buffer count is a configurable value, using a smaller count for this buffer will reduce the memory usage. - qcom,is-pronto-v3: boolean flag to determine the pronto hardware version in use. subsequently correct workqueue will be used by DXE engine to push frames in TX data path. +- qcom,is-dual-band-disable: boolean flag to determine the WLAN dual band + capability. - qcom,is-pronto-vadc: boolean flag to determine Battery voltage feature support for pronto hardware. - qcom,wcnss-pm : , <&clock_rpm clk_rf_clk2>, <&clock_debug clk_gcc_debug_mux>, - <&clock_gcc clk_wcnss_m_clk>; - clock-names = "xo", "rf_clk", "measure", "wcnss_debug"; + <&clock_gcc clk_wcnss_m_clk>, + <&clock_gcc clk_snoc_wcnss_a_clk>; + + clock-names = "xo", "rf_clk", "measure", "wcnss_debug", + "snoc_wcnss"; + + qcom,snoc-wcnss-clock-freq = <200000000>; qcom,wcnss-pm = <11 21 1200 1 1 6>; }; diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt index e953469cbe5e..bb8329c52298 100644 --- a/Documentation/kernel-parameters.txt +++ b/Documentation/kernel-parameters.txt @@ -3605,6 +3605,13 @@ bytes respectively. Such letter suffixes can also be entirely omitted. spia_pedr= spia_peddr= + stack_guard_gap= [MM] + override the default stack gap protection. The value + is in page units and it defines how many pages prior + to (for stacks growing down) resp. after (for stacks + growing up) the main stack are reserved for no other + mapping. Default value is 256 pages. 
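As a usage illustration of the parameter above (the value is only an example), a system that wants a 4 MiB gap with 4 KiB pages would boot with:

	stack_guard_gap=1024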
+ stacktrace [FTRACE] Enabled the stack tracer on boot up. diff --git a/Documentation/vm/page_owner.txt b/Documentation/vm/page_owner.txt index 8f3ce9b3aa11..ffff1439076a 100644 --- a/Documentation/vm/page_owner.txt +++ b/Documentation/vm/page_owner.txt @@ -28,10 +28,11 @@ with page owner and page owner is disabled in runtime due to no enabling boot option, runtime overhead is marginal. If disabled in runtime, it doesn't require memory to store owner information, so there is no runtime memory overhead. And, page owner inserts just two unlikely branches into -the page allocator hotpath and if it returns false then allocation is -done like as the kernel without page owner. These two unlikely branches -would not affect to allocation performance. Following is the kernel's -code size change due to this facility. +the page allocator hotpath and if not enabled, then allocation is done +like as the kernel without page owner. These two unlikely branches should +not affect to allocation performance, especially if the static keys jump +label patching functionality is available. Following is the kernel's code +size change due to this facility. - Without page owner text data bss dec hex filename diff --git a/Makefile b/Makefile index 1a49c8e64768..dbb22cf9f76a 100644 --- a/Makefile +++ b/Makefile @@ -1,6 +1,6 @@ VERSION = 4 PATCHLEVEL = 4 -SUBLEVEL = 70 +SUBLEVEL = 80 EXTRAVERSION = NAME = Blurry Fish Butt @@ -623,6 +623,9 @@ include arch/$(SRCARCH)/Makefile KBUILD_CFLAGS += $(call cc-option,-fno-delete-null-pointer-checks,) KBUILD_CFLAGS += $(call cc-disable-warning,maybe-uninitialized,) KBUILD_CFLAGS += $(call cc-disable-warning,frame-address,) +KBUILD_CFLAGS += $(call cc-disable-warning, format-truncation) +KBUILD_CFLAGS += $(call cc-disable-warning, format-overflow) +KBUILD_CFLAGS += $(call cc-disable-warning, int-in-bool-context) ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE KBUILD_CFLAGS += -Os @@ -637,6 +640,12 @@ endif # Tell gcc to never replace conditional load with a non-conditional one KBUILD_CFLAGS += $(call cc-option,--param=allow-store-data-races=0) +# check for 'asm goto' +ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(CC) $(KBUILD_CFLAGS)), y) + KBUILD_CFLAGS += -DCC_HAVE_ASM_GOTO + KBUILD_AFLAGS += -DCC_HAVE_ASM_GOTO +endif + ifdef CONFIG_READABLE_ASM # Disable optimizations that make assembler listings hard to read. 
# reorder blocks reorders the control in the function @@ -792,12 +801,6 @@ KBUILD_CFLAGS += $(call cc-option,-Werror=date-time) # use the deterministic mode of AR if available KBUILD_ARFLAGS := $(call ar-option,D) -# check for 'asm goto' -ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(CC)), y) - KBUILD_CFLAGS += -DCC_HAVE_ASM_GOTO - KBUILD_AFLAGS += -DCC_HAVE_ASM_GOTO -endif - include scripts/Makefile.kasan include scripts/Makefile.extrawarn include scripts/Makefile.ubsan diff --git a/android/configs/android-base-arm64.cfg b/android/configs/android-base-arm64.cfg new file mode 100644 index 000000000000..43f23d6b5391 --- /dev/null +++ b/android/configs/android-base-arm64.cfg @@ -0,0 +1,5 @@ +# KEEP ALPHABETICALLY SORTED +CONFIG_ARMV8_DEPRECATED=y +CONFIG_CP15_BARRIER_EMULATION=y +CONFIG_SETEND_EMULATION=y +CONFIG_SWP_EMULATION=y diff --git a/android/configs/android-base.cfg b/android/configs/android-base.cfg index b0ef9fcbaac6..d1e3b0891a4e 100644 --- a/android/configs/android-base.cfg +++ b/android/configs/android-base.cfg @@ -3,6 +3,8 @@ # CONFIG_DEVMEM is not set # CONFIG_FHANDLE is not set # CONFIG_INET_LRO is not set +# CONFIG_NFSD is not set +# CONFIG_NFS_FS is not set # CONFIG_OABI_COMPAT is not set # CONFIG_SYSVIPC is not set # CONFIG_USELIB is not set @@ -10,16 +12,13 @@ CONFIG_ANDROID=y CONFIG_ANDROID_BINDER_IPC=y CONFIG_ANDROID_BINDER_DEVICES=binder,hwbinder,vndbinder CONFIG_ANDROID_LOW_MEMORY_KILLER=y -CONFIG_ARMV8_DEPRECATED=y CONFIG_ASHMEM=y CONFIG_AUDIT=y CONFIG_BLK_DEV_INITRD=y CONFIG_CGROUPS=y CONFIG_CGROUP_CPUACCT=y -CONFIG_CGROUP_DEBUG=y CONFIG_CGROUP_FREEZER=y CONFIG_CGROUP_SCHED=y -CONFIG_CP15_BARRIER_EMULATION=y CONFIG_DEFAULT_SECURITY_SELINUX=y CONFIG_EMBEDDED=y CONFIG_FB=y @@ -153,9 +152,7 @@ CONFIG_SECURITY=y CONFIG_SECURITY_NETWORK=y CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y CONFIG_SECURITY_SELINUX=y -CONFIG_SETEND_EMULATION=y CONFIG_STAGING=y -CONFIG_SWP_EMULATION=y CONFIG_SYNC=y CONFIG_TUN=y CONFIG_UID_SYS_STATS=y diff --git a/arch/arc/mm/mmap.c b/arch/arc/mm/mmap.c index 2e06d56e987b..cf4ae6958240 100644 --- a/arch/arc/mm/mmap.c +++ b/arch/arc/mm/mmap.c @@ -64,7 +64,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, vma = find_vma(mm, addr); if (TASK_SIZE - len >= addr && - (!vma || addr + len <= vma->vm_start)) + (!vma || addr + len <= vm_start_gap(vma))) return addr; } diff --git a/arch/arm/boot/dts/bcm5301x.dtsi b/arch/arm/boot/dts/bcm5301x.dtsi index 6f50f672efbd..de8ac998604d 100644 --- a/arch/arm/boot/dts/bcm5301x.dtsi +++ b/arch/arm/boot/dts/bcm5301x.dtsi @@ -54,14 +54,14 @@ timer@0200 { compatible = "arm,cortex-a9-global-timer"; reg = <0x0200 0x100>; - interrupts = ; + interrupts = ; clocks = <&clk_periph>; }; local-timer@0600 { compatible = "arm,cortex-a9-twd-timer"; reg = <0x0600 0x100>; - interrupts = ; + interrupts = ; clocks = <&clk_periph>; }; diff --git a/arch/arm/boot/dts/imx6dl.dtsi b/arch/arm/boot/dts/imx6dl.dtsi index 4b0ec0703825..8ca9217204a0 100644 --- a/arch/arm/boot/dts/imx6dl.dtsi +++ b/arch/arm/boot/dts/imx6dl.dtsi @@ -30,7 +30,7 @@ /* kHz uV */ 996000 1250000 792000 1175000 - 396000 1075000 + 396000 1150000 >; fsl,soc-operating-points = < /* ARM kHz SOC-PU uV */ diff --git a/arch/arm/boot/dts/omap3-n900.dts b/arch/arm/boot/dts/omap3-n900.dts index 5f5e0f3d5b64..27cd4abfc74d 100644 --- a/arch/arm/boot/dts/omap3-n900.dts +++ b/arch/arm/boot/dts/omap3-n900.dts @@ -697,6 +697,8 @@ vmmc_aux-supply = <&vsim>; bus-width = <8>; non-removable; + no-sdio; + no-sd; }; &mmc3 { diff --git 
a/arch/arm/boot/dts/qcom/Makefile b/arch/arm/boot/dts/qcom/Makefile index c938988d6634..1c3de8ccb400 100644 --- a/arch/arm/boot/dts/qcom/Makefile +++ b/arch/arm/boot/dts/qcom/Makefile @@ -168,9 +168,13 @@ dtb-$(CONFIG_ARCH_MSM8998) += msm8998-sim.dtb \ apq8098-v2.1-cdp.dtb \ apq8098-v2.1-qrd.dtb \ apq8098-v2.1-mediabox.dtb \ + apq8098-v2.1-svr20.dtb \ msm8998-v2.1-interposer-sdm660-cdp.dtb \ msm8998-v2.1-interposer-sdm660-mtp.dtb \ - msm8998-v2.1-interposer-sdm660-qrd.dtb + msm8998-v2.1-interposer-sdm660-qrd.dtb \ + msm8998-9x55-rcm.dtb \ + msm8998-9x55-cdp.dtb \ + msm8998-9x55-mtp.dtb endif dtb-$(CONFIG_ARCH_MSMHAMSTER) += msmhamster-rumi.dtb diff --git a/arch/arm/boot/dts/qcom/apq8096-auto-dragonboard.dtsi b/arch/arm/boot/dts/qcom/apq8096-auto-dragonboard.dtsi index 4081a21b3134..db33594d3827 100644 --- a/arch/arm/boot/dts/qcom/apq8096-auto-dragonboard.dtsi +++ b/arch/arm/boot/dts/qcom/apq8096-auto-dragonboard.dtsi @@ -713,6 +713,10 @@ <&afe_proxy_rx>, <&afe_proxy_tx>, <&incall_record_rx>, <&incall_record_tx>, <&incall_music_rx>, <&incall_music2_rx>, + <&dai_pri_tdm_tx_0>, <&dai_pri_tdm_tx_1>, + <&dai_pri_tdm_tx_2>, <&dai_pri_tdm_tx_3>, + <&dai_pri_tdm_rx_0>, <&dai_pri_tdm_rx_1>, + <&dai_pri_tdm_rx_2>, <&dai_pri_tdm_rx_3>, <&dai_sec_tdm_tx_0>, <&dai_sec_tdm_tx_1>, <&dai_sec_tdm_tx_2>, <&dai_sec_tdm_tx_3>, <&dai_tert_tdm_rx_0>, <&dai_tert_tdm_rx_1>, @@ -731,6 +735,10 @@ "msm-dai-q6-dev.241", "msm-dai-q6-dev.240", "msm-dai-q6-dev.32771", "msm-dai-q6-dev.32772", "msm-dai-q6-dev.32773", "msm-dai-q6-dev.32770", + "msm-dai-q6-tdm.36865", "msm-dai-q6-tdm.36867", + "msm-dai-q6-tdm.36869", "msm-dai-q6-tdm.36871", + "msm-dai-q6-tdm.36864", "msm-dai-q6-tdm.36866", + "msm-dai-q6-tdm.36868", "msm-dai-q6-tdm.36870", "msm-dai-q6-tdm.36881", "msm-dai-q6-tdm.36883", "msm-dai-q6-tdm.36885", "msm-dai-q6-tdm.36887", "msm-dai-q6-tdm.36896", "msm-dai-q6-tdm.36898", diff --git a/arch/arm/boot/dts/qcom/apq8096-ba.dtsi b/arch/arm/boot/dts/qcom/apq8096-ba.dtsi new file mode 100644 index 000000000000..e6524593e502 --- /dev/null +++ b/arch/arm/boot/dts/qcom/apq8096-ba.dtsi @@ -0,0 +1,18 @@ +/* Copyright (c) 2015, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +&soc { + msm_ba: qcom,ba { + compatible = "qcom,msm-ba"; + status = "ok"; + }; +}; diff --git a/arch/arm/boot/dts/qcom/apq8096-dragonboard.dtsi b/arch/arm/boot/dts/qcom/apq8096-dragonboard.dtsi index bfc6f210a0bb..e731c7edd518 100644 --- a/arch/arm/boot/dts/qcom/apq8096-dragonboard.dtsi +++ b/arch/arm/boot/dts/qcom/apq8096-dragonboard.dtsi @@ -12,6 +12,7 @@ #include "msm8996-pinctrl.dtsi" #include "apq8096-camera-sensor-dragonboard.dtsi" +#include "apq8096-ba.dtsi" / { bluetooth: bt_qca6174 { diff --git a/arch/arm/boot/dts/qcom/apq8096pro-v1.1-auto-adp-lite.dts b/arch/arm/boot/dts/qcom/apq8096pro-v1.1-auto-adp-lite.dts index 9c4ff9f184e7..082b04791dbd 100644 --- a/arch/arm/boot/dts/qcom/apq8096pro-v1.1-auto-adp-lite.dts +++ b/arch/arm/boot/dts/qcom/apq8096pro-v1.1-auto-adp-lite.dts @@ -1,4 +1,4 @@ -/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved. 
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -45,10 +45,27 @@ adv7533@3d { status = "disabled"; }; + adv7533@39 { status = "disabled"; }; }; + + qcom,adv7481@70 { + status = "disabled"; + }; + + qcom,msm-ba { + status = "disabled"; + }; +}; + +&dsi_adv_7533_2 { + /delete-property/ qcom,dsi-display-active; +}; + +&sde_kms { + connectors = <&sde_hdmi_tx &sde_hdmi &dsi_adv_7533_1>; }; &pil_modem { diff --git a/arch/arm/boot/dts/qcom/apq8098-v2.1-mediabox.dts b/arch/arm/boot/dts/qcom/apq8098-v2.1-mediabox.dts index 022841b5e769..1fa49d8a060d 100644 --- a/arch/arm/boot/dts/qcom/apq8098-v2.1-mediabox.dts +++ b/arch/arm/boot/dts/qcom/apq8098-v2.1-mediabox.dts @@ -90,6 +90,7 @@ &snd_9335 { qcom,msm-mi2s-master = <1>, <1>, <1>, <0>; + qcom,msm-mbhc-hphl-swh = <1>; }; &wcd_usbc_analog_en1_gpio { @@ -100,6 +101,10 @@ status = "disabled"; }; +&pcie0 { + qcom,boot-option = <0x0>; +}; + &soc { qcom,msm-dai-mi2s { dai_mi2s3: qcom,msm-dai-q6-mi2s-quat { diff --git a/arch/arm/boot/dts/qcom/apq8098-v2.1-svr20.dts b/arch/arm/boot/dts/qcom/apq8098-v2.1-svr20.dts new file mode 100644 index 000000000000..4359a3f38ade --- /dev/null +++ b/arch/arm/boot/dts/qcom/apq8098-v2.1-svr20.dts @@ -0,0 +1,22 @@ +/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +/dts-v1/; + +#include "apq8098-v2.1.dtsi" +#include "msm8998-svr20.dtsi" + +/ { + model = "Qualcomm Technologies, Inc. APQ 8098 V2.1 SVR V2.0 Board"; + compatible = "qcom,apq8098-svr", "qcom,apq8098", "qcom,svr"; + qcom,board-id = <0x03020008 3>; +}; diff --git a/arch/arm/boot/dts/qcom/apq8098-v2.1.dtsi b/arch/arm/boot/dts/qcom/apq8098-v2.1.dtsi index a5fa81b71537..ef85584c9984 100644 --- a/arch/arm/boot/dts/qcom/apq8098-v2.1.dtsi +++ b/arch/arm/boot/dts/qcom/apq8098-v2.1.dtsi @@ -16,3 +16,13 @@ model = "Qualcomm Technologies, Inc. APQ 8098 V2.1"; qcom,msm-id = <319 0x20001>; }; + +&soc { + qcom,rmnet-ipa { + status = "disabled"; + }; +}; + +&ipa_hw { + status = "disabled"; +}; diff --git a/arch/arm/boot/dts/qcom/apq8098-v2.dtsi b/arch/arm/boot/dts/qcom/apq8098-v2.dtsi index e51e310f7131..022921abcc58 100644 --- a/arch/arm/boot/dts/qcom/apq8098-v2.dtsi +++ b/arch/arm/boot/dts/qcom/apq8098-v2.dtsi @@ -16,3 +16,13 @@ model = "Qualcomm Technologies, Inc. APQ 8098 V2"; qcom,msm-id = <319 0x20000>; }; + +&soc { + qcom,rmnet-ipa { + status = "disabled"; + }; +}; + +&ipa_hw { + status = "disabled"; +}; diff --git a/arch/arm/boot/dts/qcom/dsi-adv7533-1024-600p.dtsi b/arch/arm/boot/dts/qcom/dsi-adv7533-1024-600p.dtsi new file mode 100644 index 000000000000..5e382f307530 --- /dev/null +++ b/arch/arm/boot/dts/qcom/dsi-adv7533-1024-600p.dtsi @@ -0,0 +1,75 @@ +/* Copyright (c) 2017, The Linux Foundation. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +&mdss_mdp { + +dsi_adv7533_1024_600p: qcom,mdss_dsi_adv7533_1024_600p { + label = "adv7533 1024x600p video mode dsi panel"; + qcom,mdss-dsi-panel-name = "dsi_adv7533_1024_600p"; + qcom,mdss-dsi-panel-controller = <&mdss_dsi0>; + qcom,mdss-dsi-panel-type = "dsi_video_mode"; + qcom,mdss-dsi-panel-destination = "display_1"; + qcom,mdss-dsi-panel-framerate = <60>; + qcom,mdss-dsi-virtual-channel-id = <0>; + qcom,mdss-dsi-stream = <0>; + qcom,mdss-dsi-panel-width = <1024>; + qcom,mdss-dsi-panel-height = <600>; + qcom,mdss-dsi-h-front-porch = <110>; + qcom,mdss-dsi-h-back-porch = <220>; + qcom,mdss-dsi-h-pulse-width = <40>; + qcom,mdss-dsi-h-sync-skew = <0>; + qcom,mdss-dsi-v-back-porch = <20>; + qcom,mdss-dsi-v-front-porch = <5>; + qcom,mdss-dsi-v-pulse-width = <5>; + qcom,mdss-dsi-h-left-border = <0>; + qcom,mdss-dsi-h-right-border = <0>; + qcom,mdss-dsi-v-top-border = <0>; + qcom,mdss-dsi-v-bottom-border = <0>; + qcom,mdss-dsi-bpp = <24>; + qcom,mdss-dsi-underflow-color = <0xff>; + qcom,mdss-dsi-border-color = <0>; + qcom,mdss-dsi-on-command = [ + 05 01 00 00 c8 00 02 11 00 + 05 01 00 00 0a 00 02 29 00]; + qcom,mdss-dsi-off-command = [05 01 00 00 00 00 02 28 00 + 05 01 00 00 00 00 02 10 00]; + qcom,mdss-dsi-on-command-state = "dsi_lp_mode"; + qcom,mdss-dsi-off-command-state = "dsi_hs_mode"; + qcom,mdss-dsi-h-sync-pulse = <1>; + qcom,mdss-dsi-traffic-mode = "non_burst_sync_pulse"; + qcom,mdss-dsi-bllp-eof-power-mode; + qcom,mdss-dsi-bllp-power-mode; + qcom,mdss-dsi-lane-0-state; + qcom,mdss-dsi-lane-1-state; + qcom,mdss-dsi-lane-2-state; + qcom,mdss-dsi-panel-timings = [ + AB 1A 10 00 3E 43 16 1E 15 03 04 00]; + qcom,mdss-dsi-t-clk-post = <0x03>; + qcom,mdss-dsi-t-clk-pre = <0x20>; + qcom,mdss-dsi-bl-min-level = <1>; + qcom,mdss-dsi-bl-max-level = <4095>; + qcom,mdss-dsi-dma-trigger = "trigger_sw"; + qcom,mdss-dsi-mdp-trigger = "none"; + qcom,mdss-dsi-reset-sequence = <1 20>, <0 1>, <1 20>; + qcom,mdss-pan-physical-width-dimension = <160>; + qcom,mdss-pan-physical-height-dimension = <90>; + qcom,mdss-dsi-force-clock-lane-hs; + qcom,mdss-dsi-always-on; + qcom,mdss-dsi-panel-timings-phy-v2 = [1c 19 02 03 01 03 04 a0 + 1c 19 02 03 01 03 04 a0 + 1c 19 02 03 01 03 04 a0 + 1c 19 02 03 01 03 04 a0 + 1c 08 02 03 01 03 04 a0]; + qcom,dba-panel; + qcom,bridge-name = "adv7533"; + }; +}; diff --git a/arch/arm/boot/dts/qcom/dsi-panel-nt35597-dualmipi-wqxga-video.dtsi b/arch/arm/boot/dts/qcom/dsi-panel-nt35597-dualmipi-wqxga-video.dtsi index 5971a3d1025e..aa627b3e7c63 100644 --- a/arch/arm/boot/dts/qcom/dsi-panel-nt35597-dualmipi-wqxga-video.dtsi +++ b/arch/arm/boot/dts/qcom/dsi-panel-nt35597-dualmipi-wqxga-video.dtsi @@ -68,7 +68,6 @@ qcom,mdss-dsi-lane-1-state; qcom,mdss-dsi-lane-2-state; qcom,mdss-dsi-lane-3-state; - qcom,cmd-sync-wait-broadcast; qcom,mdss-dsi-panel-timings = [e2 36 24 00 66 6a 28 38 2a 03 04 00]; qcom,mdss-dsi-t-clk-post = <0x0d>; qcom,mdss-dsi-t-clk-pre = <0x2d>; diff --git a/arch/arm/boot/dts/qcom/dsi-panel-nt35597-truly-dualmipi-wqxga-video.dtsi b/arch/arm/boot/dts/qcom/dsi-panel-nt35597-truly-dualmipi-wqxga-video.dtsi 
index 1a572f97c840..339d87f66d2f 100644 --- a/arch/arm/boot/dts/qcom/dsi-panel-nt35597-truly-dualmipi-wqxga-video.dtsi +++ b/arch/arm/boot/dts/qcom/dsi-panel-nt35597-truly-dualmipi-wqxga-video.dtsi @@ -193,7 +193,6 @@ qcom,mdss-dsi-lane-1-state; qcom,mdss-dsi-lane-2-state; qcom,mdss-dsi-lane-3-state; - qcom,cmd-sync-wait-broadcast; qcom,mdss-dsi-panel-timings = [e2 36 24 00 66 6a 28 38 2a 03 04 00]; qcom,mdss-dsi-t-clk-post = <0x0d>; diff --git a/arch/arm/boot/dts/qcom/dsi-panel-s6e3ha3-amoled-dualmipi-wqhd-cmd.dtsi b/arch/arm/boot/dts/qcom/dsi-panel-s6e3ha3-amoled-dualmipi-wqhd-cmd.dtsi index 6d91e72851ec..5aa2e1ee8316 100644 --- a/arch/arm/boot/dts/qcom/dsi-panel-s6e3ha3-amoled-dualmipi-wqhd-cmd.dtsi +++ b/arch/arm/boot/dts/qcom/dsi-panel-s6e3ha3-amoled-dualmipi-wqhd-cmd.dtsi @@ -51,7 +51,6 @@ 39 01 00 00 78 00 03 f0 a5 a5 39 01 00 00 00 00 02 35 00 39 01 00 00 00 00 02 53 20 - 39 01 00 00 00 00 02 51 60 05 01 00 00 05 00 02 29 00]; qcom,mdss-dsi-off-command = [05 01 00 00 3c 00 02 28 00 05 01 00 00 b4 00 02 10 00]; @@ -136,6 +135,7 @@ qcom,mdss-dsi-mdp-trigger = "none"; qcom,mdss-dsi-lp11-init; qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_dcs"; + qcom,mdss-dsi-bl-dcs-command-state = "dsi_hs_mode"; qcom,mdss-dsi-bl-min-level = <1>; qcom,mdss-dsi-bl-max-level = <255>; qcom,mdss-pan-physical-width-dimension = <68>; diff --git a/arch/arm/boot/dts/qcom/dsi-panel-sharp-dsc-4k-cmd.dtsi b/arch/arm/boot/dts/qcom/dsi-panel-sharp-dsc-4k-cmd.dtsi index 51a225b82f47..ff3b7b80c449 100644 --- a/arch/arm/boot/dts/qcom/dsi-panel-sharp-dsc-4k-cmd.dtsi +++ b/arch/arm/boot/dts/qcom/dsi-panel-sharp-dsc-4k-cmd.dtsi @@ -1,4 +1,4 @@ -/* Copyright (c) 2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -54,6 +54,8 @@ qcom,ulps-enabled; qcom,dcs-cmd-by-left; qcom,mdss-dsi-tx-eot-append; + qcom,mdss-pan-physical-width-dimension = <68>; + qcom,mdss-pan-physical-height-dimension = <121>; qcom,adjust-timer-wakeup-ms = <1>; qcom,mdss-dsi-on-command = [ diff --git a/arch/arm/boot/dts/qcom/dsi-panel-sharp-dsc-4k-video.dtsi b/arch/arm/boot/dts/qcom/dsi-panel-sharp-dsc-4k-video.dtsi index 02c87067f212..933746b8abe7 100644 --- a/arch/arm/boot/dts/qcom/dsi-panel-sharp-dsc-4k-video.dtsi +++ b/arch/arm/boot/dts/qcom/dsi-panel-sharp-dsc-4k-video.dtsi @@ -1,4 +1,4 @@ -/* Copyright (c) 2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -46,6 +46,8 @@ qcom,mdss-dsi-mdp-trigger = "none"; qcom,mdss-dsi-reset-sequence = <1 20>, <0 20>, <1 20>; qcom,mdss-dsi-tx-eot-append; + qcom,mdss-pan-physical-width-dimension = <68>; + qcom,mdss-pan-physical-height-dimension = <121>; qcom,adjust-timer-wakeup-ms = <1>; qcom,mdss-dsi-on-command = [ diff --git a/arch/arm/boot/dts/qcom/fg-gen3-batterydata-svr-v2-3200mah.dtsi b/arch/arm/boot/dts/qcom/fg-gen3-batterydata-svr-v2-3200mah.dtsi new file mode 100644 index 000000000000..048897b084ec --- /dev/null +++ b/arch/arm/boot/dts/qcom/fg-gen3-batterydata-svr-v2-3200mah.dtsi @@ -0,0 +1,84 @@ +/* + * Copyright (c) 2017, The Linux Foundation. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +qcom,svr835v2_3200mah { + /*3003021_TC_MLP603170_3200mAh_averaged_MasterSlave_Jun292017*/ + qcom, = <24>; + qcom,max-voltage-uv = <4200000>; + qcom,fg-cc-cv-threshold-mv = <4190>; + qcom,fastchg-current-ma = <3200>; + qcom,nom-batt-capacity-mah = <3200>; + qcom,batt-id-kohm = <0>; + qcom,battery-beta = <3435>; + qcom,battery-type = "svr835v2_3200mah"; + qcom,checksum = <0xB7B0>; + qcom,gui-version = "PMI8998GUI - 2.0.0.58"; + qcom,fg-profile-data = [ + 87 16 AB 0B + BE 15 3A 0A + 8B 1C 6D 02 + 76 0D 1F 0A + 50 18 ED 22 + 98 45 CA 52 + 83 00 00 00 + 0D 00 00 00 + 00 00 37 B4 + 78 C5 9D BA + 29 00 08 00 + 3E CA 11 E5 + D4 06 B7 EA + 51 07 0F 02 + 82 DD 22 3B + 1C 06 09 20 + 27 00 14 00 + 1C 19 82 0A + E9 0C 49 03 + 84 1C 5C 03 + D0 15 0D 12 + 91 19 0C 22 + F0 3C 35 4B + 7D 00 00 00 + 12 00 00 00 + 00 00 F3 D4 + 9F B4 AF D3 + 22 00 00 00 + CC EA 11 E5 + 2D F4 35 E3 + A5 F3 49 0B + 8F EA 5A 1A + 9B 33 CC FF + 07 10 00 00 + 21 0D 33 43 + 22 00 40 00 + 07 01 0A FA + FF 00 00 00 + 00 00 00 00 + 00 00 00 00 + 00 00 00 00 + 00 00 00 00 + 00 00 00 00 + 00 00 00 00 + 00 00 00 00 + 00 00 00 00 + 00 00 00 00 + 00 00 00 00 + 00 00 00 00 + 00 00 00 00 + 00 00 00 00 + 00 00 00 00 + 00 00 00 00 + 00 00 00 00 + 00 00 00 00 + 00 00 00 00 + ]; +}; diff --git a/arch/arm/boot/dts/qcom/msm-arm-smmu-8998.dtsi b/arch/arm/boot/dts/qcom/msm-arm-smmu-8998.dtsi index da28e56bc2df..e4e488597efd 100644 --- a/arch/arm/boot/dts/qcom/msm-arm-smmu-8998.dtsi +++ b/arch/arm/boot/dts/qcom/msm-arm-smmu-8998.dtsi @@ -155,6 +155,7 @@ interrupts = , , ; + qcom,deferred-regulator-disable-delay = <80>; vdd-supply = <&gdsc_gpu_cx>; clocks = <&clock_gcc clk_gcc_gpu_cfg_ahb_clk>, <&clock_gcc clk_gcc_bimc_gfx_clk>, diff --git a/arch/arm/boot/dts/qcom/msm-pm660.dtsi b/arch/arm/boot/dts/qcom/msm-pm660.dtsi index 93aeef07cfe0..460e7e76ac4d 100644 --- a/arch/arm/boot/dts/qcom/msm-pm660.dtsi +++ b/arch/arm/boot/dts/qcom/msm-pm660.dtsi @@ -24,6 +24,7 @@ compatible = "qcom,qpnp-revid"; reg = <0x100 0x100>; qcom,fab-id-valid; + qcom,tp-rev-valid; }; pm660_misc: qcom,misc@900 { diff --git a/arch/arm/boot/dts/qcom/msm-pm660l.dtsi b/arch/arm/boot/dts/qcom/msm-pm660l.dtsi index 0f18ba5c94c7..075eaef21254 100644 --- a/arch/arm/boot/dts/qcom/msm-pm660l.dtsi +++ b/arch/arm/boot/dts/qcom/msm-pm660l.dtsi @@ -250,9 +250,8 @@ <0xd900 0x100>; reg-names = "qpnp-wled-ctrl-base", "qpnp-wled-sink-base"; - interrupts = <0x3 0xd8 0x1 IRQ_TYPE_EDGE_RISING>, - <0x3 0xd8 0x2 IRQ_TYPE_EDGE_RISING>; - interrupt-names = "ovp-irq", "sc-irq"; + interrupts = <0x3 0xd8 0x1 IRQ_TYPE_EDGE_RISING>; + interrupt-names = "ovp-irq"; linux,name = "wled"; linux,default-trigger = "bkl-trigger"; qcom,fdbk-output = "auto"; @@ -268,9 +267,9 @@ qcom,fs-curr-ua = <25000>; qcom,cons-sync-write-delay-us = <1000>; qcom,led-strings-list = [00 01 02]; - qcom,en-ext-pfet-sc-pro; qcom,loop-auto-gm-en; qcom,pmic-revid = <&pm660l_revid>; + qcom,auto-calibration-enable; status = "ok"; }; diff --git a/arch/arm/boot/dts/qcom/msm-pmi8998.dtsi b/arch/arm/boot/dts/qcom/msm-pmi8998.dtsi index 684f6cf9b389..147b537eba33 100644 --- 
a/arch/arm/boot/dts/qcom/msm-pmi8998.dtsi +++ b/arch/arm/boot/dts/qcom/msm-pmi8998.dtsi @@ -634,6 +634,7 @@ qcom,en-ext-pfet-sc-pro; qcom,pmic-revid = <&pmi8998_revid>; qcom,loop-auto-gm-en; + qcom,auto-calibration-enable; status = "okay"; }; diff --git a/arch/arm/boot/dts/qcom/msm-rdbg.dtsi b/arch/arm/boot/dts/qcom/msm-rdbg.dtsi index d0c91f9e72ae..6de1a8e2fb7e 100644 --- a/arch/arm/boot/dts/qcom/msm-rdbg.dtsi +++ b/arch/arm/boot/dts/qcom/msm-rdbg.dtsi @@ -1,4 +1,4 @@ -/* Copyright (c) 2015, The Linux Foundation. All rights reserved. +/* Copyright (c) 2015, 2017, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -72,4 +72,35 @@ compatible = "qcom,smp2pgpio_client_rdbg_1_out"; gpios = <&smp2pgpio_rdbg_1_out 0 0>; }; + + smp2pgpio_rdbg_5_in: qcom,smp2pgpio-rdbg-5-in { + compatible = "qcom,smp2pgpio"; + qcom,entry-name = "rdbg"; + qcom,remote-pid = <5>; + qcom,is-inbound; + gpio-controller; + #gpio-cells = <2>; + interrupt-controller; + #interrupt-cells = <2>; + }; + + qcom,smp2pgpio_client_rdbg_5_in { + compatible = "qcom,smp2pgpio_client_rdbg_5_in"; + gpios = <&smp2pgpio_rdbg_5_in 0 0>; + }; + + smp2pgpio_rdbg_5_out: qcom,smp2pgpio-rdbg-5-out { + compatible = "qcom,smp2pgpio"; + qcom,entry-name = "rdbg"; + qcom,remote-pid = <5>; + gpio-controller; + #gpio-cells = <2>; + interrupt-controller; + #interrupt-cells = <2>; + }; + + qcom,smp2pgpio_client_rdbg_5_out { + compatible = "qcom,smp2pgpio_client_rdbg_5_out"; + gpios = <&smp2pgpio_rdbg_5_out 0 0>; + }; }; diff --git a/arch/arm/boot/dts/qcom/msm8996-agave-adp.dtsi b/arch/arm/boot/dts/qcom/msm8996-agave-adp.dtsi index a600008341c2..1283cdddc2db 100644 --- a/arch/arm/boot/dts/qcom/msm8996-agave-adp.dtsi +++ b/arch/arm/boot/dts/qcom/msm8996-agave-adp.dtsi @@ -542,11 +542,12 @@ &mdss_hdmi_cec_active>; pinctrl-4 = <&mdss_hdmi_hpd_suspend &mdss_hdmi_ddc_suspend &mdss_hdmi_cec_suspend>; + /delete-property/ qcom,pluggable; }; #include "msm8996-sde-display.dtsi" -&mdss_mdp { +&sde_kms { qcom,mdss-pref-prim-intf = "dsi"; qcom,sde-plane-id-map { qcom,sde-plane-id@0 { @@ -857,6 +858,90 @@ }; &soc { + qcom,early-cam { + cell-index = <0>; + compatible = "qcom,early-cam"; + status = "ok"; + mmagic-supply = <&gdsc_mmagic_camss>; + gdscr-supply = <&gdsc_camss_top>; + vfe0-vdd-supply = <&gdsc_vfe0>; + qcom,cam-vreg-name = "mmagic", "gdscr", "vfe0-vdd"; + clocks = <&clock_mmss clk_mmss_mmagic_ahb_clk>, + <&clock_mmss clk_camss_top_ahb_clk>, + <&clock_mmss clk_cci_clk_src>, + <&clock_mmss clk_camss_cci_ahb_clk>, + <&clock_mmss clk_camss_cci_clk>, + <&clock_mmss clk_camss_ahb_clk>, + <&clock_mmss clk_mmagic_camss_axi_clk>, + <&clock_mmss clk_camss_vfe_ahb_clk>, + <&clock_mmss clk_camss_vfe0_ahb_clk>, + <&clock_mmss clk_camss_vfe_axi_clk>, + <&clock_mmss clk_camss_vfe0_stream_clk>, + <&clock_mmss clk_smmu_vfe_axi_clk>, + <&clock_mmss clk_smmu_vfe_ahb_clk>, + <&clock_mmss clk_camss_csi_vfe0_clk>, + <&clock_mmss clk_vfe0_clk_src>, + <&clock_mmss clk_camss_csi_vfe0_clk>, + <&clock_mmss clk_camss_csi2_ahb_clk>, + <&clock_mmss clk_camss_csi2_clk>, + <&clock_mmss clk_camss_csi2phy_clk>, + <&clock_mmss clk_csi2phytimer_clk_src>, + <&clock_mmss clk_camss_csi2phytimer_clk>, + <&clock_mmss clk_camss_csi2rdi_clk>, + <&clock_mmss clk_camss_ispif_ahb_clk>, + <&clock_mmss clk_camss_vfe0_clk>; + clock-names = + "mmss_mmagic_ahb_clk", + "camss_top_ahb_clk", + "cci_clk_src", + "camss_cci_ahb_clk", + "camss_cci_clk", + "camss_ahb_clk", + 
"mmagic_camss_axi_clk", + "camss_vfe_ahb_clk", + "camss_vfe0_ahb_clk", + "camss_vfe_axi_clk", + "camss_vfe0_stream_clk", + "smmu_vfe_axi_clk", + "smmu_vfe_ahb_clk", + "camss_csi_vfe0_clk", + "vfe0_clk_src", + "camss_csi_vfe0_clk", + "camss_csi2_ahb_clk", + "camss_csi2_clk", + "camss_csi2phy_clk", + "csi2phytimer_clk_src", + "camss_csi2phytimer_clk", + "camss_csi2rdi_clk", + "camss_ispif_ahb_clk", + "clk_camss_vfe0_clk"; + + qcom,clock-rates = <19200000 + 19200000 + 19200000 + 19200000 + 19200000 + 19200000 + 0 + 0 + 0 + 320000000 + 0 + 0 + 0 + 0 + 19200000 + 0 + 0 + 200000000 + 200000000 + 200000000 + 200000000 + 200000000 + 0 + 100000000>; + }; + qcom,ntn_avb { compatible = "qcom,ntn_avb"; @@ -868,6 +953,9 @@ qcom,ntn-rst-delay-msec = <100>; qcom,ntn-rc-num = <1>; + qcom,ntn-bus-num = <1>; + qcom,ntn-mdio-bus-id = <1>; + qcom,ntn-phy-addr = <7>; qcom,msm-bus,name = "ntn"; qcom,msm-bus,num-cases = <2>; @@ -1012,6 +1100,10 @@ <&afe_proxy_rx>, <&afe_proxy_tx>, <&incall_record_rx>, <&incall_record_tx>, <&incall_music_rx>, <&incall_music2_rx>, + <&dai_pri_tdm_tx_0>, <&dai_pri_tdm_tx_1>, + <&dai_pri_tdm_tx_2>, <&dai_pri_tdm_tx_3>, + <&dai_pri_tdm_rx_0>, <&dai_pri_tdm_rx_1>, + <&dai_pri_tdm_rx_2>, <&dai_pri_tdm_rx_3>, <&dai_sec_tdm_tx_0>, <&dai_sec_tdm_tx_1>, <&dai_sec_tdm_tx_2>, <&dai_sec_tdm_tx_3>, <&dai_tert_tdm_rx_0>, <&dai_tert_tdm_rx_1>, @@ -1030,6 +1122,10 @@ "msm-dai-q6-dev.241", "msm-dai-q6-dev.240", "msm-dai-q6-dev.32771", "msm-dai-q6-dev.32772", "msm-dai-q6-dev.32773", "msm-dai-q6-dev.32770", + "msm-dai-q6-tdm.36865", "msm-dai-q6-tdm.36867", + "msm-dai-q6-tdm.36869", "msm-dai-q6-tdm.36871", + "msm-dai-q6-tdm.36864", "msm-dai-q6-tdm.36866", + "msm-dai-q6-tdm.36868", "msm-dai-q6-tdm.36870", "msm-dai-q6-tdm.36881", "msm-dai-q6-tdm.36883", "msm-dai-q6-tdm.36885", "msm-dai-q6-tdm.36887", "msm-dai-q6-tdm.36896", "msm-dai-q6-tdm.36898", @@ -1098,17 +1194,58 @@ pinctrl-0 = <&quat_tdm_dout_active>; pinctrl-1 = <&quat_tdm_dout_sleep>; }; + + qcom,adv7481@70 { + compatible = "qcom,adv7481"; + reg = <0x70 0xff>; + cam_vdig-supply = <&pm8994_s3>; + /* Cameras powered by PMIC: */ + cam_vio-supply = <&pm8994_lvs1>; + cam_vana-supply = <&pm8994_l17>; + /* Self-powered cameras: */ + qcom,cam-vreg-name = "cam_vdig", "cam_vio", "cam_vana"; + qcom,cam-vreg-min-voltage = <1300000 0 2500000>; + qcom,cam-vreg-max-voltage = <1300000 0 2500000>; + qcom,cam-vreg-op-mode = <105000 0 80000>; + + qcom,cci-master = <0>; + gpios = <&tlmm 17 0>, /* I2C SDA */ + <&tlmm 18 0>, /* I2C SCL */ + <&pm8994_gpios 4 0>, /* RST */ + <&pm8994_gpios 5 0>, /* INT1 */ + <&pm8994_gpios 6 0>, /* INT2 */ + <&pm8994_gpios 7 0>; /* INT3 */ + }; + + qcom,msm-ba { + compatible = "qcom,msm-ba"; + qcom,ba-input-profile-0 { + qcom,type = <4>; /* input type */ + qcom,name = "HDMI-1"; /* input name */ + qcom,ba-input = <13>; /* ba input id */ + qcom,ba-output = <0>; /* ba output id */ + qcom,sd-name = "adv7481"; /* sd name */ + qcom,ba-node = <0>; /* ba node */ + qcom,user-type = <1>; /* user type */ + }; + + qcom,ba-input-profile-1 { + qcom,type = <0>; /* input type */ + qcom,name = "CVBS-0"; /* input name */ + qcom,ba-input = <0>; /* ba input id */ + qcom,ba-output = <0>; /* ba output id */ + qcom,sd-name = "adv7481"; /* sd name */ + qcom,ba-node = <1>; /* ba node */ + qcom,user-type = <1>; /* user type */ + }; + }; }; &pm8994_gpios { - gpio@c600 { /* GPIO 7 - NFC DWL REQ */ - qcom,mode = <1>; - qcom,output-type = <0>; - qcom,pull = <5>; + gpio@c600 { /* GPIO 7 - adv7481 INT3 */ + qcom,mode = <0>; qcom,vin-sel = <2>; - qcom,out-strength = 
<3>; qcom,src-sel = <0>; - qcom,master-en = <1>; status = "okay"; }; @@ -1159,17 +1296,23 @@ status = "okay"; }; - gpio@c300 { /* GPIO 4 */ - qcom,mode = <0>; + gpio@c300 { /* GPIO 4 - adv7481 RST */ + qcom,mode = <1>; qcom,pull = <0>; qcom,vin-sel = <2>; qcom,src-sel = <0>; status = "okay"; }; - gpio@c400 { /* GPIO 5 */ + gpio@c400 { /* GPIO 5 - adv7481 INT1 */ + qcom,mode = <0>; + qcom,vin-sel = <2>; + qcom,src-sel = <0>; + status = "okay"; + }; + + gpio@c500 { /* GPIO 6 - adv7481 INT2*/ qcom,mode = <0>; - qcom,pull = <0>; qcom,vin-sel = <2>; qcom,src-sel = <0>; status = "okay"; @@ -1190,7 +1333,7 @@ qcom,vin-sel = <2>; /* 1.8 */ qcom,out-strength = <1>; qcom,src-sel = <0>; /* GPIO */ - qcom,master-en = <0>; /* Disable GPIO */ + qcom,master-en = <1>; /* Enable GPIO */ status = "okay"; }; @@ -1397,3 +1540,22 @@ spi-cpha; }; }; + +&vfe_smmu { + qcom,no-smr-check; +}; + +/ { + reserved-memory { + lk_mem: lk_pool@0x91600000 { + no-map; + reg = <0 0x91600000 0 0x00600000>; + label = "lk_pool"; + }; + + early_camera_mem: early_camera_mem@b3fff000 { + reg = <0 0xb3fff000 0 0x800000>; + label = "early_camera_mem"; + }; + }; +}; diff --git a/arch/arm/boot/dts/qcom/msm8996-auto-cdp.dtsi b/arch/arm/boot/dts/qcom/msm8996-auto-cdp.dtsi index 7c07102a1fed..c3b986786034 100644 --- a/arch/arm/boot/dts/qcom/msm8996-auto-cdp.dtsi +++ b/arch/arm/boot/dts/qcom/msm8996-auto-cdp.dtsi @@ -333,7 +333,7 @@ }; }; -&mdss_mdp { +&sde_kms { qcom,mdss-pref-prim-intf = "dsi"; qcom,sde-plane-id-map { qcom,sde-plane-id@0 { @@ -623,6 +623,90 @@ }; &soc { + qcom,early-cam { + cell-index = <0>; + compatible = "qcom,early-cam"; + status = "ok"; + mmagic-supply = <&gdsc_mmagic_camss>; + gdscr-supply = <&gdsc_camss_top>; + vfe0-vdd-supply = <&gdsc_vfe0>; + qcom,cam-vreg-name = "mmagic", "gdscr", "vfe0-vdd"; + clocks = <&clock_mmss clk_mmss_mmagic_ahb_clk>, + <&clock_mmss clk_camss_top_ahb_clk>, + <&clock_mmss clk_cci_clk_src>, + <&clock_mmss clk_camss_cci_ahb_clk>, + <&clock_mmss clk_camss_cci_clk>, + <&clock_mmss clk_camss_ahb_clk>, + <&clock_mmss clk_mmagic_camss_axi_clk>, + <&clock_mmss clk_camss_vfe_ahb_clk>, + <&clock_mmss clk_camss_vfe0_ahb_clk>, + <&clock_mmss clk_camss_vfe_axi_clk>, + <&clock_mmss clk_camss_vfe0_stream_clk>, + <&clock_mmss clk_smmu_vfe_axi_clk>, + <&clock_mmss clk_smmu_vfe_ahb_clk>, + <&clock_mmss clk_camss_csi_vfe0_clk>, + <&clock_mmss clk_vfe0_clk_src>, + <&clock_mmss clk_camss_csi_vfe0_clk>, + <&clock_mmss clk_camss_csi2_ahb_clk>, + <&clock_mmss clk_camss_csi2_clk>, + <&clock_mmss clk_camss_csi2phy_clk>, + <&clock_mmss clk_csi2phytimer_clk_src>, + <&clock_mmss clk_camss_csi2phytimer_clk>, + <&clock_mmss clk_camss_csi2rdi_clk>, + <&clock_mmss clk_camss_ispif_ahb_clk>, + <&clock_mmss clk_camss_vfe0_clk>; + clock-names = + "mmss_mmagic_ahb_clk", + "camss_top_ahb_clk", + "cci_clk_src", + "camss_cci_ahb_clk", + "camss_cci_clk", + "camss_ahb_clk", + "mmagic_camss_axi_clk", + "camss_vfe_ahb_clk", + "camss_vfe0_ahb_clk", + "camss_vfe_axi_clk", + "camss_vfe0_stream_clk", + "smmu_vfe_axi_clk", + "smmu_vfe_ahb_clk", + "camss_csi_vfe0_clk", + "vfe0_clk_src", + "camss_csi_vfe0_clk", + "camss_csi2_ahb_clk", + "camss_csi2_clk", + "camss_csi2phy_clk", + "csi2phytimer_clk_src", + "camss_csi2phytimer_clk", + "camss_csi2rdi_clk", + "camss_ispif_ahb_clk", + "clk_camss_vfe0_clk"; + + qcom,clock-rates = <19200000 + 19200000 + 19200000 + 19200000 + 19200000 + 19200000 + 0 + 0 + 0 + 320000000 + 0 + 0 + 0 + 0 + 19200000 + 0 + 0 + 200000000 + 200000000 + 200000000 + 200000000 + 200000000 + 0 + 100000000>; + }; + ntn1: 
ntn_avb@1 { /* Neutrno device on RC1*/ compatible = "qcom,ntn_avb"; @@ -635,6 +719,8 @@ qcom,ntn-rst-delay-msec = <100>; qcom,ntn-rc-num = <1>; qcom,ntn-bus-num = <1>; + qcom,ntn-mdio-bus-id = <1>; + qcom,ntn-phy-addr = <7>; qcom,msm-bus,name = "ntn"; qcom,msm-bus,num-cases = <2>; @@ -649,6 +735,7 @@ qcom,ntn-rst-delay-msec = <100>; qcom,ntn-rc-num = <2>; qcom,ntn-bus-num = <1>; + qcom,ntn-mdio-bus-id = <2>; qcom,msm-bus,name = "ntn"; qcom,msm-bus,num-cases = <2>; @@ -835,6 +922,10 @@ <&afe_proxy_rx>, <&afe_proxy_tx>, <&incall_record_rx>, <&incall_record_tx>, <&incall_music_rx>, <&incall_music2_rx>, + <&dai_pri_tdm_tx_0>, <&dai_pri_tdm_tx_1>, + <&dai_pri_tdm_tx_2>, <&dai_pri_tdm_tx_3>, + <&dai_pri_tdm_rx_0>, <&dai_pri_tdm_rx_1>, + <&dai_pri_tdm_rx_2>, <&dai_pri_tdm_rx_3>, <&dai_sec_tdm_tx_0>, <&dai_sec_tdm_tx_1>, <&dai_sec_tdm_tx_2>, <&dai_sec_tdm_tx_3>, <&dai_tert_tdm_rx_0>, <&dai_tert_tdm_rx_1>, @@ -853,6 +944,10 @@ "msm-dai-q6-dev.241", "msm-dai-q6-dev.240", "msm-dai-q6-dev.32771", "msm-dai-q6-dev.32772", "msm-dai-q6-dev.32773", "msm-dai-q6-dev.32770", + "msm-dai-q6-tdm.36865", "msm-dai-q6-tdm.36867", + "msm-dai-q6-tdm.36869", "msm-dai-q6-tdm.36871", + "msm-dai-q6-tdm.36864", "msm-dai-q6-tdm.36866", + "msm-dai-q6-tdm.36868", "msm-dai-q6-tdm.36870", "msm-dai-q6-tdm.36881", "msm-dai-q6-tdm.36883", "msm-dai-q6-tdm.36885", "msm-dai-q6-tdm.36887", "msm-dai-q6-tdm.36896", "msm-dai-q6-tdm.36898", @@ -868,11 +963,12 @@ asoc-codec-names = "msm-stub-codec.1"; }; - usb_detect { + usb_detect: usb_detect { compatible = "qcom,gpio-usbdetect"; + qcom,vbus-det-gpio = <&pm8994_gpios 17 0>; interrupt-parent = <&spmi_bus>; - interrupts = <0x0 0xd0 0x0>; /* PM8994 GPIO17 */ - interrupt-names = "vbus_det_irq"; + interrupts = <0x0 0x9 0x0 IRQ_TYPE_NONE>; + interrupt-names ="pmic_id_irq"; }; loopback1: qcom,msm-pcm-loopback-low-latency { @@ -1071,18 +1167,10 @@ }; &usb3 { - interrupt-parent = <&usb3>; - interrupts = <0 1 2 3>; - #interrupt-cells = <1>; - interrupt-map-mask = <0x0 0xffffffff>; - interrupt-map = <0x0 0 &intc 0 0 347 0 - 0x0 1 &intc 0 0 243 0 - 0x0 2 &intc 0 0 180 0 - 0x0 3 &spmi_bus 0x0 0x0 0x9 0x0>; - interrupt-names = "hs_phy_irq", "ss_phy_irq", "pwr_event_irq", - "pmic_id_irq"; - + extcon = <&usb_detect>; vbus_dwc3-supply = <&usb_otg_switch>; + vdda33-supply = <&pm8994_l24>; + vdda18-supply = <&pm8994_l12>; }; &blsp1_uart2 { @@ -1227,4 +1315,22 @@ /delete-property/ qcom,spkr-sd-n-gpio; }; +&vfe_smmu { + qcom,no-smr-check; +}; + +/ { + reserved-memory { + lk_mem: lk_pool@0x91600000 { + no-map; + reg = <0 0x91600000 0 0x00600000>; + label = "lk_pool"; + }; + + early_camera_mem: early_camera_mem@b3fff000 { + reg = <0 0xb3fff000 0 0x800000>; + label = "early_camera_mem"; + }; + }; +}; diff --git a/arch/arm/boot/dts/qcom/msm8996-mdss-panels.dtsi b/arch/arm/boot/dts/qcom/msm8996-mdss-panels.dtsi index 18a0f29e4d8a..94be1082c2be 100644 --- a/arch/arm/boot/dts/qcom/msm8996-mdss-panels.dtsi +++ b/arch/arm/boot/dts/qcom/msm8996-mdss-panels.dtsi @@ -1,4 +1,4 @@ -/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -24,6 +24,7 @@ #include "dsi-panel-nt35597-dsc-wqxga-cmd.dtsi" #include "dsi-panel-hx8379a-truly-fwvga-video.dtsi" #include "dsi-panel-r69007-dualdsi-wqxga-cmd.dtsi" +#include "dsi-adv7533-1024-600p.dtsi" #include "dsi-adv7533-720p.dtsi" #include "dsi-adv7533-1080p.dtsi" #include "dsi-panel-nt35950-dsc-4k-cmd.dtsi" diff --git a/arch/arm/boot/dts/qcom/msm8996-mdss.dtsi b/arch/arm/boot/dts/qcom/msm8996-mdss.dtsi index 3186f96b4275..12fa68fac0fb 100644 --- a/arch/arm/boot/dts/qcom/msm8996-mdss.dtsi +++ b/arch/arm/boot/dts/qcom/msm8996-mdss.dtsi @@ -1,4 +1,4 @@ -/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -459,6 +459,15 @@ qcom,mdss_pan_bpp = <24>; }; + msm_ext_disp: qcom,msm_ext_disp { + compatible = "qcom,msm-ext-disp"; + + ext_disp_audio_codec: qcom,msm-ext-disp-audio-codec-rx { + compatible = "qcom,msm-ext-disp-audio-codec-rx"; + qcom,msm_ext_disp = <&msm_ext_disp>; + }; + }; + mdss_hdmi_tx: qcom,hdmi_tx@9a0000 { cell-index = <0>; compatible = "qcom,hdmi-tx"; @@ -476,13 +485,14 @@ qcom,enable-load = <0>; qcom,disable-load = <0>; + qcom,msm_ext_disp = <&msm_ext_disp>; clocks = <&clock_mmss clk_mdss_mdp_vote_clk>, <&clock_mmss clk_mdss_ahb_clk>, <&clock_mmss clk_mdss_hdmi_clk>, <&clock_mmss clk_mdss_hdmi_ahb_clk>, <&clock_mmss clk_mdss_extpclk_clk>; - clock-names = "mdp_core_clk", "iface_clk", - "core_clk", "alt_iface_clk", "extp_clk"; + clock-names = "hpd_mdp_core_clk", "hpd_iface_clk", + "hpd_core_clk", "hpd_alt_iface_clk", "core_extp_clk"; qcom,hdmi-tx-hpd = <&pm8994_mpps 4 0>; qcom,mdss-fb-map = <&mdss_fb2>; diff --git a/arch/arm/boot/dts/qcom/msm8996-mmxf-adp.dtsi b/arch/arm/boot/dts/qcom/msm8996-mmxf-adp.dtsi index d3ea51268590..c8898ec01992 100644 --- a/arch/arm/boot/dts/qcom/msm8996-mmxf-adp.dtsi +++ b/arch/arm/boot/dts/qcom/msm8996-mmxf-adp.dtsi @@ -538,6 +538,10 @@ <&afe_proxy_rx>, <&afe_proxy_tx>, <&incall_record_rx>, <&incall_record_tx>, <&incall_music_rx>, <&incall_music2_rx>, + <&dai_pri_tdm_tx_0>, <&dai_pri_tdm_tx_1>, + <&dai_pri_tdm_tx_2>, <&dai_pri_tdm_tx_3>, + <&dai_pri_tdm_rx_0>, <&dai_pri_tdm_rx_1>, + <&dai_pri_tdm_rx_2>, <&dai_pri_tdm_rx_3>, <&dai_sec_tdm_tx_0>, <&dai_sec_tdm_tx_1>, <&dai_sec_tdm_tx_2>, <&dai_sec_tdm_tx_3>, <&dai_tert_tdm_rx_0>, <&dai_tert_tdm_rx_1>, @@ -556,6 +560,10 @@ "msm-dai-q6-dev.241", "msm-dai-q6-dev.240", "msm-dai-q6-dev.32771", "msm-dai-q6-dev.32772", "msm-dai-q6-dev.32773", "msm-dai-q6-dev.32770", + "msm-dai-q6-tdm.36865", "msm-dai-q6-tdm.36867", + "msm-dai-q6-tdm.36869", "msm-dai-q6-tdm.36871", + "msm-dai-q6-tdm.36864", "msm-dai-q6-tdm.36866", + "msm-dai-q6-tdm.36868", "msm-dai-q6-tdm.36870", "msm-dai-q6-tdm.36881", "msm-dai-q6-tdm.36883", "msm-dai-q6-tdm.36885", "msm-dai-q6-tdm.36887", "msm-dai-q6-tdm.36896", "msm-dai-q6-tdm.36898", diff --git a/arch/arm/boot/dts/qcom/msm8996-mtp.dtsi b/arch/arm/boot/dts/qcom/msm8996-mtp.dtsi index ab10a71d1fd7..0bd9b02f3d2e 100644 --- a/arch/arm/boot/dts/qcom/msm8996-mtp.dtsi +++ b/arch/arm/boot/dts/qcom/msm8996-mtp.dtsi @@ -1,4 +1,4 @@ -/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -343,7 +343,7 @@ qcom,mdss-pref-prim-intf = "dsi"; }; -&mdss_hdmi { +&sde_hdmi { status = "ok"; }; diff --git a/arch/arm/boot/dts/qcom/msm8996-pinctrl.dtsi b/arch/arm/boot/dts/qcom/msm8996-pinctrl.dtsi index ff128acb376a..316859a65801 100644 --- a/arch/arm/boot/dts/qcom/msm8996-pinctrl.dtsi +++ b/arch/arm/boot/dts/qcom/msm8996-pinctrl.dtsi @@ -1449,7 +1449,7 @@ }; cnss_pins { - cnss_default: cnss_default { + cnss_bootstrap_active: cnss_bootstrap_active { mux { pins = "gpio46"; function = "gpio"; @@ -1458,6 +1458,20 @@ config { pins = "gpio46"; drive-strength = <16>; + output-high; + bias-pull-up; + }; + }; + cnss_bootstrap_sleep: cnss_bootstrap_sleep { + mux { + pins = "gpio46"; + function = "gpio"; + }; + + config { + pins = "gpio46"; + drive-strength = <2>; + output-low; bias-pull-down; }; }; diff --git a/arch/arm/boot/dts/qcom/msm8996-regulator.dtsi b/arch/arm/boot/dts/qcom/msm8996-regulator.dtsi index b86542a174da..936dfd4d1cb2 100644 --- a/arch/arm/boot/dts/qcom/msm8996-regulator.dtsi +++ b/arch/arm/boot/dts/qcom/msm8996-regulator.dtsi @@ -12,6 +12,7 @@ #include #include +#include &rpm_bus { /* PM8994 S1 + S6 = 2 phase VDD_CX supply */ @@ -1917,6 +1918,13 @@ gpio = <&pm8994_gpios 9 0>; }; + wlan_en_vreg: wlan_en_vreg { + compatible = "regulator-fixed"; + regulator-name = "wlan_en_vreg"; + enable-active-high; + gpio = <&pm8994_gpios 8 0>; + }; + hl7509_en_vreg: hl7509_en_vreg { compatible = "regulator-fixed"; regulator-name = "hl7509_en_vreg"; @@ -1967,4 +1975,16 @@ onnn,restore-reg; status = "disabled"; }; + + max20010_vreg: max20010-regulator@38 { + compatible = "maxim,max20010"; + reg = <0x38>; + vin-supply = <&hl7509_en_vreg>; + regulator-min-microvolt = <600000>; + regulator-max-microvolt = <1270000>; + regulator-initial-mode = ; + maxim,vrange-sel = <0>; + maxim,soft-start-slew-rate = <5500>; + maxim,dvs-slew-rate = <5500>; + }; }; diff --git a/arch/arm/boot/dts/qcom/msm8996-sde-display.dtsi b/arch/arm/boot/dts/qcom/msm8996-sde-display.dtsi index 061301f1c479..1c81bc433374 100644 --- a/arch/arm/boot/dts/qcom/msm8996-sde-display.dtsi +++ b/arch/arm/boot/dts/qcom/msm8996-sde-display.dtsi @@ -94,8 +94,8 @@ label = "dsi_dual_sharp_video"; qcom,display-type = "primary"; - qcom,dsi-ctrl = <&mdss_dsi0 &mdss_dsi1>; - qcom,dsi-phy = <&mdss_dsi_phy0 &mdss_dsi_phy1>; + qcom,dsi-ctrl = <&sde_dsi0 &sde_dsi1>; + qcom,dsi-phy = <&sde_dsi_phy0 &sde_dsi_phy1>; clocks = <&clock_mmss clk_ext_byte0_clk_src>, <&clock_mmss clk_ext_pclk0_clk_src>; clock-names = "src_byte_clk", "src_pixel_clk"; @@ -118,8 +118,8 @@ label = "single_dsi_sim"; qcom,display-type = "primary"; - qcom,dsi-ctrl = <&mdss_dsi0>; - qcom,dsi-phy = <&mdss_dsi_phy0>; + qcom,dsi-ctrl = <&sde_dsi0>; + qcom,dsi-phy = <&sde_dsi_phy0>; clocks = <&clock_mmss clk_ext_byte0_clk_src>, <&clock_mmss clk_ext_pclk0_clk_src>; clock-names = "src_byte_clk", "src_pixel_clk"; @@ -140,8 +140,8 @@ label = "single_dsi_toshiba_720p"; qcom,display-type = "primary"; - qcom,dsi-ctrl = <&mdss_dsi0>; - qcom,dsi-phy = <&mdss_dsi_phy0>; + qcom,dsi-ctrl = <&sde_dsi0>; + qcom,dsi-phy = <&sde_dsi_phy0>; clocks = <&clock_mmss clk_ext_byte0_clk_src>, <&clock_mmss clk_ext_pclk0_clk_src>; clock-names = "src_byte_clk", "src_pixel_clk"; @@ -161,8 +161,8 @@ label = "single_dsi_jdi_1080p"; qcom,display-type = "primary"; - qcom,dsi-ctrl = <&mdss_dsi0>; - qcom,dsi-phy = <&mdss_dsi_phy0>; + qcom,dsi-ctrl = <&sde_dsi0>; + qcom,dsi-phy 
= <&sde_dsi_phy0>; clocks = <&clock_mmss clk_ext_byte0_clk_src>, <&clock_mmss clk_ext_pclk0_clk_src>; clock-names = "src_byte_clk", "src_pixel_clk"; @@ -180,8 +180,8 @@ label = "single_dsi_sharp_1080p"; qcom,display-type = "primary"; - qcom,dsi-ctrl = <&mdss_dsi0>; - qcom,dsi-phy = <&mdss_dsi_phy0>; + qcom,dsi-ctrl = <&sde_dsi0>; + qcom,dsi-phy = <&sde_dsi_phy0>; clocks = <&clock_mmss clk_ext_byte0_clk_src>, <&clock_mmss clk_ext_pclk0_clk_src>; clock-names = "src_byte_clk", "src_pixel_clk"; @@ -209,8 +209,8 @@ qcom,display-type = "primary"; /* dsi1/dsi0 swapped due to IMGSWAP */ - qcom,dsi-ctrl = <&mdss_dsi1 &mdss_dsi0>; - qcom,dsi-phy = <&mdss_dsi_phy0 &mdss_dsi_phy1>; + qcom,dsi-ctrl = <&sde_dsi1 &sde_dsi0>; + qcom,dsi-phy = <&sde_dsi_phy0 &sde_dsi_phy1>; clocks = <&clock_mmss clk_ext_byte0_clk_src>, <&clock_mmss clk_ext_pclk0_clk_src>; clock-names = "src_byte_clk", "src_pixel_clk"; @@ -231,8 +231,8 @@ label = "dsi_dual_nt35597_video"; qcom,display-type = "primary"; - qcom,dsi-ctrl = <&mdss_dsi0 &mdss_dsi1>; - qcom,dsi-phy = <&mdss_dsi_phy0 &mdss_dsi_phy1>; + qcom,dsi-ctrl = <&sde_dsi0 &sde_dsi1>; + qcom,dsi-phy = <&sde_dsi_phy0 &sde_dsi_phy1>; clocks = <&clock_mmss clk_ext_byte0_clk_src>, <&clock_mmss clk_ext_pclk0_clk_src>; clock-names = "src_byte_clk", "src_pixel_clk"; @@ -253,8 +253,8 @@ label = "dsi_adv_7533_1"; qcom,display-type = "secondary"; - qcom,dsi-ctrl = <&mdss_dsi0>; - qcom,dsi-phy = <&mdss_dsi_phy0>; + qcom,dsi-ctrl = <&sde_dsi0>; + qcom,dsi-phy = <&sde_dsi_phy0>; clocks = <&clock_mmss clk_ext_byte0_clk_src>, <&clock_mmss clk_ext_pclk0_clk_src>; clock-names = "src_byte_clk", "src_pixel_clk"; @@ -269,8 +269,8 @@ label = "dsi_adv_7533_2"; qcom,display-type = "tertiary"; - qcom,dsi-ctrl = <&mdss_dsi1>; - qcom,dsi-phy = <&mdss_dsi_phy1>; + qcom,dsi-ctrl = <&sde_dsi1>; + qcom,dsi-phy = <&sde_dsi_phy1>; clocks = <&clock_mmss clk_ext_byte1_clk_src>, <&clock_mmss clk_ext_pclk1_clk_src>; clock-names = "src_byte_clk", "src_pixel_clk"; @@ -297,8 +297,8 @@ }; }; -&mdss_mdp { - connectors = <&mdss_hdmi &sde_hdmi &dsi_adv_7533_1 &dsi_adv_7533_2>; +&sde_kms { + connectors = <&sde_hdmi_tx &sde_hdmi &dsi_adv_7533_1 &dsi_adv_7533_2>; }; &dsi_dual_sharp_video { diff --git a/arch/arm/boot/dts/qcom/msm8996-sde.dtsi b/arch/arm/boot/dts/qcom/msm8996-sde.dtsi index 1915c9377cf7..b0688668e667 100644 --- a/arch/arm/boot/dts/qcom/msm8996-sde.dtsi +++ b/arch/arm/boot/dts/qcom/msm8996-sde.dtsi @@ -11,7 +11,7 @@ */ &soc { - mdss_mdp: qcom,mdss_mdp@900000 { + sde_kms: qcom,sde_kms@900000 { compatible = "qcom,sde-kms"; reg = <0x00900000 0x90000>, <0x009b0000 0x1040>, @@ -182,8 +182,8 @@ }; }; - smmu_mdp_unsec: qcom,smmu_mdp_unsec_cb { - compatible = "qcom,smmu_mdp_unsec"; + smmu_kms_unsec: qcom,smmu_kms_unsec_cb { + compatible = "qcom,smmu_kms_unsec"; iommus = <&mdp_smmu 0>; }; @@ -213,7 +213,7 @@ }; }; - mdss_dsi0: qcom,mdss_dsi_ctrl0@994000 { + sde_dsi0: qcom,sde_dsi_ctrl0@994000 { compatible = "qcom,dsi-ctrl-hw-v1.4"; label = "dsi-ctrl-0"; cell-index = <0>; @@ -248,7 +248,7 @@ <22 512 0 0>, <22 512 0 1000>; - interrupt-parent = <&mdss_mdp>; + interrupt-parent = <&sde_kms>; interrupts = <4 0>; qcom,core-supply-entries { #address-cells = <1>; @@ -289,7 +289,7 @@ }; }; - mdss_dsi1: qcom,mdss_dsi_ctrl1@996000 { + sde_dsi1: qcom,sde_dsi_ctrl1@996000 { compatible = "qcom,dsi-ctrl-hw-v1.4"; label = "dsi-ctrl-1"; cell-index = <1>; @@ -323,7 +323,7 @@ <22 512 0 0>, <22 512 0 1000>; - interrupt-parent = <&mdss_mdp>; + interrupt-parent = <&sde_kms>; interrupts = <5 0>; qcom,core-supply-entries { #address-cells = 
<1>; @@ -363,7 +363,7 @@ }; }; - mdss_dsi_phy0: qcom,mdss_dsi_phy0@994400 { + sde_dsi_phy0: qcom,sde_dsi_phy0@994400 { compatible = "qcom,dsi-phy-v4.0"; label = "dsi-phy-0"; cell-index = <0>; @@ -422,7 +422,7 @@ }; }; - mdss_dsi_phy1: qcom,mdss_dsi_phy1@996400 { + sde_dsi_phy1: qcom,sde_dsi_phy1@996400 { compatible = "qcom,dsi-phy-v4.0"; label = "dsi-phy-1"; cell-index = <1>; @@ -481,7 +481,7 @@ }; }; - mdss_hdmi: qcom,hdmi_tx@9a0000 { + sde_hdmi_tx: qcom,hdmi_tx_8996@9a0000 { compatible = "qcom,hdmi-tx-8996"; reg = <0x009a0000 0x50c>, @@ -501,7 +501,7 @@ "core_clk", "alt_iface_clk", "extp_clk"; - interrupt-parent = <&mdss_mdp>; + interrupt-parent = <&sde_kms>; interrupts = <8 0>; hpd-gdsc-supply = <&gdsc_mdss>; qcom,hdmi-tx-hpd-gpio = <&pm8994_mpps 4 0>; @@ -513,23 +513,8 @@ &mdss_hdmi_ddc_suspend &mdss_hdmi_cec_suspend>; - hdmi_audio: qcom,msm-hdmi-audio-rx { + sde_hdmi_audio: qcom,sde-hdmi-audio-rx { compatible = "qcom,msm-hdmi-audio-codec-rx"; }; }; }; - -/* dummy nodes for compatibility with 8996 mdss dtsi */ -&soc { - mdss_dsi: qcom,mdss_dsi_dummy { - /* dummy node for backward compatibility */ - }; - - mdss_hdmi_tx: qcom,mdss_hdmi_tx_dummy { - /* dummy node for backward compatibility */ - }; - - mdss_fb2: qcom,mdss_fb2_dummy { - /* dummy node for backward compatibility */ - }; -}; diff --git a/arch/arm/boot/dts/qcom/msm8996-v2.dtsi b/arch/arm/boot/dts/qcom/msm8996-v2.dtsi index 9725bc3ee530..698c0193a164 100644 --- a/arch/arm/boot/dts/qcom/msm8996-v2.dtsi +++ b/arch/arm/boot/dts/qcom/msm8996-v2.dtsi @@ -1,4 +1,4 @@ -/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -480,7 +480,7 @@ gdsc-venus-supply = <&gdsc_venus>; }; -&mdss_hdmi { +&sde_hdmi_tx { hpd-gdsc-venus-supply = <&gdsc_venus>; }; diff --git a/arch/arm/boot/dts/qcom/msm8996-vidc.dtsi b/arch/arm/boot/dts/qcom/msm8996-vidc.dtsi index 5ac31e3dd0cb..21aa1db446e2 100644 --- a/arch/arm/boot/dts/qcom/msm8996-vidc.dtsi +++ b/arch/arm/boot/dts/qcom/msm8996-vidc.dtsi @@ -226,6 +226,7 @@ clocks = <&clock_mmss clk_vmem_ahb_clk>, <&clock_mmss clk_vmem_maxi_clk>; clock-names = "ahb", "maxi"; + clock-config = <0x0 0x0 0x0 0x1>; qcom,msm-bus,name = "vmem"; qcom,msm-bus,num-cases = <2>; diff --git a/arch/arm/boot/dts/qcom/msm8996.dtsi b/arch/arm/boot/dts/qcom/msm8996.dtsi index 7c3f035a841b..f5e059484c95 100644 --- a/arch/arm/boot/dts/qcom/msm8996.dtsi +++ b/arch/arm/boot/dts/qcom/msm8996.dtsi @@ -242,6 +242,7 @@ #include "msm8996-ion.dtsi" #include "msm8996-sde.dtsi" +#include "msm8996-mdss.dtsi" #include "msm8996-mdss-pll.dtsi" #include "msm8996-smp2p.dtsi" #include "msm8996-ipcrouter.dtsi" @@ -1064,6 +1065,7 @@ qcom,pm-qos-irq-cpu = <0>; qcom,pm-qos-irq-latency = <70 70>; + non-removable; status = "disabled"; }; @@ -1268,6 +1270,7 @@ qcom,pm-qos-cpu-group-latency-us = <70 70>; qcom,pm-qos-default-cpu = <0>; + non-removable; status = "disabled"; }; @@ -2332,15 +2335,17 @@ qcom,cnss { compatible = "qcom,cnss"; wlan-bootstrap-gpio = <&tlmm 46 0>; - wlan-en-gpio = <&pm8994_gpios 8 0>; + vdd-wlan-en-supply = <&wlan_en_vreg>; vdd-wlan-supply = <&rome_vreg>; vdd-wlan-io-supply = <&pm8994_s4>; vdd-wlan-xtal-supply = <&pm8994_l30>; vdd-wlan-core-supply = <&pm8994_s3>; wlan-ant-switch-supply = <&pm8994_l18_pin_ctrl>; + qcom,wlan-en-vreg-support; qcom,notify-modem-status; - pinctrl-names = "default"; - pinctrl-0 = 
<&cnss_default>; + pinctrl-names = "bootstrap_active", "bootstrap_sleep"; + pinctrl-0 = <&cnss_bootstrap_active>; + pinctrl-1 = <&cnss_bootstrap_sleep>; qcom,wlan-rc-num = <0>; qcom,wlan-ramdump-dynamic = <0x200000>; @@ -3352,6 +3357,82 @@ }; }; + qcom,msm-dai-tdm-pri-rx { + compatible = "qcom,msm-dai-tdm"; + qcom,msm-cpudai-tdm-group-id = <37120>; + qcom,msm-cpudai-tdm-group-num-ports = <4>; + qcom,msm-cpudai-tdm-group-port-id = <36864 36866 36868 36870>; + qcom,msm-cpudai-tdm-clk-rate = <12288000>; + qcom,msm-cpudai-tdm-clk-internal = <1>; + qcom,msm-cpudai-tdm-sync-mode = <0>; + qcom,msm-cpudai-tdm-sync-src = <1>; + qcom,msm-cpudai-tdm-data-out = <0>; + qcom,msm-cpudai-tdm-invert-sync = <0>; + qcom,msm-cpudai-tdm-data-delay = <1>; + qcom,msm-cpudai-tdm-clk-attribute = /bits/ 16 <1>; + dai_pri_tdm_rx_0: qcom,msm-dai-q6-tdm-pri-rx-0 { + compatible = "qcom,msm-dai-q6-tdm"; + qcom,msm-cpudai-tdm-dev-id = <36864>; + qcom,msm-cpudai-tdm-data-align = <0>; + }; + + dai_pri_tdm_rx_1: qcom,msm-dai-q6-tdm-pri-rx-1 { + compatible = "qcom,msm-dai-q6-tdm"; + qcom,msm-cpudai-tdm-dev-id = <36866>; + qcom,msm-cpudai-tdm-data-align = <0>; + }; + + dai_pri_tdm_rx_2: qcom,msm-dai-q6-tdm-pri-rx-2 { + compatible = "qcom,msm-dai-q6-tdm"; + qcom,msm-cpudai-tdm-dev-id = <36868>; + qcom,msm-cpudai-tdm-data-align = <0>; + }; + + dai_pri_tdm_rx_3: qcom,msm-dai-q6-tdm-pri-rx-3 { + compatible = "qcom,msm-dai-q6-tdm"; + qcom,msm-cpudai-tdm-dev-id = <36870>; + qcom,msm-cpudai-tdm-data-align = <0>; + }; + }; + + qcom,msm-dai-tdm-pri-tx { + compatible = "qcom,msm-dai-tdm"; + qcom,msm-cpudai-tdm-group-id = <37121>; + qcom,msm-cpudai-tdm-group-num-ports = <4>; + qcom,msm-cpudai-tdm-group-port-id = <36865 36867 36869 36871>; + qcom,msm-cpudai-tdm-clk-rate = <12288000>; + qcom,msm-cpudai-tdm-clk-internal = <1>; + qcom,msm-cpudai-tdm-sync-mode = <0>; + qcom,msm-cpudai-tdm-sync-src = <1>; + qcom,msm-cpudai-tdm-data-out = <0>; + qcom,msm-cpudai-tdm-invert-sync = <0>; + qcom,msm-cpudai-tdm-data-delay = <1>; + qcom,msm-cpudai-tdm-clk-attribute = /bits/ 16 <1>; + dai_pri_tdm_tx_0: qcom,msm-dai-q6-tdm-pri-tx-0 { + compatible = "qcom,msm-dai-q6-tdm"; + qcom,msm-cpudai-tdm-dev-id = <36865>; + qcom,msm-cpudai-tdm-data-align = <0>; + }; + + dai_pri_tdm_tx_1: qcom,msm-dai-q6-tdm-pri-tx-1 { + compatible = "qcom,msm-dai-q6-tdm"; + qcom,msm-cpudai-tdm-dev-id = <36867>; + qcom,msm-cpudai-tdm-data-align = <0>; + }; + + dai_pri_tdm_tx_2: qcom,msm-dai-q6-tdm-pri-tx-2 { + compatible = "qcom,msm-dai-q6-tdm"; + qcom,msm-cpudai-tdm-dev-id = <36869>; + qcom,msm-cpudai-tdm-data-align = <0>; + }; + + dai_pri_tdm_tx_3: qcom,msm-dai-q6-tdm-pri-tx-3 { + compatible = "qcom,msm-dai-q6-tdm"; + qcom,msm-cpudai-tdm-dev-id = <36871>; + qcom,msm-cpudai-tdm-data-align = <0>; + }; + }; + qcom,msm-dai-tdm-sec-tx { compatible = "qcom,msm-dai-tdm"; qcom,msm-cpudai-tdm-group-id = <37137>; diff --git a/arch/arm/boot/dts/qcom/msm8996pro-auto-adp-lite.dts b/arch/arm/boot/dts/qcom/msm8996pro-auto-adp-lite.dts index 668cb2844363..f5c33063643d 100644 --- a/arch/arm/boot/dts/qcom/msm8996pro-auto-adp-lite.dts +++ b/arch/arm/boot/dts/qcom/msm8996pro-auto-adp-lite.dts @@ -49,6 +49,14 @@ status = "disabled"; }; }; + + qcom,adv7481@70 { + status = "disabled"; + }; + + qcom,msm-ba { + status = "disabled"; + }; }; &pil_modem { diff --git a/arch/arm/boot/dts/qcom/msm8996pro-auto-adp.dts b/arch/arm/boot/dts/qcom/msm8996pro-auto-adp.dts index 1ab8ee9cd538..d2aa5c854c83 100644 --- a/arch/arm/boot/dts/qcom/msm8996pro-auto-adp.dts +++ b/arch/arm/boot/dts/qcom/msm8996pro-auto-adp.dts 
@@ -1,4 +1,4 @@ -/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -22,7 +22,7 @@ model = "Qualcomm Technologies, Inc. MSM 8996pro AUTO ADP"; compatible = "qcom,msm8996-adp", "qcom,msm8996", "qcom,adp"; qcom,msm-id = <315 0x10000>; - qcom,board-id = <0x02010019 0>; + qcom,board-id = <0x02010019 0>, <0x00010001 0>; }; &spi_9 { @@ -46,6 +46,11 @@ qcom,hotplug-temp-hysteresis = <25>; qcom,therm-reset-temp = <119>; }; + + qcom,adv7481@70 { + qcom,cam-vreg-min-voltage = <1300000 0 1800000>; + qcom,cam-vreg-max-voltage = <1300000 0 1800000>; + }; }; &pil_modem { diff --git a/arch/arm/boot/dts/qcom/msm8996pro-auto.dtsi b/arch/arm/boot/dts/qcom/msm8996pro-auto.dtsi index 48d544e18889..15295639e361 100644 --- a/arch/arm/boot/dts/qcom/msm8996pro-auto.dtsi +++ b/arch/arm/boot/dts/qcom/msm8996pro-auto.dtsi @@ -459,3 +459,12 @@ < 0 0 >, < 315000000 4 >; }; + +/* GPU overrides for auto */ +&msm_gpu { + qcom,gpu-pwrlevel-bins { + qcom,gpu-pwrlevels-0 { + qcom,initial-pwrlevel = <1>; + }; + }; +}; diff --git a/arch/arm/boot/dts/qcom/msm8996pro.dtsi b/arch/arm/boot/dts/qcom/msm8996pro.dtsi index ca89a517df5c..b9a2ccb973f2 100644 --- a/arch/arm/boot/dts/qcom/msm8996pro.dtsi +++ b/arch/arm/boot/dts/qcom/msm8996pro.dtsi @@ -22,7 +22,7 @@ qcom,msm-id = <305 0x10000>; chosen { - bootargs = "fpsimd.fpsimd_settings=1 app_setting.use_app_setting=0 app_setting.use_32bit_app_setting_pro=1"; + bootargs = "lpm_levels.sleep_disabled=1 fpsimd.fpsimd_settings=1 app_setting.use_app_setting=0 app_setting.use_32bit_app_setting_pro=1"; }; }; @@ -1331,6 +1331,10 @@ qcom,poll-ms = <50>; qcom,limit-temp = <80>; qcom,core-limit-temp = <90>; + msm_thermal_freq: qcom,vdd-apps-rstr { + qcom,max-freq-level = <1209600>; + qcom,levels = <1056000 1516800 1516800>; + }; qcom,vdd-gfx-rstr{ qcom,levels = <6 8 9>; /* Nominal, Turbo, Turbo_L1 */ }; diff --git a/arch/arm/boot/dts/qcom/msm8996v3-auto.dtsi b/arch/arm/boot/dts/qcom/msm8996v3-auto.dtsi index 32adb9a36dd4..355062adf7ef 100644 --- a/arch/arm/boot/dts/qcom/msm8996v3-auto.dtsi +++ b/arch/arm/boot/dts/qcom/msm8996v3-auto.dtsi @@ -1,4 +1,4 @@ -/* Copyright (c) 2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2016 - 2017, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -167,3 +167,40 @@ < 560000000 7 >, < 624000000 7 >; }; + +&soc { + ipa_hw: qcom,ipa@680000 { + compatible = "qcom,ipa"; + reg = <0x680000 0x4effc>, + <0x684000 0x26934>; + reg-names = "ipa-base", "bam-base"; + interrupts = <0 333 0>, + <0 432 0>; + interrupt-names = "ipa-irq", "bam-irq"; + qcom,ipa-hw-ver = <5>; /* IPA core version = IPAv2.5 */ + qcom,ipa-hw-mode = <0>; + qcom,ee = <0>; + qcom,use-ipa-tethering-bridge; + qcom,ipa-bam-remote-mode; + qcom,modem-cfg-emb-pipe-flt; + clocks = <&clock_gcc clk_ipa_clk>; + clock-names = "core_clk"; + qcom,use-dma-zone; + qcom,msm-bus,name = "ipa"; + qcom,msm-bus,num-cases = <3>; + qcom,msm-bus,num-paths = <2>; + qcom,msm-bus,vectors-KBps = + <90 512 0 0>, <90 585 0 0>, /* No vote */ + <90 512 80000 640000>, <90 585 80000 640000>, /* SVS */ + <90 512 206000 960000>, <90 585 206000 960000>; /* PERF */ + qcom,bus-vector-names = "MIN", "SVS", "PERF"; + }; + + qcom,rmnet-ipa { + compatible = "qcom,rmnet-ipa"; + qcom,rmnet-ipa-ssr; + qcom,ipa-loaduC; + qcom,ipa-advertise-sg-support; + }; +}; + diff --git a/arch/arm/boot/dts/qcom/msm8998-9x55-cdp.dts b/arch/arm/boot/dts/qcom/msm8998-9x55-cdp.dts new file mode 100644 index 000000000000..cf167897bb89 --- /dev/null +++ b/arch/arm/boot/dts/qcom/msm8998-9x55-cdp.dts @@ -0,0 +1,24 @@ +/* + * Copyright (c) 2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +/dts-v1/; + +#include "msm8998-9x55.dtsi" +#include "msm8998-mdss-panels.dtsi" +#include "msm8998-cdp.dtsi" + +/ { + model = "Qualcomm Technologies, Inc. MSM8998-9x55 CDP"; + compatible = "qcom,msm8998-9x55-cdp", "qcom,msm8998-9x55", "qcom,cdp"; + qcom,board-id= <1 2>; +}; diff --git a/arch/arm/boot/dts/qcom/msm8998-9x55-mtp.dts b/arch/arm/boot/dts/qcom/msm8998-9x55-mtp.dts new file mode 100644 index 000000000000..a95e9e4f272f --- /dev/null +++ b/arch/arm/boot/dts/qcom/msm8998-9x55-mtp.dts @@ -0,0 +1,24 @@ +/* + * Copyright (c) 2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +/dts-v1/; + +#include "msm8998-9x55.dtsi" +#include "msm8998-mdss-panels.dtsi" +#include "msm8998-mtp.dtsi" + +/ { + model = "Qualcomm Technologies, Inc. 
MSM8998-9x55 MTP"; + compatible = "qcom,msm8998-9x55-mtp", "qcom,msm8998-9x55", "qcom,mtp"; + qcom,board-id= <8 6>; +}; diff --git a/arch/arm/boot/dts/qcom/msm8998-9x55-rcm.dts b/arch/arm/boot/dts/qcom/msm8998-9x55-rcm.dts new file mode 100644 index 000000000000..094ecbc50061 --- /dev/null +++ b/arch/arm/boot/dts/qcom/msm8998-9x55-rcm.dts @@ -0,0 +1,24 @@ +/* + * Copyright (c) 2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +/dts-v1/; + +#include "msm8998-9x55.dtsi" +#include "msm8998-mdss-panels.dtsi" +#include "msm8998-cdp.dtsi" + +/ { + model = "Qualcomm Technologies, Inc. MSM8998-9x55 RCM"; + compatible = "qcom,msm8998-9x55-cdp", "qcom,msm8998-9x55", "qcom,cdp"; + qcom,board-id= <0x21 2>; +}; diff --git a/arch/arm/boot/dts/qcom/msm8998-9x55.dtsi b/arch/arm/boot/dts/qcom/msm8998-9x55.dtsi new file mode 100644 index 000000000000..be947507e398 --- /dev/null +++ b/arch/arm/boot/dts/qcom/msm8998-9x55.dtsi @@ -0,0 +1,25 @@ +/* + * Copyright (c) 2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + + +#include "skeleton64.dtsi" +#include "msm8998-v2.1.dtsi" + +/ { + model = "Qualcomm Technologies, Inc. MSM8998-9x55"; + compatible = "qcom,msm8998-9x55"; + qcom,msm-id = <292 0x0>; + interrupt-parent = <&intc>; + + soc: soc { }; +}; diff --git a/arch/arm/boot/dts/qcom/msm8998-camera-sensor-qrd-vr1.dtsi b/arch/arm/boot/dts/qcom/msm8998-camera-sensor-qrd-vr1.dtsi index 14567c3b5010..2af3bf277096 100644 --- a/arch/arm/boot/dts/qcom/msm8998-camera-sensor-qrd-vr1.dtsi +++ b/arch/arm/boot/dts/qcom/msm8998-camera-sensor-qrd-vr1.dtsi @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, The Linux Foundation. All rights reserved. + * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -345,6 +345,60 @@ clock-names = "cam_src_clk", "cam_clk"; qcom,clock-rates = <24000000 0>; }; + + /* ToF Camera*/ + qcom,camera@3 { + cell-index = <3>; + compatible = "qcom,camera"; + reg = <0x3>; + qcom,csiphy-sd-index = <1>; + qcom,csid-sd-index = <3>; + qcom,mount-angle = <90>; + cam_vio-supply = <&pm8998_lvs1>; + qcom,cam-vreg-name = "cam_vio"; + qcom,cam-vreg-min-voltage = <1800000>; + qcom,cam-vreg-max-voltage = <1800000>; + qcom,cam-vreg-op-mode = <80000>; + qcom,gpio-no-mux = <0>; + pinctrl-names = "cam_default", "cam_suspend"; + pinctrl-0 = <&cam_sensor_mclk3_active + &cam_sensor_depth_v1_active + &cam_sensor_depth_v2_active + &cam_sensor_depth_default>; + pinctrl-1 = <&cam_sensor_mclk3_suspend + &cam_sensor_depth_v1_sleep + &cam_sensor_depth_v2_sleep + &cam_sensor_depth_sleep>; + gpios = <&tlmm 16 0>, + <&tlmm 24 0>, + <&tlmm 21 0>, + <&tlmm 28 0>, + <&tlmm 23 0>, + <&tlmm 7 0>; + qcom,gpio-vana = <1>; + qcom,gpio-custom2 = <2>; + qcom,gpio-reset = <3>; + qcom,gpio-custom3 = <4>; + qcom,gpio-custom1 = <5>; + qcom,gpio-req-tbl-num = <0 1 2 3 4 5>; + qcom,gpio-req-tbl-flags = <1 0 0 0 1 1>; + qcom,gpio-req-tbl-label = + "CAMIF_MCLK3", + "CAM_VANA", + "CAM_CUSTOM2", + "CAM_RESET1", + "CAM_CUSTOM3", + "CAM_CUSTOM1"; + qcom,sensor-position = <1>; /* 0 rear */ + qcom,sensor-mode = <0>; + qcom,cci-master = <1>; /* I2C 1 */ + status = "ok"; + clocks = <&clock_mmss clk_mclk3_clk_src>, + <&clock_mmss clk_mmss_camss_mclk3_clk>; + clock-names = "cam_src_clk", "cam_clk"; + qcom,clock-rates = <24000000 0>; + }; + }; &pm8998_gpios { diff --git a/arch/arm/boot/dts/qcom/msm8998-camera-sensor-svr20.dtsi b/arch/arm/boot/dts/qcom/msm8998-camera-sensor-svr20.dtsi new file mode 100644 index 000000000000..9d408ee5f3a7 --- /dev/null +++ b/arch/arm/boot/dts/qcom/msm8998-camera-sensor-svr20.dtsi @@ -0,0 +1,399 @@ +/* + * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +&soc { + led_flash0: qcom,camera-flash@0 { + cell-index = <0>; + compatible = "qcom,camera-flash"; + qcom,flash-source = <&pmi8998_flash0 &pmi8998_flash1>; + qcom,torch-source = <&pmi8998_torch0 &pmi8998_torch1>; + qcom,switch-source = <&pmi8998_switch0>; + status = "ok"; + }; + + led_flash1: qcom,camera-flash@1 { + cell-index = <1>; + compatible = "qcom,camera-flash"; + qcom,flash-source = <&pmi8998_flash2>; + qcom,torch-source = <&pmi8998_torch2>; + qcom,switch-source = <&pmi8998_switch1>; + status = "ok"; + }; +}; + +&cci { + actuator0: qcom,actuator@0 { + cell-index = <0>; + reg = <0x0>; + compatible = "qcom,actuator"; + qcom,cci-master = <0>; + gpios = <&tlmm 27 0>; + qcom,gpio-vaf = <0>; + qcom,gpio-req-tbl-num = <0>; + qcom,gpio-req-tbl-flags = <0>; + qcom,gpio-req-tbl-label = "CAM_VAF"; + pinctrl-names = "cam_default", "cam_suspend"; + pinctrl-0 = <&cam_actuator_vaf_active>; + pinctrl-1 = <&cam_actuator_vaf_suspend>; + }; + + actuator1: qcom,actuator@1 { + cell-index = <1>; + reg = <0x1>; + compatible = "qcom,actuator"; + qcom,cci-master = <1>; + gpios = <&tlmm 27 0>; + qcom,gpio-vaf = <0>; + qcom,gpio-req-tbl-num = <0>; + qcom,gpio-req-tbl-flags = <0>; + qcom,gpio-req-tbl-label = "CAM_VAF"; + pinctrl-names = "cam_default", "cam_suspend"; + pinctrl-0 = <&cam_actuator_vaf_active>; + pinctrl-1 = <&cam_actuator_vaf_suspend>; + }; + + ois0: qcom,ois@0 { + cell-index = <0>; + reg = <0x0>; + compatible = "qcom,ois"; + qcom,cci-master = <0>; + gpios = <&tlmm 27 0>; + qcom,gpio-vaf = <0>; + qcom,gpio-req-tbl-num = <0>; + qcom,gpio-req-tbl-flags = <0>; + qcom,gpio-req-tbl-label = "CAM_VAF"; + pinctrl-names = "cam_default", "cam_suspend"; + pinctrl-0 = <&cam_actuator_vaf_active>; + pinctrl-1 = <&cam_actuator_vaf_suspend>; + status = "disabled"; + }; + + eeprom0: qcom,eeprom@0 { + cell-index = <0>; + reg = <0>; + compatible = "qcom,eeprom"; + cam_vio-supply = <&pm8998_lvs1>; + cam_vana-supply = <&pmi8998_bob>; + cam_vdig-supply = <&pm8998_s3>; + qcom,cam-vreg-name = "cam_vio", "cam_vana", "cam_vdig"; + qcom,cam-vreg-min-voltage = <0 3312000 1352000>; + qcom,cam-vreg-max-voltage = <0 3600000 1352000>; + qcom,cam-vreg-op-mode = <0 80000 105000>; + qcom,gpio-no-mux = <0>; + pinctrl-names = "cam_default", "cam_suspend"; + pinctrl-0 = <&cam_sensor_mclk0_active + &cam_sensor_rear_active + &cam_actuator_vaf_active>; + pinctrl-1 = <&cam_sensor_mclk0_suspend + &cam_sensor_rear_suspend + &cam_actuator_vaf_suspend>; + gpios = <&tlmm 13 0>, + <&tlmm 30 0>, + <&pm8998_gpios 20 0>, + <&tlmm 29 0>, + <&tlmm 27 0>; + qcom,gpio-reset = <1>; + qcom,gpio-vdig = <2>; + qcom,gpio-vana = <3>; + qcom,gpio-vaf = <4>; + qcom,gpio-req-tbl-num = <0 1 2 3 4>; + qcom,gpio-req-tbl-flags = <1 0 0 0 0>; + qcom,gpio-req-tbl-label = "CAMIF_MCLK0", + "CAM_RESET0", + "CAM_VDIG", + "CAM_VANA", + "CAM_VAF"; + qcom,sensor-position = <0>; + qcom,sensor-mode = <0>; + qcom,cci-master = <0>; + status = "ok"; + clocks = <&clock_mmss clk_mclk0_clk_src>, + <&clock_mmss clk_mmss_camss_mclk0_clk>; + clock-names = "cam_src_clk", "cam_clk"; + qcom,clock-rates = <24000000 0>; + }; + + eeprom1: qcom,eeprom@1 { + cell-index = <1>; + reg = <0x1>; + compatible = "qcom,eeprom"; + cam_vdig-supply = <&pm8998_lvs1>; + cam_vio-supply = <&pm8998_lvs1>; + cam_vana-supply = <&pmi8998_bob>; + qcom,cam-vreg-name = "cam_vdig", "cam_vio", "cam_vana"; + qcom,cam-vreg-min-voltage = <0 0 3312000>; + qcom,cam-vreg-max-voltage = <0 0 3600000>; + qcom,cam-vreg-op-mode = <0 0 80000>; + qcom,gpio-no-mux = <0>; + pinctrl-names = "cam_default", "cam_suspend"; 
+ pinctrl-0 = <&cam_sensor_mclk2_active + &cam_sensor_rear2_active>; + pinctrl-1 = <&cam_sensor_mclk2_suspend + &cam_sensor_rear2_suspend>; + gpios = <&tlmm 15 0>, + <&tlmm 9 0>, + <&tlmm 8 0>; + qcom,gpio-reset = <1>; + qcom,gpio-vana = <2>; + qcom,gpio-req-tbl-num = <0 1 2>; + qcom,gpio-req-tbl-flags = <1 0 0>; + qcom,gpio-req-tbl-label = "CAMIF_MCLK1", + "CAM_RESET1", + "CAM_VANA1"; + qcom,sensor-position = <0>; + qcom,sensor-mode = <0>; + qcom,cci-master = <1>; + status = "ok"; + clocks = <&clock_mmss clk_mclk2_clk_src>, + <&clock_mmss clk_mmss_camss_mclk2_clk>; + clock-names = "cam_src_clk", "cam_clk"; + qcom,clock-rates = <24000000 0>; + }; + + eeprom2: qcom,eeprom@2 { + cell-index = <2>; + reg = <0x2>; + compatible = "qcom,eeprom"; + cam_vio-supply = <&pm8998_lvs1>; + /*cam_vana-supply = <&pm8998_l22>;*/ + cam_vdig-supply = <&pm8998_s3>; + qcom,cam-vreg-name = "cam_vio", "cam_vana", "cam_vdig"; + qcom,cam-vreg-min-voltage = + <0 2864000 1352000>; + qcom,cam-vreg-max-voltage = + <0 2864000 1352000>; + qcom,cam-vreg-op-mode = <0 80000 105000>; + qcom,gpio-no-mux = <0>; + pinctrl-names = "cam_default", "cam_suspend"; + pinctrl-0 = <&cam_sensor_mclk1_active + &cam_sensor_front_active>; + pinctrl-1 = <&cam_sensor_mclk1_suspend + &cam_sensor_front_suspend>; + gpios = <&tlmm 14 0>, + <&tlmm 28 0>, + <&pm8998_gpios 9 0>; + qcom,gpio-reset = <1>; + qcom,gpio-vdig = <2>; + qcom,gpio-req-tbl-num = <0 1 2>; + qcom,gpio-req-tbl-flags = <1 0 0>; + qcom,gpio-req-tbl-label = "CAMIF_MCLK2", + "CAM_RESET2", + "CAM_VDIG"; + qcom,sensor-position = <1>; + qcom,sensor-mode = <0>; + qcom,cci-master = <1>; + status = "ok"; + clocks = <&clock_mmss clk_mclk1_clk_src>, + <&clock_mmss clk_mmss_camss_mclk1_clk>; + clock-names = "cam_src_clk", "cam_clk"; + qcom,clock-rates = <24000000 0>; + }; + + qcom,camera@0 { + cell-index = <0>; + compatible = "qcom,camera"; + reg = <0x0>; + qcom,csiphy-sd-index = <0>; + qcom,csid-sd-index = <0>; + qcom,mount-angle = <270>; + cam_vio-supply = <&pm8998_l8>; + cam_vana-supply = <&pmi8998_bob>; + cam_vdig-supply = <&pm8998_l9>; + cam_v_custom1-supply = <&pm8998_lvs1>; + qcom,cam-vreg-name = "cam_vdig", "cam_vana", + "cam_vio", "cam_v_custom1"; + qcom,cam-vreg-min-voltage = <1808000 3312000 1200000 0>; + qcom,cam-vreg-max-voltage = <2960000 3600000 1200000 0>; + qcom,cam-vreg-op-mode = <0 80000 105000 0>; + qcom,gpio-no-mux = <0>; + pinctrl-names = "cam_default", "cam_suspend"; + pinctrl-0 = <&cam_sensor_mclk0_active + &cam_sensor_6dofl_active>; + pinctrl-1 = <&cam_sensor_mclk0_suspend + &cam_sensor_6dofl_suspend>; + gpios = <&tlmm 13 0>, + <&tlmm 148 0>, + <&tlmm 93 0>, + <&tlmm 52 0>; + qcom,gpio-reset = <1>; + qcom,gpio-vana = <2>; + qcom,gpio-vdig = <3>; + qcom,gpio-req-tbl-num = <0 1 2 3>; + qcom,gpio-req-tbl-flags = <1 0 0 0>; + qcom,gpio-req-tbl-label = "CAMIF_MCLK0", + "CAM_RESET0", + "CAM_VANA", + "CAM_VDIG"; + qcom,sensor-position = <0>; + qcom,sensor-mode = <1>; + qcom,cci-master = <0>; + status = "ok"; + clocks = <&clock_mmss clk_mclk0_clk_src>, + <&clock_mmss clk_mmss_camss_mclk0_clk>; + clock-names = "cam_src_clk", "cam_clk"; + qcom,clock-rates = <24000000 0>; + }; + + qcom,camera@1 { + cell-index = <1>; + compatible = "qcom,camera"; + reg = <0x1>; + qcom,csiphy-sd-index = <1>; + qcom,csid-sd-index = <1>; + qcom,mount-angle = <90>; + qcom,eeprom-src = <&eeprom1>; + qcom,actuator-src = <&actuator1>; + cam_vdig-supply = <&pm8998_lvs1>; + cam_vio-supply = <&pm8998_lvs1>; + cam_vana-supply = <&pmi8998_bob>; + qcom,cam-vreg-name = "cam_vdig", "cam_vio", "cam_vana"; + 
qcom,cam-vreg-min-voltage = <0 0 3312000>; + qcom,cam-vreg-max-voltage = <0 0 3600000>; + qcom,cam-vreg-op-mode = <0 0 80000>; + qcom,gpio-no-mux = <0>; + pinctrl-names = "cam_default", "cam_suspend"; + pinctrl-0 = <&cam_sensor_mclk2_active + &cam_sensor_rear2_active>; + pinctrl-1 = <&cam_sensor_mclk2_suspend + &cam_sensor_rear2_suspend>; + gpios = <&tlmm 15 0>, + <&tlmm 9 0>, + <&tlmm 8 0>; + qcom,gpio-reset = <1>; + qcom,gpio-vana = <2>; + qcom,gpio-req-tbl-num = <0 1 2>; + qcom,gpio-req-tbl-flags = <1 0 0>; + qcom,gpio-req-tbl-label = "CAMIF_MCLK1", + "CAM_RESET1", + "CAM_VANA1"; + qcom,sensor-position = <0>; + qcom,sensor-mode = <0>; + qcom,cci-master = <1>; + status = "ok"; + clocks = <&clock_mmss clk_mclk2_clk_src>, + <&clock_mmss clk_mmss_camss_mclk2_clk>; + clock-names = "cam_src_clk", "cam_clk"; + qcom,clock-rates = <24000000 0>; + }; + qcom,camera@2 { + cell-index = <2>; + compatible = "qcom,camera"; + reg = <0x02>; + qcom,csiphy-sd-index = <2>; + qcom,csid-sd-index = <2>; + qcom,mount-angle = <90>; + qcom,eeprom-src = <&eeprom2>; + qcom,led-flash-src = <&led_flash1>; + qcom,actuator-src = <&actuator1>; + cam_vio-supply = <&pm8998_lvs1>; + cam_vana-supply = <&pmi8998_bob>; + cam_vdig-supply = <&pm8998_s3>; + qcom,cam-vreg-name = "cam_vio", "cam_vana", "cam_vdig"; + qcom,cam-vreg-min-voltage = + <0 3312000 1352000>; + qcom,cam-vreg-max-voltage = + <0 3600000 1352000>; + qcom,cam-vreg-op-mode = <0 80000 105000>; + qcom,gpio-no-mux = <0>; + pinctrl-names = "cam_default", "cam_suspend"; + pinctrl-0 = <&cam_sensor_mclk1_active + &cam_sensor_front_active>; + pinctrl-1 = <&cam_sensor_mclk1_suspend + &cam_sensor_front_suspend>; + gpios = <&tlmm 14 0>, + <&tlmm 28 0>, + <&pm8998_gpios 9 0>; + qcom,gpio-reset = <1>; + qcom,gpio-vdig = <2>; + qcom,gpio-req-tbl-num = <0 1 2>; + qcom,gpio-req-tbl-flags = <1 0 0>; + qcom,gpio-req-tbl-label = "CAMIF_MCLK2", + "CAM_RESET2", + "CAM_VDIG"; + qcom,sensor-position = <1>; + qcom,sensor-mode = <0>; + qcom,cci-master = <1>; + status = "disabled"; + clocks = <&clock_mmss clk_mclk1_clk_src>, + <&clock_mmss clk_mmss_camss_mclk1_clk>; + clock-names = "cam_src_clk", "cam_clk"; + qcom,clock-rates = <24000000 0>; + }; + qcom,camera@3 { + cell-index = <3>; + compatible = "qcom,camera"; + reg = <0x3>; + qcom,csiphy-sd-index = <0>; + qcom,csid-sd-index = <0>; + qcom,mount-angle = <270>; + cam_vio-supply = <&pm8998_l8>; + cam_vana-supply = <&pmi8998_bob>; + cam_vdig-supply = <&pm8998_l9>; + cam_v_custom1-supply = <&pm8998_lvs1>; + qcom,cam-vreg-name = "cam_vdig", "cam_vana", + "cam_vio", "cam_v_custom1"; + qcom,cam-vreg-min-voltage = <1808000 3312000 1200000 0>; + qcom,cam-vreg-max-voltage = <2960000 3600000 1200000 0>; + qcom,cam-vreg-op-mode = <0 80000 105000 0>; + qcom,gpio-no-mux = <0>; + pinctrl-names = "cam_default", "cam_suspend"; + pinctrl-0 = <&cam_sensor_mclk0_active + &cam_sensor_6dofr_active>; + pinctrl-1 = <&cam_sensor_mclk0_suspend + &cam_sensor_6dofr_suspend>; + gpios = <&tlmm 13 0>, + <&tlmm 149 0>, + <&tlmm 93 0>, + <&tlmm 52 0>; + qcom,gpio-reset = <1>; + qcom,gpio-vana = <2>; + qcom,gpio-vdig = <3>; + qcom,gpio-req-tbl-num = <0 1 2 3>; + qcom,gpio-req-tbl-flags = <1 0 0 0>; + qcom,gpio-req-tbl-label = "CAMIF_MCLK0", + "CAM_RESET1", + "CAM_VANA1", + "CAM_VDIG1"; + qcom,sensor-position = <1>; + qcom,sensor-mode = <1>; + qcom,cci-master = <0>; + status = "ok"; + clocks = <&clock_mmss clk_mclk0_clk_src>, + <&clock_mmss clk_mmss_camss_mclk0_clk>; + clock-names = "cam_src_clk", "cam_clk"; + qcom,clock-rates = <24000000 0>; + }; + +}; + +&pm8998_gpios 
{ + gpio@c800 { /* GPIO 9 - CAMERA SENSOR 2 VDIG */ + qcom,mode = <1>; /* Output */ + qcom,pull = <5>; /* No Pull */ + qcom,vin-sel = <0>; /* VIN1 GPIO_LV */ + qcom,src-sel = <0>; /* GPIO */ + qcom,invert = <0>; /* Invert */ + qcom,master-en = <1>; /* Enable GPIO */ + status = "ok"; + }; + + gpio@d300 { /* GPIO 20 - CAMERA SENSOR 0 VDIG */ + qcom,mode = <1>; /* Output */ + qcom,pull = <5>; /* No Pull */ + qcom,vin-sel = <1>; /* VIN1 GPIO_MV */ + qcom,src-sel = <0>; /* GPIO */ + qcom,invert = <0>; /* Invert */ + qcom,master-en = <1>; /* Enable GPIO */ + status = "ok"; + }; +}; diff --git a/arch/arm/boot/dts/qcom/msm8998-cdp.dtsi b/arch/arm/boot/dts/qcom/msm8998-cdp.dtsi index 6ff62544b03c..0859fd638a00 100644 --- a/arch/arm/boot/dts/qcom/msm8998-cdp.dtsi +++ b/arch/arm/boot/dts/qcom/msm8998-cdp.dtsi @@ -149,6 +149,16 @@ status = "okay"; }; + gpio@d200 { /* GPIO 19 - wil6210 refclk3_en */ + qcom,mode = <0>; /* Input */ + qcom,pull = <5>; /* No Pull */ + qcom,vin-sel = <1>; /* VIN1 GPIO_MV */ + qcom,src-sel = <0>; /* GPIO */ + qcom,invert = <0>; /* Invert */ + qcom,master-en = <1>; /* Enable GPIO */ + status = "okay"; + }; + /* GPIO 21 (NFC_CLK_REQ) */ gpio@d400 { qcom,mode = <0>; diff --git a/arch/arm/boot/dts/qcom/msm8998-gpu.dtsi b/arch/arm/boot/dts/qcom/msm8998-gpu.dtsi index cc20c57a8099..3c5de162620c 100644 --- a/arch/arm/boot/dts/qcom/msm8998-gpu.dtsi +++ b/arch/arm/boot/dts/qcom/msm8998-gpu.dtsi @@ -79,6 +79,9 @@ qcom,tsens-name = "tsens_tz_sensor12"; + /* Avoid L2PC on big cluster CPUs (CPU 4,5,6,7) */ + qcom,l2pc-cpu-mask = <0x000000f0>; + /* Quirks */ qcom,gpu-quirk-lmloadkill-disable; diff --git a/arch/arm/boot/dts/qcom/msm8998-mdss-panels.dtsi b/arch/arm/boot/dts/qcom/msm8998-mdss-panels.dtsi index 93b6a7664ed8..897ab12fe0a7 100644 --- a/arch/arm/boot/dts/qcom/msm8998-mdss-panels.dtsi +++ b/arch/arm/boot/dts/qcom/msm8998-mdss-panels.dtsi @@ -87,7 +87,6 @@ qcom,mdss-dsi-t-clk-post = <0x07>; qcom,mdss-dsi-t-clk-pre = <0x25>; qcom,mdss-dsi-tx-eot-append; - qcom,cmd-sync-wait-broadcast; qcom,esd-check-enabled; qcom,mdss-dsi-min-refresh-rate = <55>; qcom,mdss-dsi-max-refresh-rate = <60>; @@ -107,7 +106,6 @@ qcom,mdss-dsi-t-clk-post = <0x0d>; qcom,mdss-dsi-t-clk-pre = <0x2d>; qcom,mdss-dsi-tx-eot-append; - qcom,cmd-sync-wait-broadcast; qcom,esd-check-enabled; qcom,mdss-dsi-panel-status-check-mode = "reg_read"; qcom,mdss-dsi-panel-status-command = [06 01 00 01 00 00 01 0a]; diff --git a/arch/arm/boot/dts/qcom/msm8998-mtp.dtsi b/arch/arm/boot/dts/qcom/msm8998-mtp.dtsi index bafe29211ef0..3827b1bbf8ba 100644 --- a/arch/arm/boot/dts/qcom/msm8998-mtp.dtsi +++ b/arch/arm/boot/dts/qcom/msm8998-mtp.dtsi @@ -150,6 +150,16 @@ status = "okay"; }; + gpio@d200 { /* GPIO 19 - wil6210 refclk3_en */ + qcom,mode = <0>; /* Input */ + qcom,pull = <5>; /* No Pull */ + qcom,vin-sel = <1>; /* VIN1 GPIO_MV */ + qcom,src-sel = <0>; /* GPIO */ + qcom,invert = <0>; /* Invert */ + qcom,master-en = <1>; /* Enable GPIO */ + status = "okay"; + }; + /* GPIO 21 (NFC_CLK_REQ) */ gpio@d400 { qcom,mode = <0>; diff --git a/arch/arm/boot/dts/qcom/msm8998-pinctrl.dtsi b/arch/arm/boot/dts/qcom/msm8998-pinctrl.dtsi index 71593012148d..ed1259918620 100644 --- a/arch/arm/boot/dts/qcom/msm8998-pinctrl.dtsi +++ b/arch/arm/boot/dts/qcom/msm8998-pinctrl.dtsi @@ -993,6 +993,86 @@ }; }; + cam_sensor_depth_default: cam_sensor_depth_default { + mux { + pins = "gpio28","gpio23","gpio7"; + function = "gpio"; + }; + + config { + pins = "gpio28","gpio23","gpio7"; + bias-disable; /* No PULL */ + drive-strength = <2>; /* 2 MA */ + }; + }; + 
+ cam_sensor_depth_sleep: cam_sensor_depth_sleep { + mux { + pins = "gpio28","gpio23","gpio7"; + function = "gpio"; + }; + + config { + pins = "gpio28","gpio23","gpio7"; + bias-disable; /* No PULL */ + drive-strength = <2>; /* 2 MA */ + }; + }; + + cam_sensor_depth_v1_active: cam_sensor_depth_v1_active { + /* Depth VANA */ + mux { + pins = "gpio24"; + function = "gpio"; + }; + + config { + pins = "gpio24"; + bias-disable; /* No PULL */ + drive-strength = <2>; /* 2 MA */ + }; + }; + + cam_sensor_depth_v1_sleep: cam_sensor_depth_v1_sleep { + mux { + pins = "gpio24"; + function = "gpio"; + }; + + config { + pins = "gpio24"; + bias-disable; /* No PULL */ + drive-strength = <2>; /* 2 MA */ + }; + }; + + cam_sensor_depth_v2_active: cam_sensor_depth_v2_active { + /* Depth CUSTOM2 */ + mux { + pins = "gpio21"; + function = "gpio"; + }; + + config { + pins = "gpio21"; + bias-disable; /* No PULL */ + drive-strength = <2>; /* 2 MA */ + }; + }; + + cam_sensor_depth_v2_sleep: cam_sensor_depth_v2_sleep { + mux { + pins = "gpio21"; + function = "gpio"; + }; + + config { + pins = "gpio21"; + bias-disable; /* No PULL */ + drive-strength = <2>; /* 2 MA */ + }; + }; + cam_sensor_mclk0_active: cam_sensor_mclk0_active { /* MCLK0 */ mux { diff --git a/arch/arm/boot/dts/qcom/msm8998-qrd-overlay.dts b/arch/arm/boot/dts/qcom/msm8998-qrd-overlay.dts index 55255261a827..ce1c62d9c2ae 100644 --- a/arch/arm/boot/dts/qcom/msm8998-qrd-overlay.dts +++ b/arch/arm/boot/dts/qcom/msm8998-qrd-overlay.dts @@ -17,6 +17,7 @@ #include #include +#include "msm8998-mdss-panels.dtsi" #include "msm8998-qrd.dtsi" / { diff --git a/arch/arm/boot/dts/qcom/msm8998-qrd-skuk.dtsi b/arch/arm/boot/dts/qcom/msm8998-qrd-skuk.dtsi index 97c4c5b1d455..c4a428635231 100644 --- a/arch/arm/boot/dts/qcom/msm8998-qrd-skuk.dtsi +++ b/arch/arm/boot/dts/qcom/msm8998-qrd-skuk.dtsi @@ -143,6 +143,16 @@ qcom,out-strength = <1>; }; + gpio@d200 { /* GPIO 19 - wil6210 refclk3_en */ + qcom,mode = <0>; /* Input */ + qcom,pull = <5>; /* No Pull */ + qcom,vin-sel = <1>; /* VIN1 GPIO_MV */ + qcom,src-sel = <0>; /* GPIO */ + qcom,invert = <0>; /* Invert */ + qcom,master-en = <1>; /* Enable GPIO */ + status = "okay"; + }; + /* GPIO 21 (NFC_CLK_REQ) */ gpio@d400 { qcom,mode = <0>; diff --git a/arch/arm/boot/dts/qcom/msm8998-qrd.dtsi b/arch/arm/boot/dts/qcom/msm8998-qrd.dtsi index a3eb3e5ab0d0..3c6b23d9581c 100644 --- a/arch/arm/boot/dts/qcom/msm8998-qrd.dtsi +++ b/arch/arm/boot/dts/qcom/msm8998-qrd.dtsi @@ -139,6 +139,16 @@ status = "okay"; }; + gpio@d200 { /* GPIO 19 - wil6210 refclk3_en */ + qcom,mode = <0>; /* Input */ + qcom,pull = <5>; /* No Pull */ + qcom,vin-sel = <1>; /* VIN1 GPIO_MV */ + qcom,src-sel = <0>; /* GPIO */ + qcom,invert = <0>; /* Invert */ + qcom,master-en = <1>; /* Enable GPIO */ + status = "okay"; + }; + /* GPIO 21 (NFC_CLK_REQ) */ gpio@d400 { qcom,mode = <0>; diff --git a/arch/arm/boot/dts/qcom/msm8998-svr20-pinctrl.dtsi b/arch/arm/boot/dts/qcom/msm8998-svr20-pinctrl.dtsi new file mode 100644 index 000000000000..1347fcef4251 --- /dev/null +++ b/arch/arm/boot/dts/qcom/msm8998-svr20-pinctrl.dtsi @@ -0,0 +1,3386 @@ +/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +&soc { + tlmm: pinctrl@03400000 { + compatible = "qcom,msm8998-pinctrl"; + reg = <0x3400000 0xc00000>; + interrupts = <0 208 0>; + gpio-controller; + #gpio-cells = <2>; + interrupt-controller; + #interrupt-cells = <2>; + + uart_console_active: uart_console_active { + mux { + pins = "gpio4", "gpio5"; + function = "blsp_uart8_a"; + }; + + config { + pins = "gpio4", "gpio5"; + drive-strength = <2>; + bias-disable; + }; + }; + + wcd9xxx_intr { + wcd_intr_default: wcd_intr_default{ + mux { + pins = "gpio54"; + function = "gpio"; + }; + + config { + pins = "gpio54"; + drive-strength = <2>; /* 2 mA */ + bias-pull-down; /* pull down */ + input-enable; + }; + }; + }; + + /* I2C CONFIGURATION */ + i2c_1 { + i2c_1_active: i2c_1_active { + mux { + pins = "gpio2", "gpio3"; + function = "blsp_i2c1"; + }; + + config { + pins = "gpio2", "gpio3"; + drive-strength = <2>; + bias-disable; + }; + }; + + i2c_1_sleep: i2c_1_sleep { + mux { + pins = "gpio2", "gpio3"; + function = "blsp_i2c1"; + }; + + config { + pins = "gpio2", "gpio3"; + drive-strength = <2>; + bias-pull-up; + }; + }; + }; + + i2c_2 { + i2c_2_active: i2c_2_active { + mux { + pins = "gpio32", "gpio33"; + function = "blsp_i2c2"; + }; + + config { + pins = "gpio32", "gpio33"; + drive-strength = <8>; + bias-disable; + }; + }; + + i2c_2_sleep: i2c_2_sleep { + mux { + pins = "gpio32", "gpio33"; + function = "blsp_i2c2"; + }; + + config { + pins = "gpio32", "gpio33"; + drive-strength = <8>; + bias-pull-up; + }; + }; + }; + + i2c_3 { + i2c_3_active: i2c_3_active { + mux { + pins = "gpio47", "gpio48"; + function = "blsp_i2c3"; + }; + + config { + pins = "gpio47", "gpio48"; + drive-strength = <2>; + bias-disable; + }; + }; + + i2c_3_sleep: i2c_3_sleep { + mux { + pins = "gpio47", "gpio48"; + function = "blsp_i2c3"; + }; + + config { + pins = "gpio47", "gpio48"; + drive-strength = <2>; + bias-pull-up; + }; + }; + }; + + i2c_4 { + i2c_4_active: i2c_4_active { + mux { + pins = "gpio10", "gpio11"; + function = "blsp_i2c4"; + }; + + config { + pins = "gpio10", "gpio11"; + drive-strength = <2>; + bias-disable; + }; + }; + + i2c_4_sleep: i2c_4_sleep { + mux { + pins = "gpio10", "gpio11"; + function = "blsp_i2c4"; + }; + + config { + pins = "gpio10", "gpio11"; + drive-strength = <2>; + bias-pull-up; + }; + }; + }; + + i2c_5 { + i2c_5_active: i2c_5_active { + mux { + pins = "gpio87", "gpio88"; + function = "blsp_i2c5"; + }; + + config { + pins = "gpio87", "gpio88"; + drive-strength = <2>; + bias-disable; + }; + }; + + i2c_5_sleep: i2c_5_sleep { + mux { + pins = "gpio87", "gpio88"; + function = "blsp_i2c5"; + }; + + config { + pins = "gpio87", "gpio88"; + drive-strength = <2>; + bias-disable; + }; + }; + }; + + i2c_6 { + i2c_6_active: i2c_6_active { + mux { + pins = "gpio43", "gpio44"; + function = "blsp_i2c6"; + }; + + config { + pins = "gpio43", "gpio44"; + drive-strength = <2>; + bias-disable; + }; + }; + + i2c_6_sleep: i2c_6_sleep { + mux { + pins = "gpio43", "gpio44"; + function = "blsp_i2c6"; + }; + + config { + pins = "gpio43", "gpio44"; + drive-strength = <2>; + bias-pull-up; + }; + }; + }; + + nfc { + nfc_int_active: nfc_int_active { + /* active state */ + mux { + /* GPIO 92 NFC Read Interrupt */ + pins = "gpio92"; + function = "gpio"; + }; + + config { + pins = "gpio92"; + drive-strength = <6>; /* 6 
MA */ + bias-pull-up; + }; + }; + + nfc_int_suspend: nfc_int_suspend { + /* sleep state */ + mux { + /* GPIO 92 NFC Read Interrupt */ + pins = "gpio92"; + function = "gpio"; + }; + + config { + pins = "gpio92"; + drive-strength = <6>; /* 6 MA */ + bias-pull-up; + }; + }; + + nfc_enable_active: nfc_enable_active { + /* active state */ + mux { + /* 12: NFC ENABLE 116:ESE Enable */ + pins = "gpio12", "gpio116"; + function = "gpio"; + }; + + config { + pins = "gpio12", "gpio116"; + drive-strength = <6>; /* 6 MA */ + bias-pull-up; + }; + }; + + nfc_enable_suspend: nfc_enable_suspend { + /* sleep state */ + mux { + /* 12: NFC ENABLE 116:ESE Enable */ + pins = "gpio12", "gpio116"; + function = "gpio"; + }; + + config { + pins = "gpio12", "gpio116"; + drive-strength = <6>; /* 6 MA */ + bias-disable; + }; + }; + }; + + i2c_7 { + i2c_7_active: i2c_7_active { + mux { + pins = "gpio55", "gpio56"; + function = "blsp_i2c7"; + }; + + config { + pins = "gpio55", "gpio56"; + drive-strength = <2>; + bias-disable; + }; + }; + + i2c_7_sleep: i2c_7_sleep { + mux { + pins = "gpio55", "gpio56"; + function = "blsp_i2c7"; + }; + + config { + pins = "gpio55", "gpio56"; + drive-strength = <2>; + bias-pull-up; + }; + }; + }; + + i2c_8 { + i2c_8_active: i2c_8_active { + mux { + pins = "gpio6", "gpio7"; + function = "blsp_i2c8"; + }; + + config { + pins = "gpio6", "gpio7"; + drive-strength = <2>; + bias-disable; + }; + }; + + i2c_8_sleep: i2c_8_sleep { + mux { + pins = "gpio6", "gpio7"; + function = "blsp_i2c8"; + }; + + config { + pins = "gpio6", "gpio7"; + drive-strength = <2>; + bias-pull-up; + }; + }; + }; + + i2c_9 { + i2c_9_active: i2c_9_active { + mux { + pins = "gpio51", "gpio52"; + function = "blsp_i2c9"; + }; + + config { + pins = "gpio51", "gpio52"; + drive-strength = <2>; + bias-disable; + }; + }; + + i2c_9_sleep: i2c_9_sleep { + mux { + pins = "gpio51", "gpio52"; + function = "blsp_i2c9"; + }; + + config { + pins = "gpio51", "gpio52"; + drive-strength = <2>; + bias-pull-up; + }; + }; + }; + + i2c_10 { + i2c_10_active: i2c_10_active { + mux { + pins = "gpio67", "gpio68"; + function = "blsp_i2c10"; + }; + + config { + pins = "gpio67", "gpio68"; + drive-strength = <2>; + bias-disable; + }; + }; + + i2c_10_sleep: i2c_10_sleep { + mux { + pins = "gpio67", "gpio68"; + function = "blsp_i2c10"; + }; + + config { + pins = "gpio67", "gpio68"; + drive-strength = <2>; + bias-pull-up; + }; + }; + }; + + i2c_11 { + i2c_11_active: i2c_11_active { + mux { + pins = "gpio60", "gpio61"; + function = "blsp_i2c11"; + }; + + config { + pins = "gpio60", "gpio61"; + drive-strength = <2>; + bias-disable; + }; + }; + + i2c_11_sleep: i2c_11_sleep { + mux { + pins = "gpio60", "gpio61"; + function = "blsp_i2c11"; + }; + + config { + pins = "gpio60", "gpio61"; + drive-strength = <2>; + bias-pull-up; + }; + }; + }; + + i2c_12 { + i2c_12_active: i2c_12_active { + mux { + pins = "gpio83", "gpio84"; + function = "blsp_i2c12"; + }; + + config { + pins = "gpio83", "gpio84"; + drive-strength = <2>; + bias-disable; + }; + }; + + i2c_12_sleep: i2c_12_sleep { + mux { + pins = "gpio83", "gpio84"; + function = "blsp_i2c12"; + }; + + config { + pins = "gpio83", "gpio84"; + drive-strength = <2>; + bias-pull-up; + }; + }; + }; + + /* SPI CONFIGURATION */ + + spi_1 { + spi_1_active: spi_1_active { + mux { + pins = "gpio0", "gpio1", + "gpio2", "gpio3"; + function = "blsp_spi1"; + }; + + config { + pins = "gpio0", "gpio1", + "gpio2", "gpio3"; + drive-strength = <6>; + bias-disable; + }; + }; + + spi_1_sleep: spi_1_sleep { + mux { + pins = "gpio0", 
"gpio1", + "gpio2", "gpio3"; + function = "blsp_spi1"; + }; + + config { + pins = "gpio0", "gpio1", + "gpio2", "gpio3"; + drive-strength = <6>; + bias-disable; + }; + }; + }; + + spi_2 { + spi_2_active: spi_2_active { + mux { + pins = "gpio31", "gpio34", + "gpio32", "gpio33"; + function = "blsp_spi2"; + }; + + config { + pins = "gpio31", "gpio34", + "gpio32", "gpio33"; + drive-strength = <6>; + bias-disable; + }; + }; + + spi_2_sleep: spi_2_sleep { + mux { + pins = "gpio31", "gpio34", + "gpio32", "gpio33"; + function = "blsp_spi2"; + }; + + config { + pins = "gpio31", "gpio34", + "gpio32", "gpio33"; + drive-strength = <6>; + bias-disable; + }; + }; + }; + + spi_3 { + spi_3_active: spi_3_active { + mux { + pins = "gpio45", "gpio46", + "gpio47", "gpio48"; + function = "blsp_spi3"; + }; + + config { + pins = "gpio45", "gpio46", + "gpio47", "gpio48"; + drive-strength = <6>; + bias-disable; + }; + }; + + spi_3_sleep: spi_3_sleep { + mux { + pins = "gpio45", "gpio46", + "gpio47", "gpio48"; + function = "blsp_spi3"; + }; + + config { + pins = "gpio45", "gpio46", + "gpio47", "gpio48"; + drive-strength = <6>; + bias-disable; + }; + }; + }; + + pcie0 { + pcie0_clkreq_default: pcie0_clkreq_default { + mux { + pins = "gpio36"; + function = "pci_e0"; + }; + + config { + pins = "gpio36"; + drive-strength = <2>; + bias-pull-up; + }; + }; + + pcie0_perst_default: pcie0_perst_default { + mux { + pins = "gpio35"; + function = "gpio"; + }; + + config { + pins = "gpio35"; + drive-strength = <2>; + bias-pull-down; + }; + }; + + pcie0_wake_default: pcie0_wake_default { + mux { + pins = "gpio37"; + function = "gpio"; + }; + + config { + pins = "gpio37"; + drive-strength = <2>; + bias-pull-down; + }; + }; + }; + + hph_en0_ctrl { + hph_en0_idle: hph_en0_idle { + mux { + pins = "gpio67"; + function = "gpio"; + }; + config { + pins = "gpio67"; + drive-strength = <2>; + bias-pull-down; + output-low; + }; + }; + hph_en0_active: hph_en0_active { + mux { + pins = "gpio67"; + function = "gpio"; + }; + config { + pins = "gpio67"; + drive-strength = <2>; + bias-disable; + output-high; + }; + }; + }; + + hph_en1_ctrl { + hph_en1_idle: hph_en1_idle { + mux { + pins = "gpio68"; + function = "gpio"; + }; + config { + pins = "gpio68"; + drive-strength = <2>; + bias-pull-down; + output-low; + }; + }; + hph_en1_active: hph_en1_active { + mux { + pins = "gpio68"; + function = "gpio"; + }; + config { + pins = "gpio68"; + drive-strength = <2>; + bias-disable; + output-high; + }; + }; + }; + + wcd_gnd_mic_swap { + wcd_gnd_mic_swap_idle: wcd_gnd_mic_swap_idle { + mux { + pins = "gpio75"; + function = "gpio"; + }; + config { + pins = "gpio75"; + drive-strength = <2>; + bias-pull-down; + output-low; + }; + }; + wcd_gnd_mic_swap_active: wcd_gnd_mic_swap_active { + mux { + pins = "gpio75"; + function = "gpio"; + }; + config { + pins = "gpio75"; + drive-strength = <2>; + bias-disable; + output-high; + }; + }; + }; + + /* USB C analog configuration */ + wcd_usbc_analog_en1 { + wcd_usbc_analog_en1_idle: wcd_usbc_ana_en1_idle { + mux { + pins = "gpio59"; + function = "gpio"; + }; + config { + pins = "gpio59"; + drive-strength = <2>; + bias-pull-down; + output-low; + }; + }; + + wcd_usbc_analog_en1_active: wcd_usbc_ana_en1_active { + mux { + pins = "gpio59"; + function = "gpio"; + }; + config { + pins = "gpio59"; + drive-strength = <2>; + bias-disable; + output-high; + }; + }; + }; + + wcd_usbc_analog_en2n { + wcd_usbc_analog_en2n_idle: wcd_usbc_ana_en2n_idle { + mux { + pins = "gpio60"; + function = "gpio"; + }; + config { + pins = "gpio60"; 
+ drive-strength = <2>; + bias-disable; + output-high; + }; + }; + + wcd_usbc_analog_en2n_active: wcd_usbc_ana_en2n_active { + mux { + pins = "gpio60"; + function = "gpio"; + }; + config { + pins = "gpio60"; + drive-strength = <2>; + bias-pull-down; + output-low; + }; + }; + }; + + cdc_reset_ctrl { + cdc_reset_sleep: cdc_reset_sleep { + mux { + pins = "gpio116"; + function = "gpio"; + }; + config { + pins = "gpio116"; + drive-strength = <16>; + bias-disable; + output-low; + }; + }; + cdc_reset_active:cdc_reset_active { + mux { + pins = "gpio116"; + function = "gpio"; + }; + config { + pins = "gpio116"; + drive-strength = <16>; + bias-pull-down; + output-high; + }; + }; + }; + + spi_4 { + spi_4_active: spi_4_active { + mux { + pins = "gpio8", "gpio9", + "gpio10", "gpio11"; + function = "blsp_spi4"; + }; + + config { + pins = "gpio8", "gpio9", + "gpio10", "gpio11"; + drive-strength = <6>; + bias-disable; + }; + }; + + spi_4_sleep: spi_4_sleep { + mux { + pins = "gpio8", "gpio9", + "gpio10", "gpio11"; + function = "blsp_spi4"; + }; + + config { + pins = "gpio8", "gpio9", + "gpio10", "gpio11"; + drive-strength = <6>; + bias-disable; + }; + }; + }; + + spkr_1_sd_n { + spkr_1_sd_n_sleep: spkr_1_sd_n_sleep { + mux { + pins = "gpio111"; + function = "gpio"; + }; + config { + pins = "gpio111"; + drive-strength = <2>; /* 2 mA */ + bias-pull-down; + input-enable; + }; + }; + spkr_1_sd_n_active: spkr_1_sd_n_active { + mux { + pins = "gpio111"; + function = "gpio"; + }; + config { + pins = "gpio111"; + drive-strength = <16>; /* 16 mA */ + bias-disable; + output-high; + }; + }; + }; + + spi_5 { + spi_5_active: spi_5_active { + mux { + pins = "gpio85", "gpio86", + "gpio87", "gpio88"; + function = "blsp_spi5"; + }; + + config { + pins = "gpio85", "gpio86", + "gpio87", "gpio88"; + drive-strength = <6>; + bias-disable; + }; + }; + + spi_5_sleep: spi_5_sleep { + mux { + pins = "gpio85", "gpio86", + "gpio87", "gpio88"; + function = "blsp_spi5"; + }; + + config { + pins = "gpio85", "gpio86", + "gpio87", "gpio88"; + drive-strength = <6>; + bias-disable; + }; + }; + }; + + spkr_2_sd_n { + spkr_2_sd_n_sleep: spkr_2_sd_n_sleep { + mux { + pins = "gpio112"; + function = "gpio"; + }; + config { + pins = "gpio112"; + drive-strength = <2>; /* 2 mA */ + bias-pull-down; + input-enable; + }; + }; + spkr_2_sd_n_active: spkr_2_sd_n_active { + mux { + pins = "gpio112"; + function = "gpio"; + }; + config { + pins = "gpio112"; + drive-strength = <16>; /* 16 mA */ + bias-disable; + output-high; + }; + }; + }; + + cci0_active: cci0_active { + mux { + /* CLK, DATA */ + pins = "gpio17","gpio18"; // Only 2 + function = "cci_i2c"; + }; + + config { + pins = "gpio17","gpio18"; + bias-pull-up; /* PULL UP*/ + drive-strength = <2>; /* 2 MA */ + }; + }; + + cci0_suspend: cci0_suspend { + mux { + /* CLK, DATA */ + pins = "gpio17","gpio18"; + function = "cci_i2c"; + }; + + config { + pins = "gpio17","gpio18"; + bias-pull-down; /* PULL DOWN */ + drive-strength = <2>; /* 2 MA */ + }; + }; + + cci1_active: cci1_active { + mux { + /* CLK, DATA */ + pins = "gpio19","gpio20"; + function = "cci_i2c"; + }; + + config { + pins = "gpio19","gpio20"; + bias-pull-up; /* PULL UP*/ + drive-strength = <2>; /* 2 MA */ + }; + }; + + cci1_suspend: cci1_suspend { + mux { + /* CLK, DATA */ + pins = "gpio19","gpio20"; + function = "cci_i2c"; + }; + + config { + pins = "gpio19","gpio20"; + bias-pull-down; /* PULL DOWN */ + drive-strength = <2>; /* 2 MA */ + }; + }; + + cam_actuator_vaf_active: cam_actuator_vaf_active { + /* ACTUATOR POWER */ + mux { + pins = 
"gpio27"; + function = "gpio"; + }; + + config { + pins = "gpio27"; + bias-disable; /* No PULL */ + drive-strength = <2>; /* 2 MA */ + }; + }; + + cam_actuator_vaf_suspend: cam_actuator_vaf_suspend { + /* ACTUATOR POWER */ + mux { + pins = "gpio27"; + function = "gpio"; + }; + + config { + pins = "gpio27"; + bias-pull-down; /* PULL DOWN */ + drive-strength = <2>; /* 2 MA */ + }; + }; + + cam_sensor_mclk0_active: cam_sensor_mclk0_active { + /* MCLK0 */ + mux { + /* CLK, DATA */ + pins = "gpio13"; + function = "cam_mclk"; + }; + + config { + pins = "gpio13"; + bias-disable; /* No PULL */ + drive-strength = <8>; /* 2 MA */ + }; + }; + + cam_sensor_mclk0_suspend: cam_sensor_mclk0_suspend { + /* MCLK0 */ + mux { + /* CLK, DATA */ + pins = "gpio13"; + function = "cam_mclk"; + }; + + config { + pins = "gpio13"; + bias-pull-down; /* PULL DOWN */ + drive-strength = <8>; /* 2 MA */ + }; + }; + + cam_sensor_6dofl_active: cam_sensor_6dofl_active { + /* RESET, STANDBY */ + mux { + pins = "gpio148","gpio29"; + function = "gpio"; + }; + + config { + pins = "gpio148","gpio29"; + bias-disable; /* No PULL */ + drive-strength = <2>; /* 2 MA */ + }; + }; + + cam_sensor_6dofr_active: cam_sensor_6dofr_active { + /* RESET, STANDBY */ + mux { + pins = "gpio149","gpio29"; + function = "gpio"; + }; + + config { + pins = "gpio149","gpio29"; + bias-disable; /* No PULL */ + drive-strength = <2>; /* 2 MA */ + }; + }; + + cam_sensor_6dofl_suspend: cam_sensor_6dofl_suspend { + /* RESET, STANDBY */ + mux { + pins = "gpio148","gpio29"; + function = "gpio"; + }; + + config { + pins = "gpio148","gpio29"; + bias-disable; /* No PULL */ + drive-strength = <2>; /* 2 MA */ + }; + }; + + cam_sensor_6dofr_suspend: cam_sensor_6dofr_suspend { + /* RESET, STANDBY */ + mux { + pins = "gpio149","gpio29"; + function = "gpio"; + }; + + config { + pins = "gpio149","gpio29"; + bias-disable; /* No PULL */ + drive-strength = <2>; /* 2 MA */ + }; + }; + + cam_sensor_rear_active: cam_sensor_rear_active { + /* RESET, STANDBY */ + mux { + pins = "gpio30","gpio29"; + function = "gpio"; + }; + + config { + pins = "gpio30","gpio29"; + bias-disable; /* No PULL */ + drive-strength = <2>; /* 2 MA */ + }; + }; + max_volt_active: max_volt_active { + /* RESET */ + mux { + pins = "gpio128", "gpio129", + "gpio130", "gpio133"; + function = "gpio"; + }; + + config { + pins = "gpio128", "gpio129", + "gpio130", "gpio133"; + bias-disable; /* No PULL */ + drive-strength = <8>; /* 2 MA */ + }; + }; + max_volt_suspend: max_volt_suspend { + /* RESET */ + mux { + pins = "gpio128", "gpio129", + "gpio130", "gpio133"; + function = "gpio"; + }; + + config { + pins = "gpio128", "gpio129", + "gpio130", "gpio133"; + bias-disable; /* No PULL */ + drive-strength = <8>; /* 2 MA */ + }; + }; + max_rst_active: max_rst_active { + /* RESET */ + mux { + pins = "gpio30"; + function = "gpio"; + }; + + config { + pins = "gpio30"; + bias-disable; /* No PULL */ + drive-strength = <8>; /* 2 MA */ + }; + }; + max_rst_suspend: max_rst_suspend { + /* RESET */ + mux { + pins = "gpio30"; + function = "gpio"; + }; + + config { + pins = "gpio30"; + bias-disable; /* No PULL */ + drive-strength = <8>; /* 2 MA */ + }; + }; + + spi_6 { + spi_6_active: spi_6_active { + mux { + pins = "gpio41", "gpio42", + "gpio43", "gpio44"; + function = "blsp_spi6"; + }; + + config { + pins = "gpio41", "gpio42", + "gpio43", "gpio44"; + drive-strength = <6>; + bias-disable; + }; + }; + + spi_6_sleep: spi_6_sleep { + mux { + pins = "gpio41", "gpio42", + "gpio43", "gpio44"; + function = "blsp_spi6"; + }; + + config { 
+ pins = "gpio41", "gpio42", + "gpio43", "gpio44"; + drive-strength = <6>; + bias-disable; + }; + }; + }; + + spi_7 { + spi_7_active: spi_7_active { + mux { + pins = "gpio53", "gpio54", + "gpio55", "gpio56"; + function = "blsp_spi7"; + }; + + config { + pins = "gpio53", "gpio54", + "gpio55", "gpio56"; + drive-strength = <6>; + bias-disable; + }; + }; + + spi_7_sleep: spi_7_sleep { + mux { + pins = "gpio53", "gpio54", + "gpio55", "gpio56"; + function = "blsp_spi7"; + }; + + config { + pins = "gpio53", "gpio54", + "gpio55", "gpio56"; + drive-strength = <6>; + bias-disable; + }; + }; + }; + + spi_8 { + spi_8_active: spi_8_active { + mux { + pins = "gpio4", "gpio5", + "gpio6", "gpio7"; + function = "blsp_spi8"; + }; + + config { + pins = "gpio4", "gpio5", + "gpio6", "gpio7"; + drive-strength = <6>; + bias-disable; + }; + }; + + spi_8_sleep: spi_8_sleep { + mux { + pins = "gpio4", "gpio5", + "gpio6", "gpio7"; + function = "blsp_spi8"; + }; + + config { + pins = "gpio4", "gpio5", + "gpio6", "gpio7"; + drive-strength = <6>; + bias-disable; + }; + }; + }; + + spi_9 { + spi_9_active: spi_9_active { + mux { + pins = "gpio49", "gpio50", + "gpio51", "gpio52"; + function = "blsp_spi9"; + }; + + config { + pins = "gpio49", "gpio50", + "gpio51", "gpio52"; + drive-strength = <6>; + bias-disable; + }; + }; + + spi_9_sleep: spi_9_sleep { + mux { + pins = "gpio49", "gpio50", + "gpio51", "gpio52"; + function = "blsp_spi9"; + }; + + config { + pins = "gpio49", "gpio50", + "gpio51", "gpio52"; + drive-strength = <6>; + bias-disable; + }; + }; + }; + + spi_10 { + spi_10_active: spi_10_active { + mux { + pins = "gpio65", "gpio66", + "gpio67", "gpio68"; + function = "blsp_spi10"; + }; + + config { + pins = "gpio65", "gpio66", + "gpio67", "gpio68"; + drive-strength = <6>; + bias-disable; + }; + }; + + spi_10_sleep: spi_10_sleep { + mux { + pins = "gpio65", "gpio66", + "gpio67", "gpio68"; + function = "blsp_spi10"; + }; + + config { + pins = "gpio65", "gpio66", + "gpio67", "gpio68"; + drive-strength = <6>; + bias-disable; + }; + }; + }; + + spi_11 { + spi_11_active: spi_11_active { + mux { + pins = "gpio58", "gpio59", + "gpio60", "gpio61"; + function = "blsp_spi11"; + }; + + config { + pins = "gpio58", "gpio59", + "gpio60", "gpio61"; + drive-strength = <6>; + bias-disable; + }; + }; + + spi_11_sleep: spi_11_sleep { + mux { + pins = "gpio58", "gpio59", + "gpio60", "gpio61"; + function = "blsp_spi11"; + }; + + config { + pins = "gpio58", "gpio59", + "gpio60", "gpio61"; + drive-strength = <6>; + bias-disable; + }; + }; + }; + + spi_12 { + spi_12_active: spi_12_active { + mux { + pins = "gpio81", "gpio82", + "gpio83", "gpio84"; + function = "blsp_spi12"; + }; + + config { + pins = "gpio81", "gpio82", + "gpio83", "gpio84"; + drive-strength = <6>; + bias-disable; + }; + }; + + spi_12_sleep: spi_12_sleep { + mux { + pins = "gpio81", "gpio82", + "gpio83", "gpio84"; + function = "blsp_spi12"; + }; + + config { + pins = "gpio81", "gpio82", + "gpio83", "gpio84"; + drive-strength = <6>; + bias-disable; + }; + }; + }; + + /* HS UART CONFIGURATION */ + blsp1_uart1_active: blsp1_uart1_active { + mux { + pins = "gpio0", "gpio1", "gpio2", "gpio3"; + function = "blsp_uart1_a"; + }; + + config { + pins = "gpio0", "gpio1", "gpio2", "gpio3"; + drive-strength = <2>; + bias-disable; + }; + }; + + blsp1_uart1_sleep: blsp1_uart1_sleep { + mux { + pins = "gpio0", "gpio1", "gpio2", "gpio3"; + function = "gpio"; + }; + + config { + pins = "gpio0", "gpio1", "gpio2", "gpio3"; + drive-strength = <2>; + bias-disable; + }; + }; + + 
cam_sensor_rear_suspend: cam_sensor_rear_suspend { + /* RESET, STANDBY */ + mux { + pins = "gpio30","gpio29"; + function = "gpio"; + }; + + config { + pins = "gpio30","gpio29"; + bias-disable; /* No PULL */ + drive-strength = <2>; /* 2 MA */ + }; + }; + + cam_sensor_mclk1_active: cam_sensor_mclk1_active { + /* MCLK1 */ + mux { + /* CLK, DATA */ + pins = "gpio14"; + function = "cam_mclk"; + }; + + config { + pins = "gpio14"; + bias-disable; /* No PULL */ + drive-strength = <2>; /* 2 MA */ + }; + }; + + cam_sensor_mclk1_suspend: cam_sensor_mclk1_suspend { + /* MCLK1 */ + mux { + /* CLK, DATA */ + pins = "gpio14"; + function = "cam_mclk"; + }; + + config { + pins = "gpio14"; + bias-pull-down; /* PULL DOWN */ + drive-strength = <2>; /* 2 MA */ + }; + }; + + cam_sensor_rear2_active: cam_sensor_rear2_active { + /* RESET, STANDBY */ + mux { + pins = "gpio9","gpio8"; + function = "gpio"; + }; + + config { + pins = "gpio9","gpio8"; + bias-disable; /* No PULL */ + drive-strength = <2>; /* 2 MA */ + }; + }; + + blsp1_uart2_active: blsp1_uart2_active { + mux { + pins = "gpio31", "gpio34", "gpio33", "gpio32"; + function = "blsp_uart2_a"; + }; + + config { + pins = "gpio31", "gpio34", "gpio33", "gpio32"; + drive-strength = <2>; + bias-disable; + }; + }; + + blsp1_uart2_sleep: blsp1_uart2_sleep { + mux { + pins = "gpio31", "gpio34", "gpio33", "gpio32"; + function = "gpio"; + }; + + config { + pins = "gpio31", "gpio34", "gpio33", "gpio32"; + drive-strength = <2>; + bias-disable; + }; + }; + + blsp1_uart3: blsp1_uart3 { + blsp1_uart3_tx_active: blsp1_uart3_tx_active { + mux { + pins = "gpio45"; + function = "blsp_uart3_a"; + }; + + config { + pins = "gpio45"; + drive-strength = <2>; + bias-disable; + }; + }; + + blsp1_uart3_tx_sleep: blsp1_uart3_tx_sleep { + mux { + pins = "gpio45"; + function = "gpio"; + }; + + config { + pins = "gpio45"; + drive-strength = <2>; + bias-pull-up; + }; + }; + + blsp1_uart3_rxcts_active: blsp1_uart3_rxcts_active { + mux { + pins = "gpio46", "gpio47"; + function = "blsp_uart3_a"; + }; + + config { + pins = "gpio46", "gpio47"; + drive-strength = <2>; + bias-disable; + }; + }; + + blsp1_uart3_rxcts_sleep: blsp1_uart3_rxcts_sleep { + mux { + pins = "gpio46", "gpio47"; + function = "gpio"; + }; + + config { + pins = "gpio46", "gpio47"; + drive-strength = <2>; + bias-no-pull; + }; + }; + + blsp1_uart3_rfr_active: blsp1_uart3_rfr_active { + mux { + pins = "gpio48"; + function = "blsp_uart3_a"; + }; + + config { + pins = "gpio48"; + drive-strength = <2>; + bias-disable; + }; + }; + + blsp1_uart3_rfr_sleep: blsp1_uart3_rfr_sleep { + mux { + pins = "gpio48"; + function = "gpio"; + }; + + config { + pins = "gpio48"; + drive-strength = <2>; + bias-no-pull; + }; + }; + }; + + cam_sensor_rear2_suspend: cam_sensor_rear2_suspend { + /* RESET, STANDBY */ + mux { + pins = "gpio9","gpio8"; + function = "gpio"; + }; + config { + pins = "gpio9","gpio8"; + bias-disable; /* No PULL */ + drive-strength = <2>; /* 2 MA */ + }; + }; + + cam_sensor_mclk2_active: cam_sensor_mclk2_active { + /* MCLK1 */ + mux { + /* CLK, DATA */ + pins = "gpio15"; + function = "cam_mclk"; + }; + + config { + pins = "gpio15"; + bias-disable; /* No PULL */ + drive-strength = <8>; /* 2 MA */ + }; + }; + + cam_sensor_mclk2_suspend: cam_sensor_mclk2_suspend { + /* MCLK1 */ + mux { + /* CLK, DATA */ + pins = "gpio15"; + function = "cam_mclk"; + }; + + config { + pins = "gpio15"; + bias-pull-down; /* PULL DOWN */ + drive-strength = <8>; /* 2 MA */ + }; + }; + + cam_sensor_mclk3_active: cam_sensor_mclk3_active { + /* MCLK1 */ + 
mux { + /* CLK, DATA */ + pins = "gpio16"; + function = "cam_mclk"; + }; + + config { + pins = "gpio16"; + bias-disable; /* No PULL */ + drive-strength = <8>; /* 2 MA */ + }; + }; + + cam_sensor_mclk3_suspend: cam_sensor_mclk3_suspend { + /* MCLK1 */ + mux { + /* CLK, DATA */ + pins = "gpio16"; + function = "cam_mclk"; + }; + + config { + pins = "gpio16"; + bias-pull-down; /* PULL DOWN */ + drive-strength = <8>; /* 2 MA */ + }; + }; + + + cam_sensor_front_active: cam_sensor_front_active { + /* RESET VANA*/ + mux { + pins = "gpio28", "gpio29"; + function = "gpio"; + }; + + config { + pins = "gpio28", "gpio29"; + bias-disable; /* No PULL */ + drive-strength = <2>; /* 2 MA */ + }; + }; + + blsp2_uart1_active: blsp2_uart1_active { + mux { + pins = "gpio53", "gpio54", "gpio55", "gpio56"; + function = "blsp_uart7_a"; + }; + + config { + pins = "gpio53", "gpio54", "gpio55", "gpio56"; + drive-strength = <2>; + bias-disable; + }; + }; + + blsp2_uart1_sleep: blsp2_uart1_sleep { + mux { + pins = "gpio53", "gpio54", "gpio55", "gpio56"; + function = "gpio"; + }; + + config { + pins = "gpio53", "gpio54", "gpio55", "gpio56"; + drive-strength = <2>; + bias-disable; + }; + }; + + blsp2_uart2_active: blsp2_uart2_active { + mux { + pins = "gpio4", "gpio5", "gpio6", "gpio7"; + function = "blsp_uart8_a"; + }; + + config { + pins = "gpio4", "gpio5", "gpio6", "gpio7"; + drive-strength = <2>; + bias-disable; + }; + }; + + blsp2_uart2_sleep: blsp2_uart2_sleep { + mux { + pins = "gpio4", "gpio5", "gpio6", "gpio7"; + function = "gpio"; + }; + + config { + pins = "gpio4", "gpio5", "gpio6", "gpio7"; + drive-strength = <2>; + bias-disable; + }; + }; + + cam_sensor_front_suspend: cam_sensor_front_suspend { + /* RESET */ + mux { + pins = "gpio28"; + function = "gpio"; + }; + + config { + pins = "gpio28"; + bias-disable; /* No PULL */ + drive-strength = <2>; /* 2 MA */ + }; + }; + + pmx_mdss: pmx_mdss { + mdss_dsi_active: mdss_dsi_active { + mux { + pins = "gpio94", "gpio97", "gpio51"; + function = "gpio"; + }; + + config { + pins = "gpio94", "gpio97", "gpio51"; + drive-strength = <8>; /* 8 mA */ + bias-disable = <0>; /* no pull */ + }; + }; + mdss_dsi_suspend: mdss_dsi_suspend { + mux { + pins = "gpio94", "gpio97", "gpio51"; + function = "gpio"; + }; + + config { + pins = "gpio94", "gpio97", "gpio51"; + drive-strength = <2>; /* 2 mA */ + bias-pull-down; /* pull down */ + }; + }; + }; + + pmx_mdss_te { + mdss_te_active: mdss_te_active { + mux { + pins = "gpio97"; + function = "mdp_vsync_b"; + }; + config { + pins = "gpio97"; + drive-strength = <2>; /* 8 mA */ + bias-pull-down; /* pull down*/ + }; + }; + + mdss_te_suspend: mdss_te_suspend { + mux { + pins = "gpio97"; + function = "mdp_vsync_b"; + }; + config { + pins = "gpio97"; + drive-strength = <2>; /* 2 mA */ + bias-pull-down; /* pull down */ + }; + }; + }; + + mdss_dp_aux_active: mdss_dp_aux_active { + mux { + pins = "gpio77", "gpio78"; + function = "gpio"; + }; + + config { + pins = "gpio77", "gpio78"; + bias-disable = <0>; /* no pull */ + drive-strength = <8>; + }; + }; + + mdss_dp_aux_suspend: mdss_dp_aux_suspend { + mux { + pins = "gpio77", "gpio78"; + function = "gpio"; + }; + + config { + pins = "gpio77", "gpio78"; + bias-pull-down; + drive-strength = <2>; + }; + }; + + mdss_dp_usbplug_cc_active: mdss_dp_usbplug_cc_active { + mux { + pins = "gpio38"; + function = "gpio"; + }; + + config { + pins = "gpio38"; + bias-disable; + drive-strength = <16>; + }; + }; + + mdss_dp_usbplug_cc_suspend: mdss_dp_usbplug_cc_suspend { + mux { + pins = "gpio38"; + function = 
"gpio"; + }; + + config { + pins = "gpio38"; + bias-pull-down; + drive-strength = <2>; + }; + }; + + mdss_dp_hpd_active: mdss_dp_hpd_active { + mux { + pins = "gpio34"; + function = "edp_hot"; + }; + + config { + pins = "gpio34"; + bias-pull-down; + drive-strength = <16>; + }; + }; + + mdss_dp_hpd_suspend: mdss_dp_hpd_suspend { + mux { + pins = "gpio34"; + function = "edp_hot"; + }; + + config { + pins = "gpio34"; + bias-pull-down; + drive-strength = <2>; + }; + }; + + blsp2_uart3_active: blsp2_uart3_active { + mux { + pins = "gpio49", "gpio50", "gpio51", "gpio52"; + function = "blsp_uart9_a"; + }; + + config { + pins = "gpio49", "gpio50", "gpio51", "gpio52"; + drive-strength = <2>; + bias-disable; + }; + }; + + blsp2_uart3_sleep: blsp2_uart3_sleep { + mux { + pins = "gpio49", "gpio50", "gpio51", "gpio52"; + function = "gpio"; + }; + + config { + pins = "gpio49", "gpio50", "gpio51", "gpio52"; + drive-strength = <2>; + bias-disable; + }; + }; + + /* add pingrp for touchscreen */ + pmx_ts_int_active { + ts_int_active: ts_int_active { + mux { + pins = "gpio125"; + function = "gpio"; + }; + + config { + pins = "gpio125"; + drive-strength = <8>; + bias-pull-up; + }; + }; + }; + + pmx_ts_int_suspend { + ts_int_suspend1: ts_int_suspend1 { + mux { + pins = "gpio125"; + function = "gpio"; + }; + + config { + pins = "gpio125"; + drive-strength = <2>; + bias-pull-down; + }; + }; + }; + + pmx_ts_reset_active { + ts_reset_active: ts_reset_active { + mux { + pins = "gpio89"; + function = "gpio"; + }; + + config { + pins = "gpio89"; + drive-strength = <8>; + bias-pull-up; + }; + }; + }; + + pmx_ts_reset_suspend { + ts_reset_suspend1: ts_reset_suspend1 { + mux { + pins = "gpio89"; + function = "gpio"; + }; + + config { + pins = "gpio89"; + drive-strength = <2>; + bias-pull-down; + }; + }; + }; + + pmx_ts_release { + ts_release: ts_release { + mux { + pins = "gpio125", "gpio89"; + function = "gpio"; + }; + + config { + pins = "gpio125", "gpio89"; + drive-strength = <2>; + bias-pull-down; + }; + }; + }; + + ts_mux { + ts_active: ts_active { + mux { + pins = "gpio89", "gpio125"; + function = "gpio"; + }; + + config { + pins = "gpio89", "gpio125"; + drive-strength = <16>; + bias-pull-up; + }; + }; + + ts_reset_suspend: ts_reset_suspend { + mux { + pins = "gpio89"; + function = "gpio"; + }; + + config { + pins = "gpio89"; + drive-strength = <2>; + bias-pull-down; + }; + }; + + ts_int_suspend: ts_int_suspend { + mux { + pins = "gpio125"; + function = "gpio"; + }; + + config { + pins = "gpio125"; + drive-strength = <2>; + bias-disable; + }; + }; + }; + + ufs_dev_reset_assert: ufs_dev_reset_assert { + config { + pins = "ufs_reset"; + bias-pull-down; /* default: pull down */ + /* + * UFS_RESET driver strengths are having + * different values/steps compared to typical + * GPIO drive strengths. + * + * Following table clarifies: + * + * HDRV value | UFS_RESET | Typical GPIO + * (dec) | (mA) | (mA) + * 0 | 0.8 | 2 + * 1 | 1.55 | 4 + * 2 | 2.35 | 6 + * 3 | 3.1 | 8 + * 4 | 3.9 | 10 + * 5 | 4.65 | 12 + * 6 | 5.4 | 14 + * 7 | 6.15 | 16 + * + * POR value for UFS_RESET HDRV is 3 which means + * 3.1mA and we want to use that. Hence just + * specify 8mA to "drive-strength" binding and + * that should result into writing 3 to HDRV + * field. 
+ */ + drive-strength = <8>; /* default: 3.1 mA */ + output-low; /* active low reset */ + }; + }; + + ufs_dev_reset_deassert: ufs_dev_reset_deassert { + config { + pins = "ufs_reset"; + bias-pull-down; /* default: pull down */ + /* + * default: 3.1 mA + * check comments under ufs_dev_reset_assert + */ + drive-strength = <8>; + output-high; /* active low reset */ + }; + }; + + sdc2_clk_on: sdc2_clk_on { + config { + pins = "sdc2_clk"; + bias-disable; /* NO pull */ + drive-strength = <16>; /* 16 MA */ + }; + }; + + sdc2_clk_off: sdc2_clk_off { + config { + pins = "sdc2_clk"; + bias-disable; /* NO pull */ + drive-strength = <2>; /* 2 MA */ + }; + }; + + sdc2_cmd_on: sdc2_cmd_on { + config { + pins = "sdc2_cmd"; + bias-pull-up; /* pull up */ + drive-strength = <10>; /* 10 MA */ + }; + }; + + sdc2_cmd_off: sdc2_cmd_off { + config { + pins = "sdc2_cmd"; + bias-pull-up; /* pull up */ + drive-strength = <2>; /* 2 MA */ + }; + }; + + sdc2_data_on: sdc2_data_on { + config { + pins = "sdc2_data"; + bias-pull-up; /* pull up */ + drive-strength = <10>; /* 10 MA */ + }; + }; + + sdc2_data_off: sdc2_data_off { + config { + pins = "sdc2_data"; + bias-pull-up; /* pull up */ + drive-strength = <2>; /* 2 MA */ + }; + }; + + sdc2_cd_on: sdc2_cd_on { + mux { + pins = "gpio95"; + function = "gpio"; + }; + + config { + pins = "gpio95"; + bias-pull-up; /* pull up */ + drive-strength = <2>; /* 2 MA */ + }; + }; + + sdc2_cd_off: sdc2_cd_off { + mux { + pins = "gpio95"; + function = "gpio"; + }; + + config { + pins = "gpio95"; + bias-pull-up; /* pull up */ + drive-strength = <2>; /* 2 MA */ + }; + + }; + + led_enable: led_enable { + mux { + pins = "gpio21"; + drive_strength = <16>; + output-high; + }; + }; + + led_disable: led_disable { + mux { + pins = "gpio21"; + drive_strength = <2>; + output-low; + }; + }; + + trigout_a: trigout_a { + mux { + pins = "gpio58"; + function = "qdss_cti1_a"; + }; + + config { + pins = "gpio58"; + drive-strength = <2>; + bias-disable; + }; + }; + + mdss_hdmi_5v_active: mdss_hdmi_5v_active { + mux { + pins = "gpio133"; + function = "gpio"; + }; + + config { + pins = "gpio133"; + bias-pull-up; + drive-strength = <16>; + }; + }; + + mdss_hdmi_5v_suspend: mdss_hdmi_5v_suspend { + mux { + pins = "gpio133"; + function = "gpio"; + }; + + config { + pins = "gpio133"; + bias-pull-down; + drive-strength = <2>; + }; + }; + + + mdss_hdmi_hpd_active: mdss_hdmi_hpd_active { + mux { + pins = "gpio34"; + function = "hdmi_hot"; + }; + + config { + pins = "gpio34"; + bias-pull-down; + drive-strength = <16>; + }; + }; + + mdss_hdmi_hpd_suspend: mdss_hdmi_hpd_suspend { + mux { + pins = "gpio34"; + function = "hdmi_hot"; + }; + + config { + pins = "gpio34"; + bias-pull-down; + drive-strength = <2>; + }; + }; + + mdss_hdmi_ddc_active: mdss_hdmi_ddc_active { + mux { + pins = "gpio32", "gpio33"; + function = "hdmi_ddc"; + }; + + config { + pins = "gpio32", "gpio33"; + drive-strength = <2>; + bias-pull-up; + }; + }; + + mdss_hdmi_ddc_suspend: mdss_hdmi_ddc_suspend { + mux { + pins = "gpio32", "gpio33"; + function = "hdmi_ddc"; + }; + + config { + pins = "gpio32", "gpio33"; + drive-strength = <2>; + bias-pull-up; + }; + }; + + mdss_hdmi_cec_active: mdss_hdmi_cec_active { + mux { + pins = "gpio31"; + function = "hdmi_cec"; + }; + + config { + pins = "gpio31"; + drive-strength = <2>; + bias-pull-up; + }; + }; + + mdss_hdmi_cec_suspend: mdss_hdmi_cec_suspend { + mux { + pins = "gpio31"; + function = "hdmi_cec"; + }; + + config { + pins = "gpio31"; + drive-strength = <2>; + bias-pull-up; + }; + }; + + 
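+		/*
+		 * Usage sketch for the *_active/*_sleep pin states above:
+		 * client nodes select them by name through the standard
+		 * pinctrl properties. For example (illustrative only; the
+		 * exact names/ordering are assumed), the HS UART3 consumer
+		 * enabled in msm8998-svr20.dtsi could reference them as:
+		 *
+		 *	&blsp1_uart3_hs {
+		 *		pinctrl-names = "sleep", "default";
+		 *		pinctrl-0 = <&blsp1_uart3_tx_sleep
+		 *			     &blsp1_uart3_rxcts_sleep
+		 *			     &blsp1_uart3_rfr_sleep>;
+		 *		pinctrl-1 = <&blsp1_uart3_tx_active
+		 *			     &blsp1_uart3_rxcts_active
+		 *			     &blsp1_uart3_rfr_active>;
+		 *	};
+		 */
+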
tsif0_signals_active: tsif0_signals_active { + tsif1_clk { + pins = "gpio89"; /* TSIF0 CLK */ + function = "tsif1_clk"; + }; + tsif1_en { + pins = "gpio90"; /* TSIF0 Enable */ + function = "tsif1_en"; + }; + tsif1_data { + pins = "gpio91"; /* TSIF0 DATA */ + function = "tsif1_data"; + }; + signals_cfg { + pins = "gpio89", "gpio90", "gpio91"; + drive_strength = <2>; /* 2 mA */ + bias-pull-down; /* pull down */ + }; + }; + + /* sync signal is only used if configured to mode-2 */ + tsif0_sync_active: tsif0_sync_active { + tsif1_sync { + pins = "gpio9"; /* TSIF0 SYNC */ + function = "tsif1_sync"; + drive_strength = <2>; /* 2 mA */ + bias-pull-down; /* pull down */ + }; + }; + + tsif1_signals_active: tsif1_signals_active { + tsif2_clk { + pins = "gpio93"; /* TSIF1 CLK */ + function = "tsif2_clk"; + }; + tsif2_en { + pins = "gpio94"; /* TSIF1 Enable */ + function = "tsif2_en"; + }; + tsif2_data { + pins = "gpio95"; /* TSIF1 DATA */ + function = "tsif2_data"; + }; + signals_cfg { + pins = "gpio93", "gpio94", "gpio95"; + drive_strength = <2>; /* 2 mA */ + bias-pull-down; /* pull down */ + }; + }; + + /* sync signal is only used if configured to mode-2 */ + tsif1_sync_active: tsif1_sync_active { + tsif2_sync { + pins = "gpio96"; /* TSIF1 SYNC */ + function = "tsif2_sync"; + drive_strength = <2>; /* 2 mA */ + bias-pull-down; /* pull down */ + }; + }; + + pri_aux_pcm_clk { + pri_aux_pcm_clk_sleep: pri_aux_pcm_clk_sleep { + mux { + pins = "gpio65"; + function = "gpio"; + }; + + config { + pins = "gpio65"; + drive-strength = <2>; /* 2 mA */ + bias-pull-down; /* PULL DOWN */ + input-enable; + }; + }; + + pri_aux_pcm_clk_active: pri_aux_pcm_clk_active { + mux { + pins = "gpio65"; + function = "pri_mi2s"; + }; + + config { + pins = "gpio65"; + drive-strength = <8>; /* 8 mA */ + bias-disable; /* NO PULL */ + output-high; + }; + }; + }; + + pri_aux_pcm_sync { + pri_aux_pcm_sync_sleep: pri_aux_pcm_sync_sleep { + mux { + pins = "gpio66"; + function = "gpio"; + }; + + config { + pins = "gpio66"; + drive-strength = <2>; /* 2 mA */ + bias-pull-down; /* PULL DOWN */ + input-enable; + }; + }; + + pri_aux_pcm_sync_active: pri_aux_pcm_sync_active { + mux { + pins = "gpio66"; + function = "pri_mi2s_ws"; + }; + + config { + pins = "gpio66"; + drive-strength = <8>; /* 8 mA */ + bias-disable; /* NO PULL */ + output-high; + }; + }; + }; + + pri_aux_pcm_din { + pri_aux_pcm_din_sleep: pri_aux_pcm_din_sleep { + mux { + pins = "gpio67"; + function = "gpio"; + }; + + config { + pins = "gpio67"; + drive-strength = <2>; /* 2 mA */ + bias-pull-down; /* PULL DOWN */ + input-enable; + }; + }; + + pri_aux_pcm_din_active: pri_aux_pcm_din_active { + mux { + pins = "gpio67"; + function = "pri_mi2s"; + }; + + config { + pins = "gpio67"; + drive-strength = <8>; /* 8 mA */ + bias-disable; /* NO PULL */ + }; + }; + }; + + pri_aux_pcm_dout { + pri_aux_pcm_dout_sleep: pri_aux_pcm_dout_sleep { + mux { + pins = "gpio68"; + function = "gpio"; + }; + + config { + pins = "gpio68"; + drive-strength = <2>; /* 2 mA */ + bias-pull-down; /* PULL DOWN */ + input-enable; + }; + }; + + pri_aux_pcm_dout_active: pri_aux_pcm_dout_active { + mux { + pins = "gpio68"; + function = "pri_mi2s"; + }; + + config { + pins = "gpio68"; + drive-strength = <8>; /* 8 mA */ + bias-disable; /* NO PULL */ + }; + }; + }; + + sec_aux_pcm { + sec_aux_pcm_sleep: sec_aux_pcm_sleep { + mux { + pins = "gpio80", "gpio81"; + function = "gpio"; + }; + + config { + pins = "gpio80", "gpio81"; + drive-strength = <2>; /* 2 mA */ + bias-pull-down; /* PULL DOWN */ + input-enable; + }; + 
}; + + sec_aux_pcm_active: sec_aux_pcm_active { + mux { + pins = "gpio80", "gpio81"; + function = "sec_mi2s"; + }; + + config { + pins = "gpio80", "gpio81"; + drive-strength = <8>; /* 8 mA */ + bias-disable; /* NO PULL */ + }; + }; + }; + + sec_aux_pcm_din { + sec_aux_pcm_din_sleep: sec_aux_pcm_din_sleep { + mux { + pins = "gpio82"; + function = "gpio"; + }; + + config { + pins = "gpio82"; + drive-strength = <2>; /* 2 mA */ + bias-pull-down; /* PULL DOWN */ + input-enable; + }; + }; + + sec_aux_pcm_din_active: sec_aux_pcm_din_active { + mux { + pins = "gpio82"; + function = "sec_mi2s"; + }; + + config { + pins = "gpio82"; + drive-strength = <8>; /* 8 mA */ + bias-disable; /* NO PULL */ + }; + }; + }; + + sec_aux_pcm_dout { + sec_aux_pcm_dout_sleep: sec_aux_pcm_dout_sleep { + mux { + pins = "gpio83"; + function = "gpio"; + }; + + config { + pins = "gpio83"; + drive-strength = <2>; /* 2 mA */ + bias-pull-down; /* PULL DOWN */ + input-enable; + }; + }; + + sec_aux_pcm_dout_active: sec_aux_pcm_dout_active { + mux { + pins = "gpio83"; + function = "sec_mi2s"; + }; + + config { + pins = "gpio83"; + drive-strength = <8>; /* 8 mA */ + bias-disable; /* NO PULL */ + }; + }; + }; + + tert_aux_pcm { + tert_aux_pcm_sleep: tert_aux_pcm_sleep { + mux { + pins = "gpio75", "gpio76"; + function = "gpio"; + }; + + config { + pins = "gpio75", "gpio76"; + drive-strength = <2>; /* 2 mA */ + bias-pull-down; /* PULL DOWN */ + input-enable; + }; + }; + + tert_aux_pcm_active: tert_aux_pcm_active { + mux { + pins = "gpio75", "gpio76"; + function = "ter_mi2s"; + }; + + config { + pins = "gpio75", "gpio76"; + drive-strength = <8>; /* 8 mA */ + bias-disable; /* NO PULL */ + output-high; + }; + }; + }; + + tert_aux_pcm_din { + tert_aux_pcm_din_sleep: tert_aux_pcm_din_sleep { + mux { + pins = "gpio77"; + function = "gpio"; + }; + + config { + pins = "gpio77"; + drive-strength = <2>; /* 2 mA */ + bias-pull-down; /* PULL DOWN */ + input-enable; + }; + }; + + tert_aux_pcm_din_active: tert_aux_pcm_din_active { + mux { + pins = "gpio77"; + function = "ter_mi2s"; + }; + + config { + pins = "gpio77"; + drive-strength = <8>; /* 8 mA */ + bias-disable; /* NO PULL */ + }; + }; + }; + + tert_aux_pcm_dout { + tert_aux_pcm_dout_sleep: tert_aux_pcm_dout_sleep { + mux { + pins = "gpio78"; + function = "gpio"; + }; + + config { + pins = "gpio78"; + drive-strength = <2>; /* 2 mA */ + bias-pull-down; /* PULL DOWN */ + input-enable; + }; + }; + + tert_aux_pcm_dout_active: tert_aux_pcm_dout_active { + mux { + pins = "gpio78"; + function = "ter_mi2s"; + }; + + config { + pins = "gpio78"; + drive-strength = <8>; /* 8 mA */ + bias-disable; /* NO PULL */ + }; + }; + }; + + quat_aux_pcm { + quat_aux_pcm_sleep: quat_aux_pcm_sleep { + mux { + pins = "gpio58", "gpio59"; + function = "gpio"; + }; + + config { + pins = "gpio58", "gpio59"; + drive-strength = <2>; /* 2 mA */ + bias-pull-down; /* PULL DOWN */ + input-enable; + }; + }; + + quat_aux_pcm_active: quat_aux_pcm_active { + mux { + pins = "gpio58", "gpio59"; + function = "qua_mi2s"; + }; + + config { + pins = "gpio58", "gpio59"; + drive-strength = <8>; /* 8 mA */ + bias-disable; /* NO PULL */ + output-high; + }; + }; + }; + + quat_aux_pcm_din { + quat_aux_pcm_din_sleep: quat_aux_pcm_din_sleep { + mux { + pins = "gpio60"; + function = "gpio"; + }; + + config { + pins = "gpio60"; + drive-strength = <2>; /* 2 mA */ + bias-pull-down; /* PULL DOWN */ + input-enable; + }; + }; + + quat_aux_pcm_din_active: quat_aux_pcm_din_active { + mux { + pins = "gpio60"; + function = "qua_mi2s"; + }; + + config { 
+ pins = "gpio60"; + drive-strength = <8>; /* 8 mA */ + bias-disable; /* NO PULL */ + }; + }; + }; + + quat_aux_pcm_dout { + quat_aux_pcm_dout_sleep: quat_aux_pcm_dout_sleep { + mux { + pins = "gpio61"; + function = "gpio"; + }; + + config { + pins = "gpio61"; + drive-strength = <2>; /* 2 mA */ + bias-pull-down; /* PULL DOWN */ + input-enable; + }; + }; + + quat_aux_pcm_dout_active: quat_aux_pcm_dout_active { + mux { + pins = "gpio61"; + function = "qua_mi2s"; + }; + + config { + pins = "gpio61"; + drive-strength = <8>; /* 8 mA */ + bias-disable; /* NO PULL */ + }; + }; + }; + + pri_mi2s_mclk { + pri_mi2s_mclk_sleep: pri_mi2s_mclk_sleep { + mux { + pins = "gpio64"; + function = "gpio"; + }; + + config { + pins = "gpio64"; + drive-strength = <2>; /* 2 mA */ + bias-pull-down; /* PULL DOWN */ + input-enable; + }; + }; + + pri_mi2s_mclk_active: pri_mi2s_mclk_active { + mux { + pins = "gpio64"; + function = "pri_mi2s"; + }; + + config { + pins = "gpio64"; + drive-strength = <8>; /* 8 mA */ + bias-disable; /* NO PULL */ + output-high; + }; + }; + }; + + pri_mi2s_sck { + pri_mi2s_sck_sleep: pri_mi2s_sck_sleep { + mux { + pins = "gpio65"; + function = "gpio"; + }; + + config { + pins = "gpio65"; + drive-strength = <2>; /* 2 mA */ + bias-pull-down; /* PULL DOWN */ + input-enable; + }; + }; + + pri_mi2s_sck_active: pri_mi2s_sck_active { + mux { + pins = "gpio65"; + function = "pri_mi2s"; + }; + + config { + pins = "gpio65"; + drive-strength = <8>; /* 8 mA */ + bias-disable; /* NO PULL */ + output-high; + }; + }; + }; + + pri_mi2s_ws { + pri_mi2s_ws_sleep: pri_mi2s_ws_sleep { + mux { + pins = "gpio66"; + function = "gpio"; + }; + + config { + pins = "gpio66"; + drive-strength = <2>; /* 2 mA */ + bias-pull-down; /* PULL DOWN */ + input-enable; + }; + }; + + pri_mi2s_ws_active: pri_mi2s_ws_active { + mux { + pins = "gpio66"; + function = "pri_mi2s_ws"; + }; + + config { + pins = "gpio66"; + drive-strength = <8>; /* 8 mA */ + bias-disable; /* NO PULL */ + output-high; + }; + }; + }; + + pri_mi2s_sd0 { + pri_mi2s_sd0_sleep: pri_mi2s_sd0_sleep { + mux { + pins = "gpio67"; + function = "gpio"; + }; + + config { + pins = "gpio67"; + drive-strength = <2>; /* 2 mA */ + bias-pull-down; /* PULL DOWN */ + input-enable; + }; + }; + + pri_mi2s_sd0_active: pri_mi2s_sd0_active { + mux { + pins = "gpio67"; + function = "pri_mi2s"; + }; + + config { + pins = "gpio67"; + drive-strength = <8>; /* 8 mA */ + bias-disable; /* NO PULL */ + }; + }; + }; + + pri_mi2s_sd1 { + pri_mi2s_sd1_sleep: pri_mi2s_sd1_sleep { + mux { + pins = "gpio68"; + function = "gpio"; + }; + + config { + pins = "gpio68"; + drive-strength = <2>; /* 2 mA */ + bias-pull-down; /* PULL DOWN */ + input-enable; + }; + }; + + pri_mi2s_sd1_active: pri_mi2s_sd1_active { + mux { + pins = "gpio68"; + function = "pri_mi2s"; + }; + + config { + pins = "gpio68"; + drive-strength = <8>; /* 8 mA */ + bias-disable; /* NO PULL */ + }; + }; + }; + + sec_mi2s_mclk { + sec_mi2s_mclk_sleep: sec_mi2s_mclk_sleep { + mux { + pins = "gpio79"; + function = "gpio"; + }; + + config { + pins = "gpio79"; + drive-strength = <2>; /* 2 mA */ + bias-pull-down; /* PULL DOWN */ + input-enable; + }; + }; + + sec_mi2s_mclk_active: sec_mi2s_mclk_active { + mux { + pins = "gpio79"; + function = "sec_mi2s"; + }; + + config { + pins = "gpio79"; + drive-strength = <8>; /* 8 mA */ + bias-disable; /* NO PULL */ + }; + }; + }; + + sec_mi2s { + sec_mi2s_sleep: sec_mi2s_sleep { + mux { + pins = "gpio80", "gpio81"; + function = "gpio"; + }; + + config { + pins = "gpio80", "gpio81"; + drive-strength 
= <2>; /* 2 mA */ + bias-disable; /* NO PULL */ + input-enable; + }; + }; + + sec_mi2s_active: sec_mi2s_active { + mux { + pins = "gpio80", "gpio81"; + function = "sec_mi2s"; + }; + + config { + pins = "gpio80", "gpio81"; + drive-strength = <8>; /* 8 mA */ + bias-disable; /* NO PULL */ + }; + }; + }; + + sec_mi2s_sd0 { + sec_mi2s_sd0_sleep: sec_mi2s_sd0_sleep { + mux { + pins = "gpio82"; + function = "gpio"; + }; + + config { + pins = "gpio82"; + drive-strength = <2>; /* 2 mA */ + bias-pull-down; /* PULL DOWN */ + input-enable; + }; + }; + + sec_mi2s_sd0_active: sec_mi2s_sd0_active { + mux { + pins = "gpio82"; + function = "sec_mi2s"; + }; + + config { + pins = "gpio82"; + drive-strength = <8>; /* 8 mA */ + bias-disable; /* NO PULL */ + }; + }; + }; + + sec_mi2s_sd1 { + sec_mi2s_sd1_sleep: sec_mi2s_sd1_sleep { + mux { + pins = "gpio83"; + function = "gpio"; + }; + + config { + pins = "gpio83"; + drive-strength = <2>; /* 2 mA */ + bias-pull-down; /* PULL DOWN */ + input-enable; + }; + }; + + sec_mi2s_sd1_active: sec_mi2s_sd1_active { + mux { + pins = "gpio83"; + function = "sec_mi2s"; + }; + + config { + pins = "gpio83"; + drive-strength = <8>; /* 8 mA */ + bias-disable; /* NO PULL */ + }; + }; + }; + + tert_mi2s_mclk { + tert_mi2s_mclk_sleep: tert_mi2s_mclk_sleep { + mux { + pins = "gpio74"; + function = "gpio"; + }; + + config { + pins = "gpio74"; + drive-strength = <2>; /* 2 mA */ + bias-pull-down; /* PULL DOWN */ + input-enable; + }; + }; + + tert_mi2s_mclk_active: tert_mi2s_mclk_active { + mux { + pins = "gpio74"; + function = "ter_mi2s"; + }; + + config { + pins = "gpio74"; + drive-strength = <8>; /* 8 mA */ + bias-disable; /* NO PULL */ + }; + }; + }; + + tert_mi2s { + tert_mi2s_sleep: tert_mi2s_sleep { + mux { + pins = "gpio75", "gpio76"; + function = "gpio"; + }; + + config { + pins = "gpio75", "gpio76"; + drive-strength = <2>; /* 2 mA */ + bias-pull-down; /* PULL DOWN */ + input-enable; + }; + }; + + tert_mi2s_active: tert_mi2s_active { + mux { + pins = "gpio75", "gpio76"; + function = "ter_mi2s"; + }; + + config { + pins = "gpio75", "gpio76"; + drive-strength = <8>; /* 8 mA */ + bias-disable; /* NO PULL */ + output-high; + }; + }; + }; + + tert_mi2s_sd0 { + tert_mi2s_sd0_sleep: tert_mi2s_sd0_sleep { + mux { + pins = "gpio77"; + function = "gpio"; + }; + + config { + pins = "gpio77"; + drive-strength = <2>; /* 2 mA */ + bias-pull-down; /* PULL DOWN */ + input-enable; + }; + }; + + tert_mi2s_sd0_active: tert_mi2s_sd0_active { + mux { + pins = "gpio77"; + function = "ter_mi2s"; + }; + + config { + pins = "gpio77"; + drive-strength = <8>; /* 8 mA */ + bias-disable; /* NO PULL */ + }; + }; + }; + + tert_mi2s_sd1 { + tert_mi2s_sd1_sleep: tert_mi2s_sd1_sleep { + mux { + pins = "gpio78"; + function = "gpio"; + }; + + config { + pins = "gpio78"; + drive-strength = <2>; /* 2 mA */ + bias-pull-down; /* PULL DOWN */ + input-enable; + }; + }; + + tert_mi2s_sd1_active: tert_mi2s_sd1_active { + mux { + pins = "gpio78"; + function = "ter_mi2s"; + }; + + config { + pins = "gpio78"; + drive-strength = <8>; /* 8 mA */ + bias-disable; /* NO PULL */ + }; + }; + }; + + quat_mi2s_mclk { + quat_mi2s_mclk_sleep: quat_mi2s_mclk_sleep { + mux { + pins = "gpio57"; + function = "gpio"; + }; + + config { + pins = "gpio57"; + drive-strength = <2>; /* 2 mA */ + bias-pull-down; /* PULL DOWN */ + input-enable; + }; + }; + + quat_mi2s_mclk_active: quat_mi2s_mclk_active { + mux { + pins = "gpio57"; + function = "qua_mi2s"; + }; + + config { + pins = "gpio57"; + drive-strength = <8>; /* 8 mA */ + bias-disable; /* NO 
PULL */ + }; + }; + }; + + quat_mi2s { + quat_mi2s_sleep: quat_mi2s_sleep { + mux { + pins = "gpio58", "gpio59"; + function = "gpio"; + }; + + config { + pins = "gpio58", "gpio59"; + drive-strength = <2>; /* 2 mA */ + bias-pull-down; /* PULL DOWN */ + input-enable; + }; + }; + + quat_mi2s_active: quat_mi2s_active { + mux { + pins = "gpio58", "gpio59"; + function = "qua_mi2s"; + }; + + config { + pins = "gpio58", "gpio59"; + drive-strength = <8>; /* 8 mA */ + bias-disable; /* NO PULL */ + output-high; + }; + }; + }; + + quat_mi2s_sd0 { + quat_mi2s_sd0_sleep: quat_mi2s_sd0_sleep { + mux { + pins = "gpio60"; + function = "gpio"; + }; + + config { + pins = "gpio60"; + drive-strength = <2>; /* 2 mA */ + bias-pull-down; /* PULL DOWN */ + input-enable; + }; + }; + + quat_mi2s_sd0_active: quat_mi2s_sd0_active { + mux { + pins = "gpio60"; + function = "qua_mi2s"; + }; + + config { + pins = "gpio60"; + drive-strength = <8>; /* 8 mA */ + bias-disable; /* NO PULL */ + }; + }; + }; + + quat_mi2s_sd1 { + quat_mi2s_sd1_sleep: quat_mi2s_sd1_sleep { + mux { + pins = "gpio61"; + function = "gpio"; + }; + + config { + pins = "gpio61"; + drive-strength = <2>; /* 2 mA */ + bias-pull-down; /* PULL DOWN */ + input-enable; + }; + }; + + quat_mi2s_sd1_active: quat_mi2s_sd1_active { + mux { + pins = "gpio61"; + function = "qua_mi2s"; + }; + + config { + pins = "gpio61"; + drive-strength = <8>; /* 8 mA */ + bias-disable; /* NO PULL */ + }; + }; + }; + + quat_mi2s_sd2 { + quat_mi2s_sd2_sleep: quat_mi2s_sd2_sleep { + mux { + pins = "gpio62"; + function = "gpio"; + }; + + config { + pins = "gpio62"; + drive-strength = <2>; /* 2 mA */ + bias-pull-down; /* PULL DOWN */ + input-enable; + }; + }; + + quat_mi2s_sd2_active: quat_mi2s_sd2_active { + mux { + pins = "gpio62"; + function = "qua_mi2s"; + }; + + config { + pins = "gpio62"; + drive-strength = <8>; /* 8 mA */ + bias-disable; /* NO PULL */ + }; + }; + }; + + quat_mi2s_sd3 { + quat_mi2s_sd3_sleep: quat_mi2s_sd3_sleep { + mux { + pins = "gpio63"; + function = "gpio"; + }; + + config { + pins = "gpio63"; + drive-strength = <2>; /* 2 mA */ + bias-pull-down; /* PULL DOWN */ + input-enable; + }; + }; + + quat_mi2s_sd3_active: quat_mi2s_sd3_active { + mux { + pins = "gpio63"; + function = "qua_mi2s"; + }; + + config { + pins = "gpio63"; + drive-strength = <8>; /* 8 mA */ + bias-disable; /* NO PULL */ + }; + }; + }; + + spkr_i2s_clk_pin { + spkr_i2s_clk_sleep: spkr_i2s_clk_sleep { + mux { + pins = "gpio69"; + function = "spkr_i2s"; + }; + + config { + pins = "gpio69"; + drive-strength = <2>; /* 2 mA */ + bias-pull-down; /* PULL DOWN */ + }; + }; + + spkr_i2s_clk_active: spkr_i2s_clk_active { + mux { + pins = "gpio69"; + function = "spkr_i2s"; + }; + + config { + pins = "gpio69"; + drive-strength = <8>; /* 8 mA */ + bias-disable; /* NO PULL */ + }; + }; + }; + + spkr_1_sd_mediabox { + spkr_1_sd_sleep_mediabox: spkr_1_sd_sleep_mediabox { + mux { + pins = "gpio85"; + function = "gpio"; + }; + config { + pins = "gpio85"; + drive-strength = <2>; /* 2 mA */ + bias-pull-down; + input-enable; + }; + }; + spkr_1_sd_active_mediabox: spkr_1_sd_active_mediabox { + mux { + pins = "gpio85"; + function = "gpio"; + }; + config { + pins = "gpio85"; + drive-strength = <8>; /* 8 mA */ + bias-disable; + output-high; + }; + }; + }; + + spkr_2_sd_mediabox_mediabox { + spkr_2_sd_sleep_mediabox: spkr_2_sd_sleep_mediabox { + mux { + pins = "gpio112"; + function = "gpio"; + }; + config { + pins = "gpio112"; + drive-strength = <2>; /* 2 mA */ + bias-pull-down; + input-enable; + }; + }; + 
spkr_2_sd_active_mediabox: spkr_2_sd_active_mediabox { + mux { + pins = "gpio112"; + function = "gpio"; + }; + config { + pins = "gpio112"; + drive-strength = <8>; /* 8 mA */ + bias-disable; + output-high; + }; + }; + }; + + sdc2_cd_on_mediabox: sdc2_cd_on_mediabox { + mux { + pins = "gpio86"; + function = "gpio"; + }; + + config { + pins = "gpio86"; + bias-pull-up; /* pull up */ + drive-strength = <2>; /* 2 MA */ + }; + }; + + sdc2_cd_off_mediabox: sdc2_cd_off_mediabox { + mux { + pins = "gpio86"; + function = "gpio"; + }; + + config { + pins = "gpio86"; + bias-pull-up; /* pull up */ + drive-strength = <2>; /* 2 MA */ + }; + }; + + ir_active: ir_active { + mux { + pins = "gpio90", "gpio91"; + function = "gpio"; + }; + + config { + pins = "gpio90", "gpio91"; + drive-strength = <16>; + bias-disable; + output-high; + }; + }; + + mtch6102_int { + mtch6102_int_active: mtch6102_int_active { + mux { + pins = "gpio125"; + function = "gpio"; + }; + + config { + pins = "gpio125"; + drive-strength = <16>; /* 16 mA */ + bias-pull-up; + }; + }; + + mtch6102_int_suspend: mtch6102_int_suspend { + mux { + pins = "gpio125"; + function = "gpio"; + }; + + config { + pins = "gpio125"; + drive-strength = <2>; /* 2 mA */ + bias-pull-down; + }; + }; + }; + + mtch6102_rst { + mtch6102_rst_active: mtch6102_rst_active { + mux { + pins = "gpio85"; + function = "gpio"; + }; + + config { + pins = "gpio85"; + drive-strength = <8>; /* 8 mA */ + bias-disable; /* NO PULL */ + output-high; + }; + }; + mtch6102_rst_suspend: mtch6102_rst_suspend { + mux { + pins = "gpio85"; + function = "gpio"; + }; + + config { + pins = "gpio85"; + drive-strength = <2>; /* 2 mA */ + bias-pull-down; + }; + }; + }; + }; +}; diff --git a/arch/arm/boot/dts/qcom/msm8998-svr20.dtsi b/arch/arm/boot/dts/qcom/msm8998-svr20.dtsi new file mode 100644 index 000000000000..a1fce88c8e54 --- /dev/null +++ b/arch/arm/boot/dts/qcom/msm8998-svr20.dtsi @@ -0,0 +1,409 @@ +/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include +#include "msm8998-svr20-pinctrl.dtsi" +#include "msm8998-camera-sensor-svr20.dtsi" +&vendor { + bluetooth: bt_wcn3990 { + compatible = "qca,wcn3990"; + qca,bt-vdd-io-supply = <&pm8998_s3>; + qca,bt-vdd-xtal-supply = <&pm8998_s5>; + qca,bt-vdd-core-supply = <&pm8998_l7>; + qca,bt-vdd-pa-supply = <&pm8998_l17>; + qca,bt-vdd-ldo-supply = <&pm8998_l25>; + qca,bt-chip-pwd-supply = <&pmi8998_bob_pin1>; + clocks = <&clock_gcc clk_rf_clk2_pin>; + clock-names = "rf_clk2"; + + qca,bt-vdd-io-voltage-level = <1352000 1352000>; + qca,bt-vdd-xtal-voltage-level = <2040000 2040000>; + qca,bt-vdd-core-voltage-level = <1800000 1800000>; + qca,bt-vdd-pa-voltage-level = <1304000 1304000>; + qca,bt-vdd-ldo-voltage-level = <3312000 3312000>; + qca,bt-chip-pwd-voltage-level = <3600000 3600000>; + + qca,bt-vdd-io-current-level = <1>; /* LPM/PFM */ + qca,bt-vdd-xtal-current-level = <1>; /* LPM/PFM */ + qca,bt-vdd-core-current-level = <1>; /* LPM/PFM */ + qca,bt-vdd-pa-current-level = <1>; /* LPM/PFM */ + qca,bt-vdd-ldo-current-level = <1>; /* LPM/PFM */ + }; + svr20_batterydata: qcom,battery-data { + qcom,batt-id-range-pct = <25>; + #include "fg-gen3-batterydata-svr-v2-3200mah.dtsi" + }; +}; + +&blue_led { + qcom,default-state = "on"; + linux,default-trigger = "system-running"; +}; + +&pmi8998_charger { + qcom,fcc-max-ua = <5000000>; + qcom,usb-icl-ua = <3000000>; +}; + +&blsp1_uart3_hs { + status = "ok"; +}; + +&ufsphy1 { + vdda-phy-supply = <&pm8998_l1>; + vdda-pll-supply = <&pm8998_l2>; + vddp-ref-clk-supply = <&pm8998_l26>; + vdda-phy-max-microamp = <51400>; + vdda-pll-max-microamp = <14600>; + vddp-ref-clk-max-microamp = <100>; + vddp-ref-clk-always-on; + status = "ok"; +}; + +&ufs1 { + vdd-hba-supply = <&gdsc_ufs>; + vdd-hba-fixed-regulator; + vcc-supply = <&pm8998_l20>; + vccq-supply = <&pm8998_l26>; + vccq2-supply = <&pm8998_s4>; + vcc-max-microamp = <750000>; + vccq-max-microamp = <560000>; + vccq2-max-microamp = <750000>; + status = "ok"; +}; + +&ufs_ice { + status = "ok"; +}; + +&sdhc_2 { + vdd-supply = <&pm8998_l21>; + qcom,vdd-voltage-level = <2950000 2960000>; + qcom,vdd-current-level = <200 800000>; + + vdd-io-supply = <&pm8998_l13>; + qcom,vdd-io-voltage-level = <1808000 2960000>; + qcom,vdd-io-current-level = <200 22000>; + + pinctrl-names = "active", "sleep"; + pinctrl-0 = <&sdc2_clk_on &sdc2_cmd_on &sdc2_data_on &sdc2_cd_on>; + pinctrl-1 = <&sdc2_clk_off &sdc2_cmd_off &sdc2_data_off &sdc2_cd_off>; + + qcom,clk-rates = <400000 20000000 25000000 + 50000000 100000000 200000000>; + qcom,bus-speed-mode = "SDR12", "SDR25", "SDR50", "DDR50", "SDR104"; + + cd-gpios = <&tlmm 95 0x1>; + + status = "ok"; +}; + +&uartblsp2dm1 { + status = "ok"; + pinctrl-names = "default"; + pinctrl-0 = <&uart_console_active>; +}; + +&pm8998_gpios { + /* GPIO 5 for Home Key */ + gpio@c400 { + status = "okay"; + qcom,mode = <0>; + qcom,pull = <0>; + qcom,vin-sel = <0>; + qcom,src-sel = <0>; + qcom,out-strength = <1>; + }; + + /* GPIO 6 for Vol+ Key */ + gpio@c500 { + status = "okay"; + qcom,mode = <0>; + qcom,pull = <0>; + qcom,vin-sel = <0>; + qcom,src-sel = <0>; + qcom,out-strength = <1>; + }; + + /* GPIO 7 for Snapshot Key */ + gpio@c600 { + status = "okay"; + qcom,mode = <0>; + qcom,pull = <0>; + qcom,vin-sel = <0>; + qcom,src-sel = <0>; + qcom,out-strength = <1>; + }; + + /* GPIO 8 for Focus Key */ + gpio@c700 { + status = "okay"; + qcom,mode = <0>; + qcom,pull = <0>; + qcom,vin-sel = <0>; + qcom,src-sel = <0>; + qcom,out-strength = <1>; + }; + + gpio@cc00 { /* GPIO 13 */ + qcom,mode = <1>; + 
qcom,output-type = <0>; + qcom,pull = <5>; + qcom,vin-sel = <0>; + qcom,out-strength = <1>; + qcom,src-sel = <3>; + qcom,master-en = <1>; + status = "okay"; + }; + + /* GPIO 21 (NFC_CLK_REQ) */ + gpio@d400 { + qcom,mode = <0>; + qcom,vin-sel = <1>; + qcom,src-sel = <0>; + qcom,master-en = <1>; + status = "okay"; + }; + + /* GPIO 18 SMB138X */ + gpio@d100 { + qcom,mode = <0>; + qcom,pull = <0>; + qcom,vin-sel = <0>; + qcom,src-sel = <0>; + qcom,master-en = <1>; + status = "okay"; + }; +}; + +&i2c_5 { + status = "okay"; +}; + +&i2c_6 { /* BLSP1 QUP6 (NFC) */ + status = "okay"; + nq@28 { + compatible = "qcom,nq-nci"; + reg = <0x28>; + qcom,nq-irq = <&tlmm 92 0x00>; + qcom,nq-ven = <&tlmm 12 0x00>; + qcom,nq-firm = <&tlmm 93 0x00>; + qcom,nq-clkreq = <&pm8998_gpios 21 0x00>; + qcom,nq-esepwr = <&tlmm 116 0x00>; + interrupt-parent = <&tlmm>; + qcom,clk-src = "BBCLK3"; + interrupts = <92 0>; + interrupt-names = "nfc_irq"; + pinctrl-names = "nfc_active", "nfc_suspend"; + pinctrl-0 = <&nfc_int_active &nfc_enable_active>; + pinctrl-1 = <&nfc_int_suspend &nfc_enable_suspend>; + clocks = <&clock_gcc clk_ln_bb_clk3_pin>; + clock-names = "ref_clk"; + }; +}; + +&mdss_hdmi_tx { + status = "disabled"; + pinctrl-names = "hdmi_hpd_active", "hdmi_ddc_active", "hdmi_cec_active", + "hdmi_active", "hdmi_sleep"; + pinctrl-0 = <&mdss_hdmi_5v_active &mdss_hdmi_hpd_active + &mdss_hdmi_ddc_suspend &mdss_hdmi_cec_suspend>; + pinctrl-1 = <&mdss_hdmi_5v_active &mdss_hdmi_hpd_active + &mdss_hdmi_ddc_active &mdss_hdmi_cec_suspend>; + pinctrl-2 = <&mdss_hdmi_5v_active &mdss_hdmi_hpd_active + &mdss_hdmi_cec_active &mdss_hdmi_ddc_suspend>; + pinctrl-3 = <&mdss_hdmi_5v_active &mdss_hdmi_hpd_active + &mdss_hdmi_ddc_active &mdss_hdmi_cec_active>; + pinctrl-4 = <&mdss_hdmi_5v_suspend &mdss_hdmi_hpd_suspend + &mdss_hdmi_ddc_suspend &mdss_hdmi_cec_suspend>; +}; + +&mdss_dp_ctrl { + status = "disabled"; + pinctrl-names = "mdss_dp_active", "mdss_dp_sleep"; + pinctrl-0 = <&mdss_dp_aux_active &mdss_dp_usbplug_cc_active>; + pinctrl-1 = <&mdss_dp_aux_suspend &mdss_dp_usbplug_cc_suspend>; + qcom,aux-en-gpio = <&tlmm 77 0>; + qcom,aux-sel-gpio = <&tlmm 78 0>; + qcom,usbplug-cc-gpio = <&tlmm 38 0>; +}; + +&mdss_mdp { + qcom,mdss-pref-prim-intf = "dsi"; +}; + +&mdss_dsi { + hw-config = "split_dsi"; +}; + +&mem_client_3_size { + qcom,peripheral-size = <0x500000>; +}; + +&pmi8998_haptics { + status = "okay"; +}; + +&pm8998_vadc { + chan@83 { + label = "vph_pwr"; + reg = <0x83>; + qcom,decimation = <2>; + qcom,pre-div-channel-scaling = <1>; + qcom,calibration-type = "absolute"; + qcom,scale-function = <0>; + qcom,hw-settle-time = <0>; + qcom,fast-avg-setup = <0>; + }; + + chan@85 { + label = "vcoin"; + reg = <0x85>; + qcom,decimation = <2>; + qcom,pre-div-channel-scaling = <1>; + qcom,calibration-type = "absolute"; + qcom,scale-function = <0>; + qcom,hw-settle-time = <0>; + qcom,fast-avg-setup = <0>; + }; + + chan@4c { + label = "xo_therm"; + reg = <0x4c>; + qcom,decimation = <2>; + qcom,pre-div-channel-scaling = <0>; + qcom,calibration-type = "ratiometric"; + qcom,scale-function = <4>; + qcom,hw-settle-time = <2>; + qcom,fast-avg-setup = <0>; + }; + + chan@4d { + label = "msm_therm"; + reg = <0x4d>; + qcom,decimation = <2>; + qcom,pre-div-channel-scaling = <0>; + qcom,calibration-type = "ratiometric"; + qcom,scale-function = <2>; + qcom,hw-settle-time = <2>; + qcom,fast-avg-setup = <0>; + }; + + chan@51 { + label = "quiet_therm"; + reg = <0x51>; + qcom,decimation = <2>; + qcom,pre-div-channel-scaling = <0>; + qcom,calibration-type = 
"ratiometric"; + qcom,scale-function = <2>; + qcom,hw-settle-time = <2>; + qcom,fast-avg-setup = <0>; + }; +}; + +&pm8998_adc_tm { + chan@83 { + label = "vph_pwr"; + reg = <0x83>; + qcom,pre-div-channel-scaling = <1>; + qcom,calibration-type = "absolute"; + qcom,scale-function = <0>; + qcom,hw-settle-time = <0>; + qcom,btm-channel-number = <0x60>; + }; + + chan@4d { + label = "msm_therm"; + reg = <0x4d>; + qcom,pre-div-channel-scaling = <0>; + qcom,calibration-type = "ratiometric"; + qcom,scale-function = <2>; + qcom,hw-settle-time = <2>; + qcom,btm-channel-number = <0x68>; + qcom,thermal-node; + }; + + chan@51 { + label = "quiet_therm"; + reg = <0x51>; + qcom,pre-div-channel-scaling = <0>; + qcom,calibration-type = "ratiometric"; + qcom,scale-function = <2>; + qcom,hw-settle-time = <2>; + qcom,btm-channel-number = <0x70>; + qcom,thermal-node; + }; + + chan@4c { + label = "xo_therm"; + reg = <0x4c>; + qcom,pre-div-channel-scaling = <0>; + qcom,calibration-type = "ratiometric"; + qcom,scale-function = <4>; + qcom,hw-settle-time = <2>; + qcom,btm-channel-number = <0x78>; + qcom,thermal-node; + }; +}; + +&wil6210 { + status = "ok"; +}; + +&soc { + gpio_keys { + compatible = "gpio-keys"; + input-name = "gpio-keys"; + status = "okay"; + + home { + label = "home"; + gpios = <&pm8998_gpios 5 0x1>; + linux,input-type = <1>; + linux,code = <158>; + gpio-key,wakeup; + debounce-interval = <15>; + }; + + vol_up { + label = "volume_up"; + gpios = <&pm8998_gpios 6 0x1>; + linux,input-type = <1>; + linux,code = <115>; + gpio-key,wakeup; + debounce-interval = <15>; + }; + + vol_down { + label = "volume_down"; + gpios = <&pm8998_gpios 7 0x1>; + linux,input-type = <1>; + linux,code = <114>; + gpio-key,wakeup; + debounce-interval = <15>; + }; + + confirm { + label = "confirm_key"; + gpios = <&pm8998_gpios 8 0x1>; + linux,input-type = <1>; + linux,code = <28>; + gpio-key,wakeup; + debounce-interval = <15>; + }; + }; +}; + +&pmi8998_fg { + qcom,battery-data = <&svr20_batterydata>; + qcom,fg-force-load-profile; +}; diff --git a/arch/arm/boot/dts/qcom/msm8998-v2.dtsi b/arch/arm/boot/dts/qcom/msm8998-v2.dtsi index b2f30de94bbc..acdd4bdcd95b 100644 --- a/arch/arm/boot/dts/qcom/msm8998-v2.dtsi +++ b/arch/arm/boot/dts/qcom/msm8998-v2.dtsi @@ -436,7 +436,7 @@ 0x9ac 0x00 0x00 0x8a0 0x01 0x00 0x9e0 0x00 0x00 - 0x9dc 0x01 0x00 + 0x9dc 0x20 0x00 0x9a8 0x00 0x00 0x8a4 0x01 0x00 0x8a8 0x73 0x00 diff --git a/arch/arm/boot/dts/qcom/msm8998.dtsi b/arch/arm/boot/dts/qcom/msm8998.dtsi index 5218a1d86e6d..eafa6b841c17 100644 --- a/arch/arm/boot/dts/qcom/msm8998.dtsi +++ b/arch/arm/boot/dts/qcom/msm8998.dtsi @@ -2366,6 +2366,10 @@ hyplog-size-offset = <0x414>; /* 0x066BFB34 */ }; + qcom_msmhdcp: qcom,msm_hdcp { + compatible = "qcom,msm-hdcp"; + }; + qcom_crypto: qcrypto@1DE0000 { compatible = "qcom,qcrypto"; reg = <0x1DE0000 0x20000>, @@ -2671,7 +2675,7 @@ 0x9ac 0x00 0x00 0x8a0 0x01 0x00 0x9e0 0x00 0x00 - 0x9dc 0x01 0x00 + 0x9dc 0x20 0x00 0x9a8 0x00 0x00 0x8a4 0x01 0x00 0x8a8 0x73 0x00 @@ -3059,8 +3063,8 @@ qcom,msm-core@780000 { compatible = "qcom,apss-core-ea"; reg = <0x780000 0x1000>; - qcom,low-hyst-temp = <10>; - qcom,high-hyst-temp = <5>; + qcom,low-hyst-temp = <100>; + qcom,high-hyst-temp = <100>; qcom,polling-interval = <50>; ea0: ea0 { diff --git a/arch/arm/boot/dts/qcom/sda630-pm660a-qrd-hdk.dts b/arch/arm/boot/dts/qcom/sda630-pm660a-qrd-hdk.dts index 227a8999a745..4c4c758daa29 100644 --- a/arch/arm/boot/dts/qcom/sda630-pm660a-qrd-hdk.dts +++ b/arch/arm/boot/dts/qcom/sda630-pm660a-qrd-hdk.dts @@ -60,6 +60,19 @@ 
/delete-node/ &tasha_hph_en0; /delete-node/ &tasha_hph_en1; +&qusb_phy0 { + qcom,qusb-phy-init-seq = <0xf8 0x80 + 0xb3 0x84 + 0x83 0x88 + 0xc7 0x8c + 0x30 0x08 + 0x79 0x0c + 0x21 0x10 + 0x14 0x9c + 0x9f 0x1c + 0x00 0x18>; +}; + &tasha_snd { qcom,model = "sdm660-tasha-skus-snd-card"; qcom,audio-routing = diff --git a/arch/arm/boot/dts/qcom/sda660-pm660a-qrd-hdk.dts b/arch/arm/boot/dts/qcom/sda660-pm660a-qrd-hdk.dts index 7fb0c9d03825..0f4b462fd57b 100644 --- a/arch/arm/boot/dts/qcom/sda660-pm660a-qrd-hdk.dts +++ b/arch/arm/boot/dts/qcom/sda660-pm660a-qrd-hdk.dts @@ -170,6 +170,15 @@ qcom,mdss-pref-prim-intf = "dsi"; }; +&mdss_dp_ctrl { + pinctrl-names = "mdss_dp_active", "mdss_dp_sleep"; + pinctrl-0 = <&mdss_dp_aux_active &mdss_dp_usbplug_cc_active>; + pinctrl-1 = <&mdss_dp_aux_suspend &mdss_dp_usbplug_cc_suspend>; + qcom,aux-en-gpio = <&tlmm 55 0>; + qcom,aux-sel-gpio = <&tlmm 56 0>; + qcom,usbplug-cc-gpio = <&tlmm 58 0>; +}; + &mdss_dsi { hw-config = "single_dsi"; }; @@ -218,4 +227,14 @@ <0x00188018 0x4>; reg-names = "qusb_phy_base", "ref_clk_addr"; + qcom,qusb-phy-init-seq = <0xf8 0x80 + 0xb3 0x84 + 0x83 0x88 + 0xc7 0x8c + 0x30 0x08 + 0x79 0x0c + 0x21 0x10 + 0x14 0x9c + 0x9f 0x1c + 0x00 0x18>; }; diff --git a/arch/arm/boot/dts/qcom/sdm630-gpu.dtsi b/arch/arm/boot/dts/qcom/sdm630-gpu.dtsi index e0d51db067c9..72b89a7e7c47 100644 --- a/arch/arm/boot/dts/qcom/sdm630-gpu.dtsi +++ b/arch/arm/boot/dts/qcom/sdm630-gpu.dtsi @@ -152,6 +152,7 @@ qcom,gpu-mempool@1 { reg = <1>; qcom,mempool-page-size = <65536>; + qcom,mempool-allocate; }; }; diff --git a/arch/arm/boot/dts/qcom/sdm630-pm.dtsi b/arch/arm/boot/dts/qcom/sdm630-pm.dtsi index b8272b29aa89..f39f8b880690 100644 --- a/arch/arm/boot/dts/qcom/sdm630-pm.dtsi +++ b/arch/arm/boot/dts/qcom/sdm630-pm.dtsi @@ -343,6 +343,7 @@ <0x02 216>, /* tsens1_tsens_upper_lower_int */ <0x31 212>, /* usb30_power_event_irq */ <0x34 275>, /* qmp_usb3_lfps_rxterm_irq_cx */ + <0x3d 209>, /* lpi_dir_conn_irq_apps[1] */ <0x4f 379>, /* qusb2phy_intr */ <0x57 358>, /* ee0_apps_hlos_spmi_periph_irq */ <0x5b 519>, /* lpass_pmu_tmr_timeout_irq_cx */ @@ -480,7 +481,6 @@ <0xff 206>, /* rpm_ipc[22] */ <0xff 207>, /* rpm_ipc[23] */ <0xff 208>, /* lpi_dir_conn_irq_apps[0] */ - <0xff 209>, /* lpi_dir_conn_irq_apps[1] */ <0xff 210>, /* lpi_dir_conn_irq_apps[2] */ <0xff 213>, /* secure_wdog_bark_irq */ <0xff 214>, /* tsens1_tsens_max_min_int */ diff --git a/arch/arm/boot/dts/qcom/sdm630.dtsi b/arch/arm/boot/dts/qcom/sdm630.dtsi index 9897900d3fd5..5618f02e34f2 100644 --- a/arch/arm/boot/dts/qcom/sdm630.dtsi +++ b/arch/arm/boot/dts/qcom/sdm630.dtsi @@ -299,21 +299,44 @@ soc: soc { }; + firmware: firmware { + android { + compatible = "android,firmware"; + fstab { + compatible = "android,fstab"; + vendor { + compatible = "android,vendor"; + dev = "/dev/block/platform/soc/c0c4000.sdhci/by-name/vendor"; + type = "ext4"; + mnt_flags = "ro,barrier=1,discard"; + fsmgr_flags = "wait,slotselect"; + status = "ok"; + }; + }; + }; + }; + reserved-memory { #address-cells = <2>; #size-cells = <2>; ranges; - removed_region0: removed_region0@85800000 { + wlan_msa_guard: wlan_msa_guard@85600000 { compatible = "removed-dma-pool"; no-map; - reg = <0x0 0x85800000 0x0 0x700000>; + reg = <0x0 0x85600000 0x0 0x100000>; }; - removed_region1: removed_region1@86000000 { + wlan_msa_mem: wlan_msa_mem@85700000 { compatible = "removed-dma-pool"; no-map; - reg = <0x0 0x86000000 0x0 0x2f00000>; + reg = <0x0 0x85700000 0x0 0x100000>; + }; + + removed_region: removed_region0@85800000 { + compatible = 
"removed-dma-pool"; + no-map; + reg = <0x0 0x85800000 0x0 0x3700000>; }; modem_fw_mem: modem_fw_region@8ac00000 { @@ -590,6 +613,7 @@ compatible = "qcom,memshare-peripheral"; qcom,peripheral-size = <0x0>; qcom,client-id = <1>; + qcom,allocate-boot-time; label = "modem"; }; }; @@ -794,8 +818,8 @@ qcom,msm-core@780000 { compatible = "qcom,apss-core-ea"; reg = <0x780000 0x1000>; - qcom,low-hyst-temp = <10>; - qcom,high-hyst-temp = <5>; + qcom,low-hyst-temp = <100>; + qcom,high-hyst-temp = <100>; ea0: ea0 { sensor = <&sensor_information3>; @@ -1142,8 +1166,9 @@ < 1113600 762 >, < 1344000 2086 >, < 1670400 2929 >, - < 2150400 3879 >, - < 2380800 4943 >; + < 1881600 3879 >, + < 2150400 4943 >, + < 2380800 5163 >; }; devfreq_memlat_4: qcom,arm-memlat-mon-4 { @@ -1263,9 +1288,9 @@ }; }; - qcom,rmtfs_sharedmem@0 { + qcom,rmtfs_sharedmem@85e00000 { compatible = "qcom,sharedmem-uio"; - reg = <0x0 0x200000>; + reg = <0x85e00000 0x200000>; reg-names = "rmtfs"; qcom,client-id = <0x00000001>; }; @@ -1657,6 +1682,7 @@ qcom,vdd-1.3-rfa-config = <1200000 1370000>; qcom,vdd-3.3-ch0-config = <3200000 3400000>; qcom,wlan-msa-memory = <0x100000>; + qcom,wlan-msa-fixed-region = <&wlan_msa_mem>; qcom,smmu-s1-bypass; }; @@ -2006,6 +2032,7 @@ qcom,qsee-ce-hw-instance = <0>; qcom,disk-encrypt-pipe-pair = <2>; qcom,support-fde; + qcom,fde-key-size; qcom,no-clock-support; qcom,msm-bus,name = "qseecom-noc"; qcom,msm-bus,num-cases = <4>; diff --git a/arch/arm/boot/dts/qcom/sdm660-camera-sensor-cdp.dtsi b/arch/arm/boot/dts/qcom/sdm660-camera-sensor-cdp.dtsi index 46f77e9a3253..476fedec35a4 100644 --- a/arch/arm/boot/dts/qcom/sdm660-camera-sensor-cdp.dtsi +++ b/arch/arm/boot/dts/qcom/sdm660-camera-sensor-cdp.dtsi @@ -29,6 +29,16 @@ qcom,switch-source = <&pm660l_switch1>; status = "ok"; }; + + cam_actuator_regulator: cam_actuator_fixed_regulator { + compatible = "regulator-fixed"; + regulator-name = "cam_actuator_regulator"; + regulator-min-microvolt = <3600000>; + regulator-max-microvolt = <3600000>; + enable-active-high; + gpio = <&tlmm 50 0>; + vin-supply = <&pm660l_bob>; + }; }; &cci { @@ -37,14 +47,11 @@ reg = <0x0>; compatible = "qcom,actuator"; qcom,cci-master = <0>; - gpios = <&tlmm 50 0>; - qcom,gpio-vaf = <0>; - qcom,gpio-req-tbl-num = <0>; - qcom,gpio-req-tbl-flags = <0>; - qcom,gpio-req-tbl-label = "CAM_VAF"; - pinctrl-names = "cam_default", "cam_suspend"; - pinctrl-0 = <&cam_actuator_vaf_active>; - pinctrl-1 = <&cam_actuator_vaf_suspend>; + cam_vaf-supply = <&cam_actuator_regulator>; + qcom,cam-vreg-name = "cam_vaf"; + qcom,cam-vreg-min-voltage = <3600000>; + qcom,cam-vreg-max-voltage = <3600000>; + qcom,cam-vreg-op-mode = <0>; }; actuator1: qcom,actuator@1 { @@ -52,14 +59,11 @@ reg = <0x1>; compatible = "qcom,actuator"; qcom,cci-master = <1>; - gpios = <&tlmm 50 0>; - qcom,gpio-vaf = <0>; - qcom,gpio-req-tbl-num = <0>; - qcom,gpio-req-tbl-flags = <0>; - qcom,gpio-req-tbl-label = "CAM_VAF"; - pinctrl-names = "cam_default", "cam_suspend"; - pinctrl-0 = <&cam_actuator_vaf_active>; - pinctrl-1 = <&cam_actuator_vaf_suspend>; + cam_vaf-supply = <&cam_actuator_regulator>; + qcom,cam-vreg-name = "cam_vaf"; + qcom,cam-vreg-min-voltage = <3600000>; + qcom,cam-vreg-max-voltage = <3600000>; + qcom,cam-vreg-op-mode = <0>; }; actuator2: qcom,actuator@2 { @@ -67,14 +71,11 @@ reg = <0x2>; compatible = "qcom,actuator"; qcom,cci-master = <1>; - gpios = <&tlmm 50 0>; - qcom,gpio-vaf = <0>; - qcom,gpio-req-tbl-num = <0>; - qcom,gpio-req-tbl-flags = <0>; - qcom,gpio-req-tbl-label = "CAM_VAF"; - pinctrl-names = "cam_default", 
"cam_suspend"; - pinctrl-0 = <&cam_actuator_vaf_active>; - pinctrl-1 = <&cam_actuator_vaf_suspend>; + cam_vaf-supply = <&cam_actuator_regulator>; + qcom,cam-vreg-name = "cam_vaf"; + qcom,cam-vreg-min-voltage = <3600000>; + qcom,cam-vreg-max-voltage = <3600000>; + qcom,cam-vreg-op-mode = <0>; }; ois0: qcom,ois@0 { @@ -82,15 +83,31 @@ reg = <0x0>; compatible = "qcom,ois"; qcom,cci-master = <0>; - gpios = <&tlmm 50 0>; - qcom,gpio-vaf = <0>; + cam_vaf-supply = <&cam_actuator_regulator>; + qcom,cam-vreg-name = "cam_vaf"; + qcom,cam-vreg-min-voltage = <3600000>; + qcom,cam-vreg-max-voltage = <3600000>; + qcom,cam-vreg-op-mode = <0>; + status = "disabled"; + }; + + tof0: qcom,tof@29{ + cell-index = <0>; + reg = <0x29>; + compatible = "st,stmvl53l0"; + qcom,cci-master = <0>; + cam_laser-supply = <&cam_actuator_regulator>; + qcom,cam-vreg-name = "cam_laser"; + qcom,cam-vreg-min-voltage = <3600000>; + qcom,cam-vreg-max-voltage = <3600000>; + pinctrl-names = "cam_default", "cam_suspend"; + pinctrl-0 = <&cam_tof_active>; + pinctrl-1 = <&cam_tof_suspend>; + stm,irq-gpio = <&tlmm 45 0x2008>; + gpios = <&tlmm 42 0>; qcom,gpio-req-tbl-num = <0>; qcom,gpio-req-tbl-flags = <0>; - qcom,gpio-req-tbl-label = "CAM_VAF"; - pinctrl-names = "cam_default", "cam_suspend"; - pinctrl-0 = <&cam_actuator_vaf_active>; - pinctrl-1 = <&cam_actuator_vaf_suspend>; - status = "disabled"; + qcom,gpio-req-tbl-label = "RNG_EN"; }; eeprom0: qcom,eeprom@0 { diff --git a/arch/arm/boot/dts/qcom/sdm660-camera-sensor-mtp.dtsi b/arch/arm/boot/dts/qcom/sdm660-camera-sensor-mtp.dtsi index 94166bf8dd3e..11eb288804ff 100644 --- a/arch/arm/boot/dts/qcom/sdm660-camera-sensor-mtp.dtsi +++ b/arch/arm/boot/dts/qcom/sdm660-camera-sensor-mtp.dtsi @@ -40,6 +40,16 @@ vin-supply = <&pm660l_bob>; }; + cam_actuator_regulator: cam_actuator_fixed_regulator { + compatible = "regulator-fixed"; + regulator-name = "cam_actuator_regulator"; + regulator-min-microvolt = <3600000>; + regulator-max-microvolt = <3600000>; + enable-active-high; + gpio = <&tlmm 50 0>; + vin-supply = <&pm660l_bob>; + }; + cam_dvdd_gpio_regulator: cam_dvdd_fixed_regulator { compatible = "regulator-fixed"; regulator-name = "cam_dvdd_gpio_regulator"; @@ -67,14 +77,11 @@ reg = <0x0>; compatible = "qcom,actuator"; qcom,cci-master = <0>; - gpios = <&tlmm 50 0>; - qcom,gpio-vaf = <0>; - qcom,gpio-req-tbl-num = <0>; - qcom,gpio-req-tbl-flags = <0>; - qcom,gpio-req-tbl-label = "CAM_VAF"; - pinctrl-names = "cam_default", "cam_suspend"; - pinctrl-0 = <&cam_actuator_vaf_active>; - pinctrl-1 = <&cam_actuator_vaf_suspend>; + cam_vaf-supply = <&cam_actuator_regulator>; + qcom,cam-vreg-name = "cam_vaf"; + qcom,cam-vreg-min-voltage = <3600000>; + qcom,cam-vreg-max-voltage = <3600000>; + qcom,cam-vreg-op-mode = <0>; }; actuator1: qcom,actuator@1 { @@ -82,14 +89,11 @@ reg = <0x1>; compatible = "qcom,actuator"; qcom,cci-master = <1>; - gpios = <&tlmm 50 0>; - qcom,gpio-vaf = <0>; - qcom,gpio-req-tbl-num = <0>; - qcom,gpio-req-tbl-flags = <0>; - qcom,gpio-req-tbl-label = "CAM_VAF"; - pinctrl-names = "cam_default", "cam_suspend"; - pinctrl-0 = <&cam_actuator_vaf_active>; - pinctrl-1 = <&cam_actuator_vaf_suspend>; + cam_vaf-supply = <&cam_actuator_regulator>; + qcom,cam-vreg-name = "cam_vaf"; + qcom,cam-vreg-min-voltage = <3600000>; + qcom,cam-vreg-max-voltage = <3600000>; + qcom,cam-vreg-op-mode = <0>; }; actuator2: qcom,actuator@2 { @@ -97,14 +101,11 @@ reg = <0x2>; compatible = "qcom,actuator"; qcom,cci-master = <1>; - gpios = <&tlmm 50 0>; - qcom,gpio-vaf = <0>; - qcom,gpio-req-tbl-num = <0>; - 
qcom,gpio-req-tbl-flags = <0>; - qcom,gpio-req-tbl-label = "CAM_VAF"; - pinctrl-names = "cam_default", "cam_suspend"; - pinctrl-0 = <&cam_actuator_vaf_active>; - pinctrl-1 = <&cam_actuator_vaf_suspend>; + cam_vaf-supply = <&cam_actuator_regulator>; + qcom,cam-vreg-name = "cam_vaf"; + qcom,cam-vreg-min-voltage = <3600000>; + qcom,cam-vreg-max-voltage = <3600000>; + qcom,cam-vreg-op-mode = <0>; }; ois0: qcom,ois@0 { @@ -112,15 +113,31 @@ reg = <0x0>; compatible = "qcom,ois"; qcom,cci-master = <0>; - gpios = <&tlmm 50 0>; - qcom,gpio-vaf = <0>; + cam_vaf-supply = <&cam_actuator_regulator>; + qcom,cam-vreg-name = "cam_vaf"; + qcom,cam-vreg-min-voltage = <3600000>; + qcom,cam-vreg-max-voltage = <3600000>; + qcom,cam-vreg-op-mode = <0>; + status = "disabled"; + }; + + tof0: qcom,tof@29{ + cell-index = <0>; + reg = <0x29>; + compatible = "st,stmvl53l0"; + qcom,cci-master = <0>; + cam_laser-supply = <&cam_actuator_regulator>; + qcom,cam-vreg-name = "cam_laser"; + qcom,cam-vreg-min-voltage = <3600000>; + qcom,cam-vreg-max-voltage = <3600000>; + pinctrl-names = "cam_default", "cam_suspend"; + pinctrl-0 = <&cam_tof_active>; + pinctrl-1 = <&cam_tof_suspend>; + stm,irq-gpio = <&tlmm 45 0x2008>; + gpios = <&tlmm 42 0>; qcom,gpio-req-tbl-num = <0>; qcom,gpio-req-tbl-flags = <0>; - qcom,gpio-req-tbl-label = "CAM_VAF"; - pinctrl-names = "cam_default", "cam_suspend"; - pinctrl-0 = <&cam_actuator_vaf_active>; - pinctrl-1 = <&cam_actuator_vaf_suspend>; - status = "disabled"; + qcom,gpio-req-tbl-label = "RNG_EN"; }; eeprom0: qcom,eeprom@0 { diff --git a/arch/arm/boot/dts/qcom/sdm660-camera-sensor-qrd.dtsi b/arch/arm/boot/dts/qcom/sdm660-camera-sensor-qrd.dtsi index ec754f3cce80..6bccb320577b 100644 --- a/arch/arm/boot/dts/qcom/sdm660-camera-sensor-qrd.dtsi +++ b/arch/arm/boot/dts/qcom/sdm660-camera-sensor-qrd.dtsi @@ -40,6 +40,16 @@ gpio = <&tlmm 50 0>; vin-supply = <&pm660l_bob>; }; + + cam_rear_dvdd_gpio_regulator: cam_rear_dvdd_fixed_regulator { + compatible = "regulator-fixed"; + regulator-name = "cam_rear_dvdd_gpio_regulator"; + regulator-min-microvolt = <1350000>; + regulator-max-microvolt = <1350000>; + enable-active-high; + gpio = <&pm660l_gpios 4 0>; + vin-supply = <&pm660_s5>; + }; }; &tlmm { @@ -172,10 +182,10 @@ compatible = "qcom,eeprom"; cam_vio-supply = <&pm660_l11>; cam_vana-supply = <&cam_avdd_gpio_regulator>; - cam_vdig-supply = <&pm660_s5>; + cam_vdig-supply = <&cam_rear_dvdd_gpio_regulator>; qcom,cam-vreg-name = "cam_vio", "cam_vana", "cam_vdig"; - qcom,cam-vreg-min-voltage = <1780000 0 1350000>; - qcom,cam-vreg-max-voltage = <1950000 0 1350000>; + qcom,cam-vreg-min-voltage = <1780000 0 0>; + qcom,cam-vreg-max-voltage = <1950000 0 0>; qcom,cam-vreg-op-mode = <105000 0 105000>; qcom,gpio-no-mux = <0>; pinctrl-names = "cam_default", "cam_suspend"; @@ -184,15 +194,12 @@ pinctrl-1 = <&cam_sensor_mclk0_suspend &cam_sensor_rear_suspend>; gpios = <&tlmm 32 0>, - <&tlmm 46 0>, - <&pm660l_gpios 4 0>; + <&tlmm 46 0>; qcom,gpio-reset = <1>; - qcom,gpio-vdig = <2>; - qcom,gpio-req-tbl-num = <0 1 1>; - qcom,gpio-req-tbl-flags = <1 0 0>; + qcom,gpio-req-tbl-num = <0 1>; + qcom,gpio-req-tbl-flags = <1 0>; qcom,gpio-req-tbl-label = "CAMIF_MCLK2", - "CAM_RESET0", - "CAM_VDIG"; + "CAM_RESET0"; qcom,sensor-position = <0>; qcom,sensor-mode = <0>; qcom,cci-master = <0>; @@ -209,11 +216,11 @@ compatible = "qcom,eeprom"; cam_vio-supply = <&pm660_l11>; cam_vana-supply = <&cam_avdd_gpio_regulator>; - cam_vdig-supply = <&pm660_s5>; + cam_vdig-supply = <&cam_rear_dvdd_gpio_regulator>; qcom,cam-vreg-name = "cam_vio", 
"cam_vana", "cam_vdig"; - qcom,cam-vreg-min-voltage = <1780000 0 1350000>; - qcom,cam-vreg-max-voltage = <1950000 0 1350000>; - qcom,cam-vreg-op-mode = <105000 0 105000>; + qcom,cam-vreg-min-voltage = <1780000 0 0>; + qcom,cam-vreg-max-voltage = <1950000 0 0>; + qcom,cam-vreg-op-mode = <105000 0 0>; qcom,gpio-no-mux = <0>; pinctrl-names = "cam_default", "cam_suspend"; pinctrl-0 = <&cam_sensor_mclk2_active @@ -221,15 +228,12 @@ pinctrl-1 = <&cam_sensor_mclk2_suspend &cam_sensor_rear2_suspend>; gpios = <&tlmm 34 0>, - <&tlmm 48 0>, - <&pm660l_gpios 4 0>; + <&tlmm 48 0>; qcom,gpio-reset = <1>; - qcom,gpio-vdig = <2>; - qcom,gpio-req-tbl-num = <0 1 1>; - qcom,gpio-req-tbl-flags = <1 0 0>; + qcom,gpio-req-tbl-num = <0 1>; + qcom,gpio-req-tbl-flags = <1 0>; qcom,gpio-req-tbl-label = "CAMIF_MCLK1", - "CAM_RESET1", - "CAM_VDIG"; + "CAM_RESET1"; qcom,sensor-position = <0>; qcom,sensor-mode = <0>; qcom,cci-master = <1>; @@ -290,11 +294,11 @@ qcom,eeprom-src = <&eeprom0>; cam_vio-supply = <&pm660_l11>; cam_vana-supply = <&cam_avdd_gpio_regulator>; - cam_vdig-supply = <&pm660_s5>; + cam_vdig-supply = <&cam_rear_dvdd_gpio_regulator>; qcom,cam-vreg-name = "cam_vio", "cam_vana", "cam_vdig"; - qcom,cam-vreg-min-voltage = <1780000 0 1350000>; - qcom,cam-vreg-max-voltage = <1950000 0 1350000>; - qcom,cam-vreg-op-mode = <105000 0 105000>; + qcom,cam-vreg-min-voltage = <1780000 0 0>; + qcom,cam-vreg-max-voltage = <1950000 0 0>; + qcom,cam-vreg-op-mode = <105000 0 0>; qcom,gpio-no-mux = <0>; pinctrl-names = "cam_default", "cam_suspend"; pinctrl-0 = <&cam_sensor_mclk0_active @@ -302,15 +306,12 @@ pinctrl-1 = <&cam_sensor_mclk0_suspend &cam_sensor_rear_suspend>; gpios = <&tlmm 32 0>, - <&tlmm 46 0>, - <&pm660l_gpios 4 0>; + <&tlmm 46 0>; qcom,gpio-reset = <1>; - qcom,gpio-vdig = <2>; - qcom,gpio-req-tbl-num = <0 1 1>; - qcom,gpio-req-tbl-flags = <1 0 0>; + qcom,gpio-req-tbl-num = <0 1>; + qcom,gpio-req-tbl-flags = <1 0>; qcom,gpio-req-tbl-label = "CAMIF_MCLK2", - "CAM_RESET0", - "CAM_VDIG"; + "CAM_RESET0"; qcom,sensor-position = <0>; qcom,sensor-mode = <0>; qcom,cci-master = <0>; @@ -333,11 +334,11 @@ qcom,eeprom-src = <&eeprom1>; cam_vio-supply = <&pm660_l11>; cam_vana-supply = <&cam_avdd_gpio_regulator>; - cam_vdig-supply = <&pm660_s5>; + cam_vdig-supply = <&cam_rear_dvdd_gpio_regulator>; qcom,cam-vreg-name = "cam_vio", "cam_vana", "cam_vdig"; - qcom,cam-vreg-min-voltage = <1780000 0 1350000>; - qcom,cam-vreg-max-voltage = <1950000 0 1350000>; - qcom,cam-vreg-op-mode = <105000 0 105000>; + qcom,cam-vreg-min-voltage = <1780000 0 0>; + qcom,cam-vreg-max-voltage = <1950000 0 0>; + qcom,cam-vreg-op-mode = <105000 0 0>; qcom,gpio-no-mux = <0>; pinctrl-names = "cam_default", "cam_suspend"; pinctrl-0 = <&cam_sensor_mclk2_active @@ -345,15 +346,12 @@ pinctrl-1 = <&cam_sensor_mclk2_suspend &cam_sensor_rear2_suspend>; gpios = <&tlmm 34 0>, - <&tlmm 48 0>, - <&pm660l_gpios 4 0>; + <&tlmm 48 0>; qcom,gpio-reset = <1>; - qcom,gpio-vdig = <2>; - qcom,gpio-req-tbl-num = <0 1 1>; - qcom,gpio-req-tbl-flags = <1 0 0>; + qcom,gpio-req-tbl-num = <0 1>; + qcom,gpio-req-tbl-flags = <1 0>; qcom,gpio-req-tbl-label = "CAMIF_MCLK1", - "CAM_RESET1", - "CAM_VDIG"; + "CAM_RESET1"; qcom,sensor-position = <0>; qcom,sensor-mode = <0>; qcom,cci-master = <1>; diff --git a/arch/arm/boot/dts/qcom/sdm660-common.dtsi b/arch/arm/boot/dts/qcom/sdm660-common.dtsi index f75794ba942f..d60a22d7cb4d 100644 --- a/arch/arm/boot/dts/qcom/sdm660-common.dtsi +++ b/arch/arm/boot/dts/qcom/sdm660-common.dtsi @@ -108,6 +108,7 @@ lanes-per-direction = <1>; + 
non-removable; qcom,msm-bus,name = "ufs1"; qcom,msm-bus,num-cases = <12>; qcom,msm-bus,num-paths = <2>; @@ -268,14 +269,18 @@ compatible = "qcom,qusb2phy"; reg = <0x0c012000 0x180>, <0x01fcb24c 0x4>, + <0x00780240 0x4>, <0x00188018 0x4>; reg-names = "qusb_phy_base", "tcsr_clamp_dig_n_1p8", + "tune2_efuse_addr", "ref_clk_addr"; vdd-supply = <&pm660l_l1>; vdda18-supply = <&pm660_l10>; vdda33-supply = <&pm660l_l7>; qcom,vdd-voltage-level = <0 925000 925000>; + qcom,tune2-efuse-bit-pos = <25>; + qcom,tune2-efuse-num-bits = <4>; qcom,qusb-phy-init-seq = <0xf8 0x80 0xb3 0x84 0x83 0x88 diff --git a/arch/arm/boot/dts/qcom/sdm660-gpu.dtsi b/arch/arm/boot/dts/qcom/sdm660-gpu.dtsi index f5d61d440a27..fecb86dcfdeb 100644 --- a/arch/arm/boot/dts/qcom/sdm660-gpu.dtsi +++ b/arch/arm/boot/dts/qcom/sdm660-gpu.dtsi @@ -158,6 +158,7 @@ qcom,gpu-mempool@1 { reg = <1>; qcom,mempool-page-size = <65536>; + qcom,mempool-allocate; }; }; diff --git a/arch/arm/boot/dts/qcom/sdm660-mtp.dtsi b/arch/arm/boot/dts/qcom/sdm660-mtp.dtsi index 45b7201fbf71..50f5d83346c6 100644 --- a/arch/arm/boot/dts/qcom/sdm660-mtp.dtsi +++ b/arch/arm/boot/dts/qcom/sdm660-mtp.dtsi @@ -56,6 +56,16 @@ qcom,master-en = <1>; status = "okay"; }; + + /* GPIO 11 for Home Key */ + gpio@ca00 { + status = "okay"; + qcom,mode = <0>; + qcom,pull = <0>; + qcom,vin-sel = <0>; + qcom,src-sel = <0>; + qcom,out-strength = <1>; + }; }; &i2c_6 { /* BLSP1 QUP6 (NFC) */ diff --git a/arch/arm/boot/dts/qcom/sdm660-pinctrl.dtsi b/arch/arm/boot/dts/qcom/sdm660-pinctrl.dtsi index d902078b1048..e55a67e68b36 100644 --- a/arch/arm/boot/dts/qcom/sdm660-pinctrl.dtsi +++ b/arch/arm/boot/dts/qcom/sdm660-pinctrl.dtsi @@ -1101,6 +1101,34 @@ }; }; + cam_tof_active: cam_tof_active { + /* LASER */ + mux { + pins = "gpio50", "gpio42", "gpio45"; + function = "gpio"; + }; + + config { + pins = "gpio50", "gpio42", "gpio45"; + bias-pull-up; + drive-strength = <2>; /* 2 MA */ + }; + }; + + cam_tof_suspend: cam_tof_suspend { + /* LASER */ + mux { + pins = "gpio50", "gpio42", "gpio45"; + function = "gpio"; + }; + + config { + pins = "gpio50", "gpio42", "gpio45"; + bias-pull-down; /* PULL DOWN */ + drive-strength = <2>; /* 2 MA */ + }; + }; + cam_sensor_mclk0_active: cam_sensor_mclk0_active { /* MCLK0 */ mux { diff --git a/arch/arm/boot/dts/qcom/sdm660-pm.dtsi b/arch/arm/boot/dts/qcom/sdm660-pm.dtsi index 21fab4923331..679a1b89b2f8 100644 --- a/arch/arm/boot/dts/qcom/sdm660-pm.dtsi +++ b/arch/arm/boot/dts/qcom/sdm660-pm.dtsi @@ -39,7 +39,7 @@ qcom,vctl-timeout-us = <500>; qcom,vctl-port = <0x0>; qcom,phase-port = <0x1>; - qcom,saw2-avs-ctl = <0x1010031>; + qcom,saw2-avs-ctl = <0x101c031>; qcom,saw2-avs-limit = <0x4580458>; qcom,pfm-port = <0x2>; }; @@ -341,6 +341,7 @@ qcom,gic-map = <0x02 216>, /* tsens1_tsens_upper_lower_int */ <0x34 275>, /* qmp_usb3_lfps_rxterm_irq_cx */ + <0x3d 209>, /* lpi_dir_conn_irq_apps[1] */ <0x4f 379>, /* qusb2phy_intr for Dm */ <0x50 380>, /* qusb2phy_intr for Dm for secondary PHY */ <0x51 379>, /* qusb2phy_intr for Dp */ @@ -484,7 +485,6 @@ <0xff 206>, /* rpm_ipc[22] */ <0xff 207>, /* rpm_ipc[23] */ <0xff 208>, /* lpi_dir_conn_irq_apps[0] */ - <0xff 209>, /* lpi_dir_conn_irq_apps[1] */ <0xff 210>, /* lpi_dir_conn_irq_apps[2] */ <0xff 212>, /* usb30s_power_event_irq */ <0xff 213>, /* secure_wdog_bark_irq */ diff --git a/arch/arm/boot/dts/qcom/sdm660-regulator.dtsi b/arch/arm/boot/dts/qcom/sdm660-regulator.dtsi index b701ecd562cd..a4111f6d1b94 100644 --- a/arch/arm/boot/dts/qcom/sdm660-regulator.dtsi +++ b/arch/arm/boot/dts/qcom/sdm660-regulator.dtsi @@ 
-453,6 +453,8 @@ pm660l_bob: regulator-bob { regulator-min-microvolt = <3300000>; regulator-max-microvolt = <3600000>; + qcom,pwm-threshold-current = <2000000>; + qcom,init-bob-mode = <2>; status = "okay"; }; @@ -462,6 +464,8 @@ qcom,set = <3>; regulator-min-microvolt = <3300000>; regulator-max-microvolt = <3600000>; + qcom,pwm-threshold-current = <2000000>; + qcom,init-bob-mode = <2>; qcom,use-pin-ctrl-voltage1; }; @@ -471,6 +475,8 @@ qcom,set = <3>; regulator-min-microvolt = <3300000>; regulator-max-microvolt = <3600000>; + qcom,pwm-threshold-current = <2000000>; + qcom,init-bob-mode = <2>; qcom,use-pin-ctrl-voltage2; }; @@ -480,6 +486,8 @@ qcom,set = <3>; regulator-min-microvolt = <3300000>; regulator-max-microvolt = <3600000>; + qcom,pwm-threshold-current = <2000000>; + qcom,init-bob-mode = <2>; qcom,use-pin-ctrl-voltage3; }; }; @@ -736,8 +744,8 @@ < (-4000) 4000 7000 19000 (-8000)>; qcom,cpr-closed-loop-voltage-fuse-adjustment = - <(-32000) (-30000) (-29000) (-23000) - (-21000)>; + <(-32000) (-30000) (-29000) (-38000) + (-36000)>; qcom,cpr-floor-to-ceiling-max-range = <32000 32000 32000 40000 44000 diff --git a/arch/arm/boot/dts/qcom/sdm660-vidc.dtsi b/arch/arm/boot/dts/qcom/sdm660-vidc.dtsi index 06b3be2d5c0a..588973fbd840 100644 --- a/arch/arm/boot/dts/qcom/sdm660-vidc.dtsi +++ b/arch/arm/boot/dts/qcom/sdm660-vidc.dtsi @@ -209,7 +209,7 @@ <&mmss_bimc_smmu 0x411>, <&mmss_bimc_smmu 0x431>; buffer-types = <0xfff>; - virtual-addr-pool = <0x70800000 0x6f800000>; + virtual-addr-pool = <0x79000000 0x60000000>; }; firmware_cb { @@ -231,7 +231,7 @@ <&mmss_bimc_smmu 0x529>, <&mmss_bimc_smmu 0x52b>; buffer-types = <0x241>; - virtual-addr-pool = <0x4b000000 0x25800000>; + virtual-addr-pool = <0x51000000 0x28000000>; qcom,secure-context-bank; }; @@ -243,7 +243,7 @@ <&mmss_bimc_smmu 0x510>, <&mmss_bimc_smmu 0x52c>; buffer-types = <0x106>; - virtual-addr-pool = <0x25800000 0x25800000>; + virtual-addr-pool = <0x29000000 0x28000000>; qcom,secure-context-bank; }; @@ -260,7 +260,7 @@ <&mmss_bimc_smmu 0x52d>, <&mmss_bimc_smmu 0x540>; buffer-types = <0x480>; - virtual-addr-pool = <0x1000000 0x24800000>; + virtual-addr-pool = <0x1000000 0x28000000>; qcom,secure-context-bank; }; }; diff --git a/arch/arm/boot/dts/qcom/sdm660.dtsi b/arch/arm/boot/dts/qcom/sdm660.dtsi index 2e576a51677f..e00753f8b3e7 100644 --- a/arch/arm/boot/dts/qcom/sdm660.dtsi +++ b/arch/arm/boot/dts/qcom/sdm660.dtsi @@ -297,11 +297,40 @@ soc: soc { }; + firmware: firmware { + android { + compatible = "android,firmware"; + fstab { + compatible = "android,fstab"; + vendor { + compatible = "android,vendor"; + dev = "/dev/block/platform/soc/c0c4000.sdhci/by-name/vendor"; + type = "ext4"; + mnt_flags = "ro,barrier=1,discard"; + fsmgr_flags = "wait,slotselect"; + status = "ok"; + }; + }; + }; + }; + reserved-memory { #address-cells = <2>; #size-cells = <2>; ranges; + wlan_msa_guard: wlan_msa_guard@85600000 { + compatible = "removed-dma-pool"; + no-map; + reg = <0x0 0x85600000 0x0 0x100000>; + }; + + wlan_msa_mem: wlan_msa_mem@85700000 { + compatible = "removed-dma-pool"; + no-map; + reg = <0x0 0x85700000 0x0 0x100000>; + }; + removed_regions: removed_regions@85800000 { compatible = "removed-dma-pool"; no-map; @@ -643,6 +672,16 @@ clock-names = "core", "iface"; }; + qcom,qbt1000 { + compatible = "qcom,qbt1000"; + clock-names = "core", "iface"; + clocks = <&clock_gcc GCC_BLSP1_QUP3_SPI_APPS_CLK>, + <&clock_gcc GCC_BLSP1_AHB_CLK>; + clock-frequency = <15000000>; + qcom,ipc-gpio = <&tlmm 72 0>; + qcom,finger-detect-gpio = <&pm660_gpios 11 0>; + 
}; + qcom,sensor-information { compatible = "qcom,sensor-information"; sensor_information0: qcom,sensor-information-0 { @@ -848,8 +887,8 @@ qcom,msm-core@780000 { compatible = "qcom,apss-core-ea"; reg = <0x780000 0x1000>; - qcom,low-hyst-temp = <10>; - qcom,high-hyst-temp = <5>; + qcom,low-hyst-temp = <100>; + qcom,high-hyst-temp = <100>; ea0: ea0 { sensor = <&sensor_information1>; @@ -1215,9 +1254,17 @@ compatible = "qcom,clk-cpu-osm"; reg = <0x179c0000 0x4000>, <0x17916000 0x1000>, <0x17816000 0x1000>, <0x179d1000 0x1000>, - <0x00784130 0x8>; + <0x00784130 0x8>, <0x17914800 0x800>; reg-names = "osm", "pwrcl_pll", "perfcl_pll", - "apcs_common", "perfcl_efuse"; + "apcs_common", "perfcl_efuse", + "pwrcl_acd"; + + qcom,acdtd-val = <0x0000a111 0x0000a111>; + qcom,acdcr-val = <0x002c5ffd 0x002c5ffd>; + qcom,acdsscr-val = <0x00000901 0x00000901>; + qcom,acdextint0-val = <0x2cf9ae8 0x2cf9ae8>; + qcom,acdextint1-val = <0x2cf9afe 0x2cf9afe>; + qcom,acdautoxfer-val = <0x00000015 0x00000015>; vdd-pwrcl-supply = <&apc0_pwrcl_vreg>; vdd-perfcl-supply = <&apc1_perfcl_vreg>; @@ -1486,9 +1533,9 @@ }; }; - qcom,rmtfs_sharedmem@0 { + qcom,rmtfs_sharedmem@85e00000 { compatible = "qcom,sharedmem-uio"; - reg = <0x0 0x200000>; + reg = <0x85e00000 0x200000>; reg-names = "rmtfs"; qcom,client-id = <0x00000001>; }; @@ -1530,6 +1577,7 @@ qcom,msm_fastrpc { compatible = "qcom,msm-fastrpc-adsp"; qcom,fastrpc-glink; + qcom,fastrpc-vmid-heap-shared; qcom,msm_fastrpc_compute_cb1 { compatible = "qcom,msm-fastrpc-compute-cb"; @@ -1903,6 +1951,7 @@ qcom,vdd-1.3-rfa-config = <1200000 1370000>; qcom,vdd-3.3-ch0-config = <3200000 3400000>; qcom,wlan-msa-memory = <0x100000>; + qcom,wlan-msa-fixed-region = <&wlan_msa_mem>; qcom,smmu-s1-bypass; }; @@ -2115,6 +2164,7 @@ qcom,qsee-ce-hw-instance = <0>; qcom,disk-encrypt-pipe-pair = <2>; qcom,support-fde; + qcom,fde-key-size; qcom,no-clock-support; qcom,msm-bus,name = "qseecom-noc"; qcom,msm-bus,num-cases = <4>; @@ -2287,6 +2337,7 @@ lanes-per-direction = <1>; spm-level = <5>; + non-removable; qcom,msm-bus,name = "ufs1"; qcom,msm-bus,num-cases = <12>; qcom,msm-bus,num-paths = <2>; @@ -2520,6 +2571,7 @@ #include "msm-arm-smmu-impl-defs-660.dtsi" #include "sdm660-common.dtsi" #include "sdm660-blsp.dtsi" +#include "msm-rdbg.dtsi" #include "sdm660-camera.dtsi" #include "sdm660-vidc.dtsi" #include "msm-audio.dtsi" diff --git a/arch/arm/boot/dts/qcom/vplatform-lfv-msm8996.dts b/arch/arm/boot/dts/qcom/vplatform-lfv-msm8996.dts index 97d4dcd4eaf5..0ec61340a389 100644 --- a/arch/arm/boot/dts/qcom/vplatform-lfv-msm8996.dts +++ b/arch/arm/boot/dts/qcom/vplatform-lfv-msm8996.dts @@ -112,6 +112,10 @@ <&afe_proxy_rx>, <&afe_proxy_tx>, <&incall_record_rx>, <&incall_record_tx>, <&incall_music_rx>, <&incall_music2_rx>, + <&dai_pri_tdm_tx_0>, <&dai_pri_tdm_tx_1>, + <&dai_pri_tdm_tx_2>, <&dai_pri_tdm_tx_3>, + <&dai_pri_tdm_rx_0>, <&dai_pri_tdm_rx_1>, + <&dai_pri_tdm_rx_2>, <&dai_pri_tdm_rx_3>, <&dai_sec_tdm_tx_0>, <&dai_sec_tdm_tx_1>, <&dai_sec_tdm_tx_2>, <&dai_sec_tdm_tx_3>, <&dai_tert_tdm_rx_0>, <&dai_tert_tdm_rx_1>, @@ -130,6 +134,10 @@ "msm-dai-q6-dev.241", "msm-dai-q6-dev.240", "msm-dai-q6-dev.32771", "msm-dai-q6-dev.32772", "msm-dai-q6-dev.32773", "msm-dai-q6-dev.32770", + "msm-dai-q6-tdm.36865", "msm-dai-q6-tdm.36867", + "msm-dai-q6-tdm.36869", "msm-dai-q6-tdm.36871", + "msm-dai-q6-tdm.36864", "msm-dai-q6-tdm.36866", + "msm-dai-q6-tdm.36868", "msm-dai-q6-tdm.36870", "msm-dai-q6-tdm.36881", "msm-dai-q6-tdm.36883", "msm-dai-q6-tdm.36885", "msm-dai-q6-tdm.36887", "msm-dai-q6-tdm.36896", 
"msm-dai-q6-tdm.36898", @@ -325,6 +333,82 @@ }; }; + qcom,msm-dai-tdm-pri-rx { + compatible = "qcom,msm-dai-tdm"; + qcom,msm-cpudai-tdm-group-id = <37120>; + qcom,msm-cpudai-tdm-group-num-ports = <4>; + qcom,msm-cpudai-tdm-group-port-id = <36864 36866 36868 36870>; + qcom,msm-cpudai-tdm-clk-rate = <12288000>; + qcom,msm-cpudai-tdm-clk-internal = <1>; + qcom,msm-cpudai-tdm-sync-mode = <0>; + qcom,msm-cpudai-tdm-sync-src = <1>; + qcom,msm-cpudai-tdm-data-out = <0>; + qcom,msm-cpudai-tdm-invert-sync = <0>; + qcom,msm-cpudai-tdm-data-delay = <1>; + qcom,msm-cpudai-tdm-clk-attribute = /bits/ 16 <1>; + dai_pri_tdm_rx_0: qcom,msm-dai-q6-tdm-pri-rx-0 { + compatible = "qcom,msm-dai-q6-tdm"; + qcom,msm-cpudai-tdm-dev-id = <36864>; + qcom,msm-cpudai-tdm-data-align = <0>; + }; + + dai_pri_tdm_rx_1: qcom,msm-dai-q6-tdm-pri-rx-1 { + compatible = "qcom,msm-dai-q6-tdm"; + qcom,msm-cpudai-tdm-dev-id = <36866>; + qcom,msm-cpudai-tdm-data-align = <0>; + }; + + dai_pri_tdm_rx_2: qcom,msm-dai-q6-tdm-pri-rx-2 { + compatible = "qcom,msm-dai-q6-tdm"; + qcom,msm-cpudai-tdm-dev-id = <36868>; + qcom,msm-cpudai-tdm-data-align = <0>; + }; + + dai_pri_tdm_rx_3: qcom,msm-dai-q6-tdm-pri-rx-3 { + compatible = "qcom,msm-dai-q6-tdm"; + qcom,msm-cpudai-tdm-dev-id = <36870>; + qcom,msm-cpudai-tdm-data-align = <0>; + }; + }; + + qcom,msm-dai-tdm-pri-tx { + compatible = "qcom,msm-dai-tdm"; + qcom,msm-cpudai-tdm-group-id = <37121>; + qcom,msm-cpudai-tdm-group-num-ports = <4>; + qcom,msm-cpudai-tdm-group-port-id = <36865 36867 36869 36871>; + qcom,msm-cpudai-tdm-clk-rate = <12288000>; + qcom,msm-cpudai-tdm-clk-internal = <1>; + qcom,msm-cpudai-tdm-sync-mode = <0>; + qcom,msm-cpudai-tdm-sync-src = <1>; + qcom,msm-cpudai-tdm-data-out = <0>; + qcom,msm-cpudai-tdm-invert-sync = <0>; + qcom,msm-cpudai-tdm-data-delay = <1>; + qcom,msm-cpudai-tdm-clk-attribute = /bits/ 16 <1>; + dai_pri_tdm_tx_0: qcom,msm-dai-q6-tdm-pri-tx-0 { + compatible = "qcom,msm-dai-q6-tdm"; + qcom,msm-cpudai-tdm-dev-id = <36865>; + qcom,msm-cpudai-tdm-data-align = <0>; + }; + + dai_pri_tdm_tx_1: qcom,msm-dai-q6-tdm-pri-tx-1 { + compatible = "qcom,msm-dai-q6-tdm"; + qcom,msm-cpudai-tdm-dev-id = <36867>; + qcom,msm-cpudai-tdm-data-align = <0>; + }; + + dai_pri_tdm_tx_2: qcom,msm-dai-q6-tdm-pri-tx-2 { + compatible = "qcom,msm-dai-q6-tdm"; + qcom,msm-cpudai-tdm-dev-id = <36869>; + qcom,msm-cpudai-tdm-data-align = <0>; + }; + + dai_pri_tdm_tx_3: qcom,msm-dai-q6-tdm-pri-tx-3 { + compatible = "qcom,msm-dai-q6-tdm"; + qcom,msm-cpudai-tdm-dev-id = <36871>; + qcom,msm-cpudai-tdm-data-align = <0>; + }; + }; + qcom,msm-dai-tdm-sec-tx { compatible = "qcom,msm-dai-tdm"; qcom,msm-cpudai-tdm-group-id = <37137>; diff --git a/arch/arm/configs/s3c2410_defconfig b/arch/arm/configs/s3c2410_defconfig index f3142369f594..01116ee1284b 100644 --- a/arch/arm/configs/s3c2410_defconfig +++ b/arch/arm/configs/s3c2410_defconfig @@ -87,9 +87,9 @@ CONFIG_IPV6_TUNNEL=m CONFIG_NETFILTER=y CONFIG_NF_CONNTRACK=m CONFIG_NF_CONNTRACK_EVENTS=y -CONFIG_NF_CT_PROTO_DCCP=m -CONFIG_NF_CT_PROTO_SCTP=m -CONFIG_NF_CT_PROTO_UDPLITE=m +CONFIG_NF_CT_PROTO_DCCP=y +CONFIG_NF_CT_PROTO_SCTP=y +CONFIG_NF_CT_PROTO_UDPLITE=y CONFIG_NF_CONNTRACK_AMANDA=m CONFIG_NF_CONNTRACK_FTP=m CONFIG_NF_CONNTRACK_H323=m diff --git a/arch/arm/configs/sdm660-perf_defconfig b/arch/arm/configs/sdm660-perf_defconfig index 43b6432118f0..878e720a927b 100644 --- a/arch/arm/configs/sdm660-perf_defconfig +++ b/arch/arm/configs/sdm660-perf_defconfig @@ -656,6 +656,7 @@ CONFIG_CORESIGHT_QPDI=y CONFIG_CORESIGHT_SOURCE_DUMMY=y CONFIG_PFK=y 
CONFIG_SECURITY=y +CONFIG_HARDENED_USERCOPY=y CONFIG_SECURITY_SELINUX=y CONFIG_SECURITY_SMACK=y CONFIG_CRYPTO_ECHAINIV=y diff --git a/arch/arm/configs/sdm660_defconfig b/arch/arm/configs/sdm660_defconfig index e3aa35da81ce..524abcf83e77 100644 --- a/arch/arm/configs/sdm660_defconfig +++ b/arch/arm/configs/sdm660_defconfig @@ -694,6 +694,7 @@ CONFIG_CORESIGHT_QPDI=y CONFIG_CORESIGHT_SOURCE_DUMMY=y CONFIG_PFK=y CONFIG_SECURITY=y +CONFIG_HARDENED_USERCOPY=y CONFIG_SECURITY_SELINUX=y CONFIG_SECURITY_SMACK=y CONFIG_CRYPTO_ECHAINIV=y diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h index d2315ffd8f12..f13ae153fb24 100644 --- a/arch/arm/include/asm/elf.h +++ b/arch/arm/include/asm/elf.h @@ -112,12 +112,8 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs); #define CORE_DUMP_USE_REGSET #define ELF_EXEC_PAGESIZE 4096 -/* This is the location that an ET_DYN program is loaded if exec'ed. Typical - use of this is to invoke "./ld.so someprog" to test out a new version of - the loader. We need to make sure that it is out of the way of the program - that it will "exec", and that there is sufficient room for the brk. */ - -#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2) +/* This is the base location for PIE (ET_DYN with INTERP) loads. */ +#define ELF_ET_DYN_BASE 0x400000UL /* When the program starts, a1 contains a pointer to a function to be registered with atexit, as per the SVR4 ABI. A value of 0 means we diff --git a/arch/arm/kernel/stacktrace.c b/arch/arm/kernel/stacktrace.c index 5964c77c593d..a1898c6092d1 100644 --- a/arch/arm/kernel/stacktrace.c +++ b/arch/arm/kernel/stacktrace.c @@ -175,6 +175,7 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) { __save_stack_trace(tsk, trace, 1); } +EXPORT_SYMBOL(save_stack_trace_tsk); void save_stack_trace(struct stack_trace *trace) { diff --git a/arch/arm/kvm/init.S b/arch/arm/kvm/init.S index 3988e72d16ff..bfc5aae0c280 100644 --- a/arch/arm/kvm/init.S +++ b/arch/arm/kvm/init.S @@ -110,7 +110,6 @@ __do_hyp_init: @ - Write permission implies XN: disabled @ - Instruction cache: enabled @ - Data/Unified cache: enabled - @ - Memory alignment checks: enabled @ - MMU: enabled (this code must be run from an identity mapping) mrc p15, 4, r0, c1, c0, 0 @ HSCR ldr r2, =HSCTLR_MASK @@ -118,8 +117,8 @@ __do_hyp_init: mrc p15, 0, r1, c1, c0, 0 @ SCTLR ldr r2, =(HSCTLR_EE | HSCTLR_FI | HSCTLR_I | HSCTLR_C) and r1, r1, r2 - ARM( ldr r2, =(HSCTLR_M | HSCTLR_A) ) - THUMB( ldr r2, =(HSCTLR_M | HSCTLR_A | HSCTLR_TE) ) + ARM( ldr r2, =(HSCTLR_M) ) + THUMB( ldr r2, =(HSCTLR_M | HSCTLR_TE) ) orr r1, r1, r2 orr r0, r0, r1 isb diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c index 33ee522bb76f..e4a774f7aba1 100644 --- a/arch/arm/kvm/mmu.c +++ b/arch/arm/kvm/mmu.c @@ -876,6 +876,9 @@ static pmd_t *stage2_get_pmd(struct kvm *kvm, struct kvm_mmu_memory_cache *cache pmd_t *pmd; pud = stage2_get_pud(kvm, cache, addr); + if (!pud) + return NULL; + if (pud_none(*pud)) { if (!cache) return NULL; diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c index 66353caa35b9..641334ebf46d 100644 --- a/arch/arm/mm/mmap.c +++ b/arch/arm/mm/mmap.c @@ -89,7 +89,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, vma = find_vma(mm, addr); if (TASK_SIZE - len >= addr && - (!vma || addr + len <= vma->vm_start)) + (!vma || addr + len <= vm_start_gap(vma))) return addr; } @@ -140,7 +140,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, addr = PAGE_ALIGN(addr); vma = find_vma(mm, addr); if 
(TASK_SIZE - len >= addr && - (!vma || addr + len <= vma->vm_start)) + (!vma || addr + len <= vm_start_gap(vma))) return addr; } diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index 221b11bb50e3..f353849d9388 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c @@ -1197,15 +1197,15 @@ void __init sanity_check_meminfo(void) high_memory = __va(arm_lowmem_limit - 1) + 1; + if (!memblock_limit) + memblock_limit = arm_lowmem_limit; + /* * Round the memblock limit down to a pmd size. This * helps to ensure that we will allocate memory from the * last full pmd, which should be mapped. */ - if (memblock_limit) - memblock_limit = round_down(memblock_limit, PMD_SIZE); - if (!memblock_limit) - memblock_limit = arm_lowmem_limit; + memblock_limit = round_down(memblock_limit, PMD_SIZE); memblock_set_current_limit(memblock_limit); } diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-ep108.dts b/arch/arm64/boot/dts/xilinx/zynqmp-ep108.dts index ce5d848251fa..7b34822d61e9 100644 --- a/arch/arm64/boot/dts/xilinx/zynqmp-ep108.dts +++ b/arch/arm64/boot/dts/xilinx/zynqmp-ep108.dts @@ -26,7 +26,7 @@ stdout-path = "serial0:115200n8"; }; - memory { + memory@0 { device_type = "memory"; reg = <0x0 0x0 0x40000000>; }; diff --git a/arch/arm64/boot/dts/xilinx/zynqmp.dtsi b/arch/arm64/boot/dts/xilinx/zynqmp.dtsi index 857eda5c7217..172402cc1a0f 100644 --- a/arch/arm64/boot/dts/xilinx/zynqmp.dtsi +++ b/arch/arm64/boot/dts/xilinx/zynqmp.dtsi @@ -71,7 +71,7 @@ <1 10 0xf01>; }; - amba_apu { + amba_apu: amba_apu@0 { compatible = "simple-bus"; #address-cells = <2>; #size-cells = <1>; @@ -191,7 +191,7 @@ }; i2c0: i2c@ff020000 { - compatible = "cdns,i2c-r1p10"; + compatible = "cdns,i2c-r1p14", "cdns,i2c-r1p10"; status = "disabled"; interrupt-parent = <&gic>; interrupts = <0 17 4>; @@ -202,7 +202,7 @@ }; i2c1: i2c@ff030000 { - compatible = "cdns,i2c-r1p10"; + compatible = "cdns,i2c-r1p14", "cdns,i2c-r1p10"; status = "disabled"; interrupt-parent = <&gic>; interrupts = <0 18 4>; diff --git a/arch/arm64/configs/msm-auto-perf_defconfig b/arch/arm64/configs/msm-auto-perf_defconfig index d2a0fc53235d..efdfb4da2de2 100644 --- a/arch/arm64/configs/msm-auto-perf_defconfig +++ b/arch/arm64/configs/msm-auto-perf_defconfig @@ -308,7 +308,9 @@ CONFIG_HW_RANDOM=y CONFIG_HW_RANDOM_MSM_LEGACY=y CONFIG_MSM_ADSPRPC=y CONFIG_MSM_RDBG=m +CONFIG_I2C=y CONFIG_I2C_CHARDEV=y +CONFIG_I2C_MUX=y CONFIG_I2C_QUP=y CONFIG_I2C_MSM_V2=y CONFIG_SLIMBUS_MSM_NGD=y @@ -344,6 +346,7 @@ CONFIG_WCD9335_CODEC=y CONFIG_REGULATOR=y CONFIG_REGULATOR_FIXED_VOLTAGE=y CONFIG_REGULATOR_FAN53555=y +CONFIG_REGULATOR_MAX20010=y CONFIG_REGULATOR_ONSEMI_NCP6335D=y CONFIG_REGULATOR_RPM_SMD=y CONFIG_REGULATOR_QPNP=y @@ -368,7 +371,10 @@ CONFIG_MSM_SDE_ROTATOR=y CONFIG_MSM_AIS=y CONFIG_MSM_AIS_DEBUG=y CONFIG_MSM_AIS_CAMERA_SENSOR=y +# CONFIG_MEDIA_SUBDRV_AUTOSELECT is not set +CONFIG_VIDEO_ADV7481=y CONFIG_QCOM_KGSL=y +CONFIG_MSM_BA_V4L2=y CONFIG_FB=y CONFIG_FB_MSM=y CONFIG_FB_MSM_MDSS=y diff --git a/arch/arm64/configs/msm-auto_defconfig b/arch/arm64/configs/msm-auto_defconfig index e67df6338136..e9ef95772ebd 100644 --- a/arch/arm64/configs/msm-auto_defconfig +++ b/arch/arm64/configs/msm-auto_defconfig @@ -311,7 +311,9 @@ CONFIG_HW_RANDOM=y CONFIG_HW_RANDOM_MSM_LEGACY=y CONFIG_MSM_ADSPRPC=y CONFIG_MSM_RDBG=m +CONFIG_I2C=y CONFIG_I2C_CHARDEV=y +CONFIG_I2C_MUX=y CONFIG_I2C_QUP=y CONFIG_I2C_MSM_V2=y CONFIG_SLIMBUS_MSM_NGD=y @@ -347,6 +349,7 @@ CONFIG_WCD9335_CODEC=y CONFIG_REGULATOR=y CONFIG_REGULATOR_FIXED_VOLTAGE=y CONFIG_REGULATOR_FAN53555=y +CONFIG_REGULATOR_MAX20010=y 
CONFIG_REGULATOR_ONSEMI_NCP6335D=y CONFIG_REGULATOR_RPM_SMD=y CONFIG_REGULATOR_QPNP=y @@ -372,7 +375,10 @@ CONFIG_MSM_SDE_ROTATOR=y CONFIG_MSM_AIS=y CONFIG_MSM_AIS_DEBUG=y CONFIG_MSM_AIS_CAMERA_SENSOR=y +# CONFIG_MEDIA_SUBDRV_AUTOSELECT is not set +CONFIG_VIDEO_ADV7481=y CONFIG_QCOM_KGSL=y +CONFIG_MSM_BA_V4L2=y CONFIG_FB=y CONFIG_FB_MSM=y CONFIG_FB_MSM_MDSS=y diff --git a/arch/arm64/configs/msm-perf_defconfig b/arch/arm64/configs/msm-perf_defconfig index acde18d2fe31..61418724b897 100644 --- a/arch/arm64/configs/msm-perf_defconfig +++ b/arch/arm64/configs/msm-perf_defconfig @@ -34,7 +34,7 @@ CONFIG_EMBEDDED=y # CONFIG_SLUB_DEBUG is not set # CONFIG_COMPAT_BRK is not set CONFIG_PROFILING=y -CONFIG_CC_STACKPROTECTOR_REGULAR=y +CONFIG_CC_STACKPROTECTOR_STRONG=y CONFIG_ARCH_MMAP_RND_COMPAT_BITS=16 CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y diff --git a/arch/arm64/configs/msm_defconfig b/arch/arm64/configs/msm_defconfig index f510f43427ce..ee2b9fa628ff 100644 --- a/arch/arm64/configs/msm_defconfig +++ b/arch/arm64/configs/msm_defconfig @@ -31,7 +31,7 @@ CONFIG_KALLSYMS_ALL=y CONFIG_EMBEDDED=y # CONFIG_COMPAT_BRK is not set CONFIG_PROFILING=y -CONFIG_CC_STACKPROTECTOR_REGULAR=y +CONFIG_CC_STACKPROTECTOR_STRONG=y CONFIG_ARCH_MMAP_RND_COMPAT_BITS=16 CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y diff --git a/arch/arm64/configs/msmcortex-perf_defconfig b/arch/arm64/configs/msmcortex-perf_defconfig index 6f5be663140f..e16fc58ce913 100644 --- a/arch/arm64/configs/msmcortex-perf_defconfig +++ b/arch/arm64/configs/msmcortex-perf_defconfig @@ -1,5 +1,6 @@ CONFIG_LOCALVERSION="-perf" # CONFIG_LOCALVERSION_AUTO is not set +# CONFIG_USELIB is not set CONFIG_AUDIT=y # CONFIG_AUDITSYSCALL is not set CONFIG_NO_HZ=y @@ -32,7 +33,6 @@ CONFIG_BLK_DEV_INITRD=y # CONFIG_RD_XZ is not set # CONFIG_RD_LZO is not set # CONFIG_RD_LZ4 is not set -CONFIG_CC_OPTIMIZE_FOR_SIZE=y CONFIG_KALLSYMS_ALL=y # CONFIG_AIO is not set # CONFIG_MEMBARRIER is not set @@ -40,7 +40,7 @@ CONFIG_EMBEDDED=y # CONFIG_SLUB_DEBUG is not set # CONFIG_COMPAT_BRK is not set CONFIG_PROFILING=y -CONFIG_CC_STACKPROTECTOR_REGULAR=y +CONFIG_CC_STACKPROTECTOR_STRONG=y CONFIG_ARCH_MMAP_RND_COMPAT_BITS=16 CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y @@ -104,6 +104,7 @@ CONFIG_IP_PNP_DHCP=y CONFIG_INET_AH=y CONFIG_INET_ESP=y CONFIG_INET_IPCOMP=y +# CONFIG_INET_LRO is not set CONFIG_INET_DIAG_DESTROY=y CONFIG_IPV6_ROUTER_PREF=y CONFIG_IPV6_ROUTE_INFO=y @@ -294,6 +295,7 @@ CONFIG_WCNSS_MEM_PRE_ALLOC=y CONFIG_ATH_CARDS=y CONFIG_WIL6210=m CONFIG_CLD_LL_CORE=y +CONFIG_CNSS_GENL=y CONFIG_INPUT_EVDEV=y CONFIG_INPUT_KEYRESET=y CONFIG_KEYBOARD_GPIO=y @@ -319,6 +321,7 @@ CONFIG_SERIAL_MSM_SMD=y CONFIG_DIAG_CHAR=y CONFIG_HW_RANDOM=y CONFIG_HW_RANDOM_MSM_LEGACY=y +# CONFIG_DEVPORT is not set CONFIG_MSM_ADSPRPC=y CONFIG_MSM_RDBG=m CONFIG_I2C_CHARDEV=y @@ -471,6 +474,7 @@ CONFIG_USB_CONFIGFS_F_FS=y CONFIG_USB_CONFIGFS_F_MTP=y CONFIG_USB_CONFIGFS_F_PTP=y CONFIG_USB_CONFIGFS_F_ACC=y +CONFIG_USB_CONFIGFS_F_AUDIO_SRC=y CONFIG_USB_CONFIGFS_UEVENT=y CONFIG_USB_CONFIGFS_F_MIDI=y CONFIG_USB_CONFIGFS_F_HID=y @@ -607,6 +611,9 @@ CONFIG_EXT4_FS_SECURITY=y CONFIG_EXT4_ENCRYPTION=y CONFIG_EXT4_FS_ENCRYPTION=y CONFIG_EXT4_FS_ICE_ENCRYPTION=y +CONFIG_QUOTA=y +CONFIG_QUOTA_NETLINK_INTERFACE=y +CONFIG_QFMT_V2=y CONFIG_FUSE_FS=y CONFIG_MSDOS_FS=y CONFIG_VFAT_FS=y @@ -638,6 +645,7 @@ CONFIG_CORESIGHT_TPDM=y CONFIG_CORESIGHT_QPDI=y CONFIG_CORESIGHT_SOURCE_DUMMY=y CONFIG_PFK=y +CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y CONFIG_SECURITY=y CONFIG_HARDENED_USERCOPY=y CONFIG_SECURITY_SELINUX=y diff --git 
a/arch/arm64/configs/msmcortex_defconfig b/arch/arm64/configs/msmcortex_defconfig index f09a134a2fd5..77157bb85ee1 100644 --- a/arch/arm64/configs/msmcortex_defconfig +++ b/arch/arm64/configs/msmcortex_defconfig @@ -32,14 +32,13 @@ CONFIG_BLK_DEV_INITRD=y # CONFIG_RD_XZ is not set # CONFIG_RD_LZO is not set # CONFIG_RD_LZ4 is not set -CONFIG_CC_OPTIMIZE_FOR_SIZE=y CONFIG_KALLSYMS_ALL=y # CONFIG_AIO is not set # CONFIG_MEMBARRIER is not set CONFIG_EMBEDDED=y # CONFIG_COMPAT_BRK is not set CONFIG_PROFILING=y -CONFIG_CC_STACKPROTECTOR_REGULAR=y +CONFIG_CC_STACKPROTECTOR_STRONG=y CONFIG_ARCH_MMAP_RND_COMPAT_BITS=16 CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y @@ -297,6 +296,7 @@ CONFIG_WCNSS_MEM_PRE_ALLOC=y CONFIG_ATH_CARDS=y CONFIG_WIL6210=m CONFIG_CLD_LL_CORE=y +CONFIG_CNSS_GENL=y CONFIG_INPUT_EVDEV=y CONFIG_INPUT_KEYRESET=y CONFIG_KEYBOARD_GPIO=y @@ -325,6 +325,7 @@ CONFIG_SERIAL_MSM_SMD=y CONFIG_DIAG_CHAR=y CONFIG_HW_RANDOM=y CONFIG_HW_RANDOM_MSM_LEGACY=y +# CONFIG_DEVPORT is not set CONFIG_MSM_ADSPRPC=y CONFIG_MSM_RDBG=m CONFIG_I2C_CHARDEV=y @@ -476,6 +477,7 @@ CONFIG_USB_CONFIGFS_F_FS=y CONFIG_USB_CONFIGFS_F_MTP=y CONFIG_USB_CONFIGFS_F_PTP=y CONFIG_USB_CONFIGFS_F_ACC=y +CONFIG_USB_CONFIGFS_F_AUDIO_SRC=y CONFIG_USB_CONFIGFS_UEVENT=y CONFIG_USB_CONFIGFS_F_MIDI=y CONFIG_USB_CONFIGFS_F_HID=y @@ -486,6 +488,7 @@ CONFIG_USB_CONFIGFS_F_QDSS=y CONFIG_USB_CONFIGFS_F_CCID=y CONFIG_MMC=y CONFIG_MMC_PERF_PROFILING=y +CONFIG_MMC_RING_BUFFER=y CONFIG_MMC_PARANOID_SD_INIT=y CONFIG_MMC_CLKGATE=y CONFIG_MMC_BLOCK_MINORS=32 @@ -632,6 +635,9 @@ CONFIG_EXT4_FS_SECURITY=y CONFIG_EXT4_ENCRYPTION=y CONFIG_EXT4_FS_ENCRYPTION=y CONFIG_EXT4_FS_ICE_ENCRYPTION=y +CONFIG_QUOTA=y +CONFIG_QUOTA_NETLINK_INTERFACE=y +CONFIG_QFMT_V2=y CONFIG_FUSE_FS=y CONFIG_MSDOS_FS=y CONFIG_VFAT_FS=y @@ -714,6 +720,7 @@ CONFIG_CORESIGHT_TPDM=y CONFIG_CORESIGHT_QPDI=y CONFIG_CORESIGHT_SOURCE_DUMMY=y CONFIG_PFK=y +CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y CONFIG_SECURITY=y CONFIG_HARDENED_USERCOPY=y CONFIG_SECURITY_SELINUX=y diff --git a/arch/arm64/configs/msmcortex_mediabox_defconfig b/arch/arm64/configs/msmcortex_mediabox_defconfig index 994b0f230968..0d36b8ca455d 100644 --- a/arch/arm64/configs/msmcortex_mediabox_defconfig +++ b/arch/arm64/configs/msmcortex_mediabox_defconfig @@ -408,10 +408,10 @@ CONFIG_MSM_VIDC_GOVERNORS=y CONFIG_MSM_SDE_ROTATOR=y CONFIG_MSM_SDE_ROTATOR_EVTLOG_DEBUG=y CONFIG_MSM_SDE_HDMI_CEC=y -CONFIG_DVB_MPQ=m -CONFIG_DVB_MPQ_DEMUX=m +CONFIG_DVB_MPQ=y +CONFIG_DVB_MPQ_DEMUX=y CONFIG_DVB_MPQ_MEDIA_BOX_DEMUX=y -CONFIG_TSPP=m +CONFIG_TSPP=y CONFIG_DRM=y CONFIG_BACKLIGHT_LCD_SUPPORT=y CONFIG_LOGO=y diff --git a/arch/arm64/configs/sdm660-perf_defconfig b/arch/arm64/configs/sdm660-perf_defconfig index 939b34f7d6dd..c92551c69bf8 100644 --- a/arch/arm64/configs/sdm660-perf_defconfig +++ b/arch/arm64/configs/sdm660-perf_defconfig @@ -7,6 +7,8 @@ CONFIG_HIGH_RES_TIMERS=y CONFIG_IRQ_TIME_ACCOUNTING=y CONFIG_RCU_EXPERT=y CONFIG_RCU_FAST_NO_HZ=y +CONFIG_RCU_NOCB_CPU=y +CONFIG_RCU_NOCB_CPU_ALL=y CONFIG_IKCONFIG=y CONFIG_IKCONFIG_PROC=y CONFIG_LOG_CPU_MAX_BUF_SHIFT=17 @@ -36,7 +38,7 @@ CONFIG_EMBEDDED=y # CONFIG_SLUB_DEBUG is not set # CONFIG_COMPAT_BRK is not set CONFIG_PROFILING=y -CONFIG_CC_STACKPROTECTOR_REGULAR=y +CONFIG_CC_STACKPROTECTOR_STRONG=y CONFIG_ARCH_MMAP_RND_COMPAT_BITS=16 CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y @@ -67,6 +69,7 @@ CONFIG_ARMV8_DEPRECATED=y CONFIG_SWP_EMULATION=y CONFIG_CP15_BARRIER_EMULATION=y CONFIG_SETEND_EMULATION=y +CONFIG_ARM64_SW_TTBR0_PAN=y CONFIG_RANDOMIZE_BASE=y # CONFIG_EFI is not set 
CONFIG_BUILD_ARM64_APPENDED_DTB_IMAGE=y @@ -297,6 +300,7 @@ CONFIG_INPUT_MISC=y CONFIG_INPUT_HBTP_INPUT=y CONFIG_INPUT_QPNP_POWER_ON=y CONFIG_INPUT_UINPUT=y +CONFIG_INPUT_STMVL53L0=y # CONFIG_SERIO_SERPORT is not set # CONFIG_VT is not set # CONFIG_LEGACY_PTYS is not set @@ -308,6 +312,7 @@ CONFIG_DIAG_CHAR=y CONFIG_HW_RANDOM=y CONFIG_HW_RANDOM_MSM_LEGACY=y CONFIG_MSM_ADSPRPC=y +CONFIG_MSM_RDBG=m CONFIG_I2C_CHARDEV=y CONFIG_I2C_MSM_V2=y CONFIG_SLIMBUS_MSM_NGD=y @@ -571,6 +576,7 @@ CONFIG_MSM_EVENT_TIMER=y CONFIG_MSM_AVTIMER=y CONFIG_QCOM_REMOTEQDSS=y CONFIG_MSM_SERVICE_NOTIFIER=y +CONFIG_MSM_QBT1000=y CONFIG_MSM_RPM_RBCPR_STATS_V2_LOG=y CONFIG_MSM_RPM_LOG=y CONFIG_MSM_RPM_STATS_LOG=y @@ -637,6 +643,7 @@ CONFIG_CORESIGHT_QPDI=y CONFIG_CORESIGHT_SOURCE_DUMMY=y CONFIG_PFK=y CONFIG_SECURITY=y +CONFIG_HARDENED_USERCOPY=y CONFIG_SECURITY_SELINUX=y CONFIG_SECURITY_SMACK=y CONFIG_CRYPTO_ECHAINIV=y diff --git a/arch/arm64/configs/sdm660_defconfig b/arch/arm64/configs/sdm660_defconfig index aafde733099b..6b45087e2603 100644 --- a/arch/arm64/configs/sdm660_defconfig +++ b/arch/arm64/configs/sdm660_defconfig @@ -8,6 +8,9 @@ CONFIG_TASKSTATS=y CONFIG_TASK_XACCT=y CONFIG_TASK_IO_ACCOUNTING=y CONFIG_RCU_EXPERT=y +CONFIG_RCU_FAST_NO_HZ=y +CONFIG_RCU_NOCB_CPU=y +CONFIG_RCU_NOCB_CPU_ALL=y CONFIG_IKCONFIG=y CONFIG_IKCONFIG_PROC=y CONFIG_LOG_CPU_MAX_BUF_SHIFT=17 @@ -37,7 +40,7 @@ CONFIG_KALLSYMS_ALL=y CONFIG_EMBEDDED=y # CONFIG_COMPAT_BRK is not set CONFIG_PROFILING=y -CONFIG_CC_STACKPROTECTOR_REGULAR=y +CONFIG_CC_STACKPROTECTOR_STRONG=y CONFIG_ARCH_MMAP_RND_COMPAT_BITS=16 CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y @@ -70,6 +73,7 @@ CONFIG_ARMV8_DEPRECATED=y CONFIG_SWP_EMULATION=y CONFIG_CP15_BARRIER_EMULATION=y CONFIG_SETEND_EMULATION=y +CONFIG_ARM64_SW_TTBR0_PAN=y CONFIG_RANDOMIZE_BASE=y CONFIG_BUILD_ARM64_APPENDED_DTB_IMAGE=y # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set @@ -305,6 +309,7 @@ CONFIG_INPUT_QPNP_POWER_ON=y CONFIG_INPUT_KEYCHORD=y CONFIG_INPUT_UINPUT=y CONFIG_INPUT_GPIO=y +CONFIG_INPUT_STMVL53L0=y # CONFIG_SERIO_SERPORT is not set # CONFIG_VT is not set # CONFIG_LEGACY_PTYS is not set @@ -316,6 +321,7 @@ CONFIG_DIAG_CHAR=y CONFIG_HW_RANDOM=y CONFIG_HW_RANDOM_MSM_LEGACY=y CONFIG_MSM_ADSPRPC=y +CONFIG_MSM_RDBG=m CONFIG_I2C_CHARDEV=y CONFIG_I2C_MSM_V2=y CONFIG_SLIMBUS_MSM_NGD=y @@ -595,6 +601,7 @@ CONFIG_MSM_EVENT_TIMER=y CONFIG_MSM_AVTIMER=y CONFIG_QCOM_REMOTEQDSS=y CONFIG_MSM_SERVICE_NOTIFIER=y +CONFIG_MSM_QBT1000=y CONFIG_MSM_RPM_RBCPR_STATS_V2_LOG=y CONFIG_MSM_RPM_LOG=y CONFIG_MSM_RPM_STATS_LOG=y @@ -709,6 +716,7 @@ CONFIG_CORESIGHT_QPDI=y CONFIG_CORESIGHT_SOURCE_DUMMY=y CONFIG_PFK=y CONFIG_SECURITY=y +CONFIG_HARDENED_USERCOPY=y CONFIG_SECURITY_SELINUX=y CONFIG_SECURITY_SMACK=y CONFIG_CRYPTO_ECHAINIV=y diff --git a/arch/arm64/include/asm/acpi.h b/arch/arm64/include/asm/acpi.h index aee323b13802..0a11cd502dbc 100644 --- a/arch/arm64/include/asm/acpi.h +++ b/arch/arm64/include/asm/acpi.h @@ -22,9 +22,9 @@ #define ACPI_MADT_GICC_LENGTH \ (acpi_gbl_FADT.header.revision < 6 ? 
76 : 80) -#define BAD_MADT_GICC_ENTRY(entry, end) \ - (!(entry) || (unsigned long)(entry) + sizeof(*(entry)) > (end) || \ - (entry)->header.length != ACPI_MADT_GICC_LENGTH) +#define BAD_MADT_GICC_ENTRY(entry, end) \ + (!(entry) || (entry)->header.length != ACPI_MADT_GICC_LENGTH || \ + (unsigned long)(entry) + ACPI_MADT_GICC_LENGTH > (end)) /* Basic configuration for ACPI */ #ifdef CONFIG_ACPI diff --git a/arch/arm64/include/asm/asm-uaccess.h b/arch/arm64/include/asm/asm-uaccess.h new file mode 100644 index 000000000000..be2d2347d995 --- /dev/null +++ b/arch/arm64/include/asm/asm-uaccess.h @@ -0,0 +1,13 @@ +#ifndef __ASM_ASM_UACCESS_H +#define __ASM_ASM_UACCESS_H + +/* + * Remove the address tag from a virtual address, if present. + */ + .macro clear_address_tag, dst, addr + tst \addr, #(1 << 55) + bic \dst, \addr, #(0xff << 56) + csel \dst, \dst, \addr, eq + .endm + +#endif diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h index c5dbc5cb8f10..0671711b46ab 100644 --- a/arch/arm64/include/asm/barrier.h +++ b/arch/arm64/include/asm/barrier.h @@ -44,23 +44,33 @@ #define smp_store_release(p, v) \ do { \ + union { typeof(*p) __val; char __c[1]; } __u = \ + { .__val = (__force typeof(*p)) (v) }; \ compiletime_assert_atomic_type(*p); \ switch (sizeof(*p)) { \ case 1: \ asm volatile ("stlrb %w1, %0" \ - : "=Q" (*p) : "r" (v) : "memory"); \ + : "=Q" (*p) \ + : "r" (*(__u8 *)__u.__c) \ + : "memory"); \ break; \ case 2: \ asm volatile ("stlrh %w1, %0" \ - : "=Q" (*p) : "r" (v) : "memory"); \ + : "=Q" (*p) \ + : "r" (*(__u16 *)__u.__c) \ + : "memory"); \ break; \ case 4: \ asm volatile ("stlr %w1, %0" \ - : "=Q" (*p) : "r" (v) : "memory"); \ + : "=Q" (*p) \ + : "r" (*(__u32 *)__u.__c) \ + : "memory"); \ break; \ case 8: \ asm volatile ("stlr %1, %0" \ - : "=Q" (*p) : "r" (v) : "memory"); \ + : "=Q" (*p) \ + : "r" (*(__u64 *)__u.__c) \ + : "memory"); \ break; \ } \ } while (0) diff --git a/arch/arm64/include/asm/elf.h b/arch/arm64/include/asm/elf.h index a383c288ef49..b98332269462 100644 --- a/arch/arm64/include/asm/elf.h +++ b/arch/arm64/include/asm/elf.h @@ -114,12 +114,11 @@ #define ELF_EXEC_PAGESIZE PAGE_SIZE /* - * This is the location that an ET_DYN program is loaded if exec'ed. Typical - * use of this is to invoke "./ld.so someprog" to test out a new version of - * the loader. We need to make sure that it is out of the way of the program - * that it will "exec", and that there is sufficient room for the brk. + * This is the base location for PIE (ET_DYN with INTERP) loads. On + * 64-bit, this is raised to 4GB to leave the entire 32-bit address + * space open for things that want to use the area for 32-bit pointers. */ -#define ELF_ET_DYN_BASE (2 * TASK_SIZE_64 / 3) +#define ELF_ET_DYN_BASE 0x100000000UL #ifndef __ASSEMBLY__ @@ -170,7 +169,8 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm, #ifdef CONFIG_COMPAT -#define COMPAT_ELF_ET_DYN_BASE (2 * TASK_SIZE_32 / 3) +/* PIE load location for compat arm. Must match ARM ELF_ET_DYN_BASE. */ +#define COMPAT_ELF_ET_DYN_BASE 0x000400000UL /* AArch32 registers. 
*/ #define COMPAT_ELF_NGREG 18 diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h index eea245bf546a..3112c2a9d96f 100644 --- a/arch/arm64/include/asm/io.h +++ b/arch/arm64/include/asm/io.h @@ -167,9 +167,9 @@ static inline u64 __raw_readq_no_log(const volatile void __iomem *addr) #define readq_relaxed_no_log(c) ({ u64 __v = le64_to_cpu((__force __le64)__raw_readq_no_log(c)); __v; }) #define writeb_relaxed_no_log(v, c) ((void)__raw_writeb_no_log((v), (c))) -#define writew_relaxed_no_log(v, c) ((void)__raw_writew_no_log((__force u16)cpu_to_le32(v), (c))) +#define writew_relaxed_no_log(v, c) ((void)__raw_writew_no_log((__force u16)cpu_to_le16(v), (c))) #define writel_relaxed_no_log(v, c) ((void)__raw_writel_no_log((__force u32)cpu_to_le32(v), (c))) -#define writeq_relaxed_no_log(v, c) ((void)__raw_writeq_no_log((__force u64)cpu_to_le32(v), (c))) +#define writeq_relaxed_no_log(v, c) ((void)__raw_writeq_no_log((__force u64)cpu_to_le64(v), (c))) /* * I/O memory access primitives. Reads are ordered relative to any diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h index ac177d96e773..064cef9ae2d1 100644 --- a/arch/arm64/include/asm/uaccess.h +++ b/arch/arm64/include/asm/uaccess.h @@ -27,6 +27,7 @@ /* * User space memory access functions */ +#include #include #include @@ -119,6 +120,13 @@ static inline void set_fs(mm_segment_t fs) flag; \ }) +/* + * When dealing with data aborts, watchpoints, or instruction traps we may end + * up with a tagged userland pointer. Clear the tag to get a sane pointer to + * pass on to access_ok(), for instance. + */ +#define untagged_addr(addr) sign_extend64(addr, 55) + #define access_ok(type, addr, size) __range_ok(addr, size) #define user_addr_max get_fs diff --git a/arch/arm64/kernel/armv8_deprecated.c b/arch/arm64/kernel/armv8_deprecated.c index 884b317e56c3..10d3642deb7c 100644 --- a/arch/arm64/kernel/armv8_deprecated.c +++ b/arch/arm64/kernel/armv8_deprecated.c @@ -299,7 +299,8 @@ do { \ _ASM_EXTABLE(0b, 4b) \ _ASM_EXTABLE(1b, 4b) \ : "=&r" (res), "+r" (data), "=&r" (temp) \ - : "r" (addr), "i" (-EAGAIN), "i" (-EFAULT) \ + : "r" ((unsigned long)addr), "i" (-EAGAIN), \ + "i" (-EFAULT) \ : "memory"); \ uaccess_disable(); \ } while (0) diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index 0ea65307f866..7822e36d87fb 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S @@ -32,6 +32,7 @@ #include #include #include +#include #include /* @@ -437,12 +438,13 @@ el1_da: /* * Data abort handling */ - mrs x0, far_el1 + mrs x3, far_el1 enable_dbg // re-enable interrupts if they were enabled in the aborted context tbnz x23, #7, 1f // PSR_I_BIT enable_irq 1: + clear_address_tag x0, x3 mov x2, sp // struct pt_regs bl do_mem_abort @@ -604,7 +606,7 @@ el0_da: // enable interrupts before calling the main handler enable_dbg_and_irq ct_user_exit - bic x0, x26, #(0xff << 56) + clear_address_tag x0, x26 mov x1, x25 mov x2, sp bl do_mem_abort diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c index f4dfd8c41e06..1c694f3c643c 100644 --- a/arch/arm64/kernel/hw_breakpoint.c +++ b/arch/arm64/kernel/hw_breakpoint.c @@ -36,6 +36,7 @@ #include #include #include +#include /* Breakpoint currently in use for each BRP. 
*/ static DEFINE_PER_CPU(struct perf_event *, bp_on_reg[ARM_MAX_BRP]); diff --git a/arch/arm64/kernel/perf_trace_counters.c b/arch/arm64/kernel/perf_trace_counters.c index 748ad449fc18..dc92b29ac103 100644 --- a/arch/arm64/kernel/perf_trace_counters.c +++ b/arch/arm64/kernel/perf_trace_counters.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved. +/* Copyright (c) 2013-2014, 2017 The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -124,6 +124,7 @@ static ssize_t write_enabled_perftp_file_bool(struct file *file, char buf[32]; size_t buf_size; + buf[0] = 0; buf_size = min(count, (sizeof(buf)-1)); if (copy_from_user(buf, user_buf, buf_size)) return -EFAULT; diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c index cb3eec8e8e50..1fd1a9a6596f 100644 --- a/arch/arm64/kernel/stacktrace.c +++ b/arch/arm64/kernel/stacktrace.c @@ -184,6 +184,7 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) if (trace->nr_entries < trace->max_entries) trace->entries[trace->nr_entries++] = ULONG_MAX; } +EXPORT_SYMBOL(save_stack_trace_tsk); void save_stack_trace(struct stack_trace *trace) { diff --git a/arch/arm64/kernel/topology.c b/arch/arm64/kernel/topology.c index c0ee84020784..db0087fd9823 100644 --- a/arch/arm64/kernel/topology.c +++ b/arch/arm64/kernel/topology.c @@ -421,6 +421,11 @@ const struct cpumask *cpu_coregroup_mask(int cpu) return &cpu_topology[cpu].core_sibling; } +static int cpu_cpu_flags(void) +{ + return SD_ASYM_CPUCAPACITY; +} + static inline int cpu_corepower_flags(void) { return SD_SHARE_PKG_RESOURCES | SD_SHARE_POWERDOMAIN | \ @@ -431,7 +436,7 @@ static struct sched_domain_topology_level arm64_topology[] = { #ifdef CONFIG_SCHED_MC { cpu_coregroup_mask, cpu_corepower_flags, cpu_core_energy, SD_INIT_NAME(MC) }, #endif - { cpu_cpu_mask, NULL, cpu_cluster_energy, SD_INIT_NAME(DIE) }, + { cpu_cpu_mask, cpu_cpu_flags, cpu_cluster_energy, SD_INIT_NAME(DIE) }, { NULL, }, }; diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c index 1804aea44faa..2720d47da366 100644 --- a/arch/arm64/mm/fault.c +++ b/arch/arm64/mm/fault.c @@ -92,21 +92,21 @@ void show_pte(struct mm_struct *mm, unsigned long addr) break; pud = pud_offset(pgd, addr); - printk(", *pud=%016llx", pud_val(*pud)); + pr_cont(", *pud=%016llx", pud_val(*pud)); if (pud_none(*pud) || pud_bad(*pud)) break; pmd = pmd_offset(pud, addr); - printk(", *pmd=%016llx", pmd_val(*pmd)); + pr_cont(", *pmd=%016llx", pmd_val(*pmd)); if (pmd_none(*pmd) || pmd_bad(*pmd)) break; pte = pte_offset_map(pmd, addr); - printk(", *pte=%016llx", pte_val(*pte)); + pr_cont(", *pte=%016llx", pte_val(*pte)); pte_unmap(pte); } while(0); - printk("\n"); + pr_cont("\n"); } #ifdef CONFIG_ARM64_HW_AFDBM diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c index 836f14707a62..efa59f1f8022 100644 --- a/arch/frv/mm/elf-fdpic.c +++ b/arch/frv/mm/elf-fdpic.c @@ -74,7 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi addr = PAGE_ALIGN(addr); vma = find_vma(current->mm, addr); if (TASK_SIZE - len >= addr && - (!vma || addr + len <= vma->vm_start)) + (!vma || addr + len <= vm_start_gap(vma))) goto success; } diff --git a/arch/mips/ath79/common.c b/arch/mips/ath79/common.c index 3cedd1f95e0f..8ae4067a5eda 100644 --- a/arch/mips/ath79/common.c +++ b/arch/mips/ath79/common.c @@ -76,14 +76,14 @@ void 
ath79_ddr_set_pci_windows(void) { BUG_ON(!ath79_ddr_pci_win_base); - __raw_writel(AR71XX_PCI_WIN0_OFFS, ath79_ddr_pci_win_base + 0); - __raw_writel(AR71XX_PCI_WIN1_OFFS, ath79_ddr_pci_win_base + 1); - __raw_writel(AR71XX_PCI_WIN2_OFFS, ath79_ddr_pci_win_base + 2); - __raw_writel(AR71XX_PCI_WIN3_OFFS, ath79_ddr_pci_win_base + 3); - __raw_writel(AR71XX_PCI_WIN4_OFFS, ath79_ddr_pci_win_base + 4); - __raw_writel(AR71XX_PCI_WIN5_OFFS, ath79_ddr_pci_win_base + 5); - __raw_writel(AR71XX_PCI_WIN6_OFFS, ath79_ddr_pci_win_base + 6); - __raw_writel(AR71XX_PCI_WIN7_OFFS, ath79_ddr_pci_win_base + 7); + __raw_writel(AR71XX_PCI_WIN0_OFFS, ath79_ddr_pci_win_base + 0x0); + __raw_writel(AR71XX_PCI_WIN1_OFFS, ath79_ddr_pci_win_base + 0x4); + __raw_writel(AR71XX_PCI_WIN2_OFFS, ath79_ddr_pci_win_base + 0x8); + __raw_writel(AR71XX_PCI_WIN3_OFFS, ath79_ddr_pci_win_base + 0xc); + __raw_writel(AR71XX_PCI_WIN4_OFFS, ath79_ddr_pci_win_base + 0x10); + __raw_writel(AR71XX_PCI_WIN5_OFFS, ath79_ddr_pci_win_base + 0x14); + __raw_writel(AR71XX_PCI_WIN6_OFFS, ath79_ddr_pci_win_base + 0x18); + __raw_writel(AR71XX_PCI_WIN7_OFFS, ath79_ddr_pci_win_base + 0x1c); } EXPORT_SYMBOL_GPL(ath79_ddr_set_pci_windows); diff --git a/arch/mips/include/asm/branch.h b/arch/mips/include/asm/branch.h index de781cf54bc7..da80878f2c0d 100644 --- a/arch/mips/include/asm/branch.h +++ b/arch/mips/include/asm/branch.h @@ -74,10 +74,7 @@ static inline int compute_return_epc(struct pt_regs *regs) return __microMIPS_compute_return_epc(regs); if (cpu_has_mips16) return __MIPS16e_compute_return_epc(regs); - return regs->cp0_epc; - } - - if (!delay_slot(regs)) { + } else if (!delay_slot(regs)) { regs->cp0_epc += 4; return 0; } diff --git a/arch/mips/kernel/branch.c b/arch/mips/kernel/branch.c index d8f9b357b222..71e8f4c0b8da 100644 --- a/arch/mips/kernel/branch.c +++ b/arch/mips/kernel/branch.c @@ -399,7 +399,7 @@ int __MIPS16e_compute_return_epc(struct pt_regs *regs) * * @regs: Pointer to pt_regs * @insn: branch instruction to decode - * @returns: -EFAULT on error and forces SIGBUS, and on success + * @returns: -EFAULT on error and forces SIGILL, and on success * returns 0 or BRANCH_LIKELY_TAKEN as appropriate after * evaluating the branch. 
* @@ -431,7 +431,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs, /* Fall through */ case jr_op: if (NO_R6EMU && insn.r_format.func == jr_op) - goto sigill_r6; + goto sigill_r2r6; regs->cp0_epc = regs->regs[insn.r_format.rs]; break; } @@ -446,7 +446,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs, switch (insn.i_format.rt) { case bltzl_op: if (NO_R6EMU) - goto sigill_r6; + goto sigill_r2r6; case bltz_op: if ((long)regs->regs[insn.i_format.rs] < 0) { epc = epc + 4 + (insn.i_format.simmediate << 2); @@ -459,7 +459,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs, case bgezl_op: if (NO_R6EMU) - goto sigill_r6; + goto sigill_r2r6; case bgez_op: if ((long)regs->regs[insn.i_format.rs] >= 0) { epc = epc + 4 + (insn.i_format.simmediate << 2); @@ -473,10 +473,8 @@ int __compute_return_epc_for_insn(struct pt_regs *regs, case bltzal_op: case bltzall_op: if (NO_R6EMU && (insn.i_format.rs || - insn.i_format.rt == bltzall_op)) { - ret = -SIGILL; - break; - } + insn.i_format.rt == bltzall_op)) + goto sigill_r2r6; regs->regs[31] = epc + 8; /* * OK we are here either because we hit a NAL @@ -507,10 +505,8 @@ int __compute_return_epc_for_insn(struct pt_regs *regs, case bgezal_op: case bgezall_op: if (NO_R6EMU && (insn.i_format.rs || - insn.i_format.rt == bgezall_op)) { - ret = -SIGILL; - break; - } + insn.i_format.rt == bgezall_op)) + goto sigill_r2r6; regs->regs[31] = epc + 8; /* * OK we are here either because we hit a BAL @@ -556,6 +552,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs, /* * These are unconditional and in j_format. */ + case jalx_op: case jal_op: regs->regs[31] = regs->cp0_epc + 8; case j_op: @@ -573,7 +570,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs, */ case beql_op: if (NO_R6EMU) - goto sigill_r6; + goto sigill_r2r6; case beq_op: if (regs->regs[insn.i_format.rs] == regs->regs[insn.i_format.rt]) { @@ -587,7 +584,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs, case bnel_op: if (NO_R6EMU) - goto sigill_r6; + goto sigill_r2r6; case bne_op: if (regs->regs[insn.i_format.rs] != regs->regs[insn.i_format.rt]) { @@ -601,7 +598,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs, case blezl_op: /* not really i_format */ if (!insn.i_format.rt && NO_R6EMU) - goto sigill_r6; + goto sigill_r2r6; case blez_op: /* * Compact branches for R6 for the @@ -636,7 +633,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs, case bgtzl_op: if (!insn.i_format.rt && NO_R6EMU) - goto sigill_r6; + goto sigill_r2r6; case bgtz_op: /* * Compact branches for R6 for the @@ -816,8 +813,10 @@ int __compute_return_epc_for_insn(struct pt_regs *regs, break; } /* Compact branch: BNEZC || JIALC */ - if (insn.i_format.rs) + if (!insn.i_format.rs) { + /* JIALC: set $31/ra */ regs->regs[31] = epc + 4; + } regs->cp0_epc += 8; break; #endif @@ -841,11 +840,12 @@ int __compute_return_epc_for_insn(struct pt_regs *regs, return ret; sigill_dsp: - printk("%s: DSP branch but not DSP ASE - sending SIGBUS.\n", current->comm); - force_sig(SIGBUS, current); + pr_info("%s: DSP branch but not DSP ASE - sending SIGILL.\n", + current->comm); + force_sig(SIGILL, current); return -EFAULT; -sigill_r6: - pr_info("%s: R2 branch but r2-to-r6 emulator is not preset - sending SIGILL.\n", +sigill_r2r6: + pr_info("%s: R2 branch but r2-to-r6 emulator is not present - sending SIGILL.\n", current->comm); force_sig(SIGILL, current); return -EFAULT; diff --git a/arch/mips/kernel/entry.S b/arch/mips/kernel/entry.S index 7791840cf22c..db07793f7b43 100644 --- 
a/arch/mips/kernel/entry.S +++ b/arch/mips/kernel/entry.S @@ -11,6 +11,7 @@ #include #include #include +#include #include #include #include @@ -137,6 +138,7 @@ work_pending: andi t0, a2, _TIF_NEED_RESCHED # a2 is preloaded with TI_FLAGS beqz t0, work_notifysig work_resched: + TRACE_IRQS_OFF jal schedule local_irq_disable # make sure need_resched and @@ -173,6 +175,7 @@ syscall_exit_work: beqz t0, work_pending # trace bit set? local_irq_enable # could let syscall_trace_leave() # call schedule() instead + TRACE_IRQS_ON move a0, sp jal syscall_trace_leave b resume_userspace diff --git a/arch/mips/kernel/pm-cps.c b/arch/mips/kernel/pm-cps.c index f63a289977cc..0b3e58a3189f 100644 --- a/arch/mips/kernel/pm-cps.c +++ b/arch/mips/kernel/pm-cps.c @@ -55,7 +55,6 @@ DECLARE_BITMAP(state_support, CPS_PM_STATE_COUNT); * state. Actually per-core rather than per-CPU. */ static DEFINE_PER_CPU_ALIGNED(u32*, ready_count); -static DEFINE_PER_CPU_ALIGNED(void*, ready_count_alloc); /* Indicates online CPUs coupled with the current CPU */ static DEFINE_PER_CPU_ALIGNED(cpumask_t, online_coupled); @@ -625,7 +624,6 @@ static int __init cps_gen_core_entries(unsigned cpu) { enum cps_pm_state state; unsigned core = cpu_data[cpu].core; - unsigned dlinesz = cpu_data[cpu].dcache.linesz; void *entry_fn, *core_rc; for (state = CPS_PM_NC_WAIT; state < CPS_PM_STATE_COUNT; state++) { @@ -645,16 +643,11 @@ static int __init cps_gen_core_entries(unsigned cpu) } if (!per_cpu(ready_count, core)) { - core_rc = kmalloc(dlinesz * 2, GFP_KERNEL); + core_rc = kmalloc(sizeof(u32), GFP_KERNEL); if (!core_rc) { pr_err("Failed allocate core %u ready_count\n", core); return -ENOMEM; } - per_cpu(ready_count_alloc, core) = core_rc; - - /* Ensure ready_count is aligned to a cacheline boundary */ - core_rc += dlinesz - 1; - core_rc = (void *)((unsigned long)core_rc & ~(dlinesz - 1)); per_cpu(ready_count, core) = core_rc; } diff --git a/arch/mips/kernel/proc.c b/arch/mips/kernel/proc.c index 298b2b773d12..f1fab6ff53e6 100644 --- a/arch/mips/kernel/proc.c +++ b/arch/mips/kernel/proc.c @@ -83,7 +83,7 @@ static int show_cpuinfo(struct seq_file *m, void *v) } seq_printf(m, "isa\t\t\t:"); - if (cpu_has_mips_r1) + if (cpu_has_mips_1) seq_printf(m, " mips1"); if (cpu_has_mips_2) seq_printf(m, "%s", " mips2"); diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c index c95bf18260f8..24c115a0721a 100644 --- a/arch/mips/kernel/ptrace.c +++ b/arch/mips/kernel/ptrace.c @@ -927,7 +927,7 @@ asmlinkage void syscall_trace_leave(struct pt_regs *regs) audit_syscall_exit(regs); if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT))) - trace_sys_exit(regs, regs->regs[2]); + trace_sys_exit(regs, regs_return_value(regs)); if (test_thread_flag(TIF_SYSCALL_TRACE)) tracehook_report_syscall_exit(regs, 0); diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S index 2d23c834ba96..29b0c5f978e4 100644 --- a/arch/mips/kernel/scall32-o32.S +++ b/arch/mips/kernel/scall32-o32.S @@ -372,7 +372,7 @@ EXPORT(sys_call_table) PTR sys_writev PTR sys_cacheflush PTR sys_cachectl - PTR sys_sysmips + PTR __sys_sysmips PTR sys_ni_syscall /* 4150 */ PTR sys_getsid PTR sys_fdatasync diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S index deac63315d0e..a6323a969919 100644 --- a/arch/mips/kernel/scall64-64.S +++ b/arch/mips/kernel/scall64-64.S @@ -312,7 +312,7 @@ EXPORT(sys_call_table) PTR sys_sched_getaffinity PTR sys_cacheflush PTR sys_cachectl - PTR sys_sysmips + PTR __sys_sysmips PTR sys_io_setup /* 5200 */ PTR sys_io_destroy 
PTR sys_io_getevents diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S index ee93d5fe61d7..e0fdca8d3abe 100644 --- a/arch/mips/kernel/scall64-n32.S +++ b/arch/mips/kernel/scall64-n32.S @@ -298,7 +298,7 @@ EXPORT(sysn32_call_table) PTR compat_sys_sched_getaffinity PTR sys_cacheflush PTR sys_cachectl - PTR sys_sysmips + PTR __sys_sysmips PTR compat_sys_io_setup /* 6200 */ PTR sys_io_destroy PTR compat_sys_io_getevents diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S index b77052ec6fb2..87c697181d25 100644 --- a/arch/mips/kernel/scall64-o32.S +++ b/arch/mips/kernel/scall64-o32.S @@ -367,7 +367,7 @@ EXPORT(sys32_call_table) PTR compat_sys_writev PTR sys_cacheflush PTR sys_cachectl - PTR sys_sysmips + PTR __sys_sysmips PTR sys_ni_syscall /* 4150 */ PTR sys_getsid PTR sys_fdatasync diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c index 53a7ef9a8f32..4234b2d726c5 100644 --- a/arch/mips/kernel/syscall.c +++ b/arch/mips/kernel/syscall.c @@ -28,6 +28,7 @@ #include #include +#include #include #include #include @@ -138,10 +139,12 @@ static inline int mips_atomic_set(unsigned long addr, unsigned long new) __asm__ __volatile__ ( " .set "MIPS_ISA_ARCH_LEVEL" \n" " li %[err], 0 \n" - "1: ll %[old], (%[addr]) \n" + "1: \n" + user_ll("%[old]", "(%[addr])") " move %[tmp], %[new] \n" - "2: sc %[tmp], (%[addr]) \n" - " bnez %[tmp], 4f \n" + "2: \n" + user_sc("%[tmp]", "(%[addr])") + " beqz %[tmp], 4f \n" "3: \n" " .insn \n" " .subsection 2 \n" @@ -199,6 +202,12 @@ static inline int mips_atomic_set(unsigned long addr, unsigned long new) unreachable(); } +/* + * mips_atomic_set() normally returns directly via syscall_exit potentially + * clobbering static registers, so be sure to preserve them. + */ +save_static_function(sys_sysmips); + SYSCALL_DEFINE3(sysmips, long, cmd, long, arg1, long, arg2) { switch (cmd) { diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c index 99a402231f4d..31ca2edd7218 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c @@ -194,6 +194,8 @@ void show_stack(struct task_struct *task, unsigned long *sp) { struct pt_regs regs; mm_segment_t old_fs = get_fs(); + + regs.cp0_status = KSU_KERNEL; if (sp) { regs.regs[29] = (unsigned long)sp; regs.regs[31] = 0; diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c index 734a2c7665ec..6da2e4a6ba39 100644 --- a/arch/mips/math-emu/cp1emu.c +++ b/arch/mips/math-emu/cp1emu.c @@ -2496,6 +2496,35 @@ dcopuop: return 0; } +/* + * Emulate FPU instructions. + * + * If we use FPU hardware, then we have been typically called to handle + * an unimplemented operation, such as where an operand is a NaN or + * denormalized. In that case exit the emulation loop after a single + * iteration so as to let hardware execute any subsequent instructions. + * + * If we have no FPU hardware or it has been disabled, then continue + * emulating floating-point instructions until one of these conditions + * has occurred: + * + * - a non-FPU instruction has been encountered, + * + * - an attempt to emulate has ended with a signal, + * + * - the ISA mode has been switched. + * + * We need to terminate the emulation loop if we got switched to the + * MIPS16 mode, whether supported or not, so that we do not attempt + * to emulate a MIPS16 instruction as a regular MIPS FPU instruction. 
+ * Similarly if we got switched to the microMIPS mode and only the + * regular MIPS mode is supported, so that we do not attempt to emulate + * a microMIPS instruction as a regular MIPS FPU instruction. Or if + * we got switched to the regular MIPS mode and only the microMIPS mode + * is supported, so that we do not attempt to emulate a regular MIPS + * instruction that should cause an Address Error exception instead. + * For simplicity we always terminate upon an ISA mode switch. + */ int fpu_emulator_cop1Handler(struct pt_regs *xcp, struct mips_fpu_struct *ctx, int has_fpu, void *__user *fault_addr) { @@ -2581,6 +2610,15 @@ int fpu_emulator_cop1Handler(struct pt_regs *xcp, struct mips_fpu_struct *ctx, break; if (sig) break; + /* + * We have to check for the ISA bit explicitly here, + * because `get_isa16_mode' may return 0 if support + * for code compression has been globally disabled, + * or otherwise we may produce the wrong signal or + * even proceed successfully where we must not. + */ + if ((xcp->cp0_epc ^ prevepc) & 0x1) + break; cond_resched(); } while (xcp->cp0_epc > prevepc); diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c index 353037699512..c5fdea5debe5 100644 --- a/arch/mips/mm/mmap.c +++ b/arch/mips/mm/mmap.c @@ -92,7 +92,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp, vma = find_vma(mm, addr); if (TASK_SIZE - len >= addr && - (!vma || addr + len <= vma->vm_start)) + (!vma || addr + len <= vm_start_gap(vma))) return addr; } diff --git a/arch/mips/ralink/mt7620.c b/arch/mips/ralink/mt7620.c index dfb04fcedb04..48d6349fd9d7 100644 --- a/arch/mips/ralink/mt7620.c +++ b/arch/mips/ralink/mt7620.c @@ -107,31 +107,31 @@ static struct rt2880_pmx_group mt7620a_pinmux_data[] = { }; static struct rt2880_pmx_func pwm1_grp_mt7628[] = { - FUNC("sdcx", 3, 19, 1), + FUNC("sdxc d6", 3, 19, 1), FUNC("utif", 2, 19, 1), FUNC("gpio", 1, 19, 1), - FUNC("pwm", 0, 19, 1), + FUNC("pwm1", 0, 19, 1), }; static struct rt2880_pmx_func pwm0_grp_mt7628[] = { - FUNC("sdcx", 3, 18, 1), + FUNC("sdxc d7", 3, 18, 1), FUNC("utif", 2, 18, 1), FUNC("gpio", 1, 18, 1), - FUNC("pwm", 0, 18, 1), + FUNC("pwm0", 0, 18, 1), }; static struct rt2880_pmx_func uart2_grp_mt7628[] = { - FUNC("sdcx", 3, 20, 2), + FUNC("sdxc d5 d4", 3, 20, 2), FUNC("pwm", 2, 20, 2), FUNC("gpio", 1, 20, 2), - FUNC("uart", 0, 20, 2), + FUNC("uart2", 0, 20, 2), }; static struct rt2880_pmx_func uart1_grp_mt7628[] = { - FUNC("sdcx", 3, 45, 2), + FUNC("sw_r", 3, 45, 2), FUNC("pwm", 2, 45, 2), FUNC("gpio", 1, 45, 2), - FUNC("uart", 0, 45, 2), + FUNC("uart1", 0, 45, 2), }; static struct rt2880_pmx_func i2c_grp_mt7628[] = { @@ -143,21 +143,21 @@ static struct rt2880_pmx_func i2c_grp_mt7628[] = { static struct rt2880_pmx_func refclk_grp_mt7628[] = { FUNC("reclk", 0, 36, 1) }; static struct rt2880_pmx_func perst_grp_mt7628[] = { FUNC("perst", 0, 37, 1) }; -static struct rt2880_pmx_func wdt_grp_mt7628[] = { FUNC("wdt", 0, 15, 38) }; +static struct rt2880_pmx_func wdt_grp_mt7628[] = { FUNC("wdt", 0, 38, 1) }; static struct rt2880_pmx_func spi_grp_mt7628[] = { FUNC("spi", 0, 7, 4) }; static struct rt2880_pmx_func sd_mode_grp_mt7628[] = { FUNC("jtag", 3, 22, 8), FUNC("utif", 2, 22, 8), FUNC("gpio", 1, 22, 8), - FUNC("sdcx", 0, 22, 8), + FUNC("sdxc", 0, 22, 8), }; static struct rt2880_pmx_func uart0_grp_mt7628[] = { FUNC("-", 3, 12, 2), FUNC("-", 2, 12, 2), FUNC("gpio", 1, 12, 2), - FUNC("uart", 0, 12, 2), + FUNC("uart0", 0, 12, 2), }; static struct rt2880_pmx_func i2s_grp_mt7628[] = { @@ -171,7 +171,7 @@ static struct 
rt2880_pmx_func spi_cs1_grp_mt7628[] = { FUNC("-", 3, 6, 1), FUNC("refclk", 2, 6, 1), FUNC("gpio", 1, 6, 1), - FUNC("spi", 0, 6, 1), + FUNC("spi cs1", 0, 6, 1), }; static struct rt2880_pmx_func spis_grp_mt7628[] = { @@ -188,28 +188,44 @@ static struct rt2880_pmx_func gpio_grp_mt7628[] = { FUNC("gpio", 0, 11, 1), }; -#define MT7628_GPIO_MODE_MASK 0x3 +static struct rt2880_pmx_func wled_kn_grp_mt7628[] = { + FUNC("rsvd", 3, 35, 1), + FUNC("rsvd", 2, 35, 1), + FUNC("gpio", 1, 35, 1), + FUNC("wled_kn", 0, 35, 1), +}; -#define MT7628_GPIO_MODE_PWM1 30 -#define MT7628_GPIO_MODE_PWM0 28 -#define MT7628_GPIO_MODE_UART2 26 -#define MT7628_GPIO_MODE_UART1 24 -#define MT7628_GPIO_MODE_I2C 20 -#define MT7628_GPIO_MODE_REFCLK 18 -#define MT7628_GPIO_MODE_PERST 16 -#define MT7628_GPIO_MODE_WDT 14 -#define MT7628_GPIO_MODE_SPI 12 -#define MT7628_GPIO_MODE_SDMODE 10 -#define MT7628_GPIO_MODE_UART0 8 -#define MT7628_GPIO_MODE_I2S 6 -#define MT7628_GPIO_MODE_CS1 4 -#define MT7628_GPIO_MODE_SPIS 2 -#define MT7628_GPIO_MODE_GPIO 0 +static struct rt2880_pmx_func wled_an_grp_mt7628[] = { + FUNC("rsvd", 3, 44, 1), + FUNC("rsvd", 2, 44, 1), + FUNC("gpio", 1, 44, 1), + FUNC("wled_an", 0, 44, 1), +}; + +#define MT7628_GPIO_MODE_MASK 0x3 + +#define MT7628_GPIO_MODE_WLED_KN 48 +#define MT7628_GPIO_MODE_WLED_AN 32 +#define MT7628_GPIO_MODE_PWM1 30 +#define MT7628_GPIO_MODE_PWM0 28 +#define MT7628_GPIO_MODE_UART2 26 +#define MT7628_GPIO_MODE_UART1 24 +#define MT7628_GPIO_MODE_I2C 20 +#define MT7628_GPIO_MODE_REFCLK 18 +#define MT7628_GPIO_MODE_PERST 16 +#define MT7628_GPIO_MODE_WDT 14 +#define MT7628_GPIO_MODE_SPI 12 +#define MT7628_GPIO_MODE_SDMODE 10 +#define MT7628_GPIO_MODE_UART0 8 +#define MT7628_GPIO_MODE_I2S 6 +#define MT7628_GPIO_MODE_CS1 4 +#define MT7628_GPIO_MODE_SPIS 2 +#define MT7628_GPIO_MODE_GPIO 0 static struct rt2880_pmx_group mt7628an_pinmux_data[] = { - GRP_G("pmw1", pwm1_grp_mt7628, MT7628_GPIO_MODE_MASK, + GRP_G("pwm1", pwm1_grp_mt7628, MT7628_GPIO_MODE_MASK, 1, MT7628_GPIO_MODE_PWM1), - GRP_G("pmw1", pwm0_grp_mt7628, MT7628_GPIO_MODE_MASK, + GRP_G("pwm0", pwm0_grp_mt7628, MT7628_GPIO_MODE_MASK, 1, MT7628_GPIO_MODE_PWM0), GRP_G("uart2", uart2_grp_mt7628, MT7628_GPIO_MODE_MASK, 1, MT7628_GPIO_MODE_UART2), @@ -233,6 +249,10 @@ static struct rt2880_pmx_group mt7628an_pinmux_data[] = { 1, MT7628_GPIO_MODE_SPIS), GRP_G("gpio", gpio_grp_mt7628, MT7628_GPIO_MODE_MASK, 1, MT7628_GPIO_MODE_GPIO), + GRP_G("wled_an", wled_an_grp_mt7628, MT7628_GPIO_MODE_MASK, + 1, MT7628_GPIO_MODE_WLED_AN), + GRP_G("wled_kn", wled_kn_grp_mt7628, MT7628_GPIO_MODE_MASK, + 1, MT7628_GPIO_MODE_WLED_KN), { 0 } }; @@ -439,7 +459,7 @@ void __init ralink_clk_init(void) ralink_clk_add("10000c00.uartlite", periph_rate); ralink_clk_add("10180000.wmac", xtal_rate); - if (IS_ENABLED(CONFIG_USB) && is_mt76x8()) { + if (IS_ENABLED(CONFIG_USB) && !is_mt76x8()) { /* * When the CPU goes into sleep mode, the BUS clock will be * too low for USB to function properly. 
Adjust the busses diff --git a/arch/mips/ralink/rt288x.c b/arch/mips/ralink/rt288x.c index 15506a1ff22a..9dd67749c592 100644 --- a/arch/mips/ralink/rt288x.c +++ b/arch/mips/ralink/rt288x.c @@ -109,5 +109,5 @@ void prom_soc_init(struct ralink_soc_info *soc_info) soc_info->mem_size_max = RT2880_MEM_SIZE_MAX; rt2880_pinmux_data = rt2880_pinmux_data_act; - ralink_soc == RT2880_SOC; + ralink_soc = RT2880_SOC; } diff --git a/arch/openrisc/kernel/vmlinux.lds.S b/arch/openrisc/kernel/vmlinux.lds.S index 2d69a853b742..3a08b55609b6 100644 --- a/arch/openrisc/kernel/vmlinux.lds.S +++ b/arch/openrisc/kernel/vmlinux.lds.S @@ -38,6 +38,8 @@ SECTIONS /* Read-only sections, merged into text segment: */ . = LOAD_BASE ; + _text = .; + /* _s_kernel_ro must be page aligned */ . = ALIGN(PAGE_SIZE); _s_kernel_ro = .; diff --git a/arch/parisc/include/asm/dma-mapping.h b/arch/parisc/include/asm/dma-mapping.h index d8d60a57183f..f53725202955 100644 --- a/arch/parisc/include/asm/dma-mapping.h +++ b/arch/parisc/include/asm/dma-mapping.h @@ -39,6 +39,8 @@ struct hppa_dma_ops { ** flush/purge and allocate "regular" cacheable pages for everything. */ +#define DMA_ERROR_CODE (~(dma_addr_t)0) + #ifdef CONFIG_PA11 extern struct hppa_dma_ops pcxl_dma_ops; extern struct hppa_dma_ops pcx_dma_ops; @@ -209,12 +211,13 @@ parisc_walk_tree(struct device *dev) break; } } - BUG_ON(!dev->platform_data); return dev->platform_data; } - -#define GET_IOC(dev) (HBA_DATA(parisc_walk_tree(dev))->iommu) - + +#define GET_IOC(dev) ({ \ + void *__pdata = parisc_walk_tree(dev); \ + __pdata ? HBA_DATA(__pdata)->iommu : NULL; \ +}) #ifdef CONFIG_IOMMU_CCIO struct parisc_device; diff --git a/arch/parisc/include/asm/mmu_context.h b/arch/parisc/include/asm/mmu_context.h index 59be25764433..a81226257878 100644 --- a/arch/parisc/include/asm/mmu_context.h +++ b/arch/parisc/include/asm/mmu_context.h @@ -49,15 +49,26 @@ static inline void load_context(mm_context_t context) mtctl(__space_to_prot(context), 8); } -static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk) +static inline void switch_mm_irqs_off(struct mm_struct *prev, + struct mm_struct *next, struct task_struct *tsk) { - if (prev != next) { mtctl(__pa(next->pgd), 25); load_context(next->context); } } +static inline void switch_mm(struct mm_struct *prev, + struct mm_struct *next, struct task_struct *tsk) +{ + unsigned long flags; + + local_irq_save(flags); + switch_mm_irqs_off(prev, next, tsk); + local_irq_restore(flags); +} +#define switch_mm_irqs_off switch_mm_irqs_off + #define deactivate_mm(tsk,mm) do { } while (0) static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next) diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c index 5aba01ac457f..4dda73c44fee 100644 --- a/arch/parisc/kernel/sys_parisc.c +++ b/arch/parisc/kernel/sys_parisc.c @@ -88,7 +88,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags) { struct mm_struct *mm = current->mm; - struct vm_area_struct *vma; + struct vm_area_struct *vma, *prev; unsigned long task_size = TASK_SIZE; int do_color_align, last_mmap; struct vm_unmapped_area_info info; @@ -115,9 +115,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, else addr = PAGE_ALIGN(addr); - vma = find_vma(mm, addr); + vma = find_vma_prev(mm, addr, &prev); if (task_size - len >= addr && - (!vma || addr + len <= vma->vm_start)) + (!vma || addr + len <= vm_start_gap(vma)) 
&& + (!prev || addr >= vm_end_gap(prev))) goto found_addr; } @@ -141,7 +142,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, const unsigned long len, const unsigned long pgoff, const unsigned long flags) { - struct vm_area_struct *vma; + struct vm_area_struct *vma, *prev; struct mm_struct *mm = current->mm; unsigned long addr = addr0; int do_color_align, last_mmap; @@ -175,9 +176,11 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, addr = COLOR_ALIGN(addr, last_mmap, pgoff); else addr = PAGE_ALIGN(addr); - vma = find_vma(mm, addr); + + vma = find_vma_prev(mm, addr, &prev); if (TASK_SIZE - len >= addr && - (!vma || addr + len <= vma->vm_start)) + (!vma || addr + len <= vm_start_gap(vma)) && + (!prev || addr >= vm_end_gap(prev))) goto found_addr; } diff --git a/arch/parisc/kernel/syscall_table.S b/arch/parisc/kernel/syscall_table.S index d4ffcfbc9885..041e1f9ec129 100644 --- a/arch/parisc/kernel/syscall_table.S +++ b/arch/parisc/kernel/syscall_table.S @@ -361,7 +361,7 @@ ENTRY_SAME(ni_syscall) /* 263: reserved for vserver */ ENTRY_SAME(add_key) ENTRY_SAME(request_key) /* 265 */ - ENTRY_SAME(keyctl) + ENTRY_COMP(keyctl) ENTRY_SAME(ioprio_set) ENTRY_SAME(ioprio_get) ENTRY_SAME(inotify_init) diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c index 16dbe81c97c9..2f33a67bc531 100644 --- a/arch/parisc/mm/fault.c +++ b/arch/parisc/mm/fault.c @@ -298,7 +298,7 @@ bad_area: case 15: /* Data TLB miss fault/Data page fault */ /* send SIGSEGV when outside of vma */ if (!vma || - address < vma->vm_start || address > vma->vm_end) { + address < vma->vm_start || address >= vma->vm_end) { si.si_signo = SIGSEGV; si.si_code = SEGV_MAPERR; break; diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h index 55f106ed12bf..039c4b910615 100644 --- a/arch/powerpc/include/asm/atomic.h +++ b/arch/powerpc/include/asm/atomic.h @@ -460,7 +460,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u) * Atomically increments @v by 1, so long as @v is non-zero. * Returns non-zero if @v was non-zero, and zero otherwise. */ -static __inline__ long atomic64_inc_not_zero(atomic64_t *v) +static __inline__ int atomic64_inc_not_zero(atomic64_t *v) { long t1, t2; @@ -479,7 +479,7 @@ static __inline__ long atomic64_inc_not_zero(atomic64_t *v) : "r" (&v->counter) : "cc", "xer", "memory"); - return t1; + return t1 != 0; } #endif /* __powerpc64__ */ diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h index ee46ffef608e..743ad7a400d6 100644 --- a/arch/powerpc/include/asm/elf.h +++ b/arch/powerpc/include/asm/elf.h @@ -23,12 +23,13 @@ #define CORE_DUMP_USE_REGSET #define ELF_EXEC_PAGESIZE PAGE_SIZE -/* This is the location that an ET_DYN program is loaded if exec'ed. Typical - use of this is to invoke "./ld.so someprog" to test out a new version of - the loader. We need to make sure that it is out of the way of the program - that it will "exec", and that there is sufficient room for the brk. */ - -#define ELF_ET_DYN_BASE 0x20000000 +/* + * This is the base location for PIE (ET_DYN with INTERP) loads. On + * 64-bit, this is raised to 4GB to leave the entire 32-bit address + * space open for things that want to use the area for 32-bit pointers. + */ +#define ELF_ET_DYN_BASE (is_32bit_task() ? 0x000400000UL : \ + 0x100000000UL) #define ELF_CORE_EFLAGS (is_elf2_task() ? 
2 : 0) diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h index 627d129d7fcb..ca372bbc0ffe 100644 --- a/arch/powerpc/include/asm/reg.h +++ b/arch/powerpc/include/asm/reg.h @@ -1236,7 +1236,7 @@ static inline unsigned long mfvtb (void) " .llong 0\n" \ ".previous" \ : "=r" (rval) \ - : "i" (CPU_FTR_CELL_TB_BUG), "i" (SPRN_TBRL)); \ + : "i" (CPU_FTR_CELL_TB_BUG), "i" (SPRN_TBRL) : "cr0"); \ rval;}) #else #define mftb() ({unsigned long rval; \ diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c index 98949b0df00a..6696c1986844 100644 --- a/arch/powerpc/kernel/eeh.c +++ b/arch/powerpc/kernel/eeh.c @@ -304,9 +304,17 @@ void eeh_slot_error_detail(struct eeh_pe *pe, int severity) * * For pHyp, we have to enable IO for log retrieval. Otherwise, * 0xFF's is always returned from PCI config space. + * + * When the @severity is EEH_LOG_PERM, the PE is going to be + * removed. Prior to that, the drivers for devices included in + * the PE will be closed. The drivers rely on working IO path + * to bring the devices to quiet state. Otherwise, PCI traffic + * from those devices after they are removed is likely to cause + * another unexpected EEH error. */ if (!(pe->type & EEH_PE_PHB)) { - if (eeh_has_flag(EEH_ENABLE_IO_FOR_LOG)) + if (eeh_has_flag(EEH_ENABLE_IO_FOR_LOG) || + severity == EEH_LOG_PERM) eeh_pci_enable(pe, EEH_OPT_THAW_MMIO); /* diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c index c314db8b798c..9837c98caabe 100644 --- a/arch/powerpc/kernel/eeh_driver.c +++ b/arch/powerpc/kernel/eeh_driver.c @@ -655,7 +655,7 @@ static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus) */ #define MAX_WAIT_FOR_RECOVERY 300 -static void eeh_handle_normal_event(struct eeh_pe *pe) +static bool eeh_handle_normal_event(struct eeh_pe *pe) { struct pci_bus *frozen_bus; int rc = 0; @@ -665,7 +665,7 @@ static void eeh_handle_normal_event(struct eeh_pe *pe) if (!frozen_bus) { pr_err("%s: Cannot find PCI bus for PHB#%d-PE#%x\n", __func__, pe->phb->global_number, pe->addr); - return; + return false; } eeh_pe_update_time_stamp(pe); @@ -790,7 +790,7 @@ static void eeh_handle_normal_event(struct eeh_pe *pe) pr_info("EEH: Notify device driver to resume\n"); eeh_pe_dev_traverse(pe, eeh_report_resume, NULL); - return; + return false; excess_failures: /* @@ -831,7 +831,11 @@ perm_error: pci_lock_rescan_remove(); pcibios_remove_pci_devices(frozen_bus); pci_unlock_rescan_remove(); + + /* The passed PE should no longer be used */ + return true; } + return false; } static void eeh_handle_special_event(void) @@ -897,7 +901,14 @@ static void eeh_handle_special_event(void) */ if (rc == EEH_NEXT_ERR_FROZEN_PE || rc == EEH_NEXT_ERR_FENCED_PHB) { - eeh_handle_normal_event(pe); + /* + * eeh_handle_normal_event() can make the PE stale if it + * determines that the PE cannot possibly be recovered. + * Don't modify the PE state if that's the case. + */ + if (eeh_handle_normal_event(pe)) + continue; + eeh_pe_state_clear(pe, EEH_PE_RECOVERING); } else { pci_lock_rescan_remove(); diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c index 7c053f281406..1138fec3dd65 100644 --- a/arch/powerpc/kernel/kprobes.c +++ b/arch/powerpc/kernel/kprobes.c @@ -514,6 +514,15 @@ int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs) #endif #endif + /* + * jprobes use jprobe_return() which skips the normal return + * path of the function, and this messes up the accounting of the + * function graph tracer. 
+ * + * Pause function graph tracing while performing the jprobe function. + */ + pause_graph_tracing(); + return 1; } @@ -536,6 +545,8 @@ int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs) * saved regs... */ memcpy(regs, &kcb->jprobe_saved_regs, sizeof(struct pt_regs)); + /* It's OK to start function graph tracing again */ + unpause_graph_tracing(); preempt_enable_no_resched(); return 1; } diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index 3c3a367b6e59..428563b195c3 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -2687,12 +2687,38 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu) { int r; int srcu_idx; + unsigned long ebb_regs[3] = {}; /* shut up GCC */ + unsigned long user_tar = 0; + unsigned long proc_fscr = 0; + unsigned int user_vrsave; if (!vcpu->arch.sane) { run->exit_reason = KVM_EXIT_INTERNAL_ERROR; return -EINVAL; } + /* + * Don't allow entry with a suspended transaction, because + * the guest entry/exit code will lose it. + * If the guest has TM enabled, save away their TM-related SPRs + * (they will get restored by the TM unavailable interrupt). + */ +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM + if (cpu_has_feature(CPU_FTR_TM) && current->thread.regs && + (current->thread.regs->msr & MSR_TM)) { + if (MSR_TM_ACTIVE(current->thread.regs->msr)) { + run->exit_reason = KVM_EXIT_FAIL_ENTRY; + run->fail_entry.hardware_entry_failure_reason = 0; + return -EINVAL; + } + /* Enable TM so we can read the TM SPRs */ + mtmsr(mfmsr() | MSR_TM); + current->thread.tm_tfhar = mfspr(SPRN_TFHAR); + current->thread.tm_tfiar = mfspr(SPRN_TFIAR); + current->thread.tm_texasr = mfspr(SPRN_TEXASR); + } +#endif + kvmppc_core_prepare_to_enter(vcpu); /* No need to go into the guest when all we'll do is come back out */ @@ -2715,6 +2741,17 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu) flush_fp_to_thread(current); flush_altivec_to_thread(current); flush_vsx_to_thread(current); + + /* Save userspace EBB and other register values */ + if (cpu_has_feature(CPU_FTR_ARCH_207S)) { + ebb_regs[0] = mfspr(SPRN_EBBHR); + ebb_regs[1] = mfspr(SPRN_EBBRR); + ebb_regs[2] = mfspr(SPRN_BESCR); + user_tar = mfspr(SPRN_TAR); + proc_fscr = mfspr(SPRN_FSCR); + } + user_vrsave = mfspr(SPRN_VRSAVE); + vcpu->arch.wqp = &vcpu->arch.vcore->wq; vcpu->arch.pgdir = current->mm->pgd; vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST; @@ -2736,6 +2773,29 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu) } } while (is_kvmppc_resume_guest(r)); + /* Restore userspace EBB and other register values */ + if (cpu_has_feature(CPU_FTR_ARCH_207S)) { + mtspr(SPRN_EBBHR, ebb_regs[0]); + mtspr(SPRN_EBBRR, ebb_regs[1]); + mtspr(SPRN_BESCR, ebb_regs[2]); + mtspr(SPRN_TAR, user_tar); + mtspr(SPRN_FSCR, proc_fscr); + } + mtspr(SPRN_VRSAVE, user_vrsave); + + /* + * Since we don't do lazy TM reload, we need to reload + * the TM registers here. 
+ */ +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM + if (cpu_has_feature(CPU_FTR_TM) && current->thread.regs && + (current->thread.regs->msr & MSR_TM)) { + mtspr(SPRN_TFHAR, current->thread.tm_tfhar); + mtspr(SPRN_TFIAR, current->thread.tm_tfiar); + mtspr(SPRN_TEXASR, current->thread.tm_texasr); + } +#endif + out: vcpu->arch.state = KVMPPC_VCPU_NOTREADY; atomic_dec(&vcpu->kvm->arch.vcpus_running); diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S index 1a743f87b37d..ffab9269bfe4 100644 --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S @@ -36,6 +36,13 @@ #define NAPPING_CEDE 1 #define NAPPING_NOVCPU 2 +/* Stack frame offsets for kvmppc_hv_entry */ +#define SFS 112 +#define STACK_SLOT_TRAP (SFS-4) +#define STACK_SLOT_CIABR (SFS-16) +#define STACK_SLOT_DAWR (SFS-24) +#define STACK_SLOT_DAWRX (SFS-32) + /* * Call kvmppc_hv_entry in real mode. * Must be called with interrupts hard-disabled. @@ -274,10 +281,10 @@ kvm_novcpu_exit: bl kvmhv_accumulate_time #endif 13: mr r3, r12 - stw r12, 112-4(r1) + stw r12, STACK_SLOT_TRAP(r1) bl kvmhv_commence_exit nop - lwz r12, 112-4(r1) + lwz r12, STACK_SLOT_TRAP(r1) b kvmhv_switch_to_host /* @@ -489,7 +496,7 @@ kvmppc_hv_entry: */ mflr r0 std r0, PPC_LR_STKOFF(r1) - stdu r1, -112(r1) + stdu r1, -SFS(r1) /* Save R1 in the PACA */ std r1, HSTATE_HOST_R1(r13) @@ -643,6 +650,16 @@ kvmppc_got_guest: mtspr SPRN_PURR,r7 mtspr SPRN_SPURR,r8 + /* Save host values of some registers */ +BEGIN_FTR_SECTION + mfspr r5, SPRN_CIABR + mfspr r6, SPRN_DAWR + mfspr r7, SPRN_DAWRX + std r5, STACK_SLOT_CIABR(r1) + std r6, STACK_SLOT_DAWR(r1) + std r7, STACK_SLOT_DAWRX(r1) +END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) + BEGIN_FTR_SECTION /* Set partition DABR */ /* Do this before re-enabling PMU to avoid P7 DABR corruption bug */ @@ -1266,8 +1283,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S) */ li r0, 0 mtspr SPRN_IAMR, r0 - mtspr SPRN_CIABR, r0 - mtspr SPRN_DAWRX, r0 + mtspr SPRN_PSPB, r0 mtspr SPRN_TCSCR, r0 mtspr SPRN_WORT, r0 /* Set MMCRS to 1<<31 to freeze and disable the SPMC counters */ @@ -1283,6 +1299,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S) std r6,VCPU_UAMOR(r9) li r6,0 mtspr SPRN_AMR,r6 + mtspr SPRN_UAMOR, r6 /* Switch DSCR back to host value */ mfspr r8, SPRN_DSCR @@ -1424,6 +1441,16 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) slbia ptesync + /* Restore host values of some registers */ +BEGIN_FTR_SECTION + ld r5, STACK_SLOT_CIABR(r1) + ld r6, STACK_SLOT_DAWR(r1) + ld r7, STACK_SLOT_DAWRX(r1) + mtspr SPRN_CIABR, r5 + mtspr SPRN_DAWR, r6 + mtspr SPRN_DAWRX, r7 +END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) + /* * POWER7/POWER8 guest -> host partition switch code. 
* We don't have to lock against tlbies but we do @@ -1533,8 +1560,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) li r0, KVM_GUEST_MODE_NONE stb r0, HSTATE_IN_GUEST(r13) - ld r0, 112+PPC_LR_STKOFF(r1) - addi r1, r1, 112 + ld r0, SFS+PPC_LR_STKOFF(r1) + addi r1, r1, SFS mtlr r0 blr diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c index 4014881e9843..e37162d356d8 100644 --- a/arch/powerpc/lib/sstep.c +++ b/arch/powerpc/lib/sstep.c @@ -687,8 +687,10 @@ int __kprobes analyse_instr(struct instruction_op *op, struct pt_regs *regs, case 19: switch ((instr >> 1) & 0x3ff) { case 0: /* mcrf */ - rd = (instr >> 21) & 0x1c; - ra = (instr >> 16) & 0x1c; + rd = 7 - ((instr >> 23) & 0x7); + ra = 7 - ((instr >> 18) & 0x7); + rd *= 4; + ra *= 4; val = (regs->ccr >> ra) & 0xf; regs->ccr = (regs->ccr & ~(0xfUL << rd)) | (val << rd); goto instr_done; @@ -967,6 +969,19 @@ int __kprobes analyse_instr(struct instruction_op *op, struct pt_regs *regs, #endif case 19: /* mfcr */ + if ((instr >> 20) & 1) { + imm = 0xf0000000UL; + for (sh = 0; sh < 8; ++sh) { + if (instr & (0x80000 >> sh)) { + regs->gpr[rd] = regs->ccr & imm; + break; + } + imm >>= 4; + } + + goto instr_done; + } + regs->gpr[rd] = regs->ccr; regs->gpr[rd] &= 0xffffffffUL; goto instr_done; diff --git a/arch/powerpc/mm/slb_low.S b/arch/powerpc/mm/slb_low.S index 4c48b487698c..0b48ce40d351 100644 --- a/arch/powerpc/mm/slb_low.S +++ b/arch/powerpc/mm/slb_low.S @@ -179,6 +179,16 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT) b slb_finish_load 8: /* invalid EA */ + /* + * It's possible the bad EA is too large to fit in the SLB cache, which + * would mean we'd fail to invalidate it on context switch. So mark the + * SLB cache as full so we force a full flush. We also set cr7+eq to + * mark the address as a kernel address, so slb_finish_load() skips + * trying to insert it into the SLB cache. 
+ */ + li r9,SLB_CACHE_ENTRIES + 1 + sth r9,PACASLBCACHEPTR(r13) + crset 4*cr7+eq li r10,0 /* BAD_VSID */ li r9,0 /* BAD_VSID */ li r11,SLB_VSID_USER /* flags don't much matter */ diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c index 0f432a702870..6ad12b244770 100644 --- a/arch/powerpc/mm/slice.c +++ b/arch/powerpc/mm/slice.c @@ -105,7 +105,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr, if ((mm->task_size - len) < addr) return 0; vma = find_vma(mm, addr); - return (!vma || (addr + len) <= vma->vm_start); + return (!vma || (addr + len) <= vm_start_gap(vma)); } static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice) diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c index e9ff44cd5d86..e8b1027e1b5b 100644 --- a/arch/powerpc/platforms/pseries/hotplug-memory.c +++ b/arch/powerpc/platforms/pseries/hotplug-memory.c @@ -110,6 +110,7 @@ static struct property *dlpar_clone_drconf_property(struct device_node *dn) for (i = 0; i < num_lmbs; i++) { lmbs[i].base_addr = be64_to_cpu(lmbs[i].base_addr); lmbs[i].drc_index = be32_to_cpu(lmbs[i].drc_index); + lmbs[i].aa_index = be32_to_cpu(lmbs[i].aa_index); lmbs[i].flags = be32_to_cpu(lmbs[i].flags); } @@ -553,6 +554,7 @@ static void dlpar_update_drconf_property(struct device_node *dn, for (i = 0; i < num_lmbs; i++) { lmbs[i].base_addr = cpu_to_be64(lmbs[i].base_addr); lmbs[i].drc_index = cpu_to_be32(lmbs[i].drc_index); + lmbs[i].aa_index = cpu_to_be32(lmbs[i].aa_index); lmbs[i].flags = cpu_to_be32(lmbs[i].flags); } diff --git a/arch/powerpc/platforms/pseries/reconfig.c b/arch/powerpc/platforms/pseries/reconfig.c index 7c7fcc042549..fb695f142563 100644 --- a/arch/powerpc/platforms/pseries/reconfig.c +++ b/arch/powerpc/platforms/pseries/reconfig.c @@ -82,7 +82,6 @@ static int pSeries_reconfig_remove_node(struct device_node *np) of_detach_node(np); of_node_put(parent); - of_node_put(np); /* Must decrement the refcount */ return 0; } diff --git a/arch/s390/include/asm/ctl_reg.h b/arch/s390/include/asm/ctl_reg.h index d7697ab802f6..8e136b88cdf4 100644 --- a/arch/s390/include/asm/ctl_reg.h +++ b/arch/s390/include/asm/ctl_reg.h @@ -15,7 +15,9 @@ BUILD_BUG_ON(sizeof(addrtype) != (high - low + 1) * sizeof(long));\ asm volatile( \ " lctlg %1,%2,%0\n" \ - : : "Q" (*(addrtype *)(&array)), "i" (low), "i" (high));\ + : \ + : "Q" (*(addrtype *)(&array)), "i" (low), "i" (high) \ + : "memory"); \ } #define __ctl_store(array, low, high) { \ diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h index bab6739a1154..b9eb7b1a49d2 100644 --- a/arch/s390/include/asm/elf.h +++ b/arch/s390/include/asm/elf.h @@ -154,14 +154,13 @@ extern unsigned int vdso_enabled; #define CORE_DUMP_USE_REGSET #define ELF_EXEC_PAGESIZE 4096 -/* This is the location that an ET_DYN program is loaded if exec'ed. Typical - use of this is to invoke "./ld.so someprog" to test out a new version of - the loader. We need to make sure that it is out of the way of the program - that it will "exec", and that there is sufficient room for the brk. 64-bit - tasks are aligned to 4GB. */ -#define ELF_ET_DYN_BASE (is_32bit_task() ? \ - (STACK_TOP / 3 * 2) : \ - (STACK_TOP / 3 * 2) & ~((1UL << 32) - 1)) +/* + * This is the base location for PIE (ET_DYN with INTERP) loads. On + * 64-bit, this is raised to 4GB to leave the entire 32-bit address + * space open for things that want to use the area for 32-bit pointers. + */ +#define ELF_ET_DYN_BASE (is_compat_task() ? 
0x000400000UL : \ + 0x100000000UL) /* This yields a mask that user programs can use to figure out what instruction set this CPU supports. */ diff --git a/arch/s390/include/asm/syscall.h b/arch/s390/include/asm/syscall.h index 6ba0bf928909..6bc941be6921 100644 --- a/arch/s390/include/asm/syscall.h +++ b/arch/s390/include/asm/syscall.h @@ -64,6 +64,12 @@ static inline void syscall_get_arguments(struct task_struct *task, { unsigned long mask = -1UL; + /* + * No arguments for this syscall, there's nothing to do. + */ + if (!n) + return; + BUG_ON(i + n > 6); #ifdef CONFIG_COMPAT if (test_tsk_thread_flag(task, TIF_31BIT)) diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S index 7460df3eec6b..4612ed7ec2e5 100644 --- a/arch/s390/kernel/entry.S +++ b/arch/s390/kernel/entry.S @@ -229,12 +229,17 @@ ENTRY(sie64a) lctlg %c1,%c1,__LC_USER_ASCE # load primary asce .Lsie_done: # some program checks are suppressing. C code (e.g. do_protection_exception) -# will rewind the PSW by the ILC, which is 4 bytes in case of SIE. Other -# instructions between sie64a and .Lsie_done should not cause program -# interrupts. So lets use a nop (47 00 00 00) as a landing pad. +# will rewind the PSW by the ILC, which is often 4 bytes in case of SIE. There +# are some corner cases (e.g. runtime instrumentation) where ILC is unpredictable. +# Other instructions between sie64a and .Lsie_done should not cause program +# interrupts. So lets use 3 nops as a landing pad for all possible rewinds. # See also .Lcleanup_sie -.Lrewind_pad: - nop 0 +.Lrewind_pad6: + nopr 7 +.Lrewind_pad4: + nopr 7 +.Lrewind_pad2: + nopr 7 .globl sie_exit sie_exit: lg %r14,__SF_EMPTY+8(%r15) # load guest register save area @@ -247,7 +252,9 @@ sie_exit: stg %r14,__SF_EMPTY+16(%r15) # set exit reason code j sie_exit - EX_TABLE(.Lrewind_pad,.Lsie_fault) + EX_TABLE(.Lrewind_pad6,.Lsie_fault) + EX_TABLE(.Lrewind_pad4,.Lsie_fault) + EX_TABLE(.Lrewind_pad2,.Lsie_fault) EX_TABLE(sie_exit,.Lsie_fault) #endif diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c index f2b6b1d9c804..126c4a9b9bf9 100644 --- a/arch/s390/mm/mmap.c +++ b/arch/s390/mm/mmap.c @@ -97,7 +97,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, addr = PAGE_ALIGN(addr); vma = find_vma(mm, addr); if (TASK_SIZE - len >= addr && addr >= mmap_min_addr && - (!vma || addr + len <= vma->vm_start)) + (!vma || addr + len <= vm_start_gap(vma))) return addr; } @@ -135,7 +135,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, addr = PAGE_ALIGN(addr); vma = find_vma(mm, addr); if (TASK_SIZE - len >= addr && addr >= mmap_min_addr && - (!vma || addr + len <= vma->vm_start)) + (!vma || addr + len <= vm_start_gap(vma))) return addr; } diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c index ef7d6c8fea66..f354fd84adeb 100644 --- a/arch/s390/mm/vmem.c +++ b/arch/s390/mm/vmem.c @@ -372,7 +372,7 @@ void __init vmem_map_init(void) ro_end = (unsigned long)&_eshared & PAGE_MASK; for_each_memblock(memory, reg) { start = reg->base; - end = reg->base + reg->size - 1; + end = reg->base + reg->size; if (start >= ro_end || end <= ro_start) vmem_add_mem(start, end - start, 0); else if (start >= ro_start && end <= ro_end) diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c index 6777177807c2..7df7d5944188 100644 --- a/arch/sh/mm/mmap.c +++ b/arch/sh/mm/mmap.c @@ -63,7 +63,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, vma = find_vma(mm, addr); if (TASK_SIZE - len >= addr && - (!vma || addr + len <= vma->vm_start)) + (!vma || 
addr + len <= vm_start_gap(vma))) return addr; } @@ -113,7 +113,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, vma = find_vma(mm, addr); if (TASK_SIZE - len >= addr && - (!vma || addr + len <= vma->vm_start)) + (!vma || addr + len <= vm_start_gap(vma))) return addr; } diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig index 3736be630113..894bcaed002e 100644 --- a/arch/sparc/Kconfig +++ b/arch/sparc/Kconfig @@ -183,9 +183,9 @@ config NR_CPUS int "Maximum number of CPUs" depends on SMP range 2 32 if SPARC32 - range 2 1024 if SPARC64 + range 2 4096 if SPARC64 default 32 if SPARC32 - default 64 if SPARC64 + default 4096 if SPARC64 source kernel/Kconfig.hz diff --git a/arch/sparc/include/asm/mmu_64.h b/arch/sparc/include/asm/mmu_64.h index f7de0dbc38af..83b36a5371ff 100644 --- a/arch/sparc/include/asm/mmu_64.h +++ b/arch/sparc/include/asm/mmu_64.h @@ -52,7 +52,7 @@ #define CTX_NR_MASK TAG_CONTEXT_BITS #define CTX_HW_MASK (CTX_NR_MASK | CTX_PGSZ_MASK) -#define CTX_FIRST_VERSION ((_AC(1,UL) << CTX_VERSION_SHIFT) + _AC(1,UL)) +#define CTX_FIRST_VERSION BIT(CTX_VERSION_SHIFT) #define CTX_VALID(__ctx) \ (!(((__ctx.sparc64_ctx_val) ^ tlb_context_cache) & CTX_VERSION_MASK)) #define CTX_HWBITS(__ctx) ((__ctx.sparc64_ctx_val) & CTX_HW_MASK) diff --git a/arch/sparc/include/asm/mmu_context_64.h b/arch/sparc/include/asm/mmu_context_64.h index b84be675e507..349dd23e2876 100644 --- a/arch/sparc/include/asm/mmu_context_64.h +++ b/arch/sparc/include/asm/mmu_context_64.h @@ -17,13 +17,8 @@ extern spinlock_t ctx_alloc_lock; extern unsigned long tlb_context_cache; extern unsigned long mmu_context_bmap[]; +DECLARE_PER_CPU(struct mm_struct *, per_cpu_secondary_mm); void get_new_mmu_context(struct mm_struct *mm); -#ifdef CONFIG_SMP -void smp_new_mmu_context_version(void); -#else -#define smp_new_mmu_context_version() do { } while (0) -#endif - int init_new_context(struct task_struct *tsk, struct mm_struct *mm); void destroy_context(struct mm_struct *mm); @@ -74,8 +69,9 @@ void __flush_tlb_mm(unsigned long, unsigned long); static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, struct task_struct *tsk) { unsigned long ctx_valid, flags; - int cpu; + int cpu = smp_processor_id(); + per_cpu(per_cpu_secondary_mm, cpu) = mm; if (unlikely(mm == &init_mm)) return; @@ -121,7 +117,6 @@ static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, str * for the first time, we must flush that context out of the * local TLB. */ - cpu = smp_processor_id(); if (!ctx_valid || !cpumask_test_cpu(cpu, mm_cpumask(mm))) { cpumask_set_cpu(cpu, mm_cpumask(mm)); __flush_tlb_mm(CTX_HWBITS(mm->context), @@ -131,26 +126,7 @@ static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, str } #define deactivate_mm(tsk,mm) do { } while (0) - -/* Activate a new MM instance for the current task. 
*/ -static inline void activate_mm(struct mm_struct *active_mm, struct mm_struct *mm) -{ - unsigned long flags; - int cpu; - - spin_lock_irqsave(&mm->context.lock, flags); - if (!CTX_VALID(mm->context)) - get_new_mmu_context(mm); - cpu = smp_processor_id(); - if (!cpumask_test_cpu(cpu, mm_cpumask(mm))) - cpumask_set_cpu(cpu, mm_cpumask(mm)); - - load_secondary_context(mm); - __flush_tlb_mm(CTX_HWBITS(mm->context), SECONDARY_CONTEXT); - tsb_context_switch(mm); - spin_unlock_irqrestore(&mm->context.lock, flags); -} - +#define activate_mm(active_mm, mm) switch_mm(active_mm, mm, NULL) #endif /* !(__ASSEMBLY__) */ #endif /* !(__SPARC64_MMU_CONTEXT_H) */ diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h index 91b963a887b7..29c3b400f949 100644 --- a/arch/sparc/include/asm/pgtable_32.h +++ b/arch/sparc/include/asm/pgtable_32.h @@ -91,9 +91,9 @@ extern unsigned long pfn_base; * ZERO_PAGE is a global shared page that is always zero: used * for zero-mapped memory areas etc.. */ -extern unsigned long empty_zero_page; +extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]; -#define ZERO_PAGE(vaddr) (virt_to_page(&empty_zero_page)) +#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page)) /* * In general all page table modifications should use the V8 atomic diff --git a/arch/sparc/include/asm/pil.h b/arch/sparc/include/asm/pil.h index 266937030546..522b43db2ed3 100644 --- a/arch/sparc/include/asm/pil.h +++ b/arch/sparc/include/asm/pil.h @@ -20,7 +20,6 @@ #define PIL_SMP_CALL_FUNC 1 #define PIL_SMP_RECEIVE_SIGNAL 2 #define PIL_SMP_CAPTURE 3 -#define PIL_SMP_CTX_NEW_VERSION 4 #define PIL_DEVICE_IRQ 5 #define PIL_SMP_CALL_FUNC_SNGL 6 #define PIL_DEFERRED_PCR_WORK 7 diff --git a/arch/sparc/include/asm/setup.h b/arch/sparc/include/asm/setup.h index 29d64b1758ed..be0cc1beed41 100644 --- a/arch/sparc/include/asm/setup.h +++ b/arch/sparc/include/asm/setup.h @@ -16,7 +16,7 @@ extern char reboot_command[]; */ extern unsigned char boot_cpu_id; -extern unsigned long empty_zero_page; +extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]; extern int serial_console; static inline int con_is_present(void) diff --git a/arch/sparc/include/asm/vio.h b/arch/sparc/include/asm/vio.h index 8174f6cdbbbb..9dca7a892978 100644 --- a/arch/sparc/include/asm/vio.h +++ b/arch/sparc/include/asm/vio.h @@ -327,6 +327,7 @@ struct vio_dev { int compat_len; u64 dev_no; + u64 id; unsigned long channel_id; diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c index e22416ce56ea..bfbde8c4ffb2 100644 --- a/arch/sparc/kernel/irq_64.c +++ b/arch/sparc/kernel/irq_64.c @@ -1034,17 +1034,26 @@ static void __init init_cpu_send_mondo_info(struct trap_per_cpu *tb) { #ifdef CONFIG_SMP unsigned long page; + void *mondo, *p; - BUILD_BUG_ON((NR_CPUS * sizeof(u16)) > (PAGE_SIZE - 64)); + BUILD_BUG_ON((NR_CPUS * sizeof(u16)) > PAGE_SIZE); + + /* Make sure mondo block is 64byte aligned */ + p = kzalloc(127, GFP_KERNEL); + if (!p) { + prom_printf("SUN4V: Error, cannot allocate mondo block.\n"); + prom_halt(); + } + mondo = (void *)(((unsigned long)p + 63) & ~0x3f); + tb->cpu_mondo_block_pa = __pa(mondo); page = get_zeroed_page(GFP_KERNEL); if (!page) { - prom_printf("SUN4V: Error, cannot allocate cpu mondo page.\n"); + prom_printf("SUN4V: Error, cannot allocate cpu list page.\n"); prom_halt(); } - tb->cpu_mondo_block_pa = __pa(page); - tb->cpu_list_pa = __pa(page + 64); + tb->cpu_list_pa = __pa(page); #endif } diff --git a/arch/sparc/kernel/kernel.h 
b/arch/sparc/kernel/kernel.h index e7f652be9e61..44f32dd4477f 100644 --- a/arch/sparc/kernel/kernel.h +++ b/arch/sparc/kernel/kernel.h @@ -37,7 +37,6 @@ void handle_stdfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr /* smp_64.c */ void __irq_entry smp_call_function_client(int irq, struct pt_regs *regs); void __irq_entry smp_call_function_single_client(int irq, struct pt_regs *regs); -void __irq_entry smp_new_mmu_context_version_client(int irq, struct pt_regs *regs); void __irq_entry smp_penguin_jailcell(int irq, struct pt_regs *regs); void __irq_entry smp_receive_signal_client(int irq, struct pt_regs *regs); diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c index 19cd08d18672..95a9fa0d2195 100644 --- a/arch/sparc/kernel/smp_64.c +++ b/arch/sparc/kernel/smp_64.c @@ -959,37 +959,6 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page) preempt_enable(); } -void __irq_entry smp_new_mmu_context_version_client(int irq, struct pt_regs *regs) -{ - struct mm_struct *mm; - unsigned long flags; - - clear_softint(1 << irq); - - /* See if we need to allocate a new TLB context because - * the version of the one we are using is now out of date. - */ - mm = current->active_mm; - if (unlikely(!mm || (mm == &init_mm))) - return; - - spin_lock_irqsave(&mm->context.lock, flags); - - if (unlikely(!CTX_VALID(mm->context))) - get_new_mmu_context(mm); - - spin_unlock_irqrestore(&mm->context.lock, flags); - - load_secondary_context(mm); - __flush_tlb_mm(CTX_HWBITS(mm->context), - SECONDARY_CONTEXT); -} - -void smp_new_mmu_context_version(void) -{ - smp_cross_call(&xcall_new_mmu_context_version, 0, 0, 0); -} - #ifdef CONFIG_KGDB void kgdb_roundup_cpus(unsigned long flags) { diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c index b489e9759518..98a5cf313d39 100644 --- a/arch/sparc/kernel/sys_sparc_64.c +++ b/arch/sparc/kernel/sys_sparc_64.c @@ -118,7 +118,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi vma = find_vma(mm, addr); if (task_size - len >= addr && - (!vma || addr + len <= vma->vm_start)) + (!vma || addr + len <= vm_start_gap(vma))) return addr; } @@ -181,7 +181,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, vma = find_vma(mm, addr); if (task_size - len >= addr && - (!vma || addr + len <= vma->vm_start)) + (!vma || addr + len <= vm_start_gap(vma))) return addr; } diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c index d21cd625c0de..cc97a43268ee 100644 --- a/arch/sparc/kernel/traps_64.c +++ b/arch/sparc/kernel/traps_64.c @@ -85,7 +85,7 @@ static void dump_tl1_traplog(struct tl1_traplog *p) void bad_trap(struct pt_regs *regs, long lvl) { - char buffer[32]; + char buffer[36]; siginfo_t info; if (notify_die(DIE_TRAP, "bad trap", regs, @@ -116,7 +116,7 @@ void bad_trap(struct pt_regs *regs, long lvl) void bad_trap_tl1(struct pt_regs *regs, long lvl) { - char buffer[32]; + char buffer[36]; if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs, 0, lvl, SIGTRAP) == NOTIFY_STOP) diff --git a/arch/sparc/kernel/tsb.S b/arch/sparc/kernel/tsb.S index d568c8207af7..395ec1800530 100644 --- a/arch/sparc/kernel/tsb.S +++ b/arch/sparc/kernel/tsb.S @@ -470,13 +470,16 @@ __tsb_context_switch: .type copy_tsb,#function copy_tsb: /* %o0=old_tsb_base, %o1=old_tsb_size * %o2=new_tsb_base, %o3=new_tsb_size + * %o4=page_size_shift */ sethi %uhi(TSB_PASS_BITS), %g7 srlx %o3, 4, %o3 - add %o0, %o1, %g1 /* end of old tsb */ + add %o0, %o1, %o1 /* end of old tsb */ 
sllx %g7, 32, %g7 sub %o3, 1, %o3 /* %o3 == new tsb hash mask */ + mov %o4, %g1 /* page_size_shift */ + 661: prefetcha [%o0] ASI_N, #one_read .section .tsb_phys_patch, "ax" .word 661b @@ -501,9 +504,9 @@ copy_tsb: /* %o0=old_tsb_base, %o1=old_tsb_size /* This can definitely be computed faster... */ srlx %o0, 4, %o5 /* Build index */ and %o5, 511, %o5 /* Mask index */ - sllx %o5, PAGE_SHIFT, %o5 /* Put into vaddr position */ + sllx %o5, %g1, %o5 /* Put into vaddr position */ or %o4, %o5, %o4 /* Full VADDR. */ - srlx %o4, PAGE_SHIFT, %o4 /* Shift down to create index */ + srlx %o4, %g1, %o4 /* Shift down to create index */ and %o4, %o3, %o4 /* Mask with new_tsb_nents-1 */ sllx %o4, 4, %o4 /* Shift back up into tsb ent offset */ TSB_STORE(%o2 + %o4, %g2) /* Store TAG */ @@ -511,7 +514,7 @@ copy_tsb: /* %o0=old_tsb_base, %o1=old_tsb_size TSB_STORE(%o2 + %o4, %g3) /* Store TTE */ 80: add %o0, 16, %o0 - cmp %o0, %g1 + cmp %o0, %o1 bne,pt %xcc, 90b nop diff --git a/arch/sparc/kernel/ttable_64.S b/arch/sparc/kernel/ttable_64.S index c6dfdaa29e20..170ead662f2a 100644 --- a/arch/sparc/kernel/ttable_64.S +++ b/arch/sparc/kernel/ttable_64.S @@ -50,7 +50,7 @@ tl0_resv03e: BTRAP(0x3e) BTRAP(0x3f) BTRAP(0x40) tl0_irq1: TRAP_IRQ(smp_call_function_client, 1) tl0_irq2: TRAP_IRQ(smp_receive_signal_client, 2) tl0_irq3: TRAP_IRQ(smp_penguin_jailcell, 3) -tl0_irq4: TRAP_IRQ(smp_new_mmu_context_version_client, 4) +tl0_irq4: BTRAP(0x44) #else tl0_irq1: BTRAP(0x41) tl0_irq2: BTRAP(0x42) diff --git a/arch/sparc/kernel/vio.c b/arch/sparc/kernel/vio.c index cb5789c9f961..34824ca396f0 100644 --- a/arch/sparc/kernel/vio.c +++ b/arch/sparc/kernel/vio.c @@ -284,13 +284,16 @@ static struct vio_dev *vio_create_one(struct mdesc_handle *hp, u64 mp, if (!id) { dev_set_name(&vdev->dev, "%s", bus_id_name); vdev->dev_no = ~(u64)0; + vdev->id = ~(u64)0; } else if (!cfg_handle) { dev_set_name(&vdev->dev, "%s-%llu", bus_id_name, *id); vdev->dev_no = *id; + vdev->id = ~(u64)0; } else { dev_set_name(&vdev->dev, "%s-%llu-%llu", bus_id_name, *cfg_handle, *id); vdev->dev_no = *cfg_handle; + vdev->id = *id; } vdev->dev.parent = parent; @@ -333,27 +336,84 @@ static void vio_add(struct mdesc_handle *hp, u64 node) (void) vio_create_one(hp, node, &root_vdev->dev); } +struct vio_md_node_query { + const char *type; + u64 dev_no; + u64 id; +}; + static int vio_md_node_match(struct device *dev, void *arg) { + struct vio_md_node_query *query = (struct vio_md_node_query *) arg; struct vio_dev *vdev = to_vio_dev(dev); - if (vdev->mp == (u64) arg) - return 1; + if (vdev->dev_no != query->dev_no) + return 0; + if (vdev->id != query->id) + return 0; + if (strcmp(vdev->type, query->type)) + return 0; - return 0; + return 1; } static void vio_remove(struct mdesc_handle *hp, u64 node) { + const char *type; + const u64 *id, *cfg_handle; + u64 a; + struct vio_md_node_query query; struct device *dev; - dev = device_find_child(&root_vdev->dev, (void *) node, + type = mdesc_get_property(hp, node, "device-type", NULL); + if (!type) { + type = mdesc_get_property(hp, node, "name", NULL); + if (!type) + type = mdesc_node_name(hp, node); + } + + query.type = type; + + id = mdesc_get_property(hp, node, "id", NULL); + cfg_handle = NULL; + mdesc_for_each_arc(a, hp, node, MDESC_ARC_TYPE_BACK) { + u64 target; + + target = mdesc_arc_target(hp, a); + cfg_handle = mdesc_get_property(hp, target, + "cfg-handle", NULL); + if (cfg_handle) + break; + } + + if (!id) { + query.dev_no = ~(u64)0; + query.id = ~(u64)0; + } else if (!cfg_handle) { + query.dev_no = *id; + query.id = 
~(u64)0; + } else { + query.dev_no = *cfg_handle; + query.id = *id; + } + + dev = device_find_child(&root_vdev->dev, &query, vio_md_node_match); if (dev) { printk(KERN_INFO "VIO: Removing device %s\n", dev_name(dev)); device_unregister(dev); put_device(dev); + } else { + if (!id) + printk(KERN_ERR "VIO: Removed unknown %s node.\n", + type); + else if (!cfg_handle) + printk(KERN_ERR "VIO: Removed unknown %s node %llu.\n", + type, *id); + else + printk(KERN_ERR "VIO: Removed unknown %s node %llu-%llu.\n", + type, *cfg_handle, *id); } } diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c index da1142401bf4..ffa842b4d7d4 100644 --- a/arch/sparc/mm/hugetlbpage.c +++ b/arch/sparc/mm/hugetlbpage.c @@ -115,7 +115,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr, addr = ALIGN(addr, HPAGE_SIZE); vma = find_vma(mm, addr); if (task_size - len >= addr && - (!vma || addr + len <= vma->vm_start)) + (!vma || addr + len <= vm_start_gap(vma))) return addr; } if (mm->get_unmapped_area == arch_get_unmapped_area) diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c index eb8287155279..3b7092d9ea8f 100644 --- a/arch/sparc/mm/init_32.c +++ b/arch/sparc/mm/init_32.c @@ -301,7 +301,7 @@ void __init mem_init(void) /* Saves us work later. */ - memset((void *)&empty_zero_page, 0, PAGE_SIZE); + memset((void *)empty_zero_page, 0, PAGE_SIZE); i = last_valid_pfn >> ((20 - PAGE_SHIFT) + 5); i += 1; diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c index 965655afdbb6..384aba109d7c 100644 --- a/arch/sparc/mm/init_64.c +++ b/arch/sparc/mm/init_64.c @@ -656,10 +656,58 @@ EXPORT_SYMBOL(__flush_dcache_range); /* get_new_mmu_context() uses "cache + 1". */ DEFINE_SPINLOCK(ctx_alloc_lock); -unsigned long tlb_context_cache = CTX_FIRST_VERSION - 1; +unsigned long tlb_context_cache = CTX_FIRST_VERSION; #define MAX_CTX_NR (1UL << CTX_NR_BITS) #define CTX_BMAP_SLOTS BITS_TO_LONGS(MAX_CTX_NR) DECLARE_BITMAP(mmu_context_bmap, MAX_CTX_NR); +DEFINE_PER_CPU(struct mm_struct *, per_cpu_secondary_mm) = {0}; + +static void mmu_context_wrap(void) +{ + unsigned long old_ver = tlb_context_cache & CTX_VERSION_MASK; + unsigned long new_ver, new_ctx, old_ctx; + struct mm_struct *mm; + int cpu; + + bitmap_zero(mmu_context_bmap, 1 << CTX_NR_BITS); + + /* Reserve kernel context */ + set_bit(0, mmu_context_bmap); + + new_ver = (tlb_context_cache & CTX_VERSION_MASK) + CTX_FIRST_VERSION; + if (unlikely(new_ver == 0)) + new_ver = CTX_FIRST_VERSION; + tlb_context_cache = new_ver; + + /* + * Make sure that any new mm that are added into per_cpu_secondary_mm, + * are going to go through get_new_mmu_context() path. + */ + mb(); + + /* + * Updated versions to current on those CPUs that had valid secondary + * contexts + */ + for_each_online_cpu(cpu) { + /* + * If a new mm is stored after we took this mm from the array, + * it will go into get_new_mmu_context() path, because we + * already bumped the version in tlb_context_cache. + */ + mm = per_cpu(per_cpu_secondary_mm, cpu); + + if (unlikely(!mm || mm == &init_mm)) + continue; + + old_ctx = mm->context.sparc64_ctx_val; + if (likely((old_ctx & CTX_VERSION_MASK) == old_ver)) { + new_ctx = (old_ctx & ~CTX_VERSION_MASK) | new_ver; + set_bit(new_ctx & CTX_NR_MASK, mmu_context_bmap); + mm->context.sparc64_ctx_val = new_ctx; + } + } +} /* Caller does TLB context flushing on local CPU if necessary. * The caller also ensures that CTX_VALID(mm->context) is false. 
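/*
 * Illustrative sketch (not part of the patch): sparc64_ctx_val packs a
 * generation ("version") in its high bits and a context number in its low
 * bits. A wrap therefore only needs to bump the version stored in
 * tlb_context_cache and renumber the mms currently installed as a secondary
 * context somewhere; every other mm fails the version comparison and falls
 * into get_new_mmu_context(). The helper name below is hypothetical.
 */
static inline bool ctx_matches_current_version(unsigned long ctx_val)
{
	/* Same comparison CTX_VALID() performs against tlb_context_cache. */
	return ((ctx_val ^ tlb_context_cache) & CTX_VERSION_MASK) == 0;
}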
@@ -675,48 +723,30 @@ void get_new_mmu_context(struct mm_struct *mm) { unsigned long ctx, new_ctx; unsigned long orig_pgsz_bits; - int new_version; spin_lock(&ctx_alloc_lock); +retry: + /* wrap might have happened, test again if our context became valid */ + if (unlikely(CTX_VALID(mm->context))) + goto out; orig_pgsz_bits = (mm->context.sparc64_ctx_val & CTX_PGSZ_MASK); ctx = (tlb_context_cache + 1) & CTX_NR_MASK; new_ctx = find_next_zero_bit(mmu_context_bmap, 1 << CTX_NR_BITS, ctx); - new_version = 0; if (new_ctx >= (1 << CTX_NR_BITS)) { new_ctx = find_next_zero_bit(mmu_context_bmap, ctx, 1); if (new_ctx >= ctx) { - int i; - new_ctx = (tlb_context_cache & CTX_VERSION_MASK) + - CTX_FIRST_VERSION; - if (new_ctx == 1) - new_ctx = CTX_FIRST_VERSION; - - /* Don't call memset, for 16 entries that's just - * plain silly... - */ - mmu_context_bmap[0] = 3; - mmu_context_bmap[1] = 0; - mmu_context_bmap[2] = 0; - mmu_context_bmap[3] = 0; - for (i = 4; i < CTX_BMAP_SLOTS; i += 4) { - mmu_context_bmap[i + 0] = 0; - mmu_context_bmap[i + 1] = 0; - mmu_context_bmap[i + 2] = 0; - mmu_context_bmap[i + 3] = 0; - } - new_version = 1; - goto out; + mmu_context_wrap(); + goto retry; } } + if (mm->context.sparc64_ctx_val) + cpumask_clear(mm_cpumask(mm)); mmu_context_bmap[new_ctx>>6] |= (1UL << (new_ctx & 63)); new_ctx |= (tlb_context_cache & CTX_VERSION_MASK); -out: tlb_context_cache = new_ctx; mm->context.sparc64_ctx_val = new_ctx | orig_pgsz_bits; +out: spin_unlock(&ctx_alloc_lock); - - if (unlikely(new_version)) - smp_new_mmu_context_version(); } static int numa_enabled = 1; diff --git a/arch/sparc/mm/tsb.c b/arch/sparc/mm/tsb.c index 9cdeca0fa955..266411291634 100644 --- a/arch/sparc/mm/tsb.c +++ b/arch/sparc/mm/tsb.c @@ -451,7 +451,8 @@ retry_tsb_alloc: extern void copy_tsb(unsigned long old_tsb_base, unsigned long old_tsb_size, unsigned long new_tsb_base, - unsigned long new_tsb_size); + unsigned long new_tsb_size, + unsigned long page_size_shift); unsigned long old_tsb_base = (unsigned long) old_tsb; unsigned long new_tsb_base = (unsigned long) new_tsb; @@ -459,7 +460,9 @@ retry_tsb_alloc: old_tsb_base = __pa(old_tsb_base); new_tsb_base = __pa(new_tsb_base); } - copy_tsb(old_tsb_base, old_size, new_tsb_base, new_size); + copy_tsb(old_tsb_base, old_size, new_tsb_base, new_size, + tsb_index == MM_TSB_BASE ? 
+ PAGE_SHIFT : REAL_HPAGE_SHIFT); } mm->context.tsb_block[tsb_index].tsb = new_tsb; diff --git a/arch/sparc/mm/ultra.S b/arch/sparc/mm/ultra.S index 5d2fd6cd3189..fcf4d27a38fb 100644 --- a/arch/sparc/mm/ultra.S +++ b/arch/sparc/mm/ultra.S @@ -971,11 +971,6 @@ xcall_capture: wr %g0, (1 << PIL_SMP_CAPTURE), %set_softint retry - .globl xcall_new_mmu_context_version -xcall_new_mmu_context_version: - wr %g0, (1 << PIL_SMP_CTX_NEW_VERSION), %set_softint - retry - #ifdef CONFIG_KGDB .globl xcall_kgdb_capture xcall_kgdb_capture: diff --git a/arch/tile/mm/hugetlbpage.c b/arch/tile/mm/hugetlbpage.c index c034dc3fe2d4..c97ee6c7f949 100644 --- a/arch/tile/mm/hugetlbpage.c +++ b/arch/tile/mm/hugetlbpage.c @@ -232,7 +232,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, addr = ALIGN(addr, huge_page_size(h)); vma = find_vma(mm, addr); if (TASK_SIZE - len >= addr && - (!vma || addr + len <= vma->vm_start)) + (!vma || addr + len <= vm_start_gap(vma))) return addr; } if (current->mm->get_unmapped_area == arch_get_unmapped_area) diff --git a/arch/x86/crypto/sha1_ssse3_glue.c b/arch/x86/crypto/sha1_ssse3_glue.c index dd14616b7739..7de207a11014 100644 --- a/arch/x86/crypto/sha1_ssse3_glue.c +++ b/arch/x86/crypto/sha1_ssse3_glue.c @@ -201,7 +201,7 @@ asmlinkage void sha1_transform_avx2(u32 *digest, const char *data, static bool avx2_usable(void) { - if (avx_usable() && boot_cpu_has(X86_FEATURE_AVX2) + if (false && avx_usable() && boot_cpu_has(X86_FEATURE_AVX2) && boot_cpu_has(X86_FEATURE_BMI1) && boot_cpu_has(X86_FEATURE_BMI2)) return true; diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h index d262f985bbc8..07cf288b692e 100644 --- a/arch/x86/include/asm/elf.h +++ b/arch/x86/include/asm/elf.h @@ -245,12 +245,13 @@ extern int force_personality32; #define CORE_DUMP_USE_REGSET #define ELF_EXEC_PAGESIZE 4096 -/* This is the location that an ET_DYN program is loaded if exec'ed. Typical - use of this is to invoke "./ld.so someprog" to test out a new version of - the loader. We need to make sure that it is out of the way of the program - that it will "exec", and that there is sufficient room for the brk. */ - -#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2) +/* + * This is the base location for PIE (ET_DYN with INTERP) loads. On + * 64-bit, this is raised to 4GB to leave the entire 32-bit address + * space open for things that want to use the area for 32-bit pointers. + */ +#define ELF_ET_DYN_BASE (mmap_is_ia32() ? 0x000400000UL : \ + 0x100000000UL) /* This yields a mask that user programs can use to figure out what instruction set this CPU supports. 
This could be done in user space, diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h index e9cd7befcb76..19d14ac23ef9 100644 --- a/arch/x86/include/asm/kvm_emulate.h +++ b/arch/x86/include/asm/kvm_emulate.h @@ -221,6 +221,9 @@ struct x86_emulate_ops { void (*get_cpuid)(struct x86_emulate_ctxt *ctxt, u32 *eax, u32 *ebx, u32 *ecx, u32 *edx); void (*set_nmi_mask)(struct x86_emulate_ctxt *ctxt, bool masked); + + unsigned (*get_hflags)(struct x86_emulate_ctxt *ctxt); + void (*set_hflags)(struct x86_emulate_ctxt *ctxt, unsigned hflags); }; typedef u32 __attribute__((vector_size(16))) sse128_t; @@ -290,7 +293,6 @@ struct x86_emulate_ctxt { /* interruptibility state, as a result of execution of STI or MOV SS */ int interruptibility; - int emul_flags; bool perm_ok; /* do not check permissions if true */ bool ud; /* inject an #UD if host doesn't support insn */ diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h index 690b4027e17c..37db36fddc88 100644 --- a/arch/x86/include/asm/msr-index.h +++ b/arch/x86/include/asm/msr-index.h @@ -405,6 +405,8 @@ #define MSR_IA32_TSC_ADJUST 0x0000003b #define MSR_IA32_BNDCFGS 0x00000d90 +#define MSR_IA32_BNDCFGS_RSVD 0x00000ffc + #define MSR_IA32_XSS 0x00000da0 #define FEATURE_CONTROL_LOCKED (1<<0) diff --git a/arch/x86/include/asm/pat.h b/arch/x86/include/asm/pat.h index 0b1ff4c1c14e..fffb2794dd89 100644 --- a/arch/x86/include/asm/pat.h +++ b/arch/x86/include/asm/pat.h @@ -7,6 +7,7 @@ bool pat_enabled(void); void pat_disable(const char *reason); extern void pat_init(void); +extern void init_cache_modes(void); extern int reserve_memtype(u64 start, u64 end, enum page_cache_mode req_pcm, enum page_cache_mode *ret_pcm); diff --git a/arch/x86/include/asm/xen/hypercall.h b/arch/x86/include/asm/xen/hypercall.h index 4c20dd333412..85133b2b8e99 100644 --- a/arch/x86/include/asm/xen/hypercall.h +++ b/arch/x86/include/asm/xen/hypercall.h @@ -43,6 +43,7 @@ #include #include +#include #include #include @@ -213,10 +214,12 @@ privcmd_call(unsigned call, __HYPERCALL_DECLS; __HYPERCALL_5ARG(a1, a2, a3, a4, a5); + stac(); asm volatile("call *%[call]" : __HYPERCALL_5PARAM : [call] "a" (&hypercall_page[call]) : __HYPERCALL_CLOBBER5); + clac(); return (long)__res; } diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c index e75907601a41..1e5eb9f2ff5f 100644 --- a/arch/x86/kernel/acpi/boot.c +++ b/arch/x86/kernel/acpi/boot.c @@ -328,6 +328,14 @@ static void __init mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger, int pin; struct mpc_intsrc mp_irq; + /* + * Check bus_irq boundary. + */ + if (bus_irq >= NR_IRQS_LEGACY) { + pr_warn("Invalid bus_irq %u for legacy override\n", bus_irq); + return; + } + /* * Convert 'gsi' to 'ioapic.pin'. 
*/ diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c index 1e5d2f07416b..fc91c98bee01 100644 --- a/arch/x86/kernel/apic/io_apic.c +++ b/arch/x86/kernel/apic/io_apic.c @@ -2115,7 +2115,7 @@ static inline void __init check_timer(void) int idx; idx = find_irq_entry(apic1, pin1, mp_INT); if (idx != -1 && irq_trigger(idx)) - unmask_ioapic_irq(irq_get_chip_data(0)); + unmask_ioapic_irq(irq_get_irq_data(0)); } irq_domain_deactivate_irq(irq_data); irq_domain_activate_irq(irq_data); diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c index 62aca448726a..2116176c1721 100644 --- a/arch/x86/kernel/cpu/mcheck/mce_amd.c +++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c @@ -682,6 +682,9 @@ static int threshold_create_bank(unsigned int cpu, unsigned int bank) const char *name = th_names[bank]; int err = 0; + if (!dev) + return -ENODEV; + if (is_shared_bank(bank)) { nb = node_to_amd_nb(amd_get_nb_id(cpu)); diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c index 47190bd399e7..cec49ecf5f31 100644 --- a/arch/x86/kernel/kvm.c +++ b/arch/x86/kernel/kvm.c @@ -161,8 +161,8 @@ void kvm_async_pf_task_wait(u32 token) */ rcu_irq_exit(); native_safe_halt(); - rcu_irq_enter(); local_irq_disable(); + rcu_irq_enter(); } } if (!n.halted) diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index d2bbe343fda7..e67b834279b2 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -1048,6 +1048,13 @@ void __init setup_arch(char **cmdline_p) if (mtrr_trim_uncached_memory(max_pfn)) max_pfn = e820_end_of_ram_pfn(); + /* + * This call is required when the CPU does not support PAT. If + * mtrr_bp_init() invoked it already via pat_init() the call has no + * effect. + */ + init_cache_modes(); + #ifdef CONFIG_X86_32 /* max_low_pfn get updated here */ find_low_pfn_range(); diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c index 10e0272d789a..136ad7c1ce7b 100644 --- a/arch/x86/kernel/sys_x86_64.c +++ b/arch/x86/kernel/sys_x86_64.c @@ -143,7 +143,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, addr = PAGE_ALIGN(addr); vma = find_vma(mm, addr); if (end - len >= addr && - (!vma || addr + len <= vma->vm_start)) + (!vma || addr + len <= vm_start_gap(vma))) return addr; } @@ -186,7 +186,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, addr = PAGE_ALIGN(addr); vma = find_vma(mm, addr); if (TASK_SIZE - len >= addr && - (!vma || addr + len <= vma->vm_start)) + (!vma || addr + len <= vm_start_gap(vma))) return addr; } diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c index 642e9c93a097..83d6369c45f5 100644 --- a/arch/x86/kvm/cpuid.c +++ b/arch/x86/kvm/cpuid.c @@ -46,11 +46,18 @@ static u32 xstate_required_size(u64 xstate_bv, bool compacted) return ret; } +bool kvm_mpx_supported(void) +{ + return ((host_xcr0 & (XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR)) + && kvm_x86_ops->mpx_supported()); +} +EXPORT_SYMBOL_GPL(kvm_mpx_supported); + u64 kvm_supported_xcr0(void) { u64 xcr0 = KVM_SUPPORTED_XCR0 & host_xcr0; - if (!kvm_x86_ops->mpx_supported()) + if (!kvm_mpx_supported()) xcr0 &= ~(XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR); return xcr0; @@ -97,7 +104,7 @@ int kvm_update_cpuid(struct kvm_vcpu *vcpu) if (best && (best->eax & (F(XSAVES) | F(XSAVEC)))) best->ebx = xstate_required_size(vcpu->arch.xcr0, true); - vcpu->arch.eager_fpu = use_eager_fpu() || guest_cpuid_has_mpx(vcpu); + vcpu->arch.eager_fpu = use_eager_fpu(); if (vcpu->arch.eager_fpu) kvm_x86_ops->fpu_activate(vcpu); 
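/*
 * Illustrative sketch (not part of the patch): callers pair the new
 * kvm_mpx_supported() host-capability check with the per-guest CPUID bit,
 * as the vmx.c MSR_IA32_BNDCFGS hunks further below do. The helper name is
 * hypothetical.
 */
static inline bool guest_may_access_bndcfgs(struct kvm_vcpu *vcpu,
					    bool host_initiated)
{
	/* Host-initiated accesses only require host-side MPX support. */
	return kvm_mpx_supported() &&
	       (host_initiated || guest_cpuid_has_mpx(vcpu));
}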
@@ -295,7 +302,7 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function, #endif unsigned f_rdtscp = kvm_x86_ops->rdtscp_supported() ? F(RDTSCP) : 0; unsigned f_invpcid = kvm_x86_ops->invpcid_supported() ? F(INVPCID) : 0; - unsigned f_mpx = kvm_x86_ops->mpx_supported() ? F(MPX) : 0; + unsigned f_mpx = kvm_mpx_supported() ? F(MPX) : 0; unsigned f_xsaves = kvm_x86_ops->xsaves_supported() ? F(XSAVES) : 0; /* cpuid 1.edx */ @@ -737,18 +744,20 @@ out: static int move_to_next_stateful_cpuid_entry(struct kvm_vcpu *vcpu, int i) { struct kvm_cpuid_entry2 *e = &vcpu->arch.cpuid_entries[i]; - int j, nent = vcpu->arch.cpuid_nent; + struct kvm_cpuid_entry2 *ej; + int j = i; + int nent = vcpu->arch.cpuid_nent; e->flags &= ~KVM_CPUID_FLAG_STATE_READ_NEXT; /* when no next entry is found, the current entry[i] is reselected */ - for (j = i + 1; ; j = (j + 1) % nent) { - struct kvm_cpuid_entry2 *ej = &vcpu->arch.cpuid_entries[j]; - if (ej->function == e->function) { - ej->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT; - return j; - } - } - return 0; /* silence gcc, even though control never reaches here */ + do { + j = (j + 1) % nent; + ej = &vcpu->arch.cpuid_entries[j]; + } while (ej->function != e->function); + + ej->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT; + + return j; } /* find an entry with matching function, matching index (if needed), and that diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h index 3f5c48ddba45..d1534feefcfe 100644 --- a/arch/x86/kvm/cpuid.h +++ b/arch/x86/kvm/cpuid.h @@ -4,6 +4,7 @@ #include "x86.h" int kvm_update_cpuid(struct kvm_vcpu *vcpu); +bool kvm_mpx_supported(void); struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu, u32 function, u32 index); int kvm_dev_ioctl_get_cpuid(struct kvm_cpuid2 *cpuid, @@ -134,14 +135,6 @@ static inline bool guest_cpuid_has_rtm(struct kvm_vcpu *vcpu) return best && (best->ebx & bit(X86_FEATURE_RTM)); } -static inline bool guest_cpuid_has_mpx(struct kvm_vcpu *vcpu) -{ - struct kvm_cpuid_entry2 *best; - - best = kvm_find_cpuid_entry(vcpu, 7, 0); - return best && (best->ebx & bit(X86_FEATURE_MPX)); -} - static inline bool guest_cpuid_has_pcommit(struct kvm_vcpu *vcpu) { struct kvm_cpuid_entry2 *best; @@ -150,6 +143,14 @@ static inline bool guest_cpuid_has_pcommit(struct kvm_vcpu *vcpu) return best && (best->ebx & bit(X86_FEATURE_PCOMMIT)); } +static inline bool guest_cpuid_has_mpx(struct kvm_vcpu *vcpu) +{ + struct kvm_cpuid_entry2 *best; + + best = kvm_find_cpuid_entry(vcpu, 7, 0); + return best && (best->ebx & bit(X86_FEATURE_MPX)); +} + static inline bool guest_cpuid_has_rdtscp(struct kvm_vcpu *vcpu) { struct kvm_cpuid_entry2 *best; diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index 1dcea225977d..04b2f3cad7ba 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c @@ -2531,7 +2531,7 @@ static int em_rsm(struct x86_emulate_ctxt *ctxt) u64 smbase; int ret; - if ((ctxt->emul_flags & X86EMUL_SMM_MASK) == 0) + if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_MASK) == 0) return emulate_ud(ctxt); /* @@ -2580,11 +2580,11 @@ static int em_rsm(struct x86_emulate_ctxt *ctxt) return X86EMUL_UNHANDLEABLE; } - if ((ctxt->emul_flags & X86EMUL_SMM_INSIDE_NMI_MASK) == 0) + if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_INSIDE_NMI_MASK) == 0) ctxt->ops->set_nmi_mask(ctxt, false); - ctxt->emul_flags &= ~X86EMUL_SMM_INSIDE_NMI_MASK; - ctxt->emul_flags &= ~X86EMUL_SMM_MASK; + ctxt->ops->set_hflags(ctxt, ctxt->ops->get_hflags(ctxt) & + ~(X86EMUL_SMM_INSIDE_NMI_MASK | X86EMUL_SMM_MASK)); return 
X86EMUL_CONTINUE; } @@ -5296,6 +5296,7 @@ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt) const struct x86_emulate_ops *ops = ctxt->ops; int rc = X86EMUL_CONTINUE; int saved_dst_type = ctxt->dst.type; + unsigned emul_flags; ctxt->mem_read.pos = 0; @@ -5310,6 +5311,7 @@ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt) goto done; } + emul_flags = ctxt->ops->get_hflags(ctxt); if (unlikely(ctxt->d & (No64|Undefined|Sse|Mmx|Intercept|CheckPerm|Priv|Prot|String))) { if ((ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) || @@ -5343,7 +5345,7 @@ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt) fetch_possible_mmx_operand(ctxt, &ctxt->dst); } - if (unlikely(ctxt->emul_flags & X86EMUL_GUEST_MASK) && ctxt->intercept) { + if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && ctxt->intercept) { rc = emulator_check_intercept(ctxt, ctxt->intercept, X86_ICPT_PRE_EXCEPT); if (rc != X86EMUL_CONTINUE) @@ -5372,7 +5374,7 @@ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt) goto done; } - if (unlikely(ctxt->emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) { + if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) { rc = emulator_check_intercept(ctxt, ctxt->intercept, X86_ICPT_POST_EXCEPT); if (rc != X86EMUL_CONTINUE) @@ -5426,7 +5428,7 @@ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt) special_insn: - if (unlikely(ctxt->emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) { + if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) { rc = emulator_check_intercept(ctxt, ctxt->intercept, X86_ICPT_POST_MEMACCESS); if (rc != X86EMUL_CONTINUE) diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 8eb8a934b531..1049c3c9b877 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -3433,12 +3433,15 @@ static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn) return kvm_setup_async_pf(vcpu, gva, kvm_vcpu_gfn_to_hva(vcpu, gfn), &arch); } -static bool can_do_async_pf(struct kvm_vcpu *vcpu) +bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu) { if (unlikely(!lapic_in_kernel(vcpu) || kvm_event_needs_reinjection(vcpu))) return false; + if (is_guest_mode(vcpu)) + return false; + return kvm_x86_ops->interrupt_allowed(vcpu); } @@ -3454,7 +3457,7 @@ static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn, if (!async) return false; /* *pfn has correct page already */ - if (!prefault && can_do_async_pf(vcpu)) { + if (!prefault && kvm_can_do_async_pf(vcpu)) { trace_kvm_try_async_get_page(gva, gfn); if (kvm_find_async_pf_gfn(vcpu, gfn)) { trace_kvm_async_pf_doublefault(gva, gfn); diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h index 55ffb7b0f95e..e60fc80f8a9c 100644 --- a/arch/x86/kvm/mmu.h +++ b/arch/x86/kvm/mmu.h @@ -74,6 +74,7 @@ enum { int handle_mmio_page_fault(struct kvm_vcpu *vcpu, u64 addr, bool direct); void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu); void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly); +bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu); static inline unsigned int kvm_mmu_available_pages(struct kvm *kvm) { diff --git a/arch/x86/kvm/pmu_intel.c b/arch/x86/kvm/pmu_intel.c index ab38af4f4947..23a7c7ba377a 100644 --- a/arch/x86/kvm/pmu_intel.c +++ b/arch/x86/kvm/pmu_intel.c @@ -294,7 +294,7 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu) ((u64)1 << edx.split.bit_width_fixed) - 1; } - pmu->global_ctrl = ((1 << pmu->nr_arch_gp_counters) - 1) | + pmu->global_ctrl = ((1ull << pmu->nr_arch_gp_counters) - 1) | (((1ull << pmu->nr_arch_fixed_counters) - 1) << INTEL_PMC_IDX_FIXED); 
pmu->global_ctrl_mask = ~pmu->global_ctrl; diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 50ca8f409a7c..b12391119ce8 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -863,7 +863,6 @@ static unsigned long nested_ept_get_cr3(struct kvm_vcpu *vcpu); static u64 construct_eptp(unsigned long root_hpa); static void kvm_cpu_vmxon(u64 addr); static void kvm_cpu_vmxoff(void); -static bool vmx_mpx_supported(void); static bool vmx_xsaves_supported(void); static int vmx_cpu_uses_apicv(struct kvm_vcpu *vcpu); static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr); @@ -2264,7 +2263,7 @@ static int nested_vmx_check_exception(struct kvm_vcpu *vcpu, unsigned nr) if (!(vmcs12->exception_bitmap & (1u << nr))) return 0; - nested_vmx_vmexit(vcpu, to_vmx(vcpu)->exit_reason, + nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI, vmcs_read32(VM_EXIT_INTR_INFO), vmcs_readl(EXIT_QUALIFICATION)); return 1; @@ -2541,7 +2540,7 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx) VM_EXIT_LOAD_IA32_EFER | VM_EXIT_SAVE_IA32_EFER | VM_EXIT_SAVE_VMX_PREEMPTION_TIMER | VM_EXIT_ACK_INTR_ON_EXIT; - if (vmx_mpx_supported()) + if (kvm_mpx_supported()) vmx->nested.nested_vmx_exit_ctls_high |= VM_EXIT_CLEAR_BNDCFGS; /* We support free control of debug control saving. */ @@ -2562,7 +2561,7 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx) VM_ENTRY_LOAD_IA32_PAT; vmx->nested.nested_vmx_entry_ctls_high |= (VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR | VM_ENTRY_LOAD_IA32_EFER); - if (vmx_mpx_supported()) + if (kvm_mpx_supported()) vmx->nested.nested_vmx_entry_ctls_high |= VM_ENTRY_LOAD_BNDCFGS; /* We support free control of debug control loading. */ @@ -2813,7 +2812,8 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) msr_info->data = vmcs_readl(GUEST_SYSENTER_ESP); break; case MSR_IA32_BNDCFGS: - if (!vmx_mpx_supported()) + if (!kvm_mpx_supported() || + (!msr_info->host_initiated && !guest_cpuid_has_mpx(vcpu))) return 1; msr_info->data = vmcs_read64(GUEST_BNDCFGS); break; @@ -2890,7 +2890,11 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) vmcs_writel(GUEST_SYSENTER_ESP, data); break; case MSR_IA32_BNDCFGS: - if (!vmx_mpx_supported()) + if (!kvm_mpx_supported() || + (!msr_info->host_initiated && !guest_cpuid_has_mpx(vcpu))) + return 1; + if (is_noncanonical_address(data & PAGE_MASK) || + (data & MSR_IA32_BNDCFGS_RSVD)) return 1; vmcs_write64(GUEST_BNDCFGS, data); break; @@ -3363,7 +3367,7 @@ static void init_vmcs_shadow_fields(void) for (i = j = 0; i < max_shadow_read_write_fields; i++) { switch (shadow_read_write_fields[i]) { case GUEST_BNDCFGS: - if (!vmx_mpx_supported()) + if (!kvm_mpx_supported()) continue; break; default: @@ -6253,7 +6257,6 @@ static __init int hardware_setup(void) vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_CS, false); vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_ESP, false); vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_EIP, false); - vmx_disable_intercept_for_msr(MSR_IA32_BNDCFGS, true); memcpy(vmx_msr_bitmap_legacy_x2apic, vmx_msr_bitmap_legacy, PAGE_SIZE); @@ -10265,7 +10268,7 @@ static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, vmcs12->guest_sysenter_cs = vmcs_read32(GUEST_SYSENTER_CS); vmcs12->guest_sysenter_esp = vmcs_readl(GUEST_SYSENTER_ESP); vmcs12->guest_sysenter_eip = vmcs_readl(GUEST_SYSENTER_EIP); - if (vmx_mpx_supported()) + if (kvm_mpx_supported()) vmcs12->guest_bndcfgs = vmcs_read64(GUEST_BNDCFGS); if (nested_cpu_has_xsaves(vmcs12)) vmcs12->xss_exit_bitmap = 
vmcs_read64(XSS_EXIT_BITMAP); diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index ae2b9cd358f2..8e526c6fd784 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -4844,6 +4844,8 @@ static bool emulator_get_segment(struct x86_emulate_ctxt *ctxt, u16 *selector, if (var.unusable) { memset(desc, 0, sizeof(*desc)); + if (base3) + *base3 = 0; return false; } @@ -4999,6 +5001,16 @@ static void emulator_set_nmi_mask(struct x86_emulate_ctxt *ctxt, bool masked) kvm_x86_ops->set_nmi_mask(emul_to_vcpu(ctxt), masked); } +static unsigned emulator_get_hflags(struct x86_emulate_ctxt *ctxt) +{ + return emul_to_vcpu(ctxt)->arch.hflags; +} + +static void emulator_set_hflags(struct x86_emulate_ctxt *ctxt, unsigned emul_flags) +{ + kvm_set_hflags(emul_to_vcpu(ctxt), emul_flags); +} + static const struct x86_emulate_ops emulate_ops = { .read_gpr = emulator_read_gpr, .write_gpr = emulator_write_gpr, @@ -5038,6 +5050,8 @@ static const struct x86_emulate_ops emulate_ops = { .intercept = emulator_intercept, .get_cpuid = emulator_get_cpuid, .set_nmi_mask = emulator_set_nmi_mask, + .get_hflags = emulator_get_hflags, + .set_hflags = emulator_set_hflags, }; static void toggle_interruptibility(struct kvm_vcpu *vcpu, u32 mask) @@ -5090,7 +5104,6 @@ static void init_emulate_ctxt(struct kvm_vcpu *vcpu) BUILD_BUG_ON(HF_GUEST_MASK != X86EMUL_GUEST_MASK); BUILD_BUG_ON(HF_SMM_MASK != X86EMUL_SMM_MASK); BUILD_BUG_ON(HF_SMM_INSIDE_NMI_MASK != X86EMUL_SMM_INSIDE_NMI_MASK); - ctxt->emul_flags = vcpu->arch.hflags; init_decode_cache(ctxt); vcpu->arch.emulate_regs_need_sync_from_vcpu = false; @@ -5486,8 +5499,6 @@ restart: unsigned long rflags = kvm_x86_ops->get_rflags(vcpu); toggle_interruptibility(vcpu, ctxt->interruptibility); vcpu->arch.emulate_regs_need_sync_to_vcpu = false; - if (vcpu->arch.hflags != ctxt->emul_flags) - kvm_set_hflags(vcpu, ctxt->emul_flags); kvm_rip_write(vcpu, ctxt->eip); if (r == EMULATE_DONE) kvm_vcpu_check_singlestep(vcpu, rflags, &r); @@ -5974,7 +5985,8 @@ static int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt) kvm_x86_ops->patch_hypercall(vcpu, instruction); - return emulator_write_emulated(ctxt, rip, instruction, 3, NULL); + return emulator_write_emulated(ctxt, rip, instruction, 3, + &ctxt->exception); } static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu) @@ -8245,8 +8257,7 @@ bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu) if (!(vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED)) return true; else - return !kvm_event_needs_reinjection(vcpu) && - kvm_x86_ops->interrupt_allowed(vcpu); + return kvm_can_do_async_pf(vcpu); } void kvm_arch_start_assignment(struct kvm *kvm) diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S index 27f89c79a44b..423644c230e7 100644 --- a/arch/x86/lib/copy_user_64.S +++ b/arch/x86/lib/copy_user_64.S @@ -80,7 +80,7 @@ ENTRY(copy_user_generic_unrolled) movl %edx,%ecx andl $63,%edx shrl $6,%ecx - jz 17f + jz .L_copy_short_string 1: movq (%rsi),%r8 2: movq 1*8(%rsi),%r9 3: movq 2*8(%rsi),%r10 @@ -101,7 +101,8 @@ ENTRY(copy_user_generic_unrolled) leaq 64(%rdi),%rdi decl %ecx jnz 1b -17: movl %edx,%ecx +.L_copy_short_string: + movl %edx,%ecx andl $7,%edx shrl $3,%ecx jz 20f @@ -215,6 +216,8 @@ ENDPROC(copy_user_generic_string) */ ENTRY(copy_user_enhanced_fast_string) ASM_STAC + cmpl $64,%edx + jb .L_copy_short_string /* less then 64 bytes, avoid the costly 'rep' */ movl %edx,%ecx 1: rep movsb diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c index 42982b26e32b..39bdaf3ac44a 100644 --- 
a/arch/x86/mm/hugetlbpage.c +++ b/arch/x86/mm/hugetlbpage.c @@ -144,7 +144,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr, addr = ALIGN(addr, huge_page_size(h)); vma = find_vma(mm, addr); if (TASK_SIZE - len >= addr && - (!vma || addr + len <= vma->vm_start)) + (!vma || addr + len <= vm_start_gap(vma))) return addr; } if (mm->get_unmapped_area == arch_get_unmapped_area) diff --git a/arch/x86/mm/mpx.c b/arch/x86/mm/mpx.c index ef05755a1900..7ed47b1e6f42 100644 --- a/arch/x86/mm/mpx.c +++ b/arch/x86/mm/mpx.c @@ -293,7 +293,7 @@ siginfo_t *mpx_generate_siginfo(struct pt_regs *regs) * We were not able to extract an address from the instruction, * probably because there was something invalid in it. */ - if (info->si_addr == (void *)-1) { + if (info->si_addr == (void __user *)-1) { err = -EINVAL; goto err_out; } @@ -525,15 +525,7 @@ int mpx_handle_bd_fault(void) if (!kernel_managing_mpx_tables(current->mm)) return -EINVAL; - if (do_mpx_bt_fault()) { - force_sig(SIGSEGV, current); - /* - * The force_sig() is essentially "handling" this - * exception, so we do not pass up the error - * from do_mpx_bt_fault(). - */ - } - return 0; + return do_mpx_bt_fault(); } /* diff --git a/arch/x86/mm/numa_32.c b/arch/x86/mm/numa_32.c index 47b6436e41c2..3686a1db25b2 100644 --- a/arch/x86/mm/numa_32.c +++ b/arch/x86/mm/numa_32.c @@ -100,5 +100,6 @@ void __init initmem_init(void) printk(KERN_DEBUG "High memory starts at vaddr %08lx\n", (ulong) pfn_to_kaddr(highstart_pfn)); + __vmalloc_start_set = true; setup_bootmem_allocator(); } diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c index 6ad687d104ca..3f1bb4f93a5a 100644 --- a/arch/x86/mm/pat.c +++ b/arch/x86/mm/pat.c @@ -36,14 +36,14 @@ #undef pr_fmt #define pr_fmt(fmt) "" fmt -static bool boot_cpu_done; - -static int __read_mostly __pat_enabled = IS_ENABLED(CONFIG_X86_PAT); -static void init_cache_modes(void); +static bool __read_mostly boot_cpu_done; +static bool __read_mostly pat_disabled = !IS_ENABLED(CONFIG_X86_PAT); +static bool __read_mostly pat_initialized; +static bool __read_mostly init_cm_done; void pat_disable(const char *reason) { - if (!__pat_enabled) + if (pat_disabled) return; if (boot_cpu_done) { @@ -51,10 +51,8 @@ void pat_disable(const char *reason) return; } - __pat_enabled = 0; + pat_disabled = true; pr_info("x86/PAT: %s\n", reason); - - init_cache_modes(); } static int __init nopat(char *str) @@ -66,7 +64,7 @@ early_param("nopat", nopat); bool pat_enabled(void) { - return !!__pat_enabled; + return pat_initialized; } EXPORT_SYMBOL_GPL(pat_enabled); @@ -204,6 +202,8 @@ static void __init_cache_modes(u64 pat) update_cache_mode_entry(i, cache); } pr_info("x86/PAT: Configuration [0-7]: %s\n", pat_msg); + + init_cm_done = true; } #define PAT(x, y) ((u64)PAT_ ## y << ((x)*8)) @@ -224,6 +224,7 @@ static void pat_bsp_init(u64 pat) } wrmsrl(MSR_IA32_CR_PAT, pat); + pat_initialized = true; __init_cache_modes(pat); } @@ -241,10 +242,9 @@ static void pat_ap_init(u64 pat) wrmsrl(MSR_IA32_CR_PAT, pat); } -static void init_cache_modes(void) +void init_cache_modes(void) { u64 pat = 0; - static int init_cm_done; if (init_cm_done) return; @@ -286,8 +286,6 @@ static void init_cache_modes(void) } __init_cache_modes(pat); - - init_cm_done = 1; } /** @@ -305,10 +303,8 @@ void pat_init(void) u64 pat; struct cpuinfo_x86 *c = &boot_cpu_data; - if (!pat_enabled()) { - init_cache_modes(); + if (pat_disabled) return; - } if ((c->x86_vendor == X86_VENDOR_INTEL) && (((c->x86 == 0x6) && (c->x86_model <= 0xd)) || diff --git a/arch/x86/mm/tlb.c 
b/arch/x86/mm/tlb.c index 5fb6adaaa796..5a760fd66bec 100644 --- a/arch/x86/mm/tlb.c +++ b/arch/x86/mm/tlb.c @@ -134,8 +134,6 @@ void native_flush_tlb_others(const struct cpumask *cpumask, { struct flush_tlb_info info; - if (end == 0) - end = start + PAGE_SIZE; info.flush_mm = mm; info.flush_start = start; info.flush_end = end; @@ -264,7 +262,7 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long start) } if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids) - flush_tlb_others(mm_cpumask(mm), mm, start, 0UL); + flush_tlb_others(mm_cpumask(mm), mm, start, start + PAGE_SIZE); preempt_enable(); } diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c index 0c2fae8d929d..73eb7fd4aec4 100644 --- a/arch/x86/tools/relocs.c +++ b/arch/x86/tools/relocs.c @@ -992,11 +992,12 @@ static void emit_relocs(int as_text, int use_real_mode) die("Segment relocations found but --realmode not specified\n"); /* Order the relocations for more efficient processing */ - sort_relocs(&relocs16); sort_relocs(&relocs32); #if ELF_BITS == 64 sort_relocs(&relocs32neg); sort_relocs(&relocs64); +#else + sort_relocs(&relocs16); #endif /* Print the relocations */ diff --git a/arch/xtensa/include/asm/irq.h b/arch/xtensa/include/asm/irq.h index f71f88ea7646..19707db966f1 100644 --- a/arch/xtensa/include/asm/irq.h +++ b/arch/xtensa/include/asm/irq.h @@ -29,7 +29,8 @@ static inline void variant_irq_disable(unsigned int irq) { } # define PLATFORM_NR_IRQS 0 #endif #define XTENSA_NR_IRQS XCHAL_NUM_INTERRUPTS -#define NR_IRQS (XTENSA_NR_IRQS + VARIANT_NR_IRQS + PLATFORM_NR_IRQS) +#define NR_IRQS (XTENSA_NR_IRQS + VARIANT_NR_IRQS + PLATFORM_NR_IRQS + 1) +#define XTENSA_PIC_LINUX_IRQ(hwirq) ((hwirq) + 1) #if VARIANT_NR_IRQS == 0 static inline void variant_init_irq(void) { } diff --git a/arch/xtensa/kernel/irq.c b/arch/xtensa/kernel/irq.c index 4ac3d23161cf..441694464b1e 100644 --- a/arch/xtensa/kernel/irq.c +++ b/arch/xtensa/kernel/irq.c @@ -34,11 +34,6 @@ asmlinkage void do_IRQ(int hwirq, struct pt_regs *regs) { int irq = irq_find_mapping(NULL, hwirq); - if (hwirq >= NR_IRQS) { - printk(KERN_EMERG "%s: cannot handle IRQ %d\n", - __func__, hwirq); - } - #ifdef CONFIG_DEBUG_STACKOVERFLOW /* Debugging check for stack overflow: is there less than 1KB free? */ { diff --git a/arch/xtensa/kernel/syscall.c b/arch/xtensa/kernel/syscall.c index 83cf49685373..3aaaae18417c 100644 --- a/arch/xtensa/kernel/syscall.c +++ b/arch/xtensa/kernel/syscall.c @@ -87,7 +87,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, /* At this point: (!vmm || addr < vmm->vm_end). */ if (TASK_SIZE - len < addr) return -ENOMEM; - if (!vmm || addr + len <= vmm->vm_start) + if (!vmm || addr + len <= vm_start_gap(vmm)) return addr; addr = vmm->vm_end; if (flags & MAP_SHARED) diff --git a/arch/xtensa/platforms/xtfpga/include/platform/hardware.h b/arch/xtensa/platforms/xtfpga/include/platform/hardware.h index dbeea2b440a1..1fda7e20dfcb 100644 --- a/arch/xtensa/platforms/xtfpga/include/platform/hardware.h +++ b/arch/xtensa/platforms/xtfpga/include/platform/hardware.h @@ -24,16 +24,18 @@ /* Interrupt configuration. */ -#define PLATFORM_NR_IRQS 10 +#define PLATFORM_NR_IRQS 0 /* Default assignment of LX60 devices to external interrupts. 
*/ #ifdef CONFIG_XTENSA_MX #define DUART16552_INTNUM XCHAL_EXTINT3_NUM #define OETH_IRQ XCHAL_EXTINT4_NUM +#define C67X00_IRQ XCHAL_EXTINT8_NUM #else #define DUART16552_INTNUM XCHAL_EXTINT0_NUM #define OETH_IRQ XCHAL_EXTINT1_NUM +#define C67X00_IRQ XCHAL_EXTINT5_NUM #endif /* @@ -63,5 +65,5 @@ #define C67X00_PADDR (XCHAL_KIO_PADDR + 0x0D0D0000) #define C67X00_SIZE 0x10 -#define C67X00_IRQ 5 + #endif /* __XTENSA_XTAVNET_HARDWARE_H */ diff --git a/arch/xtensa/platforms/xtfpga/setup.c b/arch/xtensa/platforms/xtfpga/setup.c index e9f65f79cf2e..d1e9439fad45 100644 --- a/arch/xtensa/platforms/xtfpga/setup.c +++ b/arch/xtensa/platforms/xtfpga/setup.c @@ -209,8 +209,8 @@ static struct resource ethoc_res[] = { .flags = IORESOURCE_MEM, }, [2] = { /* IRQ number */ - .start = OETH_IRQ, - .end = OETH_IRQ, + .start = XTENSA_PIC_LINUX_IRQ(OETH_IRQ), + .end = XTENSA_PIC_LINUX_IRQ(OETH_IRQ), .flags = IORESOURCE_IRQ, }, }; @@ -246,8 +246,8 @@ static struct resource c67x00_res[] = { .flags = IORESOURCE_MEM, }, [1] = { /* IRQ number */ - .start = C67X00_IRQ, - .end = C67X00_IRQ, + .start = XTENSA_PIC_LINUX_IRQ(C67X00_IRQ), + .end = XTENSA_PIC_LINUX_IRQ(C67X00_IRQ), .flags = IORESOURCE_IRQ, }, }; @@ -280,7 +280,7 @@ static struct resource serial_resource = { static struct plat_serial8250_port serial_platform_data[] = { [0] = { .mapbase = DUART16552_PADDR, - .irq = DUART16552_INTNUM, + .irq = XTENSA_PIC_LINUX_IRQ(DUART16552_INTNUM), .flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST | UPF_IOREMAP, .iotype = UPIO_MEM32, diff --git a/block/partitions/msdos.c b/block/partitions/msdos.c index 93e7c1b32edd..5610cd537da7 100644 --- a/block/partitions/msdos.c +++ b/block/partitions/msdos.c @@ -300,6 +300,8 @@ static void parse_bsd(struct parsed_partitions *state, continue; bsd_start = le32_to_cpu(p->p_offset); bsd_size = le32_to_cpu(p->p_size); + if (memcmp(flavour, "bsd\0", 4) == 0) + bsd_start += offset; if (offset == bsd_start && size == bsd_size) /* full parent partition, we have it already */ continue; diff --git a/crypto/authencesn.c b/crypto/authencesn.c index 0c0468869e25..52154ef21b5e 100644 --- a/crypto/authencesn.c +++ b/crypto/authencesn.c @@ -245,6 +245,9 @@ static int crypto_authenc_esn_decrypt_tail(struct aead_request *req, u8 *ihash = ohash + crypto_ahash_digestsize(auth); u32 tmp[2]; + if (!authsize) + goto decrypt; + /* Move high-order bits of sequence number back. 
*/ scatterwalk_map_and_copy(tmp, dst, 4, 4, 0); scatterwalk_map_and_copy(tmp + 1, dst, assoclen + cryptlen, 4, 0); @@ -253,6 +256,8 @@ static int crypto_authenc_esn_decrypt_tail(struct aead_request *req, if (crypto_memneq(ihash, ohash, authsize)) return -EBADMSG; +decrypt: + sg_init_table(areq_ctx->dst, 2); dst = scatterwalk_ffwd(areq_ctx->dst, dst, assoclen); diff --git a/crypto/gcm.c b/crypto/gcm.c index 1238b3c5a321..0a12c09d7cb2 100644 --- a/crypto/gcm.c +++ b/crypto/gcm.c @@ -152,10 +152,8 @@ static int crypto_gcm_setkey(struct crypto_aead *aead, const u8 *key, err = crypto_ablkcipher_encrypt(&data->req); if (err == -EINPROGRESS || err == -EBUSY) { - err = wait_for_completion_interruptible( - &data->result.completion); - if (!err) - err = data->result.err; + wait_for_completion(&data->result.completion); + err = data->result.err; } if (err) diff --git a/drivers/acpi/glue.c b/drivers/acpi/glue.c index 5ea5dc219f56..73c9c7fa9001 100644 --- a/drivers/acpi/glue.c +++ b/drivers/acpi/glue.c @@ -98,7 +98,15 @@ static int find_child_checks(struct acpi_device *adev, bool check_children) if (check_children && list_empty(&adev->children)) return -ENODEV; - return sta_present ? FIND_CHILD_MAX_SCORE : FIND_CHILD_MIN_SCORE; + /* + * If the device has a _HID (or _CID) returning a valid ACPI/PNP + * device ID, it is better to make it look less attractive here, so that + * the other device with the same _ADR value (that may not have a valid + * device ID) can be matched going forward. [This means a second spec + * violation in a row, so whatever we do here is best effort anyway.] + */ + return sta_present && list_empty(&adev->pnp.ids) ? + FIND_CHILD_MAX_SCORE : FIND_CHILD_MIN_SCORE; } struct acpi_device *acpi_find_child_device(struct acpi_device *parent, diff --git a/drivers/android/Makefile b/drivers/android/Makefile index 3b7e4b072c58..4b7c726bb560 100644 --- a/drivers/android/Makefile +++ b/drivers/android/Makefile @@ -1,3 +1,3 @@ ccflags-y += -I$(src) # needed for trace events -obj-$(CONFIG_ANDROID_BINDER_IPC) += binder.o +obj-$(CONFIG_ANDROID_BINDER_IPC) += binder.o binder_alloc.o diff --git a/drivers/android/binder.c b/drivers/android/binder.c index d1490be45c67..71ebe36577c6 100644 --- a/drivers/android/binder.c +++ b/drivers/android/binder.c @@ -15,17 +15,49 @@ * */ +/* + * Locking overview + * + * There are 3 main spinlocks which must be acquired in the + * order shown: + * + * 1) proc->outer_lock : protects binder_ref + * binder_proc_lock() and binder_proc_unlock() are + * used to acq/rel. + * 2) node->lock : protects most fields of binder_node. + * binder_node_lock() and binder_node_unlock() are + * used to acq/rel + * 3) proc->inner_lock : protects the thread and node lists + * (proc->threads, proc->waiting_threads, proc->nodes) + * and all todo lists associated with the binder_proc + * (proc->todo, thread->todo, proc->delivered_death and + * node->async_todo), as well as thread->transaction_stack + * binder_inner_proc_lock() and binder_inner_proc_unlock() + * are used to acq/rel + * + * Any lock under procA must never be nested under any lock at the same + * level or below on procB. + * + * Functions that require a lock held on entry indicate which lock + * in the suffix of the function name: + * + * foo_olocked() : requires node->outer_lock + * foo_nlocked() : requires node->lock + * foo_ilocked() : requires proc->inner_lock + * foo_oilocked(): requires proc->outer_lock and proc->inner_lock + * foo_nilocked(): requires node->lock and proc->inner_lock + * ... 
+ */ + #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include -#include #include #include #include #include #include #include -#include #include #include #include @@ -35,23 +67,32 @@ #include #include #include -#include -#include #include #include +#include #ifdef CONFIG_ANDROID_BINDER_IPC_32BIT #define BINDER_IPC_32BIT 1 #endif #include +#include "binder_alloc.h" #include "binder_trace.h" +static HLIST_HEAD(binder_deferred_list); +static DEFINE_MUTEX(binder_deferred_lock); + static HLIST_HEAD(binder_devices); +static HLIST_HEAD(binder_procs); +static DEFINE_MUTEX(binder_procs_lock); + +static HLIST_HEAD(binder_dead_nodes); +static DEFINE_SPINLOCK(binder_dead_nodes_lock); static struct dentry *binder_debugfs_dir_entry_root; static struct dentry *binder_debugfs_dir_entry_proc; -atomic_t binder_last_id; +static atomic_t binder_last_id; +static struct workqueue_struct *binder_deferred_workqueue; #define BINDER_DEBUG_ENTRY(name) \ static int binder_##name##_open(struct inode *inode, struct file *file) \ @@ -97,17 +138,13 @@ enum { BINDER_DEBUG_TRANSACTION_COMPLETE = 1U << 10, BINDER_DEBUG_FREE_BUFFER = 1U << 11, BINDER_DEBUG_INTERNAL_REFS = 1U << 12, - BINDER_DEBUG_BUFFER_ALLOC = 1U << 13, - BINDER_DEBUG_PRIORITY_CAP = 1U << 14, - BINDER_DEBUG_BUFFER_ALLOC_ASYNC = 1U << 15, + BINDER_DEBUG_PRIORITY_CAP = 1U << 13, + BINDER_DEBUG_SPINLOCKS = 1U << 14, }; static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR | BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION; module_param_named(debug_mask, binder_debug_mask, uint, S_IWUSR | S_IRUGO); -static bool binder_debug_no_lock; -module_param_named(proc_no_lock, binder_debug_no_lock, bool, S_IWUSR | S_IRUGO); - static char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES; module_param_named(devices, binder_devices_param, charp, S_IRUGO); @@ -164,30 +201,27 @@ enum binder_stat_types { }; struct binder_stats { - int br[_IOC_NR(BR_FAILED_REPLY) + 1]; - int bc[_IOC_NR(BC_REPLY_SG) + 1]; -}; - -/* These are still global, since it's not always easy to get the context */ -struct binder_obj_stats { + atomic_t br[_IOC_NR(BR_FAILED_REPLY) + 1]; + atomic_t bc[_IOC_NR(BC_REPLY_SG) + 1]; atomic_t obj_created[BINDER_STAT_COUNT]; atomic_t obj_deleted[BINDER_STAT_COUNT]; }; -static struct binder_obj_stats binder_obj_stats; +static struct binder_stats binder_stats; static inline void binder_stats_deleted(enum binder_stat_types type) { - atomic_inc(&binder_obj_stats.obj_deleted[type]); + atomic_inc(&binder_stats.obj_deleted[type]); } static inline void binder_stats_created(enum binder_stat_types type) { - atomic_inc(&binder_obj_stats.obj_created[type]); + atomic_inc(&binder_stats.obj_created[type]); } struct binder_transaction_log_entry { int debug_id; + int debug_id_done; int call_type; int from_proc; int from_thread; @@ -197,48 +231,45 @@ struct binder_transaction_log_entry { int to_node; int data_size; int offsets_size; + int return_error_line; + uint32_t return_error; + uint32_t return_error_param; const char *context_name; }; struct binder_transaction_log { - int next; - int full; + atomic_t cur; + bool full; struct binder_transaction_log_entry entry[32]; }; +static struct binder_transaction_log binder_transaction_log; +static struct binder_transaction_log binder_transaction_log_failed; static struct binder_transaction_log_entry *binder_transaction_log_add( struct binder_transaction_log *log) { struct binder_transaction_log_entry *e; + unsigned int cur = atomic_inc_return(&log->cur); - e = &log->entry[log->next]; - memset(e, 0, sizeof(*e)); - 
log->next++; - if (log->next == ARRAY_SIZE(log->entry)) { - log->next = 0; + if (cur >= ARRAY_SIZE(log->entry)) log->full = 1; - } + e = &log->entry[cur % ARRAY_SIZE(log->entry)]; + WRITE_ONCE(e->debug_id_done, 0); + /* + * write-barrier to synchronize access to e->debug_id_done. + * We make sure the initialized 0 value is seen before + * memset() other fields are zeroed by memset. + */ + smp_wmb(); + memset(e, 0, sizeof(*e)); return e; } struct binder_context { struct binder_node *binder_context_mgr_node; + struct mutex context_mgr_node_lock; + kuid_t binder_context_mgr_uid; const char *name; - - struct mutex binder_main_lock; - struct mutex binder_deferred_lock; - struct mutex binder_mmap_lock; - - struct hlist_head binder_procs; - struct hlist_head binder_dead_nodes; - struct hlist_head binder_deferred_list; - - struct work_struct deferred_work; - struct workqueue_struct *binder_deferred_workqueue; - struct binder_transaction_log transaction_log; - struct binder_transaction_log transaction_log_failed; - - struct binder_stats binder_stats; }; struct binder_device { @@ -247,11 +278,20 @@ struct binder_device { struct binder_context context; }; +/** + * struct binder_work - work enqueued on a worklist + * @entry: node enqueued on list + * @type: type of work to be performed + * + * There are separate work lists for proc, thread, and node (async). + */ struct binder_work { struct list_head entry; + enum { BINDER_WORK_TRANSACTION = 1, BINDER_WORK_TRANSACTION_COMPLETE, + BINDER_WORK_RETURN_ERROR, BINDER_WORK_NODE, BINDER_WORK_DEAD_BINDER, BINDER_WORK_DEAD_BINDER_AND_CLEAR, @@ -259,8 +299,76 @@ struct binder_work { } type; }; +struct binder_error { + struct binder_work work; + uint32_t cmd; +}; + +/** + * struct binder_node - binder node bookkeeping + * @debug_id: unique ID for debugging + * (invariant after initialized) + * @lock: lock for node fields + * @work: worklist element for node work + * (protected by @proc->inner_lock) + * @rb_node: element for proc->nodes tree + * (protected by @proc->inner_lock) + * @dead_node: element for binder_dead_nodes list + * (protected by binder_dead_nodes_lock) + * @proc: binder_proc that owns this node + * (invariant after initialized) + * @refs: list of references on this node + * (protected by @lock) + * @internal_strong_refs: used to take strong references when + * initiating a transaction + * (protected by @proc->inner_lock if @proc + * and by @lock) + * @local_weak_refs: weak user refs from local process + * (protected by @proc->inner_lock if @proc + * and by @lock) + * @local_strong_refs: strong user refs from local process + * (protected by @proc->inner_lock if @proc + * and by @lock) + * @tmp_refs: temporary kernel refs + * (protected by @proc->inner_lock while @proc + * is valid, and by binder_dead_nodes_lock + * if @proc is NULL. 
During inc/dec and node release + * it is also protected by @lock to provide safety + * as the node dies and @proc becomes NULL) + * @ptr: userspace pointer for node + * (invariant, no lock needed) + * @cookie: userspace cookie for node + * (invariant, no lock needed) + * @has_strong_ref: userspace notified of strong ref + * (protected by @proc->inner_lock if @proc + * and by @lock) + * @pending_strong_ref: userspace has acked notification of strong ref + * (protected by @proc->inner_lock if @proc + * and by @lock) + * @has_weak_ref: userspace notified of weak ref + * (protected by @proc->inner_lock if @proc + * and by @lock) + * @pending_weak_ref: userspace has acked notification of weak ref + * (protected by @proc->inner_lock if @proc + * and by @lock) + * @has_async_transaction: async transaction to node in progress + * (protected by @lock) + * @sched_policy: minimum scheduling policy for node + * (invariant after initialized) + * @accept_fds: file descriptor operations supported for node + * (invariant after initialized) + * @min_priority: minimum scheduling priority + * (invariant after initialized) + * @inherit_rt: inherit RT scheduling policy from caller + * (invariant after initialized) + * @async_todo: list of async work items + * (protected by @proc->inner_lock) + * + * Bookkeeping structure for binder nodes. + */ struct binder_node { int debug_id; + spinlock_t lock; struct binder_work work; union { struct rb_node rb_node; @@ -271,88 +379,185 @@ struct binder_node { int internal_strong_refs; int local_weak_refs; int local_strong_refs; + int tmp_refs; binder_uintptr_t ptr; binder_uintptr_t cookie; - unsigned has_strong_ref:1; - unsigned pending_strong_ref:1; - unsigned has_weak_ref:1; - unsigned pending_weak_ref:1; - unsigned has_async_transaction:1; - unsigned accept_fds:1; - unsigned min_priority:8; + struct { + /* + * bitfield elements protected by + * proc inner_lock + */ + u8 has_strong_ref:1; + u8 pending_strong_ref:1; + u8 has_weak_ref:1; + u8 pending_weak_ref:1; + }; + struct { + /* + * invariant after initialization + */ + u8 sched_policy:2; + u8 inherit_rt:1; + u8 accept_fds:1; + u8 min_priority; + }; + bool has_async_transaction; struct list_head async_todo; }; struct binder_ref_death { + /** + * @work: worklist element for death notifications + * (protected by inner_lock of the proc that + * this ref belongs to) + */ struct binder_work work; binder_uintptr_t cookie; }; +/** + * struct binder_ref_data - binder_ref counts and id + * @debug_id: unique ID for the ref + * @desc: unique userspace handle for ref + * @strong: strong ref count (debugging only if not locked) + * @weak: weak ref count (debugging only if not locked) + * + * Structure to hold ref count and ref id information. Since + * the actual ref can only be accessed with a lock, this structure + * is used to return information about the ref to callers of + * ref inc/dec functions. + */ +struct binder_ref_data { + int debug_id; + uint32_t desc; + int strong; + int weak; +}; + +/** + * struct binder_ref - struct to track references on nodes + * @data: binder_ref_data containing id, handle, and current refcounts + * @rb_node_desc: node for lookup by @data.desc in proc's rb_tree + * @rb_node_node: node for lookup by @node in proc's rb_tree + * @node_entry: list entry for node->refs list in target node + * (protected by @node->lock) + * @proc: binder_proc containing ref + * @node: binder_node of target node. 
When cleaning up a + * ref for deletion in binder_cleanup_ref, a non-NULL + * @node indicates the node must be freed + * @death: pointer to death notification (ref_death) if requested + * (protected by @node->lock) + * + * Structure to track references from procA to target node (on procB). This + * structure is unsafe to access without holding @proc->outer_lock. + */ struct binder_ref { /* Lookups needed: */ /* node + proc => ref (transaction) */ /* desc + proc => ref (transaction, inc/dec ref) */ /* node => refs + procs (proc exit) */ - int debug_id; + struct binder_ref_data data; struct rb_node rb_node_desc; struct rb_node rb_node_node; struct hlist_node node_entry; struct binder_proc *proc; struct binder_node *node; - uint32_t desc; - int strong; - int weak; struct binder_ref_death *death; }; -struct binder_buffer { - struct list_head entry; /* free and allocated entries by address */ - struct rb_node rb_node; /* free entry by size or allocated entry */ - /* by address */ - unsigned free:1; - unsigned allow_user_free:1; - unsigned async_transaction:1; - unsigned debug_id:29; - - struct binder_transaction *transaction; - - struct binder_node *target_node; - size_t data_size; - size_t offsets_size; - size_t extra_buffers_size; - uint8_t data[0]; -}; - enum binder_deferred_state { BINDER_DEFERRED_PUT_FILES = 0x01, BINDER_DEFERRED_FLUSH = 0x02, BINDER_DEFERRED_RELEASE = 0x04, }; +/** + * struct binder_priority - scheduler policy and priority + * @sched_policy scheduler policy + * @prio [100..139] for SCHED_NORMAL, [0..99] for FIFO/RT + * + * The binder driver supports inheriting the following scheduler policies: + * SCHED_NORMAL + * SCHED_BATCH + * SCHED_FIFO + * SCHED_RR + */ +struct binder_priority { + unsigned int sched_policy; + int prio; +}; + +/** + * struct binder_proc - binder process bookkeeping + * @proc_node: element for binder_procs list + * @threads: rbtree of binder_threads in this proc + * (protected by @inner_lock) + * @nodes: rbtree of binder nodes associated with + * this proc ordered by node->ptr + * (protected by @inner_lock) + * @refs_by_desc: rbtree of refs ordered by ref->desc + * (protected by @outer_lock) + * @refs_by_node: rbtree of refs ordered by ref->node + * (protected by @outer_lock) + * @waiting_threads: threads currently waiting for proc work + * (protected by @inner_lock) + * @pid PID of group_leader of process + * (invariant after initialized) + * @tsk task_struct for group_leader of process + * (invariant after initialized) + * @files files_struct for process + * (invariant after initialized) + * @deferred_work_node: element for binder_deferred_list + * (protected by binder_deferred_lock) + * @deferred_work: bitmap of deferred work to perform + * (protected by binder_deferred_lock) + * @is_dead: process is dead and awaiting free + * when outstanding transactions are cleaned up + * (protected by @inner_lock) + * @todo: list of work for this process + * (protected by @inner_lock) + * @wait: wait queue head to wait for proc work + * (invariant after initialized) + * @stats: per-process binder statistics + * (atomics, no lock needed) + * @delivered_death: list of delivered death notification + * (protected by @inner_lock) + * @max_threads: cap on number of binder threads + * (protected by @inner_lock) + * @requested_threads: number of binder threads requested but not + * yet started. In current implementation, can + * only be 0 or 1. 
+ * (protected by @inner_lock) + * @requested_threads_started: number binder threads started + * (protected by @inner_lock) + * @tmp_ref: temporary reference to indicate proc is in use + * (protected by @inner_lock) + * @default_priority: default scheduler priority + * (invariant after initialized) + * @debugfs_entry: debugfs node + * @alloc: binder allocator bookkeeping + * @context: binder_context for this proc + * (invariant after initialized) + * @inner_lock: can nest under outer_lock and/or node lock + * @outer_lock: no nesting under innor or node lock + * Lock order: 1) outer, 2) node, 3) inner + * + * Bookkeeping structure for binder processes + */ struct binder_proc { struct hlist_node proc_node; struct rb_root threads; struct rb_root nodes; struct rb_root refs_by_desc; struct rb_root refs_by_node; + struct list_head waiting_threads; int pid; - struct vm_area_struct *vma; - struct mm_struct *vma_vm_mm; struct task_struct *tsk; struct files_struct *files; struct hlist_node deferred_work_node; int deferred_work; - void *buffer; - ptrdiff_t user_buffer_offset; + bool is_dead; - struct list_head buffers; - struct rb_root free_buffers; - struct rb_root allocated_buffers; - size_t free_async_space; - - struct page **pages; - size_t buffer_size; - uint32_t buffer_free; struct list_head todo; wait_queue_head_t wait; struct binder_stats stats; @@ -360,10 +565,13 @@ struct binder_proc { int max_threads; int requested_threads; int requested_threads_started; - int ready_threads; - long default_priority; + int tmp_ref; + struct binder_priority default_priority; struct dentry *debugfs_entry; + struct binder_alloc alloc; struct binder_context *context; + spinlock_t inner_lock; + spinlock_t outer_lock; }; enum { @@ -372,22 +580,60 @@ enum { BINDER_LOOPER_STATE_EXITED = 0x04, BINDER_LOOPER_STATE_INVALID = 0x08, BINDER_LOOPER_STATE_WAITING = 0x10, - BINDER_LOOPER_STATE_NEED_RETURN = 0x20 + BINDER_LOOPER_STATE_POLL = 0x20, }; +/** + * struct binder_thread - binder thread bookkeeping + * @proc: binder process for this thread + * (invariant after initialization) + * @rb_node: element for proc->threads rbtree + * (protected by @proc->inner_lock) + * @waiting_thread_node: element for @proc->waiting_threads list + * (protected by @proc->inner_lock) + * @pid: PID for this thread + * (invariant after initialization) + * @looper: bitmap of looping state + * (only accessed by this thread) + * @looper_needs_return: looping thread needs to exit driver + * (no lock needed) + * @transaction_stack: stack of in-progress transactions for this thread + * (protected by @proc->inner_lock) + * @todo: list of work to do for this thread + * (protected by @proc->inner_lock) + * @return_error: transaction errors reported by this thread + * (only accessed by this thread) + * @reply_error: transaction errors reported by target thread + * (protected by @proc->inner_lock) + * @wait: wait queue for thread work + * @stats: per-thread statistics + * (atomics, no lock needed) + * @tmp_ref: temporary reference to indicate thread is in use + * (atomic since @proc->inner_lock cannot + * always be acquired) + * @is_dead: thread is dead and awaiting free + * when outstanding transactions are cleaned up + * (protected by @proc->inner_lock) + * @task: struct task_struct for this thread + * + * Bookkeeping structure for binder threads. 
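+ *
+ * Illustrative sketch (not a verbatim excerpt from this patch): most
+ * binder_thread fields are only stable while the owning proc's inner
+ * lock is held, so a caller checking for pending thread work does
+ * roughly what binder_has_work() below does:
+ *
+ *   binder_inner_proc_lock(thread->proc);
+ *   idle = binder_worklist_empty_ilocked(&thread->todo);
+ *   binder_inner_proc_unlock(thread->proc);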
+ */ struct binder_thread { struct binder_proc *proc; struct rb_node rb_node; + struct list_head waiting_thread_node; int pid; - int looper; + int looper; /* only modified by this thread */ + bool looper_need_return; /* can be written by other thread */ struct binder_transaction *transaction_stack; struct list_head todo; - uint32_t return_error; /* Write failed, return error code in read buf */ - uint32_t return_error2; /* Write failed, return error code in read */ - /* buffer. Used when sending a reply to a dead process that */ - /* we are also waiting on */ + struct binder_error return_error; + struct binder_error reply_error; wait_queue_head_t wait; struct binder_stats stats; + atomic_t tmp_ref; + bool is_dead; + struct task_struct *task; }; struct binder_transaction { @@ -404,20 +650,263 @@ struct binder_transaction { struct binder_buffer *buffer; unsigned int code; unsigned int flags; - long priority; - long saved_priority; + struct binder_priority priority; + struct binder_priority saved_priority; + bool set_priority_called; kuid_t sender_euid; + /** + * @lock: protects @from, @to_proc, and @to_thread + * + * @from, @to_proc, and @to_thread can be set to NULL + * during thread teardown + */ + spinlock_t lock; }; +/** + * binder_proc_lock() - Acquire outer lock for given binder_proc + * @proc: struct binder_proc to acquire + * + * Acquires proc->outer_lock. Used to protect binder_ref + * structures associated with the given proc. + */ +#define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__) +static void +_binder_proc_lock(struct binder_proc *proc, int line) +{ + binder_debug(BINDER_DEBUG_SPINLOCKS, + "%s: line=%d\n", __func__, line); + spin_lock(&proc->outer_lock); +} + +/** + * binder_proc_unlock() - Release spinlock for given binder_proc + * @proc: struct binder_proc to acquire + * + * Release lock acquired via binder_proc_lock() + */ +#define binder_proc_unlock(_proc) _binder_proc_unlock(_proc, __LINE__) +static void +_binder_proc_unlock(struct binder_proc *proc, int line) +{ + binder_debug(BINDER_DEBUG_SPINLOCKS, + "%s: line=%d\n", __func__, line); + spin_unlock(&proc->outer_lock); +} + +/** + * binder_inner_proc_lock() - Acquire inner lock for given binder_proc + * @proc: struct binder_proc to acquire + * + * Acquires proc->inner_lock. Used to protect todo lists + */ +#define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__) +static void +_binder_inner_proc_lock(struct binder_proc *proc, int line) +{ + binder_debug(BINDER_DEBUG_SPINLOCKS, + "%s: line=%d\n", __func__, line); + spin_lock(&proc->inner_lock); +} + +/** + * binder_inner_proc_unlock() - Release inner lock for given binder_proc + * @proc: struct binder_proc to acquire + * + * Release lock acquired via binder_inner_proc_lock() + */ +#define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__) +static void +_binder_inner_proc_unlock(struct binder_proc *proc, int line) +{ + binder_debug(BINDER_DEBUG_SPINLOCKS, + "%s: line=%d\n", __func__, line); + spin_unlock(&proc->inner_lock); +} + +/** + * binder_node_lock() - Acquire spinlock for given binder_node + * @node: struct binder_node to acquire + * + * Acquires node->lock. 
Used to protect binder_node fields + */ +#define binder_node_lock(node) _binder_node_lock(node, __LINE__) +static void +_binder_node_lock(struct binder_node *node, int line) +{ + binder_debug(BINDER_DEBUG_SPINLOCKS, + "%s: line=%d\n", __func__, line); + spin_lock(&node->lock); +} + +/** + * binder_node_unlock() - Release spinlock for given binder_proc + * @node: struct binder_node to acquire + * + * Release lock acquired via binder_node_lock() + */ +#define binder_node_unlock(node) _binder_node_unlock(node, __LINE__) +static void +_binder_node_unlock(struct binder_node *node, int line) +{ + binder_debug(BINDER_DEBUG_SPINLOCKS, + "%s: line=%d\n", __func__, line); + spin_unlock(&node->lock); +} + +/** + * binder_node_inner_lock() - Acquire node and inner locks + * @node: struct binder_node to acquire + * + * Acquires node->lock. If node->proc also acquires + * proc->inner_lock. Used to protect binder_node fields + */ +#define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__) +static void +_binder_node_inner_lock(struct binder_node *node, int line) +{ + binder_debug(BINDER_DEBUG_SPINLOCKS, + "%s: line=%d\n", __func__, line); + spin_lock(&node->lock); + if (node->proc) + binder_inner_proc_lock(node->proc); +} + +/** + * binder_node_unlock() - Release node and inner locks + * @node: struct binder_node to acquire + * + * Release lock acquired via binder_node_lock() + */ +#define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__) +static void +_binder_node_inner_unlock(struct binder_node *node, int line) +{ + struct binder_proc *proc = node->proc; + + binder_debug(BINDER_DEBUG_SPINLOCKS, + "%s: line=%d\n", __func__, line); + if (proc) + binder_inner_proc_unlock(proc); + spin_unlock(&node->lock); +} + +static bool binder_worklist_empty_ilocked(struct list_head *list) +{ + return list_empty(list); +} + +/** + * binder_worklist_empty() - Check if no items on the work list + * @proc: binder_proc associated with list + * @list: list to check + * + * Return: true if there are no items on list, else false + */ +static bool binder_worklist_empty(struct binder_proc *proc, + struct list_head *list) +{ + bool ret; + + binder_inner_proc_lock(proc); + ret = binder_worklist_empty_ilocked(list); + binder_inner_proc_unlock(proc); + return ret; +} + +static void +binder_enqueue_work_ilocked(struct binder_work *work, + struct list_head *target_list) +{ + BUG_ON(target_list == NULL); + BUG_ON(work->entry.next && !list_empty(&work->entry)); + list_add_tail(&work->entry, target_list); +} + +/** + * binder_enqueue_work() - Add an item to the work list + * @proc: binder_proc associated with list + * @work: struct binder_work to add to list + * @target_list: list to add work to + * + * Adds the work to the specified list. Asserts that work + * is not already on a list. + */ +static void +binder_enqueue_work(struct binder_proc *proc, + struct binder_work *work, + struct list_head *target_list) +{ + binder_inner_proc_lock(proc); + binder_enqueue_work_ilocked(work, target_list); + binder_inner_proc_unlock(proc); +} + +static void +binder_dequeue_work_ilocked(struct binder_work *work) +{ + list_del_init(&work->entry); +} + +/** + * binder_dequeue_work() - Removes an item from the work list + * @proc: binder_proc associated with list + * @work: struct binder_work to remove from list + * + * Removes the specified work item from whatever list it is on. + * Can safely be called if work is not on any list. 
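+ *
+ * Illustrative pairing with binder_enqueue_work() (sketch only; "w" is
+ * a hypothetical object embedding a struct binder_work):
+ *
+ *   binder_enqueue_work(proc, &w->work, &proc->todo);
+ *   ...
+ *   binder_dequeue_work(proc, &w->work);
+ *
+ * Both helpers take and release proc->inner_lock internally, so the
+ * caller must not already hold it; use the _ilocked variants in that
+ * case.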
+ */ +static void +binder_dequeue_work(struct binder_proc *proc, struct binder_work *work) +{ + binder_inner_proc_lock(proc); + binder_dequeue_work_ilocked(work); + binder_inner_proc_unlock(proc); +} + +static struct binder_work *binder_dequeue_work_head_ilocked( + struct list_head *list) +{ + struct binder_work *w; + + w = list_first_entry_or_null(list, struct binder_work, entry); + if (w) + list_del_init(&w->entry); + return w; +} + +/** + * binder_dequeue_work_head() - Dequeues the item at head of list + * @proc: binder_proc associated with list + * @list: list to dequeue head + * + * Removes the head of the list if there are items on the list + * + * Return: pointer dequeued binder_work, NULL if list was empty + */ +static struct binder_work *binder_dequeue_work_head( + struct binder_proc *proc, + struct list_head *list) +{ + struct binder_work *w; + + binder_inner_proc_lock(proc); + w = binder_dequeue_work_head_ilocked(list); + binder_inner_proc_unlock(proc); + return w; +} + static void binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer); +static void binder_free_thread(struct binder_thread *thread); +static void binder_free_proc(struct binder_proc *proc); +static void binder_inc_node_tmpref_ilocked(struct binder_node *node); static int task_get_unused_fd_flags(struct binder_proc *proc, int flags) { struct files_struct *files = proc->files; unsigned long rlim_cur; unsigned long irqs; - int ret; if (files == NULL) return -ESRCH; @@ -428,11 +917,7 @@ static int task_get_unused_fd_flags(struct binder_proc *proc, int flags) rlim_cur = task_rlimit(proc->tsk, RLIMIT_NOFILE); unlock_task_sighand(proc->tsk, &irqs); - preempt_enable_no_resched(); - ret = __alloc_fd(files, 0, rlim_cur, flags); - preempt_disable(); - - return ret; + return __alloc_fd(files, 0, rlim_cur, flags); } /* @@ -441,11 +926,8 @@ static int task_get_unused_fd_flags(struct binder_proc *proc, int flags) static void task_fd_install( struct binder_proc *proc, unsigned int fd, struct file *file) { - if (proc->files) { - preempt_enable_no_resched(); + if (proc->files) __fd_install(proc->files, fd, file); - preempt_disable(); - } } /* @@ -469,526 +951,281 @@ static long task_close_fd(struct binder_proc *proc, unsigned int fd) return retval; } -static inline void binder_lock(struct binder_context *context, const char *tag) +static bool binder_has_work_ilocked(struct binder_thread *thread, + bool do_proc_work) { - trace_binder_lock(tag); - mutex_lock(&context->binder_main_lock); - preempt_disable(); - trace_binder_locked(tag); + return !binder_worklist_empty_ilocked(&thread->todo) || + thread->looper_need_return || + (do_proc_work && + !binder_worklist_empty_ilocked(&thread->proc->todo)); } -static inline void binder_unlock(struct binder_context *context, - const char *tag) +static bool binder_has_work(struct binder_thread *thread, bool do_proc_work) { - trace_binder_unlock(tag); - mutex_unlock(&context->binder_main_lock); - preempt_enable(); + bool has_work; + + binder_inner_proc_lock(thread->proc); + has_work = binder_has_work_ilocked(thread, do_proc_work); + binder_inner_proc_unlock(thread->proc); + + return has_work; } -static inline void *kzalloc_preempt_disabled(size_t size) +static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread) { - void *ptr; - - ptr = kzalloc(size, GFP_NOWAIT); - if (ptr) - return ptr; - - preempt_enable_no_resched(); - ptr = kzalloc(size, GFP_KERNEL); - preempt_disable(); - - return ptr; + return !thread->transaction_stack && + 
binder_worklist_empty_ilocked(&thread->todo) && + (thread->looper & (BINDER_LOOPER_STATE_ENTERED | + BINDER_LOOPER_STATE_REGISTERED)); } -static inline long copy_to_user_preempt_disabled(void __user *to, const void *from, long n) +static void binder_wakeup_poll_threads_ilocked(struct binder_proc *proc, + bool sync) { - long ret; + struct rb_node *n; + struct binder_thread *thread; - preempt_enable_no_resched(); - ret = copy_to_user(to, from, n); - preempt_disable(); - return ret; + for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) { + thread = rb_entry(n, struct binder_thread, rb_node); + if (thread->looper & BINDER_LOOPER_STATE_POLL && + binder_available_for_proc_work_ilocked(thread)) { + if (sync) + wake_up_interruptible_sync(&thread->wait); + else + wake_up_interruptible(&thread->wait); + } + } } -static inline long copy_from_user_preempt_disabled(void *to, const void __user *from, long n) +/** + * binder_select_thread_ilocked() - selects a thread for doing proc work. + * @proc: process to select a thread from + * + * Note that calling this function moves the thread off the waiting_threads + * list, so it can only be woken up by the caller of this function, or a + * signal. Therefore, callers *should* always wake up the thread this function + * returns. + * + * Return: If there's a thread currently waiting for process work, + * returns that thread. Otherwise returns NULL. + */ +static struct binder_thread * +binder_select_thread_ilocked(struct binder_proc *proc) { - long ret; + struct binder_thread *thread; - preempt_enable_no_resched(); - ret = copy_from_user(to, from, n); - preempt_disable(); - return ret; + assert_spin_locked(&proc->inner_lock); + thread = list_first_entry_or_null(&proc->waiting_threads, + struct binder_thread, + waiting_thread_node); + + if (thread) + list_del_init(&thread->waiting_thread_node); + + return thread; } -#define get_user_preempt_disabled(x, ptr) \ -({ \ - int __ret; \ - preempt_enable_no_resched(); \ - __ret = get_user(x, ptr); \ - preempt_disable(); \ - __ret; \ -}) - -#define put_user_preempt_disabled(x, ptr) \ -({ \ - int __ret; \ - preempt_enable_no_resched(); \ - __ret = put_user(x, ptr); \ - preempt_disable(); \ - __ret; \ -}) - -static void binder_set_nice(long nice) +/** + * binder_wakeup_thread_ilocked() - wakes up a thread for doing proc work. + * @proc: process to wake up a thread in + * @thread: specific thread to wake-up (may be NULL) + * @sync: whether to do a synchronous wake-up + * + * This function wakes up a thread in the @proc process. + * The caller may provide a specific thread to wake-up in + * the @thread parameter. If @thread is NULL, this function + * will wake up threads that have called poll(). + * + * Note that for this function to work as expected, callers + * should first call binder_select_thread() to find a thread + * to handle the work (if they don't have a thread already), + * and pass the result into the @thread parameter. 
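+ *
+ * Illustrative caller sketch (binder_wakeup_proc_ilocked() below is the
+ * minimal real example; the work-queuing line is an assumption about a
+ * typical caller):
+ *
+ *   binder_inner_proc_lock(proc);
+ *   thread = binder_select_thread_ilocked(proc);
+ *   binder_enqueue_work_ilocked(work, &proc->todo);
+ *   binder_wakeup_thread_ilocked(proc, thread, false);
+ *   binder_inner_proc_unlock(proc);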
+ */ +static void binder_wakeup_thread_ilocked(struct binder_proc *proc, + struct binder_thread *thread, + bool sync) { - long min_nice; + assert_spin_locked(&proc->inner_lock); - if (can_nice(current, nice)) { - set_user_nice(current, nice); + if (thread) { + if (sync) + wake_up_interruptible_sync(&thread->wait); + else + wake_up_interruptible(&thread->wait); return; } - min_nice = rlimit_to_nice(current->signal->rlim[RLIMIT_NICE].rlim_cur); - binder_debug(BINDER_DEBUG_PRIORITY_CAP, - "%d: nice value %ld not allowed use %ld instead\n", - current->pid, nice, min_nice); - set_user_nice(current, min_nice); - if (min_nice <= MAX_NICE) - return; - binder_user_error("%d RLIMIT_NICE not set\n", current->pid); + + /* Didn't find a thread waiting for proc work; this can happen + * in two scenarios: + * 1. All threads are busy handling transactions + * In that case, one of those threads should call back into + * the kernel driver soon and pick up this work. + * 2. Threads are using the (e)poll interface, in which case + * they may be blocked on the waitqueue without having been + * added to waiting_threads. For this case, we just iterate + * over all threads not handling transaction work, and + * wake them all up. We wake all because we don't know whether + * a thread that called into (e)poll is handling non-binder + * work currently. + */ + binder_wakeup_poll_threads_ilocked(proc, sync); } -static size_t binder_buffer_size(struct binder_proc *proc, - struct binder_buffer *buffer) +static void binder_wakeup_proc_ilocked(struct binder_proc *proc) { - if (list_is_last(&buffer->entry, &proc->buffers)) - return proc->buffer + proc->buffer_size - (void *)buffer->data; - return (size_t)list_entry(buffer->entry.next, - struct binder_buffer, entry) - (size_t)buffer->data; + struct binder_thread *thread = binder_select_thread_ilocked(proc); + + binder_wakeup_thread_ilocked(proc, thread, /* sync = */false); } -static void binder_insert_free_buffer(struct binder_proc *proc, - struct binder_buffer *new_buffer) +static bool is_rt_policy(int policy) { - struct rb_node **p = &proc->free_buffers.rb_node; - struct rb_node *parent = NULL; - struct binder_buffer *buffer; - size_t buffer_size; - size_t new_buffer_size; - - BUG_ON(!new_buffer->free); - - new_buffer_size = binder_buffer_size(proc, new_buffer); - - binder_debug(BINDER_DEBUG_BUFFER_ALLOC, - "%d: add free buffer, size %zd, at %p\n", - proc->pid, new_buffer_size, new_buffer); - - while (*p) { - parent = *p; - buffer = rb_entry(parent, struct binder_buffer, rb_node); - BUG_ON(!buffer->free); - - buffer_size = binder_buffer_size(proc, buffer); - - if (new_buffer_size < buffer_size) - p = &parent->rb_left; - else - p = &parent->rb_right; - } - rb_link_node(&new_buffer->rb_node, parent, p); - rb_insert_color(&new_buffer->rb_node, &proc->free_buffers); + return policy == SCHED_FIFO || policy == SCHED_RR; } -static void binder_insert_allocated_buffer(struct binder_proc *proc, - struct binder_buffer *new_buffer) +static bool is_fair_policy(int policy) { - struct rb_node **p = &proc->allocated_buffers.rb_node; - struct rb_node *parent = NULL; - struct binder_buffer *buffer; - - BUG_ON(new_buffer->free); - - while (*p) { - parent = *p; - buffer = rb_entry(parent, struct binder_buffer, rb_node); - BUG_ON(buffer->free); - - if (new_buffer < buffer) - p = &parent->rb_left; - else if (new_buffer > buffer) - p = &parent->rb_right; - else - BUG(); - } - rb_link_node(&new_buffer->rb_node, parent, p); - rb_insert_color(&new_buffer->rb_node, &proc->allocated_buffers); + 
return policy == SCHED_NORMAL || policy == SCHED_BATCH; } -static struct binder_buffer *binder_buffer_lookup(struct binder_proc *proc, - uintptr_t user_ptr) +static bool binder_supported_policy(int policy) { - struct rb_node *n = proc->allocated_buffers.rb_node; - struct binder_buffer *buffer; - struct binder_buffer *kern_ptr; - - kern_ptr = (struct binder_buffer *)(user_ptr - proc->user_buffer_offset - - offsetof(struct binder_buffer, data)); - - while (n) { - buffer = rb_entry(n, struct binder_buffer, rb_node); - BUG_ON(buffer->free); - - if (kern_ptr < buffer) - n = n->rb_left; - else if (kern_ptr > buffer) - n = n->rb_right; - else - return buffer; - } - return NULL; + return is_fair_policy(policy) || is_rt_policy(policy); } -static int binder_update_page_range(struct binder_proc *proc, int allocate, - void *start, void *end, - struct vm_area_struct *vma) +static int to_userspace_prio(int policy, int kernel_priority) { - void *page_addr; - unsigned long user_page_addr; - struct page **page; - struct mm_struct *mm; - - binder_debug(BINDER_DEBUG_BUFFER_ALLOC, - "%d: %s pages %p-%p\n", proc->pid, - allocate ? "allocate" : "free", start, end); - - if (end <= start) - return 0; - - trace_binder_update_page_range(proc, allocate, start, end); - - if (vma) - mm = NULL; + if (is_fair_policy(policy)) + return PRIO_TO_NICE(kernel_priority); else - mm = get_task_mm(proc->tsk); - - preempt_enable_no_resched(); - - if (mm) { - down_write(&mm->mmap_sem); - vma = proc->vma; - if (vma && mm != proc->vma_vm_mm) { - pr_err("%d: vma mm and task mm mismatch\n", - proc->pid); - vma = NULL; - } - } - - if (allocate == 0) - goto free_range; - - if (vma == NULL) { - pr_err("%d: binder_alloc_buf failed to map pages in userspace, no vma\n", - proc->pid); - goto err_no_vma; - } - - for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) { - int ret; - - page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE]; - - BUG_ON(*page); - *page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO); - if (*page == NULL) { - pr_err("%d: binder_alloc_buf failed for page at %p\n", - proc->pid, page_addr); - goto err_alloc_page_failed; - } - ret = map_kernel_range_noflush((unsigned long)page_addr, - PAGE_SIZE, PAGE_KERNEL, page); - flush_cache_vmap((unsigned long)page_addr, - (unsigned long)page_addr + PAGE_SIZE); - if (ret != 1) { - pr_err("%d: binder_alloc_buf failed to map page at %p in kernel\n", - proc->pid, page_addr); - goto err_map_kernel_failed; - } - user_page_addr = - (uintptr_t)page_addr + proc->user_buffer_offset; - ret = vm_insert_page(vma, user_page_addr, page[0]); - if (ret) { - pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n", - proc->pid, user_page_addr); - goto err_vm_insert_page_failed; - } - /* vm_insert_page does not seem to increment the refcount */ - } - if (mm) { - up_write(&mm->mmap_sem); - mmput(mm); - } - - preempt_disable(); - - return 0; - -free_range: - for (page_addr = end - PAGE_SIZE; page_addr >= start; - page_addr -= PAGE_SIZE) { - page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE]; - if (vma) - zap_page_range(vma, (uintptr_t)page_addr + - proc->user_buffer_offset, PAGE_SIZE, NULL); -err_vm_insert_page_failed: - unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE); -err_map_kernel_failed: - __free_page(*page); - *page = NULL; -err_alloc_page_failed: - ; - } -err_no_vma: - if (mm) { - up_write(&mm->mmap_sem); - mmput(mm); - } - - preempt_disable(); - - return -ENOMEM; + return MAX_USER_RT_PRIO - 1 - kernel_priority; } -static struct binder_buffer 
*binder_alloc_buf(struct binder_proc *proc, - size_t data_size, - size_t offsets_size, - size_t extra_buffers_size, - int is_async) +static int to_kernel_prio(int policy, int user_priority) { - struct rb_node *n = proc->free_buffers.rb_node; - struct binder_buffer *buffer; - size_t buffer_size; - struct rb_node *best_fit = NULL; - void *has_page_addr; - void *end_page_addr; - size_t size, data_offsets_size; + if (is_fair_policy(policy)) + return NICE_TO_PRIO(user_priority); + else + return MAX_USER_RT_PRIO - 1 - user_priority; +} - if (proc->vma == NULL) { - pr_err("%d: binder_alloc_buf, no vma\n", - proc->pid); - return NULL; - } +static void binder_do_set_priority(struct task_struct *task, + struct binder_priority desired, + bool verify) +{ + int priority; /* user-space prio value */ + bool has_cap_nice; + unsigned int policy = desired.sched_policy; - data_offsets_size = ALIGN(data_size, sizeof(void *)) + - ALIGN(offsets_size, sizeof(void *)); + if (task->policy == policy && task->normal_prio == desired.prio) + return; - if (data_offsets_size < data_size || data_offsets_size < offsets_size) { - binder_user_error("%d: got transaction with invalid size %zd-%zd\n", - proc->pid, data_size, offsets_size); - return NULL; - } - size = data_offsets_size + ALIGN(extra_buffers_size, sizeof(void *)); - if (size < data_offsets_size || size < extra_buffers_size) { - binder_user_error("%d: got transaction with invalid extra_buffers_size %zd\n", - proc->pid, extra_buffers_size); - return NULL; - } - if (is_async && - proc->free_async_space < size + sizeof(struct binder_buffer)) { - binder_debug(BINDER_DEBUG_BUFFER_ALLOC, - "%d: binder_alloc_buf size %zd failed, no async space left\n", - proc->pid, size); - return NULL; - } + has_cap_nice = has_capability_noaudit(task, CAP_SYS_NICE); - while (n) { - buffer = rb_entry(n, struct binder_buffer, rb_node); - BUG_ON(!buffer->free); - buffer_size = binder_buffer_size(proc, buffer); + priority = to_userspace_prio(policy, desired.prio); - if (size < buffer_size) { - best_fit = n; - n = n->rb_left; - } else if (size > buffer_size) - n = n->rb_right; - else { - best_fit = n; - break; + if (verify && is_rt_policy(policy) && !has_cap_nice) { + long max_rtprio = task_rlimit(task, RLIMIT_RTPRIO); + + if (max_rtprio == 0) { + policy = SCHED_NORMAL; + priority = MIN_NICE; + } else if (priority > max_rtprio) { + priority = max_rtprio; } } - if (best_fit == NULL) { - pr_err("%d: binder_alloc_buf size %zd failed, no address space\n", - proc->pid, size); - return NULL; - } - if (n == NULL) { - buffer = rb_entry(best_fit, struct binder_buffer, rb_node); - buffer_size = binder_buffer_size(proc, buffer); - } - binder_debug(BINDER_DEBUG_BUFFER_ALLOC, - "%d: binder_alloc_buf size %zd got buffer %p size %zd\n", - proc->pid, size, buffer, buffer_size); + if (verify && is_fair_policy(policy) && !has_cap_nice) { + long min_nice = rlimit_to_nice(task_rlimit(task, RLIMIT_NICE)); - has_page_addr = - (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK); - if (n == NULL) { - if (size + sizeof(struct binder_buffer) + 4 >= buffer_size) - buffer_size = size; /* no room for other buffers */ - else - buffer_size = size + sizeof(struct binder_buffer); - } - end_page_addr = - (void *)PAGE_ALIGN((uintptr_t)buffer->data + buffer_size); - if (end_page_addr > has_page_addr) - end_page_addr = has_page_addr; - if (binder_update_page_range(proc, 1, - (void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr, NULL)) - return NULL; - - rb_erase(best_fit, &proc->free_buffers); - buffer->free = 
0; - binder_insert_allocated_buffer(proc, buffer); - if (buffer_size != size) { - struct binder_buffer *new_buffer = (void *)buffer->data + size; - - list_add(&new_buffer->entry, &buffer->entry); - new_buffer->free = 1; - binder_insert_free_buffer(proc, new_buffer); - } - binder_debug(BINDER_DEBUG_BUFFER_ALLOC, - "%d: binder_alloc_buf size %zd got %p\n", - proc->pid, size, buffer); - buffer->data_size = data_size; - buffer->offsets_size = offsets_size; - buffer->extra_buffers_size = extra_buffers_size; - buffer->async_transaction = is_async; - if (is_async) { - proc->free_async_space -= size + sizeof(struct binder_buffer); - binder_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC, - "%d: binder_alloc_buf size %zd async free %zd\n", - proc->pid, size, proc->free_async_space); - } - - return buffer; -} - -static void *buffer_start_page(struct binder_buffer *buffer) -{ - return (void *)((uintptr_t)buffer & PAGE_MASK); -} - -static void *buffer_end_page(struct binder_buffer *buffer) -{ - return (void *)(((uintptr_t)(buffer + 1) - 1) & PAGE_MASK); -} - -static void binder_delete_free_buffer(struct binder_proc *proc, - struct binder_buffer *buffer) -{ - struct binder_buffer *prev, *next = NULL; - int free_page_end = 1; - int free_page_start = 1; - - BUG_ON(proc->buffers.next == &buffer->entry); - prev = list_entry(buffer->entry.prev, struct binder_buffer, entry); - BUG_ON(!prev->free); - if (buffer_end_page(prev) == buffer_start_page(buffer)) { - free_page_start = 0; - if (buffer_end_page(prev) == buffer_end_page(buffer)) - free_page_end = 0; - binder_debug(BINDER_DEBUG_BUFFER_ALLOC, - "%d: merge free, buffer %p share page with %p\n", - proc->pid, buffer, prev); - } - - if (!list_is_last(&buffer->entry, &proc->buffers)) { - next = list_entry(buffer->entry.next, - struct binder_buffer, entry); - if (buffer_start_page(next) == buffer_end_page(buffer)) { - free_page_end = 0; - if (buffer_start_page(next) == - buffer_start_page(buffer)) - free_page_start = 0; - binder_debug(BINDER_DEBUG_BUFFER_ALLOC, - "%d: merge free, buffer %p share page with %p\n", - proc->pid, buffer, prev); + if (min_nice > MAX_NICE) { + binder_user_error("%d RLIMIT_NICE not set\n", + task->pid); + return; + } else if (priority < min_nice) { + priority = min_nice; } } - list_del(&buffer->entry); - if (free_page_start || free_page_end) { - binder_debug(BINDER_DEBUG_BUFFER_ALLOC, - "%d: merge free, buffer %p do not share page%s%s with %p or %p\n", - proc->pid, buffer, free_page_start ? "" : " end", - free_page_end ? "" : " start", prev, next); - binder_update_page_range(proc, 0, free_page_start ? - buffer_start_page(buffer) : buffer_end_page(buffer), - (free_page_end ? buffer_end_page(buffer) : - buffer_start_page(buffer)) + PAGE_SIZE, NULL); + + if (policy != desired.sched_policy || + to_kernel_prio(policy, priority) != desired.prio) + binder_debug(BINDER_DEBUG_PRIORITY_CAP, + "%d: priority %d not allowed, using %d instead\n", + task->pid, desired.prio, + to_kernel_prio(policy, priority)); + + /* Set the actual priority */ + if (task->policy != policy || is_rt_policy(policy)) { + struct sched_param params; + + params.sched_priority = is_rt_policy(policy) ? 
priority : 0; + + sched_setscheduler_nocheck(task, + policy | SCHED_RESET_ON_FORK, + ¶ms); } + if (is_fair_policy(policy)) + set_user_nice(task, priority); } -static void binder_free_buf(struct binder_proc *proc, - struct binder_buffer *buffer) +static void binder_set_priority(struct task_struct *task, + struct binder_priority desired) { - size_t size, buffer_size; - - buffer_size = binder_buffer_size(proc, buffer); - - size = ALIGN(buffer->data_size, sizeof(void *)) + - ALIGN(buffer->offsets_size, sizeof(void *)) + - ALIGN(buffer->extra_buffers_size, sizeof(void *)); - - binder_debug(BINDER_DEBUG_BUFFER_ALLOC, - "%d: binder_free_buf %p size %zd buffer_size %zd\n", - proc->pid, buffer, size, buffer_size); - - BUG_ON(buffer->free); - BUG_ON(size > buffer_size); - BUG_ON(buffer->transaction != NULL); - BUG_ON((void *)buffer < proc->buffer); - BUG_ON((void *)buffer > proc->buffer + proc->buffer_size); - - if (buffer->async_transaction) { - proc->free_async_space += size + sizeof(struct binder_buffer); - - binder_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC, - "%d: binder_free_buf size %zd async free %zd\n", - proc->pid, size, proc->free_async_space); - } - - binder_update_page_range(proc, 0, - (void *)PAGE_ALIGN((uintptr_t)buffer->data), - (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK), - NULL); - rb_erase(&buffer->rb_node, &proc->allocated_buffers); - buffer->free = 1; - if (!list_is_last(&buffer->entry, &proc->buffers)) { - struct binder_buffer *next = list_entry(buffer->entry.next, - struct binder_buffer, entry); - - if (next->free) { - rb_erase(&next->rb_node, &proc->free_buffers); - binder_delete_free_buffer(proc, next); - } - } - if (proc->buffers.next != &buffer->entry) { - struct binder_buffer *prev = list_entry(buffer->entry.prev, - struct binder_buffer, entry); - - if (prev->free) { - binder_delete_free_buffer(proc, buffer); - rb_erase(&prev->rb_node, &proc->free_buffers); - buffer = prev; - } - } - binder_insert_free_buffer(proc, buffer); + binder_do_set_priority(task, desired, /* verify = */ true); } -static struct binder_node *binder_get_node(struct binder_proc *proc, - binder_uintptr_t ptr) +static void binder_restore_priority(struct task_struct *task, + struct binder_priority desired) +{ + binder_do_set_priority(task, desired, /* verify = */ false); +} + +static void binder_transaction_priority(struct task_struct *task, + struct binder_transaction *t, + struct binder_priority node_prio, + bool inherit_rt) +{ + struct binder_priority desired_prio; + + if (t->set_priority_called) + return; + + t->set_priority_called = true; + t->saved_priority.sched_policy = task->policy; + t->saved_priority.prio = task->normal_prio; + + if (!inherit_rt && is_rt_policy(desired_prio.sched_policy)) { + desired_prio.prio = NICE_TO_PRIO(0); + desired_prio.sched_policy = SCHED_NORMAL; + } else { + desired_prio.prio = t->priority.prio; + desired_prio.sched_policy = t->priority.sched_policy; + } + + if (node_prio.prio < t->priority.prio || + (node_prio.prio == t->priority.prio && + node_prio.sched_policy == SCHED_FIFO)) { + /* + * In case the minimum priority on the node is + * higher (lower value), use that priority. If + * the priority is the same, but the node uses + * SCHED_FIFO, prefer SCHED_FIFO, since it can + * run unbounded, unlike SCHED_RR. 
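+	 *
+	 * Worked example (kernel prio values, lower value means higher
+	 * priority): a node with min_priority 110 raises a transaction
+	 * that arrived with t->priority.prio 120 (nice 0) to 110, while
+	 * a transaction already at prio 100 keeps its own value.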
+ */ + desired_prio = node_prio; + } + + binder_set_priority(task, desired_prio); +} + +static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc, + binder_uintptr_t ptr) { struct rb_node *n = proc->nodes.rb_node; struct binder_node *node; + assert_spin_locked(&proc->inner_lock); + while (n) { node = rb_entry(n, struct binder_node, rb_node); @@ -996,21 +1233,47 @@ static struct binder_node *binder_get_node(struct binder_proc *proc, n = n->rb_left; else if (ptr > node->ptr) n = n->rb_right; - else + else { + /* + * take an implicit weak reference + * to ensure node stays alive until + * call to binder_put_node() + */ + binder_inc_node_tmpref_ilocked(node); return node; + } } return NULL; } -static struct binder_node *binder_new_node(struct binder_proc *proc, - binder_uintptr_t ptr, - binder_uintptr_t cookie) +static struct binder_node *binder_get_node(struct binder_proc *proc, + binder_uintptr_t ptr) +{ + struct binder_node *node; + + binder_inner_proc_lock(proc); + node = binder_get_node_ilocked(proc, ptr); + binder_inner_proc_unlock(proc); + return node; +} + +static struct binder_node *binder_init_node_ilocked( + struct binder_proc *proc, + struct binder_node *new_node, + struct flat_binder_object *fp) { struct rb_node **p = &proc->nodes.rb_node; struct rb_node *parent = NULL; struct binder_node *node; + binder_uintptr_t ptr = fp ? fp->binder : 0; + binder_uintptr_t cookie = fp ? fp->cookie : 0; + __u32 flags = fp ? fp->flags : 0; + s8 priority; + + assert_spin_locked(&proc->inner_lock); while (*p) { + parent = *p; node = rb_entry(parent, struct binder_node, rb_node); @@ -1018,14 +1281,19 @@ static struct binder_node *binder_new_node(struct binder_proc *proc, p = &(*p)->rb_left; else if (ptr > node->ptr) p = &(*p)->rb_right; - else - return NULL; + else { + /* + * A matching node is already in + * the rb tree. Abandon the init + * and return it. 
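+			 * binder_new_node() below handles this case by
+			 * kfree()ing the allocation it passed in when a
+			 * different node is returned.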
+ */ + binder_inc_node_tmpref_ilocked(node); + return node; + } } - - node = kzalloc_preempt_disabled(sizeof(*node)); - if (node == NULL) - return NULL; + node = new_node; binder_stats_created(BINDER_STAT_NODE); + node->tmp_refs++; rb_link_node(&node->rb_node, parent, p); rb_insert_color(&node->rb_node, &proc->nodes); node->debug_id = atomic_inc_return(&binder_last_id); @@ -1033,18 +1301,58 @@ static struct binder_node *binder_new_node(struct binder_proc *proc, node->ptr = ptr; node->cookie = cookie; node->work.type = BINDER_WORK_NODE; + priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK; + node->sched_policy = (flags & FLAT_BINDER_FLAG_PRIORITY_MASK) >> + FLAT_BINDER_FLAG_SCHED_POLICY_SHIFT; + node->min_priority = to_kernel_prio(node->sched_policy, priority); + node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS); + node->inherit_rt = !!(flags & FLAT_BINDER_FLAG_INHERIT_RT); + spin_lock_init(&node->lock); INIT_LIST_HEAD(&node->work.entry); INIT_LIST_HEAD(&node->async_todo); binder_debug(BINDER_DEBUG_INTERNAL_REFS, "%d:%d node %d u%016llx c%016llx created\n", proc->pid, current->pid, node->debug_id, (u64)node->ptr, (u64)node->cookie); + return node; } -static int binder_inc_node(struct binder_node *node, int strong, int internal, - struct list_head *target_list) +static struct binder_node *binder_new_node(struct binder_proc *proc, + struct flat_binder_object *fp) { + struct binder_node *node; + struct binder_node *new_node = kzalloc(sizeof(*node), GFP_KERNEL); + + if (!new_node) + return NULL; + binder_inner_proc_lock(proc); + node = binder_init_node_ilocked(proc, new_node, fp); + binder_inner_proc_unlock(proc); + if (node != new_node) + /* + * The node was already added by another thread + */ + kfree(new_node); + + return node; +} + +static void binder_free_node(struct binder_node *node) +{ + kfree(node); + binder_stats_deleted(BINDER_STAT_NODE); +} + +static int binder_inc_node_nilocked(struct binder_node *node, int strong, + int internal, + struct list_head *target_list) +{ + struct binder_proc *proc = node->proc; + + assert_spin_locked(&node->lock); + if (proc) + assert_spin_locked(&proc->inner_lock); if (strong) { if (internal) { if (target_list == NULL && @@ -1061,8 +1369,8 @@ static int binder_inc_node(struct binder_node *node, int strong, int internal, } else node->local_strong_refs++; if (!node->has_strong_ref && target_list) { - list_del_init(&node->work.entry); - list_add_tail(&node->work.entry, target_list); + binder_dequeue_work_ilocked(&node->work); + binder_enqueue_work_ilocked(&node->work, target_list); } } else { if (!internal) @@ -1073,58 +1381,169 @@ static int binder_inc_node(struct binder_node *node, int strong, int internal, node->debug_id); return -EINVAL; } - list_add_tail(&node->work.entry, target_list); + binder_enqueue_work_ilocked(&node->work, target_list); } } return 0; } -static int binder_dec_node(struct binder_node *node, int strong, int internal) +static int binder_inc_node(struct binder_node *node, int strong, int internal, + struct list_head *target_list) { + int ret; + + binder_node_inner_lock(node); + ret = binder_inc_node_nilocked(node, strong, internal, target_list); + binder_node_inner_unlock(node); + + return ret; +} + +static bool binder_dec_node_nilocked(struct binder_node *node, + int strong, int internal) +{ + struct binder_proc *proc = node->proc; + + assert_spin_locked(&node->lock); + if (proc) + assert_spin_locked(&proc->inner_lock); if (strong) { if (internal) node->internal_strong_refs--; else node->local_strong_refs--; if 
(node->local_strong_refs || node->internal_strong_refs) - return 0; + return false; } else { if (!internal) node->local_weak_refs--; - if (node->local_weak_refs || !hlist_empty(&node->refs)) - return 0; + if (node->local_weak_refs || node->tmp_refs || + !hlist_empty(&node->refs)) + return false; } - if (node->proc && (node->has_strong_ref || node->has_weak_ref)) { + + if (proc && (node->has_strong_ref || node->has_weak_ref)) { if (list_empty(&node->work.entry)) { - list_add_tail(&node->work.entry, &node->proc->todo); - wake_up_interruptible(&node->proc->wait); + binder_enqueue_work_ilocked(&node->work, &proc->todo); + binder_wakeup_proc_ilocked(proc); } } else { if (hlist_empty(&node->refs) && !node->local_strong_refs && - !node->local_weak_refs) { - list_del_init(&node->work.entry); - if (node->proc) { - rb_erase(&node->rb_node, &node->proc->nodes); + !node->local_weak_refs && !node->tmp_refs) { + if (proc) { + binder_dequeue_work_ilocked(&node->work); + rb_erase(&node->rb_node, &proc->nodes); binder_debug(BINDER_DEBUG_INTERNAL_REFS, "refless node %d deleted\n", node->debug_id); } else { + BUG_ON(!list_empty(&node->work.entry)); + spin_lock(&binder_dead_nodes_lock); + /* + * tmp_refs could have changed so + * check it again + */ + if (node->tmp_refs) { + spin_unlock(&binder_dead_nodes_lock); + return false; + } hlist_del(&node->dead_node); + spin_unlock(&binder_dead_nodes_lock); binder_debug(BINDER_DEBUG_INTERNAL_REFS, "dead node %d deleted\n", node->debug_id); } - kfree(node); - binder_stats_deleted(BINDER_STAT_NODE); + return true; } } - - return 0; + return false; } +static void binder_dec_node(struct binder_node *node, int strong, int internal) +{ + bool free_node; -static struct binder_ref *binder_get_ref(struct binder_proc *proc, - u32 desc, bool need_strong_ref) + binder_node_inner_lock(node); + free_node = binder_dec_node_nilocked(node, strong, internal); + binder_node_inner_unlock(node); + if (free_node) + binder_free_node(node); +} + +static void binder_inc_node_tmpref_ilocked(struct binder_node *node) +{ + /* + * No call to binder_inc_node() is needed since we + * don't need to inform userspace of any changes to + * tmp_refs + */ + node->tmp_refs++; +} + +/** + * binder_inc_node_tmpref() - take a temporary reference on node + * @node: node to reference + * + * Take reference on node to prevent the node from being freed + * while referenced only by a local variable. The inner lock is + * needed to serialize with the node work on the queue (which + * isn't needed after the node is dead). 
If the node is dead + * (node->proc is NULL), use binder_dead_nodes_lock to protect + * node->tmp_refs against dead-node-only cases where the node + * lock cannot be acquired (eg traversing the dead node list to + * print nodes) + */ +static void binder_inc_node_tmpref(struct binder_node *node) +{ + binder_node_lock(node); + if (node->proc) + binder_inner_proc_lock(node->proc); + else + spin_lock(&binder_dead_nodes_lock); + binder_inc_node_tmpref_ilocked(node); + if (node->proc) + binder_inner_proc_unlock(node->proc); + else + spin_unlock(&binder_dead_nodes_lock); + binder_node_unlock(node); +} + +/** + * binder_dec_node_tmpref() - remove a temporary reference on node + * @node: node to reference + * + * Release temporary reference on node taken via binder_inc_node_tmpref() + */ +static void binder_dec_node_tmpref(struct binder_node *node) +{ + bool free_node; + + binder_node_inner_lock(node); + if (!node->proc) + spin_lock(&binder_dead_nodes_lock); + node->tmp_refs--; + BUG_ON(node->tmp_refs < 0); + if (!node->proc) + spin_unlock(&binder_dead_nodes_lock); + /* + * Call binder_dec_node() to check if all refcounts are 0 + * and cleanup is needed. Calling with strong=0 and internal=1 + * causes no actual reference to be released in binder_dec_node(). + * If that changes, a change is needed here too. + */ + free_node = binder_dec_node_nilocked(node, 0, 1); + binder_node_inner_unlock(node); + if (free_node) + binder_free_node(node); +} + +static void binder_put_node(struct binder_node *node) +{ + binder_dec_node_tmpref(node); +} + +static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc, + u32 desc, bool need_strong_ref) { struct rb_node *n = proc->refs_by_desc.rb_node; struct binder_ref *ref; @@ -1132,11 +1551,11 @@ static struct binder_ref *binder_get_ref(struct binder_proc *proc, while (n) { ref = rb_entry(n, struct binder_ref, rb_node_desc); - if (desc < ref->desc) { + if (desc < ref->data.desc) { n = n->rb_left; - } else if (desc > ref->desc) { + } else if (desc > ref->data.desc) { n = n->rb_right; - } else if (need_strong_ref && !ref->strong) { + } else if (need_strong_ref && !ref->data.strong) { binder_user_error("tried to use weak ref as strong ref\n"); return NULL; } else { @@ -1146,14 +1565,34 @@ static struct binder_ref *binder_get_ref(struct binder_proc *proc, return NULL; } -static struct binder_ref *binder_get_ref_for_node(struct binder_proc *proc, - struct binder_node *node) +/** + * binder_get_ref_for_node_olocked() - get the ref associated with given node + * @proc: binder_proc that owns the ref + * @node: binder_node of target + * @new_ref: newly allocated binder_ref to be initialized or %NULL + * + * Look up the ref for the given node and return it if it exists + * + * If it doesn't exist and the caller provides a newly allocated + * ref, initialize the fields of the newly allocated ref and insert + * into the given proc rb_trees and node refs list. + * + * Return: the ref for node. It is possible that another thread + * allocated/initialized the ref first in which case the + * returned ref would be different than the passed-in + * new_ref. new_ref must be kfree'd by the caller in + * this case. 
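+ *
+ * Illustrative caller pattern (this mirrors binder_inc_ref_for_node()
+ * further below; the allocation must happen outside @proc->outer_lock
+ * because GFP_KERNEL may sleep):
+ *
+ *   ref = binder_get_ref_for_node_olocked(proc, node, NULL);
+ *   if (!ref) {
+ *           binder_proc_unlock(proc);
+ *           new_ref = kzalloc(sizeof(*new_ref), GFP_KERNEL);
+ *           binder_proc_lock(proc);
+ *           ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
+ *   }
+ *   if (new_ref && ref != new_ref)
+ *           kfree(new_ref);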
+ */ +static struct binder_ref *binder_get_ref_for_node_olocked( + struct binder_proc *proc, + struct binder_node *node, + struct binder_ref *new_ref) { - struct rb_node *n; + struct binder_context *context = proc->context; struct rb_node **p = &proc->refs_by_node.rb_node; struct rb_node *parent = NULL; - struct binder_ref *ref, *new_ref; - struct binder_context *context = proc->context; + struct binder_ref *ref; + struct rb_node *n; while (*p) { parent = *p; @@ -1166,22 +1605,22 @@ static struct binder_ref *binder_get_ref_for_node(struct binder_proc *proc, else return ref; } - new_ref = kzalloc_preempt_disabled(sizeof(*ref)); - if (new_ref == NULL) + if (!new_ref) return NULL; + binder_stats_created(BINDER_STAT_REF); - new_ref->debug_id = atomic_inc_return(&binder_last_id); + new_ref->data.debug_id = atomic_inc_return(&binder_last_id); new_ref->proc = proc; new_ref->node = node; rb_link_node(&new_ref->rb_node_node, parent, p); rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node); - new_ref->desc = (node == context->binder_context_mgr_node) ? 0 : 1; + new_ref->data.desc = (node == context->binder_context_mgr_node) ? 0 : 1; for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) { ref = rb_entry(n, struct binder_ref, rb_node_desc); - if (ref->desc > new_ref->desc) + if (ref->data.desc > new_ref->data.desc) break; - new_ref->desc = ref->desc + 1; + new_ref->data.desc = ref->data.desc + 1; } p = &proc->refs_by_desc.rb_node; @@ -1189,121 +1628,423 @@ static struct binder_ref *binder_get_ref_for_node(struct binder_proc *proc, parent = *p; ref = rb_entry(parent, struct binder_ref, rb_node_desc); - if (new_ref->desc < ref->desc) + if (new_ref->data.desc < ref->data.desc) p = &(*p)->rb_left; - else if (new_ref->desc > ref->desc) + else if (new_ref->data.desc > ref->data.desc) p = &(*p)->rb_right; else BUG(); } rb_link_node(&new_ref->rb_node_desc, parent, p); rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc); - if (node) { - hlist_add_head(&new_ref->node_entry, &node->refs); - binder_debug(BINDER_DEBUG_INTERNAL_REFS, - "%d new ref %d desc %d for node %d\n", - proc->pid, new_ref->debug_id, new_ref->desc, - node->debug_id); - } else { - binder_debug(BINDER_DEBUG_INTERNAL_REFS, - "%d new ref %d desc %d for dead node\n", - proc->pid, new_ref->debug_id, new_ref->desc); - } + binder_node_lock(node); + hlist_add_head(&new_ref->node_entry, &node->refs); + + binder_debug(BINDER_DEBUG_INTERNAL_REFS, + "%d new ref %d desc %d for node %d\n", + proc->pid, new_ref->data.debug_id, new_ref->data.desc, + node->debug_id); + binder_node_unlock(node); return new_ref; } -static void binder_delete_ref(struct binder_ref *ref) +static void binder_cleanup_ref_olocked(struct binder_ref *ref) { + bool delete_node = false; + binder_debug(BINDER_DEBUG_INTERNAL_REFS, "%d delete ref %d desc %d for node %d\n", - ref->proc->pid, ref->debug_id, ref->desc, + ref->proc->pid, ref->data.debug_id, ref->data.desc, ref->node->debug_id); rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc); rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node); - if (ref->strong) - binder_dec_node(ref->node, 1, 1); + + binder_node_inner_lock(ref->node); + if (ref->data.strong) + binder_dec_node_nilocked(ref->node, 1, 1); + hlist_del(&ref->node_entry); - binder_dec_node(ref->node, 0, 1); + delete_node = binder_dec_node_nilocked(ref->node, 0, 1); + binder_node_inner_unlock(ref->node); + /* + * Clear ref->node unless we want the caller to free the node + */ + if (!delete_node) { + /* + * The caller uses ref->node to determine 
+ * whether the node needs to be freed. Clear + * it since the node is still alive. + */ + ref->node = NULL; + } + if (ref->death) { binder_debug(BINDER_DEBUG_DEAD_BINDER, "%d delete ref %d desc %d has death notification\n", - ref->proc->pid, ref->debug_id, ref->desc); - list_del(&ref->death->work.entry); - kfree(ref->death); + ref->proc->pid, ref->data.debug_id, + ref->data.desc); + binder_dequeue_work(ref->proc, &ref->death->work); binder_stats_deleted(BINDER_STAT_DEATH); } - kfree(ref); binder_stats_deleted(BINDER_STAT_REF); } -static int binder_inc_ref(struct binder_ref *ref, int strong, - struct list_head *target_list) +/** + * binder_inc_ref_olocked() - increment the ref for given handle + * @ref: ref to be incremented + * @strong: if true, strong increment, else weak + * @target_list: list to queue node work on + * + * Increment the ref. @ref->proc->outer_lock must be held on entry + * + * Return: 0, if successful, else errno + */ +static int binder_inc_ref_olocked(struct binder_ref *ref, int strong, + struct list_head *target_list) { int ret; if (strong) { - if (ref->strong == 0) { + if (ref->data.strong == 0) { ret = binder_inc_node(ref->node, 1, 1, target_list); if (ret) return ret; } - ref->strong++; + ref->data.strong++; } else { - if (ref->weak == 0) { + if (ref->data.weak == 0) { ret = binder_inc_node(ref->node, 0, 1, target_list); if (ret) return ret; } - ref->weak++; + ref->data.weak++; } return 0; } - -static int binder_dec_ref(struct binder_ref *ref, int strong) +/** + * binder_dec_ref() - dec the ref for given handle + * @ref: ref to be decremented + * @strong: if true, strong decrement, else weak + * + * Decrement the ref. + * + * Return: true if ref is cleaned up and ready to be freed + */ +static bool binder_dec_ref_olocked(struct binder_ref *ref, int strong) { if (strong) { - if (ref->strong == 0) { + if (ref->data.strong == 0) { binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n", - ref->proc->pid, ref->debug_id, - ref->desc, ref->strong, ref->weak); - return -EINVAL; - } - ref->strong--; - if (ref->strong == 0) { - int ret; - - ret = binder_dec_node(ref->node, strong, 1); - if (ret) - return ret; + ref->proc->pid, ref->data.debug_id, + ref->data.desc, ref->data.strong, + ref->data.weak); + return false; } + ref->data.strong--; + if (ref->data.strong == 0) + binder_dec_node(ref->node, strong, 1); } else { - if (ref->weak == 0) { + if (ref->data.weak == 0) { binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n", - ref->proc->pid, ref->debug_id, - ref->desc, ref->strong, ref->weak); - return -EINVAL; + ref->proc->pid, ref->data.debug_id, + ref->data.desc, ref->data.strong, + ref->data.weak); + return false; } - ref->weak--; + ref->data.weak--; } - if (ref->strong == 0 && ref->weak == 0) - binder_delete_ref(ref); - return 0; + if (ref->data.strong == 0 && ref->data.weak == 0) { + binder_cleanup_ref_olocked(ref); + return true; + } + return false; } -static void binder_pop_transaction(struct binder_thread *target_thread, - struct binder_transaction *t) +/** + * binder_get_node_from_ref() - get the node from the given proc/desc + * @proc: proc containing the ref + * @desc: the handle associated with the ref + * @need_strong_ref: if true, only return node if ref is strong + * @rdata: the id/refcount data for the ref + * + * Given a proc and ref handle, return the associated binder_node + * + * Return: a binder_node or NULL if not found or not strong when strong required + */ +static struct binder_node *binder_get_node_from_ref( + struct 
binder_proc *proc, + u32 desc, bool need_strong_ref, + struct binder_ref_data *rdata) { - if (target_thread) { - BUG_ON(target_thread->transaction_stack != t); - BUG_ON(target_thread->transaction_stack->from != target_thread); - target_thread->transaction_stack = - target_thread->transaction_stack->from_parent; - t->from = NULL; + struct binder_node *node; + struct binder_ref *ref; + + binder_proc_lock(proc); + ref = binder_get_ref_olocked(proc, desc, need_strong_ref); + if (!ref) + goto err_no_ref; + node = ref->node; + /* + * Take an implicit reference on the node to ensure + * it stays alive until the call to binder_put_node() + */ + binder_inc_node_tmpref(node); + if (rdata) + *rdata = ref->data; + binder_proc_unlock(proc); + + return node; + +err_no_ref: + binder_proc_unlock(proc); + return NULL; +} + +/** + * binder_free_ref() - free the binder_ref + * @ref: ref to free + * + * Free the binder_ref. Free the binder_node indicated by ref->node + * (if non-NULL) and the binder_ref_death indicated by ref->death. + */ +static void binder_free_ref(struct binder_ref *ref) +{ + if (ref->node) + binder_free_node(ref->node); + kfree(ref->death); + kfree(ref); +} + +/** + * binder_update_ref_for_handle() - inc/dec the ref for given handle + * @proc: proc containing the ref + * @desc: the handle associated with the ref + * @increment: true=inc reference, false=dec reference + * @strong: true=strong reference, false=weak reference + * @rdata: the id/refcount data for the ref + * + * Given a proc and ref handle, increment or decrement the ref + * according to "increment" arg. + * + * Return: 0 if successful, else errno + */ +static int binder_update_ref_for_handle(struct binder_proc *proc, + uint32_t desc, bool increment, bool strong, + struct binder_ref_data *rdata) +{ + int ret = 0; + struct binder_ref *ref; + bool delete_ref = false; + + binder_proc_lock(proc); + ref = binder_get_ref_olocked(proc, desc, strong); + if (!ref) { + ret = -EINVAL; + goto err_no_ref; } - t->need_reply = 0; + if (increment) + ret = binder_inc_ref_olocked(ref, strong, NULL); + else + delete_ref = binder_dec_ref_olocked(ref, strong); + + if (rdata) + *rdata = ref->data; + binder_proc_unlock(proc); + + if (delete_ref) + binder_free_ref(ref); + return ret; + +err_no_ref: + binder_proc_unlock(proc); + return ret; +} + +/** + * binder_dec_ref_for_handle() - dec the ref for given handle + * @proc: proc containing the ref + * @desc: the handle associated with the ref + * @strong: true=strong reference, false=weak reference + * @rdata: the id/refcount data for the ref + * + * Just calls binder_update_ref_for_handle() to decrement the ref. + * + * Return: 0 if successful, else errno + */ +static int binder_dec_ref_for_handle(struct binder_proc *proc, + uint32_t desc, bool strong, struct binder_ref_data *rdata) +{ + return binder_update_ref_for_handle(proc, desc, false, strong, rdata); +} + + +/** + * binder_inc_ref_for_node() - increment the ref for given proc/node + * @proc: proc containing the ref + * @node: target node + * @strong: true=strong reference, false=weak reference + * @target_list: worklist to use if node is incremented + * @rdata: the id/refcount data for the ref + * + * Given a proc and node, increment the ref. 
Create the ref if it + * doesn't already exist + * + * Return: 0 if successful, else errno + */ +static int binder_inc_ref_for_node(struct binder_proc *proc, + struct binder_node *node, + bool strong, + struct list_head *target_list, + struct binder_ref_data *rdata) +{ + struct binder_ref *ref; + struct binder_ref *new_ref = NULL; + int ret = 0; + + binder_proc_lock(proc); + ref = binder_get_ref_for_node_olocked(proc, node, NULL); + if (!ref) { + binder_proc_unlock(proc); + new_ref = kzalloc(sizeof(*ref), GFP_KERNEL); + if (!new_ref) + return -ENOMEM; + binder_proc_lock(proc); + ref = binder_get_ref_for_node_olocked(proc, node, new_ref); + } + ret = binder_inc_ref_olocked(ref, strong, target_list); + *rdata = ref->data; + binder_proc_unlock(proc); + if (new_ref && ref != new_ref) + /* + * Another thread created the ref first so + * free the one we allocated + */ + kfree(new_ref); + return ret; +} + +static void binder_pop_transaction_ilocked(struct binder_thread *target_thread, + struct binder_transaction *t) +{ + BUG_ON(!target_thread); + assert_spin_locked(&target_thread->proc->inner_lock); + BUG_ON(target_thread->transaction_stack != t); + BUG_ON(target_thread->transaction_stack->from != target_thread); + target_thread->transaction_stack = + target_thread->transaction_stack->from_parent; + t->from = NULL; +} + +/** + * binder_thread_dec_tmpref() - decrement thread->tmp_ref + * @thread: thread to decrement + * + * A thread needs to be kept alive while being used to create or + * handle a transaction. binder_get_txn_from() is used to safely + * extract t->from from a binder_transaction and keep the thread + * indicated by t->from from being freed. When done with that + * binder_thread, this function is called to decrement the + * tmp_ref and free if appropriate (thread has been released + * and no transaction being processed by the driver) + */ +static void binder_thread_dec_tmpref(struct binder_thread *thread) +{ + /* + * atomic is used to protect the counter value while + * it cannot reach zero or thread->is_dead is false + */ + binder_inner_proc_lock(thread->proc); + atomic_dec(&thread->tmp_ref); + if (thread->is_dead && !atomic_read(&thread->tmp_ref)) { + binder_inner_proc_unlock(thread->proc); + binder_free_thread(thread); + return; + } + binder_inner_proc_unlock(thread->proc); +} + +/** + * binder_proc_dec_tmpref() - decrement proc->tmp_ref + * @proc: proc to decrement + * + * A binder_proc needs to be kept alive while being used to create or + * handle a transaction. proc->tmp_ref is incremented when + * creating a new transaction or the binder_proc is currently in-use + * by threads that are being released. When done with the binder_proc, + * this function is called to decrement the counter and free the + * proc if appropriate (proc has been released, all threads have + * been released and not currenly in-use to process a transaction). + */ +static void binder_proc_dec_tmpref(struct binder_proc *proc) +{ + binder_inner_proc_lock(proc); + proc->tmp_ref--; + if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) && + !proc->tmp_ref) { + binder_inner_proc_unlock(proc); + binder_free_proc(proc); + return; + } + binder_inner_proc_unlock(proc); +} + +/** + * binder_get_txn_from() - safely extract the "from" thread in transaction + * @t: binder transaction for t->from + * + * Atomically return the "from" thread and increment the tmp_ref + * count for the thread to ensure it stays alive until + * binder_thread_dec_tmpref() is called. 
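The temporary-reference rule described here, where an object that has been marked dead is freed only once its last temporary user drops it, can be modelled in a few lines of portable C. The sketch below is only an illustration of that lifetime rule: obj, obj_get_tmpref and the other names are hypothetical, and a plain mutex stands in for the proc inner lock.

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdlib.h>

    struct obj {
        pthread_mutex_t lock;       /* stands in for the inner proc lock */
        atomic_int tmp_ref;
        bool is_dead;
    };

    static void obj_get_tmpref(struct obj *o)
    {
        atomic_fetch_add(&o->tmp_ref, 1);   /* pin: object may not be freed */
    }

    /* Drop a temporary reference; free only if the object is already dead
     * and this was the last user. */
    static void obj_put_tmpref(struct obj *o)
    {
        bool free_it;

        pthread_mutex_lock(&o->lock);
        atomic_fetch_sub(&o->tmp_ref, 1);
        free_it = o->is_dead && atomic_load(&o->tmp_ref) == 0;
        pthread_mutex_unlock(&o->lock);
        if (free_it)
            free(o);
    }

    /* Releasing only marks the object dead; the last tmpref holder frees it. */
    static void obj_release(struct obj *o)
    {
        bool free_it;

        pthread_mutex_lock(&o->lock);
        o->is_dead = true;
        free_it = atomic_load(&o->tmp_ref) == 0;
        pthread_mutex_unlock(&o->lock);
        if (free_it)
            free(o);
    }

    int main(void)
    {
        struct obj *o = calloc(1, sizeof(*o));

        pthread_mutex_init(&o->lock, NULL);
        obj_get_tmpref(o);   /* e.g. while a transaction still points at it */
        obj_release(o);      /* marked dead, but kept alive by the tmpref */
        obj_put_tmpref(o);   /* last user: the object is freed here */
        return 0;
    }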
+ * + * Return: the value of t->from + */ +static struct binder_thread *binder_get_txn_from( + struct binder_transaction *t) +{ + struct binder_thread *from; + + spin_lock(&t->lock); + from = t->from; + if (from) + atomic_inc(&from->tmp_ref); + spin_unlock(&t->lock); + return from; +} + +/** + * binder_get_txn_from_and_acq_inner() - get t->from and acquire inner lock + * @t: binder transaction for t->from + * + * Same as binder_get_txn_from() except it also acquires the proc->inner_lock + * to guarantee that the thread cannot be released while operating on it. + * The caller must call binder_inner_proc_unlock() to release the inner lock + * as well as call binder_dec_thread_txn() to release the reference. + * + * Return: the value of t->from + */ +static struct binder_thread *binder_get_txn_from_and_acq_inner( + struct binder_transaction *t) +{ + struct binder_thread *from; + + from = binder_get_txn_from(t); + if (!from) + return NULL; + binder_inner_proc_lock(from->proc); + if (t->from) { + BUG_ON(from != t->from); + return from; + } + binder_inner_proc_unlock(from->proc); + binder_thread_dec_tmpref(from); + return NULL; +} + +static void binder_free_transaction(struct binder_transaction *t) +{ if (t->buffer) t->buffer->transaction = NULL; kfree(t); @@ -1318,30 +2059,28 @@ static void binder_send_failed_reply(struct binder_transaction *t, BUG_ON(t->flags & TF_ONE_WAY); while (1) { - target_thread = t->from; + target_thread = binder_get_txn_from_and_acq_inner(t); if (target_thread) { - if (target_thread->return_error != BR_OK && - target_thread->return_error2 == BR_OK) { - target_thread->return_error2 = - target_thread->return_error; - target_thread->return_error = BR_OK; - } - if (target_thread->return_error == BR_OK) { - binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, - "send failed reply for transaction %d to %d:%d\n", - t->debug_id, - target_thread->proc->pid, - target_thread->pid); + binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, + "send failed reply for transaction %d to %d:%d\n", + t->debug_id, + target_thread->proc->pid, + target_thread->pid); - binder_pop_transaction(target_thread, t); - target_thread->return_error = error_code; + binder_pop_transaction_ilocked(target_thread, t); + if (target_thread->reply_error.cmd == BR_OK) { + target_thread->reply_error.cmd = error_code; + binder_enqueue_work_ilocked( + &target_thread->reply_error.work, + &target_thread->todo); wake_up_interruptible(&target_thread->wait); } else { - pr_err("reply failed, target thread, %d:%d, has error code %d already\n", - target_thread->proc->pid, - target_thread->pid, - target_thread->return_error); + WARN(1, "Unexpected reply error: %u\n", + target_thread->reply_error.cmd); } + binder_inner_proc_unlock(target_thread->proc); + binder_thread_dec_tmpref(target_thread); + binder_free_transaction(t); return; } next = t->from_parent; @@ -1350,7 +2089,7 @@ static void binder_send_failed_reply(struct binder_transaction *t, "send failed reply for transaction %d, target dead\n", t->debug_id); - binder_pop_transaction(target_thread, t); + binder_free_transaction(t); if (next == NULL) { binder_debug(BINDER_DEBUG_DEAD_BINDER, "reply failed, no target thread at root\n"); @@ -1559,25 +2298,26 @@ static void binder_transaction_buffer_release(struct binder_proc *proc, node->debug_id, (u64)node->ptr); binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER, 0); + binder_put_node(node); } break; case BINDER_TYPE_HANDLE: case BINDER_TYPE_WEAK_HANDLE: { struct flat_binder_object *fp; - struct binder_ref *ref; + struct 
binder_ref_data rdata; + int ret; fp = to_flat_binder_object(hdr); - ref = binder_get_ref(proc, fp->handle, - hdr->type == BINDER_TYPE_HANDLE); + ret = binder_dec_ref_for_handle(proc, fp->handle, + hdr->type == BINDER_TYPE_HANDLE, &rdata); - if (ref == NULL) { - pr_err("transaction release %d bad handle %d\n", - debug_id, fp->handle); + if (ret) { + pr_err("transaction release %d bad handle %d, ret = %d\n", + debug_id, fp->handle, ret); break; } binder_debug(BINDER_DEBUG_TRANSACTION, - " ref %d desc %d (node %d)\n", - ref->debug_id, ref->desc, ref->node->debug_id); - binder_dec_ref(ref, hdr->type == BINDER_TYPE_HANDLE); + " ref %d desc %d\n", + rdata.debug_id, rdata.desc); } break; case BINDER_TYPE_FD: { @@ -1616,7 +2356,8 @@ static void binder_transaction_buffer_release(struct binder_proc *proc, * back to kernel address space to access it */ parent_buffer = parent->buffer - - proc->user_buffer_offset; + binder_alloc_get_user_buffer_offset( + &proc->alloc); fd_buf_size = sizeof(u32) * fda->num_fds; if (fda->num_fds >= SIZE_MAX / sizeof(u32)) { @@ -1648,102 +2389,122 @@ static int binder_translate_binder(struct flat_binder_object *fp, struct binder_thread *thread) { struct binder_node *node; - struct binder_ref *ref; struct binder_proc *proc = thread->proc; struct binder_proc *target_proc = t->to_proc; + struct binder_ref_data rdata; + int ret = 0; node = binder_get_node(proc, fp->binder); if (!node) { - node = binder_new_node(proc, fp->binder, fp->cookie); + node = binder_new_node(proc, fp); if (!node) return -ENOMEM; - - node->min_priority = fp->flags & FLAT_BINDER_FLAG_PRIORITY_MASK; - node->accept_fds = !!(fp->flags & FLAT_BINDER_FLAG_ACCEPTS_FDS); } if (fp->cookie != node->cookie) { binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n", proc->pid, thread->pid, (u64)fp->binder, node->debug_id, (u64)fp->cookie, (u64)node->cookie); - return -EINVAL; + ret = -EINVAL; + goto done; + } + if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) { + ret = -EPERM; + goto done; } - if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) - return -EPERM; - ref = binder_get_ref_for_node(target_proc, node); - if (!ref) - return -EINVAL; + ret = binder_inc_ref_for_node(target_proc, node, + fp->hdr.type == BINDER_TYPE_BINDER, + &thread->todo, &rdata); + if (ret) + goto done; if (fp->hdr.type == BINDER_TYPE_BINDER) fp->hdr.type = BINDER_TYPE_HANDLE; else fp->hdr.type = BINDER_TYPE_WEAK_HANDLE; fp->binder = 0; - fp->handle = ref->desc; + fp->handle = rdata.desc; fp->cookie = 0; - binder_inc_ref(ref, fp->hdr.type == BINDER_TYPE_HANDLE, &thread->todo); - trace_binder_transaction_node_to_ref(t, node, ref); + trace_binder_transaction_node_to_ref(t, node, &rdata); binder_debug(BINDER_DEBUG_TRANSACTION, " node %d u%016llx -> ref %d desc %d\n", node->debug_id, (u64)node->ptr, - ref->debug_id, ref->desc); - - return 0; + rdata.debug_id, rdata.desc); +done: + binder_put_node(node); + return ret; } static int binder_translate_handle(struct flat_binder_object *fp, struct binder_transaction *t, struct binder_thread *thread) { - struct binder_ref *ref; struct binder_proc *proc = thread->proc; struct binder_proc *target_proc = t->to_proc; + struct binder_node *node; + struct binder_ref_data src_rdata; + int ret = 0; - ref = binder_get_ref(proc, fp->handle, - fp->hdr.type == BINDER_TYPE_HANDLE); - if (!ref) { + node = binder_get_node_from_ref(proc, fp->handle, + fp->hdr.type == BINDER_TYPE_HANDLE, &src_rdata); + if (!node) { binder_user_error("%d:%d got 
transaction with invalid handle, %d\n", proc->pid, thread->pid, fp->handle); return -EINVAL; } - if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) - return -EPERM; + if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) { + ret = -EPERM; + goto done; + } - if (ref->node->proc == target_proc) { + binder_node_lock(node); + if (node->proc == target_proc) { if (fp->hdr.type == BINDER_TYPE_HANDLE) fp->hdr.type = BINDER_TYPE_BINDER; else fp->hdr.type = BINDER_TYPE_WEAK_BINDER; - fp->binder = ref->node->ptr; - fp->cookie = ref->node->cookie; - binder_inc_node(ref->node, fp->hdr.type == BINDER_TYPE_BINDER, - 0, NULL); - trace_binder_transaction_ref_to_node(t, ref); + fp->binder = node->ptr; + fp->cookie = node->cookie; + if (node->proc) + binder_inner_proc_lock(node->proc); + binder_inc_node_nilocked(node, + fp->hdr.type == BINDER_TYPE_BINDER, + 0, NULL); + if (node->proc) + binder_inner_proc_unlock(node->proc); + trace_binder_transaction_ref_to_node(t, node, &src_rdata); binder_debug(BINDER_DEBUG_TRANSACTION, " ref %d desc %d -> node %d u%016llx\n", - ref->debug_id, ref->desc, ref->node->debug_id, - (u64)ref->node->ptr); + src_rdata.debug_id, src_rdata.desc, node->debug_id, + (u64)node->ptr); + binder_node_unlock(node); } else { - struct binder_ref *new_ref; + int ret; + struct binder_ref_data dest_rdata; - new_ref = binder_get_ref_for_node(target_proc, ref->node); - if (!new_ref) - return -EINVAL; + binder_node_unlock(node); + ret = binder_inc_ref_for_node(target_proc, node, + fp->hdr.type == BINDER_TYPE_HANDLE, + NULL, &dest_rdata); + if (ret) + goto done; fp->binder = 0; - fp->handle = new_ref->desc; + fp->handle = dest_rdata.desc; fp->cookie = 0; - binder_inc_ref(new_ref, fp->hdr.type == BINDER_TYPE_HANDLE, - NULL); - trace_binder_transaction_ref_to_ref(t, ref, new_ref); + trace_binder_transaction_ref_to_ref(t, node, &src_rdata, + &dest_rdata); binder_debug(BINDER_DEBUG_TRANSACTION, " ref %d desc %d -> ref %d desc %d (node %d)\n", - ref->debug_id, ref->desc, new_ref->debug_id, - new_ref->desc, ref->node->debug_id); + src_rdata.debug_id, src_rdata.desc, + dest_rdata.debug_id, dest_rdata.desc, + node->debug_id); } - return 0; +done: + binder_put_node(node); + return ret; } static int binder_translate_fd(int fd, @@ -1834,7 +2595,8 @@ static int binder_translate_fd_array(struct binder_fd_array_object *fda, * Since the parent was already fixed up, convert it * back to the kernel address space to access it */ - parent_buffer = parent->buffer - target_proc->user_buffer_offset; + parent_buffer = parent->buffer - + binder_alloc_get_user_buffer_offset(&target_proc->alloc); fd_array = (u32 *)(parent_buffer + fda->parent_offset); if (!IS_ALIGNED((unsigned long)fd_array, sizeof(u32))) { binder_user_error("%d:%d parent offset not aligned correctly.\n", @@ -1902,12 +2664,87 @@ static int binder_fixup_parent(struct binder_transaction *t, return -EINVAL; } parent_buffer = (u8 *)(parent->buffer - - target_proc->user_buffer_offset); + binder_alloc_get_user_buffer_offset( + &target_proc->alloc)); *(binder_uintptr_t *)(parent_buffer + bp->parent_offset) = bp->buffer; return 0; } +/** + * binder_proc_transaction() - sends a transaction to a process and wakes it up + * @t: transaction to send + * @proc: process to send the transaction to + * @thread: thread in @proc to send the transaction to (may be NULL) + * + * This function queues a transaction to the specified process. It will try + * to find a thread in the target process to handle the transaction and + * wake it up. 
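The queuing policy this comment describes can be summarized as a small decision function. The sketch below is a deliberate simplification that ignores locking, priority inheritance and the dead-process check; pick_target and its arguments are invented for illustration and are not the driver's names.

    #include <stdbool.h>
    #include <stdio.h>

    enum target {
        TARGET_NODE_ASYNC_QUEUE,    /* serialize behind pending async work */
        TARGET_THREAD_QUEUE,        /* a specific or waiting thread */
        TARGET_PROC_QUEUE,          /* no thread available: whole process */
    };

    struct node_state {
        bool has_async_transaction;
    };

    /* Simplified selection: oneway work queues behind any async work already
     * pending on the node; otherwise prefer a thread (the reply target, or a
     * thread waiting for proc work), falling back to the proc-wide queue. */
    static enum target pick_target(bool oneway, bool have_reply_thread,
                                   bool have_waiting_thread,
                                   struct node_state *node)
    {
        if (oneway) {
            if (node->has_async_transaction)
                return TARGET_NODE_ASYNC_QUEUE;
            node->has_async_transaction = true;
        }
        if (have_reply_thread || have_waiting_thread)
            return TARGET_THREAD_QUEUE;
        return TARGET_PROC_QUEUE;
    }

    int main(void)
    {
        struct node_state node = { .has_async_transaction = false };

        printf("first oneway   -> %d\n",
               pick_target(true, false, true, &node));   /* thread queue */
        printf("second oneway  -> %d\n",
               pick_target(true, false, true, &node));   /* async queue */
        printf("sync, no waiter -> %d\n",
               pick_target(false, false, false, &node)); /* proc queue */
        return 0;
    }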
If no thread is found, the work is queued to the proc + * waitqueue. + * + * If the @thread parameter is not NULL, the transaction is always queued + * to the waitlist of that specific thread. + * + * Return: true if the transactions was successfully queued + * false if the target process or thread is dead + */ +static bool binder_proc_transaction(struct binder_transaction *t, + struct binder_proc *proc, + struct binder_thread *thread) +{ + struct list_head *target_list = NULL; + struct binder_node *node = t->buffer->target_node; + struct binder_priority node_prio; + bool oneway = !!(t->flags & TF_ONE_WAY); + bool wakeup = true; + + BUG_ON(!node); + binder_node_lock(node); + node_prio.prio = node->min_priority; + node_prio.sched_policy = node->sched_policy; + + if (oneway) { + BUG_ON(thread); + if (node->has_async_transaction) { + target_list = &node->async_todo; + wakeup = false; + } else { + node->has_async_transaction = 1; + } + } + + binder_inner_proc_lock(proc); + + if (proc->is_dead || (thread && thread->is_dead)) { + binder_inner_proc_unlock(proc); + binder_node_unlock(node); + return false; + } + + if (!thread && !target_list) + thread = binder_select_thread_ilocked(proc); + + if (thread) { + target_list = &thread->todo; + binder_transaction_priority(thread->task, t, node_prio, + node->inherit_rt); + } else if (!target_list) { + target_list = &proc->todo; + } else { + BUG_ON(target_list != &node->async_todo); + } + + binder_enqueue_work_ilocked(&t->work, target_list); + + if (wakeup) + binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */); + + binder_inner_proc_unlock(proc); + binder_node_unlock(node); + + return true; +} + static void binder_transaction(struct binder_proc *proc, struct binder_thread *thread, struct binder_transaction_data *tr, int reply, @@ -1919,19 +2756,21 @@ static void binder_transaction(struct binder_proc *proc, binder_size_t *offp, *off_end, *off_start; binder_size_t off_min; u8 *sg_bufp, *sg_buf_end; - struct binder_proc *target_proc; + struct binder_proc *target_proc = NULL; struct binder_thread *target_thread = NULL; struct binder_node *target_node = NULL; - struct list_head *target_list; - wait_queue_head_t *target_wait; struct binder_transaction *in_reply_to = NULL; struct binder_transaction_log_entry *e; - uint32_t return_error; + uint32_t return_error = 0; + uint32_t return_error_param = 0; + uint32_t return_error_line = 0; struct binder_buffer_object *last_fixup_obj = NULL; binder_size_t last_fixup_min_off = 0; struct binder_context *context = proc->context; + int t_debug_id = atomic_inc_return(&binder_last_id); - e = binder_transaction_log_add(&context->transaction_log); + e = binder_transaction_log_add(&binder_transaction_log); + e->debug_id = t_debug_id; e->call_type = reply ? 
2 : !!(tr->flags & TF_ONE_WAY); e->from_proc = proc->pid; e->from_thread = thread->pid; @@ -1941,29 +2780,39 @@ static void binder_transaction(struct binder_proc *proc, e->context_name = proc->context->name; if (reply) { + binder_inner_proc_lock(proc); in_reply_to = thread->transaction_stack; if (in_reply_to == NULL) { + binder_inner_proc_unlock(proc); binder_user_error("%d:%d got reply transaction with no transaction stack\n", proc->pid, thread->pid); return_error = BR_FAILED_REPLY; + return_error_param = -EPROTO; + return_error_line = __LINE__; goto err_empty_call_stack; } - binder_set_nice(in_reply_to->saved_priority); if (in_reply_to->to_thread != thread) { + spin_lock(&in_reply_to->lock); binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n", proc->pid, thread->pid, in_reply_to->debug_id, in_reply_to->to_proc ? in_reply_to->to_proc->pid : 0, in_reply_to->to_thread ? in_reply_to->to_thread->pid : 0); + spin_unlock(&in_reply_to->lock); + binder_inner_proc_unlock(proc); return_error = BR_FAILED_REPLY; + return_error_param = -EPROTO; + return_error_line = __LINE__; in_reply_to = NULL; goto err_bad_call_stack; } thread->transaction_stack = in_reply_to->to_parent; - target_thread = in_reply_to->from; + binder_inner_proc_unlock(proc); + target_thread = binder_get_txn_from_and_acq_inner(in_reply_to); if (target_thread == NULL) { return_error = BR_DEAD_REPLY; + return_error_line = __LINE__; goto err_dead_binder; } if (target_thread->transaction_stack != in_reply_to) { @@ -1972,89 +2821,137 @@ static void binder_transaction(struct binder_proc *proc, target_thread->transaction_stack ? target_thread->transaction_stack->debug_id : 0, in_reply_to->debug_id); + binder_inner_proc_unlock(target_thread->proc); return_error = BR_FAILED_REPLY; + return_error_param = -EPROTO; + return_error_line = __LINE__; in_reply_to = NULL; target_thread = NULL; goto err_dead_binder; } target_proc = target_thread->proc; + target_proc->tmp_ref++; + binder_inner_proc_unlock(target_thread->proc); } else { if (tr->target.handle) { struct binder_ref *ref; - ref = binder_get_ref(proc, tr->target.handle, true); - if (ref == NULL) { + /* + * There must already be a strong ref + * on this node. If so, do a strong + * increment on the node to ensure it + * stays alive until the transaction is + * done. 
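The idiom used here, take a reference while the lock that made the lookup valid is still held, then keep using the object after the unlock, is generic. A minimal sketch follows; the single-slot registry and lookup_and_pin are hypothetical names, not part of the driver.

    #include <pthread.h>

    struct node {
        int refs;                   /* protected by lookup_lock */
        /* ... payload ... */
    };

    static pthread_mutex_t lookup_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct node *registry;   /* single-slot "table" for brevity */

    /* Look the node up and take a reference before dropping the lock, so
     * the caller can keep using it after the unlock. */
    static struct node *lookup_and_pin(void)
    {
        struct node *n;

        pthread_mutex_lock(&lookup_lock);
        n = registry;
        if (n)
            n->refs++;              /* pin while the lookup is still valid */
        pthread_mutex_unlock(&lookup_lock);
        return n;                   /* NULL if nothing was registered */
    }

    int main(void)
    {
        struct node n = { .refs = 1 };

        registry = &n;
        return lookup_and_pin() ? 0 : 1;    /* n.refs is now 2 */
    }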
+ */ + binder_proc_lock(proc); + ref = binder_get_ref_olocked(proc, tr->target.handle, + true); + if (ref) { + binder_inc_node(ref->node, 1, 0, NULL); + target_node = ref->node; + } + binder_proc_unlock(proc); + if (target_node == NULL) { binder_user_error("%d:%d got transaction to invalid handle\n", proc->pid, thread->pid); return_error = BR_FAILED_REPLY; + return_error_param = -EINVAL; + return_error_line = __LINE__; goto err_invalid_target_handle; } - target_node = ref->node; } else { + mutex_lock(&context->context_mgr_node_lock); target_node = context->binder_context_mgr_node; if (target_node == NULL) { return_error = BR_DEAD_REPLY; + mutex_unlock(&context->context_mgr_node_lock); + return_error_line = __LINE__; goto err_no_context_mgr_node; } + binder_inc_node(target_node, 1, 0, NULL); + mutex_unlock(&context->context_mgr_node_lock); } e->to_node = target_node->debug_id; + binder_node_lock(target_node); target_proc = target_node->proc; if (target_proc == NULL) { + binder_node_unlock(target_node); return_error = BR_DEAD_REPLY; + return_error_line = __LINE__; goto err_dead_binder; } + binder_inner_proc_lock(target_proc); + target_proc->tmp_ref++; + binder_inner_proc_unlock(target_proc); + binder_node_unlock(target_node); if (security_binder_transaction(proc->tsk, target_proc->tsk) < 0) { return_error = BR_FAILED_REPLY; + return_error_param = -EPERM; + return_error_line = __LINE__; goto err_invalid_target_handle; } + binder_inner_proc_lock(proc); if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) { struct binder_transaction *tmp; tmp = thread->transaction_stack; if (tmp->to_thread != thread) { + spin_lock(&tmp->lock); binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n", proc->pid, thread->pid, tmp->debug_id, tmp->to_proc ? tmp->to_proc->pid : 0, tmp->to_thread ? 
tmp->to_thread->pid : 0); + spin_unlock(&tmp->lock); + binder_inner_proc_unlock(proc); return_error = BR_FAILED_REPLY; + return_error_param = -EPROTO; + return_error_line = __LINE__; goto err_bad_call_stack; } while (tmp) { - if (tmp->from && tmp->from->proc == target_proc) - target_thread = tmp->from; + struct binder_thread *from; + + spin_lock(&tmp->lock); + from = tmp->from; + if (from && from->proc == target_proc) { + atomic_inc(&from->tmp_ref); + target_thread = from; + spin_unlock(&tmp->lock); + break; + } + spin_unlock(&tmp->lock); tmp = tmp->from_parent; } } + binder_inner_proc_unlock(proc); } - if (target_thread) { + if (target_thread) e->to_thread = target_thread->pid; - target_list = &target_thread->todo; - target_wait = &target_thread->wait; - } else { - target_list = &target_proc->todo; - target_wait = &target_proc->wait; - } e->to_proc = target_proc->pid; /* TODO: reuse incoming transaction for reply */ - t = kzalloc_preempt_disabled(sizeof(*t)); + t = kzalloc(sizeof(*t), GFP_KERNEL); if (t == NULL) { return_error = BR_FAILED_REPLY; + return_error_param = -ENOMEM; + return_error_line = __LINE__; goto err_alloc_t_failed; } binder_stats_created(BINDER_STAT_TRANSACTION); + spin_lock_init(&t->lock); - tcomplete = kzalloc_preempt_disabled(sizeof(*tcomplete)); + tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL); if (tcomplete == NULL) { return_error = BR_FAILED_REPLY; + return_error_param = -ENOMEM; + return_error_line = __LINE__; goto err_alloc_tcomplete_failed; } binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE); - t->debug_id = atomic_inc_return(&binder_last_id); - e->debug_id = t->debug_id; + t->debug_id = t_debug_id; if (reply) binder_debug(BINDER_DEBUG_TRANSACTION, @@ -2084,15 +2981,30 @@ static void binder_transaction(struct binder_proc *proc, t->to_thread = target_thread; t->code = tr->code; t->flags = tr->flags; - t->priority = task_nice(current); + if (!(t->flags & TF_ONE_WAY) && + binder_supported_policy(current->policy)) { + /* Inherit supported policies for synchronous transactions */ + t->priority.sched_policy = current->policy; + t->priority.prio = current->normal_prio; + } else { + /* Otherwise, fall back to the default priority */ + t->priority = target_proc->default_priority; + } trace_binder_transaction(reply, t, target_node); - t->buffer = binder_alloc_buf(target_proc, tr->data_size, + t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size, tr->offsets_size, extra_buffers_size, !reply && (t->flags & TF_ONE_WAY)); - if (t->buffer == NULL) { - return_error = BR_FAILED_REPLY; + if (IS_ERR(t->buffer)) { + /* + * -ESRCH indicates VMA cleared. The target is dying. + */ + return_error_param = PTR_ERR(t->buffer); + return_error = return_error_param == -ESRCH ? 
+ BR_DEAD_REPLY : BR_FAILED_REPLY; + return_error_line = __LINE__; + t->buffer = NULL; goto err_binder_alloc_buf_failed; } t->buffer->allow_user_free = 0; @@ -2100,31 +3012,34 @@ static void binder_transaction(struct binder_proc *proc, t->buffer->transaction = t; t->buffer->target_node = target_node; trace_binder_transaction_alloc_buf(t->buffer); - if (target_node) - binder_inc_node(target_node, 1, 0, NULL); - off_start = (binder_size_t *)(t->buffer->data + ALIGN(tr->data_size, sizeof(void *))); offp = off_start; - if (copy_from_user_preempt_disabled(t->buffer->data, (const void __user *)(uintptr_t) + if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t) tr->data.ptr.buffer, tr->data_size)) { binder_user_error("%d:%d got transaction with invalid data ptr\n", proc->pid, thread->pid); return_error = BR_FAILED_REPLY; + return_error_param = -EFAULT; + return_error_line = __LINE__; goto err_copy_data_failed; } - if (copy_from_user_preempt_disabled(offp, (const void __user *)(uintptr_t) + if (copy_from_user(offp, (const void __user *)(uintptr_t) tr->data.ptr.offsets, tr->offsets_size)) { binder_user_error("%d:%d got transaction with invalid offsets ptr\n", proc->pid, thread->pid); return_error = BR_FAILED_REPLY; + return_error_param = -EFAULT; + return_error_line = __LINE__; goto err_copy_data_failed; } if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) { binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n", proc->pid, thread->pid, (u64)tr->offsets_size); return_error = BR_FAILED_REPLY; + return_error_param = -EINVAL; + return_error_line = __LINE__; goto err_bad_offset; } if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) { @@ -2132,6 +3047,8 @@ static void binder_transaction(struct binder_proc *proc, proc->pid, thread->pid, (u64)extra_buffers_size); return_error = BR_FAILED_REPLY; + return_error_param = -EINVAL; + return_error_line = __LINE__; goto err_bad_offset; } off_end = (void *)off_start + tr->offsets_size; @@ -2148,6 +3065,8 @@ static void binder_transaction(struct binder_proc *proc, (u64)off_min, (u64)t->buffer->data_size); return_error = BR_FAILED_REPLY; + return_error_param = -EINVAL; + return_error_line = __LINE__; goto err_bad_offset; } @@ -2162,6 +3081,8 @@ static void binder_transaction(struct binder_proc *proc, ret = binder_translate_binder(fp, t, thread); if (ret < 0) { return_error = BR_FAILED_REPLY; + return_error_param = ret; + return_error_line = __LINE__; goto err_translate_failed; } } break; @@ -2173,6 +3094,8 @@ static void binder_transaction(struct binder_proc *proc, ret = binder_translate_handle(fp, t, thread); if (ret < 0) { return_error = BR_FAILED_REPLY; + return_error_param = ret; + return_error_line = __LINE__; goto err_translate_failed; } } break; @@ -2184,6 +3107,8 @@ static void binder_transaction(struct binder_proc *proc, if (target_fd < 0) { return_error = BR_FAILED_REPLY; + return_error_param = target_fd; + return_error_line = __LINE__; goto err_translate_failed; } fp->pad_binder = 0; @@ -2200,6 +3125,8 @@ static void binder_transaction(struct binder_proc *proc, binder_user_error("%d:%d got transaction with invalid parent offset or type\n", proc->pid, thread->pid); return_error = BR_FAILED_REPLY; + return_error_param = -EINVAL; + return_error_line = __LINE__; goto err_bad_parent; } if (!binder_validate_fixup(t->buffer, off_start, @@ -2209,12 +3136,16 @@ static void binder_transaction(struct binder_proc *proc, binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n", proc->pid, thread->pid); 
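The error paths in this function consistently record three things at the failure site: a driver-level return code, an errno-style detail, and the failing source line. Below is a small stand-alone sketch of that bookkeeping pattern; SET_TXN_ERROR and FAILED_REPLY are invented for illustration and are not the driver's names.

    #include <errno.h>
    #include <stdio.h>

    struct txn_error {
        unsigned int cmd;       /* driver-level failure code */
        int param;              /* errno-style detail */
        int line;               /* __LINE__ at the failure site */
    };

    /* Record all three pieces at the point of failure in one statement. */
    #define SET_TXN_ERROR(err, c, p)        \
        do {                                \
            (err)->cmd = (c);               \
            (err)->param = (p);             \
            (err)->line = __LINE__;         \
        } while (0)

    enum { FAILED_REPLY = 1 };  /* illustrative stand-in for a BR_* code */

    static int validate(size_t offsets_size, struct txn_error *err)
    {
        if (offsets_size % sizeof(long)) {
            SET_TXN_ERROR(err, FAILED_REPLY, -EINVAL);
            return -1;
        }
        return 0;
    }

    int main(void)
    {
        struct txn_error err = { 0 };

        if (validate(7, &err))
            fprintf(stderr, "transaction failed %u/%d line %d\n",
                    err.cmd, err.param, err.line);
        return 0;
    }

Capturing the line number at the failure site is what lets a single failed-transaction log entry identify which of the many error exits was taken, without a separate message per exit.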
return_error = BR_FAILED_REPLY; + return_error_param = -EINVAL; + return_error_line = __LINE__; goto err_bad_parent; } ret = binder_translate_fd_array(fda, parent, t, thread, in_reply_to); if (ret < 0) { return_error = BR_FAILED_REPLY; + return_error_param = ret; + return_error_line = __LINE__; goto err_translate_failed; } last_fixup_obj = parent; @@ -2230,20 +3161,24 @@ static void binder_transaction(struct binder_proc *proc, binder_user_error("%d:%d got transaction with too large buffer\n", proc->pid, thread->pid); return_error = BR_FAILED_REPLY; + return_error_param = -EINVAL; + return_error_line = __LINE__; goto err_bad_offset; } - if (copy_from_user_preempt_disabled( - sg_bufp, - (const void __user *)(uintptr_t) - bp->buffer, bp->length)) { + if (copy_from_user(sg_bufp, + (const void __user *)(uintptr_t) + bp->buffer, bp->length)) { binder_user_error("%d:%d got transaction with invalid offsets ptr\n", proc->pid, thread->pid); + return_error_param = -EFAULT; return_error = BR_FAILED_REPLY; + return_error_line = __LINE__; goto err_copy_data_failed; } /* Fixup buffer pointer to target proc address space */ bp->buffer = (uintptr_t)sg_bufp + - target_proc->user_buffer_offset; + binder_alloc_get_user_buffer_offset( + &target_proc->alloc); sg_bufp += ALIGN(bp->length, sizeof(u64)); ret = binder_fixup_parent(t, thread, bp, off_start, @@ -2252,6 +3187,8 @@ static void binder_transaction(struct binder_proc *proc, last_fixup_min_off); if (ret < 0) { return_error = BR_FAILED_REPLY; + return_error_param = ret; + return_error_line = __LINE__; goto err_translate_failed; } last_fixup_obj = bp; @@ -2261,42 +3198,61 @@ static void binder_transaction(struct binder_proc *proc, binder_user_error("%d:%d got transaction with invalid object type, %x\n", proc->pid, thread->pid, hdr->type); return_error = BR_FAILED_REPLY; + return_error_param = -EINVAL; + return_error_line = __LINE__; goto err_bad_object_type; } } + tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE; + binder_enqueue_work(proc, tcomplete, &thread->todo); + t->work.type = BINDER_WORK_TRANSACTION; + if (reply) { + binder_inner_proc_lock(target_proc); + if (target_thread->is_dead) { + binder_inner_proc_unlock(target_proc); + goto err_dead_proc_or_thread; + } BUG_ON(t->buffer->async_transaction != 0); - binder_pop_transaction(target_thread, in_reply_to); + binder_pop_transaction_ilocked(target_thread, in_reply_to); + binder_enqueue_work_ilocked(&t->work, &target_thread->todo); + binder_inner_proc_unlock(target_proc); + wake_up_interruptible_sync(&target_thread->wait); + binder_restore_priority(current, in_reply_to->saved_priority); + binder_free_transaction(in_reply_to); } else if (!(t->flags & TF_ONE_WAY)) { BUG_ON(t->buffer->async_transaction != 0); + binder_inner_proc_lock(proc); t->need_reply = 1; t->from_parent = thread->transaction_stack; thread->transaction_stack = t; + binder_inner_proc_unlock(proc); + if (!binder_proc_transaction(t, target_proc, target_thread)) { + binder_inner_proc_lock(proc); + binder_pop_transaction_ilocked(thread, t); + binder_inner_proc_unlock(proc); + goto err_dead_proc_or_thread; + } } else { BUG_ON(target_node == NULL); BUG_ON(t->buffer->async_transaction != 1); - if (target_node->has_async_transaction) { - target_list = &target_node->async_todo; - target_wait = NULL; - } else - target_node->has_async_transaction = 1; - } - t->work.type = BINDER_WORK_TRANSACTION; - list_add_tail(&t->work.entry, target_list); - tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE; - list_add_tail(&tcomplete->entry, &thread->todo); - 
if (target_wait) { - if (reply || !(t->flags & TF_ONE_WAY)) { - preempt_disable(); - wake_up_interruptible_sync(target_wait); - preempt_enable_no_resched(); - } - else { - wake_up_interruptible(target_wait); - } + if (!binder_proc_transaction(t, target_proc, NULL)) + goto err_dead_proc_or_thread; } + if (target_thread) + binder_thread_dec_tmpref(target_thread); + binder_proc_dec_tmpref(target_proc); + /* + * write barrier to synchronize with initialization + * of log entry + */ + smp_wmb(); + WRITE_ONCE(e->debug_id_done, t_debug_id); return; +err_dead_proc_or_thread: + return_error = BR_DEAD_REPLY; + return_error_line = __LINE__; err_translate_failed: err_bad_object_type: err_bad_offset: @@ -2304,8 +3260,9 @@ err_bad_parent: err_copy_data_failed: trace_binder_transaction_failed_buffer_release(t->buffer); binder_transaction_buffer_release(target_proc, t->buffer, offp); + target_node = NULL; t->buffer->transaction = NULL; - binder_free_buf(target_proc, t->buffer); + binder_alloc_free_buf(&target_proc->alloc, t->buffer); err_binder_alloc_buf_failed: kfree(tcomplete); binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE); @@ -2318,25 +3275,50 @@ err_empty_call_stack: err_dead_binder: err_invalid_target_handle: err_no_context_mgr_node: + if (target_thread) + binder_thread_dec_tmpref(target_thread); + if (target_proc) + binder_proc_dec_tmpref(target_proc); + if (target_node) + binder_dec_node(target_node, 1, 0); + binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, - "%d:%d transaction failed %d, size %lld-%lld\n", - proc->pid, thread->pid, return_error, - (u64)tr->data_size, (u64)tr->offsets_size); + "%d:%d transaction failed %d/%d, size %lld-%lld line %d\n", + proc->pid, thread->pid, return_error, return_error_param, + (u64)tr->data_size, (u64)tr->offsets_size, + return_error_line); { struct binder_transaction_log_entry *fe; - fe = binder_transaction_log_add( - &context->transaction_log_failed); + e->return_error = return_error; + e->return_error_param = return_error_param; + e->return_error_line = return_error_line; + fe = binder_transaction_log_add(&binder_transaction_log_failed); *fe = *e; + /* + * write barrier to synchronize with initialization + * of log entry + */ + smp_wmb(); + WRITE_ONCE(e->debug_id_done, t_debug_id); + WRITE_ONCE(fe->debug_id_done, t_debug_id); } - BUG_ON(thread->return_error != BR_OK); + BUG_ON(thread->return_error.cmd != BR_OK); if (in_reply_to) { - thread->return_error = BR_TRANSACTION_COMPLETE; + binder_restore_priority(current, in_reply_to->saved_priority); + thread->return_error.cmd = BR_TRANSACTION_COMPLETE; + binder_enqueue_work(thread->proc, + &thread->return_error.work, + &thread->todo); binder_send_failed_reply(in_reply_to, return_error); - } else - thread->return_error = return_error; + } else { + thread->return_error.cmd = return_error; + binder_enqueue_work(thread->proc, + &thread->return_error.work, + &thread->todo); + } } static int binder_thread_write(struct binder_proc *proc, @@ -2350,15 +3332,17 @@ static int binder_thread_write(struct binder_proc *proc, void __user *ptr = buffer + *consumed; void __user *end = buffer + size; - while (ptr < end && thread->return_error == BR_OK) { - if (get_user_preempt_disabled(cmd, (uint32_t __user *)ptr)) + while (ptr < end && thread->return_error.cmd == BR_OK) { + int ret; + + if (get_user(cmd, (uint32_t __user *)ptr)) return -EFAULT; ptr += sizeof(uint32_t); trace_binder_command(cmd); - if (_IOC_NR(cmd) < ARRAY_SIZE(context->binder_stats.bc)) { - context->binder_stats.bc[_IOC_NR(cmd)]++; - 
proc->stats.bc[_IOC_NR(cmd)]++; - thread->stats.bc[_IOC_NR(cmd)]++; + if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) { + atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]); + atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]); + atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]); } switch (cmd) { case BC_INCREFS: @@ -2366,53 +3350,61 @@ static int binder_thread_write(struct binder_proc *proc, case BC_RELEASE: case BC_DECREFS: { uint32_t target; - struct binder_ref *ref; const char *debug_string; + bool strong = cmd == BC_ACQUIRE || cmd == BC_RELEASE; + bool increment = cmd == BC_INCREFS || cmd == BC_ACQUIRE; + struct binder_ref_data rdata; - if (get_user_preempt_disabled(target, (uint32_t __user *)ptr)) + if (get_user(target, (uint32_t __user *)ptr)) return -EFAULT; + ptr += sizeof(uint32_t); - if (target == 0 && context->binder_context_mgr_node && - (cmd == BC_INCREFS || cmd == BC_ACQUIRE)) { - ref = binder_get_ref_for_node(proc, - context->binder_context_mgr_node); - if (ref->desc != target) { - binder_user_error("%d:%d tried to acquire reference to desc 0, got %d instead\n", - proc->pid, thread->pid, - ref->desc); - } - } else - ref = binder_get_ref(proc, target, - cmd == BC_ACQUIRE || - cmd == BC_RELEASE); - if (ref == NULL) { - binder_user_error("%d:%d refcount change on invalid ref %d\n", - proc->pid, thread->pid, target); - break; + ret = -1; + if (increment && !target) { + struct binder_node *ctx_mgr_node; + mutex_lock(&context->context_mgr_node_lock); + ctx_mgr_node = context->binder_context_mgr_node; + if (ctx_mgr_node) + ret = binder_inc_ref_for_node( + proc, ctx_mgr_node, + strong, NULL, &rdata); + mutex_unlock(&context->context_mgr_node_lock); + } + if (ret) + ret = binder_update_ref_for_handle( + proc, target, increment, strong, + &rdata); + if (!ret && rdata.desc != target) { + binder_user_error("%d:%d tried to acquire reference to desc %d, got %d instead\n", + proc->pid, thread->pid, + target, rdata.desc); } switch (cmd) { case BC_INCREFS: debug_string = "IncRefs"; - binder_inc_ref(ref, 0, NULL); break; case BC_ACQUIRE: debug_string = "Acquire"; - binder_inc_ref(ref, 1, NULL); break; case BC_RELEASE: debug_string = "Release"; - binder_dec_ref(ref, 1); break; case BC_DECREFS: default: debug_string = "DecRefs"; - binder_dec_ref(ref, 0); + break; + } + if (ret) { + binder_user_error("%d:%d %s %d refcount change on invalid ref %d ret %d\n", + proc->pid, thread->pid, debug_string, + strong, target, ret); break; } binder_debug(BINDER_DEBUG_USER_REFS, - "%d:%d %s ref %d desc %d s %d w %d for node %d\n", - proc->pid, thread->pid, debug_string, ref->debug_id, - ref->desc, ref->strong, ref->weak, ref->node->debug_id); + "%d:%d %s ref %d desc %d s %d w %d\n", + proc->pid, thread->pid, debug_string, + rdata.debug_id, rdata.desc, rdata.strong, + rdata.weak); break; } case BC_INCREFS_DONE: @@ -2420,11 +3412,12 @@ static int binder_thread_write(struct binder_proc *proc, binder_uintptr_t node_ptr; binder_uintptr_t cookie; struct binder_node *node; + bool free_node; - if (get_user_preempt_disabled(node_ptr, (binder_uintptr_t __user *)ptr)) + if (get_user(node_ptr, (binder_uintptr_t __user *)ptr)) return -EFAULT; ptr += sizeof(binder_uintptr_t); - if (get_user_preempt_disabled(cookie, (binder_uintptr_t __user *)ptr)) + if (get_user(cookie, (binder_uintptr_t __user *)ptr)) return -EFAULT; ptr += sizeof(binder_uintptr_t); node = binder_get_node(proc, node_ptr); @@ -2444,13 +3437,17 @@ static int binder_thread_write(struct binder_proc *proc, "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE", (u64)node_ptr, node->debug_id, 
(u64)cookie, (u64)node->cookie); + binder_put_node(node); break; } + binder_node_inner_lock(node); if (cmd == BC_ACQUIRE_DONE) { if (node->pending_strong_ref == 0) { binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n", proc->pid, thread->pid, node->debug_id); + binder_node_inner_unlock(node); + binder_put_node(node); break; } node->pending_strong_ref = 0; @@ -2459,16 +3456,23 @@ static int binder_thread_write(struct binder_proc *proc, binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n", proc->pid, thread->pid, node->debug_id); + binder_node_inner_unlock(node); + binder_put_node(node); break; } node->pending_weak_ref = 0; } - binder_dec_node(node, cmd == BC_ACQUIRE_DONE, 0); + free_node = binder_dec_node_nilocked(node, + cmd == BC_ACQUIRE_DONE, 0); + WARN_ON(free_node); binder_debug(BINDER_DEBUG_USER_REFS, - "%d:%d %s node %d ls %d lw %d\n", + "%d:%d %s node %d ls %d lw %d tr %d\n", proc->pid, thread->pid, cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE", - node->debug_id, node->local_strong_refs, node->local_weak_refs); + node->debug_id, node->local_strong_refs, + node->local_weak_refs, node->tmp_refs); + binder_node_inner_unlock(node); + binder_put_node(node); break; } case BC_ATTEMPT_ACQUIRE: @@ -2482,11 +3486,12 @@ static int binder_thread_write(struct binder_proc *proc, binder_uintptr_t data_ptr; struct binder_buffer *buffer; - if (get_user_preempt_disabled(data_ptr, (binder_uintptr_t __user *)ptr)) + if (get_user(data_ptr, (binder_uintptr_t __user *)ptr)) return -EFAULT; ptr += sizeof(binder_uintptr_t); - buffer = binder_buffer_lookup(proc, data_ptr); + buffer = binder_alloc_prepare_to_free(&proc->alloc, + data_ptr); if (buffer == NULL) { binder_user_error("%d:%d BC_FREE_BUFFER u%016llx no match\n", proc->pid, thread->pid, (u64)data_ptr); @@ -2508,15 +3513,25 @@ static int binder_thread_write(struct binder_proc *proc, buffer->transaction = NULL; } if (buffer->async_transaction && buffer->target_node) { - BUG_ON(!buffer->target_node->has_async_transaction); - if (list_empty(&buffer->target_node->async_todo)) - buffer->target_node->has_async_transaction = 0; + struct binder_node *buf_node; + struct binder_work *w; + + buf_node = buffer->target_node; + binder_node_inner_lock(buf_node); + BUG_ON(!buf_node->has_async_transaction); + BUG_ON(buf_node->proc != proc); + w = binder_dequeue_work_head_ilocked( + &buf_node->async_todo); + if (!w) + buf_node->has_async_transaction = 0; else - list_move_tail(buffer->target_node->async_todo.next, &thread->todo); + binder_enqueue_work_ilocked( + w, &thread->todo); + binder_node_inner_unlock(buf_node); } trace_binder_transaction_buffer_release(buffer); binder_transaction_buffer_release(proc, buffer, NULL); - binder_free_buf(proc, buffer); + binder_alloc_free_buf(&proc->alloc, buffer); break; } @@ -2524,8 +3539,7 @@ static int binder_thread_write(struct binder_proc *proc, case BC_REPLY_SG: { struct binder_transaction_data_sg tr; - if (copy_from_user_preempt_disabled(&tr, ptr, - sizeof(tr))) + if (copy_from_user(&tr, ptr, sizeof(tr))) return -EFAULT; ptr += sizeof(tr); binder_transaction(proc, thread, &tr.transaction_data, @@ -2536,7 +3550,7 @@ static int binder_thread_write(struct binder_proc *proc, case BC_REPLY: { struct binder_transaction_data tr; - if (copy_from_user_preempt_disabled(&tr, ptr, sizeof(tr))) + if (copy_from_user(&tr, ptr, sizeof(tr))) return -EFAULT; ptr += sizeof(tr); binder_transaction(proc, thread, &tr, @@ -2548,6 +3562,7 @@ static int 
binder_thread_write(struct binder_proc *proc, binder_debug(BINDER_DEBUG_THREADS, "%d:%d BC_REGISTER_LOOPER\n", proc->pid, thread->pid); + binder_inner_proc_lock(proc); if (thread->looper & BINDER_LOOPER_STATE_ENTERED) { thread->looper |= BINDER_LOOPER_STATE_INVALID; binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n", @@ -2561,6 +3576,7 @@ static int binder_thread_write(struct binder_proc *proc, proc->requested_threads_started++; } thread->looper |= BINDER_LOOPER_STATE_REGISTERED; + binder_inner_proc_unlock(proc); break; case BC_ENTER_LOOPER: binder_debug(BINDER_DEBUG_THREADS, @@ -2585,15 +3601,37 @@ static int binder_thread_write(struct binder_proc *proc, uint32_t target; binder_uintptr_t cookie; struct binder_ref *ref; - struct binder_ref_death *death; + struct binder_ref_death *death = NULL; - if (get_user_preempt_disabled(target, (uint32_t __user *)ptr)) + if (get_user(target, (uint32_t __user *)ptr)) return -EFAULT; ptr += sizeof(uint32_t); - if (get_user_preempt_disabled(cookie, (binder_uintptr_t __user *)ptr)) + if (get_user(cookie, (binder_uintptr_t __user *)ptr)) return -EFAULT; ptr += sizeof(binder_uintptr_t); - ref = binder_get_ref(proc, target, false); + if (cmd == BC_REQUEST_DEATH_NOTIFICATION) { + /* + * Allocate memory for death notification + * before taking lock + */ + death = kzalloc(sizeof(*death), GFP_KERNEL); + if (death == NULL) { + WARN_ON(thread->return_error.cmd != + BR_OK); + thread->return_error.cmd = BR_ERROR; + binder_enqueue_work( + thread->proc, + &thread->return_error.work, + &thread->todo); + binder_debug( + BINDER_DEBUG_FAILED_TRANSACTION, + "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n", + proc->pid, thread->pid); + break; + } + } + binder_proc_lock(proc); + ref = binder_get_ref_olocked(proc, target, false); if (ref == NULL) { binder_user_error("%d:%d %s invalid ref %d\n", proc->pid, thread->pid, @@ -2601,6 +3639,8 @@ static int binder_thread_write(struct binder_proc *proc, "BC_REQUEST_DEATH_NOTIFICATION" : "BC_CLEAR_DEATH_NOTIFICATION", target); + binder_proc_unlock(proc); + kfree(death); break; } @@ -2610,21 +3650,18 @@ static int binder_thread_write(struct binder_proc *proc, cmd == BC_REQUEST_DEATH_NOTIFICATION ? 
"BC_REQUEST_DEATH_NOTIFICATION" : "BC_CLEAR_DEATH_NOTIFICATION", - (u64)cookie, ref->debug_id, ref->desc, - ref->strong, ref->weak, ref->node->debug_id); + (u64)cookie, ref->data.debug_id, + ref->data.desc, ref->data.strong, + ref->data.weak, ref->node->debug_id); + binder_node_lock(ref->node); if (cmd == BC_REQUEST_DEATH_NOTIFICATION) { if (ref->death) { binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n", proc->pid, thread->pid); - break; - } - death = kzalloc_preempt_disabled(sizeof(*death)); - if (death == NULL) { - thread->return_error = BR_ERROR; - binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, - "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n", - proc->pid, thread->pid); + binder_node_unlock(ref->node); + binder_proc_unlock(proc); + kfree(death); break; } binder_stats_created(BINDER_STAT_DEATH); @@ -2633,17 +3670,29 @@ static int binder_thread_write(struct binder_proc *proc, ref->death = death; if (ref->node->proc == NULL) { ref->death->work.type = BINDER_WORK_DEAD_BINDER; - if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) { - list_add_tail(&ref->death->work.entry, &thread->todo); - } else { - list_add_tail(&ref->death->work.entry, &proc->todo); - wake_up_interruptible(&proc->wait); + if (thread->looper & + (BINDER_LOOPER_STATE_REGISTERED | + BINDER_LOOPER_STATE_ENTERED)) + binder_enqueue_work( + proc, + &ref->death->work, + &thread->todo); + else { + binder_inner_proc_lock(proc); + binder_enqueue_work_ilocked( + &ref->death->work, + &proc->todo); + binder_wakeup_proc_ilocked( + proc); + binder_inner_proc_unlock(proc); } } } else { if (ref->death == NULL) { binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n", proc->pid, thread->pid); + binder_node_unlock(ref->node); + binder_proc_unlock(proc); break; } death = ref->death; @@ -2652,33 +3701,52 @@ static int binder_thread_write(struct binder_proc *proc, proc->pid, thread->pid, (u64)death->cookie, (u64)cookie); + binder_node_unlock(ref->node); + binder_proc_unlock(proc); break; } ref->death = NULL; + binder_inner_proc_lock(proc); if (list_empty(&death->work.entry)) { death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION; - if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) { - list_add_tail(&death->work.entry, &thread->todo); - } else { - list_add_tail(&death->work.entry, &proc->todo); - wake_up_interruptible(&proc->wait); + if (thread->looper & + (BINDER_LOOPER_STATE_REGISTERED | + BINDER_LOOPER_STATE_ENTERED)) + binder_enqueue_work_ilocked( + &death->work, + &thread->todo); + else { + binder_enqueue_work_ilocked( + &death->work, + &proc->todo); + binder_wakeup_proc_ilocked( + proc); } } else { BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER); death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR; } + binder_inner_proc_unlock(proc); } + binder_node_unlock(ref->node); + binder_proc_unlock(proc); } break; case BC_DEAD_BINDER_DONE: { struct binder_work *w; binder_uintptr_t cookie; struct binder_ref_death *death = NULL; - if (get_user_preempt_disabled(cookie, (binder_uintptr_t __user *)ptr)) + + if (get_user(cookie, (binder_uintptr_t __user *)ptr)) return -EFAULT; ptr += sizeof(cookie); - list_for_each_entry(w, &proc->delivered_death, entry) { - struct binder_ref_death *tmp_death = container_of(w, struct binder_ref_death, work); + binder_inner_proc_lock(proc); + list_for_each_entry(w, &proc->delivered_death, + entry) { + struct binder_ref_death *tmp_death = + container_of(w, + struct 
binder_ref_death, + work); if (tmp_death->cookie == cookie) { death = tmp_death; @@ -2692,21 +3760,26 @@ static int binder_thread_write(struct binder_proc *proc, if (death == NULL) { binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n", proc->pid, thread->pid, (u64)cookie); + binder_inner_proc_unlock(proc); break; } - - list_del_init(&death->work.entry); + binder_dequeue_work_ilocked(&death->work); if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) { death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION; - if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) { - list_add_tail(&death->work.entry, &thread->todo); - } else { - list_add_tail(&death->work.entry, &proc->todo); - wake_up_interruptible(&proc->wait); + if (thread->looper & + (BINDER_LOOPER_STATE_REGISTERED | + BINDER_LOOPER_STATE_ENTERED)) + binder_enqueue_work_ilocked( + &death->work, &thread->todo); + else { + binder_enqueue_work_ilocked( + &death->work, + &proc->todo); + binder_wakeup_proc_ilocked(proc); } } - } - break; + binder_inner_proc_unlock(proc); + } break; default: pr_err("%d:%d unknown command %d\n", @@ -2722,24 +3795,80 @@ static void binder_stat_br(struct binder_proc *proc, struct binder_thread *thread, uint32_t cmd) { trace_binder_return(cmd); - if (_IOC_NR(cmd) < ARRAY_SIZE(proc->stats.br)) { - proc->context->binder_stats.br[_IOC_NR(cmd)]++; - proc->stats.br[_IOC_NR(cmd)]++; - thread->stats.br[_IOC_NR(cmd)]++; + if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) { + atomic_inc(&binder_stats.br[_IOC_NR(cmd)]); + atomic_inc(&proc->stats.br[_IOC_NR(cmd)]); + atomic_inc(&thread->stats.br[_IOC_NR(cmd)]); } } -static int binder_has_proc_work(struct binder_proc *proc, - struct binder_thread *thread) -{ - return !list_empty(&proc->todo) || - (thread->looper & BINDER_LOOPER_STATE_NEED_RETURN); -} - static int binder_has_thread_work(struct binder_thread *thread) { - return !list_empty(&thread->todo) || thread->return_error != BR_OK || - (thread->looper & BINDER_LOOPER_STATE_NEED_RETURN); + return !binder_worklist_empty(thread->proc, &thread->todo) || + thread->looper_need_return; +} + +static int binder_put_node_cmd(struct binder_proc *proc, + struct binder_thread *thread, + void __user **ptrp, + binder_uintptr_t node_ptr, + binder_uintptr_t node_cookie, + int node_debug_id, + uint32_t cmd, const char *cmd_name) +{ + void __user *ptr = *ptrp; + + if (put_user(cmd, (uint32_t __user *)ptr)) + return -EFAULT; + ptr += sizeof(uint32_t); + + if (put_user(node_ptr, (binder_uintptr_t __user *)ptr)) + return -EFAULT; + ptr += sizeof(binder_uintptr_t); + + if (put_user(node_cookie, (binder_uintptr_t __user *)ptr)) + return -EFAULT; + ptr += sizeof(binder_uintptr_t); + + binder_stat_br(proc, thread, cmd); + binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s %d u%016llx c%016llx\n", + proc->pid, thread->pid, cmd_name, node_debug_id, + (u64)node_ptr, (u64)node_cookie); + + *ptrp = ptr; + return 0; +} + +static int binder_wait_for_work(struct binder_thread *thread, + bool do_proc_work) +{ + DEFINE_WAIT(wait); + struct binder_proc *proc = thread->proc; + int ret = 0; + + freezer_do_not_count(); + binder_inner_proc_lock(proc); + for (;;) { + prepare_to_wait(&thread->wait, &wait, TASK_INTERRUPTIBLE); + if (binder_has_work_ilocked(thread, do_proc_work)) + break; + if (do_proc_work) + list_add(&thread->waiting_thread_node, + &proc->waiting_threads); + binder_inner_proc_unlock(proc); + schedule(); + binder_inner_proc_lock(proc); + list_del_init(&thread->waiting_thread_node); + if 
(signal_pending(current)) { + ret = -ERESTARTSYS; + break; + } + } + finish_wait(&thread->wait, &wait); + binder_inner_proc_unlock(proc); + freezer_count(); + + return ret; } static int binder_thread_read(struct binder_proc *proc, @@ -2755,43 +3884,21 @@ static int binder_thread_read(struct binder_proc *proc, int wait_for_proc_work; if (*consumed == 0) { - if (put_user_preempt_disabled(BR_NOOP, (uint32_t __user *)ptr)) + if (put_user(BR_NOOP, (uint32_t __user *)ptr)) return -EFAULT; ptr += sizeof(uint32_t); } retry: - wait_for_proc_work = thread->transaction_stack == NULL && - list_empty(&thread->todo); - - if (thread->return_error != BR_OK && ptr < end) { - if (thread->return_error2 != BR_OK) { - if (put_user_preempt_disabled(thread->return_error2, (uint32_t __user *)ptr)) - return -EFAULT; - ptr += sizeof(uint32_t); - binder_stat_br(proc, thread, thread->return_error2); - if (ptr == end) - goto done; - thread->return_error2 = BR_OK; - } - if (put_user_preempt_disabled(thread->return_error, (uint32_t __user *)ptr)) - return -EFAULT; - ptr += sizeof(uint32_t); - binder_stat_br(proc, thread, thread->return_error); - thread->return_error = BR_OK; - goto done; - } - + binder_inner_proc_lock(proc); + wait_for_proc_work = binder_available_for_proc_work_ilocked(thread); + binder_inner_proc_unlock(proc); thread->looper |= BINDER_LOOPER_STATE_WAITING; - if (wait_for_proc_work) - proc->ready_threads++; - - binder_unlock(proc->context, __func__); trace_binder_wait_for_work(wait_for_proc_work, !!thread->transaction_stack, - !list_empty(&thread->todo)); + !binder_worklist_empty(proc, &thread->todo)); if (wait_for_proc_work) { if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED))) { @@ -2800,24 +3907,16 @@ retry: wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2); } - binder_set_nice(proc->default_priority); - if (non_block) { - if (!binder_has_proc_work(proc, thread)) - ret = -EAGAIN; - } else - ret = wait_event_freezable_exclusive(proc->wait, binder_has_proc_work(proc, thread)); - } else { - if (non_block) { - if (!binder_has_thread_work(thread)) - ret = -EAGAIN; - } else - ret = wait_event_freezable(thread->wait, binder_has_thread_work(thread)); + binder_restore_priority(current, proc->default_priority); } - binder_lock(proc->context, __func__); + if (non_block) { + if (!binder_has_work(thread, wait_for_proc_work)) + ret = -EAGAIN; + } else { + ret = binder_wait_for_work(thread, wait_for_proc_work); + } - if (wait_for_proc_work) - proc->ready_threads--; thread->looper &= ~BINDER_LOOPER_STATE_WAITING; if (ret) @@ -2826,33 +3925,54 @@ retry: while (1) { uint32_t cmd; struct binder_transaction_data tr; - struct binder_work *w; + struct binder_work *w = NULL; + struct list_head *list = NULL; struct binder_transaction *t = NULL; + struct binder_thread *t_from; + + binder_inner_proc_lock(proc); + if (!binder_worklist_empty_ilocked(&thread->todo)) + list = &thread->todo; + else if (!binder_worklist_empty_ilocked(&proc->todo) && + wait_for_proc_work) + list = &proc->todo; + else { + binder_inner_proc_unlock(proc); - if (!list_empty(&thread->todo)) { - w = list_first_entry(&thread->todo, struct binder_work, - entry); - } else if (!list_empty(&proc->todo) && wait_for_proc_work) { - w = list_first_entry(&proc->todo, struct binder_work, - entry); - } else { /* no data added */ - if (ptr - buffer == 4 && - !(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN)) + if (ptr - buffer == 4 && !thread->looper_need_return) goto retry; break; } - if (end - ptr < 
sizeof(tr) + 4) + if (end - ptr < sizeof(tr) + 4) { + binder_inner_proc_unlock(proc); break; + } + w = binder_dequeue_work_head_ilocked(list); switch (w->type) { case BINDER_WORK_TRANSACTION: { + binder_inner_proc_unlock(proc); t = container_of(w, struct binder_transaction, work); } break; + case BINDER_WORK_RETURN_ERROR: { + struct binder_error *e = container_of( + w, struct binder_error, work); + + WARN_ON(e->cmd == BR_OK); + binder_inner_proc_unlock(proc); + if (put_user(e->cmd, (uint32_t __user *)ptr)) + return -EFAULT; + e->cmd = BR_OK; + ptr += sizeof(uint32_t); + + binder_stat_br(proc, thread, cmd); + } break; case BINDER_WORK_TRANSACTION_COMPLETE: { + binder_inner_proc_unlock(proc); cmd = BR_TRANSACTION_COMPLETE; - if (put_user_preempt_disabled(cmd, (uint32_t __user *) ptr)) + if (put_user(cmd, (uint32_t __user *)ptr)) return -EFAULT; ptr += sizeof(uint32_t); @@ -2860,112 +3980,134 @@ retry: binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE, "%d:%d BR_TRANSACTION_COMPLETE\n", proc->pid, thread->pid); - - list_del(&w->entry); kfree(w); binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE); } break; case BINDER_WORK_NODE: { struct binder_node *node = container_of(w, struct binder_node, work); - uint32_t cmd = BR_NOOP; - const char *cmd_name; - int strong = node->internal_strong_refs || node->local_strong_refs; - int weak = !hlist_empty(&node->refs) || node->local_weak_refs || strong; + int strong, weak; + binder_uintptr_t node_ptr = node->ptr; + binder_uintptr_t node_cookie = node->cookie; + int node_debug_id = node->debug_id; + int has_weak_ref; + int has_strong_ref; + void __user *orig_ptr = ptr; - if (weak && !node->has_weak_ref) { - cmd = BR_INCREFS; - cmd_name = "BR_INCREFS"; + BUG_ON(proc != node->proc); + strong = node->internal_strong_refs || + node->local_strong_refs; + weak = !hlist_empty(&node->refs) || + node->local_weak_refs || + node->tmp_refs || strong; + has_strong_ref = node->has_strong_ref; + has_weak_ref = node->has_weak_ref; + + if (weak && !has_weak_ref) { node->has_weak_ref = 1; node->pending_weak_ref = 1; node->local_weak_refs++; - } else if (strong && !node->has_strong_ref) { - cmd = BR_ACQUIRE; - cmd_name = "BR_ACQUIRE"; + } + if (strong && !has_strong_ref) { node->has_strong_ref = 1; node->pending_strong_ref = 1; node->local_strong_refs++; - } else if (!strong && node->has_strong_ref) { - cmd = BR_RELEASE; - cmd_name = "BR_RELEASE"; + } + if (!strong && has_strong_ref) node->has_strong_ref = 0; - } else if (!weak && node->has_weak_ref) { - cmd = BR_DECREFS; - cmd_name = "BR_DECREFS"; + if (!weak && has_weak_ref) node->has_weak_ref = 0; - } - if (cmd != BR_NOOP) { - if (put_user_preempt_disabled(cmd, (uint32_t __user *) ptr)) - return -EFAULT; - ptr += sizeof(uint32_t); - if (put_user_preempt_disabled(node->ptr, (binder_uintptr_t __user *) - (binder_uintptr_t __user *)ptr)) - return -EFAULT; - ptr += sizeof(binder_uintptr_t); - if (put_user_preempt_disabled(node->cookie, (binder_uintptr_t __user *) - (binder_uintptr_t __user *)ptr)) - return -EFAULT; - ptr += sizeof(binder_uintptr_t); + if (!weak && !strong) { + binder_debug(BINDER_DEBUG_INTERNAL_REFS, + "%d:%d node %d u%016llx c%016llx deleted\n", + proc->pid, thread->pid, + node_debug_id, + (u64)node_ptr, + (u64)node_cookie); + rb_erase(&node->rb_node, &proc->nodes); + binder_inner_proc_unlock(proc); + binder_node_lock(node); + /* + * Acquire the node lock before freeing the + * node to serialize with other threads that + * may have been holding the node lock while + * decrementing this node (avoids race 
where + * this thread frees while the other thread + * is unlocking the node after the final + * decrement) + */ + binder_node_unlock(node); + binder_free_node(node); + } else + binder_inner_proc_unlock(proc); - binder_stat_br(proc, thread, cmd); - binder_debug(BINDER_DEBUG_USER_REFS, - "%d:%d %s %d u%016llx c%016llx\n", - proc->pid, thread->pid, cmd_name, - node->debug_id, - (u64)node->ptr, (u64)node->cookie); - } else { - list_del_init(&w->entry); - if (!weak && !strong) { - binder_debug(BINDER_DEBUG_INTERNAL_REFS, - "%d:%d node %d u%016llx c%016llx deleted\n", - proc->pid, thread->pid, - node->debug_id, - (u64)node->ptr, - (u64)node->cookie); - rb_erase(&node->rb_node, &proc->nodes); - kfree(node); - binder_stats_deleted(BINDER_STAT_NODE); - } else { - binder_debug(BINDER_DEBUG_INTERNAL_REFS, - "%d:%d node %d u%016llx c%016llx state unchanged\n", - proc->pid, thread->pid, - node->debug_id, - (u64)node->ptr, - (u64)node->cookie); - } - } + if (weak && !has_weak_ref) + ret = binder_put_node_cmd( + proc, thread, &ptr, node_ptr, + node_cookie, node_debug_id, + BR_INCREFS, "BR_INCREFS"); + if (!ret && strong && !has_strong_ref) + ret = binder_put_node_cmd( + proc, thread, &ptr, node_ptr, + node_cookie, node_debug_id, + BR_ACQUIRE, "BR_ACQUIRE"); + if (!ret && !strong && has_strong_ref) + ret = binder_put_node_cmd( + proc, thread, &ptr, node_ptr, + node_cookie, node_debug_id, + BR_RELEASE, "BR_RELEASE"); + if (!ret && !weak && has_weak_ref) + ret = binder_put_node_cmd( + proc, thread, &ptr, node_ptr, + node_cookie, node_debug_id, + BR_DECREFS, "BR_DECREFS"); + if (orig_ptr == ptr) + binder_debug(BINDER_DEBUG_INTERNAL_REFS, + "%d:%d node %d u%016llx c%016llx state unchanged\n", + proc->pid, thread->pid, + node_debug_id, + (u64)node_ptr, + (u64)node_cookie); + if (ret) + return ret; } break; case BINDER_WORK_DEAD_BINDER: case BINDER_WORK_DEAD_BINDER_AND_CLEAR: case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: { struct binder_ref_death *death; uint32_t cmd; + binder_uintptr_t cookie; death = container_of(w, struct binder_ref_death, work); if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE; else cmd = BR_DEAD_BINDER; - if (put_user_preempt_disabled(cmd, (uint32_t __user *) ptr)) - return -EFAULT; - ptr += sizeof(uint32_t); - if (put_user_preempt_disabled(death->cookie, (binder_uintptr_t __user *) ptr)) - return -EFAULT; - ptr += sizeof(binder_uintptr_t); - binder_stat_br(proc, thread, cmd); + cookie = death->cookie; + binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION, "%d:%d %s %016llx\n", proc->pid, thread->pid, cmd == BR_DEAD_BINDER ? 
"BR_DEAD_BINDER" : "BR_CLEAR_DEATH_NOTIFICATION_DONE", - (u64)death->cookie); - + (u64)cookie); if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) { - list_del(&w->entry); + binder_inner_proc_unlock(proc); kfree(death); binder_stats_deleted(BINDER_STAT_DEATH); - } else - list_move(&w->entry, &proc->delivered_death); + } else { + binder_enqueue_work_ilocked( + w, &proc->delivered_death); + binder_inner_proc_unlock(proc); + } + if (put_user(cmd, (uint32_t __user *)ptr)) + return -EFAULT; + ptr += sizeof(uint32_t); + if (put_user(cookie, + (binder_uintptr_t __user *)ptr)) + return -EFAULT; + ptr += sizeof(binder_uintptr_t); + binder_stat_br(proc, thread, cmd); if (cmd == BR_DEAD_BINDER) goto done; /* DEAD_BINDER notifications can cause transactions */ } break; @@ -2977,16 +4119,14 @@ retry: BUG_ON(t->buffer == NULL); if (t->buffer->target_node) { struct binder_node *target_node = t->buffer->target_node; + struct binder_priority node_prio; tr.target.ptr = target_node->ptr; tr.cookie = target_node->cookie; - t->saved_priority = task_nice(current); - if (t->priority < target_node->min_priority && - !(t->flags & TF_ONE_WAY)) - binder_set_nice(t->priority); - else if (!(t->flags & TF_ONE_WAY) || - t->saved_priority > target_node->min_priority) - binder_set_nice(target_node->min_priority); + node_prio.sched_policy = target_node->sched_policy; + node_prio.prio = target_node->min_priority; + binder_transaction_priority(current, t, node_prio, + target_node->inherit_rt); cmd = BR_TRANSACTION; } else { tr.target.ptr = 0; @@ -2997,8 +4137,9 @@ retry: tr.flags = t->flags; tr.sender_euid = from_kuid(current_user_ns(), t->sender_euid); - if (t->from) { - struct task_struct *sender = t->from->proc->tsk; + t_from = binder_get_txn_from(t); + if (t_from) { + struct task_struct *sender = t_from->proc->tsk; tr.sender_pid = task_tgid_nr_ns(sender, task_active_pid_ns(current)); @@ -3008,18 +4149,24 @@ retry: tr.data_size = t->buffer->data_size; tr.offsets_size = t->buffer->offsets_size; - tr.data.ptr.buffer = (binder_uintptr_t)( - (uintptr_t)t->buffer->data + - proc->user_buffer_offset); + tr.data.ptr.buffer = (binder_uintptr_t) + ((uintptr_t)t->buffer->data + + binder_alloc_get_user_buffer_offset(&proc->alloc)); tr.data.ptr.offsets = tr.data.ptr.buffer + ALIGN(t->buffer->data_size, sizeof(void *)); - if (put_user_preempt_disabled(cmd, (uint32_t __user *) ptr)) + if (put_user(cmd, (uint32_t __user *)ptr)) { + if (t_from) + binder_thread_dec_tmpref(t_from); return -EFAULT; + } ptr += sizeof(uint32_t); - if (copy_to_user_preempt_disabled(ptr, &tr, sizeof(tr))) + if (copy_to_user(ptr, &tr, sizeof(tr))) { + if (t_from) + binder_thread_dec_tmpref(t_from); return -EFAULT; + } ptr += sizeof(tr); trace_binder_transaction_received(t); @@ -3029,21 +4176,22 @@ retry: proc->pid, thread->pid, (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" : "BR_REPLY", - t->debug_id, t->from ? t->from->proc->pid : 0, - t->from ? t->from->pid : 0, cmd, + t->debug_id, t_from ? t_from->proc->pid : 0, + t_from ? 
t_from->pid : 0, cmd, t->buffer->data_size, t->buffer->offsets_size, (u64)tr.data.ptr.buffer, (u64)tr.data.ptr.offsets); - list_del(&t->work.entry); + if (t_from) + binder_thread_dec_tmpref(t_from); t->buffer->allow_user_free = 1; if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) { + binder_inner_proc_lock(thread->proc); t->to_parent = thread->transaction_stack; t->to_thread = thread; thread->transaction_stack = t; + binder_inner_proc_unlock(thread->proc); } else { - t->buffer->transaction = NULL; - kfree(t); - binder_stats_deleted(BINDER_STAT_TRANSACTION); + binder_free_transaction(t); } break; } @@ -3051,29 +4199,36 @@ retry: done: *consumed = ptr - buffer; - if (proc->requested_threads + proc->ready_threads == 0 && + binder_inner_proc_lock(proc); + if (proc->requested_threads == 0 && + list_empty(&thread->proc->waiting_threads) && proc->requested_threads_started < proc->max_threads && (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) /* the user-space code fails to */ /*spawn a new thread if we leave this out */) { proc->requested_threads++; + binder_inner_proc_unlock(proc); binder_debug(BINDER_DEBUG_THREADS, "%d:%d BR_SPAWN_LOOPER\n", proc->pid, thread->pid); - if (put_user_preempt_disabled(BR_SPAWN_LOOPER, (uint32_t __user *) buffer)) + if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer)) return -EFAULT; binder_stat_br(proc, thread, BR_SPAWN_LOOPER); - } + } else + binder_inner_proc_unlock(proc); return 0; } -static void binder_release_work(struct list_head *list) +static void binder_release_work(struct binder_proc *proc, + struct list_head *list) { struct binder_work *w; - while (!list_empty(list)) { - w = list_first_entry(list, struct binder_work, entry); - list_del_init(&w->entry); + while (1) { + w = binder_dequeue_work_head(proc, list); + if (!w) + return; + switch (w->type) { case BINDER_WORK_TRANSACTION: { struct binder_transaction *t; @@ -3086,11 +4241,17 @@ static void binder_release_work(struct list_head *list) binder_debug(BINDER_DEBUG_DEAD_TRANSACTION, "undelivered transaction %d\n", t->debug_id); - t->buffer->transaction = NULL; - kfree(t); - binder_stats_deleted(BINDER_STAT_TRANSACTION); + binder_free_transaction(t); } } break; + case BINDER_WORK_RETURN_ERROR: { + struct binder_error *e = container_of( + w, struct binder_error, work); + + binder_debug(BINDER_DEBUG_DEAD_TRANSACTION, + "undelivered TRANSACTION_ERROR: %u\n", + e->cmd); + } break; case BINDER_WORK_TRANSACTION_COMPLETE: { binder_debug(BINDER_DEBUG_DEAD_TRANSACTION, "undelivered TRANSACTION_COMPLETE\n"); @@ -3117,7 +4278,8 @@ static void binder_release_work(struct list_head *list) } -static struct binder_thread *binder_get_thread(struct binder_proc *proc) +static struct binder_thread *binder_get_thread_ilocked( + struct binder_proc *proc, struct binder_thread *new_thread) { struct binder_thread *thread = NULL; struct rb_node *parent = NULL; @@ -3132,38 +4294,102 @@ static struct binder_thread *binder_get_thread(struct binder_proc *proc) else if (current->pid > thread->pid) p = &(*p)->rb_right; else - break; + return thread; } - if (*p == NULL) { - thread = kzalloc_preempt_disabled(sizeof(*thread)); - if (thread == NULL) + if (!new_thread) + return NULL; + thread = new_thread; + binder_stats_created(BINDER_STAT_THREAD); + thread->proc = proc; + thread->pid = current->pid; + get_task_struct(current); + thread->task = current; + atomic_set(&thread->tmp_ref, 0); + init_waitqueue_head(&thread->wait); + INIT_LIST_HEAD(&thread->todo); + rb_link_node(&thread->rb_node, parent, 
p); + rb_insert_color(&thread->rb_node, &proc->threads); + thread->looper_need_return = true; + thread->return_error.work.type = BINDER_WORK_RETURN_ERROR; + thread->return_error.cmd = BR_OK; + thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR; + thread->reply_error.cmd = BR_OK; + INIT_LIST_HEAD(&new_thread->waiting_thread_node); + return thread; +} + +static struct binder_thread *binder_get_thread(struct binder_proc *proc) +{ + struct binder_thread *thread; + struct binder_thread *new_thread; + + binder_inner_proc_lock(proc); + thread = binder_get_thread_ilocked(proc, NULL); + binder_inner_proc_unlock(proc); + if (!thread) { + new_thread = kzalloc(sizeof(*thread), GFP_KERNEL); + if (new_thread == NULL) return NULL; - binder_stats_created(BINDER_STAT_THREAD); - thread->proc = proc; - thread->pid = current->pid; - init_waitqueue_head(&thread->wait); - INIT_LIST_HEAD(&thread->todo); - rb_link_node(&thread->rb_node, parent, p); - rb_insert_color(&thread->rb_node, &proc->threads); - thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN; - thread->return_error = BR_OK; - thread->return_error2 = BR_OK; + binder_inner_proc_lock(proc); + thread = binder_get_thread_ilocked(proc, new_thread); + binder_inner_proc_unlock(proc); + if (thread != new_thread) + kfree(new_thread); } return thread; } -static int binder_free_thread(struct binder_proc *proc, - struct binder_thread *thread) +static void binder_free_proc(struct binder_proc *proc) +{ + BUG_ON(!list_empty(&proc->todo)); + BUG_ON(!list_empty(&proc->delivered_death)); + binder_alloc_deferred_release(&proc->alloc); + put_task_struct(proc->tsk); + binder_stats_deleted(BINDER_STAT_PROC); + kfree(proc); +} + +static void binder_free_thread(struct binder_thread *thread) +{ + BUG_ON(!list_empty(&thread->todo)); + binder_stats_deleted(BINDER_STAT_THREAD); + binder_proc_dec_tmpref(thread->proc); + put_task_struct(thread->task); + kfree(thread); +} + +static int binder_thread_release(struct binder_proc *proc, + struct binder_thread *thread) { struct binder_transaction *t; struct binder_transaction *send_reply = NULL; int active_transactions = 0; + struct binder_transaction *last_t = NULL; + binder_inner_proc_lock(thread->proc); + /* + * take a ref on the proc so it survives + * after we remove this thread from proc->threads. 
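
The new binder_get_thread() above illustrates a pattern this patch set uses repeatedly: look up under the proc inner spinlock, and only if the entry is missing drop the lock, allocate with GFP_KERNEL, retry the lookup with the pre-allocated object, and free it if another thread won the race. A minimal userspace sketch of that pattern follows (a pthread mutex stands in for the inner lock and a sorted list for the rb-tree; all names here are illustrative, not the driver's):

/* Sketch: allocate outside the lock, insert under it, free on a lost race. */
#include <pthread.h>
#include <stdlib.h>

struct thr {
	int pid;
	struct thr *next;
};

static pthread_mutex_t inner_lock = PTHREAD_MUTEX_INITIALIZER;
static struct thr *threads;	/* sorted by pid, protected by inner_lock */

/* Caller holds inner_lock; mirrors the *_ilocked() helper. */
static struct thr *get_thr_locked(int pid, struct thr *new_thr)
{
	struct thr **p = &threads;

	while (*p && (*p)->pid < pid)
		p = &(*p)->next;
	if (*p && (*p)->pid == pid)
		return *p;			/* already present */
	if (!new_thr)
		return NULL;			/* caller must allocate and retry */
	new_thr->pid = pid;
	new_thr->next = *p;
	*p = new_thr;
	return new_thr;
}

static struct thr *get_thr(int pid)
{
	struct thr *thr, *new_thr;

	pthread_mutex_lock(&inner_lock);
	thr = get_thr_locked(pid, NULL);
	pthread_mutex_unlock(&inner_lock);
	if (thr)
		return thr;

	new_thr = calloc(1, sizeof(*new_thr));	/* sleeping allocation, lock dropped */
	if (!new_thr)
		return NULL;
	pthread_mutex_lock(&inner_lock);
	thr = get_thr_locked(pid, new_thr);
	pthread_mutex_unlock(&inner_lock);
	if (thr != new_thr)
		free(new_thr);			/* lost the race; keep the winner */
	return thr;
}
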
+ * The corresponding dec is when we actually + * free the thread in binder_free_thread() + */ + proc->tmp_ref++; + /* + * take a ref on this thread to ensure it + * survives while we are releasing it + */ + atomic_inc(&thread->tmp_ref); rb_erase(&thread->rb_node, &proc->threads); t = thread->transaction_stack; - if (t && t->to_thread == thread) - send_reply = t; + if (t) { + spin_lock(&t->lock); + if (t->to_thread == thread) + send_reply = t; + } + thread->is_dead = true; + while (t) { + last_t = t; active_transactions++; binder_debug(BINDER_DEBUG_DEAD_TRANSACTION, "release %d:%d transaction %d %s, still active\n", @@ -3184,12 +4410,16 @@ static int binder_free_thread(struct binder_proc *proc, t = t->from_parent; } else BUG(); + spin_unlock(&last_t->lock); + if (t) + spin_lock(&t->lock); } + binder_inner_proc_unlock(thread->proc); + if (send_reply) binder_send_failed_reply(send_reply, BR_DEAD_REPLY); - binder_release_work(&thread->todo); - kfree(thread); - binder_stats_deleted(BINDER_STAT_THREAD); + binder_release_work(proc, &thread->todo); + binder_thread_dec_tmpref(thread); return active_transactions; } @@ -3198,30 +4428,24 @@ static unsigned int binder_poll(struct file *filp, { struct binder_proc *proc = filp->private_data; struct binder_thread *thread = NULL; - int wait_for_proc_work; - - binder_lock(proc->context, __func__); + bool wait_for_proc_work; thread = binder_get_thread(proc); - wait_for_proc_work = thread->transaction_stack == NULL && - list_empty(&thread->todo) && thread->return_error == BR_OK; + binder_inner_proc_lock(thread->proc); + thread->looper |= BINDER_LOOPER_STATE_POLL; + wait_for_proc_work = binder_available_for_proc_work_ilocked(thread); - binder_unlock(proc->context, __func__); + binder_inner_proc_unlock(thread->proc); + + if (binder_has_work(thread, wait_for_proc_work)) + return POLLIN; + + poll_wait(filp, &thread->wait, wait); + + if (binder_has_thread_work(thread)) + return POLLIN; - if (wait_for_proc_work) { - if (binder_has_proc_work(proc, thread)) - return POLLIN; - poll_wait(filp, &proc->wait, wait); - if (binder_has_proc_work(proc, thread)) - return POLLIN; - } else { - if (binder_has_thread_work(thread)) - return POLLIN; - poll_wait(filp, &thread->wait, wait); - if (binder_has_thread_work(thread)) - return POLLIN; - } return 0; } @@ -3239,7 +4463,7 @@ static int binder_ioctl_write_read(struct file *filp, ret = -EINVAL; goto out; } - if (copy_from_user_preempt_disabled(&bwr, ubuf, sizeof(bwr))) { + if (copy_from_user(&bwr, ubuf, sizeof(bwr))) { ret = -EFAULT; goto out; } @@ -3257,7 +4481,7 @@ static int binder_ioctl_write_read(struct file *filp, trace_binder_write_done(ret); if (ret < 0) { bwr.read_consumed = 0; - if (copy_to_user_preempt_disabled(ubuf, &bwr, sizeof(bwr))) + if (copy_to_user(ubuf, &bwr, sizeof(bwr))) ret = -EFAULT; goto out; } @@ -3268,10 +4492,12 @@ static int binder_ioctl_write_read(struct file *filp, &bwr.read_consumed, filp->f_flags & O_NONBLOCK); trace_binder_read_done(ret); - if (!list_empty(&proc->todo)) - wake_up_interruptible(&proc->wait); + binder_inner_proc_lock(proc); + if (!binder_worklist_empty_ilocked(&proc->todo)) + binder_wakeup_proc_ilocked(proc); + binder_inner_proc_unlock(proc); if (ret < 0) { - if (copy_to_user_preempt_disabled(ubuf, &bwr, sizeof(bwr))) + if (copy_to_user(ubuf, &bwr, sizeof(bwr))) ret = -EFAULT; goto out; } @@ -3281,7 +4507,7 @@ static int binder_ioctl_write_read(struct file *filp, proc->pid, thread->pid, (u64)bwr.write_consumed, (u64)bwr.write_size, (u64)bwr.read_consumed, (u64)bwr.read_size); - if 
(copy_to_user_preempt_disabled(ubuf, &bwr, sizeof(bwr))) { + if (copy_to_user(ubuf, &bwr, sizeof(bwr))) { ret = -EFAULT; goto out; } @@ -3294,9 +4520,10 @@ static int binder_ioctl_set_ctx_mgr(struct file *filp) int ret = 0; struct binder_proc *proc = filp->private_data; struct binder_context *context = proc->context; - + struct binder_node *new_node; kuid_t curr_euid = current_euid(); + mutex_lock(&context->context_mgr_node_lock); if (context->binder_context_mgr_node) { pr_err("BINDER_SET_CONTEXT_MGR already set\n"); ret = -EBUSY; @@ -3317,24 +4544,52 @@ static int binder_ioctl_set_ctx_mgr(struct file *filp) } else { context->binder_context_mgr_uid = curr_euid; } - context->binder_context_mgr_node = binder_new_node(proc, 0, 0); - if (!context->binder_context_mgr_node) { + new_node = binder_new_node(proc, NULL); + if (!new_node) { ret = -ENOMEM; goto out; } - context->binder_context_mgr_node->local_weak_refs++; - context->binder_context_mgr_node->local_strong_refs++; - context->binder_context_mgr_node->has_strong_ref = 1; - context->binder_context_mgr_node->has_weak_ref = 1; + binder_node_lock(new_node); + new_node->local_weak_refs++; + new_node->local_strong_refs++; + new_node->has_strong_ref = 1; + new_node->has_weak_ref = 1; + context->binder_context_mgr_node = new_node; + binder_node_unlock(new_node); + binder_put_node(new_node); out: + mutex_unlock(&context->context_mgr_node_lock); return ret; } +static int binder_ioctl_get_node_debug_info(struct binder_proc *proc, + struct binder_node_debug_info *info) { + struct rb_node *n; + binder_uintptr_t ptr = info->ptr; + + memset(info, 0, sizeof(*info)); + + binder_inner_proc_lock(proc); + for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) { + struct binder_node *node = rb_entry(n, struct binder_node, + rb_node); + if (node->ptr > ptr) { + info->ptr = node->ptr; + info->cookie = node->cookie; + info->has_strong_ref = node->has_strong_ref; + info->has_weak_ref = node->has_weak_ref; + break; + } + } + binder_inner_proc_unlock(proc); + + return 0; +} + static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { int ret; struct binder_proc *proc = filp->private_data; - struct binder_context *context = proc->context; struct binder_thread *thread; unsigned int size = _IOC_SIZE(cmd); void __user *ubuf = (void __user *)arg; @@ -3348,7 +4603,6 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) if (ret) goto err_unlocked; - binder_lock(context, __func__); thread = binder_get_thread(proc); if (thread == NULL) { ret = -ENOMEM; @@ -3361,12 +4615,19 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) if (ret) goto err; break; - case BINDER_SET_MAX_THREADS: - if (copy_from_user_preempt_disabled(&proc->max_threads, ubuf, sizeof(proc->max_threads))) { + case BINDER_SET_MAX_THREADS: { + int max_threads; + + if (copy_from_user(&max_threads, ubuf, + sizeof(max_threads))) { ret = -EINVAL; goto err; } + binder_inner_proc_lock(proc); + proc->max_threads = max_threads; + binder_inner_proc_unlock(proc); break; + } case BINDER_SET_CONTEXT_MGR: ret = binder_ioctl_set_ctx_mgr(filp); if (ret) @@ -3375,7 +4636,7 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) case BINDER_THREAD_EXIT: binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n", proc->pid, thread->pid); - binder_free_thread(proc, thread); + binder_thread_release(proc, thread); thread = NULL; break; case BINDER_VERSION: { @@ -3385,8 +4646,27 @@ static long binder_ioctl(struct file *filp, 
unsigned int cmd, unsigned long arg) ret = -EINVAL; goto err; } - if (put_user_preempt_disabled(BINDER_CURRENT_PROTOCOL_VERSION, &ver->protocol_version)) { - ret = -EINVAL; + if (put_user(BINDER_CURRENT_PROTOCOL_VERSION, + &ver->protocol_version)) { + ret = -EINVAL; + goto err; + } + break; + } + case BINDER_GET_NODE_DEBUG_INFO: { + struct binder_node_debug_info info; + + if (copy_from_user(&info, ubuf, sizeof(info))) { + ret = -EFAULT; + goto err; + } + + ret = binder_ioctl_get_node_debug_info(proc, &info); + if (ret < 0) + goto err; + + if (copy_to_user(ubuf, &info, sizeof(info))) { + ret = -EFAULT; goto err; } break; @@ -3398,8 +4678,7 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) ret = 0; err: if (thread) - thread->looper &= ~BINDER_LOOPER_STATE_NEED_RETURN; - binder_unlock(context, __func__); + thread->looper_need_return = false; wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2); if (ret && ret != -ERESTARTSYS) pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret); @@ -3428,8 +4707,7 @@ static void binder_vma_close(struct vm_area_struct *vma) proc->pid, vma->vm_start, vma->vm_end, (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags, (unsigned long)pgprot_val(vma->vm_page_prot)); - proc->vma = NULL; - proc->vma_vm_mm = NULL; + binder_alloc_vma_close(&proc->alloc); binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES); } @@ -3447,11 +4725,8 @@ static const struct vm_operations_struct binder_vm_ops = { static int binder_mmap(struct file *filp, struct vm_area_struct *vma) { int ret; - - struct vm_struct *area; struct binder_proc *proc = filp->private_data; const char *failure_string; - struct binder_buffer *buffer; if (proc->tsk != current->group_leader) return -EINVAL; @@ -3460,8 +4735,8 @@ static int binder_mmap(struct file *filp, struct vm_area_struct *vma) vma->vm_end = vma->vm_start + SZ_4M; binder_debug(BINDER_DEBUG_OPEN_CLOSE, - "binder_mmap: %d %lx-%lx (%ld K) vma %lx pagep %lx\n", - proc->pid, vma->vm_start, vma->vm_end, + "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n", + __func__, proc->pid, vma->vm_start, vma->vm_end, (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags, (unsigned long)pgprot_val(vma->vm_page_prot)); @@ -3471,77 +4746,15 @@ static int binder_mmap(struct file *filp, struct vm_area_struct *vma) goto err_bad_arg; } vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE; - - mutex_lock(&proc->context->binder_mmap_lock); - if (proc->buffer) { - ret = -EBUSY; - failure_string = "already mapped"; - goto err_already_mapped; - } - - area = get_vm_area(vma->vm_end - vma->vm_start, VM_IOREMAP); - if (area == NULL) { - ret = -ENOMEM; - failure_string = "get_vm_area"; - goto err_get_vm_area_failed; - } - proc->buffer = area->addr; - proc->user_buffer_offset = vma->vm_start - (uintptr_t)proc->buffer; - mutex_unlock(&proc->context->binder_mmap_lock); - -#ifdef CONFIG_CPU_CACHE_VIPT - if (cache_is_vipt_aliasing()) { - while (CACHE_COLOUR((vma->vm_start ^ (uint32_t)proc->buffer))) { - pr_info("binder_mmap: %d %lx-%lx maps %p bad alignment\n", proc->pid, vma->vm_start, vma->vm_end, proc->buffer); - vma->vm_start += PAGE_SIZE; - } - } -#endif - proc->pages = kzalloc(sizeof(proc->pages[0]) * ((vma->vm_end - vma->vm_start) / PAGE_SIZE), GFP_KERNEL); - if (proc->pages == NULL) { - ret = -ENOMEM; - failure_string = "alloc page array"; - goto err_alloc_pages_failed; - } - proc->buffer_size = vma->vm_end - vma->vm_start; - vma->vm_ops = &binder_vm_ops; vma->vm_private_data = proc; - /* 
binder_update_page_range assumes preemption is disabled */ - preempt_disable(); - ret = binder_update_page_range(proc, 1, proc->buffer, proc->buffer + PAGE_SIZE, vma); - preempt_enable_no_resched(); - if (ret) { - ret = -ENOMEM; - failure_string = "alloc small buf"; - goto err_alloc_small_buf_failed; - } - buffer = proc->buffer; - INIT_LIST_HEAD(&proc->buffers); - list_add(&buffer->entry, &proc->buffers); - buffer->free = 1; - binder_insert_free_buffer(proc, buffer); - proc->free_async_space = proc->buffer_size / 2; - barrier(); + ret = binder_alloc_mmap_handler(&proc->alloc, vma); + if (ret) + return ret; proc->files = get_files_struct(current); - proc->vma = vma; - proc->vma_vm_mm = vma->vm_mm; - - /*pr_info("binder_mmap: %d %lx-%lx maps %p\n", - proc->pid, vma->vm_start, vma->vm_end, proc->buffer);*/ return 0; -err_alloc_small_buf_failed: - kfree(proc->pages); - proc->pages = NULL; -err_alloc_pages_failed: - mutex_lock(&proc->context->binder_mmap_lock); - vfree(proc->buffer); - proc->buffer = NULL; -err_get_vm_area_failed: -err_already_mapped: - mutex_unlock(&proc->context->binder_mmap_lock); err_bad_arg: pr_err("binder_mmap: %d %lx-%lx %s failed %d\n", proc->pid, vma->vm_start, vma->vm_end, failure_string, ret); @@ -3559,24 +4772,33 @@ static int binder_open(struct inode *nodp, struct file *filp) proc = kzalloc(sizeof(*proc), GFP_KERNEL); if (proc == NULL) return -ENOMEM; + spin_lock_init(&proc->inner_lock); + spin_lock_init(&proc->outer_lock); get_task_struct(current->group_leader); proc->tsk = current->group_leader; INIT_LIST_HEAD(&proc->todo); - init_waitqueue_head(&proc->wait); - proc->default_priority = task_nice(current); + if (binder_supported_policy(current->policy)) { + proc->default_priority.sched_policy = current->policy; + proc->default_priority.prio = current->normal_prio; + } else { + proc->default_priority.sched_policy = SCHED_NORMAL; + proc->default_priority.prio = NICE_TO_PRIO(0); + } + binder_dev = container_of(filp->private_data, struct binder_device, miscdev); proc->context = &binder_dev->context; - - binder_lock(proc->context, __func__); + binder_alloc_init(&proc->alloc); binder_stats_created(BINDER_STAT_PROC); - hlist_add_head(&proc->proc_node, &proc->context->binder_procs); proc->pid = current->group_leader->pid; INIT_LIST_HEAD(&proc->delivered_death); + INIT_LIST_HEAD(&proc->waiting_threads); filp->private_data = proc; - binder_unlock(proc->context, __func__); + mutex_lock(&binder_procs_lock); + hlist_add_head(&proc->proc_node, &binder_procs); + mutex_unlock(&binder_procs_lock); if (binder_debugfs_dir_entry_proc) { char strbuf[11]; @@ -3612,16 +4834,17 @@ static void binder_deferred_flush(struct binder_proc *proc) struct rb_node *n; int wake_count = 0; + binder_inner_proc_lock(proc); for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) { struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node); - thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN; + thread->looper_need_return = true; if (thread->looper & BINDER_LOOPER_STATE_WAITING) { wake_up_interruptible(&thread->wait); wake_count++; } } - wake_up_interruptible_all(&proc->wait); + binder_inner_proc_unlock(proc); binder_debug(BINDER_DEBUG_OPEN_CLOSE, "binder_flush: %d woke %d threads\n", proc->pid, @@ -3641,15 +4864,22 @@ static int binder_release(struct inode *nodp, struct file *filp) static int binder_node_release(struct binder_node *node, int refs) { struct binder_ref *ref; - struct binder_context *context = node->proc->context; int death = 0; + struct binder_proc *proc = node->proc; 
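
binder_open() above now seeds default_priority with a {sched_policy, prio} pair rather than a raw nice value; prio is on the kernel's unified scale where RT priorities occupy 0..99 and SCHED_NORMAL nice levels map to 100..139, so NICE_TO_PRIO(0) is 120. A small standalone sketch of that mapping (the constants mirror the kernel's scheduler priority headers; the program itself is illustrative only):

/* nice <-> unified prio mapping behind proc->default_priority.prio. */
#include <stdio.h>

#define MAX_RT_PRIO	100				/* prios 0..99 are RT */
#define NICE_WIDTH	40				/* nice spans -20..19 */
#define DEFAULT_PRIO	(MAX_RT_PRIO + NICE_WIDTH / 2)	/* 120 */
#define NICE_TO_PRIO(nice)	((nice) + DEFAULT_PRIO)
#define PRIO_TO_NICE(prio)	((prio) - DEFAULT_PRIO)

int main(void)
{
	printf("nice   0 -> prio %d\n", NICE_TO_PRIO(0));	/* 120 */
	printf("nice -20 -> prio %d\n", NICE_TO_PRIO(-20));	/* 100 */
	printf("nice  19 -> prio %d\n", NICE_TO_PRIO(19));	/* 139 */
	printf("prio 139 -> nice %d\n", PRIO_TO_NICE(139));	/* 19 */
	return 0;
}
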
- list_del_init(&node->work.entry); - binder_release_work(&node->async_todo); + binder_release_work(proc, &node->async_todo); - if (hlist_empty(&node->refs)) { - kfree(node); - binder_stats_deleted(BINDER_STAT_NODE); + binder_node_lock(node); + binder_inner_proc_lock(proc); + binder_dequeue_work_ilocked(&node->work); + /* + * The caller must have taken a temporary ref on the node, + */ + BUG_ON(!node->tmp_refs); + if (hlist_empty(&node->refs) && node->tmp_refs == 1) { + binder_inner_proc_unlock(proc); + binder_node_unlock(node); + binder_free_node(node); return refs; } @@ -3657,45 +4887,58 @@ static int binder_node_release(struct binder_node *node, int refs) node->proc = NULL; node->local_strong_refs = 0; node->local_weak_refs = 0; - hlist_add_head(&node->dead_node, &context->binder_dead_nodes); + binder_inner_proc_unlock(proc); + + spin_lock(&binder_dead_nodes_lock); + hlist_add_head(&node->dead_node, &binder_dead_nodes); + spin_unlock(&binder_dead_nodes_lock); hlist_for_each_entry(ref, &node->refs, node_entry) { refs++; - - if (!ref->death) + /* + * Need the node lock to synchronize + * with new notification requests and the + * inner lock to synchronize with queued + * death notifications. + */ + binder_inner_proc_lock(ref->proc); + if (!ref->death) { + binder_inner_proc_unlock(ref->proc); continue; + } death++; - if (list_empty(&ref->death->work.entry)) { - ref->death->work.type = BINDER_WORK_DEAD_BINDER; - list_add_tail(&ref->death->work.entry, - &ref->proc->todo); - wake_up_interruptible(&ref->proc->wait); - } else - BUG(); + BUG_ON(!list_empty(&ref->death->work.entry)); + ref->death->work.type = BINDER_WORK_DEAD_BINDER; + binder_enqueue_work_ilocked(&ref->death->work, + &ref->proc->todo); + binder_wakeup_proc_ilocked(ref->proc); + binder_inner_proc_unlock(ref->proc); } binder_debug(BINDER_DEBUG_DEAD_BINDER, "node %d now dead, refs %d, death %d\n", node->debug_id, refs, death); + binder_node_unlock(node); + binder_put_node(node); return refs; } static void binder_deferred_release(struct binder_proc *proc) { - struct binder_transaction *t; struct binder_context *context = proc->context; struct rb_node *n; - int threads, nodes, incoming_refs, outgoing_refs, buffers, - active_transactions, page_count; + int threads, nodes, incoming_refs, outgoing_refs, active_transactions; - BUG_ON(proc->vma); BUG_ON(proc->files); + mutex_lock(&binder_procs_lock); hlist_del(&proc->proc_node); + mutex_unlock(&binder_procs_lock); + mutex_lock(&context->context_mgr_node_lock); if (context->binder_context_mgr_node && context->binder_context_mgr_node->proc == proc) { binder_debug(BINDER_DEBUG_DEAD_BINDER, @@ -3703,15 +4946,25 @@ static void binder_deferred_release(struct binder_proc *proc) __func__, proc->pid); context->binder_context_mgr_node = NULL; } + mutex_unlock(&context->context_mgr_node_lock); + binder_inner_proc_lock(proc); + /* + * Make sure proc stays alive after we + * remove all the threads + */ + proc->tmp_ref++; + proc->is_dead = true; threads = 0; active_transactions = 0; while ((n = rb_first(&proc->threads))) { struct binder_thread *thread; thread = rb_entry(n, struct binder_thread, rb_node); + binder_inner_proc_unlock(proc); threads++; - active_transactions += binder_free_thread(proc, thread); + active_transactions += binder_thread_release(proc, thread); + binder_inner_proc_lock(proc); } nodes = 0; @@ -3721,94 +4974,55 @@ static void binder_deferred_release(struct binder_proc *proc) node = rb_entry(n, struct binder_node, rb_node); nodes++; + /* + * take a temporary ref on the node before + 
* calling binder_node_release() which will either + * kfree() the node or call binder_put_node() + */ + binder_inc_node_tmpref_ilocked(node); rb_erase(&node->rb_node, &proc->nodes); - incoming_refs = binder_node_release(node, - incoming_refs); + binder_inner_proc_unlock(proc); + incoming_refs = binder_node_release(node, incoming_refs); + binder_inner_proc_lock(proc); } + binder_inner_proc_unlock(proc); outgoing_refs = 0; + binder_proc_lock(proc); while ((n = rb_first(&proc->refs_by_desc))) { struct binder_ref *ref; ref = rb_entry(n, struct binder_ref, rb_node_desc); outgoing_refs++; - binder_delete_ref(ref); + binder_cleanup_ref_olocked(ref); + binder_proc_unlock(proc); + binder_free_ref(ref); + binder_proc_lock(proc); } + binder_proc_unlock(proc); - binder_release_work(&proc->todo); - binder_release_work(&proc->delivered_death); - - buffers = 0; - while ((n = rb_first(&proc->allocated_buffers))) { - struct binder_buffer *buffer; - - buffer = rb_entry(n, struct binder_buffer, rb_node); - - t = buffer->transaction; - if (t) { - t->buffer = NULL; - buffer->transaction = NULL; - pr_err("release proc %d, transaction %d, not freed\n", - proc->pid, t->debug_id); - /*BUG();*/ - } - - binder_free_buf(proc, buffer); - buffers++; - } - - binder_stats_deleted(BINDER_STAT_PROC); - - page_count = 0; - if (proc->pages) { - int i; - - for (i = 0; i < proc->buffer_size / PAGE_SIZE; i++) { - void *page_addr; - - if (!proc->pages[i]) - continue; - - page_addr = proc->buffer + i * PAGE_SIZE; - binder_debug(BINDER_DEBUG_BUFFER_ALLOC, - "%s: %d: page %d at %p not freed\n", - __func__, proc->pid, i, page_addr); - unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE); - __free_page(proc->pages[i]); - page_count++; - } - kfree(proc->pages); - vfree(proc->buffer); - } - - put_task_struct(proc->tsk); + binder_release_work(proc, &proc->todo); + binder_release_work(proc, &proc->delivered_death); binder_debug(BINDER_DEBUG_OPEN_CLOSE, - "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d, buffers %d, pages %d\n", + "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n", __func__, proc->pid, threads, nodes, incoming_refs, - outgoing_refs, active_transactions, buffers, page_count); + outgoing_refs, active_transactions); - kfree(proc); + binder_proc_dec_tmpref(proc); } static void binder_deferred_func(struct work_struct *work) { struct binder_proc *proc; struct files_struct *files; - struct binder_context *context = - container_of(work, struct binder_context, deferred_work); int defer; do { - trace_binder_lock(__func__); - mutex_lock(&context->binder_main_lock); - trace_binder_locked(__func__); - - mutex_lock(&context->binder_deferred_lock); - preempt_disable(); - if (!hlist_empty(&context->binder_deferred_list)) { - proc = hlist_entry(context->binder_deferred_list.first, + mutex_lock(&binder_deferred_lock); + if (!hlist_empty(&binder_deferred_list)) { + proc = hlist_entry(binder_deferred_list.first, struct binder_proc, deferred_work_node); hlist_del_init(&proc->deferred_work_node); defer = proc->deferred_work; @@ -3817,7 +5031,7 @@ static void binder_deferred_func(struct work_struct *work) proc = NULL; defer = 0; } - mutex_unlock(&context->binder_deferred_lock); + mutex_unlock(&binder_deferred_lock); files = NULL; if (defer & BINDER_DEFERRED_PUT_FILES) { @@ -3832,63 +5046,71 @@ static void binder_deferred_func(struct work_struct *work) if (defer & BINDER_DEFERRED_RELEASE) binder_deferred_release(proc); /* frees proc */ - trace_binder_unlock(__func__); - 
mutex_unlock(&context->binder_main_lock); - preempt_enable_no_resched(); if (files) put_files_struct(files); } while (proc); } +static DECLARE_WORK(binder_deferred_work, binder_deferred_func); static void binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer) { - mutex_lock(&proc->context->binder_deferred_lock); + mutex_lock(&binder_deferred_lock); proc->deferred_work |= defer; if (hlist_unhashed(&proc->deferred_work_node)) { hlist_add_head(&proc->deferred_work_node, - &proc->context->binder_deferred_list); - queue_work(proc->context->binder_deferred_workqueue, - &proc->context->deferred_work); + &binder_deferred_list); + queue_work(binder_deferred_workqueue, &binder_deferred_work); } - mutex_unlock(&proc->context->binder_deferred_lock); + mutex_unlock(&binder_deferred_lock); } -static void print_binder_transaction(struct seq_file *m, const char *prefix, - struct binder_transaction *t) +static void print_binder_transaction_ilocked(struct seq_file *m, + struct binder_proc *proc, + const char *prefix, + struct binder_transaction *t) { + struct binder_proc *to_proc; + struct binder_buffer *buffer = t->buffer; + + spin_lock(&t->lock); + to_proc = t->to_proc; seq_printf(m, - "%s %d: %p from %d:%d to %d:%d code %x flags %x pri %ld r%d", + "%s %d: %p from %d:%d to %d:%d code %x flags %x pri %d:%d r%d", prefix, t->debug_id, t, t->from ? t->from->proc->pid : 0, t->from ? t->from->pid : 0, - t->to_proc ? t->to_proc->pid : 0, + to_proc ? to_proc->pid : 0, t->to_thread ? t->to_thread->pid : 0, - t->code, t->flags, t->priority, t->need_reply); - if (t->buffer == NULL) { + t->code, t->flags, t->priority.sched_policy, + t->priority.prio, t->need_reply); + spin_unlock(&t->lock); + + if (proc != to_proc) { + /* + * Can only safely deref buffer if we are holding the + * correct proc inner lock for this node + */ + seq_puts(m, "\n"); + return; + } + + if (buffer == NULL) { seq_puts(m, " buffer free\n"); return; } - if (t->buffer->target_node) - seq_printf(m, " node %d", - t->buffer->target_node->debug_id); + if (buffer->target_node) + seq_printf(m, " node %d", buffer->target_node->debug_id); seq_printf(m, " size %zd:%zd data %p\n", - t->buffer->data_size, t->buffer->offsets_size, - t->buffer->data); -} - -static void print_binder_buffer(struct seq_file *m, const char *prefix, - struct binder_buffer *buffer) -{ - seq_printf(m, "%s %d: %p size %zd:%zd %s\n", - prefix, buffer->debug_id, buffer->data, buffer->data_size, buffer->offsets_size, - buffer->transaction ? 
"active" : "delivered"); + buffer->data); } -static void print_binder_work(struct seq_file *m, const char *prefix, - const char *transaction_prefix, - struct binder_work *w) +static void print_binder_work_ilocked(struct seq_file *m, + struct binder_proc *proc, + const char *prefix, + const char *transaction_prefix, + struct binder_work *w) { struct binder_node *node; struct binder_transaction *t; @@ -3896,8 +5118,16 @@ static void print_binder_work(struct seq_file *m, const char *prefix, switch (w->type) { case BINDER_WORK_TRANSACTION: t = container_of(w, struct binder_transaction, work); - print_binder_transaction(m, transaction_prefix, t); + print_binder_transaction_ilocked( + m, proc, transaction_prefix, t); break; + case BINDER_WORK_RETURN_ERROR: { + struct binder_error *e = container_of( + w, struct binder_error, work); + + seq_printf(m, "%stransaction error: %u\n", + prefix, e->cmd); + } break; case BINDER_WORK_TRANSACTION_COMPLETE: seq_printf(m, "%stransaction complete\n", prefix); break; @@ -3922,40 +5152,46 @@ static void print_binder_work(struct seq_file *m, const char *prefix, } } -static void print_binder_thread(struct seq_file *m, - struct binder_thread *thread, - int print_always) +static void print_binder_thread_ilocked(struct seq_file *m, + struct binder_thread *thread, + int print_always) { struct binder_transaction *t; struct binder_work *w; size_t start_pos = m->count; size_t header_pos; - seq_printf(m, " thread %d: l %02x\n", thread->pid, thread->looper); + seq_printf(m, " thread %d: l %02x need_return %d tr %d\n", + thread->pid, thread->looper, + thread->looper_need_return, + atomic_read(&thread->tmp_ref)); header_pos = m->count; t = thread->transaction_stack; while (t) { if (t->from == thread) { - print_binder_transaction(m, - " outgoing transaction", t); + print_binder_transaction_ilocked(m, thread->proc, + " outgoing transaction", t); t = t->from_parent; } else if (t->to_thread == thread) { - print_binder_transaction(m, + print_binder_transaction_ilocked(m, thread->proc, " incoming transaction", t); t = t->to_parent; } else { - print_binder_transaction(m, " bad transaction", t); + print_binder_transaction_ilocked(m, thread->proc, + " bad transaction", t); t = NULL; } } list_for_each_entry(w, &thread->todo, entry) { - print_binder_work(m, " ", " pending transaction", w); + print_binder_work_ilocked(m, thread->proc, " ", + " pending transaction", w); } if (!print_always && m->count == header_pos) m->count = start_pos; } -static void print_binder_node(struct seq_file *m, struct binder_node *node) +static void print_binder_node_nilocked(struct seq_file *m, + struct binder_node *node) { struct binder_ref *ref; struct binder_work *w; @@ -3965,27 +5201,35 @@ static void print_binder_node(struct seq_file *m, struct binder_node *node) hlist_for_each_entry(ref, &node->refs, node_entry) count++; - seq_printf(m, " node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d", + seq_printf(m, " node %d: u%016llx c%016llx pri %d:%d hs %d hw %d ls %d lw %d is %d iw %d tr %d", node->debug_id, (u64)node->ptr, (u64)node->cookie, + node->sched_policy, node->min_priority, node->has_strong_ref, node->has_weak_ref, node->local_strong_refs, node->local_weak_refs, - node->internal_strong_refs, count); + node->internal_strong_refs, count, node->tmp_refs); if (count) { seq_puts(m, " proc"); hlist_for_each_entry(ref, &node->refs, node_entry) seq_printf(m, " %d", ref->proc->pid); } seq_puts(m, "\n"); - list_for_each_entry(w, &node->async_todo, entry) - print_binder_work(m, " ", - " pending 
async transaction", w); + if (node->proc) { + list_for_each_entry(w, &node->async_todo, entry) + print_binder_work_ilocked(m, node->proc, " ", + " pending async transaction", w); + } } -static void print_binder_ref(struct seq_file *m, struct binder_ref *ref) +static void print_binder_ref_olocked(struct seq_file *m, + struct binder_ref *ref) { + binder_node_lock(ref->node); seq_printf(m, " ref %d: desc %d %snode %d s %d w %d d %pK\n", - ref->debug_id, ref->desc, ref->node->proc ? "" : "dead ", - ref->node->debug_id, ref->strong, ref->weak, ref->death); + ref->data.debug_id, ref->data.desc, + ref->node->proc ? "" : "dead ", + ref->node->debug_id, ref->data.strong, + ref->data.weak, ref->death); + binder_node_unlock(ref->node); } static void print_binder_proc(struct seq_file *m, @@ -3995,36 +5239,60 @@ static void print_binder_proc(struct seq_file *m, struct rb_node *n; size_t start_pos = m->count; size_t header_pos; + struct binder_node *last_node = NULL; seq_printf(m, "proc %d\n", proc->pid); seq_printf(m, "context %s\n", proc->context->name); header_pos = m->count; + binder_inner_proc_lock(proc); for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) - print_binder_thread(m, rb_entry(n, struct binder_thread, + print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread, rb_node), print_all); + for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) { struct binder_node *node = rb_entry(n, struct binder_node, rb_node); - if (print_all || node->has_async_transaction) - print_binder_node(m, node); + /* + * take a temporary reference on the node so it + * survives and isn't removed from the tree + * while we print it. + */ + binder_inc_node_tmpref_ilocked(node); + /* Need to drop inner lock to take node lock */ + binder_inner_proc_unlock(proc); + if (last_node) + binder_put_node(last_node); + binder_node_inner_lock(node); + print_binder_node_nilocked(m, node); + binder_node_inner_unlock(node); + last_node = node; + binder_inner_proc_lock(proc); } + binder_inner_proc_unlock(proc); + if (last_node) + binder_put_node(last_node); + if (print_all) { + binder_proc_lock(proc); for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) - print_binder_ref(m, rb_entry(n, struct binder_ref, - rb_node_desc)); + print_binder_ref_olocked(m, rb_entry(n, + struct binder_ref, + rb_node_desc)); + binder_proc_unlock(proc); } - for (n = rb_first(&proc->allocated_buffers); n != NULL; n = rb_next(n)) - print_binder_buffer(m, " buffer", - rb_entry(n, struct binder_buffer, rb_node)); + binder_alloc_print_allocated(m, &proc->alloc); + binder_inner_proc_lock(proc); list_for_each_entry(w, &proc->todo, entry) - print_binder_work(m, " ", " pending transaction", w); + print_binder_work_ilocked(m, proc, " ", + " pending transaction", w); list_for_each_entry(w, &proc->delivered_death, entry) { seq_puts(m, " has delivered dead binder\n"); break; } + binder_inner_proc_unlock(proc); if (!print_all && m->count == header_pos) m->count = start_pos; } @@ -4082,54 +5350,45 @@ static const char * const binder_objstat_strings[] = { "transaction_complete" }; -static void add_binder_stats(struct binder_stats *from, struct binder_stats *to) -{ - int i; - - for (i = 0; i < ARRAY_SIZE(to->bc); i++) - to->bc[i] += from->bc[i]; - - for (i = 0; i < ARRAY_SIZE(to->br); i++) - to->br[i] += from->br[i]; -} - static void print_binder_stats(struct seq_file *m, const char *prefix, - struct binder_stats *stats, - struct binder_obj_stats *obj_stats) + struct binder_stats *stats) { int i; BUILD_BUG_ON(ARRAY_SIZE(stats->bc) != 
ARRAY_SIZE(binder_command_strings)); for (i = 0; i < ARRAY_SIZE(stats->bc); i++) { - if (stats->bc[i]) + int temp = atomic_read(&stats->bc[i]); + + if (temp) seq_printf(m, "%s%s: %d\n", prefix, - binder_command_strings[i], stats->bc[i]); + binder_command_strings[i], temp); } BUILD_BUG_ON(ARRAY_SIZE(stats->br) != ARRAY_SIZE(binder_return_strings)); for (i = 0; i < ARRAY_SIZE(stats->br); i++) { - if (stats->br[i]) + int temp = atomic_read(&stats->br[i]); + + if (temp) seq_printf(m, "%s%s: %d\n", prefix, - binder_return_strings[i], stats->br[i]); + binder_return_strings[i], temp); } - if (!obj_stats) - return; - - BUILD_BUG_ON(ARRAY_SIZE(obj_stats->obj_created) != + BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) != ARRAY_SIZE(binder_objstat_strings)); - BUILD_BUG_ON(ARRAY_SIZE(obj_stats->obj_created) != - ARRAY_SIZE(obj_stats->obj_deleted)); - for (i = 0; i < ARRAY_SIZE(obj_stats->obj_created); i++) { - int obj_created = atomic_read(&obj_stats->obj_created[i]); - int obj_deleted = atomic_read(&obj_stats->obj_deleted[i]); + BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) != + ARRAY_SIZE(stats->obj_deleted)); + for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) { + int created = atomic_read(&stats->obj_created[i]); + int deleted = atomic_read(&stats->obj_deleted[i]); - if (obj_created || obj_deleted) - seq_printf(m, "%s%s: active %d total %d\n", prefix, - binder_objstat_strings[i], - obj_created - obj_deleted, obj_created); + if (created || deleted) + seq_printf(m, "%s%s: active %d total %d\n", + prefix, + binder_objstat_strings[i], + created - deleted, + created); } } @@ -4137,226 +5396,193 @@ static void print_binder_proc_stats(struct seq_file *m, struct binder_proc *proc) { struct binder_work *w; + struct binder_thread *thread; struct rb_node *n; - int count, strong, weak; + int count, strong, weak, ready_threads; + size_t free_async_space = + binder_alloc_get_free_async_space(&proc->alloc); seq_printf(m, "proc %d\n", proc->pid); seq_printf(m, "context %s\n", proc->context->name); count = 0; + ready_threads = 0; + binder_inner_proc_lock(proc); for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) count++; + + list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node) + ready_threads++; + seq_printf(m, " threads: %d\n", count); seq_printf(m, " requested threads: %d+%d/%d\n" " ready threads %d\n" " free async space %zd\n", proc->requested_threads, proc->requested_threads_started, proc->max_threads, - proc->ready_threads, proc->free_async_space); + ready_threads, + free_async_space); count = 0; for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) count++; + binder_inner_proc_unlock(proc); seq_printf(m, " nodes: %d\n", count); count = 0; strong = 0; weak = 0; + binder_proc_lock(proc); for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) { struct binder_ref *ref = rb_entry(n, struct binder_ref, rb_node_desc); count++; - strong += ref->strong; - weak += ref->weak; + strong += ref->data.strong; + weak += ref->data.weak; } + binder_proc_unlock(proc); seq_printf(m, " refs: %d s %d w %d\n", count, strong, weak); - count = 0; - for (n = rb_first(&proc->allocated_buffers); n != NULL; n = rb_next(n)) - count++; + count = binder_alloc_get_allocated_count(&proc->alloc); seq_printf(m, " buffers: %d\n", count); count = 0; + binder_inner_proc_lock(proc); list_for_each_entry(w, &proc->todo, entry) { - switch (w->type) { - case BINDER_WORK_TRANSACTION: + if (w->type == BINDER_WORK_TRANSACTION) count++; - break; - default: - break; - } } + binder_inner_proc_unlock(proc); 
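
print_binder_stats() above now reads every counter with atomic_read() instead of relying on a global lock, and reports each object type as "active = created - deleted". A compact userspace model of that accounting using C11 atomics (the enum and helper names are made up for the example):

/* Lock-free object accounting: bump atomics on create/delete, report later. */
#include <stdatomic.h>
#include <stdio.h>

enum { OBJ_PROC, OBJ_THREAD, OBJ_NODE, OBJ_COUNT };

static const char * const obj_names[OBJ_COUNT] = { "proc", "thread", "node" };
static atomic_int obj_created[OBJ_COUNT];
static atomic_int obj_deleted[OBJ_COUNT];

static void stats_created(int type) { atomic_fetch_add(&obj_created[type], 1); }
static void stats_deleted(int type) { atomic_fetch_add(&obj_deleted[type], 1); }

static void print_stats(void)
{
	for (int i = 0; i < OBJ_COUNT; i++) {
		int created = atomic_load(&obj_created[i]);
		int deleted = atomic_load(&obj_deleted[i]);

		if (created || deleted)
			printf("%s: active %d total %d\n",
			       obj_names[i], created - deleted, created);
	}
}

int main(void)
{
	stats_created(OBJ_THREAD);
	stats_created(OBJ_THREAD);
	stats_deleted(OBJ_THREAD);
	print_stats();		/* prints: thread: active 1 total 2 */
	return 0;
}
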
seq_printf(m, " pending transactions: %d\n", count); - print_binder_stats(m, " ", &proc->stats, NULL); + print_binder_stats(m, " ", &proc->stats); } static int binder_state_show(struct seq_file *m, void *unused) { - struct binder_device *device; - struct binder_context *context; struct binder_proc *proc; struct binder_node *node; - int do_lock = !binder_debug_no_lock; - bool wrote_dead_nodes_header = false; + struct binder_node *last_node = NULL; seq_puts(m, "binder state:\n"); - hlist_for_each_entry(device, &binder_devices, hlist) { - context = &device->context; - if (do_lock) - binder_lock(context, __func__); - if (!wrote_dead_nodes_header && - !hlist_empty(&context->binder_dead_nodes)) { - seq_puts(m, "dead nodes:\n"); - wrote_dead_nodes_header = true; - } - hlist_for_each_entry(node, &context->binder_dead_nodes, - dead_node) - print_binder_node(m, node); - - if (do_lock) - binder_unlock(context, __func__); + spin_lock(&binder_dead_nodes_lock); + if (!hlist_empty(&binder_dead_nodes)) + seq_puts(m, "dead nodes:\n"); + hlist_for_each_entry(node, &binder_dead_nodes, dead_node) { + /* + * take a temporary reference on the node so it + * survives and isn't removed from the list + * while we print it. + */ + node->tmp_refs++; + spin_unlock(&binder_dead_nodes_lock); + if (last_node) + binder_put_node(last_node); + binder_node_lock(node); + print_binder_node_nilocked(m, node); + binder_node_unlock(node); + last_node = node; + spin_lock(&binder_dead_nodes_lock); } + spin_unlock(&binder_dead_nodes_lock); + if (last_node) + binder_put_node(last_node); - hlist_for_each_entry(device, &binder_devices, hlist) { - context = &device->context; - if (do_lock) - binder_lock(context, __func__); + mutex_lock(&binder_procs_lock); + hlist_for_each_entry(proc, &binder_procs, proc_node) + print_binder_proc(m, proc, 1); + mutex_unlock(&binder_procs_lock); - hlist_for_each_entry(proc, &context->binder_procs, proc_node) - print_binder_proc(m, proc, 1); - if (do_lock) - binder_unlock(context, __func__); - } return 0; } static int binder_stats_show(struct seq_file *m, void *unused) { - struct binder_device *device; - struct binder_context *context; struct binder_proc *proc; - struct binder_stats total_binder_stats; - int do_lock = !binder_debug_no_lock; - - memset(&total_binder_stats, 0, sizeof(struct binder_stats)); - - hlist_for_each_entry(device, &binder_devices, hlist) { - context = &device->context; - if (do_lock) - binder_lock(context, __func__); - - add_binder_stats(&context->binder_stats, &total_binder_stats); - - if (do_lock) - binder_unlock(context, __func__); - } seq_puts(m, "binder stats:\n"); - print_binder_stats(m, "", &total_binder_stats, &binder_obj_stats); - hlist_for_each_entry(device, &binder_devices, hlist) { - context = &device->context; - if (do_lock) - binder_lock(context, __func__); + print_binder_stats(m, "", &binder_stats); + + mutex_lock(&binder_procs_lock); + hlist_for_each_entry(proc, &binder_procs, proc_node) + print_binder_proc_stats(m, proc); + mutex_unlock(&binder_procs_lock); - hlist_for_each_entry(proc, &context->binder_procs, proc_node) - print_binder_proc_stats(m, proc); - if (do_lock) - binder_unlock(context, __func__); - } return 0; } static int binder_transactions_show(struct seq_file *m, void *unused) { - struct binder_device *device; - struct binder_context *context; struct binder_proc *proc; - int do_lock = !binder_debug_no_lock; seq_puts(m, "binder transactions:\n"); - hlist_for_each_entry(device, &binder_devices, hlist) { - context = &device->context; - if (do_lock) - 
binder_lock(context, __func__); + mutex_lock(&binder_procs_lock); + hlist_for_each_entry(proc, &binder_procs, proc_node) + print_binder_proc(m, proc, 0); + mutex_unlock(&binder_procs_lock); - hlist_for_each_entry(proc, &context->binder_procs, proc_node) - print_binder_proc(m, proc, 0); - if (do_lock) - binder_unlock(context, __func__); - } return 0; } static int binder_proc_show(struct seq_file *m, void *unused) { - struct binder_device *device; - struct binder_context *context; struct binder_proc *itr; int pid = (unsigned long)m->private; - int do_lock = !binder_debug_no_lock; - hlist_for_each_entry(device, &binder_devices, hlist) { - context = &device->context; - if (do_lock) - binder_lock(context, __func__); - - hlist_for_each_entry(itr, &context->binder_procs, proc_node) { - if (itr->pid == pid) { - seq_puts(m, "binder proc state:\n"); - print_binder_proc(m, itr, 1); - } + mutex_lock(&binder_procs_lock); + hlist_for_each_entry(itr, &binder_procs, proc_node) { + if (itr->pid == pid) { + seq_puts(m, "binder proc state:\n"); + print_binder_proc(m, itr, 1); } - if (do_lock) - binder_unlock(context, __func__); } + mutex_unlock(&binder_procs_lock); + return 0; } static void print_binder_transaction_log_entry(struct seq_file *m, struct binder_transaction_log_entry *e) { + int debug_id = READ_ONCE(e->debug_id_done); + /* + * read barrier to guarantee debug_id_done read before + * we print the log values + */ + smp_rmb(); seq_printf(m, - "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d\n", + "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d ret %d/%d l=%d", e->debug_id, (e->call_type == 2) ? "reply" : ((e->call_type == 1) ? "async" : "call "), e->from_proc, e->from_thread, e->to_proc, e->to_thread, e->context_name, - e->to_node, e->target_handle, e->data_size, e->offsets_size); -} - -static int print_binder_transaction_log(struct seq_file *m, - struct binder_transaction_log *log) -{ - int i; - if (log->full) { - for (i = log->next; i < ARRAY_SIZE(log->entry); i++) - print_binder_transaction_log_entry(m, &log->entry[i]); - } - for (i = 0; i < log->next; i++) - print_binder_transaction_log_entry(m, &log->entry[i]); - return 0; + e->to_node, e->target_handle, e->data_size, e->offsets_size, + e->return_error, e->return_error_param, + e->return_error_line); + /* + * read-barrier to guarantee read of debug_id_done after + * done printing the fields of the entry + */ + smp_rmb(); + seq_printf(m, debug_id && debug_id == READ_ONCE(e->debug_id_done) ? + "\n" : " (incomplete)\n"); } static int binder_transaction_log_show(struct seq_file *m, void *unused) { - struct binder_device *device; - struct binder_context *context; + struct binder_transaction_log *log = m->private; + unsigned int log_cur = atomic_read(&log->cur); + unsigned int count; + unsigned int cur; + int i; - hlist_for_each_entry(device, &binder_devices, hlist) { - context = &device->context; - print_binder_transaction_log(m, &context->transaction_log); - } - return 0; -} + count = log_cur + 1; + cur = count < ARRAY_SIZE(log->entry) && !log->full ? 
+ 0 : count % ARRAY_SIZE(log->entry); + if (count > ARRAY_SIZE(log->entry) || log->full) + count = ARRAY_SIZE(log->entry); + for (i = 0; i < count; i++) { + unsigned int index = cur++ % ARRAY_SIZE(log->entry); -static int binder_failed_transaction_log_show(struct seq_file *m, void *unused) -{ - struct binder_device *device; - struct binder_context *context; - - hlist_for_each_entry(device, &binder_devices, hlist) { - context = &device->context; - print_binder_transaction_log(m, - &context->transaction_log_failed); + print_binder_transaction_log_entry(m, &log->entry[index]); } return 0; } @@ -4376,20 +5602,11 @@ BINDER_DEBUG_ENTRY(state); BINDER_DEBUG_ENTRY(stats); BINDER_DEBUG_ENTRY(transactions); BINDER_DEBUG_ENTRY(transaction_log); -BINDER_DEBUG_ENTRY(failed_transaction_log); - -static void __init free_binder_device(struct binder_device *device) -{ - if (device->context.binder_deferred_workqueue) - destroy_workqueue(device->context.binder_deferred_workqueue); - kfree(device); -} static int __init init_binder_device(const char *name) { int ret; struct binder_device *binder_device; - struct binder_context *context; binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL); if (!binder_device) @@ -4399,65 +5616,34 @@ static int __init init_binder_device(const char *name) binder_device->miscdev.minor = MISC_DYNAMIC_MINOR; binder_device->miscdev.name = name; - context = &binder_device->context; - context->binder_context_mgr_uid = INVALID_UID; - context->name = name; - - mutex_init(&context->binder_main_lock); - mutex_init(&context->binder_deferred_lock); - mutex_init(&context->binder_mmap_lock); - - context->binder_deferred_workqueue = - create_singlethread_workqueue(name); - - if (!context->binder_deferred_workqueue) { - ret = -ENOMEM; - goto err_create_singlethread_workqueue_failed; - } - - INIT_HLIST_HEAD(&context->binder_procs); - INIT_HLIST_HEAD(&context->binder_dead_nodes); - INIT_HLIST_HEAD(&context->binder_deferred_list); - INIT_WORK(&context->deferred_work, binder_deferred_func); + binder_device->context.binder_context_mgr_uid = INVALID_UID; + binder_device->context.name = name; + mutex_init(&binder_device->context.context_mgr_node_lock); ret = misc_register(&binder_device->miscdev); if (ret < 0) { - goto err_misc_register_failed; + kfree(binder_device); + return ret; } hlist_add_head(&binder_device->hlist, &binder_devices); - return ret; - -err_create_singlethread_workqueue_failed: -err_misc_register_failed: - free_binder_device(binder_device); return ret; } static int __init binder_init(void) { - int ret = 0; + int ret; char *device_name, *device_names; struct binder_device *device; struct hlist_node *tmp; - /* - * Copy the module_parameter string, because we don't want to - * tokenize it in-place. 
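
binder_transaction_log_show() above rebuilds the ring from a single atomic write counter (cur starts at ~0U, so the first writer takes slot 0) and derives both the first slot to print and how many slots are valid. The index arithmetic alone, modelled as a standalone userspace helper (the names and the 32-entry size are chosen only for this example):

/* Given the number of log writes so far, find the window to print. */
#include <stdio.h>

#define LOG_SIZE 32

static void log_window(unsigned int writes, unsigned int *first,
		       unsigned int *count)
{
	if (writes <= LOG_SIZE) {
		*first = 0;			/* ring has not wrapped yet */
		*count = writes;
	} else {
		*first = writes % LOG_SIZE;	/* oldest surviving entry */
		*count = LOG_SIZE;
	}
}

int main(void)
{
	unsigned int first, count;

	log_window(5, &first, &count);		/* slots 0..4 */
	printf("first %u count %u\n", first, count);
	log_window(40, &first, &count);		/* wrapped: oldest is 40 mod 32 = 8 */
	printf("first %u count %u\n", first, count);
	return 0;
}

The per-entry debug_id_done field and the smp_rmb() pair in the printer are what let this run without any lock: an entry whose debug_id_done is zero or changes while it is being read is reported as "(incomplete)" rather than printed as if it were stable.
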
- */ - device_names = kzalloc(strlen(binder_devices_param) + 1, GFP_KERNEL); - if (!device_names) + atomic_set(&binder_transaction_log.cur, ~0U); + atomic_set(&binder_transaction_log_failed.cur, ~0U); + binder_deferred_workqueue = create_singlethread_workqueue("binder"); + if (!binder_deferred_workqueue) return -ENOMEM; - strcpy(device_names, binder_devices_param); - - while ((device_name = strsep(&device_names, ","))) { - ret = init_binder_device(device_name); - if (ret) - goto err_init_binder_device_failed; - } - binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL); if (binder_debugfs_dir_entry_root) binder_debugfs_dir_entry_proc = debugfs_create_dir("proc", @@ -4482,13 +5668,30 @@ static int __init binder_init(void) debugfs_create_file("transaction_log", S_IRUGO, binder_debugfs_dir_entry_root, - NULL, + &binder_transaction_log, &binder_transaction_log_fops); debugfs_create_file("failed_transaction_log", S_IRUGO, binder_debugfs_dir_entry_root, - NULL, - &binder_failed_transaction_log_fops); + &binder_transaction_log_failed, + &binder_transaction_log_fops); + } + + /* + * Copy the module_parameter string, because we don't want to + * tokenize it in-place. + */ + device_names = kzalloc(strlen(binder_devices_param) + 1, GFP_KERNEL); + if (!device_names) { + ret = -ENOMEM; + goto err_alloc_device_names_failed; + } + strcpy(device_names, binder_devices_param); + + while ((device_name = strsep(&device_names, ","))) { + ret = init_binder_device(device_name); + if (ret) + goto err_init_binder_device_failed; } return ret; @@ -4497,8 +5700,12 @@ err_init_binder_device_failed: hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) { misc_deregister(&device->miscdev); hlist_del(&device->hlist); - free_binder_device(device); + kfree(device); } +err_alloc_device_names_failed: + debugfs_remove_recursive(binder_debugfs_dir_entry_root); + + destroy_workqueue(binder_deferred_workqueue); return ret; } diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c new file mode 100644 index 000000000000..aabfebac6e57 --- /dev/null +++ b/drivers/android/binder_alloc.c @@ -0,0 +1,802 @@ +/* binder_alloc.c + * + * Android IPC Subsystem + * + * Copyright (C) 2007-2017 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "binder_alloc.h" +#include "binder_trace.h" + +static DEFINE_MUTEX(binder_alloc_mmap_lock); + +enum { + BINDER_DEBUG_OPEN_CLOSE = 1U << 1, + BINDER_DEBUG_BUFFER_ALLOC = 1U << 2, + BINDER_DEBUG_BUFFER_ALLOC_ASYNC = 1U << 3, +}; +static uint32_t binder_alloc_debug_mask; + +module_param_named(debug_mask, binder_alloc_debug_mask, + uint, S_IWUSR | S_IRUGO); + +#define binder_alloc_debug(mask, x...) 
\ + do { \ + if (binder_alloc_debug_mask & mask) \ + pr_info(x); \ + } while (0) + +static size_t binder_alloc_buffer_size(struct binder_alloc *alloc, + struct binder_buffer *buffer) +{ + if (list_is_last(&buffer->entry, &alloc->buffers)) + return alloc->buffer + + alloc->buffer_size - (void *)buffer->data; + return (size_t)list_entry(buffer->entry.next, + struct binder_buffer, entry) - (size_t)buffer->data; +} + +static void binder_insert_free_buffer(struct binder_alloc *alloc, + struct binder_buffer *new_buffer) +{ + struct rb_node **p = &alloc->free_buffers.rb_node; + struct rb_node *parent = NULL; + struct binder_buffer *buffer; + size_t buffer_size; + size_t new_buffer_size; + + BUG_ON(!new_buffer->free); + + new_buffer_size = binder_alloc_buffer_size(alloc, new_buffer); + + binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC, + "%d: add free buffer, size %zd, at %pK\n", + alloc->pid, new_buffer_size, new_buffer); + + while (*p) { + parent = *p; + buffer = rb_entry(parent, struct binder_buffer, rb_node); + BUG_ON(!buffer->free); + + buffer_size = binder_alloc_buffer_size(alloc, buffer); + + if (new_buffer_size < buffer_size) + p = &parent->rb_left; + else + p = &parent->rb_right; + } + rb_link_node(&new_buffer->rb_node, parent, p); + rb_insert_color(&new_buffer->rb_node, &alloc->free_buffers); +} + +static void binder_insert_allocated_buffer_locked( + struct binder_alloc *alloc, struct binder_buffer *new_buffer) +{ + struct rb_node **p = &alloc->allocated_buffers.rb_node; + struct rb_node *parent = NULL; + struct binder_buffer *buffer; + + BUG_ON(new_buffer->free); + + while (*p) { + parent = *p; + buffer = rb_entry(parent, struct binder_buffer, rb_node); + BUG_ON(buffer->free); + + if (new_buffer < buffer) + p = &parent->rb_left; + else if (new_buffer > buffer) + p = &parent->rb_right; + else + BUG(); + } + rb_link_node(&new_buffer->rb_node, parent, p); + rb_insert_color(&new_buffer->rb_node, &alloc->allocated_buffers); +} + +static struct binder_buffer *binder_alloc_prepare_to_free_locked( + struct binder_alloc *alloc, + uintptr_t user_ptr) +{ + struct rb_node *n = alloc->allocated_buffers.rb_node; + struct binder_buffer *buffer; + struct binder_buffer *kern_ptr; + + kern_ptr = (struct binder_buffer *)(user_ptr - alloc->user_buffer_offset + - offsetof(struct binder_buffer, data)); + + while (n) { + buffer = rb_entry(n, struct binder_buffer, rb_node); + BUG_ON(buffer->free); + + if (kern_ptr < buffer) + n = n->rb_left; + else if (kern_ptr > buffer) + n = n->rb_right; + else { + /* + * Guard against user threads attempting to + * free the buffer twice + */ + if (buffer->free_in_progress) { + pr_err("%d:%d FREE_BUFFER u%016llx user freed buffer twice\n", + alloc->pid, current->pid, (u64)user_ptr); + return NULL; + } + buffer->free_in_progress = 1; + return buffer; + } + } + return NULL; +} + +/** + * binder_alloc_buffer_lookup() - get buffer given user ptr + * @alloc: binder_alloc for this proc + * @user_ptr: User pointer to buffer data + * + * Validate userspace pointer to buffer data and return buffer corresponding to + * that user pointer. Search the rb tree for buffer that matches user data + * pointer. 
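+ * The matching buffer is also marked free_in_progress, so a second
+ * FREE_BUFFER request for the same user pointer is rejected instead of
+ * being freed twice.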
+ * + * Return: Pointer to buffer or NULL + */ +struct binder_buffer *binder_alloc_prepare_to_free(struct binder_alloc *alloc, + uintptr_t user_ptr) +{ + struct binder_buffer *buffer; + + mutex_lock(&alloc->mutex); + buffer = binder_alloc_prepare_to_free_locked(alloc, user_ptr); + mutex_unlock(&alloc->mutex); + return buffer; +} + +static int binder_update_page_range(struct binder_alloc *alloc, int allocate, + void *start, void *end, + struct vm_area_struct *vma) +{ + void *page_addr; + unsigned long user_page_addr; + struct page **page; + struct mm_struct *mm; + + binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC, + "%d: %s pages %pK-%pK\n", alloc->pid, + allocate ? "allocate" : "free", start, end); + + if (end <= start) + return 0; + + trace_binder_update_page_range(alloc, allocate, start, end); + + if (vma) + mm = NULL; + else + mm = get_task_mm(alloc->tsk); + + if (mm) { + down_write(&mm->mmap_sem); + vma = alloc->vma; + if (vma && mm != alloc->vma_vm_mm) { + pr_err("%d: vma mm and task mm mismatch\n", + alloc->pid); + vma = NULL; + } + } + + if (allocate == 0) + goto free_range; + + if (vma == NULL) { + pr_err("%d: binder_alloc_buf failed to map pages in userspace, no vma\n", + alloc->pid); + goto err_no_vma; + } + + for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) { + int ret; + + page = &alloc->pages[(page_addr - alloc->buffer) / PAGE_SIZE]; + + BUG_ON(*page); + *page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO); + if (*page == NULL) { + pr_err("%d: binder_alloc_buf failed for page at %pK\n", + alloc->pid, page_addr); + goto err_alloc_page_failed; + } + ret = map_kernel_range_noflush((unsigned long)page_addr, + PAGE_SIZE, PAGE_KERNEL, page); + flush_cache_vmap((unsigned long)page_addr, + (unsigned long)page_addr + PAGE_SIZE); + if (ret != 1) { + pr_err("%d: binder_alloc_buf failed to map page at %pK in kernel\n", + alloc->pid, page_addr); + goto err_map_kernel_failed; + } + user_page_addr = + (uintptr_t)page_addr + alloc->user_buffer_offset; + ret = vm_insert_page(vma, user_page_addr, page[0]); + if (ret) { + pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n", + alloc->pid, user_page_addr); + goto err_vm_insert_page_failed; + } + /* vm_insert_page does not seem to increment the refcount */ + } + if (mm) { + up_write(&mm->mmap_sem); + mmput(mm); + } + return 0; + +free_range: + for (page_addr = end - PAGE_SIZE; page_addr >= start; + page_addr -= PAGE_SIZE) { + page = &alloc->pages[(page_addr - alloc->buffer) / PAGE_SIZE]; + if (vma) + zap_page_range(vma, (uintptr_t)page_addr + + alloc->user_buffer_offset, PAGE_SIZE, NULL); +err_vm_insert_page_failed: + unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE); +err_map_kernel_failed: + __free_page(*page); + *page = NULL; +err_alloc_page_failed: + ; + } +err_no_vma: + if (mm) { + up_write(&mm->mmap_sem); + mmput(mm); + } + return vma ? 
-ENOMEM : -ESRCH; +} + +struct binder_buffer *binder_alloc_new_buf_locked(struct binder_alloc *alloc, + size_t data_size, + size_t offsets_size, + size_t extra_buffers_size, + int is_async) +{ + struct rb_node *n = alloc->free_buffers.rb_node; + struct binder_buffer *buffer; + size_t buffer_size; + struct rb_node *best_fit = NULL; + void *has_page_addr; + void *end_page_addr; + size_t size, data_offsets_size; + int ret; + + if (alloc->vma == NULL) { + pr_err("%d: binder_alloc_buf, no vma\n", + alloc->pid); + return ERR_PTR(-ESRCH); + } + + data_offsets_size = ALIGN(data_size, sizeof(void *)) + + ALIGN(offsets_size, sizeof(void *)); + + if (data_offsets_size < data_size || data_offsets_size < offsets_size) { + binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC, + "%d: got transaction with invalid size %zd-%zd\n", + alloc->pid, data_size, offsets_size); + return ERR_PTR(-EINVAL); + } + size = data_offsets_size + ALIGN(extra_buffers_size, sizeof(void *)); + if (size < data_offsets_size || size < extra_buffers_size) { + binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC, + "%d: got transaction with invalid extra_buffers_size %zd\n", + alloc->pid, extra_buffers_size); + return ERR_PTR(-EINVAL); + } + if (is_async && + alloc->free_async_space < size + sizeof(struct binder_buffer)) { + binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC, + "%d: binder_alloc_buf size %zd failed, no async space left\n", + alloc->pid, size); + return ERR_PTR(-ENOSPC); + } + + while (n) { + buffer = rb_entry(n, struct binder_buffer, rb_node); + BUG_ON(!buffer->free); + buffer_size = binder_alloc_buffer_size(alloc, buffer); + + if (size < buffer_size) { + best_fit = n; + n = n->rb_left; + } else if (size > buffer_size) + n = n->rb_right; + else { + best_fit = n; + break; + } + } + if (best_fit == NULL) { + size_t allocated_buffers = 0; + size_t largest_alloc_size = 0; + size_t total_alloc_size = 0; + size_t free_buffers = 0; + size_t largest_free_size = 0; + size_t total_free_size = 0; + + for (n = rb_first(&alloc->allocated_buffers); n != NULL; + n = rb_next(n)) { + buffer = rb_entry(n, struct binder_buffer, rb_node); + buffer_size = binder_alloc_buffer_size(alloc, buffer); + allocated_buffers++; + total_alloc_size += buffer_size; + if (buffer_size > largest_alloc_size) + largest_alloc_size = buffer_size; + } + for (n = rb_first(&alloc->free_buffers); n != NULL; + n = rb_next(n)) { + buffer = rb_entry(n, struct binder_buffer, rb_node); + buffer_size = binder_alloc_buffer_size(alloc, buffer); + free_buffers++; + total_free_size += buffer_size; + if (buffer_size > largest_free_size) + largest_free_size = buffer_size; + } + pr_err("%d: binder_alloc_buf size %zd failed, no address space\n", + alloc->pid, size); + pr_err("allocated: %zd (num: %zd largest: %zd), free: %zd (num: %zd largest: %zd)\n", + total_alloc_size, allocated_buffers, largest_alloc_size, + total_free_size, free_buffers, largest_free_size); + return ERR_PTR(-ENOSPC); + } + if (n == NULL) { + buffer = rb_entry(best_fit, struct binder_buffer, rb_node); + buffer_size = binder_alloc_buffer_size(alloc, buffer); + } + + binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC, + "%d: binder_alloc_buf size %zd got buffer %pK size %zd\n", + alloc->pid, size, buffer, buffer_size); + + has_page_addr = + (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK); + if (n == NULL) { + if (size + sizeof(struct binder_buffer) + 4 >= buffer_size) + buffer_size = size; /* no room for other buffers */ + else + buffer_size = size + sizeof(struct binder_buffer); + } + end_page_addr = + (void 
*)PAGE_ALIGN((uintptr_t)buffer->data + buffer_size); + if (end_page_addr > has_page_addr) + end_page_addr = has_page_addr; + ret = binder_update_page_range(alloc, 1, + (void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr, NULL); + if (ret) + return ERR_PTR(ret); + + rb_erase(best_fit, &alloc->free_buffers); + buffer->free = 0; + buffer->free_in_progress = 0; + binder_insert_allocated_buffer_locked(alloc, buffer); + if (buffer_size != size) { + struct binder_buffer *new_buffer = (void *)buffer->data + size; + + list_add(&new_buffer->entry, &buffer->entry); + new_buffer->free = 1; + binder_insert_free_buffer(alloc, new_buffer); + } + binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC, + "%d: binder_alloc_buf size %zd got %pK\n", + alloc->pid, size, buffer); + buffer->data_size = data_size; + buffer->offsets_size = offsets_size; + buffer->async_transaction = is_async; + buffer->extra_buffers_size = extra_buffers_size; + if (is_async) { + alloc->free_async_space -= size + sizeof(struct binder_buffer); + binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC, + "%d: binder_alloc_buf size %zd async free %zd\n", + alloc->pid, size, alloc->free_async_space); + } + return buffer; +} + +/** + * binder_alloc_new_buf() - Allocate a new binder buffer + * @alloc: binder_alloc for this proc + * @data_size: size of user data buffer + * @offsets_size: user specified buffer offset + * @extra_buffers_size: size of extra space for meta-data (eg, security context) + * @is_async: buffer for async transaction + * + * Allocate a new buffer given the requested sizes. Returns + * the kernel version of the buffer pointer. The size allocated + * is the sum of the three given sizes (each rounded up to + * pointer-sized boundary) + * + * Return: The allocated buffer or %NULL if error + */ +struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc, + size_t data_size, + size_t offsets_size, + size_t extra_buffers_size, + int is_async) +{ + struct binder_buffer *buffer; + + mutex_lock(&alloc->mutex); + buffer = binder_alloc_new_buf_locked(alloc, data_size, offsets_size, + extra_buffers_size, is_async); + mutex_unlock(&alloc->mutex); + return buffer; +} + +static void *buffer_start_page(struct binder_buffer *buffer) +{ + return (void *)((uintptr_t)buffer & PAGE_MASK); +} + +static void *buffer_end_page(struct binder_buffer *buffer) +{ + return (void *)(((uintptr_t)(buffer + 1) - 1) & PAGE_MASK); +} + +static void binder_delete_free_buffer(struct binder_alloc *alloc, + struct binder_buffer *buffer) +{ + struct binder_buffer *prev, *next = NULL; + int free_page_end = 1; + int free_page_start = 1; + + BUG_ON(alloc->buffers.next == &buffer->entry); + prev = list_entry(buffer->entry.prev, struct binder_buffer, entry); + BUG_ON(!prev->free); + if (buffer_end_page(prev) == buffer_start_page(buffer)) { + free_page_start = 0; + if (buffer_end_page(prev) == buffer_end_page(buffer)) + free_page_end = 0; + binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC, + "%d: merge free, buffer %pK share page with %pK\n", + alloc->pid, buffer, prev); + } + + if (!list_is_last(&buffer->entry, &alloc->buffers)) { + next = list_entry(buffer->entry.next, + struct binder_buffer, entry); + if (buffer_start_page(next) == buffer_end_page(buffer)) { + free_page_end = 0; + if (buffer_start_page(next) == + buffer_start_page(buffer)) + free_page_start = 0; + binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC, + "%d: merge free, buffer %pK share page with %pK\n", + alloc->pid, buffer, prev); + } + } + list_del(&buffer->entry); + if (free_page_start || 
free_page_end) { + binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC, + "%d: merge free, buffer %pK do not share page%s%s with %pK or %pK\n", + alloc->pid, buffer, free_page_start ? "" : " end", + free_page_end ? "" : " start", prev, next); + binder_update_page_range(alloc, 0, free_page_start ? + buffer_start_page(buffer) : buffer_end_page(buffer), + (free_page_end ? buffer_end_page(buffer) : + buffer_start_page(buffer)) + PAGE_SIZE, NULL); + } +} + +static void binder_free_buf_locked(struct binder_alloc *alloc, + struct binder_buffer *buffer) +{ + size_t size, buffer_size; + + buffer_size = binder_alloc_buffer_size(alloc, buffer); + + size = ALIGN(buffer->data_size, sizeof(void *)) + + ALIGN(buffer->offsets_size, sizeof(void *)) + + ALIGN(buffer->extra_buffers_size, sizeof(void *)); + + binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC, + "%d: binder_free_buf %pK size %zd buffer_size %zd\n", + alloc->pid, buffer, size, buffer_size); + + BUG_ON(buffer->free); + BUG_ON(size > buffer_size); + BUG_ON(buffer->transaction != NULL); + BUG_ON((void *)buffer < alloc->buffer); + BUG_ON((void *)buffer > alloc->buffer + alloc->buffer_size); + + if (buffer->async_transaction) { + alloc->free_async_space += size + sizeof(struct binder_buffer); + + binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC, + "%d: binder_free_buf size %zd async free %zd\n", + alloc->pid, size, alloc->free_async_space); + } + + binder_update_page_range(alloc, 0, + (void *)PAGE_ALIGN((uintptr_t)buffer->data), + (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK), + NULL); + + rb_erase(&buffer->rb_node, &alloc->allocated_buffers); + buffer->free = 1; + if (!list_is_last(&buffer->entry, &alloc->buffers)) { + struct binder_buffer *next = list_entry(buffer->entry.next, + struct binder_buffer, entry); + + if (next->free) { + rb_erase(&next->rb_node, &alloc->free_buffers); + binder_delete_free_buffer(alloc, next); + } + } + if (alloc->buffers.next != &buffer->entry) { + struct binder_buffer *prev = list_entry(buffer->entry.prev, + struct binder_buffer, entry); + + if (prev->free) { + binder_delete_free_buffer(alloc, buffer); + rb_erase(&prev->rb_node, &alloc->free_buffers); + buffer = prev; + } + } + binder_insert_free_buffer(alloc, buffer); +} + +/** + * binder_alloc_free_buf() - free a binder buffer + * @alloc: binder_alloc for this proc + * @buffer: kernel pointer to buffer + * + * Free the buffer allocated via binder_alloc_new_buffer() + */ +void binder_alloc_free_buf(struct binder_alloc *alloc, + struct binder_buffer *buffer) +{ + mutex_lock(&alloc->mutex); + binder_free_buf_locked(alloc, buffer); + mutex_unlock(&alloc->mutex); +} + +/** + * binder_alloc_mmap_handler() - map virtual address space for proc + * @alloc: alloc structure for this proc + * @vma: vma passed to mmap() + * + * Called by binder_mmap() to initialize the space specified in + * vma for allocating binder buffers + * + * Return: + * 0 = success + * -EBUSY = address space already mapped + * -ENOMEM = failed to map memory to given address space + */ +int binder_alloc_mmap_handler(struct binder_alloc *alloc, + struct vm_area_struct *vma) +{ + int ret; + struct vm_struct *area; + const char *failure_string; + struct binder_buffer *buffer; + + mutex_lock(&binder_alloc_mmap_lock); + if (alloc->buffer) { + ret = -EBUSY; + failure_string = "already mapped"; + goto err_already_mapped; + } + + area = get_vm_area(vma->vm_end - vma->vm_start, VM_IOREMAP); + if (area == NULL) { + ret = -ENOMEM; + failure_string = "get_vm_area"; + goto err_get_vm_area_failed; + } + 
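+	/* area->addr is the kernel VA reserved for this proc's buffers;
+	 * user_buffer_offset records the constant delta between that kernel VA
+	 * and the start of the user mapping, used to translate buffer addresses.
+	 */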
alloc->buffer = area->addr; + alloc->user_buffer_offset = + vma->vm_start - (uintptr_t)alloc->buffer; + mutex_unlock(&binder_alloc_mmap_lock); + +#ifdef CONFIG_CPU_CACHE_VIPT + if (cache_is_vipt_aliasing()) { + while (CACHE_COLOUR( + (vma->vm_start ^ (uint32_t)alloc->buffer))) { + pr_info("binder_mmap: %d %lx-%lx maps %pK bad alignment\n", + alloc->pid, vma->vm_start, vma->vm_end, + alloc->buffer); + vma->vm_start += PAGE_SIZE; + } + } +#endif + alloc->pages = kzalloc(sizeof(alloc->pages[0]) * + ((vma->vm_end - vma->vm_start) / PAGE_SIZE), + GFP_KERNEL); + if (alloc->pages == NULL) { + ret = -ENOMEM; + failure_string = "alloc page array"; + goto err_alloc_pages_failed; + } + alloc->buffer_size = vma->vm_end - vma->vm_start; + + if (binder_update_page_range(alloc, 1, alloc->buffer, + alloc->buffer + PAGE_SIZE, vma)) { + ret = -ENOMEM; + failure_string = "alloc small buf"; + goto err_alloc_small_buf_failed; + } + buffer = alloc->buffer; + INIT_LIST_HEAD(&alloc->buffers); + list_add(&buffer->entry, &alloc->buffers); + buffer->free = 1; + binder_insert_free_buffer(alloc, buffer); + alloc->free_async_space = alloc->buffer_size / 2; + barrier(); + alloc->vma = vma; + alloc->vma_vm_mm = vma->vm_mm; + + return 0; + +err_alloc_small_buf_failed: + kfree(alloc->pages); + alloc->pages = NULL; +err_alloc_pages_failed: + mutex_lock(&binder_alloc_mmap_lock); + vfree(alloc->buffer); + alloc->buffer = NULL; +err_get_vm_area_failed: +err_already_mapped: + mutex_unlock(&binder_alloc_mmap_lock); + pr_err("%s: %d %lx-%lx %s failed %d\n", __func__, + alloc->pid, vma->vm_start, vma->vm_end, failure_string, ret); + return ret; +} + + +void binder_alloc_deferred_release(struct binder_alloc *alloc) +{ + struct rb_node *n; + int buffers, page_count; + + BUG_ON(alloc->vma); + + buffers = 0; + mutex_lock(&alloc->mutex); + while ((n = rb_first(&alloc->allocated_buffers))) { + struct binder_buffer *buffer; + + buffer = rb_entry(n, struct binder_buffer, rb_node); + + /* Transaction should already have been freed */ + BUG_ON(buffer->transaction); + + binder_free_buf_locked(alloc, buffer); + buffers++; + } + + page_count = 0; + if (alloc->pages) { + int i; + + for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) { + void *page_addr; + + if (!alloc->pages[i]) + continue; + + page_addr = alloc->buffer + i * PAGE_SIZE; + binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC, + "%s: %d: page %d at %pK not freed\n", + __func__, alloc->pid, i, page_addr); + unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE); + __free_page(alloc->pages[i]); + page_count++; + } + kfree(alloc->pages); + vfree(alloc->buffer); + } + mutex_unlock(&alloc->mutex); + + binder_alloc_debug(BINDER_DEBUG_OPEN_CLOSE, + "%s: %d buffers %d, pages %d\n", + __func__, alloc->pid, buffers, page_count); +} + +static void print_binder_buffer(struct seq_file *m, const char *prefix, + struct binder_buffer *buffer) +{ + seq_printf(m, "%s %d: %pK size %zd:%zd:%zd %s\n", + prefix, buffer->debug_id, buffer->data, + buffer->data_size, buffer->offsets_size, + buffer->extra_buffers_size, + buffer->transaction ? 
"active" : "delivered"); +} + +/** + * binder_alloc_print_allocated() - print buffer info + * @m: seq_file for output via seq_printf() + * @alloc: binder_alloc for this proc + * + * Prints information about every buffer associated with + * the binder_alloc state to the given seq_file + */ +void binder_alloc_print_allocated(struct seq_file *m, + struct binder_alloc *alloc) +{ + struct rb_node *n; + + mutex_lock(&alloc->mutex); + for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n)) + print_binder_buffer(m, " buffer", + rb_entry(n, struct binder_buffer, rb_node)); + mutex_unlock(&alloc->mutex); +} + +/** + * binder_alloc_get_allocated_count() - return count of buffers + * @alloc: binder_alloc for this proc + * + * Return: count of allocated buffers + */ +int binder_alloc_get_allocated_count(struct binder_alloc *alloc) +{ + struct rb_node *n; + int count = 0; + + mutex_lock(&alloc->mutex); + for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n)) + count++; + mutex_unlock(&alloc->mutex); + return count; +} + + +/** + * binder_alloc_vma_close() - invalidate address space + * @alloc: binder_alloc for this proc + * + * Called from binder_vma_close() when releasing address space. + * Clears alloc->vma to prevent new incoming transactions from + * allocating more buffers. + */ +void binder_alloc_vma_close(struct binder_alloc *alloc) +{ + WRITE_ONCE(alloc->vma, NULL); + WRITE_ONCE(alloc->vma_vm_mm, NULL); +} + +/** + * binder_alloc_init() - called by binder_open() for per-proc initialization + * @alloc: binder_alloc for this proc + * + * Called from binder_open() to initialize binder_alloc fields for + * new binder proc + */ +void binder_alloc_init(struct binder_alloc *alloc) +{ + alloc->tsk = current->group_leader; + alloc->pid = current->group_leader->pid; + mutex_init(&alloc->mutex); +} + diff --git a/drivers/android/binder_alloc.h b/drivers/android/binder_alloc.h new file mode 100644 index 000000000000..088e4ffc6230 --- /dev/null +++ b/drivers/android/binder_alloc.h @@ -0,0 +1,163 @@ +/* + * Copyright (C) 2017 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#ifndef _LINUX_BINDER_ALLOC_H +#define _LINUX_BINDER_ALLOC_H + +#include +#include +#include +#include +#include +#include + +struct binder_transaction; + +/** + * struct binder_buffer - buffer used for binder transactions + * @entry: entry alloc->buffers + * @rb_node: node for allocated_buffers/free_buffers rb trees + * @free: true if buffer is free + * @allow_user_free: describe the second member of struct blah, + * @async_transaction: describe the second member of struct blah, + * @debug_id: describe the second member of struct blah, + * @transaction: describe the second member of struct blah, + * @target_node: describe the second member of struct blah, + * @data_size: describe the second member of struct blah, + * @offsets_size: describe the second member of struct blah, + * @extra_buffers_size: describe the second member of struct blah, + * @data:i describe the second member of struct blah, + * + * Bookkeeping structure for binder transaction buffers + */ +struct binder_buffer { + struct list_head entry; /* free and allocated entries by address */ + struct rb_node rb_node; /* free entry by size or allocated entry */ + /* by address */ + unsigned free:1; + unsigned allow_user_free:1; + unsigned async_transaction:1; + unsigned free_in_progress:1; + unsigned debug_id:28; + + struct binder_transaction *transaction; + + struct binder_node *target_node; + size_t data_size; + size_t offsets_size; + size_t extra_buffers_size; + uint8_t data[0]; +}; + +/** + * struct binder_alloc - per-binder proc state for binder allocator + * @vma: vm_area_struct passed to mmap_handler + * (invarient after mmap) + * @tsk: tid for task that called init for this proc + * (invariant after init) + * @vma_vm_mm: copy of vma->vm_mm (invarient after mmap) + * @buffer: base of per-proc address space mapped via mmap + * @user_buffer_offset: offset between user and kernel VAs for buffer + * @buffers: list of all buffers for this proc + * @free_buffers: rb tree of buffers available for allocation + * sorted by size + * @allocated_buffers: rb tree of allocated buffers sorted by address + * @free_async_space: VA space available for async buffers. This is + * initialized at mmap time to 1/2 the full VA space + * @pages: array of physical page addresses for each + * page of mmap'd space + * @buffer_size: size of address space specified via mmap + * @pid: pid for associated binder_proc (invariant after init) + * + * Bookkeeping structure for per-proc address space management for binder + * buffers. It is normally initialized during binder_init() and binder_mmap() + * calls. 
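+ * The buffer list and both rb trees are protected by @mutex while the
+ * mapping is in use.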
The address space is used for both user-visible buffers and for + * struct binder_buffer objects used to track the user buffers + */ +struct binder_alloc { + struct mutex mutex; + struct task_struct *tsk; + struct vm_area_struct *vma; + struct mm_struct *vma_vm_mm; + void *buffer; + ptrdiff_t user_buffer_offset; + struct list_head buffers; + struct rb_root free_buffers; + struct rb_root allocated_buffers; + size_t free_async_space; + struct page **pages; + size_t buffer_size; + uint32_t buffer_free; + int pid; +}; + +extern struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc, + size_t data_size, + size_t offsets_size, + size_t extra_buffers_size, + int is_async); +extern void binder_alloc_init(struct binder_alloc *alloc); +extern void binder_alloc_vma_close(struct binder_alloc *alloc); +extern struct binder_buffer * +binder_alloc_prepare_to_free(struct binder_alloc *alloc, + uintptr_t user_ptr); +extern void binder_alloc_free_buf(struct binder_alloc *alloc, + struct binder_buffer *buffer); +extern int binder_alloc_mmap_handler(struct binder_alloc *alloc, + struct vm_area_struct *vma); +extern void binder_alloc_deferred_release(struct binder_alloc *alloc); +extern int binder_alloc_get_allocated_count(struct binder_alloc *alloc); +extern void binder_alloc_print_allocated(struct seq_file *m, + struct binder_alloc *alloc); + +/** + * binder_alloc_get_free_async_space() - get free space available for async + * @alloc: binder_alloc for this proc + * + * Return: the bytes remaining in the address-space for async transactions + */ +static inline size_t +binder_alloc_get_free_async_space(struct binder_alloc *alloc) +{ + size_t free_async_space; + + mutex_lock(&alloc->mutex); + free_async_space = alloc->free_async_space; + mutex_unlock(&alloc->mutex); + return free_async_space; +} + +/** + * binder_alloc_get_user_buffer_offset() - get offset between kernel/user addrs + * @alloc: binder_alloc for this proc + * + * Return: the offset between kernel and user-space addresses to use for + * virtual address conversion + */ +static inline ptrdiff_t +binder_alloc_get_user_buffer_offset(struct binder_alloc *alloc) +{ + /* + * user_buffer_offset is constant if vma is set and + * undefined if vma is not set. It is possible to + * get here with !alloc->vma if the target process + * is dying while a transaction is being initiated. + * Returning the old value is ok in this case and + * the transaction will fail. 
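+	 * Because user_buffer_offset is written only once, at mmap time,
+	 * no locking is needed here.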
+ */ + return alloc->user_buffer_offset; +} + +#endif /* _LINUX_BINDER_ALLOC_H */ + diff --git a/drivers/android/binder_trace.h b/drivers/android/binder_trace.h index 7f20f3dc8369..7967db16ba5a 100644 --- a/drivers/android/binder_trace.h +++ b/drivers/android/binder_trace.h @@ -23,7 +23,8 @@ struct binder_buffer; struct binder_node; struct binder_proc; -struct binder_ref; +struct binder_alloc; +struct binder_ref_data; struct binder_thread; struct binder_transaction; @@ -146,8 +147,8 @@ TRACE_EVENT(binder_transaction_received, TRACE_EVENT(binder_transaction_node_to_ref, TP_PROTO(struct binder_transaction *t, struct binder_node *node, - struct binder_ref *ref), - TP_ARGS(t, node, ref), + struct binder_ref_data *rdata), + TP_ARGS(t, node, rdata), TP_STRUCT__entry( __field(int, debug_id) @@ -160,8 +161,8 @@ TRACE_EVENT(binder_transaction_node_to_ref, __entry->debug_id = t->debug_id; __entry->node_debug_id = node->debug_id; __entry->node_ptr = node->ptr; - __entry->ref_debug_id = ref->debug_id; - __entry->ref_desc = ref->desc; + __entry->ref_debug_id = rdata->debug_id; + __entry->ref_desc = rdata->desc; ), TP_printk("transaction=%d node=%d src_ptr=0x%016llx ==> dest_ref=%d dest_desc=%d", __entry->debug_id, __entry->node_debug_id, @@ -170,8 +171,9 @@ TRACE_EVENT(binder_transaction_node_to_ref, ); TRACE_EVENT(binder_transaction_ref_to_node, - TP_PROTO(struct binder_transaction *t, struct binder_ref *ref), - TP_ARGS(t, ref), + TP_PROTO(struct binder_transaction *t, struct binder_node *node, + struct binder_ref_data *rdata), + TP_ARGS(t, node, rdata), TP_STRUCT__entry( __field(int, debug_id) @@ -182,10 +184,10 @@ TRACE_EVENT(binder_transaction_ref_to_node, ), TP_fast_assign( __entry->debug_id = t->debug_id; - __entry->ref_debug_id = ref->debug_id; - __entry->ref_desc = ref->desc; - __entry->node_debug_id = ref->node->debug_id; - __entry->node_ptr = ref->node->ptr; + __entry->ref_debug_id = rdata->debug_id; + __entry->ref_desc = rdata->desc; + __entry->node_debug_id = node->debug_id; + __entry->node_ptr = node->ptr; ), TP_printk("transaction=%d node=%d src_ref=%d src_desc=%d ==> dest_ptr=0x%016llx", __entry->debug_id, __entry->node_debug_id, @@ -194,9 +196,10 @@ TRACE_EVENT(binder_transaction_ref_to_node, ); TRACE_EVENT(binder_transaction_ref_to_ref, - TP_PROTO(struct binder_transaction *t, struct binder_ref *src_ref, - struct binder_ref *dest_ref), - TP_ARGS(t, src_ref, dest_ref), + TP_PROTO(struct binder_transaction *t, struct binder_node *node, + struct binder_ref_data *src_ref, + struct binder_ref_data *dest_ref), + TP_ARGS(t, node, src_ref, dest_ref), TP_STRUCT__entry( __field(int, debug_id) @@ -208,7 +211,7 @@ TRACE_EVENT(binder_transaction_ref_to_ref, ), TP_fast_assign( __entry->debug_id = t->debug_id; - __entry->node_debug_id = src_ref->node->debug_id; + __entry->node_debug_id = node->debug_id; __entry->src_ref_debug_id = src_ref->debug_id; __entry->src_ref_desc = src_ref->desc; __entry->dest_ref_debug_id = dest_ref->debug_id; @@ -268,9 +271,9 @@ DEFINE_EVENT(binder_buffer_class, binder_transaction_failed_buffer_release, TP_ARGS(buffer)); TRACE_EVENT(binder_update_page_range, - TP_PROTO(struct binder_proc *proc, bool allocate, + TP_PROTO(struct binder_alloc *alloc, bool allocate, void *start, void *end), - TP_ARGS(proc, allocate, start, end), + TP_ARGS(alloc, allocate, start, end), TP_STRUCT__entry( __field(int, proc) __field(bool, allocate) @@ -278,9 +281,9 @@ TRACE_EVENT(binder_update_page_range, __field(size_t, size) ), TP_fast_assign( - __entry->proc = proc->pid; + __entry->proc = 
alloc->pid; __entry->allocate = allocate; - __entry->offset = start - proc->buffer; + __entry->offset = start - alloc->buffer; __entry->size = end - start; ), TP_printk("proc=%d allocate=%d offset=%zu size=%zu", diff --git a/drivers/base/core.c b/drivers/base/core.c index 3fa9096b27c2..5a56a8e9f006 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c @@ -2105,7 +2105,11 @@ void device_shutdown(void) pm_runtime_get_noresume(dev); pm_runtime_barrier(dev); - if (dev->bus && dev->bus->shutdown) { + if (dev->class && dev->class->shutdown) { + if (initcall_debug) + dev_info(dev, "shutdown\n"); + dev->class->shutdown(dev); + } else if (dev->bus && dev->bus->shutdown) { if (initcall_debug) dev_info(dev, "shutdown\n"); dev->bus->shutdown(dev); diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c index 212ca2eee257..a1696e1d199f 100644 --- a/drivers/base/firmware_class.c +++ b/drivers/base/firmware_class.c @@ -295,6 +295,7 @@ static void fw_free_buf(struct firmware_buf *buf) { struct firmware_cache *fwc = buf->fwc; if (!fwc) { + kfree_const(buf->fw_id); kfree(buf); return; } @@ -310,7 +311,8 @@ static const char * const fw_path[] = { "/lib/firmware/updates/" UTS_RELEASE, "/lib/firmware/updates", "/lib/firmware/" UTS_RELEASE, - "/lib/firmware" + "/lib/firmware", + "/lib64/firmware" }; /* diff --git a/drivers/base/platform.c b/drivers/base/platform.c index 9920916a6220..ae7f3ce90bd2 100644 --- a/drivers/base/platform.c +++ b/drivers/base/platform.c @@ -827,7 +827,7 @@ static ssize_t driver_override_store(struct device *dev, const char *buf, size_t count) { struct platform_device *pdev = to_platform_device(dev); - char *driver_override, *old = pdev->driver_override, *cp; + char *driver_override, *old, *cp; if (count > PATH_MAX) return -EINVAL; @@ -840,12 +840,15 @@ static ssize_t driver_override_store(struct device *dev, if (cp) *cp = '\0'; + device_lock(dev); + old = pdev->driver_override; if (strlen(driver_override)) { pdev->driver_override = driver_override; } else { kfree(driver_override); pdev->driver_override = NULL; } + device_unlock(dev); kfree(old); @@ -856,8 +859,12 @@ static ssize_t driver_override_show(struct device *dev, struct device_attribute *attr, char *buf) { struct platform_device *pdev = to_platform_device(dev); + ssize_t len; - return sprintf(buf, "%s\n", pdev->driver_override); + device_lock(dev); + len = sprintf(buf, "%s\n", pdev->driver_override); + device_unlock(dev); + return len; } static DEVICE_ATTR_RW(driver_override); diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c index a48824deabc5..78b0ece0c867 100644 --- a/drivers/base/power/domain.c +++ b/drivers/base/power/domain.c @@ -1188,7 +1188,6 @@ static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev, } dev->power.subsys_data->domain_data = &gpd_data->base; - dev->pm_domain = &genpd->domain; spin_unlock_irq(&dev->power.lock); @@ -1207,7 +1206,6 @@ static void genpd_free_dev_data(struct device *dev, { spin_lock_irq(&dev->power.lock); - dev->pm_domain = NULL; dev->power.subsys_data->domain_data = NULL; spin_unlock_irq(&dev->power.lock); @@ -1248,6 +1246,8 @@ int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev, if (ret) goto out; + dev->pm_domain = &genpd->domain; + genpd->device_count++; genpd->max_off_time_changed = true; @@ -1299,6 +1299,8 @@ int pm_genpd_remove_device(struct generic_pm_domain *genpd, if (genpd->detach_dev) genpd->detach_dev(genpd, dev); + dev->pm_domain = NULL; + list_del_init(&pdd->list_node); 
mutex_unlock(&genpd->lock); @@ -1373,7 +1375,7 @@ EXPORT_SYMBOL_GPL(pm_genpd_add_subdomain); int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd, struct generic_pm_domain *subdomain) { - struct gpd_link *link; + struct gpd_link *l, *link; int ret = -EINVAL; if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain)) @@ -1388,7 +1390,7 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd, goto out; } - list_for_each_entry(link, &genpd->master_links, master_node) { + list_for_each_entry_safe(link, l, &genpd->master_links, master_node) { if (link->slave != subdomain) continue; @@ -1642,10 +1644,10 @@ EXPORT_SYMBOL_GPL(__of_genpd_add_provider); */ void of_genpd_del_provider(struct device_node *np) { - struct of_genpd_provider *cp; + struct of_genpd_provider *cp, *tmp; mutex_lock(&of_genpd_mutex); - list_for_each_entry(cp, &of_genpd_providers, link) { + list_for_each_entry_safe(cp, tmp, &of_genpd_providers, link) { if (cp->node == np) { list_del(&cp->link); of_node_put(cp->node); diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c index 3252429f96af..3a20dc594338 100644 --- a/drivers/base/power/runtime.c +++ b/drivers/base/power/runtime.c @@ -889,13 +889,13 @@ int __pm_runtime_idle(struct device *dev, int rpmflags) unsigned long flags; int retval; - might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe); - if (rpmflags & RPM_GET_PUT) { if (!atomic_dec_and_test(&dev->power.usage_count)) return 0; } + might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe); + spin_lock_irqsave(&dev->power.lock, flags); retval = rpm_idle(dev, rpmflags); spin_unlock_irqrestore(&dev->power.lock, flags); @@ -921,13 +921,13 @@ int __pm_runtime_suspend(struct device *dev, int rpmflags) unsigned long flags; int retval; - might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe); - if (rpmflags & RPM_GET_PUT) { if (!atomic_dec_and_test(&dev->power.usage_count)) return 0; } + might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe); + spin_lock_irqsave(&dev->power.lock, flags); retval = rpm_suspend(dev, rpmflags); spin_unlock_irqrestore(&dev->power.lock, flags); @@ -952,7 +952,8 @@ int __pm_runtime_resume(struct device *dev, int rpmflags) unsigned long flags; int retval; - might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe); + might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe && + dev->power.runtime_status != RPM_ACTIVE); if (rpmflags & RPM_GET_PUT) atomic_inc(&dev->power.usage_count); diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c index a7b46798c81d..39efa7e6c0c0 100644 --- a/drivers/base/power/sysfs.c +++ b/drivers/base/power/sysfs.c @@ -268,6 +268,8 @@ static ssize_t pm_qos_latency_tolerance_store(struct device *dev, value = PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT; else if (!strcmp(buf, "any") || !strcmp(buf, "any\n")) value = PM_QOS_LATENCY_ANY; + else + return -EINVAL; } ret = dev_pm_qos_update_user_latency_tolerance(dev, value); return ret < 0 ? 
ret : n; diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c index 0e494108c20c..7af116e12e53 100644 --- a/drivers/base/power/wakeup.c +++ b/drivers/base/power/wakeup.c @@ -61,6 +61,8 @@ static LIST_HEAD(wakeup_sources); static DECLARE_WAIT_QUEUE_HEAD(wakeup_count_wait_queue); +DEFINE_STATIC_SRCU(wakeup_srcu); + static struct wakeup_source deleted_ws = { .name = "deleted", .lock = __SPIN_LOCK_UNLOCKED(deleted_ws.lock), @@ -199,7 +201,7 @@ void wakeup_source_remove(struct wakeup_source *ws) spin_lock_irqsave(&events_lock, flags); list_del_rcu(&ws->entry); spin_unlock_irqrestore(&events_lock, flags); - synchronize_rcu(); + synchronize_srcu(&wakeup_srcu); } EXPORT_SYMBOL_GPL(wakeup_source_remove); @@ -331,13 +333,14 @@ void device_wakeup_detach_irq(struct device *dev) void device_wakeup_arm_wake_irqs(void) { struct wakeup_source *ws; + int srcuidx; - rcu_read_lock(); + srcuidx = srcu_read_lock(&wakeup_srcu); list_for_each_entry_rcu(ws, &wakeup_sources, entry) { if (ws->wakeirq) dev_pm_arm_wake_irq(ws->wakeirq); } - rcu_read_unlock(); + srcu_read_unlock(&wakeup_srcu, srcuidx); } /** @@ -348,13 +351,14 @@ void device_wakeup_arm_wake_irqs(void) void device_wakeup_disarm_wake_irqs(void) { struct wakeup_source *ws; + int srcuidx; - rcu_read_lock(); + srcuidx = srcu_read_lock(&wakeup_srcu); list_for_each_entry_rcu(ws, &wakeup_sources, entry) { if (ws->wakeirq) dev_pm_disarm_wake_irq(ws->wakeirq); } - rcu_read_unlock(); + srcu_read_unlock(&wakeup_srcu, srcuidx); } /** @@ -839,10 +843,10 @@ EXPORT_SYMBOL_GPL(pm_get_active_wakeup_sources); void pm_print_active_wakeup_sources(void) { struct wakeup_source *ws; - int active = 0; + int srcuidx, active = 0; struct wakeup_source *last_activity_ws = NULL; - rcu_read_lock(); + srcuidx = srcu_read_lock(&wakeup_srcu); list_for_each_entry_rcu(ws, &wakeup_sources, entry) { if (ws->active) { pr_info("active wakeup source: %s\n", ws->name); @@ -858,7 +862,7 @@ void pm_print_active_wakeup_sources(void) if (!active && last_activity_ws) pr_info("last active wakeup source: %s\n", last_activity_ws->name); - rcu_read_unlock(); + srcu_read_unlock(&wakeup_srcu, srcuidx); } EXPORT_SYMBOL_GPL(pm_print_active_wakeup_sources); @@ -985,8 +989,9 @@ void pm_wakep_autosleep_enabled(bool set) { struct wakeup_source *ws; ktime_t now = ktime_get(); + int srcuidx; - rcu_read_lock(); + srcuidx = srcu_read_lock(&wakeup_srcu); list_for_each_entry_rcu(ws, &wakeup_sources, entry) { spin_lock_irq(&ws->lock); if (ws->autosleep_enabled != set) { @@ -1000,7 +1005,7 @@ void pm_wakep_autosleep_enabled(bool set) } spin_unlock_irq(&ws->lock); } - rcu_read_unlock(); + srcu_read_unlock(&wakeup_srcu, srcuidx); } #endif /* CONFIG_PM_AUTOSLEEP */ @@ -1061,15 +1066,16 @@ static int print_wakeup_source_stats(struct seq_file *m, static int wakeup_sources_stats_show(struct seq_file *m, void *unused) { struct wakeup_source *ws; + int srcuidx; seq_puts(m, "name\t\t\t\t\tactive_count\tevent_count\twakeup_count\t" "expire_count\tactive_since\ttotal_time\tmax_time\t" "last_change\tprevent_suspend_time\n"); - rcu_read_lock(); + srcuidx = srcu_read_lock(&wakeup_srcu); list_for_each_entry_rcu(ws, &wakeup_sources, entry) print_wakeup_source_stats(m, ws); - rcu_read_unlock(); + srcu_read_unlock(&wakeup_srcu, srcuidx); print_wakeup_source_stats(m, &deleted_ws); diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c index 41fb1a917b17..33e23a7a691f 100644 --- a/drivers/block/xen-blkback/blkback.c +++ b/drivers/block/xen-blkback/blkback.c @@ -595,8 +595,6 @@ int 
xen_blkif_schedule(void *arg) unsigned long timeout; int ret; - xen_blkif_get(blkif); - while (!kthread_should_stop()) { if (try_to_freeze()) continue; @@ -650,7 +648,6 @@ purge_gnt_list: print_stats(blkif); blkif->xenblkd = NULL; - xen_blkif_put(blkif); return 0; } diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c index f53cff42f8da..923308201375 100644 --- a/drivers/block/xen-blkback/xenbus.c +++ b/drivers/block/xen-blkback/xenbus.c @@ -221,7 +221,6 @@ static int xen_blkif_disconnect(struct xen_blkif *blkif) if (blkif->xenblkd) { kthread_stop(blkif->xenblkd); wake_up(&blkif->shutdown_wq); - blkif->xenblkd = NULL; } /* The above kthread_stop() guarantees that at this point we @@ -266,9 +265,10 @@ static int xen_blkif_disconnect(struct xen_blkif *blkif) static void xen_blkif_free(struct xen_blkif *blkif) { - - xen_blkif_disconnect(blkif); + WARN_ON(xen_blkif_disconnect(blkif)); xen_vbd_free(&blkif->vbd); + kfree(blkif->be->mode); + kfree(blkif->be); /* Make sure everything is drained before shutting down */ BUG_ON(blkif->persistent_gnt_c != 0); @@ -445,8 +445,6 @@ static int xen_blkbk_remove(struct xenbus_device *dev) xen_blkif_put(be->blkif); } - kfree(be->mode); - kfree(be); return 0; } diff --git a/drivers/bluetooth/btfm_slim.c b/drivers/bluetooth/btfm_slim.c index 969f755f5dc4..0a61186167ba 100644 --- a/drivers/bluetooth/btfm_slim.c +++ b/drivers/bluetooth/btfm_slim.c @@ -155,23 +155,22 @@ int btfm_slim_enable_ch(struct btfmslim *btfmslim, struct btfmslim_ch *ch, rxport, 1); if (ret < 0) { BTFMSLIM_ERR("vendor_port_en failed ret[%d]", - ret); + ret); goto error; } } if (rxport) { BTFMSLIM_INFO("slim_connect_sink(port: %d, ch: %d)", - ch->port, ch->ch); + ch->port, ch->ch); /* Connect Port with channel given by Machine driver*/ ret = slim_connect_sink(btfmslim->slim_pgd, &ch->port_hdl, 1, ch->ch_hdl); if (ret < 0) { BTFMSLIM_ERR("slim_connect_sink failed ret[%d]", - ret); + ret); goto remove_channel; } - } else { BTFMSLIM_INFO("slim_connect_src(port: %d, ch: %d)", ch->port, ch->ch); @@ -180,7 +179,7 @@ int btfm_slim_enable_ch(struct btfmslim *btfmslim, struct btfmslim_ch *ch, ch->ch_hdl); if (ret < 0) { BTFMSLIM_ERR("slim_connect_src failed ret[%d]", - ret); + ret); goto remove_channel; } } @@ -190,6 +189,7 @@ int btfm_slim_enable_ch(struct btfmslim *btfmslim, struct btfmslim_ch *ch, BTFMSLIM_INFO( "port: %d, ch: %d, grp: %d, ch->grph: 0x%x, ch_hdl: 0x%x", chan->port, chan->ch, grp, chan->grph, chan->ch_hdl); + ret = slim_control_ch(btfmslim->slim_pgd, (grp ? chan->grph : chan->ch_hdl), SLIM_CH_ACTIVATE, true); if (ret < 0) { @@ -220,6 +220,7 @@ int btfm_slim_disable_ch(struct btfmslim *btfmslim, struct btfmslim_ch *ch, BTFMSLIM_INFO("port:%d, grp: %d, ch->grph:0x%x, ch->ch_hdl:0x%x ", ch->port, grp, ch->grph, ch->ch_hdl); + /* Remove the channel immediately*/ ret = slim_control_ch(btfmslim->slim_pgd, (grp ? 
ch->grph : ch->ch_hdl), SLIM_CH_REMOVE, true); @@ -233,7 +234,6 @@ int btfm_slim_disable_ch(struct btfmslim *btfmslim, struct btfmslim_ch *ch, goto error; } } - /* Disable port through registration setting */ for (i = 0; i < nchan; i++, ch++) { if (btfmslim->vendor_port_en) { @@ -246,9 +246,11 @@ int btfm_slim_disable_ch(struct btfmslim *btfmslim, struct btfmslim_ch *ch, } } } + error: return ret; } + static int btfm_slim_get_logical_addr(struct slim_device *slim) { int ret = 0; diff --git a/drivers/bluetooth/btfm_slim_codec.c b/drivers/bluetooth/btfm_slim_codec.c index 1faebb1759e2..035e8d9fb5fd 100644 --- a/drivers/bluetooth/btfm_slim_codec.c +++ b/drivers/bluetooth/btfm_slim_codec.c @@ -26,6 +26,9 @@ #include #include +static int bt_soc_enable_status; + + static int btfm_slim_codec_write(struct snd_soc_codec *codec, unsigned int reg, unsigned int value) { @@ -38,8 +41,31 @@ static unsigned int btfm_slim_codec_read(struct snd_soc_codec *codec, return 0; } +static int bt_soc_status_get(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + ucontrol->value.integer.value[0] = bt_soc_enable_status; + return 1; +} + +static int bt_soc_status_put(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + return 1; +} + +static const struct snd_kcontrol_new status_controls[] = { + SOC_SINGLE_EXT("BT SOC status", 0, 0, 1, 0, + bt_soc_status_get, + bt_soc_status_put) + +}; + + static int btfm_slim_codec_probe(struct snd_soc_codec *codec) { + snd_soc_add_codec_controls(codec, status_controls, + ARRAY_SIZE(status_controls)); return 0; } @@ -92,9 +118,6 @@ static void btfm_slim_dai_shutdown(struct snd_pcm_substream *substream, return; } - if (dai->id == BTFM_FM_SLIM_TX) - goto out; - /* Search for dai->id matched port handler */ for (i = 0; (i < BTFM_SLIM_NUM_CODEC_DAIS) && (ch->id != BTFM_SLIM_NUM_CODEC_DAIS) && @@ -108,7 +131,6 @@ static void btfm_slim_dai_shutdown(struct snd_pcm_substream *substream, } btfm_slim_disable_ch(btfmslim, ch, rxport, grp, nchan); -out: btfm_slim_hw_deinit(btfmslim); } @@ -130,6 +152,7 @@ int btfm_slim_dai_prepare(struct snd_pcm_substream *substream, struct btfmslim *btfmslim = dai->dev->platform_data; struct btfmslim_ch *ch; uint8_t rxport, grp = false, nchan = 1; + bt_soc_enable_status = 0; BTFMSLIM_DBG("dai->name: %s, dai->id: %d, dai->rate: %d", dai->name, dai->id, dai->rate); @@ -171,61 +194,10 @@ int btfm_slim_dai_prepare(struct snd_pcm_substream *substream, } ret = btfm_slim_enable_ch(btfmslim, ch, rxport, dai->rate, grp, nchan); - return ret; -} -static int btfm_slim_dai_hw_free(struct snd_pcm_substream *substream, - struct snd_soc_dai *dai) -{ - int ret = -EINVAL, i; - struct btfmslim *btfmslim = dai->dev->platform_data; - struct btfmslim_ch *ch; - uint8_t rxport, grp = false, nchan = 1; - - BTFMSLIM_DBG("dai->name: %s, dai->id: %d, dai->rate: %d", dai->name, - dai->id, dai->rate); - - switch (dai->id) { - case BTFM_FM_SLIM_TX: - grp = true; nchan = 2; - ch = btfmslim->tx_chs; - rxport = 0; - break; - case BTFM_BT_SCO_SLIM_TX: - ch = btfmslim->tx_chs; - rxport = 0; - break; - case BTFM_BT_SCO_A2DP_SLIM_RX: - case BTFM_BT_SPLIT_A2DP_SLIM_RX: - ch = btfmslim->rx_chs; - rxport = 1; - break; - case BTFM_SLIM_NUM_CODEC_DAIS: - default: - BTFMSLIM_ERR("dai->id is invalid:%d", dai->id); - goto out; - } - - if (dai->id != BTFM_FM_SLIM_TX) { - ret = 0; - goto out; - } - - /* Search for dai->id matched port handler */ - for (i = 0; (i < BTFM_SLIM_NUM_CODEC_DAIS) && - (ch->id != BTFM_SLIM_NUM_CODEC_DAIS) && - (ch->id != dai->id); ch++, 
i++) - ; - - if ((ch->port == BTFM_SLIM_PGD_PORT_LAST) || - (ch->id == BTFM_SLIM_NUM_CODEC_DAIS)) { - BTFMSLIM_ERR("ch is invalid!!"); - goto out; - } - - btfm_slim_disable_ch(btfmslim, ch, rxport, grp, nchan); - -out: + /* save the enable channel status */ + if (ret == 0) + bt_soc_enable_status = 1; return ret; } @@ -371,7 +343,6 @@ static struct snd_soc_dai_ops btfmslim_dai_ops = { .shutdown = btfm_slim_dai_shutdown, .hw_params = btfm_slim_dai_hw_params, .prepare = btfm_slim_dai_prepare, - .hw_free = btfm_slim_dai_hw_free, .set_channel_map = btfm_slim_dai_set_channel_map, .get_channel_map = btfm_slim_dai_get_channel_map, }; diff --git a/drivers/bluetooth/btfm_slim_wcn3990.c b/drivers/bluetooth/btfm_slim_wcn3990.c index 77e2973e023c..363b4692d228 100644 --- a/drivers/bluetooth/btfm_slim_wcn3990.c +++ b/drivers/bluetooth/btfm_slim_wcn3990.c @@ -82,18 +82,19 @@ int btfm_slim_chrk_enable_port(struct btfmslim *btfmslim, uint8_t port_num, uint8_t rxport, uint8_t enable) { int ret = 0; - uint8_t reg_val = 0; + uint8_t reg_val = 0, en; uint8_t port_bit = 0; uint16_t reg; BTFMSLIM_DBG("port(%d) enable(%d)", port_num, enable); + if (rxport) { - if (enable && btfmslim->sample_rate == 48000) { - /* For A2DP Rx */ + if (enable) { + /* For SCO Rx, A2DP Rx */ reg_val = 0x1; port_bit = port_num - 0x10; reg = CHRK_SB_PGD_RX_PORTn_MULTI_CHNL_0(port_bit); - BTFMSLIM_DBG("writing reg_val (%d) to reg(%x) for A2DP", + BTFMSLIM_DBG("writing reg_val (%d) to reg(%x)", reg_val, reg); ret = btfm_slim_write(btfmslim, reg, 1, ®_val, IFD); if (ret) { @@ -117,7 +118,8 @@ int btfm_slim_chrk_enable_port(struct btfmslim *btfmslim, uint8_t port_num, reg = CHRK_SB_PGD_TX_PORTn_MULTI_CHNL_0(port_num); ret = btfm_slim_write(btfmslim, reg, 1, ®_val, IFD); if (ret) { - BTFMSLIM_ERR("failed to write (%d) reg 0x%x", ret, reg); + BTFMSLIM_ERR("failed to write (%d) reg 0x%x", + ret, reg); goto error; } } @@ -137,15 +139,15 @@ enable_disable_txport: reg = CHRK_SB_PGD_PORT_TX_CFGN(port_num); enable_disable_rxport: - if (enable) { - if (is_fm_port(port_num)) - reg_val = CHRK_SB_PGD_PORT_ENABLE | - CHRK_SB_PGD_PORT_WM_L3; - else - reg_val = CHRK_SB_PGD_PORT_ENABLE | - CHRK_SB_PGD_PORT_WM_LB; - } else - reg_val = CHRK_SB_PGD_PORT_DISABLE; + if (enable) + en = CHRK_SB_PGD_PORT_ENABLE; + else + en = CHRK_SB_PGD_PORT_DISABLE; + + if (is_fm_port(port_num)) + reg_val = en | CHRK_SB_PGD_PORT_WM_L8; + else + reg_val = enable ? en | CHRK_SB_PGD_PORT_WM_LB : en; ret = btfm_slim_write(btfmslim, reg, 1, ®_val, IFD); if (ret) diff --git a/drivers/bluetooth/btfm_slim_wcn3990.h b/drivers/bluetooth/btfm_slim_wcn3990.h index f6a260096c91..b637ac581201 100644 --- a/drivers/bluetooth/btfm_slim_wcn3990.h +++ b/drivers/bluetooth/btfm_slim_wcn3990.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -68,6 +68,7 @@ #define CHRK_SB_PGD_PORT_WM_L1 (0x1 << 1) #define CHRK_SB_PGD_PORT_WM_L2 (0x2 << 1) #define CHRK_SB_PGD_PORT_WM_L3 (0x3 << 1) +#define CHRK_SB_PGD_PORT_WM_L8 (0x8 << 1) #define CHRK_SB_PGD_PORT_WM_LB (0xB << 1) #define CHRK_SB_PGD_PORT_RX_NUM 16 diff --git a/drivers/char/adsprpc.c b/drivers/char/adsprpc.c index 14c833691194..a8e23598ea58 100644 --- a/drivers/char/adsprpc.c +++ b/drivers/char/adsprpc.c @@ -58,6 +58,7 @@ #define FASTRPC_ENOSUCH 39 #define VMID_SSC_Q6 5 #define VMID_ADSP_Q6 6 +#define AC_VM_ADSP_HEAP_SHARED 33 #define DEBUGFS_SIZE 1024 #define RPC_TIMEOUT (5 * HZ) @@ -212,6 +213,7 @@ struct fastrpc_channel_ctx { struct device *dev; struct fastrpc_session_ctx session[NUM_SESSIONS]; struct completion work; + struct completion workport; struct notifier_block nb; struct kref kref; int channel; @@ -221,6 +223,7 @@ struct fastrpc_channel_ctx { int prevssrcount; int issubsystemup; int vmid; + int heap_vmid; int ramdumpenabled; void *remoteheap_ramdump_dev; struct fastrpc_glink_info link; @@ -290,6 +293,7 @@ struct fastrpc_file { int cid; int ssrcount; int pd; + int file_close; struct fastrpc_apps *apps; struct fastrpc_perf perf; struct dentry *debugfs_file; @@ -670,7 +674,8 @@ static int fastrpc_mmap_create(struct fastrpc_file *fl, int fd, unsigned attr, init_dma_attrs(&attrs); dma_set_attr(DMA_ATTR_EXEC_MAPPING, &attrs); - if (map->attr & FASTRPC_ATTR_NON_COHERENT) + if ((map->attr & FASTRPC_ATTR_NON_COHERENT) || + (sess->smmu.coherent && map->uncached)) dma_set_attr(DMA_ATTR_FORCE_NON_COHERENT, &attrs); else if (map->attr & FASTRPC_ATTR_COHERENT) @@ -698,7 +703,7 @@ static int fastrpc_mmap_create(struct fastrpc_file *fl, int fd, unsigned attr, if (vmid) { int srcVM[1] = {VMID_HLOS}; int destVM[2] = {VMID_HLOS, vmid}; - int destVMperm[2] = {PERM_READ | PERM_WRITE, + int destVMperm[2] = {PERM_READ | PERM_WRITE | PERM_EXEC, PERM_READ | PERM_WRITE | PERM_EXEC}; VERIFY(err, !hyp_assign_phys(map->phys, @@ -770,7 +775,7 @@ static int fastrpc_buf_alloc(struct fastrpc_file *fl, ssize_t size, if (vmid) { int srcVM[1] = {VMID_HLOS}; int destVM[2] = {VMID_HLOS, vmid}; - int destVMperm[2] = {PERM_READ | PERM_WRITE, + int destVMperm[2] = {PERM_READ | PERM_WRITE | PERM_EXEC, PERM_READ | PERM_WRITE | PERM_EXEC}; VERIFY(err, !hyp_assign_phys(buf->phys, buf_page_size(size), @@ -1139,6 +1144,9 @@ static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx) if (err) goto bail; } + if (ctx->buf->virt && metalen <= copylen) + memset(ctx->buf->virt, 0, metalen); + /* copy metadata */ rpra = ctx->buf->virt; ctx->rpra = rpra; @@ -1450,7 +1458,7 @@ static void smd_event_handler(void *priv, unsigned event) switch (event) { case SMD_EVENT_OPEN: - complete(&me->channel[cid].work); + complete(&me->channel[cid].workport); break; case SMD_EVENT_CLOSE: fastrpc_notify_drivers(me, cid); @@ -1471,6 +1479,7 @@ static void fastrpc_init(struct fastrpc_apps *me) me->channel = &gcinfo[0]; for (i = 0; i < NUM_CHANNELS; i++) { init_completion(&me->channel[i].work); + init_completion(&me->channel[i].workport); me->channel[i].sesscount = 0; } } @@ -1587,7 +1596,7 @@ static int fastrpc_init_process(struct fastrpc_file *fl, struct fastrpc_mmap *file = 0, *mem = 0; char *proc_name = NULL; int srcVM[1] = {VMID_HLOS}; - int destVM[1] = {VMID_ADSP_Q6}; + int destVM[1] = {gcinfo[0].heap_vmid}; int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC}; int hlosVMperm[1] = 
{PERM_READ | PERM_WRITE | PERM_EXEC}; @@ -1686,6 +1695,9 @@ static int fastrpc_init_process(struct fastrpc_file *fl, int namelen; int pageslen; } inbuf; + + if (!init->filelen) + goto bail; VERIFY(err, proc_name = kzalloc(init->filelen, GFP_KERNEL)); if (err) goto bail; @@ -1694,7 +1706,7 @@ static int fastrpc_init_process(struct fastrpc_file *fl, if (err) goto bail; inbuf.pgid = current->tgid; - inbuf.namelen = strlen(proc_name)+1; + inbuf.namelen = init->filelen; inbuf.pageslen = 0; if (!me->staticpd_flags) { inbuf.pageslen = 1; @@ -1843,7 +1855,7 @@ static int fastrpc_mmap_on_dsp(struct fastrpc_file *fl, uint32_t flags, } else if (flags == ADSP_MMAP_REMOTE_HEAP_ADDR) { int srcVM[1] = {VMID_HLOS}; - int destVM[1] = {VMID_ADSP_Q6}; + int destVM[1] = {gcinfo[0].heap_vmid}; int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC}; VERIFY(err, !hyp_assign_phys(map->phys, (uint64_t)map->size, @@ -1859,7 +1871,7 @@ static int fastrpc_munmap_on_dsp_rh(struct fastrpc_file *fl, struct fastrpc_mmap *map) { int err = 0; - int srcVM[1] = {VMID_ADSP_Q6}; + int srcVM[1] = {gcinfo[0].heap_vmid}; int destVM[1] = {VMID_HLOS}; int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC}; @@ -2129,7 +2141,7 @@ void fastrpc_glink_notify_state(void *handle, const void *priv, unsigned event) switch (event) { case GLINK_CONNECTED: link->port_state = FASTRPC_LINK_CONNECTED; - complete(&me->channel[cid].work); + complete(&me->channel[cid].workport); break; case GLINK_LOCAL_DISCONNECTED: link->port_state = FASTRPC_LINK_DISCONNECTED; @@ -2187,6 +2199,9 @@ static int fastrpc_file_free(struct fastrpc_file *fl) return 0; } (void)fastrpc_release_current_dsp_process(fl); + spin_lock(&fl->hlock); + fl->file_close = 1; + spin_unlock(&fl->hlock); fastrpc_context_list_dtor(fl); fastrpc_buf_list_free(fl); hlist_for_each_entry_safe(map, n, &fl->maps, hn) { @@ -2279,8 +2294,7 @@ static void fastrpc_glink_close(void *chan, int cid) return; link = &gfa.channel[cid].link; - if (link->port_state == FASTRPC_LINK_CONNECTED || - link->port_state == FASTRPC_LINK_CONNECTING) { + if (link->port_state == FASTRPC_LINK_CONNECTED) { link->port_state = FASTRPC_LINK_DISCONNECTING; glink_close(chan); } @@ -2388,16 +2402,16 @@ static ssize_t fastrpc_debugfs_read(struct file *filp, char __user *buffer, spin_lock(&fl->hlock); hlist_for_each_entry_safe(buf, n, &fl->bufs, hn) { len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len, - "%s %p %s %p %s %llx\n", "buf:", - buf, "buf->virt:", buf->virt, - "buf->phys:", buf->phys); + "%s %pK %s %pK %s %llx\n", "buf:", + buf, "buf->virt:", buf->virt, + "buf->phys:", buf->phys); } len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len, "\n%s\n", "LIST OF MAPS:"); hlist_for_each_entry_safe(map, n, &fl->maps, hn) { len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len, - "%s %p %s %lx %s %llx\n", + "%s %pK %s %lx %s %llx\n", "map:", map, "map->va:", map->va, "map->phys:", map->phys); @@ -2407,7 +2421,7 @@ static ssize_t fastrpc_debugfs_read(struct file *filp, char __user *buffer, "LIST OF PENDING SMQCONTEXTS:"); hlist_for_each_entry_safe(ictx, n, &fl->clst.pending, hn) { len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len, - "%s %p %s %u %s %u %s %u\n", + "%s %pK %s %u %s %u %s %u\n", "smqcontext:", ictx, "sc:", ictx->sc, "tid:", ictx->pid, @@ -2418,7 +2432,7 @@ static ssize_t fastrpc_debugfs_read(struct file *filp, char __user *buffer, "LIST OF INTERRUPTED SMQCONTEXTS:"); hlist_for_each_entry_safe(ictx, n, &fl->clst.interrupted, hn) { len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len, - "%s %p %s %u %s %u %s %u\n", 
+ "%s %pK %s %u %s %u %s %u\n", "smqcontext:", ictx, "sc:", ictx->sc, "tid:", ictx->pid, @@ -2478,8 +2492,9 @@ static int fastrpc_channel_open(struct fastrpc_file *fl) if (err) goto bail; - VERIFY(err, wait_for_completion_timeout(&me->channel[cid].work, - RPC_TIMEOUT)); + VERIFY(err, + wait_for_completion_timeout(&me->channel[cid].workport, + RPC_TIMEOUT)); if (err) { me->channel[cid].chan = 0; goto bail; @@ -2487,6 +2502,9 @@ static int fastrpc_channel_open(struct fastrpc_file *fl) kref_init(&me->channel[cid].kref); pr_info("'opened /dev/%s c %d %d'\n", gcinfo[cid].name, MAJOR(me->dev_no), cid); + err = glink_queue_rx_intent(me->channel[cid].chan, NULL, 64); + if (err) + pr_info("adsprpc: initial intent failed for %d\n", cid); if (cid == 0 && me->channel[cid].ssrcount != me->channel[cid].prevssrcount) { if (fastrpc_mmap_remove_ssr(fl)) @@ -2575,6 +2593,14 @@ static long fastrpc_device_ioctl(struct file *file, unsigned int ioctl_num, p.inv.fds = 0; p.inv.attrs = 0; + spin_lock(&fl->hlock); + if (fl->file_close == 1) { + err = EBADF; + pr_warn("ADSPRPC: fastrpc_device_release is happening, So not sending any new requests to DSP"); + spin_unlock(&fl->hlock); + goto bail; + } + spin_unlock(&fl->hlock); switch (ioctl_num) { case FASTRPC_IOCTL_INVOKE: @@ -2819,6 +2845,7 @@ static int fastrpc_cb_probe(struct device *dev) chan->sesscount++; debugfs_global_file = debugfs_create_file("global", 0644, debugfs_root, NULL, &debugfs_fops); + bail: return err; } @@ -2932,6 +2959,12 @@ static int fastrpc_probe(struct platform_device *pdev) } return 0; } + if (of_property_read_bool(dev->of_node, + "qcom,fastrpc-vmid-heap-shared")) + gcinfo[0].heap_vmid = AC_VM_ADSP_HEAP_SHARED; + else + gcinfo[0].heap_vmid = VMID_ADSP_Q6; + pr_info("ADSPRPC: gcinfo[0].heap_vmid %d\n", gcinfo[0].heap_vmid); me->glink = of_property_read_bool(dev->of_node, "qcom,fastrpc-glink"); VERIFY(err, !of_platform_populate(pdev->dev.of_node, fastrpc_match_table, diff --git a/drivers/char/diag/diag_masks.c b/drivers/char/diag/diag_masks.c index 21994d53db91..e206d9db4d7d 100644 --- a/drivers/char/diag/diag_masks.c +++ b/drivers/char/diag/diag_masks.c @@ -61,7 +61,8 @@ static const struct diag_ssid_range_t msg_mask_tbl[] = { { .ssid_first = MSG_SSID_21, .ssid_last = MSG_SSID_21_LAST }, { .ssid_first = MSG_SSID_22, .ssid_last = MSG_SSID_22_LAST }, { .ssid_first = MSG_SSID_23, .ssid_last = MSG_SSID_23_LAST }, - { .ssid_first = MSG_SSID_24, .ssid_last = MSG_SSID_24_LAST } + { .ssid_first = MSG_SSID_24, .ssid_last = MSG_SSID_24_LAST }, + { .ssid_first = MSG_SSID_25, .ssid_last = MSG_SSID_25_LAST } }; static int diag_apps_responds(void) @@ -319,6 +320,7 @@ static void diag_send_msg_mask_update(uint8_t peripheral, int first, int last) struct diag_mask_info *mask_info = NULL; struct diag_msg_mask_t *mask = NULL; struct diag_ctrl_msg_mask header; + uint8_t msg_mask_tbl_count_local; if (peripheral >= NUM_PERIPHERALS) return; @@ -359,6 +361,8 @@ static void diag_send_msg_mask_update(uint8_t peripheral, int first, int last) return; } buf = mask_info->update_buf; + msg_mask_tbl_count_local = driver->msg_mask_tbl_count; + mutex_unlock(&driver->msg_mask_lock); mutex_lock(&mask_info->lock); switch (mask_info->status) { case DIAG_CTRL_MASK_ALL_DISABLED: @@ -375,9 +379,11 @@ static void diag_send_msg_mask_update(uint8_t peripheral, int first, int last) goto err; } - for (i = 0; i < driver->msg_mask_tbl_count; i++, mask++) { - if (((first < mask->ssid_first) || - (last > mask->ssid_last_tools)) && first != ALL_SSID) { + for (i = 0; i < 
msg_mask_tbl_count_local; i++, mask++) { + mutex_lock(&driver->msg_mask_lock); + if (((mask->ssid_first > first) || + (mask->ssid_last_tools < last)) && first != ALL_SSID) { + mutex_unlock(&driver->msg_mask_lock); continue; } @@ -418,19 +424,19 @@ proceed: if (mask_size > 0) memcpy(buf + header_len, mask->ptr, mask_size); mutex_unlock(&mask->lock); + mutex_unlock(&driver->msg_mask_lock); err = diagfwd_write(peripheral, TYPE_CNTL, buf, header_len + mask_size); if (err && err != -ENODEV) - pr_err_ratelimited("diag: Unable to send msg masks to peripheral %d\n", - peripheral); + pr_err_ratelimited("diag: Unable to send msg masks to peripheral %d, error = %d\n", + peripheral, err); if (first != ALL_SSID) break; } err: mutex_unlock(&mask_info->lock); - mutex_unlock(&driver->msg_mask_lock); } static void diag_send_time_sync_update(uint8_t peripheral) @@ -710,8 +716,8 @@ static int diag_cmd_set_msg_mask(unsigned char *src_buf, int src_len, } req = (struct diag_msg_build_mask_t *)src_buf; - mutex_lock(&driver->msg_mask_lock); mutex_lock(&mask_info->lock); + mutex_lock(&driver->msg_mask_lock); mask = (struct diag_msg_mask_t *)mask_info->ptr; for (i = 0; i < driver->msg_mask_tbl_count; i++, mask++) { if (i < (driver->msg_mask_tbl_count - 1)) { @@ -751,6 +757,8 @@ static int diag_cmd_set_msg_mask(unsigned char *src_buf, int src_len, pr_err_ratelimited("diag: In %s, unable to allocate memory for msg mask ptr, mask_size: %d\n", __func__, mask_size); mutex_unlock(&mask->lock); + mutex_unlock(&driver->msg_mask_lock); + mutex_unlock(&mask_info->lock); return -ENOMEM; } mask->ptr = temp; @@ -769,8 +777,8 @@ static int diag_cmd_set_msg_mask(unsigned char *src_buf, int src_len, mask_info->status = DIAG_CTRL_MASK_VALID; break; } - mutex_unlock(&mask_info->lock); mutex_unlock(&driver->msg_mask_lock); + mutex_unlock(&mask_info->lock); if (diag_check_update(APPS_DATA)) diag_update_userspace_clients(MSG_MASKS_TYPE); @@ -795,7 +803,9 @@ static int diag_cmd_set_msg_mask(unsigned char *src_buf, int src_len, for (i = 0; i < NUM_PERIPHERALS; i++) { if (!diag_check_update(i)) continue; + mutex_lock(&driver->md_session_lock); diag_send_msg_mask_update(i, req->ssid_first, req->ssid_last); + mutex_unlock(&driver->md_session_lock); } end: return write_len; @@ -823,9 +833,11 @@ static int diag_cmd_set_all_msg_mask(unsigned char *src_buf, int src_len, } req = (struct diag_msg_config_rsp_t *)src_buf; - mutex_lock(&driver->msg_mask_lock); - mask = (struct diag_msg_mask_t *)mask_info->ptr; + mutex_lock(&mask_info->lock); + mutex_lock(&driver->msg_mask_lock); + + mask = (struct diag_msg_mask_t *)mask_info->ptr; mask_info->status = (req->rt_mask) ? 
DIAG_CTRL_MASK_ALL_ENABLED : DIAG_CTRL_MASK_ALL_DISABLED; for (i = 0; i < driver->msg_mask_tbl_count; i++, mask++) { @@ -834,8 +846,8 @@ static int diag_cmd_set_all_msg_mask(unsigned char *src_buf, int src_len, mask->range * sizeof(uint32_t)); mutex_unlock(&mask->lock); } - mutex_unlock(&mask_info->lock); mutex_unlock(&driver->msg_mask_lock); + mutex_unlock(&mask_info->lock); if (diag_check_update(APPS_DATA)) diag_update_userspace_clients(MSG_MASKS_TYPE); @@ -855,7 +867,9 @@ static int diag_cmd_set_all_msg_mask(unsigned char *src_buf, int src_len, for (i = 0; i < NUM_PERIPHERALS; i++) { if (!diag_check_update(i)) continue; + mutex_lock(&driver->md_session_lock); diag_send_msg_mask_update(i, ALL_SSID, ALL_SSID); + mutex_unlock(&driver->md_session_lock); } return write_len; @@ -949,7 +963,9 @@ static int diag_cmd_update_event_mask(unsigned char *src_buf, int src_len, for (i = 0; i < NUM_PERIPHERALS; i++) { if (!diag_check_update(i)) continue; + mutex_lock(&driver->md_session_lock); diag_send_event_mask_update(i); + mutex_unlock(&driver->md_session_lock); } return write_len; @@ -996,7 +1012,9 @@ static int diag_cmd_toggle_events(unsigned char *src_buf, int src_len, for (i = 0; i < NUM_PERIPHERALS; i++) { if (!diag_check_update(i)) continue; + mutex_lock(&driver->md_session_lock); diag_send_event_mask_update(i); + mutex_unlock(&driver->md_session_lock); } memcpy(dest_buf, &header, sizeof(header)); write_len += sizeof(header); @@ -1250,7 +1268,9 @@ static int diag_cmd_set_log_mask(unsigned char *src_buf, int src_len, for (i = 0; i < NUM_PERIPHERALS; i++) { if (!diag_check_update(i)) continue; + mutex_lock(&driver->md_session_lock); diag_send_log_mask_update(i, req->equip_id); + mutex_unlock(&driver->md_session_lock); } end: return write_len; @@ -1301,7 +1321,9 @@ static int diag_cmd_disable_log_mask(unsigned char *src_buf, int src_len, for (i = 0; i < NUM_PERIPHERALS; i++) { if (!diag_check_update(i)) continue; + mutex_lock(&driver->md_session_lock); diag_send_log_mask_update(i, ALL_EQUIP_ID); + mutex_unlock(&driver->md_session_lock); } return write_len; @@ -1339,8 +1361,8 @@ static int diag_create_msg_mask_table(void) struct diag_msg_mask_t *mask = (struct diag_msg_mask_t *)msg_mask.ptr; struct diag_ssid_range_t range; - mutex_lock(&driver->msg_mask_lock); mutex_lock(&msg_mask.lock); + mutex_lock(&driver->msg_mask_lock); driver->msg_mask_tbl_count = MSG_MASK_TBL_CNT; for (i = 0; i < driver->msg_mask_tbl_count; i++, mask++) { range.ssid_first = msg_mask_tbl[i].ssid_first; @@ -1349,8 +1371,8 @@ static int diag_create_msg_mask_table(void) if (err) break; } - mutex_unlock(&msg_mask.lock); mutex_unlock(&driver->msg_mask_lock); + mutex_unlock(&msg_mask.lock); return err; } @@ -1363,8 +1385,8 @@ static int diag_create_build_time_mask(void) struct diag_msg_mask_t *build_mask = NULL; struct diag_ssid_range_t range; - mutex_lock(&driver->msg_mask_lock); mutex_lock(&msg_bt_mask.lock); + mutex_lock(&driver->msg_mask_lock); driver->bt_msg_mask_tbl_count = MSG_MASK_TBL_CNT; build_mask = (struct diag_msg_mask_t *)msg_bt_mask.ptr; for (i = 0; i < driver->bt_msg_mask_tbl_count; i++, build_mask++) { @@ -1477,9 +1499,8 @@ static int diag_create_build_time_mask(void) } memcpy(build_mask->ptr, tbl, tbl_size); } - mutex_unlock(&msg_bt_mask.lock); mutex_unlock(&driver->msg_mask_lock); - + mutex_unlock(&msg_bt_mask.lock); return err; } @@ -1649,8 +1670,8 @@ int diag_msg_mask_copy(struct diag_mask_info *dest, struct diag_mask_info *src) err = __diag_mask_init(dest, MSG_MASK_SIZE, APPS_BUF_SIZE); if (err) return err; - 
mutex_lock(&driver->msg_mask_lock); mutex_lock(&dest->lock); + mutex_lock(&driver->msg_mask_lock); src_mask = (struct diag_msg_mask_t *)src->ptr; dest_mask = (struct diag_msg_mask_t *)dest->ptr; @@ -1667,9 +1688,8 @@ int diag_msg_mask_copy(struct diag_mask_info *dest, struct diag_mask_info *src) src_mask++; dest_mask++; } - mutex_unlock(&dest->lock); mutex_unlock(&driver->msg_mask_lock); - + mutex_unlock(&dest->lock); return err; } @@ -1680,15 +1700,15 @@ void diag_msg_mask_free(struct diag_mask_info *mask_info) if (!mask_info) return; - mutex_lock(&driver->msg_mask_lock); mutex_lock(&mask_info->lock); + mutex_lock(&driver->msg_mask_lock); mask = (struct diag_msg_mask_t *)mask_info->ptr; for (i = 0; i < driver->msg_mask_tbl_count; i++, mask++) { kfree(mask->ptr); mask->ptr = NULL; } - mutex_unlock(&mask_info->lock); mutex_unlock(&driver->msg_mask_lock); + mutex_unlock(&mask_info->lock); __diag_mask_exit(mask_info); } @@ -1857,8 +1877,9 @@ int diag_copy_to_user_msg_mask(char __user *buf, size_t count, return -EIO; } mutex_unlock(&driver->diag_maskclear_mutex); - mutex_lock(&driver->msg_mask_lock); mutex_lock(&mask_info->lock); + mutex_lock(&driver->msg_mask_lock); + mask = (struct diag_msg_mask_t *)(mask_info->ptr); for (i = 0; i < driver->msg_mask_tbl_count; i++, mask++) { ptr = mask_info->update_buf; @@ -1895,8 +1916,8 @@ int diag_copy_to_user_msg_mask(char __user *buf, size_t count, } total_len += len; } - mutex_unlock(&mask_info->lock); mutex_unlock(&driver->msg_mask_lock); + mutex_unlock(&mask_info->lock); return err ? err : total_len; } @@ -1965,9 +1986,11 @@ void diag_send_updates_peripheral(uint8_t peripheral) diag_send_feature_mask_update(peripheral); if (driver->time_sync_enabled) diag_send_time_sync_update(peripheral); + mutex_lock(&driver->md_session_lock); diag_send_msg_mask_update(peripheral, ALL_SSID, ALL_SSID); diag_send_log_mask_update(peripheral, ALL_EQUIP_ID); diag_send_event_mask_update(peripheral); + mutex_unlock(&driver->md_session_lock); diag_send_real_time_update(peripheral, driver->real_time_mode[DIAG_LOCAL_PROC]); diag_send_peripheral_buffering_mode( diff --git a/drivers/char/diag/diag_memorydevice.c b/drivers/char/diag/diag_memorydevice.c index 7b414bd7d808..a27f12883c8d 100644 --- a/drivers/char/diag/diag_memorydevice.c +++ b/drivers/char/diag/diag_memorydevice.c @@ -129,37 +129,6 @@ void diag_md_close_all() diag_ws_reset(DIAG_WS_MUX); } -static int diag_md_get_peripheral(int ctxt) -{ - int peripheral; - - if (driver->num_pd_session) { - peripheral = GET_PD_CTXT(ctxt); - switch (peripheral) { - case UPD_WLAN: - case UPD_AUDIO: - case UPD_SENSORS: - break; - case DIAG_ID_MPSS: - case DIAG_ID_LPASS: - case DIAG_ID_CDSP: - default: - peripheral = - GET_BUF_PERIPHERAL(ctxt); - if (peripheral > NUM_PERIPHERALS) - peripheral = -EINVAL; - break; - } - } else { - /* Account for Apps data as well */ - peripheral = GET_BUF_PERIPHERAL(ctxt); - if (peripheral > NUM_PERIPHERALS) - peripheral = -EINVAL; - } - - return peripheral; -} - int diag_md_write(int id, unsigned char *buf, int len, int ctx) { int i; @@ -254,8 +223,6 @@ int diag_md_copy_to_user(char __user *buf, int *pret, size_t buf_size, struct diag_md_session_t *session_info = NULL; struct pid *pid_struct = NULL; - mutex_lock(&driver->diagfwd_untag_mutex); - for (i = 0; i < NUM_DIAG_MD_DEV && !err; i++) { ch = &diag_md[i]; for (j = 0; j < ch->num_tbl_entries && !err; j++) { @@ -365,8 +332,6 @@ drop_data: if (drain_again) chk_logging_wakeup(); - mutex_unlock(&driver->diagfwd_untag_mutex); - return err; } diff --git 
a/drivers/char/diag/diag_mux.c b/drivers/char/diag/diag_mux.c index d6f6ea7af8ea..8cc803eef552 100644 --- a/drivers/char/diag/diag_mux.c +++ b/drivers/char/diag/diag_mux.c @@ -153,12 +153,15 @@ int diag_mux_write(int proc, unsigned char *buf, int len, int ctx) upd = PERIPHERAL_CDSP; break; case UPD_WLAN: - if (!driver->num_pd_session) + if (!driver->pd_logging_mode[0]) upd = PERIPHERAL_MODEM; break; case UPD_AUDIO: + if (!driver->pd_logging_mode[1]) + upd = PERIPHERAL_LPASS; + break; case UPD_SENSORS: - if (!driver->num_pd_session) + if (!driver->pd_logging_mode[2]) upd = PERIPHERAL_LPASS; break; default: diff --git a/drivers/char/diag/diagchar.h b/drivers/char/diag/diagchar.h index a7d7fd176302..d81a39e2c637 100644 --- a/drivers/char/diag/diagchar.h +++ b/drivers/char/diag/diagchar.h @@ -547,7 +547,6 @@ struct diagchar_dev { struct mutex cmd_reg_mutex; uint32_t cmd_reg_count; struct mutex diagfwd_channel_mutex[NUM_PERIPHERALS]; - struct mutex diagfwd_untag_mutex; /* Sizes that reflect memory pool sizes */ unsigned int poolsize; unsigned int poolsize_hdlc; @@ -579,6 +578,7 @@ struct diagchar_dev { unsigned char *buf_feature_mask_update; uint8_t hdlc_disabled; struct mutex hdlc_disable_mutex; + struct mutex hdlc_recovery_mutex; struct timer_list hdlc_reset_timer; struct mutex diag_hdlc_mutex; unsigned char *hdlc_buf; @@ -613,12 +613,6 @@ struct diagchar_dev { int pd_logging_mode[NUM_UPD]; int pd_session_clear[NUM_UPD]; int num_pd_session; - int cpd_len_1[NUM_PERIPHERALS]; - int cpd_len_2[NUM_PERIPHERALS]; - int upd_len_1_a[NUM_PERIPHERALS]; - int upd_len_1_b[NUM_PERIPHERALS]; - int upd_len_2_a; - int upd_len_2_b; int mask_check; uint32_t md_session_mask; uint8_t md_session_mode; @@ -648,6 +642,7 @@ struct diagchar_dev { #endif int time_sync_enabled; uint8_t uses_time_api; + struct platform_device *pdev; }; extern struct diagchar_dev *driver; diff --git a/drivers/char/diag/diagchar_core.c b/drivers/char/diag/diagchar_core.c index 6be7c48f75a8..eaed3b101095 100644 --- a/drivers/char/diag/diagchar_core.c +++ b/drivers/char/diag/diagchar_core.c @@ -22,6 +22,8 @@ #include #include #include +#include +#include #ifdef CONFIG_DIAG_OVER_USB #include #endif @@ -456,6 +458,7 @@ static void diag_close_logging_process(const int pid) { int i, j; int session_mask; + uint32_t p_mask; struct diag_md_session_t *session_info = NULL; struct diag_logging_mode_param_t params; @@ -475,26 +478,28 @@ static void diag_close_logging_process(const int pid) session_mask = session_info->peripheral_mask; diag_md_session_close(session_info); + p_mask = + diag_translate_kernel_to_user_mask(session_mask); + for (i = 0; i < NUM_MD_SESSIONS; i++) if (MD_PERIPHERAL_MASK(i) & session_mask) diag_mux_close_peripheral(DIAG_LOCAL_PROC, i); params.req_mode = USB_MODE; params.mode_param = 0; - params.peripheral_mask = - diag_translate_kernel_to_user_mask(session_mask); + params.pd_mask = 0; + params.peripheral_mask = p_mask; - for (i = UPD_WLAN; i < NUM_MD_SESSIONS; i++) { - if (session_mask & - MD_PERIPHERAL_MASK(i)) { - j = i - UPD_WLAN; - driver->pd_session_clear[j] = 1; - driver->pd_logging_mode[j] = 0; - driver->num_pd_session -= 1; - params.pd_mask = - diag_translate_kernel_to_user_mask(session_mask); - } else - params.pd_mask = 0; + if (driver->num_pd_session > 0) { + for (i = UPD_WLAN; (i < NUM_MD_SESSIONS); i++) { + if (session_mask & MD_PERIPHERAL_MASK(i)) { + j = i - UPD_WLAN; + driver->pd_session_clear[j] = 1; + driver->pd_logging_mode[j] = 0; + driver->num_pd_session -= 1; + params.pd_mask = p_mask; + } + } } 
diag_switch_logging(&params); @@ -698,6 +703,11 @@ static void diag_cmd_invalidate_polling(int change_flag) driver->polling_reg_flag = 0; list_for_each_safe(start, temp, &driver->cmd_reg_list) { item = list_entry(start, struct diag_cmd_reg_t, link); + if (&item->entry == NULL) { + pr_err("diag: In %s, unable to search command\n", + __func__); + return; + } polling = diag_cmd_chk_polling(&item->entry); if (polling == DIAG_CMD_POLLING) { driver->polling_reg_flag = 1; @@ -839,6 +849,12 @@ void diag_cmd_remove_reg_by_pid(int pid) mutex_lock(&driver->cmd_reg_mutex); list_for_each_safe(start, temp, &driver->cmd_reg_list) { item = list_entry(start, struct diag_cmd_reg_t, link); + if (&item->entry == NULL) { + pr_err("diag: In %s, unable to search command\n", + __func__); + mutex_unlock(&driver->cmd_reg_mutex); + return; + } if (item->pid == pid) { list_del(&item->link); kfree(item); @@ -857,6 +873,12 @@ void diag_cmd_remove_reg_by_proc(int proc) mutex_lock(&driver->cmd_reg_mutex); list_for_each_safe(start, temp, &driver->cmd_reg_list) { item = list_entry(start, struct diag_cmd_reg_t, link); + if (&item->entry == NULL) { + pr_err("diag: In %s, unable to search command\n", + __func__); + mutex_unlock(&driver->cmd_reg_mutex); + return; + } if (item->proc == proc) { list_del(&item->link); kfree(item); @@ -1019,6 +1041,11 @@ static int diag_send_raw_data_remote(int proc, void *buf, int len, else hdlc_disabled = driver->hdlc_disabled; if (hdlc_disabled) { + if (len < 4) { + pr_err("diag: In %s, invalid len: %d of non_hdlc pkt", + __func__, len); + return -EBADMSG; + } payload = *(uint16_t *)(buf + 2); if (payload > DIAG_MAX_HDLC_BUF_SIZE) { pr_err("diag: Dropping packet, payload size is %d\n", @@ -1027,11 +1054,21 @@ static int diag_send_raw_data_remote(int proc, void *buf, int len, } driver->hdlc_encode_buf_len = payload; /* - * Adding 4 bytes for start (1 byte), version (1 byte) and - * payload (2 bytes) + * Adding 5 bytes for start (1 byte), version (1 byte), + * payload (2 bytes) and end (1 byte) */ - memcpy(driver->hdlc_encode_buf, buf + 4, payload); - goto send_data; + if (len == (payload + 5)) { + /* + * Adding 4 bytes for start (1 byte), version (1 byte) + * and payload (2 bytes) + */ + memcpy(driver->hdlc_encode_buf, buf + 4, payload); + goto send_data; + } else { + pr_err("diag: In %s, invalid len: %d of non_hdlc pkt", + __func__, len); + return -EBADMSG; + } } if (hdlc_flag) { @@ -1612,7 +1649,7 @@ static uint32_t diag_translate_mask(uint32_t peripheral_mask) static int diag_switch_logging(struct diag_logging_mode_param_t *param) { - int new_mode, i; + int new_mode, i = 0; int curr_mode; int err = 0; uint8_t do_switch = 1; @@ -1653,6 +1690,8 @@ static int diag_switch_logging(struct diag_logging_mode_param_t *param) diag_mux->mux_mask)) { DIAG_LOG(DIAG_DEBUG_USERSPACE, "diag_fr: User PD is already logging onto active peripheral logging\n"); + i = upd - UPD_WLAN; + driver->pd_session_clear[i] = 0; return -EINVAL; } peripheral_mask = @@ -1662,8 +1701,8 @@ static int diag_switch_logging(struct diag_logging_mode_param_t *param) if (!driver->pd_session_clear[i]) { driver->pd_logging_mode[i] = 1; driver->num_pd_session += 1; - driver->pd_session_clear[i] = 0; } + driver->pd_session_clear[i] = 0; } else { peripheral_mask = diag_translate_mask(param->peripheral_mask); @@ -1816,14 +1855,18 @@ static int diag_ioctl_lsm_deinit(void) { int i; + mutex_lock(&driver->diagchar_mutex); for (i = 0; i < driver->num_clients; i++) if (driver->client_map[i].pid == current->tgid) break; - if (i == driver->num_clients) 
+ if (i == driver->num_clients) { + mutex_unlock(&driver->diagchar_mutex); return -EINVAL; + } driver->data_ready[i] |= DEINIT_TYPE; + mutex_unlock(&driver->diagchar_mutex); wake_up_interruptible(&driver->wait_q); return 1; @@ -3565,6 +3608,41 @@ static int diagchar_cleanup(void) return 0; } +static int diag_mhi_probe(struct platform_device *pdev) +{ + int ret; + + if (!mhi_is_device_ready(&pdev->dev, "qcom,mhi")) + return -EPROBE_DEFER; + driver->pdev = pdev; + ret = diag_remote_init(); + if (ret) { + diag_remote_exit(); + return ret; + } + ret = diagfwd_bridge_init(); + if (ret) { + diagfwd_bridge_exit(); + return ret; + } + pr_debug("diag: mhi device is ready\n"); + return 0; +} + +static const struct of_device_id diag_mhi_table[] = { + {.compatible = "qcom,diag-mhi"}, + {}, +}; + +static struct platform_driver diag_mhi_driver = { + .probe = diag_mhi_probe, + .driver = { + .name = "DIAG MHI Platform", + .owner = THIS_MODULE, + .of_match_table = diag_mhi_table, + }, +}; + static int __init diagchar_init(void) { dev_t dev; @@ -3621,9 +3699,9 @@ static int __init diagchar_init(void) mutex_init(&driver->delayed_rsp_mutex); mutex_init(&apps_data_mutex); mutex_init(&driver->msg_mask_lock); + mutex_init(&driver->hdlc_recovery_mutex); for (i = 0; i < NUM_PERIPHERALS; i++) mutex_init(&driver->diagfwd_channel_mutex[i]); - mutex_init(&driver->diagfwd_untag_mutex); init_waitqueue_head(&driver->wait_q); INIT_WORK(&(driver->diag_drain_work), diag_drain_work_fn); INIT_WORK(&(driver->update_user_clients), @@ -3652,9 +3730,6 @@ static int __init diagchar_init(void) if (ret) goto fail; ret = diag_masks_init(); - if (ret) - goto fail; - ret = diag_remote_init(); if (ret) goto fail; ret = diag_mux_init(); @@ -3693,9 +3768,7 @@ static int __init diagchar_init(void) goto fail; pr_debug("diagchar initialized now"); - ret = diagfwd_bridge_init(); - if (ret) - diagfwd_bridge_exit(); + platform_driver_register(&diag_mhi_driver); return 0; fail: @@ -3709,9 +3782,7 @@ fail: diagfwd_cntl_exit(); diag_dci_exit(); diag_masks_exit(); - diag_remote_exit(); return -1; - } static void diagchar_exit(void) diff --git a/drivers/char/diag/diagfwd.c b/drivers/char/diag/diagfwd.c index 019bf1946ac3..7dc2eabf1bb9 100644 --- a/drivers/char/diag/diagfwd.c +++ b/drivers/char/diag/diagfwd.c @@ -1405,7 +1405,9 @@ static void diag_hdlc_start_recovery(unsigned char *buf, int len, if (start_ptr) { /* Discard any partial packet reads */ + mutex_lock(&driver->hdlc_recovery_mutex); driver->incoming_pkt.processing = 0; + mutex_unlock(&driver->hdlc_recovery_mutex); diag_process_non_hdlc_pkt(start_ptr, len - i, info); } } @@ -1419,18 +1421,24 @@ void diag_process_non_hdlc_pkt(unsigned char *buf, int len, const uint32_t header_len = sizeof(struct diag_pkt_frame_t); struct diag_pkt_frame_t *actual_pkt = NULL; unsigned char *data_ptr = NULL; - struct diag_partial_pkt_t *partial_pkt = &driver->incoming_pkt; + struct diag_partial_pkt_t *partial_pkt = NULL; - if (!buf || len <= 0) + mutex_lock(&driver->hdlc_recovery_mutex); + if (!buf || len <= 0) { + mutex_unlock(&driver->hdlc_recovery_mutex); return; - - if (!partial_pkt->processing) + } + partial_pkt = &driver->incoming_pkt; + if (!partial_pkt->processing) { + mutex_unlock(&driver->hdlc_recovery_mutex); goto start; + } if (partial_pkt->remaining > len) { if ((partial_pkt->read_len + len) > partial_pkt->capacity) { pr_err("diag: Invalid length %d, %d received in %s\n", partial_pkt->read_len, len, __func__); + mutex_unlock(&driver->hdlc_recovery_mutex); goto end; } memcpy(partial_pkt->data + 
partial_pkt->read_len, buf, len); @@ -1444,6 +1452,7 @@ void diag_process_non_hdlc_pkt(unsigned char *buf, int len, pr_err("diag: Invalid length during partial read %d, %d received in %s\n", partial_pkt->read_len, partial_pkt->remaining, __func__); + mutex_unlock(&driver->hdlc_recovery_mutex); goto end; } memcpy(partial_pkt->data + partial_pkt->read_len, buf, @@ -1457,20 +1466,27 @@ void diag_process_non_hdlc_pkt(unsigned char *buf, int len, if (partial_pkt->remaining == 0) { actual_pkt = (struct diag_pkt_frame_t *)(partial_pkt->data); data_ptr = partial_pkt->data + header_len; - if (*(uint8_t *)(data_ptr + actual_pkt->length) != CONTROL_CHAR) + if (*(uint8_t *)(data_ptr + actual_pkt->length) != + CONTROL_CHAR) { + mutex_unlock(&driver->hdlc_recovery_mutex); diag_hdlc_start_recovery(buf, len, info); + mutex_lock(&driver->hdlc_recovery_mutex); + } err = diag_process_apps_pkt(data_ptr, actual_pkt->length, info); if (err) { pr_err("diag: In %s, unable to process incoming data packet, err: %d\n", __func__, err); + mutex_unlock(&driver->hdlc_recovery_mutex); goto end; } partial_pkt->read_len = 0; partial_pkt->total_len = 0; partial_pkt->processing = 0; + mutex_unlock(&driver->hdlc_recovery_mutex); goto start; } + mutex_unlock(&driver->hdlc_recovery_mutex); goto end; start: @@ -1483,14 +1499,14 @@ start: diag_send_error_rsp(buf, len, info); goto end; } - + mutex_lock(&driver->hdlc_recovery_mutex); if (pkt_len + header_len > partial_pkt->capacity) { pr_err("diag: In %s, incoming data is too large for the request buffer %d\n", __func__, pkt_len); + mutex_unlock(&driver->hdlc_recovery_mutex); diag_hdlc_start_recovery(buf, len, info); break; } - if ((pkt_len + header_len) > (len - read_bytes)) { partial_pkt->read_len = len - read_bytes; partial_pkt->total_len = pkt_len + header_len; @@ -1498,19 +1514,27 @@ start: partial_pkt->read_len; partial_pkt->processing = 1; memcpy(partial_pkt->data, buf, partial_pkt->read_len); + mutex_unlock(&driver->hdlc_recovery_mutex); break; } data_ptr = buf + header_len; - if (*(uint8_t *)(data_ptr + actual_pkt->length) != CONTROL_CHAR) + if (*(uint8_t *)(data_ptr + actual_pkt->length) != + CONTROL_CHAR) { + mutex_unlock(&driver->hdlc_recovery_mutex); diag_hdlc_start_recovery(buf, len, info); + mutex_lock(&driver->hdlc_recovery_mutex); + } else hdlc_reset = 0; err = diag_process_apps_pkt(data_ptr, actual_pkt->length, info); - if (err) + if (err) { + mutex_unlock(&driver->hdlc_recovery_mutex); break; + } read_bytes += header_len + pkt_len + 1; buf += header_len + pkt_len + 1; /* advance to next pkt */ + mutex_unlock(&driver->hdlc_recovery_mutex); } end: return; diff --git a/drivers/char/diag/diagfwd_cntl.c b/drivers/char/diag/diagfwd_cntl.c index 4ae2158b5a6b..74777212e4cf 100644 --- a/drivers/char/diag/diagfwd_cntl.c +++ b/drivers/char/diag/diagfwd_cntl.c @@ -67,7 +67,6 @@ void diag_cntl_channel_close(struct diagfwd_info *p_info) driver->feature[peripheral].sent_feature_mask = 0; driver->feature[peripheral].rcvd_feature_mask = 0; - flush_workqueue(driver->cntl_wq); reg_dirty |= PERIPHERAL_MASK(peripheral); diag_cmd_remove_reg_by_proc(peripheral); driver->feature[peripheral].stm_support = DISABLE_STM; diff --git a/drivers/char/diag/diagfwd_glink.c b/drivers/char/diag/diagfwd_glink.c index 03d496c2dd91..f1f8f0b2b34b 100644 --- a/drivers/char/diag/diagfwd_glink.c +++ b/drivers/char/diag/diagfwd_glink.c @@ -361,13 +361,44 @@ static void diag_glink_read_work_fn(struct work_struct *work) diagfwd_channel_read(glink_info->fwd_ctxt); } +struct diag_glink_read_work { + struct 
diag_glink_info *glink_info; + const void *ptr_read_done; + const void *ptr_rx_done; + size_t ptr_read_size; + struct work_struct work; +}; + +static void diag_glink_notify_rx_work_fn(struct work_struct *work) +{ + struct diag_glink_read_work *read_work = container_of(work, + struct diag_glink_read_work, work); + struct diag_glink_info *glink_info = read_work->glink_info; + + if (!glink_info || !glink_info->hdl) { + kfree(read_work); + return; + } + + diagfwd_channel_read_done(glink_info->fwd_ctxt, + (unsigned char *)(read_work->ptr_read_done), + read_work->ptr_read_size); + + glink_rx_done(glink_info->hdl, read_work->ptr_rx_done, false); + + DIAG_LOG(DIAG_DEBUG_PERIPHERALS, + "diag: Rx done for packet %pK of len: %d periph: %d ch: %d\n", + read_work->ptr_rx_done, (int)read_work->ptr_read_size, + glink_info->peripheral, glink_info->type); + kfree(read_work); +} static void diag_glink_notify_rx(void *hdl, const void *priv, const void *pkt_priv, const void *ptr, size_t size) { struct diag_glink_info *glink_info = (struct diag_glink_info *)priv; - int err = 0; + struct diag_glink_read_work *read_work; if (!glink_info || !glink_info->hdl || !ptr || !pkt_priv || !hdl) return; @@ -379,12 +410,25 @@ static void diag_glink_notify_rx(void *hdl, const void *priv, "diag: received a packet %pK of len:%d from periph:%d ch:%d\n", ptr, (int)size, glink_info->peripheral, glink_info->type); + read_work = kmalloc(sizeof(*read_work), GFP_ATOMIC); + if (!read_work) { + DIAG_LOG(DIAG_DEBUG_PERIPHERALS, + "diag: Could not allocate read_work\n"); + glink_rx_done(glink_info->hdl, ptr, true); + return; + } + memcpy((void *)pkt_priv, ptr, size); - err = diagfwd_channel_read_done(glink_info->fwd_ctxt, - (unsigned char *)pkt_priv, size); - glink_rx_done(glink_info->hdl, ptr, false); + + read_work->glink_info = glink_info; + read_work->ptr_read_done = pkt_priv; + read_work->ptr_rx_done = ptr; + read_work->ptr_read_size = size; + INIT_WORK(&read_work->work, diag_glink_notify_rx_work_fn); + queue_work(glink_info->wq, &read_work->work); + DIAG_LOG(DIAG_DEBUG_PERIPHERALS, - "diag: Rx done for packet %pK of len:%d periph:%d ch:%d\n", + "diag: Rx queued for packet %pK of len: %d periph: %d ch: %d\n", ptr, (int)size, glink_info->peripheral, glink_info->type); } @@ -473,6 +517,8 @@ static void diag_glink_connect_work_fn(struct work_struct *work) atomic_set(&glink_info->opened, 1); diagfwd_channel_open(glink_info->fwd_ctxt); diagfwd_late_open(glink_info->fwd_ctxt); + DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "glink channel open: p: %d t: %d\n", + glink_info->peripheral, glink_info->type); } static void diag_glink_remote_disconnect_work_fn(struct work_struct *work) @@ -494,9 +540,9 @@ static void diag_glink_late_init_work_fn(struct work_struct *work) late_init_work); if (!glink_info || !glink_info->hdl) return; - DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "p: %d t: %d\n", - glink_info->peripheral, glink_info->type); diagfwd_channel_open(glink_info->fwd_ctxt); + DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "glink late init p: %d t: %d\n", + glink_info->peripheral, glink_info->type); } static void diag_glink_transport_notify_state(void *handle, const void *priv, diff --git a/drivers/char/diag/diagfwd_glink.h b/drivers/char/diag/diagfwd_glink.h index a84fa4edfca0..6cad44522ab6 100644 --- a/drivers/char/diag/diagfwd_glink.h +++ b/drivers/char/diag/diagfwd_glink.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and diff --git a/drivers/char/diag/diagfwd_mhi.c b/drivers/char/diag/diagfwd_mhi.c index 03133a5a89aa..edfba6bb09c9 100644 --- a/drivers/char/diag/diagfwd_mhi.c +++ b/drivers/char/diag/diagfwd_mhi.c @@ -197,7 +197,7 @@ static void mhi_buf_tbl_clear(struct diag_mhi_info *mhi_info) struct diag_mhi_buf_tbl_t *item = NULL; struct diag_mhi_ch_t *ch = NULL; - if (!mhi_info || !mhi_info->enabled) + if (!mhi_info) return; /* Clear all the pending reads */ @@ -678,7 +678,25 @@ static int diag_mhi_register_ch(int id, struct diag_mhi_ch_t *ch) atomic_set(&(ch->opened), 0); ctxt = SET_CH_CTXT(id, ch->type); ch->client_info.mhi_client_cb = mhi_notifier; - return mhi_register_channel(&ch->hdl, NULL); + ch->client_info.chan = ch->chan; + ch->client_info.dev = &driver->pdev->dev; + ch->client_info.node_name = "qcom,mhi"; + ch->client_info.user_data = (void *)(uintptr_t)ctxt; + return mhi_register_channel(&ch->hdl, &ch->client_info); +} + +static void diag_mhi_dev_exit(int dev) +{ + struct diag_mhi_info *mhi_info = NULL; + + mhi_info = &diag_mhi[dev]; + if (!mhi_info) + return; + if (mhi_info->mhi_wq) + destroy_workqueue(mhi_info->mhi_wq); + mhi_close(mhi_info->id); + if (mhi_info->mempool_init) + diagmem_exit(driver, mhi_info->mempool); } int diag_mhi_init() @@ -726,22 +744,16 @@ int diag_mhi_init() return 0; fail: - diag_mhi_exit(); + diag_mhi_dev_exit(i); return -ENOMEM; } void diag_mhi_exit() { int i; - struct diag_mhi_info *mhi_info = NULL; for (i = 0; i < NUM_MHI_DEV; i++) { - mhi_info = &diag_mhi[i]; - if (mhi_info->mhi_wq) - destroy_workqueue(mhi_info->mhi_wq); - mhi_close(mhi_info->id); - if (mhi_info->mempool_init) - diagmem_exit(driver, mhi_info->mempool); + diag_mhi_dev_exit(i); } } diff --git a/drivers/char/diag/diagfwd_peripheral.c b/drivers/char/diag/diagfwd_peripheral.c index e86dc8292bf0..78b8452b19b3 100644 --- a/drivers/char/diag/diagfwd_peripheral.c +++ b/drivers/char/diag/diagfwd_peripheral.c @@ -216,6 +216,45 @@ static int check_bufsize_for_encoding(struct diagfwd_buf_t *buf, uint32_t len) return buf->len; } +int diag_md_get_peripheral(int ctxt) +{ + int peripheral; + + if (driver->num_pd_session) { + peripheral = GET_PD_CTXT(ctxt); + switch (peripheral) { + case UPD_WLAN: + if (!driver->pd_logging_mode[0]) + peripheral = PERIPHERAL_MODEM; + break; + case UPD_AUDIO: + if (!driver->pd_logging_mode[1]) + peripheral = PERIPHERAL_LPASS; + break; + case UPD_SENSORS: + if (!driver->pd_logging_mode[2]) + peripheral = PERIPHERAL_LPASS; + break; + case DIAG_ID_MPSS: + case DIAG_ID_LPASS: + case DIAG_ID_CDSP: + default: + peripheral = + GET_BUF_PERIPHERAL(ctxt); + if (peripheral > NUM_PERIPHERALS) + peripheral = -EINVAL; + break; + } + } else { + /* Account for Apps data as well */ + peripheral = GET_BUF_PERIPHERAL(ctxt); + if (peripheral > NUM_PERIPHERALS) + peripheral = -EINVAL; + } + + return peripheral; +} + static void diagfwd_data_process_done(struct diagfwd_info *fwd_info, struct diagfwd_buf_t *buf, int len) { @@ -245,13 +284,15 @@ static void diagfwd_data_process_done(struct diagfwd_info *fwd_info, mutex_lock(&driver->hdlc_disable_mutex); mutex_lock(&fwd_info->data_mutex); - peripheral = GET_PD_CTXT(buf->ctxt); - if (peripheral == DIAG_ID_MPSS) - peripheral = PERIPHERAL_MODEM; - if (peripheral == DIAG_ID_LPASS) - peripheral = PERIPHERAL_LPASS; - if (peripheral == DIAG_ID_CDSP) - peripheral = PERIPHERAL_CDSP; + peripheral = 
diag_md_get_peripheral(buf->ctxt); + if (peripheral < 0) { + pr_err("diag:%s:%d invalid peripheral = %d\n", + __func__, __LINE__, peripheral); + mutex_unlock(&fwd_info->data_mutex); + mutex_unlock(&driver->hdlc_disable_mutex); + diag_ws_release(); + return; + } session_info = diag_md_session_get_peripheral(peripheral); @@ -363,7 +404,6 @@ static void diagfwd_data_read_untag_done(struct diagfwd_info *fwd_info, if (driver->feature[peripheral].encode_hdlc && driver->feature[peripheral].untag_header && driver->peripheral_untag[peripheral]) { - mutex_lock(&driver->diagfwd_untag_mutex); temp_buf_cpd = buf; temp_buf_main = buf; if (fwd_info->buf_1 && @@ -463,10 +503,10 @@ static void diagfwd_data_read_untag_done(struct diagfwd_info *fwd_info, if (peripheral == PERIPHERAL_LPASS && fwd_info->type == TYPE_DATA && len_upd_2) { if (flag_buf_1) { - driver->upd_len_2_a = len_upd_2; + fwd_info->upd_len_2_a = len_upd_2; temp_ptr_upd = fwd_info->buf_upd_2_a; } else { - driver->upd_len_2_b = len_upd_2; + fwd_info->upd_len_2_b = len_upd_2; temp_ptr_upd = fwd_info->buf_upd_2_b; } temp_ptr_upd->ctxt &= 0x00FFFFFF; @@ -477,17 +517,17 @@ static void diagfwd_data_read_untag_done(struct diagfwd_info *fwd_info, temp_ptr_upd, len_upd_2); } else { if (flag_buf_1) - driver->upd_len_2_a = 0; + fwd_info->upd_len_2_a = 0; if (flag_buf_2) - driver->upd_len_2_b = 0; + fwd_info->upd_len_2_b = 0; } if (fwd_info->type == TYPE_DATA && len_upd_1) { if (flag_buf_1) { - driver->upd_len_1_a[peripheral] = + fwd_info->upd_len_1_a = len_upd_1; temp_ptr_upd = fwd_info->buf_upd_1_a; } else { - driver->upd_len_1_b[peripheral] = + fwd_info->upd_len_1_b = len_upd_1; temp_ptr_upd = fwd_info->buf_upd_1_b; } @@ -499,15 +539,15 @@ static void diagfwd_data_read_untag_done(struct diagfwd_info *fwd_info, temp_ptr_upd, len_upd_1); } else { if (flag_buf_1) - driver->upd_len_1_a[peripheral] = 0; + fwd_info->upd_len_1_a = 0; if (flag_buf_2) - driver->upd_len_1_b[peripheral] = 0; + fwd_info->upd_len_1_b = 0; } if (len_cpd) { if (flag_buf_1) - driver->cpd_len_1[peripheral] = len_cpd; + fwd_info->cpd_len_1 = len_cpd; else - driver->cpd_len_2[peripheral] = len_cpd; + fwd_info->cpd_len_2 = len_cpd; temp_ptr_cpd->ctxt &= 0x00FFFFFF; temp_ptr_cpd->ctxt |= (SET_PD_CTXT(ctxt_cpd)); @@ -515,11 +555,10 @@ static void diagfwd_data_read_untag_done(struct diagfwd_info *fwd_info, temp_ptr_cpd, len_cpd); } else { if (flag_buf_1) - driver->cpd_len_1[peripheral] = 0; + fwd_info->cpd_len_1 = 0; if (flag_buf_2) - driver->cpd_len_2[peripheral] = 0; + fwd_info->cpd_len_2 = 0; } - mutex_unlock(&driver->diagfwd_untag_mutex); return; } else { diagfwd_data_read_done(fwd_info, buf, len); @@ -527,7 +566,6 @@ static void diagfwd_data_read_untag_done(struct diagfwd_info *fwd_info, } end: diag_ws_release(); - mutex_unlock(&driver->diagfwd_untag_mutex); if (temp_ptr_cpd) { diagfwd_write_done(fwd_info->peripheral, fwd_info->type, GET_BUF_NUM(temp_ptr_cpd->ctxt)); @@ -759,6 +797,12 @@ int diagfwd_peripheral_init(void) fwd_info->inited = 1; fwd_info->read_bytes = 0; fwd_info->write_bytes = 0; + fwd_info->cpd_len_1 = 0; + fwd_info->cpd_len_2 = 0; + fwd_info->upd_len_1_a = 0; + fwd_info->upd_len_1_b = 0; + fwd_info->upd_len_2_a = 0; + fwd_info->upd_len_2_a = 0; mutex_init(&fwd_info->buf_mutex); mutex_init(&fwd_info->data_mutex); spin_lock_init(&fwd_info->write_buf_lock); @@ -775,6 +819,12 @@ int diagfwd_peripheral_init(void) fwd_info->ch_open = 0; fwd_info->read_bytes = 0; fwd_info->write_bytes = 0; + fwd_info->cpd_len_1 = 0; + fwd_info->cpd_len_2 = 0; + fwd_info->upd_len_1_a = 0; + 
fwd_info->upd_len_1_b = 0; + fwd_info->upd_len_2_a = 0; + fwd_info->upd_len_2_a = 0; spin_lock_init(&fwd_info->write_buf_lock); mutex_init(&fwd_info->buf_mutex); mutex_init(&fwd_info->data_mutex); @@ -999,7 +1049,16 @@ void diagfwd_close_transport(uint8_t transport, uint8_t peripheral) dest_info->buf_ptr[i] = fwd_info->buf_ptr[i]; if (!check_channel_state(dest_info->ctxt)) diagfwd_late_open(dest_info); - diagfwd_cntl_open(dest_info); + + /* + * Open control channel to update masks after buffers are + * initialized for peripherals that have transport other than + * GLINK. GLINK supported peripheral mask update will + * happen after glink buffers are initialized. + */ + + if (dest_info->transport != TRANSPORT_GLINK) + diagfwd_cntl_open(dest_info); init_fn(peripheral); mutex_unlock(&driver->diagfwd_channel_mutex[peripheral]); diagfwd_queue_read(&peripheral_info[TYPE_DATA][peripheral]); @@ -1190,7 +1249,18 @@ int diagfwd_channel_open(struct diagfwd_info *fwd_info) mutex_lock(&driver->diagfwd_channel_mutex[fwd_info->peripheral]); fwd_info->ch_open = 1; diagfwd_buffers_init(fwd_info); - diagfwd_write_buffers_init(fwd_info); + + /* + * Initialize buffers for glink supported + * peripherals only. Open control channel to update + * masks after buffers are initialized. + */ + if (fwd_info->transport == TRANSPORT_GLINK) { + diagfwd_write_buffers_init(fwd_info); + if (fwd_info->type == TYPE_CNTL) + diagfwd_cntl_open(fwd_info); + } + if (fwd_info && fwd_info->c_ops && fwd_info->c_ops->open) fwd_info->c_ops->open(fwd_info); for (i = 0; i < NUM_WRITE_BUFFERS; i++) { @@ -1215,6 +1285,9 @@ int diagfwd_channel_close(struct diagfwd_info *fwd_info) if (!fwd_info) return -EIO; + if (fwd_info->type == TYPE_CNTL) + flush_workqueue(driver->cntl_wq); + mutex_lock(&driver->diagfwd_channel_mutex[fwd_info->peripheral]); fwd_info->ch_open = 0; if (fwd_info && fwd_info->c_ops && fwd_info->c_ops->close) @@ -1273,11 +1346,11 @@ void diagfwd_write_done(uint8_t peripheral, uint8_t type, int ctxt) if (ctxt == 1 && fwd_info->buf_1) { /* Buffer 1 for core PD is freed */ atomic_set(&fwd_info->buf_1->in_busy, 0); - driver->cpd_len_1[peripheral] = 0; + fwd_info->cpd_len_1 = 0; } else if (ctxt == 2 && fwd_info->buf_2) { /* Buffer 2 for core PD is freed */ atomic_set(&fwd_info->buf_2->in_busy, 0); - driver->cpd_len_2[peripheral] = 0; + fwd_info->cpd_len_2 = 0; } else if (ctxt == 3 && fwd_info->buf_upd_1_a) { /* Buffer 1 for user pd 1 is freed */ atomic_set(&fwd_info->buf_upd_1_a->in_busy, 0); @@ -1286,17 +1359,17 @@ void diagfwd_write_done(uint8_t peripheral, uint8_t type, int ctxt) /* if not data in cpd and other user pd * free the core pd buffer for LPASS */ - if (!driver->cpd_len_1[PERIPHERAL_LPASS] && - !driver->upd_len_2_a) + if (!fwd_info->cpd_len_1 && + !fwd_info->upd_len_2_a) atomic_set(&fwd_info->buf_1->in_busy, 0); } else { /* if not data in cpd * free the core pd buffer for MPSS */ - if (!driver->cpd_len_1[PERIPHERAL_MODEM]) + if (!fwd_info->cpd_len_1) atomic_set(&fwd_info->buf_1->in_busy, 0); } - driver->upd_len_1_a[peripheral] = 0; + fwd_info->upd_len_1_a = 0; } else if (ctxt == 4 && fwd_info->buf_upd_1_b) { /* Buffer 2 for user pd 1 is freed */ @@ -1305,17 +1378,17 @@ void diagfwd_write_done(uint8_t peripheral, uint8_t type, int ctxt) /* if not data in cpd and other user pd * free the core pd buffer for LPASS */ - if (!driver->cpd_len_2[peripheral] && - !driver->upd_len_2_b) + if (!fwd_info->cpd_len_2 && + !fwd_info->upd_len_2_b) atomic_set(&fwd_info->buf_2->in_busy, 0); } else { /* if not data in cpd * free the core 
pd buffer for MPSS */ - if (!driver->cpd_len_2[PERIPHERAL_MODEM]) + if (!fwd_info->cpd_len_2) atomic_set(&fwd_info->buf_2->in_busy, 0); } - driver->upd_len_1_b[peripheral] = 0; + fwd_info->upd_len_1_b = 0; } else if (ctxt == 5 && fwd_info->buf_upd_2_a) { /* Buffer 1 for user pd 2 is freed */ @@ -1323,11 +1396,11 @@ void diagfwd_write_done(uint8_t peripheral, uint8_t type, int ctxt) /* if not data in cpd and other user pd * free the core pd buffer for LPASS */ - if (!driver->cpd_len_1[PERIPHERAL_LPASS] && - !driver->upd_len_1_a[PERIPHERAL_LPASS]) + if (!fwd_info->cpd_len_1 && + !fwd_info->upd_len_1_a) atomic_set(&fwd_info->buf_1->in_busy, 0); - driver->upd_len_2_a = 0; + fwd_info->upd_len_2_a = 0; } else if (ctxt == 6 && fwd_info->buf_upd_2_b) { /* Buffer 2 for user pd 2 is freed */ @@ -1335,11 +1408,11 @@ void diagfwd_write_done(uint8_t peripheral, uint8_t type, int ctxt) /* if not data in cpd and other user pd * free the core pd buffer for LPASS */ - if (!driver->cpd_len_2[PERIPHERAL_LPASS] && - !driver->upd_len_1_b[PERIPHERAL_LPASS]) + if (!fwd_info->cpd_len_2 && + !fwd_info->upd_len_1_b) atomic_set(&fwd_info->buf_2->in_busy, 0); - driver->upd_len_2_b = 0; + fwd_info->upd_len_2_b = 0; } else pr_err("diag: In %s, invalid ctxt %d\n", __func__, ctxt); diff --git a/drivers/char/diag/diagfwd_peripheral.h b/drivers/char/diag/diagfwd_peripheral.h index 760f139ff428..eda70dcfdcd9 100644 --- a/drivers/char/diag/diagfwd_peripheral.h +++ b/drivers/char/diag/diagfwd_peripheral.h @@ -83,6 +83,12 @@ struct diagfwd_info { struct diagfwd_buf_t *buf_upd_2_a; struct diagfwd_buf_t *buf_upd_2_b; struct diagfwd_buf_t *buf_ptr[NUM_WRITE_BUFFERS]; + int cpd_len_1; + int cpd_len_2; + int upd_len_1_a; + int upd_len_1_b; + int upd_len_2_a; + int upd_len_2_b; struct diag_peripheral_ops *p_ops; struct diag_channel_ops *c_ops; }; @@ -99,6 +105,9 @@ void diagfwd_early_open(uint8_t peripheral); void diagfwd_late_open(struct diagfwd_info *fwd_info); void diagfwd_close(uint8_t peripheral, uint8_t type); + +int diag_md_get_peripheral(int ctxt); + int diagfwd_register(uint8_t transport, uint8_t peripheral, uint8_t type, void *ctxt, struct diag_peripheral_ops *ops, struct diagfwd_info **fwd_ctxt); diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c index a084a4751fa9..25372dc381d4 100644 --- a/drivers/char/ipmi/ipmi_msghandler.c +++ b/drivers/char/ipmi/ipmi_msghandler.c @@ -3877,6 +3877,9 @@ static void smi_recv_tasklet(unsigned long val) * because the lower layer is allowed to hold locks while calling * message delivery. */ + + rcu_read_lock(); + if (!run_to_completion) spin_lock_irqsave(&intf->xmit_msgs_lock, flags); if (intf->curr_msg == NULL && !intf->in_shutdown) { @@ -3899,6 +3902,8 @@ static void smi_recv_tasklet(unsigned long val) if (newmsg) intf->handlers->sender(intf->send_info, newmsg); + rcu_read_unlock(); + handle_new_recv_msgs(intf); } diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c index 0d83cfb9708f..f53e8ba2c718 100644 --- a/drivers/char/ipmi/ipmi_ssif.c +++ b/drivers/char/ipmi/ipmi_ssif.c @@ -758,6 +758,11 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result, result, len, data[2]); } else if (data[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 || data[1] != IPMI_GET_MSG_FLAGS_CMD) { + /* + * Don't abort here, maybe it was a queued + * response to a previous command. 
+ */ + ipmi_ssif_unlock_cond(ssif_info, flags); pr_warn(PFX "Invalid response getting flags: %x %x\n", data[0], data[1]); } else { diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c index 096f0cef4da1..40d400fe5bb7 100644 --- a/drivers/char/ipmi/ipmi_watchdog.c +++ b/drivers/char/ipmi/ipmi_watchdog.c @@ -1162,10 +1162,11 @@ static int wdog_reboot_handler(struct notifier_block *this, ipmi_watchdog_state = WDOG_TIMEOUT_NONE; ipmi_set_timeout(IPMI_SET_TIMEOUT_NO_HB); } else if (ipmi_watchdog_state != WDOG_TIMEOUT_NONE) { - /* Set a long timer to let the reboot happens, but - reboot if it hangs, but only if the watchdog + /* Set a long timer to let the reboot happen or + reset if it hangs, but only if the watchdog timer was already running. */ - timeout = 120; + if (timeout < 120) + timeout = 120; pretimeout = 0; ipmi_watchdog_state = WDOG_TIMEOUT_RESET; ipmi_set_timeout(IPMI_SET_TIMEOUT_NO_HB); diff --git a/drivers/char/mem.c b/drivers/char/mem.c index 0975d23031ea..2898d19fadf5 100644 --- a/drivers/char/mem.c +++ b/drivers/char/mem.c @@ -346,7 +346,7 @@ static int mmap_mem(struct file *file, struct vm_area_struct *vma) phys_addr_t offset = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT; /* It's illegal to wrap around the end of the physical address space. */ - if (offset + (phys_addr_t)size < offset) + if (offset + (phys_addr_t)size - 1 < offset) return -EINVAL; if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size)) diff --git a/drivers/char/pcmcia/cm4040_cs.c b/drivers/char/pcmcia/cm4040_cs.c index fc061f7c2bd1..a7de8ae185a5 100644 --- a/drivers/char/pcmcia/cm4040_cs.c +++ b/drivers/char/pcmcia/cm4040_cs.c @@ -374,7 +374,7 @@ static ssize_t cm4040_write(struct file *filp, const char __user *buf, rc = write_sync_reg(SCR_HOST_TO_READER_START, dev); if (rc <= 0) { - DEBUGP(5, dev, "write_sync_reg c=%.2Zx\n", rc); + DEBUGP(5, dev, "write_sync_reg c=%.2zx\n", rc); DEBUGP(2, dev, "<- cm4040_write (failed)\n"); if (rc == -ERESTARTSYS) return rc; @@ -387,7 +387,7 @@ static ssize_t cm4040_write(struct file *filp, const char __user *buf, for (i = 0; i < bytes_to_write; i++) { rc = wait_for_bulk_out_ready(dev); if (rc <= 0) { - DEBUGP(5, dev, "wait_for_bulk_out_ready rc=%.2Zx\n", + DEBUGP(5, dev, "wait_for_bulk_out_ready rc=%.2zx\n", rc); DEBUGP(2, dev, "<- cm4040_write (failed)\n"); if (rc == -ERESTARTSYS) @@ -403,7 +403,7 @@ static ssize_t cm4040_write(struct file *filp, const char __user *buf, rc = write_sync_reg(SCR_HOST_TO_READER_DONE, dev); if (rc <= 0) { - DEBUGP(5, dev, "write_sync_reg c=%.2Zx\n", rc); + DEBUGP(5, dev, "write_sync_reg c=%.2zx\n", rc); DEBUGP(2, dev, "<- cm4040_write (failed)\n"); if (rc == -ERESTARTSYS) return rc; diff --git a/drivers/char/random.c b/drivers/char/random.c index d93dfebae0bb..1822472dffab 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -1798,13 +1798,15 @@ int random_int_secret_init(void) return 0; } +static DEFINE_PER_CPU(__u32 [MD5_DIGEST_WORDS], get_random_int_hash) + __aligned(sizeof(unsigned long)); + /* * Get a random word for internal kernel use only. Similar to urandom but * with the goal of minimal entropy pool depletion. 
As a result, the random * value is not cryptographically secure but for several uses the cost of * depleting entropy is too high */ -static DEFINE_PER_CPU(__u32 [MD5_DIGEST_WORDS], get_random_int_hash); unsigned int get_random_int(void) { __u32 *hash; diff --git a/drivers/char/rdbg.c b/drivers/char/rdbg.c index 0823ed78485e..8161d77ca194 100644 --- a/drivers/char/rdbg.c +++ b/drivers/char/rdbg.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved. + * Copyright (c) 2013-2017, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -22,7 +22,7 @@ #include #include -#define SMP2P_NUM_PROCS 8 +#define SMP2P_NUM_PROCS 16 #define MAX_RETRIES 20 #define SM_VERSION 1 @@ -146,9 +146,17 @@ static struct processor_specific_info proc_info[SMP2P_NUM_PROCS] = { {"rdbg_adsp", SMEM_LC_DEBUGGER, 16*1024}, /*ADSP*/ {0}, /*SMP2P_RESERVED_PROC_1*/ {"rdbg_wcnss", 0, 0}, /*WCNSS*/ - {0}, /*SMP2P_RESERVED_PROC_2*/ - {0}, /*SMP2P_POWER_PROC*/ - {0} /*SMP2P_REMOTE_MOCK_PROC*/ + {"rdbg_cdsp", SMEM_LC_DEBUGGER, 16*1024}, /*CDSP*/ + {NULL}, /*SMP2P_POWER_PROC*/ + {NULL}, /*SMP2P_TZ_PROC*/ + {NULL}, /*EMPTY*/ + {NULL}, /*EMPTY*/ + {NULL}, /*EMPTY*/ + {NULL}, /*EMPTY*/ + {NULL}, /*EMPTY*/ + {NULL}, /*EMPTY*/ + {NULL}, /*EMPTY*/ + {NULL} /*SMP2P_REMOTE_MOCK_PROC*/ }; static int smq_blockmap_get(struct smq_block_map *block_map, diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c index 252142524ff2..a0d9ac6b6cc9 100644 --- a/drivers/char/tpm/tpm-chip.c +++ b/drivers/char/tpm/tpm-chip.c @@ -29,33 +29,92 @@ #include "tpm.h" #include "tpm_eventlog.h" -static DECLARE_BITMAP(dev_mask, TPM_NUM_DEVICES); -static LIST_HEAD(tpm_chip_list); -static DEFINE_SPINLOCK(driver_lock); +DEFINE_IDR(dev_nums_idr); +static DEFINE_MUTEX(idr_lock); struct class *tpm_class; dev_t tpm_devt; -/* - * tpm_chip_find_get - return tpm_chip for a given chip number - * @chip_num the device number for the chip +/** + * tpm_try_get_ops() - Get a ref to the tpm_chip + * @chip: Chip to ref + * + * The caller must already have some kind of locking to ensure that chip is + * valid. This function will lock the chip so that the ops member can be + * accessed safely. The locking prevents tpm_chip_unregister from + * completing, so it should not be held for long periods. + * + * Returns -ERRNO if the chip could not be got. */ +int tpm_try_get_ops(struct tpm_chip *chip) +{ + int rc = -EIO; + + get_device(&chip->dev); + + down_read(&chip->ops_sem); + if (!chip->ops) + goto out_lock; + + if (!try_module_get(chip->dev.parent->driver->owner)) + goto out_lock; + + return 0; +out_lock: + up_read(&chip->ops_sem); + put_device(&chip->dev); + return rc; +} +EXPORT_SYMBOL_GPL(tpm_try_get_ops); + +/** + * tpm_put_ops() - Release a ref to the tpm_chip + * @chip: Chip to put + * + * This is the opposite pair to tpm_try_get_ops(). After this returns chip may + * be kfree'd. 
+ */ +void tpm_put_ops(struct tpm_chip *chip) +{ + module_put(chip->dev.parent->driver->owner); + up_read(&chip->ops_sem); + put_device(&chip->dev); +} +EXPORT_SYMBOL_GPL(tpm_put_ops); + +/** + * tpm_chip_find_get() - return tpm_chip for a given chip number + * @chip_num: id to find + * + * The return'd chip has been tpm_try_get_ops'd and must be released via + * tpm_put_ops + */ struct tpm_chip *tpm_chip_find_get(int chip_num) { - struct tpm_chip *pos, *chip = NULL; + struct tpm_chip *chip, *res = NULL; + int chip_prev; - rcu_read_lock(); - list_for_each_entry_rcu(pos, &tpm_chip_list, list) { - if (chip_num != TPM_ANY_NUM && chip_num != pos->dev_num) - continue; + mutex_lock(&idr_lock); - if (try_module_get(pos->pdev->driver->owner)) { - chip = pos; - break; - } + if (chip_num == TPM_ANY_NUM) { + chip_num = 0; + do { + chip_prev = chip_num; + chip = idr_get_next(&dev_nums_idr, &chip_num); + if (chip && !tpm_try_get_ops(chip)) { + res = chip; + break; + } + } while (chip_prev != chip_num); + } else { + chip = idr_find_slowpath(&dev_nums_idr, chip_num); + if (chip && !tpm_try_get_ops(chip)) + res = chip; } - rcu_read_unlock(); - return chip; + + mutex_unlock(&idr_lock); + + return res; } /** @@ -68,12 +127,48 @@ static void tpm_dev_release(struct device *dev) { struct tpm_chip *chip = container_of(dev, struct tpm_chip, dev); - spin_lock(&driver_lock); - clear_bit(chip->dev_num, dev_mask); - spin_unlock(&driver_lock); + mutex_lock(&idr_lock); + idr_remove(&dev_nums_idr, chip->dev_num); + mutex_unlock(&idr_lock); + kfree(chip); } + +/** + * tpm_class_shutdown() - prepare the TPM device for loss of power. + * @dev: device to which the chip is associated. + * + * Issues a TPM2_Shutdown command prior to loss of power, as required by the + * TPM 2.0 spec. + * Then, calls bus- and device- specific shutdown code. + * + * XXX: This codepath relies on the fact that sysfs is not enabled for + * TPM2: sysfs uses an implicit lock on chip->ops, so this could race if TPM2 + * has sysfs support enabled before TPM sysfs's implicit locking is fixed. + */ +static int tpm_class_shutdown(struct device *dev) +{ + struct tpm_chip *chip = container_of(dev, struct tpm_chip, dev); + + if (chip->flags & TPM_CHIP_FLAG_TPM2) { + down_write(&chip->ops_sem); + tpm2_shutdown(chip, TPM2_SU_CLEAR); + chip->ops = NULL; + up_write(&chip->ops_sem); + } + /* Allow bus- and device-specific code to run. Note: since chip->ops + * is NULL, more-specific shutdown code will not be able to issue TPM + * commands. 
+ */ + if (dev->bus && dev->bus->shutdown) + dev->bus->shutdown(dev); + else if (dev->driver && dev->driver->shutdown) + dev->driver->shutdown(dev); + return 0; +} + + /** * tpmm_chip_alloc() - allocate a new struct tpm_chip instance * @dev: device to which the chip is associated @@ -88,37 +183,35 @@ struct tpm_chip *tpmm_chip_alloc(struct device *dev, const struct tpm_class_ops *ops) { struct tpm_chip *chip; + int rc; chip = kzalloc(sizeof(*chip), GFP_KERNEL); if (chip == NULL) return ERR_PTR(-ENOMEM); mutex_init(&chip->tpm_mutex); - INIT_LIST_HEAD(&chip->list); + init_rwsem(&chip->ops_sem); chip->ops = ops; - spin_lock(&driver_lock); - chip->dev_num = find_first_zero_bit(dev_mask, TPM_NUM_DEVICES); - spin_unlock(&driver_lock); - - if (chip->dev_num >= TPM_NUM_DEVICES) { + mutex_lock(&idr_lock); + rc = idr_alloc(&dev_nums_idr, NULL, 0, TPM_NUM_DEVICES, GFP_KERNEL); + mutex_unlock(&idr_lock); + if (rc < 0) { dev_err(dev, "No available tpm device numbers\n"); kfree(chip); - return ERR_PTR(-ENOMEM); + return ERR_PTR(rc); } - - set_bit(chip->dev_num, dev_mask); + chip->dev_num = rc; scnprintf(chip->devname, sizeof(chip->devname), "tpm%d", chip->dev_num); - chip->pdev = dev; - dev_set_drvdata(dev, chip); chip->dev.class = tpm_class; + chip->dev.class->shutdown = tpm_class_shutdown; chip->dev.release = tpm_dev_release; - chip->dev.parent = chip->pdev; + chip->dev.parent = dev; #ifdef CONFIG_ACPI chip->dev.groups = chip->groups; #endif @@ -133,7 +226,7 @@ struct tpm_chip *tpmm_chip_alloc(struct device *dev, device_initialize(&chip->dev); cdev_init(&chip->cdev, &tpm_fops); - chip->cdev.owner = chip->pdev->driver->owner; + chip->cdev.owner = dev->driver->owner; chip->cdev.kobj.parent = &chip->dev.kobj; devm_add_action(dev, (void (*)(void *)) put_device, &chip->dev); @@ -167,6 +260,11 @@ static int tpm_add_char_device(struct tpm_chip *chip) return rc; } + /* Make the chip available. */ + mutex_lock(&idr_lock); + idr_replace(&dev_nums_idr, chip, chip->dev_num); + mutex_unlock(&idr_lock); + return rc; } @@ -174,6 +272,16 @@ static void tpm_del_char_device(struct tpm_chip *chip) { cdev_del(&chip->cdev); device_del(&chip->dev); + + /* Make the chip unavailable. */ + mutex_lock(&idr_lock); + idr_replace(&dev_nums_idr, NULL, chip->dev_num); + mutex_unlock(&idr_lock); + + /* Make the driver uncallable. */ + down_write(&chip->ops_sem); + chip->ops = NULL; + up_write(&chip->ops_sem); } static int tpm1_chip_register(struct tpm_chip *chip) @@ -228,17 +336,11 @@ int tpm_chip_register(struct tpm_chip *chip) if (rc) goto out_err; - /* Make the chip available. */ - spin_lock(&driver_lock); - list_add_tail_rcu(&chip->list, &tpm_chip_list); - spin_unlock(&driver_lock); - chip->flags |= TPM_CHIP_FLAG_REGISTERED; if (!(chip->flags & TPM_CHIP_FLAG_TPM2)) { - rc = __compat_only_sysfs_link_entry_to_kobj(&chip->pdev->kobj, - &chip->dev.kobj, - "ppi"); + rc = __compat_only_sysfs_link_entry_to_kobj( + &chip->dev.parent->kobj, &chip->dev.kobj, "ppi"); if (rc && rc != -ENOENT) { tpm_chip_unregister(chip); return rc; @@ -259,6 +361,9 @@ EXPORT_SYMBOL_GPL(tpm_chip_register); * Takes the chip first away from the list of available TPM chips and then * cleans up all the resources reserved by tpm_chip_register(). * + * Once this function returns the driver call backs in 'op's will not be + * running and will no longer start. + * * NOTE: This function should be only called before deinitializing chip * resources. 
*/ @@ -267,13 +372,8 @@ void tpm_chip_unregister(struct tpm_chip *chip) if (!(chip->flags & TPM_CHIP_FLAG_REGISTERED)) return; - spin_lock(&driver_lock); - list_del_rcu(&chip->list); - spin_unlock(&driver_lock); - synchronize_rcu(); - if (!(chip->flags & TPM_CHIP_FLAG_TPM2)) - sysfs_remove_link(&chip->pdev->kobj, "ppi"); + sysfs_remove_link(&chip->dev.parent->kobj, "ppi"); tpm1_chip_unregister(chip); tpm_del_char_device(chip); diff --git a/drivers/char/tpm/tpm-dev.c b/drivers/char/tpm/tpm-dev.c index 4f3137d9a35e..912ad30be585 100644 --- a/drivers/char/tpm/tpm-dev.c +++ b/drivers/char/tpm/tpm-dev.c @@ -61,7 +61,7 @@ static int tpm_open(struct inode *inode, struct file *file) * by the check of is_open variable, which is protected * by driver_lock. */ if (test_and_set_bit(0, &chip->is_open)) { - dev_dbg(chip->pdev, "Another process owns this TPM\n"); + dev_dbg(&chip->dev, "Another process owns this TPM\n"); return -EBUSY; } @@ -79,7 +79,6 @@ static int tpm_open(struct inode *inode, struct file *file) INIT_WORK(&priv->work, timeout_work); file->private_data = priv; - get_device(chip->pdev); return 0; } @@ -137,9 +136,18 @@ static ssize_t tpm_write(struct file *file, const char __user *buf, return -EFAULT; } - /* atomic tpm command send and result receive */ + /* atomic tpm command send and result receive. We only hold the ops + * lock during this period so that the tpm can be unregistered even if + * the char dev is held open. + */ + if (tpm_try_get_ops(priv->chip)) { + mutex_unlock(&priv->buffer_mutex); + return -EPIPE; + } out_size = tpm_transmit(priv->chip, priv->data_buffer, sizeof(priv->data_buffer), 0); + + tpm_put_ops(priv->chip); if (out_size < 0) { mutex_unlock(&priv->buffer_mutex); return out_size; @@ -166,7 +174,6 @@ static int tpm_release(struct inode *inode, struct file *file) file->private_data = NULL; atomic_set(&priv->data_pending, 0); clear_bit(0, &priv->chip->is_open); - put_device(priv->chip->pdev); kfree(priv); return 0; } diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c index 17abe52e6365..aaa5fa95dede 100644 --- a/drivers/char/tpm/tpm-interface.c +++ b/drivers/char/tpm/tpm-interface.c @@ -343,7 +343,7 @@ ssize_t tpm_transmit(struct tpm_chip *chip, const u8 *buf, size_t bufsiz, if (count == 0) return -ENODATA; if (count > bufsiz) { - dev_err(chip->pdev, + dev_err(&chip->dev, "invalid count value %x %zx\n", count, bufsiz); return -E2BIG; } @@ -353,7 +353,7 @@ ssize_t tpm_transmit(struct tpm_chip *chip, const u8 *buf, size_t bufsiz, rc = chip->ops->send(chip, (u8 *) buf, count); if (rc < 0) { - dev_err(chip->pdev, + dev_err(&chip->dev, "tpm_transmit: tpm_send: error %zd\n", rc); goto out; } @@ -372,7 +372,7 @@ ssize_t tpm_transmit(struct tpm_chip *chip, const u8 *buf, size_t bufsiz, goto out_recv; if (chip->ops->req_canceled(chip, status)) { - dev_err(chip->pdev, "Operation Canceled\n"); + dev_err(&chip->dev, "Operation Canceled\n"); rc = -ECANCELED; goto out; } @@ -382,14 +382,14 @@ ssize_t tpm_transmit(struct tpm_chip *chip, const u8 *buf, size_t bufsiz, } while (time_before(jiffies, stop)); chip->ops->cancel(chip); - dev_err(chip->pdev, "Operation Timed out\n"); + dev_err(&chip->dev, "Operation Timed out\n"); rc = -ETIME; goto out; out_recv: rc = chip->ops->recv(chip, (u8 *) buf, bufsiz); if (rc < 0) - dev_err(chip->pdev, + dev_err(&chip->dev, "tpm_transmit: tpm_recv: error %zd\n", rc); out: if (!(flags & TPM_TRANSMIT_UNLOCKED)) @@ -416,7 +416,7 @@ ssize_t tpm_transmit_cmd(struct tpm_chip *chip, const void *cmd, err = 
be32_to_cpu(header->return_code); if (err != 0 && desc) - dev_err(chip->pdev, "A TPM error (%d) occurred %s\n", err, + dev_err(&chip->dev, "A TPM error (%d) occurred %s\n", err, desc); return err; @@ -514,7 +514,7 @@ int tpm_get_timeouts(struct tpm_chip *chip) if (rc == TPM_ERR_INVALID_POSTINIT) { /* The TPM is not started, we are the first to talk to it. Execute a startup command. */ - dev_info(chip->pdev, "Issuing TPM_STARTUP"); + dev_info(&chip->dev, "Issuing TPM_STARTUP"); if (tpm_startup(chip, TPM_ST_CLEAR)) return rc; @@ -526,7 +526,7 @@ int tpm_get_timeouts(struct tpm_chip *chip) 0, NULL); } if (rc) { - dev_err(chip->pdev, + dev_err(&chip->dev, "A TPM error (%zd) occurred attempting to determine the timeouts\n", rc); goto duration; @@ -565,7 +565,7 @@ int tpm_get_timeouts(struct tpm_chip *chip) /* Report adjusted timeouts */ if (chip->vendor.timeout_adjusted) { - dev_info(chip->pdev, + dev_info(&chip->dev, HW_ERR "Adjusting reported timeouts: A %lu->%luus B %lu->%luus C %lu->%luus D %lu->%luus\n", old_timeout[0], new_timeout[0], old_timeout[1], new_timeout[1], @@ -612,7 +612,7 @@ duration: chip->vendor.duration[TPM_MEDIUM] *= 1000; chip->vendor.duration[TPM_LONG] *= 1000; chip->vendor.duration_adjusted = true; - dev_info(chip->pdev, "Adjusting TPM timeout parameters."); + dev_info(&chip->dev, "Adjusting TPM timeout parameters."); } return 0; } @@ -687,7 +687,7 @@ int tpm_is_tpm2(u32 chip_num) rc = (chip->flags & TPM_CHIP_FLAG_TPM2) != 0; - tpm_chip_put(chip); + tpm_put_ops(chip); return rc; } @@ -716,7 +716,7 @@ int tpm_pcr_read(u32 chip_num, int pcr_idx, u8 *res_buf) rc = tpm2_pcr_read(chip, pcr_idx, res_buf); else rc = tpm_pcr_read_dev(chip, pcr_idx, res_buf); - tpm_chip_put(chip); + tpm_put_ops(chip); return rc; } EXPORT_SYMBOL_GPL(tpm_pcr_read); @@ -751,7 +751,7 @@ int tpm_pcr_extend(u32 chip_num, int pcr_idx, const u8 *hash) if (chip->flags & TPM_CHIP_FLAG_TPM2) { rc = tpm2_pcr_extend(chip, pcr_idx, hash); - tpm_chip_put(chip); + tpm_put_ops(chip); return rc; } @@ -761,7 +761,7 @@ int tpm_pcr_extend(u32 chip_num, int pcr_idx, const u8 *hash) rc = tpm_transmit_cmd(chip, &cmd, EXTEND_PCR_RESULT_SIZE, 0, "attempting extend a PCR value"); - tpm_chip_put(chip); + tpm_put_ops(chip); return rc; } EXPORT_SYMBOL_GPL(tpm_pcr_extend); @@ -802,7 +802,9 @@ int tpm_do_selftest(struct tpm_chip *chip) * around 300ms while the self test is ongoing, keep trying * until the self test duration expires. 
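The self-test path that follows keeps resending the continue-self-test command on -ETIME for as long as the command duration allows. Below is a small hypothetical helper showing that bounded-retry shape in isolation; tpm_retry_op() and its op callback are illustrative names only, not part of this patch.

#include <linux/jiffies.h>
#include <linux/delay.h>

static int tpm_retry_op(struct tpm_chip *chip, unsigned long duration_ms,
			unsigned int delay_msec,
			int (*op)(struct tpm_chip *chip))
{
	unsigned long stop = jiffies + msecs_to_jiffies(duration_ms);
	int rc;

	do {
		rc = op(chip);
		if (rc != -ETIME)	/* success or a hard error: stop */
			return rc;
		msleep(delay_msec);
	} while (time_before(jiffies, stop));

	return -ETIME;
}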
*/ if (rc == -ETIME) { - dev_info(chip->pdev, HW_ERR "TPM command timed out during continue self test"); + dev_info( + &chip->dev, HW_ERR + "TPM command timed out during continue self test"); msleep(delay_msec); continue; } @@ -812,7 +814,7 @@ int tpm_do_selftest(struct tpm_chip *chip) rc = be32_to_cpu(cmd.header.out.return_code); if (rc == TPM_ERR_DISABLED || rc == TPM_ERR_DEACTIVATED) { - dev_info(chip->pdev, + dev_info(&chip->dev, "TPM is disabled/deactivated (0x%X)\n", rc); /* TPM is disabled and/or deactivated; driver can * proceed and TPM does handle commands for @@ -840,7 +842,7 @@ int tpm_send(u32 chip_num, void *cmd, size_t buflen) rc = tpm_transmit_cmd(chip, cmd, buflen, 0, "attempting tpm_cmd"); - tpm_chip_put(chip); + tpm_put_ops(chip); return rc; } EXPORT_SYMBOL_GPL(tpm_send); @@ -966,10 +968,10 @@ int tpm_pm_suspend(struct device *dev) } if (rc) - dev_err(chip->pdev, + dev_err(&chip->dev, "Error (%d) sending savestate before suspend\n", rc); else if (try > 0) - dev_warn(chip->pdev, "TPM savestate took %dms\n", + dev_warn(&chip->dev, "TPM savestate took %dms\n", try * TPM_TIMEOUT_RETRY); return rc; @@ -1023,7 +1025,7 @@ int tpm_get_random(u32 chip_num, u8 *out, size_t max) if (chip->flags & TPM_CHIP_FLAG_TPM2) { err = tpm2_get_random(chip, out, max); - tpm_chip_put(chip); + tpm_put_ops(chip); return err; } @@ -1045,7 +1047,7 @@ int tpm_get_random(u32 chip_num, u8 *out, size_t max) num_bytes -= recd; } while (retries-- && total < max); - tpm_chip_put(chip); + tpm_put_ops(chip); return total ? total : -EIO; } EXPORT_SYMBOL_GPL(tpm_get_random); @@ -1071,7 +1073,7 @@ int tpm_seal_trusted(u32 chip_num, struct trusted_key_payload *payload, rc = tpm2_seal_trusted(chip, payload, options); - tpm_chip_put(chip); + tpm_put_ops(chip); return rc; } EXPORT_SYMBOL_GPL(tpm_seal_trusted); @@ -1097,7 +1099,8 @@ int tpm_unseal_trusted(u32 chip_num, struct trusted_key_payload *payload, rc = tpm2_unseal_trusted(chip, payload, options); - tpm_chip_put(chip); + tpm_put_ops(chip); + return rc; } EXPORT_SYMBOL_GPL(tpm_unseal_trusted); @@ -1124,6 +1127,7 @@ static int __init tpm_init(void) static void __exit tpm_exit(void) { + idr_destroy(&dev_nums_idr); class_destroy(tpm_class); unregister_chrdev_region(tpm_devt, TPM_NUM_DEVICES); } diff --git a/drivers/char/tpm/tpm-sysfs.c b/drivers/char/tpm/tpm-sysfs.c index f880856aa75e..06ac6e9657d2 100644 --- a/drivers/char/tpm/tpm-sysfs.c +++ b/drivers/char/tpm/tpm-sysfs.c @@ -38,6 +38,8 @@ static ssize_t pubek_show(struct device *dev, struct device_attribute *attr, struct tpm_chip *chip = dev_get_drvdata(dev); + memset(&tpm_cmd, 0, sizeof(tpm_cmd)); + tpm_cmd.header.in = tpm_readpubek_header; err = tpm_transmit_cmd(chip, &tpm_cmd, READ_PUBEK_RESULT_SIZE, 0, "attempting to read the PUBEK"); @@ -284,16 +286,28 @@ static const struct attribute_group tpm_dev_group = { int tpm_sysfs_add_device(struct tpm_chip *chip) { int err; - err = sysfs_create_group(&chip->pdev->kobj, + + /* XXX: If you wish to remove this restriction, you must first update + * tpm_sysfs to explicitly lock chip->ops. 
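To make the XXX note above concrete, this is roughly what "explicitly lock chip->ops" would mean if the TPM2 sysfs restriction were ever lifted: each attribute handler would bracket its work with the get/put helpers instead of leaning on sysfs teardown ordering. example_attr_show() is a hypothetical sketch, not part of this patch.

static ssize_t example_attr_show(struct tpm_chip *chip, char *buf)
{
	ssize_t rc;

	if (tpm_try_get_ops(chip))	/* chip gone: fail instead of racing */
		return -EPIPE;

	rc = scnprintf(buf, PAGE_SIZE, "%d\n", chip->dev_num);

	tpm_put_ops(chip);
	return rc;
}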
+ */ + if (chip->flags & TPM_CHIP_FLAG_TPM2) + return 0; + + err = sysfs_create_group(&chip->dev.parent->kobj, &tpm_dev_group); if (err) - dev_err(chip->pdev, + dev_err(&chip->dev, "failed to create sysfs attributes, %d\n", err); return err; } void tpm_sysfs_del_device(struct tpm_chip *chip) { - sysfs_remove_group(&chip->pdev->kobj, &tpm_dev_group); + /* The sysfs routines rely on an implicit tpm_try_get_ops, this + * function is called before ops is null'd and the sysfs core + * synchronizes this removal so that no callbacks are running or can + * run again + */ + sysfs_remove_group(&chip->dev.parent->kobj, &tpm_dev_group); } diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h index 2216861f89f1..772d99b3a8e4 100644 --- a/drivers/char/tpm/tpm.h +++ b/drivers/char/tpm/tpm.h @@ -34,7 +34,7 @@ enum tpm_const { TPM_MINOR = 224, /* officially assigned */ TPM_BUFSIZE = 4096, - TPM_NUM_DEVICES = 256, + TPM_NUM_DEVICES = 65536, TPM_RETRY = 50, /* 5 seconds */ }; @@ -171,11 +171,16 @@ enum tpm_chip_flags { }; struct tpm_chip { - struct device *pdev; /* Device stuff */ struct device dev; struct cdev cdev; + /* A driver callback under ops cannot be run unless ops_sem is held + * (sometimes implicitly, eg for the sysfs code). ops becomes null + * when the driver is unregistered, see tpm_try_get_ops. + */ + struct rw_semaphore ops_sem; const struct tpm_class_ops *ops; + unsigned int flags; int dev_num; /* /dev/tpm# */ @@ -195,17 +200,10 @@ struct tpm_chip { acpi_handle acpi_dev_handle; char ppi_version[TPM_PPI_VERSION_LEN + 1]; #endif /* CONFIG_ACPI */ - - struct list_head list; }; #define to_tpm_chip(d) container_of(d, struct tpm_chip, dev) -static inline void tpm_chip_put(struct tpm_chip *chip) -{ - module_put(chip->pdev->driver->owner); -} - static inline int tpm_read_index(int base, int index) { outb(index, base); @@ -497,6 +495,7 @@ static inline void tpm_buf_append_u32(struct tpm_buf *buf, const u32 value) extern struct class *tpm_class; extern dev_t tpm_devt; extern const struct file_operations tpm_fops; +extern struct idr dev_nums_idr; enum tpm_transmit_flags { TPM_TRANSMIT_UNLOCKED = BIT(0), @@ -517,6 +516,9 @@ extern int wait_for_tpm_stat(struct tpm_chip *, u8, unsigned long, wait_queue_head_t *, bool); struct tpm_chip *tpm_chip_find_get(int chip_num); +__must_check int tpm_try_get_ops(struct tpm_chip *chip); +void tpm_put_ops(struct tpm_chip *chip); + extern struct tpm_chip *tpmm_chip_alloc(struct device *dev, const struct tpm_class_ops *ops); extern int tpm_chip_register(struct tpm_chip *chip); diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c index cb7e4f6b70ba..286bd090a488 100644 --- a/drivers/char/tpm/tpm2-cmd.c +++ b/drivers/char/tpm/tpm2-cmd.c @@ -570,7 +570,7 @@ static void tpm2_flush_context_cmd(struct tpm_chip *chip, u32 handle, rc = tpm_buf_init(&buf, TPM2_ST_NO_SESSIONS, TPM2_CC_FLUSH_CONTEXT); if (rc) { - dev_warn(chip->pdev, "0x%08x was not flushed, out of memory\n", + dev_warn(&chip->dev, "0x%08x was not flushed, out of memory\n", handle); return; } @@ -580,7 +580,7 @@ static void tpm2_flush_context_cmd(struct tpm_chip *chip, u32 handle, rc = tpm_transmit_cmd(chip, buf.data, PAGE_SIZE, flags, "flushing context"); if (rc) - dev_warn(chip->pdev, "0x%08x was not flushed, rc=%d\n", handle, + dev_warn(&chip->dev, "0x%08x was not flushed, rc=%d\n", handle, rc); tpm_buf_destroy(&buf); @@ -753,7 +753,7 @@ void tpm2_shutdown(struct tpm_chip *chip, u16 shutdown_type) * except print the error code on a system failure. 
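Earlier in this hunk group TPM_NUM_DEVICES grows from 256 to 65536; that is only practical because device numbers now come from dev_nums_idr, which allocates slots sparsely on demand, rather than from a statically sized bitmap. A minimal standalone sketch of the reserve-then-publish pattern used there, with example_* names that are illustrative only:

static DEFINE_IDR(example_nums);
static DEFINE_MUTEX(example_nums_lock);

static int example_reserve_dev_num(void)
{
	int rc;

	mutex_lock(&example_nums_lock);
	/* Reserve a slot with a NULL entry; the chip pointer is published
	 * later with idr_replace(), once the character device exists.
	 */
	rc = idr_alloc(&example_nums, NULL, 0, TPM_NUM_DEVICES, GFP_KERNEL);
	mutex_unlock(&example_nums_lock);

	return rc;	/* >= 0: the new device number, < 0: error code */
}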
*/ if (rc < 0) - dev_warn(chip->pdev, "transmit returned %d while stopping the TPM", + dev_warn(&chip->dev, "transmit returned %d while stopping the TPM", rc); } EXPORT_SYMBOL_GPL(tpm2_shutdown); @@ -820,7 +820,7 @@ static int tpm2_start_selftest(struct tpm_chip *chip, bool full) * immediately. This is a workaround for that. */ if (rc == TPM2_RC_TESTING) { - dev_warn(chip->pdev, "Got RC_TESTING, ignoring\n"); + dev_warn(&chip->dev, "Got RC_TESTING, ignoring\n"); rc = 0; } diff --git a/drivers/char/tpm/tpm_atmel.c b/drivers/char/tpm/tpm_atmel.c index dfadad0916a1..a48a878f791d 100644 --- a/drivers/char/tpm/tpm_atmel.c +++ b/drivers/char/tpm/tpm_atmel.c @@ -49,7 +49,7 @@ static int tpm_atml_recv(struct tpm_chip *chip, u8 *buf, size_t count) for (i = 0; i < 6; i++) { status = ioread8(chip->vendor.iobase + 1); if ((status & ATML_STATUS_DATA_AVAIL) == 0) { - dev_err(chip->pdev, "error reading header\n"); + dev_err(&chip->dev, "error reading header\n"); return -EIO; } *buf++ = ioread8(chip->vendor.iobase); @@ -60,12 +60,12 @@ static int tpm_atml_recv(struct tpm_chip *chip, u8 *buf, size_t count) size = be32_to_cpu(*native_size); if (count < size) { - dev_err(chip->pdev, + dev_err(&chip->dev, "Recv size(%d) less than available space\n", size); for (; i < size; i++) { /* clear the waiting data anyway */ status = ioread8(chip->vendor.iobase + 1); if ((status & ATML_STATUS_DATA_AVAIL) == 0) { - dev_err(chip->pdev, "error reading data\n"); + dev_err(&chip->dev, "error reading data\n"); return -EIO; } } @@ -76,7 +76,7 @@ static int tpm_atml_recv(struct tpm_chip *chip, u8 *buf, size_t count) for (; i < size; i++) { status = ioread8(chip->vendor.iobase + 1); if ((status & ATML_STATUS_DATA_AVAIL) == 0) { - dev_err(chip->pdev, "error reading data\n"); + dev_err(&chip->dev, "error reading data\n"); return -EIO; } *buf++ = ioread8(chip->vendor.iobase); @@ -86,7 +86,7 @@ static int tpm_atml_recv(struct tpm_chip *chip, u8 *buf, size_t count) status = ioread8(chip->vendor.iobase + 1); if (status & ATML_STATUS_DATA_AVAIL) { - dev_err(chip->pdev, "data available is stuck\n"); + dev_err(&chip->dev, "data available is stuck\n"); return -EIO; } @@ -97,9 +97,9 @@ static int tpm_atml_send(struct tpm_chip *chip, u8 *buf, size_t count) { int i; - dev_dbg(chip->pdev, "tpm_atml_send:\n"); + dev_dbg(&chip->dev, "tpm_atml_send:\n"); for (i = 0; i < count; i++) { - dev_dbg(chip->pdev, "%d 0x%x(%d)\n", i, buf[i], buf[i]); + dev_dbg(&chip->dev, "%d 0x%x(%d)\n", i, buf[i], buf[i]); iowrite8(buf[i], chip->vendor.iobase); } diff --git a/drivers/char/tpm/tpm_i2c_atmel.c b/drivers/char/tpm/tpm_i2c_atmel.c index 8dfb88b9739c..dd8f0eb3170a 100644 --- a/drivers/char/tpm/tpm_i2c_atmel.c +++ b/drivers/char/tpm/tpm_i2c_atmel.c @@ -52,7 +52,7 @@ struct priv_data { static int i2c_atmel_send(struct tpm_chip *chip, u8 *buf, size_t len) { struct priv_data *priv = chip->vendor.priv; - struct i2c_client *client = to_i2c_client(chip->pdev); + struct i2c_client *client = to_i2c_client(chip->dev.parent); s32 status; priv->len = 0; @@ -62,7 +62,7 @@ static int i2c_atmel_send(struct tpm_chip *chip, u8 *buf, size_t len) status = i2c_master_send(client, buf, len); - dev_dbg(chip->pdev, + dev_dbg(&chip->dev, "%s(buf=%*ph len=%0zx) -> sts=%d\n", __func__, (int)min_t(size_t, 64, len), buf, len, status); return status; @@ -71,7 +71,7 @@ static int i2c_atmel_send(struct tpm_chip *chip, u8 *buf, size_t len) static int i2c_atmel_recv(struct tpm_chip *chip, u8 *buf, size_t count) { struct priv_data *priv = chip->vendor.priv; - struct i2c_client *client = 
to_i2c_client(chip->pdev); + struct i2c_client *client = to_i2c_client(chip->dev.parent); struct tpm_output_header *hdr = (struct tpm_output_header *)priv->buffer; u32 expected_len; @@ -88,7 +88,7 @@ static int i2c_atmel_recv(struct tpm_chip *chip, u8 *buf, size_t count) return -ENOMEM; if (priv->len >= expected_len) { - dev_dbg(chip->pdev, + dev_dbg(&chip->dev, "%s early(buf=%*ph count=%0zx) -> ret=%d\n", __func__, (int)min_t(size_t, 64, expected_len), buf, count, expected_len); @@ -97,7 +97,7 @@ static int i2c_atmel_recv(struct tpm_chip *chip, u8 *buf, size_t count) } rc = i2c_master_recv(client, buf, expected_len); - dev_dbg(chip->pdev, + dev_dbg(&chip->dev, "%s reread(buf=%*ph count=%0zx) -> ret=%d\n", __func__, (int)min_t(size_t, 64, expected_len), buf, count, expected_len); @@ -106,13 +106,13 @@ static int i2c_atmel_recv(struct tpm_chip *chip, u8 *buf, size_t count) static void i2c_atmel_cancel(struct tpm_chip *chip) { - dev_err(chip->pdev, "TPM operation cancellation was requested, but is not supported"); + dev_err(&chip->dev, "TPM operation cancellation was requested, but is not supported"); } static u8 i2c_atmel_read_status(struct tpm_chip *chip) { struct priv_data *priv = chip->vendor.priv; - struct i2c_client *client = to_i2c_client(chip->pdev); + struct i2c_client *client = to_i2c_client(chip->dev.parent); int rc; /* The TPM fails the I2C read until it is ready, so we do the entire @@ -125,7 +125,7 @@ static u8 i2c_atmel_read_status(struct tpm_chip *chip) /* Once the TPM has completed the command the command remains readable * until another command is issued. */ rc = i2c_master_recv(client, priv->buffer, sizeof(priv->buffer)); - dev_dbg(chip->pdev, + dev_dbg(&chip->dev, "%s: sts=%d", __func__, rc); if (rc <= 0) return 0; diff --git a/drivers/char/tpm/tpm_i2c_infineon.c b/drivers/char/tpm/tpm_i2c_infineon.c index 63d5d22e9e60..f2aa99e34b4b 100644 --- a/drivers/char/tpm/tpm_i2c_infineon.c +++ b/drivers/char/tpm/tpm_i2c_infineon.c @@ -446,7 +446,7 @@ static int tpm_tis_i2c_recv(struct tpm_chip *chip, u8 *buf, size_t count) /* read first 10 bytes, including tag, paramsize, and result */ size = recv_data(chip, buf, TPM_HEADER_SIZE); if (size < TPM_HEADER_SIZE) { - dev_err(chip->pdev, "Unable to read header\n"); + dev_err(&chip->dev, "Unable to read header\n"); goto out; } @@ -459,14 +459,14 @@ static int tpm_tis_i2c_recv(struct tpm_chip *chip, u8 *buf, size_t count) size += recv_data(chip, &buf[TPM_HEADER_SIZE], expected - TPM_HEADER_SIZE); if (size < expected) { - dev_err(chip->pdev, "Unable to read remainder of result\n"); + dev_err(&chip->dev, "Unable to read remainder of result\n"); size = -ETIME; goto out; } wait_for_stat(chip, TPM_STS_VALID, chip->vendor.timeout_c, &status); if (status & TPM_STS_DATA_AVAIL) { /* retry? 
*/ - dev_err(chip->pdev, "Error left over data\n"); + dev_err(&chip->dev, "Error left over data\n"); size = -EIO; goto out; } diff --git a/drivers/char/tpm/tpm_i2c_nuvoton.c b/drivers/char/tpm/tpm_i2c_nuvoton.c index 847f1597fe9b..a1e1474dda30 100644 --- a/drivers/char/tpm/tpm_i2c_nuvoton.c +++ b/drivers/char/tpm/tpm_i2c_nuvoton.c @@ -96,13 +96,13 @@ static s32 i2c_nuvoton_write_buf(struct i2c_client *client, u8 offset, u8 size, /* read TPM_STS register */ static u8 i2c_nuvoton_read_status(struct tpm_chip *chip) { - struct i2c_client *client = to_i2c_client(chip->pdev); + struct i2c_client *client = to_i2c_client(chip->dev.parent); s32 status; u8 data; status = i2c_nuvoton_read_buf(client, TPM_STS, 1, &data); if (status <= 0) { - dev_err(chip->pdev, "%s() error return %d\n", __func__, + dev_err(&chip->dev, "%s() error return %d\n", __func__, status); data = TPM_STS_ERR_VAL; } @@ -127,13 +127,13 @@ static s32 i2c_nuvoton_write_status(struct i2c_client *client, u8 data) /* write commandReady to TPM_STS register */ static void i2c_nuvoton_ready(struct tpm_chip *chip) { - struct i2c_client *client = to_i2c_client(chip->pdev); + struct i2c_client *client = to_i2c_client(chip->dev.parent); s32 status; /* this causes the current command to be aborted */ status = i2c_nuvoton_write_status(client, TPM_STS_COMMAND_READY); if (status < 0) - dev_err(chip->pdev, + dev_err(&chip->dev, "%s() fail to write TPM_STS.commandReady\n", __func__); } @@ -212,7 +212,7 @@ static int i2c_nuvoton_wait_for_stat(struct tpm_chip *chip, u8 mask, u8 value, return 0; } while (time_before(jiffies, stop)); } - dev_err(chip->pdev, "%s(%02x, %02x) -> timeout\n", __func__, mask, + dev_err(&chip->dev, "%s(%02x, %02x) -> timeout\n", __func__, mask, value); return -ETIMEDOUT; } @@ -240,7 +240,7 @@ static int i2c_nuvoton_recv_data(struct i2c_client *client, &chip->vendor.read_queue) == 0) { burst_count = i2c_nuvoton_get_burstcount(client, chip); if (burst_count < 0) { - dev_err(chip->pdev, + dev_err(&chip->dev, "%s() fail to read burstCount=%d\n", __func__, burst_count); return -EIO; @@ -249,12 +249,12 @@ static int i2c_nuvoton_recv_data(struct i2c_client *client, rc = i2c_nuvoton_read_buf(client, TPM_DATA_FIFO_R, bytes2read, &buf[size]); if (rc < 0) { - dev_err(chip->pdev, + dev_err(&chip->dev, "%s() fail on i2c_nuvoton_read_buf()=%d\n", __func__, rc); return -EIO; } - dev_dbg(chip->pdev, "%s(%d):", __func__, bytes2read); + dev_dbg(&chip->dev, "%s(%d):", __func__, bytes2read); size += bytes2read; } @@ -264,7 +264,7 @@ static int i2c_nuvoton_recv_data(struct i2c_client *client, /* Read TPM command results */ static int i2c_nuvoton_recv(struct tpm_chip *chip, u8 *buf, size_t count) { - struct device *dev = chip->pdev; + struct device *dev = chip->dev.parent; struct i2c_client *client = to_i2c_client(dev); s32 rc; int expected, status, burst_count, retries, size = 0; @@ -334,7 +334,7 @@ static int i2c_nuvoton_recv(struct tpm_chip *chip, u8 *buf, size_t count) break; } i2c_nuvoton_ready(chip); - dev_dbg(chip->pdev, "%s() -> %d\n", __func__, size); + dev_dbg(&chip->dev, "%s() -> %d\n", __func__, size); return size; } @@ -347,7 +347,7 @@ static int i2c_nuvoton_recv(struct tpm_chip *chip, u8 *buf, size_t count) */ static int i2c_nuvoton_send(struct tpm_chip *chip, u8 *buf, size_t len) { - struct device *dev = chip->pdev; + struct device *dev = chip->dev.parent; struct i2c_client *client = to_i2c_client(dev); u32 ordinal; size_t count = 0; diff --git a/drivers/char/tpm/tpm_infineon.c b/drivers/char/tpm/tpm_infineon.c index 
6c488e635fdd..e3cf9f3545c5 100644 --- a/drivers/char/tpm/tpm_infineon.c +++ b/drivers/char/tpm/tpm_infineon.c @@ -195,9 +195,9 @@ static int wait(struct tpm_chip *chip, int wait_for_bit) } if (i == TPM_MAX_TRIES) { /* timeout occurs */ if (wait_for_bit == STAT_XFE) - dev_err(chip->pdev, "Timeout in wait(STAT_XFE)\n"); + dev_err(&chip->dev, "Timeout in wait(STAT_XFE)\n"); if (wait_for_bit == STAT_RDA) - dev_err(chip->pdev, "Timeout in wait(STAT_RDA)\n"); + dev_err(&chip->dev, "Timeout in wait(STAT_RDA)\n"); return -EIO; } return 0; @@ -220,7 +220,7 @@ static void wait_and_send(struct tpm_chip *chip, u8 sendbyte) static void tpm_wtx(struct tpm_chip *chip) { number_of_wtx++; - dev_info(chip->pdev, "Granting WTX (%02d / %02d)\n", + dev_info(&chip->dev, "Granting WTX (%02d / %02d)\n", number_of_wtx, TPM_MAX_WTX_PACKAGES); wait_and_send(chip, TPM_VL_VER); wait_and_send(chip, TPM_CTRL_WTX); @@ -231,7 +231,7 @@ static void tpm_wtx(struct tpm_chip *chip) static void tpm_wtx_abort(struct tpm_chip *chip) { - dev_info(chip->pdev, "Aborting WTX\n"); + dev_info(&chip->dev, "Aborting WTX\n"); wait_and_send(chip, TPM_VL_VER); wait_and_send(chip, TPM_CTRL_WTX_ABORT); wait_and_send(chip, 0x00); @@ -257,7 +257,7 @@ recv_begin: } if (buf[0] != TPM_VL_VER) { - dev_err(chip->pdev, + dev_err(&chip->dev, "Wrong transport protocol implementation!\n"); return -EIO; } @@ -272,7 +272,7 @@ recv_begin: } if ((size == 0x6D00) && (buf[1] == 0x80)) { - dev_err(chip->pdev, "Error handling on vendor layer!\n"); + dev_err(&chip->dev, "Error handling on vendor layer!\n"); return -EIO; } @@ -284,7 +284,7 @@ recv_begin: } if (buf[1] == TPM_CTRL_WTX) { - dev_info(chip->pdev, "WTX-package received\n"); + dev_info(&chip->dev, "WTX-package received\n"); if (number_of_wtx < TPM_MAX_WTX_PACKAGES) { tpm_wtx(chip); goto recv_begin; @@ -295,14 +295,14 @@ recv_begin: } if (buf[1] == TPM_CTRL_WTX_ABORT_ACK) { - dev_info(chip->pdev, "WTX-abort acknowledged\n"); + dev_info(&chip->dev, "WTX-abort acknowledged\n"); return size; } if (buf[1] == TPM_CTRL_ERROR) { - dev_err(chip->pdev, "ERROR-package received:\n"); + dev_err(&chip->dev, "ERROR-package received:\n"); if (buf[4] == TPM_INF_NAK) - dev_err(chip->pdev, + dev_err(&chip->dev, "-> Negative acknowledgement" " - retransmit command!\n"); return -EIO; @@ -321,7 +321,7 @@ static int tpm_inf_send(struct tpm_chip *chip, u8 * buf, size_t count) ret = empty_fifo(chip, 1); if (ret) { - dev_err(chip->pdev, "Timeout while clearing FIFO\n"); + dev_err(&chip->dev, "Timeout while clearing FIFO\n"); return -EIO; } diff --git a/drivers/char/tpm/tpm_nsc.c b/drivers/char/tpm/tpm_nsc.c index 289389ecef84..766370bed60c 100644 --- a/drivers/char/tpm/tpm_nsc.c +++ b/drivers/char/tpm/tpm_nsc.c @@ -113,7 +113,7 @@ static int nsc_wait_for_ready(struct tpm_chip *chip) } while (time_before(jiffies, stop)); - dev_info(chip->pdev, "wait for ready failed\n"); + dev_info(&chip->dev, "wait for ready failed\n"); return -EBUSY; } @@ -129,12 +129,12 @@ static int tpm_nsc_recv(struct tpm_chip *chip, u8 * buf, size_t count) return -EIO; if (wait_for_stat(chip, NSC_STATUS_F0, NSC_STATUS_F0, &data) < 0) { - dev_err(chip->pdev, "F0 timeout\n"); + dev_err(&chip->dev, "F0 timeout\n"); return -EIO; } if ((data = inb(chip->vendor.base + NSC_DATA)) != NSC_COMMAND_NORMAL) { - dev_err(chip->pdev, "not in normal mode (0x%x)\n", + dev_err(&chip->dev, "not in normal mode (0x%x)\n", data); return -EIO; } @@ -143,7 +143,7 @@ static int tpm_nsc_recv(struct tpm_chip *chip, u8 * buf, size_t count) for (p = buffer; p < &buffer[count]; p++) { 
if (wait_for_stat (chip, NSC_STATUS_OBF, NSC_STATUS_OBF, &data) < 0) { - dev_err(chip->pdev, + dev_err(&chip->dev, "OBF timeout (while reading data)\n"); return -EIO; } @@ -154,11 +154,11 @@ static int tpm_nsc_recv(struct tpm_chip *chip, u8 * buf, size_t count) if ((data & NSC_STATUS_F0) == 0 && (wait_for_stat(chip, NSC_STATUS_F0, NSC_STATUS_F0, &data) < 0)) { - dev_err(chip->pdev, "F0 not set\n"); + dev_err(&chip->dev, "F0 not set\n"); return -EIO; } if ((data = inb(chip->vendor.base + NSC_DATA)) != NSC_COMMAND_EOC) { - dev_err(chip->pdev, + dev_err(&chip->dev, "expected end of command(0x%x)\n", data); return -EIO; } @@ -189,19 +189,19 @@ static int tpm_nsc_send(struct tpm_chip *chip, u8 * buf, size_t count) return -EIO; if (wait_for_stat(chip, NSC_STATUS_IBF, 0, &data) < 0) { - dev_err(chip->pdev, "IBF timeout\n"); + dev_err(&chip->dev, "IBF timeout\n"); return -EIO; } outb(NSC_COMMAND_NORMAL, chip->vendor.base + NSC_COMMAND); if (wait_for_stat(chip, NSC_STATUS_IBR, NSC_STATUS_IBR, &data) < 0) { - dev_err(chip->pdev, "IBR timeout\n"); + dev_err(&chip->dev, "IBR timeout\n"); return -EIO; } for (i = 0; i < count; i++) { if (wait_for_stat(chip, NSC_STATUS_IBF, 0, &data) < 0) { - dev_err(chip->pdev, + dev_err(&chip->dev, "IBF timeout (while writing data)\n"); return -EIO; } @@ -209,7 +209,7 @@ static int tpm_nsc_send(struct tpm_chip *chip, u8 * buf, size_t count) } if (wait_for_stat(chip, NSC_STATUS_IBF, 0, &data) < 0) { - dev_err(chip->pdev, "IBF timeout\n"); + dev_err(&chip->dev, "IBF timeout\n"); return -EIO; } outb(NSC_COMMAND_EOC, chip->vendor.base + NSC_COMMAND); diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c index f10a107614b4..7f13221aeb30 100644 --- a/drivers/char/tpm/tpm_tis.c +++ b/drivers/char/tpm/tpm_tis.c @@ -293,7 +293,7 @@ static int tpm_tis_recv(struct tpm_chip *chip, u8 *buf, size_t count) /* read first 10 bytes, including tag, paramsize, and result */ if ((size = recv_data(chip, buf, TPM_HEADER_SIZE)) < TPM_HEADER_SIZE) { - dev_err(chip->pdev, "Unable to read header\n"); + dev_err(&chip->dev, "Unable to read header\n"); goto out; } @@ -306,7 +306,7 @@ static int tpm_tis_recv(struct tpm_chip *chip, u8 *buf, size_t count) if ((size += recv_data(chip, &buf[TPM_HEADER_SIZE], expected - TPM_HEADER_SIZE)) < expected) { - dev_err(chip->pdev, "Unable to read remainder of result\n"); + dev_err(&chip->dev, "Unable to read remainder of result\n"); size = -ETIME; goto out; } @@ -315,7 +315,7 @@ static int tpm_tis_recv(struct tpm_chip *chip, u8 *buf, size_t count) &chip->vendor.int_queue, false); status = tpm_tis_status(chip); if (status & TPM_STS_DATA_AVAIL) { /* retry? 
*/ - dev_err(chip->pdev, "Error left over data\n"); + dev_err(&chip->dev, "Error left over data\n"); size = -EIO; goto out; } @@ -401,7 +401,7 @@ static void disable_interrupts(struct tpm_chip *chip) iowrite32(intmask, chip->vendor.iobase + TPM_INT_ENABLE(chip->vendor.locality)); - devm_free_irq(chip->pdev, chip->vendor.irq, chip); + devm_free_irq(&chip->dev, chip->vendor.irq, chip); chip->vendor.irq = 0; } @@ -463,7 +463,7 @@ static int tpm_tis_send(struct tpm_chip *chip, u8 *buf, size_t len) msleep(1); if (!priv->irq_tested) { disable_interrupts(chip); - dev_err(chip->pdev, + dev_err(&chip->dev, FW_BUG "TPM interrupt not working, polling instead\n"); } priv->irq_tested = true; @@ -533,7 +533,7 @@ static int probe_itpm(struct tpm_chip *chip) rc = tpm_tis_send_data(chip, cmd_getticks, len); if (rc == 0) { - dev_info(chip->pdev, "Detected an iTPM.\n"); + dev_info(&chip->dev, "Detected an iTPM.\n"); rc = 1; } else rc = -EFAULT; @@ -766,7 +766,7 @@ static int tpm_tis_init(struct device *dev, struct tpm_info *tpm_info, if (devm_request_irq (dev, i, tis_int_probe, IRQF_SHARED, chip->devname, chip) != 0) { - dev_info(chip->pdev, + dev_info(&chip->dev, "Unable to request irq: %d for probe\n", i); continue; @@ -818,7 +818,7 @@ static int tpm_tis_init(struct device *dev, struct tpm_info *tpm_info, if (devm_request_irq (dev, chip->vendor.irq, tis_int_handler, IRQF_SHARED, chip->devname, chip) != 0) { - dev_info(chip->pdev, + dev_info(&chip->dev, "Unable to request irq: %d for use\n", chip->vendor.irq); chip->vendor.irq = 0; diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c index 31e8ae916ba0..be0b09a0fb44 100644 --- a/drivers/char/virtio_console.c +++ b/drivers/char/virtio_console.c @@ -1864,7 +1864,7 @@ static void config_work_handler(struct work_struct *work) { struct ports_device *portdev; - portdev = container_of(work, struct ports_device, control_work); + portdev = container_of(work, struct ports_device, config_work); if (!use_multiport(portdev)) { struct virtio_device *vdev; struct port *port; diff --git a/drivers/clk/msm/clock-local2.c b/drivers/clk/msm/clock-local2.c index 076ead6aaf34..40d8d12cda82 100644 --- a/drivers/clk/msm/clock-local2.c +++ b/drivers/clk/msm/clock-local2.c @@ -1517,8 +1517,8 @@ static int set_rate_pixel(struct clk *clk, unsigned long rate) { struct rcg_clk *rcg = to_rcg_clk(clk); struct clk_freq_tbl *pixel_freq = rcg->current_freq; - int frac_num[] = {3, 2, 4, 1}; - int frac_den[] = {8, 9, 9, 1}; + int frac_num[] = {1, 2, 4, 3, 2}; + int frac_den[] = {1, 3, 9, 8, 9}; int delta = 100000; int i, rc; diff --git a/drivers/clk/msm/clock-osm.c b/drivers/clk/msm/clock-osm.c index 7cc1c56a2090..eb72217b9b1c 100644 --- a/drivers/clk/msm/clock-osm.c +++ b/drivers/clk/msm/clock-osm.c @@ -397,6 +397,7 @@ struct clk_osm { u32 acd_extint1_cfg; u32 acd_autoxfer_ctl; u32 acd_debugfs_addr; + u32 acd_debugfs_addr_size; bool acd_init; bool secure_init; bool red_fsm_en; @@ -606,6 +607,83 @@ static int clk_osm_acd_auto_local_write_reg(struct clk_osm *c, u32 mask) return 0; } +static int clk_osm_acd_init(struct clk_osm *c) +{ + + int rc = 0; + u32 auto_xfer_mask = 0; + + if (!c->acd_init) + return 0; + + c->acd_debugfs_addr = ACD_HW_VERSION; + + /* Program ACD tunable-length delay register */ + clk_osm_acd_master_write_reg(c, c->acd_td, ACDTD); + auto_xfer_mask |= ACD_REG_RELATIVE_ADDR_BITMASK(ACDTD); + + /* Program ACD control register */ + clk_osm_acd_master_write_reg(c, c->acd_cr, ACDCR); + auto_xfer_mask |= ACD_REG_RELATIVE_ADDR_BITMASK(ACDCR); + + /* Program 
ACD soft start control register */ + clk_osm_acd_master_write_reg(c, c->acd_sscr, ACDSSCR); + auto_xfer_mask |= ACD_REG_RELATIVE_ADDR_BITMASK(ACDSSCR); + + /* Program initial ACD external interface configuration register */ + clk_osm_acd_master_write_reg(c, c->acd_extint0_cfg, ACD_EXTINT_CFG); + auto_xfer_mask |= ACD_REG_RELATIVE_ADDR_BITMASK(ACD_EXTINT_CFG); + + /* Program ACD auto-register transfer control register */ + clk_osm_acd_master_write_reg(c, c->acd_autoxfer_ctl, ACD_AUTOXFER_CTL); + + /* Ensure writes complete before transfers to local copy */ + clk_osm_acd_mb(c); + + /* Transfer master copies */ + rc = clk_osm_acd_auto_local_write_reg(c, auto_xfer_mask); + if (rc) + return rc; + + /* Switch CPUSS clock source to ACD clock */ + rc = clk_osm_acd_master_write_through_reg(c, ACD_GFMUX_CFG_SELECT, + ACD_GFMUX_CFG); + if (rc) + return rc; + + /* Program ACD_DCVS_SW */ + rc = clk_osm_acd_master_write_through_reg(c, + ACD_DCVS_SW_DCVS_IN_PRGR_SET, + ACD_DCVS_SW); + if (rc) + return rc; + + rc = clk_osm_acd_master_write_through_reg(c, + ACD_DCVS_SW_DCVS_IN_PRGR_CLEAR, + ACD_DCVS_SW); + if (rc) + return rc; + + udelay(1); + + /* Program final ACD external interface configuration register */ + rc = clk_osm_acd_master_write_through_reg(c, c->acd_extint1_cfg, + ACD_EXTINT_CFG); + if (rc) + return rc; + + /* + * ACDCR, ACDTD, ACDSSCR, ACD_EXTINT_CFG, ACD_GFMUX_CFG + * must be copied from master to local copy on PC exit. + */ + auto_xfer_mask |= ACD_REG_RELATIVE_ADDR_BITMASK(ACD_GFMUX_CFG); + clk_osm_acd_master_write_reg(c, auto_xfer_mask, ACD_AUTOXFER_CFG); + + /* ACD has been initialized and enabled for this cluster */ + c->acd_init = false; + return 0; +} + static inline int clk_osm_count_ns(struct clk_osm *c, u64 nsec) { u64 temp; @@ -729,6 +807,17 @@ static int clk_osm_set_rate(struct clk *c, unsigned long rate) static int clk_osm_enable(struct clk *c) { struct clk_osm *cpuclk = to_clk_osm(c); + int rc; + + rc = clk_osm_acd_init(cpuclk); + if (rc) { + pr_err("Failed to initialize ACD for cluster %d, rc=%d\n", + cpuclk->cluster_num, rc); + return rc; + } + + /* Wait for 5 usecs before enabling OSM */ + udelay(5); clk_osm_write_reg(cpuclk, 1, ENABLE_REG); @@ -1361,6 +1450,7 @@ static int clk_osm_resources_init(struct platform_device *pdev) return -ENOMEM; } pwrcl_clk.pbases[ACD_BASE] = pbase; + pwrcl_clk.acd_debugfs_addr_size = resource_size(res); pwrcl_clk.vbases[ACD_BASE] = vbase; pwrcl_clk.acd_init = true; } else { @@ -1378,6 +1468,7 @@ static int clk_osm_resources_init(struct platform_device *pdev) return -ENOMEM; } perfcl_clk.pbases[ACD_BASE] = pbase; + perfcl_clk.acd_debugfs_addr_size = resource_size(res); perfcl_clk.vbases[ACD_BASE] = vbase; perfcl_clk.acd_init = true; } else { @@ -2927,6 +3018,11 @@ static int debugfs_get_debug_reg(void *data, u64 *val) { struct clk_osm *c = data; + if (!c->pbases[ACD_BASE]) { + pr_err("ACD base start not defined\n"); + return -EINVAL; + } + if (c->acd_debugfs_addr >= ACD_MASTER_ONLY_REG_ADDR) *val = readl_relaxed((char *)c->vbases[ACD_BASE] + c->acd_debugfs_addr); @@ -2939,6 +3035,11 @@ static int debugfs_set_debug_reg(void *data, u64 val) { struct clk_osm *c = data; + if (!c->pbases[ACD_BASE]) { + pr_err("ACD base start not defined\n"); + return -EINVAL; + } + if (c->acd_debugfs_addr >= ACD_MASTER_ONLY_REG_ADDR) clk_osm_acd_master_write_reg(c, val, c->acd_debugfs_addr); else @@ -2956,7 +3057,13 @@ static int debugfs_get_debug_reg_addr(void *data, u64 *val) { struct clk_osm *c = data; + if (!c->pbases[ACD_BASE]) { + pr_err("ACD base start not 
defined\n"); + return -EINVAL; + } + *val = c->acd_debugfs_addr; + return 0; } @@ -2964,7 +3071,16 @@ static int debugfs_set_debug_reg_addr(void *data, u64 val) { struct clk_osm *c = data; + if (!c->pbases[ACD_BASE]) { + pr_err("ACD base start not defined\n"); + return -EINVAL; + } + + if (val >= c->acd_debugfs_addr_size) + return -EINVAL; + c->acd_debugfs_addr = val; + return 0; } DEFINE_SIMPLE_ATTRIBUTE(debugfs_acd_debug_reg_addr_fops, @@ -3105,81 +3221,6 @@ static int clk_osm_panic_callback(struct notifier_block *nfb, return NOTIFY_OK; } -static int clk_osm_acd_init(struct clk_osm *c) -{ - - int rc = 0; - u32 auto_xfer_mask = 0; - - if (!c->acd_init) - return 0; - - c->acd_debugfs_addr = ACD_HW_VERSION; - - /* Program ACD tunable-length delay register */ - clk_osm_acd_master_write_reg(c, c->acd_td, ACDTD); - auto_xfer_mask |= ACD_REG_RELATIVE_ADDR_BITMASK(ACDTD); - - /* Program ACD control register */ - clk_osm_acd_master_write_reg(c, c->acd_cr, ACDCR); - auto_xfer_mask |= ACD_REG_RELATIVE_ADDR_BITMASK(ACDCR); - - /* Program ACD soft start control register */ - clk_osm_acd_master_write_reg(c, c->acd_sscr, ACDSSCR); - auto_xfer_mask |= ACD_REG_RELATIVE_ADDR_BITMASK(ACDSSCR); - - /* Program initial ACD external interface configuration register */ - clk_osm_acd_master_write_reg(c, c->acd_extint0_cfg, ACD_EXTINT_CFG); - auto_xfer_mask |= ACD_REG_RELATIVE_ADDR_BITMASK(ACD_EXTINT_CFG); - - /* Program ACD auto-register transfer control register */ - clk_osm_acd_master_write_reg(c, c->acd_autoxfer_ctl, ACD_AUTOXFER_CTL); - - /* Ensure writes complete before transfers to local copy */ - clk_osm_acd_mb(c); - - /* Transfer master copies */ - rc = clk_osm_acd_auto_local_write_reg(c, auto_xfer_mask); - if (rc) - return rc; - - /* Switch CPUSS clock source to ACD clock */ - rc = clk_osm_acd_master_write_through_reg(c, ACD_GFMUX_CFG_SELECT, - ACD_GFMUX_CFG); - if (rc) - return rc; - - /* Program ACD_DCVS_SW */ - rc = clk_osm_acd_master_write_through_reg(c, - ACD_DCVS_SW_DCVS_IN_PRGR_SET, - ACD_DCVS_SW); - if (rc) - return rc; - - rc = clk_osm_acd_master_write_through_reg(c, - ACD_DCVS_SW_DCVS_IN_PRGR_CLEAR, - ACD_DCVS_SW); - if (rc) - return rc; - - udelay(1); - - /* Program final ACD external interface configuration register */ - rc = clk_osm_acd_master_write_through_reg(c, c->acd_extint1_cfg, - ACD_EXTINT_CFG); - if (rc) - return rc; - - /* - * ACDCR, ACDTD, ACDSSCR, ACD_EXTINT_CFG, ACD_GFMUX_CFG - * must be copied from master to local copy on PC exit. 
- */ - auto_xfer_mask |= ACD_REG_RELATIVE_ADDR_BITMASK(ACD_GFMUX_CFG); - clk_osm_acd_master_write_reg(c, auto_xfer_mask, ACD_AUTOXFER_CFG); - - return 0; -} - static unsigned long init_rate = 300000000; static unsigned long osm_clk_init_rate = 200000000; @@ -3362,17 +3403,6 @@ static int cpu_clock_osm_driver_probe(struct platform_device *pdev) clk_osm_setup_cluster_pll(&perfcl_clk); } - rc = clk_osm_acd_init(&pwrcl_clk); - if (rc) { - pr_err("failed to initialize ACD for pwrcl, rc=%d\n", rc); - return rc; - } - rc = clk_osm_acd_init(&perfcl_clk); - if (rc) { - pr_err("failed to initialize ACD for perfcl, rc=%d\n", rc); - return rc; - } - spin_lock_init(&pwrcl_clk.lock); spin_lock_init(&perfcl_clk.lock); diff --git a/drivers/clk/msm/mdss/mdss-dsi-pll-8998.c b/drivers/clk/msm/mdss/mdss-dsi-pll-8998.c index eb69ed35f46d..040707e58e25 100644 --- a/drivers/clk/msm/mdss/mdss-dsi-pll-8998.c +++ b/drivers/clk/msm/mdss/mdss-dsi-pll-8998.c @@ -152,8 +152,6 @@ struct dsi_pll_regs { struct dsi_pll_config { u32 ref_freq; - bool div_override; - u32 output_div; bool ignore_frac; bool disable_prescaler; bool enable_ssc; @@ -212,7 +210,6 @@ static void dsi_pll_setup_config(struct dsi_pll_8998 *pll, struct dsi_pll_config *config = &pll->pll_configuration; config->ref_freq = 19200000; - config->output_div = 1; config->dec_bits = 8; config->frac_bits = 18; config->lock_timer = 64; @@ -222,7 +219,6 @@ static void dsi_pll_setup_config(struct dsi_pll_8998 *pll, config->thresh_cycles = 32; config->refclk_cycles = 256; - config->div_override = false; config->ignore_frac = false; config->disable_prescaler = false; config->enable_ssc = rsc->ssc_en; @@ -243,54 +239,14 @@ static void dsi_pll_calc_dec_frac(struct dsi_pll_8998 *pll, { struct dsi_pll_config *config = &pll->pll_configuration; struct dsi_pll_regs *regs = &pll->reg_setup; - u64 target_freq; u64 fref = rsc->vco_ref_clk_rate; - u32 computed_output_div, div_log = 0; u64 pll_freq; u64 divider; u64 dec, dec_multiple; u32 frac; u64 multiplier; - u32 i; - target_freq = rsc->vco_current_rate; - pr_debug("target_freq = %llu\n", target_freq); - - if (config->div_override) { - computed_output_div = config->output_div; - - /* - * Computed_output_div = 2 ^ div_log - * To get div_log from output div just get the index of the - * 1 bit in the value. - * div_log ranges from 0-3. 
so check the 4 lsbs - */ - - for (i = 0; i < 4; i++) { - if (computed_output_div & (1 << i)) { - div_log = i; - break; - } - } - - } else { - if (target_freq < MHZ_250) { - computed_output_div = 8; - div_log = 3; - } else if (target_freq < MHZ_500) { - computed_output_div = 4; - div_log = 2; - } else if (target_freq < MHZ_1000) { - computed_output_div = 2; - div_log = 1; - } else { - computed_output_div = 1; - div_log = 0; - } - } - pr_debug("computed_output_div = %d\n", computed_output_div); - - pll_freq = target_freq * computed_output_div; + pll_freq = rsc->vco_current_rate; if (config->disable_prescaler) divider = fref; @@ -315,7 +271,6 @@ static void dsi_pll_calc_dec_frac(struct dsi_pll_8998 *pll, else regs->pll_clock_inverters = 0; - regs->pll_outdiv_rate = div_log; regs->pll_lockdet_rate = config->lock_timer; regs->decimal_div_start = dec; regs->frac_div_start_low = (frac & 0xff); @@ -478,7 +433,6 @@ static void dsi_pll_commit(struct dsi_pll_8998 *pll, MDSS_PLL_REG_W(pll_base, PLL_FRAC_DIV_START_HIGH_1, reg->frac_div_start_high); MDSS_PLL_REG_W(pll_base, PLL_PLL_LOCKDET_RATE_1, 0x40); - MDSS_PLL_REG_W(pll_base, PLL_PLL_OUTDIV_RATE, reg->pll_outdiv_rate); MDSS_PLL_REG_W(pll_base, PLL_PLL_LOCK_DELAY, 0x06); MDSS_PLL_REG_W(pll_base, PLL_CMODE, 0x10); MDSS_PLL_REG_W(pll_base, PLL_CLOCK_INVERTERS, reg->pll_clock_inverters); @@ -597,11 +551,23 @@ static int dsi_pll_enable(struct dsi_pll_vco_clk *vco) { int rc; struct mdss_pll_resources *rsc = vco->priv; + struct dsi_pll_8998 *pll = rsc->priv; + struct dsi_pll_regs *regs = &pll->reg_setup; dsi_pll_enable_pll_bias(rsc); if (rsc->slave) dsi_pll_enable_pll_bias(rsc->slave); + /* + * The PLL out dividers are fixed divider clocks and hence the + * set_div is not called during set_rate cycle of the tree. + * The outdiv rate is therefore set in the pll out mux's set_sel + * callback. But that will be called only after vco's set rate. + * Hence PLL out div value is set here before locking the PLL. 
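The value cached in regs->pll_outdiv_rate and written to PLL_PLL_OUTDIV_RATE below is log2 of the output divider, which is why the pll_out mux selector values 0..3 map to divide-by 1, 2, 4 and 8. A tiny illustrative helper making that mapping explicit; outdiv_from_sel() is not part of the patch.

static unsigned int outdiv_from_sel(unsigned int sel)
{
	/* OUTDIV[1:0] holds log2(outdiv): 0 -> /1, 1 -> /2, 2 -> /4, 3 -> /8 */
	return 1U << (sel & 0x3);
}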
+ */ + MDSS_PLL_REG_W(rsc->pll_base, PLL_PLL_OUTDIV_RATE, + regs->pll_outdiv_rate); + /* Start PLL */ MDSS_PLL_REG_W(rsc->phy_base, PHY_CMN_PLL_CNTRL, 0x01); @@ -728,7 +694,9 @@ static int vco_8998_prepare(struct clk *c) static unsigned long dsi_pll_get_vco_rate(struct clk *c) { struct dsi_pll_vco_clk *vco = to_vco_clk(c); - struct mdss_pll_resources *pll = vco->priv; + struct mdss_pll_resources *rsc = vco->priv; + struct dsi_pll_8998 *pll = rsc->priv; + struct dsi_pll_regs *regs = &pll->reg_setup; int rc; u64 ref_clk = vco->ref_clk_rate; u64 vco_rate; @@ -738,27 +706,30 @@ static unsigned long dsi_pll_get_vco_rate(struct clk *c) u32 outdiv; u64 pll_freq, tmp64; - rc = mdss_pll_resource_enable(pll, true); + rc = mdss_pll_resource_enable(rsc, true); if (rc) { pr_err("failed to enable pll(%d) resource, rc=%d\n", - pll->index, rc); + rsc->index, rc); return 0; } - dec = MDSS_PLL_REG_R(pll->pll_base, PLL_DECIMAL_DIV_START_1); + dec = MDSS_PLL_REG_R(rsc->pll_base, PLL_DECIMAL_DIV_START_1); dec &= 0xFF; - frac = MDSS_PLL_REG_R(pll->pll_base, PLL_FRAC_DIV_START_LOW_1); - frac |= ((MDSS_PLL_REG_R(pll->pll_base, PLL_FRAC_DIV_START_MID_1) & + frac = MDSS_PLL_REG_R(rsc->pll_base, PLL_FRAC_DIV_START_LOW_1); + frac |= ((MDSS_PLL_REG_R(rsc->pll_base, PLL_FRAC_DIV_START_MID_1) & 0xFF) << 8); - frac |= ((MDSS_PLL_REG_R(pll->pll_base, PLL_FRAC_DIV_START_HIGH_1) & + frac |= ((MDSS_PLL_REG_R(rsc->pll_base, PLL_FRAC_DIV_START_HIGH_1) & 0x3) << 16); /* OUTDIV_1:0 field is (log(outdiv, 2)) */ - outdiv = MDSS_PLL_REG_R(pll->pll_base, PLL_PLL_OUTDIV_RATE); + outdiv = MDSS_PLL_REG_R(rsc->pll_base, PLL_PLL_OUTDIV_RATE); outdiv &= 0x3; + + regs->pll_outdiv_rate = outdiv; + outdiv = 1 << outdiv; /* @@ -776,7 +747,7 @@ static unsigned long dsi_pll_get_vco_rate(struct clk *c) pr_debug("dec=0x%x, frac=0x%x, outdiv=%d, vco=%llu\n", dec, frac, outdiv, vco_rate); - (void)mdss_pll_resource_enable(pll, false); + (void)mdss_pll_resource_enable(rsc, false); return (unsigned long)vco_rate; } @@ -930,6 +901,26 @@ static int bit_clk_set_div(struct div_clk *clk, int div) return rc; } +static int dsi_pll_out_set_mux_sel(struct mux_clk *clk, int sel) +{ + struct mdss_pll_resources *rsc = clk->priv; + struct dsi_pll_8998 *pll = rsc->priv; + struct dsi_pll_regs *regs = &pll->reg_setup; + + regs->pll_outdiv_rate = sel; + + return 0; +} + +static int dsi_pll_out_get_mux_sel(struct mux_clk *clk) +{ + struct mdss_pll_resources *rsc = clk->priv; + struct dsi_pll_8998 *pll = rsc->priv; + struct dsi_pll_regs *regs = &pll->reg_setup; + + return regs->pll_outdiv_rate; +} + static int post_vco_clk_get_div(struct div_clk *clk) { int rc; @@ -1125,52 +1116,75 @@ static struct clk_mux_ops mdss_mux_ops = { .get_mux_sel = mdss_get_mux_sel, }; +static struct clk_mux_ops mdss_pll_out_mux_ops = { + .set_mux_sel = dsi_pll_out_set_mux_sel, + .get_mux_sel = dsi_pll_out_get_mux_sel, +}; + /* * Clock tree for generating DSI byte and pixel clocks. 
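As a worked example against the clock tree drawn below (all numbers illustrative): with a 1.5 GHz VCO and the /2 leg of pll_out_mux selected, bitclk_src at div 1 yields a 750 MHz bit clock, and since byteclk_src is a fixed divide-by-8 the resulting dsi_byte_clk is 93.75 MHz. The helper example_byte_clk_hz() below only mirrors that arithmetic and is hypothetical.

static unsigned long example_byte_clk_hz(unsigned long vco_hz,
					 unsigned int pll_out_div,
					 unsigned int bitclk_div)
{
	unsigned long bit_clk_hz = vco_hz / pll_out_div / bitclk_div;

	return bit_clk_hz / 8;	/* byteclk_src is a fixed DIV(8) stage */
}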
* - * - * +---------------+ - * | vco_clk | - * +-------+-------+ - * | - * +----------------------+------------------+ - * | | | - * +-------v-------+ +-------v-------+ +-------v-------+ - * | bitclk_src | | post_vco_div1 | | post_vco_div4 | - * | DIV(1..15) | +-------+-------+ +-------+-------+ - * +-------+-------+ | | - * | +------------+ | - * +--------------------+ | | - * Shadow Path | | | | - * + +-------v-------+ +------v------+ +---v-----v------+ - * | | byteclk_src | |post_bit_div | \ post_vco_mux / - * | | DIV(8) | |DIV(1,2) | \ / - * | +-------+-------+ +------+------+ +---+------+ - * | | | | - * | | +------+ +----+ - * | +--------+ | | - * | | +----v-----v------+ - * +-v---------v----+ \ pclk_src_mux / - * \ byteclk_mux / \ / - * \ / +-----+-----+ - * +----+-----+ | Shadow Path - * | | + - * v +-----v------+ | - * dsi_byte_clk | pclk_src | | - * | DIV(1..15) | | - * +-----+------+ | - * | | - * | | - * +--------+ | - * | | - * +---v----v----+ - * \ pclk_mux / - * \ / - * +---+---+ - * | - * | - * v - * dsi_pclk + * +---------------+ + * | vco_clk | + * | | + * +-------+-------+ + * | + * | + * +-------+--------+------------------+-----------------+ + * | | | | + * +------v-------+ +------v-------+ +-------v------+ +------v-------+ + * | pll_out_div1 | | pll_out_div2 | | pll_out_div4 | | pll_out_div8 | + * | DIV(1) | | DIV(2) | | DIV(4) | | DIV(8) | + * +------+-------+ +------+-------+ +-------+------+ +------+-------+ + * | | | | + * +------------+ | +--------------+ | + * | | | +---------------------------+ + * | | | | + * +--v---v---v----v--+ + * \ pll_out_mux / + * \ / + * +------+-----+ + * | + * +---------------+-----------------+ + * | | | + * +------v-----+ +-------v-------+ +-------v-------+ + * | bitclk_src | | post_vco_div1 | | post_vco_div4 | + * | DIV(1..15) | + DIV(1) | | DIV(4) | + * +------+-----+ +-------+-------+ +-------+-------+ + * | | | + * Shadow | | +---------------------+ + * Path | +-----------------------------+ | + * + | | | + * | +---------------------------------+ | | + * | | | | | + * | +------v------=+ +------v-------+ +-v---------v----+ + * | | byteclk_src | | post_bit_div | \ post_vco_mux / + * | | DIV(8) | | DIV(1,2) | \ / + * | +------+-------+ +------+-------+ +---+------+ + * | | | | + * | | | +----------+ + * | | | | + * | | +----v-----v------+ + * +-v--------v---------+ \ pclk_src_mux / + * \ byteclk_mux / \ / + * \ / +-----+-----+ + * +------+-------+ | Shadow + * | | Path + * v +-----v------+ + + * dsi_byte_clk | pclk_src | | + * | DIV(1..15) | | + * +-----+------+ | + * | | + * +------+ | + * | | + * +---v----v----+ + * \ pclk_mux / + * \ / + * +---+---+ + * | + * | + * v + * dsi_pclk * */ @@ -1186,6 +1200,83 @@ static struct dsi_pll_vco_clk dsi0pll_vco_clk = { }, }; +static struct div_clk dsi0pll_pll_out_div1 = { + .data = { + .div = 1, + .min_div = 1, + .max_div = 1, + }, + .c = { + .parent = &dsi0pll_vco_clk.c, + .dbg_name = "dsi0pll_pll_out_div1", + .ops = &clk_ops_div, + .flags = CLKFLAG_NO_RATE_CACHE, + CLK_INIT(dsi0pll_pll_out_div1.c), + } +}; + +static struct div_clk dsi0pll_pll_out_div2 = { + .data = { + .div = 2, + .min_div = 2, + .max_div = 2, + }, + .c = { + .parent = &dsi0pll_vco_clk.c, + .dbg_name = "dsi0pll_pll_out_div2", + .ops = &clk_ops_div, + .flags = CLKFLAG_NO_RATE_CACHE, + CLK_INIT(dsi0pll_pll_out_div2.c), + } +}; + +static struct div_clk dsi0pll_pll_out_div4 = { + .data = { + .div = 4, + .min_div = 4, + .max_div = 4, + }, + .c = { + .parent = &dsi0pll_vco_clk.c, + .dbg_name = "dsi0pll_pll_out_div4", + .ops = 
&clk_ops_div, + .flags = CLKFLAG_NO_RATE_CACHE, + CLK_INIT(dsi0pll_pll_out_div4.c), + } +}; + +static struct div_clk dsi0pll_pll_out_div8 = { + .data = { + .div = 8, + .min_div = 8, + .max_div = 8, + }, + .c = { + .parent = &dsi0pll_vco_clk.c, + .dbg_name = "dsi0pll_pll_out_div8", + .ops = &clk_ops_div, + .flags = CLKFLAG_NO_RATE_CACHE, + CLK_INIT(dsi0pll_pll_out_div8.c), + } +}; + +static struct mux_clk dsi0pll_pll_out_mux = { + .num_parents = 4, + .parents = (struct clk_src[]) { + {&dsi0pll_pll_out_div1.c, 0}, + {&dsi0pll_pll_out_div2.c, 1}, + {&dsi0pll_pll_out_div4.c, 2}, + {&dsi0pll_pll_out_div8.c, 3}, + }, + .ops = &mdss_pll_out_mux_ops, + .c = { + .parent = &dsi0pll_pll_out_div1.c, + .dbg_name = "dsi0pll_pll_out_mux", + .ops = &clk_ops_gen_mux, + .flags = CLKFLAG_NO_RATE_CACHE, + CLK_INIT(dsi0pll_pll_out_mux.c), + } +}; static struct div_clk dsi0pll_bitclk_src = { .data = { .div = 1, @@ -1194,7 +1285,7 @@ static struct div_clk dsi0pll_bitclk_src = { }, .ops = &clk_bitclk_src_ops, .c = { - .parent = &dsi0pll_vco_clk.c, + .parent = &dsi0pll_pll_out_mux.c, .dbg_name = "dsi0pll_bitclk_src", .ops = &clk_ops_bitclk_src_c, .flags = CLKFLAG_NO_RATE_CACHE, @@ -1210,7 +1301,7 @@ static struct div_clk dsi0pll_post_vco_div1 = { }, .ops = &clk_post_vco_div_ops, .c = { - .parent = &dsi0pll_vco_clk.c, + .parent = &dsi0pll_pll_out_mux.c, .dbg_name = "dsi0pll_post_vco_div1", .ops = &clk_ops_post_vco_div_c, .flags = CLKFLAG_NO_RATE_CACHE, @@ -1226,7 +1317,7 @@ static struct div_clk dsi0pll_post_vco_div4 = { }, .ops = &clk_post_vco_div_ops, .c = { - .parent = &dsi0pll_vco_clk.c, + .parent = &dsi0pll_pll_out_mux.c, .dbg_name = "dsi0pll_post_vco_div4", .ops = &clk_ops_post_vco_div_c, .flags = CLKFLAG_NO_RATE_CACHE, @@ -1355,6 +1446,84 @@ static struct dsi_pll_vco_clk dsi1pll_vco_clk = { }, }; +static struct div_clk dsi1pll_pll_out_div1 = { + .data = { + .div = 1, + .min_div = 1, + .max_div = 1, + }, + .c = { + .parent = &dsi1pll_vco_clk.c, + .dbg_name = "dsi1pll_pll_out_div1", + .ops = &clk_ops_div, + .flags = CLKFLAG_NO_RATE_CACHE, + CLK_INIT(dsi1pll_pll_out_div1.c), + } +}; + +static struct div_clk dsi1pll_pll_out_div2 = { + .data = { + .div = 2, + .min_div = 2, + .max_div = 2, + }, + .c = { + .parent = &dsi1pll_vco_clk.c, + .dbg_name = "dsi1pll_pll_out_div2", + .ops = &clk_ops_div, + .flags = CLKFLAG_NO_RATE_CACHE, + CLK_INIT(dsi1pll_pll_out_div2.c), + } +}; + +static struct div_clk dsi1pll_pll_out_div4 = { + .data = { + .div = 4, + .min_div = 4, + .max_div = 4, + }, + .c = { + .parent = &dsi1pll_vco_clk.c, + .dbg_name = "dsi1pll_pll_out_div4", + .ops = &clk_ops_div, + .flags = CLKFLAG_NO_RATE_CACHE, + CLK_INIT(dsi1pll_pll_out_div4.c), + } +}; + +static struct div_clk dsi1pll_pll_out_div8 = { + .data = { + .div = 8, + .min_div = 8, + .max_div = 8, + }, + .c = { + .parent = &dsi1pll_vco_clk.c, + .dbg_name = "dsi1pll_pll_out_div8", + .ops = &clk_ops_div, + .flags = CLKFLAG_NO_RATE_CACHE, + CLK_INIT(dsi1pll_pll_out_div8.c), + } +}; + +static struct mux_clk dsi1pll_pll_out_mux = { + .num_parents = 4, + .parents = (struct clk_src[]) { + {&dsi1pll_pll_out_div1.c, 0}, + {&dsi1pll_pll_out_div2.c, 1}, + {&dsi1pll_pll_out_div4.c, 2}, + {&dsi1pll_pll_out_div8.c, 3}, + }, + .ops = &mdss_pll_out_mux_ops, + .c = { + .parent = &dsi1pll_pll_out_div1.c, + .dbg_name = "dsi1pll_pll_out_mux", + .ops = &clk_ops_gen_mux, + .flags = CLKFLAG_NO_RATE_CACHE, + CLK_INIT(dsi1pll_pll_out_mux.c), + } +}; + static struct div_clk dsi1pll_bitclk_src = { .data = { .div = 1, @@ -1363,7 +1532,7 @@ static struct div_clk 
dsi1pll_bitclk_src = { }, .ops = &clk_bitclk_src_ops, .c = { - .parent = &dsi1pll_vco_clk.c, + .parent = &dsi1pll_pll_out_mux.c, .dbg_name = "dsi1pll_bitclk_src", .ops = &clk_ops_bitclk_src_c, .flags = CLKFLAG_NO_RATE_CACHE, @@ -1379,7 +1548,7 @@ static struct div_clk dsi1pll_post_vco_div1 = { }, .ops = &clk_post_vco_div_ops, .c = { - .parent = &dsi1pll_vco_clk.c, + .parent = &dsi1pll_pll_out_mux.c, .dbg_name = "dsi1pll_post_vco_div1", .ops = &clk_ops_post_vco_div_c, .flags = CLKFLAG_NO_RATE_CACHE, @@ -1395,7 +1564,7 @@ static struct div_clk dsi1pll_post_vco_div4 = { }, .ops = &clk_post_vco_div_ops, .c = { - .parent = &dsi1pll_vco_clk.c, + .parent = &dsi1pll_pll_out_mux.c, .dbg_name = "dsi1pll_post_vco_div4", .ops = &clk_ops_post_vco_div_c, .flags = CLKFLAG_NO_RATE_CACHE, @@ -1523,6 +1692,11 @@ static struct clk_lookup mdss_dsi_pll0cc_8998[] = { CLK_LIST(dsi0pll_post_vco_div1), CLK_LIST(dsi0pll_post_vco_div4), CLK_LIST(dsi0pll_bitclk_src), + CLK_LIST(dsi0pll_pll_out_mux), + CLK_LIST(dsi0pll_pll_out_div8), + CLK_LIST(dsi0pll_pll_out_div4), + CLK_LIST(dsi0pll_pll_out_div2), + CLK_LIST(dsi0pll_pll_out_div1), CLK_LIST(dsi0pll_vco_clk), }; static struct clk_lookup mdss_dsi_pll1cc_8998[] = { @@ -1536,6 +1710,11 @@ static struct clk_lookup mdss_dsi_pll1cc_8998[] = { CLK_LIST(dsi1pll_post_vco_div1), CLK_LIST(dsi1pll_post_vco_div4), CLK_LIST(dsi1pll_bitclk_src), + CLK_LIST(dsi1pll_pll_out_mux), + CLK_LIST(dsi1pll_pll_out_div8), + CLK_LIST(dsi1pll_pll_out_div4), + CLK_LIST(dsi1pll_pll_out_div2), + CLK_LIST(dsi1pll_pll_out_div1), CLK_LIST(dsi1pll_vco_clk), }; @@ -1596,6 +1775,11 @@ int dsi_pll_clock_register_8998(struct platform_device *pdev, dsi0pll_post_vco_div1.priv = pll_res; dsi0pll_post_vco_div4.priv = pll_res; dsi0pll_bitclk_src.priv = pll_res; + dsi0pll_pll_out_div1.priv = pll_res; + dsi0pll_pll_out_div2.priv = pll_res; + dsi0pll_pll_out_div4.priv = pll_res; + dsi0pll_pll_out_div8.priv = pll_res; + dsi0pll_pll_out_mux.priv = pll_res; dsi0pll_vco_clk.priv = pll_res; rc = of_msm_clock_register(pdev->dev.of_node, @@ -1612,6 +1796,11 @@ int dsi_pll_clock_register_8998(struct platform_device *pdev, dsi1pll_post_vco_div1.priv = pll_res; dsi1pll_post_vco_div4.priv = pll_res; dsi1pll_bitclk_src.priv = pll_res; + dsi1pll_pll_out_div1.priv = pll_res; + dsi1pll_pll_out_div2.priv = pll_res; + dsi1pll_pll_out_div4.priv = pll_res; + dsi1pll_pll_out_div8.priv = pll_res; + dsi1pll_pll_out_mux.priv = pll_res; dsi1pll_vco_clk.priv = pll_res; rc = of_msm_clock_register(pdev->dev.of_node, diff --git a/drivers/clk/qcom/Makefile b/drivers/clk/qcom/Makefile index 4c18181c047c..d3e88f40bdfd 100644 --- a/drivers/clk/qcom/Makefile +++ b/drivers/clk/qcom/Makefile @@ -9,6 +9,7 @@ clk-qcom-y += clk-rcg2.o clk-qcom-y += clk-branch.o clk-qcom-y += clk-regmap-divider.o clk-qcom-y += clk-regmap-mux.o +clk-qcom-y += clk-regmap-mux-div.o clk-qcom-$(CONFIG_KRAIT_CLOCKS) += clk-krait.o clk-qcom-y += clk-hfpll.o clk-qcom-y += reset.o clk-voter.o diff --git a/drivers/clk/qcom/clk-cpu-osm.c b/drivers/clk/qcom/clk-cpu-osm.c index 8bf45f572c5e..ddaeca1b29e4 100644 --- a/drivers/clk/qcom/clk-cpu-osm.c +++ b/drivers/clk/qcom/clk-cpu-osm.c @@ -384,6 +384,7 @@ struct clk_osm { u32 acd_extint1_cfg; u32 acd_autoxfer_ctl; u32 acd_debugfs_addr; + u32 acd_debugfs_addr_size; bool acd_init; bool secure_init; bool red_fsm_en; @@ -719,9 +720,22 @@ static int clk_osm_set_rate(struct clk_hw *hw, unsigned long rate, return 0; } +static int clk_osm_acd_init(struct clk_osm *c); + static int clk_osm_enable(struct clk_hw *hw) { struct clk_osm *cpuclk = 
to_clk_osm(hw); + int rc; + + rc = clk_osm_acd_init(cpuclk); + if (rc) { + pr_err("Failed to initialize ACD for cluster %d, rc=%d\n", + cpuclk->cluster_num, rc); + return rc; + } + + /* Wait for 5 usecs before enabling OSM */ + udelay(5); clk_osm_write_reg(cpuclk, 1, ENABLE_REG); @@ -1358,6 +1372,7 @@ static int clk_osm_resources_init(struct platform_device *pdev) return -ENOMEM; } pwrcl_clk.pbases[ACD_BASE] = pbase; + pwrcl_clk.acd_debugfs_addr_size = resource_size(res); pwrcl_clk.vbases[ACD_BASE] = vbase; pwrcl_clk.acd_init = true; } else { @@ -1375,6 +1390,7 @@ static int clk_osm_resources_init(struct platform_device *pdev) return -ENOMEM; } perfcl_clk.pbases[ACD_BASE] = pbase; + perfcl_clk.acd_debugfs_addr_size = resource_size(res); perfcl_clk.vbases[ACD_BASE] = vbase; perfcl_clk.acd_init = true; } else { @@ -2819,6 +2835,11 @@ static int debugfs_get_debug_reg(void *data, u64 *val) { struct clk_osm *c = data; + if (!c->pbases[ACD_BASE]) { + pr_err("ACD base start not defined\n"); + return -EINVAL; + } + if (c->acd_debugfs_addr >= ACD_MASTER_ONLY_REG_ADDR) *val = readl_relaxed((char *)c->vbases[ACD_BASE] + c->acd_debugfs_addr); @@ -2831,6 +2852,11 @@ static int debugfs_set_debug_reg(void *data, u64 val) { struct clk_osm *c = data; + if (!c->pbases[ACD_BASE]) { + pr_err("ACD base start not defined\n"); + return -EINVAL; + } + if (c->acd_debugfs_addr >= ACD_MASTER_ONLY_REG_ADDR) clk_osm_acd_master_write_reg(c, val, c->acd_debugfs_addr); else @@ -2848,7 +2874,13 @@ static int debugfs_get_debug_reg_addr(void *data, u64 *val) { struct clk_osm *c = data; + if (!c->pbases[ACD_BASE]) { + pr_err("ACD base start not defined\n"); + return -EINVAL; + } + *val = c->acd_debugfs_addr; + return 0; } @@ -2856,6 +2888,14 @@ static int debugfs_set_debug_reg_addr(void *data, u64 val) { struct clk_osm *c = data; + if (!c->pbases[ACD_BASE]) { + pr_err("ACD base start not defined\n"); + return -EINVAL; + } + + if (val >= c->acd_debugfs_addr_size) + return -EINVAL; + c->acd_debugfs_addr = val; return 0; } @@ -3272,17 +3312,6 @@ static int clk_cpu_osm_driver_probe(struct platform_device *pdev) clk_osm_setup_cluster_pll(&perfcl_clk); } - rc = clk_osm_acd_init(&pwrcl_clk); - if (rc) { - pr_err("failed to initialize ACD for pwrcl, rc=%d\n", rc); - return rc; - } - rc = clk_osm_acd_init(&perfcl_clk); - if (rc) { - pr_err("failed to initialize ACD for perfcl, rc=%d\n", rc); - return rc; - } - spin_lock_init(&pwrcl_clk.lock); spin_lock_init(&perfcl_clk.lock); diff --git a/drivers/clk/qcom/clk-regmap-mux-div.c b/drivers/clk/qcom/clk-regmap-mux-div.c new file mode 100644 index 000000000000..942a68e2a650 --- /dev/null +++ b/drivers/clk/qcom/clk-regmap-mux-div.c @@ -0,0 +1,263 @@ +/* + * Copyright (c) 2015, Linaro Limited + * Copyright (c) 2014, 2017, The Linux Foundation. All rights reserved. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
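Both clk_osm_enable() hunks above (clock-osm.c and this clk-cpu-osm.c copy) move ACD programming out of probe and into the enable path of each cluster clock; clk_osm_acd_init() returns immediately when acd_init is not set, and the 5 usec delay lets the ACD settle before the OSM enable bit is written. An illustrative restatement of that ordering, assuming the driver-internal helpers shown in these hunks (example_osm_enable() is not literal driver code):

static int example_osm_enable(struct clk_osm *cpuclk)
{
	int rc;

	rc = clk_osm_acd_init(cpuclk);	/* no-op when acd_init is not set */
	if (rc)
		return rc;

	udelay(5);			/* let ACD settle before OSM enable */
	clk_osm_write_reg(cpuclk, 1, ENABLE_REG);

	return 0;
}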
+ */ + +#include +#include +#include +#include +#include + +#include "clk-regmap-mux-div.h" + +#define CMD_RCGR 0x0 +#define CMD_RCGR_UPDATE BIT(0) +#define CMD_RCGR_DIRTY_CFG BIT(4) +#define CMD_RCGR_ROOT_OFF BIT(31) +#define CFG_RCGR 0x4 + +#define to_clk_regmap_mux_div(_hw) \ + container_of(to_clk_regmap(_hw), struct clk_regmap_mux_div, clkr) + +int __mux_div_set_src_div(struct clk_regmap_mux_div *md, u32 src, u32 div) +{ + int ret, count; + u32 val, mask; + const char *name = clk_hw_get_name(&md->clkr.hw); + + val = (div << md->hid_shift) | (src << md->src_shift); + mask = ((BIT(md->hid_width) - 1) << md->hid_shift) | + ((BIT(md->src_width) - 1) << md->src_shift); + + ret = regmap_update_bits(md->clkr.regmap, CFG_RCGR + md->reg_offset, + mask, val); + if (ret) + return ret; + + ret = regmap_update_bits(md->clkr.regmap, CMD_RCGR + md->reg_offset, + CMD_RCGR_UPDATE, CMD_RCGR_UPDATE); + if (ret) + return ret; + + /* Wait for update to take effect */ + for (count = 500; count > 0; count--) { + ret = regmap_read(md->clkr.regmap, CMD_RCGR + md->reg_offset, + &val); + if (ret) + return ret; + if (!(val & CMD_RCGR_UPDATE)) + return 0; + udelay(1); + } + + pr_err("%s: RCG did not update its configuration", name); + return -EBUSY; +} + +int mux_div_get_src_div(struct clk_regmap_mux_div *md, u32 *src, + u32 *div) +{ + int ret = 0; + u32 val, __div, __src; + const char *name = clk_hw_get_name(&md->clkr.hw); + + ret = regmap_read(md->clkr.regmap, CMD_RCGR + md->reg_offset, &val); + if (ret) + return ret; + + if (val & CMD_RCGR_DIRTY_CFG) { + pr_err("%s: RCG configuration is pending\n", name); + return -EBUSY; + } + + ret = regmap_read(md->clkr.regmap, CFG_RCGR + md->reg_offset, &val); + if (ret) + return ret; + + __src = (val >> md->src_shift); + __src &= BIT(md->src_width) - 1; + *src = __src; + + __div = (val >> md->hid_shift); + __div &= BIT(md->hid_width) - 1; + *div = __div; + + return ret; +} + +static int mux_div_enable(struct clk_hw *hw) +{ + struct clk_regmap_mux_div *md = to_clk_regmap_mux_div(hw); + + return __mux_div_set_src_div(md, md->src, md->div); +} + +static inline bool is_better_rate(unsigned long req, unsigned long best, + unsigned long new) +{ + return (req <= new && new < best) || (best < req && best < new); +} + +static int mux_div_determine_rate(struct clk_hw *hw, + struct clk_rate_request *req) +{ + struct clk_regmap_mux_div *md = to_clk_regmap_mux_div(hw); + unsigned int i, div, max_div; + unsigned long actual_rate, best_rate = 0; + unsigned long req_rate = req->rate; + + for (i = 0; i < clk_hw_get_num_parents(hw); i++) { + struct clk_hw *parent = clk_hw_get_parent_by_index(hw, i); + unsigned long parent_rate = clk_hw_get_rate(parent); + + max_div = BIT(md->hid_width) - 1; + for (div = 1; div < max_div; div++) { + parent_rate = mult_frac(req_rate, div, 2); + parent_rate = clk_hw_round_rate(parent, parent_rate); + actual_rate = mult_frac(parent_rate, 2, div); + + if (is_better_rate(req_rate, best_rate, actual_rate)) { + best_rate = actual_rate; + req->rate = best_rate; + req->best_parent_rate = parent_rate; + req->best_parent_hw = parent; + } + + if (actual_rate < req_rate || best_rate <= req_rate) + break; + } + } + + if (!best_rate) + return -EINVAL; + + return 0; +} + +static int __mux_div_set_rate_and_parent(struct clk_hw *hw, unsigned long rate, + unsigned long prate, u32 src) +{ + struct clk_regmap_mux_div *md = to_clk_regmap_mux_div(hw); + int ret; + u32 div, max_div, best_src = 0, best_div = 0; + unsigned int i; + unsigned long actual_rate, best_rate = 0; + + for (i 
= 0; i < clk_hw_get_num_parents(hw); i++) { + struct clk_hw *parent = clk_hw_get_parent_by_index(hw, i); + unsigned long parent_rate = clk_hw_get_rate(parent); + + max_div = BIT(md->hid_width) - 1; + for (div = 1; div < max_div; div++) { + parent_rate = mult_frac(rate, div, 2); + parent_rate = clk_hw_round_rate(parent, parent_rate); + actual_rate = mult_frac(parent_rate, 2, div); + + if (is_better_rate(rate, best_rate, actual_rate)) { + best_rate = actual_rate; + best_src = md->parent_map[i].cfg; + best_div = div - 1; + } + + if (actual_rate < rate || best_rate <= rate) + break; + } + } + + ret = __mux_div_set_src_div(md, best_src, best_div); + if (!ret) { + md->div = best_div; + md->src = best_src; + } + + return ret; +} + +static u8 mux_div_get_parent(struct clk_hw *hw) +{ + struct clk_regmap_mux_div *md = to_clk_regmap_mux_div(hw); + const char *name = clk_hw_get_name(hw); + u32 i, div, src = 0; + + mux_div_get_src_div(md, &src, &div); + + for (i = 0; i < clk_hw_get_num_parents(hw); i++) + if (src == md->parent_map[i].cfg) + return i; + + pr_err("%s: Can't find parent with src %d\n", name, src); + return 0; +} + +static int mux_div_set_parent(struct clk_hw *hw, u8 index) +{ + struct clk_regmap_mux_div *md = to_clk_regmap_mux_div(hw); + + return __mux_div_set_src_div(md, md->parent_map[index].cfg, md->div); +} + +static int mux_div_set_rate(struct clk_hw *hw, + unsigned long rate, unsigned long prate) +{ + struct clk_regmap_mux_div *md = to_clk_regmap_mux_div(hw); + + return __mux_div_set_rate_and_parent(hw, rate, prate, md->src); +} + +static int mux_div_set_rate_and_parent(struct clk_hw *hw, unsigned long rate, + unsigned long prate, u8 index) +{ + struct clk_regmap_mux_div *md = to_clk_regmap_mux_div(hw); + + return __mux_div_set_rate_and_parent(hw, rate, prate, + md->parent_map[index].cfg); +} + +static unsigned long mux_div_recalc_rate(struct clk_hw *hw, unsigned long prate) +{ + struct clk_regmap_mux_div *md = to_clk_regmap_mux_div(hw); + u32 div, src; + int i, num_parents = clk_hw_get_num_parents(hw); + const char *name = clk_hw_get_name(hw); + + mux_div_get_src_div(md, &src, &div); + for (i = 0; i < num_parents; i++) + if (src == md->parent_map[i].cfg) { + struct clk_hw *p = clk_hw_get_parent_by_index(hw, i); + unsigned long parent_rate = clk_hw_get_rate(p); + + return mult_frac(parent_rate, 2, div + 1); + } + + pr_err("%s: Can't find parent %d\n", name, src); + return 0; +} + +static void mux_div_disable(struct clk_hw *hw) +{ + struct clk_regmap_mux_div *md = to_clk_regmap_mux_div(hw); + + __mux_div_set_src_div(md, md->safe_src, md->safe_div); +} + +const struct clk_ops clk_regmap_mux_div_ops = { + .enable = mux_div_enable, + .disable = mux_div_disable, + .get_parent = mux_div_get_parent, + .set_parent = mux_div_set_parent, + .set_rate = mux_div_set_rate, + .set_rate_and_parent = mux_div_set_rate_and_parent, + .determine_rate = mux_div_determine_rate, + .recalc_rate = mux_div_recalc_rate, +}; +EXPORT_SYMBOL_GPL(clk_regmap_mux_div_ops); diff --git a/drivers/clk/qcom/clk-regmap-mux-div.h b/drivers/clk/qcom/clk-regmap-mux-div.h new file mode 100644 index 000000000000..63a696a96033 --- /dev/null +++ b/drivers/clk/qcom/clk-regmap-mux-div.h @@ -0,0 +1,66 @@ +/* + * Copyright (c) 2015, Linaro Limited + * Copyright (c) 2014, 2017, The Linux Foundation. All rights reserved. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef __QCOM_CLK_REGMAP_MUX_DIV_H__ +#define __QCOM_CLK_REGMAP_MUX_DIV_H__ + +#include +#include "clk-rcg.h" +#include "clk-regmap.h" + +/** + * struct mux_div_clk - combined mux/divider clock + * @reg_offset: offset of the mux/divider register + * @hid_width: number of bits in half integer divider + * @hid_shift: lowest bit of hid value field + * @src_width: number of bits in source select + * @src_shift: lowest bit of source select field + * @div: the divider raw configuration value + * @src: the mux index which will be used if the clock is enabled + * @safe_src: the safe source mux value we switch to, while the main PLL is + * reconfigured + * @safe_div: the safe divider value that we set, while the main PLL is + * reconfigured + * @safe_freq: When switching rates from A to B, the mux div clock will + * instead switch from A -> safe_freq -> B. This allows the + * mux_div clock to change rates while enabled, even if this + * behavior is not supported by the parent clocks. + * If changing the rate of parent A also causes the rate of + * parent B to change, then safe_freq must be defined. + * safe_freq is expected to have a source clock which is always + * on and runs at only one rate. + * @parent_map: pointer to parent_map struct + * @clkr: handle between common and hardware-specific interfaces + */ + +struct clk_regmap_mux_div { + u32 reg_offset; + u32 hid_width; + u32 hid_shift; + u32 src_width; + u32 src_shift; + u32 div; + u32 src; + u32 safe_src; + u32 safe_div; + unsigned long safe_freq; + const struct parent_map *parent_map; + struct clk_regmap clkr; +}; + +extern const struct clk_ops clk_regmap_mux_div_ops; +int __mux_div_set_src_div(struct clk_regmap_mux_div *md, u32 src, u32 div); +int mux_div_get_src_div(struct clk_regmap_mux_div *md, u32 *src, u32 *div); + +#endif diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig index 3b2f46bacd77..1dfd1765319b 100644 --- a/drivers/cpufreq/Kconfig +++ b/drivers/cpufreq/Kconfig @@ -120,6 +120,15 @@ config CPU_FREQ_DEFAULT_GOV_SCHED cpu frequency using CPU utilization estimates from the scheduler. +config CPU_FREQ_DEFAULT_GOV_SCHEDUTIL + bool "schedutil" + depends on SMP + select CPU_FREQ_GOV_SCHEDUTIL + select CPU_FREQ_GOV_PERFORMANCE + help + Use the 'schedutil' CPUFreq governor by default. If unsure, + have a look at the help section of that governor. The fallback + governor will be 'performance'. endchoice config CPU_FREQ_GOV_PERFORMANCE @@ -239,6 +248,23 @@ config CPU_FREQ_GOV_SCHED If in doubt, say N. +config CPU_FREQ_GOV_SCHEDUTIL + bool "'schedutil' cpufreq policy governor" + depends on CPU_FREQ && SMP + select CPU_FREQ_GOV_ATTR_SET + select IRQ_WORK + help + This governor makes decisions based on the utilization data provided + by the scheduler. It sets the CPU frequency to be proportional to + the utilization/capacity ratio coming from the scheduler. If the + utilization is frequency-invariant, the new frequency is also + proportional to the maximum available frequency. If that is not the + case, it is proportional to the current frequency of the CPU. The + frequency tipping point is at utilization/capacity equal to 80% in + both cases. + + If in doubt, say N. 
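For reference, the help text above describes the governor's policy only in words. The following is a minimal, illustrative C sketch — not code from this patch and independent of the kernel sources, with all names hypothetical — showing why a "tipping point at utilization/capacity equal to 80%" is the same thing as scaling the reference frequency by 1.25:

#include <stdio.h>

/*
 * Illustrative sketch of the proportional mapping described above:
 * next_freq = 1.25 * ref_freq * util / max.  At util == 0.8 * max this
 * returns ref_freq, which is the 80% tipping point from the help text.
 */
static unsigned long next_freq(unsigned long ref_freq, unsigned long util,
                               unsigned long max)
{
        ref_freq += ref_freq >> 2;              /* multiply by 1.25 */
        return ref_freq * util / max;
}

int main(void)
{
        /* ref_freq in kHz, util/max in capacity units (hypothetical values) */
        printf("%lu\n", next_freq(2000000, 819, 1024)); /* ~80% util -> ~2 GHz   */
        printf("%lu\n", next_freq(2000000, 512, 1024)); /*  50% util -> 1.25 GHz */
        return 0;
}

As the help text notes, when the utilization signal is frequency-invariant the reference frequency corresponds to the policy's maximum frequency; otherwise it tracks the CPU's current frequency.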
+ comment "CPU frequency scaling drivers" config CPUFREQ_DT diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile index 4a2f914e0752..6d4a7aeb506d 100644 --- a/drivers/cpufreq/Makefile +++ b/drivers/cpufreq/Makefile @@ -1,5 +1,5 @@ # CPUfreq core -obj-$(CONFIG_CPU_FREQ) += cpufreq.o freq_table.o +obj-$(CONFIG_CPU_FREQ) += cpufreq.o freq_table.o cpufreq_governor_attr_set.o # CPUfreq stats obj-$(CONFIG_CPU_FREQ_STAT) += cpufreq_stats.o diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index a0dba9beac05..5f0f983ce173 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -541,6 +541,38 @@ void cpufreq_freq_transition_end(struct cpufreq_policy *policy, } EXPORT_SYMBOL_GPL(cpufreq_freq_transition_end); +/** + * cpufreq_driver_resolve_freq - Map a target frequency to a driver-supported + * one. + * @target_freq: target frequency to resolve. + * + * The target to driver frequency mapping is cached in the policy. + * + * Return: Lowest driver-supported frequency greater than or equal to the + * given target_freq, subject to policy (min/max) and driver limitations. + */ +unsigned int cpufreq_driver_resolve_freq(struct cpufreq_policy *policy, + unsigned int target_freq) +{ + target_freq = clamp_val(target_freq, policy->min, policy->max); + policy->cached_target_freq = target_freq; + + if (cpufreq_driver->target_index) { + int idx, rv; + + rv = cpufreq_frequency_table_target(policy, policy->freq_table, + target_freq, + CPUFREQ_RELATION_L, + &idx); + if (rv) + return target_freq; + policy->cached_resolved_idx = idx; + return policy->freq_table[idx].frequency; + } + + return target_freq; +} +EXPORT_SYMBOL_GPL(cpufreq_driver_resolve_freq); /********************************************************************* * SYSFS INTERFACE * @@ -2536,6 +2568,7 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data) if (!(cpufreq_driver->flags & CPUFREQ_STICKY) && list_empty(&cpufreq_policy_list)) { /* if all ->init() calls failed, unregister */ + ret = -ENODEV; pr_debug("%s: No CPU initialized for driver %s\n", __func__, driver_data->name); goto err_if_unreg; diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c index 1fa1deb6e91f..c395f9198fd2 100644 --- a/drivers/cpufreq/cpufreq_conservative.c +++ b/drivers/cpufreq/cpufreq_conservative.c @@ -212,8 +212,8 @@ static ssize_t store_down_threshold(struct dbs_data *dbs_data, const char *buf, int ret; ret = sscanf(buf, "%u", &input); - /* cannot be lower than 11 otherwise freq will not fall */ - if (ret != 1 || input < 11 || input > 100 || + /* cannot be lower than 1 otherwise freq will not fall */ + if (ret != 1 || input < 1 || input > 100 || input >= cs_tuners->up_threshold) return -EINVAL; diff --git a/drivers/cpufreq/cpufreq_governor_attr_set.c b/drivers/cpufreq/cpufreq_governor_attr_set.c new file mode 100644 index 000000000000..52841f807a7e --- /dev/null +++ b/drivers/cpufreq/cpufreq_governor_attr_set.c @@ -0,0 +1,84 @@ +/* + * Abstract code for CPUFreq governor tunable sysfs attributes. + * + * Copyright (C) 2016, Intel Corporation + * Author: Rafael J. Wysocki + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include "cpufreq_governor.h" + +static inline struct gov_attr_set *to_gov_attr_set(struct kobject *kobj) +{ + return container_of(kobj, struct gov_attr_set, kobj); +} + +static inline struct governor_attr *to_gov_attr(struct attribute *attr) +{ + return container_of(attr, struct governor_attr, attr); +} + +static ssize_t governor_show(struct kobject *kobj, struct attribute *attr, + char *buf) +{ + struct governor_attr *gattr = to_gov_attr(attr); + + return gattr->show(to_gov_attr_set(kobj), buf); +} + +static ssize_t governor_store(struct kobject *kobj, struct attribute *attr, + const char *buf, size_t count) +{ + struct gov_attr_set *attr_set = to_gov_attr_set(kobj); + struct governor_attr *gattr = to_gov_attr(attr); + int ret; + + mutex_lock(&attr_set->update_lock); + ret = attr_set->usage_count ? gattr->store(attr_set, buf, count) : -EBUSY; + mutex_unlock(&attr_set->update_lock); + return ret; +} + +const struct sysfs_ops governor_sysfs_ops = { + .show = governor_show, + .store = governor_store, +}; +EXPORT_SYMBOL_GPL(governor_sysfs_ops); + +void gov_attr_set_init(struct gov_attr_set *attr_set, struct list_head *list_node) +{ + INIT_LIST_HEAD(&attr_set->policy_list); + mutex_init(&attr_set->update_lock); + attr_set->usage_count = 1; + list_add(list_node, &attr_set->policy_list); +} +EXPORT_SYMBOL_GPL(gov_attr_set_init); + +void gov_attr_set_get(struct gov_attr_set *attr_set, struct list_head *list_node) +{ + mutex_lock(&attr_set->update_lock); + attr_set->usage_count++; + list_add(list_node, &attr_set->policy_list); + mutex_unlock(&attr_set->update_lock); +} +EXPORT_SYMBOL_GPL(gov_attr_set_get); + +unsigned int gov_attr_set_put(struct gov_attr_set *attr_set, struct list_head *list_node) +{ + unsigned int count; + + mutex_lock(&attr_set->update_lock); + list_del(list_node); + count = --attr_set->usage_count; + mutex_unlock(&attr_set->update_lock); + if (count) + return count; + + kobject_put(&attr_set->kobj); + mutex_destroy(&attr_set->update_lock); + return 0; +} +EXPORT_SYMBOL_GPL(gov_attr_set_put); diff --git a/drivers/cpufreq/cpufreq_interactive.c b/drivers/cpufreq/cpufreq_interactive.c index b91e115462ae..abbee61c99c8 100644 --- a/drivers/cpufreq/cpufreq_interactive.c +++ b/drivers/cpufreq/cpufreq_interactive.c @@ -479,6 +479,7 @@ static void cpufreq_interactive_timer(unsigned long data) bool skip_hispeed_logic, skip_min_sample_time; bool jump_to_max_no_ts = false; bool jump_to_max = false; + bool start_hyst = true; if (!down_read_trylock(&ppol->enable_sem)) return; @@ -588,8 +589,12 @@ static void cpufreq_interactive_timer(unsigned long data) } if (now - ppol->max_freq_hyst_start_time < - tunables->max_freq_hysteresis) + tunables->max_freq_hysteresis) { + if (new_freq < ppol->policy->max && + ppol->policy->max <= tunables->hispeed_freq) + start_hyst = false; new_freq = max(tunables->hispeed_freq, new_freq); + } if (!skip_hispeed_logic && ppol->target_freq >= tunables->hispeed_freq && @@ -646,7 +651,7 @@ static void cpufreq_interactive_timer(unsigned long data) ppol->floor_validate_time = now; } - if (new_freq >= ppol->policy->max && !jump_to_max_no_ts) + if (start_hyst && new_freq >= ppol->policy->max && !jump_to_max_no_ts) ppol->max_freq_hyst_start_time = now; if (ppol->target_freq == new_freq && diff --git a/drivers/cpufreq/s3c2416-cpufreq.c b/drivers/cpufreq/s3c2416-cpufreq.c index d6d425773fa4..5b2db3c6568f 100644 --- a/drivers/cpufreq/s3c2416-cpufreq.c +++ b/drivers/cpufreq/s3c2416-cpufreq.c @@ -400,7 +400,6 @@ static int s3c2416_cpufreq_driver_init(struct 
cpufreq_policy *policy) rate = clk_get_rate(s3c_freq->hclk); if (rate < 133 * 1000 * 1000) { pr_err("cpufreq: HCLK not at 133MHz\n"); - clk_put(s3c_freq->hclk); ret = -EINVAL; goto err_armclk; } diff --git a/drivers/cpuidle/lpm-levels.c b/drivers/cpuidle/lpm-levels.c index b69e59eeeae1..584a1857624a 100644 --- a/drivers/cpuidle/lpm-levels.c +++ b/drivers/cpuidle/lpm-levels.c @@ -694,22 +694,21 @@ static int cpu_power_select(struct cpuidle_device *dev, int best_level = -1; uint32_t latency_us = pm_qos_request_for_cpu(PM_QOS_CPU_DMA_LATENCY, dev->cpu); - uint32_t sleep_us = - (uint32_t)(ktime_to_us(tick_nohz_get_sleep_length())); + s64 sleep_us = ktime_to_us(tick_nohz_get_sleep_length()); uint32_t modified_time_us = 0; uint32_t next_event_us = 0; int i, idx_restrict; uint32_t lvl_latency_us = 0; uint64_t predicted = 0; uint32_t htime = 0, idx_restrict_time = 0; - uint32_t next_wakeup_us = sleep_us; + uint32_t next_wakeup_us = (uint32_t)sleep_us; uint32_t *min_residency = get_per_cpu_min_residency(dev->cpu); uint32_t *max_residency = get_per_cpu_max_residency(dev->cpu); if (!cpu) return -EINVAL; - if (sleep_disabled && !cpu_isolated(dev->cpu)) + if ((sleep_disabled && !cpu_isolated(dev->cpu)) || sleep_us < 0) return 0; idx_restrict = cpu->nlevels + 1; @@ -750,8 +749,8 @@ static int cpu_power_select(struct cpuidle_device *dev, if (next_wakeup_us > max_residency[i]) { predicted = lpm_cpuidle_predict(dev, cpu, &idx_restrict, &idx_restrict_time); - if (predicted < min_residency[i]) - predicted = 0; + if (predicted && (predicted < min_residency[i])) + predicted = min_residency[i]; } else invalidate_predict_history(dev); } @@ -1119,10 +1118,14 @@ static int cluster_configure(struct lpm_cluster *cluster, int idx, bool from_idle, int predicted) { struct lpm_cluster_level *level = &cluster->levels[idx]; + struct cpumask online_cpus; int ret, i; + cpumask_and(&online_cpus, &cluster->num_children_in_sync, + cpu_online_mask); + if (!cpumask_equal(&cluster->num_children_in_sync, &cluster->child_cpus) - || is_IPI_pending(&cluster->num_children_in_sync)) { + || is_IPI_pending(&online_cpus)) { return -EPERM; } diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c index 0dadb6332f0e..7abe908427df 100644 --- a/drivers/crypto/atmel-sha.c +++ b/drivers/crypto/atmel-sha.c @@ -963,7 +963,9 @@ static int atmel_sha_finup(struct ahash_request *req) ctx->flags |= SHA_FLAGS_FINUP; err1 = atmel_sha_update(req); - if (err1 == -EINPROGRESS || err1 == -EBUSY) + if (err1 == -EINPROGRESS || + (err1 == -EBUSY && (ahash_request_flags(req) & + CRYPTO_TFM_REQ_MAY_BACKLOG))) return err1; /* diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c index 99d5e11db194..e06cc5df30be 100644 --- a/drivers/crypto/caam/caamhash.c +++ b/drivers/crypto/caam/caamhash.c @@ -498,7 +498,7 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in, ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result); if (!ret) { /* in progress */ - wait_for_completion_interruptible(&result.completion); + wait_for_completion(&result.completion); ret = result.err; #ifdef DEBUG print_hex_dump(KERN_ERR, diff --git a/drivers/crypto/caam/key_gen.c b/drivers/crypto/caam/key_gen.c index e1eaf4ff9762..3ce1d5cdcbd2 100644 --- a/drivers/crypto/caam/key_gen.c +++ b/drivers/crypto/caam/key_gen.c @@ -103,7 +103,7 @@ int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len, ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result); if (!ret) { /* in progress */ - 
wait_for_completion_interruptible(&result.completion); + wait_for_completion(&result.completion); ret = result.err; #ifdef DEBUG print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ", diff --git a/drivers/crypto/msm/compat_qcedev.c b/drivers/crypto/msm/compat_qcedev.c index c69dc2b86a68..90e5fa804e47 100644 --- a/drivers/crypto/msm/compat_qcedev.c +++ b/drivers/crypto/msm/compat_qcedev.c @@ -1,7 +1,7 @@ /* * QTI CE 32-bit compatibility syscall for 64-bit systems * - * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved. + * Copyright (c) 2014-2015, 2017, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -97,7 +97,6 @@ static int compat_get_qcedev_vbuf_info( for (i = 0; i < QCEDEV_MAX_BUFFERS; i++) { err |= get_user(vaddr, &vbuf32->src[i].vaddr); - vbuf->src[i].vaddr = NULL; err |= put_user(vaddr, (compat_uptr_t *)&vbuf->src[i].vaddr); err |= get_user(len, &vbuf32->src[i].len); err |= put_user(len, &vbuf->src[i].len); @@ -105,7 +104,6 @@ static int compat_get_qcedev_vbuf_info( for (i = 0; i < QCEDEV_MAX_BUFFERS; i++) { err |= get_user(vaddr, &vbuf32->dst[i].vaddr); - vbuf->dst[i].vaddr = NULL; err |= put_user(vaddr, (compat_uptr_t *)&vbuf->dst[i].vaddr); err |= get_user(len, &vbuf32->dst[i].len); err |= put_user(len, &vbuf->dst[i].len); @@ -123,7 +121,6 @@ static int compat_put_qcedev_vbuf_info( for (i = 0; i < QCEDEV_MAX_BUFFERS; i++) { err |= get_user(vaddr, (compat_uptr_t *)&vbuf->src[i].vaddr); - vbuf32->src[i].vaddr = 0; err |= put_user(vaddr, &vbuf32->src[i].vaddr); err |= get_user(len, &vbuf->src[i].len); err |= put_user(len, &vbuf32->src[i].len); @@ -131,7 +128,6 @@ static int compat_put_qcedev_vbuf_info( for (i = 0; i < QCEDEV_MAX_BUFFERS; i++) { err |= get_user(vaddr, (compat_uptr_t *)&vbuf->dst[i].vaddr); - vbuf32->dst[i].vaddr = 0; err |= put_user(vaddr, &vbuf32->dst[i].vaddr); err |= get_user(len, &vbuf->dst[i].len); err |= put_user(len, &vbuf32->dst[i].len); @@ -276,7 +272,6 @@ static int compat_get_qcedev_sha_op_req( for (i = 0; i < QCEDEV_MAX_BUFFERS; i++) { err |= get_user(vaddr, &data32->data[i].vaddr); - data->data[i].vaddr = 0; err |= put_user(vaddr, (compat_uptr_t *)&data->data[i].vaddr); err |= get_user(len, &data32->data[i].len); err |= put_user(len, &data->data[i].len); @@ -295,7 +290,6 @@ static int compat_get_qcedev_sha_op_req( err |= get_user(diglen, &data32->diglen); err |= put_user(diglen, &data->diglen); err |= get_user(authkey, &data32->authkey); - data->authkey = NULL; err |= put_user(authkey, (compat_uptr_t *)&data->authkey); err |= get_user(authklen, &data32->authklen); err |= put_user(authklen, &data->authklen); @@ -322,7 +316,6 @@ static int compat_put_qcedev_sha_op_req( for (i = 0; i < QCEDEV_MAX_BUFFERS; i++) { err |= get_user(vaddr, (compat_uptr_t *)&data->data[i].vaddr); - data32->data[i].vaddr = 0; err |= put_user(vaddr, &data32->data[i].vaddr); err |= get_user(len, &data->data[i].len); err |= put_user(len, &data32->data[i].len); @@ -341,7 +334,6 @@ static int compat_put_qcedev_sha_op_req( err |= get_user(diglen, &data->diglen); err |= put_user(diglen, &data32->diglen); err |= get_user(authkey, (compat_uptr_t *)&data->authkey); - data32->authkey = 0; err |= put_user(authkey, &data32->authkey); err |= get_user(authklen, &data->authklen); err |= put_user(authklen, &data32->authklen); diff --git a/drivers/crypto/msm/ice.c b/drivers/crypto/msm/ice.c index 4002a5b57250..490f8d9ddb9f 100644 --- 
a/drivers/crypto/msm/ice.c +++ b/drivers/crypto/msm/ice.c @@ -125,6 +125,9 @@ static int qti_ice_setting_config(struct request *req, return -EPERM; } + if (!setting) + return -EINVAL; + if ((short)(crypto_data->key_index) >= 0) { memcpy(&setting->crypto_data, crypto_data, @@ -438,7 +441,7 @@ static int qcom_ice_enable(struct ice_device *ice_dev) (ICE_REV(ice_dev->ice_hw_version, MINOR) >= 1))) { reg = qcom_ice_readl(ice_dev, QCOM_ICE_REGS_BYPASS_STATUS); if ((reg & 0x80000000) != 0x0) { - pr_err("%s: Bypass failed for ice = %p", + pr_err("%s: Bypass failed for ice = %pK", __func__, (void *)ice_dev); BUG(); } @@ -464,7 +467,7 @@ static int qcom_ice_verify_ice(struct ice_device *ice_dev) } ice_dev->ice_hw_version = rev; - dev_info(ice_dev->pdev, "QC ICE %d.%d.%d device found @0x%p\n", + dev_info(ice_dev->pdev, "QC ICE %d.%d.%d device found @0x%pK\n", maj_rev, min_rev, step_rev, ice_dev->mmio); @@ -972,7 +975,8 @@ static int qcom_ice_secure_ice_init(struct ice_device *ice_dev) static int qcom_ice_update_sec_cfg(struct ice_device *ice_dev) { - int ret = 0, scm_ret = 0; + int ret = 0; + u64 scm_ret = 0; /* scm command buffer structure */ struct qcom_scm_cmd_buf { @@ -998,7 +1002,7 @@ static int qcom_ice_update_sec_cfg(struct ice_device *ice_dev) cbuf.device_id = ICE_TZ_DEV_ID; ret = scm_restore_sec_cfg(cbuf.device_id, cbuf.spare, &scm_ret); if (ret || scm_ret) { - pr_err("%s: failed, ret %d scm_ret %d\n", + pr_err("%s: failed, ret %d scm_ret %llu\n", __func__, ret, scm_ret); if (!ret) ret = scm_ret; @@ -1253,7 +1257,7 @@ static void qcom_ice_debug(struct platform_device *pdev) goto out; } - pr_err("%s: =========== REGISTER DUMP (%p)===========\n", + pr_err("%s: =========== REGISTER DUMP (%pK)===========\n", ice_dev->ice_instance_type, ice_dev); pr_err("%s: ICE Control: 0x%08x | ICE Reset: 0x%08x\n", @@ -1429,7 +1433,7 @@ static int qcom_ice_config_start(struct platform_device *pdev, int ret = 0; bool is_pfe = false; - if (!pdev || !req || !setting) { + if (!pdev || !req) { pr_err("%s: Invalid params passed\n", __func__); return -EINVAL; } @@ -1567,7 +1571,7 @@ struct platform_device *qcom_ice_get_pdevice(struct device_node *node) struct ice_device *ice_dev = NULL; if (!node) { - pr_err("%s: invalid node %p", __func__, node); + pr_err("%s: invalid node %pK", __func__, node); goto out; } @@ -1584,13 +1588,14 @@ struct platform_device *qcom_ice_get_pdevice(struct device_node *node) list_for_each_entry(ice_dev, &ice_devices, list) { if (ice_dev->pdev->of_node == node) { - pr_info("%s: found ice device %p\n", __func__, ice_dev); + pr_info("%s: found ice device %pK\n", __func__, + ice_dev); break; } } ice_pdev = to_platform_device(ice_dev->pdev); - pr_info("%s: matching platform device %p\n", __func__, ice_pdev); + pr_info("%s: matching platform device %pK\n", __func__, ice_pdev); out: return ice_pdev; } @@ -1629,7 +1634,7 @@ static int enable_ice_setup(struct ice_device *ice_dev) } ret = regulator_enable(ice_dev->reg); if (ret) { - pr_err("%s:%p: Could not enable regulator\n", + pr_err("%s:%pK: Could not enable regulator\n", __func__, ice_dev); goto out; } @@ -1637,7 +1642,7 @@ static int enable_ice_setup(struct ice_device *ice_dev) /* Setup Clocks */ if (qcom_ice_enable_clocks(ice_dev, true)) { - pr_err("%s:%p:%s Could not enable clocks\n", __func__, + pr_err("%s:%pK:%s Could not enable clocks\n", __func__, ice_dev, ice_dev->ice_instance_type); goto out_reg; } @@ -1649,7 +1654,7 @@ static int enable_ice_setup(struct ice_device *ice_dev) ret = qcom_ice_set_bus_vote(ice_dev, vote); if (ret) { - 
pr_err("%s:%p: failed %d\n", __func__, ice_dev, ret); + pr_err("%s:%pK: failed %d\n", __func__, ice_dev, ret); goto out_clocks; } @@ -1681,19 +1686,19 @@ static int disable_ice_setup(struct ice_device *ice_dev) /* Setup Bus Vote */ vote = qcom_ice_get_bus_vote(ice_dev, "MIN"); if (vote < 0) { - pr_err("%s:%p: Unable to get bus vote\n", __func__, ice_dev); + pr_err("%s:%pK: Unable to get bus vote\n", __func__, ice_dev); goto out_disable_clocks; } ret = qcom_ice_set_bus_vote(ice_dev, vote); if (ret) - pr_err("%s:%p: failed %d\n", __func__, ice_dev, ret); + pr_err("%s:%pK: failed %d\n", __func__, ice_dev, ret); out_disable_clocks: /* Setup Clocks */ if (qcom_ice_enable_clocks(ice_dev, false)) - pr_err("%s:%p:%s Could not disable clocks\n", __func__, + pr_err("%s:%pK:%s Could not disable clocks\n", __func__, ice_dev, ice_dev->ice_instance_type); /* Setup Regulator */ @@ -1704,7 +1709,7 @@ out_disable_clocks: } ret = regulator_disable(ice_dev->reg); if (ret) { - pr_err("%s:%p: Could not disable regulator\n", + pr_err("%s:%pK: Could not disable regulator\n", __func__, ice_dev); goto out; } diff --git a/drivers/crypto/msm/ota_crypto.c b/drivers/crypto/msm/ota_crypto.c index a568bf46f09f..96297fe7eaad 100644 --- a/drivers/crypto/msm/ota_crypto.c +++ b/drivers/crypto/msm/ota_crypto.c @@ -172,7 +172,7 @@ static int qcota_release(struct inode *inode, struct file *file) podev = file->private_data; if (podev != NULL && podev->magic != OTA_MAGIC) { - pr_err("%s: invalid handle %p\n", + pr_err("%s: invalid handle %pK\n", __func__, podev); } @@ -444,7 +444,7 @@ static long qcota_ioctl(struct file *file, podev = file->private_data; if (podev == NULL || podev->magic != OTA_MAGIC) { - pr_err("%s: invalid handle %p\n", + pr_err("%s: invalid handle %pK\n", __func__, podev); return -ENOENT; } diff --git a/drivers/crypto/msm/qce50.c b/drivers/crypto/msm/qce50.c index ee7e735761e2..b44f926a6ba0 100644 --- a/drivers/crypto/msm/qce50.c +++ b/drivers/crypto/msm/qce50.c @@ -1,6 +1,6 @@ /* Qualcomm Crypto Engine driver. * - * Copyright (c) 2012-2016, The Linux Foundation. All rights reserved. + * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -294,11 +294,11 @@ static int _probe_ce_engine(struct qce_device *pce_dev) pce_dev->ce_bam_info.ce_burst_size = MAX_CE_BAM_BURST_SIZE; dev_info(pce_dev->pdev, - "CE device = 0x%x\n, " - "IO base, CE = 0x%p\n, " + "CE device = 0x%x\n" + "IO base, CE = 0x%pK\n" "Consumer (IN) PIPE %d, " "Producer (OUT) PIPE %d\n" - "IO base BAM = 0x%p\n" + "IO base BAM = 0x%pK\n" "BAM IRQ %d\n" "Engines Availability = 0x%x\n", pce_dev->ce_bam_info.ce_device, @@ -1160,7 +1160,7 @@ static void _qce_dump_descr_fifos_dbg(struct qce_device *pce_dev, int req_info) #define QCE_WRITE_REG(val, addr) \ { \ - pr_info(" [0x%p] 0x%x\n", addr, (uint32_t)val); \ + pr_info(" [0x%pK] 0x%x\n", addr, (uint32_t)val); \ writel_relaxed(val, addr); \ } @@ -2155,6 +2155,10 @@ static int _sha_complete(struct qce_device *pce_dev, int req_info) pce_sps_data = &preq_info->ce_sps; qce_callback = preq_info->qce_cb; areq = (struct ahash_request *) preq_info->areq; + if (!areq) { + pr_err("sha operation error. 
areq is NULL\n"); + return -ENXIO; + } qce_dma_unmap_sg(pce_dev->pdev, areq->src, preq_info->src_nents, DMA_TO_DEVICE); memcpy(digest, (char *)(&pce_sps_data->result->auth_iv[0]), @@ -2730,7 +2734,7 @@ static int qce_sps_init_ep_conn(struct qce_device *pce_dev, sps_event->callback = NULL; } - pr_debug("success, %s : pipe_handle=0x%lx, desc fifo base (phy) = 0x%p\n", + pr_debug("success, %s : pipe_handle=0x%lx, desc fifo base (phy) = 0x%pK\n", is_producer ? "PRODUCER(RX/OUT)" : "CONSUMER(TX/IN)", (uintptr_t)sps_pipe_info, &sps_connect_info->desc.phys_base); goto out; @@ -2895,7 +2899,7 @@ static int qce_sps_get_bam(struct qce_device *pce_dev) bam.ipc_loglevel = QCE_BAM_DEFAULT_IPC_LOGLVL; bam.options |= SPS_BAM_CACHED_WP; pr_debug("bam physical base=0x%lx\n", (uintptr_t)bam.phys_addr); - pr_debug("bam virtual base=0x%p\n", bam.virt_addr); + pr_debug("bam virtual base=0x%pK\n", bam.virt_addr); /* Register CE Peripheral BAM device to SPS driver */ rc = sps_register_bam_device(&bam, &pbam->handle); @@ -2970,7 +2974,7 @@ static inline int qce_alloc_req_info(struct qce_device *pce_dev) request_index++; if (request_index >= MAX_QCE_BAM_REQ) request_index = 0; - if (xchg(&pce_dev->ce_request_info[request_index]. + if (atomic_xchg(&pce_dev->ce_request_info[request_index]. in_use, true) == false) { pce_dev->ce_request_index = request_index; return request_index; @@ -2986,7 +2990,8 @@ static inline void qce_free_req_info(struct qce_device *pce_dev, int req_info, bool is_complete) { pce_dev->ce_request_info[req_info].xfer_type = QCE_XFER_TYPE_LAST; - if (xchg(&pce_dev->ce_request_info[req_info].in_use, false) == true) { + if (atomic_xchg(&pce_dev->ce_request_info[req_info].in_use, + false) == true) { if (req_info < MAX_QCE_BAM_REQ && is_complete) atomic_dec(&pce_dev->no_of_queued_req); } else @@ -2998,7 +3003,7 @@ static void print_notify_debug(struct sps_event_notify *notify) phys_addr_t addr = DESC_FULL_ADDR((phys_addr_t) notify->data.transfer.iovec.flags, notify->data.transfer.iovec.addr); - pr_debug("sps ev_id=%d, addr=0x%pa, size=0x%x, flags=0x%x user=0x%p\n", + pr_debug("sps ev_id=%d, addr=0x%pa, size=0x%x, flags=0x%x user=0x%pK\n", notify->event_id, &addr, notify->data.transfer.iovec.size, notify->data.transfer.iovec.flags, @@ -4610,7 +4615,7 @@ static int qce_dummy_req(struct qce_device *pce_dev) { int ret = 0; - if (!(xchg(&pce_dev->ce_request_info[DUMMY_REQ_INDEX]. + if (!(atomic_xchg(&pce_dev->ce_request_info[DUMMY_REQ_INDEX]. 
in_use, true) == false)) return -EBUSY; ret = qce_process_sha_req(pce_dev, NULL); @@ -5969,7 +5974,7 @@ void *qce_open(struct platform_device *pdev, int *rc) } for (i = 0; i < MAX_QCE_ALLOC_BAM_REQ; i++) - pce_dev->ce_request_info[i].in_use = false; + atomic_set(&pce_dev->ce_request_info[i].in_use, false); pce_dev->ce_request_index = 0; pce_dev->memsize = 10 * PAGE_SIZE * MAX_QCE_ALLOC_BAM_REQ; @@ -6133,12 +6138,13 @@ EXPORT_SYMBOL(qce_hw_support); void qce_dump_req(void *handle) { int i; + bool req_in_use; struct qce_device *pce_dev = (struct qce_device *)handle; for (i = 0; i < MAX_QCE_BAM_REQ; i++) { - pr_info("qce_dump_req %d %d\n", i, - pce_dev->ce_request_info[i].in_use); - if (pce_dev->ce_request_info[i].in_use == true) + req_in_use = atomic_read(&pce_dev->ce_request_info[i].in_use); + pr_info("qce_dump_req %d %d\n", i, req_in_use); + if (req_in_use == true) _qce_dump_descr_fifos(pce_dev, i); } } diff --git a/drivers/crypto/msm/qce50.h b/drivers/crypto/msm/qce50.h index 6dba3664ff08..ab0d21da72c5 100644 --- a/drivers/crypto/msm/qce50.h +++ b/drivers/crypto/msm/qce50.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -214,7 +214,7 @@ struct ce_sps_data { }; struct ce_request_info { - bool in_use; + atomic_t in_use; bool in_prog; enum qce_xfer_type_enum xfer_type; struct ce_sps_data ce_sps; diff --git a/drivers/crypto/msm/qcedev.c b/drivers/crypto/msm/qcedev.c index beeb99e479c7..20bf034bb193 100644 --- a/drivers/crypto/msm/qcedev.c +++ b/drivers/crypto/msm/qcedev.c @@ -201,7 +201,7 @@ static int qcedev_release(struct inode *inode, struct file *file) handle = file->private_data; podev = handle->cntl; if (podev != NULL && podev->magic != QCEDEV_MAGIC) { - pr_err("%s: invalid handle %p\n", + pr_err("%s: invalid handle %pK\n", __func__, podev); } kzfree(handle); @@ -1607,7 +1607,7 @@ long qcedev_ioctl(struct file *file, unsigned cmd, unsigned long arg) podev = handle->cntl; qcedev_areq.handle = handle; if (podev == NULL || podev->magic != QCEDEV_MAGIC) { - pr_err("%s: invalid handle %p\n", + pr_err("%s: invalid handle %pK\n", __func__, podev); return -ENOENT; } diff --git a/drivers/crypto/msm/qcrypto.c b/drivers/crypto/msm/qcrypto.c index 893b0b6da6b8..f38fc422b35e 100644 --- a/drivers/crypto/msm/qcrypto.c +++ b/drivers/crypto/msm/qcrypto.c @@ -262,7 +262,7 @@ static void qcrypto_free_req_control(struct crypto_engine *pce, preq->arsp = NULL; /* free req */ if (xchg(&preq->in_use, false) == false) { - pr_warn("request info %p free already\n", preq); + pr_warn("request info %pK free already\n", preq); } else { atomic_dec(&pce->req_count); } @@ -437,6 +437,7 @@ struct qcrypto_cipher_req_ctx { u8 rfc4309_iv[QCRYPTO_MAX_IV_LENGTH]; unsigned int ivsize; int aead; + int ccmtype; /* default: 0, rfc4309: 1 */ struct scatterlist asg; /* Formatted associated data sg */ unsigned char *adata; /* Pointer to formatted assoc data */ enum qce_cipher_alg_enum alg; @@ -1719,7 +1720,7 @@ static void _qce_ahash_complete(void *cookie, unsigned char *digest, } #ifdef QCRYPTO_DEBUG - dev_info(&pengine->pdev->dev, "_qce_ahash_complete: %p ret %d\n", + dev_info(&pengine->pdev->dev, "_qce_ahash_complete: %pK ret %d\n", areq, ret); #endif if (digest) { @@ -1778,7 +1779,7 @@ static void _qce_ablk_cipher_complete(void *cookie, unsigned char *icb, } #ifdef 
QCRYPTO_DEBUG - dev_info(&pengine->pdev->dev, "_qce_ablk_cipher_complete: %p ret %d\n", + dev_info(&pengine->pdev->dev, "_qce_ablk_cipher_complete: %pK ret %d\n", areq, ret); #endif if (iv) @@ -1897,9 +1898,8 @@ static int aead_ccm_set_msg_len(u8 *block, unsigned int msglen, int csize) return 0; } -static int qccrypto_set_aead_ccm_nonce(struct qce_req *qreq) +static int qccrypto_set_aead_ccm_nonce(struct qce_req *qreq, uint32_t assoclen) { - struct aead_request *areq = (struct aead_request *) qreq->areq; unsigned int i = ((unsigned int)qreq->iv[0]) + 1; memcpy(&qreq->nonce[0] , qreq->iv, qreq->ivsize); @@ -1908,7 +1908,7 @@ static int qccrypto_set_aead_ccm_nonce(struct qce_req *qreq) * NIST Special Publication 800-38C */ qreq->nonce[0] |= (8 * ((qreq->authsize - 2) / 2)); - if (areq->assoclen) + if (assoclen) qreq->nonce[0] |= 64; if (i > MAX_NONCE) @@ -2118,24 +2118,31 @@ static int _qcrypto_process_aead(struct crypto_engine *pengine, qreq.flags = cipher_ctx->flags; if (qreq.mode == QCE_MODE_CCM) { + uint32_t assoclen; + if (qreq.dir == QCE_ENCRYPT) qreq.cryptlen = req->cryptlen; else qreq.cryptlen = req->cryptlen - qreq.authsize; + + /* if rfc4309 ccm, adjust assoclen */ + assoclen = req->assoclen; + if (rctx->ccmtype) + assoclen -= 8; /* Get NONCE */ - ret = qccrypto_set_aead_ccm_nonce(&qreq); + ret = qccrypto_set_aead_ccm_nonce(&qreq, assoclen); if (ret) return ret; - if (req->assoclen) { - rctx->adata = kzalloc((req->assoclen + 0x64), + if (assoclen) { + rctx->adata = kzalloc((assoclen + 0x64), GFP_ATOMIC); if (!rctx->adata) return -ENOMEM; /* Format Associated data */ ret = qcrypto_aead_ccm_format_adata(&qreq, - req->assoclen, + assoclen, req->src, rctx->adata); } else { @@ -2472,7 +2479,7 @@ static int _qcrypto_enc_aes_ecb(struct ablkcipher_request *req) BUG_ON(crypto_tfm_alg_type(req->base.tfm) != CRYPTO_ALG_TYPE_ABLKCIPHER); #ifdef QCRYPTO_DEBUG - dev_info(&ctx->pengine->pdev->dev, "_qcrypto_enc_aes_ecb: %p\n", req); + dev_info(&ctx->pengine->pdev->dev, "_qcrypto_enc_aes_ecb: %pK\n", req); #endif if ((ctx->enc_key_len == AES_KEYSIZE_192) && @@ -2502,7 +2509,7 @@ static int _qcrypto_enc_aes_cbc(struct ablkcipher_request *req) BUG_ON(crypto_tfm_alg_type(req->base.tfm) != CRYPTO_ALG_TYPE_ABLKCIPHER); #ifdef QCRYPTO_DEBUG - dev_info(&ctx->pengine->pdev->dev, "_qcrypto_enc_aes_cbc: %p\n", req); + dev_info(&ctx->pengine->pdev->dev, "_qcrypto_enc_aes_cbc: %pK\n", req); #endif if ((ctx->enc_key_len == AES_KEYSIZE_192) && @@ -2532,7 +2539,7 @@ static int _qcrypto_enc_aes_ctr(struct ablkcipher_request *req) BUG_ON(crypto_tfm_alg_type(req->base.tfm) != CRYPTO_ALG_TYPE_ABLKCIPHER); #ifdef QCRYPTO_DEBUG - dev_info(&ctx->pengine->pdev->dev, "_qcrypto_enc_aes_ctr: %p\n", req); + dev_info(&ctx->pengine->pdev->dev, "_qcrypto_enc_aes_ctr: %pK\n", req); #endif if ((ctx->enc_key_len == AES_KEYSIZE_192) && @@ -2592,6 +2599,7 @@ static int _qcrypto_aead_encrypt_aes_ccm(struct aead_request *req) rctx->dir = QCE_ENCRYPT; rctx->mode = QCE_MODE_CCM; rctx->iv = req->iv; + rctx->ccmtype = 0; pstat->aead_ccm_aes_enc++; return _qcrypto_queue_req(cp, ctx->pengine, &req->base); @@ -2606,6 +2614,8 @@ static int _qcrypto_aead_rfc4309_enc_aes_ccm(struct aead_request *req) pstat = &_qcrypto_stat; + if (req->assoclen != 16 && req->assoclen != 20) + return -EINVAL; rctx = aead_request_ctx(req); rctx->aead = 1; rctx->alg = CIPHER_ALG_AES; @@ -2615,6 +2625,7 @@ static int _qcrypto_aead_rfc4309_enc_aes_ccm(struct aead_request *req) rctx->rfc4309_iv[0] = 3; /* L -1 */ memcpy(&rctx->rfc4309_iv[1], ctx->ccm4309_nonce, 3); 
memcpy(&rctx->rfc4309_iv[4], req->iv, 8); + rctx->ccmtype = 1; rctx->iv = rctx->rfc4309_iv; pstat->aead_rfc4309_ccm_aes_enc++; return _qcrypto_queue_req(cp, ctx->pengine, &req->base); @@ -2716,7 +2727,7 @@ static int _qcrypto_dec_aes_ecb(struct ablkcipher_request *req) BUG_ON(crypto_tfm_alg_type(req->base.tfm) != CRYPTO_ALG_TYPE_ABLKCIPHER); #ifdef QCRYPTO_DEBUG - dev_info(&ctx->pengine->pdev->dev, "_qcrypto_dec_aes_ecb: %p\n", req); + dev_info(&ctx->pengine->pdev->dev, "_qcrypto_dec_aes_ecb: %pK\n", req); #endif if ((ctx->enc_key_len == AES_KEYSIZE_192) && @@ -2746,7 +2757,7 @@ static int _qcrypto_dec_aes_cbc(struct ablkcipher_request *req) BUG_ON(crypto_tfm_alg_type(req->base.tfm) != CRYPTO_ALG_TYPE_ABLKCIPHER); #ifdef QCRYPTO_DEBUG - dev_info(&ctx->pengine->pdev->dev, "_qcrypto_dec_aes_cbc: %p\n", req); + dev_info(&ctx->pengine->pdev->dev, "_qcrypto_dec_aes_cbc: %pK\n", req); #endif if ((ctx->enc_key_len == AES_KEYSIZE_192) && @@ -2776,7 +2787,7 @@ static int _qcrypto_dec_aes_ctr(struct ablkcipher_request *req) BUG_ON(crypto_tfm_alg_type(req->base.tfm) != CRYPTO_ALG_TYPE_ABLKCIPHER); #ifdef QCRYPTO_DEBUG - dev_info(&ctx->pengine->pdev->dev, "_qcrypto_dec_aes_ctr: %p\n", req); + dev_info(&ctx->pengine->pdev->dev, "_qcrypto_dec_aes_ctr: %pK\n", req); #endif if ((ctx->enc_key_len == AES_KEYSIZE_192) && @@ -2922,6 +2933,7 @@ static int _qcrypto_aead_decrypt_aes_ccm(struct aead_request *req) rctx->dir = QCE_DECRYPT; rctx->mode = QCE_MODE_CCM; rctx->iv = req->iv; + rctx->ccmtype = 0; pstat->aead_ccm_aes_dec++; return _qcrypto_queue_req(cp, ctx->pengine, &req->base); @@ -2935,6 +2947,8 @@ static int _qcrypto_aead_rfc4309_dec_aes_ccm(struct aead_request *req) struct crypto_stat *pstat; pstat = &_qcrypto_stat; + if (req->assoclen != 16 && req->assoclen != 20) + return -EINVAL; rctx = aead_request_ctx(req); rctx->aead = 1; rctx->alg = CIPHER_ALG_AES; @@ -2944,6 +2958,7 @@ static int _qcrypto_aead_rfc4309_dec_aes_ccm(struct aead_request *req) rctx->rfc4309_iv[0] = 3; /* L -1 */ memcpy(&rctx->rfc4309_iv[1], ctx->ccm4309_nonce, 3); memcpy(&rctx->rfc4309_iv[4], req->iv, 8); + rctx->ccmtype = 1; rctx->iv = rctx->rfc4309_iv; pstat->aead_rfc4309_ccm_aes_dec++; return _qcrypto_queue_req(cp, ctx->pengine, &req->base); @@ -3338,7 +3353,7 @@ static int _qcrypto_aead_encrypt_aes_cbc(struct aead_request *req) #ifdef QCRYPTO_DEBUG dev_info(&ctx->pengine->pdev->dev, - "_qcrypto_aead_encrypt_aes_cbc: %p\n", req); + "_qcrypto_aead_encrypt_aes_cbc: %pK\n", req); #endif rctx = aead_request_ctx(req); @@ -3369,7 +3384,7 @@ static int _qcrypto_aead_decrypt_aes_cbc(struct aead_request *req) #ifdef QCRYPTO_DEBUG dev_info(&ctx->pengine->pdev->dev, - "_qcrypto_aead_decrypt_aes_cbc: %p\n", req); + "_qcrypto_aead_decrypt_aes_cbc: %pK\n", req); #endif rctx = aead_request_ctx(req); rctx->aead = 1; @@ -3957,6 +3972,7 @@ static int _sha1_hmac_setkey(struct crypto_ahash *tfm, const u8 *key, unsigned int len) { struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(&tfm->base); + int ret = 0; memset(&sha_ctx->authkey[0], 0, SHA1_BLOCK_SIZE); if (len <= SHA1_BLOCK_SIZE) { memcpy(&sha_ctx->authkey[0], key, len); @@ -3964,16 +3980,19 @@ static int _sha1_hmac_setkey(struct crypto_ahash *tfm, const u8 *key, } else { sha_ctx->alg = QCE_HASH_SHA1; sha_ctx->diglen = SHA1_DIGEST_SIZE; - _sha_hmac_setkey(tfm, key, len); + ret = _sha_hmac_setkey(tfm, key, len); + if (ret) + pr_err("SHA1 hmac setkey failed\n"); sha_ctx->authkey_in_len = SHA1_BLOCK_SIZE; } - return 0; + return ret; } static int _sha256_hmac_setkey(struct crypto_ahash *tfm, const u8 
*key, unsigned int len) { struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(&tfm->base); + int ret = 0; memset(&sha_ctx->authkey[0], 0, SHA256_BLOCK_SIZE); if (len <= SHA256_BLOCK_SIZE) { @@ -3982,11 +4001,13 @@ static int _sha256_hmac_setkey(struct crypto_ahash *tfm, const u8 *key, } else { sha_ctx->alg = QCE_HASH_SHA256; sha_ctx->diglen = SHA256_DIGEST_SIZE; - _sha_hmac_setkey(tfm, key, len); + ret = _sha_hmac_setkey(tfm, key, len); + if (ret) + pr_err("SHA256 hmac setkey failed\n"); sha_ctx->authkey_in_len = SHA256_BLOCK_SIZE; } - return 0; + return ret; } static int _sha_hmac_init_ihash(struct ahash_request *req, diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c index 9a8a18aafd5c..6a60936b46e0 100644 --- a/drivers/crypto/talitos.c +++ b/drivers/crypto/talitos.c @@ -804,7 +804,7 @@ static void talitos_unregister_rng(struct device *dev) * crypto alg */ #define TALITOS_CRA_PRIORITY 3000 -#define TALITOS_MAX_KEY_SIZE 96 +#define TALITOS_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + SHA512_BLOCK_SIZE) #define TALITOS_MAX_IV_LENGTH 16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */ struct talitos_ctx { @@ -1388,6 +1388,11 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *cipher, { struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher); + if (keylen > TALITOS_MAX_KEY_SIZE) { + crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); + return -EINVAL; + } + memcpy(&ctx->key, key, keylen); ctx->keylen = keylen; diff --git a/drivers/dma/ep93xx_dma.c b/drivers/dma/ep93xx_dma.c index 57ff46284f15..c97336a2ba92 100644 --- a/drivers/dma/ep93xx_dma.c +++ b/drivers/dma/ep93xx_dma.c @@ -325,6 +325,8 @@ static int m2p_hw_setup(struct ep93xx_dma_chan *edmac) | M2P_CONTROL_ENABLE; m2p_set_control(edmac, control); + edmac->buffer = 0; + return 0; } diff --git a/drivers/dma/ioat/hw.h b/drivers/dma/ioat/hw.h index 690e3b4f8202..b36da3c1073f 100644 --- a/drivers/dma/ioat/hw.h +++ b/drivers/dma/ioat/hw.h @@ -64,6 +64,8 @@ #define PCI_DEVICE_ID_INTEL_IOAT_BDX8 0x6f2e #define PCI_DEVICE_ID_INTEL_IOAT_BDX9 0x6f2f +#define PCI_DEVICE_ID_INTEL_IOAT_SKX 0x2021 + #define IOAT_VER_1_2 0x12 /* Version 1.2 */ #define IOAT_VER_2_0 0x20 /* Version 2.0 */ #define IOAT_VER_3_0 0x30 /* Version 3.0 */ diff --git a/drivers/dma/ioat/init.c b/drivers/dma/ioat/init.c index 4ef0c5e07912..abb75ebd65ea 100644 --- a/drivers/dma/ioat/init.c +++ b/drivers/dma/ioat/init.c @@ -105,6 +105,8 @@ static struct pci_device_id ioat_pci_tbl[] = { { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX8) }, { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX9) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SKX) }, + /* I/OAT v3.3 platforms */ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD0) }, { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD1) }, @@ -250,10 +252,15 @@ static bool is_bdx_ioat(struct pci_dev *pdev) } } +static inline bool is_skx_ioat(struct pci_dev *pdev) +{ + return (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_SKX) ? 
true : false; +} + static bool is_xeon_cb32(struct pci_dev *pdev) { return is_jf_ioat(pdev) || is_snb_ioat(pdev) || is_ivb_ioat(pdev) || - is_hsw_ioat(pdev) || is_bdx_ioat(pdev); + is_hsw_ioat(pdev) || is_bdx_ioat(pdev) || is_skx_ioat(pdev); } bool is_bwd_ioat(struct pci_dev *pdev) @@ -1350,6 +1357,8 @@ static int ioat_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) device->version = readb(device->reg_base + IOAT_VER_OFFSET); if (device->version >= IOAT_VER_3_0) { + if (is_skx_ioat(pdev)) + device->version = IOAT_VER_3_2; err = ioat3_dma_probe(device, ioat_dca_enabled); if (device->version >= IOAT_VER_3_3) diff --git a/drivers/dma/sh/usb-dmac.c b/drivers/dma/sh/usb-dmac.c index b1bc945f008f..56410ea75ac5 100644 --- a/drivers/dma/sh/usb-dmac.c +++ b/drivers/dma/sh/usb-dmac.c @@ -117,7 +117,7 @@ struct usb_dmac { #define USB_DMASWR 0x0008 #define USB_DMASWR_SWR (1 << 0) #define USB_DMAOR 0x0060 -#define USB_DMAOR_AE (1 << 2) +#define USB_DMAOR_AE (1 << 1) #define USB_DMAOR_DME (1 << 0) #define USB_DMASAR 0x0000 diff --git a/drivers/dma/ti-dma-crossbar.c b/drivers/dma/ti-dma-crossbar.c index a415edbe61b1..149ec2bd9bc6 100644 --- a/drivers/dma/ti-dma-crossbar.c +++ b/drivers/dma/ti-dma-crossbar.c @@ -146,6 +146,7 @@ static int ti_am335x_xbar_probe(struct platform_device *pdev) match = of_match_node(ti_am335x_master_match, dma_node); if (!match) { dev_err(&pdev->dev, "DMA master is not supported\n"); + of_node_put(dma_node); return -EINVAL; } @@ -310,6 +311,7 @@ static int ti_dra7_xbar_probe(struct platform_device *pdev) match = of_match_node(ti_dra7_master_match, dma_node); if (!match) { dev_err(&pdev->dev, "DMA master is not supported\n"); + of_node_put(dma_node); return -EINVAL; } diff --git a/drivers/esoc/esoc-mdm-4x.c b/drivers/esoc/esoc-mdm-4x.c index 26f69fa61ba1..6eab3ea187c6 100644 --- a/drivers/esoc/esoc-mdm-4x.c +++ b/drivers/esoc/esoc-mdm-4x.c @@ -88,12 +88,10 @@ static void mdm_enable_irqs(struct mdm_ctrl *mdm) return; if (mdm->irq_mask & IRQ_ERRFATAL) { enable_irq(mdm->errfatal_irq); - irq_set_irq_wake(mdm->errfatal_irq, 1); mdm->irq_mask &= ~IRQ_ERRFATAL; } if (mdm->irq_mask & IRQ_STATUS) { enable_irq(mdm->status_irq); - irq_set_irq_wake(mdm->status_irq, 1); mdm->irq_mask &= ~IRQ_STATUS; } if (mdm->irq_mask & IRQ_PBLRDY) { @@ -107,12 +105,10 @@ static void mdm_disable_irqs(struct mdm_ctrl *mdm) if (!mdm) return; if (!(mdm->irq_mask & IRQ_ERRFATAL)) { - irq_set_irq_wake(mdm->errfatal_irq, 0); disable_irq_nosync(mdm->errfatal_irq); mdm->irq_mask |= IRQ_ERRFATAL; } if (!(mdm->irq_mask & IRQ_STATUS)) { - irq_set_irq_wake(mdm->status_irq, 0); disable_irq_nosync(mdm->status_irq); mdm->irq_mask |= IRQ_STATUS; } @@ -701,6 +697,7 @@ static int mdm_configure_ipc(struct mdm_ctrl *mdm, struct platform_device *pdev) goto errfatal_err; } mdm->errfatal_irq = irq; + irq_set_irq_wake(mdm->errfatal_irq, 1); errfatal_err: /* status irq */ @@ -719,6 +716,7 @@ errfatal_err: goto status_err; } mdm->status_irq = irq; + irq_set_irq_wake(mdm->status_irq, 1); status_err: if (gpio_is_valid(MDM_GPIO(mdm, MDM2AP_PBLRDY))) { irq = platform_get_irq_byname(pdev, "plbrdy_irq"); diff --git a/drivers/firmware/qcom/tz_log.c b/drivers/firmware/qcom/tz_log.c index fa90330db93b..c893681f3bf3 100644 --- a/drivers/firmware/qcom/tz_log.c +++ b/drivers/firmware/qcom/tz_log.c @@ -477,10 +477,10 @@ static int _disp_tz_reset_stats(void) static int _disp_tz_interrupt_stats(void) { - int i, j, int_info_size; + int i, j; int len = 0; int *num_int; - unsigned char *ptr; + void *ptr; struct tzdbg_int_t *tzdbg_ptr; 
struct tzdbg_int_t_tz40 *tzdbg_ptr_tz40; @@ -488,14 +488,12 @@ static int _disp_tz_interrupt_stats(void) (tzdbg.diag_buf->int_info_off - sizeof(uint32_t))); ptr = ((unsigned char *)tzdbg.diag_buf + tzdbg.diag_buf->int_info_off); - int_info_size = ((tzdbg.diag_buf->ring_off - - tzdbg.diag_buf->int_info_off)/(*num_int)); pr_info("qsee_version = 0x%x\n", tzdbg.tz_version); if (tzdbg.tz_version < QSEE_VERSION_TZ_4_X) { + tzdbg_ptr = ptr; for (i = 0; i < (*num_int); i++) { - tzdbg_ptr = (struct tzdbg_int_t *)ptr; len += snprintf(tzdbg.disp_buf + len, (debug_rw_buf_size - 1) - len, " Interrupt Number : 0x%x\n" @@ -519,11 +517,11 @@ static int _disp_tz_interrupt_stats(void) __func__); break; } - ptr += int_info_size; + tzdbg_ptr++; } } else { + tzdbg_ptr_tz40 = ptr; for (i = 0; i < (*num_int); i++) { - tzdbg_ptr_tz40 = (struct tzdbg_int_t_tz40 *)ptr; len += snprintf(tzdbg.disp_buf + len, (debug_rw_buf_size - 1) - len, " Interrupt Number : 0x%x\n" @@ -547,7 +545,7 @@ static int _disp_tz_interrupt_stats(void) __func__); break; } - ptr += int_info_size; + tzdbg_ptr_tz40++; } } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c index 51a9942cdb40..f4cae5357e40 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c @@ -681,6 +681,10 @@ int amdgpu_atombios_get_clock_info(struct amdgpu_device *adev) DRM_INFO("Changing default dispclk from %dMhz to 600Mhz\n", adev->clock.default_dispclk / 100); adev->clock.default_dispclk = 60000; + } else if (adev->clock.default_dispclk <= 60000) { + DRM_INFO("Changing default dispclk from %dMhz to 625Mhz\n", + adev->clock.default_dispclk / 100); + adev->clock.default_dispclk = 62500; } adev->clock.dp_extclk = le16_to_cpu(firmware_info->info_21.usUniphyDPModeExtClkFreq); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 25a3e2485cc2..2bc17a907ecf 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -124,6 +124,13 @@ int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type, } break; } + + if (!(*out_ring && (*out_ring)->adev)) { + DRM_ERROR("Ring %d is not initialized on IP %d\n", + ring, ip_type); + return -EINVAL; + } + return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 475c38fe9245..e40a6d8b0b92 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -1126,6 +1126,9 @@ static ssize_t amdgpu_ttm_vram_read(struct file *f, char __user *buf, if (size & 0x3 || *pos & 0x3) return -EINVAL; + if (*pos >= adev->mc.mc_vram_size) + return -ENXIO; + while (size) { unsigned long flags; uint32_t value; diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c b/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c index 49aa35016653..247b088990dc 100644 --- a/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c +++ b/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c @@ -164,7 +164,7 @@ void amdgpu_atombios_crtc_powergate(struct drm_crtc *crtc, int state) struct drm_device *dev = crtc->dev; struct amdgpu_device *adev = dev->dev_private; int index = GetIndexIntoMasterTable(COMMAND, EnableDispPowerGating); - ENABLE_DISP_POWER_GATING_PARAMETERS_V2_1 args; + ENABLE_DISP_POWER_GATING_PS_ALLOCATION args; memset(&args, 0, sizeof(args)); @@ -177,7 +177,7 @@ void amdgpu_atombios_crtc_powergate(struct drm_crtc *crtc, int state) void amdgpu_atombios_crtc_powergate_init(struct amdgpu_device *adev) { int index = 
GetIndexIntoMasterTable(COMMAND, EnableDispPowerGating); - ENABLE_DISP_POWER_GATING_PARAMETERS_V2_1 args; + ENABLE_DISP_POWER_GATING_PS_ALLOCATION args; memset(&args, 0, sizeof(args)); diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c index 57a2e347f04d..0f0094b58d1f 100644 --- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c @@ -893,6 +893,12 @@ static bool ci_dpm_vblank_too_short(struct amdgpu_device *adev) u32 vblank_time = amdgpu_dpm_get_vblank_time(adev); u32 switch_limit = adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5 ? 450 : 300; + /* disable mclk switching if the refresh is >120Hz, even if the + * blanking period would allow it + */ + if (amdgpu_dpm_get_vrefresh(adev) > 120) + return true; + if (vblank_time < switch_limit) return true; else diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h index 05f6522c0457..b5c64edeb668 100644 --- a/drivers/gpu/drm/ast/ast_drv.h +++ b/drivers/gpu/drm/ast/ast_drv.h @@ -113,6 +113,11 @@ struct ast_private { struct ttm_bo_kmap_obj cache_kmap; int next_cursor; bool support_wide_screen; + enum { + ast_use_p2a, + ast_use_dt, + ast_use_defaults + } config_mode; enum ast_tx_chip tx_chip_type; u8 dp501_maxclk; diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c index 9b8f0b975ca6..498a94069e6b 100644 --- a/drivers/gpu/drm/ast/ast_main.c +++ b/drivers/gpu/drm/ast/ast_main.c @@ -62,13 +62,84 @@ uint8_t ast_get_index_reg_mask(struct ast_private *ast, return ret; } +static void ast_detect_config_mode(struct drm_device *dev, u32 *scu_rev) +{ + struct device_node *np = dev->pdev->dev.of_node; + struct ast_private *ast = dev->dev_private; + uint32_t data, jregd0, jregd1; + + /* Defaults */ + ast->config_mode = ast_use_defaults; + *scu_rev = 0xffffffff; + + /* Check if we have device-tree properties */ + if (np && !of_property_read_u32(np, "aspeed,scu-revision-id", + scu_rev)) { + /* We do, disable P2A access */ + ast->config_mode = ast_use_dt; + DRM_INFO("Using device-tree for configuration\n"); + return; + } + + /* Not all families have a P2A bridge */ + if (dev->pdev->device != PCI_CHIP_AST2000) + return; + + /* + * The BMC will set SCU 0x40 D[12] to 1 if the P2 bridge + * is disabled. We force using P2A if VGA only mode bit + * is set D[7] + */ + jregd0 = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd0, 0xff); + jregd1 = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd1, 0xff); + if (!(jregd0 & 0x80) || !(jregd1 & 0x10)) { + /* Double check it's actually working */ + data = ast_read32(ast, 0xf004); + if (data != 0xFFFFFFFF) { + /* P2A works, grab silicon revision */ + ast->config_mode = ast_use_p2a; + + DRM_INFO("Using P2A bridge for configuration\n"); + + /* Read SCU7c (silicon revision register) */ + ast_write32(ast, 0xf004, 0x1e6e0000); + ast_write32(ast, 0xf000, 0x1); + *scu_rev = ast_read32(ast, 0x1207c); + return; + } + } + + /* We have a P2A bridge but it's disabled */ + DRM_INFO("P2A bridge disabled, using default configuration\n"); +} static int ast_detect_chip(struct drm_device *dev, bool *need_post) { struct ast_private *ast = dev->dev_private; - uint32_t data, jreg; + uint32_t jreg, scu_rev; + + /* + * If VGA isn't enabled, we need to enable now or subsequent + * access to the scratch registers will fail. 
We also inform + * our caller that it needs to POST the chip + * (Assumption: VGA not enabled -> need to POST) + */ + if (!ast_is_vga_enabled(dev)) { + ast_enable_vga(dev); + DRM_INFO("VGA not enabled on entry, requesting chip POST\n"); + *need_post = true; + } else + *need_post = false; + + + /* Enable extended register access */ + ast_enable_mmio(dev); ast_open_key(ast); + /* Find out whether P2A works or whether to use device-tree */ + ast_detect_config_mode(dev, &scu_rev); + + /* Identify chipset */ if (dev->pdev->device == PCI_CHIP_AST1180) { ast->chip = AST1100; DRM_INFO("AST 1180 detected\n"); @@ -80,12 +151,7 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post) ast->chip = AST2300; DRM_INFO("AST 2300 detected\n"); } else if (dev->pdev->revision >= 0x10) { - uint32_t data; - ast_write32(ast, 0xf004, 0x1e6e0000); - ast_write32(ast, 0xf000, 0x1); - - data = ast_read32(ast, 0x1207c); - switch (data & 0x0300) { + switch (scu_rev & 0x0300) { case 0x0200: ast->chip = AST1100; DRM_INFO("AST 1100 detected\n"); @@ -110,20 +176,6 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post) } } - /* - * If VGA isn't enabled, we need to enable now or subsequent - * access to the scratch registers will fail. We also inform - * our caller that it needs to POST the chip - * (Assumption: VGA not enabled -> need to POST) - */ - if (!ast_is_vga_enabled(dev)) { - ast_enable_vga(dev); - ast_enable_mmio(dev); - DRM_INFO("VGA not enabled on entry, requesting chip POST\n"); - *need_post = true; - } else - *need_post = false; - /* Check if we support wide screen */ switch (ast->chip) { case AST1180: @@ -140,14 +192,11 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post) ast->support_wide_screen = true; else { ast->support_wide_screen = false; - /* Read SCU7c (silicon revision register) */ - ast_write32(ast, 0xf004, 0x1e6e0000); - ast_write32(ast, 0xf000, 0x1); - data = ast_read32(ast, 0x1207c); - data &= 0x300; - if (ast->chip == AST2300 && data == 0x0) /* ast1300 */ + if (ast->chip == AST2300 && + (scu_rev & 0x300) == 0x0) /* ast1300 */ ast->support_wide_screen = true; - if (ast->chip == AST2400 && data == 0x100) /* ast1400 */ + if (ast->chip == AST2400 && + (scu_rev & 0x300) == 0x100) /* ast1400 */ ast->support_wide_screen = true; } break; @@ -212,29 +261,49 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post) static int ast_get_dram_info(struct drm_device *dev) { + struct device_node *np = dev->pdev->dev.of_node; struct ast_private *ast = dev->dev_private; - uint32_t data, data2; - uint32_t denum, num, div, ref_pll; + uint32_t mcr_cfg, mcr_scu_mpll, mcr_scu_strap; + uint32_t denum, num, div, ref_pll, dsel; - ast_write32(ast, 0xf004, 0x1e6e0000); - ast_write32(ast, 0xf000, 0x1); + switch (ast->config_mode) { + case ast_use_dt: + /* + * If some properties are missing, use reasonable + * defaults for AST2400 + */ + if (of_property_read_u32(np, "aspeed,mcr-configuration", + &mcr_cfg)) + mcr_cfg = 0x00000577; + if (of_property_read_u32(np, "aspeed,mcr-scu-mpll", + &mcr_scu_mpll)) + mcr_scu_mpll = 0x000050C0; + if (of_property_read_u32(np, "aspeed,mcr-scu-strap", + &mcr_scu_strap)) + mcr_scu_strap = 0; + break; + case ast_use_p2a: + ast_write32(ast, 0xf004, 0x1e6e0000); + ast_write32(ast, 0xf000, 0x1); + mcr_cfg = ast_read32(ast, 0x10004); + mcr_scu_mpll = ast_read32(ast, 0x10120); + mcr_scu_strap = ast_read32(ast, 0x10170); + break; + case ast_use_defaults: + default: + ast->dram_bus_width = 16; + ast->dram_type = AST_DRAM_1Gx16; + ast->mclk = 
396; + return 0; + } - - ast_write32(ast, 0x10000, 0xfc600309); - - do { - if (pci_channel_offline(dev->pdev)) - return -EIO; - } while (ast_read32(ast, 0x10000) != 0x01); - data = ast_read32(ast, 0x10004); - - if (data & 0x40) + if (mcr_cfg & 0x40) ast->dram_bus_width = 16; else ast->dram_bus_width = 32; if (ast->chip == AST2300 || ast->chip == AST2400) { - switch (data & 0x03) { + switch (mcr_cfg & 0x03) { case 0: ast->dram_type = AST_DRAM_512Mx16; break; @@ -250,13 +319,13 @@ static int ast_get_dram_info(struct drm_device *dev) break; } } else { - switch (data & 0x0c) { + switch (mcr_cfg & 0x0c) { case 0: case 4: ast->dram_type = AST_DRAM_512Mx16; break; case 8: - if (data & 0x40) + if (mcr_cfg & 0x40) ast->dram_type = AST_DRAM_1Gx16; else ast->dram_type = AST_DRAM_512Mx32; @@ -267,17 +336,15 @@ static int ast_get_dram_info(struct drm_device *dev) } } - data = ast_read32(ast, 0x10120); - data2 = ast_read32(ast, 0x10170); - if (data2 & 0x2000) + if (mcr_scu_strap & 0x2000) ref_pll = 14318; else ref_pll = 12000; - denum = data & 0x1f; - num = (data & 0x3fe0) >> 5; - data = (data & 0xc000) >> 14; - switch (data) { + denum = mcr_scu_mpll & 0x1f; + num = (mcr_scu_mpll & 0x3fe0) >> 5; + dsel = (mcr_scu_mpll & 0xc000) >> 14; + switch (dsel) { case 3: div = 0x4; break; diff --git a/drivers/gpu/drm/ast/ast_post.c b/drivers/gpu/drm/ast/ast_post.c index 30672a3df8a9..c7c58becb25d 100644 --- a/drivers/gpu/drm/ast/ast_post.c +++ b/drivers/gpu/drm/ast/ast_post.c @@ -375,12 +375,17 @@ void ast_post_gpu(struct drm_device *dev) ast_enable_mmio(dev); ast_set_def_ext_reg(dev); - if (ast->chip == AST2300 || ast->chip == AST2400) - ast_init_dram_2300(dev); - else - ast_init_dram_reg(dev); + if (ast->config_mode == ast_use_p2a) { + if (ast->chip == AST2300 || ast->chip == AST2400) + ast_init_dram_2300(dev); + else + ast_init_dram_reg(dev); - ast_init_3rdtx(dev); + ast_init_3rdtx(dev); + } else { + if (ast->tx_chip_type != AST_TX_NONE) + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa3, 0xcf, 0x80); /* Enable DVO */ + } } /* AST 2300 DRAM settings */ diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c index a3b96d691ac9..58bf94b69186 100644 --- a/drivers/gpu/drm/drm_dp_mst_topology.c +++ b/drivers/gpu/drm/drm_dp_mst_topology.c @@ -330,6 +330,13 @@ static bool drm_dp_sideband_msg_build(struct drm_dp_sideband_msg_rx *msg, return false; } + /* + * ignore out-of-order messages or messages that are part of a + * failed transaction + */ + if (!recv_hdr.somt && !msg->have_somt) + return false; + /* get length contained in this portion */ msg->curchunk_len = recv_hdr.msg_len; msg->curchunk_hdrlen = hdrlen; @@ -2163,7 +2170,7 @@ out_unlock: } EXPORT_SYMBOL(drm_dp_mst_topology_mgr_resume); -static void drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up) +static bool drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up) { int len; u8 replyblock[32]; @@ -2178,12 +2185,12 @@ static void drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up) replyblock, len); if (ret != len) { DRM_DEBUG_KMS("failed to read DPCD down rep %d %d\n", len, ret); - return; + return false; } ret = drm_dp_sideband_msg_build(msg, replyblock, len, true); if (!ret) { DRM_DEBUG_KMS("sideband msg build failed %d\n", replyblock[0]); - return; + return false; } replylen = msg->curchunk_len + msg->curchunk_hdrlen; @@ -2195,21 +2202,32 @@ static void drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up) ret = drm_dp_dpcd_read(mgr->aux, basereg + curreply, 
replyblock, len); if (ret != len) { - DRM_DEBUG_KMS("failed to read a chunk\n"); + DRM_DEBUG_KMS("failed to read a chunk (len %d, ret %d)\n", + len, ret); + return false; } + ret = drm_dp_sideband_msg_build(msg, replyblock, len, false); - if (ret == false) + if (!ret) { DRM_DEBUG_KMS("failed to build sideband msg\n"); + return false; + } + curreply += len; replylen -= len; } + return true; } static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr) { int ret = 0; - drm_dp_get_one_sb_msg(mgr, false); + if (!drm_dp_get_one_sb_msg(mgr, false)) { + memset(&mgr->down_rep_recv, 0, + sizeof(struct drm_dp_sideband_msg_rx)); + return 0; + } if (mgr->down_rep_recv.have_eomt) { struct drm_dp_sideband_msg_tx *txmsg; @@ -2265,7 +2283,12 @@ static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr) static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr) { int ret = 0; - drm_dp_get_one_sb_msg(mgr, true); + + if (!drm_dp_get_one_sb_msg(mgr, true)) { + memset(&mgr->up_req_recv, 0, + sizeof(struct drm_dp_sideband_msg_rx)); + return 0; + } if (mgr->up_req_recv.have_eomt) { struct drm_dp_sideband_msg_req_body msg; @@ -2317,7 +2340,9 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr) DRM_DEBUG_KMS("Got RSN: pn: %d avail_pbn %d\n", msg.u.resource_stat.port_number, msg.u.resource_stat.available_pbn); } - drm_dp_put_mst_branch_device(mstb); + if (mstb) + drm_dp_put_mst_branch_device(mstb); + memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx)); } return ret; diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index 39b8e171cad5..47c1747e7ae3 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -2716,6 +2716,7 @@ add_detailed_modes(struct drm_connector *connector, struct edid *edid, #define VENDOR_BLOCK 0x03 #define SPEAKER_BLOCK 0x04 #define HDR_STATIC_METADATA_EXTENDED_DATA_BLOCK 0x06 +#define COLORIMETRY_EXTENDED_DATA_BLOCK 0x05 #define EXTENDED_TAG 0x07 #define VIDEO_CAPABILITY_BLOCK 0x07 #define Y420_VIDEO_DATA_BLOCK 0x0E @@ -3526,11 +3527,17 @@ drm_extract_vcdb_info(struct drm_connector *connector, const u8 *db) (db[2] & (BIT(3) | BIT(2))) >> 2; connector->ce_scan_info = db[2] & (BIT(1) | BIT(0)); + connector->rgb_qs = + db[2] & BIT(6); + connector->yuv_qs = + db[2] & BIT(7); DRM_DEBUG_KMS("Scan Info (pt|it|ce): (%d|%d|%d)", (int) connector->pt_scan_info, (int) connector->it_scan_info, (int) connector->ce_scan_info); + DRM_DEBUG_KMS("rgb_quant_range_select %d", connector->rgb_qs); + DRM_DEBUG_KMS("ycc_quant_range_select %d", connector->yuv_qs); } static bool drm_edid_is_luminance_value_present( @@ -3588,6 +3595,50 @@ drm_extract_hdr_db(struct drm_connector *connector, const u8 *db) DRM_DEBUG_KMS("min luminance %d\n", connector->hdr_min_luminance); } +/* + * drm_extract_colorimetry_db - Parse the HDMI colorimetry extended block + * @connector: connector corresponding to the HDMI sink + * @db: start of the HDMI colorimetry extended block + * + * Parses the HDMI colorimetry block to extract sink info for @connector. 
+ */ +static void +drm_extract_clrmetry_db(struct drm_connector *connector, const u8 *db) +{ + + if (!db) { + DRM_ERROR("invalid db\n"); + return; + } + + /* Bit 0: xvYCC_601 */ + if (db[2] & BIT(0)) + connector->color_enc_fmt |= DRM_EDID_COLORIMETRY_xvYCC_601; + /* Bit 1: xvYCC_709 */ + if (db[2] & BIT(1)) + connector->color_enc_fmt |= DRM_EDID_COLORIMETRY_xvYCC_709; + /* Bit 2: sYCC_601 */ + if (db[2] & BIT(2)) + connector->color_enc_fmt |= DRM_EDID_COLORIMETRY_sYCC_601; + /* Bit 3: ADBYCC_601 */ + if (db[2] & BIT(3)) + connector->color_enc_fmt |= DRM_EDID_COLORIMETRY_ADBYCC_601; + /* Bit 4: ADB_RGB */ + if (db[2] & BIT(4)) + connector->color_enc_fmt |= DRM_EDID_COLORIMETRY_ADB_RGB; + /* Bit 5: BT2020_CYCC */ + if (db[2] & BIT(5)) + connector->color_enc_fmt |= DRM_EDID_COLORIMETRY_BT2020_CYCC; + /* Bit 6: BT2020_YCC */ + if (db[2] & BIT(6)) + connector->color_enc_fmt |= DRM_EDID_COLORIMETRY_BT2020_YCC; + /* Bit 7: BT2020_RGB */ + if (db[2] & BIT(7)) + connector->color_enc_fmt |= DRM_EDID_COLORIMETRY_BT2020_RGB; + + DRM_DEBUG_KMS("colorimetry fmt 0x%x\n", connector->color_enc_fmt); +} + /* * drm_hdmi_extract_extended_blk_info - Parse the HDMI extended tag blocks * @connector: connector corresponding to the HDMI sink @@ -3620,6 +3671,9 @@ struct edid *edid) case HDR_STATIC_METADATA_EXTENDED_DATA_BLOCK: drm_extract_hdr_db(connector, db); break; + case COLORIMETRY_EXTENDED_DATA_BLOCK: + drm_extract_clrmetry_db(connector, db); + break; default: break; } } diff --git a/drivers/gpu/drm/gma500/psb_intel_lvds.c b/drivers/gpu/drm/gma500/psb_intel_lvds.c index ce0645d0c1e5..61e3a097a478 100644 --- a/drivers/gpu/drm/gma500/psb_intel_lvds.c +++ b/drivers/gpu/drm/gma500/psb_intel_lvds.c @@ -783,20 +783,23 @@ void psb_intel_lvds_init(struct drm_device *dev, if (scan->type & DRM_MODE_TYPE_PREFERRED) { mode_dev->panel_fixed_mode = drm_mode_duplicate(dev, scan); + DRM_DEBUG_KMS("Using mode from DDC\n"); goto out; /* FIXME: check for quirks */ } } /* Failed to get EDID, what about VBT? do we need this?
*/ - if (mode_dev->vbt_mode) + if (dev_priv->lfp_lvds_vbt_mode) { mode_dev->panel_fixed_mode = - drm_mode_duplicate(dev, mode_dev->vbt_mode); + drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode); - if (!mode_dev->panel_fixed_mode) - if (dev_priv->lfp_lvds_vbt_mode) - mode_dev->panel_fixed_mode = - drm_mode_duplicate(dev, - dev_priv->lfp_lvds_vbt_mode); + if (mode_dev->panel_fixed_mode) { + mode_dev->panel_fixed_mode->type |= + DRM_MODE_TYPE_PREFERRED; + DRM_DEBUG_KMS("Using mode from VBT\n"); + goto out; + } + } /* * If we didn't get EDID, try checking if the panel is already turned @@ -813,6 +816,7 @@ void psb_intel_lvds_init(struct drm_device *dev, if (mode_dev->panel_fixed_mode) { mode_dev->panel_fixed_mode->type |= DRM_MODE_TYPE_PREFERRED; + DRM_DEBUG_KMS("Using pre-programmed mode\n"); goto out; /* FIXME: check for quirks */ } } diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile index f3a8a8416c7a..4c082fff2fc5 100644 --- a/drivers/gpu/drm/msm/Makefile +++ b/drivers/gpu/drm/msm/Makefile @@ -13,6 +13,7 @@ msm_drm-y := \ hdmi/hdmi_connector.o \ hdmi/hdmi_hdcp.o \ hdmi/hdmi_i2c.o \ + hdmi/hdmi_util.o \ hdmi/hdmi_phy_8960.o \ hdmi/hdmi_phy_8x60.o \ hdmi/hdmi_phy_8x74.o \ @@ -47,10 +48,12 @@ msm_drm-y := \ sde/sde_backlight.o \ sde/sde_color_processing.o \ sde/sde_vbif.o \ + sde/sde_splash.o \ sde_dbg_evtlog.o \ sde_io_util.o \ dba_bridge.o \ - sde_edid_parser.o + sde_edid_parser.o \ + sde_hdcp_1x.o # use drm gpu driver only if qcom_kgsl driver not available ifneq ($(CONFIG_QCOM_KGSL),y) @@ -101,9 +104,11 @@ msm_drm-$(CONFIG_DRM_MSM_DSI_STAGING) += dsi-staging/dsi_phy.o \ dsi-staging/dsi_display_test.o msm_drm-$(CONFIG_DRM_SDE_HDMI) += \ + hdmi-staging/sde_hdmi_util.o \ hdmi-staging/sde_hdmi.o \ hdmi-staging/sde_hdmi_bridge.o \ hdmi-staging/sde_hdmi_audio.o \ + hdmi-staging/sde_hdmi_hdcp2p2.o \ msm_drm-$(CONFIG_DRM_MSM_DSI_PLL) += dsi/pll/dsi_pll.o \ dsi/pll/dsi_pll_28nm.o @@ -144,6 +149,8 @@ msm_drm-$(CONFIG_DRM_MSM) += \ msm_rd.o \ msm_ringbuffer.o \ msm_prop.o \ - msm_snapshot.o + msm_snapshot.o \ + msm_submitqueue.o \ + msm_trace_points.o obj-$(CONFIG_DRM_MSM) += msm_drm.o diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c index a417e42944fc..c085e173232b 100644 --- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c @@ -409,8 +409,8 @@ static void a3xx_show(struct msm_gpu *gpu, struct seq_file *m) gpu->funcs->pm_resume(gpu); seq_printf(m, "status: %08x\n", gpu_read(gpu, REG_A3XX_RBBM_STATUS)); - gpu->funcs->pm_suspend(gpu); adreno_show(gpu, m); + gpu->funcs->pm_suspend(gpu); } #endif @@ -439,7 +439,6 @@ static const struct adreno_gpu_funcs funcs = { .pm_suspend = msm_gpu_pm_suspend, .pm_resume = msm_gpu_pm_resume, .recover = a3xx_recover, - .last_fence = adreno_last_fence, .submitted_fence = adreno_submitted_fence, .submit = adreno_submit, .flush = adreno_flush, diff --git a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c index 069823f054f7..624c2a87d593 100644 --- a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c @@ -447,9 +447,9 @@ static void a4xx_show(struct msm_gpu *gpu, struct seq_file *m) seq_printf(m, "status: %08x\n", gpu_read(gpu, REG_A4XX_RBBM_STATUS)); - gpu->funcs->pm_suspend(gpu); adreno_show(gpu, m); + gpu->funcs->pm_suspend(gpu); } #endif @@ -522,7 +522,6 @@ static const struct adreno_gpu_funcs funcs = { .pm_suspend = a4xx_pm_suspend, .pm_resume = a4xx_pm_resume, .recover = a4xx_recover, - .last_fence = 
adreno_last_fence, .submitted_fence = adreno_submitted_fence, .submit = adreno_submit, .flush = adreno_flush, diff --git a/drivers/gpu/drm/msm/adreno/a5xx_counters.c b/drivers/gpu/drm/msm/adreno/a5xx_counters.c index f1fac5535359..bc442039c308 100644 --- a/drivers/gpu/drm/msm/adreno/a5xx_counters.c +++ b/drivers/gpu/drm/msm/adreno/a5xx_counters.c @@ -106,7 +106,7 @@ static void a5xx_counter_enable_pm4(struct msm_gpu *gpu, { struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu); - struct msm_ringbuffer *ring = gpu->rb[MSM_GPU_MAX_RINGS - 1]; + struct msm_ringbuffer *ring = gpu->rb[0]; struct adreno_counter *counter = &group->counters[counterid]; mutex_lock(&gpu->dev->struct_mutex); diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c index 8a136fef86f1..765c1c087c76 100644 --- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c @@ -13,7 +13,9 @@ #include "msm_gem.h" #include "msm_iommu.h" +#include "msm_trace.h" #include "a5xx_gpu.h" +#include #define SECURE_VA_START 0xc0000000 #define SECURE_VA_SIZE SZ_256M @@ -46,7 +48,6 @@ static void a5xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring) static void a5xx_set_pagetable(struct msm_gpu *gpu, struct msm_ringbuffer *ring, struct msm_gem_address_space *aspace) { - struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); struct msm_mmu *mmu = aspace->mmu; struct msm_iommu *iommu = to_msm_iommu(mmu); @@ -75,17 +76,15 @@ static void a5xx_set_pagetable(struct msm_gpu *gpu, struct msm_ringbuffer *ring, * reload the pagetable if the current ring gets preempted out. */ OUT_PKT7(ring, CP_MEM_WRITE, 4); - OUT_RING(ring, lower_32_bits(rbmemptr(adreno_gpu, ring->id, ttbr0))); - OUT_RING(ring, upper_32_bits(rbmemptr(adreno_gpu, ring->id, ttbr0))); + OUT_RING(ring, lower_32_bits(rbmemptr(ring, ttbr0))); + OUT_RING(ring, upper_32_bits(rbmemptr(ring, ttbr0))); OUT_RING(ring, lower_32_bits(iommu->ttbr0)); OUT_RING(ring, upper_32_bits(iommu->ttbr0)); /* Also write the current contextidr (ASID) */ OUT_PKT7(ring, CP_MEM_WRITE, 3); - OUT_RING(ring, lower_32_bits(rbmemptr(adreno_gpu, ring->id, - contextidr))); - OUT_RING(ring, upper_32_bits(rbmemptr(adreno_gpu, ring->id, - contextidr))); + OUT_RING(ring, lower_32_bits(rbmemptr(ring, contextidr))); + OUT_RING(ring, upper_32_bits(rbmemptr(ring, contextidr))); OUT_RING(ring, iommu->contextidr); /* Invalidate the draw state so we start off fresh */ @@ -103,12 +102,32 @@ static void a5xx_set_pagetable(struct msm_gpu *gpu, struct msm_ringbuffer *ring, OUT_RING(ring, 1); } +/* Inline PM4 code to get the current value of the 19.2 Mhz always on counter */ +static void a5xx_get_ticks(struct msm_ringbuffer *ring, uint64_t iova) +{ + /* + * Set bit[30] to make this command a 64 bit write operation. + * bits[18-29] is to specify number of consecutive registers + * to copy, so set this space with 2, since we want to copy + * data from REG_A5XX_RBBM_ALWAYSON_COUNTER_LO and [HI]. 
+ */ + + OUT_PKT7(ring, CP_REG_TO_MEM, 3); + OUT_RING(ring, REG_A5XX_RBBM_ALWAYSON_COUNTER_LO | + (1 << 30) | (2 << 18)); + OUT_RING(ring, lower_32_bits(iova)); + OUT_RING(ring, upper_32_bits(iova)); +} + static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) { struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu); struct msm_ringbuffer *ring = gpu->rb[submit->ring]; unsigned int i, ibs = 0; + unsigned long flags; + u64 ticks; + ktime_t time; a5xx_set_pagetable(gpu, ring, submit->aspace); @@ -142,24 +161,15 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) OUT_RING(ring, 1); } - /* Record the always on counter before command execution */ - if (submit->profile_buf_iova) { - uint64_t gpuaddr = submit->profile_buf_iova + - offsetof(struct drm_msm_gem_submit_profile_buffer, - ticks_submitted); + /* Record the GPU ticks at command start for kernel side profiling */ + a5xx_get_ticks(ring, + RING_TICKS_IOVA(ring, submit->tick_index, started)); - /* - * Set bit[30] to make this command a 64 bit write operation. - * bits[18-29] is to specify number of consecutive registers - * to copy, so set this space with 2, since we want to copy - * data from REG_A5XX_RBBM_ALWAYSON_COUNTER_LO and [HI]. - */ - OUT_PKT7(ring, CP_REG_TO_MEM, 3); - OUT_RING(ring, REG_A5XX_RBBM_ALWAYSON_COUNTER_LO | - (1 << 30) | (2 << 18)); - OUT_RING(ring, lower_32_bits(gpuaddr)); - OUT_RING(ring, upper_32_bits(gpuaddr)); - } + /* And for the user profiling too if it is enabled */ + if (submit->profile_buf_iova) + a5xx_get_ticks(ring, submit->profile_buf_iova + + offsetof(struct drm_msm_gem_submit_profile_buffer, + ticks_submitted)); /* Submit the commands */ for (i = 0; i < submit->nr_cmds; i++) { @@ -193,18 +203,15 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) OUT_PKT7(ring, CP_YIELD_ENABLE, 1); OUT_RING(ring, 0x01); - /* Record the always on counter after command execution */ - if (submit->profile_buf_iova) { - uint64_t gpuaddr = submit->profile_buf_iova + - offsetof(struct drm_msm_gem_submit_profile_buffer, - ticks_retired); + /* Record the GPU ticks at command retire for kernel side profiling */ + a5xx_get_ticks(ring, + RING_TICKS_IOVA(ring, submit->tick_index, retired)); - OUT_PKT7(ring, CP_REG_TO_MEM, 3); - OUT_RING(ring, REG_A5XX_RBBM_ALWAYSON_COUNTER_LO | - (1 << 30) | (2 << 18)); - OUT_RING(ring, lower_32_bits(gpuaddr)); - OUT_RING(ring, upper_32_bits(gpuaddr)); - } + /* Record the always on counter after command execution */ + if (submit->profile_buf_iova) + a5xx_get_ticks(ring, submit->profile_buf_iova + + offsetof(struct drm_msm_gem_submit_profile_buffer, + ticks_retired)); /* Write the fence to the scratch register */ OUT_PKT4(ring, REG_A5XX_CP_SCRATCH_REG(2), 1); @@ -217,8 +224,8 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) OUT_PKT7(ring, CP_EVENT_WRITE, 4); OUT_RING(ring, CACHE_FLUSH_TS | (1 << 31)); - OUT_RING(ring, lower_32_bits(rbmemptr(adreno_gpu, ring->id, fence))); - OUT_RING(ring, upper_32_bits(rbmemptr(adreno_gpu, ring->id, fence))); + OUT_RING(ring, lower_32_bits(rbmemptr(ring, fence))); + OUT_RING(ring, upper_32_bits(rbmemptr(ring, fence))); OUT_RING(ring, submit->fence); if (submit->secure) { @@ -240,33 +247,28 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) /* Set bit 0 to trigger an interrupt on preempt complete */ OUT_RING(ring, 0x01); - if (submit->profile_buf_iova) { - unsigned long flags; - uint64_t 
ktime; - struct drm_msm_gem_submit_profile_buffer *profile_buf = - submit->profile_buf_vaddr; + /* + * Get the current kernel time and ticks with interrupts off so we don't + * get interrupted between the operations and skew the numbers + */ - /* - * With this profiling, we are trying to create closest - * possible mapping between the CPU time domain(monotonic clock) - * and the GPU time domain(ticks). In order to make this - * happen, we need to briefly turn off interrupts to make sure - * interrupts do not run between collecting these two samples. - */ - local_irq_save(flags); + local_irq_save(flags); + ticks = gpu_read64(gpu, REG_A5XX_RBBM_ALWAYSON_COUNTER_LO, + REG_A5XX_RBBM_ALWAYSON_COUNTER_HI); + time = ktime_get_raw(); + local_irq_restore(flags); - profile_buf->ticks_queued = gpu_read64(gpu, - REG_A5XX_RBBM_ALWAYSON_COUNTER_LO, - REG_A5XX_RBBM_ALWAYSON_COUNTER_HI); + if (submit->profile_buf) { + struct timespec64 ts = ktime_to_timespec64(time); - ktime = ktime_get_raw_ns(); - - local_irq_restore(flags); - - profile_buf->queue_time = ktime; - profile_buf->submit_time = ktime; + /* Write the data into the user-specified profile buffer */ + submit->profile_buf->time.tv_sec = ts.tv_sec; + submit->profile_buf->time.tv_nsec = ts.tv_nsec; + submit->profile_buf->ticks_queued = ticks; } + trace_msm_submitted(submit, ticks, ktime_to_ns(time)); + a5xx_flush(gpu, ring); /* Check to see if we need to start preemption */ @@ -374,6 +376,7 @@ static const struct { void a5xx_set_hwcg(struct msm_gpu *gpu, bool state) { struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu); unsigned int i; for (i = 0; i < ARRAY_SIZE(a5xx_hwcg); i++) @@ -390,6 +393,11 @@ void a5xx_set_hwcg(struct msm_gpu *gpu, bool state) gpu_write(gpu, REG_A5XX_RBBM_CLOCK_CNTL, state ? 0xAAA8AA00 : 0); gpu_write(gpu, REG_A5XX_RBBM_ISDB_CNT, state ? 0x182 : 0x180); + + if (state) + set_bit(A5XX_HWCG_ENABLED, &a5xx_gpu->flags); + else + clear_bit(A5XX_HWCG_ENABLED, &a5xx_gpu->flags); } static int a5xx_me_init(struct msm_gpu *gpu) @@ -477,30 +485,14 @@ static int a5xx_preempt_start(struct msm_gpu *gpu) static struct drm_gem_object *a5xx_ucode_load_bo(struct msm_gpu *gpu, const struct firmware *fw, u64 *iova) { - struct drm_device *drm = gpu->dev; struct drm_gem_object *bo; void *ptr; - bo = msm_gem_new(drm, fw->size - 4, - MSM_BO_UNCACHED | MSM_BO_GPU_READONLY); + ptr = msm_gem_kernel_new(gpu->dev, fw->size - 4, + MSM_BO_UNCACHED | MSM_BO_GPU_READONLY, gpu->aspace, &bo, iova); - if (IS_ERR(bo)) - return bo; - - ptr = msm_gem_vaddr(bo); - if (!ptr) { - drm_gem_object_unreference_unlocked(bo); - return ERR_PTR(-ENOMEM); - } - - if (iova) { - int ret = msm_gem_get_iova(bo, gpu->aspace, iova); - - if (ret) { - drm_gem_object_unreference_unlocked(bo); - return ERR_PTR(ret); - } - } + if (IS_ERR(ptr)) + return ERR_CAST(ptr); memcpy(ptr, &fw->data[4], fw->size - 4); return bo; @@ -789,6 +781,9 @@ static int a5xx_hw_init(struct msm_gpu *gpu) gpu_write(gpu, REG_A5XX_TPL1_ADDR_MODE_CNTL, 0x1); gpu_write(gpu, REG_A5XX_RBBM_SECVID_TSB_ADDR_MODE_CNTL, 0x1); + a5xx_gpu->timestamp_counter = adreno_get_counter(gpu, + MSM_COUNTER_GROUP_CP, 0, NULL, NULL); + /* Load the GPMU firmware before starting the HW init */ a5xx_gpmu_ucode_init(gpu); @@ -1175,11 +1170,26 @@ static int a5xx_pm_resume(struct msm_gpu *gpu) { int ret; + /* + * Between suspend/resumes the GPU clocks need to be turned off + * but not a complete power down, typically between frames. 
Set the + * memory retention flags on the GPU core clock to retain memory + * across clock toggles. + */ + if (gpu->core_clk) { + clk_set_flags(gpu->core_clk, CLKFLAG_RETAIN_PERIPH); + clk_set_flags(gpu->core_clk, CLKFLAG_RETAIN_MEM); + } + /* Turn on the core power */ ret = msm_gpu_pm_resume(gpu); if (ret) return ret; + /* If we are already up, don't mess with what works */ + if (gpu->active_cnt > 1) + return 0; + /* Turn the RBCCU domain first to limit the chances of voltage droop */ gpu_write(gpu, REG_A5XX_GPMU_RBCCU_POWER_CNTL, 0x778000); @@ -1210,22 +1220,33 @@ static int a5xx_pm_suspend(struct msm_gpu *gpu) { struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); - /* Clear the VBIF pipe before shutting down */ + /* Turn off the memory retention flag when not necessary */ + if (gpu->core_clk) { + clk_set_flags(gpu->core_clk, CLKFLAG_NORETAIN_PERIPH); + clk_set_flags(gpu->core_clk, CLKFLAG_NORETAIN_MEM); + } - gpu_write(gpu, REG_A5XX_VBIF_XIN_HALT_CTRL0, 0xF); - spin_until((gpu_read(gpu, REG_A5XX_VBIF_XIN_HALT_CTRL1) & 0xF) == 0xF); + /* Only do this next bit if we are about to go down */ + if (gpu->active_cnt == 1) { + /* Clear the VBIF pipe before shutting down */ - gpu_write(gpu, REG_A5XX_VBIF_XIN_HALT_CTRL0, 0); + gpu_write(gpu, REG_A5XX_VBIF_XIN_HALT_CTRL0, 0xF); + spin_until((gpu_read(gpu, REG_A5XX_VBIF_XIN_HALT_CTRL1) & 0xF) + == 0xF); - /* - * Reset the VBIF before power collapse to avoid issue with FIFO - * entries - */ + gpu_write(gpu, REG_A5XX_VBIF_XIN_HALT_CTRL0, 0); - if (adreno_is_a530(adreno_gpu)) { - /* These only need to be done for A530 */ - gpu_write(gpu, REG_A5XX_RBBM_BLOCK_SW_RESET_CMD, 0x003C0000); - gpu_write(gpu, REG_A5XX_RBBM_BLOCK_SW_RESET_CMD, 0x00000000); + /* + * Reset the VBIF before power collapse to avoid issue with FIFO + * entries + */ + if (adreno_is_a530(adreno_gpu)) { + /* These only need to be done for A530 */ + gpu_write(gpu, REG_A5XX_RBBM_BLOCK_SW_RESET_CMD, + 0x003C0000); + gpu_write(gpu, REG_A5XX_RBBM_BLOCK_SW_RESET_CMD, + 0x00000000); + } } return msm_gpu_pm_suspend(gpu); @@ -1233,8 +1254,11 @@ static int a5xx_pm_suspend(struct msm_gpu *gpu) static int a5xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value) { - *value = gpu_read64(gpu, REG_A5XX_RBBM_PERFCTR_CP_0_LO, - REG_A5XX_RBBM_PERFCTR_CP_0_HI); + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu); + + *value = adreno_read_counter(gpu, MSM_COUNTER_GROUP_CP, + a5xx_gpu->timestamp_counter); return 0; } @@ -1242,13 +1266,29 @@ static int a5xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value) #ifdef CONFIG_DEBUG_FS static void a5xx_show(struct msm_gpu *gpu, struct seq_file *m) { + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu); + bool enabled = test_bit(A5XX_HWCG_ENABLED, &a5xx_gpu->flags); + gpu->funcs->pm_resume(gpu); seq_printf(m, "status: %08x\n", gpu_read(gpu, REG_A5XX_RBBM_STATUS)); - gpu->funcs->pm_suspend(gpu); + + /* + * Temporarily disable hardware clock gating before going into + * adreno_show to avoid issues while reading the registers + */ + + if (enabled) + a5xx_set_hwcg(gpu, false); adreno_show(gpu, m); + + if (enabled) + a5xx_set_hwcg(gpu, true); + + gpu->funcs->pm_suspend(gpu); } #endif @@ -1267,7 +1307,6 @@ static const struct adreno_gpu_funcs funcs = { .pm_suspend = a5xx_pm_suspend, .pm_resume = a5xx_pm_resume, .recover = a5xx_recover, - .last_fence = adreno_last_fence, .submitted_fence = adreno_submitted_fence, .submit = a5xx_submit, .flush = a5xx_flush, diff 
--git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.h b/drivers/gpu/drm/msm/adreno/a5xx_gpu.h index 8eb3838ffe90..c30b65785ab6 100644 --- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.h +++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.h @@ -23,6 +23,7 @@ enum { A5XX_ZAP_SHADER_LOADED = 1, + A5XX_HWCG_ENABLED = 2, }; struct a5xx_gpu { @@ -56,6 +57,8 @@ struct a5xx_gpu { struct a5xx_smmu_info *smmu_info; struct drm_gem_object *smmu_info_bo; uint64_t smmu_info_iova; + + int timestamp_counter; }; #define to_a5xx_gpu(x) container_of(x, struct a5xx_gpu, base) @@ -67,6 +70,8 @@ struct a5xx_gpu { * PREEMPT_NONE - no preemption in progress. Next state START. * PREEMPT_START - The trigger is evaluating if preemption is possible. Next * states: TRIGGERED, NONE + * PREEMPT_ABORT - An intermediate state before moving back to NONE. Next + * state: NONE. * PREEMPT_TRIGGERED: A preemption has been executed on the hardware. Next * states: FAULTED, PENDING * PREEMPT_FAULTED: A preemption timed out (never completed). This will trigger @@ -78,6 +83,7 @@ struct a5xx_gpu { enum preempt_state { PREEMPT_NONE = 0, PREEMPT_START, + PREEMPT_ABORT, PREEMPT_TRIGGERED, PREEMPT_FAULTED, PREEMPT_PENDING, @@ -181,7 +187,10 @@ int a5xx_snapshot(struct msm_gpu *gpu, struct msm_snapshot *snapshot); /* Return true if we are in a preempt state */ static inline bool a5xx_in_preempt(struct a5xx_gpu *a5xx_gpu) { - return !(atomic_read(&a5xx_gpu->preempt_state) == PREEMPT_NONE); + int preempt_state = atomic_read(&a5xx_gpu->preempt_state); + + return !(preempt_state == PREEMPT_NONE || + preempt_state == PREEMPT_ABORT); } int a5xx_counters_init(struct adreno_gpu *adreno_gpu); diff --git a/drivers/gpu/drm/msm/adreno/a5xx_power.c b/drivers/gpu/drm/msm/adreno/a5xx_power.c index 0025922540df..647b61313fc2 100644 --- a/drivers/gpu/drm/msm/adreno/a5xx_power.c +++ b/drivers/gpu/drm/msm/adreno/a5xx_power.c @@ -458,18 +458,10 @@ void a5xx_gpmu_ucode_init(struct msm_gpu *gpu) */ bosize = (cmds_size + (cmds_size / TYPE4_MAX_PAYLOAD) + 1) << 2; - a5xx_gpu->gpmu_bo = msm_gem_new(drm, bosize, - MSM_BO_UNCACHED | MSM_BO_GPU_READONLY); - - if (IS_ERR(a5xx_gpu->gpmu_bo)) - goto err; - - if (msm_gem_get_iova(a5xx_gpu->gpmu_bo, gpu->aspace, - &a5xx_gpu->gpmu_iova)) - goto err; - - ptr = msm_gem_vaddr(a5xx_gpu->gpmu_bo); - if (!ptr) + ptr = msm_gem_kernel_new(drm, bosize, + MSM_BO_UNCACHED | MSM_BO_GPU_READONLY, gpu->aspace, + &a5xx_gpu->gpmu_bo, &a5xx_gpu->gpmu_iova); + if (IS_ERR(ptr)) goto err; while (cmds_size > 0) { diff --git a/drivers/gpu/drm/msm/adreno/a5xx_preempt.c b/drivers/gpu/drm/msm/adreno/a5xx_preempt.c index 57046089434c..44d4ca35fa09 100644 --- a/drivers/gpu/drm/msm/adreno/a5xx_preempt.c +++ b/drivers/gpu/drm/msm/adreno/a5xx_preempt.c @@ -15,41 +15,6 @@ #include "msm_iommu.h" #include "a5xx_gpu.h" -static void *alloc_kernel_bo(struct drm_device *drm, struct msm_gpu *gpu, - size_t size, uint32_t flags, struct drm_gem_object **bo, - u64 *iova) -{ - struct drm_gem_object *_bo; - u64 _iova; - void *ptr; - int ret; - - _bo = msm_gem_new(drm, size, flags); - - if (IS_ERR(_bo)) - return _bo; - - ret = msm_gem_get_iova(_bo, gpu->aspace, &_iova); - if (ret) - goto out; - - ptr = msm_gem_vaddr(_bo); - if (!ptr) { - ret = -ENOMEM; - goto out; - } - - if (bo) - *bo = _bo; - if (iova) - *iova = _iova; - - return ptr; -out: - drm_gem_object_unreference_unlocked(_bo); - return ERR_PTR(ret); -} - /* * Try to transition the preemption state from old to new.
Return * true on success or false if the original state wasn't 'old' @@ -100,16 +65,20 @@ static inline void update_wptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring) /* Return the highest priority ringbuffer with something in it */ static struct msm_ringbuffer *get_next_ring(struct msm_gpu *gpu) { - struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); unsigned long flags; int i; - for (i = gpu->nr_rings - 1; i >= 0; i--) { + /* + * Find the highest priority ringbuffer that isn't empty and jump + * to it (0 being the highest and gpu->nr_rings - 1 being the + * lowest) + */ + for (i = 0; i < gpu->nr_rings; i++) { bool empty; struct msm_ringbuffer *ring = gpu->rb[i]; spin_lock_irqsave(&ring->lock, flags); - empty = (get_wptr(ring) == adreno_gpu->memptrs->rptr[ring->id]); + empty = (get_wptr(ring) == ring->memptrs->rptr); spin_unlock_irqrestore(&ring->lock, flags); if (!empty) @@ -159,9 +128,20 @@ void a5xx_preempt_trigger(struct msm_gpu *gpu) * one do nothing except to update the wptr to the latest and greatest */ if (!ring || (a5xx_gpu->cur_ring == ring)) { - update_wptr(gpu, ring); + /* + * It's possible that while a preemption request is in progress + * from an irq context, a user context trying to submit might + * fail to update the write pointer, because it determines + * that the preempt state is not PREEMPT_NONE. + * + * Close the race by introducing an intermediate + * state PREEMPT_ABORT to let the submit path + * know that the ringbuffer is not going to change + * and can safely update the write pointer. + */ - /* Set the state back to NONE */ + set_preempt_state(a5xx_gpu, PREEMPT_ABORT); + update_wptr(gpu, a5xx_gpu->cur_ring); set_preempt_state(a5xx_gpu, PREEMPT_NONE); return; } @@ -176,10 +156,8 @@ void a5xx_preempt_trigger(struct msm_gpu *gpu) /* Set the SMMU info for the preemption */ if (a5xx_gpu->smmu_info) { - a5xx_gpu->smmu_info->ttbr0 = - adreno_gpu->memptrs->ttbr0[ring->id]; - a5xx_gpu->smmu_info->contextidr = - adreno_gpu->memptrs->contextidr[ring->id]; + a5xx_gpu->smmu_info->ttbr0 = ring->memptrs->ttbr0; + a5xx_gpu->smmu_info->contextidr = ring->memptrs->contextidr; } /* Set the address of the incoming preemption record */ @@ -278,10 +256,10 @@ static int preempt_init_ring(struct a5xx_gpu *a5xx_gpu, struct drm_gem_object *bo; u64 iova; - ptr = alloc_kernel_bo(gpu->dev, gpu, + ptr = msm_gem_kernel_new(gpu->dev, A5XX_PREEMPT_RECORD_SIZE + A5XX_PREEMPT_COUNTER_SIZE, MSM_BO_UNCACHED | MSM_BO_PRIVILEGED, - &bo, &iova); + gpu->aspace, &bo, &iova); if (IS_ERR(ptr)) return PTR_ERR(ptr); @@ -296,7 +274,7 @@ static int preempt_init_ring(struct a5xx_gpu *a5xx_gpu, ptr->info = 0; ptr->data = 0; ptr->cntl = MSM_GPU_RB_CNTL_DEFAULT; - ptr->rptr_addr = rbmemptr(adreno_gpu, ring->id, rptr); + ptr->rptr_addr = rbmemptr(ring, rptr); ptr->counter = iova + A5XX_PREEMPT_RECORD_SIZE; return 0; @@ -352,10 +330,10 @@ void a5xx_preempt_init(struct msm_gpu *gpu) } if (msm_iommu_allow_dynamic(gpu->aspace->mmu)) { - ptr = alloc_kernel_bo(gpu->dev, gpu, + ptr = msm_gem_kernel_new(gpu->dev, sizeof(struct a5xx_smmu_info), MSM_BO_UNCACHED | MSM_BO_PRIVILEGED, - &bo, &iova); + gpu->aspace, &bo, &iova); if (IS_ERR(ptr)) goto fail; diff --git a/drivers/gpu/drm/msm/adreno/a5xx_snapshot.c b/drivers/gpu/drm/msm/adreno/a5xx_snapshot.c index c2773cb325d5..d1c1ab460c95 100644 --- a/drivers/gpu/drm/msm/adreno/a5xx_snapshot.c +++ b/drivers/gpu/drm/msm/adreno/a5xx_snapshot.c @@ -214,28 +214,14 @@ struct crashdump { static int crashdump_init(struct msm_gpu *gpu, struct crashdump *crashdump) { - struct drm_device
*drm = gpu->dev; - int ret = -ENOMEM; + int ret = 0; - crashdump->bo = msm_gem_new_locked(drm, CRASHDUMP_BO_SIZE, - MSM_BO_UNCACHED); - if (IS_ERR(crashdump->bo)) { - ret = PTR_ERR(crashdump->bo); - crashdump->bo = NULL; - return ret; - } - - crashdump->ptr = msm_gem_vaddr(crashdump->bo); - if (!crashdump->ptr) - goto out; - - ret = msm_gem_get_iova(crashdump->bo, gpu->aspace, - &crashdump->iova); - -out: - if (ret) { - drm_gem_object_unreference(crashdump->bo); - crashdump->bo = NULL; + crashdump->ptr = msm_gem_kernel_new_locked(gpu->dev, + CRASHDUMP_BO_SIZE, MSM_BO_UNCACHED, + gpu->aspace, &crashdump->bo, &crashdump->iova); + if (IS_ERR(crashdump->ptr)) { + ret = PTR_ERR(crashdump->ptr); + crashdump->ptr = NULL; } return ret; diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c index a498a60cd52d..4e4709d6172f 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_device.c +++ b/drivers/gpu/drm/msm/adreno/adreno_device.c @@ -173,6 +173,9 @@ struct msm_gpu *adreno_load_gpu(struct drm_device *dev) ret = gpu->funcs->hw_init(gpu); if (ret) { dev_err(dev->dev, "gpu hw init failed: %d\n", ret); + mutex_lock(&dev->struct_mutex); + gpu->funcs->pm_suspend(gpu); + mutex_unlock(&dev->struct_mutex); gpu->funcs->destroy(gpu); gpu = NULL; } else { diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c index 81fa37ee9671..04e0056f2a49 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c @@ -51,6 +51,9 @@ int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value) if (adreno_gpu->funcs->get_timestamp) return adreno_gpu->funcs->get_timestamp(gpu, value); return -EINVAL; + case MSM_PARAM_NR_RINGS: + *value = gpu->nr_rings; + return 0; default: DBG("%s: invalid param: %u", gpu->name, param); return -EINVAL; @@ -90,7 +93,7 @@ int adreno_hw_init(struct msm_gpu *gpu) REG_ADRENO_CP_RB_BASE_HI, gpu->rb[0]->iova); adreno_gpu_write64(adreno_gpu, REG_ADRENO_CP_RB_RPTR_ADDR, - REG_ADRENO_CP_RB_RPTR_ADDR_HI, rbmemptr(adreno_gpu, 0, rptr)); + REG_ADRENO_CP_RB_RPTR_ADDR_HI, rbmemptr(gpu->rb[0], rptr)); return 0; } @@ -106,10 +109,11 @@ static uint32_t get_rptr(struct adreno_gpu *adreno_gpu, * ensure that it won't be. If not then this is why your * a430 stopped working. 
*/ - return adreno_gpu->memptrs->rptr[ring->id] = adreno_gpu_read( - adreno_gpu, REG_ADRENO_CP_RB_RPTR); - } else - return adreno_gpu->memptrs->rptr[ring->id]; + return ring->memptrs->rptr = + adreno_gpu_read(adreno_gpu, REG_ADRENO_CP_RB_RPTR); + } + + return ring->memptrs->rptr; } struct msm_ringbuffer *adreno_active_ring(struct msm_gpu *gpu) @@ -126,19 +130,8 @@ uint32_t adreno_submitted_fence(struct msm_gpu *gpu, return ring->submitted_fence; } -uint32_t adreno_last_fence(struct msm_gpu *gpu, struct msm_ringbuffer *ring) -{ - struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); - - if (!ring) - return 0; - - return adreno_gpu->memptrs->fence[ring->id]; -} - void adreno_recover(struct msm_gpu *gpu) { - struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); struct drm_device *dev = gpu->dev; struct msm_ringbuffer *ring; int ret, i; @@ -156,9 +149,8 @@ void adreno_recover(struct msm_gpu *gpu) ring->next = ring->start; /* reset completed fence seqno, discard anything pending: */ - adreno_gpu->memptrs->fence[ring->id] = - adreno_submitted_fence(gpu, ring); - adreno_gpu->memptrs->rptr[ring->id] = 0; + ring->memptrs->fence = adreno_submitted_fence(gpu, ring); + ring->memptrs->rptr = 0; } gpu->funcs->pm_resume(gpu); @@ -213,7 +205,7 @@ void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) OUT_PKT3(ring, CP_EVENT_WRITE, 3); OUT_RING(ring, CACHE_FLUSH_TS); - OUT_RING(ring, rbmemptr(adreno_gpu, ring->id, fence)); + OUT_RING(ring, rbmemptr(ring, fence)); OUT_RING(ring, submit->fence); /* we could maybe be clever and only CP_COND_EXEC the interrupt: */ @@ -297,7 +289,7 @@ void adreno_show(struct msm_gpu *gpu, struct seq_file *m) continue; seq_printf(m, "rb %d: fence: %d/%d\n", i, - adreno_last_fence(gpu, ring), + ring->memptrs->fence, adreno_submitted_fence(gpu, ring)); seq_printf(m, " rptr: %d\n", @@ -305,8 +297,6 @@ void adreno_show(struct msm_gpu *gpu, struct seq_file *m) seq_printf(m, "rb wptr: %d\n", get_wptr(ring)); } - gpu->funcs->pm_resume(gpu); - /* dump these out in a form that can be parsed by demsm: */ seq_printf(m, "IO:region %s 00000000 00020000\n", gpu->name); for (i = 0; adreno_gpu->registers[i] != ~0; i += 2) { @@ -319,8 +309,6 @@ void adreno_show(struct msm_gpu *gpu, struct seq_file *m) seq_printf(m, "IO:R %08x %08x\n", addr<<2, val); } } - - gpu->funcs->pm_suspend(gpu); } #endif @@ -347,7 +335,7 @@ void adreno_dump_info(struct msm_gpu *gpu) continue; dev_err(dev->dev, " ring %d: fence %d/%d rptr/wptr %x/%x\n", i, - adreno_last_fence(gpu, ring), + ring->memptrs->fence, adreno_submitted_fence(gpu, ring), get_rptr(adreno_gpu, ring), get_wptr(ring)); @@ -516,7 +504,6 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev, { struct adreno_platform_config *config = pdev->dev.platform_data; struct msm_gpu *gpu = &adreno_gpu->base; - struct msm_mmu *mmu; int ret; adreno_gpu->funcs = funcs; @@ -541,77 +528,19 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev, } ret = request_firmware(&adreno_gpu->pfp, adreno_gpu->info->pfpfw, drm->dev); - if (ret) { + if (ret) dev_err(drm->dev, "failed to load %s PFP firmware: %d\n", adreno_gpu->info->pfpfw, ret); - return ret; - } - mmu = gpu->aspace->mmu; - if (mmu) { - ret = mmu->funcs->attach(mmu, NULL, 0); - if (ret) - return ret; - } - - if (gpu->secure_aspace) { - mmu = gpu->secure_aspace->mmu; - if (mmu) { - ret = mmu->funcs->attach(mmu, NULL, 0); - if (ret) - return ret; - } - } - - adreno_gpu->memptrs_bo = msm_gem_new(drm, sizeof(*adreno_gpu->memptrs), - MSM_BO_UNCACHED); - if 
(IS_ERR(adreno_gpu->memptrs_bo)) { - ret = PTR_ERR(adreno_gpu->memptrs_bo); - adreno_gpu->memptrs_bo = NULL; - dev_err(drm->dev, "could not allocate memptrs: %d\n", ret); - return ret; - } - - adreno_gpu->memptrs = msm_gem_vaddr(adreno_gpu->memptrs_bo); - if (!adreno_gpu->memptrs) { - dev_err(drm->dev, "could not vmap memptrs\n"); - return -ENOMEM; - } - - ret = msm_gem_get_iova(adreno_gpu->memptrs_bo, gpu->aspace, - &adreno_gpu->memptrs_iova); - if (ret) { - dev_err(drm->dev, "could not map memptrs: %d\n", ret); - return ret; - } - - return 0; + return ret; } void adreno_gpu_cleanup(struct adreno_gpu *gpu) { - struct msm_gem_address_space *aspace = gpu->base.aspace; - - if (gpu->memptrs_bo) { - if (gpu->memptrs_iova) - msm_gem_put_iova(gpu->memptrs_bo, aspace); - drm_gem_object_unreference_unlocked(gpu->memptrs_bo); - } release_firmware(gpu->pm4); release_firmware(gpu->pfp); msm_gpu_cleanup(&gpu->base); - - if (aspace) { - aspace->mmu->funcs->detach(aspace->mmu); - msm_gem_address_space_put(aspace); - } - - if (gpu->base.secure_aspace) { - aspace = gpu->base.secure_aspace; - aspace->mmu->funcs->detach(aspace->mmu); - msm_gem_address_space_put(aspace); - } } static void adreno_snapshot_os(struct msm_gpu *gpu, @@ -665,7 +594,7 @@ static void adreno_snapshot_ringbuffer(struct msm_gpu *gpu, header.rptr = get_rptr(adreno_gpu, ring); header.wptr = get_wptr(ring); header.timestamp_queued = adreno_submitted_fence(gpu, ring); - header.timestamp_retired = adreno_last_fence(gpu, ring); + header.timestamp_retired = ring->memptrs->fence; /* Write the header even if the ringbuffer data is empty */ if (!SNAPSHOT_HEADER(snapshot, header, SNAPSHOT_SECTION_RB_V2, diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h index 9e622fa06ce4..c96189fb805b 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h @@ -83,22 +83,6 @@ struct adreno_info { const struct adreno_info *adreno_info(struct adreno_rev rev); -#define _sizeof(member) \ - sizeof(((struct adreno_rbmemptrs *) 0)->member[0]) - -#define _base(adreno_gpu, member) \ - ((adreno_gpu)->memptrs_iova + offsetof(struct adreno_rbmemptrs, member)) - -#define rbmemptr(adreno_gpu, index, member) \ - (_base((adreno_gpu), member) + ((index) * _sizeof(member))) - -struct adreno_rbmemptrs { - volatile uint32_t rptr[MSM_GPU_MAX_RINGS]; - volatile uint32_t fence[MSM_GPU_MAX_RINGS]; - volatile uint64_t ttbr0[MSM_GPU_MAX_RINGS]; - volatile unsigned int contextidr[MSM_GPU_MAX_RINGS]; -}; - struct adreno_counter { u32 lo; u32 hi; @@ -137,13 +121,6 @@ struct adreno_gpu { /* firmware: */ const struct firmware *pm4, *pfp; - /* ringbuffer rptr/wptr: */ - // TODO should this be in msm_ringbuffer? I think it would be - // different for z180.. - struct adreno_rbmemptrs *memptrs; - struct drm_gem_object *memptrs_bo; - uint64_t memptrs_iova; - /* * Register offsets are different between some GPUs. 
* GPU specific offsets will be exported by GPU specific @@ -240,7 +217,6 @@ static inline int adreno_is_a540(struct adreno_gpu *gpu) int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value); int adreno_hw_init(struct msm_gpu *gpu); -uint32_t adreno_last_fence(struct msm_gpu *gpu, struct msm_ringbuffer *ring); uint32_t adreno_submitted_fence(struct msm_gpu *gpu, struct msm_ringbuffer *ring); void adreno_recover(struct msm_gpu *gpu); diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_drm.c b/drivers/gpu/drm/msm/dsi-staging/dsi_drm.c index c34713a13332..309401eb3093 100644 --- a/drivers/gpu/drm/msm/dsi-staging/dsi_drm.c +++ b/drivers/gpu/drm/msm/dsi-staging/dsi_drm.c @@ -20,6 +20,7 @@ #include "msm_kms.h" #include "sde_connector.h" #include "dsi_drm.h" +#include "sde_trace.h" #define to_dsi_bridge(x) container_of((x), struct dsi_bridge, base) #define to_dsi_state(x) container_of((x), struct dsi_connector_state, base) @@ -138,19 +139,24 @@ static void dsi_bridge_pre_enable(struct drm_bridge *bridge) return; } + SDE_ATRACE_BEGIN("dsi_bridge_pre_enable"); rc = dsi_display_prepare(c_bridge->display); if (rc) { pr_err("[%d] DSI display prepare failed, rc=%d\n", c_bridge->id, rc); + SDE_ATRACE_END("dsi_bridge_pre_enable"); return; } + SDE_ATRACE_BEGIN("dsi_display_enable"); rc = dsi_display_enable(c_bridge->display); if (rc) { pr_err("[%d] DSI display enable failed, rc=%d\n", c_bridge->id, rc); (void)dsi_display_unprepare(c_bridge->display); } + SDE_ATRACE_END("dsi_display_enable"); + SDE_ATRACE_END("dsi_bridge_pre_enable"); } static void dsi_bridge_enable(struct drm_bridge *bridge) @@ -201,19 +207,25 @@ static void dsi_bridge_post_disable(struct drm_bridge *bridge) return; } + SDE_ATRACE_BEGIN("dsi_bridge_post_disable"); + SDE_ATRACE_BEGIN("dsi_display_disable"); rc = dsi_display_disable(c_bridge->display); if (rc) { pr_err("[%d] DSI display disable failed, rc=%d\n", c_bridge->id, rc); + SDE_ATRACE_END("dsi_display_disable"); return; } + SDE_ATRACE_END("dsi_display_disable"); rc = dsi_display_unprepare(c_bridge->display); if (rc) { pr_err("[%d] DSI display unprepare failed, rc=%d\n", c_bridge->id, rc); + SDE_ATRACE_END("dsi_bridge_post_disable"); return; } + SDE_ATRACE_END("dsi_bridge_post_disable"); } static void dsi_bridge_mode_set(struct drm_bridge *bridge, diff --git a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.c b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.c index 437f88f29a69..c98f4511d644 100644 --- a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.c +++ b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.c @@ -29,6 +29,8 @@ #include "sde_connector.h" #include "msm_drv.h" #include "sde_hdmi.h" +#include "sde_hdmi_regs.h" +#include "hdmi.h" static DEFINE_MUTEX(sde_hdmi_list_lock); static LIST_HEAD(sde_hdmi_list); @@ -370,6 +372,135 @@ static ssize_t _sde_hdmi_edid_vendor_name_read(struct file *file, return len; } +static ssize_t _sde_hdmi_src_hdcp14_support_read(struct file *file, + char __user *buff, + size_t count, + loff_t *ppos) +{ + struct sde_hdmi *display = file->private_data; + char buf[SZ_128]; + u32 len = 0; + + if (!display) + return -ENODEV; + + if (!display->ctrl.ctrl) { + SDE_ERROR("hdmi is NULL\n"); + return -ENOMEM; + } + + SDE_HDMI_DEBUG("%s +", __func__); + if (*ppos) + return 0; + + if (display->hdcp14_present) + len += snprintf(buf, SZ_128 - len, "true\n"); + else + len += snprintf(buf, SZ_128 - len, "false\n"); + + if (copy_to_user(buff, buf, len)) + return -EFAULT; + + *ppos += len; + SDE_HDMI_DEBUG("%s - ", __func__); + return len; +} + +static ssize_t 
_sde_hdmi_src_hdcp22_support_read(struct file *file, + char __user *buff, + size_t count, + loff_t *ppos) +{ + struct sde_hdmi *display = file->private_data; + char buf[SZ_128]; + u32 len = 0; + + if (!display) + return -ENODEV; + + if (!display->ctrl.ctrl) { + SDE_ERROR("hdmi is NULL\n"); + return -ENOMEM; + } + + SDE_HDMI_DEBUG("%s +", __func__); + if (*ppos) + return 0; + + if (display->src_hdcp22_support) + len += snprintf(buf, SZ_128 - len, "true\n"); + else + len += snprintf(buf, SZ_128 - len, "false\n"); + + if (copy_to_user(buff, buf, len)) + return -EFAULT; + + *ppos += len; + SDE_HDMI_DEBUG("%s - ", __func__); + return len; +} + +static ssize_t _sde_hdmi_sink_hdcp22_support_read(struct file *file, + char __user *buff, + size_t count, + loff_t *ppos) +{ + struct sde_hdmi *display = file->private_data; + char buf[SZ_128]; + u32 len = 0; + + if (!display) + return -ENODEV; + + if (!display->ctrl.ctrl) { + SDE_ERROR("hdmi is NULL\n"); + return -ENOMEM; + } + + SDE_HDMI_DEBUG("%s +", __func__); + if (*ppos) + return 0; + + if (display->sink_hdcp22_support) + len += snprintf(buf, SZ_128 - len, "true\n"); + else + len += snprintf(buf, SZ_128 - len, "false\n"); + + if (copy_to_user(buff, buf, len)) + return -EFAULT; + + *ppos += len; + SDE_HDMI_DEBUG("%s - ", __func__); + return len; +} + +static ssize_t _sde_hdmi_hdcp_state_read(struct file *file, + char __user *buff, + size_t count, + loff_t *ppos) +{ + struct sde_hdmi *display = file->private_data; + char buf[SZ_128]; + u32 len = 0; + + if (!display) + return -ENODEV; + + SDE_HDMI_DEBUG("%s +", __func__); + if (*ppos) + return 0; + + len += snprintf(buf, SZ_128 - len, "HDCP state : %s\n", + sde_hdcp_state_name(display->hdcp_status)); + + if (copy_to_user(buff, buf, len)) + return -EFAULT; + + *ppos += len; + SDE_HDMI_DEBUG("%s - ", __func__); + return len; +} + static const struct file_operations dump_info_fops = { .open = simple_open, .read = _sde_hdmi_debugfs_dump_info_read, @@ -405,13 +536,33 @@ static const struct file_operations edid_vendor_name_fops = { .read = _sde_hdmi_edid_vendor_name_read, }; -static u64 _sde_hdmi_clip_valid_pclk(struct drm_display_mode *mode, u64 pclk_in) +static const struct file_operations hdcp_src_14_support_fops = { + .open = simple_open, + .read = _sde_hdmi_src_hdcp14_support_read, +}; + +static const struct file_operations hdcp_src_22_support_fops = { + .open = simple_open, + .read = _sde_hdmi_src_hdcp22_support_read, +}; + +static const struct file_operations hdcp_sink_22_support_fops = { + .open = simple_open, + .read = _sde_hdmi_sink_hdcp22_support_read, +}; + +static const struct file_operations sde_hdmi_hdcp_state_fops = { + .open = simple_open, + .read = _sde_hdmi_hdcp_state_read, +}; + +static u64 _sde_hdmi_clip_valid_pclk(struct hdmi *hdmi, u64 pclk_in) { u32 pclk_delta, pclk; u64 pclk_clip = pclk_in; /* as per standard, 0.5% of deviation is allowed */ - pclk = mode->clock * HDMI_KHZ_TO_HZ; + pclk = hdmi->pixclock; pclk_delta = pclk * 5 / 1000; if (pclk_in < (pclk - pclk_delta)) @@ -425,6 +576,118 @@ static u64 _sde_hdmi_clip_valid_pclk(struct drm_display_mode *mode, u64 pclk_in) return pclk_clip; } +static void sde_hdmi_tx_hdcp_cb(void *ptr, enum sde_hdcp_states status) +{ + struct sde_hdmi *hdmi_ctrl = (struct sde_hdmi *)ptr; + struct hdmi *hdmi; + + if (!hdmi_ctrl) { + DEV_ERR("%s: invalid input\n", __func__); + return; + } + + hdmi = hdmi_ctrl->ctrl.ctrl; + hdmi_ctrl->hdcp_status = status; + queue_delayed_work(hdmi->workq, &hdmi_ctrl->hdcp_cb_work, HZ/4); +} + +void 
sde_hdmi_hdcp_off(struct sde_hdmi *hdmi_ctrl) +{ + + if (!hdmi_ctrl) { + SDE_ERROR("%s: invalid input\n", __func__); + return; + } + + if (hdmi_ctrl->hdcp_ops) + hdmi_ctrl->hdcp_ops->off(hdmi_ctrl->hdcp_data); + + flush_delayed_work(&hdmi_ctrl->hdcp_cb_work); + + hdmi_ctrl->hdcp_ops = NULL; +} + +static void sde_hdmi_tx_hdcp_cb_work(struct work_struct *work) +{ + struct sde_hdmi *hdmi_ctrl = NULL; + struct delayed_work *dw = to_delayed_work(work); + int rc = 0; + struct hdmi *hdmi; + + hdmi_ctrl = container_of(dw, struct sde_hdmi, hdcp_cb_work); + if (!hdmi_ctrl) { + DEV_DBG("%s: invalid input\n", __func__); + return; + } + + hdmi = hdmi_ctrl->ctrl.ctrl; + + switch (hdmi_ctrl->hdcp_status) { + case HDCP_STATE_AUTHENTICATED: + hdmi_ctrl->auth_state = true; + + if (sde_hdmi_tx_is_panel_on(hdmi_ctrl) && + sde_hdmi_tx_is_stream_shareable(hdmi_ctrl)) { + rc = sde_hdmi_config_avmute(hdmi, false); + } + + if (hdmi_ctrl->hdcp1_use_sw_keys && + hdmi_ctrl->hdcp14_present) { + if (!hdmi_ctrl->hdcp22_present) + hdcp1_set_enc(true); + } + break; + case HDCP_STATE_AUTH_FAIL: + if (hdmi_ctrl->hdcp1_use_sw_keys && hdmi_ctrl->hdcp14_present) { + if (hdmi_ctrl->auth_state && !hdmi_ctrl->hdcp22_present) + hdcp1_set_enc(false); + } + + hdmi_ctrl->auth_state = false; + + if (sde_hdmi_tx_is_encryption_set(hdmi_ctrl) || + !sde_hdmi_tx_is_stream_shareable(hdmi_ctrl)) + rc = sde_hdmi_config_avmute(hdmi, true); + + if (sde_hdmi_tx_is_panel_on(hdmi_ctrl)) { + pr_debug("%s: Reauthenticating\n", __func__); + if (hdmi_ctrl->hdcp_ops && hdmi_ctrl->hdcp_data) { + rc = hdmi_ctrl->hdcp_ops->reauthenticate( + hdmi_ctrl->hdcp_data); + if (rc) + pr_err("%s: HDCP reauth failed. rc=%d\n", + __func__, rc); + } else + pr_err("%s: NULL HDCP Ops and Data\n", + __func__); + } else { + pr_debug("%s: Not reauthenticating. 
Cable not conn\n", + __func__); + } + + break; + case HDCP_STATE_AUTH_ENC_NONE: + hdmi_ctrl->enc_lvl = HDCP_STATE_AUTH_ENC_NONE; + if (sde_hdmi_tx_is_panel_on(hdmi_ctrl)) + rc = sde_hdmi_config_avmute(hdmi, false); + break; + case HDCP_STATE_AUTH_ENC_1X: + case HDCP_STATE_AUTH_ENC_2P2: + hdmi_ctrl->enc_lvl = hdmi_ctrl->hdcp_status; + + if (sde_hdmi_tx_is_panel_on(hdmi_ctrl) && + sde_hdmi_tx_is_stream_shareable(hdmi_ctrl)) { + rc = sde_hdmi_config_avmute(hdmi, false); + } else { + rc = sde_hdmi_config_avmute(hdmi, true); + } + break; + default: + break; + /* do nothing */ + } +} + /** * _sde_hdmi_update_pll_delta() - Update the HDMI pixel clock as per input ppm * @@ -437,23 +700,31 @@ static u64 _sde_hdmi_clip_valid_pclk(struct drm_display_mode *mode, u64 pclk_in) static int _sde_hdmi_update_pll_delta(struct sde_hdmi *display, s32 ppm) { struct hdmi *hdmi = display->ctrl.ctrl; - struct drm_display_mode *current_mode = &display->mode; u64 cur_pclk, dst_pclk; u64 clip_pclk; int rc = 0; + mutex_lock(&display->display_lock); + if (!hdmi->power_on || !display->connected) { SDE_ERROR("HDMI display is not ready\n"); + mutex_unlock(&display->display_lock); + return -EINVAL; + } + + if (!display->pll_update_enable) { + SDE_ERROR("PLL update function is not enabled\n"); + mutex_unlock(&display->display_lock); return -EINVAL; } /* get current pclk */ - cur_pclk = hdmi->pixclock; + cur_pclk = hdmi->actual_pixclock; /* get desired pclk */ dst_pclk = cur_pclk * (1000000000 + ppm); do_div(dst_pclk, 1000000000); - clip_pclk = _sde_hdmi_clip_valid_pclk(current_mode, dst_pclk); + clip_pclk = _sde_hdmi_clip_valid_pclk(hdmi, dst_pclk); /* update pclk */ if (clip_pclk != cur_pclk) { @@ -462,13 +733,16 @@ static int _sde_hdmi_update_pll_delta(struct sde_hdmi *display, s32 ppm) rc = clk_set_rate(hdmi->pwr_clks[0], clip_pclk); if (rc < 0) { - SDE_ERROR("PLL update failed, reset clock rate\n"); + SDE_ERROR("HDMI PLL update failed\n"); + mutex_unlock(&display->display_lock); return rc; } - hdmi->pixclock = clip_pclk; + hdmi->actual_pixclock = clip_pclk; } + mutex_unlock(&display->display_lock); + return rc; } @@ -504,12 +778,119 @@ static const struct file_operations pll_delta_fops = { .write = _sde_hdmi_debugfs_pll_delta_write, }; +/** + * _sde_hdmi_enable_pll_update() - Enable the HDMI PLL update function + * + * @enable: non-zero to enable PLL update function, 0 to disable. + * return: 0 on success, non-zero in case of failure. + * + */ +static int _sde_hdmi_enable_pll_update(struct sde_hdmi *display, s32 enable) +{ + struct hdmi *hdmi = display->ctrl.ctrl; + int rc = 0; + + mutex_lock(&display->display_lock); + + if (!hdmi->power_on || !display->connected) { + SDE_ERROR("HDMI display is not ready\n"); + mutex_unlock(&display->display_lock); + return -EINVAL; + } + + if (!enable && hdmi->actual_pixclock != hdmi->pixclock) { + /* reset pixel clock when disable */ + rc = clk_set_rate(hdmi->pwr_clks[0], hdmi->pixclock); + if (rc < 0) { + SDE_ERROR("reset clock rate failed\n"); + mutex_unlock(&display->display_lock); + return rc; + } + } + hdmi->actual_pixclock = hdmi->pixclock; + + display->pll_update_enable = !!enable; + + mutex_unlock(&display->display_lock); + + SDE_DEBUG("HDMI PLL update: %s\n", + display->pll_update_enable ? 
"enable" : "disable"); + + return rc; +} + +static ssize_t _sde_hdmi_debugfs_pll_enable_read(struct file *file, + char __user *buff, size_t count, loff_t *ppos) +{ + struct sde_hdmi *display = file->private_data; + char *buf; + u32 len = 0; + + if (!display) + return -ENODEV; + + if (*ppos) + return 0; + + buf = kzalloc(SZ_1K, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + len += snprintf(buf, SZ_4K, "%s\n", + display->pll_update_enable ? "enable" : "disable"); + + if (copy_to_user(buff, buf, len)) { + kfree(buf); + return -EFAULT; + } + + *ppos += len; + + kfree(buf); + return len; +} + +static ssize_t _sde_hdmi_debugfs_pll_enable_write(struct file *file, + const char __user *user_buf, size_t count, loff_t *ppos) +{ + struct sde_hdmi *display = file->private_data; + char buf[10]; + int enable = 0; + + if (!display) + return -ENODEV; + + if (count >= sizeof(buf)) + return -EFAULT; + + if (copy_from_user(buf, user_buf, count)) + return -EFAULT; + + buf[count] = 0; /* end of string */ + + if (kstrtoint(buf, 0, &enable)) + return -EFAULT; + + _sde_hdmi_enable_pll_update(display, enable); + + return count; +} + +static const struct file_operations pll_enable_fops = { + .open = simple_open, + .read = _sde_hdmi_debugfs_pll_enable_read, + .write = _sde_hdmi_debugfs_pll_enable_write, +}; + static int _sde_hdmi_debugfs_init(struct sde_hdmi *display) { int rc = 0; struct dentry *dir, *dump_file, *edid_modes; struct dentry *edid_vsdb_info, *edid_hdr_info, *edid_hfvsdb_info; - struct dentry *edid_vcdb_info, *edid_vendor_name, *pll_file; + struct dentry *edid_vcdb_info, *edid_vendor_name; + struct dentry *src_hdcp14_support, *src_hdcp22_support; + struct dentry *sink_hdcp22_support, *hdmi_hdcp_state; + struct dentry *pll_delta_file, *pll_enable_file; dir = debugfs_create_dir(display->name, NULL); if (!dir) { @@ -531,18 +912,30 @@ static int _sde_hdmi_debugfs_init(struct sde_hdmi *display) goto error_remove_dir; } - pll_file = debugfs_create_file("pll_delta", + pll_delta_file = debugfs_create_file("pll_delta", 0644, dir, display, &pll_delta_fops); - if (IS_ERR_OR_NULL(pll_file)) { - rc = PTR_ERR(pll_file); + if (IS_ERR_OR_NULL(pll_delta_file)) { + rc = PTR_ERR(pll_delta_file); SDE_ERROR("[%s]debugfs create pll_delta file failed, rc=%d\n", display->name, rc); goto error_remove_dir; } + pll_enable_file = debugfs_create_file("pll_enable", + 0644, + dir, + display, + &pll_enable_fops); + if (IS_ERR_OR_NULL(pll_enable_file)) { + rc = PTR_ERR(pll_enable_file); + SDE_ERROR("[%s]debugfs create pll_enable file failed, rc=%d\n", + display->name, rc); + goto error_remove_dir; + } + edid_modes = debugfs_create_file("edid_modes", 0444, dir, @@ -620,6 +1013,58 @@ static int _sde_hdmi_debugfs_init(struct sde_hdmi *display) goto error_remove_dir; } + src_hdcp14_support = debugfs_create_file("src_hdcp14_support", + 0444, + dir, + display, + &hdcp_src_14_support_fops); + + if (IS_ERR_OR_NULL(src_hdcp14_support)) { + rc = PTR_ERR(src_hdcp14_support); + SDE_ERROR("[%s]debugfs create file failed, rc=%d\n", + display->name, rc); + goto error_remove_dir; + } + + src_hdcp22_support = debugfs_create_file("src_hdcp22_support", + 0444, + dir, + display, + &hdcp_src_22_support_fops); + + if (IS_ERR_OR_NULL(src_hdcp22_support)) { + rc = PTR_ERR(src_hdcp22_support); + SDE_ERROR("[%s]debugfs create file failed, rc=%d\n", + display->name, rc); + goto error_remove_dir; + } + + sink_hdcp22_support = debugfs_create_file("sink_hdcp22_support", + 0444, + dir, + display, + &hdcp_sink_22_support_fops); + + if 
(IS_ERR_OR_NULL(sink_hdcp22_support)) { + rc = PTR_ERR(sink_hdcp22_support); + SDE_ERROR("[%s]debugfs create file failed, rc=%d\n", + display->name, rc); + goto error_remove_dir; + } + + hdmi_hdcp_state = debugfs_create_file("hdmi_hdcp_state", + 0444, + dir, + display, + &sde_hdmi_hdcp_state_fops); + + if (IS_ERR_OR_NULL(hdmi_hdcp_state)) { + rc = PTR_ERR(hdmi_hdcp_state); + SDE_ERROR("[%s]debugfs create file failed, rc=%d\n", + display->name, rc); + goto error_remove_dir; + } + display->root = dir; return rc; error_remove_dir: @@ -808,6 +1253,13 @@ static int _sde_hdmi_hpd_enable(struct sde_hdmi *sde_hdmi) uint32_t hpd_ctrl; int i, ret; unsigned long flags; + struct drm_connector *connector; + struct msm_drm_private *priv; + struct sde_kms *sde_kms; + + connector = hdmi->connector; + priv = connector->dev->dev_private; + sde_kms = to_sde_kms(priv->kms); for (i = 0; i < config->hpd_reg_cnt; i++) { ret = regulator_enable(hdmi->hpd_regs[i]); @@ -847,9 +1299,11 @@ static int _sde_hdmi_hpd_enable(struct sde_hdmi *sde_hdmi) } } - sde_hdmi_set_mode(hdmi, false); - _sde_hdmi_phy_reset(hdmi); - sde_hdmi_set_mode(hdmi, true); + if (!sde_kms->splash_info.handoff) { + sde_hdmi_set_mode(hdmi, false); + _sde_hdmi_phy_reset(hdmi); + sde_hdmi_set_mode(hdmi, true); + } hdmi_write(hdmi, REG_HDMI_USEC_REFTIMER, 0x0001001b); @@ -919,6 +1373,23 @@ static void _sde_hdmi_cec_update_phys_addr(struct sde_hdmi *display) else cec_notifier_set_phys_addr(display->notifier, CEC_PHYS_ADDR_INVALID); + +} + +static void _sde_hdmi_init_ddc(struct sde_hdmi *display, struct hdmi *hdmi) +{ + display->ddc_ctrl.io = &display->io[HDMI_TX_CORE_IO]; + init_completion(&display->ddc_ctrl.rx_status_done); +} + +static void _sde_hdmi_map_regs(struct sde_hdmi *display, struct hdmi *hdmi) +{ + display->io[HDMI_TX_CORE_IO].base = hdmi->mmio; + display->io[HDMI_TX_CORE_IO].len = hdmi->mmio_len; + display->io[HDMI_TX_QFPROM_IO].base = hdmi->qfprom_mmio; + display->io[HDMI_TX_QFPROM_IO].len = hdmi->qfprom_mmio_len; + display->io[HDMI_TX_HDCP_IO].base = hdmi->hdcp_mmio; + display->io[HDMI_TX_HDCP_IO].len = hdmi->hdcp_mmio_len; } static void _sde_hdmi_hotplug_work(struct work_struct *work) @@ -997,26 +1468,39 @@ static void _sde_hdmi_cec_irq(struct sde_hdmi *sde_hdmi) static irqreturn_t _sde_hdmi_irq(int irq, void *dev_id) { - struct sde_hdmi *sde_hdmi = dev_id; + struct sde_hdmi *display = dev_id; struct hdmi *hdmi; - if (!sde_hdmi || !sde_hdmi->ctrl.ctrl) { - SDE_ERROR("sde_hdmi=%p or hdmi is NULL\n", sde_hdmi); + if (!display || !display->ctrl.ctrl) { + SDE_ERROR("sde_hdmi=%pK or hdmi is NULL\n", display); return IRQ_NONE; } - hdmi = sde_hdmi->ctrl.ctrl; + + hdmi = display->ctrl.ctrl; /* Process HPD: */ - _sde_hdmi_connector_irq(sde_hdmi); + _sde_hdmi_connector_irq(display); + + /* Process Scrambling ISR */ + sde_hdmi_ddc_scrambling_isr((void *)display); + + /* Process DDC2 */ + sde_hdmi_ddc_hdcp2p2_isr((void *)display); /* Process DDC: */ hdmi_i2c_irq(hdmi->i2c); /* Process HDCP: */ - if (hdmi->hdcp_ctrl && hdmi->is_hdcp_supported) - hdmi_hdcp_ctrl_irq(hdmi->hdcp_ctrl); + if (display->hdcp_ops && display->hdcp_data) { + if (display->hdcp_ops->isr) { + if (display->hdcp_ops->isr( + display->hdcp_data)) + DEV_ERR("%s: hdcp_1x_isr failed\n", + __func__); + } + } /* Process CEC: */ - _sde_hdmi_cec_irq(sde_hdmi); + _sde_hdmi_cec_irq(display); return IRQ_HANDLED; } @@ -1192,84 +1676,8 @@ void sde_hdmi_set_mode(struct hdmi *hdmi, bool power_on) power_on ? 
"Enable" : "Disable", ctrl); } -int sde_hdmi_ddc_read(struct hdmi *hdmi, u16 addr, u8 offset, - u8 *data, u16 data_len) -{ - int rc; - int retry = 5; - struct i2c_msg msgs[] = { - { - .addr = addr >> 1, - .flags = 0, - .len = 1, - .buf = &offset, - }, { - .addr = addr >> 1, - .flags = I2C_M_RD, - .len = data_len, - .buf = data, - } - }; - - SDE_HDMI_DEBUG("Start DDC read"); - retry: - rc = i2c_transfer(hdmi->i2c, msgs, 2); - - retry--; - if (rc == 2) - rc = 0; - else if (retry > 0) - goto retry; - else - rc = -EIO; - - SDE_HDMI_DEBUG("End DDC read %d", rc); - - return rc; -} - #define DDC_WRITE_MAX_BYTE_NUM 32 -int sde_hdmi_ddc_write(struct hdmi *hdmi, u16 addr, u8 offset, - u8 *data, u16 data_len) -{ - int rc; - int retry = 10; - u8 buf[DDC_WRITE_MAX_BYTE_NUM]; - struct i2c_msg msgs[] = { - { - .addr = addr >> 1, - .flags = 0, - .len = 1, - } - }; - - SDE_HDMI_DEBUG("Start DDC write"); - if (data_len > (DDC_WRITE_MAX_BYTE_NUM - 1)) { - SDE_ERROR("%s: write size too big\n", __func__); - return -ERANGE; - } - - buf[0] = offset; - memcpy(&buf[1], data, data_len); - msgs[0].buf = buf; - msgs[0].len = data_len + 1; - retry: - rc = i2c_transfer(hdmi->i2c, msgs, 1); - - retry--; - if (rc == 1) - rc = 0; - else if (retry > 0) - goto retry; - else - rc = -EIO; - - SDE_HDMI_DEBUG("End DDC write %d", rc); - - return rc; -} - int sde_hdmi_scdc_read(struct hdmi *hdmi, u32 data_type, u32 *val) { int rc = 0; @@ -1326,7 +1734,8 @@ int sde_hdmi_scdc_read(struct hdmi *hdmi, u32 data_type, u32 *val) break; } - rc = sde_hdmi_ddc_read(hdmi, dev_addr, offset, data_buf, data_len); + rc = hdmi_ddc_read(hdmi, dev_addr, offset, data_buf, + data_len, true); if (rc) { SDE_ERROR("DDC Read failed for %d\n", data_type); return rc; @@ -1398,8 +1807,8 @@ int sde_hdmi_scdc_write(struct hdmi *hdmi, u32 data_type, u32 val) dev_addr = 0xA8; data_len = 1; offset = HDMI_SCDC_TMDS_CONFIG; - rc = sde_hdmi_ddc_read(hdmi, dev_addr, offset, &read_val, - data_len); + rc = hdmi_ddc_read(hdmi, dev_addr, offset, &read_val, + data_len, true); if (rc) { SDE_ERROR("scdc read failed\n"); return rc; @@ -1423,7 +1832,8 @@ int sde_hdmi_scdc_write(struct hdmi *hdmi, u32 data_type, u32 val) return -EINVAL; } - rc = sde_hdmi_ddc_write(hdmi, dev_addr, offset, data_buf, data_len); + rc = hdmi_ddc_write(hdmi, dev_addr, offset, data_buf, + data_len, true); if (rc) { SDE_ERROR("DDC Read failed for %d\n", data_type); return rc; @@ -1465,6 +1875,242 @@ int sde_hdmi_get_info(struct msm_display_info *info, return rc; } +static void sde_hdmi_panel_set_hdr_infoframe(struct sde_hdmi *display, +struct drm_msm_ext_panel_hdr_metadata *hdr_meta) +{ + u32 packet_payload = 0; + u32 packet_header = 0; + u32 packet_control = 0; + u32 const type_code = 0x87; + u32 const version = 0x01; + u32 const length = 0x1a; + u32 const descriptor_id = 0x00; + struct hdmi *hdmi; + struct drm_connector *connector; + + if (!display || !hdr_meta) { + SDE_ERROR("invalid input\n"); + return; + } + + hdmi = display->ctrl.ctrl; + connector = display->ctrl.ctrl->connector; + + if (!hdmi || !connector) { + SDE_ERROR("invalid input\n"); + return; + } + + /* Setup Packet header and payload */ + packet_header = type_code | (version << 8) | (length << 16); + hdmi_write(hdmi, HDMI_GENERIC0_HDR, packet_header); + + packet_payload = (hdr_meta->eotf << 8); + if (connector->hdr_metadata_type_one) { + packet_payload |= (descriptor_id << 16) + | (HDMI_GET_LSB(hdr_meta->display_primaries_x[0]) + << 24); + hdmi_write(hdmi, HDMI_GENERIC0_0, packet_payload); + } else { + pr_debug("Metadata Type 1 not 
supported\n"); + hdmi_write(hdmi, HDMI_GENERIC0_0, packet_payload); + goto enable_packet_control; + } + + packet_payload = + (HDMI_GET_MSB(hdr_meta->display_primaries_x[0])) + | (HDMI_GET_LSB(hdr_meta->display_primaries_y[0]) << 8) + | (HDMI_GET_MSB(hdr_meta->display_primaries_y[0]) << 16) + | (HDMI_GET_LSB(hdr_meta->display_primaries_x[1]) << 24); + hdmi_write(hdmi, HDMI_GENERIC0_1, packet_payload); + + packet_payload = + (HDMI_GET_MSB(hdr_meta->display_primaries_x[1])) + | (HDMI_GET_LSB(hdr_meta->display_primaries_y[1]) << 8) + | (HDMI_GET_MSB(hdr_meta->display_primaries_y[1]) << 16) + | (HDMI_GET_LSB(hdr_meta->display_primaries_x[2]) << 24); + hdmi_write(hdmi, HDMI_GENERIC0_2, packet_payload); + + packet_payload = + (HDMI_GET_MSB(hdr_meta->display_primaries_x[2])) + | (HDMI_GET_LSB(hdr_meta->display_primaries_y[2]) << 8) + | (HDMI_GET_MSB(hdr_meta->display_primaries_y[2]) << 16) + | (HDMI_GET_LSB(hdr_meta->white_point_x) << 24); + hdmi_write(hdmi, HDMI_GENERIC0_3, packet_payload); + + packet_payload = + (HDMI_GET_MSB(hdr_meta->white_point_x)) + | (HDMI_GET_LSB(hdr_meta->white_point_y) << 8) + | (HDMI_GET_MSB(hdr_meta->white_point_y) << 16) + | (HDMI_GET_LSB(hdr_meta->max_luminance) << 24); + hdmi_write(hdmi, HDMI_GENERIC0_4, packet_payload); + + packet_payload = + (HDMI_GET_MSB(hdr_meta->max_luminance)) + | (HDMI_GET_LSB(hdr_meta->min_luminance) << 8) + | (HDMI_GET_MSB(hdr_meta->min_luminance) << 16) + | (HDMI_GET_LSB(hdr_meta->max_content_light_level) << 24); + hdmi_write(hdmi, HDMI_GENERIC0_5, packet_payload); + + packet_payload = + (HDMI_GET_MSB(hdr_meta->max_content_light_level)) + | (HDMI_GET_LSB(hdr_meta->max_average_light_level) << 8) + | (HDMI_GET_MSB(hdr_meta->max_average_light_level) << 16); + hdmi_write(hdmi, HDMI_GENERIC0_6, packet_payload); + +enable_packet_control: + /* + * GENERIC0_LINE | GENERIC0_CONT | GENERIC0_SEND + * Setup HDMI TX generic packet control + * Enable this packet to transmit every frame + * Enable HDMI TX engine to transmit Generic packet 1 + */ + packet_control = hdmi_read(hdmi, HDMI_GEN_PKT_CTRL); + packet_control |= BIT(0) | BIT(1) | BIT(2) | BIT(16); + hdmi_write(hdmi, HDMI_GEN_PKT_CTRL, packet_control); +} + +static void sde_hdmi_update_colorimetry(struct sde_hdmi *display, + bool use_bt2020) +{ + struct hdmi *hdmi; + struct drm_connector *connector; + bool mode_is_yuv = false; + struct drm_display_mode *mode; + u32 mode_fmt_flags = 0; + u8 checksum; + u32 avi_info0 = 0; + u32 avi_info1 = 0; + u8 avi_iframe[HDMI_AVI_INFOFRAME_BUFFER_SIZE] = {0}; + u8 *avi_frame = &avi_iframe[HDMI_INFOFRAME_HEADER_SIZE]; + struct hdmi_avi_infoframe info; + + if (!display) { + SDE_ERROR("invalid input\n"); + return; + } + + hdmi = display->ctrl.ctrl; + + if (!hdmi) { + SDE_ERROR("invalid input\n"); + return; + } + + connector = display->ctrl.ctrl->connector; + + if (!connector) { + SDE_ERROR("invalid input\n"); + return; + } + + if (!connector->hdr_supported) { + SDE_DEBUG("HDR is not supported\n"); + return; + } + + /* If sink doesn't support BT2020, just return */ + if (!(connector->color_enc_fmt & DRM_EDID_COLORIMETRY_BT2020_YCC) || + !(connector->color_enc_fmt & DRM_EDID_COLORIMETRY_BT2020_RGB)) { + SDE_DEBUG("BT2020 colorimetry is not supported\n"); + return; + } + + /* If there is no change in colorimetry, just return */ + if (use_bt2020 && display->bt2020_colorimetry) + return; + else if (!use_bt2020 && !display->bt2020_colorimetry) + return; + + mode = &display->mode; + /* Cache the format flags before clearing */ + mode_fmt_flags = mode->flags; + /** + * 
Clear the RGB/YUV format flags before calling upstream API + * as the API also compares the flags and then returns a mode + */ + mode->flags &= ~SDE_DRM_MODE_FLAG_FMT_MASK; + drm_hdmi_avi_infoframe_from_display_mode(&info, mode); + /* Restore the format flags */ + mode->flags = mode_fmt_flags; + + /* Mode should only support YUV and not both to set the flag */ + if ((mode->private_flags & MSM_MODE_FLAG_COLOR_FORMAT_YCBCR420) + && !(mode->private_flags & MSM_MODE_FLAG_COLOR_FORMAT_RGB444)) { + mode_is_yuv = true; + } + + + if (!display->bt2020_colorimetry && use_bt2020) { + /** + * 1. Update colorimetry to use extended + * 2. Change extended to use BT2020 + * 3. Change colorspace based on mode + * 4. Use limited as BT2020 is always limited + */ + info.colorimetry = SDE_HDMI_USE_EXTENDED_COLORIMETRY; + info.extended_colorimetry = SDE_HDMI_BT2020_COLORIMETRY; + if (mode_is_yuv) + info.colorspace = HDMI_COLORSPACE_YUV420; + if (connector->yuv_qs) + info.ycc_quantization_range = + HDMI_YCC_QUANTIZATION_RANGE_LIMITED; + } else if (display->bt2020_colorimetry && !use_bt2020) { + /** + * 1. Update colorimetry to non-extended + * 2. Change colorspace based on mode + * 3. Restore quantization to full if QS + * is enabled + */ + info.colorimetry = SDE_HDMI_DEFAULT_COLORIMETRY; + if (mode_is_yuv) + info.colorspace = HDMI_COLORSPACE_YUV420; + if (connector->yuv_qs) + info.ycc_quantization_range = + HDMI_YCC_QUANTIZATION_RANGE_FULL; + } + + hdmi_avi_infoframe_pack(&info, avi_iframe, sizeof(avi_iframe)); + checksum = avi_iframe[HDMI_INFOFRAME_HEADER_SIZE - 1]; + avi_info0 = checksum | + LEFT_SHIFT_BYTE(avi_frame[0]) | + LEFT_SHIFT_WORD(avi_frame[1]) | + LEFT_SHIFT_24BITS(avi_frame[2]); + + avi_info1 = avi_frame[3] | + LEFT_SHIFT_BYTE(avi_frame[4]) | + LEFT_SHIFT_WORD(avi_frame[5]) | + LEFT_SHIFT_24BITS(avi_frame[6]); + + hdmi_write(hdmi, REG_HDMI_AVI_INFO(0), avi_info0); + hdmi_write(hdmi, REG_HDMI_AVI_INFO(1), avi_info1); + display->bt2020_colorimetry = use_bt2020; +} + +static void sde_hdmi_clear_hdr_infoframe(struct sde_hdmi *display) +{ + struct hdmi *hdmi; + struct drm_connector *connector; + u32 packet_control = 0; + + if (!display) { + SDE_ERROR("invalid input\n"); + return; + } + + hdmi = display->ctrl.ctrl; + connector = display->ctrl.ctrl->connector; + + if (!hdmi || !connector) { + SDE_ERROR("invalid input\n"); + return; + } + + packet_control = hdmi_read(hdmi, HDMI_GEN_PKT_CTRL); + packet_control &= ~HDMI_GEN_PKT_CTRL_CLR_MASK; + hdmi_write(hdmi, HDMI_GEN_PKT_CTRL, packet_control); +} + int sde_hdmi_set_property(struct drm_connector *connector, struct drm_connector_state *state, int property_index, @@ -1476,17 +2122,42 @@ int sde_hdmi_set_property(struct drm_connector *connector, if (!connector || !display) { SDE_ERROR("connector=%pK or display=%pK is NULL\n", connector, display); - return 0; + return -EINVAL; } SDE_DEBUG("\n"); - if (property_index == CONNECTOR_PROP_PLL_DELTA) + if (property_index == CONNECTOR_PROP_PLL_ENABLE) + rc = _sde_hdmi_enable_pll_update(display, value); + else if (property_index == CONNECTOR_PROP_PLL_DELTA) rc = _sde_hdmi_update_pll_delta(display, value); return rc; } +int sde_hdmi_get_property(struct drm_connector *connector, + struct drm_connector_state *state, + int property_index, + uint64_t *value, + void *display) +{ + struct sde_hdmi *hdmi_display = display; + int rc = 0; + + if (!connector || !hdmi_display) { + SDE_ERROR("connector=%pK or display=%pK is NULL\n", + connector, hdmi_display); + return -EINVAL; + } + + mutex_lock(&hdmi_display->display_lock); + 
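/*
 * Illustrative note: the CONNECTOR_PROP_PLL_DELTA value handled by
 * sde_hdmi_set_property() above ends up in _sde_hdmi_update_pll_delta(),
 * which computes
 *   dst_pclk = cur_pclk * (1000000000 + delta) / 1000000000,
 * i.e. the delta is a 1e-9 fraction of the current rate. For example, a
 * delta of 1000 moves a 148500000 Hz pixel clock to 148500148 Hz (about
 * +1 ppm) before the result is clipped against the mode's valid pixel
 * clock range and handed to clk_set_rate().
 */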
if (property_index == CONNECTOR_PROP_PLL_ENABLE) + *value = hdmi_display->pll_update_enable ? 1 : 0; + mutex_unlock(&hdmi_display->display_lock); + + return rc; +} + u32 sde_hdmi_get_num_of_displays(void) { u32 count = 0; @@ -1540,6 +2211,145 @@ int sde_hdmi_connector_pre_deinit(struct drm_connector *connector, return 0; } +static void _sde_hdmi_get_tx_version(struct sde_hdmi *sde_hdmi) +{ + struct hdmi *hdmi = sde_hdmi->ctrl.ctrl; + + sde_hdmi->hdmi_tx_version = hdmi_read(hdmi, REG_HDMI_VERSION); + sde_hdmi->hdmi_tx_major_version = + SDE_GET_MAJOR_VER(sde_hdmi->hdmi_tx_version); + + switch (sde_hdmi->hdmi_tx_major_version) { + case (HDMI_TX_VERSION_3): + sde_hdmi->max_pclk_khz = HDMI_TX_3_MAX_PCLK_RATE; + break; + case (HDMI_TX_VERSION_4): + sde_hdmi->max_pclk_khz = HDMI_TX_4_MAX_PCLK_RATE; + break; + default: + sde_hdmi->max_pclk_khz = HDMI_DEFAULT_MAX_PCLK_RATE; + break; + } + SDE_DEBUG("sde_hdmi->hdmi_tx_version = 0x%x\n", + sde_hdmi->hdmi_tx_version); + SDE_DEBUG("sde_hdmi->hdmi_tx_major_version = 0x%x\n", + sde_hdmi->hdmi_tx_major_version); + SDE_DEBUG("sde_hdmi->max_pclk_khz = 0x%x\n", + sde_hdmi->max_pclk_khz); +} + +static int sde_hdmi_tx_check_capability(struct sde_hdmi *sde_hdmi) +{ + u32 hdmi_disabled, hdcp_disabled, reg_val; + int ret = 0; + struct hdmi *hdmi = sde_hdmi->ctrl.ctrl; + + /* check if hdmi and hdcp are disabled */ + if (sde_hdmi->hdmi_tx_major_version < HDMI_TX_VERSION_4) { + hdcp_disabled = hdmi_qfprom_read(hdmi, + QFPROM_RAW_FEAT_CONFIG_ROW0_LSB) & BIT(31); + + hdmi_disabled = hdmi_qfprom_read(hdmi, + QFPROM_RAW_FEAT_CONFIG_ROW0_MSB) & BIT(0); + } else { + reg_val = hdmi_qfprom_read(hdmi, + QFPROM_RAW_FEAT_CONFIG_ROW0_LSB + QFPROM_RAW_VERSION_4); + hdcp_disabled = reg_val & BIT(12); + + hdmi_disabled = reg_val & BIT(13); + + reg_val = hdmi_qfprom_read(hdmi, SEC_CTRL_HW_VERSION); + + SDE_DEBUG("SEC_CTRL_HW_VERSION reg_val = 0x%x\n", reg_val); + /* + * With HDCP enabled on capable hardware, check if HW + * or SW keys should be used. + */ + if (!hdcp_disabled && (reg_val >= HDCP_SEL_MIN_SEC_VERSION)) { + reg_val = hdmi_qfprom_read(hdmi, + QFPROM_RAW_FEAT_CONFIG_ROW0_MSB + + QFPROM_RAW_VERSION_4); + + if (!(reg_val & BIT(23))) + sde_hdmi->hdcp1_use_sw_keys = true; + } + } + + if (sde_hdmi->hdmi_tx_major_version >= HDMI_TX_VERSION_4) + sde_hdmi->dc_feature_supported = true; + + SDE_DEBUG("%s: Features \n", __func__, + hdmi_disabled ? "OFF" : "ON", + hdcp_disabled ? "OFF" : "ON", + sde_hdmi->dc_feature_supported ? 
"ON" : "OFF"); + + if (hdmi_disabled) { + DEV_ERR("%s: HDMI disabled\n", __func__); + ret = -ENODEV; + goto end; + } + + sde_hdmi->hdcp14_present = !hdcp_disabled; + + end: + return ret; +} /* hdmi_tx_check_capability */ + +static int _sde_hdmi_init_hdcp(struct sde_hdmi *hdmi_ctrl) +{ + struct sde_hdcp_init_data hdcp_init_data; + void *hdcp_data; + int rc = 0; + struct hdmi *hdmi; + + if (!hdmi_ctrl) { + SDE_ERROR("sde_hdmi is NULL\n"); + return -EINVAL; + } + + hdmi = hdmi_ctrl->ctrl.ctrl; + hdcp_init_data.phy_addr = hdmi->mmio_phy_addr; + hdcp_init_data.core_io = &hdmi_ctrl->io[HDMI_TX_CORE_IO]; + hdcp_init_data.qfprom_io = &hdmi_ctrl->io[HDMI_TX_QFPROM_IO]; + hdcp_init_data.hdcp_io = &hdmi_ctrl->io[HDMI_TX_HDCP_IO]; + hdcp_init_data.mutex = &hdmi_ctrl->hdcp_mutex; + hdcp_init_data.workq = hdmi->workq; + hdcp_init_data.notify_status = sde_hdmi_tx_hdcp_cb; + hdcp_init_data.cb_data = (void *)hdmi_ctrl; + hdcp_init_data.hdmi_tx_ver = hdmi_ctrl->hdmi_tx_major_version; + hdcp_init_data.sec_access = true; + hdcp_init_data.client_id = HDCP_CLIENT_HDMI; + hdcp_init_data.ddc_ctrl = &hdmi_ctrl->ddc_ctrl; + + if (hdmi_ctrl->hdcp14_present) { + hdcp_data = sde_hdcp_1x_init(&hdcp_init_data); + + if (IS_ERR_OR_NULL(hdcp_data)) { + DEV_ERR("%s: hdcp 1.4 init failed\n", __func__); + rc = -EINVAL; + kfree(hdcp_data); + goto end; + } else { + hdmi_ctrl->hdcp_feat_data[SDE_HDCP_1x] = hdcp_data; + SDE_HDMI_DEBUG("%s: HDCP 1.4 initialized\n", __func__); + } + } + + hdcp_data = sde_hdmi_hdcp2p2_init(&hdcp_init_data); + + if (IS_ERR_OR_NULL(hdcp_data)) { + DEV_ERR("%s: hdcp 2.2 init failed\n", __func__); + rc = -EINVAL; + goto end; + } else { + hdmi_ctrl->hdcp_feat_data[SDE_HDCP_2P2] = hdcp_data; + SDE_HDMI_DEBUG("%s: HDCP 2.2 initialized\n", __func__); + } + +end: + return rc; +} + int sde_hdmi_connector_post_init(struct drm_connector *connector, void *info, void *display) @@ -1572,6 +2382,37 @@ int sde_hdmi_connector_post_init(struct drm_connector *connector, if (rc) SDE_ERROR("failed to enable HPD: %d\n", rc); + _sde_hdmi_get_tx_version(sde_hdmi); + + sde_hdmi_tx_check_capability(sde_hdmi); + + _sde_hdmi_init_hdcp(sde_hdmi); + + return rc; +} + +int sde_hdmi_start_hdcp(struct drm_connector *connector) +{ + int rc; + struct sde_connector *c_conn = to_sde_connector(connector); + struct sde_hdmi *display = (struct sde_hdmi *)c_conn->display; + struct hdmi *hdmi = display->ctrl.ctrl; + + if (!hdmi) { + SDE_ERROR("%s: invalid input\n", __func__); + return -EINVAL; + } + + if (!sde_hdmi_tx_is_hdcp_enabled(display)) + return 0; + + if (sde_hdmi_tx_is_encryption_set(display)) + sde_hdmi_config_avmute(hdmi, true); + + rc = display->hdcp_ops->authenticate(display->hdcp_data); + if (rc) + SDE_ERROR("%s: hdcp auth failed. 
rc=%d\n", __func__, rc); + return rc; } @@ -1610,6 +2451,114 @@ sde_hdmi_connector_detect(struct drm_connector *connector, return status; } +int sde_hdmi_pre_kickoff(struct drm_connector *connector, + void *display, + struct msm_display_kickoff_params *params) +{ + struct sde_hdmi *hdmi_display = (struct sde_hdmi *)display; + struct drm_msm_ext_panel_hdr_ctrl *hdr_ctrl; + struct drm_msm_ext_panel_hdr_metadata *hdr_meta; + u8 hdr_op; + + if (!connector || !display || !params || + !params->hdr_ctrl) { + pr_err("Invalid params\n"); + return -EINVAL; + } + + hdr_ctrl = params->hdr_ctrl; + hdr_meta = &hdr_ctrl->hdr_meta; + + if (!hdr_meta) { + SDE_ERROR("Invalid params\n"); + return -EINVAL; + } + + hdr_op = sde_hdmi_hdr_get_ops(hdmi_display->curr_hdr_state, + hdr_ctrl->hdr_state); + + if (hdr_op == HDR_SEND_INFO) { + if (connector->hdr_supported) + sde_hdmi_panel_set_hdr_infoframe(display, + &hdr_ctrl->hdr_meta); + if (hdr_meta->eotf) + sde_hdmi_update_colorimetry(hdmi_display, + true); + else + sde_hdmi_update_colorimetry(hdmi_display, + false); + } else if (hdr_op == HDR_CLEAR_INFO) + sde_hdmi_clear_hdr_infoframe(display); + + hdmi_display->curr_hdr_state = hdr_ctrl->hdr_state; + + return 0; +} + +bool sde_hdmi_mode_needs_full_range(void *display) +{ + struct sde_hdmi *hdmi_display = (struct sde_hdmi *)display; + struct drm_display_mode *mode; + u32 mode_fmt_flags = 0; + u32 cea_mode; + + if (!hdmi_display) { + SDE_ERROR("invalid input\n"); + return false; + } + + mode = &hdmi_display->mode; + /* Cache the format flags before clearing */ + mode_fmt_flags = mode->flags; + /** + * Clear the RGB/YUV format flags before calling upstream API + * as the API also compares the flags and then returns a mode + */ + mode->flags &= ~SDE_DRM_MODE_FLAG_FMT_MASK; + cea_mode = drm_match_cea_mode(mode); + /* Restore the format flags */ + mode->flags = mode_fmt_flags; + + if (cea_mode > SDE_HDMI_VIC_640x480) + return false; + + return true; +} + +enum sde_csc_type sde_hdmi_get_csc_type(struct drm_connector *conn, + void *display) +{ + struct sde_hdmi *hdmi_display = (struct sde_hdmi *)display; + struct sde_connector_state *c_state; + struct drm_msm_ext_panel_hdr_ctrl *hdr_ctrl; + struct drm_msm_ext_panel_hdr_metadata *hdr_meta; + + if (!hdmi_display || !conn) { + SDE_ERROR("invalid input\n"); + goto error; + } + + c_state = to_sde_connector_state(conn->state); + + if (!c_state) { + SDE_ERROR("invalid input\n"); + goto error; + } + + hdr_ctrl = &c_state->hdr_ctrl; + hdr_meta = &hdr_ctrl->hdr_meta; + + if ((hdr_ctrl->hdr_state == HDR_ENABLE) + && (hdr_meta->eotf != 0)) + return SDE_CSC_RGB2YUV_2020L; + else if (sde_hdmi_mode_needs_full_range(hdmi_display) + || conn->yuv_qs) + return SDE_CSC_RGB2YUV_601FR; + +error: + return SDE_CSC_RGB2YUV_601L; +} + int sde_hdmi_connector_get_modes(struct drm_connector *connector, void *display) { struct sde_hdmi *hdmi_display = (struct sde_hdmi *)display; @@ -1688,6 +2637,12 @@ int sde_hdmi_dev_deinit(struct sde_hdmi *display) SDE_ERROR("Invalid params\n"); return -EINVAL; } + if (display->hdcp_feat_data[SDE_HDCP_1x]) + sde_hdcp_1x_deinit(display->hdcp_feat_data[SDE_HDCP_1x]); + + if (display->hdcp_feat_data[SDE_HDCP_2P2]) + sde_hdmi_hdcp2p2_deinit(display->hdcp_feat_data[SDE_HDCP_2P2]); + return 0; } @@ -1770,6 +2725,14 @@ static int sde_hdmi_bind(struct device *dev, struct device *master, void *data) display_ctrl->ctrl = priv->hdmi; display->drm_dev = drm; + _sde_hdmi_map_regs(display, priv->hdmi); + _sde_hdmi_init_ddc(display, priv->hdmi); + + display->enc_lvl = 
HDCP_STATE_AUTH_ENC_NONE; + + INIT_DELAYED_WORK(&display->hdcp_cb_work, + sde_hdmi_tx_hdcp_cb_work); + mutex_init(&display->hdcp_mutex); mutex_unlock(&display->display_lock); return rc; @@ -2104,6 +3067,7 @@ int sde_hdmi_drm_init(struct sde_hdmi *display, struct drm_encoder *enc) struct msm_drm_private *priv = NULL; struct hdmi *hdmi; struct platform_device *pdev; + struct sde_kms *sde_kms; DBG(""); if (!display || !display->drm_dev || !enc) { @@ -2162,6 +3126,19 @@ int sde_hdmi_drm_init(struct sde_hdmi *display, struct drm_encoder *enc) enc->bridge = hdmi->bridge; priv->bridges[priv->num_bridges++] = hdmi->bridge; + /* + * After initialising HDMI bridge, we need to check + * whether the early display is enabled for HDMI. + * If yes, we need to increase refcount of hdmi power + * clocks. This can skip the clock disabling operation in + * clock_late_init when finding clk.count == 1. + */ + sde_kms = to_sde_kms(priv->kms); + if (sde_kms->splash_info.handoff) { + sde_hdmi_bridge_power_on(hdmi->bridge); + hdmi->power_on = true; + } + mutex_unlock(&display->display_lock); return 0; diff --git a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.h b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.h index dff245dec764..672a9f188d27 100644 --- a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.h +++ b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.h @@ -20,20 +20,32 @@ #include #include #include +#include #include #include #include #include "hdmi.h" - +#include "sde_kms.h" +#include "sde_connector.h" +#include "msm_drv.h" #include "sde_edid_parser.h" +#include "sde_hdmi_util.h" +#include "sde_hdcp.h" +#ifndef MIN +#define MIN(x, y) (((x) < (y)) ? (x) : (y)) +#endif #ifdef HDMI_DEBUG_ENABLE #define SDE_HDMI_DEBUG(fmt, args...) SDE_ERROR(fmt, ##args) #else #define SDE_HDMI_DEBUG(fmt, args...) SDE_DEBUG(fmt, ##args) #endif +/* HW Revisions for different SDE targets */ +#define SDE_GET_MAJOR_VER(rev)((rev) >> 28) +#define SDE_GET_MINOR_VER(rev)(((rev) >> 16) & 0xFFF) + /** * struct sde_hdmi_info - defines hdmi display properties * @display_type: Display type as defined by device tree. @@ -69,6 +81,18 @@ struct sde_hdmi_ctrl { u32 hdmi_ctrl_idx; }; +enum hdmi_tx_io_type { + HDMI_TX_CORE_IO, + HDMI_TX_QFPROM_IO, + HDMI_TX_HDCP_IO, + HDMI_TX_MAX_IO +}; + +enum hdmi_tx_feature_type { + SDE_HDCP_1x, + SDE_HDCP_2P2 +}; + /** * struct sde_hdmi - hdmi display information * @pdev: Pointer to platform device. @@ -88,6 +112,9 @@ struct sde_hdmi_ctrl { * @codec_ready: If audio codec is ready. * @client_notify_pending: If there is client notification pending. * @irq_domain: IRQ domain structure. + * @pll_update_enable: if it's allowed to update HDMI PLL ppm. + * @dc_enable: If deep color is enabled. Only DC_30 so far. + * @dc_feature_supported: If deep color feature is supported. * @notifier: CEC notifider to convey physical address information. * @root: Debug fs root entry. 
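 * @hdcp_mutex: mutex protecting HDCP state and callback data.
 * @hdmi_tx_version: HDMI TX controller version read from REG_HDMI_VERSION.
 * @max_pclk_khz: maximum pixel clock in kHz supported by this TX version.
 * @hdcp14_present: if HDCP 1.4 is supported by the hardware.
 * @hdcp22_present: if HDCP 2.2 is supported.
 * @hdcp_cb_work: delayed work that processes HDCP status notifications.
 * @ddc_ctrl: DDC controller context used for SCDC and HDCP 2.2 transactions.
 * @io: register ranges for the HDMI core, QFPROM and HDCP blocks.
 * @bt2020_colorimetry: if the BT.2020 colorimetry AVI infoframe is active.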
*/ @@ -99,7 +126,7 @@ struct sde_hdmi { const char *display_type; struct list_head list; struct mutex display_lock; - + struct mutex hdcp_mutex; struct sde_hdmi_ctrl ctrl; struct platform_device *ext_pdev; @@ -112,14 +139,40 @@ struct sde_hdmi { struct drm_display_mode mode; bool connected; bool is_tpg_enabled; + u32 hdmi_tx_version; + u32 hdmi_tx_major_version; + u32 max_pclk_khz; + bool hdcp1_use_sw_keys; + u32 hdcp14_present; + u32 hdcp22_present; + u8 hdcp_status; + u32 enc_lvl; + u8 curr_hdr_state; + bool auth_state; + bool sink_hdcp22_support; + bool src_hdcp22_support; + /*hold final data + *based on hdcp support + */ + void *hdcp_data; + /*hold hdcp init data*/ + void *hdcp_feat_data[2]; + struct sde_hdcp_ops *hdcp_ops; + struct sde_hdmi_tx_ddc_ctrl ddc_ctrl; struct work_struct hpd_work; bool codec_ready; bool client_notify_pending; struct irq_domain *irq_domain; struct cec_notifier *notifier; + bool pll_update_enable; + bool dc_enable; + bool dc_feature_supported; + bool bt2020_colorimetry; + struct delayed_work hdcp_cb_work; + struct dss_io_data io[HDMI_TX_MAX_IO]; /* DEBUG FS */ struct dentry *root; }; @@ -144,6 +197,24 @@ enum hdmi_tx_scdc_access_type { #define HDMI_KHZ_TO_HZ 1000 #define HDMI_MHZ_TO_HZ 1000000 +#define HDMI_YUV420_24BPP_PCLK_TMDS_CH_RATE_RATIO 2 +#define HDMI_RGB_24BPP_PCLK_TMDS_CH_RATE_RATIO 1 + +#define HDMI_GEN_PKT_CTRL_CLR_MASK 0x7 + +/* for AVI program */ +#define HDMI_AVI_INFOFRAME_BUFFER_SIZE \ + (HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE) +#define HDMI_VS_INFOFRAME_BUFFER_SIZE (HDMI_INFOFRAME_HEADER_SIZE + 6) + +#define LEFT_SHIFT_BYTE(x) ((x) << 8) +#define LEFT_SHIFT_WORD(x) ((x) << 16) +#define LEFT_SHIFT_24BITS(x) ((x) << 24) + +/* Maximum pixel clock rates for hdmi tx */ +#define HDMI_DEFAULT_MAX_PCLK_RATE 148500 +#define HDMI_TX_3_MAX_PCLK_RATE 297000 +#define HDMI_TX_4_MAX_PCLK_RATE 600000 /** * hdmi_tx_ddc_timer_type() - hdmi DDC timer functionalities. */ @@ -296,6 +367,29 @@ int sde_hdmi_set_property(struct drm_connector *connector, int property_index, uint64_t value, void *display); +/** + * sde_hdmi_bridge_power_on -- A wrapper of _sde_hdmi_bridge_power_on. + * @bridge: Handle to the drm bridge. + * + * Return: void. + */ +void sde_hdmi_bridge_power_on(struct drm_bridge *bridge); + +/** + * sde_hdmi_get_property() - get the connector properties + * @connector: Handle to the connector. + * @state: Handle to the connector state. + * @property_index: property index. + * @value: property value. + * @display: Handle to the display. + * + * Return: error code. + */ +int sde_hdmi_get_property(struct drm_connector *connector, + struct drm_connector_state *state, + int property_index, + uint64_t *value, + void *display); /** * sde_hdmi_bridge_init() - init sde hdmi bridge @@ -314,32 +408,6 @@ struct drm_bridge *sde_hdmi_bridge_init(struct hdmi *hdmi); */ void sde_hdmi_set_mode(struct hdmi *hdmi, bool power_on); -/** - * sde_hdmi_ddc_read() - common hdmi ddc read API. - * @hdmi: Handle to the hdmi. - * @addr: Command address. - * @offset: Command offset. - * @data: Data buffer for read back. - * @data_len: Data buffer length. - * - * Return: error code. - */ -int sde_hdmi_ddc_read(struct hdmi *hdmi, u16 addr, u8 offset, - u8 *data, u16 data_len); - -/** - * sde_hdmi_ddc_write() - common hdmi ddc write API. - * @hdmi: Handle to the hdmi. - * @addr: Command address. - * @offset: Command offset. - * @data: Data buffer for write. - * @data_len: Data buffer length. - * - * Return: error code. 
- */ -int sde_hdmi_ddc_write(struct hdmi *hdmi, u16 addr, u8 offset, - u8 *data, u16 data_len); - /** * sde_hdmi_scdc_read() - hdmi 2.0 ddc read API. * @hdmi: Handle to the hdmi. @@ -406,6 +474,41 @@ void sde_hdmi_notify_clients(struct sde_hdmi *display, bool connected); void sde_hdmi_ack_state(struct drm_connector *connector, enum drm_connector_status status); +bool sde_hdmi_tx_is_hdcp_enabled(struct sde_hdmi *hdmi_ctrl); +bool sde_hdmi_tx_is_encryption_set(struct sde_hdmi *hdmi_ctrl); +bool sde_hdmi_tx_is_stream_shareable(struct sde_hdmi *hdmi_ctrl); +bool sde_hdmi_tx_is_panel_on(struct sde_hdmi *hdmi_ctrl); +int sde_hdmi_start_hdcp(struct drm_connector *connector); +void sde_hdmi_hdcp_off(struct sde_hdmi *hdmi_ctrl); + + +/* + * sde_hdmi_pre_kickoff - program kickoff-time features + * @display: Pointer to private display structure + * @params: Parameters for kickoff-time programming + * Returns: Zero on success + */ +int sde_hdmi_pre_kickoff(struct drm_connector *connector, + void *display, + struct msm_display_kickoff_params *params); + +/* + * sde_hdmi_mode_needs_full_range - does mode need full range + * quantization + * @display: Pointer to private display structure + * Returns: true or false based on mode + */ +bool sde_hdmi_mode_needs_full_range(void *display); + +/* + * sde_hdmi_get_csc_type - returns the CSC type to be + * used based on state of HDR playback + * @conn: Pointer to DRM connector + * @display: Pointer to private display structure + * Returns: true or false based on mode + */ +enum sde_csc_type sde_hdmi_get_csc_type(struct drm_connector *conn, + void *display); #else /*#ifdef CONFIG_DRM_SDE_HDMI*/ static inline u32 sde_hdmi_get_num_of_displays(void) @@ -464,12 +567,42 @@ static inline int sde_hdmi_dev_deinit(struct sde_hdmi *display) return 0; } +bool hdmi_tx_is_hdcp_enabled(struct sde_hdmi *hdmi_ctrl) +{ + return false; +} + +bool sde_hdmi_tx_is_encryption_set(struct sde_hdmi *hdmi_ctrl) +{ + return false; +} + +bool sde_hdmi_tx_is_stream_shareable(struct sde_hdmi *hdmi_ctrl) +{ + return false; +} + +bool sde_hdmi_tx_is_panel_on(struct sde_hdmi *hdmi_ctrl) +{ + return false; +} + static inline int sde_hdmi_drm_init(struct sde_hdmi *display, struct drm_encoder *enc) { return 0; } +int sde_hdmi_start_hdcp(struct drm_connector *connector) +{ + return 0; +} + +void sde_hdmi_hdcp_off(struct sde_hdmi *hdmi_ctrl) +{ + +} + static inline int sde_hdmi_drm_deinit(struct sde_hdmi *display) { return 0; @@ -490,5 +623,16 @@ static inline int sde_hdmi_set_property(struct drm_connector *connector, return 0; } +static inline bool sde_hdmi_mode_needs_full_range(void *display) +{ + return false; +} + +enum sde_csc_type sde_hdmi_get_csc_type(struct drm_connector *conn, + void *display) +{ + return 0; +} + #endif /*#else of CONFIG_DRM_SDE_HDMI*/ #endif /* _SDE_HDMI_H_ */ diff --git a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_audio.c b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_audio.c index 48a3a9316a41..d6213dc0a4aa 100644 --- a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_audio.c +++ b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_audio.c @@ -355,37 +355,3 @@ void sde_hdmi_audio_off(struct hdmi *hdmi) SDE_DEBUG("HDMI Audio: Disabled\n"); } -int sde_hdmi_config_avmute(struct hdmi *hdmi, bool set) -{ - u32 av_mute_status; - bool av_pkt_en = false; - - if (!hdmi) { - SDE_ERROR("invalid HDMI Ctrl\n"); - return -ENODEV; - } - - av_mute_status = hdmi_read(hdmi, HDMI_GC); - - if (set) { - if (!(av_mute_status & BIT(0))) { - hdmi_write(hdmi, HDMI_GC, av_mute_status | BIT(0)); - av_pkt_en = true; - 
} - } else { - if (av_mute_status & BIT(0)) { - hdmi_write(hdmi, HDMI_GC, av_mute_status & ~BIT(0)); - av_pkt_en = true; - } - } - - /* Enable AV Mute tranmission here */ - if (av_pkt_en) - hdmi_write(hdmi, HDMI_VBI_PKT_CTRL, - hdmi_read(hdmi, HDMI_VBI_PKT_CTRL) | (BIT(4) & BIT(5))); - - SDE_DEBUG("AVMUTE %s\n", set ? "set" : "cleared"); - - return 0; -} - diff --git a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_bridge.c b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_bridge.c index 26a0638f7792..e6b6d15b5fb7 100644 --- a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_bridge.c +++ b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_bridge.c @@ -22,6 +22,71 @@ #include "sde_hdmi.h" #include "hdmi.h" +/* + * Add these register definitions to support the latest chipsets. These + * are derived from hdmi.xml.h and are going to be replaced by a chipset + * based mask approach. + */ +#define SDE_HDMI_ACTIVE_HSYNC_START__MASK 0x00001fff +static inline uint32_t SDE_HDMI_ACTIVE_HSYNC_START(uint32_t val) +{ + return ((val) << HDMI_ACTIVE_HSYNC_START__SHIFT) & + SDE_HDMI_ACTIVE_HSYNC_START__MASK; +} +#define SDE_HDMI_ACTIVE_HSYNC_END__MASK 0x1fff0000 +static inline uint32_t SDE_HDMI_ACTIVE_HSYNC_END(uint32_t val) +{ + return ((val) << HDMI_ACTIVE_HSYNC_END__SHIFT) & + SDE_HDMI_ACTIVE_HSYNC_END__MASK; +} + +#define SDE_HDMI_ACTIVE_VSYNC_START__MASK 0x00001fff +static inline uint32_t SDE_HDMI_ACTIVE_VSYNC_START(uint32_t val) +{ + return ((val) << HDMI_ACTIVE_VSYNC_START__SHIFT) & + SDE_HDMI_ACTIVE_VSYNC_START__MASK; +} +#define SDE_HDMI_ACTIVE_VSYNC_END__MASK 0x1fff0000 +static inline uint32_t SDE_HDMI_ACTIVE_VSYNC_END(uint32_t val) +{ + return ((val) << HDMI_ACTIVE_VSYNC_END__SHIFT) & + SDE_HDMI_ACTIVE_VSYNC_END__MASK; +} + +#define SDE_HDMI_VSYNC_ACTIVE_F2_START__MASK 0x00001fff +static inline uint32_t SDE_HDMI_VSYNC_ACTIVE_F2_START(uint32_t val) +{ + return ((val) << HDMI_VSYNC_ACTIVE_F2_START__SHIFT) & + SDE_HDMI_VSYNC_ACTIVE_F2_START__MASK; +} +#define SDE_HDMI_VSYNC_ACTIVE_F2_END__MASK 0x1fff0000 +static inline uint32_t SDE_HDMI_VSYNC_ACTIVE_F2_END(uint32_t val) +{ + return ((val) << HDMI_VSYNC_ACTIVE_F2_END__SHIFT) & + SDE_HDMI_VSYNC_ACTIVE_F2_END__MASK; +} + +#define SDE_HDMI_TOTAL_H_TOTAL__MASK 0x00001fff +static inline uint32_t SDE_HDMI_TOTAL_H_TOTAL(uint32_t val) +{ + return ((val) << HDMI_TOTAL_H_TOTAL__SHIFT) & + SDE_HDMI_TOTAL_H_TOTAL__MASK; +} + +#define SDE_HDMI_TOTAL_V_TOTAL__MASK 0x1fff0000 +static inline uint32_t SDE_HDMI_TOTAL_V_TOTAL(uint32_t val) +{ + return ((val) << HDMI_TOTAL_V_TOTAL__SHIFT) & + SDE_HDMI_TOTAL_V_TOTAL__MASK; +} + +#define SDE_HDMI_VSYNC_TOTAL_F2_V_TOTAL__MASK 0x00001fff +static inline uint32_t SDE_HDMI_VSYNC_TOTAL_F2_V_TOTAL(uint32_t val) +{ + return ((val) << HDMI_VSYNC_TOTAL_F2_V_TOTAL__SHIFT) & + SDE_HDMI_VSYNC_TOTAL_F2_V_TOTAL__MASK; +} + struct sde_hdmi_bridge { struct drm_bridge base; struct hdmi *hdmi; @@ -32,20 +97,12 @@ struct sde_hdmi_bridge { #define HDMI_TX_SCRAMBLER_MIN_TX_VERSION 0x04 #define HDMI_TX_SCRAMBLER_THRESHOLD_RATE_KHZ 340000 #define HDMI_TX_SCRAMBLER_TIMEOUT_MSEC 200 -/* default hsyncs for 4k@60 for 200ms */ -#define HDMI_DEFAULT_TIMEOUT_HSYNC 28571 -/* for AVI program */ -#define HDMI_AVI_INFOFRAME_BUFFER_SIZE \ - (HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE) -#define HDMI_VS_INFOFRAME_BUFFER_SIZE (HDMI_INFOFRAME_HEADER_SIZE + 6) + #define HDMI_SPD_INFOFRAME_BUFFER_SIZE \ (HDMI_INFOFRAME_HEADER_SIZE + HDMI_SPD_INFOFRAME_SIZE) #define HDMI_DEFAULT_VENDOR_NAME "unknown" #define HDMI_DEFAULT_PRODUCT_NAME "msm" -#define LEFT_SHIFT_BYTE(x) ((x) << 8) 
-#define LEFT_SHIFT_WORD(x) ((x) << 16) -#define LEFT_SHIFT_24BITS(x) ((x) << 24) #define HDMI_AVI_IFRAME_LINE_NUMBER 1 #define HDMI_VENDOR_IFRAME_LINE_NUMBER 3 @@ -177,39 +234,22 @@ static int _sde_hdmi_bridge_scrambler_ddc_check_status(struct hdmi *hdmi) return rc; } -static void _sde_hdmi_bridge_scrambler_ddc_reset(struct hdmi *hdmi) -{ - u32 reg_val; - - /* clear ack and disable interrupts */ - reg_val = BIT(14) | BIT(9) | BIT(5) | BIT(1); - hdmi_write(hdmi, REG_HDMI_DDC_INT_CTRL2, reg_val); - - /* Reset DDC timers */ - reg_val = BIT(0) | hdmi_read(hdmi, REG_HDMI_SCRAMBLER_STATUS_DDC_CTRL); - hdmi_write(hdmi, REG_HDMI_SCRAMBLER_STATUS_DDC_CTRL, reg_val); - - reg_val = hdmi_read(hdmi, REG_HDMI_SCRAMBLER_STATUS_DDC_CTRL); - reg_val &= ~BIT(0); - hdmi_write(hdmi, REG_HDMI_SCRAMBLER_STATUS_DDC_CTRL, reg_val); -} - -static void _sde_hdmi_bridge_scrambler_ddc_disable(struct hdmi *hdmi) -{ - u32 reg_val; - - _sde_hdmi_bridge_scrambler_ddc_reset(hdmi); - /* Disable HW DDC access to RxStatus register */ - reg_val = hdmi_read(hdmi, REG_HDMI_HW_DDC_CTRL); - reg_val &= ~(BIT(8) | BIT(9)); - hdmi_write(hdmi, REG_HDMI_HW_DDC_CTRL, reg_val); -} - static int _sde_hdmi_bridge_scrambler_status_timer_setup(struct hdmi *hdmi, u32 timeout_hsync) { u32 reg_val; int rc; + struct sde_connector *c_conn; + struct drm_connector *connector = NULL; + struct sde_hdmi *display; + + if (!hdmi) { + SDE_ERROR("invalid input\n"); + return -EINVAL; + } + connector = hdmi->connector; + c_conn = to_sde_connector(hdmi->connector); + display = (struct sde_hdmi *)c_conn->display; _sde_hdmi_bridge_ddc_clear_irq(hdmi, "scrambler"); @@ -243,7 +283,7 @@ static int _sde_hdmi_bridge_scrambler_status_timer_setup(struct hdmi *hdmi, if (rc) SDE_ERROR("scrambling ddc error %d\n", rc); - _sde_hdmi_bridge_scrambler_ddc_disable(hdmi); + _sde_hdmi_scrambler_ddc_disable((void *)display); return rc; } @@ -269,20 +309,6 @@ static int _sde_hdmi_bridge_setup_ddc_timers(struct hdmi *hdmi, return 0; } -static inline int _sde_hdmi_bridge_get_timeout_in_hysnc( - struct drm_display_mode *mode, u32 timeout_ms) -{ - /* - * pixel clock = h_total * v_total * fps - * 1 sec = pixel clock number of pixels are transmitted. - * time taken by one line (h_total) = 1s / (v_total * fps). 
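 * (illustration: for 1080p60, v_total = 1125 and fps = 60, so one line
 *  takes 1 / (1125 * 60) s, roughly 14.8 us; a 200 ms window therefore
 *  spans about 13500 hsync periods)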
- * lines for give time = (time_ms * 1000) / (1000000 / (v_total * fps)) - * = (time_ms * clock) / h_total - */ - - return (timeout_ms * mode->clock / mode->htotal); -} - static int _sde_hdmi_bridge_setup_scrambler(struct hdmi *hdmi, struct drm_display_mode *mode) { @@ -291,14 +317,17 @@ static int _sde_hdmi_bridge_setup_scrambler(struct hdmi *hdmi, u32 reg_val = 0; u32 tmds_clock_ratio = 0; bool scrambler_on = false; - + struct sde_connector *c_conn; struct drm_connector *connector = NULL; + struct sde_hdmi *display; if (!hdmi || !mode) { SDE_ERROR("invalid input\n"); return -EINVAL; } connector = hdmi->connector; + c_conn = to_sde_connector(hdmi->connector); + display = (struct sde_hdmi *)c_conn->display; /* Read HDMI version */ reg_val = hdmi_read(hdmi, REG_HDMI_VERSION); @@ -309,10 +338,13 @@ static int _sde_hdmi_bridge_setup_scrambler(struct hdmi *hdmi, return 0; } - if (mode->clock > HDMI_TX_SCRAMBLER_THRESHOLD_RATE_KHZ) { + /* use actual clock instead of mode clock */ + if (hdmi->pixclock > + HDMI_TX_SCRAMBLER_THRESHOLD_RATE_KHZ * HDMI_KHZ_TO_HZ) { scrambler_on = true; tmds_clock_ratio = 1; } else { + tmds_clock_ratio = 0; scrambler_on = connector->supports_scramble; } @@ -344,9 +376,10 @@ static int _sde_hdmi_bridge_setup_scrambler(struct hdmi *hdmi, * status bit on the sink. Sink should set this bit * with in 200ms after scrambler is enabled. */ - timeout_hsync = _sde_hdmi_bridge_get_timeout_in_hysnc( - mode, + timeout_hsync = _sde_hdmi_get_timeout_in_hysnc( + (void *)display, HDMI_TX_SCRAMBLER_TIMEOUT_MSEC); + if (timeout_hsync <= 0) { SDE_ERROR("err in timeout hsync calc\n"); timeout_hsync = HDMI_DEFAULT_TIMEOUT_HSYNC; @@ -357,6 +390,14 @@ static int _sde_hdmi_bridge_setup_scrambler(struct hdmi *hdmi, rc = _sde_hdmi_bridge_setup_ddc_timers(hdmi, HDMI_TX_DDC_TIMER_SCRAMBLER_STATUS, timeout_hsync); } else { + /* reset tmds clock ratio */ + rc = sde_hdmi_scdc_write(hdmi, + HDMI_TX_SCDC_TMDS_BIT_CLOCK_RATIO_UPDATE, + tmds_clock_ratio); + /* scdc write can fail if sink doesn't support SCDC */ + if (rc && connector->scdc_present) + SDE_ERROR("SCDC present, TMDS clk ratio err\n"); + sde_hdmi_scdc_write(hdmi, HDMI_TX_SCDC_SCRAMBLING_ENABLE, 0x0); reg_val = hdmi_read(hdmi, REG_HDMI_CTRL); reg_val &= ~BIT(28); /* Unset SCRAMBLER_EN bit */ @@ -365,6 +406,38 @@ static int _sde_hdmi_bridge_setup_scrambler(struct hdmi *hdmi, return rc; } +static void _sde_hdmi_bridge_setup_deep_color(struct hdmi *hdmi) +{ + struct drm_connector *connector = hdmi->connector; + struct sde_connector *c_conn = to_sde_connector(connector); + struct sde_hdmi *display = (struct sde_hdmi *)c_conn->display; + u32 hdmi_ctrl_reg, vbi_pkt_reg; + + SDE_DEBUG("Deep Color: %s\n", display->dc_enable ? "On" : "Off"); + + if (display->dc_enable) { + hdmi_ctrl_reg = hdmi_read(hdmi, REG_HDMI_CTRL); + + /* GC CD override */ + hdmi_ctrl_reg |= BIT(27); + + /* enable deep color for RGB888/YUV444/YUV420 30 bits */ + hdmi_ctrl_reg |= BIT(24); + hdmi_write(hdmi, REG_HDMI_CTRL, hdmi_ctrl_reg); + /* Enable GC_CONT and GC_SEND in General Control Packet + * (GCP) register so that deep color data is + * transmitted to the sink on every frame, allowing + * the sink to decode the data correctly. 
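 * Note: for DC_30 the color depth (CD) field carried in the GCP is what
 * tells the sink to expect 10 bits per component.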
+ * + * GC_CONT: 0x1 - Send GCP on every frame + * GC_SEND: 0x1 - Enable GCP Transmission + */ + vbi_pkt_reg = hdmi_read(hdmi, REG_HDMI_VBI_PKT_CTRL); + vbi_pkt_reg |= BIT(5) | BIT(4); + hdmi_write(hdmi, REG_HDMI_VBI_PKT_CTRL, vbi_pkt_reg); + } +} + static void _sde_hdmi_bridge_pre_enable(struct drm_bridge *bridge) { struct sde_hdmi_bridge *sde_hdmi_bridge = to_hdmi_bridge(bridge); @@ -396,12 +469,82 @@ static void _sde_hdmi_bridge_pre_enable(struct drm_bridge *bridge) mutex_unlock(&display->display_lock); } +static void sde_hdmi_update_hdcp_info(struct drm_connector *connector) +{ + void *fd = NULL; + struct sde_hdcp_ops *ops = NULL; + struct sde_connector *c_conn = to_sde_connector(connector); + struct sde_hdmi *display = (struct sde_hdmi *)c_conn->display; + + if (!display) { + DEV_ERR("%s: invalid input\n", __func__); + return; + } + + /* check first if hdcp2p2 is supported */ + fd = display->hdcp_feat_data[SDE_HDCP_2P2]; + if (fd) + ops = sde_hdmi_hdcp2p2_start(fd); + + /* If ops is true, sink supports hdcp */ + if (ops) + display->sink_hdcp22_support = true; + + if (ops && ops->feature_supported) + display->hdcp22_present = ops->feature_supported(fd); + else + display->hdcp22_present = false; + + /* if hdcp22_present is true, src supports hdcp 2p2 */ + if (display->hdcp22_present) + display->src_hdcp22_support = true; + + if (!display->hdcp22_present) { + if (display->hdcp1_use_sw_keys) { + display->hdcp14_present = + hdcp1_check_if_supported_load_app(); + } + if (display->hdcp14_present) { + fd = display->hdcp_feat_data[SDE_HDCP_1x]; + if (fd) + ops = sde_hdcp_1x_start(fd); + } + } + + /* update internal data about hdcp */ + display->hdcp_data = fd; + display->hdcp_ops = ops; +} + static void _sde_hdmi_bridge_enable(struct drm_bridge *bridge) { + struct sde_hdmi_bridge *sde_hdmi_bridge = to_hdmi_bridge(bridge); + struct hdmi *hdmi = sde_hdmi_bridge->hdmi; + struct sde_connector *c_conn = to_sde_connector(hdmi->connector); + struct sde_hdmi *display = (struct sde_hdmi *)c_conn->display; + + /* need to update hdcp info here to ensure right HDCP support*/ + sde_hdmi_update_hdcp_info(hdmi->connector); + + /* start HDCP authentication */ + sde_hdmi_start_hdcp(hdmi->connector); + + /* reset HDR state */ + display->curr_hdr_state = HDR_DISABLE; } static void _sde_hdmi_bridge_disable(struct drm_bridge *bridge) { + struct sde_hdmi_bridge *sde_hdmi_bridge = to_hdmi_bridge(bridge); + struct hdmi *hdmi = sde_hdmi_bridge->hdmi; + struct sde_connector *c_conn = to_sde_connector(hdmi->connector); + struct sde_hdmi *display = (struct sde_hdmi *)c_conn->display; + + mutex_lock(&display->display_lock); + + display->pll_update_enable = false; + + mutex_unlock(&display->display_lock); } static void _sde_hdmi_bridge_post_disable(struct drm_bridge *bridge) @@ -414,8 +557,8 @@ static void _sde_hdmi_bridge_post_disable(struct drm_bridge *bridge) sde_hdmi_notify_clients(display, display->connected); - if (hdmi->hdcp_ctrl && hdmi->is_hdcp_supported) - hdmi_hdcp_ctrl_off(hdmi->hdcp_ctrl); + if (sde_hdmi_tx_is_hdcp_enabled(display)) + sde_hdmi_hdcp_off(display); sde_hdmi_audio_off(hdmi); @@ -432,15 +575,50 @@ static void _sde_hdmi_bridge_post_disable(struct drm_bridge *bridge) } static void _sde_hdmi_bridge_set_avi_infoframe(struct hdmi *hdmi, - const struct drm_display_mode *mode) + struct drm_display_mode *mode) { u8 avi_iframe[HDMI_AVI_INFOFRAME_BUFFER_SIZE] = {0}; u8 *avi_frame = &avi_iframe[HDMI_INFOFRAME_HEADER_SIZE]; u8 checksum; u32 reg_val; + u32 mode_fmt_flags = 0; struct hdmi_avi_infoframe info; + 
struct drm_connector *connector; + if (!hdmi || !mode) { + SDE_ERROR("invalid input\n"); + return; + } + + connector = hdmi->connector; + + if (!connector) { + SDE_ERROR("invalid input\n"); + return; + } + + /* Cache the format flags before clearing */ + mode_fmt_flags = mode->flags; + /** + * Clear the RGB/YUV format flags before calling upstream API + * as the API also compares the flags and then returns a mode + */ + mode->flags &= ~SDE_DRM_MODE_FLAG_FMT_MASK; drm_hdmi_avi_infoframe_from_display_mode(&info, mode); + /* Restore the format flags */ + mode->flags = mode_fmt_flags; + + if (mode->private_flags & MSM_MODE_FLAG_COLOR_FORMAT_YCBCR420) { + info.colorspace = HDMI_COLORSPACE_YUV420; + /** + * If sink supports quantization select, + * override to full range + */ + if (connector->yuv_qs) + info.ycc_quantization_range = + HDMI_YCC_QUANTIZATION_RANGE_FULL; + } + hdmi_avi_infoframe_pack(&info, avi_iframe, sizeof(avi_iframe)); checksum = avi_iframe[HDMI_INFOFRAME_HEADER_SIZE - 1]; @@ -558,52 +736,98 @@ static inline void _sde_hdmi_save_mode(struct hdmi *hdmi, drm_mode_copy(&display->mode, mode); } +static u32 _sde_hdmi_choose_best_format(struct hdmi *hdmi, + struct drm_display_mode *mode) +{ + /* + * choose priority: + * 1. DC + RGB + * 2. DC + YUV + * 3. RGB + * 4. YUV + */ + int dc_format; + struct drm_connector *connector = hdmi->connector; + + dc_format = sde_hdmi_sink_dc_support(connector, mode); + if (dc_format & MSM_MODE_FLAG_RGB444_DC_ENABLE) + return (MSM_MODE_FLAG_COLOR_FORMAT_RGB444 + | MSM_MODE_FLAG_RGB444_DC_ENABLE); + else if (dc_format & MSM_MODE_FLAG_YUV420_DC_ENABLE) + return (MSM_MODE_FLAG_COLOR_FORMAT_YCBCR420 + | MSM_MODE_FLAG_YUV420_DC_ENABLE); + else if (mode->flags & DRM_MODE_FLAG_SUPPORTS_RGB) + return MSM_MODE_FLAG_COLOR_FORMAT_RGB444; + else if (mode->flags & DRM_MODE_FLAG_SUPPORTS_YUV) + return MSM_MODE_FLAG_COLOR_FORMAT_YCBCR420; + + SDE_ERROR("Can't get available best display format\n"); + + return MSM_MODE_FLAG_COLOR_FORMAT_RGB444; +} + static void _sde_hdmi_bridge_mode_set(struct drm_bridge *bridge, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { struct sde_hdmi_bridge *sde_hdmi_bridge = to_hdmi_bridge(bridge); struct hdmi *hdmi = sde_hdmi_bridge->hdmi; + struct drm_connector *connector = hdmi->connector; + struct sde_connector *c_conn = to_sde_connector(connector); + struct sde_hdmi *display = (struct sde_hdmi *)c_conn->display; int hstart, hend, vstart, vend; uint32_t frame_ctrl; + u32 div = 0; mode = adjusted_mode; - hdmi->pixclock = mode->clock * 1000; + display->dc_enable = mode->private_flags & + (MSM_MODE_FLAG_RGB444_DC_ENABLE | + MSM_MODE_FLAG_YUV420_DC_ENABLE); + /* compute pixclock as per color format and bit depth */ + hdmi->pixclock = sde_hdmi_calc_pixclk( + mode->clock * HDMI_KHZ_TO_HZ, + mode->private_flags, + display->dc_enable); + SDE_DEBUG("Actual PCLK: %lu, Mode PCLK: %d\n", + hdmi->pixclock, mode->clock); - hstart = mode->htotal - mode->hsync_start; - hend = mode->htotal - mode->hsync_start + mode->hdisplay; + if (mode->private_flags & MSM_MODE_FLAG_COLOR_FORMAT_YCBCR420) + div = 1; + + hstart = (mode->htotal - mode->hsync_start) >> div; + hend = (mode->htotal - mode->hsync_start + mode->hdisplay) >> div; vstart = mode->vtotal - mode->vsync_start - 1; vend = mode->vtotal - mode->vsync_start + mode->vdisplay - 1; - DRM_DEBUG( + SDE_DEBUG( "htotal=%d, vtotal=%d, hstart=%d, hend=%d, vstart=%d, vend=%d", mode->htotal, mode->vtotal, hstart, hend, vstart, vend); hdmi_write(hdmi, REG_HDMI_TOTAL, - 
HDMI_TOTAL_H_TOTAL(mode->htotal - 1) | - HDMI_TOTAL_V_TOTAL(mode->vtotal - 1)); + SDE_HDMI_TOTAL_H_TOTAL((mode->htotal >> div) - 1) | + SDE_HDMI_TOTAL_V_TOTAL(mode->vtotal - 1)); hdmi_write(hdmi, REG_HDMI_ACTIVE_HSYNC, - HDMI_ACTIVE_HSYNC_START(hstart) | - HDMI_ACTIVE_HSYNC_END(hend)); + SDE_HDMI_ACTIVE_HSYNC_START(hstart) | + SDE_HDMI_ACTIVE_HSYNC_END(hend)); hdmi_write(hdmi, REG_HDMI_ACTIVE_VSYNC, - HDMI_ACTIVE_VSYNC_START(vstart) | - HDMI_ACTIVE_VSYNC_END(vend)); + SDE_HDMI_ACTIVE_VSYNC_START(vstart) | + SDE_HDMI_ACTIVE_VSYNC_END(vend)); if (mode->flags & DRM_MODE_FLAG_INTERLACE) { hdmi_write(hdmi, REG_HDMI_VSYNC_TOTAL_F2, - HDMI_VSYNC_TOTAL_F2_V_TOTAL(mode->vtotal)); + SDE_HDMI_VSYNC_TOTAL_F2_V_TOTAL(mode->vtotal)); hdmi_write(hdmi, REG_HDMI_VSYNC_ACTIVE_F2, - HDMI_VSYNC_ACTIVE_F2_START(vstart + 1) | - HDMI_VSYNC_ACTIVE_F2_END(vend + 1)); + SDE_HDMI_VSYNC_ACTIVE_F2_START(vstart + 1) | + SDE_HDMI_VSYNC_ACTIVE_F2_END(vend + 1)); } else { hdmi_write(hdmi, REG_HDMI_VSYNC_TOTAL_F2, - HDMI_VSYNC_TOTAL_F2_V_TOTAL(0)); + SDE_HDMI_VSYNC_TOTAL_F2_V_TOTAL(0)); hdmi_write(hdmi, REG_HDMI_VSYNC_ACTIVE_F2, - HDMI_VSYNC_ACTIVE_F2_START(0) | - HDMI_VSYNC_ACTIVE_F2_END(0)); + SDE_HDMI_VSYNC_ACTIVE_F2_START(0) | + SDE_HDMI_VSYNC_ACTIVE_F2_END(0)); } frame_ctrl = 0; @@ -629,9 +853,30 @@ static void _sde_hdmi_bridge_mode_set(struct drm_bridge *bridge, _sde_hdmi_bridge_set_spd_infoframe(hdmi, mode); DRM_DEBUG("hdmi setup info frame\n"); } - _sde_hdmi_bridge_setup_scrambler(hdmi, mode); _sde_hdmi_save_mode(hdmi, mode); + _sde_hdmi_bridge_setup_scrambler(hdmi, mode); + _sde_hdmi_bridge_setup_deep_color(hdmi); +} + +static bool _sde_hdmi_bridge_mode_fixup(struct drm_bridge *bridge, + const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + struct sde_hdmi_bridge *sde_hdmi_bridge = to_hdmi_bridge(bridge); + struct hdmi *hdmi = sde_hdmi_bridge->hdmi; + + adjusted_mode->private_flags |= + _sde_hdmi_choose_best_format(hdmi, adjusted_mode); + SDE_DEBUG("Adjusted mode private flags: 0x%x\n", + adjusted_mode->private_flags); + + return true; +} + +void sde_hdmi_bridge_power_on(struct drm_bridge *bridge) +{ + _sde_hdmi_bridge_power_on(bridge); } static const struct drm_bridge_funcs _sde_hdmi_bridge_funcs = { @@ -640,6 +885,7 @@ static const struct drm_bridge_funcs _sde_hdmi_bridge_funcs = { .disable = _sde_hdmi_bridge_disable, .post_disable = _sde_hdmi_bridge_post_disable, .mode_set = _sde_hdmi_bridge_mode_set, + .mode_fixup = _sde_hdmi_bridge_mode_fixup, }; diff --git a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_hdcp2p2.c b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_hdcp2p2.c new file mode 100644 index 000000000000..1e673440f399 --- /dev/null +++ b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_hdcp2p2.c @@ -0,0 +1,994 @@ +/* Copyright (c) 2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#define pr_fmt(fmt) "%s: " fmt, __func__ + +#include +#include +#include +#include +#include +#include + +#include +#include "sde_hdcp.h" +#include "video/msm_hdmi_hdcp_mgr.h" +#include "sde_hdmi_util.h" + +/* + * Defined addresses and offsets of standard HDCP 2.2 sink registers + * for DDC, as defined in HDCP 2.2 spec section 2.14 table 2.7 + */ +#define HDCP_SINK_DDC_SLAVE_ADDR 0x74 /* Sink DDC slave address */ +#define HDCP_SINK_DDC_HDCP2_VERSION 0x50 /* Does sink support HDCP2.2 */ +#define HDCP_SINK_DDC_HDCP2_WRITE_MESSAGE 0x60 /* HDCP Tx writes here */ +#define HDCP_SINK_DDC_HDCP2_RXSTATUS 0x70 /* RxStatus, 2 bytes */ +#define HDCP_SINK_DDC_HDCP2_READ_MESSAGE 0x80 /* HDCP Tx reads here */ + +#define HDCP2P2_DEFAULT_TIMEOUT 500 + +/* + * HDCP 2.2 encryption requires the data encryption block that is present in + * HDMI controller version 4.0.0 and above + */ +#define MIN_HDMI_TX_MAJOR_VERSION 4 + +enum sde_hdmi_hdcp2p2_sink_status { + SINK_DISCONNECTED, + SINK_CONNECTED +}; + +enum sde_hdmi_auth_status { + HDMI_HDCP_AUTH_STATUS_FAILURE, + HDMI_HDCP_AUTH_STATUS_SUCCESS +}; + +struct sde_hdmi_hdcp2p2_ctrl { + atomic_t auth_state; + enum sde_hdmi_hdcp2p2_sink_status sink_status; /* Is sink connected */ + struct sde_hdcp_init_data init_data; /* Feature data from HDMI drv */ + struct mutex mutex; /* mutex to protect access to ctrl */ + struct mutex msg_lock; /* mutex to protect access to msg buffer */ + struct mutex wakeup_mutex; /* mutex to protect access to wakeup call*/ + struct sde_hdcp_ops *ops; + void *lib_ctx; /* Handle to HDCP 2.2 Trustzone library */ + struct hdcp_txmtr_ops *lib; /* Ops for driver to call into TZ */ + + enum hdmi_hdcp_wakeup_cmd wakeup_cmd; + enum sde_hdmi_auth_status auth_status; + char *send_msg_buf; + uint32_t send_msg_len; + uint32_t timeout; + uint32_t timeout_left; + + struct task_struct *thread; + struct kthread_worker worker; + struct kthread_work status; + struct kthread_work auth; + struct kthread_work send_msg; + struct kthread_work recv_msg; + struct kthread_work link; + struct kthread_work poll; +}; + +static int sde_hdmi_hdcp2p2_auth(struct sde_hdmi_hdcp2p2_ctrl *ctrl); +static void sde_hdmi_hdcp2p2_send_msg(struct sde_hdmi_hdcp2p2_ctrl *ctrl); +static void sde_hdmi_hdcp2p2_recv_msg(struct sde_hdmi_hdcp2p2_ctrl *ctrl); +static void sde_hdmi_hdcp2p2_auth_status(struct sde_hdmi_hdcp2p2_ctrl *ctrl); +static int sde_hdmi_hdcp2p2_link_check(struct sde_hdmi_hdcp2p2_ctrl *ctrl); + +static bool sde_hdcp2p2_is_valid_state(struct sde_hdmi_hdcp2p2_ctrl *ctrl) +{ + if (ctrl->wakeup_cmd == HDMI_HDCP_WKUP_CMD_AUTHENTICATE) + return true; + + if (atomic_read(&ctrl->auth_state) != HDCP_STATE_INACTIVE) + return true; + + return false; +} + +static int sde_hdmi_hdcp2p2_copy_buf(struct sde_hdmi_hdcp2p2_ctrl *ctrl, + struct hdmi_hdcp_wakeup_data *data) +{ + mutex_lock(&ctrl->msg_lock); + + if (!data->send_msg_len) { + mutex_unlock(&ctrl->msg_lock); + return 0; + } + + ctrl->send_msg_len = data->send_msg_len; + + kzfree(ctrl->send_msg_buf); + + ctrl->send_msg_buf = kzalloc(data->send_msg_len, GFP_KERNEL); + + if (!ctrl->send_msg_buf) { + mutex_unlock(&ctrl->msg_lock); + return -ENOMEM; + } + + memcpy(ctrl->send_msg_buf, data->send_msg_buf, ctrl->send_msg_len); + + mutex_unlock(&ctrl->msg_lock); + + return 0; +} + +static int sde_hdmi_hdcp2p2_wakeup(struct hdmi_hdcp_wakeup_data *data) +{ + struct sde_hdmi_hdcp2p2_ctrl *ctrl; + + if (!data) { + SDE_ERROR("invalid input\n"); + return -EINVAL; + } + + ctrl = data->context; + if (!ctrl) { + SDE_ERROR("invalid 
ctrl\n"); + return -EINVAL; + } + + mutex_lock(&ctrl->wakeup_mutex); + + SDE_HDCP_DEBUG("cmd: %s, timeout %dms\n", + hdmi_hdcp_cmd_to_str(data->cmd), + data->timeout); + + ctrl->wakeup_cmd = data->cmd; + + if (data->timeout) + ctrl->timeout = data->timeout * 2; + else + ctrl->timeout = HDCP2P2_DEFAULT_TIMEOUT; + + if (!sde_hdcp2p2_is_valid_state(ctrl)) { + SDE_ERROR("invalid state\n"); + goto exit; + } + + if (sde_hdmi_hdcp2p2_copy_buf(ctrl, data)) + goto exit; + + if (ctrl->wakeup_cmd == HDMI_HDCP_WKUP_CMD_STATUS_SUCCESS) + ctrl->auth_status = HDMI_HDCP_AUTH_STATUS_SUCCESS; + else if (ctrl->wakeup_cmd == HDMI_HDCP_WKUP_CMD_STATUS_FAILED) + ctrl->auth_status = HDMI_HDCP_AUTH_STATUS_FAILURE; + + switch (ctrl->wakeup_cmd) { + case HDMI_HDCP_WKUP_CMD_SEND_MESSAGE: + queue_kthread_work(&ctrl->worker, &ctrl->send_msg); + break; + case HDMI_HDCP_WKUP_CMD_RECV_MESSAGE: + queue_kthread_work(&ctrl->worker, &ctrl->recv_msg); + break; + case HDMI_HDCP_WKUP_CMD_STATUS_SUCCESS: + case HDMI_HDCP_WKUP_CMD_STATUS_FAILED: + queue_kthread_work(&ctrl->worker, &ctrl->status); + break; + case HDMI_HDCP_WKUP_CMD_LINK_POLL: + queue_kthread_work(&ctrl->worker, &ctrl->poll); + break; + case HDMI_HDCP_WKUP_CMD_AUTHENTICATE: + queue_kthread_work(&ctrl->worker, &ctrl->auth); + break; + default: + SDE_ERROR("invalid wakeup command %d\n", ctrl->wakeup_cmd); + } +exit: + mutex_unlock(&ctrl->wakeup_mutex); + return 0; +} + +static int sde_hdmi_hdcp2p2_wakeup_lib(struct sde_hdmi_hdcp2p2_ctrl *ctrl, + struct hdcp_lib_wakeup_data *data) +{ + int rc = 0; + + if (ctrl && ctrl->lib && ctrl->lib->wakeup && + data && (data->cmd != HDCP_LIB_WKUP_CMD_INVALID)) { + rc = ctrl->lib->wakeup(data); + if (rc) + SDE_ERROR("error sending %s to lib\n", + hdcp_lib_cmd_to_str(data->cmd)); + } + + return rc; +} + +static void sde_hdmi_hdcp2p2_reset(struct sde_hdmi_hdcp2p2_ctrl *ctrl) +{ + if (!ctrl) { + SDE_ERROR("invalid input\n"); + return; + } + + ctrl->sink_status = SINK_DISCONNECTED; + atomic_set(&ctrl->auth_state, HDCP_STATE_INACTIVE); +} + +static void sde_hdmi_hdcp2p2_off(void *input) +{ + struct sde_hdmi_hdcp2p2_ctrl *ctrl; + struct hdmi_hdcp_wakeup_data cdata = {HDMI_HDCP_WKUP_CMD_AUTHENTICATE}; + + ctrl = (struct sde_hdmi_hdcp2p2_ctrl *)input; + + if (!ctrl) { + SDE_ERROR("invalid input\n"); + return; + } + + sde_hdmi_hdcp2p2_reset(ctrl); + + flush_kthread_worker(&ctrl->worker); + + sde_hdmi_hdcp2p2_ddc_disable((void *)ctrl->init_data.cb_data); + + cdata.context = input; + sde_hdmi_hdcp2p2_wakeup(&cdata); +} + +static int sde_hdmi_hdcp2p2_authenticate(void *input) +{ + struct sde_hdmi_hdcp2p2_ctrl *ctrl = input; + struct hdmi_hdcp_wakeup_data cdata = {HDMI_HDCP_WKUP_CMD_AUTHENTICATE}; + u32 regval; + int rc = 0; + + /* Enable authentication success interrupt */ + regval = DSS_REG_R(ctrl->init_data.core_io, HDMI_HDCP_INT_CTRL2); + regval |= BIT(1) | BIT(2); + + DSS_REG_W(ctrl->init_data.core_io, HDMI_HDCP_INT_CTRL2, regval); + + flush_kthread_worker(&ctrl->worker); + + ctrl->sink_status = SINK_CONNECTED; + atomic_set(&ctrl->auth_state, HDCP_STATE_AUTHENTICATING); + + /* make sure ddc is idle before starting hdcp 2.2 authentication */ + _sde_hdmi_scrambler_ddc_disable((void *)ctrl->init_data.cb_data); + sde_hdmi_hdcp2p2_ddc_disable((void *)ctrl->init_data.cb_data); + + cdata.context = input; + sde_hdmi_hdcp2p2_wakeup(&cdata); + + return rc; +} + +static int sde_hdmi_hdcp2p2_reauthenticate(void *input) +{ + struct sde_hdmi_hdcp2p2_ctrl *ctrl; + + ctrl = (struct sde_hdmi_hdcp2p2_ctrl *)input; + + if (!ctrl) { + SDE_ERROR("invalid 
input\n"); + return -EINVAL; + } + + sde_hdmi_hdcp2p2_reset(ctrl); + + return sde_hdmi_hdcp2p2_authenticate(input); +} + +static void sde_hdmi_hdcp2p2_min_level_change(void *client_ctx, +int min_enc_lvl) +{ + struct sde_hdmi_hdcp2p2_ctrl *ctrl = + (struct sde_hdmi_hdcp2p2_ctrl *)client_ctx; + struct hdcp_lib_wakeup_data cdata = { + HDCP_LIB_WKUP_CMD_QUERY_STREAM_TYPE}; + bool enc_notify = true; + enum sde_hdcp_states enc_lvl; + + if (!ctrl) { + SDE_ERROR("invalid input\n"); + return; + } + + switch (min_enc_lvl) { + case 0: + enc_lvl = HDCP_STATE_AUTH_ENC_NONE; + break; + case 1: + enc_lvl = HDCP_STATE_AUTH_ENC_1X; + break; + case 2: + enc_lvl = HDCP_STATE_AUTH_ENC_2P2; + break; + default: + enc_notify = false; + } + + SDE_HDCP_DEBUG("enc level changed %d\n", min_enc_lvl); + + cdata.context = ctrl->lib_ctx; + sde_hdmi_hdcp2p2_wakeup_lib(ctrl, &cdata); + + if (enc_notify && ctrl->init_data.notify_status) + ctrl->init_data.notify_status(ctrl->init_data.cb_data, enc_lvl); +} + +static void sde_hdmi_hdcp2p2_auth_failed(struct sde_hdmi_hdcp2p2_ctrl *ctrl) +{ + if (!ctrl) { + SDE_ERROR("invalid input\n"); + return; + } + + atomic_set(&ctrl->auth_state, HDCP_STATE_AUTH_FAIL); + + sde_hdmi_hdcp2p2_ddc_disable(ctrl->init_data.cb_data); + + /* notify hdmi tx about HDCP failure */ + ctrl->init_data.notify_status(ctrl->init_data.cb_data, + HDCP_STATE_AUTH_FAIL); +} + +static int sde_hdmi_hdcp2p2_ddc_rd_message(struct sde_hdmi_hdcp2p2_ctrl *ctrl, + u8 *buf, int size, u32 timeout) +{ + struct sde_hdmi_tx_ddc_data *ddc_data; + struct sde_hdmi_tx_ddc_ctrl *ddc_ctrl; + + int rc; + + if (!ctrl) { + SDE_ERROR("invalid ctrl\n"); + return -EINVAL; + } + + ddc_ctrl = ctrl->init_data.ddc_ctrl; + ddc_data = &ddc_ctrl->ddc_data; + + if (!ddc_data) { + SDE_ERROR("invalid ddc data\n"); + return -EINVAL; + } + + if (atomic_read(&ctrl->auth_state) == HDCP_STATE_INACTIVE) { + SDE_ERROR("hdcp is off\n"); + return -EINVAL; + } + + memset(ddc_data, 0, sizeof(*ddc_data)); + ddc_data->dev_addr = HDCP_SINK_DDC_SLAVE_ADDR; + ddc_data->offset = HDCP_SINK_DDC_HDCP2_READ_MESSAGE; + ddc_data->data_buf = buf; + ddc_data->data_len = size; + ddc_data->request_len = size; + ddc_data->retry = 0; + ddc_data->hard_timeout = timeout; + ddc_data->what = "HDCP2ReadMessage"; + + rc = sde_hdmi_ddc_read(ctrl->init_data.cb_data); + if (rc) + SDE_ERROR("Cannot read HDCP message register\n"); + + ctrl->timeout_left = ddc_data->timeout_left; + + return rc; +} + +static int sde_hdmi_hdcp2p2_ddc_wt_message(struct sde_hdmi_hdcp2p2_ctrl *ctrl, + u8 *buf, size_t size) +{ + struct sde_hdmi_tx_ddc_data *ddc_data; + struct sde_hdmi_tx_ddc_ctrl *ddc_ctrl; + + int rc; + + if (!ctrl) { + SDE_ERROR("invalid ctrl\n"); + return -EINVAL; + } + + ddc_ctrl = ctrl->init_data.ddc_ctrl; + ddc_data = &ddc_ctrl->ddc_data; + + if (!ddc_data) { + SDE_ERROR("invalid ddc data\n"); + return -EINVAL; + } + + memset(ddc_data, 0, sizeof(*ddc_data)); + ddc_data->dev_addr = HDCP_SINK_DDC_SLAVE_ADDR; + ddc_data->offset = HDCP_SINK_DDC_HDCP2_WRITE_MESSAGE; + ddc_data->data_buf = buf; + ddc_data->data_len = size; + ddc_data->hard_timeout = ctrl->timeout; + ddc_data->what = "HDCP2WriteMessage"; + + rc = sde_hdmi_ddc_write((void *)ctrl->init_data.cb_data); + if (rc) + SDE_ERROR("Cannot write HDCP message register\n"); + + ctrl->timeout_left = ddc_data->timeout_left; + + return rc; +} + +static int sde_hdmi_hdcp2p2_read_version(struct sde_hdmi_hdcp2p2_ctrl *ctrl, + u8 *hdcp2version) +{ + struct sde_hdmi_tx_ddc_data *ddc_data; + struct sde_hdmi_tx_ddc_ctrl *ddc_ctrl; + int rc; + + if 
(!ctrl) { + SDE_ERROR("invalid ctrl\n"); + return -EINVAL; + } + + ddc_ctrl = ctrl->init_data.ddc_ctrl; + ddc_data = &ddc_ctrl->ddc_data; + + if (!ddc_data) { + SDE_ERROR("invalid ddc data\n"); + return -EINVAL; + } + memset(ddc_data, 0, sizeof(*ddc_data)); + ddc_data->dev_addr = HDCP_SINK_DDC_SLAVE_ADDR; + ddc_data->offset = HDCP_SINK_DDC_HDCP2_VERSION; + ddc_data->data_buf = hdcp2version; + ddc_data->data_len = 1; + ddc_data->request_len = 1; + ddc_data->retry = 1; + ddc_data->what = "HDCP2Version"; + + rc = sde_hdmi_ddc_read((void *)ctrl->init_data.cb_data); + if (rc) { + SDE_ERROR("Cannot read HDCP2Version register"); + return rc; + } + + SDE_HDCP_DEBUG("Read HDCP2Version as %u\n", *hdcp2version); + return rc; +} + +static bool sde_hdmi_hdcp2p2_feature_supported(void *input) +{ + struct sde_hdmi_hdcp2p2_ctrl *ctrl = input; + struct hdcp_txmtr_ops *lib = NULL; + bool supported = false; + + if (!ctrl) { + SDE_ERROR("invalid input\n"); + goto end; + } + + lib = ctrl->lib; + if (!lib) { + SDE_ERROR("invalid lib ops data\n"); + goto end; + } + + if (lib->feature_supported) { + supported = lib->feature_supported( + ctrl->lib_ctx); + } + +end: + return supported; +} + +static void sde_hdmi_hdcp2p2_send_msg(struct sde_hdmi_hdcp2p2_ctrl *ctrl) +{ + int rc = 0; + struct hdcp_lib_wakeup_data cdata = {HDCP_LIB_WKUP_CMD_INVALID}; + uint32_t msglen; + char *msg = NULL; + + if (!ctrl) { + SDE_ERROR("invalid input\n"); + rc = -EINVAL; + goto exit; + } + + cdata.context = ctrl->lib_ctx; + + if (atomic_read(&ctrl->auth_state) == HDCP_STATE_INACTIVE) { + SDE_ERROR("hdcp is off\n"); + goto exit; + } + + mutex_lock(&ctrl->msg_lock); + msglen = ctrl->send_msg_len; + + if (!msglen) { + mutex_unlock(&ctrl->msg_lock); + rc = -EINVAL; + goto exit; + } + + msg = kzalloc(msglen, GFP_KERNEL); + if (!msg) { + mutex_unlock(&ctrl->msg_lock); + rc = -ENOMEM; + goto exit; + } + + memcpy(msg, ctrl->send_msg_buf, msglen); + mutex_unlock(&ctrl->msg_lock); + + /* Forward the message to the sink */ + rc = sde_hdmi_hdcp2p2_ddc_wt_message(ctrl, + msg, (size_t)msglen); + if (rc) { + SDE_ERROR("Error sending msg to sink %d\n", rc); + cdata.cmd = HDCP_LIB_WKUP_CMD_MSG_SEND_FAILED; + } else { + cdata.cmd = HDCP_LIB_WKUP_CMD_MSG_SEND_SUCCESS; + cdata.timeout = ctrl->timeout_left; + } +exit: + kfree(msg); + + sde_hdmi_hdcp2p2_wakeup_lib(ctrl, &cdata); +} + +static void sde_hdmi_hdcp2p2_send_msg_work(struct kthread_work *work) +{ + struct sde_hdmi_hdcp2p2_ctrl *ctrl = container_of(work, + struct sde_hdmi_hdcp2p2_ctrl, send_msg); + + sde_hdmi_hdcp2p2_send_msg(ctrl); +} + +static void sde_hdmi_hdcp2p2_link_cb(void *data) +{ + struct sde_hdmi_hdcp2p2_ctrl *ctrl = data; + + if (!ctrl) { + SDE_HDCP_DEBUG("invalid input\n"); + return; + } + + if (atomic_read(&ctrl->auth_state) != HDCP_STATE_INACTIVE) + queue_kthread_work(&ctrl->worker, &ctrl->link); +} + +static void sde_hdmi_hdcp2p2_recv_msg(struct sde_hdmi_hdcp2p2_ctrl *ctrl) +{ + int timeout_hsync = 0, rc = 0; + char *recvd_msg_buf = NULL; + struct sde_hdmi_tx_hdcp2p2_ddc_data *ddc_data; + struct sde_hdmi_tx_ddc_ctrl *ddc_ctrl; + struct hdcp_lib_wakeup_data cdata = {HDCP_LIB_WKUP_CMD_INVALID}; + + if (!ctrl) { + SDE_ERROR("invalid input\n"); + rc = -EINVAL; + goto exit; + } + + cdata.context = ctrl->lib_ctx; + + if (atomic_read(&ctrl->auth_state) == HDCP_STATE_INACTIVE) { + SDE_ERROR("hdcp is off\n"); + goto exit; + } + + ddc_ctrl = ctrl->init_data.ddc_ctrl; + if (!ddc_ctrl) { + pr_err("invalid ddc ctrl\n"); + rc = -EINVAL; + goto exit; + } + + ddc_data = 
&ddc_ctrl->sde_hdcp2p2_ddc_data; + memset(ddc_data, 0, sizeof(*ddc_data)); + + timeout_hsync = _sde_hdmi_get_timeout_in_hysnc( + (void *)ctrl->init_data.cb_data, ctrl->timeout); + + if (timeout_hsync <= 0) { + SDE_ERROR("err in timeout hsync calc\n"); + timeout_hsync = HDMI_DEFAULT_TIMEOUT_HSYNC; + } + + SDE_HDCP_DEBUG("timeout for rxstatus %dms, %d hsync\n", + ctrl->timeout, timeout_hsync); + + ddc_data->intr_mask = RXSTATUS_MESSAGE_SIZE | RXSTATUS_REAUTH_REQ; + ddc_data->timeout_ms = ctrl->timeout; + ddc_data->timeout_hsync = timeout_hsync; + ddc_data->periodic_timer_hsync = timeout_hsync / 20; + ddc_data->read_method = HDCP2P2_RXSTATUS_HW_DDC_SW_TRIGGER; + ddc_data->wait = true; + + rc = sde_hdmi_hdcp2p2_read_rxstatus(ctrl->init_data.cb_data); + if (rc) { + SDE_ERROR("error reading rxstatus %d\n", rc); + goto exit; + } + + if (ddc_data->reauth_req) { + ddc_data->reauth_req = false; + + SDE_HDCP_DEBUG("reauth triggered by sink\n"); + rc = -EINVAL; + goto exit; + } + + ctrl->timeout_left = ddc_data->timeout_left; + + SDE_HDCP_DEBUG("timeout left after rxstatus %dms, msg size %d\n", + ctrl->timeout_left, ddc_data->message_size); + + if (!ddc_data->message_size) { + SDE_ERROR("recvd invalid message size\n"); + rc = -EINVAL; + goto exit; + } + + recvd_msg_buf = kzalloc(ddc_data->message_size, GFP_KERNEL); + if (!recvd_msg_buf) { + rc = -ENOMEM; + goto exit; + } + + rc = sde_hdmi_hdcp2p2_ddc_rd_message(ctrl, recvd_msg_buf, + ddc_data->message_size, ctrl->timeout_left); + if (rc) { + SDE_ERROR("error reading message %d\n", rc); + goto exit; + } + + cdata.cmd = HDCP_LIB_WKUP_CMD_MSG_RECV_SUCCESS; + cdata.recvd_msg_buf = recvd_msg_buf; + cdata.recvd_msg_len = ddc_data->message_size; + cdata.timeout = ctrl->timeout_left; +exit: + if (rc == -ETIMEDOUT) + cdata.cmd = HDCP_LIB_WKUP_CMD_MSG_RECV_TIMEOUT; + else if (rc) + cdata.cmd = HDCP_LIB_WKUP_CMD_MSG_RECV_FAILED; + + sde_hdmi_hdcp2p2_wakeup_lib(ctrl, &cdata); + kfree(recvd_msg_buf); +} + +static void sde_hdmi_hdcp2p2_recv_msg_work(struct kthread_work *work) +{ + struct sde_hdmi_hdcp2p2_ctrl *ctrl = container_of(work, + struct sde_hdmi_hdcp2p2_ctrl, recv_msg); + + sde_hdmi_hdcp2p2_recv_msg(ctrl); +} + +static int sde_hdmi_hdcp2p2_link_check(struct sde_hdmi_hdcp2p2_ctrl *ctrl) +{ + struct sde_hdmi_tx_ddc_ctrl *ddc_ctrl; + struct sde_hdmi_tx_hdcp2p2_ddc_data *ddc_data; + int timeout_hsync; + int ret; + + ddc_ctrl = ctrl->init_data.ddc_ctrl; + + if (!ddc_ctrl) + return -EINVAL; + + sde_hdmi_ddc_config(ctrl->init_data.cb_data); + + ddc_data = &ddc_ctrl->sde_hdcp2p2_ddc_data; + + memset(ddc_data, 0, sizeof(*ddc_data)); + + timeout_hsync = _sde_hdmi_get_timeout_in_hysnc( + (void *)ctrl->init_data.cb_data, + jiffies_to_msecs(HZ / 2)); + + if (timeout_hsync <= 0) { + SDE_ERROR("err in timeout hsync calc\n"); + timeout_hsync = HDMI_DEFAULT_TIMEOUT_HSYNC; + } + SDE_HDCP_DEBUG("timeout for rxstatus %d hsyncs\n", timeout_hsync); + + ddc_data->intr_mask = RXSTATUS_READY | RXSTATUS_MESSAGE_SIZE | + RXSTATUS_REAUTH_REQ; + ddc_data->timeout_hsync = timeout_hsync; + ddc_data->periodic_timer_hsync = timeout_hsync; + ddc_data->read_method = HDCP2P2_RXSTATUS_HW_DDC_SW_TRIGGER; + ddc_data->link_cb = sde_hdmi_hdcp2p2_link_cb; + ddc_data->link_data = ctrl; + + ret = sde_hdmi_hdcp2p2_read_rxstatus((void *)ctrl->init_data.cb_data); + return ret; +} + +static void sde_hdmi_hdcp2p2_poll_work(struct kthread_work *work) +{ + struct sde_hdmi_hdcp2p2_ctrl *ctrl = container_of(work, + struct sde_hdmi_hdcp2p2_ctrl, poll); + + sde_hdmi_hdcp2p2_link_check(ctrl); +} + +static void 
sde_hdmi_hdcp2p2_auth_status(struct sde_hdmi_hdcp2p2_ctrl *ctrl) +{ + if (!ctrl) { + SDE_ERROR("invalid input\n"); + return; + } + + if (atomic_read(&ctrl->auth_state) == HDCP_STATE_INACTIVE) { + SDE_ERROR("hdcp is off\n"); + return; + } + + if (ctrl->auth_status == HDMI_HDCP_AUTH_STATUS_SUCCESS) { + ctrl->init_data.notify_status(ctrl->init_data.cb_data, + HDCP_STATE_AUTHENTICATED); + + atomic_set(&ctrl->auth_state, HDCP_STATE_AUTHENTICATED); + } else { + sde_hdmi_hdcp2p2_auth_failed(ctrl); + } +} + +static void sde_hdmi_hdcp2p2_auth_status_work(struct kthread_work *work) +{ + struct sde_hdmi_hdcp2p2_ctrl *ctrl = container_of(work, + struct sde_hdmi_hdcp2p2_ctrl, status); + + sde_hdmi_hdcp2p2_auth_status(ctrl); +} + +static void sde_hdmi_hdcp2p2_link_work(struct kthread_work *work) +{ + int rc = 0; + struct sde_hdmi_hdcp2p2_ctrl *ctrl = container_of(work, + struct sde_hdmi_hdcp2p2_ctrl, link); + struct hdcp_lib_wakeup_data cdata = {HDCP_LIB_WKUP_CMD_INVALID}; + char *recvd_msg_buf = NULL; + struct sde_hdmi_tx_hdcp2p2_ddc_data *ddc_data; + struct sde_hdmi_tx_ddc_ctrl *ddc_ctrl; + + if (!ctrl) { + SDE_ERROR("invalid input\n"); + return; + } + + cdata.context = ctrl->lib_ctx; + + ddc_ctrl = ctrl->init_data.ddc_ctrl; + if (!ddc_ctrl) { + rc = -EINVAL; + cdata.cmd = HDCP_LIB_WKUP_CMD_STOP; + goto exit; + } + + ddc_data = &ddc_ctrl->sde_hdcp2p2_ddc_data; + + if (ddc_data->reauth_req) { + SDE_HDCP_DEBUG("reauth triggered by sink\n"); + + ddc_data->reauth_req = false; + rc = -ENOLINK; + cdata.cmd = HDCP_LIB_WKUP_CMD_STOP; + goto exit; + } + + if (ddc_data->ready && ddc_data->message_size) { + SDE_HDCP_DEBUG("topology changed. rxstatus msg size %d\n", + ddc_data->message_size); + + ddc_data->ready = false; + + recvd_msg_buf = kzalloc(ddc_data->message_size, GFP_KERNEL); + if (!recvd_msg_buf) { + cdata.cmd = HDCP_LIB_WKUP_CMD_STOP; + goto exit; + } + + rc = sde_hdmi_hdcp2p2_ddc_rd_message(ctrl, recvd_msg_buf, + ddc_data->message_size, HDCP2P2_DEFAULT_TIMEOUT); + if (rc) { + cdata.cmd = HDCP_LIB_WKUP_CMD_STOP; + SDE_ERROR("error reading message %d\n", rc); + } else { + cdata.cmd = HDCP_LIB_WKUP_CMD_MSG_RECV_SUCCESS; + cdata.recvd_msg_buf = recvd_msg_buf; + cdata.recvd_msg_len = ddc_data->message_size; + } + + ddc_data->message_size = 0; + } +exit: + sde_hdmi_hdcp2p2_wakeup_lib(ctrl, &cdata); + kfree(recvd_msg_buf); + + if (rc) { + sde_hdmi_hdcp2p2_auth_failed(ctrl); + return; + } +} + +static int sde_hdmi_hdcp2p2_auth(struct sde_hdmi_hdcp2p2_ctrl *ctrl) +{ + struct hdcp_lib_wakeup_data cdata = {HDCP_LIB_WKUP_CMD_INVALID}; + int rc = 0; + + if (!ctrl) { + SDE_ERROR("invalid input\n"); + return -EINVAL; + } + + cdata.context = ctrl->lib_ctx; + + if (atomic_read(&ctrl->auth_state) == HDCP_STATE_AUTHENTICATING) + cdata.cmd = HDCP_LIB_WKUP_CMD_START; + else + cdata.cmd = HDCP_LIB_WKUP_CMD_STOP; + + rc = sde_hdmi_hdcp2p2_wakeup_lib(ctrl, &cdata); + if (rc) + sde_hdmi_hdcp2p2_auth_failed(ctrl); + + return rc; +} + +static void sde_hdmi_hdcp2p2_auth_work(struct kthread_work *work) +{ + struct sde_hdmi_hdcp2p2_ctrl *ctrl = container_of(work, + struct sde_hdmi_hdcp2p2_ctrl, auth); + + sde_hdmi_hdcp2p2_auth(ctrl); +} + +void sde_hdmi_hdcp2p2_deinit(void *input) +{ + struct sde_hdmi_hdcp2p2_ctrl *ctrl; + struct hdcp_lib_wakeup_data cdata = {HDCP_LIB_WKUP_CMD_INVALID}; + + ctrl = (struct sde_hdmi_hdcp2p2_ctrl *)input; + + if (!ctrl) { + SDE_ERROR("invalid input\n"); + return; + } + + cdata.cmd = HDCP_LIB_WKUP_CMD_STOP; + cdata.context = ctrl->lib_ctx; + sde_hdmi_hdcp2p2_wakeup_lib(ctrl, &cdata); + + 
kthread_stop(ctrl->thread); + + mutex_destroy(&ctrl->mutex); + mutex_destroy(&ctrl->msg_lock); + mutex_destroy(&ctrl->wakeup_mutex); + kfree(ctrl); +} + +void *sde_hdmi_hdcp2p2_init(struct sde_hdcp_init_data *init_data) +{ + int rc; + struct sde_hdmi_hdcp2p2_ctrl *ctrl; + static struct sde_hdcp_ops ops = { + .reauthenticate = sde_hdmi_hdcp2p2_reauthenticate, + .authenticate = sde_hdmi_hdcp2p2_authenticate, + .feature_supported = sde_hdmi_hdcp2p2_feature_supported, + .off = sde_hdmi_hdcp2p2_off + }; + + static struct hdcp_client_ops client_ops = { + .wakeup = sde_hdmi_hdcp2p2_wakeup, + .notify_lvl_change = sde_hdmi_hdcp2p2_min_level_change, + }; + + static struct hdcp_txmtr_ops txmtr_ops; + struct hdcp_register_data register_data; + + SDE_HDCP_DEBUG("HDCP2P2 feature initialization\n"); + + if (!init_data || !init_data->core_io || !init_data->mutex || + !init_data->ddc_ctrl || !init_data->notify_status || + !init_data->workq || !init_data->cb_data) { + SDE_ERROR("invalid input\n"); + return ERR_PTR(-EINVAL); + } + + if (init_data->hdmi_tx_ver < MIN_HDMI_TX_MAJOR_VERSION) { + SDE_ERROR("HDMI Tx does not support HDCP 2.2\n"); + return ERR_PTR(-ENODEV); + } + + ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL); + if (!ctrl) + return ERR_PTR(-ENOMEM); + + ctrl->init_data = *init_data; + ctrl->lib = &txmtr_ops; + + ctrl->sink_status = SINK_DISCONNECTED; + + atomic_set(&ctrl->auth_state, HDCP_STATE_INACTIVE); + + ctrl->ops = &ops; + mutex_init(&ctrl->mutex); + mutex_init(&ctrl->msg_lock); + mutex_init(&ctrl->wakeup_mutex); + + register_data.hdcp_ctx = &ctrl->lib_ctx; + register_data.client_ops = &client_ops; + register_data.txmtr_ops = &txmtr_ops; + register_data.device_type = HDCP_TXMTR_HDMI; + register_data.client_ctx = ctrl; + + rc = hdcp_library_register(®ister_data); + if (rc) { + SDE_ERROR("Unable to register with HDCP 2.2 library\n"); + goto error; + } + + init_kthread_worker(&ctrl->worker); + + init_kthread_work(&ctrl->auth, sde_hdmi_hdcp2p2_auth_work); + init_kthread_work(&ctrl->send_msg, sde_hdmi_hdcp2p2_send_msg_work); + init_kthread_work(&ctrl->recv_msg, sde_hdmi_hdcp2p2_recv_msg_work); + init_kthread_work(&ctrl->status, sde_hdmi_hdcp2p2_auth_status_work); + init_kthread_work(&ctrl->link, sde_hdmi_hdcp2p2_link_work); + init_kthread_work(&ctrl->poll, sde_hdmi_hdcp2p2_poll_work); + + ctrl->thread = kthread_run(kthread_worker_fn, + &ctrl->worker, "hdmi_hdcp2p2"); + + if (IS_ERR(ctrl->thread)) { + SDE_ERROR("unable to start hdcp2p2 thread\n"); + rc = PTR_ERR(ctrl->thread); + ctrl->thread = NULL; + goto error; + } + + return ctrl; +error: + kfree(ctrl); + return ERR_PTR(rc); +} + +static bool sde_hdmi_hdcp2p2_supported(struct sde_hdmi_hdcp2p2_ctrl *ctrl) +{ + u8 hdcp2version = 0; + int rc = sde_hdmi_hdcp2p2_read_version(ctrl, &hdcp2version); + + if (rc) + goto error; + + if (hdcp2version & BIT(2)) { + SDE_HDCP_DEBUG("Sink is HDCP 2.2 capable\n"); + return true; + } + +error: + SDE_HDCP_DEBUG("Sink is not HDCP 2.2 capable\n"); + return false; +} + +struct sde_hdcp_ops *sde_hdmi_hdcp2p2_start(void *input) +{ + struct sde_hdmi_hdcp2p2_ctrl *ctrl; + + ctrl = (struct sde_hdmi_hdcp2p2_ctrl *)input; + + SDE_HDCP_DEBUG("Checking sink capability\n"); + if (sde_hdmi_hdcp2p2_supported(ctrl)) + return ctrl->ops; + else + return NULL; + +} + diff --git a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_util.c b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_util.c new file mode 100644 index 000000000000..a291a1112aeb --- /dev/null +++ b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_util.c @@ -0,0 +1,956 @@ +/* Copyright (c) 
2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "drm_edid.h" +#include "sde_kms.h" +#include "sde_hdmi.h" +#include "sde_hdmi_regs.h" +#include "hdmi.h" + +#define HDMI_SEC_TO_MS 1000 +#define HDMI_MS_TO_US 1000 +#define HDMI_SEC_TO_US (HDMI_SEC_TO_MS * HDMI_MS_TO_US) +#define HDMI_KHZ_TO_HZ 1000 +#define HDMI_BUSY_WAIT_DELAY_US 100 + +static void sde_hdmi_hdcp2p2_ddc_clear_status(struct sde_hdmi *display) +{ + u32 reg_val; + struct hdmi *hdmi; + + if (!display) { + pr_err("invalid ddc ctrl\n"); + return; + } + hdmi = display->ctrl.ctrl; + /* check for errors and clear status */ + reg_val = hdmi_read(hdmi, HDMI_HDCP2P2_DDC_STATUS); + + if (reg_val & BIT(4)) { + pr_debug("ddc aborted\n"); + reg_val |= BIT(5); + } + + if (reg_val & BIT(8)) { + pr_debug("timed out\n"); + reg_val |= BIT(9); + } + + if (reg_val & BIT(12)) { + pr_debug("NACK0\n"); + reg_val |= BIT(13); + } + + if (reg_val & BIT(14)) { + pr_debug("NACK1\n"); + reg_val |= BIT(15); + } + + hdmi_write(hdmi, HDMI_HDCP2P2_DDC_STATUS, reg_val); +} + +static const char *sde_hdmi_hdr_sname(enum sde_hdmi_hdr_state hdr_state) +{ + switch (hdr_state) { + case HDR_DISABLE: return "HDR_DISABLE"; + case HDR_ENABLE: return "HDR_ENABLE"; + default: return "HDR_INVALID_STATE"; + } +} + +/** + * sde_hdmi_dump_regs - utility to dump HDMI regs + * @hdmi_display: Pointer to private display handle + * Return : void + */ + +void sde_hdmi_dump_regs(void *hdmi_display) +{ + struct sde_hdmi *display = (struct sde_hdmi *)hdmi_display; + struct hdmi *hdmi; + int i; + u32 addr_off = 0; + u32 len = 0; + + if (!display) { + pr_err("invalid input\n"); + return; + } + + hdmi = display->ctrl.ctrl; + + if (!hdmi) { + pr_err("invalid input\n"); + return; + } + + if (!hdmi->power_on || !display->connected) { + SDE_ERROR("HDMI display is not ready\n"); + return; + } + + len = hdmi->mmio_len; + + if (len % 16) + len += 16; + len /= 16; + + pr_info("HDMI CORE regs\n"); + for (i = 0; i < len; i++) { + u32 x0, x4, x8, xc; + + x0 = hdmi_read(hdmi, addr_off+0x0); + x4 = hdmi_read(hdmi, addr_off+0x4); + x8 = hdmi_read(hdmi, addr_off+0x8); + xc = hdmi_read(hdmi, addr_off+0xc); + + pr_info("%08x : %08x %08x %08x %08x\n", addr_off, x0, x4, x8, + xc); + + addr_off += 16; + } +} + +int sde_hdmi_ddc_hdcp2p2_isr(void *hdmi_display) +{ + struct sde_hdmi_tx_hdcp2p2_ddc_data *data; + u32 intr0, intr2, intr5; + u32 msg_size; + int rc = 0; + struct sde_hdmi_tx_ddc_ctrl *ddc_ctrl; + struct sde_hdmi *display = (struct sde_hdmi *)hdmi_display; + struct hdmi *hdmi; + + ddc_ctrl = &display->ddc_ctrl; + data = &ddc_ctrl->sde_hdcp2p2_ddc_data; + hdmi = display->ctrl.ctrl; + + if (!hdmi) { + pr_err("invalid input\n"); + return -EINVAL; + } + + intr0 = hdmi_read(hdmi, HDMI_DDC_INT_CTRL0); + intr2 = hdmi_read(hdmi, HDMI_HDCP_INT_CTRL2); + intr5 = hdmi_read(hdmi, HDMI_DDC_INT_CTRL5); + + pr_debug("intr0: 0x%x, intr2: 0x%x, intr5: 0x%x\n", + intr0, intr2, intr5); + + /* check if encryption is enabled */ + if (intr2 & BIT(0)) { + /* 
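+	 * Encryption is now active. In HDMI_HDCP_INT_CTRL2, BIT(1) acks the
+	 * encryption-ready interrupt, BIT(2) is its enable and BIT(6) enables
+	 * the encryption-not-ready interrupt, so: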
+ * ack encryption ready interrupt. + * disable encryption ready interrupt. + * enable encryption not ready interrupt. + */ + intr2 &= ~BIT(2); + intr2 |= BIT(1) | BIT(6); + + pr_info("HDCP 2.2 Encryption enabled\n"); + data->encryption_ready = true; + } + + /* check if encryption is disabled */ + if (intr2 & BIT(4)) { + /* + * ack encryption not ready interrupt. + * disable encryption not ready interrupt. + * enable encryption ready interrupt. + */ + intr2 &= ~BIT(6); + intr2 |= BIT(5) | BIT(2); + + pr_info("HDCP 2.2 Encryption disabled\n"); + data->encryption_ready = false; + } + + hdmi_write(hdmi, HDMI_HDCP_INT_CTRL2, intr2); + + /* get the message size bits 29:20 */ + msg_size = (intr0 & (0x3FF << 20)) >> 20; + + if (msg_size) { + /* ack and disable message size interrupt */ + intr0 |= BIT(30); + intr0 &= ~BIT(31); + + data->message_size = msg_size; + } + + /* check and disable ready interrupt */ + if (intr0 & BIT(16)) { + /* ack ready/not ready interrupt */ + intr0 |= BIT(17); + intr0 &= ~BIT(18); + pr_debug("got ready interrupt\n"); + data->ready = true; + } + + /* check for reauth req interrupt */ + if (intr0 & BIT(12)) { + /* ack and disable reauth req interrupt */ + intr0 |= BIT(13); + intr0 &= ~BIT(14); + pr_err("got reauth interrupt\n"); + data->reauth_req = true; + } + + /* check for ddc fail interrupt */ + if (intr0 & BIT(8)) { + /* ack ddc fail interrupt */ + intr0 |= BIT(9); + pr_err("got ddc fail interrupt\n"); + data->ddc_max_retries_fail = true; + } + + /* check for ddc done interrupt */ + if (intr0 & BIT(4)) { + /* ack ddc done interrupt */ + intr0 |= BIT(5); + pr_debug("got ddc done interrupt\n"); + data->ddc_done = true; + } + + /* check for ddc read req interrupt */ + if (intr0 & BIT(0)) { + /* ack read req interrupt */ + intr0 |= BIT(1); + + data->ddc_read_req = true; + } + + hdmi_write(hdmi, HDMI_DDC_INT_CTRL0, intr0); + + if (intr5 & BIT(0)) { + pr_err("RXSTATUS_DDC_REQ_TIMEOUT\n"); + + /* ack and disable timeout interrupt */ + intr5 |= BIT(1); + intr5 &= ~BIT(2); + + data->ddc_timeout = true; + } + hdmi_write(hdmi, HDMI_DDC_INT_CTRL5, intr5); + + if (data->message_size || data->ready || data->reauth_req) { + if (data->wait) { + complete(&ddc_ctrl->rx_status_done); + } else if (data->link_cb && data->link_data) { + data->link_cb(data->link_data); + } else { + pr_err("new msg/reauth not handled\n"); + rc = -EINVAL; + } + } + + sde_hdmi_hdcp2p2_ddc_clear_status(display); + + return rc; +} + +int sde_hdmi_ddc_scrambling_isr(void *hdmi_display) +{ + + bool scrambler_timer_off = false; + u32 intr2, intr5; + struct sde_hdmi *display = (struct sde_hdmi *)hdmi_display; + struct hdmi *hdmi; + + + hdmi = display->ctrl.ctrl; + + if (!hdmi) { + pr_err("invalid input\n"); + return -EINVAL; + } + + intr2 = hdmi_read(hdmi, HDMI_DDC_INT_CTRL2); + intr5 = hdmi_read(hdmi, HDMI_DDC_INT_CTRL5); + + pr_debug("intr2: 0x%x, intr5: 0x%x\n", intr2, intr5); + + if (intr2 & BIT(12)) { + pr_err("SCRAMBLER_STATUS_NOT\n"); + + intr2 |= BIT(14); + scrambler_timer_off = true; + } + + if (intr2 & BIT(8)) { + pr_err("SCRAMBLER_STATUS_DDC_FAILED\n"); + + intr2 |= BIT(9); + + scrambler_timer_off = true; + } + hdmi_write(hdmi, HDMI_DDC_INT_CTRL2, intr2); + + if (intr5 & BIT(8)) { + pr_err("SCRAMBLER_STATUS_DDC_REQ_TIMEOUT\n"); + intr5 |= BIT(9); + intr5 &= ~BIT(10); + scrambler_timer_off = true; + } + hdmi_write(hdmi, HDMI_DDC_INT_CTRL5, intr5); + + if (scrambler_timer_off) + _sde_hdmi_scrambler_ddc_disable((void *)display); + + return 0; +} + +static int sde_hdmi_ddc_read_retry(struct sde_hdmi 
*display) +{ + int status; + int busy_wait_us; + struct sde_hdmi_tx_ddc_ctrl *ddc_ctrl; + struct sde_hdmi_tx_ddc_data *ddc_data; + struct hdmi *hdmi; + + if (!display) { + SDE_ERROR("invalid input\n"); + return -EINVAL; + } + + hdmi = display->ctrl.ctrl; + ddc_ctrl = &display->ddc_ctrl; + ddc_data = &ddc_ctrl->ddc_data; + + if (!ddc_data) { + SDE_ERROR("invalid input\n"); + return -EINVAL; + } + + if (!ddc_data->data_buf) { + status = -EINVAL; + SDE_ERROR("%s: invalid buf\n", ddc_data->what); + goto error; + } + + if (ddc_data->retry < 0) { + SDE_ERROR("invalid no. of retries %d\n", ddc_data->retry); + status = -EINVAL; + goto error; + } + + do { + if (ddc_data->hard_timeout) { + HDMI_UTIL_DEBUG("using hard_timeout %dms\n", + ddc_data->hard_timeout); + + busy_wait_us = ddc_data->hard_timeout * HDMI_MS_TO_US; + hdmi->use_hard_timeout = true; + hdmi->busy_wait_us = busy_wait_us; + } + + /* Calling upstream ddc read method */ + status = hdmi_ddc_read(hdmi, ddc_data->dev_addr, + ddc_data->offset, + ddc_data->data_buf, ddc_data->request_len, + false); + + if (ddc_data->hard_timeout) + ddc_data->timeout_left = hdmi->timeout_count; + + + if (ddc_data->hard_timeout && !hdmi->timeout_count) { + HDMI_UTIL_DEBUG("%s: timedout\n", ddc_data->what); + status = -ETIMEDOUT; + } + + } while (status && ddc_data->retry--); + + if (status) { + HDMI_UTIL_ERROR("%s: failed status = %d\n", + ddc_data->what, status); + goto error; + } + + HDMI_UTIL_DEBUG("%s: success\n", ddc_data->what); + +error: + return status; +} /* sde_hdmi_ddc_read_retry */ + +int sde_hdmi_ddc_read(void *cb_data) +{ + int rc = 0; + int retry; + struct sde_hdmi_tx_ddc_ctrl *ddc_ctrl; + struct sde_hdmi_tx_ddc_data *ddc_data; + struct sde_hdmi *display = (struct sde_hdmi *)cb_data; + + if (!display) { + SDE_ERROR("invalid ddc ctrl\n"); + return -EINVAL; + } + + ddc_ctrl = &display->ddc_ctrl; + ddc_data = &ddc_ctrl->ddc_data; + retry = ddc_data->retry; + + rc = sde_hdmi_ddc_read_retry(display); + if (!rc) + return rc; + + if (ddc_data->retry_align) { + ddc_data->retry = retry; + + ddc_data->request_len = 32 * ((ddc_data->data_len + 31) / 32); + rc = sde_hdmi_ddc_read_retry(display); + } + + return rc; +} /* hdmi_ddc_read */ + +int sde_hdmi_ddc_write(void *cb_data) +{ + int status; + struct sde_hdmi_tx_ddc_ctrl *ddc_ctrl; + struct sde_hdmi_tx_ddc_data *ddc_data; + int busy_wait_us; + struct hdmi *hdmi; + struct sde_hdmi *display = (struct sde_hdmi *)cb_data; + + if (!display) { + SDE_ERROR("invalid input\n"); + return -EINVAL; + } + + hdmi = display->ctrl.ctrl; + ddc_ctrl = &display->ddc_ctrl; + + ddc_data = &ddc_ctrl->ddc_data; + + if (!ddc_data) { + SDE_ERROR("invalid input\n"); + return -EINVAL; + } + + if (!ddc_data->data_buf) { + status = -EINVAL; + SDE_ERROR("%s: invalid buf\n", ddc_data->what); + goto error; + } + + if (ddc_data->retry < 0) { + SDE_ERROR("invalid no. 
of retries %d\n", ddc_data->retry); + status = -EINVAL; + goto error; + } + + do { + if (ddc_data->hard_timeout) { + busy_wait_us = ddc_data->hard_timeout * HDMI_MS_TO_US; + hdmi->use_hard_timeout = true; + hdmi->busy_wait_us = busy_wait_us; + } + + status = hdmi_ddc_write(hdmi, + ddc_data->dev_addr, ddc_data->offset, + ddc_data->data_buf, ddc_data->data_len, + false); + + if (ddc_data->hard_timeout) + ddc_data->timeout_left = hdmi->timeout_count; + + if (ddc_data->hard_timeout && !hdmi->timeout_count) { + HDMI_UTIL_ERROR("%s timout\n", ddc_data->what); + status = -ETIMEDOUT; + } + + } while (status && ddc_data->retry--); + + if (status) { + HDMI_UTIL_ERROR("%s: failed status = %d\n", + ddc_data->what, status); + goto error; + } + + HDMI_UTIL_DEBUG("%s: success\n", ddc_data->what); +error: + return status; +} /* hdmi_ddc_write */ + +bool sde_hdmi_tx_is_hdcp_enabled(struct sde_hdmi *hdmi_ctrl) +{ + if (!hdmi_ctrl) { + SDE_ERROR("%s: invalid input\n", __func__); + return false; + } + + return (hdmi_ctrl->hdcp14_present || hdmi_ctrl->hdcp22_present) && + hdmi_ctrl->hdcp_ops; +} + +bool sde_hdmi_tx_is_encryption_set(struct sde_hdmi *hdmi_ctrl) +{ + bool enc_en = true; + u32 reg_val; + struct hdmi *hdmi; + + if (!hdmi_ctrl) { + SDE_ERROR("%s: invalid input\n", __func__); + goto end; + } + + hdmi = hdmi_ctrl->ctrl.ctrl; + + reg_val = hdmi_read(hdmi, HDMI_HDCP_CTRL2); + if ((reg_val & BIT(0)) && (reg_val & BIT(1))) + goto end; + + if (hdmi_read(hdmi, HDMI_CTRL) & BIT(2)) + goto end; + + return false; + +end: + return enc_en; +} /* sde_hdmi_tx_is_encryption_set */ + +bool sde_hdmi_tx_is_stream_shareable(struct sde_hdmi *hdmi_ctrl) +{ + bool ret; + + if (!hdmi_ctrl) { + SDE_ERROR("%s: invalid input\n", __func__); + return false; + } + + switch (hdmi_ctrl->enc_lvl) { + case HDCP_STATE_AUTH_ENC_NONE: + ret = true; + break; + case HDCP_STATE_AUTH_ENC_1X: + ret = sde_hdmi_tx_is_hdcp_enabled(hdmi_ctrl) && + hdmi_ctrl->auth_state; + break; + case HDCP_STATE_AUTH_ENC_2P2: + ret = hdmi_ctrl->hdcp22_present && + hdmi_ctrl->auth_state; + break; + default: + ret = false; + } + + return ret; +} + +bool sde_hdmi_tx_is_panel_on(struct sde_hdmi *hdmi_ctrl) +{ + struct hdmi *hdmi; + + if (!hdmi_ctrl) { + SDE_ERROR("%s: invalid input\n", __func__); + return false; + } + + hdmi = hdmi_ctrl->ctrl.ctrl; + + return hdmi_ctrl->connected && hdmi->power_on; +} + +int sde_hdmi_config_avmute(struct hdmi *hdmi, bool set) +{ + u32 av_mute_status; + bool av_pkt_en = false; + + if (!hdmi) { + SDE_ERROR("invalid HDMI Ctrl\n"); + return -ENODEV; + } + + av_mute_status = hdmi_read(hdmi, HDMI_GC); + + if (set) { + if (!(av_mute_status & BIT(0))) { + hdmi_write(hdmi, HDMI_GC, av_mute_status | BIT(0)); + av_pkt_en = true; + } + } else { + if (av_mute_status & BIT(0)) { + hdmi_write(hdmi, HDMI_GC, av_mute_status & ~BIT(0)); + av_pkt_en = true; + } + } + + /* Enable AV Mute tranmission here */ + if (av_pkt_en) + hdmi_write(hdmi, HDMI_VBI_PKT_CTRL, + hdmi_read(hdmi, HDMI_VBI_PKT_CTRL) | (BIT(4) & BIT(5))); + + pr_info("AVMUTE %s\n", set ? "set" : "cleared"); + + return 0; +} + +int _sde_hdmi_get_timeout_in_hysnc(void *hdmi_display, u32 timeout_ms) +{ + struct sde_hdmi *display = (struct sde_hdmi *)hdmi_display; + struct drm_display_mode mode = display->mode; + /* + * pixel clock = h_total * v_total * fps + * 1 sec = pixel clock number of pixels are transmitted. + * time taken by one line (h_total) = 1s / (v_total * fps). 
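+	 * e.g. for 1080p60 (clock = 148500 kHz, h_total = 2200) one line takes
+	 * roughly 14.8 us, so a 200 ms timeout spans about 13500 h_total periods.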
+ * lines for give time = (time_ms * 1000) / (1000000 / (v_total * fps)) + * = (time_ms * clock) / h_total + */ + + return (timeout_ms * mode.clock / mode.htotal); +} + +static void sde_hdmi_hdcp2p2_ddc_reset(struct sde_hdmi *hdmi_ctrl) +{ + u32 reg_val; + struct hdmi *hdmi = hdmi_ctrl->ctrl.ctrl; + + if (!hdmi) { + pr_err("Invalid parameters\n"); + return; + } + + /* + * Clear acks for DDC_REQ, DDC_DONE, DDC_FAILED, RXSTATUS_READY, + * RXSTATUS_MSG_SIZE + */ + reg_val = BIT(30) | BIT(17) | BIT(13) | BIT(9) | BIT(5) | BIT(1); + hdmi_write(hdmi, HDMI_DDC_INT_CTRL0, reg_val); + /* Reset DDC timers */ + reg_val = BIT(0) | hdmi_read(hdmi, HDMI_HDCP2P2_DDC_CTRL); + hdmi_write(hdmi, HDMI_HDCP2P2_DDC_CTRL, reg_val); + reg_val = hdmi_read(hdmi, HDMI_HDCP2P2_DDC_CTRL); + reg_val &= ~BIT(0); + hdmi_write(hdmi, HDMI_HDCP2P2_DDC_CTRL, reg_val); +} + +void sde_hdmi_hdcp2p2_ddc_disable(void *hdmi_display) +{ + struct sde_hdmi *display = (struct sde_hdmi *)hdmi_display; + u32 reg_val; + struct hdmi *hdmi = display->ctrl.ctrl; + + if (!hdmi) { + pr_err("Invalid parameters\n"); + return; + } + + sde_hdmi_hdcp2p2_ddc_reset(display); + + /* Disable HW DDC access to RxStatus register */ + reg_val = hdmi_read(hdmi, HDMI_HW_DDC_CTRL); + reg_val &= ~(BIT(1) | BIT(0)); + + hdmi_write(hdmi, HDMI_HW_DDC_CTRL, reg_val); +} + +static void _sde_hdmi_scrambler_ddc_reset(struct hdmi *hdmi) +{ + u32 reg_val; + + /* clear ack and disable interrupts */ + reg_val = BIT(14) | BIT(9) | BIT(5) | BIT(1); + hdmi_write(hdmi, REG_HDMI_DDC_INT_CTRL2, reg_val); + + /* Reset DDC timers */ + reg_val = BIT(0) | hdmi_read(hdmi, REG_HDMI_SCRAMBLER_STATUS_DDC_CTRL); + hdmi_write(hdmi, REG_HDMI_SCRAMBLER_STATUS_DDC_CTRL, reg_val); + + reg_val = hdmi_read(hdmi, REG_HDMI_SCRAMBLER_STATUS_DDC_CTRL); + reg_val &= ~BIT(0); + hdmi_write(hdmi, REG_HDMI_SCRAMBLER_STATUS_DDC_CTRL, reg_val); +} + +void _sde_hdmi_scrambler_ddc_disable(void *hdmi_display) +{ + struct sde_hdmi *display = (struct sde_hdmi *)hdmi_display; + u32 reg_val; + + struct hdmi *hdmi = display->ctrl.ctrl; + + if (!hdmi) { + pr_err("Invalid parameters\n"); + return; + } + + _sde_hdmi_scrambler_ddc_reset(hdmi); + /* Disable HW DDC access to RxStatus register */ + reg_val = hdmi_read(hdmi, REG_HDMI_HW_DDC_CTRL); + reg_val &= ~(BIT(8) | BIT(9)); + hdmi_write(hdmi, REG_HDMI_HW_DDC_CTRL, reg_val); +} + +void sde_hdmi_ddc_config(void *hdmi_display) +{ + struct sde_hdmi *display = (struct sde_hdmi *)hdmi_display; + struct hdmi *hdmi = display->ctrl.ctrl; + + if (!hdmi) { + pr_err("Invalid parameters\n"); + return; + } + hdmi_write(hdmi, REG_HDMI_DDC_SPEED, + HDMI_DDC_SPEED_THRESHOLD(2) | + HDMI_DDC_SPEED_PRESCALE(10)); + + hdmi_write(hdmi, REG_HDMI_DDC_SETUP, + HDMI_DDC_SETUP_TIMEOUT(0xff)); + + /* enable reference timer for 19us */ + hdmi_write(hdmi, REG_HDMI_DDC_REF, + HDMI_DDC_REF_REFTIMER_ENABLE | + HDMI_DDC_REF_REFTIMER(19)); +} + +int sde_hdmi_hdcp2p2_read_rxstatus(void *hdmi_display) +{ + u32 reg_val; + u32 intr_en_mask; + u32 timeout; + u32 timer; + int rc = 0; + int busy_wait_us; + struct sde_hdmi_tx_hdcp2p2_ddc_data *data; + struct sde_hdmi *display = (struct sde_hdmi *)hdmi_display; + struct hdmi *hdmi = display->ctrl.ctrl; + struct sde_hdmi_tx_ddc_ctrl *ddc_ctrl; + u32 rem; + + if (!hdmi) { + pr_err("Invalid ddc data\n"); + return -EINVAL; + } + + ddc_ctrl = &display->ddc_ctrl; + data = &ddc_ctrl->sde_hdcp2p2_ddc_data; + if (!data) { + pr_err("Invalid ddc data\n"); + return -EINVAL; + } + + rc = ddc_clear_irq(hdmi); + if (rc) { + pr_err("DDC clear irq failed\n"); + return rc; 
+ } + intr_en_mask = data->intr_mask; + intr_en_mask |= BIT(HDCP2P2_RXSTATUS_DDC_FAILED_INTR_MASK); + + /* Disable short read for now, sinks don't support it */ + reg_val = hdmi_read(hdmi, HDMI_HDCP2P2_DDC_CTRL); + reg_val |= BIT(4); + hdmi_write(hdmi, HDMI_HDCP2P2_DDC_CTRL, reg_val); + /* + * Setup the DDC timers for HDMI_HDCP2P2_DDC_TIMER_CTRL1 and + * HDMI_HDCP2P2_DDC_TIMER_CTRL2. + * Following are the timers: + * 1. DDC_REQUEST_TIMER: Timeout in hsyncs in which to wait for the + * HDCP 2.2 sink to respond to an RxStatus request + * 2. DDC_URGENT_TIMER: Time period in hsyncs to issue an urgent flag + * when an RxStatus DDC request is made but not accepted by I2C + * engine + * 3. DDC_TIMEOUT_TIMER: Timeout in hsyncs which starts counting when + * a request is made and stops when it is accepted by DDC arbiter + */ + + timeout = data->timeout_hsync; + timer = data->periodic_timer_hsync; + + hdmi_write(hdmi, HDMI_HDCP2P2_DDC_TIMER_CTRL, timer); + /* Set both urgent and hw-timeout fields to the same value */ + hdmi_write(hdmi, HDMI_HDCP2P2_DDC_TIMER_CTRL2, + (timeout << 16 | timeout)); + /* enable interrupts */ + reg_val = intr_en_mask; + /* Clear interrupt status bits */ + reg_val |= intr_en_mask >> 1; + + hdmi_write(hdmi, HDMI_DDC_INT_CTRL0, reg_val); + reg_val = hdmi_read(hdmi, HDMI_DDC_INT_CTRL5); + /* clear and enable RxStatus read timeout */ + reg_val |= BIT(2) | BIT(1); + + hdmi_write(hdmi, HDMI_DDC_INT_CTRL5, reg_val); + /* + * Enable hardware DDC access to RxStatus register + * + * HDMI_HW_DDC_CTRL:Bits 1:0 (RXSTATUS_DDC_ENABLE) read like this: + * + * 0 = disable HW controlled DDC access to RxStatus + * 1 = automatic on when HDCP 2.2 is authenticated and loop based on + * request timer (i.e. the hardware will loop automatically) + * 2 = force on and loop based on request timer (hardware will loop) + * 3 = enable by sw trigger and loop until interrupt is generated for + * RxStatus.reauth_req, RxStatus.ready or RxStatus.message_Size. 
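+	 * (these encodings match the HDCP2P2_RXSTATUS_HW_DDC_* definitions in
+	 * sde_hdmi_util.h)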
+ * + * Depending on the value of ddc_data::poll_sink, we make the decision + * to use either SW_TRIGGER(3) (poll_sink = false) which means that the + * hardware will poll sink and generate interrupt when sink responds, + * or use AUTOMATIC_LOOP(1) (poll_sink = true) which will poll the sink + * based on request timer + */ + + reg_val = hdmi_read(hdmi, HDMI_HW_DDC_CTRL); + reg_val &= ~(BIT(1) | BIT(0)); + + busy_wait_us = data->timeout_ms * HDMI_MS_TO_US; + + /* read method: HDCP2P2_RXSTATUS_HW_DDC_SW_TRIGGER */ + reg_val |= BIT(1) | BIT(0); + hdmi_write(hdmi, HDMI_HW_DDC_CTRL, reg_val); + + hdmi_write(hdmi, HDMI_HDCP2P2_DDC_SW_TRIGGER, 1); + if (data->wait) { + reinit_completion(&ddc_ctrl->rx_status_done); + rem = wait_for_completion_timeout(&ddc_ctrl->rx_status_done, + HZ); + data->timeout_left = jiffies_to_msecs(rem); + + if (!data->timeout_left) { + pr_err("sw ddc rxstatus timeout\n"); + rc = -ETIMEDOUT; + } + sde_hdmi_hdcp2p2_ddc_disable((void *)display); + } + return rc; +} + +unsigned long sde_hdmi_calc_pixclk(unsigned long pixel_freq, + u32 out_format, bool dc_enable) +{ + u32 rate_ratio = HDMI_RGB_24BPP_PCLK_TMDS_CH_RATE_RATIO; + + if (out_format & MSM_MODE_FLAG_COLOR_FORMAT_YCBCR420) + rate_ratio = HDMI_YUV420_24BPP_PCLK_TMDS_CH_RATE_RATIO; + + pixel_freq /= rate_ratio; + + if (dc_enable) + pixel_freq += pixel_freq >> 2; + + return pixel_freq; + +} + +bool sde_hdmi_validate_pixclk(struct drm_connector *connector, + unsigned long pclk) +{ + struct sde_connector *c_conn = to_sde_connector(connector); + struct sde_hdmi *display = (struct sde_hdmi *)c_conn->display; + unsigned long max_pclk = display->max_pclk_khz * HDMI_KHZ_TO_HZ; + + if (connector->max_tmds_char) + max_pclk = MIN(max_pclk, + connector->max_tmds_char * HDMI_MHZ_TO_HZ); + else if (connector->max_tmds_clock) + max_pclk = MIN(max_pclk, + connector->max_tmds_clock * HDMI_MHZ_TO_HZ); + + SDE_DEBUG("MAX PCLK = %ld, PCLK = %ld\n", max_pclk, pclk); + + return pclk < max_pclk; +} + +static bool sde_hdmi_check_dc_clock(struct drm_connector *connector, + struct drm_display_mode *mode, u32 format) +{ + struct sde_connector *c_conn = to_sde_connector(connector); + struct sde_hdmi *display = (struct sde_hdmi *)c_conn->display; + + u32 tmds_clk_with_dc = sde_hdmi_calc_pixclk( + mode->clock * HDMI_KHZ_TO_HZ, + format, + true); + + return (display->dc_feature_supported && + sde_hdmi_validate_pixclk(connector, tmds_clk_with_dc)); +} + +int sde_hdmi_sink_dc_support(struct drm_connector *connector, + struct drm_display_mode *mode) +{ + int dc_format = 0; + + if ((mode->flags & DRM_MODE_FLAG_SUPPORTS_YUV) && + (connector->display_info.edid_hdmi_dc_modes + & DRM_EDID_YCBCR420_DC_30)) + if (sde_hdmi_check_dc_clock(connector, mode, + MSM_MODE_FLAG_COLOR_FORMAT_YCBCR420)) + dc_format |= MSM_MODE_FLAG_YUV420_DC_ENABLE; + + if ((mode->flags & DRM_MODE_FLAG_SUPPORTS_RGB) && + (connector->display_info.edid_hdmi_dc_modes + & DRM_EDID_HDMI_DC_30)) + if (sde_hdmi_check_dc_clock(connector, mode, + MSM_MODE_FLAG_COLOR_FORMAT_RGB444)) + dc_format |= MSM_MODE_FLAG_RGB444_DC_ENABLE; + + return dc_format; +} + +u8 sde_hdmi_hdr_get_ops(u8 curr_state, + u8 new_state) +{ + + /** There could be 3 valid state transitions: + * 1. HDR_DISABLE -> HDR_ENABLE + * + * In this transition, we shall start sending + * HDR metadata with metadata from the HDR clip + * + * 2. HDR_ENABLE -> HDR_ENABLE + * + * In this transition, we will keep sending + * HDR metadata but with EOTF and metadata as 0 + * + * 3. 
HDR_ENABLE -> HDR_DISABLE + * + * In this transition, we will stop sending + * metadata to the sink and clear PKT_CTRL register + * bits. + */ + + if ((curr_state == HDR_DISABLE) + && (new_state == HDR_ENABLE)) { + HDMI_UTIL_DEBUG("State changed %s ---> %s\n", + sde_hdmi_hdr_sname(curr_state), + sde_hdmi_hdr_sname(new_state)); + return HDR_SEND_INFO; + } else if ((curr_state == HDR_ENABLE) + && (new_state == HDR_ENABLE)) { + HDMI_UTIL_DEBUG("State changed %s ---> %s\n", + sde_hdmi_hdr_sname(curr_state), + sde_hdmi_hdr_sname(new_state)); + return HDR_SEND_INFO; + } else if ((curr_state == HDR_ENABLE) + && (new_state == HDR_DISABLE)) { + HDMI_UTIL_DEBUG("State changed %s ---> %s\n", + sde_hdmi_hdr_sname(curr_state), + sde_hdmi_hdr_sname(new_state)); + return HDR_CLEAR_INFO; + } + + HDMI_UTIL_DEBUG("Unsupported OR no state change\n"); + return HDR_UNSUPPORTED_OP; +} + diff --git a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_util.h b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_util.h new file mode 100644 index 000000000000..3c6b0f1b9dd4 --- /dev/null +++ b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_util.h @@ -0,0 +1,194 @@ +/* + * Copyright (c) 2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef _SDE_HDMI_UTIL_H_ +#define _SDE_HDMI_UTIL_H_ + +#include +#include +#include +#include +#include + +#include +#include +#include "hdmi.h" +#include "sde_kms.h" +#include "sde_connector.h" +#include "msm_drv.h" +#include "sde_hdmi_regs.h" + +#ifdef HDMI_UTIL_DEBUG_ENABLE +#define HDMI_UTIL_DEBUG(fmt, args...) SDE_ERROR(fmt, ##args) +#else +#define HDMI_UTIL_DEBUG(fmt, args...) SDE_DEBUG(fmt, ##args) +#endif + +#define HDMI_UTIL_ERROR(fmt, args...) SDE_ERROR(fmt, ##args) + +/* + * Offsets in HDMI_DDC_INT_CTRL0 register + * + * The HDMI_DDC_INT_CTRL0 register is intended for HDCP 2.2 RxStatus + * register manipulation. 
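+ * sde_hdmi_ddc_hdcp2p2_isr() and sde_hdmi_hdcp2p2_read_rxstatus() are the
+ * main consumers of this layout.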
It reads like this: + * + * Bit 31: RXSTATUS_MESSAGE_SIZE_MASK (1 = generate interrupt when size > 0) + * Bit 30: RXSTATUS_MESSAGE_SIZE_ACK (1 = Acknowledge message size intr) + * Bits 29-20: RXSTATUS_MESSAGE_SIZE (Actual size of message available) + * Bits 19-18: RXSTATUS_READY_MASK (1 = generate interrupt when ready = 1 + * 2 = generate interrupt when ready = 0) + * Bit 17: RXSTATUS_READY_ACK (1 = Acknowledge ready bit interrupt) + * Bit 16: RXSTATUS_READY (1 = Rxstatus ready bit read is 1) + * Bit 15: RXSTATUS_READY_NOT (1 = Rxstatus ready bit read is 0) + * Bit 14: RXSTATUS_REAUTH_REQ_MASK (1 = generate interrupt when reauth is + * requested by sink) + * Bit 13: RXSTATUS_REAUTH_REQ_ACK (1 = Acknowledge Reauth req interrupt) + * Bit 12: RXSTATUS_REAUTH_REQ (1 = Rxstatus reauth req bit read is 1) + * Bit 10: RXSTATUS_DDC_FAILED_MASK (1 = generate interrupt when DDC + * tranasaction fails) + * Bit 9: RXSTATUS_DDC_FAILED_ACK (1 = Acknowledge ddc failure interrupt) + * Bit 8: RXSTATUS_DDC_FAILED (1 = DDC transaction failed) + * Bit 6: RXSTATUS_DDC_DONE_MASK (1 = generate interrupt when DDC + * transaction completes) + * Bit 5: RXSTATUS_DDC_DONE_ACK (1 = Acknowledge ddc done interrupt) + * Bit 4: RXSTATUS_DDC_DONE (1 = DDC transaction is done) + * Bit 2: RXSTATUS_DDC_REQ_MASK (1 = generate interrupt when DDC Read + * request for RXstatus is made) + * Bit 1: RXSTATUS_DDC_REQ_ACK (1 = Acknowledge Rxstatus read interrupt) + * Bit 0: RXSTATUS_DDC_REQ (1 = RXStatus DDC read request is made) + * + */ + +#define HDCP2P2_RXSTATUS_MESSAGE_SIZE_SHIFT 20 +#define HDCP2P2_RXSTATUS_MESSAGE_SIZE_MASK 0x3ff00000 +#define HDCP2P2_RXSTATUS_MESSAGE_SIZE_ACK_SHIFT 30 +#define HDCP2P2_RXSTATUS_MESSAGE_SIZE_INTR_SHIFT 31 + +#define HDCP2P2_RXSTATUS_REAUTH_REQ_SHIFT 12 +#define HDCP2P2_RXSTATUS_REAUTH_REQ_MASK 1 +#define HDCP2P2_RXSTATUS_REAUTH_REQ_ACK_SHIFT 13 +#define HDCP2P2_RXSTATUS_REAUTH_REQ_INTR_SHIFT 14 + +#define HDCP2P2_RXSTATUS_READY_SHIFT 16 +#define HDCP2P2_RXSTATUS_READY_MASK 1 +#define HDCP2P2_RXSTATUS_READY_ACK_SHIFT 17 +#define HDCP2P2_RXSTATUS_READY_INTR_SHIFT 18 +#define HDCP2P2_RXSTATUS_READY_INTR_MASK 18 + +#define HDCP2P2_RXSTATUS_DDC_FAILED_SHIFT 8 +#define HDCP2P2_RXSTATUS_DDC_FAILED_ACKSHIFT 9 +#define HDCP2P2_RXSTATUS_DDC_FAILED_INTR_MASK 10 +#define HDCP2P2_RXSTATUS_DDC_DONE 6 + +/* default hsyncs for 4k@60 for 200ms */ +#define HDMI_DEFAULT_TIMEOUT_HSYNC 28571 + +#define HDMI_GET_MSB(x)(x >> 8) +#define HDMI_GET_LSB(x)(x & 0xff) + +#define SDE_HDMI_VIC_640x480 0x1 +#define SDE_HDMI_YCC_QUANT_MASK (0x3 << 14) +#define SDE_HDMI_COLORIMETRY_MASK (0x3 << 22) + +#define SDE_HDMI_DEFAULT_COLORIMETRY 0x0 +#define SDE_HDMI_USE_EXTENDED_COLORIMETRY 0x3 +#define SDE_HDMI_BT2020_COLORIMETRY 0x6 + +/* + * Bits 1:0 in HDMI_HW_DDC_CTRL that dictate how the HDCP 2.2 RxStatus will be + * read by the hardware + */ +#define HDCP2P2_RXSTATUS_HW_DDC_DISABLE 0 +#define HDCP2P2_RXSTATUS_HW_DDC_AUTOMATIC_LOOP 1 +#define HDCP2P2_RXSTATUS_HW_DDC_FORCE_LOOP 2 +#define HDCP2P2_RXSTATUS_HW_DDC_SW_TRIGGER 3 + +struct sde_hdmi_tx_ddc_data { + char *what; + u8 *data_buf; + u32 data_len; + u32 dev_addr; + u32 offset; + u32 request_len; + u32 retry_align; + u32 hard_timeout; + u32 timeout_left; + int retry; +}; + +enum sde_hdmi_tx_hdcp2p2_rxstatus_intr_mask { + RXSTATUS_MESSAGE_SIZE = BIT(31), + RXSTATUS_READY = BIT(18), + RXSTATUS_REAUTH_REQ = BIT(14), +}; + +enum sde_hdmi_hdr_state { + HDR_DISABLE, + HDR_ENABLE +}; + +enum sde_hdmi_hdr_op { + HDR_UNSUPPORTED_OP, + HDR_SEND_INFO, + HDR_CLEAR_INFO +}; + +struct 
sde_hdmi_tx_hdcp2p2_ddc_data { + enum sde_hdmi_tx_hdcp2p2_rxstatus_intr_mask intr_mask; + u32 timeout_ms; + u32 timeout_hsync; + u32 periodic_timer_hsync; + u32 timeout_left; + u32 read_method; + u32 message_size; + bool encryption_ready; + bool ready; + bool reauth_req; + bool ddc_max_retries_fail; + bool ddc_done; + bool ddc_read_req; + bool ddc_timeout; + bool wait; + int irq_wait_count; + void (*link_cb)(void *data); + void *link_data; +}; + +struct sde_hdmi_tx_ddc_ctrl { + struct completion rx_status_done; + struct dss_io_data *io; + struct sde_hdmi_tx_ddc_data ddc_data; + struct sde_hdmi_tx_hdcp2p2_ddc_data sde_hdcp2p2_ddc_data; +}; + +/* DDC */ +int sde_hdmi_ddc_write(void *cb_data); +int sde_hdmi_ddc_read(void *cb_data); +int sde_hdmi_ddc_scrambling_isr(void *hdmi_display); +int _sde_hdmi_get_timeout_in_hysnc(void *hdmi_display, u32 timeout_ms); +void _sde_hdmi_scrambler_ddc_disable(void *hdmi_display); +void sde_hdmi_hdcp2p2_ddc_disable(void *hdmi_display); +int sde_hdmi_hdcp2p2_read_rxstatus(void *hdmi_display); +void sde_hdmi_ddc_config(void *hdmi_display); +int sde_hdmi_ddc_hdcp2p2_isr(void *hdmi_display); +void sde_hdmi_dump_regs(void *hdmi_display); +unsigned long sde_hdmi_calc_pixclk(unsigned long pixel_freq, + u32 out_format, bool dc_enable); +bool sde_hdmi_validate_pixclk(struct drm_connector *connector, + unsigned long pclk); +int sde_hdmi_sink_dc_support(struct drm_connector *connector, + struct drm_display_mode *mode); +u8 sde_hdmi_hdr_get_ops(u8 curr_state, + u8 new_state); +#endif /* _SDE_HDMI_UTIL_H_ */ diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c index 7915562057d6..7d660ba56594 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi.c +++ b/drivers/gpu/drm/msm/hdmi/hdmi.c @@ -95,7 +95,7 @@ static struct hdmi *hdmi_init(struct platform_device *pdev) struct hdmi_platform_config *config = pdev->dev.platform_data; struct hdmi *hdmi = NULL; struct resource *res; - int i, ret; + int i, ret = 0; hdmi = devm_kzalloc(&pdev->dev, sizeof(*hdmi), GFP_KERNEL); if (!hdmi) { @@ -119,9 +119,19 @@ static struct hdmi *hdmi_init(struct platform_device *pdev) } } + res = platform_get_resource_byname(pdev, + IORESOURCE_MEM, config->mmio_name); + if (!res) { + dev_err(&pdev->dev, "failed to find ctrl resource\n"); + ret = -ENOMEM; + goto fail; + } + hdmi->mmio_len = (u32)resource_size(res); hdmi->mmio = msm_ioremap(pdev, config->mmio_name, "HDMI"); if (IS_ERR(hdmi->mmio)) { ret = PTR_ERR(hdmi->mmio); + dev_info(&pdev->dev, "can't map hdmi resource\n"); + hdmi->mmio = NULL; goto fail; } @@ -130,13 +140,39 @@ static struct hdmi *hdmi_init(struct platform_device *pdev) config->mmio_name); hdmi->mmio_phy_addr = res->start; + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, + config->qfprom_mmio_name); + + if (!res) { + dev_err(&pdev->dev, "failed to find qfprom resource\n"); + ret = -ENOMEM; + goto fail; + } + hdmi->qfprom_mmio_len = (u32)resource_size(res); + hdmi->qfprom_mmio = msm_ioremap(pdev, config->qfprom_mmio_name, "HDMI_QFPROM"); + if (IS_ERR(hdmi->qfprom_mmio)) { - dev_info(&pdev->dev, "can't find qfprom resource\n"); + dev_info(&pdev->dev, "can't map qfprom resource\n"); hdmi->qfprom_mmio = NULL; } + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, + config->hdcp_mmio_name); + if (!res) { + dev_err(&pdev->dev, "failed to find hdcp resource: %d\n", ret); + ret = -ENOMEM; + goto fail; + } + hdmi->hdcp_mmio_len = (u32)resource_size(res); + hdmi->hdcp_mmio = msm_ioremap(pdev, + config->hdcp_mmio_name, "HDMI_HDCP"); + if (IS_ERR(hdmi->hdcp_mmio)) { 
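+		/* as with the qfprom region above, a failed mapping is not
+		 * fatal; continue with hdcp_mmio left as NULL
+		 */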
+ dev_info(&pdev->dev, "can't map hdcp resource\n"); + hdmi->hdcp_mmio = NULL; + } + hdmi->hpd_regs = devm_kzalloc(&pdev->dev, sizeof(hdmi->hpd_regs[0]) * config->hpd_reg_cnt, GFP_KERNEL); if (!hdmi->hpd_regs) { @@ -468,6 +504,7 @@ static int hdmi_bind(struct device *dev, struct device *master, void *data) hdmi_cfg->mmio_name = "core_physical"; hdmi_cfg->qfprom_mmio_name = "qfprom_physical"; + hdmi_cfg->hdcp_mmio_name = "hdcp_physical"; hdmi_cfg->ddc_clk_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-ddc-clk"); hdmi_cfg->ddc_data_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-ddc-data"); hdmi_cfg->hpd_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-hpd"); diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.h b/drivers/gpu/drm/msm/hdmi/hdmi.h index 9ce8ff513210..8ca7b36ee0c8 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi.h +++ b/drivers/gpu/drm/msm/hdmi/hdmi.h @@ -27,6 +27,11 @@ #include "msm_drv.h" #include "hdmi.xml.h" +#define HDMI_SEC_TO_MS 1000 +#define HDMI_MS_TO_US 1000 +#define HDMI_SEC_TO_US (HDMI_SEC_TO_MS * HDMI_MS_TO_US) +#define HDMI_KHZ_TO_HZ 1000 +#define HDMI_BUSY_WAIT_DELAY_US 100 struct hdmi_phy; struct hdmi_platform_config; @@ -51,9 +56,14 @@ struct hdmi { /* video state: */ bool power_on; unsigned long int pixclock; + unsigned long int actual_pixclock; void __iomem *mmio; void __iomem *qfprom_mmio; + void __iomem *hdcp_mmio; + u32 mmio_len; + u32 qfprom_mmio_len; + u32 hdcp_mmio_len; phys_addr_t mmio_phy_addr; struct regulator **hpd_regs; @@ -72,10 +82,14 @@ struct hdmi { bool hdmi_mode; /* are we in hdmi mode? */ bool is_hdcp_supported; int irq; + void (*ddc_sw_done_cb)(void *data); + void *sw_done_cb_data; struct workqueue_struct *workq; struct hdmi_hdcp_ctrl *hdcp_ctrl; - + bool use_hard_timeout; + int busy_wait_us; + u32 timeout_count; /* * spinlock to protect registers shared by different execution * REG_HDMI_CTRL @@ -91,7 +105,7 @@ struct hdmi_platform_config { struct hdmi_phy *(*phy_init)(struct hdmi *hdmi); const char *mmio_name; const char *qfprom_mmio_name; - + const char *hdcp_mmio_name; /* regulators that need to be on for hpd: */ const char **hpd_reg_names; int hpd_reg_cnt; @@ -116,8 +130,20 @@ struct hdmi_platform_config { int mux_lpm_gpio; }; +struct hdmi_i2c_adapter { + struct i2c_adapter base; + struct hdmi *hdmi; + bool sw_done; + wait_queue_head_t ddc_event; +}; + void hdmi_set_mode(struct hdmi *hdmi, bool power_on); +#define to_hdmi_i2c_adapter(x) container_of(x, struct hdmi_i2c_adapter, base) + +int ddc_clear_irq(struct hdmi *hdmi); +void init_ddc(struct hdmi *hdmi); + static inline void hdmi_write(struct hdmi *hdmi, u32 reg, u32 data) { msm_writel(data, hdmi->mmio + reg); @@ -186,6 +212,13 @@ void hdmi_i2c_irq(struct i2c_adapter *i2c); void hdmi_i2c_destroy(struct i2c_adapter *i2c); struct i2c_adapter *hdmi_i2c_init(struct hdmi *hdmi); +/* + * DDC utility functions + */ +int hdmi_ddc_read(struct hdmi *hdmi, u16 addr, u8 offset, + u8 *data, u16 data_len, bool self_retry); +int hdmi_ddc_write(struct hdmi *hdmi, u16 addr, u8 offset, + u8 *data, u16 data_len, bool self_retry); /* * hdcp */ diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_hdcp.c b/drivers/gpu/drm/msm/hdmi/hdmi_hdcp.c index e56a8675c0a4..66be37bea4f6 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi_hdcp.c +++ b/drivers/gpu/drm/msm/hdmi/hdmi_hdcp.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2010-2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2010-2017, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -85,84 +85,6 @@ struct hdmi_hdcp_ctrl { bool max_dev_exceeded; }; -static int hdmi_ddc_read(struct hdmi *hdmi, u16 addr, u8 offset, - u8 *data, u16 data_len) -{ - int rc; - int retry = 5; - struct i2c_msg msgs[] = { - { - .addr = addr >> 1, - .flags = 0, - .len = 1, - .buf = &offset, - }, { - .addr = addr >> 1, - .flags = I2C_M_RD, - .len = data_len, - .buf = data, - } - }; - - DBG("Start DDC read"); -retry: - rc = i2c_transfer(hdmi->i2c, msgs, 2); - - retry--; - if (rc == 2) - rc = 0; - else if (retry > 0) - goto retry; - else - rc = -EIO; - - DBG("End DDC read %d", rc); - - return rc; -} - -#define HDCP_DDC_WRITE_MAX_BYTE_NUM 32 - -static int hdmi_ddc_write(struct hdmi *hdmi, u16 addr, u8 offset, - u8 *data, u16 data_len) -{ - int rc; - int retry = 10; - u8 buf[HDCP_DDC_WRITE_MAX_BYTE_NUM]; - struct i2c_msg msgs[] = { - { - .addr = addr >> 1, - .flags = 0, - .len = 1, - } - }; - - DBG("Start DDC write"); - if (data_len > (HDCP_DDC_WRITE_MAX_BYTE_NUM - 1)) { - pr_err("%s: write size too big\n", __func__); - return -ERANGE; - } - - buf[0] = offset; - memcpy(&buf[1], data, data_len); - msgs[0].buf = buf; - msgs[0].len = data_len + 1; -retry: - rc = i2c_transfer(hdmi->i2c, msgs, 1); - - retry--; - if (rc == 1) - rc = 0; - else if (retry > 0) - goto retry; - else - rc = -EIO; - - DBG("End DDC write %d", rc); - - return rc; -} - static int hdmi_hdcp_scm_wr(struct hdmi_hdcp_ctrl *hdcp_ctrl, u32 *preg, u32 *pdata, u32 count) { diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_i2c.c b/drivers/gpu/drm/msm/hdmi/hdmi_i2c.c index f4ab7f70fed1..c65cc908b882 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi_i2c.c +++ b/drivers/gpu/drm/msm/hdmi/hdmi_i2c.c @@ -17,66 +17,16 @@ #include "hdmi.h" -struct hdmi_i2c_adapter { - struct i2c_adapter base; - struct hdmi *hdmi; - bool sw_done; - wait_queue_head_t ddc_event; -}; -#define to_hdmi_i2c_adapter(x) container_of(x, struct hdmi_i2c_adapter, base) - -static void init_ddc(struct hdmi_i2c_adapter *hdmi_i2c) -{ - struct hdmi *hdmi = hdmi_i2c->hdmi; - - hdmi_write(hdmi, REG_HDMI_DDC_CTRL, - HDMI_DDC_CTRL_SW_STATUS_RESET); - hdmi_write(hdmi, REG_HDMI_DDC_CTRL, - HDMI_DDC_CTRL_SOFT_RESET); - - hdmi_write(hdmi, REG_HDMI_DDC_SPEED, - HDMI_DDC_SPEED_THRESHOLD(2) | - HDMI_DDC_SPEED_PRESCALE(10)); - - hdmi_write(hdmi, REG_HDMI_DDC_SETUP, - HDMI_DDC_SETUP_TIMEOUT(0xff)); - - /* enable reference timer for 27us */ - hdmi_write(hdmi, REG_HDMI_DDC_REF, - HDMI_DDC_REF_REFTIMER_ENABLE | - HDMI_DDC_REF_REFTIMER(27)); -} - -static int ddc_clear_irq(struct hdmi_i2c_adapter *hdmi_i2c) -{ - struct hdmi *hdmi = hdmi_i2c->hdmi; - struct drm_device *dev = hdmi->dev; - uint32_t retry = 0xffff; - uint32_t ddc_int_ctrl; - - do { - --retry; - - hdmi_write(hdmi, REG_HDMI_DDC_INT_CTRL, - HDMI_DDC_INT_CTRL_SW_DONE_ACK | - HDMI_DDC_INT_CTRL_SW_DONE_MASK); - - ddc_int_ctrl = hdmi_read(hdmi, REG_HDMI_DDC_INT_CTRL); - - } while ((ddc_int_ctrl & HDMI_DDC_INT_CTRL_SW_DONE_INT) && retry); - - if (!retry) { - dev_err(dev->dev, "timeout waiting for DDC\n"); - return -ETIMEDOUT; - } - - hdmi_i2c->sw_done = false; - - return 0; -} - #define MAX_TRANSACTIONS 4 +#define SDE_DDC_TXN_CNT_MASK 0x07ff0000 +#define SDE_DDC_TXN_CNT_SHIFT 16 + +static inline uint32_t SDE_HDMI_I2C_TRANSACTION_REG_CNT(uint32_t val) +{ + return ((val) << SDE_DDC_TXN_CNT_SHIFT) & SDE_DDC_TXN_CNT_MASK; +} + static bool sw_done(struct hdmi_i2c_adapter *hdmi_i2c) { struct hdmi *hdmi = hdmi_i2c->hdmi; @@ 
-115,12 +65,13 @@ static int hdmi_i2c_xfer(struct i2c_adapter *i2c, WARN_ON(!(hdmi_read(hdmi, REG_HDMI_CTRL) & HDMI_CTRL_ENABLE)); + if (num == 0) return num; - init_ddc(hdmi_i2c); + init_ddc(hdmi); - ret = ddc_clear_irq(hdmi_i2c); + ret = ddc_clear_irq(hdmi); if (ret) return ret; @@ -155,7 +106,7 @@ static int hdmi_i2c_xfer(struct i2c_adapter *i2c, } } - i2c_trans = HDMI_I2C_TRANSACTION_REG_CNT(p->len) | + i2c_trans = SDE_HDMI_I2C_TRANSACTION_REG_CNT(p->len) | HDMI_I2C_TRANSACTION_REG_RW( (p->flags & I2C_M_RD) ? DDC_READ : DDC_WRITE) | HDMI_I2C_TRANSACTION_REG_START; @@ -177,9 +128,13 @@ static int hdmi_i2c_xfer(struct i2c_adapter *i2c, ret = -ETIMEDOUT; dev_warn(dev->dev, "DDC timeout: %d\n", ret); DBG("sw_status=%08x, hw_status=%08x, int_ctrl=%08x", - hdmi_read(hdmi, REG_HDMI_DDC_SW_STATUS), - hdmi_read(hdmi, REG_HDMI_DDC_HW_STATUS), - hdmi_read(hdmi, REG_HDMI_DDC_INT_CTRL)); + hdmi_read(hdmi, REG_HDMI_DDC_SW_STATUS), + hdmi_read(hdmi, REG_HDMI_DDC_HW_STATUS), + hdmi_read(hdmi, REG_HDMI_DDC_INT_CTRL)); + if (hdmi->use_hard_timeout) { + hdmi->use_hard_timeout = false; + hdmi->timeout_count = 0; + } return ret; } @@ -213,6 +168,10 @@ static int hdmi_i2c_xfer(struct i2c_adapter *i2c, } } + if (hdmi->use_hard_timeout) { + hdmi->use_hard_timeout = false; + hdmi->timeout_count = jiffies_to_msecs(ret); + } return i; } diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_util.c b/drivers/gpu/drm/msm/hdmi/hdmi_util.c new file mode 100644 index 000000000000..c7cfa38ed3ad --- /dev/null +++ b/drivers/gpu/drm/msm/hdmi/hdmi_util.c @@ -0,0 +1,143 @@ +/* + * Copyright (c) 2017 The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . 
+ */ + +#include +#include "hdmi.h" + +void init_ddc(struct hdmi *hdmi) +{ + hdmi_write(hdmi, REG_HDMI_DDC_CTRL, + HDMI_DDC_CTRL_SW_STATUS_RESET); + hdmi_write(hdmi, REG_HDMI_DDC_CTRL, + HDMI_DDC_CTRL_SOFT_RESET); + + hdmi_write(hdmi, REG_HDMI_DDC_SPEED, + HDMI_DDC_SPEED_THRESHOLD(2) | + HDMI_DDC_SPEED_PRESCALE(10)); + + hdmi_write(hdmi, REG_HDMI_DDC_SETUP, + HDMI_DDC_SETUP_TIMEOUT(0xff)); + + /* enable reference timer for 19us */ + hdmi_write(hdmi, REG_HDMI_DDC_REF, + HDMI_DDC_REF_REFTIMER_ENABLE | + HDMI_DDC_REF_REFTIMER(19)); +} + +int ddc_clear_irq(struct hdmi *hdmi) +{ + struct hdmi_i2c_adapter *hdmi_i2c = to_hdmi_i2c_adapter(hdmi->i2c); + struct drm_device *dev = hdmi->dev; + uint32_t retry = 0xffff; + uint32_t ddc_int_ctrl; + + do { + --retry; + + hdmi_write(hdmi, REG_HDMI_DDC_INT_CTRL, + HDMI_DDC_INT_CTRL_SW_DONE_ACK | + HDMI_DDC_INT_CTRL_SW_DONE_MASK); + + ddc_int_ctrl = hdmi_read(hdmi, REG_HDMI_DDC_INT_CTRL); + + } while ((ddc_int_ctrl & HDMI_DDC_INT_CTRL_SW_DONE_INT) && retry); + + if (!retry) { + dev_err(dev->dev, "timeout waiting for DDC\n"); + return -ETIMEDOUT; + } + + hdmi_i2c->sw_done = false; + + return 0; +} + +int hdmi_ddc_read(struct hdmi *hdmi, u16 addr, u8 offset, +u8 *data, u16 data_len, bool self_retry) +{ + int rc; + int retry = 10; + struct i2c_msg msgs[] = { + { + .addr = addr >> 1, + .flags = 0, + .len = 1, + .buf = &offset, + }, { + .addr = addr >> 1, + .flags = I2C_M_RD, + .len = data_len, + .buf = data, + } + }; + + DBG("Start DDC read"); +retry: + rc = i2c_transfer(hdmi->i2c, msgs, 2); + retry--; + + if (rc == 2) + rc = 0; + else if (self_retry && (retry > 0)) + goto retry; + else + rc = -EIO; + + DBG("End DDC read %d", rc); + + return rc; +} + +#define HDCP_DDC_WRITE_MAX_BYTE_NUM 1024 + +int hdmi_ddc_write(struct hdmi *hdmi, u16 addr, u8 offset, + u8 *data, u16 data_len, bool self_retry) +{ + int rc; + int retry = 10; + u8 buf[HDCP_DDC_WRITE_MAX_BYTE_NUM]; + struct i2c_msg msgs[] = { + { + .addr = addr >> 1, + .flags = 0, + .len = 1, + } + }; + + pr_debug("TESTING ! 
REMOVE RETRY Start DDC write"); + if (data_len > (HDCP_DDC_WRITE_MAX_BYTE_NUM - 1)) { + pr_err("%s: write size too big\n", __func__); + return -ERANGE; + } + + buf[0] = offset; + memcpy(&buf[1], data, data_len); + msgs[0].buf = buf; + msgs[0].len = data_len + 1; +retry: + rc = i2c_transfer(hdmi->i2c, msgs, 1); + retry--; + if (rc == 1) + rc = 0; + else if (self_retry && (retry > 0)) + goto retry; + else + rc = -EIO; + + DBG("End DDC write %d", rc); + + return rc; +} diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c index fa746d71cd3b..0c119ec5d97c 100644 --- a/drivers/gpu/drm/msm/msm_atomic.c +++ b/drivers/gpu/drm/msm/msm_atomic.c @@ -19,6 +19,7 @@ #include "msm_drv.h" #include "msm_kms.h" #include "msm_gem.h" +#include "sde_trace.h" struct msm_commit { struct drm_device *dev; @@ -108,6 +109,7 @@ msm_disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state) struct drm_crtc_state *old_crtc_state; int i; + SDE_ATRACE_BEGIN("msm_disable"); for_each_connector_in_state(old_state, connector, old_conn_state, i) { const struct drm_encoder_helper_funcs *funcs; struct drm_encoder *encoder; @@ -188,6 +190,7 @@ msm_disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state) else funcs->dpms(crtc, DRM_MODE_DPMS_OFF); } + SDE_ATRACE_END("msm_disable"); } static void @@ -297,6 +300,7 @@ static void msm_atomic_helper_commit_modeset_enables(struct drm_device *dev, int bridge_enable_count = 0; int i; + SDE_ATRACE_BEGIN("msm_enable"); for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) { const struct drm_crtc_helper_funcs *funcs; @@ -364,8 +368,10 @@ static void msm_atomic_helper_commit_modeset_enables(struct drm_device *dev, } /* If no bridges were pre_enabled, skip iterating over them again */ - if (bridge_enable_count == 0) + if (bridge_enable_count == 0) { + SDE_ATRACE_END("msm_enable"); return; + } for_each_connector_in_state(old_state, connector, old_conn_state, i) { struct drm_encoder *encoder; @@ -385,6 +391,7 @@ static void msm_atomic_helper_commit_modeset_enables(struct drm_device *dev, drm_bridge_enable(encoder->bridge); } + SDE_ATRACE_END("msm_enable"); } /* The (potentially) asynchronous part of the commit. 
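For illustration only, and not part of the patch: a minimal sketch of how a caller such as the HDCP code might use the DDC helpers that were moved into hdmi_util.c above, now that they take a self_retry flag to control whether the helper retries a failed transfer internally. The 0x74 slave address and 0x40 Bcaps offset are standard HDCP values used here purely as an example.

static int example_read_bcaps(struct hdmi *hdmi, u8 *bcaps)
{
	/* single-byte read, letting the helper retry transient I2C errors */
	int ret = hdmi_ddc_read(hdmi, 0x74, 0x40, bcaps, 1, true);

	if (ret)
		pr_err("%s: Bcaps read failed: %d\n", __func__, ret);

	return ret;
}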
At this point @@ -457,7 +464,9 @@ static void _msm_drm_commit_work_cb(struct kthread_work *work) commit = container_of(work, struct msm_commit, commit_work); + SDE_ATRACE_BEGIN("complete_commit"); complete_commit(commit); + SDE_ATRACE_END("complete_commit"); } static struct msm_commit *commit_init(struct drm_atomic_state *state) @@ -553,9 +562,12 @@ int msm_atomic_commit(struct drm_device *dev, struct msm_commit *commit; int i, ret; + SDE_ATRACE_BEGIN("atomic_commit"); ret = drm_atomic_helper_prepare_planes(dev, state); - if (ret) + if (ret) { + SDE_ATRACE_END("atomic_commit"); return ret; + } commit = commit_init(state); if (IS_ERR_OR_NULL(commit)) { @@ -635,6 +647,7 @@ int msm_atomic_commit(struct drm_device *dev, if (async) { msm_queue_fence_cb(dev, &commit->fence_cb, commit->fence); + SDE_ATRACE_END("atomic_commit"); return 0; } @@ -645,9 +658,11 @@ int msm_atomic_commit(struct drm_device *dev, complete_commit(commit); + SDE_ATRACE_END("atomic_commit"); return 0; error: drm_atomic_helper_cleanup_planes(dev, state); + SDE_ATRACE_END("atomic_commit"); return ret; } diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c index a3bdc30b9620..c8b11425a817 100644 --- a/drivers/gpu/drm/msm/msm_drv.c +++ b/drivers/gpu/drm/msm/msm_drv.c @@ -26,11 +26,68 @@ #include "msm_gem.h" #include "msm_mmu.h" +static void msm_drm_helper_hotplug_event(struct drm_device *dev) +{ + struct drm_connector *connector; + char *event_string; + char const *connector_name; + char *envp[2]; + + if (!dev) { + DRM_ERROR("hotplug_event failed, invalid input\n"); + return; + } + + if (!dev->mode_config.poll_enabled) + return; + + event_string = kzalloc(SZ_4K, GFP_KERNEL); + if (!event_string) { + DRM_ERROR("failed to allocate event string\n"); + return; + } + + mutex_lock(&dev->mode_config.mutex); + drm_for_each_connector(connector, dev) { + /* Only handle HPD capable connectors. 
*/ + if (!(connector->polled & DRM_CONNECTOR_POLL_HPD)) + continue; + + connector->status = connector->funcs->detect(connector, false); + + if (connector->name) + connector_name = connector->name; + else + connector_name = "unknown"; + + snprintf(event_string, SZ_4K, "name=%s status=%s\n", + connector_name, + drm_get_connector_status_name(connector->status)); + DRM_DEBUG("generating hotplug event [%s]\n", event_string); + envp[0] = event_string; + envp[1] = NULL; + kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, + envp); + } + mutex_unlock(&dev->mode_config.mutex); + kfree(event_string); +} + static void msm_fb_output_poll_changed(struct drm_device *dev) { - struct msm_drm_private *priv = dev->dev_private; + struct msm_drm_private *priv = NULL; + + if (!dev) { + DRM_ERROR("output_poll_changed failed, invalid input\n"); + return; + } + + priv = dev->dev_private; + if (priv->fbdev) drm_fb_helper_hotplug_event(priv->fbdev); + else + msm_drm_helper_hotplug_event(dev); } static const struct drm_mode_config_funcs mode_config_funcs = { @@ -259,8 +316,8 @@ static int get_mdp_ver(struct platform_device *pdev) static const struct of_device_id match_types[] = { { .compatible = "qcom,sde-kms", .data = (void *)KMS_SDE, - /* end node */ - } }; + }, + {} }; struct device *dev = &pdev->dev; const struct of_device_id *match; match = of_match_node(match_types, dev->of_node); @@ -602,7 +659,10 @@ static int msm_open(struct drm_device *dev, struct drm_file *file) if (IS_ERR(ctx)) return PTR_ERR(ctx); - INIT_LIST_HEAD(&ctx->counters); + if (ctx) + INIT_LIST_HEAD(&ctx->counters); + + msm_submitqueue_init(ctx); file->driver_priv = ctx; @@ -632,12 +692,18 @@ static void msm_postclose(struct drm_device *dev, struct drm_file *file) if (kms && kms->funcs && kms->funcs->postclose) kms->funcs->postclose(kms, file); - if (priv->gpu) + if (!ctx) + return; + + msm_submitqueue_close(ctx); + + if (priv->gpu) { msm_gpu_cleanup_counters(priv->gpu, ctx); - if (ctx && ctx->aspace && ctx->aspace != priv->gpu->aspace) { - ctx->aspace->mmu->funcs->detach(ctx->aspace->mmu); - msm_gem_address_space_put(ctx->aspace); + if (ctx->aspace && ctx->aspace != priv->gpu->aspace) { + ctx->aspace->mmu->funcs->detach(ctx->aspace->mmu); + msm_gem_address_space_put(ctx->aspace); + } } kfree(ctx); @@ -1683,6 +1749,52 @@ static int msm_ioctl_counter_read(struct drm_device *dev, void *data, return -ENODEV; } + +static int msm_ioctl_submitqueue_new(struct drm_device *dev, void *data, + struct drm_file *file) +{ + struct drm_msm_submitqueue *args = data; + struct msm_drm_private *priv = dev->dev_private; + struct msm_gpu *gpu = priv->gpu; + + if (args->flags & ~MSM_SUBMITQUEUE_FLAGS) + return -EINVAL; + + if ((gpu->nr_rings > 1) && + (!file->is_master && args->prio == 0)) { + DRM_ERROR("Only DRM master can set highest priority ringbuffer\n"); + return -EPERM; + } + + if (args->flags & MSM_SUBMITQUEUE_BYPASS_QOS_TIMEOUT && + !capable(CAP_SYS_ADMIN)) { + DRM_ERROR( + "Only CAP_SYS_ADMIN processes can bypass the timer\n"); + return -EPERM; + } + + return msm_submitqueue_create(file->driver_priv, args->prio, + args->flags, &args->id); +} + +static int msm_ioctl_submitqueue_query(struct drm_device *dev, void *data, + struct drm_file *file) +{ + struct drm_msm_submitqueue_query *args = data; + void __user *ptr = (void __user *)(uintptr_t) args->data; + + return msm_submitqueue_query(file->driver_priv, args->id, + args->param, ptr, args->len); +} + +static int msm_ioctl_submitqueue_close(struct drm_device *dev, void *data, + struct drm_file 
*file) +{ + struct drm_msm_submitqueue *args = data; + + return msm_submitqueue_remove(file->driver_priv, args->id); +} + int msm_release(struct inode *inode, struct file *filp) { struct drm_file *file_priv = filp->private_data; @@ -1728,6 +1840,12 @@ static const struct drm_ioctl_desc msm_ioctls[] = { DRM_AUTH|DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(MSM_GEM_SVM_NEW, msm_ioctl_gem_svm_new, DRM_AUTH|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_NEW, msm_ioctl_submitqueue_new, + DRM_AUTH|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_CLOSE, msm_ioctl_submitqueue_close, + DRM_AUTH|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_QUERY, msm_ioctl_submitqueue_query, + DRM_AUTH|DRM_RENDER_ALLOW), }; static const struct vm_operations_struct vm_ops = { @@ -1780,6 +1898,7 @@ static struct drm_driver msm_driver = { .prime_fd_to_handle = drm_gem_prime_fd_to_handle, .gem_prime_export = drm_gem_prime_export, .gem_prime_import = drm_gem_prime_import, + .gem_prime_res_obj = msm_gem_prime_res_obj, .gem_prime_pin = msm_gem_prime_pin, .gem_prime_unpin = msm_gem_prime_unpin, .gem_prime_get_sg_table = msm_gem_prime_get_sg_table, @@ -1804,8 +1923,75 @@ static struct drm_driver msm_driver = { #ifdef CONFIG_PM_SLEEP static int msm_pm_suspend(struct device *dev) { - struct drm_device *ddev = dev_get_drvdata(dev); + struct drm_device *ddev; + struct drm_modeset_acquire_ctx *ctx; + struct drm_connector *conn; + struct drm_atomic_state *state; + struct drm_crtc_state *crtc_state; + struct msm_drm_private *priv; + int ret = 0; + if (!dev) + return -EINVAL; + + ddev = dev_get_drvdata(dev); + if (!ddev || !ddev->dev_private) + return -EINVAL; + + priv = ddev->dev_private; + SDE_EVT32(0); + + /* acquire modeset lock(s) */ + drm_modeset_lock_all(ddev); + ctx = ddev->mode_config.acquire_ctx; + + /* save current state for resume */ + if (priv->suspend_state) + drm_atomic_state_free(priv->suspend_state); + priv->suspend_state = drm_atomic_helper_duplicate_state(ddev, ctx); + if (IS_ERR_OR_NULL(priv->suspend_state)) { + DRM_ERROR("failed to back up suspend state\n"); + priv->suspend_state = NULL; + goto unlock; + } + + /* create atomic state to disable all CRTCs */ + state = drm_atomic_state_alloc(ddev); + if (IS_ERR_OR_NULL(state)) { + DRM_ERROR("failed to allocate crtc disable state\n"); + goto unlock; + } + + state->acquire_ctx = ctx; + drm_for_each_connector(conn, ddev) { + + if (!conn->state || !conn->state->crtc || + conn->dpms != DRM_MODE_DPMS_ON) + continue; + + /* force CRTC to be inactive */ + crtc_state = drm_atomic_get_crtc_state(state, + conn->state->crtc); + if (IS_ERR_OR_NULL(crtc_state)) { + DRM_ERROR("failed to get crtc %d state\n", + conn->state->crtc->base.id); + drm_atomic_state_free(state); + goto unlock; + } + crtc_state->active = false; + } + + /* commit the "disable all" state */ + ret = drm_atomic_commit(state); + if (ret < 0) { + DRM_ERROR("failed to disable crtcs, %d\n", ret); + drm_atomic_state_free(state); + } + +unlock: + drm_modeset_unlock_all(ddev); + + /* disable hot-plug polling */ drm_kms_helper_poll_disable(ddev); return 0; @@ -1813,8 +1999,38 @@ static int msm_pm_suspend(struct device *dev) static int msm_pm_resume(struct device *dev) { - struct drm_device *ddev = dev_get_drvdata(dev); + struct drm_device *ddev; + struct msm_drm_private *priv; + int ret; + if (!dev) + return -EINVAL; + + ddev = dev_get_drvdata(dev); + if (!ddev || !ddev->dev_private) + return -EINVAL; + + priv = ddev->dev_private; + + SDE_EVT32(priv->suspend_state != NULL); + + 
drm_mode_config_reset(ddev); + + drm_modeset_lock_all(ddev); + + if (priv->suspend_state) { + priv->suspend_state->acquire_ctx = + ddev->mode_config.acquire_ctx; + ret = drm_atomic_commit(priv->suspend_state); + if (ret < 0) { + DRM_ERROR("failed to restore state, %d\n", ret); + drm_atomic_state_free(priv->suspend_state); + } + priv->suspend_state = NULL; + } + drm_modeset_unlock_all(ddev); + + /* enable hot-plug polling */ drm_kms_helper_poll_enable(ddev); return 0; @@ -1979,6 +2195,7 @@ static struct platform_driver msm_platform_driver = { .name = "msm_drm", .of_match_table = dt_match, .pm = &msm_pm_ops, + .probe_type = PROBE_PREFER_ASYNCHRONOUS, }, .id_table = msm_id, }; diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h index 8f56a3126008..49b6029c3342 100644 --- a/drivers/gpu/drm/msm/msm_drv.h +++ b/drivers/gpu/drm/msm/msm_drv.h @@ -78,6 +78,9 @@ struct msm_gem_vma; struct msm_file_private { struct msm_gem_address_space *aspace; struct list_head counters; + rwlock_t queuelock; + struct list_head submitqueues; + int queueid; }; enum msm_mdp_plane_property { @@ -140,6 +143,8 @@ enum msm_mdp_crtc_property { enum msm_mdp_conn_property { /* blob properties, always put these first */ CONNECTOR_PROP_SDE_INFO, + CONNECTOR_PROP_HDR_INFO, + CONNECTOR_PROP_HDR_CONTROL, /* # of blob properties */ CONNECTOR_PROP_BLOBCOUNT, @@ -152,6 +157,7 @@ enum msm_mdp_conn_property { CONNECTOR_PROP_DST_W, CONNECTOR_PROP_DST_H, CONNECTOR_PROP_PLL_DELTA, + CONNECTOR_PROP_PLL_ENABLE, /* enum/bitmask properties */ CONNECTOR_PROP_TOPOLOGY_NAME, @@ -229,6 +235,14 @@ struct msm_display_info { enum msm_display_compression compression; }; +/** + * struct - msm_display_kickoff_params - info for display features at kickoff + * @hdr_ctrl: HDR control info passed from userspace + */ +struct msm_display_kickoff_params { + struct drm_msm_ext_panel_hdr_ctrl *hdr_ctrl; +}; + /** * struct msm_drm_event - defines custom event notification struct * @base: base object required for event notification by DRM framework. 
@@ -283,7 +297,6 @@ struct msm_drm_private { struct drm_fb_helper *fbdev; - uint32_t next_fence[MSM_GPU_MAX_RINGS]; uint32_t completed_fence[MSM_GPU_MAX_RINGS]; wait_queue_head_t fence_event; @@ -354,6 +367,9 @@ struct msm_drm_private { struct msm_vblank_ctrl vblank_ctrl; + /* saved atomic state during system suspend */ + struct drm_atomic_state *suspend_state; + /* list of clients waiting for events */ struct list_head client_event_list; }; @@ -401,6 +417,15 @@ void __msm_fence_worker(struct work_struct *work); (_cb)->func = _func; \ } while (0) +static inline bool msm_is_suspend_state(struct drm_device *dev) +{ + if (!dev || !dev->dev_private) + return false; + + return ((struct msm_drm_private *)dev->dev_private)->suspend_state != + NULL; +} + int msm_atomic_commit(struct drm_device *dev, struct drm_atomic_state *state, bool async); @@ -437,6 +462,7 @@ struct msm_gem_address_space * msm_gem_smmu_address_space_create(struct device *dev, struct msm_mmu *mmu, const char *name); +void msm_gem_submit_free(struct msm_gem_submit *submit); int msm_ioctl_gem_submit(struct drm_device *dev, void *data, struct drm_file *file); @@ -461,6 +487,7 @@ struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj); void *msm_gem_prime_vmap(struct drm_gem_object *obj); void msm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr); int msm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma); +struct reservation_object *msm_gem_prime_res_obj(struct drm_gem_object *obj); struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev, struct dma_buf_attachment *attach, struct sg_table *sg); int msm_gem_prime_pin(struct drm_gem_object *obj); @@ -490,7 +517,12 @@ int msm_gem_svm_new_handle(struct drm_device *dev, struct drm_file *file, struct drm_gem_object *msm_gem_svm_new(struct drm_device *dev, struct drm_file *file, uint64_t hostptr, uint64_t size, uint32_t flags); - +void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size, + uint32_t flags, struct msm_gem_address_space *aspace, + struct drm_gem_object **bo, uint64_t *iova); +void *msm_gem_kernel_new_locked(struct drm_device *dev, uint32_t size, + uint32_t flags, struct msm_gem_address_space *aspace, + struct drm_gem_object **bo, uint64_t *iova); int msm_framebuffer_prepare(struct drm_framebuffer *fb, struct msm_gem_address_space *aspace); void msm_framebuffer_cleanup(struct drm_framebuffer *fb, @@ -506,6 +538,19 @@ struct drm_framebuffer *msm_framebuffer_create(struct drm_device *dev, struct drm_fb_helper *msm_fbdev_init(struct drm_device *dev); +struct msm_gpu_submitqueue; +int msm_submitqueue_init(struct msm_file_private *ctx); +struct msm_gpu_submitqueue *msm_submitqueue_get(struct msm_file_private *ctx, + u32 id); +int msm_submitqueue_create(struct msm_file_private *ctx, u32 prio, + u32 flags, u32 *id); +int msm_submitqueue_query(struct msm_file_private *ctx, u32 id, u32 param, + void __user *data, u32 len); +int msm_submitqueue_remove(struct msm_file_private *ctx, u32 id); +void msm_submitqueue_close(struct msm_file_private *ctx); + +void msm_submitqueue_destroy(struct kref *kref); + struct hdmi; int hdmi_modeset_init(struct hdmi *hdmi, struct drm_device *dev, struct drm_encoder *encoder); diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index 6bb29c62378d..d66071672c62 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c @@ -653,7 +653,15 @@ void *msm_gem_vaddr(struct drm_gem_object *obj) struct msm_gem_object *msm_obj = to_msm_bo(obj); 
mutex_lock(&msm_obj->lock); - if (!msm_obj->vaddr) { + + if (msm_obj->vaddr) { + mutex_unlock(&msm_obj->lock); + return msm_obj->vaddr; + } + + if (obj->import_attach) { + msm_obj->vaddr = dma_buf_vmap(obj->import_attach->dmabuf); + } else { struct page **pages = get_pages(obj); if (IS_ERR(pages)) { mutex_unlock(&msm_obj->lock); @@ -1038,7 +1046,7 @@ struct drm_gem_object *msm_gem_svm_new(struct drm_device *dev, { struct drm_gem_object *obj; struct msm_file_private *ctx = file->driver_priv; - struct msm_gem_address_space *aspace = ctx->aspace; + struct msm_gem_address_space *aspace; struct msm_gem_object *msm_obj; struct msm_gem_svm_object *msm_svm_obj; struct msm_gem_vma *domain = NULL; @@ -1048,6 +1056,9 @@ struct drm_gem_object *msm_gem_svm_new(struct drm_device *dev, int write; int ret; + if (!ctx) + return ERR_PTR(-ENODEV); + /* if we don't have IOMMU, don't bother pretending we can import: */ if (!iommu_present(&platform_bus_type)) { dev_err_once(dev->dev, "cannot import without IOMMU\n"); @@ -1070,6 +1081,7 @@ struct drm_gem_object *msm_gem_svm_new(struct drm_device *dev, drm_gem_private_object_init(dev, obj, size); msm_obj = to_msm_bo(obj); + aspace = ctx->aspace; domain = obj_add_domain(&msm_obj->base, aspace); if (IS_ERR(domain)) { drm_gem_object_unreference_unlocked(obj); @@ -1277,3 +1289,51 @@ void msm_mn_invalidate_range_start(struct mmu_notifier *mn, msm_gem_mn_put(msm_mn); } + +/* + * Helper function to consolidate in-kernel buffer allocations that usually need + * to allocate a buffer object, iova and a virtual address all in one shot + */ +static void *_msm_gem_kernel_new(struct drm_device *dev, uint32_t size, + uint32_t flags, struct msm_gem_address_space *aspace, + struct drm_gem_object **bo, uint64_t *iova, bool locked) +{ + void *vaddr; + struct drm_gem_object *obj = _msm_gem_new(dev, size, flags, locked); + int ret; + + if (IS_ERR(obj)) + return ERR_CAST(obj); + + ret = msm_gem_get_iova(obj, aspace, iova); + if (ret) { + drm_gem_object_unreference(obj); + return ERR_PTR(ret); + } + + vaddr = msm_gem_vaddr(obj); + if (!vaddr) { + msm_gem_put_iova(obj, aspace); + drm_gem_object_unreference(obj); + return ERR_PTR(-ENOMEM); + } + + *bo = obj; + return vaddr; +} + +void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size, + uint32_t flags, struct msm_gem_address_space *aspace, + struct drm_gem_object **bo, uint64_t *iova) +{ + return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, + false); +} + +void *msm_gem_kernel_new_locked(struct drm_device *dev, uint32_t size, + uint32_t flags, struct msm_gem_address_space *aspace, + struct drm_gem_object **bo, uint64_t *iova) +{ + return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, + true); +} diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h index 04e6c658b5f3..0cd458fd184b 100644 --- a/drivers/gpu/drm/msm/msm_gem.h +++ b/drivers/gpu/drm/msm/msm_gem.h @@ -132,6 +132,9 @@ static inline uint32_t msm_gem_fence(struct msm_gem_object *msm_obj, return fence; } +/* Internal submit flags */ +#define SUBMIT_FLAG_SKIP_HANGCHECK 0x00000001 + /* Created per submit-ioctl, to track bo's and cmdstream bufs, etc, * associated with the cmdstream submission for synchronization (and * make it easier to unwind when things go wrong, etc). 
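A rough usage sketch of the consolidated allocator added above (illustrative only; the buffer size and helper name are made up). It mirrors the allocate/map/vmap pattern that msm_gpu_init uses for the ring memptrs later in this series, including the teardown side.

/* Illustrative: one-shot BO + iova + vaddr allocation for kernel use. */
static void *example_alloc_scratch(struct drm_device *drm,
		struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova)
{
	void *vaddr = msm_gem_kernel_new(drm, SZ_4K, MSM_BO_UNCACHED,
			aspace, bo, iova);

	if (IS_ERR(vaddr))
		return vaddr;	/* BO, iova and vmap were unwound internally */

	/*
	 * Use vaddr for CPU access and *iova for the GPU. On teardown:
	 *   msm_gem_put_iova(*bo, aspace);
	 *   drm_gem_object_unreference_unlocked(*bo);
	 */
	return vaddr;
}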
This only @@ -140,15 +143,17 @@ static inline uint32_t msm_gem_fence(struct msm_gem_object *msm_obj, struct msm_gem_submit { struct drm_device *dev; struct msm_gem_address_space *aspace; - struct list_head node; /* node in gpu submit_list */ + struct list_head node; /* node in ring submit list */ struct list_head bo_list; struct ww_acquire_ctx ticket; uint32_t fence; int ring; - bool valid; + u32 flags; uint64_t profile_buf_iova; - void *profile_buf_vaddr; + struct drm_msm_gem_submit_profile_buffer *profile_buf; bool secure; + struct msm_gpu_submitqueue *queue; + int tick_index; unsigned int nr_cmds; unsigned int nr_bos; struct { diff --git a/drivers/gpu/drm/msm/msm_gem_prime.c b/drivers/gpu/drm/msm/msm_gem_prime.c index 678018804f3a..9f3c097d011b 100644 --- a/drivers/gpu/drm/msm/msm_gem_prime.c +++ b/drivers/gpu/drm/msm/msm_gem_prime.c @@ -80,3 +80,10 @@ void msm_gem_prime_unpin(struct drm_gem_object *obj) if (!obj->import_attach) msm_gem_put_pages(obj); } + +struct reservation_object *msm_gem_prime_res_obj(struct drm_gem_object *obj) +{ + struct msm_gem_object *msm_obj = to_msm_bo(obj); + + return msm_obj->resv; +} diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c index c861bfd77537..f2b6aa29b410 100644 --- a/drivers/gpu/drm/msm/msm_gem_submit.c +++ b/drivers/gpu/drm/msm/msm_gem_submit.c @@ -18,6 +18,7 @@ #include "msm_drv.h" #include "msm_gpu.h" #include "msm_gem.h" +#include "msm_trace.h" /* * Cmdstream submission: @@ -35,7 +36,8 @@ static inline void __user *to_user_ptr(u64 address) static struct msm_gem_submit *submit_create(struct drm_device *dev, struct msm_gem_address_space *aspace, - uint32_t nr_bos, uint32_t nr_cmds) + uint32_t nr_bos, uint32_t nr_cmds, + struct msm_gpu_submitqueue *queue) { struct msm_gem_submit *submit; uint64_t sz = sizeof(*submit) + (nr_bos * sizeof(submit->bos[0])) + @@ -48,17 +50,23 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev, if (submit) { submit->dev = dev; submit->aspace = aspace; + submit->queue = queue; /* initially, until copy_from_user() and bo lookup succeeds: */ submit->nr_bos = 0; submit->nr_cmds = 0; - submit->profile_buf_vaddr = NULL; + submit->profile_buf = NULL; submit->profile_buf_iova = 0; submit->cmd = (void *)&submit->bos[nr_bos]; submit->secure = false; + /* + * Initalize node so we can safely list_del() on it if + * we fail in the submit path + */ + INIT_LIST_HEAD(&submit->node); INIT_LIST_HEAD(&submit->bo_list); ww_acquire_init(&submit->ticket, &reservation_ww_class); } @@ -74,6 +82,16 @@ copy_from_user_inatomic(void *to, const void __user *from, unsigned long n) return -EFAULT; } +void msm_gem_submit_free(struct msm_gem_submit *submit) +{ + if (!submit) + return; + + msm_submitqueue_put(submit->queue); + list_del(&submit->node); + kfree(submit); +} + static int submit_lookup_objects(struct msm_gpu *gpu, struct msm_gem_submit *submit, struct drm_msm_gem_submit *args, struct drm_file *file) @@ -194,15 +212,8 @@ static int submit_validate_objects(struct msm_gpu *gpu, int contended, slow_locked = -1, i, ret = 0; retry: - submit->valid = true; - for (i = 0; i < submit->nr_bos; i++) { struct msm_gem_object *msm_obj = submit->bos[i].obj; - struct msm_gem_address_space *aspace; - uint64_t iova; - - aspace = (msm_obj->flags & MSM_BO_SECURE) ? 
- gpu->secure_aspace : submit->aspace; if (slow_locked == i) slow_locked = -1; @@ -229,28 +240,6 @@ retry: goto fail; } } - - /* if locking succeeded, pin bo: */ - ret = msm_gem_get_iova(&msm_obj->base, aspace, &iova); - - /* this would break the logic in the fail path.. there is no - * reason for this to happen, but just to be on the safe side - * let's notice if this starts happening in the future: - */ - WARN_ON(ret == -EDEADLK); - - if (ret) - goto fail; - - submit->bos[i].flags |= BO_PINNED; - - if (iova == submit->bos[i].iova) { - submit->bos[i].flags |= BO_VALID; - } else { - submit->bos[i].iova = iova; - submit->bos[i].flags &= ~BO_VALID; - submit->valid = false; - } } ww_acquire_done(&submit->ticket); @@ -279,9 +268,14 @@ fail: return ret; } -static int submit_bo(struct msm_gem_submit *submit, uint32_t idx, +static int submit_bo(struct msm_gpu *gpu, + struct msm_gem_submit *submit, uint32_t idx, struct msm_gem_object **obj, uint64_t *iova, bool *valid) { + struct msm_gem_object *msm_obj; + struct msm_gem_address_space *aspace; + int ret; + if (idx >= submit->nr_bos) { DRM_ERROR("invalid buffer index: %u (out of %u)\n", idx, submit->nr_bos); @@ -290,6 +284,39 @@ static int submit_bo(struct msm_gem_submit *submit, uint32_t idx, if (obj) *obj = submit->bos[idx].obj; + + /* Only map and pin if the caller needs either the iova or valid */ + if (!iova && !valid) + return 0; + + if (!(submit->bos[idx].flags & BO_PINNED)) { + uint64_t buf_iova; + + msm_obj = submit->bos[idx].obj; + aspace = (msm_obj->flags & MSM_BO_SECURE) ? + gpu->secure_aspace : submit->aspace; + + ret = msm_gem_get_iova(&msm_obj->base, aspace, &buf_iova); + + /* this would break the logic in the fail path.. there is no + * reason for this to happen, but just to be on the safe side + * let's notice if this starts happening in the future: + */ + WARN_ON(ret == -EDEADLK); + + if (ret) + return ret; + + submit->bos[idx].flags |= BO_PINNED; + + if (buf_iova == submit->bos[idx].iova) { + submit->bos[idx].flags |= BO_VALID; + } else { + submit->bos[idx].iova = buf_iova; + submit->bos[idx].flags &= ~BO_VALID; + } + } + if (iova) *iova = submit->bos[idx].iova; if (valid) @@ -299,8 +326,10 @@ static int submit_bo(struct msm_gem_submit *submit, uint32_t idx, } /* process the reloc's and patch up the cmdstream as needed: */ -static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *obj, - uint32_t offset, uint32_t nr_relocs, uint64_t relocs) +static int submit_reloc(struct msm_gpu *gpu, + struct msm_gem_submit *submit, + struct msm_gem_object *obj, uint32_t offset, + uint32_t nr_relocs, uint64_t relocs) { uint32_t i, last_offset = 0; uint32_t *ptr; @@ -316,6 +345,9 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob return -EINVAL; } + if (nr_relocs == 0) + return 0; + /* For now, just map the entire thing. Eventually we probably * to do it page-by-page, w/ kmap() if not vmap()d.. 
*/ @@ -354,7 +386,8 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob return -EINVAL; } - ret = submit_bo(submit, submit_reloc.reloc_idx, NULL, &iova, &valid); + ret = submit_bo(gpu, submit, submit_reloc.reloc_idx, + NULL, &iova, &valid); if (ret) return ret; @@ -381,6 +414,9 @@ static void submit_cleanup(struct msm_gpu *gpu, struct msm_gem_submit *submit, { unsigned i; + if (!submit) + return; + for (i = 0; i < submit->nr_bos; i++) { struct msm_gem_object *msm_obj = submit->bos[i].obj; submit_unlock_unpin_bo(gpu, submit, i); @@ -398,6 +434,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, struct drm_msm_gem_submit *args = data; struct msm_file_private *ctx = file->driver_priv; struct msm_gem_submit *submit; + struct msm_gpu_submitqueue *queue; struct msm_gpu *gpu; unsigned i; int ret; @@ -409,12 +446,17 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, return -EINVAL; gpu = priv->gpu; - if (!gpu) + if (!gpu || !ctx) return -ENXIO; + queue = msm_submitqueue_get(ctx, args->queueid); + if (!queue) + return -ENOENT; + mutex_lock(&dev->struct_mutex); - submit = submit_create(dev, ctx->aspace, args->nr_bos, args->nr_cmds); + submit = submit_create(dev, ctx->aspace, args->nr_bos, args->nr_cmds, + queue); if (!submit) { ret = -ENOMEM; goto out; @@ -434,6 +476,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, to_user_ptr(args->cmds + (i * sizeof(submit_cmd))); struct msm_gem_object *msm_obj; uint64_t iova; + size_t size; ret = copy_from_user(&submit_cmd, userptr, sizeof(submit_cmd)); if (ret) { @@ -454,7 +497,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, goto out; } - ret = submit_bo(submit, submit_cmd.submit_idx, + ret = submit_bo(gpu, submit, submit_cmd.submit_idx, &msm_obj, &iova, NULL); if (ret) goto out; @@ -466,10 +509,12 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, goto out; } - if (!(submit_cmd.size) || - ((submit_cmd.size + submit_cmd.submit_offset) > - msm_obj->base.size)) { - DRM_ERROR("invalid cmdstream size: %u\n", submit_cmd.size); + size = submit_cmd.size + submit_cmd.submit_offset; + + if (!submit_cmd.size || (size < submit_cmd.size) || + (size > msm_obj->base.size)) { + DRM_ERROR("invalid cmdstream offset/size: %u/%u\n", + submit_cmd.submit_offset, submit_cmd.size); ret = -EINVAL; goto out; } @@ -481,15 +526,13 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, if (submit_cmd.type == MSM_SUBMIT_CMD_PROFILE_BUF) { submit->profile_buf_iova = submit->cmd[i].iova; - submit->profile_buf_vaddr = - msm_gem_vaddr(&msm_obj->base); + submit->profile_buf = msm_gem_vaddr(&msm_obj->base) + + submit_cmd.submit_offset; } - if (submit->valid) - continue; - - ret = submit_reloc(submit, msm_obj, submit_cmd.submit_offset, - submit_cmd.nr_relocs, submit_cmd.relocs); + ret = submit_reloc(gpu, submit, msm_obj, + submit_cmd.submit_offset, submit_cmd.nr_relocs, + submit_cmd.relocs); if (ret) goto out; } @@ -497,17 +540,16 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, submit->nr_cmds = i; /* Clamp the user submitted ring to the range of available rings */ - submit->ring = clamp_t(uint32_t, - (args->flags & MSM_SUBMIT_RING_MASK) >> MSM_SUBMIT_RING_SHIFT, - 0, gpu->nr_rings - 1); + submit->ring = clamp_t(uint32_t, queue->prio, 0, gpu->nr_rings - 1); ret = msm_gpu_submit(gpu, submit); args->fence = submit->fence; out: - if (submit) - submit_cleanup(gpu, submit, !!ret); + submit_cleanup(gpu, submit, !!ret); + if (ret) + msm_gem_submit_free(submit); 
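The reworked cmdstream bounds check above guards against the offset + size addition wrapping in 32 bits as well as against overrunning the backing object. A standalone restatement of the same test (names are illustrative):

/* Reject a command range that is empty, wraps in 32 bits, or overruns. */
static bool example_cmd_range_ok(uint32_t offset, uint32_t size,
		size_t obj_size)
{
	uint32_t end = offset + size;	/* may wrap; caught by end < size */

	return size && (end >= size) && (end <= obj_size);
}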
mutex_unlock(&dev->struct_mutex); return ret; } diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c index 81bab9cc22af..7c109fdab545 100644 --- a/drivers/gpu/drm/msm/msm_gpu.c +++ b/drivers/gpu/drm/msm/msm_gpu.c @@ -18,7 +18,7 @@ #include "msm_gpu.h" #include "msm_gem.h" #include "msm_mmu.h" - +#include "msm_trace.h" /* * Power Management: @@ -274,19 +274,32 @@ static void inactive_start(struct msm_gpu *gpu) round_jiffies_up(jiffies + DRM_MSM_INACTIVE_JIFFIES)); } +static void retire_guilty_submit(struct msm_gpu *gpu, + struct msm_ringbuffer *ring) +{ + struct msm_gem_submit *submit = list_first_entry_or_null(&ring->submits, + struct msm_gem_submit, node); + + if (!submit) + return; + + submit->queue->faults++; + + msm_gem_submit_free(submit); +} + /* * Hangcheck detection for locked gpu: */ -static void retire_submits(struct msm_gpu *gpu, uint32_t fence); +static void retire_submits(struct msm_gpu *gpu, struct msm_ringbuffer *ring, + uint32_t fence); static void recover_worker(struct work_struct *work) { struct msm_gpu *gpu = container_of(work, struct msm_gpu, recover_work); struct drm_device *dev = gpu->dev; - dev_err(dev->dev, "%s: hangcheck recover!\n", gpu->name); - mutex_lock(&dev->struct_mutex); if (msm_gpu_active(gpu)) { struct msm_gem_submit *submit; @@ -295,30 +308,21 @@ static void recover_worker(struct work_struct *work) inactive_cancel(gpu); - FOR_EACH_RING(gpu, ring, i) { - uint32_t fence; + /* Retire all events that have already passed */ + FOR_EACH_RING(gpu, ring, i) + retire_submits(gpu, ring, ring->memptrs->fence); - if (!ring) - continue; - - fence = gpu->funcs->last_fence(gpu, ring); - - /* - * Retire the faulting command on the active ring and - * make sure the other rings are cleaned up - */ - if (ring == gpu->funcs->active_ring(gpu)) - retire_submits(gpu, fence + 1); - else - retire_submits(gpu, fence); - } + retire_guilty_submit(gpu, gpu->funcs->active_ring(gpu)); /* Recover the GPU */ gpu->funcs->recover(gpu); - /* replay the remaining submits for all rings: */ - list_for_each_entry(submit, &gpu->submit_list, node) { - gpu->funcs->submit(gpu, submit); + /* Replay the remaining on all rings, highest priority first */ + for (i = 0; i < gpu->nr_rings; i++) { + struct msm_ringbuffer *ring = gpu->rb[i]; + + list_for_each_entry(submit, &ring->submits, node) + gpu->funcs->submit(gpu, submit); } } mutex_unlock(&dev->struct_mutex); @@ -339,15 +343,29 @@ static void hangcheck_handler(unsigned long data) struct drm_device *dev = gpu->dev; struct msm_drm_private *priv = dev->dev_private; struct msm_ringbuffer *ring = gpu->funcs->active_ring(gpu); - uint32_t fence = gpu->funcs->last_fence(gpu, ring); + uint32_t fence = ring->memptrs->fence; uint32_t submitted = gpu->funcs->submitted_fence(gpu, ring); - if (fence != gpu->hangcheck_fence[ring->id]) { + if (fence != ring->hangcheck_fence) { /* some progress has been made.. ya! */ - gpu->hangcheck_fence[ring->id] = fence; + ring->hangcheck_fence = fence; } else if (fence < submitted) { - /* no progress and not done.. hung! */ - gpu->hangcheck_fence[ring->id] = fence; + struct msm_gem_submit *submit; + + ring->hangcheck_fence = fence; + + /* + * No progress done, but see if the current submit is + * intentionally skipping the hangcheck + */ + submit = list_first_entry_or_null(&ring->submits, + struct msm_gem_submit, node); + + if (!submit || (submit->queue->flags & + MSM_SUBMITQUEUE_BYPASS_QOS_TIMEOUT)) + goto out; + + /* no progress and not done and not special .. hung! 
*/ dev_err(dev->dev, "%s: hangcheck detected gpu lockup rb %d!\n", gpu->name, ring->id); dev_err(dev->dev, "%s: completed fence: %u\n", @@ -358,8 +376,9 @@ static void hangcheck_handler(unsigned long data) queue_work(priv->wq, &gpu->recover_work); } +out: /* if still more pending work, reset the hangcheck timer: */ - if (submitted > gpu->hangcheck_fence[ring->id]) + if (submitted > ring->hangcheck_fence) hangcheck_timer_reset(gpu); /* workaround for missing irq: */ @@ -465,23 +484,28 @@ out: * Cmdstream submission/retirement: */ -static void retire_submits(struct msm_gpu *gpu, uint32_t fence) +static void retire_submits(struct msm_gpu *gpu, struct msm_ringbuffer *ring, + uint32_t fence) { struct drm_device *dev = gpu->dev; struct msm_gem_submit *submit, *tmp; WARN_ON(!mutex_is_locked(&dev->struct_mutex)); - /* - * Find and retire all the submits in the same ring that are older than - * or equal to 'fence' - */ + list_for_each_entry_safe(submit, tmp, &ring->submits, node) { + struct msm_memptr_ticks *ticks; - list_for_each_entry_safe(submit, tmp, &gpu->submit_list, node) { - if (COMPARE_FENCE_LTE(submit->fence, fence)) { - list_del(&submit->node); - kfree(submit); - } + if (submit->fence > fence) + break; + + ticks = &(ring->memptrs->ticks[submit->tick_index]); + + /* Add memory barrier to ensure the timer ticks are posted */ + rmb(); + + trace_msm_retired(submit, ticks->started, ticks->retired); + + msm_gem_submit_free(submit); } } @@ -493,11 +517,12 @@ static bool _fence_signaled(struct msm_gem_object *obj, uint32_t fence) return COMPARE_FENCE_LTE(obj->read_fence, fence); } -static void _retire_ring(struct msm_gpu *gpu, uint32_t fence) +static void _retire_ring(struct msm_gpu *gpu, struct msm_ringbuffer *ring, + uint32_t fence) { struct msm_gem_object *obj, *tmp; - retire_submits(gpu, fence); + retire_submits(gpu, ring, fence); list_for_each_entry_safe(obj, tmp, &gpu->active_list, mm_list) { if (_fence_signaled(obj, fence)) { @@ -516,16 +541,13 @@ static void retire_worker(struct work_struct *work) int i; FOR_EACH_RING(gpu, ring, i) { - uint32_t fence; - if (!ring) continue; - fence = gpu->funcs->last_fence(gpu, ring); - msm_update_fence(gpu->dev, fence); + msm_update_fence(gpu->dev, ring->memptrs->fence); mutex_lock(&dev->struct_mutex); - _retire_ring(gpu, fence); + _retire_ring(gpu, ring, ring->memptrs->fence); mutex_unlock(&dev->struct_mutex); } @@ -545,22 +567,27 @@ void msm_gpu_retire(struct msm_gpu *gpu) int msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) { struct drm_device *dev = gpu->dev; - struct msm_drm_private *priv = dev->dev_private; struct msm_ringbuffer *ring = gpu->rb[submit->ring]; int i; WARN_ON(!mutex_is_locked(&dev->struct_mutex)); - submit->fence = FENCE(submit->ring, ++priv->next_fence[submit->ring]); + submit->fence = FENCE(submit->ring, ++ring->seqno); inactive_cancel(gpu); - list_add_tail(&submit->node, &gpu->submit_list); + list_add_tail(&submit->node, &ring->submits); msm_rd_dump_submit(submit); ring->submitted_fence = submit->fence; + submit->tick_index = ring->tick_index; + ring->tick_index = (ring->tick_index + 1) % + ARRAY_SIZE(ring->memptrs->ticks); + + trace_msm_queued(submit); + update_sw_cntrs(gpu); for (i = 0; i < submit->nr_bos; i++) { @@ -652,6 +679,9 @@ int msm_gpu_counter_put(struct msm_gpu *gpu, struct drm_msm_counter *data, { struct msm_context_counter *entry; + if (!gpu || !ctx) + return -ENODEV; + list_for_each_entry(entry, &ctx->counters, node) { if (entry->groupid == data->groupid && entry->counterid == data->counterid) { 
@@ -793,17 +823,39 @@ msm_gpu_create_address_space(struct msm_gpu *gpu, struct device *dev, gpu->name, name, PTR_ERR(aspace)); iommu_domain_free(iommu); - aspace = NULL; + return NULL; + } + + if (aspace->mmu) { + int ret = aspace->mmu->funcs->attach(aspace->mmu, NULL, 0); + + if (ret) { + dev_err(gpu->dev->dev, + "%s: failed to atach IOMMU '%s': %d\n", + gpu->name, name, ret); + msm_gem_address_space_put(aspace); + aspace = ERR_PTR(ret); + } } return aspace; } +static void msm_gpu_destroy_address_space(struct msm_gem_address_space *aspace) +{ + if (!IS_ERR_OR_NULL(aspace) && aspace->mmu) + aspace->mmu->funcs->detach(aspace->mmu); + + msm_gem_address_space_put(aspace); +} + int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev, struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs, const char *name, struct msm_gpu_config *config) { int i, ret, nr_rings; + void *memptrs; + uint64_t memptrs_iova; if (WARN_ON(gpu->num_perfcntrs > ARRAY_SIZE(gpu->last_cntrs))) gpu->num_perfcntrs = ARRAY_SIZE(gpu->last_cntrs); @@ -811,14 +863,20 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev, gpu->dev = drm; gpu->funcs = funcs; gpu->name = name; - gpu->inactive = true; + /* + * Set the inactive flag to false, so that when the retire worker + * kicks in from the init path, it knows that it has to turn off the + * clocks. This should be fine to do since this is the init sequence + * and we have an init_lock in msm_open() to protect against bad things + * from happening. + */ + gpu->inactive = false; INIT_LIST_HEAD(&gpu->active_list); INIT_WORK(&gpu->retire_work, retire_worker); INIT_WORK(&gpu->inactive_work, inactive_worker); INIT_WORK(&gpu->recover_work, recover_worker); - INIT_LIST_HEAD(&gpu->submit_list); setup_timer(&gpu->inactive_timer, inactive_handler, (unsigned long)gpu); @@ -846,6 +904,7 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev, ret = devm_request_irq(&pdev->dev, gpu->irq, irq_handler, IRQF_TRIGGER_HIGH, gpu->name, gpu); if (ret) { + gpu->irq = ret; dev_err(drm->dev, "failed to request IRQ%u: %d\n", gpu->irq, ret); goto fail; } @@ -887,10 +946,18 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev, nr_rings = ARRAY_SIZE(gpu->rb); } + /* Allocate one buffer to hold all the memptr records for the rings */ + memptrs = msm_gem_kernel_new(drm, sizeof(struct msm_memptrs) * nr_rings, + MSM_BO_UNCACHED, gpu->aspace, &gpu->memptrs_bo, &memptrs_iova); + + if (IS_ERR(memptrs)) { + ret = PTR_ERR(memptrs); + goto fail; + } + /* Create ringbuffer(s): */ for (i = 0; i < nr_rings; i++) { - - gpu->rb[i] = msm_ringbuffer_new(gpu, i); + gpu->rb[i] = msm_ringbuffer_new(gpu, i, memptrs, memptrs_iova); if (IS_ERR(gpu->rb[i])) { ret = PTR_ERR(gpu->rb[i]); gpu->rb[i] = NULL; @@ -898,6 +965,9 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev, "could not create ringbuffer %d: %d\n", i, ret); goto fail; } + + memptrs += sizeof(struct msm_memptrs); + memptrs_iova += sizeof(struct msm_memptrs); } gpu->nr_rings = nr_rings; @@ -919,11 +989,17 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev, return 0; fail: - for (i = 0; i < ARRAY_SIZE(gpu->rb); i++) { - if (gpu->rb[i]) - msm_ringbuffer_destroy(gpu->rb[i]); + for (i = 0; i < ARRAY_SIZE(gpu->rb); i++) + msm_ringbuffer_destroy(gpu->rb[i]); + + if (gpu->memptrs_bo) { + msm_gem_put_iova(gpu->memptrs_bo, gpu->aspace); + drm_gem_object_unreference_unlocked(gpu->memptrs_bo); } + msm_gpu_destroy_address_space(gpu->aspace); + 
msm_gpu_destroy_address_space(gpu->secure_aspace); + pm_runtime_disable(&pdev->dev); return ret; } @@ -939,18 +1015,42 @@ void msm_gpu_cleanup(struct msm_gpu *gpu) WARN_ON(!list_empty(&gpu->active_list)); + if (gpu->irq >= 0) { + disable_irq(gpu->irq); + devm_free_irq(&pdev->dev, gpu->irq, gpu); + } + bs_fini(gpu); - for (i = 0; i < ARRAY_SIZE(gpu->rb); i++) { - if (!gpu->rb[i]) - continue; - - if (gpu->rb[i]->iova) - msm_gem_put_iova(gpu->rb[i]->bo, gpu->aspace); - + for (i = 0; i < ARRAY_SIZE(gpu->rb); i++) msm_ringbuffer_destroy(gpu->rb[i]); + + if (gpu->memptrs_bo) { + msm_gem_put_iova(gpu->memptrs_bo, gpu->aspace); + drm_gem_object_unreference_unlocked(gpu->memptrs_bo); } msm_snapshot_destroy(gpu, gpu->snapshot); pm_runtime_disable(&pdev->dev); + + msm_gpu_destroy_address_space(gpu->aspace); + msm_gpu_destroy_address_space(gpu->secure_aspace); + + if (gpu->gpu_reg) + devm_regulator_put(gpu->gpu_reg); + + if (gpu->gpu_cx) + devm_regulator_put(gpu->gpu_cx); + + if (gpu->ebi1_clk) + devm_clk_put(&pdev->dev, gpu->ebi1_clk); + + for (i = gpu->nr_clocks - 1; i >= 0; i--) + if (gpu->grp_clks[i]) + devm_clk_put(&pdev->dev, gpu->grp_clks[i]); + + devm_kfree(&pdev->dev, gpu->grp_clks); + + if (gpu->mmio) + devm_iounmap(&pdev->dev, gpu->mmio); } diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h index a47eae68dd9b..eeebfb746f7f 100644 --- a/drivers/gpu/drm/msm/msm_gpu.h +++ b/drivers/gpu/drm/msm/msm_gpu.h @@ -64,8 +64,6 @@ struct msm_gpu_funcs { void (*submit)(struct msm_gpu *gpu, struct msm_gem_submit *submit); void (*flush)(struct msm_gpu *gpu, struct msm_ringbuffer *ring); irqreturn_t (*irq)(struct msm_gpu *irq); - uint32_t (*last_fence)(struct msm_gpu *gpu, - struct msm_ringbuffer *ring); uint32_t (*submitted_fence)(struct msm_gpu *gpu, struct msm_ringbuffer *ring); struct msm_ringbuffer *(*active_ring)(struct msm_gpu *gpu); @@ -131,6 +129,8 @@ struct msm_gpu { struct pm_qos_request pm_qos_req_dma; + struct drm_gem_object *memptrs_bo; + #ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING struct msm_bus_scale_pdata *bus_scale_table; uint32_t bsc; @@ -145,14 +145,19 @@ struct msm_gpu { #define DRM_MSM_HANGCHECK_PERIOD 500 /* in ms */ #define DRM_MSM_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_MSM_HANGCHECK_PERIOD) struct timer_list hangcheck_timer; - uint32_t hangcheck_fence[MSM_GPU_MAX_RINGS]; struct work_struct recover_work; - - struct list_head submit_list; - struct msm_snapshot *snapshot; }; +struct msm_gpu_submitqueue { + int id; + u32 flags; + u32 prio; + int faults; + struct list_head node; + struct kref ref; +}; + /* It turns out that all targets use the same ringbuffer size. 
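To make the new submitqueue priority field concrete (illustrative numbers only): msm_ioctl_gem_submit clamps queue->prio to the available rings, so a queue created with prio 2 runs on ring 2 of a four-ring target but falls back to ring 0 on a single-ring target.

/* Illustrative: how a queue priority picks a ringbuffer at submit time. */
static unsigned int example_ring_for_prio(unsigned int prio,
		unsigned int nr_rings)
{
	/* same clamp as msm_ioctl_gem_submit(): 0 .. nr_rings - 1 */
	return clamp_t(unsigned int, prio, 0, nr_rings - 1);
}
/* example_ring_for_prio(2, 4) == 2, example_ring_for_prio(2, 1) == 0 */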
*/ #define MSM_GPU_RINGBUFFER_SZ SZ_32K #define MSM_GPU_RINGBUFFER_BLKSIZE 32 @@ -178,7 +183,7 @@ static inline bool msm_gpu_active(struct msm_gpu *gpu) FOR_EACH_RING(gpu, ring, i) { if (gpu->funcs->submitted_fence(gpu, ring) > - gpu->funcs->last_fence(gpu, ring)) + ring->memptrs->fence) return true; } @@ -280,4 +285,10 @@ void msm_gpu_cleanup_counters(struct msm_gpu *gpu, u64 msm_gpu_counter_read(struct msm_gpu *gpu, struct drm_msm_counter_read *data); +static inline void msm_submitqueue_put(struct msm_gpu_submitqueue *queue) +{ + if (queue) + kref_put(&queue->ref, msm_submitqueue_destroy); +} + #endif /* __MSM_GPU_H__ */ diff --git a/drivers/gpu/drm/msm/msm_kms.h b/drivers/gpu/drm/msm/msm_kms.h index 2ab50919f514..ed0ba928f170 100644 --- a/drivers/gpu/drm/msm/msm_kms.h +++ b/drivers/gpu/drm/msm/msm_kms.h @@ -34,6 +34,24 @@ #define MSM_MODE_FLAG_SEAMLESS_DYNAMIC_FPS (1<<0) /* Transition to new mode requires a wait-for-vblank before the modeset */ #define MSM_MODE_FLAG_VBLANK_PRE_MODESET (1<<1) +/* + * We need setting some flags in bridge, and using them in encoder. Add them in + * private_flags would be better for use. DRM_MODE_FLAG_SUPPORTS_RGB/YUV are + * flags that indicating the SINK supported color formats read from EDID. While, + * these flags defined here indicate the best color/bit depth foramt we choosed + * that would be better for display. For example the best mode display like: + * RGB+RGB_DC,YUV+YUV_DC, RGB,YUV. And we could not set RGB and YUV format at + * the same time. And also RGB_DC only set when RGB format is set,the same for + * YUV_DC. + */ +/* Enable RGB444 30 bit deep color */ +#define MSM_MODE_FLAG_RGB444_DC_ENABLE (1<<2) +/* Enable YUV420 30 bit deep color */ +#define MSM_MODE_FLAG_YUV420_DC_ENABLE (1<<3) +/* Choose RGB444 format to display */ +#define MSM_MODE_FLAG_COLOR_FORMAT_RGB444 (1<<4) +/* Choose YUV420 format to display */ +#define MSM_MODE_FLAG_COLOR_FORMAT_YCBCR420 (1<<5) /* As there are different display controller blocks depending on the * snapdragon version, the kms support is split out and the appropriate diff --git a/drivers/gpu/drm/msm/msm_mmu.h b/drivers/gpu/drm/msm/msm_mmu.h index 8148d3e9e850..cd3a710f8f27 100644 --- a/drivers/gpu/drm/msm/msm_mmu.h +++ b/drivers/gpu/drm/msm/msm_mmu.h @@ -46,6 +46,8 @@ struct msm_mmu_funcs { void (*destroy)(struct msm_mmu *mmu); void (*enable)(struct msm_mmu *mmu); void (*disable)(struct msm_mmu *mmu); + int (*set_property)(struct msm_mmu *mmu, + enum iommu_attr attr, void *data); }; struct msm_mmu { diff --git a/drivers/gpu/drm/msm/msm_prop.c b/drivers/gpu/drm/msm/msm_prop.c index 5f3d1b6356aa..10f89de25831 100644 --- a/drivers/gpu/drm/msm/msm_prop.c +++ b/drivers/gpu/drm/msm/msm_prop.c @@ -304,7 +304,7 @@ void msm_property_install_rotation(struct msm_property_info *info, void msm_property_install_enum(struct msm_property_info *info, const char *name, int flags, int is_bitmask, const struct drm_prop_enum_list *values, int num_values, - uint32_t property_idx) + uint32_t property_idx, uint64_t default_value) { struct drm_property **prop; @@ -337,7 +337,7 @@ void msm_property_install_enum(struct msm_property_info *info, } /* save init value for later */ - info->property_data[property_idx].default_value = 0; + info->property_data[property_idx].default_value = default_value; info->property_data[property_idx].force_dirty = false; /* always attach property, if created */ diff --git a/drivers/gpu/drm/msm/msm_prop.h b/drivers/gpu/drm/msm/msm_prop.h index 1430551700c7..6e600c4fd02f 100644 --- 
a/drivers/gpu/drm/msm/msm_prop.h +++ b/drivers/gpu/drm/msm/msm_prop.h @@ -267,6 +267,7 @@ void msm_property_install_rotation(struct msm_property_info *info, * @values: Array of allowable enumeration/bitmask values * @num_values: Size of values array * @property_idx: Property index + * @default_value: Default value of current property */ void msm_property_install_enum(struct msm_property_info *info, const char *name, @@ -274,7 +275,8 @@ void msm_property_install_enum(struct msm_property_info *info, int is_bitmask, const struct drm_prop_enum_list *values, int num_values, - uint32_t property_idx); + uint32_t property_idx, + uint64_t default_value); /** * msm_property_install_blob - install standard drm blob property diff --git a/drivers/gpu/drm/msm/msm_rd.c b/drivers/gpu/drm/msm/msm_rd.c index 2d112f24a902..edf3ff2a7a61 100644 --- a/drivers/gpu/drm/msm/msm_rd.c +++ b/drivers/gpu/drm/msm/msm_rd.c @@ -27,6 +27,11 @@ * This bypasses drm_debugfs_create_files() mainly because we need to use * our own fops for a bit more control. In particular, we don't want to * do anything if userspace doesn't have the debugfs file open. + * + * The module-param "rd_full", which defaults to false, enables snapshotting + * all (non-written) buffers in the submit, rather than just cmdstream bo's. + * This is useful to capture the contents of (for example) vbo's or textures, + * or shader programs (if not emitted inline in cmdstream). */ #ifdef CONFIG_DEBUG_FS @@ -40,6 +45,10 @@ #include "msm_gpu.h" #include "msm_gem.h" +static bool rd_full = false; +MODULE_PARM_DESC(rd_full, "If true, $debugfs/.../rd will snapshot all buffer contents"); +module_param_named(rd_full, rd_full, bool, 0600); + enum rd_sect_type { RD_NONE, RD_TEST, /* ascii text */ @@ -277,6 +286,36 @@ void msm_rd_debugfs_cleanup(struct drm_minor *minor) kfree(rd); } +static void snapshot_buf(struct msm_rd_state *rd, + struct msm_gem_submit *submit, int idx, + uint64_t iova, uint32_t size) +{ + struct msm_gem_object *obj = submit->bos[idx].obj; + uint64_t offset = 0; + + if (iova) { + offset = iova - submit->bos[idx].iova; + } else { + iova = submit->bos[idx].iova; + size = obj->base.size; + } + + /* Always write the RD_GPUADDR so we know how big the buffer is */ + rd_write_section(rd, RD_GPUADDR, + (uint64_t[2]) { iova, size }, 16); + + /* But only dump contents for buffers marked as read and not secure */ + if (submit->bos[idx].flags & MSM_SUBMIT_BO_READ && + !(obj->flags & MSM_BO_SECURE)) { + const char *buf = msm_gem_vaddr(&obj->base); + + if (IS_ERR_OR_NULL(buf)) + return; + + rd_write_section(rd, RD_BUFFER_CONTENTS, buf + offset, size); + } +} + /* called under struct_mutex */ void msm_rd_dump_submit(struct msm_gem_submit *submit) { @@ -300,24 +339,20 @@ void msm_rd_dump_submit(struct msm_gem_submit *submit) rd_write_section(rd, RD_CMD, msg, ALIGN(n, 4)); - /* could be nice to have an option (module-param?) to snapshot - * all the bo's associated with the submit. Handy to see vtx - * buffers, etc. For now just the cmdstream bo's is enough. 
- */ + if (rd_full) { + for (i = 0; i < submit->nr_bos; i++) + snapshot_buf(rd, submit, i, 0, 0); + } for (i = 0; i < submit->nr_cmds; i++) { - uint32_t idx = submit->cmd[i].idx; uint64_t iova = submit->cmd[i].iova; uint32_t szd = submit->cmd[i].size; /* in dwords */ - struct msm_gem_object *obj = submit->bos[idx].obj; - const char *buf = msm_gem_vaddr(&obj->base); - buf += iova - submit->bos[idx].iova; - - rd_write_section(rd, RD_GPUADDR, - (uint64_t[2]) { iova, szd * 4 }, 16); - rd_write_section(rd, RD_BUFFER_CONTENTS, - buf, szd * 4); + /* snapshot cmdstream bo's (if we haven't already): */ + if (!rd_full) { + snapshot_buf(rd, submit, submit->cmd[i].idx, + submit->cmd[i].iova, szd * 4); + } switch (submit->cmd[i].type) { case MSM_SUBMIT_CMD_IB_TARGET_BUF: diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.c b/drivers/gpu/drm/msm/msm_ringbuffer.c index 382c71bb0ebe..2a5843e6f81b 100644 --- a/drivers/gpu/drm/msm/msm_ringbuffer.c +++ b/drivers/gpu/drm/msm/msm_ringbuffer.c @@ -18,7 +18,8 @@ #include "msm_ringbuffer.h" #include "msm_gpu.h" -struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int id) +struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int id, + struct msm_memptrs *memptrs, uint64_t memptrs_iova) { struct msm_ringbuffer *ring; int ret; @@ -42,11 +43,16 @@ struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int id) goto fail; } + ring->memptrs = memptrs; + ring->memptrs_iova = memptrs_iova; + + ring->start = msm_gem_vaddr(ring->bo); ring->end = ring->start + (MSM_GPU_RINGBUFFER_SZ >> 2); ring->next = ring->start; ring->cur = ring->start; + INIT_LIST_HEAD(&ring->submits); spin_lock_init(&ring->lock); return ring; @@ -59,7 +65,10 @@ fail: void msm_ringbuffer_destroy(struct msm_ringbuffer *ring) { - if (ring->bo) + if (ring && ring->bo) { + msm_gem_put_iova(ring->bo, ring->gpu->aspace); drm_gem_object_unreference_unlocked(ring->bo); + } + kfree(ring); } diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.h b/drivers/gpu/drm/msm/msm_ringbuffer.h index 1e84905073bf..e9678d57fffd 100644 --- a/drivers/gpu/drm/msm/msm_ringbuffer.h +++ b/drivers/gpu/drm/msm/msm_ringbuffer.h @@ -20,17 +20,46 @@ #include "msm_drv.h" +#define rbmemptr(ring, member) \ + ((ring)->memptrs_iova + offsetof(struct msm_memptrs, member)) + +struct msm_memptr_ticks { + uint64_t started; + uint64_t retired; +}; + +struct msm_memptrs { + volatile uint32_t rptr; + volatile uint32_t fence; + volatile uint64_t ttbr0; + volatile unsigned int contextidr; + struct msm_memptr_ticks ticks[128]; +}; + +#define RING_TICKS_IOVA(ring, index, field) \ + ((ring)->memptrs_iova + offsetof(struct msm_memptrs, ticks) + \ + ((index) * sizeof(struct msm_memptr_ticks)) + \ + offsetof(struct msm_memptr_ticks, field)) + struct msm_ringbuffer { struct msm_gpu *gpu; int id; struct drm_gem_object *bo; uint32_t *start, *end, *cur, *next; uint64_t iova; + uint32_t seqno; uint32_t submitted_fence; spinlock_t lock; + struct list_head submits; + uint32_t hangcheck_fence; + + struct msm_memptrs *memptrs; + uint64_t memptrs_iova; + int tick_index; }; -struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int id); +struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int id, + struct msm_memptrs *memptrs, uint64_t memptrs_iova); void msm_ringbuffer_destroy(struct msm_ringbuffer *ring); /* ringbuffer helpers (the parts that are same for a3xx/a2xx/z180..) 
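A short sketch of how the memptr helpers above are meant to be used (illustrative, not from the patch): rbmemptr() yields the GPU-visible address of a field in this ring's struct msm_memptrs, which the CP writes, while the CPU side reads the same field back through ring->memptrs, as the retire and hangcheck paths now do.

/* Illustrative: GPU-side address vs. CPU-side read of the fence slot. */
static inline uint64_t example_fence_iova(struct msm_ringbuffer *ring)
{
	return rbmemptr(ring, fence);	/* target address for the CP fence write */
}

static inline uint32_t example_last_fence(struct msm_ringbuffer *ring)
{
	return ring->memptrs->fence;	/* value retire/hangcheck compare against */
}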
*/ diff --git a/drivers/gpu/drm/msm/msm_smmu.c b/drivers/gpu/drm/msm/msm_smmu.c index 7d0dda032c59..4247243055b6 100644 --- a/drivers/gpu/drm/msm/msm_smmu.c +++ b/drivers/gpu/drm/msm/msm_smmu.c @@ -120,16 +120,30 @@ static int msm_smmu_map(struct msm_mmu *mmu, uint64_t iova, { struct msm_smmu *smmu = to_msm_smmu(mmu); struct msm_smmu_client *client = msm_smmu_to_client(smmu); + struct iommu_domain *domain; int ret; - if (priv) - ret = msm_dma_map_sg_lazy(client->dev, sgt->sgl, sgt->nents, - DMA_BIDIRECTIONAL, priv); - else - ret = dma_map_sg(client->dev, sgt->sgl, sgt->nents, - DMA_BIDIRECTIONAL); + if (!client || !sgt) + return -EINVAL; - return (ret != sgt->nents) ? -ENOMEM : 0; + if (iova != 0) { + if (!client->mmu_mapping || !client->mmu_mapping->domain) + return -EINVAL; + + domain = client->mmu_mapping->domain; + + return iommu_map_sg(domain, iova, sgt->sgl, + sgt->nents, flags); + } else { + if (priv) + ret = msm_dma_map_sg_lazy(client->dev, sgt->sgl, + sgt->nents, DMA_BIDIRECTIONAL, priv); + else + ret = dma_map_sg(client->dev, sgt->sgl, sgt->nents, + DMA_BIDIRECTIONAL); + + return (ret != sgt->nents) ? -ENOMEM : 0; + } } static void msm_smmu_unmap(struct msm_mmu *mmu, uint64_t iova, @@ -156,12 +170,36 @@ static void msm_smmu_destroy(struct msm_mmu *mmu) kfree(smmu); } +/* user can call this API to set the attribute of smmu*/ +static int msm_smmu_set_property(struct msm_mmu *mmu, + enum iommu_attr attr, void *data) +{ + struct msm_smmu *smmu = to_msm_smmu(mmu); + struct msm_smmu_client *client = msm_smmu_to_client(smmu); + struct iommu_domain *domain; + int ret = 0; + + if (!client) + return -EINVAL; + + domain = client->mmu_mapping->domain; + if (!domain) + return -EINVAL; + + ret = iommu_domain_set_attr(domain, attr, data); + if (ret) + DRM_ERROR("set domain attribute failed\n"); + + return ret; +} + static const struct msm_mmu_funcs funcs = { .attach = msm_smmu_attach, .detach = msm_smmu_detach, .map = msm_smmu_map, .unmap = msm_smmu_unmap, .destroy = msm_smmu_destroy, + .set_property = msm_smmu_set_property, }; static struct msm_smmu_domain msm_smmu_domains[MSM_SMMU_DOMAIN_MAX] = { @@ -173,8 +211,8 @@ static struct msm_smmu_domain msm_smmu_domains[MSM_SMMU_DOMAIN_MAX] = { }, [MSM_SMMU_DOMAIN_SECURE] = { .label = "mdp_s", - .va_start = 0, - .va_size = SZ_4G, + .va_start = SZ_128K, + .va_size = SZ_4G - SZ_128K, .secure = true, }, [MSM_SMMU_DOMAIN_NRT_UNSECURE] = { @@ -185,8 +223,8 @@ static struct msm_smmu_domain msm_smmu_domains[MSM_SMMU_DOMAIN_MAX] = { }, [MSM_SMMU_DOMAIN_NRT_SECURE] = { .label = "rot_s", - .va_start = 0, - .va_size = SZ_4G, + .va_start = SZ_128K, + .va_size = SZ_4G - SZ_128K, .secure = true, }, }; diff --git a/drivers/gpu/drm/msm/msm_submitqueue.c b/drivers/gpu/drm/msm/msm_submitqueue.c new file mode 100644 index 000000000000..f79e74071c79 --- /dev/null +++ b/drivers/gpu/drm/msm/msm_submitqueue.c @@ -0,0 +1,151 @@ +/* Copyright (c) 2017 The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include +#include "msm_gpu.h" + +void msm_submitqueue_destroy(struct kref *kref) +{ + struct msm_gpu_submitqueue *queue = container_of(kref, + struct msm_gpu_submitqueue, ref); + + kfree(queue); +} + +struct msm_gpu_submitqueue *msm_submitqueue_get(struct msm_file_private *ctx, + u32 id) +{ + struct msm_gpu_submitqueue *entry; + + if (!ctx) + return NULL; + + read_lock(&ctx->queuelock); + + list_for_each_entry(entry, &ctx->submitqueues, node) { + if (entry->id == id) { + kref_get(&entry->ref); + read_unlock(&ctx->queuelock); + + return entry; + } + } + + read_unlock(&ctx->queuelock); + return NULL; +} + +void msm_submitqueue_close(struct msm_file_private *ctx) +{ + struct msm_gpu_submitqueue *entry, *tmp; + + /* + * No lock needed in close and there won't + * be any more user ioctls coming our way + */ + + list_for_each_entry_safe(entry, tmp, &ctx->submitqueues, node) + msm_submitqueue_put(entry); +} + +int msm_submitqueue_create(struct msm_file_private *ctx, u32 prio, u32 flags, + u32 *id) +{ + struct msm_gpu_submitqueue *queue = kzalloc(sizeof(*queue), GFP_KERNEL); + + if (!queue) + return -ENOMEM; + + kref_init(&queue->ref); + queue->flags = flags; + queue->prio = prio; + + write_lock(&ctx->queuelock); + + queue->id = ctx->queueid++; + + if (id) + *id = queue->id; + + list_add_tail(&queue->node, &ctx->submitqueues); + + write_unlock(&ctx->queuelock); + + return 0; +} + +int msm_submitqueue_init(struct msm_file_private *ctx) +{ + INIT_LIST_HEAD(&ctx->submitqueues); + + rwlock_init(&ctx->queuelock); + + /* + * Add the "default" submitqueue with id 0 + * "low" priority (2) and no flags + */ + + return msm_submitqueue_create(ctx, 2, 0, NULL); +} + +int msm_submitqueue_query(struct msm_file_private *ctx, u32 id, u32 param, + void __user *data, u32 len) +{ + struct msm_gpu_submitqueue *queue = msm_submitqueue_get(ctx, id); + int ret = 0; + + if (!queue) + return -ENOENT; + + if (param == MSM_SUBMITQUEUE_PARAM_FAULTS) { + u32 size = min_t(u32, len, sizeof(queue->faults)); + + if (copy_to_user(data, &queue->faults, size)) + ret = -EFAULT; + } else { + ret = -EINVAL; + } + + msm_submitqueue_put(queue); + + return ret; +} + +int msm_submitqueue_remove(struct msm_file_private *ctx, u32 id) +{ + struct msm_gpu_submitqueue *entry; + + /* + * id 0 is the "default" queue and can't be destroyed + * by the user + */ + + if (!id) + return -ENOENT; + + write_lock(&ctx->queuelock); + + list_for_each_entry(entry, &ctx->submitqueues, node) { + if (entry->id == id) { + list_del(&entry->node); + write_unlock(&ctx->queuelock); + + msm_submitqueue_put(entry); + return 0; + } + } + + write_unlock(&ctx->queuelock); + return -ENOENT; +} + diff --git a/drivers/gpu/drm/msm/msm_trace.h b/drivers/gpu/drm/msm/msm_trace.h new file mode 100644 index 000000000000..68c7ff78ffc2 --- /dev/null +++ b/drivers/gpu/drm/msm/msm_trace.h @@ -0,0 +1,98 @@ +/* Copyright (c) 2017 The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#if !defined(_MSM_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ) +#define _MSM_TRACE_H_ + +#include + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM msm_drm +#define TRACE_INCLUDE_FILE msm_trace + +TRACE_EVENT(msm_queued, + TP_PROTO(struct msm_gem_submit *submit), + TP_ARGS(submit), + TP_STRUCT__entry( + __field(uint32_t, queue_id) + __field(uint32_t, fence_id) + __field(int, ring) + ), + TP_fast_assign( + __entry->queue_id = submit->queue->id; + __entry->fence_id = submit->fence; + __entry->ring = submit->ring; + ), + TP_printk( + "queue=%u fence=%u ring=%d", + __entry->queue_id, __entry->fence_id, __entry->ring + ) +); + +TRACE_EVENT(msm_submitted, + TP_PROTO(struct msm_gem_submit *submit, uint64_t ticks, uint64_t nsecs), + TP_ARGS(submit, ticks, nsecs), + TP_STRUCT__entry( + __field(uint32_t, queue_id) + __field(uint32_t, fence_id) + __field(int, ring) + __field(uint64_t, ticks) + __field(uint64_t, nsecs) + ), + TP_fast_assign( + __entry->queue_id = submit->queue->id; + __entry->fence_id = submit->fence; + __entry->ring = submit->ring; + __entry->ticks = ticks; + __entry->nsecs = nsecs; + ), + TP_printk( + "queue=%u fence=%u ring=%d ticks=%lld nsecs=%llu", + __entry->queue_id, __entry->fence_id, __entry->ring, + __entry->ticks, __entry->nsecs + ) +); + +TRACE_EVENT(msm_retired, + TP_PROTO(struct msm_gem_submit *submit, uint64_t start_ticks, + uint64_t retire_ticks), + TP_ARGS(submit, start_ticks, retire_ticks), + TP_STRUCT__entry( + __field(uint32_t, queue_id) + __field(uint32_t, fence_id) + __field(int, ring) + __field(uint64_t, start_ticks) + __field(uint64_t, retire_ticks) + ), + TP_fast_assign( + __entry->queue_id = submit->queue->id; + __entry->fence_id = submit->fence; + __entry->ring = submit->ring; + __entry->start_ticks = start_ticks; + __entry->retire_ticks = retire_ticks; + ), + TP_printk( + "queue=%u fence=%u ring=%d started=%lld retired=%lld", + __entry->queue_id, __entry->fence_id, __entry->ring, + __entry->start_ticks, __entry->retire_ticks + ) +); + + +#endif + +/* This part must be outside protection */ +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH . +#include + diff --git a/drivers/gpu/drm/msm/msm_trace_points.c b/drivers/gpu/drm/msm/msm_trace_points.c new file mode 100644 index 000000000000..41d9a975ac92 --- /dev/null +++ b/drivers/gpu/drm/msm/msm_trace_points.c @@ -0,0 +1,18 @@ +/* Copyright (c) 2017 The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include "msm_gem.h" +#include "msm_gpu.h" + +#define CREATE_TRACE_POINTS +#include "msm_trace.h" diff --git a/drivers/gpu/drm/msm/sde/sde_color_processing.c b/drivers/gpu/drm/msm/sde/sde_color_processing.c index ef7492817983..a0f6b5c6a732 100644 --- a/drivers/gpu/drm/msm/sde/sde_color_processing.c +++ b/drivers/gpu/drm/msm/sde/sde_color_processing.c @@ -344,8 +344,8 @@ static void sde_cp_crtc_install_immutable_property(struct drm_crtc *crtc, prop = priv->cp_property[feature]; if (!prop) { - prop = drm_property_create(crtc->dev, DRM_MODE_PROP_IMMUTABLE, - name, 0); + prop = drm_property_create_range(crtc->dev, + DRM_MODE_PROP_IMMUTABLE, name, 0, 1); if (!prop) { DRM_ERROR("property create failed: %s\n", name); kfree(prop_node); diff --git a/drivers/gpu/drm/msm/sde/sde_connector.c b/drivers/gpu/drm/msm/sde/sde_connector.c index 7538927a4993..5fa4c21060f9 100644 --- a/drivers/gpu/drm/msm/sde/sde_connector.c +++ b/drivers/gpu/drm/msm/sde/sde_connector.c @@ -17,6 +17,12 @@ #include "sde_connector.h" #include "sde_backlight.h" +#define SDE_DEBUG_CONN(c, fmt, ...) SDE_DEBUG("conn%d " fmt,\ + (c) ? (c)->base.base.id : -1, ##__VA_ARGS__) + +#define SDE_ERROR_CONN(c, fmt, ...) SDE_ERROR("conn%d " fmt,\ + (c) ? (c)->base.base.id : -1, ##__VA_ARGS__) + static const struct drm_prop_enum_list e_topology_name[] = { {SDE_RM_TOPOLOGY_UNKNOWN, "sde_unknown"}, {SDE_RM_TOPOLOGY_SINGLEPIPE, "sde_singlepipe"}, @@ -54,6 +60,80 @@ int sde_connector_get_info(struct drm_connector *connector, return c_conn->ops.get_info(info, c_conn->display); } +int sde_connector_pre_kickoff(struct drm_connector *connector) +{ + struct sde_connector *c_conn; + struct sde_connector_state *c_state; + struct msm_display_kickoff_params params; + int rc; + + if (!connector) { + SDE_ERROR("invalid argument\n"); + return -EINVAL; + } + + c_conn = to_sde_connector(connector); + c_state = to_sde_connector_state(connector->state); + + if (!c_conn->display) { + SDE_ERROR("invalid argument\n"); + return -EINVAL; + } + + if (!c_conn->ops.pre_kickoff) + return 0; + + params.hdr_ctrl = &c_state->hdr_ctrl; + + rc = c_conn->ops.pre_kickoff(connector, c_conn->display, ¶ms); + + return rc; +} + +enum sde_csc_type sde_connector_get_csc_type(struct drm_connector *conn) +{ + struct sde_connector *c_conn; + + if (!conn) { + SDE_ERROR("invalid argument\n"); + return -EINVAL; + } + + c_conn = to_sde_connector(conn); + + if (!c_conn->display) { + SDE_ERROR("invalid argument\n"); + return -EINVAL; + } + + if (!c_conn->ops.get_csc_type) + return SDE_CSC_RGB2YUV_601L; + + return c_conn->ops.get_csc_type(conn, c_conn->display); +} + +bool sde_connector_mode_needs_full_range(struct drm_connector *connector) +{ + struct sde_connector *c_conn; + + if (!connector) { + SDE_ERROR("invalid argument\n"); + return false; + } + + c_conn = to_sde_connector(connector); + + if (!c_conn->display) { + SDE_ERROR("invalid argument\n"); + return false; + } + + if (!c_conn->ops.mode_needs_full_range) + return false; + + return c_conn->ops.mode_needs_full_range(c_conn->display); +} + static void sde_connector_destroy(struct drm_connector *connector) { struct sde_connector *c_conn; @@ -70,7 +150,8 @@ static void sde_connector_destroy(struct drm_connector *connector) if (c_conn->blob_caps) drm_property_unreference_blob(c_conn->blob_caps); - + if (c_conn->blob_hdr) + drm_property_unreference_blob(c_conn->blob_hdr); msm_property_destroy(&c_conn->property_info); drm_connector_unregister(connector); @@ -204,6 +285,74 @@ sde_connector_atomic_duplicate_state(struct 
drm_connector *connector) return &c_state->base; } +static int _sde_connector_set_hdr_info( + struct sde_connector *c_conn, + struct sde_connector_state *c_state, + void *usr_ptr) +{ + struct drm_connector *connector; + struct drm_msm_ext_panel_hdr_ctrl *hdr_ctrl; + struct drm_msm_ext_panel_hdr_metadata *hdr_meta; + int i; + + if (!c_conn || !c_state) { + SDE_ERROR_CONN(c_conn, "invalid args\n"); + return -EINVAL; + } + + connector = &c_conn->base; + + if (!connector->hdr_supported) { + SDE_ERROR_CONN(c_conn, "sink doesn't support HDR\n"); + return -ENOTSUPP; + } + + memset(&c_state->hdr_ctrl, 0, sizeof(c_state->hdr_ctrl)); + + if (!usr_ptr) { + SDE_DEBUG_CONN(c_conn, "hdr control cleared\n"); + return 0; + } + + if (copy_from_user(&c_state->hdr_ctrl, + (void __user *)usr_ptr, + sizeof(*hdr_ctrl))) { + SDE_ERROR_CONN(c_conn, "failed to copy hdr control\n"); + return -EFAULT; + } + + hdr_ctrl = &c_state->hdr_ctrl; + + SDE_DEBUG_CONN(c_conn, "hdr_supported %d\n", + hdr_ctrl->hdr_state); + + hdr_meta = &hdr_ctrl->hdr_meta; + + SDE_DEBUG_CONN(c_conn, "hdr_supported %d\n", + hdr_meta->hdr_supported); + SDE_DEBUG_CONN(c_conn, "eotf %d\n", + hdr_meta->eotf); + SDE_DEBUG_CONN(c_conn, "white_point_x %d\n", + hdr_meta->white_point_x); + SDE_DEBUG_CONN(c_conn, "white_point_y %d\n", + hdr_meta->white_point_y); + SDE_DEBUG_CONN(c_conn, "max_luminance %d\n", + hdr_meta->max_luminance); + SDE_DEBUG_CONN(c_conn, "max_content_light_level %d\n", + hdr_meta->max_content_light_level); + SDE_DEBUG_CONN(c_conn, "max_average_light_level %d\n", + hdr_meta->max_average_light_level); + + for (i = 0; i < HDR_PRIMARIES_COUNT; i++) { + SDE_DEBUG_CONN(c_conn, "display_primaries_x [%d]\n", + hdr_meta->display_primaries_x[i]); + SDE_DEBUG_CONN(c_conn, "display_primaries_y [%d]\n", + hdr_meta->display_primaries_y[i]); + } + + return 0; +} + static int sde_connector_atomic_set_property(struct drm_connector *connector, struct drm_connector_state *state, struct drm_property *property, @@ -263,6 +412,12 @@ static int sde_connector_atomic_set_property(struct drm_connector *connector, SDE_ERROR("invalid topology_control: 0x%llX\n", val); } + if (idx == CONNECTOR_PROP_HDR_CONTROL) { + rc = _sde_connector_set_hdr_info(c_conn, c_state, (void *)val); + if (rc) + SDE_ERROR_CONN(c_conn, "cannot set hdr info %d\n", rc); + } + /* check for custom property handling */ if (!rc && c_conn->ops.set_property) { rc = c_conn->ops.set_property(connector, @@ -355,6 +510,32 @@ void sde_connector_complete_commit(struct drm_connector *connector) sde_fence_signal(&to_sde_connector(connector)->retire_fence, 0); } +static void sde_connector_update_hdr_props(struct drm_connector *connector) +{ + struct sde_connector *c_conn = to_sde_connector(connector); + struct drm_msm_ext_panel_hdr_properties hdr_prop = {}; + + hdr_prop.hdr_supported = connector->hdr_supported; + + if (hdr_prop.hdr_supported) { + hdr_prop.hdr_eotf = + connector->hdr_eotf; + hdr_prop.hdr_metadata_type_one = + connector->hdr_metadata_type_one; + hdr_prop.hdr_max_luminance = + connector->hdr_max_luminance; + hdr_prop.hdr_avg_luminance = + connector->hdr_avg_luminance; + hdr_prop.hdr_min_luminance = + connector->hdr_min_luminance; + } + msm_property_set_blob(&c_conn->property_info, + &c_conn->blob_hdr, + &hdr_prop, + sizeof(hdr_prop), + CONNECTOR_PROP_HDR_INFO); +} + static enum drm_connector_status sde_connector_detect(struct drm_connector *connector, bool force) { @@ -392,6 +573,7 @@ static const struct drm_connector_funcs sde_connector_ops = { static int 
sde_connector_get_modes(struct drm_connector *connector) { struct sde_connector *c_conn; + int ret = 0; if (!connector) { SDE_ERROR("invalid connector\n"); @@ -403,8 +585,11 @@ static int sde_connector_get_modes(struct drm_connector *connector) SDE_DEBUG("missing get_modes callback\n"); return 0; } + ret = c_conn->ops.get_modes(connector, c_conn->display); + if (ret) + sde_connector_update_hdr_props(connector); - return c_conn->ops.get_modes(connector, c_conn->display); + return ret; } static enum drm_mode_status @@ -463,6 +648,7 @@ struct drm_connector *sde_connector_init(struct drm_device *dev, struct sde_kms *sde_kms; struct sde_kms_info *info; struct sde_connector *c_conn = NULL; + struct sde_splash_info *sinfo; int rc; if (!dev || !dev->dev_private || !encoder) { @@ -575,6 +761,17 @@ struct drm_connector *sde_connector_init(struct drm_device *dev, kfree(info); } + if (connector_type == DRM_MODE_CONNECTOR_HDMIA) { + msm_property_install_blob(&c_conn->property_info, + "hdr_properties", + DRM_MODE_PROP_IMMUTABLE, + CONNECTOR_PROP_HDR_INFO); + } + + msm_property_install_volatile_range(&c_conn->property_info, + "hdr_control", 0x0, 0, ~0, 0, + CONNECTOR_PROP_HDR_CONTROL); + msm_property_install_range(&c_conn->property_info, "RETIRE_FENCE", 0x0, 0, INR_OPEN_MAX, 0, CONNECTOR_PROP_RETIRE_FENCE); @@ -582,15 +779,19 @@ struct drm_connector *sde_connector_init(struct drm_device *dev, "PLL_DELTA", 0x0, INT_MIN, INT_MAX, 0, CONNECTOR_PROP_PLL_DELTA); + msm_property_install_volatile_range(&c_conn->property_info, + "PLL_ENABLE", 0x0, 0, 1, 0, + CONNECTOR_PROP_PLL_ENABLE); + /* enum/bitmask properties */ msm_property_install_enum(&c_conn->property_info, "topology_name", DRM_MODE_PROP_IMMUTABLE, 0, e_topology_name, ARRAY_SIZE(e_topology_name), - CONNECTOR_PROP_TOPOLOGY_NAME); + CONNECTOR_PROP_TOPOLOGY_NAME, 0); msm_property_install_enum(&c_conn->property_info, "topology_control", 0, 1, e_topology_control, ARRAY_SIZE(e_topology_control), - CONNECTOR_PROP_TOPOLOGY_CONTROL); + CONNECTOR_PROP_TOPOLOGY_CONTROL, 0); rc = msm_property_install_get_status(&c_conn->property_info); if (rc) { @@ -601,6 +802,10 @@ struct drm_connector *sde_connector_init(struct drm_device *dev, SDE_DEBUG("connector %d attach encoder %d\n", c_conn->base.base.id, encoder->base.id); + sinfo = &sde_kms->splash_info; + if (sinfo && sinfo->handoff) + sde_splash_setup_connector_count(sinfo, connector_type); + priv->connectors[priv->num_connectors++] = &c_conn->base; return &c_conn->base; @@ -608,6 +813,8 @@ struct drm_connector *sde_connector_init(struct drm_device *dev, error_destroy_property: if (c_conn->blob_caps) drm_property_unreference_blob(c_conn->blob_caps); + if (c_conn->blob_hdr) + drm_property_unreference_blob(c_conn->blob_hdr); msm_property_destroy(&c_conn->property_info); error_unregister_conn: drm_connector_unregister(&c_conn->base); diff --git a/drivers/gpu/drm/msm/sde/sde_connector.h b/drivers/gpu/drm/msm/sde/sde_connector.h index 3f26ee7d5965..b76ce0aaf577 100644 --- a/drivers/gpu/drm/msm/sde/sde_connector.h +++ b/drivers/gpu/drm/msm/sde/sde_connector.h @@ -122,6 +122,37 @@ struct sde_connector_ops { int (*get_info)(struct msm_display_info *info, void *display); int (*set_backlight)(void *display, u32 bl_lvl); + + + /** + * pre_kickoff - trigger display to program kickoff-time features + * @connector: Pointer to drm connector structure + * @display: Pointer to private display structure + * @params: Parameter bundle of connector-stored information for + * kickoff-time programming into the display + * Returns: Zero on 
success + */ + int (*pre_kickoff)(struct drm_connector *connector, + void *display, + struct msm_display_kickoff_params *params); + + /** + * mode_needs_full_range - does the mode need full range + * quantization + * @display: Pointer to private display structure + * Returns: true or false based on whether full range is needed + */ + bool (*mode_needs_full_range)(void *display); + + /** + * get_csc_type - returns the CSC type to be used + * by the CDM block based on HDR state + * @connector: Pointer to drm connector structure + * @display: Pointer to private display structure + * Returns: type of CSC matrix to be used + */ + enum sde_csc_type (*get_csc_type)(struct drm_connector *connector, + void *display); }; /** @@ -139,6 +170,7 @@ struct sde_connector_ops { * @property_info: Private structure for generic property handling * @property_data: Array of private data for generic property handling * @blob_caps: Pointer to blob structure for 'capabilities' property + * @blob_hdr: Pointer to blob structure for 'hdr_properties' property */ struct sde_connector { struct drm_connector base; @@ -159,6 +191,7 @@ struct sde_connector { struct msm_property_info property_info; struct msm_property_data property_data[CONNECTOR_PROP_COUNT]; struct drm_property_blob *blob_caps; + struct drm_property_blob *blob_hdr; }; /** @@ -206,12 +239,14 @@ struct sde_connector { * @out_fb: Pointer to output frame buffer, if applicable * @aspace: Address space for accessing frame buffer objects, if applicable * @property_values: Local cache of current connector property values + * @hdr_ctrl: HDR control info passed from userspace */ struct sde_connector_state { struct drm_connector_state base; struct drm_framebuffer *out_fb; struct msm_gem_address_space *aspace; uint64_t property_values[CONNECTOR_PROP_COUNT]; + struct drm_msm_ext_panel_hdr_ctrl hdr_ctrl; }; /** @@ -303,5 +338,28 @@ void sde_connector_complete_commit(struct drm_connector *connector); int sde_connector_get_info(struct drm_connector *connector, struct msm_display_info *info); +/** + * sde_connector_pre_kickoff - trigger kickoff time feature programming + * @connector: Pointer to drm connector object + * Returns: Zero on success + */ +int sde_connector_pre_kickoff(struct drm_connector *connector); + +/** + * sde_connector_mode_needs_full_range - query quantization type + * for the connector mode + * @connector: Pointer to drm connector object + * Returns: true OR false based on connector mode + */ +bool sde_connector_mode_needs_full_range(struct drm_connector *connector); + +/** + * sde_connector_get_csc_type - query csc type + * to be used for the connector + * @connector: Pointer to drm connector object + * Returns: csc type based on connector HDR state + */ +enum sde_csc_type sde_connector_get_csc_type(struct drm_connector *conn); + #endif /* _SDE_CONNECTOR_H_ */ diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.c b/drivers/gpu/drm/msm/sde/sde_crtc.c index cb5f7d3cf19f..2e9e2192670d 100644 --- a/drivers/gpu/drm/msm/sde/sde_crtc.c +++ b/drivers/gpu/drm/msm/sde/sde_crtc.c @@ -36,6 +36,7 @@ #include "sde_connector.h" #include "sde_power_handle.h" #include "sde_core_perf.h" +#include "sde_trace.h" /* default input fence timeout, in ms */ #define SDE_CRTC_INPUT_FENCE_TIMEOUT 2000 @@ -56,7 +57,17 @@ static inline struct sde_kms *_sde_crtc_get_kms(struct drm_crtc *crtc) { - struct msm_drm_private *priv = crtc->dev->dev_private; + struct msm_drm_private *priv; + + if (!crtc || !crtc->dev || !crtc->dev->dev_private) { + SDE_ERROR("invalid crtc\n"); + return NULL; + } 
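+	/* dev_private was checked above; the kms handle is validated next
+	 * before it is dereferenced.
+	 */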
+ priv = crtc->dev->dev_private; + if (!priv || !priv->kms) { + SDE_ERROR("invalid kms\n"); + return NULL; + } return to_sde_kms(priv->kms); } @@ -76,10 +87,10 @@ static void sde_crtc_destroy(struct drm_crtc *crtc) sde_cp_crtc_destroy_properties(crtc); debugfs_remove_recursive(sde_crtc->debugfs_root); - mutex_destroy(&sde_crtc->crtc_lock); sde_fence_deinit(&sde_crtc->output_fence); drm_crtc_cleanup(crtc); + mutex_destroy(&sde_crtc->crtc_lock); kfree(sde_crtc); } @@ -367,12 +378,6 @@ void sde_crtc_prepare_commit(struct drm_crtc *crtc, cstate->is_rt = true; } - if (cstate->num_connectors > 0 && cstate->connectors[0]->encoder) - cstate->intf_mode = sde_encoder_get_intf_mode( - cstate->connectors[0]->encoder); - else - cstate->intf_mode = INTF_MODE_NONE; - /* prepare main output fence */ sde_fence_prepare(&sde_crtc->output_fence); } @@ -423,6 +428,22 @@ static void _sde_crtc_complete_flip(struct drm_crtc *crtc, spin_unlock_irqrestore(&dev->event_lock, flags); } +enum sde_intf_mode sde_crtc_get_intf_mode(struct drm_crtc *crtc) +{ + struct drm_encoder *encoder; + + if (!crtc || !crtc->dev) { + SDE_ERROR("invalid crtc\n"); + return INTF_MODE_NONE; + } + + drm_for_each_encoder(encoder, crtc->dev) + if (encoder->crtc == crtc) + return sde_encoder_get_intf_mode(encoder); + + return INTF_MODE_NONE; +} + static void sde_crtc_vblank_cb(void *data) { struct drm_crtc *crtc = (struct drm_crtc *)data; @@ -579,14 +600,23 @@ void sde_crtc_complete_commit(struct drm_crtc *crtc, { struct sde_crtc *sde_crtc; struct sde_crtc_state *cstate; + struct drm_connector *conn; + struct sde_connector *c_conn; + struct drm_device *dev; + struct msm_drm_private *priv; + struct sde_kms *sde_kms; int i; - if (!crtc || !crtc->state) { + if (!crtc || !crtc->state || !crtc->dev) { SDE_ERROR("invalid crtc\n"); return; } + dev = crtc->dev; + priv = dev->dev_private; + sde_crtc = to_sde_crtc(crtc); + sde_kms = _sde_crtc_get_kms(crtc); cstate = to_sde_crtc_state(crtc->state); SDE_EVT32(DRMID(crtc)); @@ -595,6 +625,22 @@ void sde_crtc_complete_commit(struct drm_crtc *crtc, for (i = 0; i < cstate->num_connectors; ++i) sde_connector_complete_commit(cstate->connectors[i]); + + if (sde_splash_get_lk_complete_status(&sde_kms->splash_info)) { + mutex_lock(&dev->mode_config.mutex); + drm_for_each_connector(conn, crtc->dev) { + if (conn->state->crtc != crtc) + continue; + + c_conn = to_sde_connector(conn); + + sde_splash_clean_up_free_resource(priv->kms, + &priv->phandle, + c_conn->connector_type, + c_conn->display); + } + mutex_unlock(&dev->mode_config.mutex); + } } /** @@ -642,6 +688,7 @@ static void _sde_crtc_wait_for_fences(struct drm_crtc *crtc) * that each plane can check its fence status and react appropriately * if its fence has timed out. 
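 *
 * The wait loop is now bracketed by SDE_ATRACE_BEGIN/END
 * ("plane_wait_input_fence"), so the total time spent blocking on input
 * fences shows up as one trace section.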
*/ + SDE_ATRACE_BEGIN("plane_wait_input_fence"); drm_atomic_crtc_for_each_plane(plane, crtc) { if (wait_ms) { /* determine updated wait time */ @@ -653,6 +700,7 @@ static void _sde_crtc_wait_for_fences(struct drm_crtc *crtc) } sde_plane_wait_input_fence(plane, wait_ms); } + SDE_ATRACE_END("plane_wait_input_fence"); } static void _sde_crtc_setup_mixer_for_encoder( @@ -887,6 +935,7 @@ void sde_crtc_commit_kickoff(struct drm_crtc *crtc) sde_kms = _sde_crtc_get_kms(crtc); priv = sde_kms->dev->dev_private; + SDE_ATRACE_BEGIN("crtc_commit"); list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { if (encoder->crtc != crtc) continue; @@ -903,7 +952,7 @@ void sde_crtc_commit_kickoff(struct drm_crtc *crtc) SDE_ERROR("crtc%d invalid frame pending\n", crtc->base.id); SDE_EVT32(DRMID(crtc), 0); - return; + goto end; } else if (atomic_inc_return(&sde_crtc->frame_pending) == 1) { /* acquire bandwidth and other resources */ SDE_DEBUG("crtc%d first commit\n", crtc->base.id); @@ -921,6 +970,115 @@ void sde_crtc_commit_kickoff(struct drm_crtc *crtc) sde_encoder_kickoff(encoder); } +end: + SDE_ATRACE_END("crtc_commit"); + return; +} + +/** + * _sde_crtc_vblank_enable_nolock - update power resource and vblank request + * @sde_crtc: Pointer to sde crtc structure + * @enable: Whether to enable/disable vblanks + */ +static void _sde_crtc_vblank_enable_nolock( + struct sde_crtc *sde_crtc, bool enable) +{ + struct drm_device *dev; + struct drm_crtc *crtc; + struct drm_encoder *enc; + struct msm_drm_private *priv; + struct sde_kms *sde_kms; + + if (!sde_crtc) { + SDE_ERROR("invalid crtc\n"); + return; + } + + crtc = &sde_crtc->base; + dev = crtc->dev; + priv = dev->dev_private; + + if (!priv->kms) { + SDE_ERROR("invalid kms\n"); + return; + } + sde_kms = to_sde_kms(priv->kms); + + if (enable) { + sde_power_resource_enable(&priv->phandle, + sde_kms->core_client, true); + list_for_each_entry(enc, &dev->mode_config.encoder_list, head) { + if (enc->crtc != crtc) + continue; + + SDE_EVT32(DRMID(crtc), DRMID(enc), enable); + + sde_encoder_register_vblank_callback(enc, + sde_crtc_vblank_cb, (void *)crtc); + } + } else { + list_for_each_entry(enc, &dev->mode_config.encoder_list, head) { + if (enc->crtc != crtc) + continue; + + SDE_EVT32(DRMID(crtc), DRMID(enc), enable); + + sde_encoder_register_vblank_callback(enc, NULL, NULL); + } + sde_power_resource_enable(&priv->phandle, + sde_kms->core_client, false); + } +} + +/** + * _sde_crtc_set_suspend - notify crtc of suspend enable/disable + * @crtc: Pointer to drm crtc object + * @enable: true to enable suspend, false to indicate resume + */ +static void _sde_crtc_set_suspend(struct drm_crtc *crtc, bool enable) +{ + struct sde_crtc *sde_crtc; + struct msm_drm_private *priv; + struct sde_kms *sde_kms; + + if (!crtc || !crtc->dev || !crtc->dev->dev_private) { + SDE_ERROR("invalid crtc\n"); + return; + } + sde_crtc = to_sde_crtc(crtc); + priv = crtc->dev->dev_private; + + if (!priv->kms) { + SDE_ERROR("invalid crtc kms\n"); + return; + } + sde_kms = to_sde_kms(priv->kms); + + SDE_DEBUG("crtc%d suspend = %d\n", crtc->base.id, enable); + + mutex_lock(&sde_crtc->crtc_lock); + + /* + * Update CP on suspend/resume transitions + */ + if (enable && !sde_crtc->suspend) + sde_cp_crtc_suspend(crtc); + else if (!enable && sde_crtc->suspend) + sde_cp_crtc_resume(crtc); + + /* + * If the vblank refcount != 0, release a power reference on suspend + * and take it back during resume (if it is still != 0). 
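+	 * In other words, suspend (enable == true) releases the vblank power
+	 * vote and callbacks without touching vblank_refcount, while resume
+	 * re-acquires them only if userspace still holds vblank references.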
+ */ + if (sde_crtc->suspend == enable) + SDE_DEBUG("crtc%d suspend already set to %d, ignoring update\n", + crtc->base.id, enable); + else if (atomic_read(&sde_crtc->vblank_refcount) != 0) + _sde_crtc_vblank_enable_nolock(sde_crtc, !enable); + + sde_crtc->suspend = enable; + + mutex_unlock(&sde_crtc->crtc_lock); } /** @@ -973,6 +1131,10 @@ static void sde_crtc_reset(struct drm_crtc *crtc) return; } + /* revert suspend actions, if necessary */ + if (msm_is_suspend_state(crtc->dev)) + _sde_crtc_set_suspend(crtc, false); + /* remove previous state, if present */ if (crtc->state) { sde_crtc_destroy_state(crtc, crtc->state); @@ -996,37 +1158,67 @@ static void sde_crtc_reset(struct drm_crtc *crtc) crtc->state = &cstate->base; } +static int _sde_crtc_vblank_no_lock(struct sde_crtc *sde_crtc, bool en) +{ + if (!sde_crtc) { + SDE_ERROR("invalid crtc\n"); + return -EINVAL; + } else if (en && atomic_inc_return(&sde_crtc->vblank_refcount) == 1) { + SDE_DEBUG("crtc%d vblank enable\n", sde_crtc->base.base.id); + if (!sde_crtc->suspend) + _sde_crtc_vblank_enable_nolock(sde_crtc, true); + } else if (!en && atomic_read(&sde_crtc->vblank_refcount) < 1) { + SDE_ERROR("crtc%d invalid vblank disable\n", + sde_crtc->base.base.id); + return -EINVAL; + } else if (!en && atomic_dec_return(&sde_crtc->vblank_refcount) == 0) { + SDE_DEBUG("crtc%d vblank disable\n", sde_crtc->base.base.id); + if (!sde_crtc->suspend) + _sde_crtc_vblank_enable_nolock(sde_crtc, false); + } else { + SDE_DEBUG("crtc%d vblank %s refcount:%d\n", + sde_crtc->base.base.id, + en ? "enable" : "disable", + atomic_read(&sde_crtc->vblank_refcount)); + } + + return 0; +} + static void sde_crtc_disable(struct drm_crtc *crtc) { - struct msm_drm_private *priv; - struct sde_crtc *sde_crtc; struct drm_encoder *encoder; + struct sde_crtc *sde_crtc; struct sde_kms *sde_kms; + struct msm_drm_private *priv; - if (!crtc) { + if (!crtc || !crtc->dev || !crtc->state) { SDE_ERROR("invalid crtc\n"); return; } sde_crtc = to_sde_crtc(crtc); sde_kms = _sde_crtc_get_kms(crtc); + if (!sde_kms || !sde_kms->dev || !sde_kms->dev->dev_private) { + SDE_ERROR("invalid kms handle\n"); + return; + } priv = sde_kms->dev->dev_private; SDE_DEBUG("crtc%d\n", crtc->base.id); + if (msm_is_suspend_state(crtc->dev)) + _sde_crtc_set_suspend(crtc, true); + mutex_lock(&sde_crtc->crtc_lock); SDE_EVT32(DRMID(crtc)); - if (atomic_read(&sde_crtc->vblank_refcount)) { + if (atomic_read(&sde_crtc->vblank_refcount) && !sde_crtc->suspend) { SDE_ERROR("crtc%d invalid vblank refcount\n", crtc->base.id); - SDE_EVT32(DRMID(crtc)); - drm_for_each_encoder(encoder, crtc->dev) { - if (encoder->crtc != crtc) - continue; - sde_encoder_register_vblank_callback(encoder, NULL, - NULL); - } - atomic_set(&sde_crtc->vblank_refcount, 0); + SDE_EVT32(DRMID(crtc), atomic_read(&sde_crtc->vblank_refcount)); + while (atomic_read(&sde_crtc->vblank_refcount)) + if (_sde_crtc_vblank_no_lock(sde_crtc, false)) + break; } if (atomic_read(&sde_crtc->frame_pending)) { @@ -1245,40 +1437,20 @@ end: int sde_crtc_vblank(struct drm_crtc *crtc, bool en) { - struct sde_crtc *sde_crtc = to_sde_crtc(crtc); - struct drm_encoder *encoder; - struct drm_device *dev = crtc->dev; + struct sde_crtc *sde_crtc; + int rc; - if (en && atomic_inc_return(&sde_crtc->vblank_refcount) == 1) { - SDE_DEBUG("crtc%d vblank enable\n", crtc->base.id); - } else if (!en && atomic_read(&sde_crtc->vblank_refcount) < 1) { - SDE_ERROR("crtc%d invalid vblank disable\n", crtc->base.id); + if (!crtc) { + SDE_ERROR("invalid crtc\n"); return -EINVAL; - } else if 
(!en && atomic_dec_return(&sde_crtc->vblank_refcount) == 0) { - SDE_DEBUG("crtc%d vblank disable\n", crtc->base.id); - } else { - SDE_DEBUG("crtc%d vblank %s refcount:%d\n", - crtc->base.id, - en ? "enable" : "disable", - atomic_read(&sde_crtc->vblank_refcount)); - return 0; } + sde_crtc = to_sde_crtc(crtc); - list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { - if (encoder->crtc != crtc) - continue; + mutex_lock(&sde_crtc->crtc_lock); + rc = _sde_crtc_vblank_no_lock(sde_crtc, en); + mutex_unlock(&sde_crtc->crtc_lock); - SDE_EVT32(DRMID(crtc), en); - - if (en) - sde_encoder_register_vblank_callback(encoder, - sde_crtc_vblank_cb, (void *)crtc); - else - sde_encoder_register_vblank_callback(encoder, NULL, - NULL); - } - - return 0; + return rc; } void sde_crtc_cancel_pending_flip(struct drm_crtc *crtc, @@ -1354,7 +1526,7 @@ static void sde_crtc_install_properties(struct drm_crtc *crtc, msm_property_install_enum(&sde_crtc->property_info, "security_level", 0x0, 0, e_secure_level, ARRAY_SIZE(e_secure_level), - CRTC_PROP_SECURITY_LEVEL); + CRTC_PROP_SECURITY_LEVEL, SDE_DRM_SEC_NON_SEC); sde_kms_info_reset(info); @@ -1368,6 +1540,7 @@ static void sde_crtc_install_properties(struct drm_crtc *crtc, if (catalog->qseed_type == SDE_SSPP_SCALER_QSEED3) sde_kms_info_add_keystr(info, "qseed_type", "qseed3"); sde_kms_info_add_keyint(info, "has_src_split", catalog->has_src_split); + sde_kms_info_add_keyint(info, "has_hdr", catalog->has_hdr); if (catalog->perf.max_bw_low) sde_kms_info_add_keyint(info, "max_bandwidth_low", catalog->perf.max_bw_low); @@ -1658,7 +1831,7 @@ static int sde_crtc_debugfs_state_show(struct seq_file *s, void *v) seq_printf(s, "num_connectors: %d\n", cstate->num_connectors); seq_printf(s, "is_rt: %d\n", cstate->is_rt); - seq_printf(s, "intf_mode: %d\n", cstate->intf_mode); + seq_printf(s, "intf_mode: %d\n", sde_crtc_get_intf_mode(crtc)); seq_printf(s, "bw_ctl: %llu\n", cstate->cur_perf.bw_ctl); seq_printf(s, "core_clk_rate: %u\n", cstate->cur_perf.core_clk_rate); seq_printf(s, "max_per_pipe_ib: %llu\n", @@ -1721,6 +1894,7 @@ struct drm_crtc *sde_crtc_init(struct drm_device *dev, crtc->dev = dev; atomic_set(&sde_crtc->vblank_refcount, 0); + mutex_init(&sde_crtc->crtc_lock); spin_lock_init(&sde_crtc->spin_lock); atomic_set(&sde_crtc->frame_pending, 0); @@ -1742,7 +1916,6 @@ struct drm_crtc *sde_crtc_init(struct drm_device *dev, snprintf(sde_crtc->name, SDE_CRTC_NAME_SIZE, "crtc%u", crtc->base.id); /* initialize output fence support */ - mutex_init(&sde_crtc->crtc_lock); sde_fence_init(&sde_crtc->output_fence, sde_crtc->name, crtc->base.id); /* initialize debugfs support */ diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.h b/drivers/gpu/drm/msm/sde/sde_crtc.h index 97a20b987ef5..6b8483d574b1 100644 --- a/drivers/gpu/drm/msm/sde/sde_crtc.h +++ b/drivers/gpu/drm/msm/sde/sde_crtc.h @@ -82,6 +82,7 @@ struct sde_crtc_frame_event { * @vblank_cb_count : count of vblank callback since last reset * @vblank_cb_time : ktime at vblank count reset * @vblank_refcount : reference count for vblank enable request + * @suspend : whether or not a suspend operation is in progress * @feature_list : list of color processing features supported on a crtc * @active_list : list of color processing features are active * @dirty_list : list of color processing features are dirty @@ -117,6 +118,7 @@ struct sde_crtc { u32 vblank_cb_count; ktime_t vblank_cb_time; atomic_t vblank_refcount; + bool suspend; struct list_head feature_list; struct list_head active_list; @@ -246,16 +248,10 @@ void 
sde_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file); bool sde_crtc_is_rt(struct drm_crtc *crtc); /** - * sde_crtc_get_intf_mode - get interface mode of the given crtc + * sde_crtc_get_intf_mode - get primary interface mode of the given crtc * @crtc: Pointert to crtc */ -static inline enum sde_intf_mode sde_crtc_get_intf_mode(struct drm_crtc *crtc) -{ - struct sde_crtc_state *cstate = - crtc ? to_sde_crtc_state(crtc->state) : NULL; - - return cstate ? cstate->intf_mode : INTF_MODE_NONE; -} +enum sde_intf_mode sde_crtc_get_intf_mode(struct drm_crtc *crtc); /** * sde_core_perf_crtc_is_wb - check if writeback is primary output of this crtc diff --git a/drivers/gpu/drm/msm/sde/sde_encoder.c b/drivers/gpu/drm/msm/sde/sde_encoder.c index 030b192e5df4..23fb79241d84 100644 --- a/drivers/gpu/drm/msm/sde/sde_encoder.c +++ b/drivers/gpu/drm/msm/sde/sde_encoder.c @@ -32,6 +32,7 @@ #include "sde_formats.h" #include "sde_encoder_phys.h" #include "sde_color_processing.h" +#include "sde_trace.h" #define SDE_DEBUG_ENC(e, fmt, ...) SDE_DEBUG("enc%d " fmt,\ (e) ? (e)->base.base.id : -1, ##__VA_ARGS__) @@ -54,6 +55,69 @@ #define MAX_CHANNELS_PER_ENC 2 +/* rgb to yuv color space conversion matrix */ +static struct sde_csc_cfg sde_csc_10bit_convert[SDE_MAX_CSC] = { + [SDE_CSC_RGB2YUV_601L] = { + { + TO_S15D16(0x0083), TO_S15D16(0x0102), TO_S15D16(0x0032), + TO_S15D16(0xffb4), TO_S15D16(0xff6b), TO_S15D16(0x00e1), + TO_S15D16(0x00e1), TO_S15D16(0xff44), TO_S15D16(0xffdb), + }, + { 0x0, 0x0, 0x0,}, + { 0x0040, 0x0200, 0x0200,}, + { 0x0, 0x3ff, 0x0, 0x3ff, 0x0, 0x3ff,}, + { 0x0040, 0x03ac, 0x0040, 0x03c0, 0x0040, 0x03c0,}, + }, + + [SDE_CSC_RGB2YUV_601FR] = { + { + TO_S15D16(0x0099), TO_S15D16(0x012d), TO_S15D16(0x003a), + TO_S15D16(0xffaa), TO_S15D16(0xff56), TO_S15D16(0x0100), + TO_S15D16(0x0100), TO_S15D16(0xff2a), TO_S15D16(0xffd6), + }, + { 0x0, 0x0, 0x0,}, + { 0x0000, 0x0200, 0x0200,}, + { 0x0, 0x3ff, 0x0, 0x3ff, 0x0, 0x3ff,}, + { 0x0, 0x3ff, 0x0, 0x3ff, 0x0, 0x3ff,}, + }, + + [SDE_CSC_RGB2YUV_709L] = { + { + TO_S15D16(0x005d), TO_S15D16(0x013a), TO_S15D16(0x0020), + TO_S15D16(0xffcc), TO_S15D16(0xff53), TO_S15D16(0x00e1), + TO_S15D16(0x00e1), TO_S15D16(0xff34), TO_S15D16(0xffeb), + }, + { 0x0, 0x0, 0x0,}, + { 0x0040, 0x0200, 0x0200,}, + { 0x0, 0x3ff, 0x0, 0x3ff, 0x0, 0x3ff,}, + { 0x0040, 0x03ac, 0x0040, 0x03c0, 0x0040, 0x03c0,}, + }, + + [SDE_CSC_RGB2YUV_2020L] = { + { + TO_S15D16(0x0073), TO_S15D16(0x0129), TO_S15D16(0x001a), + TO_S15D16(0xffc1), TO_S15D16(0xff5e), TO_S15D16(0x00e0), + TO_S15D16(0x00e0), TO_S15D16(0xff32), TO_S15D16(0xffee), + }, + { 0x0, 0x0, 0x0,}, + { 0x0040, 0x0200, 0x0200,}, + { 0x0, 0x3ff, 0x0, 0x3ff, 0x0, 0x3ff,}, + { 0x0040, 0x03ac, 0x0040, 0x03c0, 0x0040, 0x03c0,}, + }, + + [SDE_CSC_RGB2YUV_2020FR] = { + { + TO_S15D16(0x0086), TO_S15D16(0x015b), TO_S15D16(0x001e), + TO_S15D16(0xffb9), TO_S15D16(0xff47), TO_S15D16(0x0100), + TO_S15D16(0x0100), TO_S15D16(0xff15), TO_S15D16(0xffeb), + }, + { 0x0, 0x0, 0x0,}, + { 0x0, 0x0200, 0x0200,}, + { 0x0, 0x3ff, 0x0, 0x3ff, 0x0, 0x3ff,}, + { 0x0, 0x3ff, 0x0, 0x3ff, 0x0, 0x3ff,}, + }, +}; + /** * struct sde_encoder_virt - virtual encoder. Container of one or more physical * encoders. Virtual encoder manages one "logical" display. 
Physical @@ -514,6 +578,7 @@ static void sde_encoder_vblank_callback(struct drm_encoder *drm_enc, if (!drm_enc || !phy_enc) return; + SDE_ATRACE_BEGIN("encoder_vblank_callback"); sde_enc = to_sde_encoder_virt(drm_enc); spin_lock_irqsave(&sde_enc->enc_spinlock, lock_flags); @@ -522,6 +587,7 @@ static void sde_encoder_vblank_callback(struct drm_encoder *drm_enc, spin_unlock_irqrestore(&sde_enc->enc_spinlock, lock_flags); atomic_inc(&phy_enc->vsync_cnt); + SDE_ATRACE_END("encoder_vblank_callback"); } static void sde_encoder_underrun_callback(struct drm_encoder *drm_enc, @@ -530,8 +596,10 @@ static void sde_encoder_underrun_callback(struct drm_encoder *drm_enc, if (!phy_enc) return; + SDE_ATRACE_BEGIN("encoder_underrun_callback"); atomic_inc(&phy_enc->underrun_cnt); SDE_EVT32(DRMID(drm_enc), atomic_read(&phy_enc->underrun_cnt)); + SDE_ATRACE_END("encoder_underrun_callback"); } void sde_encoder_register_vblank_callback(struct drm_encoder *drm_enc, @@ -794,7 +862,13 @@ void sde_encoder_prepare_for_kickoff(struct drm_encoder *drm_enc) { struct sde_encoder_virt *sde_enc; struct sde_encoder_phys *phys; + struct drm_connector *conn_mas = NULL; unsigned int i; + enum sde_csc_type conn_csc; + struct drm_display_mode *mode; + struct sde_hw_cdm *hw_cdm; + int mode_is_yuv = 0; + int rc; if (!drm_enc) { SDE_ERROR("invalid encoder\n"); @@ -811,6 +885,49 @@ void sde_encoder_prepare_for_kickoff(struct drm_encoder *drm_enc) if (phys && phys->ops.prepare_for_kickoff) phys->ops.prepare_for_kickoff(phys); } + + if (sde_enc->cur_master && sde_enc->cur_master->connector) { + conn_mas = sde_enc->cur_master->connector; + rc = sde_connector_pre_kickoff(conn_mas); + if (rc) + SDE_ERROR_ENC(sde_enc, + "kickoff conn%d failed rc %d\n", + conn_mas->base.id, + rc); + + for (i = 0; i < sde_enc->num_phys_encs; i++) { + phys = sde_enc->phys_encs[i]; + if (phys) { + mode = &phys->cached_mode; + mode_is_yuv = (mode->private_flags & + MSM_MODE_FLAG_COLOR_FORMAT_YCBCR420); + } + /** + * Check the CSC matrix type to which the + * CDM CSC matrix should be updated to based + * on the connector HDR state + */ + conn_csc = sde_connector_get_csc_type(conn_mas); + if (phys && mode_is_yuv) { + if (phys->enc_cdm_csc != conn_csc) { + hw_cdm = phys->hw_cdm; + rc = hw_cdm->ops.setup_csc_data(hw_cdm, + &sde_csc_10bit_convert[conn_csc]); + + if (rc) + SDE_ERROR_ENC(sde_enc, + "CSC setup failed rc %d\n", + rc); + SDE_DEBUG_ENC(sde_enc, + "updating CSC %d to %d\n", + phys->enc_cdm_csc, + conn_csc); + phys->enc_cdm_csc = conn_csc; + + } + } + } + } } void sde_encoder_kickoff(struct drm_encoder *drm_enc) @@ -823,6 +940,7 @@ void sde_encoder_kickoff(struct drm_encoder *drm_enc) SDE_ERROR("invalid encoder\n"); return; } + SDE_ATRACE_BEGIN("encoder_kickoff"); sde_enc = to_sde_encoder_virt(drm_enc); SDE_DEBUG_ENC(sde_enc, "\n"); @@ -842,6 +960,7 @@ void sde_encoder_kickoff(struct drm_encoder *drm_enc) if (phys && phys->ops.handle_post_kickoff) phys->ops.handle_post_kickoff(phys); } + SDE_ATRACE_END("encoder_kickoff"); } static int _sde_encoder_status_show(struct seq_file *s, void *data) @@ -1358,3 +1477,128 @@ enum sde_intf_mode sde_encoder_get_intf_mode(struct drm_encoder *encoder) return INTF_MODE_NONE; } + +/** + * sde_encoder_phys_setup_cdm - setup chroma down block + * @phys_enc: Pointer to physical encoder + * @output_type: HDMI/WB + * @format: Output format + * @roi: Output size + */ +void sde_encoder_phys_setup_cdm(struct sde_encoder_phys *phys_enc, + const struct sde_format *format, u32 output_type, + struct sde_rect *roi) +{ + struct 
drm_encoder *encoder = phys_enc->parent; + struct sde_encoder_virt *sde_enc = NULL; + struct sde_hw_cdm *hw_cdm = phys_enc->hw_cdm; + struct sde_hw_cdm_cfg *cdm_cfg = &phys_enc->cdm_cfg; + struct drm_connector *connector = phys_enc->connector; + int ret; + u32 csc_type = 0; + + if (!encoder) { + SDE_ERROR("invalid encoder\n"); + return; + } + sde_enc = to_sde_encoder_virt(encoder); + + if (!SDE_FORMAT_IS_YUV(format)) { + SDE_DEBUG_ENC(sde_enc, "[cdm_disable fmt:%x]\n", + format->base.pixel_format); + + if (hw_cdm && hw_cdm->ops.disable) + hw_cdm->ops.disable(hw_cdm); + + return; + } + + memset(cdm_cfg, 0, sizeof(struct sde_hw_cdm_cfg)); + + cdm_cfg->output_width = roi->w; + cdm_cfg->output_height = roi->h; + cdm_cfg->output_fmt = format; + cdm_cfg->output_type = output_type; + cdm_cfg->output_bit_depth = SDE_FORMAT_IS_DX(format) ? + CDM_CDWN_OUTPUT_10BIT : CDM_CDWN_OUTPUT_8BIT; + + /* enable 10 bit logic */ + switch (cdm_cfg->output_fmt->chroma_sample) { + case SDE_CHROMA_RGB: + cdm_cfg->h_cdwn_type = CDM_CDWN_DISABLE; + cdm_cfg->v_cdwn_type = CDM_CDWN_DISABLE; + break; + case SDE_CHROMA_H2V1: + cdm_cfg->h_cdwn_type = CDM_CDWN_COSITE; + cdm_cfg->v_cdwn_type = CDM_CDWN_DISABLE; + break; + case SDE_CHROMA_420: + cdm_cfg->h_cdwn_type = CDM_CDWN_COSITE; + cdm_cfg->v_cdwn_type = CDM_CDWN_OFFSITE; + break; + case SDE_CHROMA_H1V2: + default: + SDE_ERROR("unsupported chroma sampling type\n"); + cdm_cfg->h_cdwn_type = CDM_CDWN_DISABLE; + cdm_cfg->v_cdwn_type = CDM_CDWN_DISABLE; + break; + } + + SDE_DEBUG_ENC(sde_enc, "[cdm_enable:%d,%d,%X,%d,%d,%d,%d]\n", + cdm_cfg->output_width, + cdm_cfg->output_height, + cdm_cfg->output_fmt->base.pixel_format, + cdm_cfg->output_type, + cdm_cfg->output_bit_depth, + cdm_cfg->h_cdwn_type, + cdm_cfg->v_cdwn_type); + + /** + * Choose CSC matrix based on following rules: + * 1. If connector supports quantization select, + * pick Full-Range for better quality. + * 2. If non-CEA mode, then pick Full-Range as per CEA spec + * 3. 
Otherwise, pick Limited-Range as all other CEA modes + * need a limited range + */ + + if (output_type == CDM_CDWN_OUTPUT_HDMI) { + if (connector && connector->yuv_qs) + csc_type = SDE_CSC_RGB2YUV_601FR; + else if (connector && + sde_connector_mode_needs_full_range(connector)) + csc_type = SDE_CSC_RGB2YUV_601FR; + else + csc_type = SDE_CSC_RGB2YUV_601L; + } else if (output_type == CDM_CDWN_OUTPUT_WB) { + csc_type = SDE_CSC_RGB2YUV_601L; + } + + if (hw_cdm && hw_cdm->ops.setup_csc_data) { + ret = hw_cdm->ops.setup_csc_data(hw_cdm, + &sde_csc_10bit_convert[csc_type]); + if (ret < 0) { + SDE_ERROR("failed to setup CSC %d\n", ret); + return; + } + } + + /* Cache the CSC default matrix type */ + phys_enc->enc_cdm_csc = csc_type; + + if (hw_cdm && hw_cdm->ops.setup_cdwn) { + ret = hw_cdm->ops.setup_cdwn(hw_cdm, cdm_cfg); + if (ret < 0) { + SDE_ERROR("failed to setup CDM %d\n", ret); + return; + } + } + + if (hw_cdm && hw_cdm->ops.enable) { + ret = hw_cdm->ops.enable(hw_cdm, cdm_cfg); + if (ret < 0) { + SDE_ERROR("failed to enable CDM %d\n", ret); + return; + } + } +} diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys.h b/drivers/gpu/drm/msm/sde/sde_encoder_phys.h index 2205dd98a927..aec844d640bd 100644 --- a/drivers/gpu/drm/msm/sde/sde_encoder_phys.h +++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys.h @@ -174,6 +174,7 @@ enum sde_intr_idx { * @split_role: Role to play in a split-panel configuration * @intf_mode: Interface mode * @intf_idx: Interface index on sde hardware + * @enc_cdm_csc: Cached CSC type of CDM block * @enc_spinlock: Virtual-Encoder-Wide Spin Lock for IRQ purposes * @enable_state: Enable state tracking * @vblank_refcount: Reference count of vblank request @@ -201,6 +202,7 @@ struct sde_encoder_phys { enum sde_enc_split_role split_role; enum sde_intf_mode intf_mode; enum sde_intf intf_idx; + enum sde_csc_type enc_cdm_csc; spinlock_t *enc_spinlock; enum sde_enc_enable_state enable_state; atomic_t vblank_refcount; @@ -349,8 +351,8 @@ struct sde_encoder_phys *sde_encoder_phys_wb_init( #endif void sde_encoder_phys_setup_cdm(struct sde_encoder_phys *phys_enc, - struct drm_framebuffer *fb, const struct sde_format *format, - struct sde_rect *wb_roi); + const struct sde_format *format, u32 output_type, + struct sde_rect *roi); /** * sde_encoder_helper_trigger_start - control start helper function diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c b/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c index 0b6ee302e231..69a4237f7b67 100644 --- a/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c +++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -242,17 +242,20 @@ static void sde_encoder_phys_vid_setup_timing_engine( SDE_DEBUG_VIDENC(vid_enc, "enabling mode:\n"); drm_mode_debug_printmodeline(&mode); - if (phys_enc->split_role != ENC_ROLE_SOLO) { + if (phys_enc->split_role != ENC_ROLE_SOLO || + (mode.private_flags & MSM_MODE_FLAG_COLOR_FORMAT_YCBCR420)) { mode.hdisplay >>= 1; mode.htotal >>= 1; mode.hsync_start >>= 1; mode.hsync_end >>= 1; + mode.hskew >>= 1; SDE_DEBUG_VIDENC(vid_enc, - "split_role %d, halve horizontal %d %d %d %d\n", + "split_role %d, halve horizontal %d %d %d %d %d\n", phys_enc->split_role, mode.hdisplay, mode.htotal, - mode.hsync_start, mode.hsync_end); + mode.hsync_start, mode.hsync_end, + mode.hskew); } drm_mode_to_intf_timing_params(vid_enc, &mode, &timing_params); @@ -407,6 +410,9 @@ static void sde_encoder_phys_vid_mode_set( return; } + phys_enc->hw_ctl = NULL; + phys_enc->hw_cdm = NULL; + rm = &phys_enc->sde_kms->rm; vid_enc = to_sde_encoder_phys_vid(phys_enc); phys_enc->cached_mode = *adj_mode; @@ -427,6 +433,20 @@ static void sde_encoder_phys_vid_mode_set( phys_enc->hw_ctl = NULL; return; } + + /* CDM is optional */ + sde_rm_init_hw_iter(&iter, phys_enc->parent->base.id, SDE_HW_BLK_CDM); + for (i = 0; i <= instance; i++) { + sde_rm_get_hw(rm, &iter); + if (i == instance) + phys_enc->hw_cdm = (struct sde_hw_cdm *) iter.hw; + } + + if (IS_ERR(phys_enc->hw_cdm)) { + SDE_ERROR("CDM required but not allocated: %ld\n", + PTR_ERR(phys_enc->hw_cdm)); + phys_enc->hw_cdm = NULL; + } } static int sde_encoder_phys_vid_control_vblank_irq( @@ -477,6 +497,9 @@ static void sde_encoder_phys_vid_enable(struct sde_encoder_phys *phys_enc) struct sde_encoder_phys_vid *vid_enc; struct sde_hw_intf *intf; struct sde_hw_ctl *ctl; + struct sde_hw_cdm *hw_cdm = NULL; + struct drm_display_mode mode; + const struct sde_format *fmt = NULL; u32 flush_mask = 0; int ret; @@ -485,7 +508,9 @@ static void sde_encoder_phys_vid_enable(struct sde_encoder_phys *phys_enc) SDE_ERROR("invalid encoder/device\n"); return; } + hw_cdm = phys_enc->hw_cdm; priv = phys_enc->parent->dev->dev_private; + mode = phys_enc->cached_mode; vid_enc = to_sde_encoder_phys_vid(phys_enc); intf = vid_enc->hw_intf; @@ -520,7 +545,21 @@ static void sde_encoder_phys_vid_enable(struct sde_encoder_phys *phys_enc) goto end; } + if (mode.private_flags & MSM_MODE_FLAG_COLOR_FORMAT_YCBCR420) + fmt = sde_get_sde_format(DRM_FORMAT_YUV420); + + if (fmt) { + struct sde_rect hdmi_roi; + + hdmi_roi.w = mode.hdisplay; + hdmi_roi.h = mode.vdisplay; + sde_encoder_phys_setup_cdm(phys_enc, fmt, + CDM_CDWN_OUTPUT_HDMI, &hdmi_roi); + } + ctl->ops.get_bitmask_intf(ctl, &flush_mask, intf->idx); + if (ctl->ops.get_bitmask_cdm && hw_cdm) + ctl->ops.get_bitmask_cdm(ctl, &flush_mask, hw_cdm->idx); ctl->ops.update_pending_flush(ctl, flush_mask); SDE_DEBUG_VIDENC(vid_enc, "update pending flush ctl %d flush_mask %x\n", @@ -554,21 +593,34 @@ static void sde_encoder_phys_vid_get_hw_resources( struct drm_connector_state *conn_state) { struct sde_encoder_phys_vid *vid_enc; + struct sde_mdss_cfg *vid_catalog; if (!phys_enc || !hw_res) { SDE_ERROR("invalid arg(s), enc %d hw_res %d conn_state %d\n", - phys_enc != 0, hw_res != 0, conn_state != 0); + phys_enc != NULL, hw_res != NULL, conn_state != NULL); return; } + vid_catalog = phys_enc->sde_kms->catalog; vid_enc = to_sde_encoder_phys_vid(phys_enc); - if (!vid_enc->hw_intf) { - SDE_ERROR("invalid arg(s), 
hw_intf\n"); + if (!vid_enc->hw_intf || !vid_catalog) { + SDE_ERROR("invalid arg(s), hw_intf %d vid_catalog %d\n", + vid_enc->hw_intf != NULL, vid_catalog != NULL); return; } SDE_DEBUG_VIDENC(vid_enc, "\n"); + if (vid_enc->hw_intf->idx > INTF_MAX) { + SDE_ERROR("invalid arg(s), idx %d\n", + vid_enc->hw_intf->idx); + return; + } hw_res->intfs[vid_enc->hw_intf->idx - INTF_0] = INTF_MODE_VIDEO; + + if (vid_catalog->intf[vid_enc->hw_intf->idx - INTF_0].type + == INTF_HDMI) + hw_res->needs_cdm = true; + SDE_DEBUG_DRIVER("[vid] needs_cdm=%d\n", hw_res->needs_cdm); } static int sde_encoder_phys_vid_wait_for_vblank( @@ -713,6 +765,11 @@ static void sde_encoder_phys_vid_disable(struct sde_encoder_phys *phys_enc) SDE_ERROR_VIDENC(vid_enc, "invalid vblank refcount %d\n", atomic_read(&phys_enc->vblank_refcount)); + if (phys_enc->hw_cdm && phys_enc->hw_cdm->ops.disable) { + SDE_DEBUG_DRIVER("[cdm_disable]\n"); + phys_enc->hw_cdm->ops.disable(phys_enc->hw_cdm); + } + phys_enc->enable_state = SDE_ENC_DISABLED; } diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c b/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c index 9368c4974126..65b16419fcec 100644 --- a/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c +++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c @@ -86,87 +86,6 @@ static void sde_encoder_phys_wb_set_traffic_shaper( wb_cfg->ts_cfg.en = false; } -/** - * sde_encoder_phys_setup_cdm - setup chroma down block - * @phys_enc: Pointer to physical encoder - * @fb: Pointer to output framebuffer - * @format: Output format - */ -void sde_encoder_phys_setup_cdm(struct sde_encoder_phys *phys_enc, - struct drm_framebuffer *fb, const struct sde_format *format, - struct sde_rect *wb_roi) -{ - struct sde_hw_cdm *hw_cdm = phys_enc->hw_cdm; - struct sde_hw_cdm_cfg *cdm_cfg = &phys_enc->cdm_cfg; - int ret; - - if (!SDE_FORMAT_IS_YUV(format)) { - SDE_DEBUG("[cdm_disable fmt:%x]\n", - format->base.pixel_format); - - if (hw_cdm && hw_cdm->ops.disable) - hw_cdm->ops.disable(hw_cdm); - - return; - } - - memset(cdm_cfg, 0, sizeof(struct sde_hw_cdm_cfg)); - - cdm_cfg->output_width = wb_roi->w; - cdm_cfg->output_height = wb_roi->h; - cdm_cfg->output_fmt = format; - cdm_cfg->output_type = CDM_CDWN_OUTPUT_WB; - cdm_cfg->output_bit_depth = SDE_FORMAT_IS_DX(format) ? 
- CDM_CDWN_OUTPUT_10BIT : CDM_CDWN_OUTPUT_8BIT; - - /* enable 10 bit logic */ - switch (cdm_cfg->output_fmt->chroma_sample) { - case SDE_CHROMA_RGB: - cdm_cfg->h_cdwn_type = CDM_CDWN_DISABLE; - cdm_cfg->v_cdwn_type = CDM_CDWN_DISABLE; - break; - case SDE_CHROMA_H2V1: - cdm_cfg->h_cdwn_type = CDM_CDWN_COSITE; - cdm_cfg->v_cdwn_type = CDM_CDWN_DISABLE; - break; - case SDE_CHROMA_420: - cdm_cfg->h_cdwn_type = CDM_CDWN_COSITE; - cdm_cfg->v_cdwn_type = CDM_CDWN_OFFSITE; - break; - case SDE_CHROMA_H1V2: - default: - SDE_ERROR("unsupported chroma sampling type\n"); - cdm_cfg->h_cdwn_type = CDM_CDWN_DISABLE; - cdm_cfg->v_cdwn_type = CDM_CDWN_DISABLE; - break; - } - - SDE_DEBUG("[cdm_enable:%d,%d,%X,%d,%d,%d,%d]\n", - cdm_cfg->output_width, - cdm_cfg->output_height, - cdm_cfg->output_fmt->base.pixel_format, - cdm_cfg->output_type, - cdm_cfg->output_bit_depth, - cdm_cfg->h_cdwn_type, - cdm_cfg->v_cdwn_type); - - if (hw_cdm && hw_cdm->ops.setup_cdwn) { - ret = hw_cdm->ops.setup_cdwn(hw_cdm, cdm_cfg); - if (ret < 0) { - SDE_ERROR("failed to setup CDM %d\n", ret); - return; - } - } - - if (hw_cdm && hw_cdm->ops.enable) { - ret = hw_cdm->ops.enable(hw_cdm, cdm_cfg); - if (ret < 0) { - SDE_ERROR("failed to enable CDM %d\n", ret); - return; - } - } -} - /** * sde_encoder_phys_wb_setup_fb - setup output framebuffer * @phys_enc: Pointer to physical encoder @@ -493,7 +412,8 @@ static void sde_encoder_phys_wb_setup( sde_encoder_phys_wb_set_traffic_shaper(phys_enc); - sde_encoder_phys_setup_cdm(phys_enc, fb, wb_enc->wb_fmt, wb_roi); + sde_encoder_phys_setup_cdm(phys_enc, wb_enc->wb_fmt, + CDM_CDWN_OUTPUT_WB, wb_roi); sde_encoder_phys_wb_setup_fb(phys_enc, fb, wb_roi); diff --git a/drivers/gpu/drm/msm/sde/sde_formats.c b/drivers/gpu/drm/msm/sde/sde_formats.c index a59ec31ba276..2187d221a352 100644 --- a/drivers/gpu/drm/msm/sde/sde_formats.c +++ b/drivers/gpu/drm/msm/sde/sde_formats.c @@ -10,13 +10,18 @@ * GNU General Public License for more details. 
*/ +#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__ + #include +#include #include "sde_kms.h" #include "sde_formats.h" #define SDE_UBWC_META_MACRO_W_H 16 #define SDE_UBWC_META_BLOCK_SIZE 256 +#define SDE_UBWC_PLANE_SIZE_ALIGNMENT 4096 + #define SDE_MAX_IMG_WIDTH 0x3FFF #define SDE_MAX_IMG_HEIGHT 0x3FFF @@ -42,7 +47,7 @@ bp, flg, fm, np) \ .unpack_count = uc, \ .bpp = bp, \ .fetch_mode = fm, \ - .flag = flg, \ + .flag = {(flg)}, \ .num_planes = np \ } @@ -60,7 +65,7 @@ alpha, chroma, count, bp, flg, fm, np) \ .unpack_count = count, \ .bpp = bp, \ .fetch_mode = fm, \ - .flag = flg, \ + .flag = {(flg)}, \ .num_planes = np \ } @@ -77,7 +82,24 @@ alpha, chroma, count, bp, flg, fm, np) \ .unpack_count = 2, \ .bpp = 2, \ .fetch_mode = fm, \ - .flag = flg, \ + .flag = {(flg)}, \ + .num_planes = np \ +} + +#define PSEUDO_YUV_FMT_LOOSE(fmt, a, r, g, b, e0, e1, chroma, flg, fm, np)\ +{ \ + .base.pixel_format = DRM_FORMAT_ ## fmt, \ + .fetch_planes = SDE_PLANE_PSEUDO_PLANAR, \ + .alpha_enable = false, \ + .element = { (e0), (e1), 0, 0 }, \ + .bits = { g, b, r, a }, \ + .chroma_sample = chroma, \ + .unpack_align_msb = 1, \ + .unpack_tight = 0, \ + .unpack_count = 2, \ + .bpp = 2, \ + .fetch_mode = fm, \ + .flag = {(flg)}, \ .num_planes = np \ } @@ -95,10 +117,20 @@ flg, fm, np) \ .unpack_count = 1, \ .bpp = bp, \ .fetch_mode = fm, \ - .flag = flg, \ + .flag = {(flg)}, \ .num_planes = np \ } +/* + * struct sde_media_color_map - maps drm format to media format + * @format: DRM base pixel format + * @color: Media API color related to DRM format + */ +struct sde_media_color_map { + uint32_t format; + uint32_t color; +}; + static const struct sde_format sde_format_map[] = { INTERLEAVED_RGB_FMT(ARGB8888, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, @@ -270,49 +302,49 @@ static const struct sde_format sde_format_map[] = { INTERLEAVED_RGB_FMT(BGRA1010102, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, - C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4, + C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4, true, 4, SDE_FORMAT_FLAG_DX, SDE_FETCH_LINEAR, 1), INTERLEAVED_RGB_FMT(RGBA1010102, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, - C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4, + C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4, true, 4, SDE_FORMAT_FLAG_DX, SDE_FETCH_LINEAR, 1), INTERLEAVED_RGB_FMT(ABGR2101010, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, - C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4, + C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4, true, 4, SDE_FORMAT_FLAG_DX, SDE_FETCH_LINEAR, 1), INTERLEAVED_RGB_FMT(ARGB2101010, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, - C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4, + C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4, true, 4, SDE_FORMAT_FLAG_DX, SDE_FETCH_LINEAR, 1), INTERLEAVED_RGB_FMT(XRGB2101010, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, - C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4, + C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4, false, 4, SDE_FORMAT_FLAG_DX, SDE_FETCH_LINEAR, 1), INTERLEAVED_RGB_FMT(BGRX1010102, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, - C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4, + C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4, false, 4, SDE_FORMAT_FLAG_DX, SDE_FETCH_LINEAR, 1), INTERLEAVED_RGB_FMT(XBGR2101010, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, - C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4, + C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4, false, 4, SDE_FORMAT_FLAG_DX, SDE_FETCH_LINEAR, 1), INTERLEAVED_RGB_FMT(RGBX1010102, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, - C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4, + C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4, false, 4, SDE_FORMAT_FLAG_DX, 
SDE_FETCH_LINEAR, 1), @@ -377,6 +409,82 @@ static const struct sde_format sde_format_map[] = { SDE_FETCH_LINEAR, 3), }; +/* + * A5x tile formats tables: + * These tables hold the A5x tile formats supported. + */ +static const struct sde_format sde_format_map_tile[] = { + INTERLEAVED_RGB_FMT(ARGB8888, + COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, + C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4, + true, 4, 0, + SDE_FETCH_UBWC, 1), + + INTERLEAVED_RGB_FMT(ABGR8888, + COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, + C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4, + true, 4, 0, + SDE_FETCH_UBWC, 1), + + INTERLEAVED_RGB_FMT(RGBA8888, + COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, + C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4, + true, 4, 0, + SDE_FETCH_UBWC, 1), + + INTERLEAVED_RGB_FMT(BGRA8888, + COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, + C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4, + true, 4, 0, + SDE_FETCH_UBWC, 1), + + INTERLEAVED_RGB_FMT(BGRX8888, + COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, + C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4, + false, 4, 0, + SDE_FETCH_UBWC, 1), + + INTERLEAVED_RGB_FMT(XRGB8888, + COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, + C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4, + false, 4, 0, + SDE_FETCH_UBWC, 1), + + INTERLEAVED_RGB_FMT(RGBX8888, + COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, + C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4, + false, 4, 0, + SDE_FETCH_UBWC, 1), + + PSEUDO_YUV_FMT(NV12, + 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, + C1_B_Cb, C2_R_Cr, + SDE_CHROMA_420, SDE_FORMAT_FLAG_YUV, + SDE_FETCH_UBWC, 2), + + PSEUDO_YUV_FMT(NV21, + 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, + C2_R_Cr, C1_B_Cb, + SDE_CHROMA_420, SDE_FORMAT_FLAG_YUV, + SDE_FETCH_UBWC, 2), +}; + +static const struct sde_format sde_format_map_p010_tile[] = { + PSEUDO_YUV_FMT_LOOSE(NV12, + 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, + C1_B_Cb, C2_R_Cr, + SDE_CHROMA_420, (SDE_FORMAT_FLAG_YUV | SDE_FORMAT_FLAG_DX), + SDE_FETCH_UBWC, 2), +}; + +static const struct sde_format sde_format_map_tp10_tile[] = { + PSEUDO_YUV_FMT(NV12, + 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, + C1_B_Cb, C2_R_Cr, + SDE_CHROMA_420, (SDE_FORMAT_FLAG_YUV | SDE_FORMAT_FLAG_DX), + SDE_FETCH_UBWC, 2), +}; + /* * UBWC formats table: * This table holds the UBWC formats supported. @@ -384,40 +492,67 @@ static const struct sde_format sde_format_map[] = { * the data will be passed by user-space. 
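 * Note: every entry in this table now also sets SDE_FORMAT_FLAG_COMPRESSED,
 * which is what SDE_FORMAT_IS_UBWC() tests for; the uncompressed A5x
 * macro-tile tables above stay distinguishable through SDE_FORMAT_IS_TILE().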
*/ static const struct sde_format sde_format_map_ubwc[] = { - INTERLEAVED_RGB_FMT(RGB565, + INTERLEAVED_RGB_FMT(BGR565, 0, COLOR_5BIT, COLOR_6BIT, COLOR_5BIT, C2_R_Cr, C0_G_Y, C1_B_Cb, 0, 3, - false, 2, 0, + false, 2, SDE_FORMAT_FLAG_COMPRESSED, SDE_FETCH_UBWC, 2), - INTERLEAVED_RGB_FMT(RGBA8888, + INTERLEAVED_RGB_FMT(ABGR8888, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4, - true, 4, 0, + true, 4, SDE_FORMAT_FLAG_COMPRESSED, SDE_FETCH_UBWC, 2), - INTERLEAVED_RGB_FMT(RGBX8888, + INTERLEAVED_RGB_FMT(XBGR8888, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4, - false, 4, 0, + false, 4, SDE_FORMAT_FLAG_COMPRESSED, SDE_FETCH_UBWC, 2), - INTERLEAVED_RGB_FMT(RGBA1010102, + INTERLEAVED_RGB_FMT(ABGR2101010, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4, - true, 4, SDE_FORMAT_FLAG_DX, + true, 4, SDE_FORMAT_FLAG_DX | SDE_FORMAT_FLAG_COMPRESSED, SDE_FETCH_UBWC, 2), - INTERLEAVED_RGB_FMT(RGBX1010102, + INTERLEAVED_RGB_FMT(XBGR2101010, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4, - true, 4, SDE_FORMAT_FLAG_DX, + true, 4, SDE_FORMAT_FLAG_DX | SDE_FORMAT_FLAG_COMPRESSED, SDE_FETCH_UBWC, 2), PSEUDO_YUV_FMT(NV12, 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, C1_B_Cb, C2_R_Cr, - SDE_CHROMA_420, SDE_FORMAT_FLAG_YUV, + SDE_CHROMA_420, SDE_FORMAT_FLAG_YUV | + SDE_FORMAT_FLAG_COMPRESSED, + SDE_FETCH_UBWC, 4), +}; + +static const struct sde_format sde_format_map_p010[] = { + PSEUDO_YUV_FMT_LOOSE(NV12, + 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, + C1_B_Cb, C2_R_Cr, + SDE_CHROMA_420, (SDE_FORMAT_FLAG_YUV | SDE_FORMAT_FLAG_DX), + SDE_FETCH_LINEAR, 2), +}; + +static const struct sde_format sde_format_map_p010_ubwc[] = { + PSEUDO_YUV_FMT_LOOSE(NV12, + 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, + C1_B_Cb, C2_R_Cr, + SDE_CHROMA_420, (SDE_FORMAT_FLAG_YUV | SDE_FORMAT_FLAG_DX | + SDE_FORMAT_FLAG_COMPRESSED), + SDE_FETCH_UBWC, 4), +}; + +static const struct sde_format sde_format_map_tp10_ubwc[] = { + PSEUDO_YUV_FMT(NV12, + 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, + C1_B_Cb, C2_R_Cr, + SDE_CHROMA_420, (SDE_FORMAT_FLAG_YUV | SDE_FORMAT_FLAG_DX | + SDE_FORMAT_FLAG_COMPRESSED), SDE_FETCH_UBWC, 4), }; @@ -452,6 +587,37 @@ static void _sde_get_v_h_subsample_rate( } } +static int _sde_format_get_media_color_ubwc(const struct sde_format *fmt) +{ + static const struct sde_media_color_map sde_media_ubwc_map[] = { + {DRM_FORMAT_ABGR8888, COLOR_FMT_RGBA8888_UBWC}, + {DRM_FORMAT_XBGR8888, COLOR_FMT_RGBA8888_UBWC}, + {DRM_FORMAT_ABGR2101010, COLOR_FMT_RGBA1010102_UBWC}, + {DRM_FORMAT_XBGR2101010, COLOR_FMT_RGBA1010102_UBWC}, + {DRM_FORMAT_BGR565, COLOR_FMT_RGB565_UBWC}, + }; + int color_fmt = -1; + int i; + + if (fmt->base.pixel_format == DRM_FORMAT_NV12) { + if (SDE_FORMAT_IS_DX(fmt)) { + if (fmt->unpack_tight) + color_fmt = COLOR_FMT_NV12_BPP10_UBWC; + else + color_fmt = COLOR_FMT_P010_UBWC; + } else + color_fmt = COLOR_FMT_NV12_UBWC; + return color_fmt; + } + + for (i = 0; i < ARRAY_SIZE(sde_media_ubwc_map); ++i) + if (fmt->base.pixel_format == sde_media_ubwc_map[i].format) { + color_fmt = sde_media_ubwc_map[i].color; + break; + } + return color_fmt; +} + static int _sde_format_get_plane_sizes_ubwc( const struct sde_format *fmt, const uint32_t width, @@ -459,6 +625,8 @@ static int _sde_format_get_plane_sizes_ubwc( struct sde_hw_fmt_layout *layout) { int i; + int color; + bool meta = SDE_FORMAT_IS_UBWC(fmt); memset(layout, 0, sizeof(struct sde_hw_fmt_layout)); layout->format = fmt; @@ -466,86 
+634,63 @@ static int _sde_format_get_plane_sizes_ubwc( layout->height = height; layout->num_planes = fmt->num_planes; - if (fmt->base.pixel_format == DRM_FORMAT_NV12) { - uint32_t y_stride_alignment, uv_stride_alignment; - uint32_t y_height_alignment, uv_height_alignment; - uint32_t y_tile_width = 32; - uint32_t y_tile_height = 8; - uint32_t uv_tile_width = y_tile_width / 2; - uint32_t uv_tile_height = y_tile_height; - uint32_t y_bpp_numer = 1, y_bpp_denom = 1; - uint32_t uv_bpp_numer = 1, uv_bpp_denom = 1; - - y_stride_alignment = 128; - uv_stride_alignment = 64; - y_height_alignment = 32; - uv_height_alignment = 32; - y_bpp_numer = 1; - uv_bpp_numer = 2; - y_bpp_denom = 1; - uv_bpp_denom = 1; - - layout->num_planes = 4; - /* Y bitstream stride and plane size */ - layout->plane_pitch[0] = ALIGN(width, y_stride_alignment); - layout->plane_pitch[0] = (layout->plane_pitch[0] * y_bpp_numer) - / y_bpp_denom; - layout->plane_size[0] = ALIGN(layout->plane_pitch[0] * - ALIGN(height, y_height_alignment), 4096); - - /* CbCr bitstream stride and plane size */ - layout->plane_pitch[1] = ALIGN(width / 2, uv_stride_alignment); - layout->plane_pitch[1] = (layout->plane_pitch[1] * uv_bpp_numer) - / uv_bpp_denom; - layout->plane_size[1] = ALIGN(layout->plane_pitch[1] * - ALIGN(height / 2, uv_height_alignment), 4096); - - /* Y meta data stride and plane size */ - layout->plane_pitch[2] = ALIGN( - DIV_ROUND_UP(width, y_tile_width), 64); - layout->plane_size[2] = ALIGN(layout->plane_pitch[2] * - ALIGN(DIV_ROUND_UP(height, y_tile_height), 16), 4096); - - /* CbCr meta data stride and plane size */ - layout->plane_pitch[3] = ALIGN( - DIV_ROUND_UP(width / 2, uv_tile_width), 64); - layout->plane_size[3] = ALIGN(layout->plane_pitch[3] * - ALIGN(DIV_ROUND_UP(height / 2, uv_tile_height), 16), - 4096); - - } else if (fmt->base.pixel_format == DRM_FORMAT_ABGR8888 || - fmt->base.pixel_format == DRM_FORMAT_XBGR8888 || - fmt->base.pixel_format == DRM_FORMAT_BGRA1010102 || - fmt->base.pixel_format == DRM_FORMAT_BGRX1010102 || - fmt->base.pixel_format == DRM_FORMAT_BGR565) { - - uint32_t stride_alignment, aligned_bitstream_width; - - if (fmt->base.pixel_format == DRM_FORMAT_BGR565) - stride_alignment = 128; - else - stride_alignment = 64; - layout->num_planes = 3; - - /* Nothing in plane[1] */ - - /* RGB bitstream stride and plane size */ - aligned_bitstream_width = ALIGN(width, stride_alignment); - layout->plane_pitch[0] = aligned_bitstream_width * fmt->bpp; - layout->plane_size[0] = ALIGN(fmt->bpp * aligned_bitstream_width - * ALIGN(height, 16), 4096); - - /* RGB meta data stride and plane size */ - layout->plane_pitch[2] = ALIGN(DIV_ROUND_UP( - aligned_bitstream_width, 16), 64); - layout->plane_size[2] = ALIGN(layout->plane_pitch[2] * - ALIGN(DIV_ROUND_UP(height, 4), 16), 4096); - } else { + color = _sde_format_get_media_color_ubwc(fmt); + if (color < 0) { DRM_ERROR("UBWC format not supported for fmt:0x%X\n", fmt->base.pixel_format); return -EINVAL; } + if (SDE_FORMAT_IS_YUV(layout->format)) { + uint32_t y_sclines, uv_sclines; + uint32_t y_meta_scanlines = 0; + uint32_t uv_meta_scanlines = 0; + + layout->num_planes = 2; + layout->plane_pitch[0] = VENUS_Y_STRIDE(color, width); + y_sclines = VENUS_Y_SCANLINES(color, height); + layout->plane_size[0] = MSM_MEDIA_ALIGN(layout->plane_pitch[0] * + y_sclines, SDE_UBWC_PLANE_SIZE_ALIGNMENT); + + layout->plane_pitch[1] = VENUS_UV_STRIDE(color, width); + uv_sclines = VENUS_UV_SCANLINES(color, height); + layout->plane_size[1] = MSM_MEDIA_ALIGN(layout->plane_pitch[1] * + 
uv_sclines, SDE_UBWC_PLANE_SIZE_ALIGNMENT); + + if (!meta) + goto done; + + layout->num_planes += 2; + layout->plane_pitch[2] = VENUS_Y_META_STRIDE(color, width); + y_meta_scanlines = VENUS_Y_META_SCANLINES(color, height); + layout->plane_size[2] = MSM_MEDIA_ALIGN(layout->plane_pitch[2] * + y_meta_scanlines, SDE_UBWC_PLANE_SIZE_ALIGNMENT); + + layout->plane_pitch[3] = VENUS_UV_META_STRIDE(color, width); + uv_meta_scanlines = VENUS_UV_META_SCANLINES(color, height); + layout->plane_size[3] = MSM_MEDIA_ALIGN(layout->plane_pitch[3] * + uv_meta_scanlines, SDE_UBWC_PLANE_SIZE_ALIGNMENT); + + } else { + uint32_t rgb_scanlines, rgb_meta_scanlines; + + layout->num_planes = 1; + + layout->plane_pitch[0] = VENUS_RGB_STRIDE(color, width); + rgb_scanlines = VENUS_RGB_SCANLINES(color, height); + layout->plane_size[0] = MSM_MEDIA_ALIGN(layout->plane_pitch[0] * + rgb_scanlines, SDE_UBWC_PLANE_SIZE_ALIGNMENT); + + if (!meta) + goto done; + layout->num_planes += 2; + layout->plane_pitch[2] = VENUS_RGB_META_STRIDE(color, width); + rgb_meta_scanlines = VENUS_RGB_META_SCANLINES(color, height); + layout->plane_size[2] = MSM_MEDIA_ALIGN(layout->plane_pitch[2] * + rgb_meta_scanlines, SDE_UBWC_PLANE_SIZE_ALIGNMENT); + } + +done: for (i = 0; i < SDE_MAX_PLANES; i++) layout->total_size += layout->plane_size[i]; @@ -574,6 +719,7 @@ static int _sde_format_get_plane_sizes_linear( } else { uint32_t v_subsample, h_subsample; uint32_t chroma_samp; + uint32_t bpp = 1; chroma_samp = fmt->chroma_sample; _sde_get_v_h_subsample_rate(chroma_samp, &v_subsample, @@ -584,8 +730,11 @@ static int _sde_format_get_plane_sizes_linear( return -EINVAL; } - layout->plane_pitch[0] = width; - layout->plane_pitch[1] = width / h_subsample; + if ((fmt->base.pixel_format == DRM_FORMAT_NV12) && + (SDE_FORMAT_IS_DX(fmt))) + bpp = 2; + layout->plane_pitch[0] = width * bpp; + layout->plane_pitch[1] = layout->plane_pitch[0] / h_subsample; layout->plane_size[0] = layout->plane_pitch[0] * height; layout->plane_size[1] = layout->plane_pitch[1] * (height / v_subsample); @@ -608,7 +757,7 @@ static int _sde_format_get_plane_sizes_linear( return 0; } -static int _sde_format_get_plane_sizes( +int sde_format_get_plane_sizes( const struct sde_format *fmt, const uint32_t w, const uint32_t h, @@ -624,7 +773,7 @@ static int _sde_format_get_plane_sizes( return -ERANGE; } - if (SDE_FORMAT_IS_UBWC(fmt)) + if (SDE_FORMAT_IS_UBWC(fmt) || SDE_FORMAT_IS_TILE(fmt)) return _sde_format_get_plane_sizes_ubwc(fmt, w, h, layout); return _sde_format_get_plane_sizes_linear(fmt, w, h, layout); @@ -636,6 +785,7 @@ static int _sde_format_populate_addrs_ubwc( struct sde_hw_fmt_layout *layout) { uint32_t base_addr; + bool meta; if (!fb || !layout) { DRM_ERROR("invalid pointers\n"); @@ -648,6 +798,8 @@ static int _sde_format_populate_addrs_ubwc( return -EFAULT; } + meta = SDE_FORMAT_IS_UBWC(layout->format); + /* Per-format logic for verifying active planes */ if (SDE_FORMAT_IS_YUV(layout->format)) { /************************************************/ @@ -677,6 +829,9 @@ static int _sde_format_populate_addrs_ubwc( layout->plane_addr[1] = base_addr + layout->plane_size[0] + layout->plane_size[2] + layout->plane_size[3]; + if (!meta) + goto done; + /* configure Y metadata plane */ layout->plane_addr[2] = base_addr; @@ -704,10 +859,14 @@ static int _sde_format_populate_addrs_ubwc( layout->plane_addr[0] = base_addr + layout->plane_size[2]; layout->plane_addr[1] = 0; + + if (!meta) + goto done; + layout->plane_addr[2] = base_addr; layout->plane_addr[3] = 0; } - +done: return 0; } @@ -761,7 
+920,7 @@ int sde_format_populate_layout( layout->format = to_sde_format(msm_framebuffer_format(fb)); /* Populate the plane sizes etc via get_format */ - ret = _sde_format_get_plane_sizes(layout->format, fb->width, fb->height, + ret = sde_format_get_plane_sizes(layout->format, fb->width, fb->height, layout); if (ret) return ret; @@ -770,7 +929,8 @@ int sde_format_populate_layout( plane_addr[i] = layout->plane_addr[i]; /* Populate the addresses given the fb */ - if (SDE_FORMAT_IS_UBWC(layout->format)) + if (SDE_FORMAT_IS_UBWC(layout->format) || + SDE_FORMAT_IS_TILE(layout->format)) ret = _sde_format_populate_addrs_ubwc(aspace, fb, layout); else ret = _sde_format_populate_addrs_linear(aspace, fb, layout); @@ -864,7 +1024,7 @@ int sde_format_check_modified_format( fmt = to_sde_format(msm_fmt); num_base_fmt_planes = drm_format_num_planes(fmt->base.pixel_format); - ret = _sde_format_get_plane_sizes(fmt, cmd->width, cmd->height, + ret = sde_format_get_plane_sizes(fmt, cmd->width, cmd->height, &layout); if (ret) return ret; @@ -874,7 +1034,8 @@ int sde_format_check_modified_format( DRM_ERROR("invalid handle for plane %d\n", i); return -EINVAL; } - bos_total_size += bos[i]->size; + if ((i == 0) || (bos[i] != bos[0])) + bos_total_size += bos[i]->size; } if (bos_total_size < layout.total_size) { @@ -902,14 +1063,14 @@ const struct sde_format *sde_get_sde_format_ext( * All planes used must specify the same modifier. */ if (modifiers_len && !modifiers) { - DRM_ERROR("invalid modifiers array\n"); + SDE_ERROR("invalid modifiers array\n"); return NULL; } else if (modifiers && modifiers_len && modifiers[0]) { mod0 = modifiers[0]; - DBG("plane format modifier 0x%llX", mod0); + SDE_DEBUG("plane format modifier 0x%llX\n", mod0); for (i = 1; i < modifiers_len; i++) { if (modifiers[i] != mod0) { - DRM_ERROR("bad fmt mod 0x%llX on plane %d\n", + SDE_ERROR("bad fmt mod 0x%llX on plane %d\n", modifiers[i], i); return NULL; } @@ -922,12 +1083,55 @@ const struct sde_format *sde_get_sde_format_ext( map_size = ARRAY_SIZE(sde_format_map); break; case DRM_FORMAT_MOD_QCOM_COMPRESSED: + case DRM_FORMAT_MOD_QCOM_COMPRESSED | DRM_FORMAT_MOD_QCOM_TILE: map = sde_format_map_ubwc; map_size = ARRAY_SIZE(sde_format_map_ubwc); - DBG("found fmt 0x%X DRM_FORMAT_MOD_QCOM_COMPRESSED", format); + SDE_DEBUG("found fmt 0x%X DRM_FORMAT_MOD_QCOM_COMPRESSED\n", + format); + break; + case DRM_FORMAT_MOD_QCOM_DX: + map = sde_format_map_p010; + map_size = ARRAY_SIZE(sde_format_map_p010); + SDE_DEBUG("found fmt 0x%X DRM_FORMAT_MOD_QCOM_DX\n", format); + break; + case (DRM_FORMAT_MOD_QCOM_DX | DRM_FORMAT_MOD_QCOM_COMPRESSED): + case (DRM_FORMAT_MOD_QCOM_DX | DRM_FORMAT_MOD_QCOM_COMPRESSED | + DRM_FORMAT_MOD_QCOM_TILE): + map = sde_format_map_p010_ubwc; + map_size = ARRAY_SIZE(sde_format_map_p010_ubwc); + SDE_DEBUG("found fmt 0x%X DRM_FORMAT_MOD_QCOM_COMPRESSED/DX\n", + format); + break; + case (DRM_FORMAT_MOD_QCOM_DX | DRM_FORMAT_MOD_QCOM_COMPRESSED | + DRM_FORMAT_MOD_QCOM_TIGHT): + case (DRM_FORMAT_MOD_QCOM_DX | DRM_FORMAT_MOD_QCOM_COMPRESSED | + DRM_FORMAT_MOD_QCOM_TIGHT | DRM_FORMAT_MOD_QCOM_TILE): + map = sde_format_map_tp10_ubwc; + map_size = ARRAY_SIZE(sde_format_map_tp10_ubwc); + SDE_DEBUG( + "found fmt 0x%X DRM_FORMAT_MOD_QCOM_COMPRESSED/DX/TIGHT\n", + format); + break; + case DRM_FORMAT_MOD_QCOM_TILE: + map = sde_format_map_tile; + map_size = ARRAY_SIZE(sde_format_map_tile); + SDE_DEBUG("found fmt 0x%X DRM_FORMAT_MOD_QCOM_TILE\n", format); + break; + case (DRM_FORMAT_MOD_QCOM_TILE | DRM_FORMAT_MOD_QCOM_DX): + map = 
sde_format_map_p010_tile; + map_size = ARRAY_SIZE(sde_format_map_p010_tile); + SDE_DEBUG("found fmt 0x%X DRM_FORMAT_MOD_QCOM_TILE/DX\n", + format); + break; + case (DRM_FORMAT_MOD_QCOM_TILE | DRM_FORMAT_MOD_QCOM_DX | + DRM_FORMAT_MOD_QCOM_TIGHT): + map = sde_format_map_tp10_tile; + map_size = ARRAY_SIZE(sde_format_map_tp10_tile); + SDE_DEBUG("found fmt 0x%X DRM_FORMAT_MOD_QCOM_TILE/DX/TIGHT\n", + format); break; default: - DRM_ERROR("unsupported format modifier %llX\n", mod0); + SDE_ERROR("unsupported format modifier %llX\n", mod0); return NULL; } @@ -939,10 +1143,10 @@ const struct sde_format *sde_get_sde_format_ext( } if (fmt == NULL) - DRM_ERROR("unsupported fmt 0x%X modifier 0x%llX\n", + SDE_ERROR("unsupported fmt 0x%X modifier 0x%llX\n", format, mod0); else - DBG("fmt %s mod 0x%llX ubwc %d yuv %d", + SDE_DEBUG("fmt %s mod 0x%llX ubwc %d yuv %d\n", drm_get_format_name(format), mod0, SDE_FORMAT_IS_UBWC(fmt), SDE_FORMAT_IS_YUV(fmt)); diff --git a/drivers/gpu/drm/msm/sde/sde_formats.h b/drivers/gpu/drm/msm/sde/sde_formats.h index 0de081d619b7..ec8f97da4a41 100644 --- a/drivers/gpu/drm/msm/sde/sde_formats.h +++ b/drivers/gpu/drm/msm/sde/sde_formats.h @@ -58,6 +58,21 @@ uint32_t sde_populate_formats( uint64_t *pixel_modifiers, uint32_t pixel_formats_max); +/** + * sde_format_get_plane_sizes - calculate size and layout of given buffer format + * @fmt: pointer to sde_format + * @w: width of the buffer + * @h: height of the buffer + * @layout: layout of the buffer + * + * Return: size of the buffer + */ +int sde_format_get_plane_sizes( + const struct sde_format *fmt, + const uint32_t w, + const uint32_t h, + struct sde_hw_fmt_layout *layout); + /** * sde_format_check_modified_format - validate format and buffers for * sde non-standard, i.e. modified format diff --git a/drivers/gpu/drm/msm/sde/sde_hw_catalog.c b/drivers/gpu/drm/msm/sde/sde_hw_catalog.c index 17b678cfca46..a185eb338134 100644 --- a/drivers/gpu/drm/msm/sde/sde_hw_catalog.c +++ b/drivers/gpu/drm/msm/sde/sde_hw_catalog.c @@ -405,6 +405,38 @@ static struct sde_prop_type vbif_prop[] = { /************************************************************* * static API list *************************************************************/ + +/** + * _sde_copy_formats - copy formats from src_list to dst_list + * @dst_list: pointer to destination list where to copy formats + * @dst_list_size: size of destination list + * @dst_list_pos: starting position on the list where to copy formats + * @src_list: pointer to source list where to copy formats from + * @src_list_size: size of source list + * Return: number of elements populated + */ +static uint32_t _sde_copy_formats( + struct sde_format_extended *dst_list, + uint32_t dst_list_size, + uint32_t dst_list_pos, + const struct sde_format_extended *src_list, + uint32_t src_list_size) +{ + uint32_t cur_pos, i; + + if (!dst_list || !src_list || (dst_list_pos >= (dst_list_size - 1))) + return 0; + + for (i = 0, cur_pos = dst_list_pos; + (cur_pos < (dst_list_size - 1)) && (i < src_list_size) + && src_list[i].fourcc_format; ++i, ++cur_pos) + dst_list[cur_pos] = src_list[i]; + + dst_list[cur_pos].fourcc_format = 0; + + return i; +} + static int _parse_dt_u32_handler(struct device_node *np, char *prop_name, u32 *offsets, int len, bool mandatory) { @@ -471,6 +503,7 @@ static int _validate_dt_entry(struct device_node *np, rc = -EINVAL; } *off_count = 0; + memset(prop_count, 0, sizeof(int) * prop_size); return rc; } } @@ -656,9 +689,10 @@ static void _sde_sspp_setup_vig(struct sde_mdss_cfg *sde_cfg, { 
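+	/*
+	 * The format_list assigned below is only the static default; on
+	 * SDE 3.0/3.0.1/4.0 targets sde_hardware_format_caps() later points
+	 * it at the dynamically built vig_formats list, keyed off the new
+	 * sspp->type field.
+	 */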
sblk->maxupscale = MAX_SSPP_UPSCALE; sblk->maxdwnscale = MAX_SSPP_DOWNSCALE; + sblk->format_list = plane_formats_yuv; sspp->id = SSPP_VIG0 + *vig_count; sspp->clk_ctrl = SDE_CLK_CTRL_VIG0 + *vig_count; - sblk->format_list = plane_formats_yuv; + sspp->type = SSPP_TYPE_VIG; set_bit(SDE_SSPP_QOS, &sspp->features); (*vig_count)++; @@ -726,9 +760,10 @@ static void _sde_sspp_setup_rgb(struct sde_mdss_cfg *sde_cfg, { sblk->maxupscale = MAX_SSPP_UPSCALE; sblk->maxdwnscale = MAX_SSPP_DOWNSCALE; + sblk->format_list = plane_formats; sspp->id = SSPP_RGB0 + *rgb_count; sspp->clk_ctrl = SDE_CLK_CTRL_RGB0 + *rgb_count; - sblk->format_list = plane_formats; + sspp->type = SSPP_TYPE_RGB; set_bit(SDE_SSPP_QOS, &sspp->features); (*rgb_count)++; @@ -766,9 +801,10 @@ static void _sde_sspp_setup_cursor(struct sde_mdss_cfg *sde_cfg, set_bit(SDE_SSPP_CURSOR, &sspp->features); sblk->maxupscale = SSPP_UNITY_SCALE; sblk->maxdwnscale = SSPP_UNITY_SCALE; + sblk->format_list = cursor_formats; sspp->id = SSPP_CURSOR0 + *cursor_count; sspp->clk_ctrl = SDE_CLK_CTRL_CURSOR0 + *cursor_count; - sblk->format_list = plane_formats; + sspp->type = SSPP_TYPE_CURSOR; (*cursor_count)++; snprintf(sspp->name, sizeof(sspp->name), "cursor%d", *cursor_count-1); } @@ -779,9 +815,10 @@ static void _sde_sspp_setup_dma(struct sde_mdss_cfg *sde_cfg, { sblk->maxupscale = SSPP_UNITY_SCALE; sblk->maxdwnscale = SSPP_UNITY_SCALE; + sblk->format_list = plane_formats; sspp->id = SSPP_DMA0 + *dma_count; sspp->clk_ctrl = SDE_CLK_CTRL_DMA0 + *dma_count; - sblk->format_list = plane_formats; + sspp->type = SSPP_TYPE_DMA; set_bit(SDE_SSPP_QOS, &sspp->features); (*dma_count)++; snprintf(sspp->name, sizeof(sspp->name), "dma%d", *dma_count-1); @@ -1988,9 +2025,129 @@ end: return rc; } -static void sde_hardware_caps(struct sde_mdss_cfg *sde_cfg, +static int sde_hardware_format_caps(struct sde_mdss_cfg *sde_cfg, uint32_t hw_rev) { + int i, rc = 0; + uint32_t dma_list_size, vig_list_size, wb2_list_size; + uint32_t cursor_list_size = 0; + struct sde_sspp_sub_blks *sblk; + uint32_t index = 0; + + if (IS_SDE_MAJOR_MINOR_SAME((hw_rev), SDE_HW_VER_300)) { + cursor_list_size = ARRAY_SIZE(cursor_formats); + sde_cfg->cursor_formats = kcalloc(cursor_list_size, + sizeof(struct sde_format_extended), GFP_KERNEL); + if (!sde_cfg->cursor_formats) { + rc = -ENOMEM; + goto end; + } + index = _sde_copy_formats(sde_cfg->cursor_formats, + cursor_list_size, 0, cursor_formats, + ARRAY_SIZE(cursor_formats)); + } + + dma_list_size = ARRAY_SIZE(plane_formats); + vig_list_size = ARRAY_SIZE(plane_formats_yuv); + wb2_list_size = ARRAY_SIZE(wb2_formats); + + dma_list_size += ARRAY_SIZE(rgb_10bit_formats); + vig_list_size += ARRAY_SIZE(rgb_10bit_formats) + + ARRAY_SIZE(tp10_ubwc_formats) + + ARRAY_SIZE(p010_formats); + wb2_list_size += ARRAY_SIZE(rgb_10bit_formats) + + ARRAY_SIZE(tp10_ubwc_formats); + + sde_cfg->dma_formats = kcalloc(dma_list_size, + sizeof(struct sde_format_extended), GFP_KERNEL); + if (!sde_cfg->dma_formats) { + rc = -ENOMEM; + goto end; + } + + sde_cfg->vig_formats = kcalloc(vig_list_size, + sizeof(struct sde_format_extended), GFP_KERNEL); + if (!sde_cfg->vig_formats) { + rc = -ENOMEM; + goto end; + } + + sde_cfg->wb_formats = kcalloc(wb2_list_size, + sizeof(struct sde_format_extended), GFP_KERNEL); + if (!sde_cfg->wb_formats) { + SDE_ERROR("failed to allocate wb format list\n"); + rc = -ENOMEM; + goto end; + } + + if (IS_SDE_MAJOR_MINOR_SAME((hw_rev), SDE_HW_VER_300) || + IS_SDE_MAJOR_MINOR_SAME((hw_rev), SDE_HW_VER_301)) { + sde_cfg->has_hdr = true; + } + + index = 
_sde_copy_formats(sde_cfg->dma_formats, dma_list_size, + 0, plane_formats, ARRAY_SIZE(plane_formats)); + index += _sde_copy_formats(sde_cfg->dma_formats, dma_list_size, + index, rgb_10bit_formats, + ARRAY_SIZE(rgb_10bit_formats)); + + index = _sde_copy_formats(sde_cfg->vig_formats, vig_list_size, + 0, plane_formats_yuv, ARRAY_SIZE(plane_formats_yuv)); + index += _sde_copy_formats(sde_cfg->vig_formats, vig_list_size, + index, rgb_10bit_formats, + ARRAY_SIZE(rgb_10bit_formats)); + index += _sde_copy_formats(sde_cfg->vig_formats, vig_list_size, + index, p010_formats, ARRAY_SIZE(p010_formats)); + + index += _sde_copy_formats(sde_cfg->vig_formats, vig_list_size, + index, tp10_ubwc_formats, + ARRAY_SIZE(tp10_ubwc_formats)); + + index = _sde_copy_formats(sde_cfg->wb_formats, wb2_list_size, + 0, wb2_formats, ARRAY_SIZE(wb2_formats)); + index += _sde_copy_formats(sde_cfg->wb_formats, wb2_list_size, + index, rgb_10bit_formats, + ARRAY_SIZE(rgb_10bit_formats)); + index += _sde_copy_formats(sde_cfg->wb_formats, wb2_list_size, + index, tp10_ubwc_formats, + ARRAY_SIZE(tp10_ubwc_formats)); + + for (i = 0; i < sde_cfg->sspp_count; ++i) { + struct sde_sspp_cfg *sspp = &sde_cfg->sspp[i]; + + sblk = (struct sde_sspp_sub_blks *)sspp->sblk; + switch (sspp->type) { + case SSPP_TYPE_VIG: + sblk->format_list = sde_cfg->vig_formats; + break; + case SSPP_TYPE_CURSOR: + if (IS_SDE_MAJOR_MINOR_SAME((hw_rev), SDE_HW_VER_300)) + sblk->format_list = sde_cfg->cursor_formats; + else + SDE_ERROR("invalid sspp type %d, xin id %d\n", + sspp->type, sspp->xin_id); + break; + case SSPP_TYPE_DMA: + sblk->format_list = sde_cfg->dma_formats; + break; + default: + SDE_ERROR("invalid sspp type %d\n", sspp->type); + rc = -EINVAL; + goto end; + } + } + + for (i = 0; i < sde_cfg->wb_count; ++i) + sde_cfg->wb[i].format_list = sde_cfg->wb_formats; + +end: + return rc; +} + +static int sde_hardware_caps(struct sde_mdss_cfg *sde_cfg, uint32_t hw_rev) +{ + int rc = 0; + switch (hw_rev) { case SDE_HW_VER_170: case SDE_HW_VER_171: @@ -1998,10 +2155,14 @@ static void sde_hardware_caps(struct sde_mdss_cfg *sde_cfg, /* update msm8996 target here */ break; case SDE_HW_VER_300: + case SDE_HW_VER_301: case SDE_HW_VER_400: /* update cobalt and skunk target here */ + rc = sde_hardware_format_caps(sde_cfg, hw_rev); break; } + + return rc; } void sde_hw_catalog_deinit(struct sde_mdss_cfg *sde_cfg) @@ -2040,6 +2201,11 @@ void sde_hw_catalog_deinit(struct sde_mdss_cfg *sde_cfg) } } + kfree(sde_cfg->dma_formats); + kfree(sde_cfg->cursor_formats); + kfree(sde_cfg->vig_formats); + kfree(sde_cfg->wb_formats); + kfree(sde_cfg); } @@ -2109,7 +2275,9 @@ struct sde_mdss_cfg *sde_hw_catalog_init(struct drm_device *dev, if (rc) SDE_DEBUG("virtual plane is not supported.\n"); - sde_hardware_caps(sde_cfg, hw_rev); + rc = sde_hardware_caps(sde_cfg, hw_rev); + if (rc) + goto end; return sde_cfg; diff --git a/drivers/gpu/drm/msm/sde/sde_hw_catalog.h b/drivers/gpu/drm/msm/sde/sde_hw_catalog.h index bca221d2a959..73bb77b7afa6 100644 --- a/drivers/gpu/drm/msm/sde/sde_hw_catalog.h +++ b/drivers/gpu/drm/msm/sde/sde_hw_catalog.h @@ -42,7 +42,8 @@ #define SDE_HW_VER_170 SDE_HW_VER(1, 7, 0) /* 8996 v1.0 */ #define SDE_HW_VER_171 SDE_HW_VER(1, 7, 1) /* 8996 v2.0 */ #define SDE_HW_VER_172 SDE_HW_VER(1, 7, 2) /* 8996 v3.0 */ -#define SDE_HW_VER_300 SDE_HW_VER(3, 0, 0) /* cobalt v1.0 */ +#define SDE_HW_VER_300 SDE_HW_VER(3, 0, 0) /* 8998 v1.0 */ +#define SDE_HW_VER_301 SDE_HW_VER(3, 0, 1) /* 8998 v1.1 */ #define SDE_HW_VER_400 SDE_HW_VER(4, 0, 0) /* msmskunk v1.0 */ #define 
IS_MSMSKUNK_TARGET(rev) IS_SDE_MAJOR_MINOR_SAME((rev), SDE_HW_VER_400) @@ -457,7 +458,8 @@ struct sde_ctl_cfg { * @sblk: SSPP sub-blocks information * @xin_id: bus client identifier * @clk_ctrl clock control identifier - *@name source pipe name + * @name source pipe name + * @type sspp type identifier */ struct sde_sspp_cfg { SDE_HW_BLK_INFO; @@ -465,6 +467,7 @@ struct sde_sspp_cfg { u32 xin_id; enum sde_clk_ctrl_type clk_ctrl; char name[SSPP_NAME_SIZE]; + u32 type; }; /** @@ -652,6 +655,11 @@ struct sde_vp_cfg { * @csc_type csc or csc_10bit support. * @has_src_split source split feature status * @has_cdp Client driver prefetch feature status + * @has_hdr HDR feature support + * @dma_formats Supported formats for dma pipe + * @cursor_formats Supported formats for cursor pipe + * @vig_formats Supported formats for vig pipe + * @wb_formats Supported formats for wb */ struct sde_mdss_cfg { u32 hwversion; @@ -665,7 +673,7 @@ struct sde_mdss_cfg { u32 csc_type; bool has_src_split; bool has_cdp; - + bool has_hdr; u32 mdss_count; struct sde_mdss_base_cfg mdss[MAX_BLOCKS]; @@ -704,6 +712,11 @@ struct sde_mdss_cfg { u32 vp_count; struct sde_vp_cfg vp[MAX_BLOCKS]; + + struct sde_format_extended *dma_formats; + struct sde_format_extended *cursor_formats; + struct sde_format_extended *vig_formats; + struct sde_format_extended *wb_formats; }; struct sde_mdss_hw_cfg_handler { diff --git a/drivers/gpu/drm/msm/sde/sde_hw_catalog_format.h b/drivers/gpu/drm/msm/sde/sde_hw_catalog_format.h index 296694422653..dbc8981a7f8f 100644 --- a/drivers/gpu/drm/msm/sde/sde_hw_catalog_format.h +++ b/drivers/gpu/drm/msm/sde/sde_hw_catalog_format.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -16,17 +16,17 @@ static const struct sde_format_extended plane_formats[] = { {DRM_FORMAT_ARGB8888, 0}, {DRM_FORMAT_ABGR8888, 0}, {DRM_FORMAT_RGBA8888, 0}, - {DRM_FORMAT_RGBA8888, DRM_FORMAT_MOD_QCOM_COMPRESSED}, + {DRM_FORMAT_ABGR8888, DRM_FORMAT_MOD_QCOM_COMPRESSED}, {DRM_FORMAT_BGRA8888, 0}, {DRM_FORMAT_XRGB8888, 0}, {DRM_FORMAT_RGBX8888, 0}, {DRM_FORMAT_BGRX8888, 0}, {DRM_FORMAT_XBGR8888, 0}, - {DRM_FORMAT_RGBX8888, DRM_FORMAT_MOD_QCOM_COMPRESSED}, + {DRM_FORMAT_XBGR8888, DRM_FORMAT_MOD_QCOM_COMPRESSED}, {DRM_FORMAT_RGB888, 0}, {DRM_FORMAT_BGR888, 0}, {DRM_FORMAT_RGB565, 0}, - {DRM_FORMAT_RGB565, DRM_FORMAT_MOD_QCOM_COMPRESSED}, + {DRM_FORMAT_BGR565, DRM_FORMAT_MOD_QCOM_COMPRESSED}, {DRM_FORMAT_BGR565, 0}, {DRM_FORMAT_ARGB1555, 0}, {DRM_FORMAT_ABGR1555, 0}, @@ -52,16 +52,16 @@ static const struct sde_format_extended plane_formats_yuv[] = { {DRM_FORMAT_ABGR8888, 0}, {DRM_FORMAT_RGBA8888, 0}, {DRM_FORMAT_BGRX8888, 0}, - {DRM_FORMAT_RGBA8888, DRM_FORMAT_MOD_QCOM_COMPRESSED}, + {DRM_FORMAT_ABGR8888, DRM_FORMAT_MOD_QCOM_COMPRESSED}, {DRM_FORMAT_BGRA8888, 0}, {DRM_FORMAT_XRGB8888, 0}, {DRM_FORMAT_XBGR8888, 0}, {DRM_FORMAT_RGBX8888, 0}, - {DRM_FORMAT_RGBX8888, DRM_FORMAT_MOD_QCOM_COMPRESSED}, + {DRM_FORMAT_XBGR8888, DRM_FORMAT_MOD_QCOM_COMPRESSED}, {DRM_FORMAT_RGB888, 0}, {DRM_FORMAT_BGR888, 0}, {DRM_FORMAT_RGB565, 0}, - {DRM_FORMAT_RGB565, DRM_FORMAT_MOD_QCOM_COMPRESSED}, + {DRM_FORMAT_BGR565, DRM_FORMAT_MOD_QCOM_COMPRESSED}, {DRM_FORMAT_BGR565, 0}, {DRM_FORMAT_ARGB1555, 0}, {DRM_FORMAT_ABGR1555, 0}, @@ -94,13 +94,33 @@ static const struct sde_format_extended plane_formats_yuv[] = { {0, 0}, }; +static const struct sde_format_extended cursor_formats[] = { + {DRM_FORMAT_ARGB8888, 0}, + {DRM_FORMAT_ABGR8888, 0}, + {DRM_FORMAT_RGBA8888, 0}, + {DRM_FORMAT_BGRA8888, 0}, + {DRM_FORMAT_XRGB8888, 0}, + {DRM_FORMAT_ARGB1555, 0}, + {DRM_FORMAT_ABGR1555, 0}, + {DRM_FORMAT_RGBA5551, 0}, + {DRM_FORMAT_BGRA5551, 0}, + {DRM_FORMAT_ARGB4444, 0}, + {DRM_FORMAT_ABGR4444, 0}, + {DRM_FORMAT_RGBA4444, 0}, + {DRM_FORMAT_BGRA4444, 0}, + {0, 0}, +}; + static const struct sde_format_extended wb2_formats[] = { {DRM_FORMAT_RGB565, 0}, + {DRM_FORMAT_BGR565, DRM_FORMAT_MOD_QCOM_COMPRESSED}, {DRM_FORMAT_RGB888, 0}, {DRM_FORMAT_ARGB8888, 0}, {DRM_FORMAT_RGBA8888, 0}, + {DRM_FORMAT_ABGR8888, DRM_FORMAT_MOD_QCOM_COMPRESSED}, {DRM_FORMAT_XRGB8888, 0}, {DRM_FORMAT_RGBX8888, 0}, + {DRM_FORMAT_XBGR8888, DRM_FORMAT_MOD_QCOM_COMPRESSED}, {DRM_FORMAT_ARGB1555, 0}, {DRM_FORMAT_RGBA5551, 0}, {DRM_FORMAT_XRGB1555, 0}, @@ -127,8 +147,31 @@ static const struct sde_format_extended wb2_formats[] = { {DRM_FORMAT_YUV420, 0}, {DRM_FORMAT_NV12, 0}, + {DRM_FORMAT_NV12, DRM_FORMAT_MOD_QCOM_COMPRESSED}, {DRM_FORMAT_NV16, 0}, {DRM_FORMAT_YUYV, 0}, {0, 0}, }; + +static const struct sde_format_extended rgb_10bit_formats[] = { + {DRM_FORMAT_BGRA1010102, 0}, + {DRM_FORMAT_BGRX1010102, 0}, + {DRM_FORMAT_RGBA1010102, 0}, + {DRM_FORMAT_RGBX1010102, 0}, + {DRM_FORMAT_ABGR2101010, 0}, + {DRM_FORMAT_ABGR2101010, DRM_FORMAT_MOD_QCOM_COMPRESSED}, + {DRM_FORMAT_XBGR2101010, 0}, + {DRM_FORMAT_XBGR2101010, DRM_FORMAT_MOD_QCOM_COMPRESSED}, + {DRM_FORMAT_ARGB2101010, 0}, + {DRM_FORMAT_XRGB2101010, 0}, +}; + +static const struct sde_format_extended p010_formats[] = { + {DRM_FORMAT_NV12, DRM_FORMAT_MOD_QCOM_DX}, +}; + +static const struct sde_format_extended tp10_ubwc_formats[] = { + {DRM_FORMAT_NV12, 
DRM_FORMAT_MOD_QCOM_COMPRESSED | + DRM_FORMAT_MOD_QCOM_DX | DRM_FORMAT_MOD_QCOM_TIGHT}, +}; diff --git a/drivers/gpu/drm/msm/sde/sde_hw_cdm.c b/drivers/gpu/drm/msm/sde/sde_hw_cdm.c index c7cbb93bece4..9ec81c227e60 100644 --- a/drivers/gpu/drm/msm/sde/sde_hw_cdm.c +++ b/drivers/gpu/drm/msm/sde/sde_hw_cdm.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -88,50 +88,12 @@ static struct sde_cdm_cfg *_cdm_offset(enum sde_cdm cdm, return ERR_PTR(-EINVAL); } -static void sde_hw_cdm_setup_csc_10bit(struct sde_hw_cdm *ctx, +static int sde_hw_cdm_setup_csc_10bit(struct sde_hw_cdm *ctx, struct sde_csc_cfg *data) { - struct sde_hw_blk_reg_map *c = &ctx->hw; - u32 csc_reg_off = CDM_CSC_10_MATRIX_COEFF_0; - u32 val; + sde_hw_csc_setup(&ctx->hw, CDM_CSC_10_MATRIX_COEFF_0, data, true); - /* matrix coeff */ - val = data->csc_mv[0] | (data->csc_mv[1] << 16); - SDE_REG_WRITE(c, csc_reg_off, val); - val = data->csc_mv[2] | (data->csc_mv[3] << 16); - SDE_REG_WRITE(c, csc_reg_off + 0x4, val); - val = data->csc_mv[4] | (data->csc_mv[5] << 16); - SDE_REG_WRITE(c, csc_reg_off + 0x8, val); - val = data->csc_mv[6] | (data->csc_mv[7] << 16); - SDE_REG_WRITE(c, csc_reg_off + 0xc, val); - val = data->csc_mv[8]; - SDE_REG_WRITE(c, csc_reg_off + 0x10, val); - - /* Pre clamp */ - val = (data->csc_pre_lv[0] << 16) | data->csc_pre_lv[1]; - SDE_REG_WRITE(c, csc_reg_off + 0x14, val); - val = (data->csc_pre_lv[2] << 16) | data->csc_pre_lv[3]; - SDE_REG_WRITE(c, csc_reg_off + 0x18, val); - val = (data->csc_pre_lv[4] << 16) | data->csc_pre_lv[5]; - SDE_REG_WRITE(c, csc_reg_off + 0x1c, val); - - /* Post clamp */ - val = (data->csc_post_lv[0] << 16) | data->csc_post_lv[1]; - SDE_REG_WRITE(c, csc_reg_off + 0x20, val); - val = (data->csc_post_lv[2] << 16) | data->csc_post_lv[3]; - SDE_REG_WRITE(c, csc_reg_off + 0x24, val); - val = (data->csc_post_lv[4] << 16) | data->csc_post_lv[5]; - SDE_REG_WRITE(c, csc_reg_off + 0x28, val); - - /* Pre-Bias */ - SDE_REG_WRITE(c, csc_reg_off + 0x2c, data->csc_pre_bv[0]); - SDE_REG_WRITE(c, csc_reg_off + 0x30, data->csc_pre_bv[1]); - SDE_REG_WRITE(c, csc_reg_off + 0x34, data->csc_pre_bv[2]); - - /* Post-Bias */ - SDE_REG_WRITE(c, csc_reg_off + 0x38, data->csc_post_bv[0]); - SDE_REG_WRITE(c, csc_reg_off + 0x3c, data->csc_post_bv[1]); - SDE_REG_WRITE(c, csc_reg_off + 0x40, data->csc_post_bv[2]); + return 0; } static int sde_hw_cdm_setup_cdwn(struct sde_hw_cdm *ctx, @@ -265,7 +227,7 @@ int sde_hw_cdm_enable(struct sde_hw_cdm *ctx, return -EINVAL; if (cdm->output_type == CDM_CDWN_OUTPUT_HDMI) { - if (fmt->chroma_sample != SDE_CHROMA_H1V2) + if (fmt->chroma_sample == SDE_CHROMA_H1V2) return -EINVAL; /*unsupported format */ opmode = BIT(0); opmode |= (fmt->chroma_sample << 1); diff --git a/drivers/gpu/drm/msm/sde/sde_hw_cdm.h b/drivers/gpu/drm/msm/sde/sde_hw_cdm.h index 264b8a418573..a0afd897e867 100644 --- a/drivers/gpu/drm/msm/sde/sde_hw_cdm.h +++ b/drivers/gpu/drm/msm/sde/sde_hw_cdm.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -65,8 +65,9 @@ struct sde_hw_cdm_ops { * to program a different matrix than default matrix. * @cdm: Pointer to the chroma down context structure * @data Pointer to CSC configuration data + * return: 0 if success; error code otherwise */ - void (*setup_csc_data)(struct sde_hw_cdm *cdm, + int (*setup_csc_data)(struct sde_hw_cdm *cdm, struct sde_csc_cfg *data); /** diff --git a/drivers/gpu/drm/msm/sde/sde_hw_interrupts.c b/drivers/gpu/drm/msm/sde/sde_hw_interrupts.c index 49930365d989..1535d1d1ade5 100644 --- a/drivers/gpu/drm/msm/sde/sde_hw_interrupts.c +++ b/drivers/gpu/drm/msm/sde/sde_hw_interrupts.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -357,37 +357,32 @@ static const struct sde_irq_type sde_irq_map[] = { SDE_INTR_HIST_VIG_1_RSTSEQ_DONE, 2}, { SDE_IRQ_TYPE_RESERVED, 0, 0, 2}, { SDE_IRQ_TYPE_RESERVED, 0, 0, 2}, - /* irq_idx: 68-71 */ + /* irq_idx: 72-75 */ { SDE_IRQ_TYPE_HIST_VIG_DONE, SSPP_VIG2, SDE_INTR_HIST_VIG_2_DONE, 2}, { SDE_IRQ_TYPE_HIST_VIG_RSTSEQ, SSPP_VIG2, SDE_INTR_HIST_VIG_2_RSTSEQ_DONE, 2}, { SDE_IRQ_TYPE_HIST_VIG_DONE, SSPP_VIG3, SDE_INTR_HIST_VIG_3_DONE, 2}, { SDE_IRQ_TYPE_HIST_VIG_RSTSEQ, SSPP_VIG3, SDE_INTR_HIST_VIG_3_RSTSEQ_DONE, 2}, - /* irq_idx: 72-75 */ + /* irq_idx: 76-79 */ { SDE_IRQ_TYPE_HIST_DSPP_DONE, DSPP_0, SDE_INTR_HIST_DSPP_0_DONE, 2}, { SDE_IRQ_TYPE_HIST_DSPP_RSTSEQ, DSPP_0, SDE_INTR_HIST_DSPP_0_RSTSEQ_DONE, 2}, { SDE_IRQ_TYPE_RESERVED, 0, 0, 2}, { SDE_IRQ_TYPE_RESERVED, 0, 0, 2}, - /* irq_idx: 76-79 */ + /* irq_idx: 80-83 */ { SDE_IRQ_TYPE_HIST_DSPP_DONE, DSPP_1, SDE_INTR_HIST_DSPP_1_DONE, 2}, { SDE_IRQ_TYPE_HIST_DSPP_RSTSEQ, DSPP_1, SDE_INTR_HIST_DSPP_1_RSTSEQ_DONE, 2}, { SDE_IRQ_TYPE_RESERVED, 0, 0, 2}, { SDE_IRQ_TYPE_RESERVED, 0, 0, 2}, - /* irq_idx: 80-83 */ + /* irq_idx: 84-87 */ { SDE_IRQ_TYPE_HIST_DSPP_DONE, DSPP_2, SDE_INTR_HIST_DSPP_2_DONE, 2}, { SDE_IRQ_TYPE_HIST_DSPP_RSTSEQ, DSPP_2, SDE_INTR_HIST_DSPP_2_RSTSEQ_DONE, 2}, { SDE_IRQ_TYPE_HIST_DSPP_DONE, DSPP_3, SDE_INTR_HIST_DSPP_3_DONE, 2}, { SDE_IRQ_TYPE_HIST_DSPP_RSTSEQ, DSPP_3, SDE_INTR_HIST_DSPP_3_RSTSEQ_DONE, 2}, - /* irq_idx: 84-87 */ - { SDE_IRQ_TYPE_RESERVED, 0, 0, 2}, - { SDE_IRQ_TYPE_RESERVED, 0, 0, 2}, - { SDE_IRQ_TYPE_RESERVED, 0, 0, 2}, - { SDE_IRQ_TYPE_RESERVED, 0, 0, 2}, /* irq_idx: 88-91 */ { SDE_IRQ_TYPE_RESERVED, 0, 0, 2}, { SDE_IRQ_TYPE_RESERVED, 0, 0, 2}, @@ -905,7 +900,7 @@ static u32 sde_hw_intr_get_interrupt_status(struct sde_hw_intr *intr, sde_intr_set[reg_idx].status_off) & sde_irq_map[irq_idx].irq_mask; if (intr_status && clear) - SDE_REG_WRITE(&intr->hw, sde_intr_set[irq_idx].clr_off, + SDE_REG_WRITE(&intr->hw, sde_intr_set[reg_idx].clr_off, intr_status); spin_unlock_irqrestore(&intr->mask_lock, irq_flags); diff --git a/drivers/gpu/drm/msm/sde/sde_hw_mdss.h b/drivers/gpu/drm/msm/sde/sde_hw_mdss.h index dcba248d27b0..92dd829eee3e 100644 --- a/drivers/gpu/drm/msm/sde/sde_hw_mdss.h +++ b/drivers/gpu/drm/msm/sde/sde_hw_mdss.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -41,13 +41,29 @@ #define SDE_MAX_DE_CURVES 3 #endif -#define SDE_FORMAT_FLAG_YUV (1 << 0) -#define SDE_FORMAT_FLAG_DX (1 << 1) +enum sde_format_flags { + SDE_FORMAT_FLAG_YUV_BIT, + SDE_FORMAT_FLAG_DX_BIT, + SDE_FORMAT_FLAG_COMPRESSED_BIT, + SDE_FORMAT_FLAG_BIT_MAX, +}; -#define SDE_FORMAT_IS_YUV(X) ((X)->flag & SDE_FORMAT_FLAG_YUV) -#define SDE_FORMAT_IS_DX(X) ((X)->flag & SDE_FORMAT_FLAG_DX) +#define SDE_FORMAT_FLAG_YUV BIT(SDE_FORMAT_FLAG_YUV_BIT) +#define SDE_FORMAT_FLAG_DX BIT(SDE_FORMAT_FLAG_DX_BIT) +#define SDE_FORMAT_FLAG_COMPRESSED BIT(SDE_FORMAT_FLAG_COMPRESSED_BIT) +#define SDE_FORMAT_IS_YUV(X) \ + (test_bit(SDE_FORMAT_FLAG_YUV_BIT, (X)->flag)) +#define SDE_FORMAT_IS_DX(X) \ + (test_bit(SDE_FORMAT_FLAG_DX_BIT, (X)->flag)) #define SDE_FORMAT_IS_LINEAR(X) ((X)->fetch_mode == SDE_FETCH_LINEAR) -#define SDE_FORMAT_IS_UBWC(X) ((X)->fetch_mode == SDE_FETCH_UBWC) +#define SDE_FORMAT_IS_TILE(X) \ + (((X)->fetch_mode == SDE_FETCH_UBWC) && \ + !test_bit(SDE_FORMAT_FLAG_COMPRESSED_BIT, (X)->flag)) +#define SDE_FORMAT_IS_UBWC(X) \ + (((X)->fetch_mode == SDE_FETCH_UBWC) && \ + test_bit(SDE_FORMAT_FLAG_COMPRESSED_BIT, (X)->flag)) + +#define TO_S15D16(_x_) ((_x_) << 7) #define SDE_BLEND_FG_ALPHA_FG_CONST (0 << 0) #define SDE_BLEND_FG_ALPHA_BG_CONST (1 << 0) @@ -325,6 +341,15 @@ enum sde_3d_blend_mode { BLEND_3D_MAX }; +enum sde_csc_type { + SDE_CSC_RGB2YUV_601L, + SDE_CSC_RGB2YUV_601FR, + SDE_CSC_RGB2YUV_709L, + SDE_CSC_RGB2YUV_2020L, + SDE_CSC_RGB2YUV_2020FR, + SDE_MAX_CSC +}; + /** struct sde_format - defines the format configuration which * allows SDE HW to correctly fetch and decode the format * @base: base msm_format struture containing fourcc code @@ -357,7 +382,7 @@ struct sde_format { u8 alpha_enable; u8 num_planes; enum sde_fetch_type fetch_mode; - u32 flag; + DECLARE_BITMAP(flag, SDE_FORMAT_FLAG_BIT_MAX); u16 tile_width; u16 tile_height; }; diff --git a/drivers/gpu/drm/msm/sde/sde_hw_sspp.c b/drivers/gpu/drm/msm/sde/sde_hw_sspp.c index 882a1c84e9a2..ea2890d776ae 100644 --- a/drivers/gpu/drm/msm/sde/sde_hw_sspp.c +++ b/drivers/gpu/drm/msm/sde/sde_hw_sspp.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -755,14 +755,17 @@ static void sde_hw_sspp_setup_csc(struct sde_hw_pipe *ctx, struct sde_csc_cfg *data) { u32 idx; + bool csc10 = false; if (_sspp_subblk_offset(ctx, SDE_SSPP_CSC, &idx) || !data) return; - if (test_bit(SDE_SSPP_CSC_10BIT, &ctx->cap->features)) + if (test_bit(SDE_SSPP_CSC_10BIT, &ctx->cap->features)) { idx += CSC_10BIT_OFFSET; + csc10 = true; + } - sde_hw_csc_setup(&ctx->hw, idx, data); + sde_hw_csc_setup(&ctx->hw, idx, data, csc10); } static void sde_hw_sspp_setup_sharpening(struct sde_hw_pipe *ctx, diff --git a/drivers/gpu/drm/msm/sde/sde_hw_util.c b/drivers/gpu/drm/msm/sde/sde_hw_util.c index 6f52f31a7569..b899f0c2f71c 100644 --- a/drivers/gpu/drm/msm/sde/sde_hw_util.c +++ b/drivers/gpu/drm/msm/sde/sde_hw_util.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -42,9 +42,10 @@ u32 *sde_hw_util_get_log_mask_ptr(void) void sde_hw_csc_setup(struct sde_hw_blk_reg_map *c, u32 csc_reg_off, - struct sde_csc_cfg *data) + struct sde_csc_cfg *data, bool csc10) { static const u32 matrix_shift = 7; + u32 clamp_shift = csc10 ? 16 : 8; u32 val; /* matrix coeff - convert S15.16 to S4.9 */ @@ -64,19 +65,19 @@ void sde_hw_csc_setup(struct sde_hw_blk_reg_map *c, SDE_REG_WRITE(c, csc_reg_off + 0x10, val); /* Pre clamp */ - val = (data->csc_pre_lv[0] << 8) | data->csc_pre_lv[1]; + val = (data->csc_pre_lv[0] << clamp_shift) | data->csc_pre_lv[1]; SDE_REG_WRITE(c, csc_reg_off + 0x14, val); - val = (data->csc_pre_lv[2] << 8) | data->csc_pre_lv[3]; + val = (data->csc_pre_lv[2] << clamp_shift) | data->csc_pre_lv[3]; SDE_REG_WRITE(c, csc_reg_off + 0x18, val); - val = (data->csc_pre_lv[4] << 8) | data->csc_pre_lv[5]; + val = (data->csc_pre_lv[4] << clamp_shift) | data->csc_pre_lv[5]; SDE_REG_WRITE(c, csc_reg_off + 0x1c, val); /* Post clamp */ - val = (data->csc_post_lv[0] << 8) | data->csc_post_lv[1]; + val = (data->csc_post_lv[0] << clamp_shift) | data->csc_post_lv[1]; SDE_REG_WRITE(c, csc_reg_off + 0x20, val); - val = (data->csc_post_lv[2] << 8) | data->csc_post_lv[3]; + val = (data->csc_post_lv[2] << clamp_shift) | data->csc_post_lv[3]; SDE_REG_WRITE(c, csc_reg_off + 0x24, val); - val = (data->csc_post_lv[4] << 8) | data->csc_post_lv[5]; + val = (data->csc_post_lv[4] << clamp_shift) | data->csc_post_lv[5]; SDE_REG_WRITE(c, csc_reg_off + 0x28, val); /* Pre-Bias */ diff --git a/drivers/gpu/drm/msm/sde/sde_hw_util.h b/drivers/gpu/drm/msm/sde/sde_hw_util.h index a4d8be9de907..c38c22237a57 100644 --- a/drivers/gpu/drm/msm/sde/sde_hw_util.h +++ b/drivers/gpu/drm/msm/sde/sde_hw_util.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -49,7 +49,7 @@ void *sde_hw_util_get_dir(void); void sde_hw_csc_setup(struct sde_hw_blk_reg_map *c, u32 csc_reg_off, - struct sde_csc_cfg *data); + struct sde_csc_cfg *data, bool csc10); #endif /* _SDE_HW_UTIL_H */ diff --git a/drivers/gpu/drm/msm/sde/sde_kms.c b/drivers/gpu/drm/msm/sde/sde_kms.c index 544fef90ef6b..526f7912d2f1 100644 --- a/drivers/gpu/drm/msm/sde/sde_kms.c +++ b/drivers/gpu/drm/msm/sde/sde_kms.c @@ -328,24 +328,12 @@ static int sde_debugfs_danger_init(struct sde_kms *sde_kms, static int sde_kms_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc) { - struct sde_kms *sde_kms = to_sde_kms(kms); - struct drm_device *dev = sde_kms->dev; - struct msm_drm_private *priv = dev->dev_private; - - sde_power_resource_enable(&priv->phandle, sde_kms->core_client, true); - return sde_crtc_vblank(crtc, true); } static void sde_kms_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc) { - struct sde_kms *sde_kms = to_sde_kms(kms); - struct drm_device *dev = sde_kms->dev; - struct msm_drm_private *priv = dev->dev_private; - sde_crtc_vblank(crtc, false); - - sde_power_resource_enable(&priv->phandle, sde_kms->core_client, false); } static void sde_kms_prepare_commit(struct msm_kms *kms, @@ -355,6 +343,9 @@ static void sde_kms_prepare_commit(struct msm_kms *kms, struct drm_device *dev = sde_kms->dev; struct msm_drm_private *priv = dev->dev_private; + if (sde_kms->splash_info.handoff) + sde_splash_clean_up_exit_lk(kms); + sde_power_resource_enable(&priv->phandle, sde_kms->core_client, true); } @@ -601,6 +592,10 @@ static int _sde_kms_setup_displays(struct drm_device *dev, .mode_valid = sde_hdmi_mode_valid, .get_info = sde_hdmi_get_info, .set_property = sde_hdmi_set_property, + .get_property = sde_hdmi_get_property, + .pre_kickoff = sde_hdmi_pre_kickoff, + .mode_needs_full_range = sde_hdmi_mode_needs_full_range, + .get_csc_type = sde_hdmi_get_csc_type }; struct msm_display_info info = {0}; struct drm_encoder *encoder; @@ -995,8 +990,15 @@ static void _sde_kms_hw_destroy(struct sde_kms *sde_kms, sde_hw_catalog_deinit(sde_kms->catalog); sde_kms->catalog = NULL; + if (sde_kms->splash_info.handoff) { + if (sde_kms->core_client) + sde_splash_destroy(&sde_kms->splash_info, + &priv->phandle, sde_kms->core_client); + } + if (sde_kms->core_client) - sde_power_client_destroy(&priv->phandle, sde_kms->core_client); + sde_power_client_destroy(&priv->phandle, + sde_kms->core_client); sde_kms->core_client = NULL; if (sde_kms->vbif[VBIF_NRT]) @@ -1108,6 +1110,24 @@ static int _sde_kms_mmu_init(struct sde_kms *sde_kms) continue; } + /* Attaching smmu means IOMMU HW starts to work immediately. + * However, display HW in LK is still accessing memory + * while the memory map is not done yet. + * So first set DOMAIN_ATTR_EARLY_MAP attribute 1 to bypass + * stage 1 translation in IOMMU HW. + */ + if ((i == MSM_SMMU_DOMAIN_UNSECURE) && + sde_kms->splash_info.handoff) { + ret = mmu->funcs->set_property(mmu, + DOMAIN_ATTR_EARLY_MAP, + &sde_kms->splash_info.handoff); + if (ret) { + SDE_ERROR("failed to set map att: %d\n", ret); + mmu->funcs->destroy(mmu); + goto fail; + } + } + aspace = msm_gem_smmu_address_space_create(sde_kms->dev->dev, mmu, "sde"); if (IS_ERR(aspace)) { @@ -1125,6 +1145,19 @@ static int _sde_kms_mmu_init(struct sde_kms *sde_kms) goto fail; } + /* + * It's safe now to map the physical memory blcok LK accesses. 
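+	 * The unsecure SMMU domain is attached at this point, so
+	 * sde_splash_smmu_map() can install the mapping for the reserved
+	 * splash region, and display fetches should keep resolving once the
+	 * EARLY_MAP bypass is eventually dropped.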
+ */ + if ((i == MSM_SMMU_DOMAIN_UNSECURE) && + sde_kms->splash_info.handoff) { + ret = sde_splash_smmu_map(sde_kms->dev, mmu, + &sde_kms->splash_info); + if (ret) { + SDE_ERROR("map rsv mem failed: %d\n", ret); + msm_gem_address_space_put(aspace); + goto fail; + } + } } return 0; @@ -1139,6 +1172,7 @@ static int sde_kms_hw_init(struct msm_kms *kms) struct sde_kms *sde_kms; struct drm_device *dev; struct msm_drm_private *priv; + struct sde_splash_info *sinfo; int i, rc = -EINVAL; if (!kms) { @@ -1228,6 +1262,35 @@ static int sde_kms_hw_init(struct msm_kms *kms) goto power_error; } + /* + * Read the DISP_INTF_SEL register to check + * whether early display is enabled in LK. + */ + rc = sde_splash_get_handoff_status(kms); + if (rc) { + SDE_ERROR("get early splash status failed: %d\n", rc); + goto power_error; + } + + /* + * when LK has enabled early display, sde_splash_parse_dt and + * sde_splash_init must be called. The first function is to parse the + * mandatory memory node for splash function, and the second function + * will first do bandwidth voting job, because display hardware is now + * accessing AHB data bus, otherwise device reboot will happen, and then + * to check if the memory is reserved. + */ + sinfo = &sde_kms->splash_info; + if (sinfo->handoff) { + rc = sde_splash_parse_dt(dev); + if (rc) { + SDE_ERROR("parse dt for splash info failed: %d\n", rc); + goto power_error; + } + + sde_splash_init(&priv->phandle, kms); + } + for (i = 0; i < sde_kms->catalog->vbif_count; i++) { u32 vbif_idx = sde_kms->catalog->vbif[i].id; @@ -1302,7 +1365,10 @@ static int sde_kms_hw_init(struct msm_kms *kms) */ dev->mode_config.allow_fb_modifiers = true; - sde_power_resource_enable(&priv->phandle, sde_kms->core_client, false); + if (!sde_kms->splash_info.handoff) + sde_power_resource_enable(&priv->phandle, + sde_kms->core_client, false); + return 0; drm_obj_init_err: diff --git a/drivers/gpu/drm/msm/sde/sde_kms.h b/drivers/gpu/drm/msm/sde/sde_kms.h index 44f6be959ac9..d929e48a3fe8 100644 --- a/drivers/gpu/drm/msm/sde/sde_kms.h +++ b/drivers/gpu/drm/msm/sde/sde_kms.h @@ -34,6 +34,7 @@ #include "sde_power_handle.h" #include "sde_irq.h" #include "sde_core_perf.h" +#include "sde_splash.h" #define DRMID(x) ((x) ? (x)->base.id : -1) @@ -157,6 +158,9 @@ struct sde_kms { bool has_danger_ctrl; void **hdmi_displays; int hdmi_display_count; + + /* splash handoff structure */ + struct sde_splash_info splash_info; }; struct vsync_info { diff --git a/drivers/gpu/drm/msm/sde/sde_plane.c b/drivers/gpu/drm/msm/sde/sde_plane.c index 3ccad0c0c08d..6e2ccfa8e428 100644 --- a/drivers/gpu/drm/msm/sde/sde_plane.c +++ b/drivers/gpu/drm/msm/sde/sde_plane.c @@ -34,6 +34,14 @@ #include "sde_plane.h" #include "sde_color_processing.h" +static bool suspend_blank = true; +module_param(suspend_blank, bool, 0400); +MODULE_PARM_DESC(suspend_blank, + "If set, active planes will force their outputs to black,\n" + "by temporarily enabling the color fill, when recovering\n" + "from a system resume instead of attempting to display the\n" + "last provided frame buffer."); + #define SDE_DEBUG_PLANE(pl, fmt, ...) SDE_DEBUG("plane%d " fmt,\ (pl) ? 
(pl)->base.base.id : -1, ##__VA_ARGS__) @@ -138,6 +146,7 @@ struct sde_plane { struct sde_debugfs_regset32 debugfs_src; struct sde_debugfs_regset32 debugfs_scaler; struct sde_debugfs_regset32 debugfs_csc; + bool debugfs_default_scale; }; #define to_sde_plane(x) container_of(x, struct sde_plane, base) @@ -147,6 +156,20 @@ static bool sde_plane_enabled(struct drm_plane_state *state) return state && state->fb && state->crtc; } +static struct sde_kms *_sde_plane_get_kms(struct drm_plane *plane) +{ + struct msm_drm_private *priv; + + if (!plane || !plane->dev) + return NULL; + + priv = plane->dev->dev_private; + if (!priv) + return NULL; + + return to_sde_kms(priv->kms); +} + /** * _sde_plane_calc_fill_level - calculate fill level of the given source format * @plane: Pointer to drm plane @@ -582,12 +605,62 @@ int sde_plane_wait_input_fence(struct drm_plane *plane, uint32_t wait_ms) return ret; } +/** + * _sde_plane_get_aspace: gets the address space based on the + * fb_translation mode property + */ +static int _sde_plane_get_aspace( + struct sde_plane *psde, + struct sde_plane_state *pstate, + struct msm_gem_address_space **aspace) +{ + struct sde_kms *kms; + int mode; + + if (!psde || !pstate || !aspace) { + SDE_ERROR("invalid parameters\n"); + return -EINVAL; + } + + kms = _sde_plane_get_kms(&psde->base); + if (!kms) { + SDE_ERROR("invalid kms\n"); + return -EINVAL; + } + + mode = sde_plane_get_property(pstate, + PLANE_PROP_FB_TRANSLATION_MODE); + + switch (mode) { + case SDE_DRM_FB_NON_SEC: + *aspace = kms->aspace[MSM_SMMU_DOMAIN_UNSECURE]; + if (!aspace) + return -EINVAL; + break; + case SDE_DRM_FB_SEC: + *aspace = kms->aspace[MSM_SMMU_DOMAIN_SECURE]; + if (!aspace) + return -EINVAL; + break; + case SDE_DRM_FB_SEC_DIR_TRANS: + case SDE_DRM_FB_NON_SEC_DIR_TRANS: + *aspace = NULL; + break; + default: + SDE_ERROR("invalid fb_translation mode:%d\n", mode); + return -EFAULT; + } + + return 0; +} + static inline void _sde_plane_set_scanout(struct sde_phy_plane *pp, struct sde_plane_state *pstate, struct sde_hw_pipe_cfg *pipe_cfg, struct drm_framebuffer *fb) { struct sde_plane *psde; + struct msm_gem_address_space *aspace = NULL; int ret; if (!pp || !pstate || !pipe_cfg || !fb) { @@ -603,7 +676,13 @@ static inline void _sde_plane_set_scanout(struct sde_phy_plane *pp, return; } - ret = sde_format_populate_layout(psde->aspace, fb, &pipe_cfg->layout); + ret = _sde_plane_get_aspace(psde, pstate, &aspace); + if (ret) { + SDE_ERROR_PLANE(psde, "Failed to get aspace %d\n", ret); + return; + } + + ret = sde_format_populate_layout(aspace, fb, &pipe_cfg->layout); if (ret == -EAGAIN) SDE_DEBUG_PLANE(psde, "not updating same src addrs\n"); else if (ret) @@ -616,9 +695,20 @@ static int _sde_plane_setup_scaler3_lut(struct sde_phy_plane *pp, struct sde_plane_state *pstate) { struct sde_plane *psde = pp->sde_plane; - struct sde_hw_scaler3_cfg *cfg = pp->scaler3_cfg; + struct sde_hw_scaler3_cfg *cfg; int ret = 0; + if (!pp || !pp->scaler3_cfg) { + SDE_ERROR("invalid args\n"); + return -EINVAL; + } else if (!pstate) { + /* pstate is expected to be null on forced color fill */ + SDE_DEBUG("null pstate\n"); + return -EINVAL; + } + + cfg = pp->scaler3_cfg; + cfg->dir_lut = msm_property_get_blob( &psde->property_info, pstate->property_blobs, &cfg->dir_len, @@ -653,6 +743,7 @@ static void _sde_plane_setup_scaler3(struct sde_phy_plane *pp, } memset(scale_cfg, 0, sizeof(*scale_cfg)); + memset(&pp->pixel_ext, 0, sizeof(struct sde_hw_pixel_ext)); decimated = DECIMATED_DIMENSION(src_w, pp->pipe_cfg.horz_decimation); @@ 
-1000,7 +1091,8 @@ static void _sde_plane_setup_scaler(struct sde_phy_plane *pp, int error; error = _sde_plane_setup_scaler3_lut(pp, pstate); - if (error || !pp->pixel_ext_usr) { + if (error || !pp->pixel_ext_usr || + psde->debugfs_default_scale) { memset(pe, 0, sizeof(struct sde_hw_pixel_ext)); /* calculate default config for QSEED3 */ _sde_plane_setup_scaler3(pp, @@ -1011,7 +1103,8 @@ static void _sde_plane_setup_scaler(struct sde_phy_plane *pp, pp->scaler3_cfg, fmt, chroma_subsmpl_h, chroma_subsmpl_v); } - } else if (!pp->pixel_ext_usr) { + } else if (!pp->pixel_ext_usr || !pstate || + psde->debugfs_default_scale) { uint32_t deci_dim, i; /* calculate default configuration for QSEED2 */ @@ -1132,9 +1225,9 @@ static int _sde_plane_color_fill(struct sde_phy_plane *pp, } static int _sde_plane_mode_set(struct drm_plane *plane, - struct drm_plane_state *state) + struct drm_plane_state *state) { - uint32_t nplanes, src_flags; + uint32_t nplanes, src_flags = 0x0; struct sde_plane *psde; struct sde_plane_state *pstate; const struct sde_format *fmt; @@ -1145,6 +1238,7 @@ static int _sde_plane_mode_set(struct drm_plane *plane, int idx; struct sde_phy_plane *pp; uint32_t num_of_phy_planes = 0, maxlinewidth = 0xFFFF; + int mode = 0; if (!plane) { SDE_ERROR("invalid plane\n"); @@ -1220,6 +1314,15 @@ static int _sde_plane_mode_set(struct drm_plane *plane, return 0; memset(&src, 0, sizeof(struct sde_rect)); + + /* update secure session flag */ + mode = sde_plane_get_property(pstate, + PLANE_PROP_FB_TRANSLATION_MODE); + if ((mode == SDE_DRM_FB_SEC) || + (mode == SDE_DRM_FB_SEC_DIR_TRANS)) + src_flags |= SDE_SSPP_SECURE_OVERLAY_SESSION; + + /* update roi config */ if (pstate->dirty & SDE_PLANE_DIRTY_RECTS) { POPULATE_RECT(&src, state->src_x, state->src_y, @@ -1286,10 +1389,11 @@ static int _sde_plane_mode_set(struct drm_plane *plane, pp->scaler3_cfg); } - if ((pstate->dirty & SDE_PLANE_DIRTY_FORMAT) && + if (((pstate->dirty & SDE_PLANE_DIRTY_FORMAT) || + (src_flags & + SDE_SSPP_SECURE_OVERLAY_SESSION)) && pp->pipe_hw->ops.setup_format) { - src_flags = 0x0; - SDE_DEBUG_PLANE(psde, "rotation 0x%llX\n", + SDE_DEBUG_PLANE(psde, "rotation 0x%llX\n", sde_plane_get_property(pstate, PLANE_PROP_ROTATION)); if (sde_plane_get_property(pstate, PLANE_PROP_ROTATION) & BIT(DRM_REFLECT_X)) @@ -1344,10 +1448,23 @@ static int sde_plane_prepare_fb(struct drm_plane *plane, { struct drm_framebuffer *fb = new_state->fb; struct sde_plane *psde = to_sde_plane(plane); + struct sde_plane_state *pstate; + int rc; + + if (!psde || !new_state) + return -EINVAL; if (!new_state->fb) return 0; + pstate = to_sde_plane_state(new_state); + rc = _sde_plane_get_aspace(psde, pstate, &psde->aspace); + + if (rc) { + SDE_ERROR_PLANE(psde, "Failed to get aspace %d\n", rc); + return rc; + } + SDE_DEBUG_PLANE(psde, "FB[%u]\n", fb->base.id); return msm_framebuffer_prepare(fb, psde->aspace); } @@ -1607,8 +1724,8 @@ void sde_plane_flush(struct drm_plane *plane) */ list_for_each_entry(pp, &psde->phy_plane_head, phy_plane_list) { if (psde->is_error) - /* force white frame with 0% alpha pipe output on error */ - _sde_plane_color_fill(pp, 0xFFFFFF, 0x0); + /* force white frame with 100% alpha pipe output on error */ + _sde_plane_color_fill(pp, 0xFFFFFF, 0xFF); else if (pp->color_fill & SDE_PLANE_COLOR_FILL_FLAG) /* force 100% alpha */ _sde_plane_color_fill(pp, pp->color_fill, 0xFF); @@ -1617,6 +1734,10 @@ void sde_plane_flush(struct drm_plane *plane) pp->pipe_hw->ops.setup_csc(pp->pipe_hw, pp->csc_ptr); } + /* force black color fill during suspend */ + 
if (msm_is_suspend_state(plane->dev) && suspend_blank) + _sde_plane_color_fill(pp, 0x0, 0x0); + /* flag h/w flush complete */ if (plane->state) to_sde_plane_state(plane->state)->pending = false; @@ -1813,10 +1934,12 @@ static void _sde_plane_install_properties(struct drm_plane *plane, BIT(DRM_REFLECT_X) | BIT(DRM_REFLECT_Y), PLANE_PROP_ROTATION); msm_property_install_enum(&psde->property_info, "blend_op", 0x0, 0, - e_blend_op, ARRAY_SIZE(e_blend_op), PLANE_PROP_BLEND_OP); + e_blend_op, ARRAY_SIZE(e_blend_op), PLANE_PROP_BLEND_OP, + SDE_DRM_BLEND_OP_PREMULTIPLIED); msm_property_install_enum(&psde->property_info, "src_config", 0x0, 1, - e_src_config, ARRAY_SIZE(e_src_config), PLANE_PROP_SRC_CONFIG); + e_src_config, ARRAY_SIZE(e_src_config), PLANE_PROP_SRC_CONFIG, + 0); list_for_each_entry(pp, &psde->phy_plane_head, phy_plane_list) { if (pp->pipe_hw->ops.setup_solidfill) @@ -1879,7 +2002,7 @@ static void _sde_plane_install_properties(struct drm_plane *plane, 0x0, 0, e_fb_translation_mode, ARRAY_SIZE(e_fb_translation_mode), - PLANE_PROP_FB_TRANSLATION_MODE); + PLANE_PROP_FB_TRANSLATION_MODE, SDE_DRM_FB_NON_SEC); } static inline void _sde_plane_set_csc_v1(struct sde_phy_plane *pp, @@ -2486,6 +2609,10 @@ static void _sde_plane_init_debugfs(struct sde_plane *psde, sde_debugfs_create_regset32("scaler_blk", S_IRUGO, psde->debugfs_root, &psde->debugfs_scaler); + debugfs_create_bool("default_scaling", + 0644, + psde->debugfs_root, + &psde->debugfs_default_scale); sde_debugfs_setup_regset32(&psde->debugfs_csc, sblk->csc_blk.base + cfg->base, @@ -2631,7 +2758,6 @@ struct drm_plane *sde_plane_init(struct drm_device *dev, /* cache local stuff for later */ plane = &psde->base; - psde->aspace = kms->aspace[MSM_SMMU_DOMAIN_UNSECURE]; INIT_LIST_HEAD(&psde->phy_plane_head); diff --git a/drivers/gpu/drm/msm/sde/sde_splash.c b/drivers/gpu/drm/msm/sde/sde_splash.c new file mode 100644 index 000000000000..19e6406600cd --- /dev/null +++ b/drivers/gpu/drm/msm/sde/sde_splash.c @@ -0,0 +1,682 @@ +/* + * Copyright (c) 2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ +#include +#include +#include + +#include "msm_drv.h" +#include "msm_mmu.h" +#include "sde_kms.h" +#include "sde_hw_mdss.h" +#include "sde_hw_util.h" +#include "sde_hw_intf.h" +#include "sde_hw_catalog.h" +#include "dsi_display.h" + +#define MDP_SSPP_TOP0_OFF 0x1000 +#define DISP_INTF_SEL 0x004 +#define SPLIT_DISPLAY_EN 0x2F4 + +/* scratch registers */ +#define SCRATCH_REGISTER_0 0x014 +#define SCRATCH_REGISTER_1 0x018 +#define SCRATCH_REGISTER_2 0x01C + +#define SDE_LK_RUNNING_VALUE 0xC001CAFE +#define SDE_LK_SHUT_DOWN_VALUE 0xDEADDEAD +#define SDE_LK_EXIT_VALUE 0xDEADBEEF + +#define SDE_LK_EXIT_MAX_LOOP 20 + +static DEFINE_MUTEX(sde_splash_lock); + +/* + * In order to free reseved memory from bootup, and we are not + * able to call the __init free functions, so we need to free + * this memory by ourselves using the free_reserved_page() function. 
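+ * free_reserved_page() clears the page's reserved flag and releases it to
+ * the buddy allocator, so once handoff completes the regions reserved by
+ * the bootloader become ordinary system memory.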
+ */ +static void _sde_splash_free_bootup_memory_to_system(phys_addr_t phys, + size_t size) +{ + unsigned long pfn_start, pfn_end, pfn_idx; + + memblock_free(phys, size); + + pfn_start = phys >> PAGE_SHIFT; + pfn_end = (phys + size) >> PAGE_SHIFT; + + for (pfn_idx = pfn_start; pfn_idx < pfn_end; pfn_idx++) + free_reserved_page(pfn_to_page(pfn_idx)); +} + +static int _sde_splash_parse_dt_get_lk_pool_node(struct drm_device *dev, + struct sde_splash_info *sinfo) +{ + struct device_node *parent, *node; + struct resource r; + int ret = 0; + + if (!sinfo) + return -EINVAL; + + parent = of_find_node_by_path("/reserved-memory"); + if (!parent) + return -EINVAL; + + node = of_find_node_by_name(parent, "lk_pool"); + if (!node) { + SDE_ERROR("mem reservation for lk_pool is not presented\n"); + ret = -EINVAL; + goto parent_node_err; + } + + /* find the mode */ + if (of_address_to_resource(node, 0, &r)) { + ret = -EINVAL; + goto child_node_err; + } + + sinfo->lk_pool_paddr = (dma_addr_t)r.start; + sinfo->lk_pool_size = r.end - r.start; + + DRM_INFO("lk_pool: addr:%pK, size:%pK\n", + (void *)sinfo->lk_pool_paddr, + (void *)sinfo->lk_pool_size); + +child_node_err: + of_node_put(node); + +parent_node_err: + of_node_put(parent); + + return ret; +} + +static int _sde_splash_parse_dt_get_display_node(struct drm_device *dev, + struct sde_splash_info *sinfo) +{ + unsigned long size = 0; + dma_addr_t start; + struct device_node *node; + int ret = 0, i = 0, len = 0; + + /* get reserved memory for display module */ + if (of_get_property(dev->dev->of_node, "contiguous-region", &len)) + sinfo->splash_mem_num = len / sizeof(u32); + else + sinfo->splash_mem_num = 0; + + sinfo->splash_mem_paddr = + kmalloc(sizeof(phys_addr_t) * sinfo->splash_mem_num, + GFP_KERNEL); + if (!sinfo->splash_mem_paddr) { + SDE_ERROR("alloc splash_mem_paddr failed\n"); + return -ENOMEM; + } + + sinfo->splash_mem_size = + kmalloc(sizeof(size_t) * sinfo->splash_mem_num, + GFP_KERNEL); + if (!sinfo->splash_mem_size) { + SDE_ERROR("alloc splash_mem_size failed\n"); + goto error; + } + + sinfo->obj = kmalloc(sizeof(struct drm_gem_object *) * + sinfo->splash_mem_num, GFP_KERNEL); + if (!sinfo->obj) { + SDE_ERROR("construct splash gem objects failed\n"); + goto error; + } + + for (i = 0; i < sinfo->splash_mem_num; i++) { + node = of_parse_phandle(dev->dev->of_node, + "contiguous-region", i); + + if (node) { + struct resource r; + + ret = of_address_to_resource(node, 0, &r); + if (ret) + return ret; + + size = r.end - r.start; + start = (dma_addr_t)r.start; + + sinfo->splash_mem_paddr[i] = start; + sinfo->splash_mem_size[i] = size; + + DRM_INFO("blk: %d, addr:%pK, size:%pK\n", + i, (void *)sinfo->splash_mem_paddr[i], + (void *)sinfo->splash_mem_size[i]); + + of_node_put(node); + } + } + + return ret; + +error: + kfree(sinfo->splash_mem_paddr); + sinfo->splash_mem_paddr = NULL; + + kfree(sinfo->splash_mem_size); + sinfo->splash_mem_size = NULL; + + return -ENOMEM; +} + +static bool _sde_splash_lk_check(struct sde_hw_intr *intr) +{ + return (SDE_LK_RUNNING_VALUE == SDE_REG_READ(&intr->hw, + SCRATCH_REGISTER_1)) ? true : false; +} + +/** + * _sde_splash_notify_lk_to_exit. + * + * Function to monitor LK's status and tell it to exit. 
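+ * The handshake uses SCRATCH_REGISTER_1: LK advertises itself with
+ * SDE_LK_RUNNING_VALUE, the kernel requests shutdown by writing
+ * SDE_LK_SHUT_DOWN_VALUE, and LK acknowledges with SDE_LK_EXIT_VALUE,
+ * which is polled for below up to SDE_LK_EXIT_MAX_LOOP times, 20 ms apart.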
+ */ +static void _sde_splash_notify_lk_exit(struct sde_hw_intr *intr) +{ + int i = 0; + + /* first is to write exit signal to scratch register*/ + SDE_REG_WRITE(&intr->hw, SCRATCH_REGISTER_1, SDE_LK_SHUT_DOWN_VALUE); + + while ((SDE_LK_EXIT_VALUE != + SDE_REG_READ(&intr->hw, SCRATCH_REGISTER_1)) && + (++i < SDE_LK_EXIT_MAX_LOOP)) { + DRM_INFO("wait for LK's exit"); + msleep(20); + } + + if (i == SDE_LK_EXIT_MAX_LOOP) + SDE_ERROR("Loop LK's exit failed\n"); +} + +static int _sde_splash_gem_new(struct drm_device *dev, + struct sde_splash_info *sinfo) +{ + int i, ret; + + for (i = 0; i < sinfo->splash_mem_num; i++) { + sinfo->obj[i] = msm_gem_new(dev, + sinfo->splash_mem_size[i], MSM_BO_UNCACHED); + + if (IS_ERR(sinfo->obj[i])) { + ret = PTR_ERR(sinfo->obj[i]); + SDE_ERROR("failed to allocate gem, ret=%d\n", ret); + goto error; + } + } + + return 0; + +error: + for (i = 0; i < sinfo->splash_mem_num; i++) { + if (sinfo->obj[i]) + msm_gem_free_object(sinfo->obj[i]); + sinfo->obj[i] = NULL; + } + + return ret; +} + +static int _sde_splash_get_pages(struct drm_gem_object *obj, phys_addr_t phys) +{ + struct msm_gem_object *msm_obj = to_msm_bo(obj); + struct page **p; + dma_addr_t paddr; + int npages = obj->size >> PAGE_SHIFT; + int i; + + p = drm_malloc_ab(npages, sizeof(struct page *)); + if (!p) + return -ENOMEM; + + paddr = phys; + + for (i = 0; i < npages; i++) { + p[i] = phys_to_page(paddr); + paddr += PAGE_SIZE; + } + + msm_obj->sgt = drm_prime_pages_to_sg(p, npages); + if (IS_ERR(msm_obj->sgt)) { + SDE_ERROR("failed to allocate sgt\n"); + return -ENOMEM; + } + + msm_obj->pages = p; + + return 0; +} + +static void _sde_splash_destroy_gem_object(struct msm_gem_object *msm_obj) +{ + if (msm_obj->pages) { + sg_free_table(msm_obj->sgt); + kfree(msm_obj->sgt); + drm_free_large(msm_obj->pages); + msm_obj->pages = NULL; + } +} + +static void _sde_splash_destroy_splash_node(struct sde_splash_info *sinfo) +{ + kfree(sinfo->splash_mem_paddr); + sinfo->splash_mem_paddr = NULL; + + kfree(sinfo->splash_mem_size); + sinfo->splash_mem_size = NULL; +} + +static void _sde_splash_get_connector_ref_cnt(struct sde_splash_info *sinfo, + u32 *hdmi_cnt, u32 *dsi_cnt) +{ + mutex_lock(&sde_splash_lock); + *hdmi_cnt = sinfo->hdmi_connector_cnt; + *dsi_cnt = sinfo->dsi_connector_cnt; + mutex_unlock(&sde_splash_lock); +} + +static int _sde_splash_free_resource(struct msm_mmu *mmu, + struct sde_splash_info *sinfo, enum splash_connector_type conn) +{ + struct msm_gem_object *msm_obj = to_msm_bo(sinfo->obj[conn]); + + if (!msm_obj) + return -EINVAL; + + if (mmu->funcs && mmu->funcs->unmap) + mmu->funcs->unmap(mmu, sinfo->splash_mem_paddr[conn], + msm_obj->sgt, NULL); + + _sde_splash_free_bootup_memory_to_system(sinfo->splash_mem_paddr[conn], + sinfo->splash_mem_size[conn]); + + _sde_splash_destroy_gem_object(msm_obj); + + return 0; +} + +__ref int sde_splash_init(struct sde_power_handle *phandle, struct msm_kms *kms) +{ + struct sde_kms *sde_kms; + struct sde_splash_info *sinfo; + int i = 0; + + if (!phandle || !kms) { + SDE_ERROR("invalid phandle/kms\n"); + return -EINVAL; + } + + sde_kms = to_sde_kms(kms); + sinfo = &sde_kms->splash_info; + + sinfo->dsi_connector_cnt = 0; + sinfo->hdmi_connector_cnt = 0; + + sde_power_data_bus_bandwidth_ctrl(phandle, + sde_kms->core_client, true); + + for (i = 0; i < sinfo->splash_mem_num; i++) { + if (!memblock_is_reserved(sinfo->splash_mem_paddr[i])) { + SDE_ERROR("failed to reserve memory\n"); + + /* withdraw the vote when failed. 
*/ + sde_power_data_bus_bandwidth_ctrl(phandle, + sde_kms->core_client, false); + + return -EINVAL; + } + } + + return 0; +} + +void sde_splash_destroy(struct sde_splash_info *sinfo, + struct sde_power_handle *phandle, + struct sde_power_client *pclient) +{ + struct msm_gem_object *msm_obj; + int i = 0; + + if (!sinfo || !phandle || !pclient) { + SDE_ERROR("invalid sde_kms/phandle/pclient\n"); + return; + } + + for (i = 0; i < sinfo->splash_mem_num; i++) { + msm_obj = to_msm_bo(sinfo->obj[i]); + + if (msm_obj) + _sde_splash_destroy_gem_object(msm_obj); + } + + sde_power_data_bus_bandwidth_ctrl(phandle, pclient, false); + + _sde_splash_destroy_splash_node(sinfo); +} + +/* + * sde_splash_parse_dt. + * In the function, it will parse and reserve two kinds of memory node. + * First is to get the reserved memory for display buffers. + * Second is to get the memory node LK's code stack is running on. + */ +int sde_splash_parse_dt(struct drm_device *dev) +{ + struct msm_drm_private *priv = dev->dev_private; + struct sde_kms *sde_kms; + struct sde_splash_info *sinfo; + + if (!priv || !priv->kms) { + SDE_ERROR("Invalid kms\n"); + return -EINVAL; + } + + sde_kms = to_sde_kms(priv->kms); + sinfo = &sde_kms->splash_info; + + if (_sde_splash_parse_dt_get_display_node(dev, sinfo)) { + SDE_ERROR("get display node failed\n"); + return -EINVAL; + } + + if (_sde_splash_parse_dt_get_lk_pool_node(dev, sinfo)) { + SDE_ERROR("get LK pool node failed\n"); + return -EINVAL; + } + + return 0; +} + +int sde_splash_get_handoff_status(struct msm_kms *kms) +{ + uint32_t intf_sel = 0; + uint32_t split_display = 0; + uint32_t num_of_display_on = 0; + uint32_t i = 0; + struct sde_kms *sde_kms = to_sde_kms(kms); + struct sde_rm *rm; + struct sde_hw_blk_reg_map *c; + struct sde_splash_info *sinfo; + struct sde_mdss_cfg *catalog; + + sinfo = &sde_kms->splash_info; + if (!sinfo) { + SDE_ERROR("%s(%d): invalid splash info\n", + __func__, __LINE__); + return -EINVAL; + } + + rm = &sde_kms->rm; + + if (!rm || !rm->hw_mdp) { + SDE_ERROR("invalid rm.\n"); + return -EINVAL; + } + + c = &rm->hw_mdp->hw; + if (c) { + intf_sel = SDE_REG_READ(c, DISP_INTF_SEL); + split_display = SDE_REG_READ(c, SPLIT_DISPLAY_EN); + } + + catalog = sde_kms->catalog; + + if (intf_sel != 0) { + for (i = 0; i < catalog->intf_count; i++) + if ((intf_sel >> i*8) & 0x000000FF) + num_of_display_on++; + + /* + * For split display enabled - DSI0, DSI1 interfaces are + * considered as single display. 
So decrement + * 'num_of_display_on' by 1 + */ + if (split_display) + num_of_display_on--; + } + + if (num_of_display_on) { + sinfo->handoff = true; + sinfo->program_scratch_regs = true; + sinfo->lk_is_exited = false; + } else { + sinfo->handoff = false; + sinfo->program_scratch_regs = false; + sinfo->lk_is_exited = true; + } + + return 0; +} + +int sde_splash_smmu_map(struct drm_device *dev, struct msm_mmu *mmu, + struct sde_splash_info *sinfo) +{ + struct msm_gem_object *msm_obj; + int i = 0, ret = 0; + + if (!mmu || !sinfo) + return -EINVAL; + + /* first is to construct drm_gem_objects for splash memory */ + if (_sde_splash_gem_new(dev, sinfo)) + return -ENOMEM; + + /* second is to contruct sgt table for calling smmu map */ + for (i = 0; i < sinfo->splash_mem_num; i++) { + if (_sde_splash_get_pages(sinfo->obj[i], + sinfo->splash_mem_paddr[i])) + return -ENOMEM; + } + + for (i = 0; i < sinfo->splash_mem_num; i++) { + msm_obj = to_msm_bo(sinfo->obj[i]); + + if (mmu->funcs && mmu->funcs->map) { + ret = mmu->funcs->map(mmu, sinfo->splash_mem_paddr[i], + msm_obj->sgt, IOMMU_READ | IOMMU_NOEXEC, NULL); + + if (!ret) { + SDE_ERROR("Map blk %d @%pK failed.\n", + i, (void *)sinfo->splash_mem_paddr[i]); + return ret; + } + } + } + + return ret ? 0 : -ENOMEM; +} + +void sde_splash_setup_connector_count(struct sde_splash_info *sinfo, + int connector_type) +{ + switch (connector_type) { + case DRM_MODE_CONNECTOR_HDMIA: + sinfo->hdmi_connector_cnt++; + break; + case DRM_MODE_CONNECTOR_DSI: + sinfo->dsi_connector_cnt++; + break; + default: + SDE_ERROR("invalid connector_type %d\n", connector_type); + } +} + +bool sde_splash_get_lk_complete_status(struct sde_splash_info *sinfo) +{ + bool ret = 0; + + mutex_lock(&sde_splash_lock); + ret = !sinfo->handoff && !sinfo->lk_is_exited; + mutex_unlock(&sde_splash_lock); + + return ret; +} + +int sde_splash_clean_up_free_resource(struct msm_kms *kms, + struct sde_power_handle *phandle, + int connector_type, void *display) +{ + struct sde_kms *sde_kms; + struct sde_splash_info *sinfo; + struct msm_mmu *mmu; + struct dsi_display *dsi_display = display; + int ret = 0; + int hdmi_conn_count = 0; + int dsi_conn_count = 0; + static const char *last_commit_display_type = "unknown"; + + if (!phandle || !kms) { + SDE_ERROR("invalid phandle/kms.\n"); + return -EINVAL; + } + + sde_kms = to_sde_kms(kms); + sinfo = &sde_kms->splash_info; + if (!sinfo) { + SDE_ERROR("%s(%d): invalid splash info\n", __func__, __LINE__); + return -EINVAL; + } + + _sde_splash_get_connector_ref_cnt(sinfo, &hdmi_conn_count, + &dsi_conn_count); + + mutex_lock(&sde_splash_lock); + if (hdmi_conn_count == 0 && dsi_conn_count == 0 && + !sinfo->lk_is_exited) { + /* When both hdmi's and dsi's handoff are finished, + * 1. Destroy splash node objects. + * 2. Release the memory which LK's stack is running on. + * 3. Withdraw AHB data bus bandwidth voting. 
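+ * The bandwidth vote withdrawn in step 3 is the one taken in
+ * sde_splash_init() via sde_power_data_bus_bandwidth_ctrl().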
+ */ + DRM_INFO("HDMI and DSI resource handoff is completed\n"); + + sinfo->lk_is_exited = true; + + _sde_splash_destroy_splash_node(sinfo); + + _sde_splash_free_bootup_memory_to_system(sinfo->lk_pool_paddr, + sinfo->lk_pool_size); + + sde_power_data_bus_bandwidth_ctrl(phandle, + sde_kms->core_client, false); + + mutex_unlock(&sde_splash_lock); + return 0; + } + + mmu = sde_kms->aspace[0]->mmu; + + switch (connector_type) { + case DRM_MODE_CONNECTOR_HDMIA: + if (sinfo->hdmi_connector_cnt == 1) { + sinfo->hdmi_connector_cnt--; + + ret = _sde_splash_free_resource(mmu, + sinfo, SPLASH_HDMI); + } + break; + case DRM_MODE_CONNECTOR_DSI: + /* + * Basically, we have commits coming on two DSI connectors. + * So when releasing DSI resource, it's ensured that the + * coming commits should happen on different DSIs, to promise + * the handoff has finished on the two DSIs, then it's safe + * to release DSI resource, otherwise, problem happens when + * freeing memory, while DSI0 or DSI1 is still visiting + * the memory. + */ + if (strcmp(dsi_display->display_type, "unknown") && + strcmp(last_commit_display_type, + dsi_display->display_type)) { + if (sinfo->dsi_connector_cnt > 1) + sinfo->dsi_connector_cnt--; + else if (sinfo->dsi_connector_cnt == 1) { + ret = _sde_splash_free_resource(mmu, + sinfo, SPLASH_DSI); + + sinfo->dsi_connector_cnt--; + } + + last_commit_display_type = dsi_display->display_type; + } + break; + default: + ret = -EINVAL; + SDE_ERROR("%s: invalid connector_type %d\n", + __func__, connector_type); + } + + mutex_unlock(&sde_splash_lock); + + return ret; +} + +/* + * In below function, it will + * 1. Notify LK to exit and wait for exiting is done. + * 2. Set DOMAIN_ATTR_EARLY_MAP to 1 to enable stage 1 translation in iommu. + */ +int sde_splash_clean_up_exit_lk(struct msm_kms *kms) +{ + struct sde_splash_info *sinfo; + struct msm_mmu *mmu; + struct sde_kms *sde_kms = to_sde_kms(kms); + int ret; + + sinfo = &sde_kms->splash_info; + + if (!sinfo) { + SDE_ERROR("%s(%d): invalid splash info\n", __func__, __LINE__); + return -EINVAL; + } + + /* Monitor LK's status and tell it to exit. */ + mutex_lock(&sde_splash_lock); + if (sinfo->program_scratch_regs) { + if (_sde_splash_lk_check(sde_kms->hw_intr)) + _sde_splash_notify_lk_exit(sde_kms->hw_intr); + + sinfo->handoff = false; + sinfo->program_scratch_regs = false; + } + mutex_unlock(&sde_splash_lock); + + if (!sde_kms->aspace[0] || !sde_kms->aspace[0]->mmu) { + /* We do not return fault value here, to ensure + * flag "lk_is_exited" is set. + */ + SDE_ERROR("invalid mmu\n"); + WARN_ON(1); + } else { + mmu = sde_kms->aspace[0]->mmu; + /* After LK has exited, set early domain map attribute + * to 1 to enable stage 1 translation in iommu driver. + */ + if (mmu->funcs && mmu->funcs->set_property) { + ret = mmu->funcs->set_property(mmu, + DOMAIN_ATTR_EARLY_MAP, &sinfo->handoff); + + if (ret) + SDE_ERROR("set_property failed\n"); + } + } + + return 0; +} diff --git a/drivers/gpu/drm/msm/sde/sde_splash.h b/drivers/gpu/drm/msm/sde/sde_splash.h new file mode 100644 index 000000000000..babf88335e49 --- /dev/null +++ b/drivers/gpu/drm/msm/sde/sde_splash.h @@ -0,0 +1,132 @@ +/** + * Copyright (c) 2017 The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ +#ifndef SDE_SPLASH_H_ +#define SDE_SPLASH_H_ + +#include "msm_kms.h" +#include "msm_mmu.h" + +enum splash_connector_type { + SPLASH_DSI = 0, + SPLASH_HDMI, +}; + +struct sde_splash_info { + /* handoff flag */ + bool handoff; + + /* flag of display scratch registers */ + bool program_scratch_regs; + + /* to indicate LK is totally exited */ + bool lk_is_exited; + + /* memory node used for display buffer */ + uint32_t splash_mem_num; + + /* physical address of memory node for display buffer */ + phys_addr_t *splash_mem_paddr; + + /* size of memory node */ + size_t *splash_mem_size; + + /* constructed gem objects for smmu mapping */ + struct drm_gem_object **obj; + + /* physical address of lk pool */ + phys_addr_t lk_pool_paddr; + + /* memory size of lk pool */ + size_t lk_pool_size; + + /* registered hdmi connector count */ + uint32_t hdmi_connector_cnt; + + /* registered dst connector count */ + uint32_t dsi_connector_cnt; +}; + +/* APIs for early splash handoff functions */ + +/** + * sde_splash_get_handoff_status. + * + * This function will read DISP_INTF_SEL regsiter to get + * the status of early splash. + */ +int sde_splash_get_handoff_status(struct msm_kms *kms); + +/** + * sde_splash_init + * + * This function will do bandwidth vote and reserved memory + */ +int sde_splash_init(struct sde_power_handle *phandle, struct msm_kms *kms); + +/** + *sde_splash_setup_connector_count + * + * To count connector numbers for DSI and HDMI respectively. + */ +void sde_splash_setup_connector_count(struct sde_splash_info *sinfo, + int connector_type); + +/** + * sde_splash_clean_up_exit_lk. + * + * Tell LK to exit, and clean up the resource. + */ +int sde_splash_clean_up_exit_lk(struct msm_kms *kms); + +/** + * sde_splash_clean_up_free_resource. + * + * According to input connector_type, free + * HDMI's and DSI's resource respectively. + */ +int sde_splash_clean_up_free_resource(struct msm_kms *kms, + struct sde_power_handle *phandle, + int connector_type, void *display); + +/** + * sde_splash_parse_dt. + * + * Parse reserved memory block from DT for early splash. + */ +int sde_splash_parse_dt(struct drm_device *dev); + +/** + * sde_splash_smmu_map. + * + * Map the physical memory LK visited into iommu driver. + */ +int sde_splash_smmu_map(struct drm_device *dev, struct msm_mmu *mmu, + struct sde_splash_info *sinfo); + +/** + * sde_splash_destroy + * + * Destroy the resource in failed case. + */ +void sde_splash_destroy(struct sde_splash_info *sinfo, + struct sde_power_handle *phandle, + struct sde_power_client *pclient); + +/** + * sde_splash_get_lk_complete_status + * + * Get LK's status to check if it has been stopped. 
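+ *
+ * For orientation, a typical handoff sequence as suggested by these APIs
+ * (illustrative only, actual call sites and error handling may differ):
+ *	sde_splash_parse_dt(dev);
+ *	sde_splash_init(phandle, kms);
+ *	sde_splash_get_handoff_status(kms);
+ *	sde_splash_smmu_map(dev, mmu, sinfo);
+ *	sde_splash_setup_connector_count(sinfo, connector_type);
+ *	sde_splash_clean_up_free_resource(kms, phandle, connector_type, display);
+ *	sde_splash_clean_up_exit_lk(kms);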
+ */ +bool sde_splash_get_lk_complete_status(struct sde_splash_info *sinfo); + +#endif diff --git a/drivers/gpu/drm/msm/sde_edid_parser.c b/drivers/gpu/drm/msm/sde_edid_parser.c index 69ab367307ea..68246253bb70 100644 --- a/drivers/gpu/drm/msm/sde_edid_parser.c +++ b/drivers/gpu/drm/msm/sde_edid_parser.c @@ -93,6 +93,21 @@ for ((i) = (start); \ (i) < (end) && (i) + sde_cea_db_payload_len(&(cea)[(i)]) < (end); \ (i) += sde_cea_db_payload_len(&(cea)[(i)]) + 1) +static bool sde_cea_db_is_hdmi_hf_vsdb(const u8 *db) +{ + int hdmi_id; + + if (sde_cea_db_tag(db) != VENDOR_SPECIFIC_DATA_BLOCK) + return false; + + if (sde_cea_db_payload_len(db) < 7) + return false; + + hdmi_id = db[1] | (db[2] << 8) | (db[3] << 16); + + return hdmi_id == HDMI_IEEE_OUI_HF; +} + static u8 *sde_edid_find_extended_tag_block(struct edid *edid, int blk_id) { u8 *db = NULL; @@ -214,10 +229,17 @@ u32 video_format) { u8 cea_mode = 0; struct drm_display_mode *mode; + u32 mode_fmt_flags = 0; /* Need to add Y420 support flag to the modes */ list_for_each_entry(mode, &connector->probed_modes, head) { + /* Cache the format flags before clearing */ + mode_fmt_flags = mode->flags; + /* Clear the RGB/YUV format flags before calling upstream API */ + mode->flags &= ~SDE_DRM_MODE_FLAG_FMT_MASK; cea_mode = drm_match_cea_mode(mode); + /* Restore the format flags */ + mode->flags = mode_fmt_flags; if ((cea_mode != 0) && (cea_mode == video_format)) { SDE_EDID_DEBUG("%s found match for %d ", __func__, video_format); @@ -231,7 +253,7 @@ struct drm_connector *connector, struct sde_edid_ctrl *edid_ctrl, const u8 *db) { u32 offset = 0; - u8 len = 0; + u8 cmdb_len = 0; u8 svd_len = 0; const u8 *svd = NULL; u32 i = 0, j = 0; @@ -247,10 +269,8 @@ const u8 *db) return; } SDE_EDID_DEBUG("%s +\n", __func__); - len = db[0] & 0x1f; + cmdb_len = db[0] & 0x1f; - if (len < 7) - return; /* Byte 3 to L+1 contain SVDs */ offset += 2; @@ -258,20 +278,24 @@ const u8 *db) if (svd) { /*moving to the next byte as vic info begins there*/ - ++svd; svd_len = svd[0] & 0x1f; + ++svd; } for (i = 0; i < svd_len; i++, j++) { - video_format = *svd & 0x7F; - if (db[offset] & (1 << j)) + video_format = *(svd + i) & 0x7F; + if (cmdb_len == 1) { + /* If cmdb_len is 1, it means all SVDs support YUV */ + sde_edid_set_y420_support(connector, video_format); + } else if (db[offset] & (1 << j)) { sde_edid_set_y420_support(connector, video_format); - if (j & 0x80) { - j = j/8; - offset++; - if (offset >= len) - break; + if (j & 0x80) { + j = j/8; + offset++; + if (offset >= cmdb_len) + break; + } } } @@ -339,6 +363,63 @@ struct drm_connector *connector, struct sde_edid_ctrl *edid_ctrl) SDE_EDID_DEBUG("%s -\n", __func__); } +static void _sde_edid_update_dc_modes( +struct drm_connector *connector, struct sde_edid_ctrl *edid_ctrl) +{ + int i, start, end; + u8 *edid_ext, *hdmi; + struct drm_display_info *disp_info; + u32 hdmi_dc_yuv_modes = 0; + + SDE_EDID_DEBUG("%s +\n", __func__); + + if (!connector || !edid_ctrl) { + SDE_ERROR("invalid input\n"); + return; + } + + disp_info = &connector->display_info; + + edid_ext = sde_find_cea_extension(edid_ctrl->edid); + + if (!edid_ext) { + SDE_ERROR("no cea extension\n"); + return; + } + + if (sde_cea_db_offsets(edid_ext, &start, &end)) + return; + + sde_for_each_cea_db(edid_ext, i, start, end) { + if (sde_cea_db_is_hdmi_hf_vsdb(&edid_ext[i])) { + + hdmi = &edid_ext[i]; + + if (sde_cea_db_payload_len(hdmi) < 7) + continue; + + if (hdmi[7] & DRM_EDID_YCBCR420_DC_30) { + hdmi_dc_yuv_modes |= DRM_EDID_YCBCR420_DC_30; + SDE_EDID_DEBUG("Y420 
30-bit supported\n"); + } + + if (hdmi[7] & DRM_EDID_YCBCR420_DC_36) { + hdmi_dc_yuv_modes |= DRM_EDID_YCBCR420_DC_36; + SDE_EDID_DEBUG("Y420 36-bit supported\n"); + } + + if (hdmi[7] & DRM_EDID_YCBCR420_DC_48) { + hdmi_dc_yuv_modes |= DRM_EDID_YCBCR420_DC_36; + SDE_EDID_DEBUG("Y420 48-bit supported\n"); + } + } + } + + disp_info->edid_hdmi_dc_modes |= hdmi_dc_yuv_modes; + + SDE_EDID_DEBUG("%s -\n", __func__); +} + static void _sde_edid_extract_audio_data_blocks( struct sde_edid_ctrl *edid_ctrl) { @@ -476,6 +557,7 @@ int _sde_edid_update_modes(struct drm_connector *connector, rc = drm_add_edid_modes(connector, edid_ctrl->edid); sde_edid_set_mode_format(connector, edid_ctrl); + _sde_edid_update_dc_modes(connector, edid_ctrl); SDE_EDID_DEBUG("%s -", __func__); return rc; } diff --git a/drivers/gpu/drm/msm/sde_edid_parser.h b/drivers/gpu/drm/msm/sde_edid_parser.h index 1143dc2c7bec..59e3dceca33c 100644 --- a/drivers/gpu/drm/msm/sde_edid_parser.h +++ b/drivers/gpu/drm/msm/sde_edid_parser.h @@ -33,6 +33,8 @@ #define SDE_CEA_EXT 0x02 #define SDE_EXTENDED_TAG 0x07 +#define SDE_DRM_MODE_FLAG_FMT_MASK (0x3 << 20) + enum extended_data_block_types { VIDEO_CAPABILITY_DATA_BLOCK = 0x0, VENDOR_SPECIFIC_VIDEO_DATA_BLOCK = 0x01, diff --git a/drivers/gpu/drm/msm/sde_hdcp.h b/drivers/gpu/drm/msm/sde_hdcp.h new file mode 100644 index 000000000000..49cca9399cb0 --- /dev/null +++ b/drivers/gpu/drm/msm/sde_hdcp.h @@ -0,0 +1,84 @@ +/* Copyright (c) 2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef __SDE_HDCP_H__ +#define __SDE_HDCP_H__ + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include "hdmi.h" +#include "sde_kms.h" +#include "sde_hdmi_util.h" + +#ifdef SDE_HDCP_DEBUG_ENABLE +#define SDE_HDCP_DEBUG(fmt, args...) SDE_ERROR(fmt, ##args) +#else +#define SDE_HDCP_DEBUG(fmt, args...) 
SDE_DEBUG(fmt, ##args) +#endif + +enum sde_hdcp_client_id { + HDCP_CLIENT_HDMI, + HDCP_CLIENT_DP, +}; + +enum sde_hdcp_states { + HDCP_STATE_INACTIVE, + HDCP_STATE_AUTHENTICATING, + HDCP_STATE_AUTHENTICATED, + HDCP_STATE_AUTH_FAIL, + HDCP_STATE_AUTH_ENC_NONE, + HDCP_STATE_AUTH_ENC_1X, + HDCP_STATE_AUTH_ENC_2P2 +}; + +struct sde_hdcp_init_data { + struct dss_io_data *core_io; + struct dss_io_data *qfprom_io; + struct dss_io_data *hdcp_io; + struct mutex *mutex; + struct workqueue_struct *workq; + void *cb_data; + void (*notify_status)(void *cb_data, enum sde_hdcp_states status); + struct sde_hdmi_tx_ddc_ctrl *ddc_ctrl; + u8 sink_rx_status; + u16 *version; + u32 phy_addr; + u32 hdmi_tx_ver; + bool sec_access; + enum sde_hdcp_client_id client_id; +}; + +struct sde_hdcp_ops { + int (*isr)(void *ptr); + int (*cp_irq)(void *ptr); + int (*reauthenticate)(void *input); + int (*authenticate)(void *hdcp_ctrl); + bool (*feature_supported)(void *input); + void (*off)(void *hdcp_ctrl); +}; + +void *sde_hdcp_1x_init(struct sde_hdcp_init_data *init_data); +void sde_hdcp_1x_deinit(void *input); +struct sde_hdcp_ops *sde_hdcp_1x_start(void *input); +void *sde_hdmi_hdcp2p2_init(struct sde_hdcp_init_data *init_data); +void sde_hdmi_hdcp2p2_deinit(void *input); +const char *sde_hdcp_state_name(enum sde_hdcp_states hdcp_state); +struct sde_hdcp_ops *sde_hdmi_hdcp2p2_start(void *input); +#endif /* __SDE_HDCP_H__ */ diff --git a/drivers/gpu/drm/msm/sde_hdcp_1x.c b/drivers/gpu/drm/msm/sde_hdcp_1x.c new file mode 100644 index 000000000000..3aba9e307732 --- /dev/null +++ b/drivers/gpu/drm/msm/sde_hdcp_1x.c @@ -0,0 +1,1722 @@ +/* Copyright (c) 2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#define pr_fmt(fmt) "%s: " fmt, __func__ + +#include +#include +#include +#include +#include +#include +#include +#include "sde_hdcp.h" +#include "sde_hdmi_util.h" +#include "video/msm_hdmi_hdcp_mgr.h" + +#define SDE_HDCP_STATE_NAME (sde_hdcp_state_name(hdcp->hdcp_state)) + +/* HDCP Keys state based on HDMI_HDCP_LINK0_STATUS:KEYS_STATE */ +#define HDCP_KEYS_STATE_NO_KEYS 0 +#define HDCP_KEYS_STATE_NOT_CHECKED 1 +#define HDCP_KEYS_STATE_CHECKING 2 +#define HDCP_KEYS_STATE_VALID 3 +#define HDCP_KEYS_STATE_AKSV_NOT_VALID 4 +#define HDCP_KEYS_STATE_CHKSUM_MISMATCH 5 +#define HDCP_KEYS_STATE_PROD_AKSV 6 +#define HDCP_KEYS_STATE_RESERVED 7 + +#define TZ_HDCP_CMD_ID 0x00004401 + +#define HDCP_INT_CLR (isr->auth_success_ack | isr->auth_fail_ack | \ + isr->auth_fail_info_ack | isr->tx_req_ack | \ + isr->encryption_ready_ack | \ + isr->encryption_not_ready_ack | isr->tx_req_done_ack) + +#define HDCP_INT_EN (isr->auth_success_mask | isr->auth_fail_mask | \ + isr->encryption_ready_mask | \ + isr->encryption_not_ready_mask) + +#define HDCP_POLL_SLEEP_US (20 * 1000) +#define HDCP_POLL_TIMEOUT_US (HDCP_POLL_SLEEP_US * 100) + +#define sde_hdcp_1x_state(x) (hdcp->hdcp_state == x) + +struct sde_hdcp_sink_addr { + char *name; + u32 addr; + u32 len; +}; + +struct sde_hdcp_1x_reg_data { + u32 reg_id; + struct sde_hdcp_sink_addr *sink; +}; + +struct sde_hdcp_skaddr_map { + /* addresses to read from sink */ + struct sde_hdcp_sink_addr bcaps; + struct sde_hdcp_sink_addr bksv; + struct sde_hdcp_sink_addr r0; + struct sde_hdcp_sink_addr bstatus; + struct sde_hdcp_sink_addr cp_irq_status; + struct sde_hdcp_sink_addr ksv_fifo; + struct sde_hdcp_sink_addr v_h0; + struct sde_hdcp_sink_addr v_h1; + struct sde_hdcp_sink_addr v_h2; + struct sde_hdcp_sink_addr v_h3; + struct sde_hdcp_sink_addr v_h4; + + /* addresses to write to sink */ + struct sde_hdcp_sink_addr an; + struct sde_hdcp_sink_addr aksv; + struct sde_hdcp_sink_addr ainfo; +}; + +struct sde_hdcp_int_set { + /* interrupt register */ + u32 int_reg; + + /* interrupt enable/disable masks */ + u32 auth_success_mask; + u32 auth_fail_mask; + u32 encryption_ready_mask; + u32 encryption_not_ready_mask; + u32 tx_req_mask; + u32 tx_req_done_mask; + + /* interrupt acknowledgment */ + u32 auth_success_ack; + u32 auth_fail_ack; + u32 auth_fail_info_ack; + u32 encryption_ready_ack; + u32 encryption_not_ready_ack; + u32 tx_req_ack; + u32 tx_req_done_ack; + + /* interrupt status */ + u32 auth_success_int; + u32 auth_fail_int; + u32 encryption_ready; + u32 encryption_not_ready; + u32 tx_req_int; + u32 tx_req_done_int; +}; + +struct sde_hdcp_reg_set { + u32 status; + u32 keys_offset; + u32 r0_offset; + u32 v_offset; + u32 ctrl; + u32 aksv_lsb; + u32 aksv_msb; + u32 entropy_ctrl0; + u32 entropy_ctrl1; + u32 sec_sha_ctrl; + u32 sec_sha_data; + u32 sha_status; + + u32 data2_0; + u32 data3; + u32 data4; + u32 data5; + u32 data6; + + u32 sec_data0; + u32 sec_data1; + u32 sec_data7; + u32 sec_data8; + u32 sec_data9; + u32 sec_data10; + u32 sec_data11; + u32 sec_data12; + + u32 reset; + u32 reset_bit; + + u32 repeater; +}; + +#define HDCP_REG_SET_CLIENT_HDMI \ + {HDMI_HDCP_LINK0_STATUS, 28, 24, 20, HDMI_HDCP_CTRL, \ + HDMI_HDCP_SW_LOWER_AKSV, HDMI_HDCP_SW_UPPER_AKSV, \ + HDMI_HDCP_ENTROPY_CTRL0, HDMI_HDCP_ENTROPY_CTRL1, \ + HDCP_SEC_TZ_HV_HLOS_HDCP_SHA_CTRL, \ + HDCP_SEC_TZ_HV_HLOS_HDCP_SHA_DATA, \ + HDMI_HDCP_SHA_STATUS, HDMI_HDCP_RCVPORT_DATA2_0, \ + HDMI_HDCP_RCVPORT_DATA3, HDMI_HDCP_RCVPORT_DATA4, \ + HDMI_HDCP_RCVPORT_DATA5, HDMI_HDCP_RCVPORT_DATA6, \ + 
HDCP_SEC_TZ_HV_HLOS_HDCP_RCVPORT_DATA0, \ + HDCP_SEC_TZ_HV_HLOS_HDCP_RCVPORT_DATA1, \ + HDCP_SEC_TZ_HV_HLOS_HDCP_RCVPORT_DATA7, \ + HDCP_SEC_TZ_HV_HLOS_HDCP_RCVPORT_DATA8, \ + HDCP_SEC_TZ_HV_HLOS_HDCP_RCVPORT_DATA9, \ + HDCP_SEC_TZ_HV_HLOS_HDCP_RCVPORT_DATA10, \ + HDCP_SEC_TZ_HV_HLOS_HDCP_RCVPORT_DATA11, \ + HDCP_SEC_TZ_HV_HLOS_HDCP_RCVPORT_DATA12, \ + HDMI_HDCP_RESET, BIT(0), BIT(6)} + +/* To do for DP */ +#define HDCP_REG_SET_CLIENT_DP \ + {0} + +#define HDCP_HDMI_SINK_ADDR_MAP \ + {{"bcaps", 0x40, 1}, {"bksv", 0x00, 5}, {"r0'", 0x08, 2}, \ + {"bstatus", 0x41, 2}, {"??", 0x0, 0}, {"ksv-fifo", 0x43, 0}, \ + {"v_h0", 0x20, 4}, {"v_h1", 0x24, 4}, {"v_h2", 0x28, 4}, \ + {"v_h3", 0x2c, 4}, {"v_h4", 0x30, 4}, {"an", 0x18, 8}, \ + {"aksv", 0x10, 5}, {"ainfo", 0x00, 0},} + +#define HDCP_DP_SINK_ADDR_MAP \ + {{"bcaps", 0x68028, 1}, {"bksv", 0x68000, 5}, {"r0'", 0x68005, 2}, \ + {"binfo", 0x6802A, 2}, {"cp_irq_status", 0x68029, 1}, \ + {"ksv-fifo", 0x6802C, 0}, {"v_h0", 0x68014, 4}, {"v_h1", 0x68018, 4}, \ + {"v_h2", 0x6801C, 4}, {"v_h3", 0x68020, 4}, {"v_h4", 0x68024, 4}, \ + {"an", 0x6800C, 8}, {"aksv", 0x68007, 5}, {"ainfo", 0x6803B, 1} } + +#define HDCP_HDMI_INT_SET \ + {HDMI_HDCP_INT_CTRL, \ + BIT(2), BIT(6), 0, 0, 0, 0, \ + BIT(1), BIT(5), BIT(7), 0, 0, 0, 0, \ + BIT(0), BIT(4), 0, 0, 0, 0} + +#define HDCP_DP_INT_SET \ + {DP_INTR_STATUS2, \ + BIT(17), BIT(20), BIT(24), BIT(27), 0, 0, \ + BIT(16), BIT(19), BIT(21), BIT(23), BIT(26), 0, 0, \ + BIT(15), BIT(18), BIT(22), BIT(25), 0, 0} + +struct sde_hdcp_1x { + u8 bcaps; + u32 tp_msgid; + u32 an_0, an_1, aksv_0, aksv_1; + bool sink_r0_ready; + bool reauth; + bool ksv_ready; + enum sde_hdcp_states hdcp_state; + struct HDCP_V2V1_MSG_TOPOLOGY cached_tp; + struct HDCP_V2V1_MSG_TOPOLOGY current_tp; + struct delayed_work hdcp_auth_work; + struct completion r0_checked; + struct completion sink_r0_available; + struct sde_hdcp_init_data init_data; + struct sde_hdcp_ops *ops; + struct sde_hdcp_reg_set reg_set; + struct sde_hdcp_int_set int_set; + struct sde_hdcp_skaddr_map sink_addr; + struct workqueue_struct *workq; +}; + +const char *sde_hdcp_state_name(enum sde_hdcp_states hdcp_state) +{ + switch (hdcp_state) { + case HDCP_STATE_INACTIVE: return "HDCP_STATE_INACTIVE"; + case HDCP_STATE_AUTHENTICATING: return "HDCP_STATE_AUTHENTICATING"; + case HDCP_STATE_AUTHENTICATED: return "HDCP_STATE_AUTHENTICATED"; + case HDCP_STATE_AUTH_FAIL: return "HDCP_STATE_AUTH_FAIL"; + default: return "???"; + } +} + +static int sde_hdcp_1x_count_one(u8 *array, u8 len) +{ + int i, j, count = 0; + + for (i = 0; i < len; i++) + for (j = 0; j < 8; j++) + count += (((array[i] >> j) & 0x1) ? 1 : 0); + return count; +} + +static void reset_hdcp_ddc_failures(struct sde_hdcp_1x *hdcp) +{ + int hdcp_ddc_ctrl1_reg; + int hdcp_ddc_status; + int failure; + int nack0; + struct dss_io_data *io; + + if (!hdcp || !hdcp->init_data.core_io) { + pr_err("invalid input\n"); + return; + } + + io = hdcp->init_data.core_io; + + /* Check for any DDC transfer failures */ + hdcp_ddc_status = DSS_REG_R(io, HDMI_HDCP_DDC_STATUS); + failure = (hdcp_ddc_status >> 16) & BIT(0); + nack0 = (hdcp_ddc_status >> 14) & BIT(0); + SDE_HDCP_DEBUG("%s: HDCP_DDC_STATUS=0x%x, FAIL=%d, NACK0=%d\n", + SDE_HDCP_STATE_NAME, hdcp_ddc_status, failure, nack0); + + if (failure) { + /* + * Indicates that the last HDCP HW DDC transfer failed. + * This occurs when a transfer is attempted with HDCP DDC + * disabled (HDCP_DDC_DISABLE=1) or the number of retries + * matches HDCP_DDC_RETRY_CNT. 
+ * Failure occurred, let's clear it. + */ + SDE_HDCP_DEBUG("%s: DDC failure HDCP_DDC_STATUS=0x%08x\n", + SDE_HDCP_STATE_NAME, hdcp_ddc_status); + + /* First, Disable DDC */ + DSS_REG_W(io, HDMI_HDCP_DDC_CTRL_0, BIT(0)); + + /* ACK the Failure to Clear it */ + hdcp_ddc_ctrl1_reg = DSS_REG_R(io, HDMI_HDCP_DDC_CTRL_1); + DSS_REG_W(io, HDMI_HDCP_DDC_CTRL_1, + hdcp_ddc_ctrl1_reg | BIT(0)); + + /* Check if the FAILURE got Cleared */ + hdcp_ddc_status = DSS_REG_R(io, HDMI_HDCP_DDC_STATUS); + hdcp_ddc_status = (hdcp_ddc_status >> 16) & BIT(0); + if (hdcp_ddc_status == 0x0) + SDE_HDCP_DEBUG("%s: HDCP DDC Failure cleared\n", + SDE_HDCP_STATE_NAME); + else + SDE_ERROR("%s: Unable to clear HDCP DDC Failure", + SDE_HDCP_STATE_NAME); + + /* Re-Enable HDCP DDC */ + DSS_REG_W(io, HDMI_HDCP_DDC_CTRL_0, 0); + } + + if (nack0) { + SDE_HDCP_DEBUG("%s: Before: HDMI_DDC_SW_STATUS=0x%08x\n", + SDE_HDCP_STATE_NAME, DSS_REG_R(io, HDMI_DDC_SW_STATUS)); + /* Reset HDMI DDC software status */ + DSS_REG_W_ND(io, HDMI_DDC_CTRL, + DSS_REG_R(io, HDMI_DDC_CTRL) | BIT(3)); + msleep(20); + DSS_REG_W_ND(io, HDMI_DDC_CTRL, + DSS_REG_R(io, HDMI_DDC_CTRL) & ~(BIT(3))); + + /* Reset HDMI DDC Controller */ + DSS_REG_W_ND(io, HDMI_DDC_CTRL, + DSS_REG_R(io, HDMI_DDC_CTRL) | BIT(1)); + msleep(20); + DSS_REG_W_ND(io, HDMI_DDC_CTRL, + DSS_REG_R(io, HDMI_DDC_CTRL) & ~BIT(1)); + SDE_HDCP_DEBUG("%s: After: HDMI_DDC_SW_STATUS=0x%08x\n", + SDE_HDCP_STATE_NAME, DSS_REG_R(io, HDMI_DDC_SW_STATUS)); + } + + hdcp_ddc_status = DSS_REG_R(io, HDMI_HDCP_DDC_STATUS); + + failure = (hdcp_ddc_status >> 16) & BIT(0); + nack0 = (hdcp_ddc_status >> 14) & BIT(0); + SDE_HDCP_DEBUG("%s: On Exit: HDCP_DDC_STATUS=0x%x, FAIL=%d, NACK0=%d\n", + SDE_HDCP_STATE_NAME, hdcp_ddc_status, failure, nack0); +} /* reset_hdcp_ddc_failures */ + +static void sde_hdcp_1x_hw_ddc_clean(struct sde_hdcp_1x *hdcp) +{ + struct dss_io_data *io = NULL; + u32 hdcp_ddc_status, ddc_hw_status; + u32 ddc_xfer_done, ddc_xfer_req; + u32 ddc_hw_req, ddc_hw_not_idle; + bool ddc_hw_not_ready, xfer_not_done, hw_not_done; + u32 timeout_count; + + if (!hdcp || !hdcp->init_data.core_io) { + pr_err("invalid input\n"); + return; + } + + io = hdcp->init_data.core_io; + if (!io->base) { + pr_err("core io not inititalized\n"); + return; + } + + /* Wait to be clean on DDC HW engine */ + timeout_count = 100; + do { + hdcp_ddc_status = DSS_REG_R(io, HDMI_HDCP_DDC_STATUS); + ddc_xfer_req = hdcp_ddc_status & BIT(4); + ddc_xfer_done = hdcp_ddc_status & BIT(10); + + ddc_hw_status = DSS_REG_R(io, HDMI_DDC_HW_STATUS); + ddc_hw_req = ddc_hw_status & BIT(16); + ddc_hw_not_idle = ddc_hw_status & (BIT(0) | BIT(1)); + + /* ddc transfer was requested but not completed */ + xfer_not_done = ddc_xfer_req && !ddc_xfer_done; + + /* ddc status is not idle or a hw request pending */ + hw_not_done = ddc_hw_not_idle || ddc_hw_req; + + ddc_hw_not_ready = xfer_not_done || hw_not_done; + + SDE_HDCP_DEBUG("%s: timeout count(%d): ddc hw%sready\n", + SDE_HDCP_STATE_NAME, timeout_count, + ddc_hw_not_ready ? 
" not " : " "); + SDE_HDCP_DEBUG("hdcp_ddc_status[0x%x], ddc_hw_status[0x%x]\n", + hdcp_ddc_status, ddc_hw_status); + if (ddc_hw_not_ready) + msleep(20); + } while (ddc_hw_not_ready && --timeout_count); +} /* hdcp_1x_hw_ddc_clean */ + +static int sde_hdcp_1x_load_keys(void *input) +{ + int rc = 0; + bool use_sw_keys = false; + u32 reg_val; + u32 ksv_lsb_addr, ksv_msb_addr; + u32 aksv_lsb, aksv_msb; + u8 aksv[5]; + struct dss_io_data *io; + struct dss_io_data *qfprom_io; + struct sde_hdcp_1x *hdcp = input; + struct sde_hdcp_reg_set *reg_set; + + if (!hdcp || !hdcp->init_data.core_io || + !hdcp->init_data.qfprom_io) { + pr_err("invalid input\n"); + rc = -EINVAL; + goto end; + } + + if (!sde_hdcp_1x_state(HDCP_STATE_INACTIVE) && + !sde_hdcp_1x_state(HDCP_STATE_AUTH_FAIL)) { + pr_err("%s: invalid state. returning\n", + SDE_HDCP_STATE_NAME); + rc = -EINVAL; + goto end; + } + + io = hdcp->init_data.core_io; + qfprom_io = hdcp->init_data.qfprom_io; + reg_set = &hdcp->reg_set; + + /* On compatible hardware, use SW keys */ + reg_val = DSS_REG_R(qfprom_io, SEC_CTRL_HW_VERSION); + if (reg_val >= HDCP_SEL_MIN_SEC_VERSION) { + reg_val = DSS_REG_R(qfprom_io, + QFPROM_RAW_FEAT_CONFIG_ROW0_MSB + + QFPROM_RAW_VERSION_4); + + if (!(reg_val & BIT(23))) + use_sw_keys = true; + } + + if (use_sw_keys) { + if (hdcp1_set_keys(&aksv_msb, &aksv_lsb)) { + pr_err("setting hdcp SW keys failed\n"); + rc = -EINVAL; + goto end; + } + } else { + /* Fetch aksv from QFPROM, this info should be public. */ + ksv_lsb_addr = HDCP_KSV_LSB; + ksv_msb_addr = HDCP_KSV_MSB; + + if (hdcp->init_data.sec_access) { + ksv_lsb_addr += HDCP_KSV_VERSION_4_OFFSET; + ksv_msb_addr += HDCP_KSV_VERSION_4_OFFSET; + } + + aksv_lsb = DSS_REG_R(qfprom_io, ksv_lsb_addr); + aksv_msb = DSS_REG_R(qfprom_io, ksv_msb_addr); + } + + SDE_HDCP_DEBUG("%s: AKSV=%02x%08x\n", SDE_HDCP_STATE_NAME, + aksv_msb, aksv_lsb); + + aksv[0] = aksv_lsb & 0xFF; + aksv[1] = (aksv_lsb >> 8) & 0xFF; + aksv[2] = (aksv_lsb >> 16) & 0xFF; + aksv[3] = (aksv_lsb >> 24) & 0xFF; + aksv[4] = aksv_msb & 0xFF; + + /* check there are 20 ones in AKSV */ + if (sde_hdcp_1x_count_one(aksv, 5) != 20) { + pr_err("AKSV bit count failed\n"); + rc = -EINVAL; + goto end; + } + + DSS_REG_W(io, reg_set->aksv_lsb, aksv_lsb); + DSS_REG_W(io, reg_set->aksv_msb, aksv_msb); + + /* Setup seed values for random number An */ + DSS_REG_W(io, reg_set->entropy_ctrl0, 0xB1FFB0FF); + DSS_REG_W(io, reg_set->entropy_ctrl1, 0xF00DFACE); + + /* make sure hw is programmed */ + wmb(); + + /* enable hdcp engine */ + DSS_REG_W(io, reg_set->ctrl, 0x1); + + hdcp->hdcp_state = HDCP_STATE_AUTHENTICATING; +end: + return rc; +} + +static int sde_hdcp_1x_read(struct sde_hdcp_1x *hdcp, + struct sde_hdcp_sink_addr *sink, + u8 *buf, bool realign) +{ + u32 rc = 0; + struct sde_hdmi_tx_ddc_data *ddc_data; + struct sde_hdmi_tx_ddc_ctrl *ddc_ctrl; + + if (hdcp->init_data.client_id == HDCP_CLIENT_HDMI) { + reset_hdcp_ddc_failures(hdcp); + + ddc_ctrl = hdcp->init_data.ddc_ctrl; + ddc_data = &ddc_ctrl->ddc_data; + if (!ddc_data) { + SDE_ERROR("invalid ddc data\n"); + return -EINVAL; + } + memset(ddc_data, 0, sizeof(*ddc_data)); + ddc_data->dev_addr = 0x74; + ddc_data->offset = sink->addr; + ddc_data->data_buf = buf; + ddc_data->data_len = sink->len; + ddc_data->request_len = sink->len; + ddc_data->retry = 5; + ddc_data->what = sink->name; + ddc_data->retry_align = realign; + + rc = sde_hdmi_ddc_read((void *)hdcp->init_data.cb_data); + if (rc) + SDE_ERROR("%s: %s read failed\n", + SDE_HDCP_STATE_NAME, sink->name); + } else if 
(hdcp->init_data.client_id == HDCP_CLIENT_DP) { + /* To-do DP APIs go here */ + } + + return rc; +} + +static int sde_hdcp_1x_write(struct sde_hdcp_1x *hdcp, + struct sde_hdcp_sink_addr *sink, u8 *buf) +{ + int rc = 0; + struct sde_hdmi_tx_ddc_data *ddc_data; + struct sde_hdmi_tx_ddc_ctrl *ddc_ctrl; + + if (hdcp->init_data.client_id == HDCP_CLIENT_HDMI) { + ddc_ctrl = hdcp->init_data.ddc_ctrl; + ddc_data = &ddc_ctrl->ddc_data; + + if (!ddc_data) { + SDE_ERROR("invalid ddc data\n"); + return -EINVAL; + } + memset(ddc_data, 0, sizeof(*ddc_data)); + + ddc_data->dev_addr = 0x74; + ddc_data->offset = sink->addr; + ddc_data->data_buf = buf; + ddc_data->data_len = sink->len; + ddc_data->what = sink->name; + + rc = sde_hdmi_ddc_write((void *)hdcp->init_data.cb_data); + if (rc) + SDE_ERROR("%s: %s write failed\n", + SDE_HDCP_STATE_NAME, sink->name); + } else if (hdcp->init_data.client_id == HDCP_CLIENT_DP) { + /* To-do DP APIs go here */ + } + + return rc; +} + +static void sde_hdcp_1x_enable_interrupts(struct sde_hdcp_1x *hdcp) +{ + u32 intr_reg; + struct dss_io_data *io; + struct sde_hdcp_int_set *isr; + + io = hdcp->init_data.core_io; + isr = &hdcp->int_set; + + intr_reg = DSS_REG_R(io, isr->int_reg); + + intr_reg |= HDCP_INT_CLR | HDCP_INT_EN; + + DSS_REG_W(io, isr->int_reg, intr_reg); +} + +static int sde_hdcp_1x_read_bcaps(struct sde_hdcp_1x *hdcp) +{ + int rc; + struct sde_hdcp_reg_set *reg_set = &hdcp->reg_set; + struct dss_io_data *hdcp_io = hdcp->init_data.hdcp_io; + + if (!sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATING)) { + pr_err("invalid state\n"); + return -EINVAL; + } + + rc = sde_hdcp_1x_read(hdcp, &hdcp->sink_addr.bcaps, + &hdcp->bcaps, false); + if (IS_ERR_VALUE(rc)) { + pr_err("error reading bcaps\n"); + goto error; + } + + SDE_HDCP_DEBUG("bcaps read: 0x%x\n", hdcp->bcaps); + + hdcp->current_tp.ds_type = hdcp->bcaps & reg_set->repeater ? + DS_REPEATER : DS_RECEIVER; + + SDE_HDCP_DEBUG("ds: %s\n", hdcp->current_tp.ds_type == DS_REPEATER ? + "repeater" : "receiver"); + + /* Write BCAPS to the hardware */ + DSS_REG_W(hdcp_io, reg_set->sec_data12, hdcp->bcaps); +error: + return rc; +} + +static int sde_hdcp_1x_wait_for_hw_ready(struct sde_hdcp_1x *hdcp) +{ + int rc; + u32 link0_status; + struct sde_hdcp_reg_set *reg_set = &hdcp->reg_set; + struct dss_io_data *io = hdcp->init_data.core_io; + + if (!sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATING)) { + pr_err("invalid state\n"); + return -EINVAL; + } + + /* Wait for HDCP keys to be checked and validated */ + rc = readl_poll_timeout(io->base + reg_set->status, link0_status, + ((link0_status >> reg_set->keys_offset) & 0x7) + == HDCP_KEYS_STATE_VALID || + !sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATING), + HDCP_POLL_SLEEP_US, HDCP_POLL_TIMEOUT_US); + if (IS_ERR_VALUE(rc)) { + pr_err("key not ready\n"); + goto error; + } + + /* + * 1.1_Features turned off by default. + * No need to write AInfo since 1.1_Features is disabled. 
+ */ + DSS_REG_W(io, reg_set->data4, 0); + + /* Wait for An0 and An1 bit to be ready */ + rc = readl_poll_timeout(io->base + reg_set->status, link0_status, + (link0_status & (BIT(8) | BIT(9))) || + !sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATING), + HDCP_POLL_SLEEP_US, HDCP_POLL_TIMEOUT_US); + if (IS_ERR_VALUE(rc)) { + pr_err("An not ready\n"); + goto error; + } + + /* As per hardware recommendations, wait before reading An */ + msleep(20); +error: + if (!sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATING)) + rc = -EINVAL; + + return rc; +} + +static int sde_hdcp_1x_send_an_aksv_to_sink(struct sde_hdcp_1x *hdcp) +{ + int rc; + u8 an[8], aksv[5]; + + if (!sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATING)) { + pr_err("invalid state\n"); + return -EINVAL; + } + + an[0] = hdcp->an_0 & 0xFF; + an[1] = (hdcp->an_0 >> 8) & 0xFF; + an[2] = (hdcp->an_0 >> 16) & 0xFF; + an[3] = (hdcp->an_0 >> 24) & 0xFF; + an[4] = hdcp->an_1 & 0xFF; + an[5] = (hdcp->an_1 >> 8) & 0xFF; + an[6] = (hdcp->an_1 >> 16) & 0xFF; + an[7] = (hdcp->an_1 >> 24) & 0xFF; + + SDE_HDCP_DEBUG("an read: 0x%2x%2x%2x%2x%2x%2x%2x%2x\n", + an[7], an[6], an[5], an[4], an[3], an[2], an[1], an[0]); + + rc = sde_hdcp_1x_write(hdcp, &hdcp->sink_addr.an, an); + if (IS_ERR_VALUE(rc)) { + pr_err("error writing an to sink\n"); + goto error; + } + + /* Copy An and AKSV to byte arrays for transmission */ + aksv[0] = hdcp->aksv_0 & 0xFF; + aksv[1] = (hdcp->aksv_0 >> 8) & 0xFF; + aksv[2] = (hdcp->aksv_0 >> 16) & 0xFF; + aksv[3] = (hdcp->aksv_0 >> 24) & 0xFF; + aksv[4] = hdcp->aksv_1 & 0xFF; + + SDE_HDCP_DEBUG("aksv read: 0x%2x%2x%2x%2x%2x\n", + aksv[4], aksv[3], aksv[2], aksv[1], aksv[0]); + + rc = sde_hdcp_1x_write(hdcp, &hdcp->sink_addr.aksv, aksv); + if (IS_ERR_VALUE(rc)) { + pr_err("error writing aksv to sink\n"); + goto error; + } +error: + return rc; +} + +static int sde_hdcp_1x_read_an_aksv_from_hw(struct sde_hdcp_1x *hdcp) +{ + struct dss_io_data *io = hdcp->init_data.core_io; + struct sde_hdcp_reg_set *reg_set = &hdcp->reg_set; + + if (!sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATING)) { + pr_err("invalid state\n"); + return -EINVAL; + } + + hdcp->an_0 = DSS_REG_R(io, reg_set->data5); + if (hdcp->init_data.client_id == HDCP_CLIENT_DP) { + udelay(1); + hdcp->an_0 = DSS_REG_R(io, reg_set->data5); + } + + hdcp->an_1 = DSS_REG_R(io, reg_set->data6); + if (hdcp->init_data.client_id == HDCP_CLIENT_DP) { + udelay(1); + hdcp->an_1 = DSS_REG_R(io, reg_set->data6); + } + + /* Read AKSV */ + hdcp->aksv_0 = DSS_REG_R(io, reg_set->data3); + hdcp->aksv_1 = DSS_REG_R(io, reg_set->data4); + + return 0; +} + +static int sde_hdcp_1x_get_bksv_from_sink(struct sde_hdcp_1x *hdcp) +{ + int rc; + u8 *bksv = hdcp->current_tp.bksv; + u32 link0_bksv_0, link0_bksv_1; + struct sde_hdcp_reg_set *reg_set = &hdcp->reg_set; + struct dss_io_data *hdcp_io = hdcp->init_data.hdcp_io; + + rc = sde_hdcp_1x_read(hdcp, &hdcp->sink_addr.bksv, bksv, false); + if (IS_ERR_VALUE(rc)) { + pr_err("error reading bksv from sink\n"); + goto error; + } + + SDE_HDCP_DEBUG("bksv read: 0x%2x%2x%2x%2x%2x\n", + bksv[4], bksv[3], bksv[2], bksv[1], bksv[0]); + + /* check there are 20 ones in BKSV */ + if (sde_hdcp_1x_count_one(bksv, 5) != 20) { + pr_err("%s: BKSV doesn't have 20 1's and 20 0's\n", + SDE_HDCP_STATE_NAME); + rc = -EINVAL; + goto error; + } + + link0_bksv_0 = bksv[3]; + link0_bksv_0 = (link0_bksv_0 << 8) | bksv[2]; + link0_bksv_0 = (link0_bksv_0 << 8) | bksv[1]; + link0_bksv_0 = (link0_bksv_0 << 8) | bksv[0]; + link0_bksv_1 = bksv[4]; + + DSS_REG_W(hdcp_io, reg_set->sec_data0, link0_bksv_0); + 
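+	/* DATA0 took the low 32 bits of BKSV; bksv[4], the top byte, goes to DATA1 */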
DSS_REG_W(hdcp_io, reg_set->sec_data1, link0_bksv_1); +error: + return rc; +} + +static void sde_hdcp_1x_enable_sink_irq_hpd(struct sde_hdcp_1x *hdcp) +{ + int rc; + u8 enable_hpd_irq = 0x1; + + if (hdcp->current_tp.ds_type != DS_REPEATER) + return; + + rc = sde_hdcp_1x_write(hdcp, &hdcp->sink_addr.ainfo, &enable_hpd_irq); + if (IS_ERR_VALUE(rc)) + SDE_HDCP_DEBUG("error writing ainfo to sink\n"); +} + +static int sde_hdcp_1x_verify_r0(struct sde_hdcp_1x *hdcp) +{ + int rc, r0_retry = 3; + u8 buf[2]; + u32 link0_status, timeout_count; + u32 const r0_read_delay_us = 1; + u32 const r0_read_timeout_us = r0_read_delay_us * 10; + struct sde_hdcp_reg_set *reg_set = &hdcp->reg_set; + struct dss_io_data *io = hdcp->init_data.core_io; + + if (!sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATING)) { + pr_err("invalid state\n"); + return -EINVAL; + } + + /* Wait for HDCP R0 computation to be completed */ + rc = readl_poll_timeout(io->base + reg_set->status, link0_status, + (link0_status & BIT(reg_set->r0_offset)) || + !sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATING), + HDCP_POLL_SLEEP_US, HDCP_POLL_TIMEOUT_US); + if (IS_ERR_VALUE(rc)) { + pr_err("R0 not ready\n"); + goto error; + } + + /* + * HDCP Compliace Test case 1A-01: + * Wait here at least 100ms before reading R0' + */ + if (hdcp->init_data.client_id == HDCP_CLIENT_HDMI) { + msleep(100); + } else { + if (!hdcp->sink_r0_ready) { + reinit_completion(&hdcp->sink_r0_available); + timeout_count = wait_for_completion_timeout( + &hdcp->sink_r0_available, HZ / 2); + + if (hdcp->reauth) { + pr_err("sink R0 not ready\n"); + rc = -EINVAL; + goto error; + } + } + } + + do { + memset(buf, 0, sizeof(buf)); + + rc = sde_hdcp_1x_read(hdcp, &hdcp->sink_addr.r0, + buf, false); + if (IS_ERR_VALUE(rc)) { + pr_err("error reading R0' from sink\n"); + goto error; + } + + SDE_HDCP_DEBUG("sink R0'read: %2x%2x\n", buf[1], buf[0]); + + DSS_REG_W(io, reg_set->data2_0, (((u32)buf[1]) << 8) | buf[0]); + + rc = readl_poll_timeout(io->base + reg_set->status, + link0_status, (link0_status & BIT(12)) || + !sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATING), + r0_read_delay_us, r0_read_timeout_us); + } while (rc && --r0_retry); +error: + if (!sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATING)) + rc = -EINVAL; + + return rc; +} + +static int sde_hdcp_1x_authentication_part1(struct sde_hdcp_1x *hdcp) +{ + int rc; + + if (!sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATING)) { + pr_err("invalid state\n"); + return -EINVAL; + } + + sde_hdcp_1x_enable_interrupts(hdcp); + + rc = sde_hdcp_1x_read_bcaps(hdcp); + if (rc) + goto error; + + rc = sde_hdcp_1x_wait_for_hw_ready(hdcp); + if (rc) + goto error; + + rc = sde_hdcp_1x_read_an_aksv_from_hw(hdcp); + if (rc) + goto error; + + rc = sde_hdcp_1x_get_bksv_from_sink(hdcp); + if (rc) + goto error; + + rc = sde_hdcp_1x_send_an_aksv_to_sink(hdcp); + if (rc) + goto error; + + sde_hdcp_1x_enable_sink_irq_hpd(hdcp); + + rc = sde_hdcp_1x_verify_r0(hdcp); + if (rc) + goto error; + + pr_info("SUCCESSFUL\n"); + + return 0; +error: + pr_err("%s: FAILED\n", SDE_HDCP_STATE_NAME); + + return rc; +} + +static int sde_hdcp_1x_transfer_v_h(struct sde_hdcp_1x *hdcp) +{ + int rc = 0; + struct dss_io_data *io = hdcp->init_data.hdcp_io; + struct sde_hdcp_reg_set *reg_set = &hdcp->reg_set; + struct sde_hdcp_1x_reg_data reg_data[] = { + {reg_set->sec_data7, &hdcp->sink_addr.v_h0}, + {reg_set->sec_data8, &hdcp->sink_addr.v_h1}, + {reg_set->sec_data9, &hdcp->sink_addr.v_h2}, + {reg_set->sec_data10, &hdcp->sink_addr.v_h3}, + {reg_set->sec_data11, &hdcp->sink_addr.v_h4}, + }; + struct 
sde_hdcp_sink_addr sink = {"V", reg_data->sink->addr}; + u32 size = ARRAY_SIZE(reg_data); + u8 buf[0xFF] = {0}; + u32 i = 0, len = 0; + + if (!sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATING)) { + pr_err("invalid state\n"); + return -EINVAL; + } + + for (i = 0; i < size; i++) { + struct sde_hdcp_1x_reg_data *rd = reg_data + i; + + len += rd->sink->len; + } + + sink.len = len; + + rc = sde_hdcp_1x_read(hdcp, &sink, buf, false); + if (IS_ERR_VALUE(rc)) { + pr_err("error reading %s\n", sink.name); + goto end; + } + + + for (i = 0; i < size; i++) { + struct sde_hdcp_1x_reg_data *rd = reg_data + i; + u32 reg_data; + + memcpy(®_data, buf + (sizeof(u32) * i), sizeof(u32)); + DSS_REG_W(io, rd->reg_id, reg_data); + } +end: + return rc; +} + +static int sde_hdcp_1x_validate_downstream(struct sde_hdcp_1x *hdcp) +{ + int rc; + u8 buf[2] = {0, 0}; + u8 device_count, depth; + u8 max_cascade_exceeded, max_devs_exceeded; + u16 bstatus; + struct sde_hdcp_reg_set *reg_set = &hdcp->reg_set; + + if (!sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATING)) { + pr_err("invalid state\n"); + return -EINVAL; + } + + rc = sde_hdcp_1x_read(hdcp, &hdcp->sink_addr.bstatus, + buf, false); + if (IS_ERR_VALUE(rc)) { + pr_err("error reading bstatus\n"); + goto end; + } + + bstatus = buf[1]; + bstatus = (bstatus << 8) | buf[0]; + + device_count = bstatus & 0x7F; + + SDE_HDCP_DEBUG("device count %d\n", device_count); + + /* Cascaded repeater depth */ + depth = (bstatus >> 8) & 0x7; + SDE_HDCP_DEBUG("depth %d\n", depth); + + /* + * HDCP Compliance 1B-05: + * Check if no. of devices connected to repeater + * exceed max_devices_connected from bit 7 of Bstatus. + */ + max_devs_exceeded = (bstatus & BIT(7)) >> 7; + if (max_devs_exceeded == 0x01) { + pr_err("no. of devs connected exceed max allowed\n"); + rc = -EINVAL; + goto end; + } + + /* + * HDCP Compliance 1B-06: + * Check if no. of cascade connected to repeater + * exceed max_cascade_connected from bit 11 of Bstatus. + */ + max_cascade_exceeded = (bstatus & BIT(11)) >> 11; + if (max_cascade_exceeded == 0x01) { + pr_err("no. 
of cascade connections exceed max allowed\n"); + rc = -EINVAL; + goto end; + } + + /* Update topology information */ + hdcp->current_tp.dev_count = device_count; + hdcp->current_tp.max_cascade_exceeded = max_cascade_exceeded; + hdcp->current_tp.max_dev_exceeded = max_devs_exceeded; + hdcp->current_tp.depth = depth; + + DSS_REG_W(hdcp->init_data.hdcp_io, + reg_set->sec_data12, hdcp->bcaps | (bstatus << 8)); +end: + return rc; +} + +static int sde_hdcp_1x_read_ksv_fifo(struct sde_hdcp_1x *hdcp) +{ + u32 ksv_read_retry = 20, ksv_bytes, rc = 0; + u8 *ksv_fifo = hdcp->current_tp.ksv_list; + + if (!sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATING)) { + pr_err("invalid state\n"); + return -EINVAL; + } + + memset(ksv_fifo, 0, sizeof(hdcp->current_tp.ksv_list)); + + /* each KSV is 5 bytes long */ + ksv_bytes = 5 * hdcp->current_tp.dev_count; + hdcp->sink_addr.ksv_fifo.len = ksv_bytes; + + while (ksv_bytes && --ksv_read_retry) { + rc = sde_hdcp_1x_read(hdcp, &hdcp->sink_addr.ksv_fifo, + ksv_fifo, true); + if (IS_ERR_VALUE(rc)) + pr_err("could not read ksv fifo (%d)\n", + ksv_read_retry); + else + break; + } + + if (rc) + pr_err("error reading ksv_fifo\n"); + + return rc; +} + +static int sde_hdcp_1x_write_ksv_fifo(struct sde_hdcp_1x *hdcp) +{ + int i, rc = 0; + u8 *ksv_fifo = hdcp->current_tp.ksv_list; + u32 ksv_bytes = hdcp->sink_addr.ksv_fifo.len; + struct dss_io_data *io = hdcp->init_data.core_io; + struct dss_io_data *sec_io = hdcp->init_data.hdcp_io; + struct sde_hdcp_reg_set *reg_set = &hdcp->reg_set; + u32 sha_status = 0, status; + + if (!sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATING)) { + pr_err("invalid state\n"); + return -EINVAL; + } + + /* reset SHA Controller */ + DSS_REG_W(sec_io, reg_set->sec_sha_ctrl, 0x1); + DSS_REG_W(sec_io, reg_set->sec_sha_ctrl, 0x0); + + for (i = 0; i < ksv_bytes - 1; i++) { + /* Write KSV byte and do not set DONE bit[0] */ + DSS_REG_W_ND(sec_io, reg_set->sec_sha_data, ksv_fifo[i] << 16); + + /* + * Once 64 bytes have been written, we need to poll for + * HDCP_SHA_BLOCK_DONE before writing any further + */ + if (i && !((i + 1) % 64)) { + rc = readl_poll_timeout(io->base + reg_set->sha_status, + sha_status, (sha_status & BIT(0)) || + !sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATING), + HDCP_POLL_SLEEP_US, HDCP_POLL_TIMEOUT_US); + if (IS_ERR_VALUE(rc)) { + pr_err("block not done\n"); + goto error; + } + } + } + + /* Write l to DONE bit[0] */ + DSS_REG_W_ND(sec_io, reg_set->sec_sha_data, + (ksv_fifo[ksv_bytes - 1] << 16) | 0x1); + + /* Now wait for HDCP_SHA_COMP_DONE */ + rc = readl_poll_timeout(io->base + reg_set->sha_status, sha_status, + (sha_status & BIT(4)) || + !sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATING), + HDCP_POLL_SLEEP_US, HDCP_POLL_TIMEOUT_US); + if (IS_ERR_VALUE(rc)) { + pr_err("V computation not done\n"); + goto error; + } + + /* Wait for V_MATCHES */ + rc = readl_poll_timeout(io->base + reg_set->status, status, + (status & BIT(reg_set->v_offset)) || + !sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATING), + HDCP_POLL_SLEEP_US, HDCP_POLL_TIMEOUT_US); + if (IS_ERR_VALUE(rc)) { + pr_err("V mismatch\n"); + rc = -EINVAL; + } +error: + if (!sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATING)) + rc = -EINVAL; + + return rc; +} + +static int sde_hdcp_1x_wait_for_ksv_ready(struct sde_hdcp_1x *hdcp) +{ + int rc, timeout; + + if (!sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATING)) { + pr_err("invalid state\n"); + return -EINVAL; + } + + /* + * Wait until READY bit is set in BCAPS, as per HDCP specifications + * maximum permitted time to check for READY bit is five seconds. 
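+ * For HDMI, READY is BCAPS bit 5, polled below in 100 ms steps for up to
+ * 50 iterations; for DP, the sink's CP_IRQ status register is polled
+ * instead, with the same overall five second limit.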
+ */ + rc = sde_hdcp_1x_read(hdcp, &hdcp->sink_addr.bcaps, + &hdcp->bcaps, false); + if (IS_ERR_VALUE(rc)) { + pr_err("error reading bcaps\n"); + goto error; + } + + if (hdcp->init_data.client_id == HDCP_CLIENT_HDMI) { + timeout = 50; + + while (!(hdcp->bcaps & BIT(5)) && --timeout) { + rc = sde_hdcp_1x_read(hdcp, + &hdcp->sink_addr.bcaps, + &hdcp->bcaps, false); + if (IS_ERR_VALUE(rc) || + !sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATING)) { + pr_err("error reading bcaps\n"); + goto error; + } + msleep(100); + } + } else { + u8 cp_buf = 0; + struct sde_hdcp_sink_addr *sink = + &hdcp->sink_addr.cp_irq_status; + + timeout = jiffies_to_msecs(jiffies); + + while (1) { + rc = sde_hdcp_1x_read(hdcp, sink, &cp_buf, false); + if (rc) + goto error; + + if (cp_buf & BIT(0)) + break; + + /* max timeout of 5 sec as per hdcp 1.x spec */ + if (abs(timeout - jiffies_to_msecs(jiffies)) > 5000) { + timeout = 0; + break; + } + + if (hdcp->ksv_ready || hdcp->reauth || + !sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATING)) + break; + + /* re-read after a minimum delay */ + msleep(20); + } + } + + if (!timeout || hdcp->reauth || + !sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATING)) { + pr_err("DS KSV not ready\n"); + rc = -EINVAL; + } else { + hdcp->ksv_ready = true; + } +error: + return rc; +} + +static int sde_hdcp_1x_authentication_part2(struct sde_hdcp_1x *hdcp) +{ + int rc; + int v_retry = 3; + + rc = sde_hdcp_1x_validate_downstream(hdcp); + if (rc) + goto error; + + rc = sde_hdcp_1x_read_ksv_fifo(hdcp); + if (rc) + goto error; + + do { + rc = sde_hdcp_1x_transfer_v_h(hdcp); + if (rc) + goto error; + + /* do not proceed further if no device connected */ + if (!hdcp->current_tp.dev_count) + goto error; + + rc = sde_hdcp_1x_write_ksv_fifo(hdcp); + } while (--v_retry && rc); +error: + if (rc) { + pr_err("%s: FAILED\n", SDE_HDCP_STATE_NAME); + } else { + hdcp->hdcp_state = HDCP_STATE_AUTHENTICATED; + + pr_info("SUCCESSFUL\n"); + } + + return rc; +} + +static void sde_hdcp_1x_cache_topology(struct sde_hdcp_1x *hdcp) +{ + if (!hdcp || !hdcp->init_data.core_io) { + pr_err("invalid input\n"); + return; + } + + memcpy((void *)&hdcp->cached_tp, + (void *) &hdcp->current_tp, + sizeof(hdcp->cached_tp)); + hdcp1_cache_repeater_topology((void *)&hdcp->cached_tp); +} + +static void sde_hdcp_1x_notify_topology(void) +{ + hdcp1_notify_topology(); +} + +static void sde_hdcp_1x_update_auth_status(struct sde_hdcp_1x *hdcp) +{ + if (sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATED)) { + sde_hdcp_1x_cache_topology(hdcp); + sde_hdcp_1x_notify_topology(); + } + + if (hdcp->init_data.notify_status && + !sde_hdcp_1x_state(HDCP_STATE_INACTIVE)) { + hdcp->init_data.notify_status( + hdcp->init_data.cb_data, + hdcp->hdcp_state); + } +} + +static void sde_hdcp_1x_auth_work(struct work_struct *work) +{ + int rc; + struct delayed_work *dw = to_delayed_work(work); + struct sde_hdcp_1x *hdcp = container_of(dw, + struct sde_hdcp_1x, hdcp_auth_work); + struct dss_io_data *io; + + if (!hdcp) { + pr_err("invalid input\n"); + return; + } + + if (!sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATING)) { + pr_err("invalid state\n"); + return; + } + + hdcp->sink_r0_ready = false; + hdcp->reauth = false; + hdcp->ksv_ready = false; + + io = hdcp->init_data.core_io; + /* Enabling Software DDC for HDMI and REF timer for DP */ + if (hdcp->init_data.client_id == HDCP_CLIENT_HDMI) + DSS_REG_W_ND(io, HDMI_DDC_ARBITRATION, DSS_REG_R(io, + HDMI_DDC_ARBITRATION) & ~(BIT(4))); + else if (hdcp->init_data.client_id == HDCP_CLIENT_DP) { + /* To do for DP */ + } + + /* + * program hw to 
enable encryption as soon as + * authentication is successful. + */ + hdcp1_set_enc(true); + + rc = sde_hdcp_1x_authentication_part1(hdcp); + if (rc) + goto end; + + if (hdcp->current_tp.ds_type == DS_REPEATER) { + rc = sde_hdcp_1x_wait_for_ksv_ready(hdcp); + if (rc) + goto end; + } else { + hdcp->hdcp_state = HDCP_STATE_AUTHENTICATED; + goto end; + } + + hdcp->ksv_ready = false; + + rc = sde_hdcp_1x_authentication_part2(hdcp); + if (rc) + goto end; + + /* + * Disabling software DDC before going into part3 to make sure + * there is no Arbitration between software and hardware for DDC + */ + if (hdcp->init_data.client_id == HDCP_CLIENT_HDMI) + DSS_REG_W_ND(io, HDMI_DDC_ARBITRATION, DSS_REG_R(io, + HDMI_DDC_ARBITRATION) | (BIT(4))); +end: + if (rc && !sde_hdcp_1x_state(HDCP_STATE_INACTIVE)) + hdcp->hdcp_state = HDCP_STATE_AUTH_FAIL; + + sde_hdcp_1x_update_auth_status(hdcp); +} + +static int sde_hdcp_1x_authenticate(void *input) +{ + struct sde_hdcp_1x *hdcp = (struct sde_hdcp_1x *)input; + + if (!hdcp) { + pr_err("invalid input\n"); + return -EINVAL; + } + + flush_delayed_work(&hdcp->hdcp_auth_work); + + if (!sde_hdcp_1x_state(HDCP_STATE_INACTIVE)) { + pr_err("invalid state\n"); + return -EINVAL; + } + + if (!sde_hdcp_1x_load_keys(input)) { + + queue_delayed_work(hdcp->workq, + &hdcp->hdcp_auth_work, HZ/2); + } else { + hdcp->hdcp_state = HDCP_STATE_AUTH_FAIL; + sde_hdcp_1x_update_auth_status(hdcp); + } + + return 0; +} /* hdcp_1x_authenticate */ + +static int sde_hdcp_1x_reauthenticate(void *input) +{ + struct sde_hdcp_1x *hdcp = (struct sde_hdcp_1x *)input; + struct dss_io_data *io; + struct sde_hdcp_reg_set *reg_set; + struct sde_hdcp_int_set *isr; + u32 hdmi_hw_version; + u32 ret = 0, reg; + + if (!hdcp || !hdcp->init_data.core_io) { + pr_err("invalid input\n"); + return -EINVAL; + } + + io = hdcp->init_data.core_io; + reg_set = &hdcp->reg_set; + isr = &hdcp->int_set; + + if (!sde_hdcp_1x_state(HDCP_STATE_AUTH_FAIL)) { + pr_err("invalid state\n"); + return -EINVAL; + } + + if (hdcp->init_data.client_id == HDCP_CLIENT_HDMI) { + hdmi_hw_version = DSS_REG_R(io, HDMI_VERSION); + if (hdmi_hw_version >= 0x30030000) { + DSS_REG_W(io, HDMI_CTRL_SW_RESET, BIT(1)); + DSS_REG_W(io, HDMI_CTRL_SW_RESET, 0); + } + + /* Wait to be clean on DDC HW engine */ + sde_hdcp_1x_hw_ddc_clean(hdcp); + } + + /* Disable HDCP interrupts */ + DSS_REG_W(io, isr->int_reg, DSS_REG_R(io, isr->int_reg) & ~HDCP_INT_EN); + + reg = DSS_REG_R(io, reg_set->reset); + DSS_REG_W(io, reg_set->reset, reg | reg_set->reset_bit); + + /* Disable encryption and disable the HDCP block */ + DSS_REG_W(io, reg_set->ctrl, 0); + + DSS_REG_W(io, reg_set->reset, reg & ~reg_set->reset_bit); + + hdcp->hdcp_state = HDCP_STATE_INACTIVE; + sde_hdcp_1x_authenticate(hdcp); + + return ret; +} /* hdcp_1x_reauthenticate */ + +static void sde_hdcp_1x_off(void *input) +{ + struct sde_hdcp_1x *hdcp = (struct sde_hdcp_1x *)input; + struct dss_io_data *io; + struct sde_hdcp_reg_set *reg_set; + struct sde_hdcp_int_set *isr; + int rc = 0; + u32 reg; + + if (!hdcp || !hdcp->init_data.core_io) { + pr_err("invalid input\n"); + return; + } + + io = hdcp->init_data.core_io; + reg_set = &hdcp->reg_set; + isr = &hdcp->int_set; + + if (sde_hdcp_1x_state(HDCP_STATE_INACTIVE)) { + pr_err("invalid state\n"); + return; + } + + /* + * Disable HDCP interrupts. + * Also, need to set the state to inactive here so that any ongoing + * reauth works will know that the HDCP session has been turned off. 
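The ordering described in the comment above - publish the inactive state under the lock, wake any waiters, then synchronously cancel the delayed work - is the generic teardown pattern for a self-rescheduling worker. A rough sketch with hypothetical names, not code from this patch:

#include <linux/completion.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>

struct example_session {
        struct mutex lock;
        struct completion done;
        struct delayed_work work;
        bool active;
};

static void example_session_off(struct example_session *s)
{
        /* Publish the stopped state first so a running worker will not requeue itself. */
        mutex_lock(&s->lock);
        s->active = false;
        mutex_unlock(&s->lock);

        /* Unblock anything sleeping on the completion. */
        complete_all(&s->done);

        /* Wait for an in-flight work item; it sees active == false and bails out. */
        cancel_delayed_work_sync(&s->work);
}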
+ */ + mutex_lock(hdcp->init_data.mutex); + DSS_REG_W(io, isr->int_reg, + DSS_REG_R(io, isr->int_reg) & ~HDCP_INT_EN); + hdcp->hdcp_state = HDCP_STATE_INACTIVE; + mutex_unlock(hdcp->init_data.mutex); + + /* complete any wait pending */ + complete_all(&hdcp->sink_r0_available); + complete_all(&hdcp->r0_checked); + /* + * Cancel any pending auth/reauth attempts. + * If one is ongoing, this will wait for it to finish. + * No more reauthentiaction attempts will be scheduled since we + * set the currect state to inactive. + */ + rc = cancel_delayed_work_sync(&hdcp->hdcp_auth_work); + if (rc) + SDE_HDCP_DEBUG("%s: Deleted hdcp auth work\n", + SDE_HDCP_STATE_NAME); + + hdcp1_set_enc(false); + + reg = DSS_REG_R(io, reg_set->reset); + DSS_REG_W(io, reg_set->reset, reg | reg_set->reset_bit); + + /* Disable encryption and disable the HDCP block */ + DSS_REG_W(io, reg_set->ctrl, 0); + + DSS_REG_W(io, reg_set->reset, reg & ~reg_set->reset_bit); + + hdcp->sink_r0_ready = false; + + SDE_HDCP_DEBUG("%s: HDCP: Off\n", SDE_HDCP_STATE_NAME); +} /* hdcp_1x_off */ + +static int sde_hdcp_1x_isr(void *input) +{ + struct sde_hdcp_1x *hdcp = (struct sde_hdcp_1x *)input; + int rc = 0; + struct dss_io_data *io; + u32 hdcp_int_val; + struct sde_hdcp_reg_set *reg_set; + struct sde_hdcp_int_set *isr; + + if (!hdcp || !hdcp->init_data.core_io) { + pr_err("invalid input\n"); + rc = -EINVAL; + goto error; + } + + io = hdcp->init_data.core_io; + reg_set = &hdcp->reg_set; + isr = &hdcp->int_set; + + hdcp_int_val = DSS_REG_R(io, isr->int_reg); + + /* Ignore HDCP interrupts if HDCP is disabled */ + if (sde_hdcp_1x_state(HDCP_STATE_INACTIVE)) { + DSS_REG_W(io, isr->int_reg, hdcp_int_val | HDCP_INT_CLR); + return 0; + } + + if (hdcp_int_val & isr->auth_success_int) { + /* AUTH_SUCCESS_INT */ + DSS_REG_W(io, isr->int_reg, + (hdcp_int_val | isr->auth_success_ack)); + SDE_HDCP_DEBUG("%s: AUTH SUCCESS\n", SDE_HDCP_STATE_NAME); + + if (sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATING)) + complete_all(&hdcp->r0_checked); + } + + if (hdcp_int_val & isr->auth_fail_int) { + /* AUTH_FAIL_INT */ + u32 link_status = DSS_REG_R(io, reg_set->status); + + DSS_REG_W(io, isr->int_reg, + (hdcp_int_val | isr->auth_fail_ack)); + + SDE_HDCP_DEBUG("%s: AUTH FAIL, LINK0_STATUS=0x%08x\n", + SDE_HDCP_STATE_NAME, link_status); + + if (sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATED)) { + hdcp->hdcp_state = HDCP_STATE_AUTH_FAIL; + sde_hdcp_1x_update_auth_status(hdcp); + } else if (sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATING)) { + complete_all(&hdcp->r0_checked); + } + + /* Clear AUTH_FAIL_INFO as well */ + DSS_REG_W(io, isr->int_reg, + (hdcp_int_val | isr->auth_fail_info_ack)); + } + + if (hdcp_int_val & isr->tx_req_int) { + /* DDC_XFER_REQ_INT */ + DSS_REG_W(io, isr->int_reg, + (hdcp_int_val | isr->tx_req_ack)); + SDE_HDCP_DEBUG("%s: DDC_XFER_REQ_INT received\n", + SDE_HDCP_STATE_NAME); + } + + if (hdcp_int_val & isr->tx_req_done_int) { + /* DDC_XFER_DONE_INT */ + DSS_REG_W(io, isr->int_reg, + (hdcp_int_val | isr->tx_req_done_ack)); + SDE_HDCP_DEBUG("%s: DDC_XFER_DONE received\n", + SDE_HDCP_STATE_NAME); + } + + if (hdcp_int_val & isr->encryption_ready) { + /* Encryption enabled */ + DSS_REG_W(io, isr->int_reg, + (hdcp_int_val | isr->encryption_ready_ack)); + SDE_HDCP_DEBUG("%s: encryption ready received\n", + SDE_HDCP_STATE_NAME); + } + + if (hdcp_int_val & isr->encryption_not_ready) { + /* Encryption enabled */ + DSS_REG_W(io, isr->int_reg, + (hdcp_int_val | isr->encryption_not_ready_ack)); + SDE_HDCP_DEBUG("%s: encryption not ready received\n", + 
SDE_HDCP_STATE_NAME); + } + +error: + return rc; +} + +void sde_hdcp_1x_deinit(void *input) +{ + struct sde_hdcp_1x *hdcp = (struct sde_hdcp_1x *)input; + + if (!hdcp) { + pr_err("invalid input\n"); + return; + } + + if (hdcp->workq) + destroy_workqueue(hdcp->workq); + + kfree(hdcp); +} /* hdcp_1x_deinit */ + +static void sde_hdcp_1x_update_client_reg_set(struct sde_hdcp_1x *hdcp) +{ + + if (hdcp->init_data.client_id == HDCP_CLIENT_HDMI) { + struct sde_hdcp_reg_set reg_set = HDCP_REG_SET_CLIENT_HDMI; + struct sde_hdcp_skaddr_map sink_addr = HDCP_HDMI_SINK_ADDR_MAP; + struct sde_hdcp_int_set isr = HDCP_HDMI_INT_SET; + + hdcp->reg_set = reg_set; + hdcp->sink_addr = sink_addr; + hdcp->int_set = isr; + } else if (hdcp->init_data.client_id == HDCP_CLIENT_DP) { + /* TO DO for DP + * Will be filled later + */ + } +} + +static bool sde_hdcp_1x_is_cp_irq_raised(struct sde_hdcp_1x *hdcp) +{ + int ret; + u8 buf = 0; + struct sde_hdcp_sink_addr sink = {"irq", 0x201, 1}; + + ret = sde_hdcp_1x_read(hdcp, &sink, &buf, false); + if (IS_ERR_VALUE(ret)) + pr_err("error reading irq_vector\n"); + + return buf & BIT(2) ? true : false; +} + +static void sde_hdcp_1x_clear_cp_irq(struct sde_hdcp_1x *hdcp) +{ + int ret; + u8 buf = BIT(2); + struct sde_hdcp_sink_addr sink = {"irq", 0x201, 1}; + + ret = sde_hdcp_1x_write(hdcp, &sink, &buf); + if (IS_ERR_VALUE(ret)) + pr_err("error clearing irq_vector\n"); +} + +static int sde_hdcp_1x_cp_irq(void *input) +{ + struct sde_hdcp_1x *hdcp = (struct sde_hdcp_1x *)input; + u8 buf = 0; + int ret; + + if (!hdcp) { + pr_err("invalid input\n"); + goto irq_not_handled; + } + + if (!sde_hdcp_1x_is_cp_irq_raised(hdcp)) { + SDE_HDCP_DEBUG("cp_irq not raised\n"); + goto irq_not_handled; + } + + ret = sde_hdcp_1x_read(hdcp, &hdcp->sink_addr.cp_irq_status, + &buf, false); + if (IS_ERR_VALUE(ret)) { + pr_err("error reading cp_irq_status\n"); + goto irq_not_handled; + } + + if ((buf & BIT(2)) || (buf & BIT(3))) { + pr_err("%s\n", + buf & BIT(2) ? 
"LINK_INTEGRITY_FAILURE" : + "REAUTHENTICATION_REQUEST"); + + hdcp->reauth = true; + + if (!sde_hdcp_1x_state(HDCP_STATE_INACTIVE)) + hdcp->hdcp_state = HDCP_STATE_AUTH_FAIL; + + complete_all(&hdcp->sink_r0_available); + sde_hdcp_1x_update_auth_status(hdcp); + } else if (buf & BIT(1)) { + SDE_HDCP_DEBUG("R0' AVAILABLE\n"); + hdcp->sink_r0_ready = true; + complete_all(&hdcp->sink_r0_available); + } else if ((buf & BIT(0))) { + SDE_HDCP_DEBUG("KSVs READY\n"); + + hdcp->ksv_ready = true; + } else { + SDE_HDCP_DEBUG("spurious interrupt\n"); + } + + sde_hdcp_1x_clear_cp_irq(hdcp); + return 0; + +irq_not_handled: + return -EINVAL; +} + +void *sde_hdcp_1x_init(struct sde_hdcp_init_data *init_data) +{ + struct sde_hdcp_1x *hdcp = NULL; + char name[20]; + static struct sde_hdcp_ops ops = { + .isr = sde_hdcp_1x_isr, + .cp_irq = sde_hdcp_1x_cp_irq, + .reauthenticate = sde_hdcp_1x_reauthenticate, + .authenticate = sde_hdcp_1x_authenticate, + .off = sde_hdcp_1x_off + }; + + if (!init_data || !init_data->core_io || !init_data->qfprom_io || + !init_data->mutex || !init_data->notify_status || + !init_data->workq || !init_data->cb_data) { + pr_err("invalid input\n"); + goto error; + } + + if (init_data->sec_access && !init_data->hdcp_io) { + pr_err("hdcp_io required\n"); + goto error; + } + + hdcp = kzalloc(sizeof(*hdcp), GFP_KERNEL); + if (!hdcp) + goto error; + + hdcp->init_data = *init_data; + hdcp->ops = &ops; + + snprintf(name, sizeof(name), "hdcp_1x_%d", + hdcp->init_data.client_id); + + hdcp->workq = create_workqueue(name); + if (!hdcp->workq) { + pr_err("Error creating workqueue\n"); + kfree(hdcp); + goto error; + } + + sde_hdcp_1x_update_client_reg_set(hdcp); + + INIT_DELAYED_WORK(&hdcp->hdcp_auth_work, sde_hdcp_1x_auth_work); + + hdcp->hdcp_state = HDCP_STATE_INACTIVE; + init_completion(&hdcp->r0_checked); + init_completion(&hdcp->sink_r0_available); + + SDE_HDCP_DEBUG("HDCP module initialized. 
HDCP_STATE=%s\n", + SDE_HDCP_STATE_NAME); + + return (void *)hdcp; + +error: + return NULL; +} /* hdcp_1x_init */ + +struct sde_hdcp_ops *sde_hdcp_1x_start(void *input) +{ + return ((struct sde_hdcp_1x *)input)->ops; +} + diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h index 82d3e28918fd..7e4f24ae7de8 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h @@ -4,6 +4,7 @@ struct nvkm_alarm { struct list_head head; + struct list_head exec; u64 timestamp; void (*func)(struct nvkm_alarm *); }; diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c index 58a3f7cf2fb3..00de1bf81519 100644 --- a/drivers/gpu/drm/nouveau/nouveau_display.c +++ b/drivers/gpu/drm/nouveau/nouveau_display.c @@ -370,7 +370,8 @@ nouveau_display_init(struct drm_device *dev) return ret; /* enable polling for external displays */ - drm_kms_helper_poll_enable(dev); + if (!dev->mode_config.poll_enabled) + drm_kms_helper_poll_enable(dev); /* enable hotplug interrupts */ list_for_each_entry(connector, &dev->mode_config.connector_list, head) { diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c index d236fc7c425b..91a61d2cca88 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.c +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c @@ -743,7 +743,10 @@ nouveau_pmops_runtime_resume(struct device *dev) pci_set_master(pdev); ret = nouveau_do_resume(drm_dev, true); - drm_kms_helper_poll_enable(drm_dev); + + if (!drm_dev->mode_config.poll_enabled) + drm_kms_helper_poll_enable(drm_dev); + /* do magic */ nvif_mask(&device->object, 0x088488, (1 << 25), (1 << 25)); vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_ON); diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.h b/drivers/gpu/drm/nouveau/nouveau_fence.h index 2e3a62d38fe9..1621c8ae0fa0 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fence.h +++ b/drivers/gpu/drm/nouveau/nouveau_fence.h @@ -99,6 +99,7 @@ struct nv84_fence_priv { struct nouveau_bo *bo; struct nouveau_bo *bo_gart; u32 *suspend; + struct mutex mutex; }; u64 nv84_fence_crtc(struct nouveau_channel *, int); diff --git a/drivers/gpu/drm/nouveau/nouveau_usif.c b/drivers/gpu/drm/nouveau/nouveau_usif.c index 6ae1b3494bcd..b7b961233949 100644 --- a/drivers/gpu/drm/nouveau/nouveau_usif.c +++ b/drivers/gpu/drm/nouveau/nouveau_usif.c @@ -313,7 +313,8 @@ usif_ioctl(struct drm_file *filp, void __user *user, u32 argc) if (nvif_unpack(argv->v0, 0, 0, true)) { /* block access to objects not created via this interface */ owner = argv->v0.owner; - if (argv->v0.object == 0ULL) + if (argv->v0.object == 0ULL && + argv->v0.type != NVIF_IOCTL_V0_DEL) argv->v0.owner = NVDRM_OBJECT_ANY; /* except client */ else argv->v0.owner = NVDRM_OBJECT_USIF; diff --git a/drivers/gpu/drm/nouveau/nv84_fence.c b/drivers/gpu/drm/nouveau/nv84_fence.c index 412c5be5a9ca..7bc26eceda66 100644 --- a/drivers/gpu/drm/nouveau/nv84_fence.c +++ b/drivers/gpu/drm/nouveau/nv84_fence.c @@ -121,8 +121,10 @@ nv84_fence_context_del(struct nouveau_channel *chan) } nouveau_bo_wr32(priv->bo, chan->chid * 16 / 4, fctx->base.sequence); + mutex_lock(&priv->mutex); nouveau_bo_vma_del(priv->bo, &fctx->vma_gart); nouveau_bo_vma_del(priv->bo, &fctx->vma); + mutex_unlock(&priv->mutex); nouveau_fence_context_del(&fctx->base); chan->fence = NULL; nouveau_fence_context_free(&fctx->base); @@ -148,11 +150,13 @@ nv84_fence_context_new(struct nouveau_channel *chan) fctx->base.sync32 
= nv84_fence_sync32; fctx->base.sequence = nv84_fence_read(chan); + mutex_lock(&priv->mutex); ret = nouveau_bo_vma_add(priv->bo, cli->vm, &fctx->vma); if (ret == 0) { ret = nouveau_bo_vma_add(priv->bo_gart, cli->vm, &fctx->vma_gart); } + mutex_unlock(&priv->mutex); /* map display semaphore buffers into channel's vm */ for (i = 0; !ret && i < chan->drm->dev->mode_config.num_crtc; i++) { @@ -232,6 +236,8 @@ nv84_fence_create(struct nouveau_drm *drm) priv->base.context_base = fence_context_alloc(priv->base.contexts); priv->base.uevent = true; + mutex_init(&priv->mutex); + /* Use VRAM if there is any ; otherwise fallback to system memory */ domain = drm->device.info.ram_size != 0 ? TTM_PL_FLAG_VRAM : /* diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c index c794b2c2d21e..6d8f21290aa2 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c @@ -129,7 +129,7 @@ gf100_bar_init(struct nvkm_bar *base) if (bar->bar[0].mem) { addr = nvkm_memory_addr(bar->bar[0].mem) >> 12; - nvkm_wr32(device, 0x001714, 0xc0000000 | addr); + nvkm_wr32(device, 0x001714, 0x80000000 | addr); } return 0; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c index 79fcdb43e174..46033909d950 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c @@ -50,7 +50,8 @@ nvkm_timer_alarm_trigger(struct nvkm_timer *tmr) /* Move to completed list. We'll drop the lock before * executing the callback so it can reschedule itself. */ - list_move_tail(&alarm->head, &exec); + list_del_init(&alarm->head); + list_add(&alarm->exec, &exec); } /* Shut down interrupt if no more pending alarms. */ @@ -59,8 +60,8 @@ nvkm_timer_alarm_trigger(struct nvkm_timer *tmr) spin_unlock_irqrestore(&tmr->lock, flags); /* Execute completed callbacks. */ - list_for_each_entry_safe(alarm, atemp, &exec, head) { - list_del_init(&alarm->head); + list_for_each_entry_safe(alarm, atemp, &exec, exec) { + list_del(&alarm->exec); alarm->func(alarm); } } diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c index d4ac8c837314..8e86cf7da614 100644 --- a/drivers/gpu/drm/radeon/atombios_encoders.c +++ b/drivers/gpu/drm/radeon/atombios_encoders.c @@ -30,6 +30,7 @@ #include "radeon_audio.h" #include "atom.h" #include +#include extern int atom_debug; @@ -2183,9 +2184,17 @@ int radeon_atom_pick_dig_encoder(struct drm_encoder *encoder, int fe_idx) goto assigned; } - /* on DCE32 and encoder can driver any block so just crtc id */ + /* + * On DCE32 any encoder can drive any block so usually just use crtc id, + * but Apple thinks different at least on iMac10,1, so there use linkb, + * otherwise the internal eDP panel will stay dark. + */ if (ASIC_IS_DCE32(rdev)) { - enc_idx = radeon_crtc->crtc_id; + if (dmi_match(DMI_PRODUCT_NAME, "iMac10,1")) + enc_idx = (dig->linkb) ? 1 : 0; + else + enc_idx = radeon_crtc->crtc_id; + goto assigned; } diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c index 4a09947be244..2ccf81168d1e 100644 --- a/drivers/gpu/drm/radeon/ci_dpm.c +++ b/drivers/gpu/drm/radeon/ci_dpm.c @@ -776,6 +776,18 @@ bool ci_dpm_vblank_too_short(struct radeon_device *rdev) u32 vblank_time = r600_dpm_get_vblank_time(rdev); u32 switch_limit = pi->mem_gddr5 ? 
450 : 300; + /* disable mclk switching if the refresh is >120Hz, even if the + * blanking period would allow it + */ + if (r600_dpm_get_vrefresh(rdev) > 120) + return true; + + /* disable mclk switching if the refresh is >120Hz, even if the + * blanking period would allow it + */ + if (r600_dpm_get_vrefresh(rdev) > 120) + return true; + if (vblank_time < switch_limit) return true; else diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c index f81fb2641097..134874cab4c7 100644 --- a/drivers/gpu/drm/radeon/cik.c +++ b/drivers/gpu/drm/radeon/cik.c @@ -7762,7 +7762,7 @@ static inline void cik_irq_ack(struct radeon_device *rdev) WREG32(DC_HPD5_INT_CONTROL, tmp); } if (rdev->irq.stat_regs.cik.disp_int_cont5 & DC_HPD6_INTERRUPT) { - tmp = RREG32(DC_HPD5_INT_CONTROL); + tmp = RREG32(DC_HPD6_INT_CONTROL); tmp |= DC_HPDx_INT_ACK; WREG32(DC_HPD6_INT_CONTROL, tmp); } @@ -7792,7 +7792,7 @@ static inline void cik_irq_ack(struct radeon_device *rdev) WREG32(DC_HPD5_INT_CONTROL, tmp); } if (rdev->irq.stat_regs.cik.disp_int_cont5 & DC_HPD6_RX_INTERRUPT) { - tmp = RREG32(DC_HPD5_INT_CONTROL); + tmp = RREG32(DC_HPD6_INT_CONTROL); tmp |= DC_HPDx_RX_INT_ACK; WREG32(DC_HPD6_INT_CONTROL, tmp); } diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index 32491355a1d4..ba9e6ed4ae54 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c @@ -4924,7 +4924,7 @@ static void evergreen_irq_ack(struct radeon_device *rdev) WREG32(DC_HPD5_INT_CONTROL, tmp); } if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) { - tmp = RREG32(DC_HPD5_INT_CONTROL); + tmp = RREG32(DC_HPD6_INT_CONTROL); tmp |= DC_HPDx_INT_ACK; WREG32(DC_HPD6_INT_CONTROL, tmp); } @@ -4955,7 +4955,7 @@ static void evergreen_irq_ack(struct radeon_device *rdev) WREG32(DC_HPD5_INT_CONTROL, tmp); } if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_RX_INTERRUPT) { - tmp = RREG32(DC_HPD5_INT_CONTROL); + tmp = RREG32(DC_HPD6_INT_CONTROL); tmp |= DC_HPDx_RX_INT_ACK; WREG32(DC_HPD6_INT_CONTROL, tmp); } diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index cc2fdf0be37a..0e20c08f8977 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c @@ -3945,7 +3945,7 @@ static void r600_irq_ack(struct radeon_device *rdev) WREG32(DC_HPD5_INT_CONTROL, tmp); } if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT) { - tmp = RREG32(DC_HPD5_INT_CONTROL); + tmp = RREG32(DC_HPD6_INT_CONTROL); tmp |= DC_HPDx_INT_ACK; WREG32(DC_HPD6_INT_CONTROL, tmp); } diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c index a9b01bcf7d0a..fcecaf5b5526 100644 --- a/drivers/gpu/drm/radeon/radeon_combios.c +++ b/drivers/gpu/drm/radeon/radeon_combios.c @@ -3394,6 +3394,13 @@ void radeon_combios_asic_init(struct drm_device *dev) rdev->pdev->subsystem_vendor == 0x103c && rdev->pdev->subsystem_device == 0x280a) return; + /* quirk for rs4xx Toshiba Sattellite L20-183 latop to make it resume + * - it hangs on resume inside the dynclk 1 table. 
+ */ + if (rdev->family == CHIP_RS400 && + rdev->pdev->subsystem_vendor == 0x1179 && + rdev->pdev->subsystem_device == 0xff31) + return; /* DYN CLK 1 */ table = combios_get_table_offset(dev, COMBIOS_DYN_CLK_1_TABLE); diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index 4aa2cbe4c85f..a77521695c9a 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c @@ -127,6 +127,10 @@ static struct radeon_px_quirk radeon_px_quirk_list[] = { * https://bugzilla.kernel.org/show_bug.cgi?id=51381 */ { PCI_VENDOR_ID_ATI, 0x6840, 0x1043, 0x2122, RADEON_PX_QUIRK_DISABLE_PX }, + /* Asus K53TK laptop with AMD A6-3420M APU and Radeon 7670m GPU + * https://bugs.freedesktop.org/show_bug.cgi?id=101491 + */ + { PCI_VENDOR_ID_ATI, 0x6741, 0x1043, 0x2122, RADEON_PX_QUIRK_DISABLE_PX }, /* macbook pro 8.2 */ { PCI_VENDOR_ID_ATI, 0x6741, PCI_VENDOR_ID_APPLE, 0x00e2, RADEON_PX_QUIRK_LONG_WAKEUP }, { 0, 0, 0, 0, 0 }, diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c index f878d6962da5..5cf3a2cbc07e 100644 --- a/drivers/gpu/drm/radeon/si.c +++ b/drivers/gpu/drm/radeon/si.c @@ -6335,7 +6335,7 @@ static inline void si_irq_ack(struct radeon_device *rdev) WREG32(DC_HPD5_INT_CONTROL, tmp); } if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) { - tmp = RREG32(DC_HPD5_INT_CONTROL); + tmp = RREG32(DC_HPD6_INT_CONTROL); tmp |= DC_HPDx_INT_ACK; WREG32(DC_HPD6_INT_CONTROL, tmp); } @@ -6366,7 +6366,7 @@ static inline void si_irq_ack(struct radeon_device *rdev) WREG32(DC_HPD5_INT_CONTROL, tmp); } if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_RX_INTERRUPT) { - tmp = RREG32(DC_HPD5_INT_CONTROL); + tmp = RREG32(DC_HPD6_INT_CONTROL); tmp |= DC_HPDx_RX_INT_ACK; WREG32(DC_HPD6_INT_CONTROL, tmp); } diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c index 48cb19949ca3..9255b9c096b6 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c @@ -282,26 +282,6 @@ static void rcar_du_crtc_update_planes(struct rcar_du_crtc *rcrtc) * Page Flip */ -void rcar_du_crtc_cancel_page_flip(struct rcar_du_crtc *rcrtc, - struct drm_file *file) -{ - struct drm_pending_vblank_event *event; - struct drm_device *dev = rcrtc->crtc.dev; - unsigned long flags; - - /* Destroy the pending vertical blanking event associated with the - * pending page flip, if any, and disable vertical blanking interrupts. 
- */ - spin_lock_irqsave(&dev->event_lock, flags); - event = rcrtc->event; - if (event && event->base.file_priv == file) { - rcrtc->event = NULL; - event->base.destroy(&event->base); - drm_crtc_vblank_put(&rcrtc->crtc); - } - spin_unlock_irqrestore(&dev->event_lock, flags); -} - static void rcar_du_crtc_finish_page_flip(struct rcar_du_crtc *rcrtc) { struct drm_pending_vblank_event *event; diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.h b/drivers/gpu/drm/rcar-du/rcar_du_crtc.h index 4b95d9d08c49..2bbe3f5aab65 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.h +++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.h @@ -67,8 +67,6 @@ enum rcar_du_output { int rcar_du_crtc_create(struct rcar_du_group *rgrp, unsigned int index); void rcar_du_crtc_enable_vblank(struct rcar_du_crtc *rcrtc, bool enable); -void rcar_du_crtc_cancel_page_flip(struct rcar_du_crtc *rcrtc, - struct drm_file *file); void rcar_du_crtc_suspend(struct rcar_du_crtc *rcrtc); void rcar_du_crtc_resume(struct rcar_du_crtc *rcrtc); diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c index 40422f6b645e..bf4674aa6405 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_drv.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c @@ -144,91 +144,6 @@ MODULE_DEVICE_TABLE(of, rcar_du_of_table); * DRM operations */ -static int rcar_du_unload(struct drm_device *dev) -{ - struct rcar_du_device *rcdu = dev->dev_private; - - if (rcdu->fbdev) - drm_fbdev_cma_fini(rcdu->fbdev); - - drm_kms_helper_poll_fini(dev); - drm_mode_config_cleanup(dev); - drm_vblank_cleanup(dev); - - dev->irq_enabled = 0; - dev->dev_private = NULL; - - return 0; -} - -static int rcar_du_load(struct drm_device *dev, unsigned long flags) -{ - struct platform_device *pdev = dev->platformdev; - struct device_node *np = pdev->dev.of_node; - struct rcar_du_device *rcdu; - struct resource *mem; - int ret; - - if (np == NULL) { - dev_err(dev->dev, "no platform data\n"); - return -ENODEV; - } - - rcdu = devm_kzalloc(&pdev->dev, sizeof(*rcdu), GFP_KERNEL); - if (rcdu == NULL) { - dev_err(dev->dev, "failed to allocate private data\n"); - return -ENOMEM; - } - - init_waitqueue_head(&rcdu->commit.wait); - - rcdu->dev = &pdev->dev; - rcdu->info = of_match_device(rcar_du_of_table, rcdu->dev)->data; - rcdu->ddev = dev; - dev->dev_private = rcdu; - - /* I/O resources */ - mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); - rcdu->mmio = devm_ioremap_resource(&pdev->dev, mem); - if (IS_ERR(rcdu->mmio)) - return PTR_ERR(rcdu->mmio); - - /* Initialize vertical blanking interrupts handling. Start with vblank - * disabled for all CRTCs. 
- */ - ret = drm_vblank_init(dev, (1 << rcdu->info->num_crtcs) - 1); - if (ret < 0) { - dev_err(&pdev->dev, "failed to initialize vblank\n"); - goto done; - } - - /* DRM/KMS objects */ - ret = rcar_du_modeset_init(rcdu); - if (ret < 0) { - dev_err(&pdev->dev, "failed to initialize DRM/KMS (%d)\n", ret); - goto done; - } - - dev->irq_enabled = 1; - - platform_set_drvdata(pdev, rcdu); - -done: - if (ret) - rcar_du_unload(dev); - - return ret; -} - -static void rcar_du_preclose(struct drm_device *dev, struct drm_file *file) -{ - struct rcar_du_device *rcdu = dev->dev_private; - unsigned int i; - - for (i = 0; i < rcdu->num_crtcs; ++i) - rcar_du_crtc_cancel_page_flip(&rcdu->crtcs[i], file); -} - static void rcar_du_lastclose(struct drm_device *dev) { struct rcar_du_device *rcdu = dev->dev_private; @@ -269,11 +184,7 @@ static const struct file_operations rcar_du_fops = { static struct drm_driver rcar_du_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_ATOMIC, - .load = rcar_du_load, - .unload = rcar_du_unload, - .preclose = rcar_du_preclose, .lastclose = rcar_du_lastclose, - .set_busid = drm_platform_set_busid, .get_vblank_counter = drm_vblank_no_hw_counter, .enable_vblank = rcar_du_enable_vblank, .disable_vblank = rcar_du_disable_vblank, @@ -333,20 +244,106 @@ static const struct dev_pm_ops rcar_du_pm_ops = { * Platform driver */ -static int rcar_du_probe(struct platform_device *pdev) -{ - return drm_platform_init(&rcar_du_driver, pdev); -} - static int rcar_du_remove(struct platform_device *pdev) { struct rcar_du_device *rcdu = platform_get_drvdata(pdev); + struct drm_device *ddev = rcdu->ddev; - drm_put_dev(rcdu->ddev); + mutex_lock(&ddev->mode_config.mutex); + drm_connector_unplug_all(ddev); + mutex_unlock(&ddev->mode_config.mutex); + + drm_dev_unregister(ddev); + + if (rcdu->fbdev) + drm_fbdev_cma_fini(rcdu->fbdev); + + drm_kms_helper_poll_fini(ddev); + drm_mode_config_cleanup(ddev); + + drm_dev_unref(ddev); return 0; } +static int rcar_du_probe(struct platform_device *pdev) +{ + struct device_node *np = pdev->dev.of_node; + struct rcar_du_device *rcdu; + struct drm_connector *connector; + struct drm_device *ddev; + struct resource *mem; + int ret; + + if (np == NULL) { + dev_err(&pdev->dev, "no device tree node\n"); + return -ENODEV; + } + + /* Allocate and initialize the DRM and R-Car device structures. */ + rcdu = devm_kzalloc(&pdev->dev, sizeof(*rcdu), GFP_KERNEL); + if (rcdu == NULL) + return -ENOMEM; + + init_waitqueue_head(&rcdu->commit.wait); + + rcdu->dev = &pdev->dev; + rcdu->info = of_match_device(rcar_du_of_table, rcdu->dev)->data; + + platform_set_drvdata(pdev, rcdu); + + /* I/O resources */ + mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + rcdu->mmio = devm_ioremap_resource(&pdev->dev, mem); + if (IS_ERR(rcdu->mmio)) + ret = PTR_ERR(rcdu->mmio); + + /* DRM/KMS objects */ + ddev = drm_dev_alloc(&rcar_du_driver, &pdev->dev); + if (!ddev) + return -ENOMEM; + + drm_dev_set_unique(ddev, dev_name(&pdev->dev)); + + rcdu->ddev = ddev; + ddev->dev_private = rcdu; + + ret = rcar_du_modeset_init(rcdu); + if (ret < 0) { + dev_err(&pdev->dev, "failed to initialize DRM/KMS (%d)\n", ret); + goto error; + } + + ddev->irq_enabled = 1; + + /* Register the DRM device with the core and the connectors with + * sysfs. 
+ */ + ret = drm_dev_register(ddev, 0); + if (ret) + goto error; + + mutex_lock(&ddev->mode_config.mutex); + drm_for_each_connector(connector, ddev) { + ret = drm_connector_register(connector); + if (ret < 0) + break; + } + mutex_unlock(&ddev->mode_config.mutex); + + if (ret < 0) + goto error; + + DRM_INFO("Device %s probed\n", dev_name(&pdev->dev)); + + return 0; + +error: + rcar_du_remove(pdev); + + return ret; +} + static struct platform_driver rcar_du_platform_driver = { .probe = rcar_du_probe, .remove = rcar_du_remove, diff --git a/drivers/gpu/drm/rcar-du/rcar_du_hdmicon.c b/drivers/gpu/drm/rcar-du/rcar_du_hdmicon.c index 96f2eb43713c..6038be93c58d 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_hdmicon.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_hdmicon.c @@ -55,12 +55,6 @@ static const struct drm_connector_helper_funcs connector_helper_funcs = { .best_encoder = rcar_du_connector_best_encoder, }; -static void rcar_du_hdmi_connector_destroy(struct drm_connector *connector) -{ - drm_connector_unregister(connector); - drm_connector_cleanup(connector); -} - static enum drm_connector_status rcar_du_hdmi_connector_detect(struct drm_connector *connector, bool force) { @@ -79,7 +73,7 @@ static const struct drm_connector_funcs connector_funcs = { .reset = drm_atomic_helper_connector_reset, .detect = rcar_du_hdmi_connector_detect, .fill_modes = drm_helper_probe_single_connector_modes, - .destroy = rcar_du_hdmi_connector_destroy, + .destroy = drm_connector_cleanup, .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, }; @@ -108,9 +102,6 @@ int rcar_du_hdmi_connector_init(struct rcar_du_device *rcdu, return ret; drm_connector_helper_add(connector, &connector_helper_funcs); - ret = drm_connector_register(connector); - if (ret < 0) - return ret; connector->dpms = DRM_MODE_DPMS_OFF; drm_object_property_set_value(&connector->base, diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c index ca12e8ca5552..46429c4be8e5 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c @@ -761,6 +761,13 @@ int rcar_du_modeset_init(struct rcar_du_device *rcdu) if (ret < 0) return ret; + /* Initialize vertical blanking interrupts handling. Start with vblank + * disabled for all CRTCs. + */ + ret = drm_vblank_init(dev, (1 << rcdu->info->num_crtcs) - 1); + if (ret < 0) + return ret; + /* Initialize the groups. 
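The probe()/remove() pair above follows the usual ordering for drivers that have dropped the DRM load/unload midlayer: allocate with drm_dev_alloc(), initialise everything, call drm_dev_register() last, and have remove() unwind in reverse with drm_dev_unref() at the very end. Condensed to a skeleton (example_driver, example_probe and example_remove are hypothetical; error handling and driver-specific setup are elided, so this is an ordering sketch rather than a drop-in implementation):

#include <drm/drmP.h>
#include <linux/platform_device.h>

extern struct drm_driver example_driver;        /* hypothetical driver ops */

static int example_probe(struct platform_device *pdev)
{
        struct drm_device *ddev;
        int ret;

        ddev = drm_dev_alloc(&example_driver, &pdev->dev);
        if (!ddev)
                return -ENOMEM;

        /* ... ioremap registers, init KMS objects, platform_set_drvdata(pdev, ddev) ... */

        ret = drm_dev_register(ddev, 0);        /* expose the device last */
        if (ret) {
                drm_dev_unref(ddev);
                return ret;
        }

        return 0;
}

static int example_remove(struct platform_device *pdev)
{
        struct drm_device *ddev = platform_get_drvdata(pdev);

        drm_dev_unregister(ddev);               /* cut off userspace first */
        /* ... fbdev teardown, poll fini, mode_config cleanup ... */
        drm_dev_unref(ddev);                    /* drop the last reference */

        return 0;
}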
*/ num_groups = DIV_ROUND_UP(rcdu->num_crtcs, 2); diff --git a/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c b/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c index 0c43032fc693..e905f5da7aaa 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c @@ -62,12 +62,6 @@ static const struct drm_connector_helper_funcs connector_helper_funcs = { .best_encoder = rcar_du_connector_best_encoder, }; -static void rcar_du_lvds_connector_destroy(struct drm_connector *connector) -{ - drm_connector_unregister(connector); - drm_connector_cleanup(connector); -} - static enum drm_connector_status rcar_du_lvds_connector_detect(struct drm_connector *connector, bool force) { @@ -79,7 +73,7 @@ static const struct drm_connector_funcs connector_funcs = { .reset = drm_atomic_helper_connector_reset, .detect = rcar_du_lvds_connector_detect, .fill_modes = drm_helper_probe_single_connector_modes, - .destroy = rcar_du_lvds_connector_destroy, + .destroy = drm_connector_cleanup, .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, }; @@ -117,9 +111,6 @@ int rcar_du_lvds_connector_init(struct rcar_du_device *rcdu, return ret; drm_connector_helper_add(connector, &connector_helper_funcs); - ret = drm_connector_register(connector); - if (ret < 0) - return ret; connector->dpms = DRM_MODE_DPMS_OFF; drm_object_property_set_value(&connector->base, diff --git a/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c b/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c index e0a5d8f93963..9d7e5c99caf6 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c @@ -31,12 +31,6 @@ static const struct drm_connector_helper_funcs connector_helper_funcs = { .best_encoder = rcar_du_connector_best_encoder, }; -static void rcar_du_vga_connector_destroy(struct drm_connector *connector) -{ - drm_connector_unregister(connector); - drm_connector_cleanup(connector); -} - static enum drm_connector_status rcar_du_vga_connector_detect(struct drm_connector *connector, bool force) { @@ -48,7 +42,7 @@ static const struct drm_connector_funcs connector_funcs = { .reset = drm_atomic_helper_connector_reset, .detect = rcar_du_vga_connector_detect, .fill_modes = drm_helper_probe_single_connector_modes, - .destroy = rcar_du_vga_connector_destroy, + .destroy = drm_connector_cleanup, .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, }; @@ -76,9 +70,6 @@ int rcar_du_vga_connector_init(struct rcar_du_device *rcdu, return ret; drm_connector_helper_add(connector, &connector_helper_funcs); - ret = drm_connector_register(connector); - if (ret < 0) - return ret; connector->dpms = DRM_MODE_DPMS_OFF; drm_object_property_set_value(&connector->base, diff --git a/drivers/gpu/drm/virtio/virtgpu_object.c b/drivers/gpu/drm/virtio/virtgpu_object.c index f300eba95bb1..1244cdf52859 100644 --- a/drivers/gpu/drm/virtio/virtgpu_object.c +++ b/drivers/gpu/drm/virtio/virtgpu_object.c @@ -81,8 +81,10 @@ int virtio_gpu_object_create(struct virtio_gpu_device *vgdev, return -ENOMEM; size = roundup(size, PAGE_SIZE); ret = drm_gem_object_init(vgdev->ddev, &bo->gem_base, size); - if (ret != 0) + if (ret != 0) { + kfree(bo); return ret; + } bo->dumb = false; virtio_gpu_init_ttm_placement(bo, pinned); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c index 13db8a2851ed..1f013d45c9e9 100644 --- 
a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c @@ -321,6 +321,7 @@ void vmw_cmdbuf_res_man_destroy(struct vmw_cmdbuf_res_manager *man) list_for_each_entry_safe(entry, next, &man->list, head) vmw_cmdbuf_res_free(man, entry); + drm_ht_remove(&man->resources); kfree(man); } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c index ecf15cf0c3fd..04fd0f2b6af0 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c @@ -471,7 +471,7 @@ static int vmw_cmd_invalid(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) { - return capable(CAP_SYS_ADMIN) ? : -EINVAL; + return -EINVAL; } static int vmw_cmd_ok(struct vmw_private *dev_priv, diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c index b6a0806b06bf..a1c68e6a689e 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c @@ -368,6 +368,8 @@ static void *vmw_local_fifo_reserve(struct vmw_private *dev_priv, return fifo_state->static_buffer; else { fifo_state->dynamic_buffer = vmalloc(bytes); + if (!fifo_state->dynamic_buffer) + goto out_err; return fifo_state->dynamic_buffer; } } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c index c9c04ccccdd9..027987023400 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c @@ -1288,11 +1288,14 @@ int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data, struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; int ret; uint32_t size; - uint32_t backup_handle; + uint32_t backup_handle = 0; if (req->multisample_count != 0) return -EINVAL; + if (req->mip_levels > DRM_VMW_MAX_MIP_LEVELS) + return -EINVAL; + if (unlikely(vmw_user_surface_size == 0)) vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) + 128; @@ -1328,12 +1331,16 @@ int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data, ret = vmw_user_dmabuf_lookup(tfile, req->buffer_handle, &res->backup, &user_srf->backup_base); - if (ret == 0 && res->backup->base.num_pages * PAGE_SIZE < - res->backup_size) { - DRM_ERROR("Surface backup buffer is too small.\n"); - vmw_dmabuf_unreference(&res->backup); - ret = -EINVAL; - goto out_unlock; + if (ret == 0) { + if (res->backup->base.num_pages * PAGE_SIZE < + res->backup_size) { + DRM_ERROR("Surface backup buffer is too small.\n"); + vmw_dmabuf_unreference(&res->backup); + ret = -EINVAL; + goto out_unlock; + } else { + backup_handle = req->buffer_handle; + } } } else if (req->drm_surface_flags & drm_vmw_surface_flag_create_buffer) ret = vmw_user_dmabuf_alloc(dev_priv, tfile, diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c index 89c7590ad121..7cab049771de 100644 --- a/drivers/gpu/msm/adreno.c +++ b/drivers/gpu/msm/adreno.c @@ -902,6 +902,9 @@ static int adreno_of_get_power(struct adreno_device *adreno_dev, device->pwrctrl.bus_control = of_property_read_bool(node, "qcom,bus-control"); + device->pwrctrl.input_disable = of_property_read_bool(node, + "qcom,disable-wake-on-touch"); + return 0; } @@ -1016,15 +1019,19 @@ static int adreno_probe(struct platform_device *pdev) /* Initialize coresight for the target */ adreno_coresight_init(adreno_dev); - adreno_input_handler.private = device; - #ifdef CONFIG_INPUT - /* - * It isn't fatal if we cannot register the input handler. 
Sad, - * perhaps, but not fatal - */ - if (input_register_handler(&adreno_input_handler)) - KGSL_DRV_ERR(device, "Unable to register the input handler\n"); + if (!device->pwrctrl.input_disable) { + adreno_input_handler.private = device; + /* + * It isn't fatal if we cannot register the input handler. Sad, + * perhaps, but not fatal + */ + if (input_register_handler(&adreno_input_handler)) { + adreno_input_handler.private = NULL; + KGSL_DRV_ERR(device, + "Unable to register the input handler\n"); + } + } #endif out: if (status) { @@ -1076,7 +1083,8 @@ static int adreno_remove(struct platform_device *pdev) _adreno_free_memories(adreno_dev); #ifdef CONFIG_INPUT - input_unregister_handler(&adreno_input_handler); + if (adreno_input_handler.private) + input_unregister_handler(&adreno_input_handler); #endif adreno_sysfs_close(adreno_dev); @@ -1309,6 +1317,10 @@ static int _adreno_start(struct adreno_device *adreno_dev) /* make sure ADRENO_DEVICE_STARTED is not set here */ BUG_ON(test_bit(ADRENO_DEVICE_STARTED, &adreno_dev->priv)); + /* disallow l2pc during wake up to improve GPU wake up time */ + kgsl_pwrctrl_update_l2pc(&adreno_dev->dev, + KGSL_L2PC_WAKEUP_TIMEOUT); + pm_qos_update_request(&device->pwrctrl.pm_qos_req_dma, pmqos_wakeup_vote); @@ -2139,6 +2151,11 @@ static int adreno_soft_reset(struct kgsl_device *device) /* Reset the GPU */ _soft_reset(adreno_dev); + /* Clear the busy_data stats - we're starting over from scratch */ + adreno_dev->busy_data.gpu_busy = 0; + adreno_dev->busy_data.vbif_ram_cycles = 0; + adreno_dev->busy_data.vbif_starved_ram = 0; + /* Set the page table back to the default page table */ adreno_ringbuffer_set_global(adreno_dev, 0); kgsl_mmu_set_pt(&device->mmu, device->mmu.defaultpagetable); diff --git a/drivers/gpu/msm/adreno.h b/drivers/gpu/msm/adreno.h index 218d08e6dfc3..4a0acdcf8844 100644 --- a/drivers/gpu/msm/adreno.h +++ b/drivers/gpu/msm/adreno.h @@ -568,6 +568,8 @@ enum adreno_regs { ADRENO_REG_RBBM_RBBM_CTL, ADRENO_REG_UCHE_INVALIDATE0, ADRENO_REG_UCHE_INVALIDATE1, + ADRENO_REG_RBBM_PERFCTR_RBBM_0_LO, + ADRENO_REG_RBBM_PERFCTR_RBBM_0_HI, ADRENO_REG_RBBM_PERFCTR_LOAD_VALUE_LO, ADRENO_REG_RBBM_PERFCTR_LOAD_VALUE_HI, ADRENO_REG_RBBM_SECVID_TRUST_CONTROL, @@ -1508,21 +1510,60 @@ static inline void adreno_ringbuffer_set_pagetable(struct adreno_ringbuffer *rb, spin_unlock_irqrestore(&rb->preempt_lock, flags); } +static inline bool is_power_counter_overflow(struct adreno_device *adreno_dev, + unsigned int reg, unsigned int prev_val, unsigned int *perfctr_pwr_hi) +{ + unsigned int val; + bool ret = false; + + /* + * If prev_val is zero, it is first read after perf counter reset. + * So set perfctr_pwr_hi register to zero. 
+ */ + if (prev_val == 0) { + *perfctr_pwr_hi = 0; + return ret; + } + adreno_readreg(adreno_dev, ADRENO_REG_RBBM_PERFCTR_RBBM_0_HI, &val); + if (val != *perfctr_pwr_hi) { + *perfctr_pwr_hi = val; + ret = true; + } + return ret; +} + static inline unsigned int counter_delta(struct kgsl_device *device, unsigned int reg, unsigned int *counter) { + struct adreno_device *adreno_dev = ADRENO_DEVICE(device); unsigned int val; unsigned int ret = 0; + bool overflow = true; + static unsigned int perfctr_pwr_hi; /* Read the value */ kgsl_regread(device, reg, &val); + if (adreno_is_a5xx(adreno_dev) && reg == adreno_getreg + (adreno_dev, ADRENO_REG_RBBM_PERFCTR_RBBM_0_LO)) + overflow = is_power_counter_overflow(adreno_dev, reg, + *counter, &perfctr_pwr_hi); + /* Return 0 for the first read */ if (*counter != 0) { - if (val < *counter) - ret = (0xFFFFFFFF - *counter) + val; - else + if (val >= *counter) { ret = val - *counter; + } else if (overflow == true) { + ret = (0xFFFFFFFF - *counter) + val; + } else { + /* + * Since KGSL got abnormal value from the counter, + * We will drop the value from being accumulated. + */ + pr_warn_once("KGSL: Abnormal value :0x%x (0x%x) from perf counter : 0x%x\n", + val, *counter, reg); + return 0; + } } *counter = val; diff --git a/drivers/gpu/msm/adreno_a3xx.c b/drivers/gpu/msm/adreno_a3xx.c index 423071811b43..0e3e5b64bdc7 100644 --- a/drivers/gpu/msm/adreno_a3xx.c +++ b/drivers/gpu/msm/adreno_a3xx.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -1530,6 +1530,10 @@ static unsigned int a3xx_register_offsets[ADRENO_REG_REGISTER_MAX] = { A3XX_UCHE_CACHE_INVALIDATE0_REG), ADRENO_REG_DEFINE(ADRENO_REG_UCHE_INVALIDATE1, A3XX_UCHE_CACHE_INVALIDATE1_REG), + ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_RBBM_0_LO, + A3XX_RBBM_PERFCTR_RBBM_0_LO), + ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_RBBM_0_HI, + A3XX_RBBM_PERFCTR_RBBM_0_HI), ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_LOAD_VALUE_LO, A3XX_RBBM_PERFCTR_LOAD_VALUE_LO), ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_LOAD_VALUE_HI, diff --git a/drivers/gpu/msm/adreno_a4xx.c b/drivers/gpu/msm/adreno_a4xx.c index 5ca04e522270..6170cc263e4a 100644 --- a/drivers/gpu/msm/adreno_a4xx.c +++ b/drivers/gpu/msm/adreno_a4xx.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved. 
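counter_delta() above has to tell a genuine 32-bit wrap (the RBBM_0_HI half of the counter advanced) apart from a bogus readback that would otherwise be accumulated as a huge delta. Reduced to the bare decision, with a hypothetical helper that is only meant to illustrate the logic:

#include <linux/types.h>

/*
 * Increment between two samples of a 32-bit hardware counter.
 * 'hi_advanced' means the upper half moved, i.e. the wrap is real.
 */
static u32 example_counter_delta(u32 prev, u32 cur, bool hi_advanced)
{
        if (cur >= prev)
                return cur - prev;

        if (hi_advanced)
                return cur - prev;      /* unsigned subtraction wraps mod 2^32 */

        return 0;                       /* abnormal sample, drop it */
}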
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -806,6 +806,10 @@ static unsigned int a4xx_register_offsets[ADRENO_REG_REGISTER_MAX] = { ADRENO_REG_DEFINE(ADRENO_REG_RBBM_SW_RESET_CMD, A4XX_RBBM_SW_RESET_CMD), ADRENO_REG_DEFINE(ADRENO_REG_UCHE_INVALIDATE0, A4XX_UCHE_INVALIDATE0), ADRENO_REG_DEFINE(ADRENO_REG_UCHE_INVALIDATE1, A4XX_UCHE_INVALIDATE1), + ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_RBBM_0_LO, + A4XX_RBBM_PERFCTR_RBBM_0_LO), + ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_RBBM_0_HI, + A4XX_RBBM_PERFCTR_RBBM_0_HI), ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_LOAD_VALUE_LO, A4XX_RBBM_PERFCTR_LOAD_VALUE_LO), ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_LOAD_VALUE_HI, diff --git a/drivers/gpu/msm/adreno_a5xx.c b/drivers/gpu/msm/adreno_a5xx.c index dcc6651710fe..3fb13c7a0814 100644 --- a/drivers/gpu/msm/adreno_a5xx.c +++ b/drivers/gpu/msm/adreno_a5xx.c @@ -59,7 +59,7 @@ static const struct adreno_vbif_platform a5xx_vbif_platforms[] = { { adreno_is_a530, a530_vbif }, { adreno_is_a512, a540_vbif }, { adreno_is_a510, a530_vbif }, - { adreno_is_a508, a540_vbif }, + { adreno_is_a508, a530_vbif }, { adreno_is_a505, a530_vbif }, { adreno_is_a506, a530_vbif }, }; @@ -715,6 +715,10 @@ static int _load_gpmu_firmware(struct adreno_device *adreno_dev) if (ret) goto err; + /* Integer overflow check for cmd_size */ + if (data[2] > (data[0] - 2)) + goto err; + cmds = data + data[2] + 3; cmd_size = data[0] - data[2] - 2; @@ -2069,6 +2073,9 @@ static void a5xx_start(struct adreno_device *adreno_dev) } + /* Disable All flat shading optimization */ + kgsl_regrmw(device, A5XX_VPC_DBG_ECO_CNTL, 0, 0x1 << 10); + /* * VPC corner case with local memory load kill leads to corrupt * internal state. Normal Disable does not work for all a5x chips. @@ -3069,6 +3076,10 @@ static unsigned int a5xx_register_offsets[ADRENO_REG_REGISTER_MAX] = { ADRENO_REG_DEFINE(ADRENO_REG_RBBM_BLOCK_SW_RESET_CMD2, A5XX_RBBM_BLOCK_SW_RESET_CMD2), ADRENO_REG_DEFINE(ADRENO_REG_UCHE_INVALIDATE0, A5XX_UCHE_INVALIDATE0), + ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_RBBM_0_LO, + A5XX_RBBM_PERFCTR_RBBM_0_LO), + ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_RBBM_0_HI, + A5XX_RBBM_PERFCTR_RBBM_0_HI), ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_LOAD_VALUE_LO, A5XX_RBBM_PERFCTR_LOAD_VALUE_LO), ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_LOAD_VALUE_HI, diff --git a/drivers/gpu/msm/adreno_a5xx_preempt.c b/drivers/gpu/msm/adreno_a5xx_preempt.c index 0e56731b16e2..883a9810fbf4 100644 --- a/drivers/gpu/msm/adreno_a5xx_preempt.c +++ b/drivers/gpu/msm/adreno_a5xx_preempt.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -537,13 +537,42 @@ static int a5xx_preemption_iommu_init(struct adreno_device *adreno_dev) KGSL_MEMFLAGS_GPUREADONLY, KGSL_MEMDESC_PRIVILEGED, "smmu_info"); } + +static void a5xx_preemption_iommu_close(struct adreno_device *adreno_dev) +{ + struct kgsl_device *device = KGSL_DEVICE(adreno_dev); + struct kgsl_iommu *iommu = KGSL_IOMMU_PRIV(device); + + kgsl_free_global(device, &iommu->smmu_info); +} + #else static int a5xx_preemption_iommu_init(struct adreno_device *adreno_dev) { return -ENODEV; } + +static void a5xx_preemption_iommu_close(struct adreno_device *adreno_dev) +{ +} #endif +static void a5xx_preemption_close(struct kgsl_device *device) +{ + struct adreno_device *adreno_dev = ADRENO_DEVICE(device); + struct adreno_preemption *preempt = &adreno_dev->preempt; + struct adreno_ringbuffer *rb; + unsigned int i; + + del_timer(&preempt->timer); + kgsl_free_global(device, &preempt->counters); + a5xx_preemption_iommu_close(adreno_dev); + + FOR_EACH_RINGBUFFER(adreno_dev, rb, i) { + kgsl_free_global(device, &rb->preemption_desc); + } +} + int a5xx_preemption_init(struct adreno_device *adreno_dev) { struct kgsl_device *device = KGSL_DEVICE(adreno_dev); @@ -568,7 +597,7 @@ int a5xx_preemption_init(struct adreno_device *adreno_dev) A5XX_CP_CTXRECORD_PREEMPTION_COUNTER_SIZE, 0, 0, "preemption_counters"); if (ret) - return ret; + goto err; addr = preempt->counters.gpuaddr; @@ -576,10 +605,16 @@ int a5xx_preemption_init(struct adreno_device *adreno_dev) FOR_EACH_RINGBUFFER(adreno_dev, rb, i) { ret = a5xx_preemption_ringbuffer_init(adreno_dev, rb, addr); if (ret) - return ret; + goto err; addr += A5XX_CP_CTXRECORD_PREEMPTION_COUNTER_SIZE; } - return a5xx_preemption_iommu_init(adreno_dev); + ret = a5xx_preemption_iommu_init(adreno_dev); + +err: + if (ret) + a5xx_preemption_close(device); + + return ret; } diff --git a/drivers/gpu/msm/adreno_a5xx_snapshot.c b/drivers/gpu/msm/adreno_a5xx_snapshot.c index bc7c0badf189..496fc6a9248e 100644 --- a/drivers/gpu/msm/adreno_a5xx_snapshot.c +++ b/drivers/gpu/msm/adreno_a5xx_snapshot.c @@ -358,8 +358,8 @@ static const unsigned int a5xx_registers[] = { 0x0000, 0x0002, 0x0004, 0x0020, 0x0022, 0x0026, 0x0029, 0x002B, 0x002E, 0x0035, 0x0038, 0x0042, 0x0044, 0x0044, 0x0047, 0x0095, 0x0097, 0x00BB, 0x03A0, 0x0464, 0x0469, 0x046F, 0x04D2, 0x04D3, - 0x04E0, 0x04F4, 0X04F6, 0x0533, 0x0540, 0x0555, 0xF400, 0xF400, - 0xF800, 0xF807, + 0x04E0, 0x04F4, 0X04F8, 0x0529, 0x0531, 0x0533, 0x0540, 0x0555, + 0xF400, 0xF400, 0xF800, 0xF807, /* CP */ 0x0800, 0x081A, 0x081F, 0x0841, 0x0860, 0x0860, 0x0880, 0x08A0, 0x0B00, 0x0B12, 0x0B15, 0X0B1C, 0X0B1E, 0x0B28, 0x0B78, 0x0B7F, @@ -420,8 +420,8 @@ static const unsigned int a5xx_registers[] = { * is the stop offset (inclusive) */ static const unsigned int a5xx_pre_crashdumper_registers[] = { - /* RBBM: RBBM_STATUS */ - 0x04F5, 0x04F5, + /* RBBM: RBBM_STATUS - RBBM_STATUS3 */ + 0x04F5, 0x04F7, 0x0530, 0x0530, /* CP: CP_STATUS_1 */ 0x0B1D, 0x0B1D, }; diff --git a/drivers/gpu/msm/adreno_debugfs.c b/drivers/gpu/msm/adreno_debugfs.c index 5306303b8d15..2027ac66f737 100644 --- a/drivers/gpu/msm/adreno_debugfs.c +++ b/drivers/gpu/msm/adreno_debugfs.c @@ -131,6 +131,8 @@ typedef void (*reg_read_fill_t)(struct kgsl_device *device, int i, static void sync_event_print(struct seq_file *s, struct kgsl_drawobj_sync_event *sync_event) { + unsigned long flags; + switch (sync_event->type) { case 
KGSL_CMD_SYNCPOINT_TYPE_TIMESTAMP: { seq_printf(s, "sync: ctx: %d ts: %d", @@ -138,9 +140,13 @@ static void sync_event_print(struct seq_file *s, break; } case KGSL_CMD_SYNCPOINT_TYPE_FENCE: + spin_lock_irqsave(&sync_event->handle_lock, flags); + seq_printf(s, "sync: [%pK] %s", sync_event->handle, (sync_event->handle && sync_event->handle->fence) ? sync_event->handle->fence->name : "NULL"); + + spin_unlock_irqrestore(&sync_event->handle_lock, flags); break; default: seq_printf(s, "sync: type: %d", sync_event->type); diff --git a/drivers/gpu/msm/adreno_dispatch.c b/drivers/gpu/msm/adreno_dispatch.c index 55f906c9cb90..1a94e71f5c1d 100644 --- a/drivers/gpu/msm/adreno_dispatch.c +++ b/drivers/gpu/msm/adreno_dispatch.c @@ -979,6 +979,13 @@ static void _adreno_dispatcher_issuecmds(struct adreno_device *adreno_dev) spin_unlock(&dispatcher->plist_lock); } +static inline void _decrement_submit_now(struct kgsl_device *device) +{ + spin_lock(&device->submit_lock); + device->submit_now--; + spin_unlock(&device->submit_lock); +} + /** * adreno_dispatcher_issuecmds() - Issue commmands from pending contexts * @adreno_dev: Pointer to the adreno device struct @@ -988,15 +995,29 @@ static void _adreno_dispatcher_issuecmds(struct adreno_device *adreno_dev) static void adreno_dispatcher_issuecmds(struct adreno_device *adreno_dev) { struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher; + struct kgsl_device *device = KGSL_DEVICE(adreno_dev); + + spin_lock(&device->submit_lock); + /* If state transition to SLUMBER, schedule the work for later */ + if (device->slumber == true) { + spin_unlock(&device->submit_lock); + goto done; + } + device->submit_now++; + spin_unlock(&device->submit_lock); /* If the dispatcher is busy then schedule the work for later */ if (!mutex_trylock(&dispatcher->mutex)) { - adreno_dispatcher_schedule(KGSL_DEVICE(adreno_dev)); - return; + _decrement_submit_now(device); + goto done; } _adreno_dispatcher_issuecmds(adreno_dev); mutex_unlock(&dispatcher->mutex); + _decrement_submit_now(device); + return; +done: + adreno_dispatcher_schedule(device); } /** @@ -1439,7 +1460,9 @@ int adreno_dispatcher_queue_cmds(struct kgsl_device_private *dev_priv, spin_unlock(&drawctxt->lock); - kgsl_pwrctrl_update_l2pc(&adreno_dev->dev); + if (device->pwrctrl.l2pc_update_queue) + kgsl_pwrctrl_update_l2pc(&adreno_dev->dev, + KGSL_L2PC_QUEUE_TIMEOUT); /* Add the context to the dispatcher pending list */ dispatcher_queue_context(adreno_dev, drawctxt); @@ -2422,7 +2445,7 @@ static void _dispatcher_power_down(struct adreno_device *adreno_dev) mutex_unlock(&device->mutex); } -static void adreno_dispatcher_work(struct work_struct *work) +static void adreno_dispatcher_work(struct kthread_work *work) { struct adreno_dispatcher *dispatcher = container_of(work, struct adreno_dispatcher, work); @@ -2482,7 +2505,7 @@ void adreno_dispatcher_schedule(struct kgsl_device *device) struct adreno_device *adreno_dev = ADRENO_DEVICE(device); struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher; - kgsl_schedule_work(&dispatcher->work); + queue_kthread_work(&kgsl_driver.worker, &dispatcher->work); } /** @@ -2778,7 +2801,7 @@ int adreno_dispatcher_init(struct adreno_device *adreno_dev) setup_timer(&dispatcher->fault_timer, adreno_dispatcher_fault_timer, (unsigned long) adreno_dev); - INIT_WORK(&dispatcher->work, adreno_dispatcher_work); + init_kthread_work(&dispatcher->work, adreno_dispatcher_work); init_completion(&dispatcher->idle_gate); complete_all(&dispatcher->idle_gate); diff --git 
a/drivers/gpu/msm/adreno_dispatch.h b/drivers/gpu/msm/adreno_dispatch.h index 72545db12f90..48f0cdc546ff 100644 --- a/drivers/gpu/msm/adreno_dispatch.h +++ b/drivers/gpu/msm/adreno_dispatch.h @@ -91,7 +91,7 @@ struct adreno_dispatcher { atomic_t fault; struct plist_head pending; spinlock_t plist_lock; - struct work_struct work; + struct kthread_work work; struct kobject kobj; struct completion idle_gate; unsigned int disp_preempt_fair_sched; diff --git a/drivers/gpu/msm/adreno_ringbuffer.c b/drivers/gpu/msm/adreno_ringbuffer.c index d79d9613043f..ddc53edce3c1 100644 --- a/drivers/gpu/msm/adreno_ringbuffer.c +++ b/drivers/gpu/msm/adreno_ringbuffer.c @@ -26,6 +26,7 @@ #include "adreno_iommu.h" #include "adreno_pm4types.h" #include "adreno_ringbuffer.h" +#include "adreno_trace.h" #include "a3xx_reg.h" #include "adreno_a5xx.h" @@ -58,6 +59,7 @@ static void _cff_write_ringbuffer(struct adreno_ringbuffer *rb) } static void adreno_get_submit_time(struct adreno_device *adreno_dev, + struct adreno_ringbuffer *rb, struct adreno_submit_time *time) { unsigned long flags; @@ -87,6 +89,9 @@ static void adreno_get_submit_time(struct adreno_device *adreno_dev, } else time->ticks = 0; + /* Trace the GPU time to create a mapping to ftrace time */ + trace_adreno_cmdbatch_sync(rb->drawctxt_active, time->ticks); + /* Get the kernel clock for time since boot */ time->ktime = local_clock(); @@ -128,7 +133,7 @@ void adreno_ringbuffer_submit(struct adreno_ringbuffer *rb, _cff_write_ringbuffer(rb); if (time != NULL) - adreno_get_submit_time(adreno_dev, time); + adreno_get_submit_time(adreno_dev, rb, time); adreno_ringbuffer_wptr(adreno_dev, rb); } diff --git a/drivers/gpu/msm/adreno_trace.h b/drivers/gpu/msm/adreno_trace.h index 16ca0980cfbe..74c4c4e6e1fa 100644 --- a/drivers/gpu/msm/adreno_trace.h +++ b/drivers/gpu/msm/adreno_trace.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved. 
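The adreno_dispatch.c hunks above add a submit_lock-protected handshake around inline command submission: a submitter only increments device->submit_now while the device is not slumbering (otherwise it reschedules the dispatcher), and the power code, in the kgsl_pwrctrl.c hunks further down, only commits to SLUMBER once submit_now has drained to zero. Below is a minimal sketch of that handshake; the names are invented and userspace pthreads stand in for the kernel spinlock, so it illustrates the pattern rather than the driver code itself.

    #include <pthread.h>
    #include <stdbool.h>

    static pthread_mutex_t submit_lock = PTHREAD_MUTEX_INITIALIZER;
    static unsigned int submit_now;   /* in-flight inline submissions */
    static bool slumber;              /* device has entered the low-power state */

    /* Submission path: bail out (and let a worker retry) if already slumbering. */
    static bool try_begin_submit(void)
    {
        bool ok;

        pthread_mutex_lock(&submit_lock);
        ok = !slumber;
        if (ok)
            submit_now++;
        pthread_mutex_unlock(&submit_lock);
        return ok;
    }

    static void end_submit(void)
    {
        pthread_mutex_lock(&submit_lock);
        submit_now--;
        pthread_mutex_unlock(&submit_lock);
    }

    /* Idle path: only commit to slumber when nothing is being submitted. */
    static bool try_enter_slumber(void)
    {
        bool ok;

        pthread_mutex_lock(&submit_lock);
        ok = (submit_now == 0);
        if (ok)
            slumber = true;
        pthread_mutex_unlock(&submit_lock);
        return ok;
    }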
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -148,6 +148,29 @@ TRACE_EVENT(adreno_cmdbatch_retired, ) ); +TRACE_EVENT(adreno_cmdbatch_sync, + TP_PROTO(struct adreno_context *drawctxt, + uint64_t ticks), + TP_ARGS(drawctxt, ticks), + TP_STRUCT__entry( + __field(unsigned int, id) + __field(unsigned int, timestamp) + __field(uint64_t, ticks) + __field(int, prio) + ), + TP_fast_assign( + __entry->id = drawctxt->base.id; + __entry->timestamp = drawctxt->timestamp; + __entry->ticks = ticks; + __entry->prio = drawctxt->base.priority; + ), + TP_printk( + "ctx=%u ctx_prio=%d ts=%u ticks=%lld", + __entry->id, __entry->prio, __entry->timestamp, + __entry->ticks + ) +); + TRACE_EVENT(adreno_cmdbatch_fault, TP_PROTO(struct kgsl_drawobj_cmd *cmdobj, unsigned int fault), TP_ARGS(cmdobj, fault), diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c index 14bc79adaad3..afb489f10172 100644 --- a/drivers/gpu/msm/kgsl.c +++ b/drivers/gpu/msm/kgsl.c @@ -255,13 +255,6 @@ static void _deferred_put(struct work_struct *work) kgsl_mem_entry_put(entry); } -static inline void -kgsl_mem_entry_put_deferred(struct kgsl_mem_entry *entry) -{ - if (entry) - queue_work(kgsl_driver.mem_workqueue, &entry->work); -} - static inline struct kgsl_mem_entry * kgsl_mem_entry_create(void) { @@ -272,7 +265,6 @@ kgsl_mem_entry_create(void) /* put this ref in the caller functions after init */ kref_get(&entry->refcount); - INIT_WORK(&entry->work, _deferred_put); } return entry; } @@ -1869,7 +1861,7 @@ long kgsl_ioctl_sharedmem_free(struct kgsl_device_private *dev_priv, return -EINVAL; ret = gpumem_free_entry(entry); - kgsl_mem_entry_put_deferred(entry); + kgsl_mem_entry_put(entry); return ret; } @@ -1887,7 +1879,7 @@ long kgsl_ioctl_gpumem_free_id(struct kgsl_device_private *dev_priv, return -EINVAL; ret = gpumem_free_entry(entry); - kgsl_mem_entry_put_deferred(entry); + kgsl_mem_entry_put(entry); return ret; } @@ -1924,7 +1916,8 @@ static void gpuobj_free_fence_func(void *priv) { struct kgsl_mem_entry *entry = priv; - kgsl_mem_entry_put_deferred(entry); + INIT_WORK(&entry->work, _deferred_put); + queue_work(kgsl_driver.mem_workqueue, &entry->work); } static long gpuobj_free_on_fence(struct kgsl_device_private *dev_priv, @@ -1988,7 +1981,7 @@ long kgsl_ioctl_gpuobj_free(struct kgsl_device_private *dev_priv, else ret = -EINVAL; - kgsl_mem_entry_put_deferred(entry); + kgsl_mem_entry_put(entry); return ret; } @@ -3365,13 +3358,7 @@ long kgsl_ioctl_sparse_phys_free(struct kgsl_device_private *dev_priv, if (entry == NULL) return -EINVAL; - if (!kgsl_mem_entry_set_pend(entry)) { - kgsl_mem_entry_put(entry); - return -EBUSY; - } - if (entry->memdesc.cur_bindings != 0) { - kgsl_mem_entry_unset_pend(entry); kgsl_mem_entry_put(entry); return -EINVAL; } @@ -3380,7 +3367,7 @@ long kgsl_ioctl_sparse_phys_free(struct kgsl_device_private *dev_priv, /* One put for find_id(), one put for the kgsl_mem_entry_create() */ kgsl_mem_entry_put(entry); - kgsl_mem_entry_put_deferred(entry); + kgsl_mem_entry_put(entry); return 0; } @@ -3440,13 +3427,7 @@ long kgsl_ioctl_sparse_virt_free(struct kgsl_device_private *dev_priv, if (entry == NULL) return -EINVAL; - if (!kgsl_mem_entry_set_pend(entry)) { - kgsl_mem_entry_put(entry); - return -EBUSY; - } - if (entry->bind_tree.rb_node != NULL) { - kgsl_mem_entry_unset_pend(entry); kgsl_mem_entry_put(entry); return -EINVAL; } @@ -3455,7 +3436,7 @@ long kgsl_ioctl_sparse_virt_free(struct kgsl_device_private 
*dev_priv, /* One put for find_id(), one put for the kgsl_mem_entry_create() */ kgsl_mem_entry_put(entry); - kgsl_mem_entry_put_deferred(entry); + kgsl_mem_entry_put(entry); return 0; } @@ -4721,6 +4702,7 @@ int kgsl_device_platform_probe(struct kgsl_device *device) device->id, device->reg_phys, device->reg_len); rwlock_init(&device->context_lock); + spin_lock_init(&device->submit_lock); setup_timer(&device->idle_timer, kgsl_timer, (unsigned long) device); @@ -4865,6 +4847,8 @@ static void kgsl_core_exit(void) static int __init kgsl_core_init(void) { int result = 0; + struct sched_param param = { .sched_priority = 2 }; + /* alloc major and minor device numbers */ result = alloc_chrdev_region(&kgsl_driver.major, 0, KGSL_DEVICE_MAX, "kgsl"); @@ -4928,7 +4912,19 @@ static int __init kgsl_core_init(void) WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0); kgsl_driver.mem_workqueue = alloc_workqueue("kgsl-mementry", - WQ_MEM_RECLAIM, 0); + WQ_UNBOUND | WQ_MEM_RECLAIM, 0); + + init_kthread_worker(&kgsl_driver.worker); + + kgsl_driver.worker_thread = kthread_run(kthread_worker_fn, + &kgsl_driver.worker, "kgsl_worker_thread"); + + if (IS_ERR(kgsl_driver.worker_thread)) { + pr_err("unable to start kgsl thread\n"); + goto err; + } + + sched_setscheduler(kgsl_driver.worker_thread, SCHED_FIFO, ¶m); kgsl_events_init(); diff --git a/drivers/gpu/msm/kgsl.h b/drivers/gpu/msm/kgsl.h index 2a9ac899725c..faf38d1d2293 100644 --- a/drivers/gpu/msm/kgsl.h +++ b/drivers/gpu/msm/kgsl.h @@ -26,6 +26,7 @@ #include #include #include +#include #include /* @@ -152,6 +153,8 @@ struct kgsl_driver { unsigned int full_cache_threshold; struct workqueue_struct *workqueue; struct workqueue_struct *mem_workqueue; + struct kthread_worker worker; + struct task_struct *worker_thread; }; extern struct kgsl_driver kgsl_driver; @@ -301,7 +304,7 @@ struct kgsl_event { void *priv; struct list_head node; unsigned int created; - struct work_struct work; + struct kthread_work work; int result; struct kgsl_event_group *group; }; diff --git a/drivers/gpu/msm/kgsl_device.h b/drivers/gpu/msm/kgsl_device.h index d93fd9bfbcd0..64dd45a30612 100644 --- a/drivers/gpu/msm/kgsl_device.h +++ b/drivers/gpu/msm/kgsl_device.h @@ -256,6 +256,11 @@ struct kgsl_device { struct kgsl_pwrctrl pwrctrl; int open_count; + /* For GPU inline submission */ + uint32_t submit_now; + spinlock_t submit_lock; + bool slumber; + struct mutex mutex; uint32_t state; uint32_t requested_state; diff --git a/drivers/gpu/msm/kgsl_drawobj.c b/drivers/gpu/msm/kgsl_drawobj.c index f8f0e7ccb0d3..fba18231cb72 100644 --- a/drivers/gpu/msm/kgsl_drawobj.c +++ b/drivers/gpu/msm/kgsl_drawobj.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. 
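The kgsl_core_init() hunk above spawns a dedicated kthread worker and raises it to SCHED_FIFO; the dispatcher hunks earlier and the kgsl_events.c hunks below then queue their work onto it instead of a shared workqueue. The following is a minimal module-style sketch of that (pre-rename) kthread_worker API as used by this patch; the demo_* names are placeholders, not driver symbols, and the priority value simply mirrors the one used above.

    #include <linux/module.h>
    #include <linux/kthread.h>
    #include <linux/sched.h>

    static struct kthread_worker demo_worker;
    static struct task_struct *demo_thread;
    static struct kthread_work demo_work;

    static void demo_work_fn(struct kthread_work *work)
    {
        pr_info("demo: work ran on the dedicated kthread\n");
    }

    static int __init demo_init(void)
    {
        struct sched_param param = { .sched_priority = 2 };

        init_kthread_worker(&demo_worker);
        demo_thread = kthread_run(kthread_worker_fn, &demo_worker,
                                  "demo_worker");
        if (IS_ERR(demo_thread))
            return PTR_ERR(demo_thread);

        /* Elevate the worker so queued callbacks are not starved by CFS tasks. */
        sched_setscheduler(demo_thread, SCHED_FIFO, &param);

        init_kthread_work(&demo_work, demo_work_fn);
        queue_kthread_work(&demo_worker, &demo_work);
        return 0;
    }

    static void __exit demo_exit(void)
    {
        flush_kthread_worker(&demo_worker);
        kthread_stop(demo_thread);
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");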
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -79,6 +79,7 @@ void kgsl_dump_syncpoints(struct kgsl_device *device, { struct kgsl_drawobj_sync_event *event; unsigned int i; + unsigned long flags; for (i = 0; i < syncobj->numsyncs; i++) { event = &syncobj->synclist[i]; @@ -101,12 +102,16 @@ void kgsl_dump_syncpoints(struct kgsl_device *device, break; } case KGSL_CMD_SYNCPOINT_TYPE_FENCE: + spin_lock_irqsave(&event->handle_lock, flags); + if (event->handle) dev_err(device->dev, " fence: [%pK] %s\n", event->handle->fence, event->handle->name); else dev_err(device->dev, " fence: invalid\n"); + + spin_unlock_irqrestore(&event->handle_lock, flags); break; } } @@ -119,6 +124,7 @@ static void syncobj_timer(unsigned long data) struct kgsl_drawobj *drawobj = DRAWOBJ(syncobj); struct kgsl_drawobj_sync_event *event; unsigned int i; + unsigned long flags; if (syncobj == NULL || drawobj->context == NULL) return; @@ -147,12 +153,16 @@ static void syncobj_timer(unsigned long data) i, event->context->id, event->timestamp); break; case KGSL_CMD_SYNCPOINT_TYPE_FENCE: + spin_lock_irqsave(&event->handle_lock, flags); + if (event->handle != NULL) { dev_err(device->dev, " [%d] FENCE %s\n", i, event->handle->fence ? event->handle->fence->name : "NULL"); kgsl_sync_fence_log(event->handle->fence); } + + spin_unlock_irqrestore(&event->handle_lock, flags); break; } } @@ -231,7 +241,7 @@ static void drawobj_destroy_sparse(struct kgsl_drawobj *drawobj) static void drawobj_destroy_sync(struct kgsl_drawobj *drawobj) { struct kgsl_drawobj_sync *syncobj = SYNCOBJ(drawobj); - unsigned long pending; + unsigned long pending, flags; unsigned int i; /* Zap the canary timer */ @@ -262,8 +272,17 @@ static void drawobj_destroy_sync(struct kgsl_drawobj *drawobj) drawobj_sync_func, event); break; case KGSL_CMD_SYNCPOINT_TYPE_FENCE: - if (kgsl_sync_fence_async_cancel(event->handle)) + spin_lock_irqsave(&event->handle_lock, flags); + + if (kgsl_sync_fence_async_cancel(event->handle)) { + event->handle = NULL; + spin_unlock_irqrestore( + &event->handle_lock, flags); drawobj_put(drawobj); + } else { + spin_unlock_irqrestore( + &event->handle_lock, flags); + } break; } } @@ -325,12 +344,23 @@ EXPORT_SYMBOL(kgsl_drawobj_destroy); static void drawobj_sync_fence_func(void *priv) { + unsigned long flags; struct kgsl_drawobj_sync_event *event = priv; + drawobj_sync_expire(event->device, event); + trace_syncpoint_fence_expire(event->syncobj, event->handle ? event->handle->name : "unknown"); - drawobj_sync_expire(event->device, event); + spin_lock_irqsave(&event->handle_lock, flags); + + /* + * Setting the event->handle to NULL here make sure that + * other function does not dereference a invalid pointer. 
+ */ + event->handle = NULL; + + spin_unlock_irqrestore(&event->handle_lock, flags); drawobj_put(&event->syncobj->base); } @@ -348,7 +378,14 @@ static int drawobj_add_sync_fence(struct kgsl_device *device, struct kgsl_cmd_syncpoint_fence *sync = priv; struct kgsl_drawobj *drawobj = DRAWOBJ(syncobj); struct kgsl_drawobj_sync_event *event; + struct sync_fence *fence = NULL; unsigned int id; + unsigned long flags; + int ret = 0; + + fence = sync_fence_fdget(sync->fd); + if (fence == NULL) + return -EINVAL; kref_get(&drawobj->refcount); @@ -362,32 +399,39 @@ static int drawobj_add_sync_fence(struct kgsl_device *device, event->device = device; event->context = NULL; + spin_lock_init(&event->handle_lock); set_bit(event->id, &syncobj->pending); + trace_syncpoint_fence(syncobj, fence->name); + + spin_lock_irqsave(&event->handle_lock, flags); + event->handle = kgsl_sync_fence_async_wait(sync->fd, drawobj_sync_fence_func, event); if (IS_ERR_OR_NULL(event->handle)) { - int ret = PTR_ERR(event->handle); + ret = PTR_ERR(event->handle); + + event->handle = NULL; + spin_unlock_irqrestore(&event->handle_lock, flags); clear_bit(event->id, &syncobj->pending); - event->handle = NULL; drawobj_put(drawobj); /* - * If ret == 0 the fence was already signaled - print a trace - * message so we can track that + * Print a syncpoint_fence_expire trace if + * the fence is already signaled or there is + * a failure in registering the fence waiter. */ - if (ret == 0) - trace_syncpoint_fence_expire(syncobj, "signaled"); - - return ret; + trace_syncpoint_fence_expire(syncobj, (ret < 0) ? + "error" : fence->name); + } else { + spin_unlock_irqrestore(&event->handle_lock, flags); } - trace_syncpoint_fence(syncobj, event->handle->name); - - return 0; + sync_fence_put(fence); + return ret; } /* drawobj_add_sync_timestamp() - Add a new sync point for a sync obj diff --git a/drivers/gpu/msm/kgsl_drawobj.h b/drivers/gpu/msm/kgsl_drawobj.h index fd9d2bc93f41..b208870e4c42 100644 --- a/drivers/gpu/msm/kgsl_drawobj.h +++ b/drivers/gpu/msm/kgsl_drawobj.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -114,6 +114,7 @@ struct kgsl_drawobj_sync { * register this event * @timestamp: Pending timestamp for the event * @handle: Pointer to a sync fence handle + * @handle_lock: Spin lock to protect handle * @device: Pointer to the KGSL device */ struct kgsl_drawobj_sync_event { @@ -123,6 +124,7 @@ struct kgsl_drawobj_sync_event { struct kgsl_context *context; unsigned int timestamp; struct kgsl_sync_fence_waiter *handle; + spinlock_t handle_lock; struct kgsl_device *device; }; diff --git a/drivers/gpu/msm/kgsl_events.c b/drivers/gpu/msm/kgsl_events.c index 6e8abf36c50f..859511baba12 100644 --- a/drivers/gpu/msm/kgsl_events.c +++ b/drivers/gpu/msm/kgsl_events.c @@ -32,7 +32,7 @@ static inline void signal_event(struct kgsl_device *device, { list_del(&event->node); event->result = result; - queue_work(device->events_wq, &event->work); + queue_kthread_work(&kgsl_driver.worker, &event->work); } /** @@ -42,7 +42,7 @@ static inline void signal_event(struct kgsl_device *device, * Each event callback has its own work struct and is run on a event specific * workqeuue. This is the worker that queues up the event callback function. 
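The kgsl_drawobj fence hunks above guard event->handle with a new spinlock and clear it exactly once, when the waiter either fires or is cancelled, so the dump, timer and destroy paths never dereference a stale pointer. A stripped-down sketch of that idiom follows; the demo_* types and names are invented for illustration only.

    #include <linux/kernel.h>
    #include <linux/spinlock.h>

    struct demo_waiter;

    struct demo_event {
        spinlock_t handle_lock;
        struct demo_waiter *handle;   /* NULL once consumed or cancelled */
    };

    /* Completion callback: take ownership of the handle exactly once. */
    static void demo_event_fire(struct demo_event *ev)
    {
        unsigned long flags;

        spin_lock_irqsave(&ev->handle_lock, flags);
        ev->handle = NULL;   /* later readers now see "already signalled" */
        spin_unlock_irqrestore(&ev->handle_lock, flags);
    }

    /* Debug/dump path: only dereference the handle while holding the lock. */
    static void demo_event_dump(struct demo_event *ev)
    {
        unsigned long flags;

        spin_lock_irqsave(&ev->handle_lock, flags);
        if (ev->handle)
            pr_info("waiter still pending: %pK\n", ev->handle);
        else
            pr_info("waiter already signalled or cancelled\n");
        spin_unlock_irqrestore(&ev->handle_lock, flags);
    }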
*/ -static void _kgsl_event_worker(struct work_struct *work) +static void _kgsl_event_worker(struct kthread_work *work) { struct kgsl_event *event = container_of(work, struct kgsl_event, work); int id = KGSL_CONTEXT_ID(event->context); @@ -282,7 +282,7 @@ int kgsl_add_event(struct kgsl_device *device, struct kgsl_event_group *group, event->created = jiffies; event->group = group; - INIT_WORK(&event->work, _kgsl_event_worker); + init_kthread_work(&event->work, _kgsl_event_worker); trace_kgsl_register_event(KGSL_CONTEXT_ID(context), timestamp, func); @@ -297,7 +297,7 @@ int kgsl_add_event(struct kgsl_device *device, struct kgsl_event_group *group, if (timestamp_cmp(retired, timestamp) >= 0) { event->result = KGSL_EVENT_RETIRED; - queue_work(device->events_wq, &event->work); + queue_kthread_work(&kgsl_driver.worker, &event->work); spin_unlock(&group->lock); return 0; } diff --git a/drivers/gpu/msm/kgsl_pool.c b/drivers/gpu/msm/kgsl_pool.c index bb92b8b79d93..685ce3ea968b 100644 --- a/drivers/gpu/msm/kgsl_pool.c +++ b/drivers/gpu/msm/kgsl_pool.c @@ -65,26 +65,19 @@ _kgsl_get_pool_from_order(unsigned int order) /* Map the page into kernel and zero it out */ static void -_kgsl_pool_zero_page(struct page *p, unsigned int pool_order) +_kgsl_pool_zero_page(struct page *p) { - int i; + void *addr = kmap_atomic(p); - for (i = 0; i < (1 << pool_order); i++) { - struct page *page = nth_page(p, i); - void *addr = kmap_atomic(page); - - memset(addr, 0, PAGE_SIZE); - dmac_flush_range(addr, addr + PAGE_SIZE); - kunmap_atomic(addr); - } + memset(addr, 0, PAGE_SIZE); + dmac_flush_range(addr, addr + PAGE_SIZE); + kunmap_atomic(addr); } /* Add a page to specified pool */ static void _kgsl_pool_add_page(struct kgsl_page_pool *pool, struct page *p) { - _kgsl_pool_zero_page(p, pool->pool_order); - spin_lock(&pool->list_lock); list_add_tail(&p->lru, &pool->page_list); pool->page_count++; @@ -280,6 +273,17 @@ static int kgsl_pool_idx_lookup(unsigned int order) return -ENOMEM; } +static int kgsl_pool_get_retry_order(unsigned int order) +{ + int i; + + for (i = kgsl_num_pools-1; i > 0; i--) + if (order >= kgsl_pools[i].pool_order) + return kgsl_pools[i].pool_order; + + return 0; +} + /** * kgsl_pool_alloc_page() - Allocate a page of requested size * @page_size: Size of the page to be allocated @@ -318,7 +322,6 @@ int kgsl_pool_alloc_page(int *page_size, struct page **pages, } else return -ENOMEM; } - _kgsl_pool_zero_page(page, order); goto done; } @@ -326,7 +329,7 @@ int kgsl_pool_alloc_page(int *page_size, struct page **pages, if (pool == NULL) { /* Retry with lower order pages */ if (order > 0) { - size = PAGE_SIZE << --order; + size = PAGE_SIZE << kgsl_pool_get_retry_order(order); goto eagain; } else { /* @@ -338,7 +341,6 @@ int kgsl_pool_alloc_page(int *page_size, struct page **pages, page = alloc_pages(gfp_mask, order); if (page == NULL) return -ENOMEM; - _kgsl_pool_zero_page(page, order); goto done; } } @@ -368,13 +370,12 @@ int kgsl_pool_alloc_page(int *page_size, struct page **pages, } else return -ENOMEM; } - - _kgsl_pool_zero_page(page, order); } done: for (j = 0; j < (*page_size >> PAGE_SHIFT); j++) { p = nth_page(page, j); + _kgsl_pool_zero_page(p); pages[pcount] = p; pcount++; } diff --git a/drivers/gpu/msm/kgsl_pwrctrl.c b/drivers/gpu/msm/kgsl_pwrctrl.c index 0150d50c925b..8c998a5d791b 100644 --- a/drivers/gpu/msm/kgsl_pwrctrl.c +++ b/drivers/gpu/msm/kgsl_pwrctrl.c @@ -43,13 +43,6 @@ #define DEFAULT_BUS_P 25 -/* - * The effective duration of qos request in usecs. 
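In the kgsl_pool hunk above, a request whose order has no backing pool no longer retries at order - 1; kgsl_pool_get_retry_order() drops straight to the largest configured pool order that still fits under the requested one. A small self-contained sketch of that lookup is below, using a made-up pool table rather than the driver's configuration.

    #include <stdio.h>

    /* Hypothetical pool configuration, listed in ascending order. */
    static const unsigned int pool_orders[] = { 0, 2, 4, 8 };
    #define NUM_POOLS (sizeof(pool_orders) / sizeof(pool_orders[0]))

    /* Largest configured order that is <= the requested order, else order 0. */
    static unsigned int pool_get_retry_order(unsigned int order)
    {
        int i;

        for (i = NUM_POOLS - 1; i > 0; i--)
            if (order >= pool_orders[i])
                return pool_orders[i];

        return 0;
    }

    int main(void)
    {
        /* A failed order-9 request retries at order 8, order 3 at 2, order 1 at 0. */
        printf("%u %u %u\n", pool_get_retry_order(9),
               pool_get_retry_order(3), pool_get_retry_order(1));
        return 0;
    }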
After - * timeout, qos request is cancelled automatically. - * Kept 80ms default, inline with default GPU idle time. - */ -#define KGSL_L2PC_CPU_TIMEOUT (80 * 1000) - /* Order deeply matters here because reasons. New entries go on the end */ static const char * const clocks[] = { "src_clk", @@ -81,6 +74,12 @@ static void kgsl_pwrctrl_set_state(struct kgsl_device *device, static void kgsl_pwrctrl_request_state(struct kgsl_device *device, unsigned int state); static int _isense_clk_set_rate(struct kgsl_pwrctrl *pwr, int level); +static int kgsl_pwrctrl_clk_set_rate(struct clk *grp_clk, unsigned int freq, + const char *name); +static void _gpu_clk_prepare_enable(struct kgsl_device *device, + struct clk *clk, const char *name); +static void _bimc_clk_prepare_enable(struct kgsl_device *device, + struct clk *clk, const char *name); /** * _record_pwrevent() - Record the history of the new event @@ -405,7 +404,8 @@ void kgsl_pwrctrl_pwrlevel_change(struct kgsl_device *device, pwrlevel = &pwr->pwrlevels[pwr->active_pwrlevel]; /* Change register settings if any BEFORE pwrlevel change*/ kgsl_pwrctrl_pwrlevel_change_settings(device, 0); - clk_set_rate(pwr->grp_clks[0], pwrlevel->gpu_freq); + kgsl_pwrctrl_clk_set_rate(pwr->grp_clks[0], + pwrlevel->gpu_freq, clocks[0]); _isense_clk_set_rate(pwr, pwr->active_pwrlevel); trace_kgsl_pwrlevel(device, @@ -423,9 +423,12 @@ void kgsl_pwrctrl_pwrlevel_change(struct kgsl_device *device, if (pwr->gpu_bimc_int_clk) { if (pwr->active_pwrlevel == 0 && !pwr->gpu_bimc_interface_enabled) { - clk_set_rate(pwr->gpu_bimc_int_clk, - pwr->gpu_bimc_int_clk_freq); - clk_prepare_enable(pwr->gpu_bimc_int_clk); + kgsl_pwrctrl_clk_set_rate(pwr->gpu_bimc_int_clk, + pwr->gpu_bimc_int_clk_freq, + "bimc_gpu_clk"); + _bimc_clk_prepare_enable(device, + pwr->gpu_bimc_int_clk, + "bimc_gpu_clk"); pwr->gpu_bimc_interface_enabled = 1; } else if (pwr->previous_pwrlevel == 0 && pwr->gpu_bimc_interface_enabled) { @@ -510,12 +513,14 @@ EXPORT_SYMBOL(kgsl_pwrctrl_set_constraint); /** * kgsl_pwrctrl_update_l2pc() - Update existing qos request * @device: Pointer to the kgsl_device struct + * @timeout_us: the effective duration of qos request in usecs. * * Updates an existing qos request to avoid L2PC on the * CPUs (which are selected through dtsi) on which GPU * thread is running. This would help for performance. */ -void kgsl_pwrctrl_update_l2pc(struct kgsl_device *device) +void kgsl_pwrctrl_update_l2pc(struct kgsl_device *device, + unsigned long timeout_us) { int cpu; @@ -529,7 +534,7 @@ void kgsl_pwrctrl_update_l2pc(struct kgsl_device *device) pm_qos_update_request_timeout( &device->pwrctrl.l2pc_cpus_qos, device->pwrctrl.pm_qos_cpu_mask_latency, - KGSL_L2PC_CPU_TIMEOUT); + timeout_us); } } EXPORT_SYMBOL(kgsl_pwrctrl_update_l2pc); @@ -1012,6 +1017,8 @@ static void __force_on(struct kgsl_device *device, int flag, int on) if (on) { switch (flag) { case KGSL_PWRFLAGS_CLK_ON: + /* make sure pwrrail is ON before enabling clocks */ + kgsl_pwrctrl_pwrrail(device, KGSL_PWRFLAGS_ON); kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_ON, KGSL_STATE_ACTIVE); break; @@ -1650,9 +1657,9 @@ static void kgsl_pwrctrl_clk(struct kgsl_device *device, int state, (requested_state != KGSL_STATE_NAP)) { for (i = KGSL_MAX_CLKS - 1; i > 0; i--) clk_unprepare(pwr->grp_clks[i]); - clk_set_rate(pwr->grp_clks[0], + kgsl_pwrctrl_clk_set_rate(pwr->grp_clks[0], pwr->pwrlevels[pwr->num_pwrlevels - 1]. 
- gpu_freq); + gpu_freq, clocks[0]); _isense_clk_set_rate(pwr, pwr->num_pwrlevels - 1); } @@ -1664,9 +1671,9 @@ static void kgsl_pwrctrl_clk(struct kgsl_device *device, int state, for (i = KGSL_MAX_CLKS - 1; i > 0; i--) clk_unprepare(pwr->grp_clks[i]); if ((pwr->pwrlevels[0].gpu_freq > 0)) { - clk_set_rate(pwr->grp_clks[0], + kgsl_pwrctrl_clk_set_rate(pwr->grp_clks[0], pwr->pwrlevels[pwr->num_pwrlevels - 1]. - gpu_freq); + gpu_freq, clocks[0]); _isense_clk_set_rate(pwr, pwr->num_pwrlevels - 1); } @@ -1679,29 +1686,31 @@ static void kgsl_pwrctrl_clk(struct kgsl_device *device, int state, /* High latency clock maintenance. */ if (device->state != KGSL_STATE_NAP) { if (pwr->pwrlevels[0].gpu_freq > 0) { - clk_set_rate(pwr->grp_clks[0], + kgsl_pwrctrl_clk_set_rate( + pwr->grp_clks[0], pwr->pwrlevels [pwr->active_pwrlevel]. - gpu_freq); + gpu_freq, clocks[0]); _isense_clk_set_rate(pwr, pwr->active_pwrlevel); } - - for (i = KGSL_MAX_CLKS - 1; i > 0; i--) - clk_prepare(pwr->grp_clks[i]); } - /* as last step, enable grp_clk - this is to let GPU interrupt to come */ + for (i = KGSL_MAX_CLKS - 1; i > 0; i--) - clk_enable(pwr->grp_clks[i]); + _gpu_clk_prepare_enable(device, + pwr->grp_clks[i], clocks[i]); + /* Enable the gpu-bimc-interface clocks */ if (pwr->gpu_bimc_int_clk) { if (pwr->active_pwrlevel == 0 && !pwr->gpu_bimc_interface_enabled) { - clk_set_rate(pwr->gpu_bimc_int_clk, - pwr->gpu_bimc_int_clk_freq); - clk_prepare_enable( - pwr->gpu_bimc_int_clk); + kgsl_pwrctrl_clk_set_rate( + pwr->gpu_bimc_int_clk, + pwr->gpu_bimc_int_clk_freq, + "bimc_gpu_clk"); + _bimc_clk_prepare_enable(device, + pwr->gpu_bimc_int_clk, + "bimc_gpu_clk"); pwr->gpu_bimc_interface_enabled = 1; } } @@ -1805,7 +1814,12 @@ static int kgsl_pwrctrl_pwrrail(struct kgsl_device *device, int state) struct kgsl_pwrctrl *pwr = &device->pwrctrl; int status = 0; - if (test_bit(KGSL_PWRFLAGS_POWER_ON, &pwr->ctrl_flags)) + /* + * Disabling the regulator means also disabling dependent clocks. + * Hence don't disable it if force clock ON is set. + */ + if (test_bit(KGSL_PWRFLAGS_POWER_ON, &pwr->ctrl_flags) || + test_bit(KGSL_PWRFLAGS_CLK_ON, &pwr->ctrl_flags)) return 0; if (state == KGSL_PWRFLAGS_OFF) { @@ -2022,7 +2036,54 @@ static int _isense_clk_set_rate(struct kgsl_pwrctrl *pwr, int level) rate = clk_round_rate(pwr->grp_clks[pwr->isense_clk_indx], level > pwr->isense_clk_on_level ? 
KGSL_XO_CLK_FREQ : KGSL_ISENSE_CLK_FREQ); - return clk_set_rate(pwr->grp_clks[pwr->isense_clk_indx], rate); + return kgsl_pwrctrl_clk_set_rate(pwr->grp_clks[pwr->isense_clk_indx], + rate, clocks[pwr->isense_clk_indx]); +} + +/* + * _gpu_clk_prepare_enable - Enable the specified GPU clock + * Try once to enable it and then BUG() for debug + */ +static void _gpu_clk_prepare_enable(struct kgsl_device *device, + struct clk *clk, const char *name) +{ + int ret; + + if (device->state == KGSL_STATE_NAP) { + ret = clk_enable(clk); + if (ret) + goto err; + return; + } + + ret = clk_prepare_enable(clk); + if (!ret) + return; +err: + /* Failure is fatal so BUG() to facilitate debug */ + KGSL_DRV_FATAL(device, "KGSL:%s enable error:%d\n", name, ret); +} + +/* + * _bimc_clk_prepare_enable - Enable the specified GPU clock + * Try once to enable it and then BUG() for debug + */ +static void _bimc_clk_prepare_enable(struct kgsl_device *device, + struct clk *clk, const char *name) +{ + int ret = clk_prepare_enable(clk); + /* Failure is fatal so BUG() to facilitate debug */ + if (ret) + KGSL_DRV_FATAL(device, "KGSL:%s enable error:%d\n", name, ret); +} + +static int kgsl_pwrctrl_clk_set_rate(struct clk *grp_clk, unsigned int freq, + const char *name) +{ + int ret = clk_set_rate(grp_clk, freq); + + WARN(ret, "KGSL:%s set freq %d failed:%d\n", name, freq, ret); + return ret; } static inline void _close_pcl(struct kgsl_pwrctrl *pwr) @@ -2117,11 +2178,12 @@ int kgsl_pwrctrl_init(struct kgsl_device *device) pwr->pwrlevels[i].gpu_freq = freq; } - clk_set_rate(pwr->grp_clks[0], - pwr->pwrlevels[pwr->num_pwrlevels - 1].gpu_freq); + kgsl_pwrctrl_clk_set_rate(pwr->grp_clks[0], + pwr->pwrlevels[pwr->num_pwrlevels - 1].gpu_freq, clocks[0]); - clk_set_rate(pwr->grp_clks[6], - clk_round_rate(pwr->grp_clks[6], KGSL_RBBMTIMER_CLK_FREQ)); + kgsl_pwrctrl_clk_set_rate(pwr->grp_clks[6], + clk_round_rate(pwr->grp_clks[6], KGSL_RBBMTIMER_CLK_FREQ), + clocks[6]); _isense_clk_set_rate(pwr, pwr->num_pwrlevels - 1); @@ -2134,6 +2196,10 @@ int kgsl_pwrctrl_init(struct kgsl_device *device) kgsl_property_read_u32(device, "qcom,l2pc-cpu-mask", &pwr->l2pc_cpus_mask); + pwr->l2pc_update_queue = of_property_read_bool( + device->pdev->dev.of_node, + "qcom,l2pc-update-queue"); + pm_runtime_enable(&pdev->dev); ocmem_bus_node = of_find_node_by_name( @@ -2347,9 +2413,24 @@ void kgsl_idle_check(struct work_struct *work) || device->state == KGSL_STATE_NAP) { if (!atomic_read(&device->active_cnt)) { + spin_lock(&device->submit_lock); + if (device->submit_now) { + spin_unlock(&device->submit_lock); + goto done; + } + /* Don't allow GPU inline submission in SLUMBER */ + if (requested_state == KGSL_STATE_SLUMBER) + device->slumber = true; + spin_unlock(&device->submit_lock); + ret = kgsl_pwrctrl_change_state(device, device->requested_state); if (ret == -EBUSY) { + if (requested_state == KGSL_STATE_SLUMBER) { + spin_lock(&device->submit_lock); + device->slumber = false; + spin_unlock(&device->submit_lock); + } /* * If the GPU is currently busy, restore * the requested state and reschedule @@ -2360,7 +2441,7 @@ void kgsl_idle_check(struct work_struct *work) kgsl_schedule_work(&device->idle_check_ws); } } - +done: if (!ret) kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE); @@ -2789,6 +2870,13 @@ static void kgsl_pwrctrl_set_state(struct kgsl_device *device, trace_kgsl_pwr_set_state(device, state); device->state = state; device->requested_state = KGSL_STATE_NONE; + + spin_lock(&device->submit_lock); + if (state == KGSL_STATE_SLUMBER || state == 
KGSL_STATE_SUSPEND) + device->slumber = true; + else + device->slumber = false; + spin_unlock(&device->submit_lock); } static void kgsl_pwrctrl_request_state(struct kgsl_device *device, @@ -3015,7 +3103,7 @@ EXPORT_SYMBOL(kgsl_pwr_limits_add); void kgsl_pwr_limits_del(void *limit_ptr) { struct kgsl_pwr_limit *limit = limit_ptr; - if (IS_ERR(limit)) + if (IS_ERR_OR_NULL(limit)) return; _update_limits(limit, KGSL_PWR_DEL_LIMIT, 0); @@ -3036,7 +3124,7 @@ int kgsl_pwr_limits_set_freq(void *limit_ptr, unsigned int freq) struct kgsl_pwr_limit *limit = limit_ptr; int level; - if (IS_ERR(limit)) + if (IS_ERR_OR_NULL(limit)) return -EINVAL; pwr = &limit->device->pwrctrl; @@ -3058,7 +3146,7 @@ void kgsl_pwr_limits_set_default(void *limit_ptr) { struct kgsl_pwr_limit *limit = limit_ptr; - if (IS_ERR(limit)) + if (IS_ERR_OR_NULL(limit)) return; _update_limits(limit, KGSL_PWR_SET_LIMIT, 0); diff --git a/drivers/gpu/msm/kgsl_pwrctrl.h b/drivers/gpu/msm/kgsl_pwrctrl.h index 42f918b80fcd..02707c901839 100644 --- a/drivers/gpu/msm/kgsl_pwrctrl.h +++ b/drivers/gpu/msm/kgsl_pwrctrl.h @@ -51,6 +51,19 @@ #define KGSL_PWR_DEL_LIMIT 1 #define KGSL_PWR_SET_LIMIT 2 +/* + * The effective duration of qos request in usecs at queue time. + * After timeout, qos request is cancelled automatically. + * Kept 80ms default, inline with default GPU idle time. + */ +#define KGSL_L2PC_QUEUE_TIMEOUT (80 * 1000) + +/* + * The effective duration of qos request in usecs at wakeup time. + * After timeout, qos request is cancelled automatically. + */ +#define KGSL_L2PC_WAKEUP_TIMEOUT (10 * 1000) + enum kgsl_pwrctrl_timer_type { KGSL_PWR_IDLE_TIMER, }; @@ -128,10 +141,12 @@ struct kgsl_regulator { * @irq_name - resource name for the IRQ * @clk_stats - structure of clock statistics * @l2pc_cpus_mask - mask to avoid L2PC on masked CPUs + * @l2pc_update_queue - Boolean flag to avoid L2PC on masked CPUs at queue time * @l2pc_cpus_qos - qos structure to avoid L2PC on CPUs * @pm_qos_req_dma - the power management quality of service structure * @pm_qos_active_latency - allowed CPU latency in microseconds when active * @pm_qos_cpu_mask_latency - allowed CPU mask latency in microseconds + * @input_disable - To disable GPU wakeup on touch input event * @pm_qos_wakeup_latency - allowed CPU latency in microseconds during wakeup * @bus_control - true if the bus calculation is independent * @bus_mod - modifier from the current power level for the bus vote @@ -183,11 +198,13 @@ struct kgsl_pwrctrl { const char *irq_name; struct kgsl_clk_stats clk_stats; unsigned int l2pc_cpus_mask; + bool l2pc_update_queue; struct pm_qos_request l2pc_cpus_qos; struct pm_qos_request pm_qos_req_dma; unsigned int pm_qos_active_latency; unsigned int pm_qos_cpu_mask_latency; unsigned int pm_qos_wakeup_latency; + bool input_disable; bool bus_control; int bus_mod; unsigned int bus_percent_ab; @@ -249,5 +266,6 @@ int kgsl_active_count_wait(struct kgsl_device *device, int count); void kgsl_pwrctrl_busy_time(struct kgsl_device *device, u64 time, u64 busy); void kgsl_pwrctrl_set_constraint(struct kgsl_device *device, struct kgsl_pwr_constraint *pwrc, uint32_t id); -void kgsl_pwrctrl_update_l2pc(struct kgsl_device *device); +void kgsl_pwrctrl_update_l2pc(struct kgsl_device *device, + unsigned long timeout_us); #endif /* __KGSL_PWRCTRL_H */ diff --git a/drivers/gpu/msm/kgsl_pwrscale.c b/drivers/gpu/msm/kgsl_pwrscale.c index 7f93ab8fa8d4..2b7a1fcbaa78 100644 --- a/drivers/gpu/msm/kgsl_pwrscale.c +++ b/drivers/gpu/msm/kgsl_pwrscale.c @@ -522,7 +522,8 @@ int 
kgsl_devfreq_target(struct device *dev, unsigned long *freq, u32 flags) struct kgsl_device *device = dev_get_drvdata(dev); struct kgsl_pwrctrl *pwr; struct kgsl_pwrlevel *pwr_level; - int level, i; + int level; + unsigned int i; unsigned long cur_freq; if (device == NULL) @@ -550,7 +551,12 @@ int kgsl_devfreq_target(struct device *dev, unsigned long *freq, u32 flags) /* If the governor recommends a new frequency, update it here */ if (*freq != cur_freq) { level = pwr->max_pwrlevel; - for (i = pwr->min_pwrlevel; i >= pwr->max_pwrlevel; i--) + /* + * Array index of pwrlevels[] should be within the permitted + * power levels, i.e., from max_pwrlevel to min_pwrlevel. + */ + for (i = pwr->min_pwrlevel; (i >= pwr->max_pwrlevel + && i <= pwr->min_pwrlevel); i--) if (*freq <= pwr->pwrlevels[i].gpu_freq) { if (pwr->thermal_cycle == CYCLE_ACTIVE) level = _thermal_adjust(pwr, i); @@ -585,7 +591,7 @@ int kgsl_devfreq_get_dev_status(struct device *dev, struct kgsl_device *device = dev_get_drvdata(dev); struct kgsl_pwrctrl *pwrctrl; struct kgsl_pwrscale *pwrscale; - ktime_t tmp; + ktime_t tmp1, tmp2; if (device == NULL) return -ENODEV; @@ -596,6 +602,8 @@ int kgsl_devfreq_get_dev_status(struct device *dev, pwrctrl = &device->pwrctrl; mutex_lock(&device->mutex); + + tmp1 = ktime_get(); /* * If the GPU clock is on grab the latest power counter * values. Otherwise the most recent ACTIVE values will @@ -603,9 +611,9 @@ int kgsl_devfreq_get_dev_status(struct device *dev, */ kgsl_pwrscale_update_stats(device); - tmp = ktime_get(); - stat->total_time = ktime_us_delta(tmp, pwrscale->time); - pwrscale->time = tmp; + tmp2 = ktime_get(); + stat->total_time = ktime_us_delta(tmp2, pwrscale->time); + pwrscale->time = tmp1; stat->busy_time = pwrscale->accum_stats.busy_time; diff --git a/drivers/gpu/msm/kgsl_sharedmem.c b/drivers/gpu/msm/kgsl_sharedmem.c index 7f4a5a3b251f..27733b068434 100644 --- a/drivers/gpu/msm/kgsl_sharedmem.c +++ b/drivers/gpu/msm/kgsl_sharedmem.c @@ -20,6 +20,7 @@ #include #include #include +#include #include "kgsl.h" #include "kgsl_sharedmem.h" @@ -699,6 +700,10 @@ kgsl_sharedmem_page_alloc_user(struct kgsl_memdesc *memdesc, size_t len; unsigned int align; + static DEFINE_RATELIMIT_STATE(_rs, + DEFAULT_RATELIMIT_INTERVAL, + DEFAULT_RATELIMIT_BURST); + size = PAGE_ALIGN(size); if (size == 0 || size > UINT_MAX) return -EINVAL; @@ -761,7 +766,8 @@ kgsl_sharedmem_page_alloc_user(struct kgsl_memdesc *memdesc, */ memdesc->size = (size - len); - if (sharedmem_noretry_flag != true) + if (sharedmem_noretry_flag != true && + __ratelimit(&_rs)) KGSL_CORE_ERR( "Out of memory: only allocated %lldKB of %lldKB requested\n", (size - len) >> 10, size >> 10); diff --git a/drivers/gpu/msm/kgsl_sync.c b/drivers/gpu/msm/kgsl_sync.c index 358b3b038899..4bf591c236a7 100644 --- a/drivers/gpu/msm/kgsl_sync.c +++ b/drivers/gpu/msm/kgsl_sync.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved. 
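The kgsl_devfreq_target() change above is subtle: once the index becomes unsigned, a descending scan from min_pwrlevel with only i >= max_pwrlevel as the bound never terminates when max_pwrlevel is 0, because decrementing past 0 wraps to UINT_MAX. The added i <= min_pwrlevel test stops the walk as soon as the index wraps. A tiny standalone demonstration of the hazard and the bounded form, with representative constants:

    #include <stdio.h>

    #define MAX_PWRLEVEL 0u   /* fastest level */
    #define MIN_PWRLEVEL 4u   /* slowest level */

    int main(void)
    {
        unsigned int i, visited = 0;

        /*
         * "i >= MAX_PWRLEVEL" alone is always true for an unsigned i when
         * MAX_PWRLEVEL is 0, so the loop would wrap past 0 and run away.
         * The extra upper bound ends the walk after levels 4..0 are visited.
         */
        for (i = MIN_PWRLEVEL; i >= MAX_PWRLEVEL && i <= MIN_PWRLEVEL; i--)
            visited++;

        printf("visited %u levels\n", visited);   /* prints: visited 5 levels */
        return 0;
    }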
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -179,7 +179,7 @@ int kgsl_add_fence_event(struct kgsl_device *device, goto out; } snprintf(fence_name, sizeof(fence_name), - "%s-pid-%d-ctx-%d-ts-%d", + "%s-pid-%d-ctx-%d-ts-%u", device->name, current->group_leader->pid, context_id, timestamp); diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index 34ea83d067af..13dc2731195b 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c @@ -2428,6 +2428,7 @@ static const struct hid_device_id hid_ignore_list[] = { { HID_USB_DEVICE(USB_VENDOR_ID_PANJIT, 0x0002) }, { HID_USB_DEVICE(USB_VENDOR_ID_PANJIT, 0x0003) }, { HID_USB_DEVICE(USB_VENDOR_ID_PANJIT, 0x0004) }, + { HID_USB_DEVICE(USB_VENDOR_ID_PETZL, USB_DEVICE_ID_PETZL_HEADLAMP) }, { HID_USB_DEVICE(USB_VENDOR_ID_PHILIPS, USB_DEVICE_ID_PHILIPS_IEEE802154_DONGLE) }, { HID_USB_DEVICE(USB_VENDOR_ID_POWERCOM, USB_DEVICE_ID_POWERCOM_UPS) }, #if defined(CONFIG_MOUSE_SYNAPTICS_USB) || defined(CONFIG_MOUSE_SYNAPTICS_USB_MODULE) diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index e37030624165..37cbc2ecfc5f 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h @@ -285,6 +285,9 @@ #define USB_VENDOR_ID_DEALEXTREAME 0x10c5 #define USB_DEVICE_ID_DEALEXTREAME_RADIO_SI4701 0x819a +#define USB_VENDOR_ID_DELL 0x413c +#define USB_DEVICE_ID_DELL_PIXART_USB_OPTICAL_MOUSE 0x301a + #define USB_VENDOR_ID_DELORME 0x1163 #define USB_DEVICE_ID_DELORME_EARTHMATE 0x0100 #define USB_DEVICE_ID_DELORME_EM_LT20 0x0200 @@ -774,6 +777,9 @@ #define USB_VENDOR_ID_PETALYNX 0x18b1 #define USB_DEVICE_ID_PETALYNX_MAXTER_REMOTE 0x0037 +#define USB_VENDOR_ID_PETZL 0x2122 +#define USB_DEVICE_ID_PETZL_HEADLAMP 0x1234 + #define USB_VENDOR_ID_PHILIPS 0x0471 #define USB_DEVICE_ID_PHILIPS_IEEE802154_DONGLE 0x0617 diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c index 0b80633bae91..d4d655a10df1 100644 --- a/drivers/hid/i2c-hid/i2c-hid.c +++ b/drivers/hid/i2c-hid/i2c-hid.c @@ -364,6 +364,15 @@ static int i2c_hid_hwreset(struct i2c_client *client) if (ret) return ret; + /* + * The HID over I2C specification states that if a DEVICE needs time + * after the PWR_ON request, it should utilise CLOCK stretching. + * However, it has been observered that the Windows driver provides a + * 1ms sleep between the PWR_ON and RESET requests and that some devices + * rely on this. 
+ */ + usleep_range(1000, 5000); + i2c_hid_dbg(ihid, "resetting...\n"); ret = i2c_hid_command(client, &hid_reset_cmd, NULL, 0); diff --git a/drivers/hid/uhid.c b/drivers/hid/uhid.c index 1a2032c2c1fb..690a9f0fa042 100644 --- a/drivers/hid/uhid.c +++ b/drivers/hid/uhid.c @@ -28,6 +28,8 @@ #define UHID_NAME "uhid" #define UHID_BUFSIZE 32 +static DEFINE_MUTEX(uhid_open_mutex); + struct uhid_device { struct mutex devlock; bool running; @@ -142,15 +144,26 @@ static void uhid_hid_stop(struct hid_device *hid) static int uhid_hid_open(struct hid_device *hid) { struct uhid_device *uhid = hid->driver_data; + int retval = 0; - return uhid_queue_event(uhid, UHID_OPEN); + mutex_lock(&uhid_open_mutex); + if (!hid->open++) { + retval = uhid_queue_event(uhid, UHID_OPEN); + if (retval) + hid->open--; + } + mutex_unlock(&uhid_open_mutex); + return retval; } static void uhid_hid_close(struct hid_device *hid) { struct uhid_device *uhid = hid->driver_data; - uhid_queue_event(uhid, UHID_CLOSE); + mutex_lock(&uhid_open_mutex); + if (!--hid->open) + uhid_queue_event(uhid, UHID_CLOSE); + mutex_unlock(&uhid_open_mutex); } static int uhid_hid_parse(struct hid_device *hid) diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c index 6ca6ab00fa93..ce1543d69acb 100644 --- a/drivers/hid/usbhid/hid-quirks.c +++ b/drivers/hid/usbhid/hid-quirks.c @@ -72,6 +72,7 @@ static const struct hid_blacklist { { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_AXIS_295, HID_QUIRK_NOGET }, { USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_PIXART_USB_OPTICAL_MOUSE, HID_QUIRK_ALWAYS_POLL }, { USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_CREATIVE_SB_OMNI_SURROUND_51, HID_QUIRK_NOGET }, + { USB_VENDOR_ID_DELL, USB_DEVICE_ID_DELL_PIXART_USB_OPTICAL_MOUSE, HID_QUIRK_ALWAYS_POLL }, { USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET }, { USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_WIIU, HID_QUIRK_MULTI_INPUT }, { USB_VENDOR_ID_ELAN, HID_ANY_ID, HID_QUIRK_ALWAYS_POLL }, diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c index 35e3fd9fadf6..b62c50d1b1e4 100644 --- a/drivers/hid/wacom_wac.c +++ b/drivers/hid/wacom_wac.c @@ -1440,37 +1440,38 @@ static int wacom_tpc_irq(struct wacom_wac *wacom, size_t len) { unsigned char *data = wacom->data; - if (wacom->pen_input) + if (wacom->pen_input) { dev_dbg(wacom->pen_input->dev.parent, "%s: received report #%d\n", __func__, data[0]); - else if (wacom->touch_input) + + if (len == WACOM_PKGLEN_PENABLED || + data[0] == WACOM_REPORT_PENABLED) + return wacom_tpc_pen(wacom); + } + else if (wacom->touch_input) { dev_dbg(wacom->touch_input->dev.parent, "%s: received report #%d\n", __func__, data[0]); - switch (len) { - case WACOM_PKGLEN_TPC1FG: - return wacom_tpc_single_touch(wacom, len); - - case WACOM_PKGLEN_TPC2FG: - return wacom_tpc_mt_touch(wacom); - - case WACOM_PKGLEN_PENABLED: - return wacom_tpc_pen(wacom); - - default: - switch (data[0]) { - case WACOM_REPORT_TPC1FG: - case WACOM_REPORT_TPCHID: - case WACOM_REPORT_TPCST: - case WACOM_REPORT_TPC1FGE: + switch (len) { + case WACOM_PKGLEN_TPC1FG: return wacom_tpc_single_touch(wacom, len); - case WACOM_REPORT_TPCMT: - case WACOM_REPORT_TPCMT2: - return wacom_mt_touch(wacom); + case WACOM_PKGLEN_TPC2FG: + return wacom_tpc_mt_touch(wacom); - case WACOM_REPORT_PENABLED: - return wacom_tpc_pen(wacom); + default: + switch (data[0]) { + case WACOM_REPORT_TPC1FG: + case WACOM_REPORT_TPCHID: + case WACOM_REPORT_TPCST: + case WACOM_REPORT_TPC1FGE: + return wacom_tpc_single_touch(wacom, len); + + case WACOM_REPORT_TPCMT: + case 
WACOM_REPORT_TPCMT2: + return wacom_mt_touch(wacom); + + } } } diff --git a/drivers/hwtracing/coresight/coresight-remote-etm.c b/drivers/hwtracing/coresight/coresight-remote-etm.c index 30b13282f6c0..cc0b25b130d7 100644 --- a/drivers/hwtracing/coresight/coresight-remote-etm.c +++ b/drivers/hwtracing/coresight/coresight-remote-etm.c @@ -186,12 +186,9 @@ static void remote_etm_rcv_msg(struct work_struct *work) struct remote_etm_drvdata *drvdata = container_of(work, struct remote_etm_drvdata, work_rcv_msg); - mutex_lock(&drvdata->mutex); if (qmi_recv_msg(drvdata->handle) < 0) dev_err(drvdata->dev, "%s: Error receiving QMI message\n", __func__); - - mutex_unlock(&drvdata->mutex); } static void remote_etm_notify(struct qmi_handle *handle, diff --git a/drivers/hwtracing/coresight/coresight-tmc.c b/drivers/hwtracing/coresight/coresight-tmc.c index 316d8b783d94..691c7bb3afac 100644 --- a/drivers/hwtracing/coresight/coresight-tmc.c +++ b/drivers/hwtracing/coresight/coresight-tmc.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2012, 2016-2017 The Linux Foundation. All rights reserved. +/* Copyright (c) 2012, 2016-2017, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -764,6 +764,16 @@ static int tmc_enable(struct tmc_drvdata *drvdata, enum tmc_mode mode) pm_runtime_get_sync(drvdata->dev); mutex_lock(&drvdata->mem_lock); + + spin_lock_irqsave(&drvdata->spinlock, flags); + if (drvdata->reading) { + spin_unlock_irqrestore(&drvdata->spinlock, flags); + mutex_unlock(&drvdata->mem_lock); + pm_runtime_put(drvdata->dev); + return -EBUSY; + } + spin_unlock_irqrestore(&drvdata->spinlock, flags); + if (drvdata->config_type == TMC_CONFIG_TYPE_ETR && drvdata->out_mode == TMC_ETR_OUT_MODE_MEM) { /* @@ -807,19 +817,8 @@ static int tmc_enable(struct tmc_drvdata *drvdata, enum tmc_mode mode) coresight_cti_map_trigout(drvdata->cti_flush, 1, 0); coresight_cti_map_trigin(drvdata->cti_reset, 2, 0); } - mutex_unlock(&drvdata->mem_lock); spin_lock_irqsave(&drvdata->spinlock, flags); - if (drvdata->reading) { - spin_unlock_irqrestore(&drvdata->spinlock, flags); - - if (drvdata->config_type == TMC_CONFIG_TYPE_ETR - && drvdata->out_mode == TMC_ETR_OUT_MODE_USB) - usb_qdss_close(drvdata->usbch); - pm_runtime_put(drvdata->dev); - - return -EBUSY; - } if (drvdata->config_type == TMC_CONFIG_TYPE_ETB) { tmc_etb_enable_hw(drvdata); @@ -845,6 +844,7 @@ static int tmc_enable(struct tmc_drvdata *drvdata, enum tmc_mode mode) */ drvdata->sticky_enable = true; spin_unlock_irqrestore(&drvdata->spinlock, flags); + mutex_unlock(&drvdata->mem_lock); dev_info(drvdata->dev, "TMC enabled\n"); return 0; @@ -1140,6 +1140,7 @@ static int tmc_read_prepare(struct tmc_drvdata *drvdata) unsigned long flags; enum tmc_mode mode; + mutex_lock(&drvdata->mem_lock); spin_lock_irqsave(&drvdata->spinlock, flags); if (!drvdata->sticky_enable) { dev_err(drvdata->dev, "enable tmc once before reading\n"); @@ -1172,11 +1173,13 @@ static int tmc_read_prepare(struct tmc_drvdata *drvdata) out: drvdata->reading = true; spin_unlock_irqrestore(&drvdata->spinlock, flags); + mutex_unlock(&drvdata->mem_lock); dev_info(drvdata->dev, "TMC read start\n"); return 0; err: spin_unlock_irqrestore(&drvdata->spinlock, flags); + mutex_unlock(&drvdata->mem_lock); return ret; } @@ -1353,7 +1356,11 @@ static ssize_t tmc_read(struct file *file, char __user *data, size_t len, { struct tmc_drvdata *drvdata = container_of(file->private_data, struct tmc_drvdata, 
miscdev); - char *bufp = drvdata->buf + *ppos; + char *bufp; + + mutex_lock(&drvdata->mem_lock); + + bufp = drvdata->buf + *ppos; if (*ppos + len > drvdata->size) len = drvdata->size - *ppos; @@ -1375,6 +1382,7 @@ static ssize_t tmc_read(struct file *file, char __user *data, size_t len, if (copy_to_user(data, bufp, len)) { dev_dbg(drvdata->dev, "%s: copy_to_user failed\n", __func__); + mutex_unlock(&drvdata->mem_lock); return -EFAULT; } @@ -1382,6 +1390,8 @@ static ssize_t tmc_read(struct file *file, char __user *data, size_t len, dev_dbg(drvdata->dev, "%s: %zu bytes copied, %d bytes left\n", __func__, len, (int)(drvdata->size - *ppos)); + + mutex_unlock(&drvdata->mem_lock); return len; } diff --git a/drivers/i2c/busses/i2c-msm-v2.c b/drivers/i2c/busses/i2c-msm-v2.c index 7f98d9f527b9..f4ed71f9c1a7 100644 --- a/drivers/i2c/busses/i2c-msm-v2.c +++ b/drivers/i2c/busses/i2c-msm-v2.c @@ -1310,7 +1310,8 @@ static int i2c_msm_dma_xfer_process(struct i2c_msm_ctrl *ctrl) ret = i2c_msm_xfer_wait_for_completion(ctrl, &ctrl->xfer.complete); if (!ret && ctrl->xfer.rx_cnt) - i2c_msm_xfer_wait_for_completion(ctrl, &ctrl->xfer.rx_complete); + ret = i2c_msm_xfer_wait_for_completion(ctrl, + &ctrl->xfer.rx_complete); dma_xfer_end: /* free scatter-gather lists */ @@ -1716,9 +1717,7 @@ static irqreturn_t i2c_msm_qup_isr(int irq, void *devid) void __iomem *base = ctrl->rsrcs.base; struct i2c_msm_xfer *xfer = &ctrl->xfer; struct i2c_msm_xfer_mode_blk *blk = &ctrl->xfer.blk; - u32 i2c_status = 0; u32 err_flags = 0; - u32 qup_op = 0; u32 clr_flds = 0; bool log_event = false; bool signal_complete = false; @@ -1731,24 +1730,24 @@ static irqreturn_t i2c_msm_qup_isr(int irq, void *devid) return IRQ_HANDLED; } - i2c_status = readl_relaxed(base + QUP_I2C_STATUS); - err_flags = readl_relaxed(base + QUP_ERROR_FLAGS); - qup_op = readl_relaxed(base + QUP_OPERATIONAL); + ctrl->i2c_sts_reg = readl_relaxed(base + QUP_I2C_STATUS); + err_flags = readl_relaxed(base + QUP_ERROR_FLAGS); + ctrl->qup_op_reg = readl_relaxed(base + QUP_OPERATIONAL); - if (i2c_status & QUP_MSTR_STTS_ERR_MASK) { + if (ctrl->i2c_sts_reg & QUP_MSTR_STTS_ERR_MASK) { signal_complete = true; log_event = true; /* * If there is more than 1 error here, last one sticks. * The order of the error set here matters. 
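The coresight-tmc hunks above move the drvdata->reading test to the very top of tmc_enable(), sampled under the spinlock before any memory, CTI routing or USB channel is claimed, so the -EBUSY path no longer has to unwind half-acquired resources; tmc_read_prepare() and tmc_read() now take mem_lock as well. A compact sketch of that "probe the busy flag first, commit afterwards" shape, with invented names:

    #include <linux/types.h>
    #include <linux/mutex.h>
    #include <linux/spinlock.h>
    #include <linux/errno.h>

    static DEFINE_MUTEX(demo_mem_lock);   /* serialises enable vs. read sessions */
    static DEFINE_SPINLOCK(demo_lock);    /* protects the flags below */
    static bool demo_reading;
    static bool demo_enabled;

    static int demo_enable(void)
    {
        unsigned long flags;

        mutex_lock(&demo_mem_lock);

        /* Refuse early, before clocks, buffers or trigger routing are touched. */
        spin_lock_irqsave(&demo_lock, flags);
        if (demo_reading) {
            spin_unlock_irqrestore(&demo_lock, flags);
            mutex_unlock(&demo_mem_lock);
            return -EBUSY;
        }
        spin_unlock_irqrestore(&demo_lock, flags);

        /* ... acquire buffers, enable clocks, map triggers here ... */

        spin_lock_irqsave(&demo_lock, flags);
        demo_enabled = true;
        spin_unlock_irqrestore(&demo_lock, flags);

        mutex_unlock(&demo_mem_lock);
        return 0;
    }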
*/ - if (i2c_status & QUP_ARB_LOST) + if (ctrl->i2c_sts_reg & QUP_ARB_LOST) ctrl->xfer.err = I2C_MSM_ERR_ARB_LOST; - if (i2c_status & QUP_BUS_ERROR) + if (ctrl->i2c_sts_reg & QUP_BUS_ERROR) ctrl->xfer.err = I2C_MSM_ERR_BUS_ERR; - if (i2c_status & QUP_PACKET_NACKED) + if (ctrl->i2c_sts_reg & QUP_PACKET_NACKED) ctrl->xfer.err = I2C_MSM_ERR_NACK; } @@ -1761,7 +1760,7 @@ static irqreturn_t i2c_msm_qup_isr(int irq, void *devid) i2c_msm_dbg_qup_reg_dump(ctrl); /* clear interrupts fields */ - clr_flds = i2c_status & QUP_MSTR_STTS_ERR_MASK; + clr_flds = ctrl->i2c_sts_reg & QUP_MSTR_STTS_ERR_MASK; if (clr_flds) { writel_relaxed(clr_flds, base + QUP_I2C_STATUS); need_wmb = true; @@ -1773,7 +1772,9 @@ static irqreturn_t i2c_msm_qup_isr(int irq, void *devid) need_wmb = true; } - clr_flds = qup_op & (QUP_OUTPUT_SERVICE_FLAG | QUP_INPUT_SERVICE_FLAG); + clr_flds = ctrl->qup_op_reg & + (QUP_OUTPUT_SERVICE_FLAG | + QUP_INPUT_SERVICE_FLAG); if (clr_flds) { writel_relaxed(clr_flds, base + QUP_OPERATIONAL); need_wmb = true; @@ -1814,25 +1815,25 @@ static irqreturn_t i2c_msm_qup_isr(int irq, void *devid) /* handle data completion */ if (xfer->mode_id == I2C_MSM_XFER_MODE_BLOCK) { /* block ready for writing */ - if (qup_op & QUP_OUTPUT_SERVICE_FLAG) { + if (ctrl->qup_op_reg & QUP_OUTPUT_SERVICE_FLAG) { log_event = true; - if (qup_op & QUP_OUT_BLOCK_WRITE_REQ) + if (ctrl->qup_op_reg & QUP_OUT_BLOCK_WRITE_REQ) complete(&blk->wait_tx_blk); - if ((qup_op & blk->complete_mask) + if ((ctrl->qup_op_reg & blk->complete_mask) == blk->complete_mask) { log_event = true; signal_complete = true; } } /* block ready for reading */ - if (qup_op & QUP_INPUT_SERVICE_FLAG) { + if (ctrl->qup_op_reg & QUP_INPUT_SERVICE_FLAG) { log_event = true; complete(&blk->wait_rx_blk); } } else { /* for FIFO/DMA Mode*/ - if (qup_op & QUP_MAX_INPUT_DONE_FLAG) { + if (ctrl->qup_op_reg & QUP_MAX_INPUT_DONE_FLAG) { log_event = true; /* * If last transaction is an input then the entire @@ -1850,7 +1851,7 @@ static irqreturn_t i2c_msm_qup_isr(int irq, void *devid) * here QUP_OUTPUT_SERVICE_FLAG and assumes that * QUP_MAX_OUTPUT_DONE_FLAG. 
*/ - if (qup_op & (QUP_OUTPUT_SERVICE_FLAG | + if (ctrl->qup_op_reg & (QUP_OUTPUT_SERVICE_FLAG | QUP_MAX_OUTPUT_DONE_FLAG)) { log_event = true; /* @@ -1863,13 +1864,11 @@ static irqreturn_t i2c_msm_qup_isr(int irq, void *devid) } isr_end: - if (ctrl->xfer.err || (ctrl->dbgfs.dbg_lvl >= MSM_DBG)) - i2c_msm_dbg_dump_diag(ctrl, true, i2c_status, qup_op); - if (log_event || (ctrl->dbgfs.dbg_lvl >= MSM_DBG)) i2c_msm_prof_evnt_add(ctrl, MSM_PROF, I2C_MSM_IRQ_END, - i2c_status, qup_op, err_flags); + ctrl->i2c_sts_reg, ctrl->qup_op_reg, + err_flags); if (signal_complete) complete(&ctrl->xfer.complete); @@ -2078,8 +2077,12 @@ static int i2c_msm_xfer_wait_for_completion(struct i2c_msm_ctrl *ctrl, xfer->timeout, time_left, 0); } else { /* return an error if one detected by ISR */ - if (xfer->err) + if (ctrl->xfer.err || + (ctrl->dbgfs.dbg_lvl >= MSM_DBG)) { + i2c_msm_dbg_dump_diag(ctrl, true, + ctrl->i2c_sts_reg, ctrl->qup_op_reg); ret = -(xfer->err); + } i2c_msm_prof_evnt_add(ctrl, MSM_DBG, I2C_MSM_COMPLT_OK, xfer->timeout, time_left, 0); } @@ -2229,19 +2232,8 @@ static int i2c_msm_pm_clk_enable(struct i2c_msm_ctrl *ctrl) static int i2c_msm_pm_xfer_start(struct i2c_msm_ctrl *ctrl) { int ret; - struct i2c_msm_xfer *xfer = &ctrl->xfer; mutex_lock(&ctrl->xfer.mtx); - /* if system is suspended just bail out */ - if (ctrl->pwr_state == I2C_MSM_PM_SYS_SUSPENDED) { - struct i2c_msg *msgs = xfer->msgs + xfer->cur_buf.msg_idx; - dev_err(ctrl->dev, - "slave:0x%x is calling xfer when system is suspended\n", - msgs->addr); - mutex_unlock(&ctrl->xfer.mtx); - return -EIO; - } - i2c_msm_pm_pinctrl_state(ctrl, true); pm_runtime_get_sync(ctrl->dev); /* @@ -2327,6 +2319,14 @@ i2c_msm_frmwrk_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num) return PTR_ERR(msgs); } + /* if system is suspended just bail out */ + if (ctrl->pwr_state == I2C_MSM_PM_SYS_SUSPENDED) { + dev_err(ctrl->dev, + "slave:0x%x is calling xfer when system is suspended\n", + msgs->addr); + return -EIO; + } + ret = i2c_msm_pm_xfer_start(ctrl); if (ret) return ret; diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c index 630bce68bf38..b61db9db3ca5 100644 --- a/drivers/i2c/busses/i2c-piix4.c +++ b/drivers/i2c/busses/i2c-piix4.c @@ -54,7 +54,7 @@ #define SMBSLVDAT (0xC + piix4_smba) /* count for request_region */ -#define SMBIOSIZE 8 +#define SMBIOSIZE 9 /* PCI Address Constants */ #define SMBBA 0x090 diff --git a/drivers/i2c/busses/i2c-tiny-usb.c b/drivers/i2c/busses/i2c-tiny-usb.c index 0ed77eeff31e..a2e3dd715380 100644 --- a/drivers/i2c/busses/i2c-tiny-usb.c +++ b/drivers/i2c/busses/i2c-tiny-usb.c @@ -178,22 +178,39 @@ static int usb_read(struct i2c_adapter *adapter, int cmd, int value, int index, void *data, int len) { struct i2c_tiny_usb *dev = (struct i2c_tiny_usb *)adapter->algo_data; + void *dmadata = kmalloc(len, GFP_KERNEL); + int ret; + + if (!dmadata) + return -ENOMEM; /* do control transfer */ - return usb_control_msg(dev->usb_dev, usb_rcvctrlpipe(dev->usb_dev, 0), + ret = usb_control_msg(dev->usb_dev, usb_rcvctrlpipe(dev->usb_dev, 0), cmd, USB_TYPE_VENDOR | USB_RECIP_INTERFACE | - USB_DIR_IN, value, index, data, len, 2000); + USB_DIR_IN, value, index, dmadata, len, 2000); + + memcpy(data, dmadata, len); + kfree(dmadata); + return ret; } static int usb_write(struct i2c_adapter *adapter, int cmd, int value, int index, void *data, int len) { struct i2c_tiny_usb *dev = (struct i2c_tiny_usb *)adapter->algo_data; + void *dmadata = kmemdup(data, len, GFP_KERNEL); + int ret; + + if (!dmadata) + return -ENOMEM; 
/* do control transfer */ - return usb_control_msg(dev->usb_dev, usb_sndctrlpipe(dev->usb_dev, 0), + ret = usb_control_msg(dev->usb_dev, usb_sndctrlpipe(dev->usb_dev, 0), cmd, USB_TYPE_VENDOR | USB_RECIP_INTERFACE, - value, index, data, len, 2000); + value, index, dmadata, len, 2000); + + kfree(dmadata); + return ret; } static void i2c_tiny_usb_free(struct i2c_tiny_usb *dev) diff --git a/drivers/iio/adc/qcom-rradc.c b/drivers/iio/adc/qcom-rradc.c index 537cca877f66..28ab4e52dab5 100644 --- a/drivers/iio/adc/qcom-rradc.c +++ b/drivers/iio/adc/qcom-rradc.c @@ -180,6 +180,9 @@ #define FG_ADC_RR_VOLT_INPUT_FACTOR 8 #define FG_ADC_RR_CURR_INPUT_FACTOR 2000 #define FG_ADC_RR_CURR_USBIN_INPUT_FACTOR_MIL 1886 +#define FG_ADC_RR_CURR_USBIN_660_FACTOR_MIL 9 +#define FG_ADC_RR_CURR_USBIN_660_UV_VAL 579500 + #define FG_ADC_SCALE_MILLI_FACTOR 1000 #define FG_ADC_KELVINMIL_CELSIUSMIL 273150 @@ -192,6 +195,9 @@ #define FG_RR_CONV_CONTINUOUS_TIME_MIN_US 50000 #define FG_RR_CONV_CONTINUOUS_TIME_MAX_US 51000 #define FG_RR_CONV_MAX_RETRY_CNT 50 +#define FG_RR_TP_REV_VERSION1 21 +#define FG_RR_TP_REV_VERSION2 29 +#define FG_RR_TP_REV_VERSION3 32 /* * The channel number is not a physical index in hardware, @@ -228,6 +234,7 @@ struct rradc_chip { struct rradc_chan_prop *chan_props; struct device_node *revid_dev_node; struct pmic_revid_data *pmic_fab_id; + int volt; }; struct rradc_channels { @@ -331,8 +338,8 @@ static int rradc_post_process_therm(struct rradc_chip *chip, int64_t temp; /* K = code/4 */ - temp = div64_s64(adc_code, FG_ADC_RR_BATT_THERM_LSB_K); - temp *= FG_ADC_SCALE_MILLI_FACTOR; + temp = ((int64_t)adc_code * FG_ADC_SCALE_MILLI_FACTOR); + temp = div64_s64(temp, FG_ADC_RR_BATT_THERM_LSB_K); *result_millidegc = temp - FG_ADC_KELVINMIL_CELSIUSMIL; return 0; @@ -353,7 +360,7 @@ static int rradc_post_process_volt(struct rradc_chip *chip, return 0; } -static int rradc_post_process_curr(struct rradc_chip *chip, +static int rradc_post_process_usbin_curr(struct rradc_chip *chip, struct rradc_chan_prop *prop, u16 adc_code, int *result_ua) { @@ -361,11 +368,33 @@ static int rradc_post_process_curr(struct rradc_chip *chip, if (!prop) return -EINVAL; - - if (prop->channel == RR_ADC_USBIN_I) - scale = FG_ADC_RR_CURR_USBIN_INPUT_FACTOR_MIL; - else - scale = FG_ADC_RR_CURR_INPUT_FACTOR; + if (chip->revid_dev_node) { + switch (chip->pmic_fab_id->pmic_subtype) { + case PM660_SUBTYPE: + if (((chip->pmic_fab_id->tp_rev + >= FG_RR_TP_REV_VERSION1) + && (chip->pmic_fab_id->tp_rev + <= FG_RR_TP_REV_VERSION2)) + || (chip->pmic_fab_id->tp_rev + >= FG_RR_TP_REV_VERSION3)) { + chip->volt = div64_s64(chip->volt, 1000); + chip->volt = chip->volt * + FG_ADC_RR_CURR_USBIN_660_FACTOR_MIL; + chip->volt = FG_ADC_RR_CURR_USBIN_660_UV_VAL - + (chip->volt); + chip->volt = div64_s64(1000000000, chip->volt); + scale = chip->volt; + } else + scale = FG_ADC_RR_CURR_USBIN_INPUT_FACTOR_MIL; + break; + case PMI8998_SUBTYPE: + scale = FG_ADC_RR_CURR_USBIN_INPUT_FACTOR_MIL; + break; + default: + pr_err("No PMIC subtype found\n"); + return -EINVAL; + } + } /* scale * V/A; 2.5V ADC full scale */ ua = ((int64_t)adc_code * scale); @@ -376,6 +405,24 @@ static int rradc_post_process_curr(struct rradc_chip *chip, return 0; } +static int rradc_post_process_dcin_curr(struct rradc_chip *chip, + struct rradc_chan_prop *prop, u16 adc_code, + int *result_ua) +{ + int64_t ua = 0; + + if (!prop) + return -EINVAL; + + /* 0.5 V/A; 2.5V ADC full scale */ + ua = ((int64_t)adc_code * FG_ADC_RR_CURR_INPUT_FACTOR); + ua *= (FG_ADC_RR_FS_VOLTAGE_MV * 
FG_ADC_SCALE_MILLI_FACTOR); + ua = div64_s64(ua, (FG_MAX_ADC_READINGS * 1000)); + *result_ua = ua; + + return 0; +} + static int rradc_post_process_die_temp(struct rradc_chip *chip, struct rradc_chan_prop *prop, u16 adc_code, int *result_millidegc) @@ -591,13 +638,13 @@ static const struct rradc_channels rradc_chans[] = { BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_PROCESSED), FG_ADC_RR_SKIN_TEMP_LSB, FG_ADC_RR_SKIN_TEMP_MSB, FG_ADC_RR_AUX_THERM_STS) - RR_ADC_CHAN_CURRENT("usbin_i", &rradc_post_process_curr, + RR_ADC_CHAN_CURRENT("usbin_i", &rradc_post_process_usbin_curr, FG_ADC_RR_USB_IN_I_LSB, FG_ADC_RR_USB_IN_I_MSB, FG_ADC_RR_USB_IN_I_STS) RR_ADC_CHAN_VOLT("usbin_v", &rradc_post_process_volt, FG_ADC_RR_USB_IN_V_LSB, FG_ADC_RR_USB_IN_V_MSB, FG_ADC_RR_USB_IN_V_STS) - RR_ADC_CHAN_CURRENT("dcin_i", &rradc_post_process_curr, + RR_ADC_CHAN_CURRENT("dcin_i", &rradc_post_process_dcin_curr, FG_ADC_RR_DC_IN_I_LSB, FG_ADC_RR_DC_IN_I_MSB, FG_ADC_RR_DC_IN_I_STS) RR_ADC_CHAN_VOLT("dcin_v", &rradc_post_process_volt, @@ -955,6 +1002,21 @@ static int rradc_read_raw(struct iio_dev *indio_dev, switch (mask) { case IIO_CHAN_INFO_PROCESSED: + if (((chip->pmic_fab_id->tp_rev + >= FG_RR_TP_REV_VERSION1) + && (chip->pmic_fab_id->tp_rev + <= FG_RR_TP_REV_VERSION2)) + || (chip->pmic_fab_id->tp_rev + >= FG_RR_TP_REV_VERSION3)) { + if (chan->address == RR_ADC_USBIN_I) { + prop = &chip->chan_props[RR_ADC_USBIN_V]; + rc = rradc_do_conversion(chip, prop, &adc_code); + if (rc) + break; + prop->scale(chip, prop, adc_code, &chip->volt); + } + } + prop = &chip->chan_props[chan->address]; rc = rradc_do_conversion(chip, prop, &adc_code); if (rc) diff --git a/drivers/iio/light/ltr501.c b/drivers/iio/light/ltr501.c index 6bf89d8f3741..b9d1e5c58ec5 100644 --- a/drivers/iio/light/ltr501.c +++ b/drivers/iio/light/ltr501.c @@ -74,9 +74,9 @@ static const int int_time_mapping[] = {100000, 50000, 200000, 400000}; static const struct reg_field reg_field_it = REG_FIELD(LTR501_ALS_MEAS_RATE, 3, 4); static const struct reg_field reg_field_als_intr = - REG_FIELD(LTR501_INTR, 0, 0); -static const struct reg_field reg_field_ps_intr = REG_FIELD(LTR501_INTR, 1, 1); +static const struct reg_field reg_field_ps_intr = + REG_FIELD(LTR501_INTR, 0, 0); static const struct reg_field reg_field_als_rate = REG_FIELD(LTR501_ALS_MEAS_RATE, 0, 2); static const struct reg_field reg_field_ps_rate = diff --git a/drivers/iio/proximity/as3935.c b/drivers/iio/proximity/as3935.c index bf0bd7e03aff..420478924a0c 100644 --- a/drivers/iio/proximity/as3935.c +++ b/drivers/iio/proximity/as3935.c @@ -40,9 +40,9 @@ #define AS3935_AFE_PWR_BIT BIT(0) #define AS3935_INT 0x03 -#define AS3935_INT_MASK 0x07 +#define AS3935_INT_MASK 0x0f #define AS3935_EVENT_INT BIT(3) -#define AS3935_NOISE_INT BIT(1) +#define AS3935_NOISE_INT BIT(0) #define AS3935_DATA 0x07 #define AS3935_DATA_MASK 0x3F @@ -263,8 +263,6 @@ static irqreturn_t as3935_interrupt_handler(int irq, void *private) static void calibrate_as3935(struct as3935_state *st) { - mutex_lock(&st->lock); - /* mask disturber interrupt bit */ as3935_write(st, AS3935_INT, BIT(5)); @@ -274,8 +272,6 @@ static void calibrate_as3935(struct as3935_state *st) mdelay(2); as3935_write(st, AS3935_TUNE_CAP, (st->tune_cap / TUNE_CAP_DIV)); - - mutex_unlock(&st->lock); } #ifdef CONFIG_PM_SLEEP @@ -312,6 +308,8 @@ static int as3935_resume(struct device *dev) val &= ~AS3935_AFE_PWR_BIT; ret = as3935_write(st, AS3935_AFE_GAIN, val); + calibrate_as3935(st); + err_resume: mutex_unlock(&st->lock); diff --git a/drivers/infiniband/core/cma.c 
b/drivers/infiniband/core/cma.c index 3f5741a3e728..43d5166db4c6 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -857,6 +857,8 @@ int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr, } else ret = iw_cm_init_qp_attr(id_priv->cm_id.iw, qp_attr, qp_attr_mask); + qp_attr->port_num = id_priv->id.port_num; + *qp_attr_mask |= IB_QP_PORT; } else ret = -ENOSYS; diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c index 1c02deab068f..b7a73f1a8beb 100644 --- a/drivers/infiniband/core/uverbs_cmd.c +++ b/drivers/infiniband/core/uverbs_cmd.c @@ -2287,6 +2287,11 @@ ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file, if (copy_from_user(&cmd, buf, sizeof cmd)) return -EFAULT; + if ((cmd.attr_mask & IB_QP_PORT) && + (cmd.port_num < rdma_start_port(ib_dev) || + cmd.port_num > rdma_end_port(ib_dev))) + return -EINVAL; + INIT_UDATA(&udata, buf + sizeof cmd, NULL, in_len - sizeof cmd, out_len); @@ -2827,6 +2832,10 @@ ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file, if (copy_from_user(&cmd, buf, sizeof cmd)) return -EFAULT; + if (cmd.attr.port_num < rdma_start_port(ib_dev) || + cmd.attr.port_num > rdma_end_port(ib_dev)) + return -EINVAL; + uobj = kmalloc(sizeof *uobj, GFP_KERNEL); if (!uobj) return -ENOMEM; diff --git a/drivers/infiniband/hw/qib/qib_rc.c b/drivers/infiniband/hw/qib/qib_rc.c index e6b7556d5221..cbc4216091c9 100644 --- a/drivers/infiniband/hw/qib/qib_rc.c +++ b/drivers/infiniband/hw/qib/qib_rc.c @@ -2088,8 +2088,10 @@ send_last: ret = qib_get_rwqe(qp, 1); if (ret < 0) goto nack_op_err; - if (!ret) + if (!ret) { + qib_put_ss(&qp->r_sge); goto rnr_nak; + } wc.ex.imm_data = ohdr->u.rc.imm_data; hdrsize += 4; wc.wc_flags = IB_WC_WITH_IMM; diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c index 1a2b2620421e..6f4dc0fd2ca3 100644 --- a/drivers/input/mouse/elantech.c +++ b/drivers/input/mouse/elantech.c @@ -1122,8 +1122,10 @@ static int elantech_get_resolution_v4(struct psmouse *psmouse, * Asus UX32VD 0x361f02 00, 15, 0e clickpad * Avatar AVIU-145A2 0x361f00 ? 
clickpad * Fujitsu LIFEBOOK E544 0x470f00 d0, 12, 09 2 hw buttons + * Fujitsu LIFEBOOK E546 0x470f00 50, 12, 09 2 hw buttons * Fujitsu LIFEBOOK E547 0x470f00 50, 12, 09 2 hw buttons * Fujitsu LIFEBOOK E554 0x570f01 40, 14, 0c 2 hw buttons + * Fujitsu LIFEBOOK E557 0x570f01 40, 14, 0c 2 hw buttons * Fujitsu T725 0x470f01 05, 12, 09 2 hw buttons * Fujitsu H730 0x570f00 c0, 14, 0c 3 hw buttons (**) * Gigabyte U2442 0x450f01 58, 17, 0c 2 hw buttons @@ -1528,6 +1530,13 @@ static const struct dmi_system_id elantech_dmi_force_crc_enabled[] = { DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E544"), }, }, + { + /* Fujitsu LIFEBOOK E546 does not work with crc_enabled == 0 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), + DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E546"), + }, + }, { /* Fujitsu LIFEBOOK E547 does not work with crc_enabled == 0 */ .matches = { @@ -1549,6 +1558,13 @@ static const struct dmi_system_id elantech_dmi_force_crc_enabled[] = { DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E556"), }, }, + { + /* Fujitsu LIFEBOOK E557 does not work with crc_enabled == 0 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), + DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E557"), + }, + }, { /* Fujitsu LIFEBOOK U745 does not work with crc_enabled == 0 */ .matches = { diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h index e7b96f1ac2c5..5be14ad29d46 100644 --- a/drivers/input/serio/i8042-x86ia64io.h +++ b/drivers/input/serio/i8042-x86ia64io.h @@ -788,6 +788,13 @@ static const struct dmi_system_id __initconst i8042_dmi_notimeout_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK U574"), }, }, + { + /* Fujitsu UH554 laptop */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), + DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK UH544"), + }, + }, { } }; diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c index 89abfdb539ac..c84c685056b9 100644 --- a/drivers/input/serio/i8042.c +++ b/drivers/input/serio/i8042.c @@ -434,8 +434,10 @@ static int i8042_start(struct serio *serio) { struct i8042_port *port = serio->port_data; + spin_lock_irq(&i8042_lock); port->exists = true; - mb(); + spin_unlock_irq(&i8042_lock); + return 0; } @@ -448,16 +450,20 @@ static void i8042_stop(struct serio *serio) { struct i8042_port *port = serio->port_data; + spin_lock_irq(&i8042_lock); port->exists = false; + port->serio = NULL; + spin_unlock_irq(&i8042_lock); /* + * We need to make sure that interrupt handler finishes using + * our serio port before we return from this function. * We synchronize with both AUX and KBD IRQs because there is * a (very unlikely) chance that AUX IRQ is raised for KBD port * and vice versa. 
*/ synchronize_irq(I8042_AUX_IRQ); synchronize_irq(I8042_KBD_IRQ); - port->serio = NULL; } /* @@ -574,7 +580,7 @@ static irqreturn_t i8042_interrupt(int irq, void *dev_id) spin_unlock_irqrestore(&i8042_lock, flags); - if (likely(port->exists && !filtered)) + if (likely(serio && !filtered)) serio_interrupt(serio, data, dfl); out: diff --git a/drivers/input/touchscreen/st/fts.c b/drivers/input/touchscreen/st/fts.c index 78bdd24af28b..08bfb83a9447 100644 --- a/drivers/input/touchscreen/st/fts.c +++ b/drivers/input/touchscreen/st/fts.c @@ -1003,7 +1003,10 @@ static unsigned char *fts_status_event_handler( case FTS_WATER_MODE_ON: case FTS_WATER_MODE_OFF: default: - logError(1, "%s %s Received unhandled status event = %02X %02X %02X %02X %02X %02X %02X %02X\n", tag, __func__, event[0], event[1], event[2], event[3], event[4], event[5], event[6], event[7]); + logError(0, + "%s %s Received unhandled status event = %02X %02X %02X %02X %02X %02X %02X %02X\n", + tag, __func__, event[0], event[1], event[2], + event[3], event[4], event[5], event[6], event[7]); break; } @@ -1755,8 +1758,6 @@ static int fts_fb_state_chg_callback(struct notifier_block *nb, unsigned long va info->resume_bit = 1; - fts_system_reset(); - fts_mode_handler(info, 0); info->sensor_sleep = false; @@ -1959,9 +1960,9 @@ static int parse_dt(struct device *dev, struct fts_i2c_platform_data *bdata) bdata->bus_reg_name = name; logError(0, "%s bus_reg_name = %s\n", tag, name); - if (of_property_read_bool(np, "st, reset-gpio")) { + if (of_property_read_bool(np, "st,reset-gpio")) { bdata->reset_gpio = of_get_named_gpio_flags(np, - "st, reset-gpio", 0, NULL); + "st,reset-gpio", 0, NULL); logError(0, "%s reset_gpio =%d\n", tag, bdata->reset_gpio); } else { bdata->reset_gpio = GPIO_NOT_DEFINED; @@ -2210,7 +2211,13 @@ static int fts_probe(struct i2c_client *client, } #endif - queue_delayed_work(info->fwu_workqueue, &info->fwu_work, msecs_to_jiffies(EXP_FN_WORK_DELAY_MS)); + /* To auto-update the FW at probe, + * uncomment the following code. + */ + + /* queue_delayed_work(info->fwu_workqueue, &info->fwu_work, + * msecs_to_jiffies(EXP_FN_WORK_DELAY_MS)); + */ logError(1, "%s Probe Finished!\n", tag); return OK; diff --git a/drivers/iommu/amd_iommu_v2.c b/drivers/iommu/amd_iommu_v2.c index 4831eb910fc7..22160e481794 100644 --- a/drivers/iommu/amd_iommu_v2.c +++ b/drivers/iommu/amd_iommu_v2.c @@ -699,9 +699,9 @@ out_clear_state: out_unregister: mmu_notifier_unregister(&pasid_state->mn, mm); + mmput(mm); out_free: - mmput(mm); free_pasid_state(pasid_state); out: diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c index 0725c92a6f85..b30739de79e7 100644 --- a/drivers/iommu/arm-smmu.c +++ b/drivers/iommu/arm-smmu.c @@ -1727,7 +1727,8 @@ static void arm_smmu_pgtbl_unlock(struct arm_smmu_domain *smmu_domain, static int arm_smmu_restore_sec_cfg(struct arm_smmu_device *smmu) { - int ret, scm_ret = 0; + int ret; + u64 scm_ret = 0; if (!arm_smmu_is_static_cb(smmu)) return 0; diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index b92b8a724efb..f9711aceef54 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c @@ -1137,7 +1137,7 @@ static void dma_pte_free_level(struct dmar_domain *domain, int level, if (!dma_pte_present(pte) || dma_pte_superpage(pte)) goto next; - level_pfn = pfn & level_mask(level - 1); + level_pfn = pfn & level_mask(level); level_pte = phys_to_virt(dma_pte_addr(pte)); if (level > 2) diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index
33176a4aa6ef..92e6ae48caf8 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c @@ -394,36 +394,30 @@ int iommu_group_add_device(struct iommu_group *group, struct device *dev) device->dev = dev; ret = sysfs_create_link(&dev->kobj, &group->kobj, "iommu_group"); - if (ret) { - kfree(device); - return ret; - } + if (ret) + goto err_free_device; device->name = kasprintf(GFP_KERNEL, "%s", kobject_name(&dev->kobj)); rename: if (!device->name) { - sysfs_remove_link(&dev->kobj, "iommu_group"); - kfree(device); - return -ENOMEM; + ret = -ENOMEM; + goto err_remove_link; } ret = sysfs_create_link_nowarn(group->devices_kobj, &dev->kobj, device->name); if (ret) { - kfree(device->name); if (ret == -EEXIST && i >= 0) { /* * Account for the slim chance of collision * and append an instance to the name. */ + kfree(device->name); device->name = kasprintf(GFP_KERNEL, "%s.%d", kobject_name(&dev->kobj), i++); goto rename; } - - sysfs_remove_link(&dev->kobj, "iommu_group"); - kfree(device); - return ret; + goto err_free_name; } kobject_get(group->devices_kobj); @@ -435,8 +429,10 @@ rename: mutex_lock(&group->mutex); list_add_tail(&device->list, &group->devices); if (group->domain) - __iommu_attach_device(group->domain, dev); + ret = __iommu_attach_device(group->domain, dev); mutex_unlock(&group->mutex); + if (ret) + goto err_put_group; /* Notify any listeners about change to group. */ blocking_notifier_call_chain(&group->notifier, @@ -447,6 +443,21 @@ rename: pr_info("Adding device %s to group %d\n", dev_name(dev), group->id); return 0; + +err_put_group: + mutex_lock(&group->mutex); + list_del(&device->list); + mutex_unlock(&group->mutex); + dev->iommu_group = NULL; + kobject_put(group->devices_kobj); +err_free_name: + kfree(device->name); +err_remove_link: + sysfs_remove_link(&dev->kobj, "iommu_group"); +err_free_device: + kfree(device); + pr_err("Failed to add device %s to group %d: %d\n", dev_name(dev), group->id, ret); + return ret; } EXPORT_SYMBOL_GPL(iommu_group_add_device); diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c index 2e0f61a2dc3f..9e96d81bc5cd 100644 --- a/drivers/irqchip/irq-gic-v3.c +++ b/drivers/irqchip/irq-gic-v3.c @@ -793,6 +793,9 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val, int enabled; u64 val; + if (cpu >= nr_cpu_ids) + return -EINVAL; + if (gic_irq_in_rdist(d)) return -EINVAL; diff --git a/drivers/irqchip/irq-keystone.c b/drivers/irqchip/irq-keystone.c index deb89d63a728..e684be1bb7c0 100644 --- a/drivers/irqchip/irq-keystone.c +++ b/drivers/irqchip/irq-keystone.c @@ -19,9 +19,9 @@ #include #include #include +#include #include #include -#include #include #include #include @@ -39,6 +39,7 @@ struct keystone_irq_device { struct irq_domain *irqd; struct regmap *devctrl_regs; u32 devctrl_offset; + raw_spinlock_t wa_lock; }; static inline u32 keystone_irq_readl(struct keystone_irq_device *kirq) @@ -83,17 +84,15 @@ static void keystone_irq_ack(struct irq_data *d) /* nothing to do here */ } -static void keystone_irq_handler(struct irq_desc *desc) +static irqreturn_t keystone_irq_handler(int irq, void *keystone_irq) { - unsigned int irq = irq_desc_get_irq(desc); - struct keystone_irq_device *kirq = irq_desc_get_handler_data(desc); + struct keystone_irq_device *kirq = keystone_irq; + unsigned long wa_lock_flags; unsigned long pending; int src, virq; dev_dbg(kirq->dev, "start irq %d\n", irq); - chained_irq_enter(irq_desc_get_chip(desc), desc); - pending = keystone_irq_readl(kirq); keystone_irq_writel(kirq, pending); @@ 
-111,13 +110,15 @@ static void keystone_irq_handler(struct irq_desc *desc) if (!virq) dev_warn(kirq->dev, "sporious irq detected hwirq %d, virq %d\n", src, virq); + raw_spin_lock_irqsave(&kirq->wa_lock, wa_lock_flags); generic_handle_irq(virq); + raw_spin_unlock_irqrestore(&kirq->wa_lock, + wa_lock_flags); } } - chained_irq_exit(irq_desc_get_chip(desc), desc); - dev_dbg(kirq->dev, "end irq %d\n", irq); + return IRQ_HANDLED; } static int keystone_irq_map(struct irq_domain *h, unsigned int virq, @@ -182,9 +183,16 @@ static int keystone_irq_probe(struct platform_device *pdev) return -ENODEV; } + raw_spin_lock_init(&kirq->wa_lock); + platform_set_drvdata(pdev, kirq); - irq_set_chained_handler_and_data(kirq->irq, keystone_irq_handler, kirq); + ret = request_irq(kirq->irq, keystone_irq_handler, + 0, dev_name(dev), kirq); + if (ret) { + irq_domain_remove(kirq->irqd); + return ret; + } /* clear all source bits */ keystone_irq_writel(kirq, ~0x0); @@ -199,6 +207,8 @@ static int keystone_irq_remove(struct platform_device *pdev) struct keystone_irq_device *kirq = platform_get_drvdata(pdev); int hwirq; + free_irq(kirq->irq, kirq); + for (hwirq = 0; hwirq < KEYSTONE_N_IRQ; hwirq++) irq_dispose_mapping(irq_find_mapping(kirq->irqd, hwirq)); diff --git a/drivers/irqchip/irq-mxs.c b/drivers/irqchip/irq-mxs.c index 17304705f2cf..05fa9f7af53c 100644 --- a/drivers/irqchip/irq-mxs.c +++ b/drivers/irqchip/irq-mxs.c @@ -131,12 +131,16 @@ static struct irq_chip mxs_icoll_chip = { .irq_ack = icoll_ack_irq, .irq_mask = icoll_mask_irq, .irq_unmask = icoll_unmask_irq, + .flags = IRQCHIP_MASK_ON_SUSPEND | + IRQCHIP_SKIP_SET_WAKE, }; static struct irq_chip asm9260_icoll_chip = { .irq_ack = icoll_ack_irq, .irq_mask = asm9260_mask_irq, .irq_unmask = asm9260_unmask_irq, + .flags = IRQCHIP_MASK_ON_SUSPEND | + IRQCHIP_SKIP_SET_WAKE, }; asmlinkage void __exception_irq_entry icoll_handle_irq(struct pt_regs *regs) diff --git a/drivers/irqchip/irq-xtensa-mx.c b/drivers/irqchip/irq-xtensa-mx.c index bb3ac5fe5846..72a391e01011 100644 --- a/drivers/irqchip/irq-xtensa-mx.c +++ b/drivers/irqchip/irq-xtensa-mx.c @@ -142,7 +142,7 @@ static struct irq_chip xtensa_mx_irq_chip = { int __init xtensa_mx_init_legacy(struct device_node *interrupt_parent) { struct irq_domain *root_domain = - irq_domain_add_legacy(NULL, NR_IRQS, 0, 0, + irq_domain_add_legacy(NULL, NR_IRQS - 1, 1, 0, &xtensa_mx_irq_domain_ops, &xtensa_mx_irq_chip); irq_set_default_host(root_domain); diff --git a/drivers/irqchip/irq-xtensa-pic.c b/drivers/irqchip/irq-xtensa-pic.c index 472ae1770964..f728755fa292 100644 --- a/drivers/irqchip/irq-xtensa-pic.c +++ b/drivers/irqchip/irq-xtensa-pic.c @@ -89,7 +89,7 @@ static struct irq_chip xtensa_irq_chip = { int __init xtensa_pic_init_legacy(struct device_node *interrupt_parent) { struct irq_domain *root_domain = - irq_domain_add_legacy(NULL, NR_IRQS, 0, 0, + irq_domain_add_legacy(NULL, NR_IRQS - 1, 1, 0, &xtensa_irq_domain_ops, &xtensa_irq_chip); irq_set_default_host(root_domain); return 0; diff --git a/drivers/isdn/i4l/isdn_common.c b/drivers/isdn/i4l/isdn_common.c index 9b856e1890d1..e4c43a17b333 100644 --- a/drivers/isdn/i4l/isdn_common.c +++ b/drivers/isdn/i4l/isdn_common.c @@ -1379,6 +1379,7 @@ isdn_ioctl(struct file *file, uint cmd, ulong arg) if (arg) { if (copy_from_user(bname, argp, sizeof(bname) - 1)) return -EFAULT; + bname[sizeof(bname)-1] = 0; } else return -EINVAL; ret = mutex_lock_interruptible(&dev->mtx); diff --git a/drivers/isdn/i4l/isdn_net.c b/drivers/isdn/i4l/isdn_net.c index aa5dd5668528..dbad5c431bcb 100644 
--- a/drivers/isdn/i4l/isdn_net.c +++ b/drivers/isdn/i4l/isdn_net.c @@ -2611,10 +2611,9 @@ isdn_net_newslave(char *parm) char newname[10]; if (p) { - /* Slave-Name MUST not be empty */ - if (!strlen(p + 1)) + /* Slave-Name MUST not be empty or overflow 'newname' */ + if (strscpy(newname, p + 1, sizeof(newname)) <= 0) return NULL; - strcpy(newname, p + 1); *p = 0; /* Master must already exist */ if (!(n = isdn_net_findif(parm))) diff --git a/drivers/isdn/i4l/isdn_ppp.c b/drivers/isdn/i4l/isdn_ppp.c index 9c1e8adaf4fc..bf3fbd00a091 100644 --- a/drivers/isdn/i4l/isdn_ppp.c +++ b/drivers/isdn/i4l/isdn_ppp.c @@ -2364,7 +2364,7 @@ static struct ippp_ccp_reset_state *isdn_ppp_ccp_reset_alloc_state(struct ippp_s id); return NULL; } else { - rs = kzalloc(sizeof(struct ippp_ccp_reset_state), GFP_KERNEL); + rs = kzalloc(sizeof(struct ippp_ccp_reset_state), GFP_ATOMIC); if (!rs) return NULL; rs->state = CCPResetIdle; diff --git a/drivers/leds/leds-qpnp-flash-v2.c b/drivers/leds/leds-qpnp-flash-v2.c index 564d20079715..15c931bbbf65 100644 --- a/drivers/leds/leds-qpnp-flash-v2.c +++ b/drivers/leds/leds-qpnp-flash-v2.c @@ -158,6 +158,11 @@ #define FLASH_LED_DISABLE 0x00 #define FLASH_LED_SAFETY_TMR_DISABLED 0x13 #define FLASH_LED_MAX_TOTAL_CURRENT_MA 3750 +#define FLASH_LED_IRES5P0_MAX_CURR_MA 640 +#define FLASH_LED_IRES7P5_MAX_CURR_MA 960 +#define FLASH_LED_IRES10P0_MAX_CURR_MA 1280 +#define FLASH_LED_IRES12P5_MAX_CURR_MA 1600 +#define MAX_IRES_LEVELS 4 /* notifier call chain for flash-led irqs */ static ATOMIC_NOTIFIER_HEAD(irq_notifier_list); @@ -196,13 +201,15 @@ struct flash_node_data { struct pinctrl_state *hw_strobe_state_suspend; int hw_strobe_gpio; int ires_ua; + int default_ires_ua; int max_current; int current_ma; int prev_current_ma; u8 duration; u8 id; u8 type; - u8 ires; + u8 ires_idx; + u8 default_ires_idx; u8 hdrm_val; u8 current_reg_val; u8 strobe_ctrl; @@ -305,6 +312,11 @@ static int otst3_threshold_table[] = { 125, 119, 113, 107, 149, 143, 137, 131, }; +static int max_ires_curr_ma_table[MAX_IRES_LEVELS] = { + FLASH_LED_IRES12P5_MAX_CURR_MA, FLASH_LED_IRES10P0_MAX_CURR_MA, + FLASH_LED_IRES7P5_MAX_CURR_MA, FLASH_LED_IRES5P0_MAX_CURR_MA +}; + static int qpnp_flash_led_read(struct qpnp_flash_led *led, u16 addr, u8 *data) { int rc; @@ -935,6 +947,7 @@ static void qpnp_flash_led_aggregate_max_current(struct flash_node_data *fnode) static void qpnp_flash_led_node_set(struct flash_node_data *fnode, int value) { + int i = 0; int prgm_current_ma = value; int min_ma = fnode->ires_ua / 1000; struct qpnp_flash_led *led = dev_get_drvdata(&fnode->pdev->dev); @@ -944,7 +957,22 @@ static void qpnp_flash_led_node_set(struct flash_node_data *fnode, int value) else if (value < min_ma) prgm_current_ma = min_ma; + fnode->ires_idx = fnode->default_ires_idx; + fnode->ires_ua = fnode->default_ires_ua; + prgm_current_ma = min(prgm_current_ma, fnode->max_current); + if (prgm_current_ma > max_ires_curr_ma_table[fnode->ires_idx]) { + /* find the matching ires */ + for (i = MAX_IRES_LEVELS - 1; i >= 0; i--) { + if (prgm_current_ma <= max_ires_curr_ma_table[i]) { + fnode->ires_idx = i; + fnode->ires_ua = FLASH_LED_IRES_MIN_UA + + (FLASH_LED_IRES_BASE - fnode->ires_idx) * + FLASH_LED_IRES_DIVISOR; + break; + } + } + } fnode->current_ma = prgm_current_ma; fnode->cdev.brightness = prgm_current_ma; fnode->current_reg_val = CURRENT_MA_TO_REG_VAL(prgm_current_ma, @@ -1062,7 +1090,7 @@ static int qpnp_flash_led_switch_set(struct flash_switch_data *snode, bool on) val = 0; for (i = 0; i < led->num_fnodes; i++) if 
(snode->led_mask & BIT(led->fnode[i].id)) - val |= led->fnode[i].ires << (led->fnode[i].id * 2); + val |= led->fnode[i].ires_idx << (led->fnode[i].id * 2); rc = qpnp_flash_led_masked_write(led, FLASH_LED_REG_IRES(led->base), FLASH_LED_CURRENT_MASK, val); @@ -1434,13 +1462,14 @@ static int qpnp_flash_led_parse_each_led_dt(struct qpnp_flash_led *led, return rc; } - fnode->ires_ua = FLASH_LED_IRES_DEFAULT_UA; - fnode->ires = FLASH_LED_IRES_DEFAULT_VAL; + fnode->default_ires_ua = fnode->ires_ua = FLASH_LED_IRES_DEFAULT_UA; + fnode->default_ires_idx = fnode->ires_idx = FLASH_LED_IRES_DEFAULT_VAL; rc = of_property_read_u32(node, "qcom,ires-ua", &val); if (!rc) { - fnode->ires_ua = val; - fnode->ires = FLASH_LED_IRES_BASE - - (val - FLASH_LED_IRES_MIN_UA) / FLASH_LED_IRES_DIVISOR; + fnode->default_ires_ua = fnode->ires_ua = val; + fnode->default_ires_idx = fnode->ires_idx = + FLASH_LED_IRES_BASE - (val - FLASH_LED_IRES_MIN_UA) / + FLASH_LED_IRES_DIVISOR; } else if (rc != -EINVAL) { pr_err("Unable to read current resolution rc=%d\n", rc); return rc; diff --git a/drivers/leds/leds-qpnp-wled.c b/drivers/leds/leds-qpnp-wled.c index 950244f1e4e8..bfa7d29701da 100644 --- a/drivers/leds/leds-qpnp-wled.c +++ b/drivers/leds/leds-qpnp-wled.c @@ -160,18 +160,19 @@ #define QPNP_WLED_MOD_EN_SHFT 7 #define QPNP_WLED_MOD_EN 1 #define QPNP_WLED_GATE_DRV_MASK 0xFE -#define QPNP_WLED_SYNC_DLY_MASK 0xF8 +#define QPNP_WLED_SYNC_DLY_MASK GENMASK(2, 0) #define QPNP_WLED_SYNC_DLY_MIN_US 0 #define QPNP_WLED_SYNC_DLY_MAX_US 1400 #define QPNP_WLED_SYNC_DLY_STEP_US 200 #define QPNP_WLED_DEF_SYNC_DLY_US 400 -#define QPNP_WLED_FS_CURR_MASK 0xF0 +#define QPNP_WLED_FS_CURR_MASK GENMASK(3, 0) #define QPNP_WLED_FS_CURR_MIN_UA 0 #define QPNP_WLED_FS_CURR_MAX_UA 30000 #define QPNP_WLED_FS_CURR_STEP_UA 2500 -#define QPNP_WLED_CABC_MASK 0x7F +#define QPNP_WLED_CABC_MASK 0x80 #define QPNP_WLED_CABC_SHIFT 7 #define QPNP_WLED_CURR_SINK_SHIFT 4 +#define QPNP_WLED_CURR_SINK_MASK GENMASK(7, 4) #define QPNP_WLED_BRIGHT_LSB_MASK 0xFF #define QPNP_WLED_BRIGHT_MSB_SHIFT 8 #define QPNP_WLED_BRIGHT_MSB_MASK 0x0F @@ -208,12 +209,14 @@ #define QPNP_WLED_SEC_UNLOCK 0xA5 #define QPNP_WLED_MAX_STRINGS 4 +#define QPNP_PM660_WLED_MAX_STRINGS 3 #define WLED_MAX_LEVEL_4095 4095 #define QPNP_WLED_RAMP_DLY_MS 20 #define QPNP_WLED_TRIGGER_NONE "none" #define QPNP_WLED_STR_SIZE 20 #define QPNP_WLED_MIN_MSLEEP 20 #define QPNP_WLED_SC_DLY_MS 20 +#define QPNP_WLED_SOFT_START_DLY_US 10000 #define NUM_SUPPORTED_AVDD_VOLTAGES 6 #define QPNP_WLED_DFLT_AVDD_MV 7600 @@ -381,6 +384,8 @@ struct qpnp_wled { u16 ramp_ms; u16 ramp_step; u16 cons_sync_write_delay_us; + u16 auto_calibration_ovp_count; + u16 max_strings; u8 strings[QPNP_WLED_MAX_STRINGS]; u8 num_strings; u8 loop_auto_gm_thresh; @@ -396,6 +401,9 @@ struct qpnp_wled { bool en_ext_pfet_sc_pro; bool prev_state; bool ovp_irq_disabled; + bool auto_calib_enabled; + bool auto_calib_done; + ktime_t start_ovp_fault_time; }; /* helper to read a pmic register */ @@ -531,7 +539,7 @@ static int qpnp_wled_set_level(struct qpnp_wled *wled, int level) u8 reg; /* set brightness registers */ - for (i = 0; i < wled->num_strings; i++) { + for (i = 0; i < wled->max_strings; i++) { reg = level & QPNP_WLED_BRIGHT_LSB_MASK; rc = qpnp_wled_write_reg(wled, QPNP_WLED_BRIGHT_LSB_REG(wled->sink_base, @@ -600,7 +608,8 @@ static int qpnp_wled_module_en(struct qpnp_wled *wled, * OVP interrupt disabled when the module is disabled. 
*/ if (state) { - usleep_range(10000, 11000); + usleep_range(QPNP_WLED_SOFT_START_DLY_US, + QPNP_WLED_SOFT_START_DLY_US + 1000); rc = qpnp_wled_psm_config(wled, false); if (rc < 0) return rc; @@ -873,32 +882,25 @@ static ssize_t qpnp_wled_fs_curr_ua_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct qpnp_wled *wled = dev_get_drvdata(dev); - int data, i, rc, temp; + int data, i, rc; u8 reg; rc = kstrtoint(buf, 10, &data); if (rc) return rc; - for (i = 0; i < wled->num_strings; i++) { + for (i = 0; i < wled->max_strings; i++) { if (data < QPNP_WLED_FS_CURR_MIN_UA) data = QPNP_WLED_FS_CURR_MIN_UA; else if (data > QPNP_WLED_FS_CURR_MAX_UA) data = QPNP_WLED_FS_CURR_MAX_UA; - rc = qpnp_wled_read_reg(wled, - QPNP_WLED_FS_CURR_REG(wled->sink_base, - wled->strings[i]), ®); + reg = data / QPNP_WLED_FS_CURR_STEP_UA; + rc = qpnp_wled_masked_write_reg(wled, + QPNP_WLED_FS_CURR_REG(wled->sink_base, i), + QPNP_WLED_FS_CURR_MASK, reg); if (rc < 0) return rc; - reg &= QPNP_WLED_FS_CURR_MASK; - temp = data / QPNP_WLED_FS_CURR_STEP_UA; - reg |= temp; - rc = qpnp_wled_write_reg(wled, - QPNP_WLED_FS_CURR_REG(wled->sink_base, - wled->strings[i]), reg); - if (rc) - return rc; } wled->fs_curr_ua = data; @@ -1090,6 +1092,229 @@ static int qpnp_wled_set_disp(struct qpnp_wled *wled, u16 base_addr) return 0; } +#define AUTO_CALIB_BRIGHTNESS 16 +static int wled_auto_calibrate(struct qpnp_wled *wled) +{ + int rc = 0, i; + u8 reg = 0, sink_config = 0, sink_test = 0, sink_valid = 0, int_sts; + + mutex_lock(&wled->lock); + + /* disable OVP IRQ */ + if (wled->ovp_irq > 0 && !wled->ovp_irq_disabled) { + disable_irq_nosync(wled->ovp_irq); + wled->ovp_irq_disabled = true; + } + + /* read configured sink configuration */ + rc = qpnp_wled_read_reg(wled, + QPNP_WLED_CURR_SINK_REG(wled->sink_base), &sink_config); + if (rc < 0) { + pr_err("Failed to read SINK configuration rc=%d\n", rc); + goto failed_calib; + } + + /* disable the module before starting calibration */ + rc = qpnp_wled_masked_write_reg(wled, + QPNP_WLED_MODULE_EN_REG(wled->ctrl_base), + QPNP_WLED_MODULE_EN_MASK, 0); + if (rc < 0) { + pr_err("Failed to disable WLED module rc=%d\n", rc); + goto failed_calib; + } + + /* set low brightness across all sinks */ + rc = qpnp_wled_set_level(wled, AUTO_CALIB_BRIGHTNESS); + if (rc < 0) { + pr_err("Failed to set brightness for calibration rc=%d\n", rc); + goto failed_calib; + } + + /* disable all sinks */ + rc = qpnp_wled_write_reg(wled, + QPNP_WLED_CURR_SINK_REG(wled->sink_base), 0); + if (rc < 0) { + pr_err("Failed to disable all sinks rc=%d\n", rc); + goto failed_calib; + } + + rc = qpnp_wled_masked_write_reg(wled, + QPNP_WLED_MODULE_EN_REG(wled->ctrl_base), + QPNP_WLED_MODULE_EN_MASK, + QPNP_WLED_MODULE_EN_MASK); + if (rc < 0) { + pr_err("Failed to enable WLED module rc=%d\n", rc); + goto failed_calib; + } + /* + * Delay for the WLED soft-start, check the OVP status + * only after soft-start is complete + */ + usleep_range(QPNP_WLED_SOFT_START_DLY_US, + QPNP_WLED_SOFT_START_DLY_US + 1000); + + /* iterate through the strings one by one */ + for (i = 0; i < wled->max_strings; i++) { + sink_test = 1 << (QPNP_WLED_CURR_SINK_SHIFT + i); + + /* Enable feedback control */ + rc = qpnp_wled_write_reg(wled, + QPNP_WLED_FDBK_OP_REG(wled->ctrl_base), + i + 1); + if (rc < 0) { + pr_err("Failed to enable feedback for SINK %d rc = %d\n", + i + 1, rc); + goto failed_calib; + } + + /* enable the sink */ + rc = qpnp_wled_write_reg(wled, + QPNP_WLED_CURR_SINK_REG(wled->sink_base), sink_test); + 
if (rc < 0) { + pr_err("Failed to configure SINK %d rc=%d\n", + i + 1, rc); + goto failed_calib; + } + + /* delay for WLED soft-start */ + usleep_range(QPNP_WLED_SOFT_START_DLY_US, + QPNP_WLED_SOFT_START_DLY_US + 1000); + + rc = qpnp_wled_read_reg(wled, + QPNP_WLED_INT_RT_STS(wled->ctrl_base), &int_sts); + if (rc < 0) { + pr_err("Error in reading WLED_INT_RT_STS rc=%d\n", rc); + goto failed_calib; + } + + if (int_sts & QPNP_WLED_OVP_FAULT_BIT) + pr_debug("WLED OVP fault detected with SINK %d\n", + i + 1); + else + sink_valid |= sink_test; + } + + if (sink_valid == sink_config) { + pr_debug("WLED auto-calibration complete, default sink-config=%x OK!\n", + sink_config); + } else { + pr_warn("Invalid WLED default sink config=%x changing it to=%x\n", + sink_config, sink_valid); + sink_config = sink_valid; + } + + if (!sink_config) { + pr_warn("No valid WLED sinks found\n"); + goto failed_calib; + } + + rc = qpnp_wled_masked_write_reg(wled, + QPNP_WLED_MODULE_EN_REG(wled->ctrl_base), + QPNP_WLED_MODULE_EN_MASK, 0); + if (rc < 0) { + pr_err("Failed to disable WLED module rc=%d\n", rc); + goto failed_calib; + } + + /* write the new sink configuration */ + rc = qpnp_wled_write_reg(wled, + QPNP_WLED_CURR_SINK_REG(wled->sink_base), sink_config); + if (rc < 0) { + pr_err("Failed to reconfigure the default sink rc=%d\n", rc); + goto failed_calib; + } + + /* MODULATOR_EN setting for valid sinks */ + for (i = 0; i < wled->max_strings; i++) { + if (sink_config & (1 << (QPNP_WLED_CURR_SINK_SHIFT + i))) + reg = (QPNP_WLED_MOD_EN << QPNP_WLED_MOD_EN_SHFT); + else + reg = 0x0; /* disable modulator_en for unused sink */ + + if (wled->dim_mode == QPNP_WLED_DIM_HYBRID) + reg &= QPNP_WLED_GATE_DRV_MASK; + else + reg |= ~QPNP_WLED_GATE_DRV_MASK; + + rc = qpnp_wled_write_reg(wled, + QPNP_WLED_MOD_EN_REG(wled->sink_base, i), reg); + if (rc < 0) { + pr_err("Failed to configure MODULATOR_EN rc=%d\n", rc); + goto failed_calib; + } + } + + /* restore the feedback setting */ + rc = qpnp_wled_write_reg(wled, + QPNP_WLED_FDBK_OP_REG(wled->ctrl_base), + wled->fdbk_op); + if (rc < 0) { + pr_err("Failed to restore feedback setting rc=%d\n", rc); + goto failed_calib; + } + + /* restore brightness */ + rc = qpnp_wled_set_level(wled, wled->cdev.brightness); + if (rc < 0) { + pr_err("Failed to set brightness after calibration rc=%d\n", + rc); + goto failed_calib; + } + + rc = qpnp_wled_masked_write_reg(wled, + QPNP_WLED_MODULE_EN_REG(wled->ctrl_base), + QPNP_WLED_MODULE_EN_MASK, + QPNP_WLED_MODULE_EN_MASK); + if (rc < 0) { + pr_err("Failed to enable WLED module rc=%d\n", rc); + goto failed_calib; + } + + /* delay for WLED soft-start */ + usleep_range(QPNP_WLED_SOFT_START_DLY_US, + QPNP_WLED_SOFT_START_DLY_US + 1000); + +failed_calib: + if (wled->ovp_irq > 0 && wled->ovp_irq_disabled) { + enable_irq(wled->ovp_irq); + wled->ovp_irq_disabled = false; + } + mutex_unlock(&wled->lock); + return rc; +} + +#define WLED_AUTO_CAL_OVP_COUNT 5 +#define WLED_AUTO_CAL_CNT_DLY_US 1000000 /* 1 second */ +static bool qpnp_wled_auto_cal_required(struct qpnp_wled *wled) +{ + s64 elapsed_time_us; + + /* + * Check if the OVP fault was an occasional one + * or if it's firing continuously; the latter qualifies + * for an auto-calibration check.
+ */ + if (!wled->auto_calibration_ovp_count) { + wled->start_ovp_fault_time = ktime_get(); + wled->auto_calibration_ovp_count++; + } else { + elapsed_time_us = ktime_us_delta(ktime_get(), + wled->start_ovp_fault_time); + if (elapsed_time_us > WLED_AUTO_CAL_CNT_DLY_US) + wled->auto_calibration_ovp_count = 0; + else + wled->auto_calibration_ovp_count++; + + if (wled->auto_calibration_ovp_count >= + WLED_AUTO_CAL_OVP_COUNT) { + wled->auto_calibration_ovp_count = 0; + return true; + } + } + + return false; +} + /* ovp irq handler */ static irqreturn_t qpnp_wled_ovp_irq_handler(int irq, void *_wled) { @@ -1114,6 +1339,21 @@ static irqreturn_t qpnp_wled_ovp_irq_handler(int irq, void *_wled) if (fault_sts & (QPNP_WLED_OVP_FAULT_BIT | QPNP_WLED_ILIM_FAULT_BIT)) pr_err("WLED OVP fault detected, int_sts=%x fault_sts= %x\n", int_sts, fault_sts); + + if (fault_sts & QPNP_WLED_OVP_FAULT_BIT) { + if (wled->auto_calib_enabled && !wled->auto_calib_done) { + if (qpnp_wled_auto_cal_required(wled)) { + rc = wled_auto_calibrate(wled); + if (rc < 0) { + pr_err("Failed auto-calibration rc=%d\n", + rc); + return IRQ_HANDLED; + } + wled->auto_calib_done = true; + } + } + } + return IRQ_HANDLED; } @@ -1423,7 +1663,7 @@ static int qpnp_wled_vref_config(struct qpnp_wled *wled) static int qpnp_wled_config(struct qpnp_wled *wled) { int rc, i, temp; - u8 reg = 0; + u8 reg = 0, sink_en = 0, mask; /* Configure display type */ rc = qpnp_wled_set_disp(wled, wled->ctrl_base); @@ -1622,16 +1862,50 @@ static int qpnp_wled_config(struct qpnp_wled *wled) rc = qpnp_wled_write_reg(wled, QPNP_WLED_CURR_SINK_REG(wled->sink_base), reg); + for (i = 0; i < wled->max_strings; i++) { + /* SYNC DELAY */ + if (wled->sync_dly_us > QPNP_WLED_SYNC_DLY_MAX_US) + wled->sync_dly_us = QPNP_WLED_SYNC_DLY_MAX_US; + + reg = wled->sync_dly_us / QPNP_WLED_SYNC_DLY_STEP_US; + mask = QPNP_WLED_SYNC_DLY_MASK; + rc = qpnp_wled_masked_write_reg(wled, + QPNP_WLED_SYNC_DLY_REG(wled->sink_base, i), + mask, reg); + if (rc < 0) + return rc; + + /* FULL SCALE CURRENT */ + if (wled->fs_curr_ua > QPNP_WLED_FS_CURR_MAX_UA) + wled->fs_curr_ua = QPNP_WLED_FS_CURR_MAX_UA; + + reg = wled->fs_curr_ua / QPNP_WLED_FS_CURR_STEP_UA; + mask = QPNP_WLED_FS_CURR_MASK; + rc = qpnp_wled_masked_write_reg(wled, + QPNP_WLED_FS_CURR_REG(wled->sink_base, i), + mask, reg); + if (rc < 0) + return rc; + + /* CABC */ + reg = wled->en_cabc ? 
(1 << QPNP_WLED_CABC_SHIFT) : 0; + mask = QPNP_WLED_CABC_MASK; + rc = qpnp_wled_masked_write_reg(wled, + QPNP_WLED_CABC_REG(wled->sink_base, i), + mask, reg); + if (rc < 0) + return rc; + } + + /* Settings specific to valid sinks */ for (i = 0; i < wled->num_strings; i++) { - if (wled->strings[i] >= QPNP_WLED_MAX_STRINGS) { + if (wled->strings[i] >= wled->max_strings) { dev_err(&wled->pdev->dev, "Invalid string number\n"); return -EINVAL; } - /* MODULATOR */ rc = qpnp_wled_read_reg(wled, - QPNP_WLED_MOD_EN_REG(wled->sink_base, - wled->strings[i]), ®); + QPNP_WLED_MOD_EN_REG(wled->sink_base, i), ®); if (rc < 0) return rc; reg &= QPNP_WLED_MOD_EN_MASK; @@ -1643,72 +1917,22 @@ static int qpnp_wled_config(struct qpnp_wled *wled) reg |= ~QPNP_WLED_GATE_DRV_MASK; rc = qpnp_wled_write_reg(wled, - QPNP_WLED_MOD_EN_REG(wled->sink_base, - wled->strings[i]), reg); + QPNP_WLED_MOD_EN_REG(wled->sink_base, i), reg); if (rc) return rc; - /* SYNC DELAY */ - if (wled->sync_dly_us > QPNP_WLED_SYNC_DLY_MAX_US) - wled->sync_dly_us = QPNP_WLED_SYNC_DLY_MAX_US; - - rc = qpnp_wled_read_reg(wled, - QPNP_WLED_SYNC_DLY_REG(wled->sink_base, - wled->strings[i]), ®); - if (rc < 0) - return rc; - reg &= QPNP_WLED_SYNC_DLY_MASK; - temp = wled->sync_dly_us / QPNP_WLED_SYNC_DLY_STEP_US; - reg |= temp; - rc = qpnp_wled_write_reg(wled, - QPNP_WLED_SYNC_DLY_REG(wled->sink_base, - wled->strings[i]), reg); - if (rc) - return rc; - - /* FULL SCALE CURRENT */ - if (wled->fs_curr_ua > QPNP_WLED_FS_CURR_MAX_UA) - wled->fs_curr_ua = QPNP_WLED_FS_CURR_MAX_UA; - - rc = qpnp_wled_read_reg(wled, - QPNP_WLED_FS_CURR_REG(wled->sink_base, - wled->strings[i]), ®); - if (rc < 0) - return rc; - reg &= QPNP_WLED_FS_CURR_MASK; - temp = wled->fs_curr_ua / QPNP_WLED_FS_CURR_STEP_UA; - reg |= temp; - rc = qpnp_wled_write_reg(wled, - QPNP_WLED_FS_CURR_REG(wled->sink_base, - wled->strings[i]), reg); - if (rc) - return rc; - - /* CABC */ - rc = qpnp_wled_read_reg(wled, - QPNP_WLED_CABC_REG(wled->sink_base, - wled->strings[i]), ®); - if (rc < 0) - return rc; - reg &= QPNP_WLED_CABC_MASK; - reg |= (wled->en_cabc << QPNP_WLED_CABC_SHIFT); - rc = qpnp_wled_write_reg(wled, - QPNP_WLED_CABC_REG(wled->sink_base, - wled->strings[i]), reg); - if (rc) - return rc; - - /* Enable CURRENT SINK */ - rc = qpnp_wled_read_reg(wled, - QPNP_WLED_CURR_SINK_REG(wled->sink_base), ®); - if (rc < 0) - return rc; + /* SINK EN */ temp = wled->strings[i] + QPNP_WLED_CURR_SINK_SHIFT; - reg |= (1 << temp); - rc = qpnp_wled_write_reg(wled, - QPNP_WLED_CURR_SINK_REG(wled->sink_base), reg); - if (rc) - return rc; + sink_en |= (1 << temp); + } + mask = QPNP_WLED_CURR_SINK_MASK; + rc = qpnp_wled_masked_write_reg(wled, + QPNP_WLED_CURR_SINK_REG(wled->sink_base), + mask, sink_en); + if (rc < 0) { + dev_err(&wled->pdev->dev, + "Failed to enable WLED sink config rc = %d\n", rc); + return rc; } rc = qpnp_wled_sync_reg_toggle(wled); @@ -1728,8 +1952,13 @@ static int qpnp_wled_config(struct qpnp_wled *wled) wled->ovp_irq, rc); return rc; } - disable_irq(wled->ovp_irq); - wled->ovp_irq_disabled = true; + rc = qpnp_wled_read_reg(wled, + QPNP_WLED_MODULE_EN_REG(wled->ctrl_base), ®); + /* disable the OVP irq only if the module is not enabled */ + if (!rc && !(reg & QPNP_WLED_MODULE_EN_MASK)) { + disable_irq(wled->ovp_irq); + wled->ovp_irq_disabled = true; + } } if (wled->sc_irq >= 0) { @@ -2091,11 +2320,16 @@ static int qpnp_wled_parse_dt(struct qpnp_wled *wled) wled->en_cabc = of_property_read_bool(pdev->dev.of_node, "qcom,en-cabc"); + if (wled->pmic_rev_id->pmic_subtype == PM660L_SUBTYPE) + 
wled->max_strings = QPNP_PM660_WLED_MAX_STRINGS; + else + wled->max_strings = QPNP_WLED_MAX_STRINGS; + prop = of_find_property(pdev->dev.of_node, "qcom,led-strings-list", &temp_val); if (!prop || !temp_val || temp_val > QPNP_WLED_MAX_STRINGS) { dev_err(&pdev->dev, "Invalid strings info, use default"); - wled->num_strings = QPNP_WLED_MAX_STRINGS; + wled->num_strings = wled->max_strings; for (i = 0; i < wled->num_strings; i++) wled->strings[i] = i; } else { @@ -2118,6 +2352,9 @@ static int qpnp_wled_parse_dt(struct qpnp_wled *wled) wled->lcd_psm_ctrl = of_property_read_bool(pdev->dev.of_node, "qcom,lcd-psm-ctrl"); + + wled->auto_calib_enabled = of_property_read_bool(pdev->dev.of_node, + "qcom,auto-calibration-enable"); return 0; } @@ -2185,13 +2422,13 @@ static int qpnp_wled_probe(struct platform_device *pdev) } mutex_init(&wled->bus_lock); + mutex_init(&wled->lock); rc = qpnp_wled_config(wled); if (rc) { dev_err(&pdev->dev, "wled config failed\n"); return rc; } - mutex_init(&wled->lock); INIT_WORK(&wled->work, qpnp_wled_work); wled->ramp_ms = QPNP_WLED_RAMP_DLY_MS; wled->ramp_step = 1; diff --git a/drivers/mailbox/mailbox.c b/drivers/mailbox/mailbox.c index 6a4811f85705..9cf826df89b1 100644 --- a/drivers/mailbox/mailbox.c +++ b/drivers/mailbox/mailbox.c @@ -104,11 +104,14 @@ static void tx_tick(struct mbox_chan *chan, int r) /* Submit next message */ msg_submit(chan); + if (!mssg) + return; + /* Notify the client */ - if (mssg && chan->cl->tx_done) + if (chan->cl->tx_done) chan->cl->tx_done(chan->cl, mssg, r); - if (chan->cl->tx_block) + if (r != -ETIME && chan->cl->tx_block) complete(&chan->tx_complete); } @@ -261,7 +264,7 @@ int mbox_send_message(struct mbox_chan *chan, void *mssg) msg_submit(chan); - if (chan->cl->tx_block && chan->active_req) { + if (chan->cl->tx_block) { unsigned long wait; int ret; @@ -272,8 +275,8 @@ int mbox_send_message(struct mbox_chan *chan, void *mssg) ret = wait_for_completion_timeout(&chan->tx_complete, wait); if (ret == 0) { - t = -EIO; - tx_tick(chan, -EIO); + t = -ETIME; + tx_tick(chan, t); } } diff --git a/drivers/md/dm-android-verity.c b/drivers/md/dm-android-verity.c index 4f6086970131..43d566fd38ae 100644 --- a/drivers/md/dm-android-verity.c +++ b/drivers/md/dm-android-verity.c @@ -645,6 +645,8 @@ static int add_as_linear_device(struct dm_target *ti, char *dev) android_verity_target.iterate_devices = dm_linear_iterate_devices, android_verity_target.io_hints = NULL; + set_disk_ro(dm_disk(dm_table_get_md(ti->table)), 0); + err = dm_linear_ctr(ti, DM_LINEAR_ARGS, linear_table_args); if (!err) { diff --git a/drivers/md/md.c b/drivers/md/md.c index eff554a12fb4..0a856cb181e9 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -1866,7 +1866,7 @@ super_1_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors) } sb = page_address(rdev->sb_page); sb->data_size = cpu_to_le64(num_sectors); - sb->super_offset = rdev->sb_start; + sb->super_offset = cpu_to_le64(rdev->sb_start); sb->sb_csum = calc_sb_1_csum(sb); md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size, rdev->sb_page); @@ -2273,7 +2273,7 @@ static bool does_sb_need_changing(struct mddev *mddev) /* Check if any mddev parameters have changed */ if ((mddev->dev_sectors != le64_to_cpu(sb->size)) || (mddev->reshape_position != le64_to_cpu(sb->reshape_position)) || - (mddev->layout != le64_to_cpu(sb->layout)) || + (mddev->layout != le32_to_cpu(sb->layout)) || (mddev->raid_disks != le32_to_cpu(sb->raid_disks)) || (mddev->chunk_sectors != le32_to_cpu(sb->chunksize))) return true; diff --git 
a/drivers/md/raid1.c b/drivers/md/raid1.c index d81be5e471d0..f24a9e14021d 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -1088,7 +1088,7 @@ static void make_request(struct mddev *mddev, struct bio * bio) */ DEFINE_WAIT(w); for (;;) { - flush_signals(current); + sigset_t full, old; prepare_to_wait(&conf->wait_barrier, &w, TASK_INTERRUPTIBLE); if (bio_end_sector(bio) <= mddev->suspend_lo || @@ -1097,7 +1097,10 @@ static void make_request(struct mddev *mddev, struct bio * bio) !md_cluster_ops->area_resyncing(mddev, WRITE, bio->bi_iter.bi_sector, bio_end_sector(bio)))) break; + sigfillset(&full); + sigprocmask(SIG_BLOCK, &full, &old); schedule(); + sigprocmask(SIG_SETMASK, &old, NULL); } finish_wait(&conf->wait_barrier, &w); } diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 4384b46cee1a..8f60520c8392 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -5279,12 +5279,15 @@ static void make_request(struct mddev *mddev, struct bio * bi) * userspace, we want an interruptible * wait. */ - flush_signals(current); prepare_to_wait(&conf->wait_for_overlap, &w, TASK_INTERRUPTIBLE); if (logical_sector >= mddev->suspend_lo && logical_sector < mddev->suspend_hi) { + sigset_t full, old; + sigfillset(&full); + sigprocmask(SIG_BLOCK, &full, &old); schedule(); + sigprocmask(SIG_SETMASK, &old, NULL); do_prepare = true; } goto retry; @@ -5818,6 +5821,8 @@ static void raid5_do_work(struct work_struct *work) pr_debug("%d stripes handled\n", handled); spin_unlock_irq(&conf->device_lock); + + async_tx_issue_pending_all(); blk_finish_plug(&plug); pr_debug("--- raid5worker inactive\n"); @@ -7528,12 +7533,10 @@ static void end_reshape(struct r5conf *conf) { if (!test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) { - struct md_rdev *rdev; spin_lock_irq(&conf->device_lock); conf->previous_raid_disks = conf->raid_disks; - rdev_for_each(rdev, conf->mddev) - rdev->data_offset = rdev->new_data_offset; + md_finish_reshape(conf->mddev); smp_wmb(); conf->reshape_progress = MaxSector; conf->mddev->reshape_position = MaxSector; diff --git a/drivers/media/i2c/s5c73m3/s5c73m3-ctrls.c b/drivers/media/i2c/s5c73m3/s5c73m3-ctrls.c index 8001cde1db1e..503135a4f47a 100644 --- a/drivers/media/i2c/s5c73m3/s5c73m3-ctrls.c +++ b/drivers/media/i2c/s5c73m3/s5c73m3-ctrls.c @@ -211,7 +211,7 @@ static int s5c73m3_3a_lock(struct s5c73m3 *state, struct v4l2_ctrl *ctrl) } if ((ctrl->val ^ ctrl->cur.val) & V4L2_LOCK_FOCUS) - ret = s5c73m3_af_run(state, ~af_lock); + ret = s5c73m3_af_run(state, !af_lock); return ret; } diff --git a/drivers/media/pci/cx88/cx88-cards.c b/drivers/media/pci/cx88/cx88-cards.c index 8f2556ec3971..61611d1682d1 100644 --- a/drivers/media/pci/cx88/cx88-cards.c +++ b/drivers/media/pci/cx88/cx88-cards.c @@ -3691,7 +3691,14 @@ struct cx88_core *cx88_core_create(struct pci_dev *pci, int nr) core->nr = nr; sprintf(core->name, "cx88[%d]", core->nr); - core->tvnorm = V4L2_STD_NTSC_M; + /* + * Note: Setting initial standard here would cause first call to + * cx88_set_tvnorm() to return without programming any registers. 
Leave + * it blank for at this point and it will get set later in + * cx8800_initdev() + */ + core->tvnorm = 0; + core->width = 320; core->height = 240; core->field = V4L2_FIELD_INTERLACED; diff --git a/drivers/media/pci/cx88/cx88-video.c b/drivers/media/pci/cx88/cx88-video.c index aef9acf351f6..abbf5b05b6f5 100644 --- a/drivers/media/pci/cx88/cx88-video.c +++ b/drivers/media/pci/cx88/cx88-video.c @@ -1429,7 +1429,7 @@ static int cx8800_initdev(struct pci_dev *pci_dev, /* initial device configuration */ mutex_lock(&core->lock); - cx88_set_tvnorm(core, core->tvnorm); + cx88_set_tvnorm(core, V4L2_STD_NTSC_M); v4l2_ctrl_handler_setup(&core->video_hdl); v4l2_ctrl_handler_setup(&core->audio_hdl); cx88_video_mux(core, 0); diff --git a/drivers/media/pci/saa7134/saa7134-i2c.c b/drivers/media/pci/saa7134/saa7134-i2c.c index 8ef6399d794f..bc957528f69f 100644 --- a/drivers/media/pci/saa7134/saa7134-i2c.c +++ b/drivers/media/pci/saa7134/saa7134-i2c.c @@ -355,12 +355,43 @@ static struct i2c_client saa7134_client_template = { /* ----------------------------------------------------------- */ +/* On Medion 7134 reading EEPROM needs DVB-T demod i2c gate open */ +static void saa7134_i2c_eeprom_md7134_gate(struct saa7134_dev *dev) +{ + u8 subaddr = 0x7, dmdregval; + u8 data[2]; + int ret; + struct i2c_msg i2cgatemsg_r[] = { {.addr = 0x08, .flags = 0, + .buf = &subaddr, .len = 1}, + {.addr = 0x08, + .flags = I2C_M_RD, + .buf = &dmdregval, .len = 1} + }; + struct i2c_msg i2cgatemsg_w[] = { {.addr = 0x08, .flags = 0, + .buf = data, .len = 2} }; + + ret = i2c_transfer(&dev->i2c_adap, i2cgatemsg_r, 2); + if ((ret == 2) && (dmdregval & 0x2)) { + pr_debug("%s: DVB-T demod i2c gate was left closed\n", + dev->name); + + data[0] = subaddr; + data[1] = (dmdregval & ~0x2); + if (i2c_transfer(&dev->i2c_adap, i2cgatemsg_w, 1) != 1) + pr_err("%s: EEPROM i2c gate open failure\n", + dev->name); + } +} + static int saa7134_i2c_eeprom(struct saa7134_dev *dev, unsigned char *eedata, int len) { unsigned char buf; int i,err; + if (dev->board == SAA7134_BOARD_MD7134) + saa7134_i2c_eeprom_md7134_gate(dev); + dev->i2c_client.addr = 0xa0 >> 1; buf = 0; if (1 != (err = i2c_master_send(&dev->i2c_client,&buf,1))) { diff --git a/drivers/media/platform/msm/ais/camera/camera.c b/drivers/media/platform/msm/ais/camera/camera.c index 158b83c12d00..33808d18d4c4 100644 --- a/drivers/media/platform/msm/ais/camera/camera.c +++ b/drivers/media/platform/msm/ais/camera/camera.c @@ -491,13 +491,16 @@ static long camera_v4l2_vidioc_private_ioctl(struct file *filep, void *fh, if (WARN_ON(!k_ioctl || !pvdev)) return -EIO; + if (cmd != VIDIOC_MSM_CAMERA_PRIVATE_IOCTL_CMD) + return -EINVAL; + switch (k_ioctl->id) { case MSM_CAMERA_PRIV_IOCTL_ID_RETURN_BUF: { struct msm_camera_return_buf ptr, *tmp = NULL; MSM_CAM_GET_IOCTL_ARG_PTR(&tmp, &k_ioctl->ioctl_ptr, sizeof(tmp)); - if (copy_from_user(&ptr, tmp, + if (copy_from_user(&ptr, (void __user *)tmp, sizeof(struct msm_camera_return_buf))) { return -EFAULT; } @@ -795,7 +798,7 @@ static long camera_handle_internal_compat_ioctl(struct file *file, { long rc = 0; struct msm_camera_private_ioctl_arg k_ioctl; - void __user *tmp_compat_ioctl_ptr = NULL; + void *tmp_compat_ioctl_ptr = NULL; rc = msm_copy_camera_private_ioctl_args(arg, &k_ioctl, &tmp_compat_ioctl_ptr); @@ -810,11 +813,13 @@ static long camera_handle_internal_compat_ioctl(struct file *file, k_ioctl.id, k_ioctl.size); return -EINVAL; } - k_ioctl.ioctl_ptr = (__u64)tmp_compat_ioctl_ptr; - if (!k_ioctl.ioctl_ptr) { + + if (tmp_compat_ioctl_ptr == NULL) { 
pr_debug("Invalid ptr for id %d", k_ioctl.id); return -EINVAL; } + k_ioctl.ioctl_ptr = (__u64)(uintptr_t)tmp_compat_ioctl_ptr; + rc = camera_v4l2_vidioc_private_ioctl(file, file->private_data, 0, cmd, (void *)&k_ioctl); } @@ -826,7 +831,7 @@ static long camera_handle_internal_compat_ioctl(struct file *file, return rc; } -long camera_v4l2_compat_ioctl(struct file *file, unsigned int cmd, +static long camera_v4l2_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { long ret = 0; diff --git a/drivers/media/platform/msm/ais/common/cam_hw_ops.c b/drivers/media/platform/msm/ais/common/cam_hw_ops.c index 073778c9edcc..cf28e0ca6536 100644 --- a/drivers/media/platform/msm/ais/common/cam_hw_ops.c +++ b/drivers/media/platform/msm/ais/common/cam_hw_ops.c @@ -50,7 +50,7 @@ struct cam_ahb_client_data { static struct cam_ahb_client_data data; -int get_vector_index(char *name) +static int get_vector_index(char *name) { int i = 0, rc = -1; @@ -213,7 +213,7 @@ err1: } EXPORT_SYMBOL(cam_ahb_clk_init); -int cam_consolidate_ahb_vote(enum cam_ahb_clk_client id, +static int cam_consolidate_ahb_vote(enum cam_ahb_clk_client id, enum cam_ahb_clk_vote vote) { int i = 0; diff --git a/drivers/media/platform/msm/ais/common/cam_smmu_api.c b/drivers/media/platform/msm/ais/common/cam_smmu_api.c index d3b239e9f304..0f0e14506325 100644 --- a/drivers/media/platform/msm/ais/common/cam_smmu_api.c +++ b/drivers/media/platform/msm/ais/common/cam_smmu_api.c @@ -466,7 +466,7 @@ static enum dma_data_direction cam_smmu_translate_dir( return DMA_NONE; } -void cam_smmu_reset_iommu_table(enum cam_smmu_init_dir ops) +static void cam_smmu_reset_iommu_table(enum cam_smmu_init_dir ops) { unsigned int i; int j = 0; diff --git a/drivers/media/platform/msm/ais/common/cam_smmu_api.h b/drivers/media/platform/msm/ais/common/cam_smmu_api.h index 4a13598dc719..26bd30a6c8c8 100644 --- a/drivers/media/platform/msm/ais/common/cam_smmu_api.h +++ b/drivers/media/platform/msm/ais/common/cam_smmu_api.h @@ -43,6 +43,10 @@ enum cam_smmu_map_dir { CAM_SMMU_MAP_INVALID }; +int cam_smmu_query_vaddr_in_range(int handle, + unsigned long fault_addr, unsigned long *start_addr, + unsigned long *end_addr, int *fd); + /** * @param identifier: Unique identifier to be used by clients which they * should get from device tree. 
CAM SMMU driver will diff --git a/drivers/media/platform/msm/ais/common/cam_soc_api.c b/drivers/media/platform/msm/ais/common/cam_soc_api.c index 118d665a44d3..92f3e4007390 100644 --- a/drivers/media/platform/msm/ais/common/cam_soc_api.c +++ b/drivers/media/platform/msm/ais/common/cam_soc_api.c @@ -36,7 +36,7 @@ struct msm_cam_bus_pscale_data { struct mutex lock; }; -struct msm_cam_bus_pscale_data g_cv[CAM_BUS_CLIENT_MAX]; +static struct msm_cam_bus_pscale_data g_cv[CAM_BUS_CLIENT_MAX]; /* Get all clocks from DT */ static int msm_camera_get_clk_info_internal(struct device *dev, @@ -771,7 +771,7 @@ void __iomem *msm_camera_get_reg_base(struct platform_device *pdev, char *device_name, int reserve_mem) { struct resource *mem; - void *base; + void __iomem *base; if (!pdev || !device_name) { pr_err("Invalid params\n"); diff --git a/drivers/media/platform/msm/ais/common/msm_camera_io_util.c b/drivers/media/platform/msm/ais/common/msm_camera_io_util.c index 8370f556a40d..22518c2cae7d 100644 --- a/drivers/media/platform/msm/ais/common/msm_camera_io_util.c +++ b/drivers/media/platform/msm/ais/common/msm_camera_io_util.c @@ -123,8 +123,8 @@ void msm_camera_io_memcpy_toio(void __iomem *dest_addr, void __iomem *src_addr, u32 len) { int i; - u32 *d = (u32 *) dest_addr; - u32 *s = (u32 *) src_addr; + u32 __iomem *d = (u32 __iomem *) dest_addr; + u32 __iomem *s = (u32 __iomem *) src_addr; for (i = 0; i < len; i++) writel_relaxed(*s++, d++); @@ -178,7 +178,7 @@ void msm_camera_io_dump(void __iomem *addr, int size, int enable) { char line_str[128], *p_str; int i; - u32 *p = (u32 *) addr; + u32 __iomem *p = (u32 __iomem *) addr; u32 data; CDBG("%s: addr=%pK size=%d\n", __func__, addr, size); @@ -242,8 +242,8 @@ void msm_camera_io_memcpy_mb(void __iomem *dest_addr, void __iomem *src_addr, u32 len) { int i; - u32 *d = (u32 *) dest_addr; - u32 *s = (u32 *) src_addr; + u32 __iomem *d = (u32 __iomem *) dest_addr; + u32 __iomem *s = (u32 __iomem *) src_addr; /* This is generic function called who needs to register * writes with memory barrier */ diff --git a/drivers/media/platform/msm/ais/common/msm_camera_io_util.h b/drivers/media/platform/msm/ais/common/msm_camera_io_util.h index 338e24d45500..3bd6c5f4866e 100644 --- a/drivers/media/platform/msm/ais/common/msm_camera_io_util.h +++ b/drivers/media/platform/msm/ais/common/msm_camera_io_util.h @@ -40,6 +40,8 @@ void msm_camera_io_w(u32 data, void __iomem *addr); void msm_camera_io_w_mb(u32 data, void __iomem *addr); u32 msm_camera_io_r(void __iomem *addr); u32 msm_camera_io_r_mb(void __iomem *addr); +void msm_camera_io_memcpy_toio(void __iomem *dest_addr, + void __iomem *src_addr, u32 len); void msm_camera_io_dump(void __iomem *addr, int size, int enable); void msm_camera_io_memcpy(void __iomem *dest_addr, void __iomem *src_addr, u32 len); diff --git a/drivers/media/platform/msm/ais/fd/msm_fd_dev.c b/drivers/media/platform/msm/ais/fd/msm_fd_dev.c index 420083f019cf..d9e109938e7e 100644 --- a/drivers/media/platform/msm/ais/fd/msm_fd_dev.c +++ b/drivers/media/platform/msm/ais/fd/msm_fd_dev.c @@ -430,6 +430,7 @@ static int msm_fd_open(struct file *file) ctx->vb2_q.type = V4L2_BUF_TYPE_VIDEO_OUTPUT; ctx->vb2_q.io_modes = VB2_USERPTR; ctx->vb2_q.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY; + mutex_init(&ctx->lock); ret = vb2_queue_init(&ctx->vb2_q); if (ret < 0) { dev_err(device->dev, "Error queue init\n"); @@ -480,7 +481,9 @@ static int msm_fd_release(struct file *file) msm_cpp_vbif_register_error_handler((void *)ctx, VBIF_CLIENT_FD, NULL); + 
mutex_lock(&ctx->lock); vb2_queue_release(&ctx->vb2_q); + mutex_unlock(&ctx->lock); vfree(ctx->stats); @@ -510,7 +513,9 @@ static unsigned int msm_fd_poll(struct file *file, struct fd_ctx *ctx = msm_fd_ctx_from_fh(file->private_data); unsigned int ret; + mutex_lock(&ctx->lock); ret = vb2_poll(&ctx->vb2_q, file, wait); + mutex_unlock(&ctx->lock); if (atomic_read(&ctx->subscribed_for_event)) { poll_wait(file, &ctx->fh.wait, wait); @@ -748,9 +753,9 @@ static int msm_fd_reqbufs(struct file *file, int ret; struct fd_ctx *ctx = msm_fd_ctx_from_fh(fh); - mutex_lock(&ctx->fd_device->recovery_lock); + mutex_lock(&ctx->lock); ret = vb2_reqbufs(&ctx->vb2_q, req); - mutex_unlock(&ctx->fd_device->recovery_lock); + mutex_unlock(&ctx->lock); return ret; } @@ -766,9 +771,9 @@ static int msm_fd_qbuf(struct file *file, void *fh, int ret; struct fd_ctx *ctx = msm_fd_ctx_from_fh(fh); - mutex_lock(&ctx->fd_device->recovery_lock); + mutex_lock(&ctx->lock); ret = vb2_qbuf(&ctx->vb2_q, pb); - mutex_unlock(&ctx->fd_device->recovery_lock); + mutex_unlock(&ctx->lock); return ret; } @@ -785,9 +790,9 @@ static int msm_fd_dqbuf(struct file *file, int ret; struct fd_ctx *ctx = msm_fd_ctx_from_fh(fh); - mutex_lock(&ctx->fd_device->recovery_lock); + mutex_lock(&ctx->lock); ret = vb2_dqbuf(&ctx->vb2_q, pb, file->f_flags & O_NONBLOCK); - mutex_unlock(&ctx->fd_device->recovery_lock); + mutex_unlock(&ctx->lock); return ret; } @@ -803,7 +808,9 @@ static int msm_fd_streamon(struct file *file, struct fd_ctx *ctx = msm_fd_ctx_from_fh(fh); int ret; + mutex_lock(&ctx->lock); ret = vb2_streamon(&ctx->vb2_q, buf_type); + mutex_unlock(&ctx->lock); if (ret < 0) dev_err(ctx->fd_device->dev, "Stream on fails\n"); @@ -822,7 +829,9 @@ static int msm_fd_streamoff(struct file *file, struct fd_ctx *ctx = msm_fd_ctx_from_fh(fh); int ret; + mutex_lock(&ctx->lock); ret = vb2_streamoff(&ctx->vb2_q, buf_type); + mutex_unlock(&ctx->lock); if (ret < 0) dev_err(ctx->fd_device->dev, "Stream off fails\n"); @@ -1053,14 +1062,18 @@ static int msm_fd_s_ctrl(struct file *file, void *fh, struct v4l2_control *a) a->value = ctx->format.size->work_size; break; case V4L2_CID_FD_WORK_MEMORY_FD: + mutex_lock(&ctx->fd_device->recovery_lock); if (ctx->work_buf.fd != -1) msm_fd_hw_unmap_buffer(&ctx->work_buf); if (a->value >= 0) { ret = msm_fd_hw_map_buffer(&ctx->mem_pool, a->value, &ctx->work_buf); - if (ret < 0) + if (ret < 0) { + mutex_unlock(&ctx->fd_device->recovery_lock); return ret; + } } + mutex_unlock(&ctx->fd_device->recovery_lock); break; default: return -EINVAL; diff --git a/drivers/media/platform/msm/ais/fd/msm_fd_dev.h b/drivers/media/platform/msm/ais/fd/msm_fd_dev.h index c15032256f4d..a7615a65d2fc 100644 --- a/drivers/media/platform/msm/ais/fd/msm_fd_dev.h +++ b/drivers/media/platform/msm/ais/fd/msm_fd_dev.h @@ -161,6 +161,7 @@ struct fd_ctx { struct msm_fd_mem_pool mem_pool; struct msm_fd_stats *stats; struct msm_fd_buf_handle work_buf; + struct mutex lock; }; /* diff --git a/drivers/media/platform/msm/ais/isp/msm_buf_mgr.c b/drivers/media/platform/msm/ais/isp/msm_buf_mgr.c index 2133f9391433..585865b12387 100644 --- a/drivers/media/platform/msm/ais/isp/msm_buf_mgr.c +++ b/drivers/media/platform/msm/ais/isp/msm_buf_mgr.c @@ -76,7 +76,7 @@ static int msm_buf_check_head_sanity(struct msm_isp_bufq *bufq) return rc; } -struct msm_isp_bufq *msm_isp_get_bufq( +static struct msm_isp_bufq *msm_isp_get_bufq( struct msm_isp_buf_mgr *buf_mgr, uint32_t bufq_handle) { @@ -161,7 +161,7 @@ static int msm_isp_free_bufq_handle(struct msm_isp_buf_mgr *buf_mgr, /* Set 
everything except lock to 0 */ bufq->bufq_handle = 0; - bufq->bufs = 0; + bufq->bufs = NULL; bufq->vfe_id = 0; bufq->output_id = 0; bufq->num_bufs = 0; diff --git a/drivers/media/platform/msm/ais/isp/msm_isp.c b/drivers/media/platform/msm/ais/isp/msm_isp.c index 97c0f779cf73..d62b830535a3 100644 --- a/drivers/media/platform/msm/ais/isp/msm_isp.c +++ b/drivers/media/platform/msm/ais/isp/msm_isp.c @@ -49,9 +49,6 @@ MODULE_DEVICE_TABLE(of, msm_vfe_dt_match); #define OVERFLOW_BUFFER_LENGTH 64 static char stat_line[OVERFLOW_LENGTH]; -struct msm_isp_statistics stats; -struct msm_isp_ub_info ub_info; - static int msm_isp_enable_debugfs(struct vfe_device *vfe_dev, struct msm_isp_bw_req_info *isp_req_hist); @@ -107,8 +104,8 @@ static int vfe_debugfs_statistics_open(struct inode *inode, struct file *file) return 0; } -static ssize_t vfe_debugfs_statistics_read(struct file *t_file, char *t_char, - size_t t_size_t, loff_t *t_loff_t) +static ssize_t vfe_debugfs_statistics_read(struct file *t_file, + char __user *t_char, size_t t_size_t, loff_t *t_loff_t) { int i; uint64_t *ptr; @@ -132,7 +129,7 @@ static ssize_t vfe_debugfs_statistics_read(struct file *t_file, char *t_char, } static ssize_t vfe_debugfs_statistics_write(struct file *t_file, - const char *t_char, size_t t_size_t, loff_t *t_loff_t) + const char __user *t_char, size_t t_size_t, loff_t *t_loff_t) { struct vfe_device *vfe_dev = (struct vfe_device *) t_file->private_data; @@ -149,7 +146,7 @@ static int bw_history_open(struct inode *inode, struct file *file) return 0; } -static ssize_t bw_history_read(struct file *t_file, char *t_char, +static ssize_t bw_history_read(struct file *t_file, char __user *t_char, size_t t_size_t, loff_t *t_loff_t) { int i; @@ -194,7 +191,7 @@ static ssize_t bw_history_read(struct file *t_file, char *t_char, } static ssize_t bw_history_write(struct file *t_file, - const char *t_char, size_t t_size_t, loff_t *t_loff_t) + const char __user *t_char, size_t t_size_t, loff_t *t_loff_t) { struct msm_isp_bw_req_info *isp_req_hist = (struct msm_isp_bw_req_info *) t_file->private_data; @@ -210,7 +207,7 @@ static int ub_info_open(struct inode *inode, struct file *file) return 0; } -static ssize_t ub_info_read(struct file *t_file, char *t_char, +static ssize_t ub_info_read(struct file *t_file, char __user *t_char, size_t t_size_t, loff_t *t_loff_t) { int i; @@ -241,7 +238,7 @@ static ssize_t ub_info_read(struct file *t_file, char *t_char, } static ssize_t ub_info_write(struct file *t_file, - const char *t_char, size_t t_size_t, loff_t *t_loff_t) + const char __user *t_char, size_t t_size_t, loff_t *t_loff_t) { struct vfe_device *vfe_dev = (struct vfe_device *) t_file->private_data; diff --git a/drivers/media/platform/msm/ais/isp/msm_isp.h b/drivers/media/platform/msm/ais/isp/msm_isp.h index 72a76d178aa8..86974eeb4a32 100644 --- a/drivers/media/platform/msm/ais/isp/msm_isp.h +++ b/drivers/media/platform/msm/ais/isp/msm_isp.h @@ -355,6 +355,7 @@ struct msm_vfe_hardware_info { uint32_t dmi_reg_offset; uint32_t min_ab; uint32_t min_ib; + uint32_t regulator_num; const char *regulator_names[]; }; diff --git a/drivers/media/platform/msm/ais/isp/msm_isp47.c b/drivers/media/platform/msm/ais/isp/msm_isp47.c index 8991433b2c67..d33dc758aef9 100644 --- a/drivers/media/platform/msm/ais/isp/msm_isp47.c +++ b/drivers/media/platform/msm/ais/isp/msm_isp47.c @@ -2537,8 +2537,7 @@ int msm_vfe47_get_regulators(struct vfe_device *vfe_dev) int rc = 0; int i; - vfe_dev->vfe_num_regulators = - sizeof(*vfe_dev->hw_info->regulator_names) / sizeof(char 
*); + vfe_dev->vfe_num_regulators = vfe_dev->hw_info->regulator_num; vfe_dev->regulator_info = kzalloc(sizeof(struct msm_cam_regulator) * vfe_dev->vfe_num_regulators, GFP_KERNEL); @@ -2708,7 +2707,6 @@ struct msm_vfe_hardware_info vfe47_hw_info = { .process_camif_irq = msm_vfe47_process_input_irq, .process_reset_irq = msm_vfe47_process_reset_irq, .process_halt_irq = msm_vfe47_process_halt_irq, - .process_reset_irq = msm_vfe47_process_reset_irq, .process_reg_update = msm_vfe47_process_reg_update, .process_axi_irq = msm_isp_process_axi_irq, .process_stats_irq = msm_isp_process_stats_irq, @@ -2812,6 +2810,7 @@ struct msm_vfe_hardware_info vfe47_hw_info = { .dmi_reg_offset = 0xC2C, .axi_hw_info = &msm_vfe47_axi_hw_info, .stats_hw_info = &msm_vfe47_stats_hw_info, + .regulator_num = 3, .regulator_names = {"vdd", "camss-vdd", "mmagic-vdd"}, }; EXPORT_SYMBOL(vfe47_hw_info); diff --git a/drivers/media/platform/msm/ais/isp/msm_isp47.h b/drivers/media/platform/msm/ais/isp/msm_isp47.h index b29fca61ce7c..9af0acd3656a 100644 --- a/drivers/media/platform/msm/ais/isp/msm_isp47.h +++ b/drivers/media/platform/msm/ais/isp/msm_isp47.h @@ -30,6 +30,8 @@ enum msm_vfe47_stats_comp_idx { extern struct msm_vfe_hardware_info vfe47_hw_info; +uint32_t msm_vfe47_ub_reg_offset(struct vfe_device *vfe_dev, int wm_idx); +uint32_t msm_vfe47_get_ub_size(struct vfe_device *vfe_dev); void msm_vfe47_read_irq_status(struct vfe_device *vfe_dev, uint32_t *irq_status0, uint32_t *irq_status1); void msm_vfe47_read_irq_status_and_clear(struct vfe_device *vfe_dev, @@ -70,6 +72,8 @@ int32_t msm_vfe47_cfg_io_format(struct vfe_device *vfe_dev, enum msm_vfe_axi_stream_src stream_src, uint32_t io_format); int msm_vfe47_start_fetch_engine(struct vfe_device *vfe_dev, void *arg); +int msm_vfe47_start_fetch_engine_multi_pass(struct vfe_device *vfe_dev, + void *arg); void msm_vfe47_cfg_fetch_engine(struct vfe_device *vfe_dev, struct msm_vfe_pix_cfg *pix_cfg); void msm_vfe47_cfg_testgen(struct vfe_device *vfe_dev, diff --git a/drivers/media/platform/msm/ais/isp/msm_isp_axi_util.h b/drivers/media/platform/msm/ais/isp/msm_isp_axi_util.h index 0396fc4680f1..5ed89161b7f3 100644 --- a/drivers/media/platform/msm/ais/isp/msm_isp_axi_util.h +++ b/drivers/media/platform/msm/ais/isp/msm_isp_axi_util.h @@ -27,6 +27,9 @@ int msm_isp_axi_create_stream(struct vfe_device *vfe_dev, void msm_isp_axi_destroy_stream( struct msm_vfe_axi_shared_data *axi_data, int stream_idx); +int msm_isp_axi_get_num_planes(uint32_t output_format, + struct msm_vfe_axi_stream *stream_info); + int msm_isp_validate_axi_request( struct msm_vfe_axi_shared_data *axi_data, struct msm_vfe_axi_stream_request_cmd *stream_cfg_cmd); @@ -36,21 +39,34 @@ void msm_isp_axi_reserve_wm( struct msm_vfe_axi_shared_data *axi_data, struct msm_vfe_axi_stream *stream_info); +void msm_isp_axi_free_wm(struct msm_vfe_axi_shared_data *axi_data, + struct msm_vfe_axi_stream *stream_info); + void msm_isp_axi_reserve_comp_mask( struct msm_vfe_axi_shared_data *axi_data, struct msm_vfe_axi_stream *stream_info); +void msm_isp_axi_free_comp_mask(struct msm_vfe_axi_shared_data *axi_data, + struct msm_vfe_axi_stream *stream_info); + int msm_isp_axi_check_stream_state( struct vfe_device *vfe_dev, struct msm_vfe_axi_stream_cfg_cmd *stream_cfg_cmd); +void msm_isp_check_for_output_error(struct vfe_device *vfe_dev, + struct msm_isp_timestamp *ts, struct msm_isp_sof_info *sof_info); + int msm_isp_calculate_framedrop( struct msm_vfe_axi_shared_data *axi_data, struct msm_vfe_axi_stream_request_cmd *stream_cfg_cmd); +void 
msm_isp_calculate_bandwidth( + struct msm_vfe_axi_shared_data *axi_data, + struct msm_vfe_axi_stream *stream_info); void msm_isp_reset_framedrop(struct vfe_device *vfe_dev, struct msm_vfe_axi_stream *stream_info); int msm_isp_request_axi_stream(struct vfe_device *vfe_dev, void *arg); +void msm_isp_start_avtimer(void); void msm_isp_get_avtimer_ts(struct msm_isp_timestamp *time_stamp); int msm_isp_cfg_axi_stream(struct vfe_device *vfe_dev, void *arg); int msm_isp_update_stream_bandwidth(struct vfe_device *vfe_dev, diff --git a/drivers/media/platform/msm/ais/isp/msm_isp_stats_util.c b/drivers/media/platform/msm/ais/isp/msm_isp_stats_util.c index 6e89544161ee..0d08cffda25c 100644 --- a/drivers/media/platform/msm/ais/isp/msm_isp_stats_util.c +++ b/drivers/media/platform/msm/ais/isp/msm_isp_stats_util.c @@ -891,6 +891,12 @@ int msm_isp_update_stats_stream(struct vfe_device *vfe_dev, void *arg) struct msm_vfe_axi_stream_cfg_update_info *update_info = NULL; struct msm_isp_sw_framskip *sw_skip_info = NULL; + if (update_cmd->num_streams > MSM_ISP_STATS_MAX) { + pr_err("%s: Invalid num_streams %d\n", + __func__, update_cmd->num_streams); + return -EINVAL; + } + /* validate request */ for (i = 0; i < update_cmd->num_streams; i++) { update_info = (struct msm_vfe_axi_stream_cfg_update_info *) diff --git a/drivers/media/platform/msm/ais/isp/msm_isp_stats_util.h b/drivers/media/platform/msm/ais/isp/msm_isp_stats_util.h index 707901bc6271..ae438a675542 100644 --- a/drivers/media/platform/msm/ais/isp/msm_isp_stats_util.h +++ b/drivers/media/platform/msm/ais/isp/msm_isp_stats_util.h @@ -19,6 +19,8 @@ void msm_isp_process_stats_irq(struct vfe_device *vfe_dev, uint32_t irq_status0, uint32_t irq_status1, uint32_t pingpong_status, struct msm_isp_timestamp *ts); +int msm_isp_stats_create_stream(struct vfe_device *vfe_dev, + struct msm_vfe_stats_stream_request_cmd *stream_req_cmd); void msm_isp_stats_stream_update(struct vfe_device *vfe_dev); int msm_isp_cfg_stats_stream(struct vfe_device *vfe_dev, void *arg); int msm_isp_update_stats_stream(struct vfe_device *vfe_dev, void *arg); diff --git a/drivers/media/platform/msm/ais/isp/msm_isp_util.c b/drivers/media/platform/msm/ais/isp/msm_isp_util.c index 0353ab27cf19..9e5317eb2920 100644 --- a/drivers/media/platform/msm/ais/isp/msm_isp_util.c +++ b/drivers/media/platform/msm/ais/isp/msm_isp_util.c @@ -512,7 +512,7 @@ static int msm_isp_cfg_rdi(struct vfe_device *vfe_dev, return rc; } -int msm_isp_cfg_input(struct vfe_device *vfe_dev, void *arg) +static int msm_isp_cfg_input(struct vfe_device *vfe_dev, void *arg) { int rc = 0; struct msm_vfe_input_cfg *input_cfg = arg; @@ -542,7 +542,7 @@ int msm_isp_cfg_input(struct vfe_device *vfe_dev, void *arg) return rc; } -int msm_isp_camif_cfg(struct vfe_device *vfe_dev, void *arg) +static int msm_isp_camif_cfg(struct vfe_device *vfe_dev, void *arg) { int rc = 0; struct msm_vfe_camif_cfg *camif_cfg = arg; @@ -579,7 +579,7 @@ int msm_isp_camif_cfg(struct vfe_device *vfe_dev, void *arg) } -int msm_isp_operation_cfg(struct vfe_device *vfe_dev, void *arg) +static int msm_isp_operation_cfg(struct vfe_device *vfe_dev, void *arg) { struct msm_vfe_operation_cfg *op_cfg = arg; @@ -1233,14 +1233,16 @@ static int msm_isp_send_hw_cmd(struct vfe_device *vfe_dev, case VFE_WRITE: { msm_camera_io_memcpy(vfe_dev->vfe_base + reg_cfg_cmd->u.rw_info.reg_offset, - cfg_data + reg_cfg_cmd->u.rw_info.cmd_data_offset/4, + (void __iomem *) + (cfg_data + reg_cfg_cmd->u.rw_info.cmd_data_offset/4), reg_cfg_cmd->u.rw_info.len); break; } case VFE_WRITE_MB: { 
msm_camera_io_memcpy_mb(vfe_dev->vfe_base + reg_cfg_cmd->u.rw_info.reg_offset, - cfg_data + reg_cfg_cmd->u.rw_info.cmd_data_offset/4, + (void __iomem *) + (cfg_data + reg_cfg_cmd->u.rw_info.cmd_data_offset/4), reg_cfg_cmd->u.rw_info.len); break; } @@ -2295,12 +2297,12 @@ int msm_isp_open_node(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh) } #ifdef CONFIG_MSM_AVTIMER -void msm_isp_end_avtimer(void) +static void msm_isp_end_avtimer(void) { avcs_core_disable_power_collapse(0); } #else -void msm_isp_end_avtimer(void) +static void msm_isp_end_avtimer(void) { pr_err("AV Timer is not supported\n"); } @@ -2408,7 +2410,7 @@ void msm_isp_save_framedrop_values(struct vfe_device *vfe_dev, } } -void msm_isp_dump_irq_debug(void) +static void msm_isp_dump_irq_debug(void) { uint32_t index, count, i; diff --git a/drivers/media/platform/msm/ais/ispif/msm_ispif.c b/drivers/media/platform/msm/ais/ispif/msm_ispif.c index 8eb88364a2cb..c41f4546da5f 100644 --- a/drivers/media/platform/msm/ais/ispif/msm_ispif.c +++ b/drivers/media/platform/msm/ais/ispif/msm_ispif.c @@ -69,6 +69,11 @@ static void msm_ispif_io_dump_reg(struct ispif_device *ispif) { if (!ispif->enb_dump_reg) return; + if (!ispif->base) { + pr_err("%s: null pointer for the ispif base\n", __func__); + return; + } + msm_camera_io_dump(ispif->base, 0x250, 0); } diff --git a/drivers/media/platform/msm/ais/msm.c b/drivers/media/platform/msm/ais/msm.c index e8859b7db5cb..2a1ec86118c5 100644 --- a/drivers/media/platform/msm/ais/msm.c +++ b/drivers/media/platform/msm/ais/msm.c @@ -48,10 +48,10 @@ bool is_daemon_status = true; /* config node envent queue */ static struct v4l2_fh *msm_eventq; -spinlock_t msm_eventq_lock; +static spinlock_t msm_eventq_lock; static struct pid *msm_pid; -spinlock_t msm_pid_lock; +static spinlock_t msm_pid_lock; /* * It takes 20 bytes + NULL character to write the @@ -62,7 +62,7 @@ spinlock_t msm_pid_lock; #define msm_dequeue(queue, type, member) ({ \ unsigned long flags; \ struct msm_queue_head *__q = (queue); \ - type *node = 0; \ + type *node = NULL; \ spin_lock_irqsave(&__q->lock, flags); \ if (!list_empty(&__q->list)) { \ __q->len--; \ @@ -78,7 +78,7 @@ spinlock_t msm_pid_lock; #define msm_delete_sd_entry(queue, type, member, q_node) ({ \ unsigned long flags; \ struct msm_queue_head *__q = (queue); \ - type *node = 0; \ + type *node = NULL; \ spin_lock_irqsave(&__q->lock, flags); \ if (!list_empty(&__q->list)) { \ list_for_each_entry(node, &__q->list, member) \ @@ -95,7 +95,7 @@ spinlock_t msm_pid_lock; #define msm_delete_entry(queue, type, member, q_node) ({ \ unsigned long flags; \ struct msm_queue_head *__q = (queue); \ - type *node = 0; \ + type *node = NULL; \ spin_lock_irqsave(&__q->lock, flags); \ if (!list_empty(&__q->list)) { \ list_for_each_entry(node, &__q->list, member) \ @@ -131,7 +131,7 @@ typedef int (*msm_queue_func)(void *d1, void *d2); #define msm_queue_traverse_action(queue, type, member, func, data) do {\ unsigned long flags; \ struct msm_queue_head *__q = (queue); \ - type *node = 0; \ + type *node = NULL; \ msm_queue_func __f = (func); \ spin_lock_irqsave(&__q->lock, flags); \ if (!list_empty(&__q->list)) { \ @@ -147,7 +147,7 @@ typedef int (*msm_queue_find_func)(void *d1, void *d2); #define msm_queue_find(queue, type, member, func, data) ({\ unsigned long flags; \ struct msm_queue_head *__q = (queue); \ - type *node = 0; \ + type *node = NULL; \ typeof(node) __ret = NULL; \ msm_queue_find_func __f = (func); \ spin_lock_irqsave(&__q->lock, flags); \ @@ -283,22 +283,47 @@ void 
msm_delete_stream(unsigned int session_id, unsigned int stream_id) struct msm_session *session = NULL; struct msm_stream *stream = NULL; unsigned long flags; + int try_count = 0; session = msm_queue_find(msm_session_q, struct msm_session, list, __msm_queue_find_session, &session_id); + if (!session) return; - stream = msm_queue_find(&session->stream_q, struct msm_stream, - list, __msm_queue_find_stream, &stream_id); - if (!stream) - return; - spin_lock_irqsave(&(session->stream_q.lock), flags); - list_del_init(&stream->list); - session->stream_q.len--; - kfree(stream); - stream = NULL; - spin_unlock_irqrestore(&(session->stream_q.lock), flags); + while (1) { + + if (try_count > 5) { + pr_err("%s : not able to delete stream %d\n", + __func__, __LINE__); + break; + } + + write_lock(&session->stream_rwlock); + try_count++; + stream = msm_queue_find(&session->stream_q, struct msm_stream, + list, __msm_queue_find_stream, &stream_id); + + if (!stream) { + write_unlock(&session->stream_rwlock); + return; + } + + if (msm_vb2_get_stream_state(stream) != 1) { + write_unlock(&session->stream_rwlock); + continue; + } + + spin_lock_irqsave(&(session->stream_q.lock), flags); + list_del_init(&stream->list); + session->stream_q.len--; + kfree(stream); + stream = NULL; + spin_unlock_irqrestore(&(session->stream_q.lock), flags); + write_unlock(&session->stream_rwlock); + break; + } + } EXPORT_SYMBOL(msm_delete_stream); @@ -446,6 +471,7 @@ int msm_create_session(unsigned int session_id, struct video_device *vdev) mutex_init(&session->lock); mutex_init(&session->lock_q); mutex_init(&session->close_lock); + rwlock_init(&session->stream_rwlock); return 0; } EXPORT_SYMBOL(msm_create_session); @@ -699,6 +725,9 @@ static long msm_private_ioctl(struct file *file, void *fh, return 0; } + if (!event_data) + return -EINVAL; + memset(&event, 0, sizeof(struct v4l2_event)); session_id = event_data->session_id; stream_id = event_data->stream_id; @@ -1040,17 +1069,25 @@ static struct v4l2_file_operations msm_fops = { #endif }; -struct msm_stream *msm_get_stream(unsigned int session_id, - unsigned int stream_id) +struct msm_session *msm_get_session(unsigned int session_id) { struct msm_session *session; - struct msm_stream *stream; session = msm_queue_find(msm_session_q, struct msm_session, list, __msm_queue_find_session, &session_id); if (!session) return ERR_PTR(-EINVAL); + return session; +} +EXPORT_SYMBOL(msm_get_session); + + +struct msm_stream *msm_get_stream(struct msm_session *session, + unsigned int stream_id) +{ + struct msm_stream *stream; + stream = msm_queue_find(&session->stream_q, struct msm_stream, list, __msm_queue_find_stream, &stream_id); @@ -1108,6 +1145,34 @@ struct msm_stream *msm_get_stream_from_vb2q(struct vb2_queue *q) } EXPORT_SYMBOL(msm_get_stream_from_vb2q); +struct msm_session *msm_get_session_from_vb2q(struct vb2_queue *q) +{ + struct msm_session *session; + struct msm_stream *stream; + unsigned long flags1; + unsigned long flags2; + + spin_lock_irqsave(&msm_session_q->lock, flags1); + list_for_each_entry(session, &(msm_session_q->list), list) { + spin_lock_irqsave(&(session->stream_q.lock), flags2); + list_for_each_entry( + stream, &(session->stream_q.list), list) { + if (stream->vb2_q == q) { + spin_unlock_irqrestore + (&(session->stream_q.lock), flags2); + spin_unlock_irqrestore + (&msm_session_q->lock, flags1); + return session; + } + } + spin_unlock_irqrestore(&(session->stream_q.lock), flags2); + } + spin_unlock_irqrestore(&msm_session_q->lock, flags1); + return NULL; +} 
+EXPORT_SYMBOL(msm_get_session_from_vb2q); + + #ifdef CONFIG_COMPAT long msm_copy_camera_private_ioctl_args(unsigned long arg, struct msm_camera_private_ioctl_arg *k_ioctl, @@ -1119,7 +1184,7 @@ long msm_copy_camera_private_ioctl_args(unsigned long arg, return -EIO; if (copy_from_user(&up_ioctl, - (struct msm_camera_private_ioctl_arg *)arg, + (void __user *)arg, sizeof(struct msm_camera_private_ioctl_arg))) return -EFAULT; diff --git a/drivers/media/platform/msm/ais/msm.h b/drivers/media/platform/msm/ais/msm.h index d8b2d5871fc2..5d456310c301 100644 --- a/drivers/media/platform/msm/ais/msm.h +++ b/drivers/media/platform/msm/ais/msm.h @@ -114,6 +114,7 @@ struct msm_session { struct mutex lock; struct mutex lock_q; struct mutex close_lock; + rwlock_t stream_rwlock; }; static inline bool msm_is_daemon_present(void) @@ -131,11 +132,13 @@ int msm_create_stream(unsigned int session_id, void msm_delete_stream(unsigned int session_id, unsigned int stream_id); int msm_create_command_ack_q(unsigned int session_id, unsigned int stream_id); void msm_delete_command_ack_q(unsigned int session_id, unsigned int stream_id); -struct msm_stream *msm_get_stream(unsigned int session_id, +struct msm_session *msm_get_session(unsigned int session_id); +struct msm_stream *msm_get_stream(struct msm_session *session, unsigned int stream_id); struct vb2_queue *msm_get_stream_vb2q(unsigned int session_id, unsigned int stream_id); struct msm_stream *msm_get_stream_from_vb2q(struct vb2_queue *q); +struct msm_session *msm_get_session_from_vb2q(struct vb2_queue *q); struct msm_session *msm_session_find(unsigned int session_id); #ifdef CONFIG_COMPAT long msm_copy_camera_private_ioctl_args(unsigned long arg, diff --git a/drivers/media/platform/msm/ais/msm_buf_mgr/msm_generic_buf_mgr.c b/drivers/media/platform/msm/ais/msm_buf_mgr/msm_generic_buf_mgr.c index 073b91a6d2d9..66751b1f0657 100644 --- a/drivers/media/platform/msm/ais/msm_buf_mgr/msm_generic_buf_mgr.c +++ b/drivers/media/platform/msm/ais/msm_buf_mgr/msm_generic_buf_mgr.c @@ -51,7 +51,7 @@ static int32_t msm_buf_mngr_hdl_cont_get_buf(struct msm_buf_mngr_device *dev, } static int32_t msm_buf_mngr_get_buf(struct msm_buf_mngr_device *dev, - void __user *argp) + void *argp) { unsigned long flags; int32_t rc = 0; @@ -465,7 +465,7 @@ static int msm_generic_buf_mngr_close(struct v4l2_subdev *sd, return rc; } -int msm_cam_buf_mgr_ops(unsigned int cmd, void *argp) +static int msm_cam_buf_mgr_ops(unsigned int cmd, void *argp) { int rc = 0; @@ -531,7 +531,7 @@ static long msm_buf_mngr_subdev_ioctl(struct v4l2_subdev *sd, { int32_t rc = 0; struct msm_buf_mngr_device *buf_mngr_dev = v4l2_get_subdevdata(sd); - void __user *argp = (void __user *)arg; + void *argp = arg; if (!buf_mngr_dev) { pr_err("%s buf manager device NULL\n", __func__); @@ -557,13 +557,13 @@ static long msm_buf_mngr_subdev_ioctl(struct v4l2_subdev *sd, MSM_CAM_GET_IOCTL_ARG_PTR(&tmp, &k_ioctl.ioctl_ptr, sizeof(tmp)); - if (copy_from_user(&buf_info, tmp, + if (copy_from_user(&buf_info, (void __user *)tmp, sizeof(struct msm_buf_mngr_info))) { return -EFAULT; } k_ioctl.ioctl_ptr = (uintptr_t)&buf_info; - argp = &k_ioctl; + argp = (void *)&k_ioctl; rc = msm_cam_buf_mgr_ops(cmd, argp); } break; diff --git a/drivers/media/platform/msm/ais/msm_vb2/msm_vb2.c b/drivers/media/platform/msm/ais/msm_vb2/msm_vb2.c index 280bf4ebb596..36aa3f62fbec 100644 --- a/drivers/media/platform/msm/ais/msm_vb2/msm_vb2.c +++ b/drivers/media/platform/msm/ais/msm_vb2/msm_vb2.c @@ -41,20 +41,28 @@ static int msm_vb2_queue_setup(struct 
vb2_queue *q, return 0; } -int msm_vb2_buf_init(struct vb2_buffer *vb) +static int msm_vb2_buf_init(struct vb2_buffer *vb) { struct msm_stream *stream; + struct msm_session *session; struct msm_vb2_buffer *msm_vb2_buf; struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); + session = msm_get_session_from_vb2q(vb->vb2_queue); + if (IS_ERR_OR_NULL(session)) + return -EINVAL; + + read_lock(&session->stream_rwlock); + stream = msm_get_stream_from_vb2q(vb->vb2_queue); if (!stream) { pr_err("%s: Couldn't find stream\n", __func__); + read_unlock(&session->stream_rwlock); return -EINVAL; } msm_vb2_buf = container_of(vbuf, struct msm_vb2_buffer, vb2_v4l2_buf); msm_vb2_buf->in_freeq = 0; - + read_unlock(&session->stream_rwlock); return 0; } @@ -62,6 +70,7 @@ static void msm_vb2_buf_queue(struct vb2_buffer *vb) { struct msm_vb2_buffer *msm_vb2; struct msm_stream *stream; + struct msm_session *session; unsigned long flags; struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); @@ -71,21 +80,30 @@ static void msm_vb2_buf_queue(struct vb2_buffer *vb) return; } + session = msm_get_session_from_vb2q(vb->vb2_queue); + if (IS_ERR_OR_NULL(session)) + return; + + read_lock(&session->stream_rwlock); + stream = msm_get_stream_from_vb2q(vb->vb2_queue); if (!stream) { pr_err("%s:%d] NULL stream", __func__, __LINE__); + read_unlock(&session->stream_rwlock); return; } spin_lock_irqsave(&stream->stream_lock, flags); list_add_tail(&msm_vb2->list, &stream->queued_list); spin_unlock_irqrestore(&stream->stream_lock, flags); + read_unlock(&session->stream_rwlock); } static void msm_vb2_buf_finish(struct vb2_buffer *vb) { struct msm_vb2_buffer *msm_vb2; struct msm_stream *stream; + struct msm_session *session; unsigned long flags; struct msm_vb2_buffer *msm_vb2_entry, *temp; struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); @@ -96,9 +114,16 @@ static void msm_vb2_buf_finish(struct vb2_buffer *vb) return; } + session = msm_get_session_from_vb2q(vb->vb2_queue); + if (IS_ERR_OR_NULL(session)) + return; + + read_lock(&session->stream_rwlock); + stream = msm_get_stream_from_vb2q(vb->vb2_queue); if (!stream) { pr_err("%s:%d] NULL stream", __func__, __LINE__); + read_unlock(&session->stream_rwlock); return; } @@ -111,18 +136,27 @@ static void msm_vb2_buf_finish(struct vb2_buffer *vb) } } spin_unlock_irqrestore(&stream->stream_lock, flags); + read_unlock(&session->stream_rwlock); } static void msm_vb2_stop_stream(struct vb2_queue *q) { struct msm_vb2_buffer *msm_vb2, *temp; struct msm_stream *stream; + struct msm_session *session; unsigned long flags; struct vb2_v4l2_buffer *vb2_v4l2_buf; + session = msm_get_session_from_vb2q(q); + if (IS_ERR_OR_NULL(session)) + return; + + read_lock(&session->stream_rwlock); + stream = msm_get_stream_from_vb2q(q); if (!stream) { pr_err_ratelimited("%s:%d] NULL stream", __func__, __LINE__); + read_unlock(&session->stream_rwlock); return; } @@ -142,8 +176,28 @@ static void msm_vb2_stop_stream(struct vb2_queue *q) msm_vb2->in_freeq = 0; } spin_unlock_irqrestore(&stream->stream_lock, flags); + read_unlock(&session->stream_rwlock); } +int msm_vb2_get_stream_state(struct msm_stream *stream) +{ + struct msm_vb2_buffer *msm_vb2, *temp; + unsigned long flags; + int rc = 1; + + spin_lock_irqsave(&stream->stream_lock, flags); + list_for_each_entry_safe(msm_vb2, temp, &(stream->queued_list), list) { + if (msm_vb2->in_freeq != 0) { + rc = 0; + break; + } + } + spin_unlock_irqrestore(&stream->stream_lock, flags); + return rc; +} +EXPORT_SYMBOL(msm_vb2_get_stream_state); + + static struct vb2_ops 
msm_vb2_get_q_op = { .queue_setup = msm_vb2_queue_setup, .buf_init = msm_vb2_buf_init, @@ -199,13 +253,22 @@ static struct vb2_v4l2_buffer *msm_vb2_get_buf(int session_id, { struct msm_stream *stream; struct vb2_v4l2_buffer *vb2_v4l2_buf = NULL; + struct msm_session *session; struct msm_vb2_buffer *msm_vb2 = NULL; unsigned long flags; - stream = msm_get_stream(session_id, stream_id); - if (IS_ERR_OR_NULL(stream)) + session = msm_get_session(session_id); + if (IS_ERR_OR_NULL(session)) return NULL; + read_lock(&session->stream_rwlock); + + stream = msm_get_stream(session, stream_id); + if (IS_ERR_OR_NULL(stream)) { + read_unlock(&session->stream_rwlock); + return NULL; + } + spin_lock_irqsave(&stream->stream_lock, flags); if (!stream->vb2_q) { @@ -228,6 +291,7 @@ static struct vb2_v4l2_buffer *msm_vb2_get_buf(int session_id, vb2_v4l2_buf = NULL; end: spin_unlock_irqrestore(&stream->stream_lock, flags); + read_unlock(&session->stream_rwlock); return vb2_v4l2_buf; } @@ -236,13 +300,23 @@ static struct vb2_v4l2_buffer *msm_vb2_get_buf_by_idx(int session_id, { struct msm_stream *stream; struct vb2_v4l2_buffer *vb2_v4l2_buf = NULL; + struct msm_session *session; struct msm_vb2_buffer *msm_vb2 = NULL; unsigned long flags; - stream = msm_get_stream(session_id, stream_id); - if (IS_ERR_OR_NULL(stream)) + session = msm_get_session(session_id); + if (IS_ERR_OR_NULL(session)) return NULL; + read_lock(&session->stream_rwlock); + + stream = msm_get_stream(session, stream_id); + + if (IS_ERR_OR_NULL(stream)) { + read_unlock(&session->stream_rwlock); + return NULL; + } + spin_lock_irqsave(&stream->stream_lock, flags); if (!stream->vb2_q) { @@ -263,6 +337,7 @@ static struct vb2_v4l2_buffer *msm_vb2_get_buf_by_idx(int session_id, vb2_v4l2_buf = NULL; end: spin_unlock_irqrestore(&stream->stream_lock, flags); + read_unlock(&session->stream_rwlock); return vb2_v4l2_buf; } @@ -270,15 +345,24 @@ static int msm_vb2_put_buf(struct vb2_v4l2_buffer *vb, int session_id, unsigned int stream_id) { struct msm_stream *stream; + struct msm_session *session; struct msm_vb2_buffer *msm_vb2; struct vb2_v4l2_buffer *vb2_v4l2_buf = NULL; int rc = 0; unsigned long flags; - stream = msm_get_stream(session_id, stream_id); - if (IS_ERR_OR_NULL(stream)) + session = msm_get_session(session_id); + if (IS_ERR_OR_NULL(session)) return -EINVAL; + read_lock(&session->stream_rwlock); + + stream = msm_get_stream(session, stream_id); + if (IS_ERR_OR_NULL(stream)) { + read_unlock(&session->stream_rwlock); + return -EINVAL; + } + spin_lock_irqsave(&stream->stream_lock, flags); if (vb) { list_for_each_entry(msm_vb2, &(stream->queued_list), list) { @@ -306,6 +390,7 @@ static int msm_vb2_put_buf(struct vb2_v4l2_buffer *vb, int session_id, rc = -EINVAL; } spin_unlock_irqrestore(&stream->stream_lock, flags); + read_unlock(&session->stream_rwlock); return rc; } @@ -317,11 +402,21 @@ static int msm_vb2_buf_done(struct vb2_v4l2_buffer *vb, int session_id, struct msm_vb2_buffer *msm_vb2; struct msm_stream *stream; struct vb2_v4l2_buffer *vb2_v4l2_buf = NULL; + struct msm_session *session; int rc = 0; - stream = msm_get_stream(session_id, stream_id); - if (IS_ERR_OR_NULL(stream)) + session = msm_get_session(session_id); + if (IS_ERR_OR_NULL(session)) return -EINVAL; + + read_lock(&session->stream_rwlock); + + stream = msm_get_stream(session, stream_id); + if (IS_ERR_OR_NULL(stream)) { + read_unlock(&session->stream_rwlock); + return -EINVAL; + } + spin_lock_irqsave(&stream->stream_lock, flags); if (vb) { list_for_each_entry(msm_vb2, 
&(stream->queued_list), list) { @@ -353,6 +448,7 @@ static int msm_vb2_buf_done(struct vb2_v4l2_buffer *vb, int session_id, rc = -EINVAL; } spin_unlock_irqrestore(&stream->stream_lock, flags); + read_unlock(&session->stream_rwlock); return rc; } @@ -361,14 +457,23 @@ long msm_vb2_return_buf_by_idx(int session_id, unsigned int stream_id, { struct msm_stream *stream; struct vb2_v4l2_buffer *vb2_v4l2_buf = NULL; + struct msm_session *session; struct msm_vb2_buffer *msm_vb2 = NULL; unsigned long flags; long rc = -EINVAL; - stream = msm_get_stream(session_id, stream_id); - if (IS_ERR_OR_NULL(stream)) + session = msm_get_session(session_id); + if (IS_ERR_OR_NULL(session)) return rc; + read_lock(&session->stream_rwlock); + + stream = msm_get_stream(session, stream_id); + if (IS_ERR_OR_NULL(stream)) { + read_unlock(&session->stream_rwlock); + return -EINVAL; + } + spin_lock_irqsave(&stream->stream_lock, flags); if (!stream->vb2_q) { @@ -394,6 +499,7 @@ long msm_vb2_return_buf_by_idx(int session_id, unsigned int stream_id, end: spin_unlock_irqrestore(&stream->stream_lock, flags); + read_unlock(&session->stream_rwlock); return rc; } EXPORT_SYMBOL(msm_vb2_return_buf_by_idx); @@ -404,10 +510,20 @@ static int msm_vb2_flush_buf(int session_id, unsigned int stream_id) struct msm_vb2_buffer *msm_vb2; struct msm_stream *stream; struct vb2_v4l2_buffer *vb2_v4l2_buf = NULL; + struct msm_session *session; - stream = msm_get_stream(session_id, stream_id); - if (IS_ERR_OR_NULL(stream)) + session = msm_get_session(session_id); + if (IS_ERR_OR_NULL(session)) return -EINVAL; + + read_lock(&session->stream_rwlock); + + stream = msm_get_stream(session, stream_id); + if (IS_ERR_OR_NULL(stream)) { + read_unlock(&session->stream_rwlock); + return -EINVAL; + } + spin_lock_irqsave(&stream->stream_lock, flags); list_for_each_entry(msm_vb2, &(stream->queued_list), list) { vb2_v4l2_buf = &(msm_vb2->vb2_v4l2_buf); @@ -416,6 +532,7 @@ static int msm_vb2_flush_buf(int session_id, unsigned int stream_id) msm_vb2->in_freeq = 0; } spin_unlock_irqrestore(&stream->stream_lock, flags); + read_unlock(&session->stream_rwlock); return 0; } diff --git a/drivers/media/platform/msm/ais/msm_vb2/msm_vb2.h b/drivers/media/platform/msm/ais/msm_vb2/msm_vb2.h index 3dbb21332857..0f57112e82f2 100644 --- a/drivers/media/platform/msm/ais/msm_vb2/msm_vb2.h +++ b/drivers/media/platform/msm/ais/msm_vb2/msm_vb2.h @@ -68,5 +68,6 @@ struct vb2_mem_ops *msm_vb2_get_q_mem_ops(void); int msm_vb2_request_cb(struct msm_sd_req_vb2_q *req_sd); long msm_vb2_return_buf_by_idx(int session_id, unsigned int stream_id, uint32_t index); +int msm_vb2_get_stream_state(struct msm_stream *stream); #endif /*_MSM_VB_H */ diff --git a/drivers/media/platform/msm/ais/pproc/cpp/msm_cpp.c b/drivers/media/platform/msm/ais/pproc/cpp/msm_cpp.c index 811ac98beead..6c50070c91ab 100644 --- a/drivers/media/platform/msm/ais/pproc/cpp/msm_cpp.c +++ b/drivers/media/platform/msm/ais/pproc/cpp/msm_cpp.c @@ -2882,7 +2882,7 @@ end: return rc; } -static int msm_cpp_validate_input(unsigned int cmd, void *arg, +static int msm_cpp_validate_ioctl_input(unsigned int cmd, void *arg, struct msm_camera_v4l2_ioctl_t **ioctl_ptr) { switch (cmd) { @@ -2922,6 +2922,14 @@ long msm_cpp_subdev_ioctl(struct v4l2_subdev *sd, pr_err("sd %pK\n", sd); return -EINVAL; } + + + rc = msm_cpp_validate_ioctl_input(cmd, arg, &ioctl_ptr); + if (rc != 0) { + pr_err("input validation failed\n"); + return rc; + } + cpp_dev = v4l2_get_subdevdata(sd); if (cpp_dev == NULL) { pr_err("cpp_dev is null\n"); @@ -2933,11 +2941,6 @@ 
long msm_cpp_subdev_ioctl(struct v4l2_subdev *sd, return -EINVAL; } - rc = msm_cpp_validate_input(cmd, arg, &ioctl_ptr); - if (rc != 0) { - pr_err("input validation failed\n"); - return rc; - } mutex_lock(&cpp_dev->mutex); CPP_DBG("E cmd: 0x%x\n", cmd); @@ -3437,6 +3440,7 @@ STREAM_BUFF_END: } else { pr_err("%s:%d IOMMMU attach triggered in invalid state\n", __func__, __LINE__); + rc = -EINVAL; } break; } @@ -4061,7 +4065,8 @@ static long msm_cpp_subdev_fops_compat_ioctl(struct file *file, default: pr_err_ratelimited("%s: unsupported compat type :%x LOAD %lu\n", __func__, cmd, VIDIOC_MSM_CPP_LOAD_FIRMWARE); - break; + mutex_unlock(&cpp_dev->mutex); + return -EINVAL; } mutex_unlock(&cpp_dev->mutex); @@ -4092,7 +4097,7 @@ static long msm_cpp_subdev_fops_compat_ioctl(struct file *file, default: pr_err_ratelimited("%s: unsupported compat type :%d\n", __func__, cmd); - break; + return -EINVAL; } if (is_copytouser_req) { diff --git a/drivers/media/platform/msm/ais/sensor/actuator/msm_actuator.c b/drivers/media/platform/msm/ais/sensor/actuator/msm_actuator.c index 8df56fe526fe..40806d5a164f 100644 --- a/drivers/media/platform/msm/ais/sensor/actuator/msm_actuator.c +++ b/drivers/media/platform/msm/ais/sensor/actuator/msm_actuator.c @@ -522,7 +522,7 @@ static int32_t msm_actuator_piezo_move_focus( CDBG("Enter\n"); if (copy_from_user(&ringing_params_kernel, - &(move_params->ringing_params[0]), + (void __user *)&(move_params->ringing_params[0]), sizeof(struct damping_params_t))) { pr_err("copy_from_user failed\n"); return -EFAULT; @@ -612,7 +612,7 @@ static int32_t msm_actuator_move_focus( return -EFAULT; } if (copy_from_user(ringing_params_kernel, - &(move_params->ringing_params[0]), + (void __user *)&(move_params->ringing_params[0]), (sizeof(struct damping_params_t))*(a_ctrl->region_size))) { pr_err("copy_from_user failed\n"); /* Free the allocated memory for damping parameters */ @@ -732,7 +732,7 @@ static int32_t msm_actuator_bivcm_move_focus( return -EFAULT; } if (copy_from_user(ringing_params_kernel, - &(move_params->ringing_params[0]), + (void __user *)&(move_params->ringing_params[0]), (sizeof(struct damping_params_t))*(a_ctrl->region_size))) { pr_err("copy_from_user failed\n"); /* Free the allocated memory for damping parameters */ @@ -1289,7 +1289,7 @@ static int32_t msm_actuator_set_param(struct msm_actuator_ctrl_t *a_ctrl, a_ctrl->total_steps = set_info->af_tuning_params.total_steps; if (copy_from_user(&a_ctrl->region_params, - (void *)set_info->af_tuning_params.region_params, + (void __user *)set_info->af_tuning_params.region_params, a_ctrl->region_size * sizeof(struct region_params_t))) return -EFAULT; @@ -1332,7 +1332,7 @@ static int32_t msm_actuator_set_param(struct msm_actuator_ctrl_t *a_ctrl, } if (copy_from_user(&a_ctrl->reg_tbl, - (void *)set_info->actuator_params.reg_tbl_params, + (void __user *)set_info->actuator_params.reg_tbl_params, a_ctrl->reg_tbl_size * sizeof(struct msm_actuator_reg_params_t))) { kfree(a_ctrl->i2c_reg_tbl); @@ -1354,7 +1354,8 @@ static int32_t msm_actuator_set_param(struct msm_actuator_ctrl_t *a_ctrl, return -EFAULT; } if (copy_from_user(init_settings, - (void *)set_info->actuator_params.init_settings, + (void __user *) + set_info->actuator_params.init_settings, set_info->actuator_params.init_setting_size * sizeof(struct reg_settings_t))) { kfree(init_settings); @@ -1411,7 +1412,7 @@ static int msm_actuator_init(struct msm_actuator_ctrl_t *a_ctrl) } static int32_t msm_actuator_config(struct msm_actuator_ctrl_t *a_ctrl, - void __user *argp) + void *argp) { 
struct msm_actuator_cfg_data *cdata = (struct msm_actuator_cfg_data *)argp; @@ -1571,7 +1572,7 @@ static long msm_actuator_subdev_ioctl(struct v4l2_subdev *sd, { int rc; struct msm_actuator_ctrl_t *a_ctrl = v4l2_get_subdevdata(sd); - void __user *argp = (void __user *)arg; + void *argp = arg; CDBG("Enter\n"); CDBG("%s:%d a_ctrl %pK argp %pK\n", __func__, __LINE__, a_ctrl, argp); @@ -1721,6 +1722,10 @@ static long msm_actuator_subdev_do_ioctl( parg = &actuator_data; break; } + break; + case VIDIOC_MSM_ACTUATOR_CFG: + pr_err("%s: invalid cmd 0x%x received\n", __func__, cmd); + return -EINVAL; } rc = msm_actuator_subdev_ioctl(sd, cmd, parg); diff --git a/drivers/media/platform/msm/ais/sensor/cci/Makefile b/drivers/media/platform/msm/ais/sensor/cci/Makefile index 3942508c0d66..b8b8c83bc6de 100644 --- a/drivers/media/platform/msm/ais/sensor/cci/Makefile +++ b/drivers/media/platform/msm/ais/sensor/cci/Makefile @@ -2,3 +2,4 @@ ccflags-y += -Idrivers/media/platform/msm/ais ccflags-y += -Idrivers/media/platform/msm/ais/common ccflags-y += -Idrivers/media/platform/msm/ais/sensor/io obj-$(CONFIG_MSM_AIS) += msm_cci.o +obj-$(CONFIG_MSM_AIS) += msm_early_cam.o diff --git a/drivers/media/platform/msm/ais/sensor/cci/msm_early_cam.c b/drivers/media/platform/msm/ais/sensor/cci/msm_early_cam.c new file mode 100644 index 000000000000..00ec613e7303 --- /dev/null +++ b/drivers/media/platform/msm/ais/sensor/cci/msm_early_cam.c @@ -0,0 +1,279 @@ +/* Copyright (c) 2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include +#include +#include +#include +#include +#include +#include "msm_sd.h" +#include "msm_early_cam.h" +#include "msm_cam_cci_hwreg.h" +#include "msm_camera_io_util.h" +#include "msm_camera_dt_util.h" +#include "cam_hw_ops.h" + +#undef CDBG +#define CDBG(fmt, args...) pr_debug(fmt, ##args) + +#undef EARLY_CAM_DBG +#ifdef MSM_EARLY_CAM_DEBUG +#define EARLY_CAM_DBG(fmt, args...) pr_err(fmt, ##args) +#else +#define EARLY_CAM_DBG(fmt, args...) 
pr_debug(fmt, ##args) +#endif + +#define MSM_EARLY_CAM_DRV_NAME "msm_early_cam" +static struct platform_driver msm_early_camera_driver; +static struct early_cam_device *new_early_cam_dev; + +int msm_early_cam_disable_clocks(void) +{ + int rc = 0; + + CDBG("%s:\n", __func__); + /* Vote OFF for clocks */ + if (new_early_cam_dev == NULL) { + rc = -EINVAL; + pr_err("%s: clock structure uninitialised %d\n", __func__, + rc); + return rc; + } + + if ((new_early_cam_dev->pdev == NULL) || + (new_early_cam_dev->early_cam_clk_info == NULL) || + (new_early_cam_dev->early_cam_clk == NULL) || + (new_early_cam_dev->num_clk == 0)) { + rc = -EINVAL; + pr_err("%s: Clock details uninitialised %d\n", __func__, + rc); + return rc; + } + + rc = msm_camera_clk_enable(&new_early_cam_dev->pdev->dev, + new_early_cam_dev->early_cam_clk_info, + new_early_cam_dev->early_cam_clk, + new_early_cam_dev->num_clk, false); + if (rc < 0) { + pr_err("%s: clk disable failed %d\n", __func__, rc); + return rc; + } + + rc = cam_config_ahb_clk(NULL, 0, CAM_AHB_CLIENT_CSIPHY, + CAM_AHB_SUSPEND_VOTE); + if (rc < 0) { + pr_err("%s: failed to vote OFF AHB_CLIENT_CSIPHY %d\n", + __func__, rc); + return rc; + } + + rc = cam_config_ahb_clk(NULL, 0, CAM_AHB_CLIENT_CSID, + CAM_AHB_SUSPEND_VOTE); + if (rc < 0) { + pr_err("%s: failed to vote OFF AHB_CLIENT_CSID %d\n", + __func__, rc); + return rc; + } + + rc = cam_config_ahb_clk(NULL, 0, CAM_AHB_CLIENT_CCI, + CAM_AHB_SUSPEND_VOTE); + if (rc < 0) { + pr_err("%s: failed to vote OFF AHB_CLIENT_CCI %d\n", + __func__, rc); + return rc; + } + + rc = cam_config_ahb_clk(NULL, 0, CAM_AHB_CLIENT_ISPIF, + CAM_AHB_SUSPEND_VOTE); + if (rc < 0) { + pr_err("%s: failed to vote OFF AHB_CLIENT_ISPIF %d\n", + __func__, rc); + return rc; + } + + rc = cam_config_ahb_clk(NULL, 0, CAM_AHB_CLIENT_VFE0, + CAM_AHB_SUSPEND_VOTE); + if (rc < 0) { + pr_err("%s: failed to vote OFF AHB_CLIENT_VFE0 %d\n", + __func__, rc); + return rc; + } + pr_debug("Turned OFF camera clocks\n"); + return 0; + +} +static int msm_early_cam_probe(struct platform_device *pdev) +{ + int rc = 0; + + CDBG("%s: pdev %pK device id = %d\n", __func__, pdev, pdev->id); + + /* Vote for Early camera if enabled */ + rc = cam_config_ahb_clk(NULL, 0, CAM_AHB_CLIENT_CSIPHY, + CAM_AHB_SVS_VOTE); + if (rc < 0) { + pr_err("%s: failed to vote for AHB\n", __func__); + return rc; + } + + rc = cam_config_ahb_clk(NULL, 0, CAM_AHB_CLIENT_CSID, + CAM_AHB_SVS_VOTE); + if (rc < 0) { + pr_err("%s: failed to vote for AHB\n", __func__); + return rc; + } + + rc = cam_config_ahb_clk(NULL, 0, CAM_AHB_CLIENT_CCI, + CAM_AHB_SVS_VOTE); + if (rc < 0) { + pr_err("%s: failed to vote for AHB\n", __func__); + return rc; + } + + rc = cam_config_ahb_clk(NULL, 0, CAM_AHB_CLIENT_ISPIF, + CAM_AHB_SVS_VOTE); + if (rc < 0) { + pr_err("%s: failed to vote for AHB\n", __func__); + return rc; + } + + rc = cam_config_ahb_clk(NULL, 0, CAM_AHB_CLIENT_VFE0, + CAM_AHB_SVS_VOTE); + if (rc < 0) { + pr_err("%s: failed to vote for AHB\n", __func__); + return rc; + } + + new_early_cam_dev = kzalloc(sizeof(struct early_cam_device), + GFP_KERNEL); + if (!new_early_cam_dev) + return -ENOMEM; + + if (pdev->dev.of_node) + of_property_read_u32((&pdev->dev)->of_node, + "cell-index", &pdev->id); + + rc = msm_camera_get_clk_info_and_rates(pdev, + &new_early_cam_dev->early_cam_clk_info, + &new_early_cam_dev->early_cam_clk, + &new_early_cam_dev->early_cam_clk_rates, + &new_early_cam_dev->num_clk_cases, + &new_early_cam_dev->num_clk); + if (rc < 0) { + pr_err("%s: msm_early_cam_get_clk_info() failed", __func__); + 
kfree(new_early_cam_dev); + return -EFAULT; + } + + new_early_cam_dev->ref_count = 0; + new_early_cam_dev->pdev = pdev; + + rc = msm_camera_get_dt_vreg_data( + new_early_cam_dev->pdev->dev.of_node, + &(new_early_cam_dev->early_cam_vreg), + &(new_early_cam_dev->regulator_count)); + if (rc < 0) { + pr_err("%s: msm_camera_get_dt_vreg_data fail\n", __func__); + rc = -EFAULT; + goto early_cam_release_mem; + } + + if ((new_early_cam_dev->regulator_count < 0) || + (new_early_cam_dev->regulator_count > MAX_REGULATOR)) { + pr_err("%s: invalid reg count = %d, max is %d\n", __func__, + new_early_cam_dev->regulator_count, MAX_REGULATOR); + rc = -EFAULT; + goto early_cam_invalid_vreg_data; + } + + rc = msm_camera_config_vreg(&new_early_cam_dev->pdev->dev, + new_early_cam_dev->early_cam_vreg, + new_early_cam_dev->regulator_count, + NULL, + 0, + &new_early_cam_dev->early_cam_reg_ptr[0], 1); + if (rc < 0) + pr_err("%s:%d early_cam config_vreg failed\n", __func__, + __LINE__); + + rc = msm_camera_enable_vreg(&new_early_cam_dev->pdev->dev, + new_early_cam_dev->early_cam_vreg, + new_early_cam_dev->regulator_count, + NULL, + 0, + &new_early_cam_dev->early_cam_reg_ptr[0], 1); + if (rc < 0) + pr_err("%s:%d early_cam enable_vreg failed\n", __func__, + __LINE__); + + rc = msm_camera_clk_enable(&new_early_cam_dev->pdev->dev, + new_early_cam_dev->early_cam_clk_info, + new_early_cam_dev->early_cam_clk, + new_early_cam_dev->num_clk, true); + + if (rc < 0) { + pr_err("%s: clk enable failed %d\n", __func__, rc); + rc = 0; + goto early_cam_release_mem; + } + + platform_set_drvdata(pdev, new_early_cam_dev); + + return 0; + +early_cam_invalid_vreg_data: + kfree(new_early_cam_dev->early_cam_vreg); +early_cam_release_mem: + kfree(new_early_cam_dev); + new_early_cam_dev = NULL; + return rc; +} + +static int msm_early_cam_exit(struct platform_device *pdev) +{ + return 0; +} + +static int __init msm_early_cam_init_module(void) +{ + return platform_driver_register(&msm_early_camera_driver); +} + +static void __exit msm_early_cam_exit_module(void) +{ + kfree(new_early_cam_dev); + platform_driver_unregister(&msm_early_camera_driver); +} + +static const struct of_device_id msm_early_camera_match_table[] = { + { .compatible = "qcom,early-cam" }, + {}, +}; + +static struct platform_driver msm_early_camera_driver = { + .probe = msm_early_cam_probe, + .remove = msm_early_cam_exit, + .driver = { + .name = MSM_EARLY_CAM_DRV_NAME, + .owner = THIS_MODULE, + .of_match_table = msm_early_camera_match_table, + }, +}; + +MODULE_DEVICE_TABLE(of, msm_early_camera_match_table); + +module_init(msm_early_cam_init_module); +module_exit(msm_early_cam_exit_module); +MODULE_DESCRIPTION("MSM early camera driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/media/platform/msm/ais/sensor/cci/msm_early_cam.h b/drivers/media/platform/msm/ais/sensor/cci/msm_early_cam.h new file mode 100644 index 000000000000..a40ab2d1ebc3 --- /dev/null +++ b/drivers/media/platform/msm/ais/sensor/cci/msm_early_cam.h @@ -0,0 +1,53 @@ +/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef MSM_EARLY_CAM_H +#define MSM_EARLY_CAM_H + +#include +#include +#include +#include +#include +#include +#include +#include "msm_sd.h" +#include "cam_soc_api.h" + +#define NUM_MASTERS 2 +#define NUM_QUEUES 2 + +#define TRUE 1 +#define FALSE 0 + + +enum msm_early_cam_state_t { + STATE_DISABLED, + STATE_ENABLED, +}; + +struct early_cam_device { + struct platform_device *pdev; + uint8_t ref_count; + enum msm_early_cam_state_t early_cam_state; + size_t num_clk; + size_t num_clk_cases; + struct clk **early_cam_clk; + uint32_t **early_cam_clk_rates; + struct msm_cam_clk_info *early_cam_clk_info; + struct camera_vreg_t *early_cam_vreg; + struct regulator *early_cam_reg_ptr[MAX_REGULATOR]; + int32_t regulator_count; +}; + +int msm_early_cam_disable_clocks(void); +#endif diff --git a/drivers/media/platform/msm/ais/sensor/csid/include/msm_csid_2_0_hwreg.h b/drivers/media/platform/msm/ais/sensor/csid/include/msm_csid_2_0_hwreg.h index f88c0ef82499..f55e6c344ef1 100644 --- a/drivers/media/platform/msm/ais/sensor/csid/include/msm_csid_2_0_hwreg.h +++ b/drivers/media/platform/msm/ais/sensor/csid/include/msm_csid_2_0_hwreg.h @@ -15,9 +15,9 @@ #include -uint8_t csid_lane_assign_v2_0[PHY_LANE_MAX] = {0, 1, 2, 3, 4}; +static uint8_t csid_lane_assign_v2_0[PHY_LANE_MAX] = {0, 1, 2, 3, 4}; -struct csid_reg_parms_t csid_v2_0 = { +static struct csid_reg_parms_t csid_v2_0 = { /* MIPI CSID registers */ 0x0, diff --git a/drivers/media/platform/msm/ais/sensor/csid/include/msm_csid_2_2_hwreg.h b/drivers/media/platform/msm/ais/sensor/csid/include/msm_csid_2_2_hwreg.h index e2bb6cd499ff..9ba3555ff01f 100644 --- a/drivers/media/platform/msm/ais/sensor/csid/include/msm_csid_2_2_hwreg.h +++ b/drivers/media/platform/msm/ais/sensor/csid/include/msm_csid_2_2_hwreg.h @@ -15,9 +15,9 @@ #include -uint8_t csid_lane_assign_v2_2[PHY_LANE_MAX] = {0, 1, 2, 3, 4}; +static uint8_t csid_lane_assign_v2_2[PHY_LANE_MAX] = {0, 1, 2, 3, 4}; -struct csid_reg_parms_t csid_v2_2 = { +static struct csid_reg_parms_t csid_v2_2 = { /* MIPI CSID registers */ 0x0, 0x4, diff --git a/drivers/media/platform/msm/ais/sensor/csid/include/msm_csid_3_0_hwreg.h b/drivers/media/platform/msm/ais/sensor/csid/include/msm_csid_3_0_hwreg.h index 440f869692f7..c75c4167453c 100644 --- a/drivers/media/platform/msm/ais/sensor/csid/include/msm_csid_3_0_hwreg.h +++ b/drivers/media/platform/msm/ais/sensor/csid/include/msm_csid_3_0_hwreg.h @@ -15,9 +15,9 @@ #include -uint8_t csid_lane_assign_v3_0[PHY_LANE_MAX] = {0, 1, 2, 3, 4}; +static uint8_t csid_lane_assign_v3_0[PHY_LANE_MAX] = {0, 1, 2, 3, 4}; -struct csid_reg_parms_t csid_v3_0 = { +static struct csid_reg_parms_t csid_v3_0 = { /* MIPI CSID registers */ 0x0, 0x4, diff --git a/drivers/media/platform/msm/ais/sensor/csid/include/msm_csid_3_1_hwreg.h b/drivers/media/platform/msm/ais/sensor/csid/include/msm_csid_3_1_hwreg.h index dde47046b679..dc71f39a38f1 100644 --- a/drivers/media/platform/msm/ais/sensor/csid/include/msm_csid_3_1_hwreg.h +++ b/drivers/media/platform/msm/ais/sensor/csid/include/msm_csid_3_1_hwreg.h @@ -15,9 +15,9 @@ #include -uint8_t csid_lane_assign_v3_1[PHY_LANE_MAX] = {0, 1, 2, 3, 4}; +static uint8_t csid_lane_assign_v3_1[PHY_LANE_MAX] = {0, 1, 2, 3, 4}; -struct csid_reg_parms_t csid_v3_1 = { +static struct csid_reg_parms_t csid_v3_1 = { /* MIPI CSID registers */ 0x0, 0x4, diff --git a/drivers/media/platform/msm/ais/sensor/csid/include/msm_csid_3_2_hwreg.h b/drivers/media/platform/msm/ais/sensor/csid/include/msm_csid_3_2_hwreg.h index 5241a90fbc86..00085dbf94a0 100644 --- 
a/drivers/media/platform/msm/ais/sensor/csid/include/msm_csid_3_2_hwreg.h +++ b/drivers/media/platform/msm/ais/sensor/csid/include/msm_csid_3_2_hwreg.h @@ -15,9 +15,9 @@ #include -uint8_t csid_lane_assign_v3_2[PHY_LANE_MAX] = {0, 1, 2, 3, 4}; +static uint8_t csid_lane_assign_v3_2[PHY_LANE_MAX] = {0, 1, 2, 3, 4}; -struct csid_reg_parms_t csid_v3_2 = { +static struct csid_reg_parms_t csid_v3_2 = { /* MIPI CSID registers */ 0x0, 0x4, diff --git a/drivers/media/platform/msm/ais/sensor/csid/include/msm_csid_3_4_1_hwreg.h b/drivers/media/platform/msm/ais/sensor/csid/include/msm_csid_3_4_1_hwreg.h index 0e8ff6c0986d..1d465b66b33f 100644 --- a/drivers/media/platform/msm/ais/sensor/csid/include/msm_csid_3_4_1_hwreg.h +++ b/drivers/media/platform/msm/ais/sensor/csid/include/msm_csid_3_4_1_hwreg.h @@ -14,9 +14,9 @@ #define MSM_CSID_3_4_1_HWREG_H #include -uint8_t csid_lane_assign_v3_4_1[PHY_LANE_MAX] = {0, 1, 2, 3, 4}; +static uint8_t csid_lane_assign_v3_4_1[PHY_LANE_MAX] = {0, 1, 2, 3, 4}; -struct csid_reg_parms_t csid_v3_4_1 = { +static struct csid_reg_parms_t csid_v3_4_1 = { /* MIPI CSID registers */ 0x0, 0x4, diff --git a/drivers/media/platform/msm/ais/sensor/csid/include/msm_csid_3_4_2_hwreg.h b/drivers/media/platform/msm/ais/sensor/csid/include/msm_csid_3_4_2_hwreg.h index 651526cb3db8..d78e68e090e7 100644 --- a/drivers/media/platform/msm/ais/sensor/csid/include/msm_csid_3_4_2_hwreg.h +++ b/drivers/media/platform/msm/ais/sensor/csid/include/msm_csid_3_4_2_hwreg.h @@ -15,8 +15,8 @@ #include -uint8_t csid_lane_assign_v3_4_2[PHY_LANE_MAX] = {0, 4, 1, 2, 3}; -struct csid_reg_parms_t csid_v3_4_2 = { +static uint8_t csid_lane_assign_v3_4_2[PHY_LANE_MAX] = {0, 4, 1, 2, 3}; +static struct csid_reg_parms_t csid_v3_4_2 = { /* MIPI CSID registers */ 0x0, 0x4, diff --git a/drivers/media/platform/msm/ais/sensor/csid/include/msm_csid_3_4_3_hwreg.h b/drivers/media/platform/msm/ais/sensor/csid/include/msm_csid_3_4_3_hwreg.h index fff29fc9d4c4..bbf4b287ffe4 100644 --- a/drivers/media/platform/msm/ais/sensor/csid/include/msm_csid_3_4_3_hwreg.h +++ b/drivers/media/platform/msm/ais/sensor/csid/include/msm_csid_3_4_3_hwreg.h @@ -15,8 +15,8 @@ #include -uint8_t csid_lane_assign_v3_4_3[PHY_LANE_MAX] = {0, 4, 1, 2, 3}; -struct csid_reg_parms_t csid_v3_4_3 = { +static uint8_t csid_lane_assign_v3_4_3[PHY_LANE_MAX] = {0, 4, 1, 2, 3}; +static struct csid_reg_parms_t csid_v3_4_3 = { /* MIPI CSID registers */ 0x0, 0x4, diff --git a/drivers/media/platform/msm/ais/sensor/csid/include/msm_csid_3_5_1_hwreg.h b/drivers/media/platform/msm/ais/sensor/csid/include/msm_csid_3_5_1_hwreg.h index f7d7d3548c4b..534ef3f5533c 100644 --- a/drivers/media/platform/msm/ais/sensor/csid/include/msm_csid_3_5_1_hwreg.h +++ b/drivers/media/platform/msm/ais/sensor/csid/include/msm_csid_3_5_1_hwreg.h @@ -15,9 +15,9 @@ #include -uint8_t csid_lane_assign_v3_5_1[PHY_LANE_MAX] = {0, 4, 1, 2, 3}; +static uint8_t csid_lane_assign_v3_5_1[PHY_LANE_MAX] = {0, 4, 1, 2, 3}; -struct csid_reg_parms_t csid_v3_5_1 = { +static struct csid_reg_parms_t csid_v3_5_1 = { /* MIPI CSID registers */ 0x0, 0x4, diff --git a/drivers/media/platform/msm/ais/sensor/csid/include/msm_csid_3_5_hwreg.h b/drivers/media/platform/msm/ais/sensor/csid/include/msm_csid_3_5_hwreg.h index b423b6e510a0..392d902d3e0c 100644 --- a/drivers/media/platform/msm/ais/sensor/csid/include/msm_csid_3_5_hwreg.h +++ b/drivers/media/platform/msm/ais/sensor/csid/include/msm_csid_3_5_hwreg.h @@ -15,9 +15,9 @@ #include -uint8_t csid_lane_assign_v3_5[PHY_LANE_MAX] = {0, 4, 1, 2, 3}; +static uint8_t 
csid_lane_assign_v3_5[PHY_LANE_MAX] = {0, 4, 1, 2, 3}; -struct csid_reg_parms_t csid_v3_5 = { +static struct csid_reg_parms_t csid_v3_5 = { /* MIPI CSID registers */ 0x0, 0x4, diff --git a/drivers/media/platform/msm/ais/sensor/csid/include/msm_csid_3_6_0_hwreg.h b/drivers/media/platform/msm/ais/sensor/csid/include/msm_csid_3_6_0_hwreg.h index b95a774ca737..6722974f889b 100644 --- a/drivers/media/platform/msm/ais/sensor/csid/include/msm_csid_3_6_0_hwreg.h +++ b/drivers/media/platform/msm/ais/sensor/csid/include/msm_csid_3_6_0_hwreg.h @@ -15,8 +15,8 @@ #include -uint8_t csid_lane_assign_v3_6_0[PHY_LANE_MAX] = {0, 1, 2, 3, 4}; -struct csid_reg_parms_t csid_v3_6_0 = { +static uint8_t csid_lane_assign_v3_6_0[PHY_LANE_MAX] = {0, 1, 2, 3, 4}; +static struct csid_reg_parms_t csid_v3_6_0 = { /* MIPI CSID registers */ 0x0, 0x4, diff --git a/drivers/media/platform/msm/ais/sensor/csid/msm_csid.c b/drivers/media/platform/msm/ais/sensor/csid/msm_csid.c index 331ba939adfa..2b3eefa65606 100644 --- a/drivers/media/platform/msm/ais/sensor/csid/msm_csid.c +++ b/drivers/media/platform/msm/ais/sensor/csid/msm_csid.c @@ -708,7 +708,7 @@ static int msm_csid_release(struct csid_device *csid_dev) return 0; } -static int32_t msm_csid_cmd(struct csid_device *csid_dev, void __user *arg) +static int32_t msm_csid_cmd(struct csid_device *csid_dev, void *arg) { int rc = 0; struct csid_cfg_data *cdata = (struct csid_cfg_data *)arg; @@ -728,7 +728,7 @@ static int32_t msm_csid_cmd(struct csid_device *csid_dev, void __user *arg) case CSID_TESTMODE_CFG: { csid_dev->is_testmode = 1; if (copy_from_user(&csid_dev->testmode_params, - (void *)cdata->cfg.csid_testmode_params, + (void __user *)cdata->cfg.csid_testmode_params, sizeof(struct msm_camera_csid_testmode_parms))) { pr_err("%s: %d failed\n", __func__, __LINE__); rc = -EFAULT; @@ -741,7 +741,7 @@ static int32_t msm_csid_cmd(struct csid_device *csid_dev, void __user *arg) int i = 0; if (copy_from_user(&csid_params, - (void *)cdata->cfg.csid_params, + (void __user *)cdata->cfg.csid_params, sizeof(struct msm_camera_csid_params))) { pr_err("%s: %d failed\n", __func__, __LINE__); rc = -EFAULT; @@ -790,7 +790,7 @@ static int32_t msm_csid_cmd(struct csid_device *csid_dev, void __user *arg) int i = 0; if (copy_from_user(&csid_params, - (void *)cdata->cfg.csid_params, + (void __user *)cdata->cfg.csid_params, sizeof(struct msm_camera_csid_params))) { pr_err("%s: %d failed\n", __func__, __LINE__); rc = -EFAULT; @@ -892,7 +892,7 @@ static long msm_csid_subdev_ioctl(struct v4l2_subdev *sd, #ifdef CONFIG_COMPAT -static int32_t msm_csid_cmd32(struct csid_device *csid_dev, void __user *arg) +static int32_t msm_csid_cmd32(struct csid_device *csid_dev, void *arg) { int rc = 0; struct csid_cfg_data32 *arg32 = (struct csid_cfg_data32 *) (arg); @@ -913,7 +913,8 @@ static int32_t msm_csid_cmd32(struct csid_device *csid_dev, void __user *arg) case CSID_TESTMODE_CFG: { csid_dev->is_testmode = 1; if (copy_from_user(&csid_dev->testmode_params, - (void *)compat_ptr(arg32->cfg.csid_testmode_params), + (void __user *) + compat_ptr(arg32->cfg.csid_testmode_params), sizeof(struct msm_camera_csid_testmode_parms))) { pr_err("%s: %d failed\n", __func__, __LINE__); rc = -EFAULT; @@ -926,7 +927,7 @@ static int32_t msm_csid_cmd32(struct csid_device *csid_dev, void __user *arg) struct msm_camera_csid_params32 csid_params32; if (copy_from_user(&csid_params32, - (void *)compat_ptr(arg32->cfg.csid_params), + (void __user *)compat_ptr(arg32->cfg.csid_params), sizeof(struct msm_camera_csid_params32))) { pr_err("%s: 
%d failed\n", __func__, __LINE__); rc = -EFAULT; @@ -975,7 +976,7 @@ static int32_t msm_csid_cmd32(struct csid_device *csid_dev, void __user *arg) struct msm_camera_csid_params32 csid_params32; if (copy_from_user(&csid_params32, - (void *)compat_ptr(arg32->cfg.csid_params), + (void __user *)compat_ptr(arg32->cfg.csid_params), sizeof(struct msm_camera_csid_params32))) { pr_err("%s: %d failed\n", __func__, __LINE__); rc = -EFAULT; diff --git a/drivers/media/platform/msm/ais/sensor/csiphy/include/msm_csiphy_2_0_hwreg.h b/drivers/media/platform/msm/ais/sensor/csiphy/include/msm_csiphy_2_0_hwreg.h index 618926fa8341..3b377de66a2c 100644 --- a/drivers/media/platform/msm/ais/sensor/csiphy/include/msm_csiphy_2_0_hwreg.h +++ b/drivers/media/platform/msm/ais/sensor/csiphy/include/msm_csiphy_2_0_hwreg.h @@ -15,7 +15,7 @@ #include -struct csiphy_reg_parms_t csiphy_v2_0 = { +static struct csiphy_reg_parms_t csiphy_v2_0 = { /* MIPI CSI PHY registers */ 0x17C, 0x0, diff --git a/drivers/media/platform/msm/ais/sensor/csiphy/include/msm_csiphy_2_2_hwreg.h b/drivers/media/platform/msm/ais/sensor/csiphy/include/msm_csiphy_2_2_hwreg.h index 867aec2e0103..71b07299c342 100644 --- a/drivers/media/platform/msm/ais/sensor/csiphy/include/msm_csiphy_2_2_hwreg.h +++ b/drivers/media/platform/msm/ais/sensor/csiphy/include/msm_csiphy_2_2_hwreg.h @@ -15,7 +15,7 @@ #include -struct csiphy_reg_parms_t csiphy_v2_2 = { +static struct csiphy_reg_parms_t csiphy_v2_2 = { /* MIPI CSI PHY registers */ 0x17C, 0x0, diff --git a/drivers/media/platform/msm/ais/sensor/csiphy/include/msm_csiphy_3_0_hwreg.h b/drivers/media/platform/msm/ais/sensor/csiphy/include/msm_csiphy_3_0_hwreg.h index 69efdcc71499..8846fde0f6ed 100644 --- a/drivers/media/platform/msm/ais/sensor/csiphy/include/msm_csiphy_3_0_hwreg.h +++ b/drivers/media/platform/msm/ais/sensor/csiphy/include/msm_csiphy_3_0_hwreg.h @@ -15,7 +15,7 @@ #include -struct csiphy_reg_parms_t csiphy_v3_0 = { +static struct csiphy_reg_parms_t csiphy_v3_0 = { /* MIPI CSI PHY registers */ 0x0, 0x4, diff --git a/drivers/media/platform/msm/ais/sensor/csiphy/include/msm_csiphy_3_1_hwreg.h b/drivers/media/platform/msm/ais/sensor/csiphy/include/msm_csiphy_3_1_hwreg.h index 7fc74a366a6c..044e16ef3848 100644 --- a/drivers/media/platform/msm/ais/sensor/csiphy/include/msm_csiphy_3_1_hwreg.h +++ b/drivers/media/platform/msm/ais/sensor/csiphy/include/msm_csiphy_3_1_hwreg.h @@ -15,7 +15,7 @@ #include -struct csiphy_reg_parms_t csiphy_v3_1 = { +static struct csiphy_reg_parms_t csiphy_v3_1 = { /* MIPI CSI PHY registers */ 0x0, 0x4, diff --git a/drivers/media/platform/msm/ais/sensor/csiphy/include/msm_csiphy_3_2_hwreg.h b/drivers/media/platform/msm/ais/sensor/csiphy/include/msm_csiphy_3_2_hwreg.h index cdf62d46ee7d..c01f0540dfd2 100644 --- a/drivers/media/platform/msm/ais/sensor/csiphy/include/msm_csiphy_3_2_hwreg.h +++ b/drivers/media/platform/msm/ais/sensor/csiphy/include/msm_csiphy_3_2_hwreg.h @@ -15,7 +15,7 @@ #include -struct csiphy_reg_parms_t csiphy_v3_2 = { +static struct csiphy_reg_parms_t csiphy_v3_2 = { /* MIPI CSI PHY registers */ 0x0, 0x4, diff --git a/drivers/media/platform/msm/ais/sensor/csiphy/include/msm_csiphy_3_4_2_1_hwreg.h b/drivers/media/platform/msm/ais/sensor/csiphy/include/msm_csiphy_3_4_2_1_hwreg.h index 5af1ded189a6..78ac19993fee 100644 --- a/drivers/media/platform/msm/ais/sensor/csiphy/include/msm_csiphy_3_4_2_1_hwreg.h +++ b/drivers/media/platform/msm/ais/sensor/csiphy/include/msm_csiphy_3_4_2_1_hwreg.h @@ -18,14 +18,14 @@ #include -struct csiphy_reg_parms_t csiphy_v3_4_2_1 = { 
+static struct csiphy_reg_parms_t csiphy_v3_4_2_1 = { .mipi_csiphy_interrupt_status0_addr = 0x8B0, .mipi_csiphy_interrupt_clear0_addr = 0x858, .mipi_csiphy_glbl_irq_cmd_addr = 0x828, .combo_clk_mask = 0x10, }; -struct csiphy_reg_3ph_parms_t csiphy_v3_4_2_1_3ph = { +static struct csiphy_reg_3ph_parms_t csiphy_v3_4_2_1_3ph = { /* MIPI CSI PHY registers */ {0x814, 0x0}, {0x818, 0x1}, diff --git a/drivers/media/platform/msm/ais/sensor/csiphy/include/msm_csiphy_3_4_2_hwreg.h b/drivers/media/platform/msm/ais/sensor/csiphy/include/msm_csiphy_3_4_2_hwreg.h index d85dd1ec3a48..e6072e747a63 100644 --- a/drivers/media/platform/msm/ais/sensor/csiphy/include/msm_csiphy_3_4_2_hwreg.h +++ b/drivers/media/platform/msm/ais/sensor/csiphy/include/msm_csiphy_3_4_2_hwreg.h @@ -18,14 +18,14 @@ #include -struct csiphy_reg_parms_t csiphy_v3_4_2 = { +static struct csiphy_reg_parms_t csiphy_v3_4_2 = { .mipi_csiphy_interrupt_status0_addr = 0x8B0, .mipi_csiphy_interrupt_clear0_addr = 0x858, .mipi_csiphy_glbl_irq_cmd_addr = 0x828, .combo_clk_mask = 0x10, }; -struct csiphy_reg_3ph_parms_t csiphy_v3_4_2_3ph = { +static struct csiphy_reg_3ph_parms_t csiphy_v3_4_2_3ph = { /* MIPI CSI PHY registers */ {0x814, 0x0}, {0x818, 0x1}, diff --git a/drivers/media/platform/msm/ais/sensor/csiphy/include/msm_csiphy_3_5_hwreg.h b/drivers/media/platform/msm/ais/sensor/csiphy/include/msm_csiphy_3_5_hwreg.h index 99b725a75c8f..bc70697cce5c 100644 --- a/drivers/media/platform/msm/ais/sensor/csiphy/include/msm_csiphy_3_5_hwreg.h +++ b/drivers/media/platform/msm/ais/sensor/csiphy/include/msm_csiphy_3_5_hwreg.h @@ -18,14 +18,14 @@ #include -struct csiphy_reg_parms_t csiphy_v3_5 = { +static struct csiphy_reg_parms_t csiphy_v3_5 = { .mipi_csiphy_interrupt_status0_addr = 0x8B0, .mipi_csiphy_interrupt_clear0_addr = 0x858, .mipi_csiphy_glbl_irq_cmd_addr = 0x828, .combo_clk_mask = 0x10, }; -struct csiphy_reg_3ph_parms_t csiphy_v3_5_3ph = { +static struct csiphy_reg_3ph_parms_t csiphy_v3_5_3ph = { /* MIPI CSI PHY registers */ {0x814, 0x0}, {0x818, 0x1}, diff --git a/drivers/media/platform/msm/ais/sensor/csiphy/msm_csiphy.c b/drivers/media/platform/msm/ais/sensor/csiphy/msm_csiphy.c index d146cc3d28a5..c3b087f61888 100644 --- a/drivers/media/platform/msm/ais/sensor/csiphy/msm_csiphy.c +++ b/drivers/media/platform/msm/ais/sensor/csiphy/msm_csiphy.c @@ -164,7 +164,7 @@ static int msm_csiphy_3phase_lane_config( mipi_csiphy_3ph_lnn_ctrl1.data, csiphybase + csiphy_dev->ctrl_reg->csiphy_3ph_reg. mipi_csiphy_3ph_lnn_ctrl1.addr + 0x200*i); - msm_camera_io_w(((csiphy_params->settle_cnt >> 8) & 0xff), + msm_camera_io_w(0, csiphybase + csiphy_dev->ctrl_reg->csiphy_3ph_reg. 
mipi_csiphy_3ph_lnn_ctrl2.addr + 0x200*i); msm_camera_io_w((csiphy_params->settle_cnt & 0xff), @@ -648,7 +648,7 @@ static int msm_csiphy_lane_config(struct csiphy_device *csiphy_dev, return rc; } -void msm_csiphy_disable_irq( +static void msm_csiphy_disable_irq( struct csiphy_device *csiphy_dev) { void __iomem *csiphybase; @@ -1207,7 +1207,7 @@ static int32_t msm_csiphy_cmd(struct csiphy_device *csiphy_dev, void *arg) break; case CSIPHY_CFG: if (copy_from_user(&csiphy_params, - (void *)cdata->cfg.csiphy_params, + (void __user *)cdata->cfg.csiphy_params, sizeof(struct msm_camera_csiphy_params))) { pr_err("%s: %d failed\n", __func__, __LINE__); rc = -EFAULT; diff --git a/drivers/media/platform/msm/ais/sensor/flash/msm_flash.c b/drivers/media/platform/msm/ais/sensor/flash/msm_flash.c index b97156cbd486..024677e1b755 100644 --- a/drivers/media/platform/msm/ais/sensor/flash/msm_flash.c +++ b/drivers/media/platform/msm/ais/sensor/flash/msm_flash.c @@ -54,7 +54,7 @@ static struct msm_camera_i2c_fn_t msm_sensor_cci_func_tbl = { .i2c_poll = msm_camera_cci_i2c_poll, }; -void msm_torch_brightness_set(struct led_classdev *led_cdev, +static void msm_torch_brightness_set(struct led_classdev *led_cdev, enum led_brightness value) { if (!torch_trigger) { @@ -152,6 +152,13 @@ static int32_t msm_flash_i2c_write_table( conf_array.reg_setting = settings->reg_setting_a; conf_array.size = settings->size; + /* Validate the settings size */ + if ((!conf_array.size) || (conf_array.size > MAX_I2C_REG_SET)) { + pr_err("failed: invalid size %d", conf_array.size); + return -EINVAL; + } + + return flash_ctrl->flash_i2c_client.i2c_func_tbl->i2c_write_table( &flash_ctrl->flash_i2c_client, &conf_array); } @@ -202,7 +209,7 @@ static int32_t msm_flash_i2c_init( } if (copy_from_user(power_setting_array32, - (void *)flash_init_info->power_setting_array, + (void __user *)flash_init_info->power_setting_array, sizeof(struct msm_sensor_power_setting_array32))) { pr_err("%s copy_from_user failed %d\n", __func__, __LINE__); @@ -248,7 +255,7 @@ static int32_t msm_flash_i2c_init( } else #endif if (copy_from_user(&flash_ctrl->power_setting_array, - (void *)flash_init_info->power_setting_array, + (void __user *)flash_init_info->power_setting_array, sizeof(struct msm_sensor_power_setting_array))) { pr_err("%s copy_from_user failed %d\n", __func__, __LINE__); return -EFAULT; @@ -298,7 +305,8 @@ static int32_t msm_flash_i2c_init( goto msm_flash_i2c_init_fail; } - if (copy_from_user(settings, (void *)flash_init_info->settings, + if (copy_from_user(settings, + (void __user *)flash_init_info->settings, sizeof(struct msm_camera_i2c_reg_setting_array))) { kfree(settings); pr_err("%s copy_from_user failed %d\n", @@ -414,7 +422,7 @@ static int32_t msm_flash_i2c_write_setting_array( if (!settings) return -ENOMEM; - if (copy_from_user(settings, (void *)flash_data->cfg.settings, + if (copy_from_user(settings, (void __user *)flash_data->cfg.settings, sizeof(struct msm_camera_i2c_reg_setting_array))) { kfree(settings); pr_err("%s copy_from_user failed %d\n", __func__, __LINE__); @@ -496,23 +504,46 @@ static int32_t msm_flash_init( } flash_ctrl->flash_state = MSM_CAMERA_FLASH_INIT; - CDBG("Exit"); return 0; } -#ifdef CONFIG_COMPAT static int32_t msm_flash_init_prepare( struct msm_flash_ctrl_t *flash_ctrl, struct msm_flash_cfg_data_t *flash_data) { + #ifdef CONFIG_COMPAT + struct msm_flash_cfg_data_t flash_data_k; + struct msm_flash_init_info_t flash_init_info; + int32_t i = 0; + + if (!is_compat_task()) { + /*for 64-bit usecase,it need copy the data to 
local memory*/ + flash_data_k.cfg_type = flash_data->cfg_type; + for (i = 0; i < MAX_LED_TRIGGERS; i++) { + flash_data_k.flash_current[i] = + flash_data->flash_current[i]; + flash_data_k.flash_duration[i] = + flash_data->flash_duration[i]; + } + + flash_data_k.cfg.flash_init_info = &flash_init_info; + if (copy_from_user(&flash_init_info, + (void __user *)(flash_data->cfg.flash_init_info), + sizeof(struct msm_flash_init_info_t))) { + pr_err("%s copy_from_user failed %d\n", + __func__, __LINE__); + return -EFAULT; + } + return msm_flash_init(flash_ctrl, &flash_data_k); + } + /* + * for 32-bit usecase,it already copy the userspace + * data to local memory in msm_flash_subdev_do_ioctl() + * so here do not need copy from user + */ return msm_flash_init(flash_ctrl, flash_data); -} #else -static int32_t msm_flash_init_prepare( - struct msm_flash_ctrl_t *flash_ctrl, - struct msm_flash_cfg_data_t *flash_data) -{ struct msm_flash_cfg_data_t flash_data_k; struct msm_flash_init_info_t flash_init_info; int32_t i = 0; @@ -527,15 +558,15 @@ static int32_t msm_flash_init_prepare( flash_data_k.cfg.flash_init_info = &flash_init_info; if (copy_from_user(&flash_init_info, - (void *)(flash_data->cfg.flash_init_info), + (void __user *)(flash_data->cfg.flash_init_info), sizeof(struct msm_flash_init_info_t))) { pr_err("%s copy_from_user failed %d\n", __func__, __LINE__); return -EFAULT; } return msm_flash_init(flash_ctrl, &flash_data_k); -} #endif +} static int32_t msm_flash_low( struct msm_flash_ctrl_t *flash_ctrl, @@ -626,7 +657,7 @@ static int32_t msm_flash_release( } static int32_t msm_flash_config(struct msm_flash_ctrl_t *flash_ctrl, - void __user *argp) + void *argp) { int32_t rc = 0; struct msm_flash_cfg_data_t *flash_data = @@ -701,7 +732,7 @@ static long msm_flash_subdev_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg) { struct msm_flash_ctrl_t *fctrl = NULL; - void __user *argp = (void __user *)arg; + void *argp = arg; CDBG("Enter\n"); @@ -1021,13 +1052,13 @@ static long msm_flash_subdev_do_ioctl( sd = vdev_to_v4l2_subdev(vdev); u32 = (struct msm_flash_cfg_data_t32 *)arg; - flash_data.cfg_type = u32->cfg_type; - for (i = 0; i < MAX_LED_TRIGGERS; i++) { - flash_data.flash_current[i] = u32->flash_current[i]; - flash_data.flash_duration[i] = u32->flash_duration[i]; - } switch (cmd) { case VIDIOC_MSM_FLASH_CFG32: + flash_data.cfg_type = u32->cfg_type; + for (i = 0; i < MAX_LED_TRIGGERS; i++) { + flash_data.flash_current[i] = u32->flash_current[i]; + flash_data.flash_duration[i] = u32->flash_duration[i]; + } cmd = VIDIOC_MSM_FLASH_CFG; switch (flash_data.cfg_type) { case CFG_FLASH_OFF: @@ -1038,7 +1069,8 @@ static long msm_flash_subdev_do_ioctl( case CFG_FLASH_INIT: flash_data.cfg.flash_init_info = &flash_init_info; if (copy_from_user(&flash_init_info32, - (void *)compat_ptr(u32->cfg.flash_init_info), + (void __user *) + compat_ptr(u32->cfg.flash_init_info), sizeof(struct msm_flash_init_info_t32))) { pr_err("%s copy_from_user failed %d\n", __func__, __LINE__); diff --git a/drivers/media/platform/msm/ais/sensor/io/msm_camera_cci_i2c.c b/drivers/media/platform/msm/ais/sensor/io/msm_camera_cci_i2c.c index 955be342e8cf..8f2fd0f9e24d 100644 --- a/drivers/media/platform/msm/ais/sensor/io/msm_camera_cci_i2c.c +++ b/drivers/media/platform/msm/ais/sensor/io/msm_camera_cci_i2c.c @@ -23,7 +23,7 @@ int32_t msm_camera_cci_i2c_read(struct msm_camera_i2c_client *client, enum msm_camera_i2c_data_type data_type) { int32_t rc = -EFAULT; - unsigned char buf[client->addr_type+data_type]; + unsigned char *buf = NULL; 
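From here on, the CCI, QUP and SPI I2C helpers stop sizing their scratch buffer as a variable-length array (unsigned char buf[client->addr_type + data_type]), which placed a caller-influenced allocation on the kernel stack, and switch to kzalloc()/kfree(), releasing the buffer on every exit path. A condensed sketch of the pattern, with a simplified, hypothetical transfer helper standing in for the real CCI/QUP calls:

    #include <linux/slab.h>
    #include <linux/types.h>

    int do_transfer(u32 addr, unsigned char *buf, size_t len); /* hypothetical */

    static int read_reg(u8 addr_type, u8 data_type, u32 addr, u32 *out)
    {
        unsigned char *buf;
        int rc;

        /* Heap allocation replaces the on-stack VLA sized by addr_type +
         * data_type; the sum is widened before the add, as in the patch. */
        buf = kzalloc((u32)addr_type + (u32)data_type, GFP_KERNEL);
        if (!buf)
            return -ENOMEM;

        rc = do_transfer(addr, buf, (size_t)addr_type + data_type);
        if (rc < 0)
            goto out;

        *out = (data_type == 1) ? buf[0] : (buf[0] << 8 | buf[1]);
    out:
        kfree(buf);     /* every return path frees the buffer exactly once */
        return rc;
    }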
struct msm_camera_cci_ctrl cci_ctrl; if ((client->addr_type != MSM_CAMERA_I2C_BYTE_ADDR @@ -33,6 +33,11 @@ int32_t msm_camera_cci_i2c_read(struct msm_camera_i2c_client *client, && data_type != MSM_CAMERA_I2C_WORD_DATA)) return rc; + buf = kzalloc((uint32_t)client->addr_type + (uint32_t)data_type, + GFP_KERNEL); + if (!buf) + return -ENOMEM; + cci_ctrl.cmd = MSM_CCI_I2C_READ; cci_ctrl.cci_info = client->cci_client; cci_ctrl.cfg.cci_i2c_read_cfg.addr = addr; @@ -42,6 +47,8 @@ int32_t msm_camera_cci_i2c_read(struct msm_camera_i2c_client *client, rc = v4l2_subdev_call(client->cci_client->cci_subdev, core, ioctl, VIDIOC_MSM_CCI_CFG, &cci_ctrl); if (rc < 0) { + kfree(buf); + buf = NULL; pr_err("%s: line %d rc = %d\n", __func__, __LINE__, rc); return rc; } @@ -51,6 +58,8 @@ int32_t msm_camera_cci_i2c_read(struct msm_camera_i2c_client *client, else *data = buf[0] << 8 | buf[1]; + kfree(buf); + buf = NULL; S_I2C_DBG("%s addr = 0x%x data: 0x%x\n", __func__, addr, *data); return rc; } diff --git a/drivers/media/platform/msm/ais/sensor/io/msm_camera_dt_util.c b/drivers/media/platform/msm/ais/sensor/io/msm_camera_dt_util.c index 071600ed5221..66300e3f7359 100644 --- a/drivers/media/platform/msm/ais/sensor/io/msm_camera_dt_util.c +++ b/drivers/media/platform/msm/ais/sensor/io/msm_camera_dt_util.c @@ -685,7 +685,7 @@ ERROR2: kfree(array); ERROR1: kfree(ps); - power_setting_size = 0; + power_setting_size = NULL; return rc; } diff --git a/drivers/media/platform/msm/ais/sensor/io/msm_camera_dt_util.h b/drivers/media/platform/msm/ais/sensor/io/msm_camera_dt_util.h index a29ef21274c2..fdeeb4aebf00 100644 --- a/drivers/media/platform/msm/ais/sensor/io/msm_camera_dt_util.h +++ b/drivers/media/platform/msm/ais/sensor/io/msm_camera_dt_util.h @@ -62,6 +62,9 @@ int msm_camera_fill_vreg_params(struct camera_vreg_t *cam_vreg, int msm_camera_pinctrl_init (struct msm_pinctrl_info *sensor_pctrl, struct device *dev); +int msm_cam_sensor_handle_reg_gpio(int seq_val, + struct msm_camera_gpio_conf *gconf, int val); + int32_t msm_sensor_driver_get_gpio_data( struct msm_camera_gpio_conf **gpio_conf, struct device_node *of_node); diff --git a/drivers/media/platform/msm/ais/sensor/io/msm_camera_qup_i2c.c b/drivers/media/platform/msm/ais/sensor/io/msm_camera_qup_i2c.c index 9098b23dbc67..449951f5ffad 100644 --- a/drivers/media/platform/msm/ais/sensor/io/msm_camera_qup_i2c.c +++ b/drivers/media/platform/msm/ais/sensor/io/msm_camera_qup_i2c.c @@ -88,7 +88,8 @@ int32_t msm_camera_qup_i2c_read(struct msm_camera_i2c_client *client, return rc; } - buf = kzalloc(client->addr_type+data_type, GFP_KERNEL); + buf = kzalloc((uint32_t)client->addr_type + (uint32_t)data_type, + GFP_KERNEL); if (!buf) { S_I2C_DBG("%s:%d no memory\n", __func__, __LINE__); return -ENOMEM; @@ -179,7 +180,7 @@ int32_t msm_camera_qup_i2c_write(struct msm_camera_i2c_client *client, enum msm_camera_i2c_data_type data_type) { int32_t rc = -EFAULT; - unsigned char buf[client->addr_type+data_type]; + unsigned char *buf = NULL; uint8_t len = 0; if ((client->addr_type != MSM_CAMERA_I2C_BYTE_ADDR @@ -188,6 +189,11 @@ int32_t msm_camera_qup_i2c_write(struct msm_camera_i2c_client *client, && data_type != MSM_CAMERA_I2C_WORD_DATA)) return rc; + buf = kzalloc((uint32_t)client->addr_type + (uint32_t)data_type, + GFP_KERNEL); + if (!buf) + return -ENOMEM; + S_I2C_DBG("%s reg addr = 0x%x data type: %d\n", __func__, addr, data_type); if (client->addr_type == MSM_CAMERA_I2C_BYTE_ADDR) { @@ -219,6 +225,9 @@ int32_t msm_camera_qup_i2c_write(struct msm_camera_i2c_client *client, rc = 
msm_camera_qup_i2c_txdata(client, buf, len); if (rc < 0) S_I2C_DBG("%s fail\n", __func__); + + kfree(buf); + buf = NULL; return rc; } @@ -226,7 +235,7 @@ int32_t msm_camera_qup_i2c_write_seq(struct msm_camera_i2c_client *client, uint32_t addr, uint8_t *data, uint32_t num_byte) { int32_t rc = -EFAULT; - unsigned char buf[client->addr_type+num_byte]; + unsigned char *buf = NULL; uint8_t len = 0, i = 0; if ((client->addr_type != MSM_CAMERA_I2C_BYTE_ADDR @@ -234,6 +243,10 @@ int32_t msm_camera_qup_i2c_write_seq(struct msm_camera_i2c_client *client, || num_byte == 0) return rc; + buf = kzalloc(client->addr_type+num_byte, GFP_KERNEL); + if (!buf) + return -ENOMEM; + S_I2C_DBG("%s reg addr = 0x%x num bytes: %d\n", __func__, addr, num_byte); if (client->addr_type == MSM_CAMERA_I2C_BYTE_ADDR) { @@ -263,6 +276,9 @@ int32_t msm_camera_qup_i2c_write_seq(struct msm_camera_i2c_client *client, rc = msm_camera_qup_i2c_txdata(client, buf, len+num_byte); if (rc < 0) S_I2C_DBG("%s fail\n", __func__); + + kfree(buf); + buf = NULL; return rc; } diff --git a/drivers/media/platform/msm/ais/sensor/io/msm_camera_spi.c b/drivers/media/platform/msm/ais/sensor/io/msm_camera_spi.c index cd277f0ca0da..d0e27bcc4aac 100644 --- a/drivers/media/platform/msm/ais/sensor/io/msm_camera_spi.c +++ b/drivers/media/platform/msm/ais/sensor/io/msm_camera_spi.c @@ -513,7 +513,7 @@ int32_t msm_camera_spi_write(struct msm_camera_i2c_client *client, &client->spi_client->cmd_tbl.page_program; uint8_t header_len = sizeof(pg->opcode) + pg->addr_len + pg->dummy_len; uint16_t len = 0; - char buf[data_type]; + char *buf = NULL; char *tx; int rc = -EINVAL; @@ -524,10 +524,13 @@ int32_t msm_camera_spi_write(struct msm_camera_i2c_client *client, && data_type != MSM_CAMERA_I2C_WORD_DATA)) return rc; S_I2C_DBG("Data: 0x%x\n", data); + buf = kzalloc(data_type, GFP_KERNEL); + if (!buf) + goto NOMEM; len = header_len + (uint8_t)data_type; tx = kmalloc(len, GFP_KERNEL | GFP_DMA); if (!tx) - goto NOMEM; + goto FREEBUF; if (data_type == MSM_CAMERA_I2C_BYTE_DATA) { buf[0] = data; SPIDBG("Byte %d: 0x%x\n", len, buf[0]); @@ -540,6 +543,8 @@ int32_t msm_camera_spi_write(struct msm_camera_i2c_client *client, if (rc < 0) goto ERROR; goto OUT; +FREEBUF: + kfree(buf); NOMEM: pr_err("%s: memory allocation failed\n", __func__); return -ENOMEM; @@ -547,6 +552,7 @@ ERROR: pr_err("%s: error write\n", __func__); OUT: kfree(tx); + kfree(buf); return rc; } int32_t msm_camera_spi_write_table(struct msm_camera_i2c_client *client, @@ -585,7 +591,7 @@ int32_t msm_camera_spi_write_table(struct msm_camera_i2c_client *client, client->addr_type = client_addr_type; return rc; } -uint32_t msm_get_burst_size(struct msm_camera_i2c_reg_array *reg_setting, +static uint32_t msm_get_burst_size(struct msm_camera_i2c_reg_array *reg_setting, uint32_t reg_size, uint32_t index, uint16_t burst_addr) { uint32_t i; @@ -601,7 +607,7 @@ uint32_t msm_get_burst_size(struct msm_camera_i2c_reg_array *reg_setting, } #ifdef SPI_DYNAMIC_ALLOC -int32_t msm_camera_spi_send_burst(struct msm_camera_i2c_client *client, +static int32_t msm_camera_spi_send_burst(struct msm_camera_i2c_client *client, struct msm_camera_i2c_reg_array *reg_setting, uint32_t reg_size, struct msm_camera_burst_info *info, enum msm_camera_i2c_data_type data_type) @@ -677,7 +683,7 @@ fail: return rc; } #else /* SPI_DYNAMIC_ALLOC */ -int32_t msm_camera_spi_send_burst(struct msm_camera_i2c_client *client, +static int32_t msm_camera_spi_send_burst(struct msm_camera_i2c_client *client, struct msm_camera_i2c_reg_array *reg_setting, 
uint32_t reg_size, struct msm_camera_burst_info *info, enum msm_camera_i2c_data_type data_type) diff --git a/drivers/media/platform/msm/ais/sensor/io/msm_camera_spi.h b/drivers/media/platform/msm/ais/sensor/io/msm_camera_spi.h index 28aa184ce630..9f87db1dbbfa 100644 --- a/drivers/media/platform/msm/ais/sensor/io/msm_camera_spi.h +++ b/drivers/media/platform/msm/ais/sensor/io/msm_camera_spi.h @@ -83,6 +83,14 @@ uint16_t msm_camera_spi_get_hlen(struct msm_camera_spi_inst *inst) return sizeof(inst->opcode) + inst->addr_len + inst->dummy_len; } +int32_t msm_camera_spi_tx_helper(struct msm_camera_i2c_client *client, + struct msm_camera_spi_inst *inst, uint32_t addr, uint8_t *data, + uint32_t num_byte, char *tx, char *rx); + +int32_t msm_camera_spi_tx_read(struct msm_camera_i2c_client *client, + struct msm_camera_spi_inst *inst, uint32_t addr, uint8_t *data, + uint32_t num_byte, char *tx, char *rx); + int32_t msm_camera_spi_read(struct msm_camera_i2c_client *client, uint32_t addr, uint16_t *data, enum msm_camera_i2c_data_type data_type); diff --git a/drivers/media/platform/msm/ais/sensor/ir_cut/msm_ir_cut.c b/drivers/media/platform/msm/ais/sensor/ir_cut/msm_ir_cut.c index bfb960ea862a..68ab4003b666 100644 --- a/drivers/media/platform/msm/ais/sensor/ir_cut/msm_ir_cut.c +++ b/drivers/media/platform/msm/ais/sensor/ir_cut/msm_ir_cut.c @@ -282,7 +282,7 @@ static int32_t msm_ir_cut_handle_init( } static int32_t msm_ir_cut_config(struct msm_ir_cut_ctrl_t *ir_cut_ctrl, - void __user *argp) + void *argp) { int32_t rc = -EINVAL; struct msm_ir_cut_cfg_data_t *ir_cut_data = @@ -327,7 +327,7 @@ static long msm_ir_cut_subdev_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg) { struct msm_ir_cut_ctrl_t *fctrl = NULL; - void __user *argp = (void __user *)arg; + void *argp = arg; CDBG("Enter\n"); diff --git a/drivers/media/platform/msm/ais/sensor/ir_led/msm_ir_led.c b/drivers/media/platform/msm/ais/sensor/ir_led/msm_ir_led.c index 803bce440ee1..9e200071f9eb 100644 --- a/drivers/media/platform/msm/ais/sensor/ir_led/msm_ir_led.c +++ b/drivers/media/platform/msm/ais/sensor/ir_led/msm_ir_led.c @@ -196,7 +196,7 @@ static int32_t msm_ir_led_handle_init( } static int32_t msm_ir_led_config(struct msm_ir_led_ctrl_t *ir_led_ctrl, - void __user *argp) + void *argp) { int32_t rc = -EINVAL; struct msm_ir_led_cfg_data_t *ir_led_data = @@ -241,7 +241,7 @@ static long msm_ir_led_subdev_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg) { struct msm_ir_led_ctrl_t *fctrl = NULL; - void __user *argp = (void __user *)arg; + void *argp = arg; struct msm_ir_led_cfg_data_t ir_led_data = {0}; if (!sd) { diff --git a/drivers/media/platform/msm/ais/sensor/msm_sensor.c b/drivers/media/platform/msm/ais/sensor/msm_sensor.c index c671ea71d2a7..a276b03e5294 100644 --- a/drivers/media/platform/msm/ais/sensor/msm_sensor.c +++ b/drivers/media/platform/msm/ais/sensor/msm_sensor.c @@ -343,7 +343,7 @@ static long msm_sensor_subdev_ioctl(struct v4l2_subdev *sd, { int rc = 0; struct msm_sensor_ctrl_t *s_ctrl = get_sctrl(sd); - void __user *argp = (void __user *)arg; + void *argp = arg; if (!s_ctrl) { pr_err("%s s_ctrl NULL\n", __func__); @@ -421,7 +421,7 @@ long msm_sensor_subdev_fops_ioctl(struct file *file, } static int msm_sensor_config32(struct msm_sensor_ctrl_t *s_ctrl, - void __user *argp) + void *argp) { struct sensorb_cfg_data32 *cdata = (struct sensorb_cfg_data32 *)argp; int32_t rc = 0; @@ -498,7 +498,7 @@ static int msm_sensor_config32(struct msm_sensor_ctrl_t *s_ctrl, } if (copy_from_user(&conf_array32, - (void 
*)compat_ptr(cdata->cfg.setting), + (void __user *)compat_ptr(cdata->cfg.setting), sizeof(struct msm_camera_i2c_reg_setting32))) { pr_err("%s:%d failed\n", __func__, __LINE__); rc = -EFAULT; @@ -525,7 +525,7 @@ static int msm_sensor_config32(struct msm_sensor_ctrl_t *s_ctrl, break; } if (copy_from_user(reg_setting, - (void *)(conf_array.reg_setting), + (void __user *)(conf_array.reg_setting), conf_array.size * sizeof(struct msm_camera_i2c_reg_array))) { pr_err("%s:%d failed\n", __func__, __LINE__); @@ -571,7 +571,7 @@ static int msm_sensor_config32(struct msm_sensor_ctrl_t *s_ctrl, (struct msm_camera_i2c_read_config *) compat_ptr(cdata->cfg.setting); - if (copy_from_user(&read_config, read_config_ptr, + if (copy_from_user(&read_config, (void __user *)read_config_ptr, sizeof(struct msm_camera_i2c_read_config))) { pr_err("%s:%d failed\n", __func__, __LINE__); rc = -EFAULT; @@ -640,7 +640,7 @@ static int msm_sensor_config32(struct msm_sensor_ctrl_t *s_ctrl, goto DONE; if (copy_from_user(&write_config32, - (void *)compat_ptr(cdata->cfg.setting), + (void __user *)compat_ptr(cdata->cfg.setting), sizeof( struct msm_camera_i2c_array_write_config32))) { pr_err("%s:%d failed\n", __func__, __LINE__); @@ -682,7 +682,7 @@ static int msm_sensor_config32(struct msm_sensor_ctrl_t *s_ctrl, break; } if (copy_from_user(reg_setting, - (void *)(write_config.conf_array.reg_setting), + (void __user *)(write_config.conf_array.reg_setting), write_config.conf_array.size * sizeof(struct msm_camera_i2c_reg_array))) { pr_err("%s:%d failed\n", __func__, __LINE__); @@ -753,7 +753,7 @@ static int msm_sensor_config32(struct msm_sensor_ctrl_t *s_ctrl, } if (copy_from_user(&conf_array32, - (void *)compat_ptr(cdata->cfg.setting), + (void __user *)compat_ptr(cdata->cfg.setting), sizeof(struct msm_camera_i2c_seq_reg_setting32))) { pr_err("%s:%d failed\n", __func__, __LINE__); rc = -EFAULT; @@ -780,7 +780,8 @@ static int msm_sensor_config32(struct msm_sensor_ctrl_t *s_ctrl, rc = -ENOMEM; break; } - if (copy_from_user(reg_setting, (void *)conf_array.reg_setting, + if (copy_from_user(reg_setting, + (void __user *)conf_array.reg_setting, conf_array.size * sizeof(struct msm_camera_i2c_seq_reg_array))) { pr_err("%s:%d failed\n", __func__, __LINE__); @@ -863,7 +864,7 @@ static int msm_sensor_config32(struct msm_sensor_ctrl_t *s_ctrl, goto DONE; if (copy_from_user(&stop_setting32, - (void *)compat_ptr((cdata->cfg.setting)), + (void __user *)compat_ptr((cdata->cfg.setting)), sizeof(struct msm_camera_i2c_reg_setting32))) { pr_err("%s:%d failed\n", __func__, __LINE__); rc = -EFAULT; @@ -890,7 +891,7 @@ static int msm_sensor_config32(struct msm_sensor_ctrl_t *s_ctrl, break; } if (copy_from_user(stop_setting->reg_setting, - (void *)reg_setting, + (void __user *)reg_setting, stop_setting->size * sizeof(struct msm_camera_i2c_reg_array))) { pr_err("%s:%d failed\n", __func__, __LINE__); @@ -949,7 +950,7 @@ DONE: } #endif -int msm_sensor_config(struct msm_sensor_ctrl_t *s_ctrl, void __user *argp) +int msm_sensor_config(struct msm_sensor_ctrl_t *s_ctrl, void *argp) { struct sensorb_cfg_data *cdata = (struct sensorb_cfg_data *)argp; int32_t rc = 0; @@ -1026,7 +1027,7 @@ int msm_sensor_config(struct msm_sensor_ctrl_t *s_ctrl, void __user *argp) } if (copy_from_user(&conf_array, - (void *)cdata->cfg.setting, + (void __user *)cdata->cfg.setting, sizeof(struct msm_camera_i2c_reg_setting))) { pr_err("%s:%d failed\n", __func__, __LINE__); rc = -EFAULT; @@ -1046,7 +1047,8 @@ int msm_sensor_config(struct msm_sensor_ctrl_t *s_ctrl, void __user *argp) rc = 
-ENOMEM; break; } - if (copy_from_user(reg_setting, (void *)conf_array.reg_setting, + if (copy_from_user(reg_setting, + (void __user *)conf_array.reg_setting, conf_array.size * sizeof(struct msm_camera_i2c_reg_array))) { pr_err("%s:%d failed\n", __func__, __LINE__); @@ -1089,7 +1091,7 @@ int msm_sensor_config(struct msm_sensor_ctrl_t *s_ctrl, void __user *argp) read_config_ptr = (struct msm_camera_i2c_read_config *)cdata->cfg.setting; - if (copy_from_user(&read_config, read_config_ptr, + if (copy_from_user(&read_config, (void __user *)read_config_ptr, sizeof(struct msm_camera_i2c_read_config))) { pr_err("%s:%d failed\n", __func__, __LINE__); rc = -EFAULT; @@ -1153,7 +1155,7 @@ int msm_sensor_config(struct msm_sensor_ctrl_t *s_ctrl, void __user *argp) goto DONE; if (copy_from_user(&write_config, - (void *)cdata->cfg.setting, + (void __user *)cdata->cfg.setting, sizeof(struct msm_camera_i2c_array_write_config))) { pr_err("%s:%d failed\n", __func__, __LINE__); rc = -EFAULT; @@ -1178,7 +1180,7 @@ int msm_sensor_config(struct msm_sensor_ctrl_t *s_ctrl, void __user *argp) break; } if (copy_from_user(reg_setting, - (void *)(write_config.conf_array.reg_setting), + (void __user *)(write_config.conf_array.reg_setting), write_config.conf_array.size * sizeof(struct msm_camera_i2c_reg_array))) { pr_err("%s:%d failed\n", __func__, __LINE__); @@ -1243,7 +1245,7 @@ int msm_sensor_config(struct msm_sensor_ctrl_t *s_ctrl, void __user *argp) } if (copy_from_user(&conf_array, - (void *)cdata->cfg.setting, + (void __user *)cdata->cfg.setting, sizeof(struct msm_camera_i2c_seq_reg_setting))) { pr_err("%s:%d failed\n", __func__, __LINE__); rc = -EFAULT; @@ -1265,7 +1267,8 @@ int msm_sensor_config(struct msm_sensor_ctrl_t *s_ctrl, void __user *argp) rc = -ENOMEM; break; } - if (copy_from_user(reg_setting, (void *)conf_array.reg_setting, + if (copy_from_user(reg_setting, + (void __user *)conf_array.reg_setting, conf_array.size * sizeof(struct msm_camera_i2c_seq_reg_array))) { pr_err("%s:%d failed\n", __func__, __LINE__); @@ -1349,7 +1352,7 @@ int msm_sensor_config(struct msm_sensor_ctrl_t *s_ctrl, void __user *argp) goto DONE; if (copy_from_user(stop_setting, - (void *)cdata->cfg.setting, + (void __user *)cdata->cfg.setting, sizeof(struct msm_camera_i2c_reg_setting))) { pr_err("%s:%d failed\n", __func__, __LINE__); rc = -EFAULT; @@ -1371,7 +1374,7 @@ int msm_sensor_config(struct msm_sensor_ctrl_t *s_ctrl, void __user *argp) break; } if (copy_from_user(stop_setting->reg_setting, - (void *)reg_setting, + (void __user *)reg_setting, stop_setting->size * sizeof(struct msm_camera_i2c_reg_array))) { pr_err("%s:%d failed\n", __func__, __LINE__); diff --git a/drivers/media/platform/msm/ais/sensor/msm_sensor.h b/drivers/media/platform/msm/ais/sensor/msm_sensor.h index 060383b05170..eacd3b05420c 100644 --- a/drivers/media/platform/msm/ais/sensor/msm_sensor.h +++ b/drivers/media/platform/msm/ais/sensor/msm_sensor.h @@ -94,7 +94,7 @@ struct msm_sensor_ctrl_t { struct msm_sensor_init_t s_init; }; -int msm_sensor_config(struct msm_sensor_ctrl_t *s_ctrl, void __user *argp); +int msm_sensor_config(struct msm_sensor_ctrl_t *s_ctrl, void *argp); int msm_sensor_power_up(struct msm_sensor_ctrl_t *s_ctrl); diff --git a/drivers/media/platform/msm/ais/sensor/msm_sensor_driver.c b/drivers/media/platform/msm/ais/sensor/msm_sensor_driver.c index 18a2bac96a89..c02972e5e993 100644 --- a/drivers/media/platform/msm/ais/sensor/msm_sensor_driver.c +++ b/drivers/media/platform/msm/ais/sensor/msm_sensor_driver.c @@ -385,7 +385,7 @@ static int32_t 
msm_sensor_get_pw_settings_compat( pr_err("failed: no memory ps32"); return -ENOMEM; } - if (copy_from_user(ps32, (void *)us_ps, sizeof(*ps32) * size)) { + if (copy_from_user(ps32, (void __user *)us_ps, sizeof(*ps32) * size)) { pr_err("failed: copy_from_user"); kfree(ps32); return -EFAULT; @@ -422,7 +422,9 @@ static int32_t msm_sensor_create_pd_settings(void *setting, } else #endif { - if (copy_from_user(pd, (void *)pu, sizeof(*pd) * size_down)) { + if (copy_from_user(pd, + (void __user *)pu, + sizeof(*pd) * size_down)) { pr_err("failed: copy_from_user"); return -EFAULT; } @@ -474,7 +476,8 @@ static int32_t msm_sensor_get_power_down_settings(void *setting, } } else #endif - if (copy_from_user(pd, (void *)slave_info->power_setting_array. + if (copy_from_user(pd, + (void __user *)slave_info->power_setting_array. power_down_setting, sizeof(*pd) * size_down)) { pr_err("failed: copy_from_user"); kfree(pd); @@ -540,7 +543,8 @@ static int32_t msm_sensor_get_power_up_settings(void *setting, #endif { if (copy_from_user(pu, - (void *)slave_info->power_setting_array.power_setting, + (void __user *) + slave_info->power_setting_array.power_setting, sizeof(*pu) * size)) { pr_err("failed: copy_from_user"); kfree(pu); @@ -653,7 +657,7 @@ int32_t msm_sensor_driver_probe(void *setting, rc = -ENOMEM; goto free_slave_info; } - if (copy_from_user((void *)slave_info32, setting, + if (copy_from_user((void *)slave_info32, (void __user *)setting, sizeof(*slave_info32))) { pr_err("failed: copy_from_user"); rc = -EFAULT; @@ -704,7 +708,7 @@ int32_t msm_sensor_driver_probe(void *setting, #endif { if (copy_from_user(slave_info, - (void *)setting, sizeof(*slave_info))) { + (void __user *)setting, sizeof(*slave_info))) { pr_err("failed: copy_from_user"); rc = -EFAULT; goto free_slave_info; diff --git a/drivers/media/platform/msm/ais/sensor/msm_sensor_init.c b/drivers/media/platform/msm/ais/sensor/msm_sensor_init.c index c3943be78226..ffbf963e819e 100644 --- a/drivers/media/platform/msm/ais/sensor/msm_sensor_init.c +++ b/drivers/media/platform/msm/ais/sensor/msm_sensor_init.c @@ -16,11 +16,17 @@ #include "msm_sensor_driver.h" #include "msm_sensor.h" #include "msm_sd.h" +#include "msm_camera_io_util.h" +#include "msm_early_cam.h" /* Logging macro */ #undef CDBG #define CDBG(fmt, args...) 
pr_debug(fmt, ##args) +#define EARLY_CAMERA_SIGNAL_DONE 0xa5a5a5a5 +#define EARLY_CAMERA_SIGNAL_DISABLED 0 + +static bool early_camera_clock_off; static struct msm_sensor_init_t *s_init; static int msm_sensor_wait_for_probe_done(struct msm_sensor_init_t *s_init) @@ -42,10 +48,14 @@ static int msm_sensor_wait_for_probe_done(struct msm_sensor_init_t *s_init) return rc; } +#define MMSS_A_VFE_0_SPARE 0xC84 + /* Static function definition */ int32_t msm_sensor_driver_cmd(struct msm_sensor_init_t *s_init, void *arg) { int32_t rc = 0; + u32 val = 0; + void __iomem *base; struct sensor_init_cfg_data *cfg = (struct sensor_init_cfg_data *)arg; /* Validate input parameters */ @@ -68,6 +78,28 @@ int32_t msm_sensor_driver_cmd(struct msm_sensor_init_t *s_init, void *arg) break; case CFG_SINIT_PROBE_DONE: + if (early_camera_clock_off == false) { + base = ioremap(0x00A10000, 0x1000); + val = msm_camera_io_r_mb(base + MMSS_A_VFE_0_SPARE); + while (val != EARLY_CAMERA_SIGNAL_DONE) { + if (val == EARLY_CAMERA_SIGNAL_DISABLED) + break; + msleep(1000); + val = msm_camera_io_r_mb( + base + MMSS_A_VFE_0_SPARE); + pr_err("Waiting for signal from LK val = %u\n", + val); + } + rc = msm_early_cam_disable_clocks(); + if (rc < 0) { + pr_err("Failed to disable early camera :%d\n", + rc); + } else { + early_camera_clock_off = true; + pr_debug("Voted OFF early camera clocks\n"); + } + } + s_init->module_init_status = 1; wake_up(&s_init->state_wait); break; @@ -99,6 +131,7 @@ static int __init msm_sensor_init_module(void) mutex_init(&s_init->imutex); init_waitqueue_head(&s_init->state_wait); + early_camera_clock_off = false; return ret; } diff --git a/drivers/media/platform/msm/ais/sensor/ois/msm_ois.c b/drivers/media/platform/msm/ais/sensor/ois/msm_ois.c index f3147b127438..aa7658f359ac 100644 --- a/drivers/media/platform/msm/ais/sensor/ois/msm_ois.c +++ b/drivers/media/platform/msm/ais/sensor/ois/msm_ois.c @@ -382,7 +382,7 @@ static int32_t msm_ois_control(struct msm_ois_ctrl_t *o_ctrl, return -EFAULT; } if (copy_from_user(settings, - (void *)set_info->ois_params.settings, + (void __user *)set_info->ois_params.settings, set_info->ois_params.setting_size * sizeof(struct reg_settings_ois_t))) { kfree(settings); @@ -407,7 +407,7 @@ static int32_t msm_ois_control(struct msm_ois_ctrl_t *o_ctrl, static int32_t msm_ois_config(struct msm_ois_ctrl_t *o_ctrl, - void __user *argp) + void *argp) { struct msm_ois_cfg_data *cdata = (struct msm_ois_cfg_data *)argp; @@ -449,7 +449,7 @@ static int32_t msm_ois_config(struct msm_ois_ctrl_t *o_ctrl, } else #endif if (copy_from_user(&conf_array, - (void *)cdata->cfg.settings, + (void __user *)cdata->cfg.settings, sizeof(struct msm_camera_i2c_seq_reg_setting))) { pr_err("%s:%d failed\n", __func__, __LINE__); rc = -EFAULT; @@ -470,7 +470,8 @@ static int32_t msm_ois_config(struct msm_ois_ctrl_t *o_ctrl, rc = -ENOMEM; break; } - if (copy_from_user(reg_setting, (void *)conf_array.reg_setting, + if (copy_from_user(reg_setting, + (void __user *)conf_array.reg_setting, conf_array.size * sizeof(struct msm_camera_i2c_seq_reg_array))) { pr_err("%s:%d failed\n", __func__, __LINE__); @@ -495,7 +496,7 @@ static int32_t msm_ois_config(struct msm_ois_ctrl_t *o_ctrl, } static int32_t msm_ois_config_download(struct msm_ois_ctrl_t *o_ctrl, - void __user *argp) + void *argp) { struct msm_ois_cfg_download_data *cdata = (struct msm_ois_cfg_download_data *)argp; @@ -606,7 +607,7 @@ static long msm_ois_subdev_ioctl(struct v4l2_subdev *sd, { int rc; struct msm_ois_ctrl_t *o_ctrl = v4l2_get_subdevdata(sd); - void 
__user *argp = (void __user *)arg; + void *argp = arg; CDBG("Enter\n"); CDBG("%s:%d o_ctrl %pK argp %pK\n", __func__, __LINE__, o_ctrl, argp); @@ -780,11 +781,11 @@ static long msm_ois_subdev_do_ioctl( u32 = (struct msm_ois_cfg_data32 *)arg; parg = arg; - ois_data.cfgtype = u32->cfgtype; switch (cmd) { case VIDIOC_MSM_OIS_CFG32: cmd = VIDIOC_MSM_OIS_CFG; + ois_data.cfgtype = u32->cfgtype; switch (u32->cfgtype) { case CFG_OIS_CONTROL: @@ -805,7 +806,7 @@ static long msm_ois_subdev_do_ioctl( break; case CFG_OIS_I2C_WRITE_SEQ_TABLE: if (copy_from_user(&settings32, - (void *)compat_ptr(u32->cfg.settings), + (void __user *)compat_ptr(u32->cfg.settings), sizeof( struct msm_camera_i2c_seq_reg_setting32))) { pr_err("copy_from_user failed\n"); @@ -818,7 +819,6 @@ static long msm_ois_subdev_do_ioctl( settings.reg_setting = compat_ptr(settings32.reg_setting); - ois_data.cfgtype = u32->cfgtype; ois_data.cfg.settings = &settings; parg = &ois_data; break; diff --git a/drivers/media/platform/msm/camera_v2/common/msm_camera_io_util.c b/drivers/media/platform/msm/camera_v2/common/msm_camera_io_util.c index c243d587e308..90edadaed1ef 100644 --- a/drivers/media/platform/msm/camera_v2/common/msm_camera_io_util.c +++ b/drivers/media/platform/msm/camera_v2/common/msm_camera_io_util.c @@ -175,35 +175,45 @@ int32_t msm_camera_io_poll_value_wmask(void __iomem *addr, u32 wait_data, void msm_camera_io_dump(void __iomem *addr, int size, int enable) { - char line_str[128], *p_str; + char line_str[128]; int i; - u32 *p = (u32 *) addr; + ptrdiff_t p = 0; + size_t offset = 0, used = 0; u32 data; CDBG("%s: addr=%pK size=%d\n", __func__, addr, size); - if (!p || (size <= 0) || !enable) + if (!addr || (size <= 0) || !enable) return; line_str[0] = '\0'; - p_str = line_str; for (i = 0; i < size/4; i++) { if (i % 4 == 0) { -#ifdef CONFIG_COMPAT - snprintf(p_str, 20, "%016lx: ", (unsigned long) p); - p_str += 18; -#else - snprintf(p_str, 12, "%08lx: ", (unsigned long) p); - p_str += 10; -#endif + used = snprintf(line_str + offset, + sizeof(line_str) - offset, "0x%04tX: ", p); + if (offset + used >= sizeof(line_str)) { + pr_err("%s\n", line_str); + offset = 0; + line_str[0] = '\0'; + } else { + offset += used; + } + } + data = readl_relaxed(addr + p); + p = p + 4; + used = snprintf(line_str + offset, + sizeof(line_str) - offset, "%08x ", data); + if (offset + used >= sizeof(line_str)) { + pr_err("%s\n", line_str); + offset = 0; + line_str[0] = '\0'; + } else { + offset += used; } - data = readl_relaxed(p++); - snprintf(p_str, 12, "%08x ", data); - p_str += 9; if ((i + 1) % 4 == 0) { pr_err("%s\n", line_str); line_str[0] = '\0'; - p_str = line_str; + offset = 0; } } if (line_str[0] != '\0') diff --git a/drivers/media/platform/msm/camera_v2/fd/msm_fd_dev.c b/drivers/media/platform/msm/camera_v2/fd/msm_fd_dev.c index a04d7ca73fe1..d881b4aea48f 100644 --- a/drivers/media/platform/msm/camera_v2/fd/msm_fd_dev.c +++ b/drivers/media/platform/msm/camera_v2/fd/msm_fd_dev.c @@ -434,6 +434,7 @@ static int msm_fd_open(struct file *file) ctx->vb2_q.type = V4L2_BUF_TYPE_VIDEO_OUTPUT; ctx->vb2_q.io_modes = VB2_USERPTR; ctx->vb2_q.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY; + mutex_init(&ctx->lock); ret = vb2_queue_init(&ctx->vb2_q); if (ret < 0) { dev_err(device->dev, "Error queue init\n"); @@ -484,7 +485,9 @@ static int msm_fd_release(struct file *file) msm_cpp_vbif_register_error_handler((void *)ctx, VBIF_CLIENT_FD, NULL); + mutex_lock(&ctx->lock); vb2_queue_release(&ctx->vb2_q); + mutex_unlock(&ctx->lock); vfree(ctx->stats); @@ -514,7 
+517,9 @@ static unsigned int msm_fd_poll(struct file *file, struct fd_ctx *ctx = msm_fd_ctx_from_fh(file->private_data); unsigned int ret; + mutex_lock(&ctx->lock); ret = vb2_poll(&ctx->vb2_q, file, wait); + mutex_unlock(&ctx->lock); if (atomic_read(&ctx->subscribed_for_event)) { poll_wait(file, &ctx->fh.wait, wait); @@ -752,9 +757,9 @@ static int msm_fd_reqbufs(struct file *file, int ret; struct fd_ctx *ctx = msm_fd_ctx_from_fh(fh); - mutex_lock(&ctx->fd_device->recovery_lock); + mutex_lock(&ctx->lock); ret = vb2_reqbufs(&ctx->vb2_q, req); - mutex_unlock(&ctx->fd_device->recovery_lock); + mutex_unlock(&ctx->lock); return ret; } @@ -770,9 +775,9 @@ static int msm_fd_qbuf(struct file *file, void *fh, int ret; struct fd_ctx *ctx = msm_fd_ctx_from_fh(fh); - mutex_lock(&ctx->fd_device->recovery_lock); + mutex_lock(&ctx->lock); ret = vb2_qbuf(&ctx->vb2_q, pb); - mutex_unlock(&ctx->fd_device->recovery_lock); + mutex_unlock(&ctx->lock); return ret; } @@ -789,9 +794,9 @@ static int msm_fd_dqbuf(struct file *file, int ret; struct fd_ctx *ctx = msm_fd_ctx_from_fh(fh); - mutex_lock(&ctx->fd_device->recovery_lock); + mutex_lock(&ctx->lock); ret = vb2_dqbuf(&ctx->vb2_q, pb, file->f_flags & O_NONBLOCK); - mutex_unlock(&ctx->fd_device->recovery_lock); + mutex_unlock(&ctx->lock); return ret; } @@ -807,7 +812,9 @@ static int msm_fd_streamon(struct file *file, struct fd_ctx *ctx = msm_fd_ctx_from_fh(fh); int ret; + mutex_lock(&ctx->lock); ret = vb2_streamon(&ctx->vb2_q, buf_type); + mutex_unlock(&ctx->lock); if (ret < 0) dev_err(ctx->fd_device->dev, "Stream on fails\n"); @@ -826,7 +833,9 @@ static int msm_fd_streamoff(struct file *file, struct fd_ctx *ctx = msm_fd_ctx_from_fh(fh); int ret; + mutex_lock(&ctx->lock); ret = vb2_streamoff(&ctx->vb2_q, buf_type); + mutex_unlock(&ctx->lock); if (ret < 0) dev_err(ctx->fd_device->dev, "Stream off fails\n"); @@ -1057,14 +1066,18 @@ static int msm_fd_s_ctrl(struct file *file, void *fh, struct v4l2_control *a) a->value = ctx->format.size->work_size; break; case V4L2_CID_FD_WORK_MEMORY_FD: + mutex_lock(&ctx->fd_device->recovery_lock); if (ctx->work_buf.fd != -1) msm_fd_hw_unmap_buffer(&ctx->work_buf); if (a->value >= 0) { ret = msm_fd_hw_map_buffer(&ctx->mem_pool, a->value, &ctx->work_buf); - if (ret < 0) + if (ret < 0) { + mutex_unlock(&ctx->fd_device->recovery_lock); return ret; + } } + mutex_unlock(&ctx->fd_device->recovery_lock); break; default: return -EINVAL; diff --git a/drivers/media/platform/msm/camera_v2/fd/msm_fd_dev.h b/drivers/media/platform/msm/camera_v2/fd/msm_fd_dev.h index 6eae2b8d56fb..2b81e5b9ece3 100644 --- a/drivers/media/platform/msm/camera_v2/fd/msm_fd_dev.h +++ b/drivers/media/platform/msm/camera_v2/fd/msm_fd_dev.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -174,6 +174,7 @@ struct fd_ctx { struct msm_fd_mem_pool mem_pool; struct msm_fd_stats *stats; struct msm_fd_buf_handle work_buf; + struct mutex lock; }; /* diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp.h b/drivers/media/platform/msm/camera_v2/isp/msm_isp.h index d64cee834bea..b2d152bf4ef0 100644 --- a/drivers/media/platform/msm/camera_v2/isp/msm_isp.h +++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp.h @@ -456,7 +456,7 @@ struct msm_vfe_axi_stream { uint32_t runtime_output_format; enum msm_stream_rdi_input_type rdi_input_type; struct msm_isp_sw_framskip sw_skip; - uint8_t sw_ping_pong_bit; + int8_t sw_ping_pong_bit; struct vfe_device *vfe_dev[MAX_VFE]; int num_isp; @@ -786,6 +786,7 @@ struct vfe_device { size_t num_norm_clk; bool hvx_clk_state; enum cam_ahb_clk_vote ahb_vote; + enum cam_ahb_clk_vote user_requested_ahb_vote; struct cx_ipeak_client *vfe_cx_ipeak; /* Sync variables*/ diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp47.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp47.c index 03d1b3c22d61..24d1c6cba84d 100644 --- a/drivers/media/platform/msm/camera_v2/isp/msm_isp47.c +++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp47.c @@ -274,10 +274,12 @@ int msm_isp47_ahb_clk_cfg(struct vfe_device *vfe_dev, enum cam_ahb_clk_vote src_clk_vote; struct msm_isp_clk_rates clk_rates; - if (ahb_cfg) + if (ahb_cfg) { vote = msm_isp47_get_cam_clk_vote(ahb_cfg->vote); - else - vote = CAM_AHB_SVS_VOTE; + vfe_dev->user_requested_ahb_vote = vote; + } else { + vote = vfe_dev->user_requested_ahb_vote; + } vfe_dev->hw_info->vfe_ops.platform_ops.get_clk_rates(vfe_dev, &clk_rates); @@ -327,6 +329,7 @@ int msm_vfe47_init_hardware(struct vfe_device *vfe_dev) if (rc) goto clk_enable_failed; + vfe_dev->user_requested_ahb_vote = CAM_AHB_SVS_VOTE; rc = cam_config_ahb_clk(NULL, 0, id, CAM_AHB_SVS_VOTE); if (rc < 0) { pr_err("%s: failed to vote for AHB\n", __func__); @@ -1360,6 +1363,7 @@ void msm_vfe47_cfg_camif(struct vfe_device *vfe_dev, { uint16_t first_pixel, last_pixel, first_line, last_line; struct msm_vfe_camif_cfg *camif_cfg = &pix_cfg->camif_cfg; + struct msm_vfe_testgen_cfg *testgen_cfg = &pix_cfg->testgen_cfg; uint32_t val, subsample_period, subsample_pattern; uint32_t irq_sub_period = 32; uint32_t frame_sub_period = 32; @@ -1383,8 +1387,15 @@ void msm_vfe47_cfg_camif(struct vfe_device *vfe_dev, subsample_period = camif_cfg->subsample_cfg.irq_subsample_period; subsample_pattern = camif_cfg->subsample_cfg.irq_subsample_pattern; - msm_camera_io_w((camif_cfg->lines_per_frame - 1) << 16 | - (camif_cfg->pixels_per_line - 1), vfe_dev->vfe_base + 0x484); + if (pix_cfg->input_mux == TESTGEN) + msm_camera_io_w((testgen_cfg->lines_per_frame - 1) << 16 | + (testgen_cfg->pixels_per_line - 1), + vfe_dev->vfe_base + 0x484); + else + msm_camera_io_w((camif_cfg->lines_per_frame - 1) << 16 | + (camif_cfg->pixels_per_line - 1), + vfe_dev->vfe_base + 0x484); + if (bus_sub_en) { val = msm_camera_io_r(vfe_dev->vfe_base + 0x47C); val &= 0xFFFFFFDF; diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c index 7e74f2f10c8c..63f5497e63b8 100644 --- a/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c +++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c @@ -776,40 +776,6 @@ void msm_isp_check_for_output_error(struct vfe_device *vfe_dev, } } 
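One change ties the msm_isp.h hunk above and the msm_isp_axi_util.c hunks below together: sw_ping_pong_bit is widened from uint8_t to int8_t so that -1 can serve as a "do not trust the hardware pingpong status yet" sentinel. After a write-master reload the status register is stale until the first AXI_DONE for the ping buffer, so the request path avoids reading it and, for a second queued request, programs the pong slot directly. A compact, self-contained sketch of that state handling; the names and the enum are illustrative, not the driver's:

    #include <stdint.h>

    enum pp_slot { PP_PING = 0, PP_PONG = 1, PP_UNKNOWN = -1 };

    struct stream_state {
        int8_t sw_ping_pong_bit;          /* PP_PING, PP_PONG or PP_UNKNOWN */
        unsigned int undelivered_request_cnt;
    };

    /* WM reload: the HW pingpong status is stale until ping's AXI_DONE. */
    static void on_wm_reload(struct stream_state *s)
    {
        s->sw_ping_pong_bit = PP_UNKNOWN;
    }

    /* The first buffer-done after the reload is for the ping buffer. */
    static void on_buf_done(struct stream_state *s)
    {
        if (s->sw_ping_pong_bit == PP_UNKNOWN)
            s->sw_ping_pong_bit = PP_PING;
        if (s->undelivered_request_cnt)
            s->undelivered_request_cnt--;
    }

    /* Pick the slot to program for a newly queued request. */
    static enum pp_slot slot_for_request(const struct stream_state *s,
                                         enum pp_slot hw_status)
    {
        if (s->sw_ping_pong_bit == PP_UNKNOWN) {
            if (s->undelivered_request_cnt == 2)
                return PP_PONG; /* ping was set up at reload; fill pong now */
            return PP_PING;     /* do not read the stale status register   */
        }
        return hw_status;       /* register is trustworthy again           */
    }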
-static int msm_isp_check_sync_time(struct msm_vfe_src_info *src_info, - struct msm_isp_timestamp *ts, - struct master_slave_resource_info *ms_res) -{ - int i; - struct msm_vfe_src_info *master_src_info = NULL; - uint32_t master_time = 0, current_time; - - if (!ms_res->src_sof_mask) - return 0; - - for (i = 0; i < MAX_VFE * VFE_SRC_MAX; i++) { - if (ms_res->src_info[i] == NULL) - continue; - if (src_info == ms_res->src_info[i] || - ms_res->src_info[i]->active == 0) - continue; - if (ms_res->src_sof_mask & - (1 << ms_res->src_info[i]->dual_hw_ms_info.index)) { - master_src_info = ms_res->src_info[i]; - break; - } - } - if (!master_src_info) - return 0; - master_time = master_src_info-> - dual_hw_ms_info.sof_info.mono_timestamp_ms; - current_time = ts->buf_time.tv_sec * 1000 + - ts->buf_time.tv_usec / 1000; - if ((current_time - master_time) > ms_res->sof_delta_threshold) - return 1; - return 0; -} - static void msm_isp_sync_dual_cam_frame_id( struct vfe_device *vfe_dev, struct master_slave_resource_info *ms_res, @@ -824,24 +790,11 @@ static void msm_isp_sync_dual_cam_frame_id( if (src_info->dual_hw_ms_info.sync_state == ms_res->dual_sync_mode) { - if (msm_isp_check_sync_time(src_info, ts, ms_res) == 0) { - (frame_src == VFE_PIX_0) ? src_info->frame_id += + (frame_src == VFE_PIX_0) ? src_info->frame_id += vfe_dev->axi_data.src_info[frame_src]. sof_counter_step : src_info->frame_id++; - return; - } - ms_res->src_sof_mask = 0; - ms_res->active_src_mask = 0; - for (i = 0; i < MAX_VFE * VFE_SRC_MAX; i++) { - if (ms_res->src_info[i] == NULL) - continue; - if (ms_res->src_info[i]->active == 0) - continue; - ms_res->src_info[i]->dual_hw_ms_info. - sync_state = - MSM_ISP_DUAL_CAM_ASYNC; - } + return; } /* find highest frame id */ @@ -1699,6 +1652,9 @@ static int msm_isp_update_deliver_count(struct vfe_device *vfe_dev, rc = -EINVAL; goto done; } else { + /*After wm reload, we get bufdone for ping buffer*/ + if (stream_info->sw_ping_pong_bit == -1) + stream_info->sw_ping_pong_bit = 0; stream_info->undelivered_request_cnt--; if (pingpong_bit != stream_info->sw_ping_pong_bit) { pr_err("%s:%d ping pong bit actual %d sw %d\n", @@ -2296,6 +2252,8 @@ static void msm_isp_input_disable(struct vfe_device *vfe_dev, int cmd_type) ms_res->src_info[src_info->dual_hw_ms_info.index] = NULL; ms_res->num_src--; + if (ms_res->num_src == 0) + ms_res->dual_sync_mode = MSM_ISP_DUAL_CAM_ASYNC; src_info->dual_hw_ms_info.sync_state = MSM_ISP_DUAL_CAM_ASYNC; src_info->dual_hw_type = DUAL_NONE; @@ -2498,7 +2456,8 @@ int msm_isp_ab_ib_update_lpm_mode(struct vfe_device *vfe_dev, void *arg) intf = SRC_TO_INTF(stream_info->stream_src); vfe_dev->axi_data.src_info[intf].lpm = ab_ib_vote->lpm_mode; - if (stream_info->lpm_mode) { + if (stream_info->lpm_mode || + stream_info->state == INACTIVE) { spin_unlock_irqrestore(&stream_info->lock, flags); continue; @@ -2518,7 +2477,8 @@ int msm_isp_ab_ib_update_lpm_mode(struct vfe_device *vfe_dev, void *arg) intf = SRC_TO_INTF(stream_info->stream_src); vfe_dev->axi_data.src_info[intf].lpm = ab_ib_vote->lpm_mode; - if (stream_info->lpm_mode == 0) { + if (stream_info->lpm_mode == 0 || + stream_info->state == INACTIVE) { spin_unlock_irqrestore(&stream_info->lock, flags); continue; @@ -2609,6 +2569,7 @@ int msm_isp_axi_reset(struct vfe_device *vfe_dev, struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data; uint32_t bufq_handle = 0, bufq_id = 0; struct msm_isp_timestamp timestamp; + struct msm_vfe_frame_request_queue *queue_req; unsigned long flags; int vfe_idx; @@ -2645,8 +2606,18 @@ int 
msm_isp_axi_reset(struct vfe_device *vfe_dev, VFE_PING_FLAG); msm_isp_cfg_stream_scratch(stream_info, VFE_PONG_FLAG); + stream_info->undelivered_request_cnt = 0; spin_unlock_irqrestore(&stream_info->lock, flags); + while (!list_empty(&stream_info->request_q)) { + queue_req = list_first_entry_or_null( + &stream_info->request_q, + struct msm_vfe_frame_request_queue, list); + if (queue_req) { + queue_req->cmd_used = 0; + list_del(&queue_req->list); + } + } for (bufq_id = 0; bufq_id < VFE_BUF_QUEUE_MAX; bufq_id++) { bufq_handle = stream_info->bufq_handle[bufq_id]; @@ -3439,7 +3410,7 @@ static int msm_isp_request_frame(struct vfe_device *vfe_dev, } if ((vfe_dev->axi_data.src_info[frame_src].active && (frame_id != vfe_dev->axi_data.src_info[frame_src].frame_id + vfe_dev-> - axi_data.src_info[VFE_PIX_0].sof_counter_step)) || + axi_data.src_info[frame_src].sof_counter_step)) || ((!vfe_dev->axi_data.src_info[frame_src].active))) { pr_debug("%s:%d invalid frame id %d cur frame id %d pix %d\n", __func__, __LINE__, frame_id, @@ -3475,7 +3446,14 @@ static int msm_isp_request_frame(struct vfe_device *vfe_dev, spin_lock_irqsave(&stream_info->lock, flags); vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info); - if (stream_info->undelivered_request_cnt == 1) { + /* + * When wm reloaded, pingpong status register would be stale, pingpong + * status would be updated only after AXI_DONE interrupt processed. + * So, we should avoid reading value from pingpong status register + * until buf_done happens for ping buffer. + */ + if ((stream_info->undelivered_request_cnt == 1) && + (stream_info->sw_ping_pong_bit != -1)) { pingpong_status = vfe_dev->hw_info->vfe_ops.axi_ops.get_pingpong_status( vfe_dev); @@ -3548,10 +3526,25 @@ static int msm_isp_request_frame(struct vfe_device *vfe_dev, stream_info->vfe_dev[k]->vfe_base, wm_mask); } - stream_info->sw_ping_pong_bit = 0; + /* + * sw_ping_pong_bit is updated only when AXI_DONE. + * so now reset this bit to -1. + */ + stream_info->sw_ping_pong_bit = -1; } else if (stream_info->undelivered_request_cnt == 2) { - rc = msm_isp_cfg_ping_pong_address( - stream_info, pingpong_status); + if (stream_info->sw_ping_pong_bit == -1) { + /* + * This means wm is reloaded & ping buffer is + * already configured. And AXI_DONE for ping + * is still pending. So, config pong buffer + * now. 
+ */ + rc = msm_isp_cfg_ping_pong_address(stream_info, + VFE_PONG_FLAG); + } else { + rc = msm_isp_cfg_ping_pong_address( + stream_info, pingpong_status); + } if (rc) { stream_info->undelivered_request_cnt--; spin_unlock_irqrestore(&stream_info->lock, diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.h b/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.h index a8d4cfb43927..0f029c0d5178 100644 --- a/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.h +++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.h @@ -136,7 +136,7 @@ static inline void msm_isp_cfg_stream_scratch( } static inline struct msm_vfe_axi_stream *msm_isp_get_stream_common_data( - struct vfe_device *vfe_dev, int stream_idx) + struct vfe_device *vfe_dev, uint32_t stream_idx) { struct msm_vfe_common_dev_data *common_data = vfe_dev->common_data; struct msm_vfe_axi_stream *stream_info; diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.c index 648249916be4..f0831e64f250 100644 --- a/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.c +++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.c @@ -267,7 +267,9 @@ static int32_t msm_isp_stats_configure(struct vfe_device *vfe_dev, int result = 0; memset(&buf_event, 0, sizeof(struct msm_isp_event_data)); - buf_event.timestamp = ts->buf_time; + buf_event.timestamp = ts->event_time; + buf_event.mono_timestamp = ts->buf_time; + buf_event.frame_id = vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id; pingpong_status = vfe_dev->hw_info-> vfe_ops.stats_ops.get_pingpong_status(vfe_dev); diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c index f19e6dd1cb01..e87f2414a879 100644 --- a/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c +++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c @@ -479,8 +479,12 @@ static int msm_isp_cfg_pix(struct vfe_device *vfe_dev, if (input_cfg->d.pix_cfg.input_mux == CAMIF || input_cfg->d.pix_cfg.input_mux == TESTGEN) { - vfe_dev->axi_data.src_info[VFE_PIX_0].width = - input_cfg->d.pix_cfg.camif_cfg.pixels_per_line; + if (input_cfg->d.pix_cfg.input_mux == CAMIF) + vfe_dev->axi_data.src_info[VFE_PIX_0].width = + input_cfg->d.pix_cfg.camif_cfg.pixels_per_line; + if (input_cfg->d.pix_cfg.input_mux == TESTGEN) + vfe_dev->axi_data.src_info[VFE_PIX_0].width = + input_cfg->d.pix_cfg.testgen_cfg.pixels_per_line; if (input_cfg->d.pix_cfg.camif_cfg.subsample_cfg. sof_counter_step > 0) { vfe_dev->axi_data.src_info[VFE_PIX_0]. @@ -507,6 +511,9 @@ static int msm_isp_cfg_rdi(struct vfe_device *vfe_dev, return -EINVAL; } + vfe_dev->axi_data. 
+ src_info[input_cfg->input_src].sof_counter_step = 1; + vfe_dev->axi_data.src_info[input_cfg->input_src].pixel_clock = input_cfg->input_pix_clk; vfe_dev->hw_info->vfe_ops.core_ops.cfg_rdi_reg( diff --git a/drivers/media/platform/msm/camera_v2/ispif/msm_ispif.c b/drivers/media/platform/msm/camera_v2/ispif/msm_ispif.c index ab981f762dd2..9c3bd7b41ce9 100644 --- a/drivers/media/platform/msm/camera_v2/ispif/msm_ispif.c +++ b/drivers/media/platform/msm/camera_v2/ispif/msm_ispif.c @@ -86,6 +86,12 @@ static void msm_ispif_io_dump_reg(struct ispif_device *ispif) { if (!ispif->enb_dump_reg) return; + + if (!ispif->base) { + pr_err("%s: null pointer for the ispif base\n", __func__); + return; + } + msm_camera_io_dump(ispif->base, 0x250, 0); } @@ -1019,6 +1025,9 @@ static void msm_ispif_config_stereo(struct ispif_device *ispif, enum msm_ispif_vfe_intf vfe_intf; uint32_t stereo_3d_threshold = STEREO_DEFAULT_3D_THRESHOLD; + if (params->num > MAX_PARAM_ENTRIES) + return; + for (i = 0; i < params->num; i++) { vfe_intf = params->entries[i].vfe_intf; if (!msm_ispif_is_intf_valid(ispif->csid_version, vfe_intf)) { diff --git a/drivers/media/platform/msm/camera_v2/jpeg_10/msm_jpeg_hw.c b/drivers/media/platform/msm/camera_v2/jpeg_10/msm_jpeg_hw.c index e40869d41a5d..821833d53905 100644 --- a/drivers/media/platform/msm/camera_v2/jpeg_10/msm_jpeg_hw.c +++ b/drivers/media/platform/msm/camera_v2/jpeg_10/msm_jpeg_hw.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -903,26 +903,45 @@ int msm_jpeg_hw_exec_cmds(struct msm_jpeg_hw_cmd *hw_cmd_p, uint32_t m_cmds, void msm_jpeg_io_dump(void *base, int size) { - char line_str[128], *p_str; + char line_str[128]; void __iomem *addr = (void __iomem *)base; int i; u32 *p = (u32 *) addr; + size_t offset = 0; + size_t used = 0; + size_t min_range = 0; + size_t sizeof_line_str = sizeof(line_str); u32 data; JPEG_DBG_HIGH("%s:%d] %pK %d", __func__, __LINE__, addr, size); line_str[0] = '\0'; - p_str = line_str; for (i = 0; i < size/4; i++) { if (i % 4 == 0) { - snprintf(p_str, 12, "%08lx: ", (unsigned long)p); - p_str += 10; + used = snprintf(line_str + offset, + sizeof_line_str - offset, "%pK ", p); + if ((used < min_range) || + (offset + used >= sizeof_line_str)) { + JPEG_PR_ERR("%s\n", line_str); + offset = 0; + line_str[0] = '\0'; + } else { + offset += used; + } } data = msm_camera_io_r(p++); - snprintf(p_str, 12, "%08x ", data); - p_str += 9; + used = snprintf(line_str + offset, + sizeof_line_str - offset, "%08x ", data); + if ((used < min_range) || + (offset + used >= sizeof_line_str)) { + JPEG_PR_ERR("%s\n", line_str); + offset = 0; + line_str[0] = '\0'; + } else { + offset += used; + } if ((i + 1) % 4 == 0) { JPEG_DBG_HIGH("%s\n", line_str); line_str[0] = '\0'; - p_str = line_str; + offset = 0; } } if (line_str[0] != '\0') diff --git a/drivers/media/platform/msm/camera_v2/msm.c b/drivers/media/platform/msm/camera_v2/msm.c index f95cc37f5c2c..4e5dc66d94a9 100644 --- a/drivers/media/platform/msm/camera_v2/msm.c +++ b/drivers/media/platform/msm/camera_v2/msm.c @@ -32,7 +32,6 @@ #include "cam_hw_ops.h" #include - static struct v4l2_device *msm_v4l2_dev; static struct list_head ordered_sd_list; @@ -149,7 +148,7 @@ typedef int (*msm_queue_find_func)(void *d1, void *d2); #define msm_queue_find(queue, type, member, func, data) 
({\ unsigned long flags; \ struct msm_queue_head *__q = (queue); \ - type *node = 0; \ + type *node = NULL; \ typeof(node) __ret = NULL; \ msm_queue_find_func __f = (func); \ spin_lock_irqsave(&__q->lock, flags); \ @@ -279,22 +278,50 @@ void msm_delete_stream(unsigned int session_id, unsigned int stream_id) struct msm_session *session = NULL; struct msm_stream *stream = NULL; unsigned long flags; + int try_count = 0; session = msm_queue_find(msm_session_q, struct msm_session, list, __msm_queue_find_session, &session_id); + if (!session) return; - stream = msm_queue_find(&session->stream_q, struct msm_stream, - list, __msm_queue_find_stream, &stream_id); - if (!stream) - return; - spin_lock_irqsave(&(session->stream_q.lock), flags); - list_del_init(&stream->list); - session->stream_q.len--; - kfree(stream); - stream = NULL; - spin_unlock_irqrestore(&(session->stream_q.lock), flags); + while (1) { + unsigned long wl_flags; + + if (try_count > 5) { + pr_err("%s : not able to delete stream %d\n", + __func__, __LINE__); + break; + } + + write_lock_irqsave(&session->stream_rwlock, wl_flags); + try_count++; + stream = msm_queue_find(&session->stream_q, struct msm_stream, + list, __msm_queue_find_stream, &stream_id); + + if (!stream) { + write_unlock_irqrestore(&session->stream_rwlock, + wl_flags); + return; + } + + if (msm_vb2_get_stream_state(stream) != 1) { + write_unlock_irqrestore(&session->stream_rwlock, + wl_flags); + continue; + } + + spin_lock_irqsave(&(session->stream_q.lock), flags); + list_del_init(&stream->list); + session->stream_q.len--; + kfree(stream); + stream = NULL; + spin_unlock_irqrestore(&(session->stream_q.lock), flags); + write_unlock_irqrestore(&session->stream_rwlock, wl_flags); + break; + } + } EXPORT_SYMBOL(msm_delete_stream); @@ -444,6 +471,7 @@ int msm_create_session(unsigned int session_id, struct video_device *vdev) mutex_init(&session->lock); mutex_init(&session->lock_q); mutex_init(&session->close_lock); + rwlock_init(&session->stream_rwlock); if (gpu_limit) { session->sysfs_pwr_limit = kgsl_pwr_limits_add(KGSL_DEVICE_3D0); @@ -1048,17 +1076,25 @@ static struct v4l2_file_operations msm_fops = { #endif }; -struct msm_stream *msm_get_stream(unsigned int session_id, - unsigned int stream_id) +struct msm_session *msm_get_session(unsigned int session_id) { struct msm_session *session; - struct msm_stream *stream; session = msm_queue_find(msm_session_q, struct msm_session, list, __msm_queue_find_session, &session_id); if (!session) return ERR_PTR(-EINVAL); + return session; +} +EXPORT_SYMBOL(msm_get_session); + + +struct msm_stream *msm_get_stream(struct msm_session *session, + unsigned int stream_id) +{ + struct msm_stream *stream; + stream = msm_queue_find(&session->stream_q, struct msm_stream, list, __msm_queue_find_stream, &stream_id); @@ -1115,6 +1151,34 @@ struct msm_stream *msm_get_stream_from_vb2q(struct vb2_queue *q) } EXPORT_SYMBOL(msm_get_stream_from_vb2q); +struct msm_session *msm_get_session_from_vb2q(struct vb2_queue *q) +{ + struct msm_session *session; + struct msm_stream *stream; + unsigned long flags1; + unsigned long flags2; + + spin_lock_irqsave(&msm_session_q->lock, flags1); + list_for_each_entry(session, &(msm_session_q->list), list) { + spin_lock_irqsave(&(session->stream_q.lock), flags2); + list_for_each_entry( + stream, &(session->stream_q.list), list) { + if (stream->vb2_q == q) { + spin_unlock_irqrestore + (&(session->stream_q.lock), flags2); + spin_unlock_irqrestore + (&msm_session_q->lock, flags1); + return session; + } + } + 
spin_unlock_irqrestore(&(session->stream_q.lock), flags2); + } + spin_unlock_irqrestore(&msm_session_q->lock, flags1); + return NULL; +} +EXPORT_SYMBOL(msm_get_session_from_vb2q); + + #ifdef CONFIG_COMPAT long msm_copy_camera_private_ioctl_args(unsigned long arg, struct msm_camera_private_ioctl_arg *k_ioctl, diff --git a/drivers/media/platform/msm/camera_v2/msm.h b/drivers/media/platform/msm/camera_v2/msm.h index 7474cb119147..dce47bc7249c 100644 --- a/drivers/media/platform/msm/camera_v2/msm.h +++ b/drivers/media/platform/msm/camera_v2/msm.h @@ -111,6 +111,7 @@ struct msm_session { struct mutex lock; struct mutex lock_q; struct mutex close_lock; + rwlock_t stream_rwlock; struct kgsl_pwr_limit *sysfs_pwr_limit; }; @@ -129,11 +130,13 @@ int msm_create_stream(unsigned int session_id, void msm_delete_stream(unsigned int session_id, unsigned int stream_id); int msm_create_command_ack_q(unsigned int session_id, unsigned int stream_id); void msm_delete_command_ack_q(unsigned int session_id, unsigned int stream_id); -struct msm_stream *msm_get_stream(unsigned int session_id, +struct msm_session *msm_get_session(unsigned int session_id); +struct msm_stream *msm_get_stream(struct msm_session *session, unsigned int stream_id); struct vb2_queue *msm_get_stream_vb2q(unsigned int session_id, unsigned int stream_id); struct msm_stream *msm_get_stream_from_vb2q(struct vb2_queue *q); +struct msm_session *msm_get_session_from_vb2q(struct vb2_queue *q); struct msm_session *msm_session_find(unsigned int session_id); #ifdef CONFIG_COMPAT long msm_copy_camera_private_ioctl_args(unsigned long arg, diff --git a/drivers/media/platform/msm/camera_v2/msm_vb2/msm_vb2.c b/drivers/media/platform/msm/camera_v2/msm_vb2/msm_vb2.c index c779ee46c19a..e271c7fcd1b6 100644 --- a/drivers/media/platform/msm/camera_v2/msm_vb2/msm_vb2.c +++ b/drivers/media/platform/msm/camera_v2/msm_vb2/msm_vb2.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -44,17 +44,26 @@ static int msm_vb2_queue_setup(struct vb2_queue *q, int msm_vb2_buf_init(struct vb2_buffer *vb) { struct msm_stream *stream; + struct msm_session *session; struct msm_vb2_buffer *msm_vb2_buf; struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); + unsigned long rl_flags; + + session = msm_get_session_from_vb2q(vb->vb2_queue); + if (IS_ERR_OR_NULL(session)) + return -EINVAL; + + read_lock_irqsave(&session->stream_rwlock, rl_flags); stream = msm_get_stream_from_vb2q(vb->vb2_queue); if (!stream) { pr_err("%s: Couldn't find stream\n", __func__); + read_unlock_irqrestore(&session->stream_rwlock, rl_flags); return -EINVAL; } msm_vb2_buf = container_of(vbuf, struct msm_vb2_buffer, vb2_v4l2_buf); msm_vb2_buf->in_freeq = 0; - + read_unlock_irqrestore(&session->stream_rwlock, rl_flags); return 0; } @@ -62,7 +71,8 @@ static void msm_vb2_buf_queue(struct vb2_buffer *vb) { struct msm_vb2_buffer *msm_vb2; struct msm_stream *stream; - unsigned long flags; + struct msm_session *session; + unsigned long flags, rl_flags; struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); msm_vb2 = container_of(vbuf, struct msm_vb2_buffer, vb2_v4l2_buf); @@ -71,34 +81,50 @@ static void msm_vb2_buf_queue(struct vb2_buffer *vb) return; } + session = msm_get_session_from_vb2q(vb->vb2_queue); + if (IS_ERR_OR_NULL(session)) + return; + + read_lock_irqsave(&session->stream_rwlock, rl_flags); + stream = msm_get_stream_from_vb2q(vb->vb2_queue); if (!stream) { pr_err("%s:%d] NULL stream", __func__, __LINE__); + read_unlock_irqrestore(&session->stream_rwlock, rl_flags); return; } spin_lock_irqsave(&stream->stream_lock, flags); list_add_tail(&msm_vb2->list, &stream->queued_list); spin_unlock_irqrestore(&stream->stream_lock, flags); + read_unlock_irqrestore(&session->stream_rwlock, rl_flags); } static void msm_vb2_buf_finish(struct vb2_buffer *vb) { struct msm_vb2_buffer *msm_vb2; struct msm_stream *stream; - unsigned long flags; + struct msm_session *session; + unsigned long flags, rl_flags; struct msm_vb2_buffer *msm_vb2_entry, *temp; struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); msm_vb2 = container_of(vbuf, struct msm_vb2_buffer, vb2_v4l2_buf); if (!msm_vb2) { pr_err("%s:%d] vb2_buf NULL", __func__, __LINE__); - return; + return; } + session = msm_get_session_from_vb2q(vb->vb2_queue); + if (IS_ERR_OR_NULL(session)) + return; + + read_lock_irqsave(&session->stream_rwlock, rl_flags); + stream = msm_get_stream_from_vb2q(vb->vb2_queue); if (!stream) { pr_err("%s:%d] NULL stream", __func__, __LINE__); + read_unlock_irqrestore(&session->stream_rwlock, rl_flags); return; } @@ -111,6 +137,7 @@ static void msm_vb2_buf_finish(struct vb2_buffer *vb) } } spin_unlock_irqrestore(&stream->stream_lock, flags); + read_unlock_irqrestore(&session->stream_rwlock, rl_flags); return; } @@ -118,12 +145,20 @@ static void msm_vb2_stop_stream(struct vb2_queue *q) { struct msm_vb2_buffer *msm_vb2, *temp; struct msm_stream *stream; - unsigned long flags; + struct msm_session *session; + unsigned long flags, rl_flags; struct vb2_v4l2_buffer *vb2_v4l2_buf; + session = msm_get_session_from_vb2q(q); + if (IS_ERR_OR_NULL(session)) + return; + + read_lock_irqsave(&session->stream_rwlock, rl_flags); + stream = msm_get_stream_from_vb2q(q); if (!stream) { pr_err_ratelimited("%s:%d] NULL stream", __func__, __LINE__); + read_unlock_irqrestore(&session->stream_rwlock, rl_flags); return; } @@ -143,8 
+178,28 @@ static void msm_vb2_stop_stream(struct vb2_queue *q) msm_vb2->in_freeq = 0; } spin_unlock_irqrestore(&stream->stream_lock, flags); + read_unlock_irqrestore(&session->stream_rwlock, rl_flags); } +int msm_vb2_get_stream_state(struct msm_stream *stream) +{ + struct msm_vb2_buffer *msm_vb2, *temp; + unsigned long flags; + int rc = 1; + + spin_lock_irqsave(&stream->stream_lock, flags); + list_for_each_entry_safe(msm_vb2, temp, &(stream->queued_list), list) { + if (msm_vb2->in_freeq != 0) { + rc = 0; + break; + } + } + spin_unlock_irqrestore(&stream->stream_lock, flags); + return rc; +} +EXPORT_SYMBOL(msm_vb2_get_stream_state); + + static struct vb2_ops msm_vb2_get_q_op = { .queue_setup = msm_vb2_queue_setup, .buf_init = msm_vb2_buf_init, @@ -198,14 +253,23 @@ static struct vb2_v4l2_buffer *msm_vb2_get_buf(int session_id, unsigned int stream_id) { struct msm_stream *stream; + struct msm_session *session; struct vb2_v4l2_buffer *vb2_v4l2_buf = NULL; struct msm_vb2_buffer *msm_vb2 = NULL; - unsigned long flags; + unsigned long flags, rl_flags; - stream = msm_get_stream(session_id, stream_id); - if (IS_ERR_OR_NULL(stream)) + session = msm_get_session(session_id); + if (IS_ERR_OR_NULL(session)) return NULL; + read_lock_irqsave(&session->stream_rwlock, rl_flags); + + stream = msm_get_stream(session, stream_id); + if (IS_ERR_OR_NULL(stream)) { + read_unlock_irqrestore(&session->stream_rwlock, rl_flags); + return NULL; + } + spin_lock_irqsave(&stream->stream_lock, flags); if (!stream->vb2_q) { @@ -228,6 +292,7 @@ static struct vb2_v4l2_buffer *msm_vb2_get_buf(int session_id, vb2_v4l2_buf = NULL; end: spin_unlock_irqrestore(&stream->stream_lock, flags); + read_unlock_irqrestore(&session->stream_rwlock, rl_flags); return vb2_v4l2_buf; } @@ -235,14 +300,24 @@ static struct vb2_v4l2_buffer *msm_vb2_get_buf_by_idx(int session_id, unsigned int stream_id, uint32_t index) { struct msm_stream *stream; + struct msm_session *session; struct vb2_v4l2_buffer *vb2_v4l2_buf = NULL; struct msm_vb2_buffer *msm_vb2 = NULL; - unsigned long flags; + unsigned long flags, rl_flags; - stream = msm_get_stream(session_id, stream_id); - if (IS_ERR_OR_NULL(stream)) + session = msm_get_session(session_id); + if (IS_ERR_OR_NULL(session)) return NULL; + read_lock_irqsave(&session->stream_rwlock, rl_flags); + + stream = msm_get_stream(session, stream_id); + + if (IS_ERR_OR_NULL(stream)) { + read_unlock_irqrestore(&session->stream_rwlock, rl_flags); + return NULL; + } + spin_lock_irqsave(&stream->stream_lock, flags); if (!stream->vb2_q) { @@ -263,6 +338,7 @@ static struct vb2_v4l2_buffer *msm_vb2_get_buf_by_idx(int session_id, vb2_v4l2_buf = NULL; end: spin_unlock_irqrestore(&stream->stream_lock, flags); + read_unlock_irqrestore(&session->stream_rwlock, rl_flags); return vb2_v4l2_buf; } @@ -270,14 +346,24 @@ static int msm_vb2_put_buf(struct vb2_v4l2_buffer *vb, int session_id, unsigned int stream_id) { struct msm_stream *stream; + struct msm_session *session; struct msm_vb2_buffer *msm_vb2; struct vb2_v4l2_buffer *vb2_v4l2_buf = NULL; int rc = 0; - unsigned long flags; - stream = msm_get_stream(session_id, stream_id); - if (IS_ERR_OR_NULL(stream)) + unsigned long flags, rl_flags; + + session = msm_get_session(session_id); + if (IS_ERR_OR_NULL(session)) return -EINVAL; + read_lock_irqsave(&session->stream_rwlock, rl_flags); + + stream = msm_get_stream(session, stream_id); + if (IS_ERR_OR_NULL(stream)) { + read_unlock_irqrestore(&session->stream_rwlock, rl_flags); + return -EINVAL; + } + 
spin_lock_irqsave(&stream->stream_lock, flags); if (vb) { list_for_each_entry(msm_vb2, &(stream->queued_list), list) { @@ -289,6 +375,8 @@ static int msm_vb2_put_buf(struct vb2_v4l2_buffer *vb, int session_id, pr_err("VB buffer is INVALID vb=%pK, ses_id=%d, str_id=%d\n", vb, session_id, stream_id); spin_unlock_irqrestore(&stream->stream_lock, flags); + read_unlock_irqrestore(&session->stream_rwlock, + rl_flags); return -EINVAL; } msm_vb2 = @@ -305,6 +393,7 @@ static int msm_vb2_put_buf(struct vb2_v4l2_buffer *vb, int session_id, rc = -EINVAL; } spin_unlock_irqrestore(&stream->stream_lock, flags); + read_unlock_irqrestore(&session->stream_rwlock, rl_flags); return rc; } @@ -312,15 +401,25 @@ static int msm_vb2_buf_done(struct vb2_v4l2_buffer *vb, int session_id, unsigned int stream_id, uint32_t sequence, struct timeval *ts, uint32_t reserved) { - unsigned long flags; + unsigned long flags, rl_flags; struct msm_vb2_buffer *msm_vb2; struct msm_stream *stream; + struct msm_session *session; struct vb2_v4l2_buffer *vb2_v4l2_buf = NULL; int rc = 0; - stream = msm_get_stream(session_id, stream_id); - if (IS_ERR_OR_NULL(stream)) + session = msm_get_session(session_id); + if (IS_ERR_OR_NULL(session)) return -EINVAL; + + read_lock_irqsave(&session->stream_rwlock, rl_flags); + + stream = msm_get_stream(session, stream_id); + if (IS_ERR_OR_NULL(stream)) { + read_unlock_irqrestore(&session->stream_rwlock, rl_flags); + return -EINVAL; + } + spin_lock_irqsave(&stream->stream_lock, flags); if (vb) { list_for_each_entry(msm_vb2, &(stream->queued_list), list) { @@ -332,6 +431,8 @@ static int msm_vb2_buf_done(struct vb2_v4l2_buffer *vb, int session_id, pr_err("VB buffer is INVALID ses_id=%d, str_id=%d, vb=%pK\n", session_id, stream_id, vb); spin_unlock_irqrestore(&stream->stream_lock, flags); + read_unlock_irqrestore(&session->stream_rwlock, + rl_flags); return -EINVAL; } msm_vb2 = @@ -352,6 +453,7 @@ static int msm_vb2_buf_done(struct vb2_v4l2_buffer *vb, int session_id, rc = -EINVAL; } spin_unlock_irqrestore(&stream->stream_lock, flags); + read_unlock_irqrestore(&session->stream_rwlock, rl_flags); return rc; } @@ -359,15 +461,24 @@ long msm_vb2_return_buf_by_idx(int session_id, unsigned int stream_id, uint32_t index) { struct msm_stream *stream; + struct msm_session *session; struct vb2_v4l2_buffer *vb2_v4l2_buf = NULL; struct msm_vb2_buffer *msm_vb2 = NULL; - unsigned long flags; + unsigned long flags, rl_flags; long rc = -EINVAL; - stream = msm_get_stream(session_id, stream_id); - if (IS_ERR_OR_NULL(stream)) + session = msm_get_session(session_id); + if (IS_ERR_OR_NULL(session)) return rc; + read_lock_irqsave(&session->stream_rwlock, rl_flags); + + stream = msm_get_stream(session, stream_id); + if (IS_ERR_OR_NULL(stream)) { + read_unlock_irqrestore(&session->stream_rwlock, rl_flags); + return -EINVAL; + } + spin_lock_irqsave(&stream->stream_lock, flags); if (!stream->vb2_q) { @@ -393,20 +504,31 @@ long msm_vb2_return_buf_by_idx(int session_id, unsigned int stream_id, end: spin_unlock_irqrestore(&stream->stream_lock, flags); + read_unlock_irqrestore(&session->stream_rwlock, rl_flags); return rc; } EXPORT_SYMBOL(msm_vb2_return_buf_by_idx); static int msm_vb2_flush_buf(int session_id, unsigned int stream_id) { - unsigned long flags; + unsigned long flags, rl_flags; struct msm_vb2_buffer *msm_vb2; struct msm_stream *stream; + struct msm_session *session; struct vb2_v4l2_buffer *vb2_v4l2_buf = NULL; - stream = msm_get_stream(session_id, stream_id); - if (IS_ERR_OR_NULL(stream)) + session = 
msm_get_session(session_id); + if (IS_ERR_OR_NULL(session)) return -EINVAL; + + read_lock_irqsave(&session->stream_rwlock, rl_flags); + + stream = msm_get_stream(session, stream_id); + if (IS_ERR_OR_NULL(stream)) { + read_unlock_irqrestore(&session->stream_rwlock, rl_flags); + return -EINVAL; + } + spin_lock_irqsave(&stream->stream_lock, flags); list_for_each_entry(msm_vb2, &(stream->queued_list), list) { vb2_v4l2_buf = &(msm_vb2->vb2_v4l2_buf); @@ -415,6 +537,7 @@ static int msm_vb2_flush_buf(int session_id, unsigned int stream_id) msm_vb2->in_freeq = 0; } spin_unlock_irqrestore(&stream->stream_lock, flags); + read_unlock_irqrestore(&session->stream_rwlock, rl_flags); return 0; } diff --git a/drivers/media/platform/msm/camera_v2/msm_vb2/msm_vb2.h b/drivers/media/platform/msm/camera_v2/msm_vb2/msm_vb2.h index 53511d5416d7..c65cb58128d9 100644 --- a/drivers/media/platform/msm/camera_v2/msm_vb2/msm_vb2.h +++ b/drivers/media/platform/msm/camera_v2/msm_vb2/msm_vb2.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -67,5 +67,6 @@ struct vb2_mem_ops *msm_vb2_get_q_mem_ops(void); int msm_vb2_request_cb(struct msm_sd_req_vb2_q *req_sd); long msm_vb2_return_buf_by_idx(int session_id, unsigned int stream_id, uint32_t index); +int msm_vb2_get_stream_state(struct msm_stream *stream); #endif /*_MSM_VB_H */ diff --git a/drivers/media/platform/msm/camera_v2/sensor/actuator/msm_actuator.c b/drivers/media/platform/msm/camera_v2/sensor/actuator/msm_actuator.c index cd48f871eb79..b1bea12c2cc3 100644 --- a/drivers/media/platform/msm/camera_v2/sensor/actuator/msm_actuator.c +++ b/drivers/media/platform/msm/camera_v2/sensor/actuator/msm_actuator.c @@ -573,7 +573,10 @@ static int32_t msm_actuator_move_focus( CDBG("called, dir %d, num_steps %d\n", dir, num_steps); - if (dest_step_pos == a_ctrl->curr_step_pos) + if ((dest_step_pos == a_ctrl->curr_step_pos) || + ((dest_step_pos <= a_ctrl->total_steps) && + (a_ctrl->step_position_table[dest_step_pos] == + a_ctrl->step_position_table[a_ctrl->curr_step_pos]))) return rc; if ((sign_dir > MSM_ACTUATOR_MOVE_SIGNED_NEAR) || @@ -1704,6 +1707,10 @@ static long msm_actuator_subdev_do_ioctl( parg = &actuator_data; break; } + break; + case VIDIOC_MSM_ACTUATOR_CFG: + pr_err("%s: invalid cmd 0x%x received\n", __func__, cmd); + return -EINVAL; } rc = msm_actuator_subdev_ioctl(sd, cmd, parg); diff --git a/drivers/media/platform/msm/camera_v2/sensor/cci/msm_cci.c b/drivers/media/platform/msm/camera_v2/sensor/cci/msm_cci.c index 75043e1b0427..3cb6b55ccc8c 100644 --- a/drivers/media/platform/msm/camera_v2/sensor/cci/msm_cci.c +++ b/drivers/media/platform/msm/camera_v2/sensor/cci/msm_cci.c @@ -403,6 +403,10 @@ static int32_t msm_cci_calc_cmd_len(struct cci_device *cci_dev, if (cmd->reg_addr + 1 == (cmd+1)->reg_addr) { len += data_len; + if (len > cci_dev->payload_size) { + len = len - data_len; + break; + } *pack += data_len; } else break; @@ -1602,6 +1606,12 @@ static int32_t msm_cci_write(struct v4l2_subdev *sd, return rc; } + if (cci_dev->cci_state != CCI_STATE_ENABLED) { + pr_err("%s invalid cci state %d\n", + __func__, cci_dev->cci_state); + return -EINVAL; + } + if (c_ctrl->cci_info->cci_i2c_master >= MASTER_MAX || c_ctrl->cci_info->cci_i2c_master < 0) { pr_err("%s:%d Invalid I2C master addr\n", __func__, __LINE__); diff --git 
a/drivers/media/platform/msm/camera_v2/sensor/flash/msm_flash.c b/drivers/media/platform/msm/camera_v2/sensor/flash/msm_flash.c index 5376e1e4b6a4..223ddf39dce8 100644 --- a/drivers/media/platform/msm/camera_v2/sensor/flash/msm_flash.c +++ b/drivers/media/platform/msm/camera_v2/sensor/flash/msm_flash.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2009-2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2009-2017, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -152,6 +152,12 @@ static int32_t msm_flash_i2c_write_table( conf_array.reg_setting = settings->reg_setting_a; conf_array.size = settings->size; + /* Validate the settings size */ + if ((!conf_array.size) || (conf_array.size > MAX_I2C_REG_SET)) { + pr_err("failed: invalid size %d", conf_array.size); + return -EINVAL; + } + return flash_ctrl->flash_i2c_client.i2c_func_tbl->i2c_write_table( &flash_ctrl->flash_i2c_client, &conf_array); } @@ -502,18 +508,42 @@ static int32_t msm_flash_init( return 0; } +static int32_t msm_flash_init_prepare( + struct msm_flash_ctrl_t *flash_ctrl, + struct msm_flash_cfg_data_t *flash_data) +{ #ifdef CONFIG_COMPAT -static int32_t msm_flash_init_prepare( - struct msm_flash_ctrl_t *flash_ctrl, - struct msm_flash_cfg_data_t *flash_data) -{ + struct msm_flash_cfg_data_t flash_data_k; + struct msm_flash_init_info_t flash_init_info; + int32_t i = 0; + + if (!is_compat_task()) { + /*for 64-bit usecase,it need copy the data to local memory*/ + flash_data_k.cfg_type = flash_data->cfg_type; + for (i = 0; i < MAX_LED_TRIGGERS; i++) { + flash_data_k.flash_current[i] = + flash_data->flash_current[i]; + flash_data_k.flash_duration[i] = + flash_data->flash_duration[i]; + } + + flash_data_k.cfg.flash_init_info = &flash_init_info; + if (copy_from_user(&flash_init_info, + (void __user *)(flash_data->cfg.flash_init_info), + sizeof(struct msm_flash_init_info_t))) { + pr_err("%s copy_from_user failed %d\n", + __func__, __LINE__); + return -EFAULT; + } + return msm_flash_init(flash_ctrl, &flash_data_k); + } + /* + * for 32-bit usecase,it already copy the userspace + * data to local memory in msm_flash_subdev_do_ioctl() + * so here do not need copy from user + */ return msm_flash_init(flash_ctrl, flash_data); -} #else -static int32_t msm_flash_init_prepare( - struct msm_flash_ctrl_t *flash_ctrl, - struct msm_flash_cfg_data_t *flash_data) -{ struct msm_flash_cfg_data_t flash_data_k; struct msm_flash_init_info_t flash_init_info; int32_t i = 0; @@ -528,15 +558,15 @@ static int32_t msm_flash_init_prepare( flash_data_k.cfg.flash_init_info = &flash_init_info; if (copy_from_user(&flash_init_info, - (void *)(flash_data->cfg.flash_init_info), + (void __user *)(flash_data->cfg.flash_init_info), sizeof(struct msm_flash_init_info_t))) { pr_err("%s copy_from_user failed %d\n", __func__, __LINE__); return -EFAULT; } return msm_flash_init(flash_ctrl, &flash_data_k); -} #endif +} static int32_t msm_flash_prepare( struct msm_flash_ctrl_t *flash_ctrl) @@ -1144,13 +1174,13 @@ static long msm_flash_subdev_do_ioctl( sd = vdev_to_v4l2_subdev(vdev); u32 = (struct msm_flash_cfg_data_t32 *)arg; - flash_data.cfg_type = u32->cfg_type; - for (i = 0; i < MAX_LED_TRIGGERS; i++) { - flash_data.flash_current[i] = u32->flash_current[i]; - flash_data.flash_duration[i] = u32->flash_duration[i]; - } switch (cmd) { case VIDIOC_MSM_FLASH_CFG32: + flash_data.cfg_type = u32->cfg_type; + for (i = 0; i < MAX_LED_TRIGGERS; i++) { 
+ flash_data.flash_current[i] = u32->flash_current[i]; + flash_data.flash_duration[i] = u32->flash_duration[i]; + } cmd = VIDIOC_MSM_FLASH_CFG; switch (flash_data.cfg_type) { case CFG_FLASH_OFF: diff --git a/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_dt_util.c b/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_dt_util.c index 3f079fe2c173..457bd1730232 100644 --- a/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_dt_util.c +++ b/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_dt_util.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -575,6 +575,8 @@ int msm_camera_get_dt_power_setting_data(struct device_node *of_node, ps[i].seq_val = SENSOR_GPIO_CUSTOM1; else if (!strcmp(seq_name, "sensor_gpio_custom2")) ps[i].seq_val = SENSOR_GPIO_CUSTOM2; + else if (!strcmp(seq_name, "sensor_gpio_custom3")) + ps[i].seq_val = SENSOR_GPIO_CUSTOM3; else rc = -EILSEQ; break; @@ -1078,6 +1080,27 @@ int msm_camera_init_gpio_pin_tbl(struct device_node *of_node, rc = 0; } + rc = of_property_read_u32(of_node, "qcom,gpio-custom3", &val); + if (rc != -EINVAL) { + if (rc < 0) { + pr_err("%s:%d read qcom,gpio-custom3 failed rc %d\n", + __func__, __LINE__, rc); + goto ERROR; + } else if (val >= gpio_array_size) { + pr_err("%s:%d qcom,gpio-custom3 invalid %d\n", + __func__, __LINE__, val); + rc = -EINVAL; + goto ERROR; + } + gconf->gpio_num_info->gpio_num[SENSOR_GPIO_CUSTOM3] = + gpio_array[val]; + gconf->gpio_num_info->valid[SENSOR_GPIO_CUSTOM3] = 1; + CDBG("%s qcom,gpio-custom3 %d\n", __func__, + gconf->gpio_num_info->gpio_num[SENSOR_GPIO_CUSTOM3]); + } else { + rc = 0; + } + return rc; ERROR: diff --git a/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_tz_i2c.c b/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_tz_i2c.c index b3e5dc7f9cb8..c5cdee1bf706 100644 --- a/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_tz_i2c.c +++ b/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_tz_i2c.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2016, 2017 The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -520,14 +520,16 @@ int32_t msm_camera_tz_i2c_power_up( msm_camera_tz_get_ta_handle(), sensor_id, &sensor_secure); - if (!rc && sensor_secure) + if (!rc && sensor_secure) { /* Sensor validated by TA*/ sensor_info[sensor_id].ready++; + msm_camera_tz_unlock(); + } else { + msm_camera_tz_unlock(); msm_camera_tz_unload_ta(); rc = -EFAULT; } - msm_camera_tz_unlock(); } } else rc = -EFAULT; diff --git a/drivers/media/platform/msm/camera_v2/sensor/msm_sensor.c b/drivers/media/platform/msm/camera_v2/sensor/msm_sensor.c index e1143c356721..fcef05374098 100644 --- a/drivers/media/platform/msm/camera_v2/sensor/msm_sensor.c +++ b/drivers/media/platform/msm/camera_v2/sensor/msm_sensor.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2011-2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2011-2017, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -584,7 +584,12 @@ static int msm_sensor_config32(struct msm_sensor_ctrl_t *s_ctrl, pr_err("%s:%d: i2c_read failed\n", __func__, __LINE__); break; } - read_config_ptr->data = local_data; + if (copy_to_user(&read_config_ptr->data, + &local_data, sizeof(local_data))) { + pr_err("%s:%d failed\n", __func__, __LINE__); + rc = -EFAULT; + break; + } break; } case CFG_SLAVE_WRITE_I2C_ARRAY: { @@ -1098,7 +1103,12 @@ int msm_sensor_config(struct msm_sensor_ctrl_t *s_ctrl, void __user *argp) pr_err("%s:%d: i2c_read failed\n", __func__, __LINE__); break; } - read_config_ptr->data = local_data; + if (copy_to_user(&read_config_ptr->data, + &local_data, sizeof(local_data))) { + pr_err("%s:%d failed\n", __func__, __LINE__); + rc = -EFAULT; + break; + } break; } case CFG_SLAVE_WRITE_I2C_ARRAY: { diff --git a/drivers/media/platform/msm/camera_v2/sensor/ois/msm_ois.c b/drivers/media/platform/msm/camera_v2/sensor/ois/msm_ois.c index c94ee509631f..302a7b16bc26 100644 --- a/drivers/media/platform/msm/camera_v2/sensor/ois/msm_ois.c +++ b/drivers/media/platform/msm/camera_v2/sensor/ois/msm_ois.c @@ -615,11 +615,13 @@ static long msm_ois_subdev_ioctl(struct v4l2_subdev *sd, pr_err("o_ctrl->i2c_client.i2c_func_tbl NULL\n"); return -EINVAL; } + mutex_lock(o_ctrl->ois_mutex); rc = msm_ois_power_down(o_ctrl); if (rc < 0) { pr_err("%s:%d OIS Power down failed\n", __func__, __LINE__); } + mutex_unlock(o_ctrl->ois_mutex); return msm_ois_close(sd, NULL); default: return -ENOIOCTLCMD; @@ -774,11 +776,10 @@ static long msm_ois_subdev_do_ioctl( u32 = (struct msm_ois_cfg_data32 *)arg; parg = arg; - ois_data.cfgtype = u32->cfgtype; - switch (cmd) { case VIDIOC_MSM_OIS_CFG32: cmd = VIDIOC_MSM_OIS_CFG; + ois_data.cfgtype = u32->cfgtype; switch (u32->cfgtype) { case CFG_OIS_CONTROL: @@ -812,7 +813,6 @@ static long msm_ois_subdev_do_ioctl( settings.reg_setting = compat_ptr(settings32.reg_setting); - ois_data.cfgtype = u32->cfgtype; ois_data.cfg.settings = &settings; parg = &ois_data; break; @@ -820,6 +820,10 @@ static long msm_ois_subdev_do_ioctl( parg = &ois_data; break; } + break; + case VIDIOC_MSM_OIS_CFG: + pr_err("%s: invalid cmd 0x%x received\n", __func__, cmd); + return -EINVAL; } rc = msm_ois_subdev_ioctl(sd, cmd, parg); diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c index 62980f345f60..abf20aef1256 100644 --- a/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c +++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c @@ -621,9 +621,10 @@ static int sde_rotator_secure_session_ctrl(bool enable) if (mdata->wait_for_transition && mdata->secure_session_ctrl && mdata->callback_request) { ret = mdata->wait_for_transition(mdata->sec_cam_en, enable); - if (ret) { + if (ret < 0) { SDEROT_ERR("failed Secure wait for transition %d\n", ret); + ret = -EPERM; } else { if (mdata->sec_cam_en ^ enable) { mdata->sec_cam_en = enable; @@ -984,6 +985,7 @@ static int sde_rotator_init_queue(struct sde_rot_mgr *mgr) { int i, size, ret = 0; char name[32]; + struct sched_param param = { .sched_priority = 5 }; size = sizeof(struct sde_rot_queue) * mgr->queue_count; mgr->commitq = devm_kzalloc(mgr->device, size, GFP_KERNEL); @@ -994,11 +996,21 @@ static int sde_rotator_init_queue(struct sde_rot_mgr *mgr) snprintf(name, sizeof(name), "rot_commitq_%d_%d", mgr->device->id, i); SDEROT_DBG("work queue name=%s\n", 
name); - mgr->commitq[i].rot_work_queue = - alloc_ordered_workqueue("%s", - WQ_MEM_RECLAIM | WQ_HIGHPRI, name); - if (!mgr->commitq[i].rot_work_queue) { + init_kthread_worker(&mgr->commitq[i].rot_kw); + mgr->commitq[i].rot_thread = kthread_run(kthread_worker_fn, + &mgr->commitq[i].rot_kw, name); + if (IS_ERR(mgr->commitq[i].rot_thread)) { ret = -EPERM; + mgr->commitq[i].rot_thread = NULL; + break; + } + + ret = sched_setscheduler(mgr->commitq[i].rot_thread, + SCHED_FIFO, ¶m); + if (ret) { + SDEROT_ERR( + "failed to set kthread priority for commitq %d\n", + ret); break; } @@ -1015,10 +1027,21 @@ static int sde_rotator_init_queue(struct sde_rot_mgr *mgr) snprintf(name, sizeof(name), "rot_doneq_%d_%d", mgr->device->id, i); SDEROT_DBG("work queue name=%s\n", name); - mgr->doneq[i].rot_work_queue = alloc_ordered_workqueue("%s", - WQ_MEM_RECLAIM | WQ_HIGHPRI, name); - if (!mgr->doneq[i].rot_work_queue) { + init_kthread_worker(&mgr->doneq[i].rot_kw); + mgr->doneq[i].rot_thread = kthread_run(kthread_worker_fn, + &mgr->doneq[i].rot_kw, name); + if (IS_ERR(mgr->doneq[i].rot_thread)) { ret = -EPERM; + mgr->doneq[i].rot_thread = NULL; + break; + } + + ret = sched_setscheduler(mgr->doneq[i].rot_thread, + SCHED_FIFO, ¶m); + if (ret) { + SDEROT_ERR( + "failed to set kthread priority for doneq %d\n", + ret); break; } @@ -1034,18 +1057,20 @@ static void sde_rotator_deinit_queue(struct sde_rot_mgr *mgr) if (mgr->commitq) { for (i = 0; i < mgr->queue_count; i++) { - if (mgr->commitq[i].rot_work_queue) - destroy_workqueue( - mgr->commitq[i].rot_work_queue); + if (mgr->commitq[i].rot_thread) { + flush_kthread_worker(&mgr->commitq[i].rot_kw); + kthread_stop(mgr->commitq[i].rot_thread); + } } devm_kfree(mgr->device, mgr->commitq); mgr->commitq = NULL; } if (mgr->doneq) { for (i = 0; i < mgr->queue_count; i++) { - if (mgr->doneq[i].rot_work_queue) - destroy_workqueue( - mgr->doneq[i].rot_work_queue); + if (mgr->doneq[i].rot_thread) { + flush_kthread_worker(&mgr->doneq[i].rot_kw); + kthread_stop(mgr->doneq[i].rot_thread); + } } devm_kfree(mgr->device, mgr->doneq); mgr->doneq = NULL; @@ -1090,6 +1115,8 @@ static int sde_rotator_assign_queue(struct sde_rot_mgr *mgr, if (IS_ERR_OR_NULL(hw)) { SDEROT_ERR("fail to allocate hw\n"); ret = PTR_ERR(hw); + if (!ret) + ret = -EINVAL; } else { queue->hw = hw; } @@ -1166,7 +1193,7 @@ void sde_rotator_queue_request(struct sde_rot_mgr *mgr, if (entry->item.ts) entry->item.ts[SDE_ROTATOR_TS_QUEUE] = ktime_get(); - queue_work(queue->rot_work_queue, &entry->commit_work); + queue_kthread_work(&queue->rot_kw, &entry->commit_work); } } @@ -1377,12 +1404,13 @@ static void sde_rotator_release_entry(struct sde_rot_mgr *mgr, * * Note this asynchronous handler is protected by hal lock. 
*/ -static void sde_rotator_commit_handler(struct work_struct *work) +static void sde_rotator_commit_handler(struct kthread_work *work) { struct sde_rot_entry *entry; struct sde_rot_entry_container *request; struct sde_rot_hw_resource *hw; struct sde_rot_mgr *mgr; + struct sched_param param = { .sched_priority = 5 }; int ret; entry = container_of(work, struct sde_rot_entry, commit_work); @@ -1393,6 +1421,12 @@ static void sde_rotator_commit_handler(struct work_struct *work) return; } + ret = sched_setscheduler(entry->fenceq->rot_thread, SCHED_FIFO, ¶m); + if (ret) { + SDEROT_WARN("Fail to set kthread priority for fenceq: %d\n", + ret); + } + mgr = entry->private->mgr; SDEROT_EVTLOG( @@ -1466,7 +1500,7 @@ static void sde_rotator_commit_handler(struct work_struct *work) if (entry->item.ts) entry->item.ts[SDE_ROTATOR_TS_FLUSH] = ktime_get(); - queue_work(entry->doneq->rot_work_queue, &entry->done_work); + queue_kthread_work(&entry->doneq->rot_kw, &entry->done_work); sde_rot_mgr_unlock(mgr); return; error: @@ -1478,8 +1512,8 @@ get_hw_res_err: sde_rotator_release_entry(mgr, entry); atomic_dec(&request->pending_count); atomic_inc(&request->failed_count); - if (request->retireq && request->retire_work) - queue_work(request->retireq, request->retire_work); + if (request->retire_kw && request->retire_work) + queue_kthread_work(request->retire_kw, request->retire_work); sde_rot_mgr_unlock(mgr); } @@ -1493,7 +1527,7 @@ get_hw_res_err: * * Note this asynchronous handler is protected by hal lock. */ -static void sde_rotator_done_handler(struct work_struct *work) +static void sde_rotator_done_handler(struct kthread_work *work) { struct sde_rot_entry *entry; struct sde_rot_entry_container *request; @@ -1551,8 +1585,8 @@ static void sde_rotator_done_handler(struct work_struct *work) ATRACE_INT("sde_rot_done", 1); sde_rotator_release_entry(mgr, entry); atomic_dec(&request->pending_count); - if (request->retireq && request->retire_work) - queue_work(request->retireq, request->retire_work); + if (request->retire_kw && request->retire_work) + queue_kthread_work(request->retire_kw, request->retire_work); if (entry->item.ts) entry->item.ts[SDE_ROTATOR_TS_RETIRE] = ktime_get(); sde_rot_mgr_unlock(mgr); @@ -1918,8 +1952,10 @@ static int sde_rotator_add_request(struct sde_rot_mgr *mgr, entry->request = req; - INIT_WORK(&entry->commit_work, sde_rotator_commit_handler); - INIT_WORK(&entry->done_work, sde_rotator_done_handler); + init_kthread_work(&entry->commit_work, + sde_rotator_commit_handler); + init_kthread_work(&entry->done_work, + sde_rotator_done_handler); SDEROT_DBG("Entry added. wbidx=%u, src{%u,%u,%u,%u}f=%u\n" "dst{%u,%u,%u,%u}f=%u session_id=%u\n", item->wb_idx, item->src_rect.x, item->src_rect.y, @@ -1957,24 +1993,26 @@ static void sde_rotator_cancel_request(struct sde_rot_mgr *mgr, struct sde_rot_entry *entry; int i; - /* - * To avoid signal the rotation entry output fence in the wrong - * order, all the entries in the same request needs to be canceled - * first, before signaling the output fence. 
- */ - SDEROT_DBG("cancel work start\n"); - sde_rot_mgr_unlock(mgr); - for (i = req->count - 1; i >= 0; i--) { - entry = req->entries + i; - cancel_work_sync(&entry->commit_work); - cancel_work_sync(&entry->done_work); - } - sde_rot_mgr_lock(mgr); - SDEROT_DBG("cancel work done\n"); - for (i = req->count - 1; i >= 0; i--) { - entry = req->entries + i; - sde_rotator_signal_output(entry); - sde_rotator_release_entry(mgr, entry); + if (atomic_read(&req->pending_count)) { + /* + * To avoid signal the rotation entry output fence in the wrong + * order, all the entries in the same request needs to be + * canceled first, before signaling the output fence. + */ + SDEROT_DBG("cancel work start\n"); + sde_rot_mgr_unlock(mgr); + for (i = req->count - 1; i >= 0; i--) { + entry = req->entries + i; + flush_kthread_worker(&entry->commitq->rot_kw); + flush_kthread_worker(&entry->doneq->rot_kw); + } + sde_rot_mgr_lock(mgr); + SDEROT_DBG("cancel work done\n"); + for (i = req->count - 1; i >= 0; i--) { + entry = req->entries + i; + sde_rotator_signal_output(entry); + sde_rotator_release_entry(mgr, entry); + } } list_del_init(&req->list); @@ -1999,7 +2037,7 @@ static void sde_rotator_free_completed_request(struct sde_rot_mgr *mgr, list_for_each_entry_safe(req, req_next, &private->req_list, list) { if ((atomic_read(&req->pending_count) == 0) && - (!req->retire_work && !req->retireq)) { + (!req->retire_work && !req->retire_kw)) { list_del_init(&req->list); devm_kfree(&mgr->pdev->dev, req); } diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_core.h b/drivers/media/platform/msm/sde/rotator/sde_rotator_core.h index 2073c6d9f115..41918dd9b43e 100644 --- a/drivers/media/platform/msm/sde/rotator/sde_rotator_core.h +++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_core.h @@ -21,6 +21,7 @@ #include #include #include +#include #include "sde_rotator_base.h" #include "sde_rotator_util.h" @@ -184,7 +185,8 @@ struct sde_rot_hw_resource { }; struct sde_rot_queue { - struct workqueue_struct *rot_work_queue; + struct kthread_worker rot_kw; + struct task_struct *rot_thread; struct sde_rot_timeline *timeline; struct sde_rot_hw_resource *hw; }; @@ -195,8 +197,8 @@ struct sde_rot_entry_container { u32 count; atomic_t pending_count; atomic_t failed_count; - struct workqueue_struct *retireq; - struct work_struct *retire_work; + struct kthread_worker *retire_kw; + struct kthread_work *retire_work; struct sde_rot_entry *entries; }; @@ -205,8 +207,8 @@ struct sde_rot_file_private; struct sde_rot_entry { struct sde_rotation_item item; - struct work_struct commit_work; - struct work_struct done_work; + struct kthread_work commit_work; + struct kthread_work done_work; struct sde_rot_queue *commitq; struct sde_rot_queue *fenceq; struct sde_rot_queue *doneq; diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_debug.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_debug.c index f41382b5b20c..1966fa9805c0 100644 --- a/drivers/media/platform/msm/sde/rotator/sde_rotator_debug.c +++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_debug.c @@ -1023,6 +1023,9 @@ static ssize_t sde_rotator_debug_base_offset_write(struct file *file, if (sscanf(buf, "%5x %x", &off, &cnt) < 2) return -EINVAL; + if (off % sizeof(u32)) + return -EINVAL; + if (off > dbg->max_offset) return -EINVAL; @@ -1091,6 +1094,9 @@ static ssize_t sde_rotator_debug_base_reg_write(struct file *file, if (cnt < 2) return -EFAULT; + if (off % sizeof(u32)) + return -EFAULT; + if (off >= dbg->max_offset) return -EFAULT; @@ -1139,6 +1145,9 @@ 
static ssize_t sde_rotator_debug_base_reg_read(struct file *file, goto debug_read_error; } + if (dbg->off % sizeof(u32)) + return -EFAULT; + ptr = dbg->base + dbg->off; tot = 0; diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c index cfee4efb6f16..08bbed147c86 100644 --- a/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c +++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c @@ -53,8 +53,8 @@ #define SDE_ROTATOR_DEGREE_180 180 #define SDE_ROTATOR_DEGREE_90 90 -static void sde_rotator_submit_handler(struct work_struct *work); -static void sde_rotator_retire_handler(struct work_struct *work); +static void sde_rotator_submit_handler(struct kthread_work *work); +static void sde_rotator_retire_handler(struct kthread_work *work); #ifdef CONFIG_COMPAT static long sde_rotator_compat_ioctl32(struct file *file, unsigned int cmd, unsigned long arg); @@ -466,8 +466,8 @@ static void sde_rotator_stop_streaming(struct vb2_queue *q) sde_rotator_cancel_all_requests(rot_dev->mgr, ctx->private); sde_rot_mgr_unlock(rot_dev->mgr); mutex_unlock(q->lock); - cancel_work_sync(&ctx->submit_work); - cancel_work_sync(&ctx->retire_work); + flush_kthread_work(&ctx->submit_work); + flush_kthread_work(&ctx->retire_work); mutex_lock(q->lock); } @@ -480,7 +480,7 @@ static void sde_rotator_stop_streaming(struct vb2_queue *q) struct sde_rotator_vbinfo *vbinfo = &ctx->vbinfo_cap[i]; - if (vbinfo->fence && vbinfo->fd < 0) { + if (vbinfo->fence) { /* fence is not used */ SDEDEV_DBG(rot_dev->dev, "put fence s:%d t:%d i:%d\n", @@ -765,8 +765,6 @@ static ssize_t sde_rotator_ctx_show(struct kobject *kobj, ctx->format_cap.fmt.pix.sizeimage); SPRINT("abort_pending=%d\n", ctx->abort_pending); SPRINT("command_pending=%d\n", atomic_read(&ctx->command_pending)); - SPRINT("submit_work=%d\n", work_busy(&ctx->submit_work)); - SPRINT("retire_work=%d\n", work_busy(&ctx->retire_work)); SPRINT("sequence=%u\n", sde_rotator_get_timeline_commit_ts(ctx->work_queue.timeline)); SPRINT("timestamp=%u\n", @@ -923,8 +921,8 @@ static int sde_rotator_open(struct file *file) ctx->crop_out.width = 640; ctx->crop_out.height = 480; init_waitqueue_head(&ctx->wait_queue); - INIT_WORK(&ctx->submit_work, sde_rotator_submit_handler); - INIT_WORK(&ctx->retire_work, sde_rotator_retire_handler); + init_kthread_work(&ctx->submit_work, sde_rotator_submit_handler); + init_kthread_work(&ctx->retire_work, sde_rotator_retire_handler); v4l2_fh_init(&ctx->fh, video); file->private_data = &ctx->fh; @@ -954,14 +952,16 @@ static int sde_rotator_open(struct file *file) snprintf(name, sizeof(name), "rot_fenceq_%d_%d", rot_dev->dev->id, ctx->session_id); - ctx->work_queue.rot_work_queue = alloc_ordered_workqueue("%s", - WQ_MEM_RECLAIM | WQ_HIGHPRI, name); - if (!ctx->work_queue.rot_work_queue) { - SDEDEV_ERR(ctx->rot_dev->dev, "fail allocate workqueue\n"); + init_kthread_worker(&ctx->work_queue.rot_kw); + ctx->work_queue.rot_thread = kthread_run(kthread_worker_fn, + &ctx->work_queue.rot_kw, name); + if (IS_ERR(ctx->work_queue.rot_thread)) { + SDEDEV_ERR(ctx->rot_dev->dev, "fail allocate kthread\n"); ret = -EPERM; + ctx->work_queue.rot_thread = NULL; goto error_alloc_workqueue; } - SDEDEV_DBG(ctx->rot_dev->dev, "work queue name=%s\n", name); + SDEDEV_DBG(ctx->rot_dev->dev, "kthread name=%s\n", name); snprintf(name, sizeof(name), "%d_%d", rot_dev->dev->id, ctx->session_id); @@ -1010,7 +1010,8 @@ error_ctrl_handler: error_open_session: sde_rot_mgr_unlock(rot_dev->mgr); 
sde_rotator_destroy_timeline(ctx->work_queue.timeline); - destroy_workqueue(ctx->work_queue.rot_work_queue); + flush_kthread_worker(&ctx->work_queue.rot_kw); + kthread_stop(ctx->work_queue.rot_thread); error_alloc_workqueue: sysfs_remove_group(&ctx->kobj, &sde_rotator_fs_attr_group); error_create_sysfs: @@ -1045,20 +1046,17 @@ static int sde_rotator_release(struct file *file) v4l2_m2m_streamoff(file, ctx->fh.m2m_ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT); v4l2_m2m_streamoff(file, ctx->fh.m2m_ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE); mutex_unlock(&rot_dev->lock); - SDEDEV_DBG(rot_dev->dev, "release submit work s:%d w:%x\n", - session_id, work_busy(&ctx->submit_work)); - cancel_work_sync(&ctx->submit_work); + SDEDEV_DBG(rot_dev->dev, "release submit work s:%d\n", session_id); + flush_kthread_worker(&ctx->work_queue.rot_kw); SDEDEV_DBG(rot_dev->dev, "release session s:%d\n", session_id); sde_rot_mgr_lock(rot_dev->mgr); sde_rotator_session_close(rot_dev->mgr, ctx->private, session_id); sde_rot_mgr_unlock(rot_dev->mgr); - SDEDEV_DBG(rot_dev->dev, "release retire work s:%d w:%x\n", - session_id, work_busy(&ctx->retire_work)); - cancel_work_sync(&ctx->retire_work); + SDEDEV_DBG(rot_dev->dev, "release retire work s:%d\n", session_id); mutex_lock(&rot_dev->lock); SDEDEV_DBG(rot_dev->dev, "release context s:%d\n", session_id); sde_rotator_destroy_timeline(ctx->work_queue.timeline); - destroy_workqueue(ctx->work_queue.rot_work_queue); + kthread_stop(ctx->work_queue.rot_thread); sysfs_remove_group(&ctx->kobj, &sde_rotator_fs_attr_group); kobject_put(&ctx->kobj); v4l2_m2m_ctx_release(ctx->fh.m2m_ctx); @@ -1459,7 +1457,7 @@ static int sde_rotator_dqbuf(struct file *file, && (buf->index < ctx->nbuf_cap)) { int idx = buf->index; - if (ctx->vbinfo_cap[idx].fence && ctx->vbinfo_cap[idx].fd < 0) { + if (ctx->vbinfo_cap[idx].fence) { /* fence is not used */ SDEDEV_DBG(ctx->rot_dev->dev, "put fence s:%d i:%d\n", ctx->session_id, idx); @@ -1787,6 +1785,7 @@ static long sde_rotator_private_ioctl(struct file *file, void *fh, struct msm_sde_rotator_fence *fence = arg; struct msm_sde_rotator_comp_ratio *comp_ratio = arg; struct sde_rotator_vbinfo *vbinfo; + int ret; switch (cmd) { case VIDIOC_S_SDE_ROTATOR_FENCE: @@ -1845,18 +1844,39 @@ static long sde_rotator_private_ioctl(struct file *file, void *fh, vbinfo = &ctx->vbinfo_cap[fence->index]; - if (vbinfo->fence == NULL) { - vbinfo->fd = -1; - } else { - vbinfo->fd = - sde_rotator_get_sync_fence_fd(vbinfo->fence); - if (vbinfo->fd < 0) { + if (!vbinfo) + return -EINVAL; + + if (vbinfo->fence) { + ret = sde_rotator_get_sync_fence_fd(vbinfo->fence); + if (ret < 0) { SDEDEV_ERR(rot_dev->dev, "fail get fence fd s:%d\n", ctx->session_id); - return vbinfo->fd; + return ret; } + + /* + * Loose any reference to sync fence once we pass + * it to user. Driver does not clean up user + * unclosed fence descriptors. + */ + vbinfo->fence = NULL; + + /* + * Cache fence descriptor in case user calls this + * ioctl multiple times. Cached value would be stale + * if user duplicated and closed old descriptor. + */ + vbinfo->fd = ret; + } else if (!sde_rotator_get_fd_sync_fence(vbinfo->fd)) { + /* + * User has closed cached fence descriptor. + * Invalidate descriptor cache. + */ + vbinfo->fd = -1; } + fence->fd = vbinfo->fd; SDEDEV_DBG(rot_dev->dev, @@ -2023,7 +2043,7 @@ static const struct v4l2_ioctl_ops sde_rotator_ioctl_ops = { * * This function is scheduled in work queue context. 
*/ -static void sde_rotator_retire_handler(struct work_struct *work) +static void sde_rotator_retire_handler(struct kthread_work *work) { struct vb2_v4l2_buffer *src_buf; struct vb2_v4l2_buffer *dst_buf; @@ -2209,7 +2229,7 @@ static int sde_rotator_process_buffers(struct sde_rotator_ctx *ctx, goto error_init_request; } - req->retireq = ctx->work_queue.rot_work_queue; + req->retire_kw = &ctx->work_queue.rot_kw; req->retire_work = &ctx->retire_work; ret = sde_rotator_handle_request_common( @@ -2238,7 +2258,7 @@ error_null_buffer: * * This function is scheduled in work queue context. */ -static void sde_rotator_submit_handler(struct work_struct *work) +static void sde_rotator_submit_handler(struct kthread_work *work) { struct sde_rotator_ctx *ctx; struct sde_rotator_device *rot_dev; @@ -2325,7 +2345,7 @@ static void sde_rotator_device_run(void *priv) /* disconnect request (will be freed by core layer) */ sde_rot_mgr_lock(rot_dev->mgr); - ctx->request->retireq = NULL; + ctx->request->retire_kw = NULL; ctx->request->retire_work = NULL; ctx->request = NULL; sde_rot_mgr_unlock(rot_dev->mgr); @@ -2364,7 +2384,7 @@ static void sde_rotator_device_run(void *priv) /* disconnect request (will be freed by core layer) */ sde_rot_mgr_lock(rot_dev->mgr); - ctx->request->retireq = NULL; + ctx->request->retire_kw = NULL; ctx->request->retire_work = NULL; ctx->request = ERR_PTR(-EIO); sde_rot_mgr_unlock(rot_dev->mgr); @@ -2471,7 +2491,7 @@ static int sde_rotator_job_ready(void *priv) v4l2_m2m_num_dst_bufs_ready(ctx->fh.m2m_ctx), atomic_read(&ctx->command_pending)); atomic_inc(&ctx->command_pending); - queue_work(ctx->work_queue.rot_work_queue, &ctx->submit_work); + queue_kthread_work(&ctx->work_queue.rot_kw, &ctx->submit_work); } else if (!atomic_read(&ctx->request->pending_count)) { /* if pending request completed, forward to device run state */ SDEDEV_DBG(rot_dev->dev, diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.h b/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.h index c8dcdeee9ca0..8e4d86083508 100644 --- a/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.h +++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -21,6 +21,7 @@ #include #include #include +#include #include #include #include @@ -131,8 +132,8 @@ struct sde_rotator_ctx { struct sde_rotator_vbinfo *vbinfo_cap; struct sde_rotator_vbinfo *vbinfo_out; wait_queue_head_t wait_queue; - struct work_struct submit_work; - struct work_struct retire_work; + struct kthread_work submit_work; + struct kthread_work retire_work; struct sde_rot_queue work_queue; struct sde_rot_entry_container *request; struct sde_rot_file_private *private; diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c index 8157e8641e60..10f72a2155db 100644 --- a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c +++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c @@ -814,6 +814,9 @@ static void sde_hw_rotator_setup_wbengine(struct sde_hw_rotator_context *ctx, bw /= TRAFFIC_SHAPE_CLKTICK_12MS; if (bw > 0xFF) bw = 0xFF; + else if (bw == 0) + bw = 1; + SDE_REGDMA_WRITE(wrptr, ROT_WB_TRAFFIC_SHAPER_WR_CLIENT, BIT(31) | bw); SDEROT_DBG("Enable ROT_WB Traffic Shaper:%d\n", bw); @@ -1257,6 +1260,7 @@ err_put: data->srcp_dma_buf = NULL; imap_err: ion_free(rot->iclient, handle); + sde_smmu_ctrl(0); return rc; } @@ -1270,8 +1274,19 @@ static int sde_hw_rotator_swts_map(struct sde_hw_rotator *rot) { int rc = 0; struct sde_mdp_img_data *data = &rot->swts_buf; + struct sde_rot_data_type *mdata = sde_rot_get_mdata(); sde_smmu_ctrl(1); + if (mdata->wait_for_transition) { + rc = mdata->wait_for_transition(0, 0); + if (rc < 0) { + SDEROT_ERR("failed Secure wait for transition %d\n", + rc); + rc = -EPERM; + goto error; + } + } + rc = sde_smmu_map_dma_buf(data->srcp_dma_buf, data->srcp_table, SDE_IOMMU_DOMAIN_ROT_UNSECURE, &data->addr, &data->len, DMA_BIDIRECTIONAL); @@ -1291,7 +1306,7 @@ static int sde_hw_rotator_swts_map(struct sde_hw_rotator *rot) data->mapped = true; SDEROT_DBG("swts buffer mapped: %pad/%lx va:%p\n", &data->addr, - data->len, rot->swts_buffer); + data->len, rot->swts_buffer); sde_smmu_ctrl(0); return rc; @@ -1301,6 +1316,8 @@ kmap_err: err_unmap: dma_buf_unmap_attachment(data->srcp_attachment, data->srcp_table, DMA_FROM_DEVICE); +error: + sde_smmu_ctrl(0); return rc; } diff --git a/drivers/media/platform/msm/vidc/hfi_packetization.c b/drivers/media/platform/msm/vidc/hfi_packetization.c index 7388dab92c34..037c6f3b12ab 100644 --- a/drivers/media/platform/msm/vidc/hfi_packetization.c +++ b/drivers/media/platform/msm/vidc/hfi_packetization.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -1832,10 +1832,10 @@ int create_pkt_cmd_session_set_property( pkt->size += sizeof(u32) + sizeof(struct hfi_enable); break; } - case HAL_PARAM_VENC_H264_GENERATE_AUDNAL: + case HAL_PARAM_VENC_GENERATE_AUDNAL: { create_pkt_enable(pkt->rg_property_data, - HFI_PROPERTY_PARAM_VENC_H264_GENERATE_AUDNAL, + HFI_PROPERTY_PARAM_VENC_GENERATE_AUDNAL, ((struct hal_enable *)pdata)->enable); pkt->size += sizeof(u32) + sizeof(struct hfi_enable); break; diff --git a/drivers/media/platform/msm/vidc/msm_smem.c b/drivers/media/platform/msm/vidc/msm_smem.c index c9dfb52861bc..1d30a869d754 100644 --- a/drivers/media/platform/msm/vidc/msm_smem.c +++ b/drivers/media/platform/msm/vidc/msm_smem.c @@ -490,11 +490,13 @@ bool msm_smem_compare_buffers(void *clt, int fd, void *priv) } static int ion_cache_operations(struct smem_client *client, - struct msm_smem *mem, enum smem_cache_ops cache_op) + struct msm_smem *mem, enum smem_cache_ops cache_op, + int size) { unsigned long ionflag = 0; int rc = 0; int msm_cache_ops = 0; + int op_size = 0; if (!mem || !client) { dprintk(VIDC_ERR, "Invalid params: %pK, %pK\n", mem, client); @@ -523,10 +525,15 @@ static int ion_cache_operations(struct smem_client *client, rc = -EINVAL; goto cache_op_failed; } + if (size <= 0) + op_size = mem->size; + else + op_size = mem->size < size ? mem->size : size; + rc = msm_ion_do_cache_offset_op(client->clnt, (struct ion_handle *)mem->smem_priv, 0, mem->offset, - (unsigned long)mem->size, msm_cache_ops); + (unsigned long)op_size, msm_cache_ops); if (rc) { dprintk(VIDC_ERR, "cache operation failed %d\n", rc); @@ -538,7 +545,7 @@ cache_op_failed: } int msm_smem_cache_operations(void *clt, struct msm_smem *mem, - enum smem_cache_ops cache_op) + enum smem_cache_ops cache_op, int size) { struct smem_client *client = clt; int rc = 0; @@ -549,7 +556,7 @@ int msm_smem_cache_operations(void *clt, struct msm_smem *mem, } switch (client->mem_type) { case SMEM_ION: - rc = ion_cache_operations(client, mem, cache_op); + rc = ion_cache_operations(client, mem, cache_op, size); if (rc) dprintk(VIDC_ERR, "Failed cache operations: %d\n", rc); diff --git a/drivers/media/platform/msm/vidc/msm_v4l2_vidc.c b/drivers/media/platform/msm/vidc/msm_v4l2_vidc.c index c0271c757020..de5a2dececdf 100644 --- a/drivers/media/platform/msm/vidc/msm_v4l2_vidc.c +++ b/drivers/media/platform/msm/vidc/msm_v4l2_vidc.c @@ -230,6 +230,14 @@ static int msm_v4l2_queryctrl(struct file *file, void *fh, return msm_vidc_query_ctrl((void *)vidc_inst, ctrl); } +static int msm_v4l2_query_ext_ctrl(struct file *file, void *fh, + struct v4l2_query_ext_ctrl *ctrl) +{ + struct msm_vidc_inst *vidc_inst = get_vidc_inst(file, fh); + + return msm_vidc_query_ext_ctrl((void *)vidc_inst, ctrl); +} + static const struct v4l2_ioctl_ops msm_v4l2_ioctl_ops = { .vidioc_querycap = msm_v4l2_querycap, .vidioc_enum_fmt_vid_cap_mplane = msm_v4l2_enum_fmt, @@ -247,6 +255,7 @@ static const struct v4l2_ioctl_ops msm_v4l2_ioctl_ops = { .vidioc_s_ctrl = msm_v4l2_s_ctrl, .vidioc_g_ctrl = msm_v4l2_g_ctrl, .vidioc_queryctrl = msm_v4l2_queryctrl, + .vidioc_query_ext_ctrl = msm_v4l2_query_ext_ctrl, .vidioc_s_ext_ctrls = msm_v4l2_s_ext_ctrl, .vidioc_subscribe_event = msm_v4l2_subscribe_event, .vidioc_unsubscribe_event = msm_v4l2_unsubscribe_event, @@ -321,6 +330,7 @@ static int msm_vidc_initialize_core(struct platform_device *pdev, init_completion(&core->completions[i]); } + 
msm_comm_sort_ctrl(); INIT_DELAYED_WORK(&core->fw_unload_work, msm_vidc_fw_unload_handler); return rc; } diff --git a/drivers/media/platform/msm/vidc/msm_vdec.c b/drivers/media/platform/msm/vidc/msm_vdec.c index 8ac84ece2c2a..0f6389370643 100644 --- a/drivers/media/platform/msm/vidc/msm_vdec.c +++ b/drivers/media/platform/msm/vidc/msm_vdec.c @@ -11,6 +11,7 @@ * */ +#include #include #include #include "msm_vidc_internal.h" @@ -18,6 +19,7 @@ #include "vidc_hfi_api.h" #include "msm_vidc_debug.h" #include "msm_vidc_dcvs.h" +#include "msm_vdec.h" #define MSM_VDEC_DVC_NAME "msm_vdec_8974" #define MIN_NUM_OUTPUT_BUFFERS 4 @@ -553,6 +555,7 @@ static struct msm_vidc_ctrl msm_vdec_ctrls[] = { (1 << V4L2_MPEG_VIDC_VIDEO_DPB_COLOR_FMT_TP10_UBWC) ), .qmenu = mpeg_vidc_video_dpb_color_format, + .flags = V4L2_CTRL_FLAG_MODIFY_LAYOUT, }, { .id = V4L2_CID_VIDC_QBUF_MODE, @@ -1488,6 +1491,7 @@ static int msm_vdec_queue_setup(struct vb2_queue *q, rc = -EINVAL; break; } + msm_dcvs_try_enable(inst); /* Pretend as if FW itself is asking for * additional buffers. @@ -1567,9 +1571,10 @@ exit: return rc; } -static inline int set_max_internal_buffers_size(struct msm_vidc_inst *inst) +static int set_max_internal_buffers_size(struct msm_vidc_inst *inst) { int rc = 0; + struct msm_vidc_list *buf_list = &inst->scratchbufs; struct { enum hal_buffer type; struct hal_buffer_requirements *req; @@ -1577,13 +1582,17 @@ static inline int set_max_internal_buffers_size(struct msm_vidc_inst *inst) } internal_buffers[] = { { HAL_BUFFER_INTERNAL_SCRATCH, NULL, 0}, { HAL_BUFFER_INTERNAL_SCRATCH_1, NULL, 0}, - { HAL_BUFFER_INTERNAL_SCRATCH_2, NULL, 0}, - { HAL_BUFFER_INTERNAL_PERSIST, NULL, 0}, - { HAL_BUFFER_INTERNAL_PERSIST_1, NULL, 0}, }; struct hal_frame_size frame_sz; int i; + mutex_lock(&buf_list->lock); + if (!list_empty(&buf_list->list)) { + dprintk(VIDC_DBG, "Scratch list already has allocated buf\n"); + mutex_unlock(&buf_list->lock); + return 0; + } + mutex_unlock(&buf_list->lock); frame_sz.buffer_type = HAL_BUFFER_INPUT; frame_sz.width = inst->capability.width.max; @@ -1609,6 +1618,15 @@ static inline int set_max_internal_buffers_size(struct msm_vidc_inst *inst) get_buff_req_buffer(inst, internal_buffers[i].type); internal_buffers[i].size = internal_buffers[i].req ? 
internal_buffers[i].req->buffer_size : 0; + + rc = allocate_and_set_internal_bufs(inst, + internal_buffers[i].req, + &inst->scratchbufs, false); + if (rc) + goto alloc_fail; + dprintk(VIDC_DBG, + "Allocated scratch type : %d size to : %zd\n", + internal_buffers[i].type, internal_buffers[i].size); } frame_sz.buffer_type = HAL_BUFFER_INPUT; @@ -1621,25 +1639,18 @@ static inline int set_max_internal_buffers_size(struct msm_vidc_inst *inst) dprintk(VIDC_ERR, "%s Failed to get back old buf req, %d\n", __func__, rc); - return rc; + goto alloc_fail; } - dprintk(VIDC_DBG, "Old buffer reqs, buffer type = %d width = %d, height = %d\n", frame_sz.buffer_type, frame_sz.width, frame_sz.height); - for (i = 0; i < ARRAY_SIZE(internal_buffers); i++) { - if (internal_buffers[i].req) { - internal_buffers[i].req->buffer_size = - internal_buffers[i].size; - dprintk(VIDC_DBG, - "Changing buffer type : %d size to : %zd\n", - internal_buffers[i].type, - internal_buffers[i].size); - } - } return 0; + +alloc_fail: + msm_comm_release_scratch_buffers(inst, false); + return rc; } static inline int start_streaming(struct msm_vidc_inst *inst) @@ -1650,6 +1661,7 @@ static inline int start_streaming(struct msm_vidc_inst *inst) struct hal_buffer_size_minimum b; unsigned int buffer_size; struct msm_vidc_format *fmt = NULL; + bool max_internal_buf = false; fmt = &inst->fmts[CAPTURE_PORT]; buffer_size = fmt->get_frame_size(0, @@ -1673,8 +1685,9 @@ static inline int start_streaming(struct msm_vidc_inst *inst) dprintk(VIDC_ERR, "H/w scaling is not in valid range\n"); return -EINVAL; } - if ((inst->flags & VIDC_SECURE) && !inst->in_reconfig && - !slave_side_cp) { + max_internal_buf = (inst->flags & VIDC_SECURE) && !slave_side_cp + && (inst->session_type == MSM_VIDC_DECODER); + if (max_internal_buf) { rc = set_max_internal_buffers_size(inst); if (rc) { dprintk(VIDC_ERR, @@ -1683,7 +1696,7 @@ static inline int start_streaming(struct msm_vidc_inst *inst) goto fail_start; } } - rc = msm_comm_set_scratch_buffers(inst); + rc = msm_comm_set_scratch_buffers(inst, max_internal_buf); if (rc) { dprintk(VIDC_ERR, "Failed to set scratch buffers: %d\n", rc); @@ -1781,8 +1794,10 @@ static int msm_vdec_start_streaming(struct vb2_queue *q, unsigned int count) if (inst->state == MSM_VIDC_CORE_INVALID || inst->core->state == VIDC_CORE_INVALID || - inst->core->state == VIDC_CORE_UNINIT) - return -EINVAL; + inst->core->state == VIDC_CORE_UNINIT) { + rc = -EINVAL; + goto stream_start_failed; + } hdev = inst->core->device; dprintk(VIDC_DBG, "Streamon called on: %d capability for inst: %pK\n", @@ -2233,6 +2248,7 @@ static int try_set_ctrl(struct msm_vidc_inst *inst, struct v4l2_ctrl *ctrl) struct hal_enable_picture enable_picture; struct hal_enable hal_property; enum hal_property property_id = 0; + enum hal_video_codec codec; u32 property_val = 0; void *pdata = NULL; struct hfi_device *hdev; @@ -2287,12 +2303,23 @@ static int try_set_ctrl(struct msm_vidc_inst *inst, struct v4l2_ctrl *ctrl) case V4L2_CID_MPEG_VIDC_VIDEO_PICTYPE_DEC_MODE: property_id = HAL_PARAM_VDEC_PICTURE_TYPE_DECODE; if (ctrl->val == - V4L2_MPEG_VIDC_VIDEO_PICTYPE_DECODE_ON) + V4L2_MPEG_VIDC_VIDEO_PICTYPE_DECODE_ON) { enable_picture.picture_type = HAL_PICTURE_I; - else - enable_picture.picture_type = HAL_PICTURE_I | - HAL_PICTURE_P | HAL_PICTURE_B | - HAL_PICTURE_IDR; + } else { + codec = get_hal_codec(inst->fmts[OUTPUT_PORT].fourcc); + if (codec == HAL_VIDEO_CODEC_H264) { + enable_picture.picture_type = HAL_PICTURE_I | + HAL_PICTURE_P | HAL_PICTURE_B | + HAL_PICTURE_IDR; + } else if 
(codec == HAL_VIDEO_CODEC_HEVC) { + enable_picture.picture_type = HAL_PICTURE_I | + HAL_PICTURE_P | HAL_PICTURE_B | + HAL_PICTURE_IDR | HAL_PICTURE_CRA; + } else { + enable_picture.picture_type = HAL_PICTURE_I | + HAL_PICTURE_P | HAL_PICTURE_B; + } + } pdata = &enable_picture; break; case V4L2_CID_MPEG_VIDC_VIDEO_KEEP_ASPECT_RATIO: @@ -2800,3 +2827,21 @@ int msm_vdec_ctrl_init(struct msm_vidc_inst *inst) return msm_comm_ctrl_init(inst, msm_vdec_ctrls, ARRAY_SIZE(msm_vdec_ctrls), &msm_vdec_ctrl_ops); } + +void msm_vdec_g_ctrl(struct msm_vidc_ctrl **ctrls, int *num_ctrls) +{ + *ctrls = msm_vdec_ctrls; + *num_ctrls = NUM_CTRLS; +} + +static int msm_vdec_ctrl_cmp(const void *st1, const void *st2) +{ + return (int32_t)((struct msm_vidc_ctrl *)st1)->id - + (int32_t)((struct msm_vidc_ctrl *)st2)->id; +} + +void msm_vdec_ctrl_sort(void) +{ + sort(msm_vdec_ctrls, NUM_CTRLS, sizeof(struct msm_vidc_ctrl), + msm_vdec_ctrl_cmp, NULL); +} diff --git a/drivers/media/platform/msm/vidc/msm_vdec.h b/drivers/media/platform/msm/vidc/msm_vdec.h index 47426c143c08..227cc99242d8 100644 --- a/drivers/media/platform/msm/vidc/msm_vdec.h +++ b/drivers/media/platform/msm/vidc/msm_vdec.h @@ -18,12 +18,13 @@ int msm_vdec_inst_init(struct msm_vidc_inst *inst); int msm_vdec_ctrl_init(struct msm_vidc_inst *inst); -int msm_vdec_querycap(void *instance, struct v4l2_capability *cap); -int msm_vdec_enum_fmt(void *instance, struct v4l2_fmtdesc *f); -int msm_vdec_s_fmt(void *instance, struct v4l2_format *f); -int msm_vdec_g_fmt(void *instance, struct v4l2_format *f); -int msm_vdec_s_ext_ctrl(void *instance, struct v4l2_ext_controls *a); -int msm_vdec_reqbufs(void *instance, struct v4l2_requestbuffers *b); +int msm_vdec_querycap(struct msm_vidc_inst *inst, struct v4l2_capability *cap); +int msm_vdec_enum_fmt(struct msm_vidc_inst *inst, struct v4l2_fmtdesc *f); +int msm_vdec_s_fmt(struct msm_vidc_inst *inst, struct v4l2_format *f); +int msm_vdec_g_fmt(struct msm_vidc_inst *inst, struct v4l2_format *f); +int msm_vdec_s_ext_ctrl(struct msm_vidc_inst *inst, + struct v4l2_ext_controls *a); +int msm_vdec_reqbufs(struct msm_vidc_inst *inst, struct v4l2_requestbuffers *b); int msm_vdec_prepare_buf(struct msm_vidc_inst *inst, struct v4l2_buffer *b); int msm_vdec_release_buf(struct msm_vidc_inst *inst, struct v4l2_buffer *b); int msm_vdec_qbuf(struct msm_vidc_inst *inst, struct v4l2_buffer *b); @@ -32,6 +33,8 @@ int msm_vdec_streamon(struct msm_vidc_inst *inst, enum v4l2_buf_type i); int msm_vdec_streamoff(struct msm_vidc_inst *inst, enum v4l2_buf_type i); int msm_vdec_cmd(struct msm_vidc_inst *inst, struct v4l2_decoder_cmd *dec); int msm_vdec_s_parm(struct msm_vidc_inst *inst, struct v4l2_streamparm *a); -struct vb2_ops *msm_vdec_get_vb2q_ops(void); +const struct vb2_ops *msm_vdec_get_vb2q_ops(void); +void msm_vdec_g_ctrl(struct msm_vidc_ctrl **ctrls, int *num_ctrls); +void msm_vdec_ctrl_sort(void); #endif diff --git a/drivers/media/platform/msm/vidc/msm_venc.c b/drivers/media/platform/msm/vidc/msm_venc.c index cdf91dd80ed3..e4698e0cdcd8 100644 --- a/drivers/media/platform/msm/vidc/msm_venc.c +++ b/drivers/media/platform/msm/vidc/msm_venc.c @@ -861,14 +861,14 @@ static struct msm_vidc_ctrl msm_venc_ctrls[] = { .step = 1, }, { - .id = V4L2_CID_MPEG_VIDC_VIDEO_H264_AU_DELIMITER, + .id = V4L2_CID_MPEG_VIDC_VIDEO_AU_DELIMITER, .name = "H264 AU Delimiter", .type = V4L2_CTRL_TYPE_BOOLEAN, - .minimum = V4L2_MPEG_VIDC_VIDEO_H264_AU_DELIMITER_DISABLED, - .maximum = V4L2_MPEG_VIDC_VIDEO_H264_AU_DELIMITER_ENABLED, + .minimum = 
V4L2_MPEG_VIDC_VIDEO_AU_DELIMITER_DISABLED, + .maximum = V4L2_MPEG_VIDC_VIDEO_AU_DELIMITER_ENABLED, .step = 1, .default_value = - V4L2_MPEG_VIDC_VIDEO_H264_AU_DELIMITER_DISABLED, + V4L2_MPEG_VIDC_VIDEO_AU_DELIMITER_DISABLED, }, { .id = V4L2_CID_MPEG_VIDC_SET_PERF_LEVEL, @@ -1868,7 +1868,7 @@ static inline int start_streaming(struct msm_vidc_inst *inst) "Failed to get Buffer Requirements : %d\n", rc); goto fail_start; } - rc = msm_comm_set_scratch_buffers(inst); + rc = msm_comm_set_scratch_buffers(inst, false); if (rc) { dprintk(VIDC_ERR, "Failed to set scratch buffers: %d\n", rc); goto fail_start; @@ -1908,8 +1908,10 @@ static int msm_venc_start_streaming(struct vb2_queue *q, unsigned int count) if (inst->state == MSM_VIDC_CORE_INVALID || inst->core->state == VIDC_CORE_INVALID || - inst->core->state == VIDC_CORE_UNINIT) - return -EINVAL; + inst->core->state == VIDC_CORE_UNINIT) { + rc = -EINVAL; + goto stream_start_failed; + } dprintk(VIDC_DBG, "Streamon called on: %d capability for inst: %pK\n", q->type, inst); @@ -3315,14 +3317,14 @@ static int try_set_ctrl(struct msm_vidc_inst *inst, struct v4l2_ctrl *ctrl) pdata = &vui_timing_info; break; } - case V4L2_CID_MPEG_VIDC_VIDEO_H264_AU_DELIMITER: - property_id = HAL_PARAM_VENC_H264_GENERATE_AUDNAL; + case V4L2_CID_MPEG_VIDC_VIDEO_AU_DELIMITER: + property_id = HAL_PARAM_VENC_GENERATE_AUDNAL; switch (ctrl->val) { - case V4L2_MPEG_VIDC_VIDEO_H264_AU_DELIMITER_DISABLED: + case V4L2_MPEG_VIDC_VIDEO_AU_DELIMITER_DISABLED: enable.enable = 0; break; - case V4L2_MPEG_VIDC_VIDEO_H264_AU_DELIMITER_ENABLED: + case V4L2_MPEG_VIDC_VIDEO_AU_DELIMITER_ENABLED: enable.enable = 1; break; default: diff --git a/drivers/media/platform/msm/vidc/msm_vidc.c b/drivers/media/platform/msm/vidc/msm_vidc.c index 1d878555e0a7..3677bb6e32e6 100644 --- a/drivers/media/platform/msm/vidc/msm_vidc.c +++ b/drivers/media/platform/msm/vidc/msm_vidc.c @@ -14,13 +14,14 @@ #include #include #include +#include +#include #include #include "msm_vidc_internal.h" #include "msm_vidc_debug.h" #include "msm_vdec.h" #include "msm_venc.h" #include "msm_vidc_common.h" -#include #include "vidc_hfi_api.h" #include "msm_vidc_dcvs.h" @@ -88,7 +89,7 @@ int msm_vidc_querycap(void *instance, struct v4l2_capability *cap) return -EINVAL; if (inst->session_type == MSM_VIDC_DECODER) - return msm_vdec_querycap(instance, cap); + return msm_vdec_querycap(inst, cap); else if (inst->session_type == MSM_VIDC_ENCODER) return msm_venc_querycap(instance, cap); return -EINVAL; @@ -103,7 +104,7 @@ int msm_vidc_enum_fmt(void *instance, struct v4l2_fmtdesc *f) return -EINVAL; if (inst->session_type == MSM_VIDC_DECODER) - return msm_vdec_enum_fmt(instance, f); + return msm_vdec_enum_fmt(inst, f); else if (inst->session_type == MSM_VIDC_ENCODER) return msm_venc_enum_fmt(instance, f); return -EINVAL; @@ -138,6 +139,101 @@ int msm_vidc_query_ctrl(void *instance, struct v4l2_queryctrl *ctrl) } EXPORT_SYMBOL(msm_vidc_query_ctrl); +static int msm_vidc_queryctrl_bsearch_cmp1(const void *key, const void *elt) +{ + return *(int32_t *)key - (int32_t)((struct msm_vidc_ctrl *)elt)->id; +} + +static int msm_vidc_queryctrl_bsearch_cmp2(const void *key, const void *elt) +{ + uint32_t id = *(uint32_t *)key; + struct msm_vidc_ctrl *ctrl = (struct msm_vidc_ctrl *)elt; + + if (id >= ctrl[0].id && id < ctrl[1].id) + return 0; + else if (id < ctrl[0].id) + return -1; + else + return 1; +} + +int msm_vidc_query_ext_ctrl(void *instance, struct v4l2_query_ext_ctrl *ctrl) +{ + struct msm_vidc_inst *inst = instance; + bool get_next_ctrl = 0; 
+ int i, num_ctrls, rc = 0; + struct msm_vidc_ctrl *key = NULL; + struct msm_vidc_ctrl *msm_vdec_ctrls; + + if (!inst || !ctrl) + return -EINVAL; + + i = ctrl->id; + memset(ctrl, 0, sizeof(struct v4l2_query_ext_ctrl)); + ctrl->id = i; + + if (ctrl->id & V4L2_CTRL_FLAG_NEXT_CTRL) + get_next_ctrl = 1; + else if (ctrl->id & V4L2_CTRL_FLAG_NEXT_COMPOUND) + goto query_ext_ctrl_err; + + ctrl->id &= ~V4L2_CTRL_FLAG_NEXT_CTRL; + ctrl->id &= ~V4L2_CTRL_FLAG_NEXT_COMPOUND; + + if (ctrl->id > V4L2_CID_PRIVATE_BASE || + (ctrl->id >= V4L2_CID_BASE && ctrl->id <= V4L2_CID_LASTP1)) + goto query_ext_ctrl_err; + else if (ctrl->id == V4L2_CID_PRIVATE_BASE && get_next_ctrl) + ctrl->id = V4L2_CID_MPEG_MSM_VIDC_BASE; + + if (inst->session_type == MSM_VIDC_DECODER) + msm_vdec_g_ctrl(&msm_vdec_ctrls, &num_ctrls); + else + return -EINVAL; + + if (!get_next_ctrl) + key = bsearch(&ctrl->id, msm_vdec_ctrls, num_ctrls, + sizeof(struct msm_vidc_ctrl), + msm_vidc_queryctrl_bsearch_cmp1); + else { + key = bsearch(&ctrl->id, msm_vdec_ctrls, num_ctrls-1, + sizeof(struct msm_vidc_ctrl), + msm_vidc_queryctrl_bsearch_cmp2); + + if (key && ctrl->id > key->id) + key++; + if (key) { + for (i = key-msm_vdec_ctrls, key = NULL; + i < num_ctrls; i++) + if (!(msm_vdec_ctrls[i].flags & + V4L2_CTRL_FLAG_DISABLED)) { + key = &msm_vdec_ctrls[i]; + break; + } + } + } + + if (key) { + ctrl->id = key->id; + ctrl->type = key->type; + strlcpy(ctrl->name, key->name, MAX_NAME_LENGTH); + ctrl->minimum = key->minimum; + ctrl->maximum = key->maximum; + ctrl->step = key->step; + ctrl->default_value = key->default_value; + ctrl->flags = key->flags; + ctrl->elems = 1; + ctrl->nr_of_dims = 0; + return rc; + } + +query_ext_ctrl_err: + ctrl->name[0] = '\0'; + ctrl->flags |= V4L2_CTRL_FLAG_DISABLED; + return -EINVAL; +} +EXPORT_SYMBOL(msm_vidc_query_ext_ctrl); + int msm_vidc_s_fmt(void *instance, struct v4l2_format *f) { struct msm_vidc_inst *inst = instance; @@ -146,7 +242,7 @@ int msm_vidc_s_fmt(void *instance, struct v4l2_format *f) return -EINVAL; if (inst->session_type == MSM_VIDC_DECODER) - return msm_vdec_s_fmt(instance, f); + return msm_vdec_s_fmt(inst, f); if (inst->session_type == MSM_VIDC_ENCODER) return msm_venc_s_fmt(instance, f); return -EINVAL; @@ -161,7 +257,7 @@ int msm_vidc_g_fmt(void *instance, struct v4l2_format *f) return -EINVAL; if (inst->session_type == MSM_VIDC_DECODER) - return msm_vdec_g_fmt(instance, f); + return msm_vdec_g_fmt(inst, f); else if (inst->session_type == MSM_VIDC_ENCODER) return msm_venc_g_fmt(instance, f); return -EINVAL; @@ -197,7 +293,7 @@ int msm_vidc_s_ext_ctrl(void *instance, struct v4l2_ext_controls *control) return -EINVAL; if (inst->session_type == MSM_VIDC_DECODER) - return msm_vdec_s_ext_ctrl(instance, control); + return msm_vdec_s_ext_ctrl(inst, control); if (inst->session_type == MSM_VIDC_ENCODER) return msm_venc_s_ext_ctrl(instance, control); return -EINVAL; @@ -212,7 +308,7 @@ int msm_vidc_reqbufs(void *instance, struct v4l2_requestbuffers *b) return -EINVAL; if (inst->session_type == MSM_VIDC_DECODER) - return msm_vdec_reqbufs(instance, b); + return msm_vdec_reqbufs(inst, b); if (inst->session_type == MSM_VIDC_ENCODER) return msm_venc_reqbufs(instance, b); return -EINVAL; @@ -435,13 +531,69 @@ static inline void save_v4l2_buffer(struct v4l2_buffer *b, } } +static int __map_and_update_binfo(struct msm_vidc_inst *inst, + struct buffer_info *binfo, + struct v4l2_buffer *b, u32 i) +{ + int rc = 0; + struct msm_smem *same_fd_handle = NULL; + + if (i >= VIDEO_MAX_PLANES) { + dprintk(VIDC_ERR, "Num 
planes exceeds max: %d, %d\n", + i, VIDEO_MAX_PLANES); + rc = -EINVAL; + goto exit; + } + + same_fd_handle = get_same_fd_buffer( + inst, b->m.planes[i].reserved[0]); + + if (same_fd_handle) { + binfo->device_addr[i] = + same_fd_handle->device_addr + binfo->buff_off[i]; + b->m.planes[i].m.userptr = binfo->device_addr[i]; + binfo->handle[i] = same_fd_handle; + } else { + binfo->handle[i] = map_buffer(inst, &b->m.planes[i], + get_hal_buffer_type(inst, b)); + if (!binfo->handle[i]) + return -EINVAL; + + binfo->mapped[i] = true; + binfo->device_addr[i] = binfo->handle[i]->device_addr + + binfo->buff_off[i]; + b->m.planes[i].m.userptr = binfo->device_addr[i]; + } + +exit: + return rc; +} + +static int __handle_fw_referenced_buffers(struct msm_vidc_inst *inst, + struct buffer_info *binfo, + struct v4l2_buffer *b) +{ + int rc = 0; + u32 i = 0; + + if (EXTRADATA_IDX(b->length)) { + i = EXTRADATA_IDX(b->length); + if (b->m.planes[i].length) + rc = __map_and_update_binfo(inst, binfo, b, i); + } + + if (rc) + dprintk(VIDC_ERR, "%s: Failed to map extradata\n", __func__); + + return rc; +} + int map_and_register_buf(struct msm_vidc_inst *inst, struct v4l2_buffer *b) { struct buffer_info *binfo = NULL; struct buffer_info *temp = NULL, *iterator = NULL; - int plane = 0; - int i = 0, rc = 0; - struct msm_smem *same_fd_handle = NULL; + int plane = 0, rc = 0; + u32 i = 0; if (!b || !inst) { dprintk(VIDC_ERR, "%s: invalid input\n", __func__); @@ -517,39 +669,23 @@ int map_and_register_buf(struct msm_vidc_inst *inst, struct v4l2_buffer *b) rc = 0; goto exit; } else if (rc == 2) { - rc = -EEXIST; + rc = __handle_fw_referenced_buffers(inst, temp, b); + if (!rc) + rc = -EEXIST; goto exit; } - same_fd_handle = get_same_fd_buffer( - inst, b->m.planes[i].reserved[0]); - populate_buf_info(binfo, b, i); - if (same_fd_handle) { - binfo->device_addr[i] = - same_fd_handle->device_addr + binfo->buff_off[i]; - b->m.planes[i].m.userptr = binfo->device_addr[i]; - binfo->mapped[i] = false; - binfo->handle[i] = same_fd_handle; - } else { - binfo->handle[i] = map_buffer(inst, &b->m.planes[i], - get_hal_buffer_type(inst, b)); - if (!binfo->handle[i]) { - rc = -EINVAL; - goto exit; - } - binfo->mapped[i] = true; - binfo->device_addr[i] = binfo->handle[i]->device_addr + - binfo->buff_off[i]; - b->m.planes[i].m.userptr = binfo->device_addr[i]; - } + rc = __map_and_update_binfo(inst, binfo, b, i); + if (rc) + goto map_err; /* We maintain one ref count for all planes*/ if (!i && is_dynamic_output_buffer_mode(b, inst)) { rc = buf_ref_get(inst, binfo); if (rc < 0) - goto exit; + goto map_err; } dprintk(VIDC_DBG, "%s: [MAP] binfo = %pK, handle[%d] = %pK, device_addr = %pa, fd = %d, offset = %d, mapped = %d\n", @@ -563,10 +699,14 @@ int map_and_register_buf(struct msm_vidc_inst *inst, struct v4l2_buffer *b) mutex_unlock(&inst->registeredbufs.lock); return 0; +map_err: + if (binfo->handle[0] && binfo->mapped[0]) + msm_comm_smem_free(inst, binfo->handle[0]); exit: kfree(binfo); return rc; } + int unmap_and_deregister_buf(struct msm_vidc_inst *inst, struct buffer_info *binfo) { @@ -631,6 +771,7 @@ int unmap_and_deregister_buf(struct msm_vidc_inst *inst, temp->handle[i] = 0; temp->device_addr[i] = 0; temp->uvaddr[i] = 0; + temp->mapped[i] = false; } } if (!keep_node) { @@ -670,10 +811,11 @@ int qbuf_dynamic_buf(struct msm_vidc_inst *inst, } int output_buffer_cache_invalidate(struct msm_vidc_inst *inst, - struct buffer_info *binfo) + struct buffer_info *binfo, struct v4l2_buffer *b) { int i = 0; int rc = 0; + int size = -1; if (!inst) { 
dprintk(VIDC_ERR, "%s: invalid inst: %pK\n", __func__, inst); @@ -686,23 +828,35 @@ int output_buffer_cache_invalidate(struct msm_vidc_inst *inst, return -EINVAL; } - for (i = 0; i < binfo->num_planes; i++) { - if (binfo->handle[i]) { - struct msm_smem smem = *binfo->handle[i]; + if (b->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) { + for (i = 0; i < binfo->num_planes; i++) { + if (binfo->handle[i]) { + struct msm_smem smem = *binfo->handle[i]; - smem.offset = (unsigned int)(binfo->buff_off[i]); - smem.size = binfo->size[i]; - rc = msm_comm_smem_cache_operations(inst, - &smem, SMEM_CACHE_INVALIDATE); - if (rc) { - dprintk(VIDC_ERR, - "%s: Failed to clean caches: %d\n", - __func__, rc); - return -EINVAL; - } - } else - dprintk(VIDC_DBG, "%s: NULL handle for plane %d\n", + if (inst->session_type == MSM_VIDC_ENCODER && + !i) + size = b->m.planes[i].bytesused + + b->m.planes[i].data_offset; + else + size = -1; + + smem.offset = + (unsigned int)(binfo->buff_off[i]); + smem.size = binfo->size[i]; + rc = msm_comm_smem_cache_operations(inst, + &smem, SMEM_CACHE_INVALIDATE, + size); + if (rc) { + dprintk(VIDC_ERR, + "%s: Failed to clean caches: %d\n", + __func__, rc); + return -EINVAL; + } + } else + dprintk(VIDC_DBG, + "%s: NULL handle for plane %d\n", __func__, i); + } } return 0; } @@ -737,7 +891,7 @@ int msm_vidc_prepare_buf(void *instance, struct v4l2_buffer *b) return -EINVAL; if (inst->session_type == MSM_VIDC_DECODER) - return msm_vdec_prepare_buf(instance, b); + return msm_vdec_prepare_buf(inst, b); if (inst->session_type == MSM_VIDC_ENCODER) return msm_venc_prepare_buf(instance, b); return -EINVAL; @@ -804,8 +958,7 @@ int msm_vidc_release_buffers(void *instance, int buffer_type) if (!release_buf) continue; if (inst->session_type == MSM_VIDC_DECODER) - rc = msm_vdec_release_buf(instance, - &buffer_info); + rc = msm_vdec_release_buf(inst, &buffer_info); if (inst->session_type == MSM_VIDC_ENCODER) rc = msm_venc_release_buf(instance, &buffer_info); @@ -858,6 +1011,7 @@ int msm_vidc_qbuf(void *instance, struct v4l2_buffer *b) int plane = 0; int rc = 0; int i; + int size = -1; if (!inst || !inst->core || !b || !valid_v4l2_buffer(b, inst)) return -EINVAL; @@ -905,7 +1059,7 @@ int msm_vidc_qbuf(void *instance, struct v4l2_buffer *b) V4L2_PIX_FMT_HEVC_HYBRID && binfo->handle[i] && b->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) { rc = msm_comm_smem_cache_operations(inst, - binfo->handle[i], SMEM_CACHE_INVALIDATE); + binfo->handle[i], SMEM_CACHE_INVALIDATE, -1); if (rc) { dprintk(VIDC_ERR, "Failed to inv caches: %d\n", rc); @@ -915,8 +1069,14 @@ int msm_vidc_qbuf(void *instance, struct v4l2_buffer *b) if (binfo->handle[i] && (b->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)) { + if (inst->session_type == MSM_VIDC_DECODER && !i) + size = b->m.planes[i].bytesused + + b->m.planes[i].data_offset; + else + size = -1; rc = msm_comm_smem_cache_operations(inst, - binfo->handle[i], SMEM_CACHE_CLEAN); + binfo->handle[i], SMEM_CACHE_CLEAN, + size); if (rc) { dprintk(VIDC_ERR, "Failed to clean caches: %d\n", rc); @@ -926,7 +1086,7 @@ int msm_vidc_qbuf(void *instance, struct v4l2_buffer *b) } if (inst->session_type == MSM_VIDC_DECODER) - return msm_vdec_qbuf(instance, b); + return msm_vdec_qbuf(inst, b); if (inst->session_type == MSM_VIDC_ENCODER) return msm_venc_qbuf(instance, b); @@ -985,7 +1145,7 @@ int msm_vidc_dqbuf(void *instance, struct v4l2_buffer *b) return -EINVAL; } - rc = output_buffer_cache_invalidate(inst, buffer_info); + rc = output_buffer_cache_invalidate(inst, buffer_info, b); if (rc) return rc; @@ 
-1012,7 +1172,7 @@ int msm_vidc_streamon(void *instance, enum v4l2_buf_type i) return -EINVAL; if (inst->session_type == MSM_VIDC_DECODER) - return msm_vdec_streamon(instance, i); + return msm_vdec_streamon(inst, i); if (inst->session_type == MSM_VIDC_ENCODER) return msm_venc_streamon(instance, i); return -EINVAL; @@ -1027,7 +1187,7 @@ int msm_vidc_streamoff(void *instance, enum v4l2_buf_type i) return -EINVAL; if (inst->session_type == MSM_VIDC_DECODER) - return msm_vdec_streamoff(instance, i); + return msm_vdec_streamoff(inst, i); if (inst->session_type == MSM_VIDC_ENCODER) return msm_venc_streamoff(instance, i); return -EINVAL; diff --git a/drivers/media/platform/msm/vidc/msm_vidc_common.c b/drivers/media/platform/msm/vidc/msm_vidc_common.c index 7b28e80979f2..1d910f4b235c 100644 --- a/drivers/media/platform/msm/vidc/msm_vidc_common.c +++ b/drivers/media/platform/msm/vidc/msm_vidc_common.c @@ -22,6 +22,7 @@ #include "vidc_hfi_api.h" #include "msm_vidc_debug.h" #include "msm_vidc_dcvs.h" +#include "msm_vdec.h" #define IS_ALREADY_IN_STATE(__p, __d) ({\ int __rc = (__p >= __d);\ @@ -3174,7 +3175,7 @@ static int set_output_buffers(struct msm_vidc_inst *inst, goto err_no_mem; } rc = msm_comm_smem_cache_operations(inst, - handle, SMEM_CACHE_CLEAN); + handle, SMEM_CACHE_CLEAN, -1); if (rc) { dprintk(VIDC_WARN, "Failed to clean cache may cause undefined behavior\n"); @@ -3265,7 +3266,7 @@ static int set_internal_buf_on_fw(struct msm_vidc_inst *inst, hdev = inst->core->device; rc = msm_comm_smem_cache_operations(inst, - handle, SMEM_CACHE_CLEAN); + handle, SMEM_CACHE_CLEAN, -1); if (rc) { dprintk(VIDC_WARN, "Failed to clean cache. Undefined behavior\n"); @@ -3341,9 +3342,9 @@ static bool reuse_internal_buffers(struct msm_vidc_inst *inst, return reused; } -static int allocate_and_set_internal_bufs(struct msm_vidc_inst *inst, +int allocate_and_set_internal_bufs(struct msm_vidc_inst *inst, struct hal_buffer_requirements *internal_bufreq, - struct msm_vidc_list *buf_list) + struct msm_vidc_list *buf_list, bool set_on_fw) { struct msm_smem *handle; struct internal_buf *binfo; @@ -3380,11 +3381,13 @@ static int allocate_and_set_internal_bufs(struct msm_vidc_inst *inst, binfo->handle = handle; binfo->buffer_type = internal_bufreq->buffer_type; - rc = set_internal_buf_on_fw(inst, internal_bufreq->buffer_type, - handle, false); - if (rc) - goto fail_set_buffers; - + if (set_on_fw) { + rc = set_internal_buf_on_fw(inst, + internal_bufreq->buffer_type, + handle, false); + if (rc) + goto fail_set_buffers; + } mutex_lock(&buf_list->lock); list_add_tail(&binfo->list, &buf_list->list); mutex_unlock(&buf_list->lock); @@ -3425,7 +3428,7 @@ static int set_internal_buffers(struct msm_vidc_inst *inst, return 0; return allocate_and_set_internal_bufs(inst, internal_buf, - buf_list); + buf_list, true); } int msm_comm_try_state(struct msm_vidc_inst *inst, int state) @@ -3586,39 +3589,6 @@ int msm_vidc_comm_cmd(void *instance, union msm_v4l2_cmd *cmd) "Failed to flush buffers: %d\n", rc); } break; - case V4L2_DEC_QCOM_CMD_RECONFIG_HINT: - { - u32 *ptr = NULL; - struct hal_buffer_requirements *output_buf; - - rc = msm_comm_try_get_bufreqs(inst); - if (rc) { - dprintk(VIDC_ERR, - "Getting buffer requirements failed: %d\n", - rc); - break; - } - - output_buf = get_buff_req_buffer(inst, - msm_comm_get_hal_output_buffer(inst)); - if (output_buf) { - if (dec) { - ptr = (u32 *)dec->raw.data; - ptr[0] = output_buf->buffer_size; - ptr[1] = output_buf->buffer_count_actual; - dprintk(VIDC_DBG, - "Reconfig hint, size is %u, count is 
%u\n", - ptr[0], ptr[1]); - } else { - dprintk(VIDC_ERR, "Null decoder\n"); - } - } else { - dprintk(VIDC_DBG, - "This output buffer not required, buffer_type: %x\n", - HAL_BUFFER_OUTPUT); - } - break; - } default: dprintk(VIDC_ERR, "Unknown Command %d\n", which_cmd); rc = -ENOTSUPP; @@ -4452,15 +4422,15 @@ error: return rc; } -int msm_comm_set_scratch_buffers(struct msm_vidc_inst *inst) -{ +int msm_comm_set_scratch_buffers(struct msm_vidc_inst *inst, + bool max_int_buffer) { int rc = 0; if (!inst || !inst->core || !inst->core->device) { dprintk(VIDC_ERR, "%s invalid parameters\n", __func__); return -EINVAL; } - if (msm_comm_release_scratch_buffers(inst, true)) + if (!max_int_buffer && msm_comm_release_scratch_buffers(inst, true)) dprintk(VIDC_WARN, "Failed to release scratch buffers\n"); rc = set_internal_buffers(inst, HAL_BUFFER_INTERNAL_SCRATCH, @@ -4524,10 +4494,15 @@ static void msm_comm_flush_in_invalid_state(struct msm_vidc_inst *inst) struct vb2_buffer *vb = container_of(ptr, struct vb2_buffer, queued_entry); - vb->planes[0].bytesused = 0; - vb->planes[0].data_offset = 0; - - vb2_buffer_done(vb, VB2_BUF_STATE_DONE); + if (vb->state == VB2_BUF_STATE_ACTIVE) { + vb->planes[0].bytesused = 0; + vb->planes[0].data_offset = 0; + vb2_buffer_done(vb, VB2_BUF_STATE_DONE); + } else { + dprintk(VIDC_WARN, + "%s VB is in state %d not in ACTIVE state\n" + , __func__, vb->state); + } } mutex_unlock(&inst->bufq[port].lock); } @@ -5154,14 +5129,16 @@ void msm_comm_smem_free(struct msm_vidc_inst *inst, struct msm_smem *mem) } int msm_comm_smem_cache_operations(struct msm_vidc_inst *inst, - struct msm_smem *mem, enum smem_cache_ops cache_ops) + struct msm_smem *mem, enum smem_cache_ops cache_ops, + int size) { if (!inst || !mem) { dprintk(VIDC_ERR, "%s: invalid params: %pK %pK\n", __func__, inst, mem); return -EINVAL; } - return msm_smem_cache_operations(inst->mem_client, mem, cache_ops); + return msm_smem_cache_operations(inst->mem_client, mem, + cache_ops, size); } struct msm_smem *msm_comm_smem_user_to_kernel(struct msm_vidc_inst *inst, @@ -5426,3 +5403,7 @@ static void msm_comm_print_debug_info(struct msm_vidc_inst *inst) } mutex_unlock(&core->lock); } +void msm_comm_sort_ctrl(void) +{ + msm_vdec_ctrl_sort(); +} diff --git a/drivers/media/platform/msm/vidc/msm_vidc_common.h b/drivers/media/platform/msm/vidc/msm_vidc_common.h index eac7f658eb31..5658df95db26 100644 --- a/drivers/media/platform/msm/vidc/msm_vidc_common.h +++ b/drivers/media/platform/msm/vidc/msm_vidc_common.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -41,9 +41,13 @@ int msm_comm_try_set_prop(struct msm_vidc_inst *inst, enum hal_property ptype, void *pdata); int msm_comm_try_get_prop(struct msm_vidc_inst *inst, enum hal_property ptype, union hal_get_property *hprop); -int msm_comm_set_scratch_buffers(struct msm_vidc_inst *inst); +int msm_comm_set_scratch_buffers(struct msm_vidc_inst *inst, + bool max_int_buffer); int msm_comm_set_persist_buffers(struct msm_vidc_inst *inst); int msm_comm_set_output_buffers(struct msm_vidc_inst *inst); +int allocate_and_set_internal_bufs(struct msm_vidc_inst *inst, + struct hal_buffer_requirements *internal_bufreq, + struct msm_vidc_list *buf_list, bool set_on_fw); int msm_comm_queue_output_buffers(struct msm_vidc_inst *inst); int msm_comm_qbuf(struct msm_vidc_inst *inst, struct vb2_buffer *vb); void msm_comm_scale_clocks_and_bus(struct msm_vidc_inst *inst); @@ -76,7 +80,7 @@ struct msm_smem *msm_comm_smem_alloc(struct msm_vidc_inst *inst, enum hal_buffer buffer_type, int map_kernel); void msm_comm_smem_free(struct msm_vidc_inst *inst, struct msm_smem *mem); int msm_comm_smem_cache_operations(struct msm_vidc_inst *inst, - struct msm_smem *mem, enum smem_cache_ops cache_ops); + struct msm_smem *mem, enum smem_cache_ops cache_ops, int size); struct msm_smem *msm_comm_smem_user_to_kernel(struct msm_vidc_inst *inst, int fd, u32 offset, enum hal_buffer buffer_type); enum hal_video_codec get_hal_codec(int fourcc); @@ -99,4 +103,5 @@ void msm_comm_cleanup_internal_buffers(struct msm_vidc_inst *inst); int msm_vidc_comm_s_parm(struct msm_vidc_inst *inst, struct v4l2_streamparm *a); bool msm_comm_turbo_session(struct msm_vidc_inst *inst); void msm_comm_print_inst_info(struct msm_vidc_inst *inst); +void msm_comm_sort_ctrl(void); #endif diff --git a/drivers/media/platform/msm/vidc/msm_vidc_dcvs.c b/drivers/media/platform/msm/vidc/msm_vidc_dcvs.c index 3e269576c126..9bc313adb10a 100644 --- a/drivers/media/platform/msm/vidc/msm_vidc_dcvs.c +++ b/drivers/media/platform/msm/vidc/msm_vidc_dcvs.c @@ -407,8 +407,10 @@ static int msm_dcvs_enc_scale_clocks(struct msm_vidc_inst *inst) if (dcvs->etb_counter < total_input_buf) { dcvs->etb_counter++; - if (dcvs->etb_counter != total_input_buf) - return rc; + if (dcvs->etb_counter != total_input_buf) { + return msm_comm_scale_clocks_load(core, dcvs->load, + LOAD_CALC_NO_QUIRKS); + } } dprintk(VIDC_PROF, @@ -425,7 +427,7 @@ static int msm_dcvs_enc_scale_clocks(struct msm_vidc_inst *inst) } if (fw_pending_bufs >= DCVS_ENC_HIGH_THR && - dcvs->load <= dcvs->load_low) { + dcvs->load < dcvs->load_high) { dcvs->load = dcvs->load_high; dcvs->prev_freq_increased = true; } else { diff --git a/drivers/media/platform/msm/vidc/msm_vidc_internal.h b/drivers/media/platform/msm/vidc/msm_vidc_internal.h index 690a61f4824f..4cb900bbca10 100644 --- a/drivers/media/platform/msm/vidc/msm_vidc_internal.h +++ b/drivers/media/platform/msm/vidc/msm_vidc_internal.h @@ -355,7 +355,7 @@ struct buffer_info *device_to_uvaddr(struct msm_vidc_list *buf_list, int buf_ref_get(struct msm_vidc_inst *inst, struct buffer_info *binfo); int buf_ref_put(struct msm_vidc_inst *inst, struct buffer_info *binfo); int output_buffer_cache_invalidate(struct msm_vidc_inst *inst, - struct buffer_info *binfo); + struct buffer_info *binfo, struct v4l2_buffer *b); int qbuf_dynamic_buf(struct msm_vidc_inst *inst, struct buffer_info *binfo); int unmap_and_deregister_buf(struct msm_vidc_inst 
*inst, @@ -369,7 +369,7 @@ struct msm_smem *msm_smem_alloc(void *clt, size_t size, u32 align, u32 flags, void msm_smem_free(void *clt, struct msm_smem *mem); void msm_smem_delete_client(void *clt); int msm_smem_cache_operations(void *clt, struct msm_smem *mem, - enum smem_cache_ops); + enum smem_cache_ops, int size); struct msm_smem *msm_smem_user_to_kernel(void *clt, int fd, u32 offset, enum hal_buffer buffer_type); struct context_bank_info *msm_smem_get_context_bank(void *clt, diff --git a/drivers/media/platform/msm/vidc/msm_vidc_resources.h b/drivers/media/platform/msm/vidc/msm_vidc_resources.h index 3a329d989918..a1b4ad48b054 100644 --- a/drivers/media/platform/msm/vidc/msm_vidc_resources.h +++ b/drivers/media/platform/msm/vidc/msm_vidc_resources.h @@ -106,6 +106,7 @@ struct clock_info { u32 count; bool has_scaling; bool has_mem_retention; + unsigned long rate_on_enable; }; struct clock_set { diff --git a/drivers/media/platform/msm/vidc/venus_hfi.c b/drivers/media/platform/msm/vidc/venus_hfi.c index bc72c4a56c91..20f02ce46029 100644 --- a/drivers/media/platform/msm/vidc/venus_hfi.c +++ b/drivers/media/platform/msm/vidc/venus_hfi.c @@ -582,7 +582,7 @@ static int __smem_alloc(struct venus_hfi_device *dev, dprintk(VIDC_DBG, "__smem_alloc: ptr = %pK, size = %d\n", alloc->kvaddr, size); rc = msm_smem_cache_operations(dev->hal_client, alloc, - SMEM_CACHE_CLEAN); + SMEM_CACHE_CLEAN, -1); if (rc) { dprintk(VIDC_WARN, "Failed to clean cache\n"); dprintk(VIDC_WARN, "This may result in undefined behavior\n"); @@ -1574,6 +1574,7 @@ static int __scale_clocks(struct venus_hfi_device *device, return rc; } + static int venus_hfi_scale_clocks(void *dev, int load, struct vidc_clk_scale_data *data, unsigned long instant_bitrate) @@ -1600,6 +1601,41 @@ exit: return rc; } +static void __save_clock_rate(struct venus_hfi_device *device, bool reset) +{ + struct clock_info *cl; + + venus_hfi_for_each_clock(device, cl) { + if (cl->has_scaling) { + cl->rate_on_enable = + reset ? 
0 : clk_get_rate(cl->clk); + dprintk(VIDC_PROF, "Saved clock %s rate %lu\n", + cl->name, cl->rate_on_enable); + } + } +} + +static void __restore_clock_rate(struct venus_hfi_device *device) +{ + struct clock_info *cl; + + venus_hfi_for_each_clock(device, cl) { + if (cl->has_scaling && cl->rate_on_enable) { + int rc; + + rc = __set_clk_rate(device, cl, cl->rate_on_enable); + if (rc) + dprintk(VIDC_ERR, + "Failed to restore clock %s rate %lu\n", + cl->name, cl->rate_on_enable); + else + dprintk(VIDC_DBG, + "Restored clock %s rate %lu\n", + cl->name, cl->rate_on_enable); + } + } +} + /* Writes into cmdq without raising an interrupt */ static int __iface_cmdq_write_relaxed(struct venus_hfi_device *device, void *pkt, bool *requires_interrupt) @@ -4316,6 +4352,7 @@ static inline int __suspend(struct venus_hfi_device *device) goto err_tzbsp_suspend; } + __save_clock_rate(device, false); __venus_power_off(device, true); dprintk(VIDC_PROF, "Venus power collapsed\n"); return rc; @@ -4345,6 +4382,7 @@ static inline int __resume(struct venus_hfi_device *device) dprintk(VIDC_ERR, "Failed to power on venus\n"); goto err_venus_power_on; } + __restore_clock_rate(device); /* Reboot the firmware */ rc = __tzbsp_set_video_state(TZBSP_VIDEO_STATE_RESUME); @@ -4382,6 +4420,7 @@ exit: err_reset_core: __tzbsp_set_video_state(TZBSP_VIDEO_STATE_SUSPEND); err_set_video_state: + __save_clock_rate(device, true); __venus_power_off(device, true); err_venus_power_on: dprintk(VIDC_ERR, "Failed to resume from power collapse\n"); @@ -4440,6 +4479,7 @@ fail_protect_mem: subsystem_put(device->resources.fw.cookie); device->resources.fw.cookie = NULL; fail_load_fw: + __save_clock_rate(device, true); __venus_power_off(device, true); fail_venus_power_on: fail_init_pkt: @@ -4461,6 +4501,7 @@ static void __unload_fw(struct venus_hfi_device *device) __vote_buses(device, NULL, 0); subsystem_put(device->resources.fw.cookie); __interface_queues_release(device); + __save_clock_rate(device, true); __venus_power_off(device, false); device->resources.fw.cookie = NULL; __deinit_resources(device); diff --git a/drivers/media/platform/msm/vidc/vidc_hfi_api.h b/drivers/media/platform/msm/vidc/vidc_hfi_api.h index 820c8685a75b..6cc5f9f50ba1 100644 --- a/drivers/media/platform/msm/vidc/vidc_hfi_api.h +++ b/drivers/media/platform/msm/vidc/vidc_hfi_api.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -201,7 +201,7 @@ enum hal_property { HAL_PARAM_VENC_H264_ENTROPY_CABAC_MODEL, HAL_CONFIG_VENC_MAX_BITRATE, HAL_PARAM_VENC_H264_VUI_TIMING_INFO, - HAL_PARAM_VENC_H264_GENERATE_AUDNAL, + HAL_PARAM_VENC_GENERATE_AUDNAL, HAL_PARAM_VENC_MAX_NUM_B_FRAMES, HAL_PARAM_BUFFER_ALLOC_MODE, HAL_PARAM_VDEC_FRAME_ASSEMBLY, diff --git a/drivers/media/platform/msm/vidc/vidc_hfi_helper.h b/drivers/media/platform/msm/vidc/vidc_hfi_helper.h index bb9958b0a819..31af06cd88ef 100644 --- a/drivers/media/platform/msm/vidc/vidc_hfi_helper.h +++ b/drivers/media/platform/msm/vidc/vidc_hfi_helper.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -340,7 +340,7 @@ struct hfi_buffer_info { (HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x014) #define HFI_PROPERTY_PARAM_VENC_H264_PPS_ID \ (HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x015) -#define HFI_PROPERTY_PARAM_VENC_H264_GENERATE_AUDNAL \ +#define HFI_PROPERTY_PARAM_VENC_GENERATE_AUDNAL \ (HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x016) #define HFI_PROPERTY_PARAM_VENC_ASPECT_RATIO \ (HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x017) diff --git a/drivers/media/platform/s5p-jpeg/jpeg-core.c b/drivers/media/platform/s5p-jpeg/jpeg-core.c index 4a608cbe0fdb..9c6fc09b88e0 100644 --- a/drivers/media/platform/s5p-jpeg/jpeg-core.c +++ b/drivers/media/platform/s5p-jpeg/jpeg-core.c @@ -1098,10 +1098,10 @@ static bool s5p_jpeg_parse_hdr(struct s5p_jpeg_q_data *result, struct s5p_jpeg_ctx *ctx) { int c, components = 0, notfound, n_dht = 0, n_dqt = 0; - unsigned int height, width, word, subsampling = 0, sos = 0, sof = 0, - sof_len = 0; - unsigned int dht[S5P_JPEG_MAX_MARKER], dht_len[S5P_JPEG_MAX_MARKER], - dqt[S5P_JPEG_MAX_MARKER], dqt_len[S5P_JPEG_MAX_MARKER]; + unsigned int height = 0, width = 0, word, subsampling = 0; + unsigned int sos = 0, sof = 0, sof_len = 0; + unsigned int dht[S5P_JPEG_MAX_MARKER], dht_len[S5P_JPEG_MAX_MARKER]; + unsigned int dqt[S5P_JPEG_MAX_MARKER], dqt_len[S5P_JPEG_MAX_MARKER]; long length; struct s5p_jpeg_buffer jpeg_buffer; diff --git a/drivers/media/rc/imon.c b/drivers/media/rc/imon.c index 65f80b8b9f7a..eb9e7feb9b13 100644 --- a/drivers/media/rc/imon.c +++ b/drivers/media/rc/imon.c @@ -1629,7 +1629,7 @@ static void imon_incoming_packet(struct imon_context *ictx, if (kc == KEY_KEYBOARD && !ictx->release_code) { ictx->last_keycode = kc; if (!nomouse) { - ictx->pad_mouse = ~(ictx->pad_mouse) & 0x1; + ictx->pad_mouse = !ictx->pad_mouse; dev_dbg(dev, "toggling to %s mode\n", ictx->pad_mouse ? 
"mouse" : "keyboard"); spin_unlock_irqrestore(&ictx->kc_lock, flags); diff --git a/drivers/media/usb/pvrusb2/pvrusb2-eeprom.c b/drivers/media/usb/pvrusb2/pvrusb2-eeprom.c index e1907cd0c3b7..7613d1fee104 100644 --- a/drivers/media/usb/pvrusb2/pvrusb2-eeprom.c +++ b/drivers/media/usb/pvrusb2/pvrusb2-eeprom.c @@ -123,15 +123,10 @@ int pvr2_eeprom_analyze(struct pvr2_hdw *hdw) memset(&tvdata,0,sizeof(tvdata)); eeprom = pvr2_eeprom_fetch(hdw); - if (!eeprom) return -EINVAL; + if (!eeprom) + return -EINVAL; - { - struct i2c_client fake_client; - /* Newer version expects a useless client interface */ - fake_client.addr = hdw->eeprom_addr; - fake_client.adapter = &hdw->i2c_adap; - tveeprom_hauppauge_analog(&fake_client,&tvdata,eeprom); - } + tveeprom_hauppauge_analog(NULL, &tvdata, eeprom); trace_eeprom("eeprom assumed v4l tveeprom module"); trace_eeprom("eeprom direct call results:"); diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c index 47f37683893a..3dc9ed2e0774 100644 --- a/drivers/media/v4l2-core/videobuf2-core.c +++ b/drivers/media/v4l2-core/videobuf2-core.c @@ -793,7 +793,7 @@ EXPORT_SYMBOL_GPL(vb2_core_create_bufs); */ void *vb2_plane_vaddr(struct vb2_buffer *vb, unsigned int plane_no) { - if (plane_no > vb->num_planes || !vb->planes[plane_no].mem_priv) + if (plane_no >= vb->num_planes || !vb->planes[plane_no].mem_priv) return NULL; return call_ptr_memop(vb, vaddr, vb->planes[plane_no].mem_priv); diff --git a/drivers/mfd/omap-usb-tll.c b/drivers/mfd/omap-usb-tll.c index c30290f33430..fe51e9709210 100644 --- a/drivers/mfd/omap-usb-tll.c +++ b/drivers/mfd/omap-usb-tll.c @@ -375,8 +375,8 @@ int omap_tll_init(struct usbhs_omap_platform_data *pdata) * and use SDR Mode */ reg &= ~(OMAP_TLL_CHANNEL_CONF_UTMIAUTOIDLE - | OMAP_TLL_CHANNEL_CONF_ULPINOBITSTUFF | OMAP_TLL_CHANNEL_CONF_ULPIDDRMODE); + reg |= OMAP_TLL_CHANNEL_CONF_ULPINOBITSTUFF; } else if (pdata->port_mode[i] == OMAP_EHCI_PORT_MODE_HSIC) { /* diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig index 2a4abf736d89..d86795bf9453 100644 --- a/drivers/misc/Kconfig +++ b/drivers/misc/Kconfig @@ -551,7 +551,7 @@ config VEXPRESS_SYSCFG config UID_SYS_STATS bool "Per-UID statistics" - depends on PROFILING + depends on PROFILING && TASK_XACCT && TASK_IO_ACCOUNTING help Per UID based cpu time statistics exported to /proc/uid_cputime Per UID based io statistics exported to /proc/uid_io diff --git a/drivers/misc/c2port/c2port-duramar2150.c b/drivers/misc/c2port/c2port-duramar2150.c index 5484301d57d9..3dc61ea7dc64 100644 --- a/drivers/misc/c2port/c2port-duramar2150.c +++ b/drivers/misc/c2port/c2port-duramar2150.c @@ -129,8 +129,8 @@ static int __init duramar2150_c2port_init(void) duramar2150_c2port_dev = c2port_device_register("uc", &duramar2150_c2port_ops, NULL); - if (!duramar2150_c2port_dev) { - ret = -ENODEV; + if (IS_ERR(duramar2150_c2port_dev)) { + ret = PTR_ERR(duramar2150_c2port_dev); goto free_region; } diff --git a/drivers/misc/cxl/file.c b/drivers/misc/cxl/file.c index 783337d22f36..10a02934bfc0 100644 --- a/drivers/misc/cxl/file.c +++ b/drivers/misc/cxl/file.c @@ -158,11 +158,8 @@ static long afu_ioctl_start_work(struct cxl_context *ctx, /* Do this outside the status_mutex to avoid a circular dependency with * the locking in cxl_mmap_fault() */ - if (copy_from_user(&work, uwork, - sizeof(struct cxl_ioctl_start_work))) { - rc = -EFAULT; - goto out; - } + if (copy_from_user(&work, uwork, sizeof(work))) + return -EFAULT; mutex_lock(&ctx->status_mutex); if (ctx->status != OPENED) { diff 
--git a/drivers/misc/enclosure.c b/drivers/misc/enclosure.c index 65fed7146e9b..cc91f7b3d90c 100644 --- a/drivers/misc/enclosure.c +++ b/drivers/misc/enclosure.c @@ -375,6 +375,7 @@ int enclosure_add_device(struct enclosure_device *edev, int component, struct device *dev) { struct enclosure_component *cdev; + int err; if (!edev || component >= edev->components) return -EINVAL; @@ -384,12 +385,17 @@ int enclosure_add_device(struct enclosure_device *edev, int component, if (cdev->dev == dev) return -EEXIST; - if (cdev->dev) + if (cdev->dev) { enclosure_remove_links(cdev); - - put_device(cdev->dev); + put_device(cdev->dev); + } cdev->dev = get_device(dev); - return enclosure_add_links(cdev); + err = enclosure_add_links(cdev); + if (err) { + put_device(cdev->dev); + cdev->dev = NULL; + } + return err; } EXPORT_SYMBOL_GPL(enclosure_add_device); diff --git a/drivers/misc/hdcp.c b/drivers/misc/hdcp.c index 33ec0c15efa6..c6f2dbfe573d 100644 --- a/drivers/misc/hdcp.c +++ b/drivers/misc/hdcp.c @@ -12,10 +12,13 @@ #define pr_fmt(fmt) "%s: " fmt, __func__ +#include #include #include #include #include +#include +#include #include #include #include @@ -30,6 +33,8 @@ #include #include #include +#include +#include