Merge 4b34bd7077 on remote branch

Change-Id: I3d89a5d75c8a764e914b3de32288bfccb3fbfbd6
Linux Build Service Account 2018-09-01 16:36:43 -07:00
commit 310b852149
116 changed files with 1737 additions and 462 deletions

View file

@ -13,6 +13,7 @@ Optional properties:
- qcom,fastrpc-glink: Flag to use glink instead of smd for IPC
- qcom,fastrpc-vmid-heap-shared: Flag for the dynamic heap feature, used to
share an HLOS memory buffer with the ADSP
- qcom,secure-domains: FastRPC secure domain configuration
Optional subnodes:
- qcom,msm_fastrpc_compute_cb : Child nodes representing the compute context

View file

@ -1,6 +1,6 @@
VERSION = 4
PATCHLEVEL = 4
SUBLEVEL = 146
SUBLEVEL = 148
EXTRAVERSION =
NAME = Blurry Fish Butt

View file

@ -1250,7 +1250,7 @@
/* non-prefetchable memory */
0x82000000 0 0x08000000 0x08000000 0 0x00f00000>;
num-lanes = <1>;
interrupts = <GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>;
interrupts = <GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clks IMX6SX_CLK_PCIE_REF_125M>,
<&clks IMX6SX_CLK_PCIE_AXI>,
<&clks IMX6SX_CLK_LVDS1_OUT>,

View file

@ -0,0 +1,102 @@
/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
&mdss_mdp {
dsi_dual_sharp_wqhd_cmd: qcom,mdss_dsi_sharp_wqhd_cmd {
qcom,mdss-dsi-panel-name =
"Dual Sharp WQHD cmd mode dsi panel";
qcom,mdss-dsi-panel-type = "dsi_cmd_mode";
qcom,mdss-dsi-panel-framerate = <60>;
qcom,mdss-dsi-virtual-channel-id = <0>;
qcom,mdss-dsi-stream = <0>;
qcom,mdss-dsi-panel-width = <720>;
qcom,mdss-dsi-panel-height = <2560>;
qcom,mdss-dsi-h-front-porch = <30>;
qcom,mdss-dsi-h-back-porch = <100>;
qcom,mdss-dsi-h-pulse-width = <4>;
qcom,mdss-dsi-h-sync-skew = <0>;
qcom,mdss-dsi-v-back-porch = <8>;
qcom,mdss-dsi-v-front-porch = <8>;
qcom,mdss-dsi-v-pulse-width = <1>;
qcom,mdss-dsi-bpp = <24>;
qcom,mdss-dsi-border-color = <0>;
qcom,mdss-dsi-on-command-state = "dsi_lp_mode";
qcom,mdss-dsi-off-command-state = "dsi_hs_mode";
qcom,mdss-dsi-h-sync-pulse = <0>;
qcom,mdss-dsi-traffic-mode = "burst_mode";
qcom,mdss-dsi-bllp-eof-power-mode;
qcom,mdss-dsi-bllp-power-mode;
qcom,mdss-dsi-lane-0-state;
qcom,mdss-dsi-lane-1-state;
qcom,mdss-dsi-lane-2-state;
qcom,mdss-dsi-lane-3-state;
qcom,mdss-dsi-panel-timings = [d9 6a 48 00 b0 b0 52 6c 57 03 04
00];
qcom,mdss-dsi-t-clk-post = <0xc>;
qcom,mdss-dsi-t-clk-pre = <0x28>;
qcom,mdss-dsi-dma-trigger = "trigger_sw";
qcom,mdss-dsi-mdp-trigger = "none";
qcom,mdss-dsi-reset-sequence = <1 20>, <0 20>, <1 20>;
qcom,mdss-dsi-te-pin-select = <1>;
qcom,mdss-dsi-wr-mem-start = <0x2c>;
qcom,mdss-dsi-wr-mem-continue = <0x3c>;
qcom,mdss-dsi-te-dcs-command = <1>;
qcom,mdss-dsi-te-check-enable;
qcom,mdss-dsi-te-using-te-pin;
qcom,ulps-enabled;
qcom,dcs-cmd-by-left;
qcom,mdss-dsi-tx-eot-append;
qcom,mdss-pan-physical-width-dimension = <68>;
qcom,mdss-pan-physical-height-dimension = <121>;
qcom,adjust-timer-wakeup-ms = <1>;
qcom,mdss-dsi-on-command = [
39 01 00 00 00 00 11 91 09 20 00 20 02 00 03 1c 04 21 00
0f 03 19 01 97
39 01 00 00 00 00 03 92 10 f0
15 01 00 00 00 00 02 90 03
15 01 00 00 00 00 02 03 01
39 01 00 00 00 00 06 f0 55 aa 52 08 04
15 01 00 00 00 00 02 c0 03
39 01 00 00 00 00 06 f0 55 aa 52 08 07
15 01 00 00 00 00 02 ef 01
39 01 00 00 00 00 06 f0 55 aa 52 08 00
15 01 00 00 00 00 02 b4 01
15 01 00 00 00 00 02 35 00
39 01 00 00 00 00 06 f0 55 aa 52 08 01
39 01 00 00 00 00 05 ff aa 55 a5 80
15 01 00 00 00 00 02 6f 01
15 01 00 00 00 00 02 f3 10
39 01 00 00 00 00 05 ff aa 55 a5 00
15 01 00 00 00 00 02 90 01
15 01 00 00 00 00 02 03 00
15 01 00 00 00 00 02 58 01
15 01 00 00 00 00 02 c9 00
15 01 00 00 00 00 02 c0 15
05 01 00 00 78 00 01 11 /* sleep out + delay 120ms */
05 01 00 00 78 00 01 29 /* display on + delay 120ms */
];
qcom,mdss-dsi-off-command = [05 01 00 00 78 00 02 28 00
05 01 00 00 78 00 02 10 00];
qcom,config-select = <&dsi_dual_sharp_wqhd_cmd_config0>;
dsi_dual_sharp_wqhd_cmd_config0: config0 {
qcom,split-mode = "dualctl-split";
};
dsi_dual_sharp_wqhd_cmd_config1: config1 {
qcom,split-mode = "pingpong-split";
};
};
};
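For reference when reading the command arrays above: per the downstream mdss-dsi-panel binding documentation, each entry in qcom,mdss-dsi-on-command is a 7-byte header (data type, last-in-chain, virtual channel, ack, post-command delay in ms, 2-byte big-endian payload length) followed by the payload. A standalone sketch (illustrative userspace code, not part of this diff) decoding the sleep-out entry from the node above:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Header layout of one mdss-dsi command entry, assumed from the
 * mdss-dsi-panel binding documentation. */
struct dsi_cmd_hdr {
        uint8_t dtype;   /* DSI data type: 0x05 = DCS short write, no param */
        uint8_t last;    /* 1 = last command in the batch */
        uint8_t vc;      /* virtual channel id */
        uint8_t ack;     /* wait for ack from the panel */
        uint8_t wait;    /* post-command delay, ms */
        uint8_t dlen[2]; /* payload length, big-endian */
};

int main(void)
{
        /* "05 01 00 00 78 00 01 11" -- sleep out + delay 120ms above */
        const uint8_t raw[] = { 0x05, 0x01, 0x00, 0x00, 0x78, 0x00, 0x01, 0x11 };
        struct dsi_cmd_hdr hdr;

        memcpy(&hdr, raw, sizeof(hdr));
        printf("dtype=%#x last=%d wait=%dms len=%d payload=%#x (DCS sleep out)\n",
               hdr.dtype, hdr.last, hdr.wait,
               (hdr.dlen[0] << 8) | hdr.dlen[1], raw[7]);
        return 0;
}

The 0x78 wait byte is the 120 ms delay noted in the inline comments of the array.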

View file

@ -0,0 +1,95 @@
/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
&mdss_mdp {
dsi_dual_sharp_wqhd_video: qcom,mdss_dsi_sharp_wqhd_video {
qcom,mdss-dsi-panel-name =
"Dual Sharp wqhd video mode dsi panel";
qcom,mdss-dsi-panel-type = "dsi_video_mode";
qcom,mdss-dsi-panel-framerate = <60>;
qcom,mdss-dsi-virtual-channel-id = <0>;
qcom,mdss-dsi-stream = <0>;
qcom,mdss-dsi-panel-width = <720>;
qcom,mdss-dsi-panel-height = <2560>;
qcom,mdss-dsi-h-front-porch = <30>;
qcom,mdss-dsi-h-back-porch = <100>;
qcom,mdss-dsi-h-pulse-width = <4>;
qcom,mdss-dsi-h-sync-skew = <0>;
qcom,mdss-dsi-v-back-porch = <8>;
qcom,mdss-dsi-v-front-porch = <8>;
qcom,mdss-dsi-v-pulse-width = <1>;
qcom,mdss-dsi-bpp = <24>;
qcom,mdss-dsi-border-color = <0>;
qcom,mdss-dsi-on-command-state = "dsi_lp_mode";
qcom,mdss-dsi-off-command-state = "dsi_hs_mode";
qcom,mdss-dsi-h-sync-pulse = <0>;
qcom,mdss-dsi-traffic-mode = "burst_mode";
qcom,mdss-dsi-bllp-eof-power-mode;
qcom,mdss-dsi-bllp-power-mode;
qcom,mdss-dsi-lane-0-state;
qcom,mdss-dsi-lane-1-state;
qcom,mdss-dsi-lane-2-state;
qcom,mdss-dsi-lane-3-state;
qcom,mdss-dsi-panel-timings = [00 2e 08 0a 12 18 08 0b 09 03 04
00];
qcom,mdss-dsi-t-clk-post = <0xc>;
qcom,mdss-dsi-t-clk-pre = <0x21>;
qcom,mdss-dsi-dma-trigger = "trigger_sw";
qcom,mdss-dsi-mdp-trigger = "none";
qcom,mdss-dsi-reset-sequence = <1 20>, <0 20>, <1 20>;
qcom,mdss-dsi-tx-eot-append;
qcom,mdss-pan-physical-width-dimension = <68>;
qcom,mdss-pan-physical-height-dimension = <121>;
qcom,adjust-timer-wakeup-ms = <1>;
qcom,mdss-dsi-on-command = [
39 01 00 00 00 00 11 91 09 20 00 20 02 00 03 1c 04 21 00
0f 03 19 01 97
39 01 00 00 00 00 03 92 10 f0
15 01 00 00 00 00 02 90 03
15 01 00 00 00 00 02 03 01
39 01 00 00 00 00 06 f0 55 aa 52 08 04
15 01 00 00 00 00 02 c0 03
39 01 00 00 00 00 06 f0 55 aa 52 08 07
15 01 00 00 00 00 02 ef 01
39 01 00 00 00 00 06 f0 55 aa 52 08 00
15 01 00 00 00 00 02 b4 10
15 01 00 00 00 00 02 35 00
39 01 00 00 00 00 06 f0 55 aa 52 08 01
39 01 00 00 00 00 05 ff aa 55 a5 80
15 01 00 00 00 00 02 6f 01
15 01 00 00 00 00 02 f3 10
39 01 00 00 00 00 05 ff aa 55 a5 00
15 01 00 00 00 00 02 90 01
15 01 00 00 00 00 02 03 00
15 01 00 00 00 00 02 58 01
15 01 00 00 00 00 02 c9 00
15 01 00 00 00 00 02 c0 15
05 01 00 00 78 00 01 11 /* sleep out + delay 120ms */
05 01 00 00 78 00 01 29 /* display on + delay 120ms */
];
qcom,mdss-dsi-off-command = [05 01 00 00 78 00 02 28 00
05 01 00 00 78 00 02 10 00];
qcom,config-select = <&dsi_dual_sharp_wqhd_video_config0>;
dsi_dual_sharp_wqhd_video_config0: config0 {
qcom,split-mode = "dualctl-split";
};
dsi_dual_sharp_wqhd_video_config1: config1 {
qcom,split-mode = "pingpong-split";
};
};
};

View file

@ -62,6 +62,7 @@
compatible = "qcom,ntn_avb";
vdd-ntn-pci-supply = <&ntn_vreg>;
vdd-ntn-io-supply = <&ntn_vreg>;
ntn-rst-gpio = <&pm8994_gpios 13 0>;
qcom,ntn-rst-delay-msec = <100>;
qcom,ntn-rc-num = <1>;
qcom,ntn-bus-num = <1>;
@ -470,6 +471,17 @@
status = "okay";
};
gpio@cc00 { /* GPIO 13 - NTN_RST */
qcom,mode = <1>; /* DIGITAL OUT */
qcom,output-type = <0>; /* CMOS logic */
qcom,pull = <5>;
qcom,vin-sel = <2>; /* 1.8 V */
qcom,out-strength = <1>;
qcom,src-sel = <0>; /* GPIO */
qcom,master-en = <1>; /* Enable GPIO */
status = "okay";
};
gpio@d000 { /* GPIO 17 - USB1 VBUS detect */
qcom,mode = <0>; /* Digital Input*/
qcom,pull = <5>; /* No pull */

View file

@ -1,4 +1,4 @@
/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@ -385,6 +385,20 @@
qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
};
&dsi_dual_sharp_wqhd_video {
qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
qcom,mdss-dsi-bl-min-level = <1>;
qcom,mdss-dsi-bl-max-level = <4095>;
qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
};
&dsi_dual_sharp_wqhd_cmd {
qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
qcom,mdss-dsi-bl-min-level = <1>;
qcom,mdss-dsi-bl-max-level = <4095>;
qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
};
&mdss_hdmi_tx {
pinctrl-names = "hdmi_hpd_active", "hdmi_ddc_active", "hdmi_cec_active",
"hdmi_active", "hdmi_sleep";

View file

@ -1,4 +1,4 @@
/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@ -30,6 +30,8 @@
#include "dsi-panel-sim-dualmipi-cmd.dtsi"
#include "dsi-panel-s6e3ha3-amoled-dualmipi-wqhd-cmd.dtsi"
#include "dsi-panel-nt36850-truly-dualmipi-wqhd-cmd.dtsi"
#include "dsi-panel-sharp-dualdsi-wqhd-video.dtsi"
#include "dsi-panel-sharp-dualdsi-wqhd-cmd.dtsi"
&soc {
dsi_panel_pwr_supply: dsi_panel_pwr_supply {
@ -227,3 +229,15 @@
qcom,mdss-dsi-t-clk-post = <0x34>;
qcom,mdss-dsi-t-clk-pre = <0x2a>;
};
&dsi_dual_sharp_wqhd_video {
qcom,mdss-dsi-panel-timings = [00 2e 08 0a 12 18 08 0b 09 03 04 00];
qcom,mdss-dsi-t-clk-post = <0x0c>;
qcom,mdss-dsi-t-clk-pre = <0x21>;
};
&dsi_dual_sharp_wqhd_cmd {
qcom,mdss-dsi-panel-timings = [00 2e 08 0a 12 18 08 0b 09 03 04 00];
qcom,mdss-dsi-t-clk-post = <0x0c>;
qcom,mdss-dsi-t-clk-pre = <0x21>;
};

View file

@ -1,4 +1,4 @@
/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@ -444,6 +444,20 @@
qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
};
&dsi_dual_sharp_wqhd_video {
qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
qcom,mdss-dsi-bl-min-level = <1>;
qcom,mdss-dsi-bl-max-level = <4095>;
qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
};
&dsi_dual_sharp_wqhd_cmd {
qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
qcom,mdss-dsi-bl-min-level = <1>;
qcom,mdss-dsi-bl-max-level = <4095>;
qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
};
&mem_client_3_size {
qcom,peripheral-size = <0x500000>;
};

View file

@ -1570,11 +1570,12 @@
};
};
qcom,rmtfs_sharedmem@85e00000 {
qcom,rmtfs_sharedmem@0 {
compatible = "qcom,sharedmem-uio";
reg = <0x85e00000 0x200000>;
reg = <0x0 0x200000>;
reg-names = "rmtfs";
qcom,client-id = <0x00000001>;
qcom,guard-memory;
};
qcom,rmnet-ipa {

View file

@ -122,6 +122,19 @@
/* Up to 800 Mbps */
<45 512 207108 14432000>;
};
subsys_notif_virt: qcom,subsys_notif_virt@2D000000 {
compatible = "qcom,subsys-notif-virt";
reg = <0x2D000000 0x100>;
reg-names = "vdev_base";
adsp {
subsys-name = "adsp";
interrupts = <0 43 0>;
interrupt-names = "state-irq";
type = "virtual";
offset = <0>;
};
};
};
&reserved_memory {

View file

@ -184,19 +184,22 @@
subsys_notif_virt: qcom,subsys_notif_virt@2D000000 {
compatible = "qcom,subsys-notif-virt";
reg = <0x2D000000 0x18>;
reg = <0x2D000000 0x400>;
reg-names = "vdev_base";
adsp {
subsys-name = "adsp";
type = "native";
offset = <0>;
};
mpss {
subsys-name = "modem";
offset = <8>;
type = "native";
offset = <256>;
};
wlan {
subsys-name = "AR6320";
offset = <16>;
type = "native";
offset = <512>;
};
};
};

View file

@ -556,7 +556,6 @@ CONFIG_CORESIGHT_SOURCE_DUMMY=y
CONFIG_SECURITY=y
CONFIG_SECURITY_SELINUX=y
CONFIG_SECURITY_SMACK=y
CONFIG_CRYPTO_ECHAINIV=y
CONFIG_CRYPTO_XCBC=y
CONFIG_CRYPTO_MD4=y
CONFIG_CRYPTO_TWOFISH=y

View file

@ -662,7 +662,6 @@ CONFIG_SECURITY=y
CONFIG_HARDENED_USERCOPY=y
CONFIG_SECURITY_SELINUX=y
CONFIG_SECURITY_SMACK=y
CONFIG_CRYPTO_ECHAINIV=y
CONFIG_CRYPTO_XCBC=y
CONFIG_CRYPTO_MD4=y
CONFIG_CRYPTO_TWOFISH=y

View file

@ -700,7 +700,6 @@ CONFIG_SECURITY=y
CONFIG_HARDENED_USERCOPY=y
CONFIG_SECURITY_SELINUX=y
CONFIG_SECURITY_SMACK=y
CONFIG_CRYPTO_ECHAINIV=y
CONFIG_CRYPTO_XCBC=y
CONFIG_CRYPTO_MD4=y
CONFIG_CRYPTO_TWOFISH=y

View file

@ -575,7 +575,6 @@ CONFIG_SECURITY=y
CONFIG_HARDENED_USERCOPY=y
CONFIG_SECURITY_SELINUX=y
CONFIG_SECURITY_SMACK=y
CONFIG_CRYPTO_ECHAINIV=y
CONFIG_CRYPTO_XCBC=y
CONFIG_CRYPTO_MD4=y
CONFIG_CRYPTO_TWOFISH=y

View file

@ -649,7 +649,6 @@ CONFIG_SECURITY=y
CONFIG_HARDENED_USERCOPY=y
CONFIG_SECURITY_SELINUX=y
CONFIG_SECURITY_SMACK=y
CONFIG_CRYPTO_ECHAINIV=y
CONFIG_CRYPTO_XCBC=y
CONFIG_CRYPTO_MD4=y
CONFIG_CRYPTO_TWOFISH=y

View file

@ -108,6 +108,7 @@ CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y
CONFIG_NETFILTER_XT_TARGET_NOTRACK=y
CONFIG_NETFILTER_XT_TARGET_TPROXY=y
CONFIG_NETFILTER_XT_TARGET_TRACE=y
CONFIG_NETFILTER_XT_TARGET_TCPMSS=y
CONFIG_NETFILTER_XT_MATCH_COMMENT=y
CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y
CONFIG_NETFILTER_XT_MATCH_CONNMARK=y
@ -151,6 +152,7 @@ CONFIG_IP_NF_ARPFILTER=y
CONFIG_IP_NF_ARP_MANGLE=y
CONFIG_NF_CONNTRACK_IPV6=y
CONFIG_IP6_NF_IPTABLES=y
CONFIG_IP6_NF_MATCH_RPFILTER=y
CONFIG_IP6_NF_FILTER=y
CONFIG_IP6_NF_TARGET_REJECT=y
CONFIG_IP6_NF_MANGLE=y
@ -243,7 +245,6 @@ CONFIG_DIAG_CHAR=y
CONFIG_HW_RANDOM=y
CONFIG_HW_RANDOM_MSM_LEGACY=y
# CONFIG_DEVPORT is not set
CONFIG_MSM_SMD_PKT=y
CONFIG_I2C_CHARDEV=y
CONFIG_I2C_MSM_V2=y
CONFIG_SLIMBUS=y
@ -345,18 +346,10 @@ CONFIG_IOMMU_IO_PGTABLE_FAST=y
CONFIG_ARM_SMMU=y
CONFIG_IOMMU_DEBUG=y
CONFIG_IOMMU_TESTS=y
CONFIG_MSM_SMEM=y
CONFIG_MSM_SMD=y
CONFIG_MSM_GLINK=y
CONFIG_MSM_GLINK_LOOPBACK_SERVER=y
CONFIG_MSM_GLINK_SMD_XPRT=y
CONFIG_MSM_GLINK_SMEM_NATIVE_XPRT=y
CONFIG_MSM_SMEM_LOGGING=y
CONFIG_MSM_SMP2P=y
CONFIG_MSM_SMP2P_TEST=y
CONFIG_MSM_QMI_INTERFACE=y
CONFIG_MSM_SERVICE_LOCATOR=y
CONFIG_MSM_IPC_ROUTER_SMD_XPRT=y
CONFIG_MSM_SYSMON_GLINK_COMM=y
CONFIG_MSM_IPC_ROUTER_GLINK_XPRT=y
CONFIG_MSM_GLINK_PKT=y

View file

@ -96,6 +96,7 @@ CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y
CONFIG_NETFILTER_XT_TARGET_NOTRACK=y
CONFIG_NETFILTER_XT_TARGET_TPROXY=y
CONFIG_NETFILTER_XT_TARGET_TRACE=y
CONFIG_NETFILTER_XT_TARGET_TCPMSS=y
CONFIG_NETFILTER_XT_MATCH_COMMENT=y
CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y
CONFIG_NETFILTER_XT_MATCH_CONNMARK=y
@ -139,6 +140,7 @@ CONFIG_IP_NF_ARPFILTER=y
CONFIG_IP_NF_ARP_MANGLE=y
CONFIG_NF_CONNTRACK_IPV6=y
CONFIG_IP6_NF_IPTABLES=y
CONFIG_IP6_NF_MATCH_RPFILTER=y
CONFIG_IP6_NF_FILTER=y
CONFIG_IP6_NF_TARGET_REJECT=y
CONFIG_IP6_NF_MANGLE=y
@ -234,7 +236,6 @@ CONFIG_DIAG_CHAR=y
CONFIG_HW_RANDOM=y
CONFIG_HW_RANDOM_MSM_LEGACY=y
# CONFIG_DEVPORT is not set
CONFIG_MSM_SMD_PKT=y
CONFIG_I2C_CHARDEV=y
CONFIG_I2C_MSM_V2=y
CONFIG_SLIMBUS=y
@ -339,19 +340,10 @@ CONFIG_ARM_SMMU=y
CONFIG_IOMMU_DEBUG=y
CONFIG_IOMMU_DEBUG_TRACKING=y
CONFIG_IOMMU_TESTS=y
CONFIG_MSM_SMEM=y
CONFIG_MSM_SMD=y
CONFIG_MSM_SMD_DEBUG=y
CONFIG_MSM_GLINK=y
CONFIG_MSM_GLINK_LOOPBACK_SERVER=y
CONFIG_MSM_GLINK_SMD_XPRT=y
CONFIG_MSM_GLINK_SMEM_NATIVE_XPRT=y
CONFIG_MSM_SMEM_LOGGING=y
CONFIG_MSM_SMP2P=y
CONFIG_MSM_SMP2P_TEST=y
CONFIG_MSM_QMI_INTERFACE=y
CONFIG_MSM_SERVICE_LOCATOR=y
CONFIG_MSM_IPC_ROUTER_SMD_XPRT=y
CONFIG_MSM_SYSMON_GLINK_COMM=y
CONFIG_MSM_IPC_ROUTER_GLINK_XPRT=y
CONFIG_MSM_GLINK_PKT=y

View file

@ -71,6 +71,7 @@ CONFIG_SWP_EMULATION=y
CONFIG_BUILD_ARM64_APPENDED_DTB_IMAGE=y
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
CONFIG_COMPAT=y
CONFIG_HIBERNATION=y
CONFIG_PM_AUTOSLEEP=y
CONFIG_PM_WAKELOCKS=y
CONFIG_PM_WAKELOCKS_LIMIT=0
@ -624,7 +625,6 @@ CONFIG_PFK=y
CONFIG_SECURITY=y
CONFIG_SECURITY_SELINUX=y
CONFIG_SECURITY_SMACK=y
CONFIG_CRYPTO_ECHAINIV=y
CONFIG_CRYPTO_XCBC=y
CONFIG_CRYPTO_MD4=y
CONFIG_CRYPTO_TWOFISH=y

View file

@ -70,6 +70,7 @@ CONFIG_CMDLINE="console=ttyAMA0"
CONFIG_BUILD_ARM64_APPENDED_DTB_IMAGE=y
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
CONFIG_COMPAT=y
CONFIG_HIBERNATION=y
CONFIG_PM_AUTOSLEEP=y
CONFIG_PM_WAKELOCKS=y
CONFIG_PM_WAKELOCKS_LIMIT=0
@ -668,7 +669,6 @@ CONFIG_PFK=y
CONFIG_SECURITY=y
CONFIG_SECURITY_SELINUX=y
CONFIG_SECURITY_SMACK=y
CONFIG_CRYPTO_ECHAINIV=y
CONFIG_CRYPTO_XCBC=y
CONFIG_CRYPTO_MD4=y
CONFIG_CRYPTO_TWOFISH=y

View file

@ -592,7 +592,6 @@ CONFIG_PFK=y
CONFIG_SECURITY=y
CONFIG_SECURITY_NETWORK=y
CONFIG_SECURITY_SELINUX=y
CONFIG_CRYPTO_ECHAINIV=y
CONFIG_CRYPTO_XCBC=y
CONFIG_CRYPTO_MD4=y
CONFIG_CRYPTO_TWOFISH=y

View file

@ -629,7 +629,6 @@ CONFIG_PFK=y
CONFIG_SECURITY=y
CONFIG_SECURITY_SELINUX=y
CONFIG_SECURITY_SMACK=y
CONFIG_CRYPTO_ECHAINIV=y
CONFIG_CRYPTO_XCBC=y
CONFIG_CRYPTO_MD4=y
CONFIG_CRYPTO_TWOFISH=y

View file

@ -659,7 +659,6 @@ CONFIG_HARDENED_USERCOPY=y
CONFIG_SECURITY_SELINUX=y
CONFIG_SECURITY_SMACK=y
CONFIG_CRYPTO_GCM=y
CONFIG_CRYPTO_ECHAINIV=y
CONFIG_CRYPTO_XCBC=y
CONFIG_CRYPTO_MD4=y
CONFIG_CRYPTO_TWOFISH=y

View file

@ -735,7 +735,6 @@ CONFIG_HARDENED_USERCOPY=y
CONFIG_SECURITY_SELINUX=y
CONFIG_SECURITY_SMACK=y
CONFIG_CRYPTO_GCM=y
CONFIG_CRYPTO_ECHAINIV=y
CONFIG_CRYPTO_XCBC=y
CONFIG_CRYPTO_MD4=y
CONFIG_CRYPTO_TWOFISH=y

View file

@ -632,7 +632,6 @@ CONFIG_PFK=y
CONFIG_SECURITY=y
CONFIG_SECURITY_SELINUX=y
CONFIG_SECURITY_SMACK=y
CONFIG_CRYPTO_ECHAINIV=y
CONFIG_CRYPTO_XCBC=y
CONFIG_CRYPTO_MD4=y
CONFIG_CRYPTO_TWOFISH=y

View file

@ -693,7 +693,6 @@ CONFIG_PFK=y
CONFIG_SECURITY=y
CONFIG_SECURITY_SELINUX=y
CONFIG_SECURITY_SMACK=y
CONFIG_CRYPTO_ECHAINIV=y
CONFIG_CRYPTO_XCBC=y
CONFIG_CRYPTO_MD4=y
CONFIG_CRYPTO_TWOFISH=y

View file

@ -663,7 +663,6 @@ CONFIG_HARDENED_USERCOPY=y
CONFIG_SECURITY_SELINUX=y
CONFIG_SECURITY_SMACK=y
CONFIG_CRYPTO_GCM=y
CONFIG_CRYPTO_ECHAINIV=y
CONFIG_CRYPTO_XCBC=y
CONFIG_CRYPTO_MD4=y
CONFIG_CRYPTO_TWOFISH=y

View file

@ -735,7 +735,6 @@ CONFIG_HARDENED_USERCOPY=y
CONFIG_SECURITY_SELINUX=y
CONFIG_SECURITY_SMACK=y
CONFIG_CRYPTO_GCM=y
CONFIG_CRYPTO_ECHAINIV=y
CONFIG_CRYPTO_XCBC=y
CONFIG_CRYPTO_MD4=y
CONFIG_CRYPTO_TWOFISH=y

View file

@ -178,7 +178,7 @@ config PREFETCH
config MLONGCALLS
bool "Enable the -mlong-calls compiler option for big kernels"
def_bool y if (!MODULES)
default y
depends on PA8X00
help
If you configure the kernel to include many drivers built-in instead

View file

@ -0,0 +1,32 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_BARRIER_H
#define __ASM_BARRIER_H
#ifndef __ASSEMBLY__
/* The synchronize caches instruction executes as a nop on systems in
which all memory references are performed in order. */
#define synchronize_caches() __asm__ __volatile__ ("sync" : : : "memory")
#if defined(CONFIG_SMP)
#define mb() do { synchronize_caches(); } while (0)
#define rmb() mb()
#define wmb() mb()
#define dma_rmb() mb()
#define dma_wmb() mb()
#else
#define mb() barrier()
#define rmb() barrier()
#define wmb() barrier()
#define dma_rmb() barrier()
#define dma_wmb() barrier()
#endif
#define __smp_mb() mb()
#define __smp_rmb() mb()
#define __smp_wmb() mb()
#include <asm-generic/barrier.h>
#endif /* !__ASSEMBLY__ */
#endif /* __ASM_BARRIER_H */

View file

@ -481,6 +481,8 @@
/* Release pa_tlb_lock lock without reloading lock address. */
.macro tlb_unlock0 spc,tmp
#ifdef CONFIG_SMP
or,COND(=) %r0,\spc,%r0
sync
or,COND(=) %r0,\spc,%r0
stw \spc,0(\tmp)
#endif

View file

@ -354,6 +354,7 @@ ENDPROC(flush_data_cache_local)
.macro tlb_unlock la,flags,tmp
#ifdef CONFIG_SMP
ldi 1,\tmp
sync
stw \tmp,0(\la)
mtsm \flags
#endif

View file

@ -631,6 +631,7 @@ cas_action:
sub,<> %r28, %r25, %r0
2: stw,ma %r24, 0(%r26)
/* Free lock */
sync
stw,ma %r20, 0(%sr2,%r20)
#if ENABLE_LWS_DEBUG
/* Clear thread register indicator */
@ -645,6 +646,7 @@ cas_action:
3:
/* Error occurred on load or store */
/* Free lock */
sync
stw %r20, 0(%sr2,%r20)
#if ENABLE_LWS_DEBUG
stw %r0, 4(%sr2,%r20)
@ -846,6 +848,7 @@ cas2_action:
cas2_end:
/* Free lock */
sync
stw,ma %r20, 0(%sr2,%r20)
/* Enable interrupts */
ssm PSW_SM_I, %r0
@ -856,6 +859,7 @@ cas2_end:
22:
/* Error occurred on load or store */
/* Free lock */
sync
stw %r20, 0(%sr2,%r20)
ssm PSW_SM_I, %r0
ldo 1(%r0),%r28
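All three parisc hunks above insert a sync before the store that frees a lock word, so that writes made while the lock was held become visible before the lock reads as free again; in effect the plain stw becomes a store-release. A minimal C11 analogue of the pattern (illustrative userspace code, not parisc kernel code):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static int shared_data;
static atomic_int lock_word = 1;        /* 1 = free, matching the ldi 1 / stw idiom */

static void lock_acquire(void)
{
        int expected;
        do {
                expected = 1;           /* swap free -> taken */
        } while (!atomic_compare_exchange_weak_explicit(&lock_word, &expected, 0,
                        memory_order_acquire, memory_order_relaxed));
}

static void lock_release(void)
{
        /* The "sync" added before the releasing stw acts like this release
         * store: writes made under the lock are ordered before the lock
         * word reads as free. */
        atomic_store_explicit(&lock_word, 1, memory_order_release);
}

static void *reader(void *arg)
{
        (void)arg;
        lock_acquire();
        printf("reader sees shared_data = %d\n", shared_data);  /* always 42 */
        lock_release();
        return NULL;
}

int main(void)
{
        pthread_t t;

        lock_acquire();
        shared_data = 42;               /* write under the lock... */
        pthread_create(&t, NULL, reader, NULL);
        lock_release();                 /* ...published by the release store */
        pthread_join(t, NULL);
        return 0;
}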

View file

@ -193,12 +193,12 @@
#define X86_FEATURE_HW_PSTATE ( 7*32+ 8) /* AMD HW-PState */
#define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */
#define X86_FEATURE_RETPOLINE ( 7*32+12) /* "" Generic Retpoline mitigation for Spectre variant 2 */
#define X86_FEATURE_RETPOLINE_AMD ( 7*32+13) /* "" AMD Retpoline mitigation for Spectre variant 2 */
#define X86_FEATURE_INTEL_PT ( 7*32+15) /* Intel Processor Trace */
#define X86_FEATURE_RSB_CTXSW ( 7*32+19) /* "" Fill RSB on context switches */
#define X86_FEATURE_RETPOLINE ( 7*32+29) /* "" Generic Retpoline mitigation for Spectre variant 2 */
#define X86_FEATURE_RETPOLINE_AMD ( 7*32+30) /* "" AMD Retpoline mitigation for Spectre variant 2 */
#define X86_FEATURE_MSR_SPEC_CTRL ( 7*32+16) /* "" MSR SPEC_CTRL is implemented */
#define X86_FEATURE_SSBD ( 7*32+17) /* Speculative Store Bypass Disable */
@ -214,7 +214,7 @@
#define X86_FEATURE_IBPB ( 7*32+26) /* Indirect Branch Prediction Barrier */
#define X86_FEATURE_STIBP ( 7*32+27) /* Single Thread Indirect Branch Predictors */
#define X86_FEATURE_ZEN ( 7*32+28) /* "" CPU is AMD family 0x17 (Zen) */
#define X86_FEATURE_L1TF_PTEINV ( 7*32+29) /* "" L1TF workaround PTE inversion */
/* Virtualization flags: Linux defined, word 8 */
#define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */
@ -310,6 +310,7 @@
#define X86_FEATURE_AVX512_4FMAPS (18*32+ 3) /* AVX-512 Multiply Accumulation Single precision */
#define X86_FEATURE_SPEC_CTRL (18*32+26) /* "" Speculation Control (IBRS + IBPB) */
#define X86_FEATURE_INTEL_STIBP (18*32+27) /* "" Single Thread Indirect Branch Predictors */
#define X86_FEATURE_FLUSH_L1D (18*32+28) /* Flush L1D cache */
#define X86_FEATURE_ARCH_CAPABILITIES (18*32+29) /* IA32_ARCH_CAPABILITIES MSR (Intel) */
#define X86_FEATURE_SPEC_CTRL_SSBD (18*32+31) /* "" Speculative Store Bypass Disable */
@ -331,5 +332,6 @@
#define X86_BUG_SPECTRE_V1 X86_BUG(15) /* CPU is affected by Spectre variant 1 attack with conditional branches */
#define X86_BUG_SPECTRE_V2 X86_BUG(16) /* CPU is affected by Spectre variant 2 attack with indirect branches */
#define X86_BUG_SPEC_STORE_BYPASS X86_BUG(17) /* CPU is affected by speculative store bypass attack */
#define X86_BUG_L1TF X86_BUG(18) /* CPU is affected by L1 Terminal Fault */
#endif /* _ASM_X86_CPUFEATURES_H */

View file

@ -8,6 +8,8 @@
* Interrupt control:
*/
/* Declaration required for gcc < 4.9 to prevent -Werror=missing-prototypes */
extern inline unsigned long native_save_fl(void);
extern inline unsigned long native_save_fl(void)
{
unsigned long flags;

View file

@ -27,8 +27,13 @@
#define N_EXCEPTION_STACKS 1
#ifdef CONFIG_X86_PAE
/* 44=32+12, the limit we can fit into an unsigned long pfn */
#define __PHYSICAL_MASK_SHIFT 44
/*
* This is beyond the 44 bit limit imposed by the 32bit long pfns,
* but we need the full mask to make sure inverted PROT_NONE
* entries have all the host bits set in a guest.
* The real limit is still 44 bits.
*/
#define __PHYSICAL_MASK_SHIFT 52
#define __VIRTUAL_MASK_SHIFT 32
#else /* !CONFIG_X86_PAE */

View file

@ -77,4 +77,21 @@ static inline unsigned long pte_bitop(unsigned long value, unsigned int rightshi
#define __pte_to_swp_entry(pte) ((swp_entry_t) { (pte).pte_low })
#define __swp_entry_to_pte(x) ((pte_t) { .pte = (x).val })
/* No inverted PFNs on 2 level page tables */
static inline u64 protnone_mask(u64 val)
{
return 0;
}
static inline u64 flip_protnone_guard(u64 oldval, u64 val, u64 mask)
{
return val;
}
static inline bool __pte_needs_invert(u64 val)
{
return false;
}
#endif /* _ASM_X86_PGTABLE_2LEVEL_H */

View file

@ -177,11 +177,44 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *pmdp)
#endif
/* Encode and de-code a swap entry */
#define SWP_TYPE_BITS 5
#define SWP_OFFSET_FIRST_BIT (_PAGE_BIT_PROTNONE + 1)
/* We always extract/encode the offset by shifting it all the way up, and then down again */
#define SWP_OFFSET_SHIFT (SWP_OFFSET_FIRST_BIT + SWP_TYPE_BITS)
#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > 5)
#define __swp_type(x) (((x).val) & 0x1f)
#define __swp_offset(x) ((x).val >> 5)
#define __swp_entry(type, offset) ((swp_entry_t){(type) | (offset) << 5})
#define __pte_to_swp_entry(pte) ((swp_entry_t){ (pte).pte_high })
#define __swp_entry_to_pte(x) ((pte_t){ { .pte_high = (x).val } })
/*
* Normally, __swp_entry() converts from arch-independent swp_entry_t to
* arch-dependent swp_entry_t, and __swp_entry_to_pte() just stores the result
* to pte. But here we have 32bit swp_entry_t and 64bit pte, and need to use the
* whole 64 bits. Thus, we shift the "real" arch-dependent conversion to
* __swp_entry_to_pte() through the following helper macro based on 64bit
* __swp_entry().
*/
#define __swp_pteval_entry(type, offset) ((pteval_t) { \
(~(pteval_t)(offset) << SWP_OFFSET_SHIFT >> SWP_TYPE_BITS) \
| ((pteval_t)(type) << (64 - SWP_TYPE_BITS)) })
#define __swp_entry_to_pte(x) ((pte_t){ .pte = \
__swp_pteval_entry(__swp_type(x), __swp_offset(x)) })
/*
* Analogously, __pte_to_swp_entry() doesn't just extract the arch-dependent
* swp_entry_t, but also has to convert it from 64bit to the 32bit
* intermediate representation, using the following macros based on 64bit
* __swp_type() and __swp_offset().
*/
#define __pteval_swp_type(x) ((unsigned long)((x).pte >> (64 - SWP_TYPE_BITS)))
#define __pteval_swp_offset(x) ((unsigned long)(~((x).pte) << SWP_TYPE_BITS >> SWP_OFFSET_SHIFT))
#define __pte_to_swp_entry(pte) (__swp_entry(__pteval_swp_type(pte), \
__pteval_swp_offset(pte)))
#include <asm/pgtable-invert.h>
#endif /* _ASM_X86_PGTABLE_3LEVEL_H */

View file

@ -0,0 +1,32 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_PGTABLE_INVERT_H
#define _ASM_PGTABLE_INVERT_H 1
#ifndef __ASSEMBLY__
static inline bool __pte_needs_invert(u64 val)
{
return !(val & _PAGE_PRESENT);
}
/* Get a mask to xor with the page table entry to get the correct pfn. */
static inline u64 protnone_mask(u64 val)
{
return __pte_needs_invert(val) ? ~0ull : 0;
}
static inline u64 flip_protnone_guard(u64 oldval, u64 val, u64 mask)
{
/*
* When a PTE transitions from NONE to !NONE or vice-versa
* invert the PFN part to stop speculation.
* pte_pfn undoes this when needed.
*/
if (__pte_needs_invert(oldval) != __pte_needs_invert(val))
val = (val & ~mask) | (~val & mask);
return val;
}
#endif /* __ASSEMBLY__ */
#endif
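A standalone userspace sketch of what this guard does when a PTE is flipped to PROT_NONE and back; the PTE_PFN_MASK value is an assumption for illustration (52-bit MAX_PA, 4 KiB pages), not taken from this diff:

#include <stdint.h>
#include <stdio.h>

#define PAGE_PRESENT 0x1ULL
#define PTE_PFN_MASK 0x000ffffffffff000ULL /* assumed: 52-bit MAX_PA, 4K pages */

static int pte_needs_invert(uint64_t val)
{
        return !(val & PAGE_PRESENT);
}

/* same XOR-the-pfn-field trick as flip_protnone_guard() above */
static uint64_t flip_protnone_guard(uint64_t oldval, uint64_t val, uint64_t mask)
{
        if (pte_needs_invert(oldval) != pte_needs_invert(val))
                val = (val & ~mask) | (~val & mask);
        return val;
}

int main(void)
{
        uint64_t present = (0x1234ULL << 12) | PAGE_PRESENT;
        /* mprotect(PROT_NONE) clears the present bit: the pfn bits flip,
         * so the entry no longer points at the original page frame */
        uint64_t none = flip_protnone_guard(present, present & ~PAGE_PRESENT,
                                            PTE_PFN_MASK);
        /* making it present again flips the pfn bits back */
        uint64_t back = flip_protnone_guard(none, none | PAGE_PRESENT,
                                            PTE_PFN_MASK);

        printf("present %#jx -> none %#jx -> back %#jx\n",
               (uintmax_t)present, (uintmax_t)none, (uintmax_t)back);
        return 0;
}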

View file

@ -148,19 +148,29 @@ static inline int pte_special(pte_t pte)
return pte_flags(pte) & _PAGE_SPECIAL;
}
/* Entries that were set to PROT_NONE are inverted */
static inline u64 protnone_mask(u64 val);
static inline unsigned long pte_pfn(pte_t pte)
{
return (pte_val(pte) & PTE_PFN_MASK) >> PAGE_SHIFT;
phys_addr_t pfn = pte_val(pte);
pfn ^= protnone_mask(pfn);
return (pfn & PTE_PFN_MASK) >> PAGE_SHIFT;
}
static inline unsigned long pmd_pfn(pmd_t pmd)
{
return (pmd_val(pmd) & pmd_pfn_mask(pmd)) >> PAGE_SHIFT;
phys_addr_t pfn = pmd_val(pmd);
pfn ^= protnone_mask(pfn);
return (pfn & pmd_pfn_mask(pmd)) >> PAGE_SHIFT;
}
static inline unsigned long pud_pfn(pud_t pud)
{
return (pud_val(pud) & pud_pfn_mask(pud)) >> PAGE_SHIFT;
phys_addr_t pfn = pud_val(pud);
pfn ^= protnone_mask(pfn);
return (pfn & pud_pfn_mask(pud)) >> PAGE_SHIFT;
}
#define pte_page(pte) pfn_to_page(pte_pfn(pte))
@ -305,11 +315,6 @@ static inline pmd_t pmd_mkwrite(pmd_t pmd)
return pmd_set_flags(pmd, _PAGE_RW);
}
static inline pmd_t pmd_mknotpresent(pmd_t pmd)
{
return pmd_clear_flags(pmd, _PAGE_PRESENT | _PAGE_PROTNONE);
}
#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
static inline int pte_soft_dirty(pte_t pte)
{
@ -359,19 +364,58 @@ static inline pgprotval_t massage_pgprot(pgprot_t pgprot)
static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
{
return __pte(((phys_addr_t)page_nr << PAGE_SHIFT) |
massage_pgprot(pgprot));
phys_addr_t pfn = (phys_addr_t)page_nr << PAGE_SHIFT;
pfn ^= protnone_mask(pgprot_val(pgprot));
pfn &= PTE_PFN_MASK;
return __pte(pfn | massage_pgprot(pgprot));
}
static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
{
return __pmd(((phys_addr_t)page_nr << PAGE_SHIFT) |
massage_pgprot(pgprot));
phys_addr_t pfn = (phys_addr_t)page_nr << PAGE_SHIFT;
pfn ^= protnone_mask(pgprot_val(pgprot));
pfn &= PHYSICAL_PMD_PAGE_MASK;
return __pmd(pfn | massage_pgprot(pgprot));
}
static inline pud_t pfn_pud(unsigned long page_nr, pgprot_t pgprot)
{
phys_addr_t pfn = page_nr << PAGE_SHIFT;
pfn ^= protnone_mask(pgprot_val(pgprot));
pfn &= PHYSICAL_PUD_PAGE_MASK;
return __pud(pfn | massage_pgprot(pgprot));
}
static inline pmd_t pmd_mknotpresent(pmd_t pmd)
{
return pfn_pmd(pmd_pfn(pmd),
__pgprot(pmd_flags(pmd) & ~(_PAGE_PRESENT|_PAGE_PROTNONE)));
}
static inline pud_t pud_set_flags(pud_t pud, pudval_t set)
{
pudval_t v = native_pud_val(pud);
return __pud(v | set);
}
static inline pud_t pud_clear_flags(pud_t pud, pudval_t clear)
{
pudval_t v = native_pud_val(pud);
return __pud(v & ~clear);
}
static inline pud_t pud_mkhuge(pud_t pud)
{
return pud_set_flags(pud, _PAGE_PSE);
}
static inline u64 flip_protnone_guard(u64 oldval, u64 val, u64 mask);
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
pteval_t val = pte_val(pte);
pteval_t val = pte_val(pte), oldval = val;
/*
* Chop off the NX bit (if present), and add the NX portion of
@ -379,17 +423,17 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
*/
val &= _PAGE_CHG_MASK;
val |= massage_pgprot(newprot) & ~_PAGE_CHG_MASK;
val = flip_protnone_guard(oldval, val, PTE_PFN_MASK);
return __pte(val);
}
static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
pmdval_t val = pmd_val(pmd);
pmdval_t val = pmd_val(pmd), oldval = val;
val &= _HPAGE_CHG_MASK;
val |= massage_pgprot(newprot) & ~_HPAGE_CHG_MASK;
val = flip_protnone_guard(oldval, val, PHYSICAL_PMD_PAGE_MASK);
return __pmd(val);
}
@ -926,6 +970,14 @@ static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
}
#endif
#define __HAVE_ARCH_PFN_MODIFY_ALLOWED 1
extern bool pfn_modify_allowed(unsigned long pfn, pgprot_t prot);
static inline bool arch_has_pfn_modify_check(void)
{
return boot_cpu_has_bug(X86_BUG_L1TF);
}
#include <asm-generic/pgtable.h>
#endif /* __ASSEMBLY__ */

View file

@ -163,18 +163,52 @@ static inline int pgd_large(pgd_t pgd) { return 0; }
#define pte_offset_map(dir, address) pte_offset_kernel((dir), (address))
#define pte_unmap(pte) ((void)(pte))/* NOP */
/* Encode and de-code a swap entry */
#define SWP_TYPE_BITS 5
#define SWP_OFFSET_SHIFT (_PAGE_BIT_PROTNONE + 1)
/*
* Encode and de-code a swap entry
*
* | ... | 11| 10| 9|8|7|6|5| 4| 3|2| 1|0| <- bit number
* | ... |SW3|SW2|SW1|G|L|D|A|CD|WT|U| W|P| <- bit names
* | TYPE (59-63) | ~OFFSET (9-58) |0|0|X|X| X| X|X|SD|0| <- swp entry
*
* G (8) is aliased and used as a PROT_NONE indicator for
* !present ptes. We need to start storing swap entries above
* there. We also need to avoid using A and D because of an
* erratum where they can be incorrectly set by hardware on
* non-present PTEs.
*
* SD (1) in swp entry is used to store soft dirty bit, which helps us
* remember soft dirty over page migration
*
* Bit 7 in swp entry should be 0 because pmd_present checks not only P,
* but also L and G.
*
* The offset is inverted by a binary not operation to make the high
* physical bits set.
*/
#define SWP_TYPE_BITS 5
#define SWP_OFFSET_FIRST_BIT (_PAGE_BIT_PROTNONE + 1)
/* We always extract/encode the offset by shifting it all the way up, and then down again */
#define SWP_OFFSET_SHIFT (SWP_OFFSET_FIRST_BIT+SWP_TYPE_BITS)
#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > SWP_TYPE_BITS)
#define __swp_type(x) (((x).val >> (_PAGE_BIT_PRESENT + 1)) \
& ((1U << SWP_TYPE_BITS) - 1))
#define __swp_offset(x) ((x).val >> SWP_OFFSET_SHIFT)
#define __swp_entry(type, offset) ((swp_entry_t) { \
((type) << (_PAGE_BIT_PRESENT + 1)) \
| ((offset) << SWP_OFFSET_SHIFT) })
/* Extract the high bits for type */
#define __swp_type(x) ((x).val >> (64 - SWP_TYPE_BITS))
/* Shift up (to get rid of type), then down to get value */
#define __swp_offset(x) (~(x).val << SWP_TYPE_BITS >> SWP_OFFSET_SHIFT)
/*
* Shift the offset up "too far" by TYPE bits, then down again
* The offset is inverted by a binary not operation to make the high
* physical bits set.
*/
#define __swp_entry(type, offset) ((swp_entry_t) { \
(~(unsigned long)(offset) << SWP_OFFSET_SHIFT >> SWP_TYPE_BITS) \
| ((unsigned long)(type) << (64-SWP_TYPE_BITS)) })
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val((pte)) })
#define __swp_entry_to_pte(x) ((pte_t) { .pte = (x).val })
@ -201,6 +235,8 @@ extern void cleanup_highmap(void);
extern void init_extra_mapping_uc(unsigned long phys, unsigned long size);
extern void init_extra_mapping_wb(unsigned long phys, unsigned long size);
#include <asm/pgtable-invert.h>
#endif /* !__ASSEMBLY__ */
#endif /* _ASM_X86_PGTABLE_64_H */
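As a sanity check of the encoding described above, a standalone sketch (with _PAGE_BIT_PROTNONE hard-coded to its x86 value of 8) showing that a type/offset pair round-trips through the inverted format while leaving the physical-address bits of the resulting pte mostly set:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define SWP_TYPE_BITS        5
#define SWP_OFFSET_FIRST_BIT (8 + 1)    /* _PAGE_BIT_PROTNONE + 1 */
#define SWP_OFFSET_SHIFT     (SWP_OFFSET_FIRST_BIT + SWP_TYPE_BITS)

static uint64_t swp_entry(unsigned type, uint64_t offset)
{
        return (~offset << SWP_OFFSET_SHIFT >> SWP_TYPE_BITS)
               | ((uint64_t)type << (64 - SWP_TYPE_BITS));
}

static unsigned swp_type(uint64_t val)
{
        return (unsigned)(val >> (64 - SWP_TYPE_BITS));
}

static uint64_t swp_offset(uint64_t val)
{
        return ~val << SWP_TYPE_BITS >> SWP_OFFSET_SHIFT;
}

int main(void)
{
        uint64_t val = swp_entry(3, 0x12345);

        /* type and offset survive the inversion ... */
        assert(swp_type(val) == 3 && swp_offset(val) == 0x12345);
        /* ... and the pte's physical-address bits are mostly 1s, so the
         * entry points outside real memory even if speculatively loaded */
        printf("swp pte = %#jx\n", (uintmax_t)val);
        return 0;
}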

View file

@ -70,15 +70,15 @@
/*
* Tracking soft dirty bit when a page goes to a swap is tricky.
* We need a bit which can be stored in pte _and_ not conflict
* with swap entry format. On x86 bits 6 and 7 are *not* involved
* into swap entry computation, but bit 6 is used for nonlinear
* file mapping, so we borrow bit 7 for soft dirty tracking.
* with swap entry format. On x86 bits 1-4 are *not* involved
* into swap entry computation, but bit 7 is used for thp migration,
* so we borrow bit 1 for soft dirty tracking.
*
* Please note that this bit must be treated as swap dirty page
* mark if and only if the PTE has present bit clear!
* mark if and only if the PTE/PMD has present bit clear!
*/
#ifdef CONFIG_MEM_SOFT_DIRTY
#define _PAGE_SWP_SOFT_DIRTY _PAGE_PSE
#define _PAGE_SWP_SOFT_DIRTY _PAGE_RW
#else
#define _PAGE_SWP_SOFT_DIRTY (_AT(pteval_t, 0))
#endif

View file

@ -172,6 +172,11 @@ extern const struct seq_operations cpuinfo_op;
extern void cpu_detect(struct cpuinfo_x86 *c);
static inline unsigned long l1tf_pfn_limit(void)
{
return BIT(boot_cpu_data.x86_phys_bits - 1 - PAGE_SHIFT) - 1;
}
extern void early_cpu_init(void);
extern void identify_boot_cpu(void);
extern void identify_secondary_cpu(struct cpuinfo_x86 *);
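Worked numbers for this helper, assuming a hypothetical CPU that reports 36 physical address bits (64 GiB MAX_PA) and 4 KiB pages:

#include <stdio.h>

#define PAGE_SHIFT 12

/* mirrors l1tf_pfn_limit() above: last pfn below MAX_PA/2 */
static unsigned long l1tf_pfn_limit(int x86_phys_bits)
{
        return (1UL << (x86_phys_bits - 1 - PAGE_SHIFT)) - 1;
}

int main(void)
{
        unsigned long limit = l1tf_pfn_limit(36);

        /* 0x7fffff -> first unsafe byte at 0x800000000 = 32 GiB,
         * i.e. exactly half of the 64 GiB MAX_PA */
        printf("pfn limit %#lx, MAX_PA/2 = %#lx\n",
               limit, (limit + 1) << PAGE_SHIFT);
        return 0;
}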

View file

@ -26,9 +26,11 @@
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/intel-family.h>
#include <asm/e820.h>
static void __init spectre_v2_select_mitigation(void);
static void __init ssb_select_mitigation(void);
static void __init l1tf_select_mitigation(void);
/*
* Our boot-time value of the SPEC_CTRL MSR. We read it once so that any
@ -80,6 +82,8 @@ void __init check_bugs(void)
*/
ssb_select_mitigation();
l1tf_select_mitigation();
#ifdef CONFIG_X86_32
/*
* Check whether we are able to run this kernel safely on SMP.
@ -309,23 +313,6 @@ static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
return cmd;
}
/* Check for Skylake-like CPUs (for RSB handling) */
static bool __init is_skylake_era(void)
{
if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
boot_cpu_data.x86 == 6) {
switch (boot_cpu_data.x86_model) {
case INTEL_FAM6_SKYLAKE_MOBILE:
case INTEL_FAM6_SKYLAKE_DESKTOP:
case INTEL_FAM6_SKYLAKE_X:
case INTEL_FAM6_KABYLAKE_MOBILE:
case INTEL_FAM6_KABYLAKE_DESKTOP:
return true;
}
}
return false;
}
static void __init spectre_v2_select_mitigation(void)
{
enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline();
@ -386,22 +373,15 @@ retpoline_auto:
pr_info("%s\n", spectre_v2_strings[mode]);
/*
* If neither SMEP nor PTI are available, there is a risk of
* hitting userspace addresses in the RSB after a context switch
* from a shallow call stack to a deeper one. To prevent this fill
* the entire RSB, even when using IBRS.
* If spectre v2 protection has been enabled, unconditionally fill
* RSB during a context switch; this protects against two independent
* issues:
*
* Skylake era CPUs have a separate issue with *underflow* of the
* RSB, when they will predict 'ret' targets from the generic BTB.
* The proper mitigation for this is IBRS. If IBRS is not supported
* or deactivated in favour of retpolines the RSB fill on context
* switch is required.
* - RSB underflow (and switch to BTB) on Skylake+
* - SpectreRSB variant of spectre v2 on X86_BUG_SPECTRE_V2 CPUs
*/
if ((!boot_cpu_has(X86_FEATURE_KAISER) &&
!boot_cpu_has(X86_FEATURE_SMEP)) || is_skylake_era()) {
setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
pr_info("Spectre v2 mitigation: Filling RSB on context switch\n");
}
setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
pr_info("Spectre v2 / SpectreRSB mitigation: Filling RSB on context switch\n");
/* Initialize Indirect Branch Prediction Barrier if supported */
if (boot_cpu_has(X86_FEATURE_IBPB)) {
@ -652,6 +632,35 @@ void x86_spec_ctrl_setup_ap(void)
x86_amd_ssb_disable();
}
#undef pr_fmt
#define pr_fmt(fmt) "L1TF: " fmt
static void __init l1tf_select_mitigation(void)
{
u64 half_pa;
if (!boot_cpu_has_bug(X86_BUG_L1TF))
return;
#if CONFIG_PGTABLE_LEVELS == 2
pr_warn("Kernel not compiled for PAE. No mitigation for L1TF\n");
return;
#endif
/*
* This is extremely unlikely to happen because on almost all
* systems MAX_PA/2 is far larger than the amount of RAM that
* could fit into the DIMM slots.
*/
half_pa = (u64)l1tf_pfn_limit() << PAGE_SHIFT;
if (e820_any_mapped(half_pa, ULLONG_MAX - half_pa, E820_RAM)) {
pr_warn("System has more than MAX_PA/2 memory. L1TF mitigation not effective.\n");
return;
}
setup_force_cpu_cap(X86_FEATURE_L1TF_PTEINV);
}
#undef pr_fmt
#ifdef CONFIG_SYSFS
static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
@ -679,6 +688,11 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr
case X86_BUG_SPEC_STORE_BYPASS:
return sprintf(buf, "%s\n", ssb_strings[ssb_mode]);
case X86_BUG_L1TF:
if (boot_cpu_has(X86_FEATURE_L1TF_PTEINV))
return sprintf(buf, "Mitigation: Page Table Inversion\n");
break;
default:
break;
}
@ -705,4 +719,9 @@ ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *
{
return cpu_show_common(dev, attr, buf, X86_BUG_SPEC_STORE_BYPASS);
}
ssize_t cpu_show_l1tf(struct device *dev, struct device_attribute *attr, char *buf)
{
return cpu_show_common(dev, attr, buf, X86_BUG_L1TF);
}
#endif

View file

@ -880,6 +880,21 @@ static const __initconst struct x86_cpu_id cpu_no_spec_store_bypass[] = {
{}
};
static const __initconst struct x86_cpu_id cpu_no_l1tf[] = {
/* in addition to cpu_no_speculation */
{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT1 },
{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT2 },
{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_AIRMONT },
{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_MERRIFIELD },
{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_MOOREFIELD },
{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_GOLDMONT },
{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_DENVERTON },
{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_GEMINI_LAKE },
{ X86_VENDOR_INTEL, 6, INTEL_FAM6_XEON_PHI_KNL },
{ X86_VENDOR_INTEL, 6, INTEL_FAM6_XEON_PHI_KNM },
{}
};
static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
{
u64 ia32_cap = 0;
@ -905,6 +920,11 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
return;
setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN);
if (x86_match_cpu(cpu_no_l1tf))
return;
setup_force_cpu_bug(X86_BUG_L1TF);
}
/*

View file

@ -394,7 +394,6 @@ int __copy_instruction(u8 *dest, u8 *src)
newdisp = (u8 *) src + (s64) insn.displacement.value - (u8 *) dest;
if ((s64) (s32) newdisp != newdisp) {
pr_err("Kprobes error: new displacement does not fit into s32 (%llx)\n", newdisp);
pr_err("\tSrc: %p, Dest: %p, old disp: %x\n", src, dest, insn.displacement.value);
return 0;
}
disp = (u8 *) dest + insn_offset_displacement(&insn);
@ -610,8 +609,7 @@ static int reenter_kprobe(struct kprobe *p, struct pt_regs *regs,
* Raise a BUG or we'll continue in an endless reentering loop
* and eventually a stack overflow.
*/
printk(KERN_WARNING "Unrecoverable kprobe detected at %p.\n",
p->addr);
pr_err("Unrecoverable kprobe detected.\n");
dump_kprobe(p);
BUG();
default:

View file

@ -97,10 +97,12 @@ unsigned paravirt_patch_call(void *insnbuf,
struct branch *b = insnbuf;
unsigned long delta = (unsigned long)target - (addr+5);
if (tgt_clobbers & ~site_clobbers)
return len; /* target would clobber too much for this site */
if (len < 5)
if (len < 5) {
#ifdef CONFIG_RETPOLINE
WARN_ONCE("Failing to patch indirect CALL in %ps\n", (void *)addr);
#endif
return len; /* call too long for patch site */
}
b->opcode = 0xe8; /* call */
b->delta = delta;
@ -115,8 +117,12 @@ unsigned paravirt_patch_jmp(void *insnbuf, const void *target,
struct branch *b = insnbuf;
unsigned long delta = (unsigned long)target - (addr+5);
if (len < 5)
if (len < 5) {
#ifdef CONFIG_RETPOLINE
WARN_ONCE("Failing to patch indirect JMP in %ps\n", (void *)addr);
#endif
return len; /* call too long for patch site */
}
b->opcode = 0xe9; /* jmp */
b->delta = delta;

View file

@ -851,6 +851,12 @@ void __init setup_arch(char **cmdline_p)
memblock_reserve(__pa_symbol(_text),
(unsigned long)__bss_stop - (unsigned long)_text);
/*
* Make sure page 0 is always reserved because on systems with
* L1TF its contents can be leaked to user processes.
*/
memblock_reserve(0, PAGE_SIZE);
early_reserve_initrd();
/*

View file

@ -4,6 +4,8 @@
#include <linux/swap.h>
#include <linux/memblock.h>
#include <linux/bootmem.h> /* for max_low_pfn */
#include <linux/swapfile.h>
#include <linux/swapops.h>
#include <asm/cacheflush.h>
#include <asm/e820.h>
@ -767,3 +769,26 @@ void update_cache_mode_entry(unsigned entry, enum page_cache_mode cache)
__cachemode2pte_tbl[cache] = __cm_idx2pte(entry);
__pte2cachemode_tbl[entry] = cache;
}
#ifdef CONFIG_SWAP
unsigned long max_swapfile_size(void)
{
unsigned long pages;
pages = generic_max_swapfile_size();
if (boot_cpu_has_bug(X86_BUG_L1TF)) {
/* Limit the swap file size to MAX_PA/2 for L1TF workaround */
unsigned long l1tf_limit = l1tf_pfn_limit() + 1;
/*
* We encode swap offsets also with 3 bits below those for pfn
* which makes the usable limit higher.
*/
#if CONFIG_PGTABLE_LEVELS > 2
l1tf_limit <<= PAGE_SHIFT - SWP_OFFSET_FIRST_BIT;
#endif
pages = min_t(unsigned long, l1tf_limit, pages);
}
return pages;
}
#endif
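Continuing the 36-bit MAX_PA example from l1tf_pfn_limit(): because swap offsets are stored three bits below the pfn position, the usable swap limit is eight times the raw pfn limit. A quick standalone calculation:

#include <stdio.h>

#define PAGE_SHIFT           12
#define SWP_OFFSET_FIRST_BIT 9  /* x86: _PAGE_BIT_PROTNONE + 1 */

int main(void)
{
        /* pfn limit + 1 for a CPU with 36 physical address bits */
        unsigned long l1tf_limit = 1UL << (36 - 1 - PAGE_SHIFT);

        /* offsets sit 3 bits below the pfn field, so the usable
         * swap size is 8x the raw pfn limit */
        l1tf_limit <<= PAGE_SHIFT - SWP_OFFSET_FIRST_BIT;
        printf("max swap pages %#lx (%lu GiB)\n", l1tf_limit,
               (l1tf_limit << PAGE_SHIFT) >> 30);
        return 0;
}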

View file

@ -125,24 +125,29 @@ static struct kmmio_fault_page *get_kmmio_fault_page(unsigned long addr)
static void clear_pmd_presence(pmd_t *pmd, bool clear, pmdval_t *old)
{
pmd_t new_pmd;
pmdval_t v = pmd_val(*pmd);
if (clear) {
*old = v & _PAGE_PRESENT;
v &= ~_PAGE_PRESENT;
} else /* presume this has been called with clear==true previously */
v |= *old;
set_pmd(pmd, __pmd(v));
*old = v;
new_pmd = pmd_mknotpresent(*pmd);
} else {
/* Presume this has been called with clear==true previously */
new_pmd = __pmd(*old);
}
set_pmd(pmd, new_pmd);
}
static void clear_pte_presence(pte_t *pte, bool clear, pteval_t *old)
{
pteval_t v = pte_val(*pte);
if (clear) {
*old = v & _PAGE_PRESENT;
v &= ~_PAGE_PRESENT;
} else /* presume this has been called with clear==true previously */
v |= *old;
set_pte_atomic(pte, __pte(v));
*old = v;
/* Nothing should care about address */
pte_clear(&init_mm, 0, pte);
} else {
/* Presume this has been called with clear==true previously */
set_pte_atomic(pte, __pte(*old));
}
}
static int clear_page_presence(struct kmmio_fault_page *f, bool clear)

View file

@ -121,3 +121,24 @@ const char *arch_vma_name(struct vm_area_struct *vma)
return "[mpx]";
return NULL;
}
/*
* Only allow root to set high MMIO mappings to PROT_NONE.
* This prevents an unprivileged user from setting them to PROT_NONE and
* inverting them so that they point at valid memory, enabling L1TF
* speculation.
*
* Note: locked-down kernels may want to disable the root override.
*/
bool pfn_modify_allowed(unsigned long pfn, pgprot_t prot)
{
if (!boot_cpu_has_bug(X86_BUG_L1TF))
return true;
if (!__pte_needs_invert(pgprot_val(prot)))
return true;
/* If it's real memory always allow */
if (pfn_valid(pfn))
return true;
if (pfn > l1tf_pfn_limit() && !capable(CAP_SYS_ADMIN))
return false;
return true;
}
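A worked illustration of the check, reusing the 36-bit MAX_PA numbers from the l1tf_pfn_limit() example earlier (assumed, not from this diff):

/* With 36 physical address bits the limit is pfn 0x7fffff, so an
 * unprivileged PROT_NONE mapping of a non-pfn_valid() pfn above it,
 * e.g. high MMIO at pfn 0x900000, is refused, while root, RAM-backed
 * pfns, and non-inverting (present) protections are always allowed.
 */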

View file

@ -1006,8 +1006,8 @@ static int populate_pmd(struct cpa_data *cpa,
pmd = pmd_offset(pud, start);
set_pmd(pmd, __pmd(cpa->pfn | _PAGE_PSE |
massage_pgprot(pmd_pgprot)));
set_pmd(pmd, pmd_mkhuge(pfn_pmd(cpa->pfn,
canon_pgprot(pmd_pgprot))));
start += PMD_SIZE;
cpa->pfn += PMD_SIZE;
@ -1079,8 +1079,8 @@ static int populate_pud(struct cpa_data *cpa, unsigned long start, pgd_t *pgd,
* Map everything starting from the Gb boundary, possibly with 1G pages
*/
while (end - start >= PUD_SIZE) {
set_pud(pud, __pud(cpa->pfn | _PAGE_PSE |
massage_pgprot(pud_pgprot)));
set_pud(pud, pud_mkhuge(pfn_pud(cpa->pfn,
canon_pgprot(pud_pgprot))));
start += PUD_SIZE;
cpa->pfn += PUD_SIZE;

View file

@ -154,10 +154,12 @@ static const struct lpss_device_desc lpt_sdio_dev_desc = {
static const struct lpss_device_desc byt_pwm_dev_desc = {
.flags = LPSS_SAVE_CTX,
.prv_offset = 0x800,
};
static const struct lpss_device_desc bsw_pwm_dev_desc = {
.flags = LPSS_SAVE_CTX | LPSS_NO_D3_DELAY,
.prv_offset = 0x800,
};
static const struct lpss_device_desc byt_uart_dev_desc = {

View file

@ -699,16 +699,24 @@ ssize_t __weak cpu_show_spec_store_bypass(struct device *dev,
return sprintf(buf, "Not affected\n");
}
ssize_t __weak cpu_show_l1tf(struct device *dev,
struct device_attribute *attr, char *buf)
{
return sprintf(buf, "Not affected\n");
}
static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL);
static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL);
static DEVICE_ATTR(spectre_v2, 0444, cpu_show_spectre_v2, NULL);
static DEVICE_ATTR(spec_store_bypass, 0444, cpu_show_spec_store_bypass, NULL);
static DEVICE_ATTR(l1tf, 0444, cpu_show_l1tf, NULL);
static struct attribute *cpu_root_vulnerabilities_attrs[] = {
&dev_attr_meltdown.attr,
&dev_attr_spectre_v1.attr,
&dev_attr_spectre_v2.attr,
&dev_attr_spec_store_bypass.attr,
&dev_attr_l1tf.attr,
NULL
};

View file

@ -68,6 +68,16 @@
#define FASTRPC_CTX_MAGIC (0xbeeddeed)
#define FASTRPC_CTX_MAX (256)
#define FASTRPC_CTXID_MASK (0xFF0)
#define NUM_DEVICES 2 /* adsprpc-smd, adsprpc-smd-secure */
#define MINOR_NUM_DEV 0
#define MINOR_NUM_SECURE_DEV 1
#define NON_SECURE_CHANNEL 0
#define SECURE_CHANNEL 1
#define ADSP_DOMAIN_ID (0)
#define MDSP_DOMAIN_ID (1)
#define SDSP_DOMAIN_ID (2)
#define CDSP_DOMAIN_ID (3)
#define IS_CACHE_ALIGNED(x) (((x) & ((L1_CACHE_BYTES)-1)) == 0)
@ -232,6 +242,8 @@ struct fastrpc_channel_ctx {
int ramdumpenabled;
void *remoteheap_ramdump_dev;
struct fastrpc_glink_info link;
/* Indicates whether the channel is restricted to the secure node only */
int secure;
};
struct fastrpc_apps {
@ -305,6 +317,8 @@ struct fastrpc_file {
struct fastrpc_perf perf;
struct dentry *debugfs_file;
struct mutex map_mutex;
/* Identifies the device (MINOR_NUM_DEV / MINOR_NUM_SECURE_DEV) */
int dev_minor;
};
static struct fastrpc_apps gfa;
@ -1568,7 +1582,11 @@ static void fastrpc_init(struct fastrpc_apps *me)
init_completion(&me->channel[i].work);
init_completion(&me->channel[i].workport);
me->channel[i].sesscount = 0;
/* All channels are secure by default except CDSP */
me->channel[i].secure = SECURE_CHANNEL;
}
/* Set CDSP channel to non secure */
me->channel[CDSP_DOMAIN_ID].secure = NON_SECURE_CHANNEL;
}
static int fastrpc_release_current_dsp_process(struct fastrpc_file *fl);
@ -2493,6 +2511,9 @@ static ssize_t fastrpc_debugfs_read(struct file *filp, char __user *buffer,
len += scnprintf(fileinfo + len,
DEBUGFS_SIZE - len, "%s\n\n",
chan->name);
len += scnprintf(fileinfo + len,
DEBUGFS_SIZE - len, "%s %d\n",
"secure:", chan->secure);
len += scnprintf(fileinfo + len,
DEBUGFS_SIZE - len, "%s %d\n",
"sesscount:", chan->sesscount);
@ -2521,6 +2542,9 @@ static ssize_t fastrpc_debugfs_read(struct file *filp, char __user *buffer,
len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
"%s %d\n\n",
"SSRCOUNT:", fl->ssrcount);
len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
"%s %d\n\n",
"DEV_MINOR:", fl->dev_minor);
len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
"%s\n",
"LIST OF BUFS:");
@ -2658,6 +2682,19 @@ static int fastrpc_device_open(struct inode *inode, struct file *filp)
struct fastrpc_file *fl = NULL;
struct fastrpc_apps *me = &gfa;
/*
* Indicates which device node was opened:
* MINOR_NUM_DEV or MINOR_NUM_SECURE_DEV
*/
int dev_minor = MINOR(inode->i_rdev);
VERIFY(err, ((dev_minor == MINOR_NUM_DEV) ||
(dev_minor == MINOR_NUM_SECURE_DEV)));
if (err) {
pr_err("adsprpc: Invalid dev minor num %d\n", dev_minor);
return err;
}
VERIFY(err, NULL != (fl = kzalloc(sizeof(*fl), GFP_KERNEL)));
if (err)
return err;
@ -2672,6 +2709,8 @@ static int fastrpc_device_open(struct inode *inode, struct file *filp)
fl->apps = me;
fl->mode = FASTRPC_MODE_SERIAL;
fl->cid = -1;
fl->dev_minor = dev_minor;
if (debugfs_file != NULL)
fl->debugfs_file = debugfs_file;
memset(&fl->perf, 0, sizeof(fl->perf));
@ -2696,6 +2735,23 @@ static int fastrpc_get_info(struct fastrpc_file *fl, uint32_t *info)
VERIFY(err, cid < NUM_CHANNELS);
if (err)
goto bail;
/* Check to see if the device node is non-secure */
if (fl->dev_minor == MINOR_NUM_DEV) {
/*
* For a non-secure device node, check that the channel
* allows non-secure access. If not, bail: the session
* will not start, cid will remain -1, and any further
* method invocations by the client will fail.
*/
if (fl->apps->channel[cid].secure == SECURE_CHANNEL) {
err = -EPERM;
pr_err("adsprpc: GetInfo failed dev %d, cid %d, secure %d\n",
fl->dev_minor, cid,
fl->apps->channel[cid].secure);
goto bail;
}
}
fl->cid = cid;
fl->ssrcount = fl->apps->channel[cid].ssrcount;
VERIFY(err, !fastrpc_session_alloc_locked(
@ -3094,12 +3150,41 @@ bail:
return err;
}
static void configure_secure_channels(uint32_t secure_domains)
{
struct fastrpc_apps *me = &gfa;
int ii = 0;
/*
* secure_domains contains the bitmask of the secure channels
* Bit 0 - ADSP
* Bit 1 - MDSP
* Bit 2 - SLPI
* Bit 3 - CDSP
*/
for (ii = ADSP_DOMAIN_ID; ii <= CDSP_DOMAIN_ID; ++ii) {
int secure = (secure_domains >> ii) & 0x01;
me->channel[ii].secure = secure;
}
}
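/* Worked example (the value is hypothetical, not taken from this diff):
 * qcom,secure-domains = <0x5> sets bits 0 and 2, so
 *   channel[ADSP_DOMAIN_ID].secure = 1, channel[MDSP_DOMAIN_ID].secure = 0,
 *   channel[SDSP_DOMAIN_ID].secure = 1, channel[CDSP_DOMAIN_ID].secure = 0,
 * and only the secure device node can start sessions on ADSP and SLPI.
 */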
static int fastrpc_probe(struct platform_device *pdev)
{
int err = 0;
struct fastrpc_apps *me = &gfa;
struct device *dev = &pdev->dev;
uint32_t secure_domains;
if (of_get_property(dev->of_node,
"qcom,secure-domains", NULL) != NULL) {
VERIFY(err, !of_property_read_u32(dev->of_node,
"qcom,secure-domains",
&secure_domains));
if (!err)
configure_secure_channels(secure_domains);
else
pr_info("adsprpc: unable to read the domain configuration from dts\n");
}
if (of_device_is_compatible(dev->of_node,
"qcom,msm-fastrpc-compute-cb"))
return fastrpc_cb_probe(dev);
@ -3174,6 +3259,7 @@ static int __init fastrpc_device_init(void)
{
struct fastrpc_apps *me = &gfa;
struct device *dev = NULL;
struct device *secure_dev = NULL;
int err = 0, i;
memset(me, 0, sizeof(*me));
@ -3190,7 +3276,7 @@ static int __init fastrpc_device_init(void)
cdev_init(&me->cdev, &fops);
me->cdev.owner = THIS_MODULE;
VERIFY(err, 0 == cdev_add(&me->cdev, MKDEV(MAJOR(me->dev_no), 0),
1));
NUM_DEVICES));
if (err)
goto cdev_init_bail;
me->class = class_create(THIS_MODULE, "fastrpc");
@ -3198,14 +3284,29 @@ static int __init fastrpc_device_init(void)
if (err)
goto class_create_bail;
me->compat = (NULL == fops.compat_ioctl) ? 0 : 1;
/*
* Create devices and register with sysfs
* Create first device with minor number 0
*/
dev = device_create(me->class, NULL,
MKDEV(MAJOR(me->dev_no), 0),
NULL, gcinfo[0].name);
MKDEV(MAJOR(me->dev_no), MINOR_NUM_DEV),
NULL, DEVICE_NAME);
VERIFY(err, !IS_ERR_OR_NULL(dev));
if (err)
goto device_create_bail;
/* Create secure device with minor number for secure device */
secure_dev = device_create(me->class, NULL,
MKDEV(MAJOR(me->dev_no), MINOR_NUM_SECURE_DEV),
NULL, DEVICE_NAME_SECURE);
VERIFY(err, !IS_ERR_OR_NULL(secure_dev));
if (err)
goto device_create_bail;
for (i = 0; i < NUM_CHANNELS; i++) {
me->channel[i].dev = dev;
me->channel[i].dev = secure_dev;
if (i == CDSP_DOMAIN_ID)
me->channel[i].dev = dev;
me->channel[i].ssrcount = 0;
me->channel[i].prevssrcount = 0;
me->channel[i].issubsystemup = 1;
@ -3230,7 +3331,11 @@ device_create_bail:
&me->channel[i].nb);
}
if (!IS_ERR_OR_NULL(dev))
device_destroy(me->class, MKDEV(MAJOR(me->dev_no), 0));
device_destroy(me->class, MKDEV(MAJOR(me->dev_no),
MINOR_NUM_DEV));
if (!IS_ERR_OR_NULL(secure_dev))
device_destroy(me->class, MKDEV(MAJOR(me->dev_no),
MINOR_NUM_SECURE_DEV));
class_destroy(me->class);
class_create_bail:
cdev_del(&me->cdev);
@ -3252,10 +3357,15 @@ static void __exit fastrpc_device_exit(void)
for (i = 0; i < NUM_CHANNELS; i++) {
if (!gcinfo[i].name)
continue;
device_destroy(me->class, MKDEV(MAJOR(me->dev_no), i));
subsys_notif_unregister_notifier(me->channel[i].handle,
&me->channel[i].nb);
}
/* Destroy the secure and non secure devices */
device_destroy(me->class, MKDEV(MAJOR(me->dev_no), MINOR_NUM_DEV));
device_destroy(me->class, MKDEV(MAJOR(me->dev_no),
MINOR_NUM_SECURE_DEV));
class_destroy(me->class);
cdev_del(&me->cdev);
unregister_chrdev_region(me->dev_no, NUM_CHANNELS);

View file

@ -33,6 +33,7 @@
#define FASTRPC_GLINK_GUID "fastrpcglink-apps-dsp"
#define FASTRPC_SMD_GUID "fastrpcsmd-apps-dsp"
#define DEVICE_NAME "adsprpc-smd"
#define DEVICE_NAME_SECURE "adsprpc-smd-secure"
/* Set for buffers that have no virtual mapping in userspace */
#define FASTRPC_ATTR_NOVA 0x1

View file

@ -337,13 +337,14 @@ static void diag_send_msg_mask_update(uint8_t peripheral, int first, int last)
int err = 0;
int header_len = sizeof(struct diag_ctrl_msg_mask);
int temp_len = 0;
uint8_t *buf = NULL, upd = 0;
uint8_t *temp = NULL;
uint8_t *buf = NULL, *temp = NULL;
uint8_t upd = 0;
uint8_t msg_mask_tbl_count_local = 0;
uint32_t mask_size = 0;
struct diag_mask_info *mask_info = NULL;
struct diag_msg_mask_t *mask = NULL;
struct diag_ctrl_msg_mask header;
uint8_t msg_mask_tbl_count_local;
struct diag_md_session_t *md_session_info = NULL;
if (peripheral >= NUM_PERIPHERALS)
return;
@ -357,15 +358,20 @@ static void diag_send_msg_mask_update(uint8_t peripheral, int first, int last)
if (driver->md_session_mask != 0) {
if (driver->md_session_mask & MD_PERIPHERAL_MASK(peripheral)) {
if (driver->md_session_map[peripheral])
if (driver->md_session_map[peripheral]) {
mask_info =
driver->md_session_map[peripheral]->msg_mask;
md_session_info =
driver->md_session_map[peripheral];
}
} else if (driver->md_session_mask &
MD_PERIPHERAL_PD_MASK(peripheral)) {
upd = diag_mask_to_pd_value(driver->md_session_mask);
if (upd && driver->md_session_map[upd])
if (upd && driver->md_session_map[upd]) {
mask_info =
driver->md_session_map[upd]->msg_mask;
md_session_info = driver->md_session_map[upd];
}
} else {
DIAG_LOG(DIAG_DEBUG_MASKS,
"asking for mask update with unknown session mask\n");
@ -384,7 +390,10 @@ static void diag_send_msg_mask_update(uint8_t peripheral, int first, int last)
return;
}
buf = mask_info->update_buf;
msg_mask_tbl_count_local = driver->msg_mask_tbl_count;
if (md_session_info)
msg_mask_tbl_count_local = md_session_info->msg_mask_tbl_count;
else
msg_mask_tbl_count_local = driver->msg_mask_tbl_count;
mutex_unlock(&driver->msg_mask_lock);
mutex_lock(&mask_info->lock);
switch (mask_info->status) {
@ -567,6 +576,7 @@ static int diag_cmd_get_ssid_range(unsigned char *src_buf, int src_len,
{
int i;
int write_len = 0;
uint8_t msg_mask_tbl_count = 0;
struct diag_msg_mask_t *mask_ptr = NULL;
struct diag_msg_ssid_query_t rsp;
struct diag_ssid_range_t ssid_range;
@ -596,15 +606,17 @@ static int diag_cmd_get_ssid_range(unsigned char *src_buf, int src_len,
return 0;
}
mutex_lock(&driver->msg_mask_lock);
msg_mask_tbl_count = (info) ? info->msg_mask_tbl_count :
driver->msg_mask_tbl_count;
rsp.cmd_code = DIAG_CMD_MSG_CONFIG;
rsp.sub_cmd = DIAG_CMD_OP_GET_SSID_RANGE;
rsp.status = MSG_STATUS_SUCCESS;
rsp.padding = 0;
rsp.count = driver->msg_mask_tbl_count;
rsp.count = msg_mask_tbl_count;
memcpy(dest_buf, &rsp, sizeof(rsp));
write_len += sizeof(rsp);
mask_ptr = (struct diag_msg_mask_t *)mask_info->ptr;
for (i = 0; i < driver->msg_mask_tbl_count; i++, mask_ptr++) {
for (i = 0; i < msg_mask_tbl_count; i++, mask_ptr++) {
if (write_len + sizeof(ssid_range) > dest_len) {
pr_err("diag: In %s, Truncating response due to size limitations of rsp buffer\n",
__func__);
@ -681,6 +693,7 @@ static int diag_cmd_get_msg_mask(unsigned char *src_buf, int src_len,
int i;
int write_len = 0;
uint32_t mask_size = 0;
uint8_t msg_mask_tbl_count = 0;
struct diag_msg_mask_t *mask = NULL;
struct diag_build_mask_req_t *req = NULL;
struct diag_msg_build_mask_t rsp;
@ -711,6 +724,8 @@ static int diag_cmd_get_msg_mask(unsigned char *src_buf, int src_len,
}
mutex_lock(&driver->msg_mask_lock);
msg_mask_tbl_count = (info) ? info->msg_mask_tbl_count :
driver->msg_mask_tbl_count;
req = (struct diag_build_mask_req_t *)src_buf;
rsp.cmd_code = DIAG_CMD_MSG_CONFIG;
rsp.sub_cmd = DIAG_CMD_OP_GET_MSG_MASK;
@ -726,7 +741,7 @@ static int diag_cmd_get_msg_mask(unsigned char *src_buf, int src_len,
mutex_unlock(&driver->md_session_lock);
return -EINVAL;
}
for (i = 0; i < driver->msg_mask_tbl_count; i++, mask++) {
for (i = 0; i < msg_mask_tbl_count; i++, mask++) {
if (!mask->ptr)
continue;
if ((req->ssid_first < mask->ssid_first) ||
@ -765,6 +780,7 @@ static int diag_cmd_set_msg_mask(unsigned char *src_buf, int src_len,
struct diag_msg_mask_t *mask_next = NULL;
uint32_t *temp = NULL;
struct diag_md_session_t *info = NULL;
uint8_t msg_mask_tbl_count = 0;
mutex_lock(&driver->md_session_lock);
info = diag_md_session_get_pid(pid);
@ -797,10 +813,12 @@ static int diag_cmd_set_msg_mask(unsigned char *src_buf, int src_len,
mutex_unlock(&driver->md_session_lock);
return -EINVAL;
}
for (i = 0; i < driver->msg_mask_tbl_count; i++, mask++) {
msg_mask_tbl_count = (info) ? info->msg_mask_tbl_count :
driver->msg_mask_tbl_count;
for (i = 0; i < msg_mask_tbl_count; i++, mask++) {
if (!mask->ptr)
continue;
if (i < (driver->msg_mask_tbl_count - 1)) {
if (i < (msg_mask_tbl_count - 1)) {
mask_next = mask;
mask_next++;
} else
@ -904,6 +922,7 @@ static int diag_cmd_set_all_msg_mask(unsigned char *src_buf, int src_len,
struct diag_msg_mask_t *mask = NULL;
struct diag_mask_info *mask_info = NULL;
struct diag_md_session_t *info = NULL;
uint8_t msg_mask_tbl_count = 0;
mutex_lock(&driver->md_session_lock);
info = diag_md_session_get_pid(pid);
@ -938,9 +957,11 @@ static int diag_cmd_set_all_msg_mask(unsigned char *src_buf, int src_len,
mutex_unlock(&driver->md_session_lock);
return -EINVAL;
}
msg_mask_tbl_count = (info) ? info->msg_mask_tbl_count :
driver->msg_mask_tbl_count;
mask_info->status = (req->rt_mask) ? DIAG_CTRL_MASK_ALL_ENABLED :
DIAG_CTRL_MASK_ALL_DISABLED;
for (i = 0; i < driver->msg_mask_tbl_count; i++, mask++) {
for (i = 0; i < msg_mask_tbl_count; i++, mask++) {
if (mask && mask->ptr) {
mutex_lock(&mask->lock);
memset(mask->ptr, req->rt_mask,
@ -1731,7 +1752,6 @@ static int __diag_mask_init(struct diag_mask_info *mask_info, int mask_len,
}
kmemleak_not_leak(mask_info->update_buf);
}
mutex_init(&mask_info->lock);
return 0;
}
@ -1755,9 +1775,10 @@ int diag_log_mask_copy(struct diag_mask_info *dest, struct diag_mask_info *src)
struct diag_log_mask_t *src_mask = NULL;
struct diag_log_mask_t *dest_mask = NULL;
if (!src)
if (!src || !dest)
return -EINVAL;
mutex_init(&dest->lock);
err = __diag_mask_init(dest, LOG_MASK_SIZE, APPS_BUF_SIZE);
if (err)
return err;
@ -1820,9 +1841,11 @@ static int diag_msg_mask_init(void)
int err = 0;
int i;
mutex_init(&msg_mask.lock);
err = __diag_mask_init(&msg_mask, MSG_MASK_SIZE, APPS_BUF_SIZE);
if (err)
return err;
err = diag_create_msg_mask_table();
if (err) {
pr_err("diag: Unable to create msg masks, err: %d\n", err);
@ -1837,7 +1860,8 @@ static int diag_msg_mask_init(void)
return 0;
}
int diag_msg_mask_copy(struct diag_mask_info *dest, struct diag_mask_info *src)
int diag_msg_mask_copy(struct diag_md_session_t *new_session,
struct diag_mask_info *dest, struct diag_mask_info *src)
{
int i;
int err = 0;
@ -1848,17 +1872,25 @@ int diag_msg_mask_copy(struct diag_mask_info *dest, struct diag_mask_info *src)
if (!src || !dest)
return -EINVAL;
err = __diag_mask_init(dest, MSG_MASK_SIZE, APPS_BUF_SIZE);
if (err)
return err;
mutex_init(&dest->lock);
mutex_lock(&dest->lock);
mutex_lock(&driver->msg_mask_lock);
new_session->msg_mask_tbl_count =
driver->msg_mask_tbl_count;
err = __diag_mask_init(dest,
(new_session->msg_mask_tbl_count *
sizeof(struct diag_msg_mask_t)), APPS_BUF_SIZE);
if (err) {
mutex_unlock(&driver->msg_mask_lock);
mutex_unlock(&dest->lock);
return err;
}
src_mask = (struct diag_msg_mask_t *)src->ptr;
dest_mask = (struct diag_msg_mask_t *)dest->ptr;
dest->mask_len = src->mask_len;
dest->status = src->status;
for (i = 0; i < driver->msg_mask_tbl_count; i++) {
for (i = 0; i < new_session->msg_mask_tbl_count; i++) {
range.ssid_first = src_mask->ssid_first;
range.ssid_last = src_mask->ssid_last;
err = diag_create_msg_mask_table_entry(dest_mask, &range);
@ -1874,10 +1906,12 @@ int diag_msg_mask_copy(struct diag_mask_info *dest, struct diag_mask_info *src)
return err;
}
void diag_msg_mask_free(struct diag_mask_info *mask_info)
void diag_msg_mask_free(struct diag_mask_info *mask_info,
struct diag_md_session_t *session_info)
{
int i;
struct diag_msg_mask_t *mask = NULL;
uint8_t msg_mask_tbl_count = 0;
if (!mask_info || !mask_info->ptr)
return;
@ -1891,7 +1925,10 @@ void diag_msg_mask_free(struct diag_mask_info *mask_info)
mutex_unlock(&mask_info->lock);
return;
}
for (i = 0; i < driver->msg_mask_tbl_count; i++, mask++) {
msg_mask_tbl_count = (session_info) ?
session_info->msg_mask_tbl_count :
driver->msg_mask_tbl_count;
for (i = 0; i < msg_mask_tbl_count; i++, mask++) {
kfree(mask->ptr);
mask->ptr = NULL;
}
@ -1922,6 +1959,7 @@ static int diag_build_time_mask_init(void)
int err = 0;
/* There is no need for update buffer for Build Time masks */
mutex_init(&msg_bt_mask.lock);
err = __diag_mask_init(&msg_bt_mask, MSG_MASK_SIZE, 0);
if (err)
return err;
@ -1955,6 +1993,7 @@ static int diag_log_mask_init(void)
int err = 0;
int i;
mutex_init(&log_mask.lock);
err = __diag_mask_init(&log_mask, LOG_MASK_SIZE, APPS_BUF_SIZE);
if (err)
return err;
@ -1989,6 +2028,7 @@ static int diag_event_mask_init(void)
int err = 0;
int i;
mutex_init(&event_mask.lock);
err = __diag_mask_init(&event_mask, EVENT_MASK_SIZE, APPS_BUF_SIZE);
if (err)
return err;
@ -2010,6 +2050,7 @@ int diag_event_mask_copy(struct diag_mask_info *dest,
if (!src || !dest)
return -EINVAL;
mutex_init(&dest->lock);
err = __diag_mask_init(dest, EVENT_MASK_SIZE, APPS_BUF_SIZE);
if (err)
return err;
@ -2049,6 +2090,7 @@ int diag_copy_to_user_msg_mask(char __user *buf, size_t count,
struct diag_mask_info *mask_info = NULL;
struct diag_msg_mask_t *mask = NULL;
unsigned char *ptr = NULL;
uint8_t msg_mask_tbl_count = 0;
if (!buf || count == 0)
return -EINVAL;
@ -2081,7 +2123,9 @@ int diag_copy_to_user_msg_mask(char __user *buf, size_t count,
mutex_unlock(&mask_info->lock);
return -EINVAL;
}
for (i = 0; i < driver->msg_mask_tbl_count; i++, mask++) {
msg_mask_tbl_count = (info) ? info->msg_mask_tbl_count :
driver->msg_mask_tbl_count;
for (i = 0; i < msg_mask_tbl_count; i++, mask++) {
if (!mask->ptr)
continue;
ptr = mask_info->update_buf;
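The change repeated across these hunks selects the session's snapshot of the mask-table count when a memory-device session owns the masks, and the live global count otherwise. A minimal sketch of that selection as a helper (hypothetical; the driver open-codes the ternary at each call site):

static uint8_t diag_msg_tbl_count(const struct diag_md_session_t *info)
{
	/* Session snapshot taken at diag_msg_mask_copy() time, else the
	 * global count, which can grow as peripherals register ranges.
	 */
	return info ? info->msg_mask_tbl_count : driver->msg_mask_tbl_count;
}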

View file

@ -160,12 +160,13 @@ int diag_masks_init(void);
void diag_masks_exit(void);
int diag_log_mask_copy(struct diag_mask_info *dest,
struct diag_mask_info *src);
int diag_msg_mask_copy(struct diag_mask_info *dest,
struct diag_mask_info *src);
int diag_msg_mask_copy(struct diag_md_session_t *new_session,
struct diag_mask_info *dest, struct diag_mask_info *src);
int diag_event_mask_copy(struct diag_mask_info *dest,
struct diag_mask_info *src);
void diag_log_mask_free(struct diag_mask_info *mask_info);
void diag_msg_mask_free(struct diag_mask_info *mask_info);
void diag_msg_mask_free(struct diag_mask_info *mask_info,
struct diag_md_session_t *session_info);
void diag_event_mask_free(struct diag_mask_info *mask_info);
int diag_process_apps_masks(unsigned char *buf, int len, int pid);
void diag_send_updates_peripheral(uint8_t peripheral);

View file

@ -442,6 +442,7 @@ struct diag_md_session_t {
int pid;
int peripheral_mask;
uint8_t hdlc_disabled;
uint8_t msg_mask_tbl_count;
struct timer_list hdlc_reset_timer;
struct diag_mask_info *msg_mask;
struct diag_mask_info *log_mask;

View file

@ -1299,7 +1299,8 @@ static void diag_md_session_exit(void)
diag_log_mask_free(session_info->log_mask);
kfree(session_info->log_mask);
session_info->log_mask = NULL;
diag_msg_mask_free(session_info->msg_mask);
diag_msg_mask_free(session_info->msg_mask,
session_info);
kfree(session_info->msg_mask);
session_info->msg_mask = NULL;
diag_event_mask_free(session_info->event_mask);
@ -1371,7 +1372,9 @@ int diag_md_session_create(int mode, int peripheral_mask, int proc)
"return value of event copy. err %d\n", err);
goto fail_peripheral;
}
err = diag_msg_mask_copy(new_session->msg_mask, &msg_mask);
new_session->msg_mask_tbl_count = 0;
err = diag_msg_mask_copy(new_session, new_session->msg_mask,
&msg_mask);
if (err) {
DIAG_LOG(DIAG_DEBUG_USERSPACE,
"return value of msg copy. err %d\n", err);
@ -1407,7 +1410,8 @@ fail_peripheral:
diag_event_mask_free(new_session->event_mask);
kfree(new_session->event_mask);
new_session->event_mask = NULL;
diag_msg_mask_free(new_session->msg_mask);
diag_msg_mask_free(new_session->msg_mask,
new_session);
kfree(new_session->msg_mask);
new_session->msg_mask = NULL;
kfree(new_session);
@ -1435,7 +1439,8 @@ static void diag_md_session_close(int pid)
diag_log_mask_free(session_info->log_mask);
kfree(session_info->log_mask);
session_info->log_mask = NULL;
diag_msg_mask_free(session_info->msg_mask);
diag_msg_mask_free(session_info->msg_mask,
session_info);
kfree(session_info->msg_mask);
session_info->msg_mask = NULL;
diag_event_mask_free(session_info->event_mask);

View file

@ -613,7 +613,12 @@ static int update_msg_mask_tbl_entry(struct diag_msg_mask_t *mask,
}
if (range->ssid_last >= mask->ssid_last) {
temp_range = range->ssid_last - mask->ssid_first + 1;
mask->ssid_last = range->ssid_last;
if (temp_range > MAX_SSID_PER_RANGE) {
temp_range = MAX_SSID_PER_RANGE;
mask->ssid_last = mask->ssid_first + temp_range - 1;
} else
mask->ssid_last = range->ssid_last;
mask->ssid_last_tools = mask->ssid_last;
mask->range = temp_range;
}
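A worked example of the clamp, as a standalone sketch with an assumed MAX_SSID_PER_RANGE of 200: a request extending 251 entries past ssid_first is truncated so the range never indexes past the allocated mask array.

#define MAX_SSID_PER_RANGE 200			/* assumed value for illustration */

unsigned int ssid_first = 100, req_last = 350;
unsigned int temp_range = req_last - ssid_first + 1;	/* 251 */

if (temp_range > MAX_SSID_PER_RANGE) {
	temp_range = MAX_SSID_PER_RANGE;	/* clamped to 200 */
	req_last = ssid_first + temp_range - 1;	/* ssid_last becomes 299 */
}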

View file

@ -328,13 +328,16 @@ static void hsic_read_complete_work_fn(struct work_struct *work)
read_complete_work);
struct diag_hsic_buf_tbl_t *item;
item = hsic_buf_tbl_pop(ch);
if (item) {
if (diag_remote_dev_read_done(ch->dev_id, item->buf, item->len))
goto fail;
}
do {
item = hsic_buf_tbl_pop(ch);
if (item) {
if (diag_remote_dev_read_done(ch->dev_id,
item->buf, item->len))
goto fail;
kfree(item);
}
} while (item);
kfree(item);
return;
fail:

View file

@ -25,7 +25,7 @@ struct file_priv {
struct tpm_chip *chip;
/* Data passed to and from the tpm via the read/write calls */
atomic_t data_pending;
size_t data_pending;
struct mutex buffer_mutex;
struct timer_list user_read_timer; /* user needs to claim result */
@ -46,7 +46,7 @@ static void timeout_work(struct work_struct *work)
struct file_priv *priv = container_of(work, struct file_priv, work);
mutex_lock(&priv->buffer_mutex);
atomic_set(&priv->data_pending, 0);
priv->data_pending = 0;
memset(priv->data_buffer, 0, sizeof(priv->data_buffer));
mutex_unlock(&priv->buffer_mutex);
}
@ -72,7 +72,6 @@ static int tpm_open(struct inode *inode, struct file *file)
}
priv->chip = chip;
atomic_set(&priv->data_pending, 0);
mutex_init(&priv->buffer_mutex);
setup_timer(&priv->user_read_timer, user_reader_timeout,
(unsigned long)priv);
@ -86,28 +85,24 @@ static ssize_t tpm_read(struct file *file, char __user *buf,
size_t size, loff_t *off)
{
struct file_priv *priv = file->private_data;
ssize_t ret_size;
ssize_t ret_size = 0;
int rc;
del_singleshot_timer_sync(&priv->user_read_timer);
flush_work(&priv->work);
ret_size = atomic_read(&priv->data_pending);
if (ret_size > 0) { /* relay data */
ssize_t orig_ret_size = ret_size;
if (size < ret_size)
ret_size = size;
mutex_lock(&priv->buffer_mutex);
mutex_lock(&priv->buffer_mutex);
if (priv->data_pending) {
ret_size = min_t(ssize_t, size, priv->data_pending);
rc = copy_to_user(buf, priv->data_buffer, ret_size);
memset(priv->data_buffer, 0, orig_ret_size);
memset(priv->data_buffer, 0, priv->data_pending);
if (rc)
ret_size = -EFAULT;
mutex_unlock(&priv->buffer_mutex);
priv->data_pending = 0;
}
atomic_set(&priv->data_pending, 0);
mutex_unlock(&priv->buffer_mutex);
return ret_size;
}
@ -118,18 +113,20 @@ static ssize_t tpm_write(struct file *file, const char __user *buf,
size_t in_size = size;
ssize_t out_size;
/* cannot perform a write until the read has cleared
either via tpm_read or a user_read_timer timeout.
This also prevents splitted buffered writes from blocking here.
*/
if (atomic_read(&priv->data_pending) != 0)
return -EBUSY;
if (in_size > TPM_BUFSIZE)
return -E2BIG;
mutex_lock(&priv->buffer_mutex);
/* Cannot perform a write until the read has cleared either via
* tpm_read or a user_read_timer timeout. This also prevents split
* buffered writes from blocking here.
*/
if (priv->data_pending != 0) {
mutex_unlock(&priv->buffer_mutex);
return -EBUSY;
}
if (copy_from_user
(priv->data_buffer, (void __user *) buf, in_size)) {
mutex_unlock(&priv->buffer_mutex);
@ -153,7 +150,7 @@ static ssize_t tpm_write(struct file *file, const char __user *buf,
return out_size;
}
atomic_set(&priv->data_pending, out_size);
priv->data_pending = out_size;
mutex_unlock(&priv->buffer_mutex);
/* Set a timeout by which the reader must come claim the result */
@ -172,7 +169,7 @@ static int tpm_release(struct inode *inode, struct file *file)
del_singleshot_timer_sync(&priv->user_read_timer);
flush_work(&priv->work);
file->private_data = NULL;
atomic_set(&priv->data_pending, 0);
priv->data_pending = 0;
clear_bit(0, &priv->chip->is_open);
kfree(priv);
return 0;
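The switch from atomic_t to a plain size_t is safe because every read and write of data_pending now sits inside buffer_mutex, so the length and the buffer contents are always observed together; previously timeout_work could zero the buffer between the atomic_read() and the copy. A condensed sketch of the resulting reader:

mutex_lock(&priv->buffer_mutex);
if (priv->data_pending) {				/* length ...        */
	n = min_t(ssize_t, size, priv->data_pending);
	rc = copy_to_user(buf, priv->data_buffer, n);	/* ... and data      */
	priv->data_pending = 0;				/* observed together */
}
mutex_unlock(&priv->buffer_mutex);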

View file

@ -318,7 +318,7 @@ static int _msm_parse_dt(struct device_node *node, u32 *client_id)
const char *client_id_str;
client_id_str = of_get_property(node, "qcom,client-id", &len);
if (len != CLIENT_ID_LEN_IN_CHARS) {
if (!client_id_str || len != CLIENT_ID_LEN_IN_CHARS) {
DBG("client_id_str len(%d) is invalid\n", len);
ret = -EINVAL;
} else {

View file

@ -176,18 +176,6 @@ static int dsi_display_ctrl_power_on(struct dsi_display *display)
if (display->cont_splash_enabled) {
pr_debug("skip ctrl power on\n");
for (i = 0; i < display->ctrl_count; i++) {
ctrl = &display->ctrl[i];
if (!ctrl->ctrl)
continue;
if (!ctrl->ctrl->current_state.pwr_enabled) {
ctrl->ctrl->pwr_info.host_pwr.refcount++;
ctrl->ctrl->pwr_info.digital.refcount++;
ctrl->ctrl->current_state.power_state =
DSI_CTRL_POWER_VREG_ON;
ctrl->ctrl->current_state.pwr_enabled = true;
}
}
return rc;
}
@ -251,16 +239,6 @@ static int dsi_display_phy_power_on(struct dsi_display *display)
/* early return for splash enabled case */
if (display->cont_splash_enabled) {
pr_debug("skip phy power on\n");
for (i = 0; i < display->ctrl_count; i++) {
ctrl = &display->ctrl[i];
if (!ctrl->ctrl)
continue;
if (!ctrl->phy->power_state) {
ctrl->phy->pwr_info.digital.refcount++;
ctrl->phy->pwr_info.phy_pwr.refcount++;
ctrl->phy->power_state = true;
}
}
return rc;
}
@ -320,25 +298,9 @@ static int dsi_display_ctrl_core_clk_on(struct dsi_display *display)
int i;
struct dsi_display_ctrl *m_ctrl, *ctrl;
m_ctrl = &display->ctrl[display->clk_master_idx];
/* early return for splash enabled case */
if (display->cont_splash_enabled) {
pr_debug("skip core clk on calling\n");
m_ctrl->ctrl->current_state.pwr_enabled = true;
m_ctrl->ctrl->current_state.core_clk_enabled = true;
m_ctrl->ctrl->current_state.power_state =
DSI_CTRL_POWER_CORE_CLK_ON;
for (i = 0; i < display->ctrl_count; i++) {
ctrl = &display->ctrl[i];
if (!ctrl->ctrl || (ctrl == m_ctrl))
continue;
ctrl->ctrl->current_state.pwr_enabled = true;
ctrl->ctrl->current_state.core_clk_enabled = true;
ctrl->ctrl->current_state.power_state =
DSI_CTRL_POWER_CORE_CLK_ON;
}
return rc;
}
@ -347,6 +309,8 @@ static int dsi_display_ctrl_core_clk_on(struct dsi_display *display)
* be enabled before the other controller. Master controller in the
* clock context refers to the controller that sources the clock.
*/
m_ctrl = &display->ctrl[display->clk_master_idx];
rc = dsi_ctrl_set_power_state(m_ctrl->ctrl, DSI_CTRL_POWER_CORE_CLK_ON);
if (rc) {
pr_err("[%s] failed to turn on clocks, rc=%d\n",
@ -381,26 +345,9 @@ static int dsi_display_ctrl_link_clk_on(struct dsi_display *display)
int i;
struct dsi_display_ctrl *m_ctrl, *ctrl;
m_ctrl = &display->ctrl[display->clk_master_idx];
/* early return for splash enabled case */
if (display->cont_splash_enabled) {
pr_debug("skip ctrl link clk on calling\n");
m_ctrl->ctrl->current_state.pwr_enabled = true;
m_ctrl->ctrl->current_state.core_clk_enabled = true;
m_ctrl->ctrl->current_state.link_clk_enabled = true;
m_ctrl->ctrl->current_state.power_state =
DSI_CTRL_POWER_LINK_CLK_ON;
for (i = 0; i < display->ctrl_count; i++) {
ctrl = &display->ctrl[i];
if (!ctrl->ctrl || (ctrl == m_ctrl))
continue;
ctrl->ctrl->current_state.pwr_enabled = true;
ctrl->ctrl->current_state.core_clk_enabled = true;
ctrl->ctrl->current_state.link_clk_enabled = true;
ctrl->ctrl->current_state.power_state =
DSI_CTRL_POWER_LINK_CLK_ON;
}
return rc;
}
@ -409,6 +356,7 @@ static int dsi_display_ctrl_link_clk_on(struct dsi_display *display)
* be enabled before the other controller. Master controller in the
* clock context refers to the controller that sources the clock.
*/
m_ctrl = &display->ctrl[display->clk_master_idx];
rc = dsi_ctrl_set_clock_source(m_ctrl->ctrl,
&display->clock_info.src_clks);
@ -2910,10 +2858,20 @@ int dsi_dsiplay_setup_splash_resource(struct dsi_display *display)
if (!ctrl)
return -EINVAL;
/* set dsi ctrl power state */
ret = dsi_ctrl_set_power_state(ctrl->ctrl,
DSI_CTRL_POWER_LINK_CLK_ON);
if (ret) {
SDE_ERROR("calling dsi_ctrl_set_power_state failed\n");
pr_err("%s:fail to call dsi_ctrl_set_power_state\n",
__func__);
return ret;
}
/* set dsi phy power state */
ret = dsi_phy_set_power_state(ctrl->phy, true);
if (ret) {
pr_err("%s:fail to call dsi_phy_set_power_state\n",
__func__);
return ret;
}
}

View file

@ -2754,6 +2754,13 @@ enum drm_mode_status sde_hdmi_mode_valid(struct drm_connector *connector,
if (actual != requested)
return MODE_CLOCK_RANGE;
/* if no format flags are present remove the mode */
if (!(mode->flags & SDE_DRM_MODE_FLAG_FMT_MASK)) {
SDE_HDMI_DEBUG("removing following mode from list\n");
drm_mode_debug_printmodeline(mode);
return MODE_BAD;
}
return MODE_OK;
}

View file

@ -591,21 +591,24 @@ static void sde_hdmi_update_hdcp_info(struct drm_connector *connector)
DEV_ERR("%s: invalid input\n", __func__);
return;
}
/* check first if hdcp2p2 is supported */
fd = display->hdcp_feat_data[SDE_HDCP_2P2];
if (fd)
ops = sde_hdmi_hdcp2p2_start(fd);
/* If ops is true, sink supports hdcp */
if (ops)
display->sink_hdcp22_support = true;
if (ops && ops->feature_supported)
display->hdcp22_present = ops->feature_supported(fd);
else
if (display->skip_ddc) {
display->sink_hdcp22_support = false;
display->hdcp22_present = false;
} else {
/* check first if hdcp2p2 is supported */
fd = display->hdcp_feat_data[SDE_HDCP_2P2];
if (fd)
ops = sde_hdmi_hdcp2p2_start(fd);
/* If ops is true, sink supports hdcp */
if (ops)
display->sink_hdcp22_support = true;
if (ops && ops->feature_supported)
display->hdcp22_present = ops->feature_supported(fd);
else
display->hdcp22_present = false;
}
/* if hdcp22_present is true, src supports hdcp 2p2 */
if (display->hdcp22_present)
display->src_hdcp22_support = true;

View file

@ -366,6 +366,33 @@ struct drm_connector *connector, struct sde_edid_ctrl *edid_ctrl)
else
SDE_EDID_DEBUG("YCbCr420 CMDB is not present\n");
/*
* As per HDMI 2.0 spec, a sink supporting any modes
* requiring more than a 340 MHz clock rate should support
* SCDC as well. This is required because we need the SCDC
* channel to set the TMDS clock ratio. However in cases
* where the TV publishes such a mode in its list of modes
* but does not have SCDC support as per HDMI HFVSDB block
* remove RGB mode support from the flags. Currently, in
* the list of modes not having deep color support, only RGB
* modes shall require a clock of 340 MHz and above, such as the
* 4K@60fps case. All other modes shall be YUV.
* Deep color case is handled separately while choosing the
* best mode in the _sde_hdmi_choose_best_format API where
* we enable deep color only if it satisfies both source and
* sink requirements. However, that API assumes that at least
* RGB mode is supported on the mode. Hence, it would be better
* to remove the format support flags while parsing the EDID
* itself if it doesn't satisfy the HDMI spec requirement.
*/
list_for_each_entry(mode, &connector->probed_modes, head) {
if ((mode->clock > MIN_SCRAMBLER_REQ_RATE) &&
!connector->scdc_present) {
mode->flags &= ~DRM_MODE_FLAG_SUPPORTS_RGB;
}
}
SDE_EDID_DEBUG("%s -\n", __func__);
}
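Taken together with the mode_valid change earlier in the patch, the pruning is two-step; a condensed sketch (macros as defined in this patch):

/* Step 1, at EDID parse: >340 MHz without SCDC cannot be driven as RGB. */
list_for_each_entry(mode, &connector->probed_modes, head)
	if (mode->clock > MIN_SCRAMBLER_REQ_RATE && !connector->scdc_present)
		mode->flags &= ~DRM_MODE_FLAG_SUPPORTS_RGB;

/* Step 2, at mode validation: no surviving format flag, no mode. */
if (!(mode->flags & SDE_DRM_MODE_FLAG_FMT_MASK))
	return MODE_BAD;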

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2017, The Linux Foundation. All rights reserved.
* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@ -33,6 +33,8 @@
#define SDE_CEA_EXT 0x02
#define SDE_EXTENDED_TAG 0x07
#define MIN_SCRAMBLER_REQ_RATE 340000
#define SDE_DRM_MODE_FLAG_FMT_MASK (0x3 << 20)
enum extended_data_block_types {

View file

@ -1,4 +1,4 @@
/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@ -1726,7 +1726,7 @@ int qpnp_adc_get_revid_version(struct device *dev)
}
revid_data = get_revid_data(revid_dev_node);
if (IS_ERR(revid_data)) {
if (IS_ERR_OR_NULL(revid_data)) {
pr_debug("revid error rc = %ld\n", PTR_ERR(revid_data));
return -EINVAL;
}

View file

@ -382,6 +382,7 @@ static int i2c_imx_dma_xfer(struct imx_i2c_struct *i2c_imx,
goto err_desc;
}
reinit_completion(&dma->cmd_complete);
txdesc->callback = i2c_imx_dma_callback;
txdesc->callback_param = i2c_imx;
if (dma_submit_error(dmaengine_submit(txdesc))) {
@ -631,7 +632,6 @@ static int i2c_imx_dma_write(struct imx_i2c_struct *i2c_imx,
* The first byte must be transmitted by the CPU.
*/
imx_i2c_write_reg(msgs->addr << 1, i2c_imx, IMX_I2C_I2DR);
reinit_completion(&i2c_imx->dma->cmd_complete);
time_left = wait_for_completion_timeout(
&i2c_imx->dma->cmd_complete,
msecs_to_jiffies(DMA_TIMEOUT));
@ -690,7 +690,6 @@ static int i2c_imx_dma_read(struct imx_i2c_struct *i2c_imx,
if (result)
return result;
reinit_completion(&i2c_imx->dma->cmd_complete);
time_left = wait_for_completion_timeout(
&i2c_imx->dma->cmd_complete,
msecs_to_jiffies(DMA_TIMEOUT));

View file

@ -122,16 +122,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
umem->address = addr;
umem->page_size = PAGE_SIZE;
umem->pid = get_task_pid(current, PIDTYPE_PID);
/*
* We ask for writable memory if any of the following
* access flags are set. "Local write" and "remote write"
* obviously require write access. "Remote atomic" can do
* things like fetch and add, which will modify memory, and
* "MW bind" can change permissions by binding a window.
*/
umem->writable = !!(access &
(IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE |
IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_MW_BIND));
umem->writable = ib_access_writable(access);
if (access & IB_ACCESS_ON_DEMAND) {
put_pid(umem->pid);

View file

@ -130,6 +130,40 @@ out:
return err;
}
static struct ib_umem *mlx4_get_umem_mr(struct ib_ucontext *context, u64 start,
u64 length, u64 virt_addr,
int access_flags)
{
/*
* Force registering the memory as writable if the underlying pages
* are writable. This is so rereg can change the access permissions
* from readable to writable without having to run through ib_umem_get
* again
*/
if (!ib_access_writable(access_flags)) {
struct vm_area_struct *vma;
down_read(&current->mm->mmap_sem);
/*
* FIXME: Ideally this would iterate over all the vmas that
* cover the memory, but for now it requires a single vma to
* entirely cover the MR to support RO mappings.
*/
vma = find_vma(current->mm, start);
if (vma && vma->vm_end >= start + length &&
vma->vm_start <= start) {
if (vma->vm_flags & VM_WRITE)
access_flags |= IB_ACCESS_LOCAL_WRITE;
} else {
access_flags |= IB_ACCESS_LOCAL_WRITE;
}
up_read(&current->mm->mmap_sem);
}
return ib_umem_get(context, start, length, access_flags, 0);
}
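The single-VMA requirement above can be read as a coverage predicate; a sketch (hypothetical helper performing the same test as the code):

static bool vma_covers_range(struct vm_area_struct *vma, u64 start, u64 length)
{
	/* True when one VMA spans all of [start, start + length). */
	return vma && vma->vm_start <= start && vma->vm_end >= start + length;
}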
struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
u64 virt_addr, int access_flags,
struct ib_udata *udata)
@ -144,10 +178,8 @@ struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
if (!mr)
return ERR_PTR(-ENOMEM);
/* Force registering the memory as writable. */
/* Used for memory re-registeration. HCA protects the access */
mr->umem = ib_umem_get(pd->uobject->context, start, length,
access_flags | IB_ACCESS_LOCAL_WRITE, 0);
mr->umem = mlx4_get_umem_mr(pd->uobject->context, start, length,
virt_addr, access_flags);
if (IS_ERR(mr->umem)) {
err = PTR_ERR(mr->umem);
goto err_free;
@ -214,6 +246,9 @@ int mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags,
}
if (flags & IB_MR_REREG_ACCESS) {
if (ib_access_writable(mr_access_flags) && !mmr->umem->writable)
return -EPERM;
err = mlx4_mr_hw_change_access(dev->dev, *pmpt_entry,
convert_access(mr_access_flags));
@ -227,10 +262,9 @@ int mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags,
mlx4_mr_rereg_mem_cleanup(dev->dev, &mmr->mmr);
ib_umem_release(mmr->umem);
mmr->umem = ib_umem_get(mr->uobject->context, start, length,
mr_access_flags |
IB_ACCESS_LOCAL_WRITE,
0);
mmr->umem =
mlx4_get_umem_mr(mr->uobject->context, start, length,
virt_addr, mr_access_flags);
if (IS_ERR(mmr->umem)) {
err = PTR_ERR(mmr->umem);
/* Prevent mlx4_ib_dereg_mr from free'ing invalid pointer */

View file

@ -643,7 +643,7 @@ static ssize_t ocrdma_dbgfs_ops_write(struct file *filp,
struct ocrdma_stats *pstats = filp->private_data;
struct ocrdma_dev *dev = pstats->dev;
if (count > 32)
if (*ppos != 0 || count == 0 || count > sizeof(tmp_str))
goto err;
if (copy_from_user(tmp_str, buffer, count))

View file

@ -287,6 +287,22 @@
FSR_EF | FSR_PF | FSR_TF | FSR_IGN)
#define FSYNR0_WNR (1 << 4)
#define MAX_GLOBAL_REG_SAVE_ENTRIES (2 * ARM_SMMU_MAX_SMRS + 1)
enum arm_smmu_save_ctx {
SAVE_ARM_SMMU_CB_SCTLR,
SAVE_ARM_SMMU_CB_ACTLR,
SAVE_ARM_SMMU_CB_TTBCR2,
SAVE_ARM_SMMU_CB_TTBR0,
SAVE_ARM_SMMU_CB_TTBR1,
SAVE_ARM_SMMU_CB_TTBCR,
SAVE_ARM_SMMU_CB_CONTEXTIDR,
SAVE_ARM_SMMU_CB_S1_MAIR0,
SAVE_ARM_SMMU_CB_S1_MAIR1,
SAVE_ARM_SMMU_GR1_CBA2R,
SAVE_ARM_SMMU_GR1_CBAR,
SAVE_ARM_SMMU_MAX_CNT,
};
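The enum doubles as the per-context-bank save-slot count: eleven registers per bank, so smmu->regs below is sized ARM_SMMU_MAX_CBS * SAVE_ARM_SMMU_MAX_CNT, while the global buffer holds one S2CR plus one SMR per mapping group plus sCR0. A compile-time sketch of that arithmetic (BUILD_BUG_ON form, placed inside any function):

/* Sketch: size sanity checks for the save buffers added by this patch. */
BUILD_BUG_ON(SAVE_ARM_SMMU_MAX_CNT != 11);	/* 11 regs saved per bank */
BUILD_BUG_ON(MAX_GLOBAL_REG_SAVE_ENTRIES != 2 * ARM_SMMU_MAX_SMRS + 1);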
static int force_stage;
module_param_named(force_stage, force_stage, int, S_IRUGO);
@ -407,6 +423,8 @@ struct arm_smmu_device {
enum tz_smmu_device_id sec_id;
int regulator_defer;
u64 regs[ARM_SMMU_MAX_CBS*(SAVE_ARM_SMMU_MAX_CNT)];
u64 reg_global[MAX_GLOBAL_REG_SAVE_ENTRIES];
};
struct arm_smmu_cfg {
@ -4170,6 +4188,8 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
idr_init(&smmu->asid_idr);
platform_set_drvdata(pdev, smmu);
err = register_regulator_notifier(smmu);
if (err)
goto out_free_irqs;
@ -4295,10 +4315,145 @@ release_memory:
return -ENOMEM;
}
#ifdef CONFIG_PM
static int arm_smmu_pm_suspend(struct device *dev)
{
struct arm_smmu_device *smmu = dev_get_drvdata(dev);
u64 *regs, *reg_global;
int j, k = 0;
u32 cb_count = 0;
void __iomem *base, *gr0_base, *gr1_base;
if (!smmu)
return -ENODEV;
if (!smmu->attach_count)
return 0;
if (arm_smmu_enable_clocks(smmu)) {
dev_err(smmu->dev, "failed to enable clocks for smmu");
return -EINVAL;
}
regs = &smmu->regs[0];
reg_global = &smmu->reg_global[0];
cb_count = smmu->num_context_banks;
gr0_base = ARM_SMMU_GR0(smmu);
gr1_base = ARM_SMMU_GR1(smmu);
for (j = 0; j < cb_count; j++) {
base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, j);
regs[k++] = readl_relaxed(base + ARM_SMMU_CB_SCTLR);
regs[k++] = readl_relaxed(base + ARM_SMMU_CB_ACTLR);
regs[k++] = readl_relaxed(base + ARM_SMMU_CB_TTBCR2);
regs[k++] = readq_relaxed(base + ARM_SMMU_CB_TTBR0);
regs[k++] = readq_relaxed(base + ARM_SMMU_CB_TTBR1);
regs[k++] = readl_relaxed(base + ARM_SMMU_CB_TTBCR);
regs[k++] = readl_relaxed(base + ARM_SMMU_CB_CONTEXTIDR);
regs[k++] = readl_relaxed(base + ARM_SMMU_CB_S1_MAIR0);
regs[k++] = readl_relaxed(base + ARM_SMMU_CB_S1_MAIR1);
regs[k++] = readl_relaxed(gr1_base + ARM_SMMU_GR1_CBA2R(j));
regs[k++] = readl_relaxed(gr1_base + ARM_SMMU_GR1_CBAR(j));
}
for (j = 0, k = 0; j < smmu->num_mapping_groups; j++) {
reg_global[k++] = readl_relaxed(
gr0_base + ARM_SMMU_GR0_S2CR(j));
reg_global[k++] = readl_relaxed(
gr0_base + ARM_SMMU_GR0_SMR(j));
}
reg_global[k++] = readl_relaxed(ARM_SMMU_GR0_NS(smmu)
+ ARM_SMMU_GR0_sCR0);
arm_smmu_disable_clocks(smmu);
return 0;
}
static int arm_smmu_pm_resume(struct device *dev)
{
struct arm_smmu_device *smmu = dev_get_drvdata(dev);
u64 *regs, *reg_global;
int j, k = 0;
u32 cb_count = 0;
void __iomem *base, *gr0_base, *gr1_base;
if (!smmu)
return -ENODEV;
if (!smmu->attach_count)
return 0;
if (arm_smmu_enable_clocks(smmu)) {
dev_err(smmu->dev, "failed to enable clocks for smmu");
return -EINVAL;
}
regs = &smmu->regs[0];
reg_global = &smmu->reg_global[0];
cb_count = smmu->num_context_banks;
gr0_base = ARM_SMMU_GR0(smmu);
gr1_base = ARM_SMMU_GR1(smmu);
for (j = 0; j < cb_count; j++) {
base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, j);
writel_relaxed(regs[k++], base + ARM_SMMU_CB_SCTLR);
writel_relaxed(regs[k++], base + ARM_SMMU_CB_ACTLR);
writel_relaxed(regs[k++], base + ARM_SMMU_CB_TTBCR2);
writeq_relaxed(regs[k++], base + ARM_SMMU_CB_TTBR0);
writeq_relaxed(regs[k++], base + ARM_SMMU_CB_TTBR1);
writel_relaxed(regs[k++], base + ARM_SMMU_CB_TTBCR);
writel_relaxed(regs[k++], base + ARM_SMMU_CB_CONTEXTIDR);
writel_relaxed(regs[k++], base + ARM_SMMU_CB_S1_MAIR0);
writel_relaxed(regs[k++], base + ARM_SMMU_CB_S1_MAIR1);
writel_relaxed(regs[k++], gr1_base + ARM_SMMU_GR1_CBA2R(j));
writel_relaxed(regs[k++], gr1_base + ARM_SMMU_GR1_CBAR(j));
}
for (j = 0, k = 0; j < smmu->num_mapping_groups; j++) {
writel_relaxed(reg_global[k++],
gr0_base + ARM_SMMU_GR0_S2CR(j));
writel_relaxed(reg_global[k++],
gr0_base + ARM_SMMU_GR0_SMR(j));
}
writel_relaxed(reg_global[k++],
ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
/* Do a tlb flush */
writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH);
writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH);
__arm_smmu_tlb_sync(smmu);
arm_smmu_disable_clocks(smmu);
return 0;
}
#else
static inline int arm_smmu_pm_suspend(struct device *dev)
{
return 0;
}
static inline int arm_smmu_pm_resume(struct device *dev)
{
return 0;
}
#endif
static const struct dev_pm_ops arm_smmu_pm_ops = {
#ifdef CONFIG_PM
.freeze_late = arm_smmu_pm_suspend,
.thaw_early = arm_smmu_pm_resume,
.restore_early = arm_smmu_pm_resume,
#endif
};
static struct platform_driver arm_smmu_driver = {
.driver = {
.name = "arm-smmu",
.of_match_table = of_match_ptr(arm_smmu_of_match),
.pm = &arm_smmu_pm_ops,
},
.probe = arm_smmu_device_dt_probe,
.remove = arm_smmu_device_remove,

View file

@ -812,8 +812,10 @@ static int32_t msm_csid_cmd(struct csid_device *csid_dev, void *arg)
unsigned char cid = csid_params.lut_params.vc_cfg_a[i].
cid;
csid_dev->current_csid_params.lut_params.vc_cfg_a[cid] =
csid_params.lut_params.vc_cfg_a[i];
if (cid < MAX_CID)
csid_dev->current_csid_params.lut_params.
vc_cfg_a[cid] = csid_params.lut_params.
vc_cfg_a[i];
CDBG("vc_cfg_a[%d] : dt=%d, decode_fmt=%d",
csid_params.lut_params.vc_cfg_a[i].cid,
@ -851,8 +853,10 @@ static int32_t msm_csid_cmd(struct csid_device *csid_dev, void *arg)
unsigned char cid = csid_params.lut_params.vc_cfg_a[i].
cid;
csid_dev->current_csid_params.lut_params.vc_cfg_a[cid] =
csid_params.lut_params.vc_cfg_a[i];
if (cid < MAX_CID)
csid_dev->current_csid_params.lut_params.
vc_cfg_a[cid] = csid_params.lut_params.
vc_cfg_a[i];
CDBG("vc_cfg_a[%d] : dt=%d, decode_fmt=%d",
csid_params.lut_params.vc_cfg_a[i].cid,

View file

@ -1481,8 +1481,8 @@ static int cnss_smmu_init(struct device *dev)
penv->smmu_iova_start,
penv->smmu_iova_len);
if (IS_ERR(mapping)) {
pr_err("%s: create mapping failed, err = %d\n", __func__, ret);
ret = PTR_ERR(mapping);
pr_err("%s: create mapping failed, err = %d\n", __func__, ret);
goto map_fail;
}

View file

@ -879,7 +879,6 @@ static RING_IDX xennet_fill_frags(struct netfront_queue *queue,
struct sk_buff *skb,
struct sk_buff_head *list)
{
struct skb_shared_info *shinfo = skb_shinfo(skb);
RING_IDX cons = queue->rx.rsp_cons;
struct sk_buff *nskb;
@ -888,15 +887,16 @@ static RING_IDX xennet_fill_frags(struct netfront_queue *queue,
RING_GET_RESPONSE(&queue->rx, ++cons);
skb_frag_t *nfrag = &skb_shinfo(nskb)->frags[0];
if (shinfo->nr_frags == MAX_SKB_FRAGS) {
if (skb_shinfo(skb)->nr_frags == MAX_SKB_FRAGS) {
unsigned int pull_to = NETFRONT_SKB_CB(skb)->pull_to;
BUG_ON(pull_to <= skb_headlen(skb));
__pskb_pull_tail(skb, pull_to - skb_headlen(skb));
}
BUG_ON(shinfo->nr_frags >= MAX_SKB_FRAGS);
BUG_ON(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS);
skb_add_rx_frag(skb, shinfo->nr_frags, skb_frag_page(nfrag),
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
skb_frag_page(nfrag),
rx->offset, rx->status, PAGE_SIZE);
skb_shinfo(nskb)->nr_frags = 0;

View file

@ -543,7 +543,7 @@ void acpi_pci_add_bus(struct pci_bus *bus)
union acpi_object *obj;
struct pci_host_bridge *bridge;
if (acpi_pci_disabled || !bus->bridge)
if (acpi_pci_disabled || !bus->bridge || !ACPI_HANDLE(bus->bridge))
return;
acpi_pci_slot_enumerate(bus);

View file

@ -908,6 +908,20 @@ static int __ipa_del_rt_tbl(struct ipa3_rt_tbl *entry)
return 0;
}
static int __ipa_rt_validate_rule_id(u16 rule_id)
{
if (!rule_id)
return 0;
if ((rule_id < ipahal_get_rule_id_hi_bit()) ||
(rule_id >= ((ipahal_get_rule_id_hi_bit()<<1)-1))) {
IPAERR_RL("Invalid rule_id provided 0x%x\n",
rule_id);
return -EPERM;
}
return 0;
}
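With the rule-id space split by a high bit reserved for system-generated IDs, the check accepts 0 (auto-assign) or a value in [hi_bit, 2*hi_bit - 1). Assuming a 10-bit field with hi_bit = 0x200, user-supplied IDs must fall in 0x200..0x3FE, with 0x3FF reserved. Sketch:

#define RULE_ID_HI_BIT	0x200				/* assumed 10-bit space */
#define RULE_ID_LIMIT	((RULE_ID_HI_BIT << 1) - 1)	/* 0x3ff, reserved */

static bool rule_id_ok(u16 rule_id)
{
	return rule_id == 0 ||				/* 0: driver assigns */
	       (rule_id >= RULE_ID_HI_BIT && rule_id < RULE_ID_LIMIT);
}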
static int __ipa_rt_validate_hndls(const struct ipa_rt_rule *rule,
struct ipa3_hdr_entry **hdr,
struct ipa3_hdr_proc_ctx_entry **proc_ctx)
@ -1023,6 +1037,8 @@ static int __ipa_add_rt_rule(enum ipa_ip_type ip, const char *name,
if (__ipa_rt_validate_hndls(rule, &hdr, &proc_ctx))
goto error;
if (__ipa_rt_validate_rule_id(rule_id))
goto error;
tbl = __ipa_add_rt_tbl(ip, name);
if (tbl == NULL || (tbl->cookie != IPA_RT_TBL_COOKIE)) {

View file

@ -325,11 +325,10 @@ qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun,
wait_for_completion(&tm_iocb->u.tmf.comp);
rval = tm_iocb->u.tmf.comp_status == CS_COMPLETE ?
QLA_SUCCESS : QLA_FUNCTION_FAILED;
rval = tm_iocb->u.tmf.data;
if ((rval != QLA_SUCCESS) || tm_iocb->u.tmf.data) {
ql_dbg(ql_dbg_taskm, vha, 0x8030,
if (rval != QLA_SUCCESS) {
ql_log(ql_log_warn, vha, 0x8030,
"TM IOCB failed (%x).\n", rval);
}

View file

@ -4938,8 +4938,9 @@ qla2x00_do_dpc(void *data)
}
}
if (test_and_clear_bit(ISP_ABORT_NEEDED,
&base_vha->dpc_flags)) {
if (test_and_clear_bit
(ISP_ABORT_NEEDED, &base_vha->dpc_flags) &&
!test_bit(UNLOADING, &base_vha->dpc_flags)) {
ql_dbg(ql_dbg_dpc, base_vha, 0x4007,
"ISP abort scheduled.\n");

View file

@ -520,18 +520,26 @@ static int sr_init_command(struct scsi_cmnd *SCpnt)
static int sr_block_open(struct block_device *bdev, fmode_t mode)
{
struct scsi_cd *cd;
struct scsi_device *sdev;
int ret = -ENXIO;
cd = scsi_cd_get(bdev->bd_disk);
if (!cd)
goto out;
sdev = cd->device;
scsi_autopm_get_device(sdev);
check_disk_change(bdev);
mutex_lock(&sr_mutex);
cd = scsi_cd_get(bdev->bd_disk);
if (cd) {
ret = cdrom_open(&cd->cdi, bdev, mode);
if (ret)
scsi_cd_put(cd);
}
ret = cdrom_open(&cd->cdi, bdev, mode);
mutex_unlock(&sr_mutex);
scsi_autopm_put_device(sdev);
if (ret)
scsi_cd_put(cd);
out:
return ret;
}
@ -559,6 +567,8 @@ static int sr_block_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
if (ret)
goto out;
scsi_autopm_get_device(sdev);
/*
* Send SCSI addressing ioctls directly to mid level, send other
* ioctls to cdrom/block level.
@ -567,15 +577,18 @@ static int sr_block_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
case SCSI_IOCTL_GET_IDLUN:
case SCSI_IOCTL_GET_BUS_NUMBER:
ret = scsi_ioctl(sdev, cmd, argp);
goto out;
goto put;
}
ret = cdrom_ioctl(&cd->cdi, bdev, mode, cmd, arg);
if (ret != -ENOSYS)
goto out;
goto put;
ret = scsi_ioctl(sdev, cmd, argp);
put:
scsi_autopm_put_device(sdev);
out:
mutex_unlock(&sr_mutex);
return ret;

View file

@ -278,7 +278,7 @@ static ssize_t vchan_show(struct kobject *kobj, struct kobj_attribute *attr,
}
static ssize_t vchan_store(struct kobject *kobj, struct kobj_attribute *attr,
char *buf, size_t count)
const char *buf, size_t count)
{
int ret;
@ -297,7 +297,7 @@ static ssize_t ctx_show(struct kobject *kobj, struct kobj_attribute *attr,
}
static ssize_t ctx_store(struct kobject *kobj, struct kobj_attribute *attr,
char *buf, size_t count)
const char *buf, size_t count)
{
int ret;
@ -316,7 +316,7 @@ static ssize_t expimp_show(struct kobject *kobj, struct kobj_attribute *attr,
}
static ssize_t expimp_store(struct kobject *kobj, struct kobj_attribute *attr,
char *buf, size_t count)
const char *buf, size_t count)
{
int ret;

View file

@ -2097,6 +2097,7 @@ static int icnss_driver_event_server_arrive(void *data)
err_setup_msa:
icnss_assign_msa_perm_all(penv, ICNSS_MSA_PERM_HLOS_ALL);
clear_bit(ICNSS_MSA0_ASSIGNED, &penv->state);
err_power_on:
icnss_hw_power_off(penv);
fail:

View file

@ -21,38 +21,77 @@
#include <linux/of_platform.h>
#include <linux/of_device.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <soc/qcom/subsystem_notif.h>
#define CLIENT_STATE_OFFSET 4
#define SUBSYS_STATE_OFFSET 8
static void __iomem *base_reg;
struct state_notifier_block {
const char *subsystem;
struct notifier_block nb;
u32 offset;
void *handle;
struct list_head notifier_list;
enum subsystem_type {
VIRTUAL,
NATIVE,
};
static LIST_HEAD(notifier_block_list);
struct subsystem_descriptor {
const char *name;
u32 offset;
enum subsystem_type type;
struct notifier_block nb;
void *handle;
unsigned int ssr_irq;
struct list_head subsystem_list;
struct work_struct work;
};
static int subsys_state_callback(struct notifier_block *this,
static LIST_HEAD(subsystem_descriptor_list);
static struct workqueue_struct *ssr_wq;
static void subsystem_notif_wq_func(struct work_struct *work)
{
struct subsystem_descriptor *subsystem =
container_of(work, struct subsystem_descriptor, work);
void *subsystem_handle;
int state, ret;
state = readl_relaxed(base_reg + subsystem->offset);
subsystem_handle = subsys_notif_add_subsys(subsystem->name);
ret = subsys_notif_queue_notification(subsystem_handle, state, NULL);
writel_relaxed(ret, base_reg + subsystem->offset + CLIENT_STATE_OFFSET);
}
static int subsystem_state_callback(struct notifier_block *this,
unsigned long value, void *priv)
{
struct state_notifier_block *notifier =
container_of(this, struct state_notifier_block, nb);
struct subsystem_descriptor *subsystem =
container_of(this, struct subsystem_descriptor, nb);
writel_relaxed(value, base_reg + notifier->offset);
writel_relaxed(value, base_reg + subsystem->offset +
SUBSYS_STATE_OFFSET);
return NOTIFY_OK;
}
static irqreturn_t subsystem_restart_irq_handler(int irq, void *dev_id)
{
struct subsystem_descriptor *subsystem = dev_id;
queue_work(ssr_wq, &subsystem->work);
return IRQ_HANDLED;
}
static int subsys_notif_virt_probe(struct platform_device *pdev)
{
struct device_node *node;
struct device_node *child = NULL;
const char *ss_type;
struct resource *res;
struct state_notifier_block *notif_block;
struct subsystem_descriptor *subsystem;
int ret = 0;
if (!pdev) {
@ -69,65 +108,109 @@ static int subsys_notif_virt_probe(struct platform_device *pdev)
return -ENOMEM;
}
ssr_wq = create_singlethread_workqueue("ssr_wq");
if (!ssr_wq) {
dev_err(&pdev->dev, "Workqueue creation failed\n");
return -ENOMEM;
}
for_each_child_of_node(node, child) {
notif_block = devm_kmalloc(&pdev->dev,
sizeof(struct state_notifier_block),
subsystem = devm_kmalloc(&pdev->dev,
sizeof(struct subsystem_descriptor),
GFP_KERNEL);
if (!notif_block)
return -ENOMEM;
notif_block->subsystem =
of_get_property(child, "subsys-name", NULL);
if (IS_ERR_OR_NULL(notif_block->subsystem)) {
dev_err(&pdev->dev, "Could not find subsystem name\n");
ret = -EINVAL;
goto err_nb;
if (!subsystem) {
ret = -ENOMEM;
goto err;
}
notif_block->nb.notifier_call = subsys_state_callback;
notif_block->handle =
subsys_notif_register_notifier(notif_block->subsystem,
&notif_block->nb);
if (IS_ERR_OR_NULL(notif_block->handle)) {
dev_err(&pdev->dev, "Could not register SSR notifier cb\n");
subsystem->name =
of_get_property(child, "subsys-name", NULL);
if (IS_ERR_OR_NULL(subsystem->name)) {
dev_err(&pdev->dev, "Could not find subsystem name\n");
ret = -EINVAL;
goto err_nb;
goto err;
}
ret = of_property_read_u32(child, "offset",
&notif_block->offset);
&subsystem->offset);
if (ret) {
dev_err(&pdev->dev, "offset reading for %s failed\n",
notif_block->subsystem);
subsystem->name);
ret = -EINVAL;
goto err_offset;
goto err;
}
list_add_tail(&notif_block->notifier_list,
&notifier_block_list);
ret = of_property_read_string(child, "type",
&ss_type);
if (ret) {
dev_err(&pdev->dev, "type reading for %s failed\n",
subsystem->name);
ret = -EINVAL;
goto err;
}
if (!strcmp(ss_type, "virtual"))
subsystem->type = VIRTUAL;
if (!strcmp(ss_type, "native"))
subsystem->type = NATIVE;
switch (subsystem->type) {
case NATIVE:
subsystem->nb.notifier_call =
subsystem_state_callback;
subsystem->handle =
subsys_notif_register_notifier(
subsystem->name, &subsystem->nb);
if (IS_ERR_OR_NULL(subsystem->handle)) {
dev_err(&pdev->dev,
"Could not register SSR notifier cb\n");
ret = -EINVAL;
goto err;
}
list_add_tail(&subsystem->subsystem_list,
&subsystem_descriptor_list);
break;
case VIRTUAL:
subsystem->ssr_irq =
of_irq_get_byname(child, "state-irq");
if (IS_ERR_OR_NULL(subsystem->ssr_irq)) {
dev_err(&pdev->dev, "Could not find IRQ\n");
ret = -EINVAL;
goto err;
}
ret = devm_request_threaded_irq(&pdev->dev,
subsystem->ssr_irq, NULL,
subsystem_restart_irq_handler,
IRQF_ONESHOT | IRQF_TRIGGER_RISING,
subsystem->name, subsystem);
break;
default:
dev_err(&pdev->dev, "Unsupported type %d\n",
subsystem->type);
}
}
return 0;
err_offset:
subsys_notif_unregister_notifier(notif_block->handle,
&notif_block->nb);
err_nb:
kfree(notif_block);
INIT_WORK(&subsystem->work, subsystem_notif_wq_func);
return 0;
err:
destroy_workqueue(ssr_wq);
return ret;
}
static int subsys_notif_virt_remove(struct platform_device *pdev)
{
struct state_notifier_block *notif_block;
struct subsystem_descriptor *subsystem, *node;
list_for_each_entry(notif_block, &notifier_block_list,
notifier_list) {
subsys_notif_unregister_notifier(notif_block->handle,
&notif_block->nb);
list_del(&notif_block->notifier_list);
destroy_workqueue(ssr_wq);
list_for_each_entry_safe(subsystem, node, &subsystem_descriptor_list,
subsystem_list) {
subsys_notif_unregister_notifier(subsystem->handle,
&subsystem->nb);
list_del(&subsystem->subsystem_list);
}
return 0;
}
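The offsets at the top of the file imply a fixed per-subsystem slot layout in the shared region; a hypothetical view of one slot:

struct subsys_slot {			/* hypothetical, for illustration */
	u32 remote_state;	/* +0: read by the IRQ-driven work item      */
	u32 client_state;	/* +CLIENT_STATE_OFFSET: queueing result ack */
	u32 subsys_state;	/* +SUBSYS_STATE_OFFSET: native SSR mirror   */
};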

View file

@ -70,7 +70,15 @@ static char *kstrdupcase(const char *str, gfp_t gfp, bool to_upper)
return ret;
}
static void dual_role_changed_work(struct work_struct *work);
static void dual_role_changed_work(struct work_struct *work)
{
struct dual_role_phy_instance *dual_role =
container_of(work, struct dual_role_phy_instance,
changed_work);
dev_dbg(&dual_role->dev, "%s\n", __func__);
kobject_uevent(&dual_role->dev.kobj, KOBJ_CHANGE);
}
void dual_role_instance_changed(struct dual_role_phy_instance *dual_role)
{
@ -497,17 +505,6 @@ out:
return ret;
}
static void dual_role_changed_work(struct work_struct *work)
{
struct dual_role_phy_instance *dual_role =
container_of(work, struct dual_role_phy_instance,
changed_work);
dev_dbg(&dual_role->dev, "%s\n", __func__);
sysfs_update_group(&dual_role->dev.kobj, &dual_role_attr_group);
kobject_uevent(&dual_role->dev.kobj, KOBJ_CHANGE);
}
/******************* Module Init ***********************************/
static int __init dual_role_class_init(void)

View file

@ -1932,10 +1932,12 @@ struct dentry *d_make_root(struct inode *root_inode)
static const struct qstr name = QSTR_INIT("/", 1);
res = __d_alloc(root_inode->i_sb, &name);
if (res)
if (res) {
res->d_flags |= DCACHE_RCUACCESS;
d_instantiate(res, root_inode);
else
} else {
iput(root_inode);
}
}
return res;
}

View file

@ -2101,7 +2101,7 @@ static int ext4_check_descriptors(struct super_block *sb,
struct ext4_sb_info *sbi = EXT4_SB(sb);
ext4_fsblk_t first_block = le32_to_cpu(sbi->s_es->s_first_data_block);
ext4_fsblk_t last_block;
ext4_fsblk_t last_bg_block = sb_block + ext4_bg_num_gdb(sb, 0) + 1;
ext4_fsblk_t last_bg_block = sb_block + ext4_bg_num_gdb(sb, 0);
ext4_fsblk_t block_bitmap;
ext4_fsblk_t inode_bitmap;
ext4_fsblk_t inode_table;
@ -3770,13 +3770,13 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
goto failed_mount2;
}
}
sbi->s_gdb_count = db_count;
if (!ext4_check_descriptors(sb, logical_sb_block, &first_not_zeroed)) {
ext4_msg(sb, KERN_ERR, "group descriptors corrupted!");
ret = -EFSCORRUPTED;
goto failed_mount2;
}
sbi->s_gdb_count = db_count;
get_random_bytes(&sbi->s_next_generation, sizeof(u32));
spin_lock_init(&sbi->s_next_gen_lock);

View file

@ -493,15 +493,17 @@ static int ea_get(struct inode *inode, struct ea_buffer *ea_buf, int min_size)
if (size > PSIZE) {
/*
* To keep the rest of the code simple. Allocate a
* contiguous buffer to work with
* contiguous buffer to work with. Make the buffer large
* enough to make use of the whole extent.
*/
ea_buf->xattr = kmalloc(size, GFP_KERNEL);
ea_buf->max_size = (size + sb->s_blocksize - 1) &
~(sb->s_blocksize - 1);
ea_buf->xattr = kmalloc(ea_buf->max_size, GFP_KERNEL);
if (ea_buf->xattr == NULL)
return -ENOMEM;
ea_buf->flag = EA_MALLOC;
ea_buf->max_size = (size + sb->s_blocksize - 1) &
~(sb->s_blocksize - 1);
if (ea_size == 0)
return 0;
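Worked example of the round-up: with s_blocksize = 4096 and an on-disk EA size of 5000 bytes, max_size = (5000 + 4095) & ~4095 = 8192, so the kmalloc now covers the whole extent that will back the EA instead of just the reported size, which is what lets later code grow into the buffer safely.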

View file

@ -605,12 +605,21 @@ int __legitimize_mnt(struct vfsmount *bastard, unsigned seq)
return 0;
mnt = real_mount(bastard);
mnt_add_count(mnt, 1);
smp_mb(); // see mntput_no_expire()
if (likely(!read_seqretry(&mount_lock, seq)))
return 0;
if (bastard->mnt_flags & MNT_SYNC_UMOUNT) {
mnt_add_count(mnt, -1);
return 1;
}
lock_mount_hash();
if (unlikely(bastard->mnt_flags & MNT_DOOMED)) {
mnt_add_count(mnt, -1);
unlock_mount_hash();
return 1;
}
unlock_mount_hash();
/* caller will mntput() */
return -1;
}
@ -1142,12 +1151,27 @@ static DECLARE_DELAYED_WORK(delayed_mntput_work, delayed_mntput);
static void mntput_no_expire(struct mount *mnt)
{
rcu_read_lock();
mnt_add_count(mnt, -1);
if (likely(mnt->mnt_ns)) { /* shouldn't be the last one */
if (likely(READ_ONCE(mnt->mnt_ns))) {
/*
* Since we don't do lock_mount_hash() here,
* ->mnt_ns can change under us. However, if it's
* non-NULL, then there's a reference that won't
* be dropped until after an RCU delay done after
* turning ->mnt_ns NULL. So if we observe it
* non-NULL under rcu_read_lock(), the reference
* we are dropping is not the final one.
*/
mnt_add_count(mnt, -1);
rcu_read_unlock();
return;
}
lock_mount_hash();
/*
* make sure that if __legitimize_mnt() has not seen us grab
* mount_lock, we'll see their refcount increment here.
*/
smp_mb();
mnt_add_count(mnt, -1);
if (mnt_get_count(mnt)) {
rcu_read_unlock();
unlock_mount_hash();
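The comments above describe a barrier pairing; laid out per CPU (a sketch, not additional code):

/*
 *   __legitimize_mnt()                mntput_no_expire()
 *   mnt_add_count(mnt, 1)             lock_mount_hash()  // bumps mount_lock
 *   smp_mb()                          smp_mb()
 *   read_seqretry(&mount_lock, seq)   mnt_add_count(mnt, -1)
 *                                     mnt_get_count(mnt)
 *
 * Either the legitimizer sees the seqcount change and backs off, or
 * mntput sees the increment and leaves the mount alive.
 */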

View file

@ -799,6 +799,18 @@ static inline int pmd_free_pte_page(pmd_t *pmd)
}
#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */
#ifndef __HAVE_ARCH_PFN_MODIFY_ALLOWED
static inline bool pfn_modify_allowed(unsigned long pfn, pgprot_t prot)
{
return true;
}
static inline bool arch_has_pfn_modify_check(void)
{
return false;
}
#endif /* !_HAVE_ARCH_PFN_MODIFY_ALLOWED */
#endif /* !__ASSEMBLY__ */
#ifndef io_remap_pfn_range

View file

@ -61,6 +61,8 @@ extern ssize_t cpu_show_spectre_v2(struct device *dev,
struct device_attribute *attr, char *buf);
extern ssize_t cpu_show_spec_store_bypass(struct device *dev,
struct device_attribute *attr, char *buf);
extern ssize_t cpu_show_l1tf(struct device *dev,
struct device_attribute *attr, char *buf);
extern __printf(4, 5)
struct device *cpu_device_create(struct device *parent, void *drvdata,

View file

@ -2103,6 +2103,8 @@ int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *);
int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
unsigned long pfn);
int vm_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
unsigned long pfn, pgprot_t pgprot);
int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
unsigned long pfn);
int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len);

View file

@ -162,6 +162,7 @@ void ring_buffer_record_enable(struct ring_buffer *buffer);
void ring_buffer_record_off(struct ring_buffer *buffer);
void ring_buffer_record_on(struct ring_buffer *buffer);
int ring_buffer_record_is_on(struct ring_buffer *buffer);
int ring_buffer_record_is_set_on(struct ring_buffer *buffer);
void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu);
void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu);

View file

@ -14,5 +14,7 @@ extern int try_to_unuse(unsigned int, bool, unsigned long);
extern int swap_ratio(struct swap_info_struct **si);
extern void setup_swap_ratio(struct swap_info_struct *p, int prio);
extern bool is_swap_ratio_group(int prio);
extern unsigned long generic_max_swapfile_size(void);
extern unsigned long max_swapfile_size(void);
#endif /* _LINUX_SWAPFILE_H */

View file

@ -26,11 +26,7 @@
#ifdef __KERNEL__
#ifdef CONFIG_DEBUG_STACK_USAGE
# define THREADINFO_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO)
#else
# define THREADINFO_GFP (GFP_KERNEL | __GFP_NOTRACK)
#endif
#define THREADINFO_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO)
/*
* flag set/clear/test wrappers

View file

@ -3007,6 +3007,20 @@ static inline int ib_check_mr_access(int flags)
return 0;
}
static inline bool ib_access_writable(int access_flags)
{
/*
* We have writable memory backing the MR if any of the following
* access flags are set. "Local write" and "remote write" obviously
* require write access. "Remote atomic" can do things like fetch and
* add, which will modify memory, and "MW bind" can change permissions
* by binding a window.
*/
return access_flags &
(IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE |
IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_MW_BIND);
}
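Usage sketch: any flag in the set makes the backing memory writable, so even a read-looking registration that allows remote atomics must be pinned writable:

int access = IB_ACCESS_REMOTE_READ | IB_ACCESS_REMOTE_ATOMIC;

/* Remote atomics (fetch-and-add etc.) modify memory: writable is true. */
bool writable = ib_access_writable(access);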
/**
* ib_check_mr_status: lightweight check of MR status.
* This routine may provide status checks on a selected

View file

@ -292,6 +292,9 @@ int q6asm_open_read_v4(struct audio_client *ac, uint32_t format,
int q6asm_open_read_v5(struct audio_client *ac, uint32_t format,
uint16_t bits_per_sample, bool ts_mode);
int q6asm_open_read_with_retry(struct audio_client *ac, uint32_t format,
uint16_t bits_per_sample, bool ts_mode);
int q6asm_open_write(struct audio_client *ac, uint32_t format
/*, uint16_t bits_per_sample*/);
@ -340,6 +343,9 @@ int q6asm_open_read_write_v2(struct audio_client *ac, uint32_t rd_format,
int q6asm_open_loopback_v2(struct audio_client *ac,
uint16_t bits_per_sample);
int q6asm_open_loopback_with_retry(struct audio_client *ac,
uint16_t bits_per_sample);
int q6asm_open_transcode_loopback(struct audio_client *ac,
uint16_t bits_per_sample, uint32_t source_format,
uint32_t sink_format);
