Merge "Merge android-4.4.117 (4ec3656) into msm-4.4"

Linux Build Service Account 2018-03-07 09:16:32 -08:00 committed by Gerrit - the friendly Code Review server
commit 296628bc78
56 changed files with 1630 additions and 489 deletions

View file

@@ -58,6 +58,6 @@ Example:
     interrupts = <0 35 0x4>;
     status = "disabled";
     dmas = <&dmahost 12 0 1>,
-        <&dmahost 13 0 1 0>;
+        <&dmahost 13 1 0>;
     dma-names = "rx", "rx";
 };

View file

@@ -233,7 +233,7 @@ data_err=ignore(*)  Just print an error message if an error occurs
 data_err=abort      Abort the journal if an error occurs in a file
                     data buffer in ordered mode.
-grpid               Give objects the same group ID as their creator.
+grpid               New objects have the group ID of their parent.
 bsdgroups
 nogrpid (*)         New objects have the group ID of their creator.

View file

@@ -1,6 +1,6 @@
 VERSION = 4
 PATCHLEVEL = 4
-SUBLEVEL = 116
+SUBLEVEL = 117
 EXTRAVERSION =
 NAME = Blurry Fish Butt

View file

@@ -461,6 +461,7 @@
     compatible = "samsung,exynos4210-ohci";
     reg = <0xec300000 0x100>;
     interrupts = <23>;
+    interrupt-parent = <&vic1>;
     clocks = <&clocks CLK_USB_HOST>;
     clock-names = "usbhost";
     #address-cells = <1>;

View file

@@ -349,7 +349,7 @@
 spi0: spi@e0100000 {
     status = "okay";
     num-cs = <3>;
-    cs-gpios = <&gpio1 7 0>, <&spics 0>, <&spics 1>;
+    cs-gpios = <&gpio1 7 0>, <&spics 0 0>, <&spics 1 0>;
 
     stmpe610@0 {
         compatible = "st,stmpe610";

View file

@@ -141,8 +141,8 @@
     reg = <0xb4100000 0x1000>;
     interrupts = <0 105 0x4>;
     status = "disabled";
-    dmas = <&dwdma0 0x600 0 0 1>, /* 0xC << 11 */
-           <&dwdma0 0x680 0 1 0>; /* 0xD << 7 */
+    dmas = <&dwdma0 12 0 1>,
+           <&dwdma0 13 1 0>;
     dma-names = "tx", "rx";
 };

View file

@@ -100,7 +100,7 @@
     reg = <0xb2800000 0x1000>;
     interrupts = <0 29 0x4>;
     status = "disabled";
-    dmas = <&dwdma0 0 0 0 0>;
+    dmas = <&dwdma0 0 0 0>;
     dma-names = "data";
 };
@@ -288,8 +288,8 @@
     #size-cells = <0>;
     interrupts = <0 31 0x4>;
     status = "disabled";
-    dmas = <&dwdma0 0x2000 0 0 0>, /* 0x4 << 11 */
-           <&dwdma0 0x0280 0 0 0>; /* 0x5 << 7 */
+    dmas = <&dwdma0 4 0 0>,
+           <&dwdma0 5 0 0>;
     dma-names = "tx", "rx";
 };

View file

@@ -194,6 +194,7 @@
 rtc@fc900000 {
     compatible = "st,spear600-rtc";
     reg = <0xfc900000 0x1000>;
+    interrupt-parent = <&vic0>;
     interrupts = <10>;
     status = "disabled";
 };

View file

@@ -8,6 +8,7 @@
  */
 #include "stih407-clock.dtsi"
 #include "stih407-family.dtsi"
+#include <dt-bindings/gpio/gpio.h>
 / {
     soc {
         sti-display-subsystem {
@@ -112,7 +113,7 @@
              <&clk_s_d2_quadfs 0>,
              <&clk_s_d2_quadfs 1>;
-        hdmi,hpd-gpio = <&pio5 3>;
+        hdmi,hpd-gpio = <&pio5 3 GPIO_ACTIVE_LOW>;
         reset-names = "hdmi";
         resets = <&softreset STIH407_HDMI_TX_PHY_SOFTRESET>;
         ddc = <&hdmiddc>;

View file

@@ -9,6 +9,7 @@
 #include "stih410-clock.dtsi"
 #include "stih407-family.dtsi"
 #include "stih410-pinctrl.dtsi"
+#include <dt-bindings/gpio/gpio.h>
 / {
     aliases {
         bdisp0 = &bdisp0;
@@ -203,7 +204,7 @@
              <&clk_s_d2_quadfs 0>,
              <&clk_s_d2_quadfs 1>;
-        hdmi,hpd-gpio = <&pio5 3>;
+        hdmi,hpd-gpio = <&pio5 3 GPIO_ACTIVE_LOW>;
         reset-names = "hdmi";
         resets = <&softreset STIH407_HDMI_TX_PHY_SOFTRESET>;
         ddc = <&hdmiddc>;

View file

@@ -132,3 +132,7 @@ static struct platform_driver tosa_bt_driver = {
     },
 };
 module_platform_driver(tosa_bt_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Dmitry Baryshkov");
+MODULE_DESCRIPTION("Bluetooth built-in chip control");

View file

@@ -110,7 +110,7 @@ COMPAT_SYSCALL_DEFINE2(s390_setregid16, u16, rgid, u16, egid)
 
 COMPAT_SYSCALL_DEFINE1(s390_setgid16, u16, gid)
 {
-    return sys_setgid((gid_t)gid);
+    return sys_setgid(low2highgid(gid));
 }
 
 COMPAT_SYSCALL_DEFINE2(s390_setreuid16, u16, ruid, u16, euid)
@@ -120,7 +120,7 @@ COMPAT_SYSCALL_DEFINE2(s390_setreuid16, u16, ruid, u16, euid)
 
 COMPAT_SYSCALL_DEFINE1(s390_setuid16, u16, uid)
 {
-    return sys_setuid((uid_t)uid);
+    return sys_setuid(low2highuid(uid));
 }
 
 COMPAT_SYSCALL_DEFINE3(s390_setresuid16, u16, ruid, u16, euid, u16, suid)
@@ -173,12 +173,12 @@ COMPAT_SYSCALL_DEFINE3(s390_getresgid16, u16 __user *, rgidp,
 
 COMPAT_SYSCALL_DEFINE1(s390_setfsuid16, u16, uid)
 {
-    return sys_setfsuid((uid_t)uid);
+    return sys_setfsuid(low2highuid(uid));
 }
 
 COMPAT_SYSCALL_DEFINE1(s390_setfsgid16, u16, gid)
 {
-    return sys_setfsgid((gid_t)gid);
+    return sys_setfsgid(low2highgid(gid));
 }
 
 static int groups16_to_user(u16 __user *grouplist, struct group_info *group_info)
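For context on this fix: the legacy 16-bit ID syscalls use 0xFFFF, i.e. (u16)-1, as the "leave this ID unchanged" sentinel, and a plain (uid_t)/(gid_t) cast zero-extends that sentinel into the literal ID 65535. The helpers preserve it instead; a minimal sketch modeled on the low2highuid()/low2highgid() macros from include/linux/highuid.h:

    /* Preserve the 16-bit "no change" sentinel instead of
     * zero-extending it to UID/GID 65535 (the bug the plain
     * casts above had). */
    #define low2highuid(uid) ((uid) == (u16)-1 ? (uid_t)-1 : (uid_t)(uid))
    #define low2highgid(gid) ((gid) == (u16)-1 ? (gid_t)-1 : (gid_t)(gid))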

View file

@@ -113,7 +113,7 @@ struct cpuinfo_x86 {
     char x86_vendor_id[16];
     char x86_model_id[64];
     /* in KB - valid for CPUS which support this call: */
-    int x86_cache_size;
+    unsigned int x86_cache_size;
     int x86_cache_alignment;    /* In bytes */
     /* Cache QoS architectural values: */
     int x86_cache_max_rmid;     /* max index */

View file

@@ -955,7 +955,7 @@ static void identify_cpu(struct cpuinfo_x86 *c)
     int i;
 
     c->loops_per_jiffy = loops_per_jiffy;
-    c->x86_cache_size = -1;
+    c->x86_cache_size = 0;
     c->x86_vendor = X86_VENDOR_UNKNOWN;
     c->x86_model = c->x86_mask = 0; /* So far unknown... */
     c->x86_vendor_id[0] = '\0';     /* Unset */

View file

@@ -1075,7 +1075,7 @@ static struct microcode_ops microcode_intel_ops = {
 
 static int __init calc_llc_size_per_core(struct cpuinfo_x86 *c)
 {
-    u64 llc_size = c->x86_cache_size * 1024;
+    u64 llc_size = c->x86_cache_size * 1024ULL;
 
     do_div(llc_size, c->x86_max_cores);
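For context, the arithmetic this group of changes (unsigned x86_cache_size plus the 1024ULL multiplier) guards against: with the old signed field, the -1 "unknown" sentinel sign-extends into an enormous 64-bit byte count before do_div(). A quick user-space illustration:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        int cache_kb = -1;                   /* old "cache size unknown" sentinel */
        uint64_t llc_size = cache_kb * 1024; /* int math gives -1024, then sign-extends */

        printf("llc_size = %llu\n", (unsigned long long)llc_size);
        /* prints 18446744073709550592 (0xFFFFFFFFFFFFFC00) */
        return 0;
    }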

View file

@@ -87,8 +87,8 @@ static int show_cpuinfo(struct seq_file *m, void *v)
     }
 
     /* Cache size */
-    if (c->x86_cache_size >= 0)
-        seq_printf(m, "cache size\t: %d KB\n", c->x86_cache_size);
+    if (c->x86_cache_size)
+        seq_printf(m, "cache size\t: %u KB\n", c->x86_cache_size);
 
     show_cpuinfo_core(m, c, cpu);
     show_cpuinfo_misc(m, c);

View file

@@ -4503,7 +4503,7 @@ void kvm_mmu_setup(struct kvm_vcpu *vcpu)
 typedef bool (*slot_level_handler) (struct kvm *kvm, unsigned long *rmap);
 
 /* The caller should hold mmu-lock before calling this function. */
-static bool
+static __always_inline bool
 slot_handle_level_range(struct kvm *kvm, struct kvm_memory_slot *memslot,
             slot_level_handler fn, int start_level, int end_level,
             gfn_t start_gfn, gfn_t end_gfn, bool lock_flush_tlb)
@@ -4533,7 +4533,7 @@ slot_handle_level_range(struct kvm *kvm, struct kvm_memory_slot *memslot,
     return flush;
 }
 
-static bool
+static __always_inline bool
 slot_handle_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
           slot_level_handler fn, int start_level, int end_level,
           bool lock_flush_tlb)
@@ -4544,7 +4544,7 @@ slot_handle_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
             lock_flush_tlb);
 }
 
-static bool
+static __always_inline bool
 slot_handle_all_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
               slot_level_handler fn, bool lock_flush_tlb)
 {
@@ -4552,7 +4552,7 @@ slot_handle_all_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
              PT_MAX_HUGEPAGE_LEVEL, lock_flush_tlb);
 }
 
-static bool
+static __always_inline bool
 slot_handle_large_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
             slot_level_handler fn, bool lock_flush_tlb)
 {
@@ -4560,7 +4560,7 @@ slot_handle_large_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
              PT_MAX_HUGEPAGE_LEVEL, lock_flush_tlb);
 }
 
-static bool
+static __always_inline bool
 slot_handle_leaf(struct kvm *kvm, struct kvm_memory_slot *memslot,
          slot_level_handler fn, bool lock_flush_tlb)
 {
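For context (a reading of the change, not text from the patch): each of these helpers receives a slot_level_handler function pointer and invokes it in a loop, so the indirect call lands on a hot path. Forcing the helpers inline lets the compiler specialize them into callers that pass a fixed callback, turning fn(...) into a direct call, which sidesteps per-iteration retpoline overhead. A hypothetical standalone illustration of the effect:

    #include <stdbool.h>

    /* as in the kernel's compiler.h */
    #define __always_inline inline __attribute__((always_inline))

    typedef bool (*slot_level_handler)(int level);

    /* Once inlined into a caller with a known 'fn', the compiler can
     * devirtualize fn(level) into a direct call. */
    static __always_inline bool walk_levels(slot_level_handler fn, int max)
    {
        bool flush = false;
        int level;

        for (level = 1; level <= max; level++)
            flush |= fn(level);
        return flush;
    }

    static bool clear_dirty(int level) { return level > 1; }

    bool example(void)
    {
        return walk_levels(clear_dirty, 3); /* compiles to direct calls */
    }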

View file

@@ -607,7 +607,7 @@ struct devfreq *devm_devfreq_add_device(struct device *dev,
     devfreq = devfreq_add_device(dev, profile, governor_name, data);
     if (IS_ERR(devfreq)) {
         devres_free(ptr);
-        return ERR_PTR(-ENOMEM);
+        return devfreq;
     }
 
     *ptr = devfreq;
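For context: this wrapper allocates a devres node, calls the real devfreq_add_device(), and previously replaced whatever error that call returned with a blanket -ENOMEM; the fix propagates the original ERR_PTR to the caller. A minimal sketch of the managed-resource pattern, with hypothetical my_* names standing in for the wrapped API:

    #include <linux/device.h>
    #include <linux/err.h>
    #include <linux/slab.h>

    struct my_obj;                                  /* hypothetical */
    extern struct my_obj *my_add_device(struct device *dev);
    extern void my_del_device(struct my_obj *obj);

    static void my_devres_release(struct device *dev, void *res)
    {
        my_del_device(*(struct my_obj **)res);
    }

    struct my_obj *devm_my_add_device(struct device *dev)
    {
        struct my_obj **ptr, *obj;

        ptr = devres_alloc(my_devres_release, sizeof(*ptr), GFP_KERNEL);
        if (!ptr)
            return ERR_PTR(-ENOMEM);   /* here the allocation really failed */

        obj = my_add_device(dev);
        if (IS_ERR(obj)) {
            devres_free(ptr);
            return obj;                /* keep the real error, don't mask it */
        }

        *ptr = obj;
        devres_add(dev, ptr);
        return obj;
    }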

View file

@@ -946,7 +946,7 @@ int radeon_uvd_calc_upll_dividers(struct radeon_device *rdev,
         /* calc dclk divider with current vco freq */
         dclk_div = radeon_uvd_calc_upll_post_div(vco_freq, dclk,
                              pd_min, pd_even);
-        if (vclk_div > pd_max)
+        if (dclk_div > pd_max)
             break; /* vco is too big, it has to stop */
 
         /* calc score with current vco freq */

View file

@@ -2483,7 +2483,6 @@ err_steer_free_bitmap:
     kfree(ibdev->ib_uc_qpns_bitmap);
 
 err_steer_qp_release:
-    if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED)
     mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
                   ibdev->steer_qpn_count);
 err_counter:
@@ -2586,11 +2585,9 @@ static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
         ibdev->iboe.nb.notifier_call = NULL;
     }
 
-    if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED) {
     mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
                   ibdev->steer_qpn_count);
     kfree(ibdev->ib_uc_qpns_bitmap);
-    }
 
     iounmap(ibdev->uar_map);
     for (p = 0; p < ibdev->num_ports; ++p)

View file

@@ -974,6 +974,7 @@ static void dec_pending(struct dm_io *io, int error)
         } else {
             /* done with normal IO or empty flush */
             trace_block_bio_complete(md->queue, bio, io_error);
+            if (io_error)
                 bio->bi_error = io_error;
             bio_endio(bio);
         }

View file

@@ -410,9 +410,11 @@ static int r820t_write(struct r820t_priv *priv, u8 reg, const u8 *val,
     return 0;
 }
 
-static int r820t_write_reg(struct r820t_priv *priv, u8 reg, u8 val)
+static inline int r820t_write_reg(struct r820t_priv *priv, u8 reg, u8 val)
 {
-    return r820t_write(priv, reg, &val, 1);
+    u8 tmp = val; /* work around GCC PR81715 with asan-stack=1 */
+
+    return r820t_write(priv, reg, &tmp, 1);
 }
 
 static int r820t_read_cache_reg(struct r820t_priv *priv, int reg)
@@ -425,17 +427,18 @@ static int r820t_read_cache_reg(struct r820t_priv *priv, int reg)
     return -EINVAL;
 }
 
-static int r820t_write_reg_mask(struct r820t_priv *priv, u8 reg, u8 val,
+static inline int r820t_write_reg_mask(struct r820t_priv *priv, u8 reg, u8 val,
                 u8 bit_mask)
 {
+    u8 tmp = val;
     int rc = r820t_read_cache_reg(priv, reg);
 
     if (rc < 0)
         return rc;
 
-    val = (rc & ~bit_mask) | (val & bit_mask);
+    tmp = (rc & ~bit_mask) | (tmp & bit_mask);
 
-    return r820t_write(priv, reg, &val, 1);
+    return r820t_write(priv, reg, &tmp, 1);
 }
 
 static int r820t_read(struct r820t_priv *priv, u8 reg, u8 *val, int len)

View file

@@ -5666,6 +5666,7 @@ static void mvpp2_set_rx_mode(struct net_device *dev)
     int id = port->id;
     bool allmulti = dev->flags & IFF_ALLMULTI;
 
+retry:
     mvpp2_prs_mac_promisc_set(priv, id, dev->flags & IFF_PROMISC);
     mvpp2_prs_mac_multi_set(priv, id, MVPP2_PE_MAC_MC_ALL, allmulti);
     mvpp2_prs_mac_multi_set(priv, id, MVPP2_PE_MAC_MC_IP6, allmulti);
@@ -5673,9 +5674,13 @@ static void mvpp2_set_rx_mode(struct net_device *dev)
     /* Remove all port->id's mcast enries */
     mvpp2_prs_mcast_del_all(priv, id);
 
-    if (allmulti && !netdev_mc_empty(dev)) {
-        netdev_for_each_mc_addr(ha, dev)
-            mvpp2_prs_mac_da_accept(priv, id, ha->addr, true);
+    if (!allmulti) {
+        netdev_for_each_mc_addr(ha, dev) {
+            if (mvpp2_prs_mac_da_accept(priv, id, ha->addr, true)) {
+                allmulti = true;
+                goto retry;
+            }
+        }
     }
 }

View file

@@ -280,6 +280,9 @@ void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt)
     u64 in_param = 0;
     int err;
 
+    if (!cnt)
+        return;
+
     if (mlx4_is_mfunc(dev)) {
         set_param_l(&in_param, base_qpn);
         set_param_h(&in_param, cnt);

View file

@@ -1127,7 +1127,7 @@ static u8 _rtl8821ae_dbi_read(struct rtl_priv *rtlpriv, u16 addr)
     }
     if (0 == tmp) {
         read_addr = REG_DBI_RDATA + addr % 4;
-        ret = rtl_read_word(rtlpriv, read_addr);
+        ret = rtl_read_byte(rtlpriv, read_addr);
     }
     return ret;
 }
@@ -1169,7 +1169,8 @@ static void _rtl8821ae_enable_aspm_back_door(struct ieee80211_hw *hw)
     }
 
     tmp = _rtl8821ae_dbi_read(rtlpriv, 0x70f);
-    _rtl8821ae_dbi_write(rtlpriv, 0x70f, tmp | BIT(7));
+    _rtl8821ae_dbi_write(rtlpriv, 0x70f, tmp | BIT(7) |
+                 ASPM_L1_LATENCY << 3);
 
     tmp = _rtl8821ae_dbi_read(rtlpriv, 0x719);
     _rtl8821ae_dbi_write(rtlpriv, 0x719, tmp | BIT(3) | BIT(4));

View file

@@ -99,6 +99,7 @@
 #define RTL_USB_MAX_RX_COUNT    100
 #define QBSS_LOAD_SIZE          5
 #define MAX_WMMELE_LENGTH       64
+#define ASPM_L1_LATENCY         7
 
 #define TOTAL_CAM_ENTRY         32

View file

@@ -58,6 +58,7 @@ static void tm_to_opal(struct rtc_time *tm, u32 *y_m_d, u64 *h_m_s_ms)
 static int opal_get_rtc_time(struct device *dev, struct rtc_time *tm)
 {
     long rc = OPAL_BUSY;
+    int retries = 10;
     u32 y_m_d;
     u64 h_m_s_ms;
     __be32 __y_m_d;
@@ -67,8 +68,11 @@ static int opal_get_rtc_time(struct device *dev, struct rtc_time *tm)
         rc = opal_rtc_read(&__y_m_d, &__h_m_s_ms);
         if (rc == OPAL_BUSY_EVENT)
             opal_poll_events(NULL);
-        else
+        else if (retries-- && (rc == OPAL_HARDWARE
+                       || rc == OPAL_INTERNAL_ERROR))
             msleep(10);
+        else if (rc != OPAL_BUSY && rc != OPAL_BUSY_EVENT)
+            break;
     }
 
     if (rc != OPAL_SUCCESS)
@@ -84,6 +88,7 @@ static int opal_get_rtc_time(struct device *dev, struct rtc_time *tm)
 static int opal_set_rtc_time(struct device *dev, struct rtc_time *tm)
 {
     long rc = OPAL_BUSY;
+    int retries = 10;
     u32 y_m_d = 0;
     u64 h_m_s_ms = 0;
@@ -92,8 +97,11 @@ static int opal_set_rtc_time(struct device *dev, struct rtc_time *tm)
         rc = opal_rtc_write(y_m_d, h_m_s_ms);
         if (rc == OPAL_BUSY_EVENT)
             opal_poll_events(NULL);
-        else
+        else if (retries-- && (rc == OPAL_HARDWARE
+                       || rc == OPAL_INTERNAL_ERROR))
             msleep(10);
+        else if (rc != OPAL_BUSY && rc != OPAL_BUSY_EVENT)
+            break;
     }
 
     return rc == OPAL_SUCCESS ? 0 : -EIO;

View file

@@ -3,3 +3,4 @@ optee-objs += core.o
 optee-objs += call.o
 optee-objs += rpc.o
 optee-objs += supp.o
+optee-objs += shm_pool.o

View file

@@ -15,6 +15,7 @@
 #include <linux/device.h>
 #include <linux/err.h>
 #include <linux/errno.h>
+#include <linux/mm.h>
 #include <linux/slab.h>
 #include <linux/tee_drv.h>
 #include <linux/types.h>
@@ -135,6 +136,7 @@ u32 optee_do_call_with_arg(struct tee_context *ctx, phys_addr_t parg)
     struct optee *optee = tee_get_drvdata(ctx->teedev);
     struct optee_call_waiter w;
     struct optee_rpc_param param = { };
+    struct optee_call_ctx call_ctx = { };
     u32 ret;
 
     param.a0 = OPTEE_SMC_CALL_WITH_ARG;
@@ -159,13 +161,14 @@ u32 optee_do_call_with_arg(struct tee_context *ctx, phys_addr_t parg)
             param.a1 = res.a1;
             param.a2 = res.a2;
             param.a3 = res.a3;
-            optee_handle_rpc(ctx, &param);
+            optee_handle_rpc(ctx, &param, &call_ctx);
         } else {
             ret = res.a0;
             break;
         }
     }
 
+    optee_rpc_finalize_call(&call_ctx);
     /*
      * We're done with our thread in secure world, if there's any
      * thread waiters wake up one.
@@ -442,3 +445,218 @@ void optee_disable_shm_cache(struct optee *optee)
     }
     optee_cq_wait_final(&optee->call_queue, &w);
 }
+
+#define PAGELIST_ENTRIES_PER_PAGE \
+    ((OPTEE_MSG_NONCONTIG_PAGE_SIZE / sizeof(u64)) - 1)
+
+/**
+ * optee_fill_pages_list() - write list of user pages to given shared
+ * buffer.
+ *
+ * @dst: page-aligned buffer where list of pages will be stored
+ * @pages: array of pages that represents shared buffer
+ * @num_pages: number of entries in @pages
+ * @page_offset: offset of user buffer from page start
+ *
+ * @dst should be big enough to hold list of user page addresses and
+ * links to the next pages of buffer
+ */
+void optee_fill_pages_list(u64 *dst, struct page **pages, int num_pages,
+               size_t page_offset)
+{
+    int n = 0;
+    phys_addr_t optee_page;
+    /*
+     * Refer to OPTEE_MSG_ATTR_NONCONTIG description in optee_msg.h
+     * for details.
+     */
+    struct {
+        u64 pages_list[PAGELIST_ENTRIES_PER_PAGE];
+        u64 next_page_data;
+    } *pages_data;
+
+    /*
+     * Currently OP-TEE uses 4k page size and it does not looks
+     * like this will change in the future. On other hand, there are
+     * no know ARM architectures with page size < 4k.
+     * Thus the next built assert looks redundant. But the following
+     * code heavily relies on this assumption, so it is better be
+     * safe than sorry.
+     */
+    BUILD_BUG_ON(PAGE_SIZE < OPTEE_MSG_NONCONTIG_PAGE_SIZE);
+
+    pages_data = (void *)dst;
+    /*
+     * If linux page is bigger than 4k, and user buffer offset is
+     * larger than 4k/8k/12k/etc this will skip first 4k pages,
+     * because they bear no value data for OP-TEE.
+     */
+    optee_page = page_to_phys(*pages) +
+        round_down(page_offset, OPTEE_MSG_NONCONTIG_PAGE_SIZE);
+
+    while (true) {
+        pages_data->pages_list[n++] = optee_page;
+
+        if (n == PAGELIST_ENTRIES_PER_PAGE) {
+            pages_data->next_page_data =
+                virt_to_phys(pages_data + 1);
+            pages_data++;
+            n = 0;
+        }
+
+        optee_page += OPTEE_MSG_NONCONTIG_PAGE_SIZE;
+        if (!(optee_page & ~PAGE_MASK)) {
+            if (!--num_pages)
+                break;
+            pages++;
+            optee_page = page_to_phys(*pages);
+        }
+    }
+}
+
+/*
+ * The final entry in each pagelist page is a pointer to the next
+ * pagelist page.
+ */
+static size_t get_pages_list_size(size_t num_entries)
+{
+    int pages = DIV_ROUND_UP(num_entries, PAGELIST_ENTRIES_PER_PAGE);
+
+    return pages * OPTEE_MSG_NONCONTIG_PAGE_SIZE;
+}
+
+u64 *optee_allocate_pages_list(size_t num_entries)
+{
+    return alloc_pages_exact(get_pages_list_size(num_entries), GFP_KERNEL);
+}
+
+void optee_free_pages_list(void *list, size_t num_entries)
+{
+    free_pages_exact(list, get_pages_list_size(num_entries));
+}
+
+static bool is_normal_memory(pgprot_t p)
+{
+#if defined(CONFIG_ARM)
+    return (pgprot_val(p) & L_PTE_MT_MASK) == L_PTE_MT_WRITEALLOC;
+#elif defined(CONFIG_ARM64)
+    return (pgprot_val(p) & PTE_ATTRINDX_MASK) == PTE_ATTRINDX(MT_NORMAL);
+#else
+#error "Unuspported architecture"
+#endif
+}
+
+static int __check_mem_type(struct vm_area_struct *vma, unsigned long end)
+{
+    while (vma && is_normal_memory(vma->vm_page_prot)) {
+        if (vma->vm_end >= end)
+            return 0;
+        vma = vma->vm_next;
+    }
+
+    return -EINVAL;
+}
+
+static int check_mem_type(unsigned long start, size_t num_pages)
+{
+    struct mm_struct *mm = current->mm;
+    int rc;
+
+    down_read(&mm->mmap_sem);
+    rc = __check_mem_type(find_vma(mm, start),
+                  start + num_pages * PAGE_SIZE);
+    up_read(&mm->mmap_sem);
+
+    return rc;
+}
+
+int optee_shm_register(struct tee_context *ctx, struct tee_shm *shm,
+               struct page **pages, size_t num_pages,
+               unsigned long start)
+{
+    struct tee_shm *shm_arg = NULL;
+    struct optee_msg_arg *msg_arg;
+    u64 *pages_list;
+    phys_addr_t msg_parg;
+    int rc;
+
+    if (!num_pages)
+        return -EINVAL;
+
+    rc = check_mem_type(start, num_pages);
+    if (rc)
+        return rc;
+
+    pages_list = optee_allocate_pages_list(num_pages);
+    if (!pages_list)
+        return -ENOMEM;
+
+    shm_arg = get_msg_arg(ctx, 1, &msg_arg, &msg_parg);
+    if (IS_ERR(shm_arg)) {
+        rc = PTR_ERR(shm_arg);
+        goto out;
+    }
+
+    optee_fill_pages_list(pages_list, pages, num_pages,
+                  tee_shm_get_page_offset(shm));
+
+    msg_arg->cmd = OPTEE_MSG_CMD_REGISTER_SHM;
+    msg_arg->params->attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT |
+                OPTEE_MSG_ATTR_NONCONTIG;
+    msg_arg->params->u.tmem.shm_ref = (unsigned long)shm;
+    msg_arg->params->u.tmem.size = tee_shm_get_size(shm);
+    /*
+     * In the least bits of msg_arg->params->u.tmem.buf_ptr we
+     * store buffer offset from 4k page, as described in OP-TEE ABI.
+     */
+    msg_arg->params->u.tmem.buf_ptr = virt_to_phys(pages_list) |
+        (tee_shm_get_page_offset(shm) &
+         (OPTEE_MSG_NONCONTIG_PAGE_SIZE - 1));
+
+    if (optee_do_call_with_arg(ctx, msg_parg) ||
+        msg_arg->ret != TEEC_SUCCESS)
+        rc = -EINVAL;
+
+    tee_shm_free(shm_arg);
+out:
+    optee_free_pages_list(pages_list, num_pages);
+    return rc;
+}
+
+int optee_shm_unregister(struct tee_context *ctx, struct tee_shm *shm)
+{
+    struct tee_shm *shm_arg;
+    struct optee_msg_arg *msg_arg;
+    phys_addr_t msg_parg;
+    int rc = 0;
+
+    shm_arg = get_msg_arg(ctx, 1, &msg_arg, &msg_parg);
+    if (IS_ERR(shm_arg))
+        return PTR_ERR(shm_arg);
+
+    msg_arg->cmd = OPTEE_MSG_CMD_UNREGISTER_SHM;
+
+    msg_arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_RMEM_INPUT;
+    msg_arg->params[0].u.rmem.shm_ref = (unsigned long)shm;
+
+    if (optee_do_call_with_arg(ctx, msg_parg) ||
+        msg_arg->ret != TEEC_SUCCESS)
+        rc = -EINVAL;
+    tee_shm_free(shm_arg);
+    return rc;
+}
+
+int optee_shm_register_supp(struct tee_context *ctx, struct tee_shm *shm,
+                struct page **pages, size_t num_pages,
+                unsigned long start)
+{
+    /*
+     * We don't want to register supplicant memory in OP-TEE.
+     * Instead information about it will be passed in RPC code.
+     */
+    return check_mem_type(start, num_pages);
+}
+
+int optee_shm_unregister_supp(struct tee_context *ctx, struct tee_shm *shm)
+{
+    return 0;
+}
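Taken together, the registration path above works like this sketch, which uses only helpers introduced in this patch (error paths trimmed; 'pages', 'num_pages' and 'offset' are assumed to be supplied by the caller, e.g. from tee_shm_get_page_offset()):

    /* Build the non-contiguous page list for a pinned buffer and derive
     * the ABI pointer passed in u.tmem.buf_ptr. */
    static int sketch_describe_buffer(struct page **pages, size_t num_pages,
                                      size_t offset, u64 *abi_buf_ptr)
    {
        u64 *list = optee_allocate_pages_list(num_pages);

        if (!list)
            return -ENOMEM;

        optee_fill_pages_list(list, pages, num_pages, offset);

        /* Upper bits: physical address of the page list itself.
         * Low 12 bits: offset of the data within its first 4 KiB page. */
        *abi_buf_ptr = virt_to_phys(list) |
                       (offset & (OPTEE_MSG_NONCONTIG_PAGE_SIZE - 1));

        /* ... issue OPTEE_MSG_CMD_REGISTER_SHM, then release the list: */
        optee_free_pages_list(list, num_pages);
        return 0;
    }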

View file

@@ -28,6 +28,7 @@
 #include <linux/uaccess.h>
 #include "optee_private.h"
 #include "optee_smc.h"
+#include "shm_pool.h"
 
 #define DRIVER_NAME "optee"
@@ -97,6 +98,25 @@ int optee_from_msg_param(struct tee_param *params, size_t num_params,
                 return rc;
             }
             break;
+        case OPTEE_MSG_ATTR_TYPE_RMEM_INPUT:
+        case OPTEE_MSG_ATTR_TYPE_RMEM_OUTPUT:
+        case OPTEE_MSG_ATTR_TYPE_RMEM_INOUT:
+            p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT +
+                  attr - OPTEE_MSG_ATTR_TYPE_RMEM_INPUT;
+            p->u.memref.size = mp->u.rmem.size;
+            shm = (struct tee_shm *)(unsigned long)
+                mp->u.rmem.shm_ref;
+
+            if (!shm) {
+                p->u.memref.shm_offs = 0;
+                p->u.memref.shm = NULL;
+                break;
+            }
+            p->u.memref.shm_offs = mp->u.rmem.offs;
+            p->u.memref.shm = shm;
+            break;
         default:
             return -EINVAL;
         }
@@ -104,6 +124,46 @@ int optee_from_msg_param(struct tee_param *params, size_t num_params,
     return 0;
 }
 
+static int to_msg_param_tmp_mem(struct optee_msg_param *mp,
+                const struct tee_param *p)
+{
+    int rc;
+    phys_addr_t pa;
+
+    mp->attr = OPTEE_MSG_ATTR_TYPE_TMEM_INPUT + p->attr -
+           TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT;
+
+    mp->u.tmem.shm_ref = (unsigned long)p->u.memref.shm;
+    mp->u.tmem.size = p->u.memref.size;
+
+    if (!p->u.memref.shm) {
+        mp->u.tmem.buf_ptr = 0;
+        return 0;
+    }
+
+    rc = tee_shm_get_pa(p->u.memref.shm, p->u.memref.shm_offs, &pa);
+    if (rc)
+        return rc;
+
+    mp->u.tmem.buf_ptr = pa;
+    mp->attr |= OPTEE_MSG_ATTR_CACHE_PREDEFINED <<
+            OPTEE_MSG_ATTR_CACHE_SHIFT;
+
+    return 0;
+}
+
+static int to_msg_param_reg_mem(struct optee_msg_param *mp,
+                const struct tee_param *p)
+{
+    mp->attr = OPTEE_MSG_ATTR_TYPE_RMEM_INPUT + p->attr -
+           TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT;
+
+    mp->u.rmem.shm_ref = (unsigned long)p->u.memref.shm;
+    mp->u.rmem.size = p->u.memref.size;
+    mp->u.rmem.offs = p->u.memref.shm_offs;
+    return 0;
+}
+
 /**
  * optee_to_msg_param() - convert from struct tee_params to OPTEE_MSG parameters
  * @msg_params: OPTEE_MSG parameters
@@ -116,7 +176,6 @@ int optee_to_msg_param(struct optee_msg_param *msg_params, size_t num_params,
 {
     int rc;
     size_t n;
-    phys_addr_t pa;
 
     for (n = 0; n < num_params; n++) {
         const struct tee_param *p = params + n;
@@ -139,22 +198,12 @@ int optee_to_msg_param(struct optee_msg_param *msg_params, size_t num_params,
         case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT:
         case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
         case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
-            mp->attr = OPTEE_MSG_ATTR_TYPE_TMEM_INPUT +
-                   p->attr -
-                   TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT;
-            mp->u.tmem.shm_ref = (unsigned long)p->u.memref.shm;
-            mp->u.tmem.size = p->u.memref.size;
-            if (!p->u.memref.shm) {
-                mp->u.tmem.buf_ptr = 0;
-                break;
-            }
-            rc = tee_shm_get_pa(p->u.memref.shm,
-                        p->u.memref.shm_offs, &pa);
+            if (tee_shm_is_registered(p->u.memref.shm))
+                rc = to_msg_param_reg_mem(mp, p);
+            else
+                rc = to_msg_param_tmp_mem(mp, p);
             if (rc)
                 return rc;
-            mp->u.tmem.buf_ptr = pa;
-            mp->attr |= OPTEE_MSG_ATTR_CACHE_PREDEFINED <<
-                    OPTEE_MSG_ATTR_CACHE_SHIFT;
             break;
         default:
             return -EINVAL;
@@ -171,6 +220,10 @@ static void optee_get_version(struct tee_device *teedev,
         .impl_caps = TEE_OPTEE_CAP_TZ,
         .gen_caps = TEE_GEN_CAP_GP,
     };
+    struct optee *optee = tee_get_drvdata(teedev);
+
+    if (optee->sec_caps & OPTEE_SMC_SEC_CAP_DYNAMIC_SHM)
+        v.gen_caps |= TEE_GEN_CAP_REG_MEM;
     *vers = v;
 }
@@ -187,12 +240,12 @@ static int optee_open(struct tee_context *ctx)
     if (teedev == optee->supp_teedev) {
         bool busy = true;
 
-        mutex_lock(&optee->supp.ctx_mutex);
+        mutex_lock(&optee->supp.mutex);
         if (!optee->supp.ctx) {
             busy = false;
             optee->supp.ctx = ctx;
         }
-        mutex_unlock(&optee->supp.ctx_mutex);
+        mutex_unlock(&optee->supp.mutex);
         if (busy) {
             kfree(ctxdata);
             return -EBUSY;
@@ -252,11 +305,8 @@ static void optee_release(struct tee_context *ctx)
 
     ctx->data = NULL;
 
-    if (teedev == optee->supp_teedev) {
-        mutex_lock(&optee->supp.ctx_mutex);
-        optee->supp.ctx = NULL;
-        mutex_unlock(&optee->supp.ctx_mutex);
-    }
+    if (teedev == optee->supp_teedev)
+        optee_supp_release(&optee->supp);
 }
 
 static const struct tee_driver_ops optee_ops = {
@@ -267,6 +317,8 @@ static const struct tee_driver_ops optee_ops = {
     .close_session = optee_close_session,
     .invoke_func = optee_invoke_func,
     .cancel_req = optee_cancel_req,
+    .shm_register = optee_shm_register,
+    .shm_unregister = optee_shm_unregister,
 };
 
 static const struct tee_desc optee_desc = {
@@ -281,6 +333,8 @@ static const struct tee_driver_ops optee_supp_ops = {
     .release = optee_release,
     .supp_recv = optee_supp_recv,
     .supp_send = optee_supp_send,
+    .shm_register = optee_shm_register_supp,
+    .shm_unregister = optee_shm_unregister_supp,
 };
 
 static const struct tee_desc optee_supp_desc = {
@@ -345,21 +399,22 @@ static bool optee_msg_exchange_capabilities(optee_invoke_fn *invoke_fn,
 }
 
 static struct tee_shm_pool *
-optee_config_shm_memremap(optee_invoke_fn *invoke_fn, void **memremaped_shm)
+optee_config_shm_memremap(optee_invoke_fn *invoke_fn, void **memremaped_shm,
+              u32 sec_caps)
 {
     union {
         struct arm_smccc_res smccc;
         struct optee_smc_get_shm_config_result result;
     } res;
-    struct tee_shm_pool *pool;
     unsigned long vaddr;
     phys_addr_t paddr;
     size_t size;
     phys_addr_t begin;
    phys_addr_t end;
     void *va;
-    struct tee_shm_pool_mem_info priv_info;
-    struct tee_shm_pool_mem_info dmabuf_info;
+    struct tee_shm_pool_mgr *priv_mgr;
+    struct tee_shm_pool_mgr *dmabuf_mgr;
+    void *rc;
 
     invoke_fn(OPTEE_SMC_GET_SHM_CONFIG, 0, 0, 0, 0, 0, 0, 0, &res.smccc);
     if (res.result.status != OPTEE_SMC_RETURN_OK) {
@@ -389,22 +444,49 @@ optee_config_shm_memremap(optee_invoke_fn *invoke_fn, void **memremaped_shm)
     }
     vaddr = (unsigned long)va;
 
-    priv_info.vaddr = vaddr;
-    priv_info.paddr = paddr;
-    priv_info.size = OPTEE_SHM_NUM_PRIV_PAGES * PAGE_SIZE;
-    dmabuf_info.vaddr = vaddr + OPTEE_SHM_NUM_PRIV_PAGES * PAGE_SIZE;
-    dmabuf_info.paddr = paddr + OPTEE_SHM_NUM_PRIV_PAGES * PAGE_SIZE;
-    dmabuf_info.size = size - OPTEE_SHM_NUM_PRIV_PAGES * PAGE_SIZE;
-
-    pool = tee_shm_pool_alloc_res_mem(&priv_info, &dmabuf_info);
-    if (IS_ERR(pool)) {
-        memunmap(va);
-        goto out;
+    /*
+     * If OP-TEE can work with unregistered SHM, we will use own pool
+     * for private shm
+     */
+    if (sec_caps & OPTEE_SMC_SEC_CAP_DYNAMIC_SHM) {
+        rc = optee_shm_pool_alloc_pages();
+        if (IS_ERR(rc))
+            goto err_memunmap;
+        priv_mgr = rc;
+    } else {
+        const size_t sz = OPTEE_SHM_NUM_PRIV_PAGES * PAGE_SIZE;
+
+        rc = tee_shm_pool_mgr_alloc_res_mem(vaddr, paddr, sz,
+                            3 /* 8 bytes aligned */);
+        if (IS_ERR(rc))
+            goto err_memunmap;
+        priv_mgr = rc;
+
+        vaddr += sz;
+        paddr += sz;
+        size -= sz;
     }
 
+    rc = tee_shm_pool_mgr_alloc_res_mem(vaddr, paddr, size, PAGE_SHIFT);
+    if (IS_ERR(rc))
+        goto err_free_priv_mgr;
+    dmabuf_mgr = rc;
+
+    rc = tee_shm_pool_alloc(priv_mgr, dmabuf_mgr);
+    if (IS_ERR(rc))
+        goto err_free_dmabuf_mgr;
+
     *memremaped_shm = va;
-out:
-    return pool;
+
+    return rc;
+
+err_free_dmabuf_mgr:
+    tee_shm_pool_mgr_destroy(dmabuf_mgr);
+err_free_priv_mgr:
+    tee_shm_pool_mgr_destroy(priv_mgr);
+err_memunmap:
+    memunmap(va);
+    return rc;
 }
 
 /* Simple wrapper functions to be able to use a function pointer */
@@ -482,7 +564,7 @@ static struct optee *optee_probe(struct device_node *np)
     if (!(sec_caps & OPTEE_SMC_SEC_CAP_HAVE_RESERVED_SHM))
         return ERR_PTR(-EINVAL);
 
-    pool = optee_config_shm_memremap(invoke_fn, &memremaped_shm);
+    pool = optee_config_shm_memremap(invoke_fn, &memremaped_shm, sec_caps);
     if (IS_ERR(pool))
         return (void *)pool;
 
@@ -493,6 +575,7 @@ static struct optee *optee_probe(struct device_node *np)
     }
 
     optee->invoke_fn = invoke_fn;
+    optee->sec_caps = sec_caps;
 
     teedev = tee_device_alloc(&optee_desc, NULL, pool, optee);
     if (IS_ERR(teedev)) {

View file

@@ -67,11 +67,32 @@
 #define OPTEE_MSG_ATTR_META             BIT(8)
 
 /*
- * The temporary shared memory object is not physically contigous and this
- * temp memref is followed by another fragment until the last temp memref
- * that doesn't have this bit set.
+ * Pointer to a list of pages used to register user-defined SHM buffer.
+ * Used with OPTEE_MSG_ATTR_TYPE_TMEM_*.
+ * buf_ptr should point to the beginning of the buffer. Buffer will contain
+ * list of page addresses. OP-TEE core can reconstruct contiguous buffer from
+ * that page addresses list. Page addresses are stored as 64 bit values.
+ * Last entry on a page should point to the next page of buffer.
+ * Every entry in buffer should point to a 4k page beginning (12 least
+ * significant bits must be equal to zero).
+ *
+ * 12 least significant bints of optee_msg_param.u.tmem.buf_ptr should hold page
+ * offset of the user buffer.
+ *
+ * So, entries should be placed like members of this structure:
+ *
+ * struct page_data {
+ *     uint64_t pages_array[OPTEE_MSG_NONCONTIG_PAGE_SIZE/sizeof(uint64_t) - 1];
+ *     uint64_t next_page_data;
+ * };
+ *
+ * Structure is designed to exactly fit into the page size
+ * OPTEE_MSG_NONCONTIG_PAGE_SIZE which is a standard 4KB page.
+ *
+ * The size of 4KB is chosen because this is the smallest page size for ARM
+ * architectures. If REE uses larger pages, it should divide them to 4KB ones.
  */
-#define OPTEE_MSG_ATTR_FRAGMENT         BIT(9)
+#define OPTEE_MSG_ATTR_NONCONTIG        BIT(9)
 
 /*
  * Memory attributes for caching passed with temp memrefs. The actual value
@@ -94,6 +115,11 @@
 #define OPTEE_MSG_LOGIN_APPLICATION_USER        0x00000005
 #define OPTEE_MSG_LOGIN_APPLICATION_GROUP       0x00000006
 
+/*
+ * Page size used in non-contiguous buffer entries
+ */
+#define OPTEE_MSG_NONCONTIG_PAGE_SIZE   4096
+
 /**
  * struct optee_msg_param_tmem - temporary memory reference parameter
  * @buf_ptr:    Address of the buffer
@@ -145,8 +171,8 @@ struct optee_msg_param_value {
  *
 * @attr & OPTEE_MSG_ATTR_TYPE_MASK indicates if tmem, rmem or value is used in
 * the union. OPTEE_MSG_ATTR_TYPE_VALUE_* indicates value,
- * OPTEE_MSG_ATTR_TYPE_TMEM_* indicates tmem and
- * OPTEE_MSG_ATTR_TYPE_RMEM_* indicates rmem.
+ * OPTEE_MSG_ATTR_TYPE_TMEM_* indicates @tmem and
+ * OPTEE_MSG_ATTR_TYPE_RMEM_* indicates @rmem,
 * OPTEE_MSG_ATTR_TYPE_NONE indicates that none of the members are used.
  */
 struct optee_msg_param {

View file

@@ -53,36 +53,24 @@ struct optee_wait_queue {
 * @ctx             the context of current connected supplicant.
 *                  if !NULL the supplicant device is available for use,
 *                  else busy
- * @ctx_mutex:      held while accessing @ctx
- * @func:           supplicant function id to call
- * @ret:            call return value
- * @num_params:     number of elements in @param
- * @param:          parameters for @func
- * @req_posted:     if true, a request has been posted to the supplicant
- * @supp_next_send: if true, next step is for supplicant to send response
- * @thrd_mutex:     held by the thread doing a request to supplicant
- * @supp_mutex:     held by supplicant while operating on this struct
- * @data_to_supp:   supplicant is waiting on this for next request
- * @data_from_supp: requesting thread is waiting on this to get the result
+ * @mutex:          held while accessing content of this struct
+ * @req_id:         current request id if supplicant is doing synchronous
+ *                  communication, else -1
+ * @reqs:           queued request not yet retrieved by supplicant
+ * @idr:            IDR holding all requests currently being processed
+ *                  by supplicant
+ * @reqs_c:         completion used by supplicant when waiting for a
+ *                  request to be queued.
  */
 struct optee_supp {
+    /* Serializes access to this struct */
+    struct mutex mutex;
     struct tee_context *ctx;
-    /* Serializes access of ctx */
-    struct mutex ctx_mutex;
 
-    u32 func;
-    u32 ret;
-    size_t num_params;
-    struct tee_param *param;
-    bool req_posted;
-    bool supp_next_send;
-    /* Serializes access to this struct for requesting thread */
-    struct mutex thrd_mutex;
-    /* Serializes access to this struct for supplicant threads */
-    struct mutex supp_mutex;
-    struct completion data_to_supp;
-    struct completion data_from_supp;
+    int req_id;
+    struct list_head reqs;
+    struct idr idr;
+    struct completion reqs_c;
 };
 
 /**
@@ -96,6 +84,8 @@ struct optee_supp {
 * @supp:           supplicant synchronization struct for RPC to supplicant
 * @pool:           shared memory pool
 * @memremaped_shm  virtual address of memory in shared memory pool
+ * @sec_caps:       secure world capabilities defined by
+ *                  OPTEE_SMC_SEC_CAP_* in optee_smc.h
 */
 struct optee {
     struct tee_device *supp_teedev;
@@ -106,6 +96,7 @@ struct optee {
     struct optee_supp supp;
     struct tee_shm_pool *pool;
     void *memremaped_shm;
+    u32 sec_caps;
 };
 
 struct optee_session {
@@ -130,7 +121,16 @@ struct optee_rpc_param {
     u32 a7;
 };
 
-void optee_handle_rpc(struct tee_context *ctx, struct optee_rpc_param *param);
+/* Holds context that is preserved during one STD call */
+struct optee_call_ctx {
+    /* information about pages list used in last allocation */
+    void *pages_list;
+    size_t num_entries;
+};
+
+void optee_handle_rpc(struct tee_context *ctx, struct optee_rpc_param *param,
+              struct optee_call_ctx *call_ctx);
+void optee_rpc_finalize_call(struct optee_call_ctx *call_ctx);
 
 void optee_wait_queue_init(struct optee_wait_queue *wq);
 void optee_wait_queue_exit(struct optee_wait_queue *wq);
@@ -142,6 +142,7 @@ int optee_supp_read(struct tee_context *ctx, void __user *buf, size_t len);
 int optee_supp_write(struct tee_context *ctx, void __user *buf, size_t len);
 void optee_supp_init(struct optee_supp *supp);
 void optee_supp_uninit(struct optee_supp *supp);
+void optee_supp_release(struct optee_supp *supp);
 
 int optee_supp_recv(struct tee_context *ctx, u32 *func, u32 *num_params,
             struct tee_param *param);
@@ -160,11 +161,26 @@ int optee_cancel_req(struct tee_context *ctx, u32 cancel_id, u32 session);
 void optee_enable_shm_cache(struct optee *optee);
 void optee_disable_shm_cache(struct optee *optee);
 
+int optee_shm_register(struct tee_context *ctx, struct tee_shm *shm,
+               struct page **pages, size_t num_pages,
+               unsigned long start);
+int optee_shm_unregister(struct tee_context *ctx, struct tee_shm *shm);
+
+int optee_shm_register_supp(struct tee_context *ctx, struct tee_shm *shm,
+                struct page **pages, size_t num_pages,
+                unsigned long start);
+int optee_shm_unregister_supp(struct tee_context *ctx, struct tee_shm *shm);
+
 int optee_from_msg_param(struct tee_param *params, size_t num_params,
              const struct optee_msg_param *msg_params);
 int optee_to_msg_param(struct optee_msg_param *msg_params, size_t num_params,
                const struct tee_param *params);
 
+u64 *optee_allocate_pages_list(size_t num_entries);
+void optee_free_pages_list(void *array, size_t num_entries);
+void optee_fill_pages_list(u64 *dst, struct page **pages, int num_pages,
+               size_t page_offset);
+
 /*
  * Small helpers
  */
View file

@@ -222,6 +222,13 @@ struct optee_smc_get_shm_config_result {
 #define OPTEE_SMC_SEC_CAP_HAVE_RESERVED_SHM     BIT(0)
 /* Secure world can communicate via previously unregistered shared memory */
 #define OPTEE_SMC_SEC_CAP_UNREGISTERED_SHM      BIT(1)
+
+/*
+ * Secure world supports commands "register/unregister shared memory",
+ * secure world accepts command buffers located in any parts of non-secure RAM
+ */
+#define OPTEE_SMC_SEC_CAP_DYNAMIC_SHM           BIT(2)
+
 #define OPTEE_SMC_FUNCID_EXCHANGE_CAPABILITIES  9
 #define OPTEE_SMC_EXCHANGE_CAPABILITIES \
     OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_EXCHANGE_CAPABILITIES)

View file

@@ -192,15 +192,16 @@ static struct tee_shm *cmd_alloc_suppl(struct tee_context *ctx, size_t sz)
     if (ret)
         return ERR_PTR(-ENOMEM);
 
-    mutex_lock(&optee->supp.ctx_mutex);
+    mutex_lock(&optee->supp.mutex);
     /* Increases count as secure world doesn't have a reference */
     shm = tee_shm_get_from_id(optee->supp.ctx, param.u.value.c);
-    mutex_unlock(&optee->supp.ctx_mutex);
+    mutex_unlock(&optee->supp.mutex);
     return shm;
 }
 
 static void handle_rpc_func_cmd_shm_alloc(struct tee_context *ctx,
-                      struct optee_msg_arg *arg)
+                      struct optee_msg_arg *arg,
+                      struct optee_call_ctx *call_ctx)
 {
     phys_addr_t pa;
     struct tee_shm *shm;
@@ -245,10 +246,49 @@ static void handle_rpc_func_cmd_shm_alloc(struct tee_context *ctx,
         goto bad;
     }
 
+    sz = tee_shm_get_size(shm);
+
+    if (tee_shm_is_registered(shm)) {
+        struct page **pages;
+        u64 *pages_list;
+        size_t page_num;
+
+        pages = tee_shm_get_pages(shm, &page_num);
+        if (!pages || !page_num) {
+            arg->ret = TEEC_ERROR_OUT_OF_MEMORY;
+            goto bad;
+        }
+
+        pages_list = optee_allocate_pages_list(page_num);
+        if (!pages_list) {
+            arg->ret = TEEC_ERROR_OUT_OF_MEMORY;
+            goto bad;
+        }
+
+        call_ctx->pages_list = pages_list;
+        call_ctx->num_entries = page_num;
+
+        arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT |
+                      OPTEE_MSG_ATTR_NONCONTIG;
+        /*
+         * In the least bits of u.tmem.buf_ptr we store buffer offset
+         * from 4k page, as described in OP-TEE ABI.
+         */
+        arg->params[0].u.tmem.buf_ptr = virt_to_phys(pages_list) |
+            (tee_shm_get_page_offset(shm) &
+             (OPTEE_MSG_NONCONTIG_PAGE_SIZE - 1));
+        arg->params[0].u.tmem.size = tee_shm_get_size(shm);
+        arg->params[0].u.tmem.shm_ref = (unsigned long)shm;
+
+        optee_fill_pages_list(pages_list, pages, page_num,
+                      tee_shm_get_page_offset(shm));
+    } else {
         arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT;
         arg->params[0].u.tmem.buf_ptr = pa;
         arg->params[0].u.tmem.size = sz;
         arg->params[0].u.tmem.shm_ref = (unsigned long)shm;
+    }
+
     arg->ret = TEEC_SUCCESS;
     return;
 bad:
@@ -307,8 +347,24 @@ static void handle_rpc_func_cmd_shm_free(struct tee_context *ctx,
     arg->ret = TEEC_SUCCESS;
 }
 
+static void free_pages_list(struct optee_call_ctx *call_ctx)
+{
+    if (call_ctx->pages_list) {
+        optee_free_pages_list(call_ctx->pages_list,
+                      call_ctx->num_entries);
+        call_ctx->pages_list = NULL;
+        call_ctx->num_entries = 0;
+    }
+}
+
+void optee_rpc_finalize_call(struct optee_call_ctx *call_ctx)
+{
+    free_pages_list(call_ctx);
+}
+
 static void handle_rpc_func_cmd(struct tee_context *ctx, struct optee *optee,
-                struct tee_shm *shm)
+                struct tee_shm *shm,
+                struct optee_call_ctx *call_ctx)
 {
     struct optee_msg_arg *arg;
@@ -329,7 +385,8 @@ static void handle_rpc_func_cmd(struct tee_context *ctx, struct optee *optee,
         handle_rpc_func_cmd_wait(arg);
         break;
     case OPTEE_MSG_RPC_CMD_SHM_ALLOC:
-        handle_rpc_func_cmd_shm_alloc(ctx, arg);
+        free_pages_list(call_ctx);
+        handle_rpc_func_cmd_shm_alloc(ctx, arg, call_ctx);
         break;
     case OPTEE_MSG_RPC_CMD_SHM_FREE:
         handle_rpc_func_cmd_shm_free(ctx, arg);
@@ -343,10 +400,12 @@ static void handle_rpc_func_cmd(struct tee_context *ctx, struct optee *optee,
 * optee_handle_rpc() - handle RPC from secure world
 * @ctx:        context doing the RPC
 * @param:      value of registers for the RPC
+ * @call_ctx:   call context. Preserved during one OP-TEE invocation
 *
 * Result of RPC is written back into @param.
 */
-void optee_handle_rpc(struct tee_context *ctx, struct optee_rpc_param *param)
+void optee_handle_rpc(struct tee_context *ctx, struct optee_rpc_param *param,
+              struct optee_call_ctx *call_ctx)
 {
     struct tee_device *teedev = ctx->teedev;
     struct optee *optee = tee_get_drvdata(teedev);
@@ -381,7 +440,7 @@ void optee_handle_rpc(struct tee_context *ctx, struct optee_rpc_param *param)
         break;
     case OPTEE_SMC_RPC_FUNC_CMD:
         shm = reg_pair_to_ptr(param->a1, param->a2);
-        handle_rpc_func_cmd(ctx, optee, shm);
+        handle_rpc_func_cmd(ctx, optee, shm, call_ctx);
         break;
     default:
         pr_warn("Unknown RPC func 0x%x\n",

View file

@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2015, Linaro Limited
+ * Copyright (c) 2017, EPAM Systems
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/device.h>
+#include <linux/dma-buf.h>
+#include <linux/genalloc.h>
+#include <linux/slab.h>
+#include <linux/tee_drv.h>
+#include "optee_private.h"
+#include "optee_smc.h"
+#include "shm_pool.h"
+
+static int pool_op_alloc(struct tee_shm_pool_mgr *poolm,
+             struct tee_shm *shm, size_t size)
+{
+    unsigned int order = get_order(size);
+    struct page *page;
+
+    page = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
+    if (!page)
+        return -ENOMEM;
+
+    shm->kaddr = page_address(page);
+    shm->paddr = page_to_phys(page);
+    shm->size = PAGE_SIZE << order;
+
+    return 0;
+}
+
+static void pool_op_free(struct tee_shm_pool_mgr *poolm,
+             struct tee_shm *shm)
+{
+    free_pages((unsigned long)shm->kaddr, get_order(shm->size));
+    shm->kaddr = NULL;
+}
+
+static void pool_op_destroy_poolmgr(struct tee_shm_pool_mgr *poolm)
+{
+    kfree(poolm);
+}
+
+static const struct tee_shm_pool_mgr_ops pool_ops = {
+    .alloc = pool_op_alloc,
+    .free = pool_op_free,
+    .destroy_poolmgr = pool_op_destroy_poolmgr,
+};
+
+/**
+ * optee_shm_pool_alloc_pages() - create page-based allocator pool
+ *
+ * This pool is used when OP-TEE supports dymanic SHM. In this case
+ * command buffers and such are allocated from kernel's own memory.
+ */
+struct tee_shm_pool_mgr *optee_shm_pool_alloc_pages(void)
+{
+    struct tee_shm_pool_mgr *mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);
+
+    if (!mgr)
+        return ERR_PTR(-ENOMEM);
+
+    mgr->ops = &pool_ops;
+
+    return mgr;
+}
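One property of pool_op_alloc() above worth noting: alloc_pages() works in power-of-two page orders, so shm->size ends up as the requested size rounded up to such a boundary. A small user-space model of the rounding, assuming 4 KiB pages:

    #include <stdio.h>

    /* Models the kernel's get_order(): smallest n with (4096 << n) >= size;
     * pool_op_alloc() then sets shm->size = PAGE_SIZE << n. */
    static int order_for(size_t size)
    {
        int n = 0;

        while (((size_t)4096 << n) < size)
            n++;
        return n;
    }

    int main(void)
    {
        printf("5000 bytes  -> order %d (8 KiB)\n", order_for(5000));
        printf("20000 bytes -> order %d (32 KiB)\n", order_for(20000));
        return 0;
    }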

View file

@@ -0,0 +1,23 @@
+/*
+ * Copyright (c) 2015, Linaro Limited
+ * Copyright (c) 2016, EPAM Systems
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef SHM_POOL_H
+#define SHM_POOL_H
+
+#include <linux/tee_drv.h>
+
+struct tee_shm_pool_mgr *optee_shm_pool_alloc_pages(void);
+
+#endif

View file

@ -16,21 +16,61 @@
#include <linux/uaccess.h> #include <linux/uaccess.h>
#include "optee_private.h" #include "optee_private.h"
struct optee_supp_req {
struct list_head link;
bool busy;
u32 func;
u32 ret;
size_t num_params;
struct tee_param *param;
struct completion c;
};
void optee_supp_init(struct optee_supp *supp) void optee_supp_init(struct optee_supp *supp)
{ {
memset(supp, 0, sizeof(*supp)); memset(supp, 0, sizeof(*supp));
mutex_init(&supp->ctx_mutex); mutex_init(&supp->mutex);
mutex_init(&supp->thrd_mutex); init_completion(&supp->reqs_c);
mutex_init(&supp->supp_mutex); idr_init(&supp->idr);
init_completion(&supp->data_to_supp); INIT_LIST_HEAD(&supp->reqs);
init_completion(&supp->data_from_supp); supp->req_id = -1;
} }
void optee_supp_uninit(struct optee_supp *supp) void optee_supp_uninit(struct optee_supp *supp)
{ {
mutex_destroy(&supp->ctx_mutex); mutex_destroy(&supp->mutex);
mutex_destroy(&supp->thrd_mutex); idr_destroy(&supp->idr);
mutex_destroy(&supp->supp_mutex); }
void optee_supp_release(struct optee_supp *supp)
{
int id;
struct optee_supp_req *req;
struct optee_supp_req *req_tmp;
mutex_lock(&supp->mutex);
/* Abort all request retrieved by supplicant */
idr_for_each_entry(&supp->idr, req, id) {
req->busy = false;
idr_remove(&supp->idr, id);
req->ret = TEEC_ERROR_COMMUNICATION;
complete(&req->c);
}
/* Abort all queued requests */
list_for_each_entry_safe(req, req_tmp, &supp->reqs, link) {
list_del(&req->link);
req->ret = TEEC_ERROR_COMMUNICATION;
complete(&req->c);
}
supp->ctx = NULL;
supp->req_id = -1;
mutex_unlock(&supp->mutex);
} }
/** /**
@ -44,53 +84,42 @@ void optee_supp_uninit(struct optee_supp *supp)
*/ */
u32 optee_supp_thrd_req(struct tee_context *ctx, u32 func, size_t num_params, u32 optee_supp_thrd_req(struct tee_context *ctx, u32 func, size_t num_params,
struct tee_param *param) struct tee_param *param)
{ {
bool interruptable;
struct optee *optee = tee_get_drvdata(ctx->teedev); struct optee *optee = tee_get_drvdata(ctx->teedev);
struct optee_supp *supp = &optee->supp; struct optee_supp *supp = &optee->supp;
struct optee_supp_req *req = kzalloc(sizeof(*req), GFP_KERNEL);
bool interruptable;
u32 ret; u32 ret;
/* if (!req)
* Other threads blocks here until we've copied our answer from return TEEC_ERROR_OUT_OF_MEMORY;
* supplicant.
*/
while (mutex_lock_interruptible(&supp->thrd_mutex)) {
/* See comment below on when the RPC can be interrupted. */
mutex_lock(&supp->ctx_mutex);
interruptable = !supp->ctx;
mutex_unlock(&supp->ctx_mutex);
if (interruptable)
return TEEC_ERROR_COMMUNICATION;
}
/* init_completion(&req->c);
* We have exclusive access now since the supplicant at this req->func = func;
* point is either doing a req->num_params = num_params;
* wait_for_completion_interruptible(&supp->data_to_supp) or is in req->param = param;
* userspace still about to do the ioctl() to enter
* optee_supp_recv() below.
*/
supp->func = func; /* Insert the request in the request list */
supp->num_params = num_params; mutex_lock(&supp->mutex);
supp->param = param; list_add_tail(&req->link, &supp->reqs);
supp->req_posted = true; mutex_unlock(&supp->mutex);
/* Let supplicant get the data */ /* Tell an eventual waiter there's a new request */
complete(&supp->data_to_supp); complete(&supp->reqs_c);
/* /*
* Wait for supplicant to process and return result, once we've * Wait for supplicant to process and return result, once we've
* returned from wait_for_completion(data_from_supp) we have * returned from wait_for_completion(&req->c) successfully we have
* exclusive access again. * exclusive access again.
*/ */
while (wait_for_completion_interruptible(&supp->data_from_supp)) { while (wait_for_completion_interruptible(&req->c)) {
mutex_lock(&supp->ctx_mutex); mutex_lock(&supp->mutex);
interruptable = !supp->ctx; interruptable = !supp->ctx;
if (interruptable) { if (interruptable) {
/* /*
* There's no supplicant available and since the * There's no supplicant available and since the
* supp->ctx_mutex currently is held none can * supp->mutex currently is held none can
* become available until the mutex released * become available until the mutex released
* again. * again.
* *
@ -101,24 +130,91 @@ u32 optee_supp_thrd_req(struct tee_context *ctx, u32 func, size_t num_params,
* will serve all requests in a timely manner and * will serve all requests in a timely manner and
* interrupting then wouldn't make sense. * interrupting then wouldn't make sense.
*/ */
supp->ret = TEEC_ERROR_COMMUNICATION; interruptable = !req->busy;
init_completion(&supp->data_to_supp); if (!req->busy)
list_del(&req->link);
} }
mutex_unlock(&supp->ctx_mutex); mutex_unlock(&supp->mutex);
if (interruptable)
if (interruptable) {
req->ret = TEEC_ERROR_COMMUNICATION;
break; break;
} }
}
ret = supp->ret; ret = req->ret;
supp->param = NULL; kfree(req);
supp->req_posted = false;
/* We're done, let someone else talk to the supplicant now. */
mutex_unlock(&supp->thrd_mutex);
return ret; return ret;
} }
+
+static struct optee_supp_req *supp_pop_entry(struct optee_supp *supp,
+					     int num_params, int *id)
+{
+	struct optee_supp_req *req;
+
+	if (supp->req_id != -1) {
+		/*
+		 * Supplicant should not mix synchronous and asynchronous
+		 * requests.
+		 */
+		return ERR_PTR(-EINVAL);
+	}
+
+	if (list_empty(&supp->reqs))
+		return NULL;
+
+	req = list_first_entry(&supp->reqs, struct optee_supp_req, link);
+
+	if (num_params < req->num_params) {
+		/* Not enough room for parameters */
+		return ERR_PTR(-EINVAL);
+	}
+
+	*id = idr_alloc(&supp->idr, req, 1, 0, GFP_KERNEL);
+	if (*id < 0)
+		return ERR_PTR(-ENOMEM);
+
+	list_del(&req->link);
+	req->busy = true;
+
+	return req;
+}
+
+static int supp_check_recv_params(size_t num_params, struct tee_param *params,
+				  size_t *num_meta)
+{
+	size_t n;
+
+	if (!num_params)
+		return -EINVAL;
+
+	/*
+	 * If there's memrefs we need to decrease those as they were
+	 * increased earlier and we'll even refuse to accept any below.
+	 */
+	for (n = 0; n < num_params; n++)
+		if (tee_param_is_memref(params + n) && params[n].u.memref.shm)
+			tee_shm_put(params[n].u.memref.shm);
+
+	/*
+	 * We only expect parameters as TEE_IOCTL_PARAM_ATTR_TYPE_NONE with
+	 * or without the TEE_IOCTL_PARAM_ATTR_META bit set.
+	 */
+	for (n = 0; n < num_params; n++)
+		if (params[n].attr &&
+		    params[n].attr != TEE_IOCTL_PARAM_ATTR_META)
+			return -EINVAL;
+
+	/* At most we'll need one meta parameter so no need to check for more */
+	if (params->attr == TEE_IOCTL_PARAM_ATTR_META)
+		*num_meta = 1;
+	else
+		*num_meta = 0;
+
+	return 0;
+}
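
supp_check_recv_params() therefore admits exactly two receive layouts. For illustration (array length arbitrary, initializers use the kernel-side struct tee_param from <linux/tee_drv.h>):

	/* Accepted: plain synchronous receive, all parameters untyped. */
	struct tee_param sync_params[2] = {
		{ .attr = TEE_IOCTL_PARAM_ATTR_TYPE_NONE },
		{ .attr = TEE_IOCTL_PARAM_ATTR_TYPE_NONE },
	};

	/* Accepted: asynchronous receive, parameter 0 carries only the meta
	 * bit (type NONE plus META); optee_supp_recv() rewrites it to
	 * VALUE_INOUT | META and stores the request id in u.value.a. */
	struct tee_param async_params[2] = {
		{ .attr = TEE_IOCTL_PARAM_ATTR_META },
		{ .attr = TEE_IOCTL_PARAM_ATTR_TYPE_NONE },
	};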
 
 /**
  * optee_supp_recv() - receive request for supplicant
  * @ctx:	context receiving the request
@@ -135,65 +231,99 @@ int optee_supp_recv(struct tee_context *ctx, u32 *func, u32 *num_params,
 	struct tee_device *teedev = ctx->teedev;
 	struct optee *optee = tee_get_drvdata(teedev);
 	struct optee_supp *supp = &optee->supp;
+	struct optee_supp_req *req = NULL;
+	int id;
+	size_t num_meta;
 	int rc;
 
-	/*
-	 * In case two threads in one supplicant is calling this function
-	 * simultaneously we need to protect the data with a mutex which
-	 * we'll release before returning.
-	 */
-	mutex_lock(&supp->supp_mutex);
-
-	if (supp->supp_next_send) {
-		/*
-		 * optee_supp_recv() has been called again without
-		 * a optee_supp_send() in between. Supplicant has
-		 * probably been restarted before it was able to
-		 * write back last result. Abort last request and
-		 * wait for a new.
-		 */
-		if (supp->req_posted) {
-			supp->ret = TEEC_ERROR_COMMUNICATION;
-			supp->supp_next_send = false;
-			complete(&supp->data_from_supp);
-		}
-	}
-
-	/*
-	 * This is where supplicant will be hanging most of the
-	 * time, let's make this interruptable so we can easily
-	 * restart supplicant if needed.
-	 */
-	if (wait_for_completion_interruptible(&supp->data_to_supp)) {
-		rc = -ERESTARTSYS;
-		goto out;
-	}
-
-	/* We have exclusive access to the data */
-
-	if (*num_params < supp->num_params) {
-		/*
-		 * Not enough room for parameters, tell supplicant
-		 * it failed and abort last request.
-		 */
-		supp->ret = TEEC_ERROR_COMMUNICATION;
-		rc = -EINVAL;
-		complete(&supp->data_from_supp);
-		goto out;
-	}
-
-	*func = supp->func;
-	*num_params = supp->num_params;
-	memcpy(param, supp->param,
-	       sizeof(struct tee_param) * supp->num_params);
-
-	/* Allow optee_supp_send() below to do its work */
-	supp->supp_next_send = true;
-
-	rc = 0;
-out:
-	mutex_unlock(&supp->supp_mutex);
-	return rc;
+	rc = supp_check_recv_params(*num_params, param, &num_meta);
+	if (rc)
+		return rc;
+
+	while (true) {
+		mutex_lock(&supp->mutex);
+		req = supp_pop_entry(supp, *num_params - num_meta, &id);
+		mutex_unlock(&supp->mutex);
+
+		if (req) {
+			if (IS_ERR(req))
+				return PTR_ERR(req);
+			break;
+		}
+
+		/*
+		 * If we didn't get a request we'll block in
+		 * wait_for_completion() to avoid needless spinning.
+		 *
+		 * This is where supplicant will be hanging most of
+		 * the time, let's make this interruptable so we
+		 * can easily restart supplicant if needed.
+		 */
+		if (wait_for_completion_interruptible(&supp->reqs_c))
+			return -ERESTARTSYS;
+	}
+
+	if (num_meta) {
+		/*
+		 * tee-supplicant supports meta parameters -> requests
+		 * can be processed asynchronously.
+		 */
+		param->attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT |
+			      TEE_IOCTL_PARAM_ATTR_META;
+		param->u.value.a = id;
+		param->u.value.b = 0;
+		param->u.value.c = 0;
+	} else {
+		mutex_lock(&supp->mutex);
+		supp->req_id = id;
+		mutex_unlock(&supp->mutex);
+	}
+
+	*func = req->func;
+	*num_params = req->num_params + num_meta;
+	memcpy(param + num_meta, req->param,
+	       sizeof(struct tee_param) * req->num_params);
+
+	return 0;
 }
+
+static struct optee_supp_req *supp_pop_req(struct optee_supp *supp,
+					   size_t num_params,
+					   struct tee_param *param,
+					   size_t *num_meta)
+{
+	struct optee_supp_req *req;
+	int id;
+	size_t nm;
+	const u32 attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT |
+			 TEE_IOCTL_PARAM_ATTR_META;
+
+	if (!num_params)
+		return ERR_PTR(-EINVAL);
+
+	if (supp->req_id == -1) {
+		if (param->attr != attr)
+			return ERR_PTR(-EINVAL);
+		id = param->u.value.a;
+		nm = 1;
+	} else {
+		id = supp->req_id;
+		nm = 0;
+	}
+
+	req = idr_find(&supp->idr, id);
+	if (!req)
+		return ERR_PTR(-ENOENT);
+
+	if ((num_params - nm) != req->num_params)
+		return ERR_PTR(-EINVAL);
+
+	req->busy = false;
+	idr_remove(&supp->idr, id);
+	supp->req_id = -1;
+	*num_meta = nm;
+
+	return req;
+}
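
supp_pop_entry() and supp_pop_req() bracket a request's trip through user space with an IDR: the id allocated on receive is the only token the supplicant has to hand back. A self-contained sketch of that cookie lifecycle (demo_* names invented; callers must serialize access, as supp->mutex does above):

	#include <linux/idr.h>

	static DEFINE_IDR(demo_idr);

	/* Hand a request to user space: returns a small positive cookie,
	 * or a negative errno. Ids start at 1, matching the code above. */
	static int demo_stash(void *req)
	{
		return idr_alloc(&demo_idr, req, 1, 0, GFP_KERNEL);
	}

	/* Resolve the cookie user space sent back, and retire it. */
	static void *demo_resolve(int id)
	{
		void *req = idr_find(&demo_idr, id);

		if (req)
			idr_remove(&demo_idr, id);
		return req;
	}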
 
 /**
@@ -211,63 +341,42 @@ int optee_supp_send(struct tee_context *ctx, u32 ret, u32 num_params,
 	struct tee_device *teedev = ctx->teedev;
 	struct optee *optee = tee_get_drvdata(teedev);
 	struct optee_supp *supp = &optee->supp;
+	struct optee_supp_req *req;
 	size_t n;
-	int rc = 0;
+	size_t num_meta;
 
-	/*
-	 * We still have exclusive access to the data since that's how we
-	 * left it when returning from optee_supp_read().
-	 */
-
-	/* See comment on mutex in optee_supp_read() above */
-	mutex_lock(&supp->supp_mutex);
-
-	if (!supp->supp_next_send) {
-		/*
-		 * Something strange is going on, supplicant shouldn't
-		 * enter optee_supp_send() in this state
-		 */
-		rc = -ENOENT;
-		goto out;
-	}
-
-	if (num_params != supp->num_params) {
-		/*
-		 * Something is wrong, let supplicant restart. Next call to
-		 * optee_supp_recv() will give an error to the requesting
-		 * thread and release it.
-		 */
-		rc = -EINVAL;
-		goto out;
+	mutex_lock(&supp->mutex);
+	req = supp_pop_req(supp, num_params, param, &num_meta);
+	mutex_unlock(&supp->mutex);
+
+	if (IS_ERR(req)) {
+		/* Something is wrong, let supplicant restart. */
+		return PTR_ERR(req);
 	}
 
 	/* Update out and in/out parameters */
-	for (n = 0; n < num_params; n++) {
-		struct tee_param *p = supp->param + n;
+	for (n = 0; n < req->num_params; n++) {
+		struct tee_param *p = req->param + n;
 
-		switch (p->attr) {
+		switch (p->attr & TEE_IOCTL_PARAM_ATTR_TYPE_MASK) {
 		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT:
 		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT:
-			p->u.value.a = param[n].u.value.a;
-			p->u.value.b = param[n].u.value.b;
-			p->u.value.c = param[n].u.value.c;
+			p->u.value.a = param[n + num_meta].u.value.a;
+			p->u.value.b = param[n + num_meta].u.value.b;
+			p->u.value.c = param[n + num_meta].u.value.c;
 			break;
 		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
 		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
-			p->u.memref.size = param[n].u.memref.size;
+			p->u.memref.size = param[n + num_meta].u.memref.size;
 			break;
 		default:
 			break;
 		}
 	}
-	supp->ret = ret;
-
-	/* Allow optee_supp_recv() above to do its work */
-	supp->supp_next_send = false;
+	req->ret = ret;
 
 	/* Let the requesting thread continue */
-	complete(&supp->data_from_supp);
-out:
-	mutex_unlock(&supp->supp_mutex);
-	return rc;
+	complete(&req->c);
+
+	return 0;
 }
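
For context, a hedged user-space sketch of the receive half of the asynchronous protocol this enables; the struct and constant names come from the uapi header, while buffer sizing, alignment handling, and error handling are simplified for illustration:

	#include <stddef.h>
	#include <stdint.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <linux/tee.h>

	/* Receive one request; params[0] is reserved for the meta parameter. */
	static int supp_recv_request(int fd, char *buf, size_t len,
				     uint64_t *req_id)
	{
		struct tee_iocl_supp_recv_arg *arg = (void *)buf;
		struct tee_ioctl_param *params =
			(struct tee_ioctl_param *)(arg + 1);
		struct tee_ioctl_buf_data data = {
			.buf_ptr = (uintptr_t)buf,
			.buf_len = len,
		};

		memset(buf, 0, len);
		arg->num_params = (len - sizeof(*arg)) / sizeof(*params);
		/* Announce async mode: type NONE plus the META bit only */
		params[0].attr = TEE_IOCTL_PARAM_ATTR_META;

		if (ioctl(fd, TEE_IOC_SUPPL_RECV, &data))
			return -1;

		/* The kernel rewrote params[0] to VALUE_INOUT | META and put
		 * the request id in its first value; arg->func says what to
		 * do, and the same id must be passed back when answering. */
		*req_id = params[0].a;
		return 0;
	}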
View file
@@ -54,6 +54,7 @@ static int tee_open(struct inode *inode, struct file *filp)
 		goto err;
 	}
 
+	kref_init(&ctx->refcount);
 	ctx->teedev = teedev;
 	INIT_LIST_HEAD(&ctx->list_shm);
 	filp->private_data = ctx;
@@ -68,19 +69,40 @@ err:
 	return rc;
 }
 
+void teedev_ctx_get(struct tee_context *ctx)
+{
+	if (ctx->releasing)
+		return;
+
+	kref_get(&ctx->refcount);
+}
+
+static void teedev_ctx_release(struct kref *ref)
+{
+	struct tee_context *ctx = container_of(ref, struct tee_context,
+					       refcount);
+
+	ctx->releasing = true;
+	ctx->teedev->desc->ops->release(ctx);
+	kfree(ctx);
+}
+
+void teedev_ctx_put(struct tee_context *ctx)
+{
+	if (ctx->releasing)
+		return;
+
+	kref_put(&ctx->refcount, teedev_ctx_release);
+}
+
+static void teedev_close_context(struct tee_context *ctx)
+{
+	tee_device_put(ctx->teedev);
+	teedev_ctx_put(ctx);
+}
+
 static int tee_release(struct inode *inode, struct file *filp)
 {
-	struct tee_context *ctx = filp->private_data;
-	struct tee_device *teedev = ctx->teedev;
-	struct tee_shm *shm;
+	teedev_close_context(filp->private_data);
 
-	ctx->teedev->desc->ops->release(ctx);
-	mutex_lock(&ctx->teedev->mutex);
-	list_for_each_entry(shm, &ctx->list_shm, link)
-		shm->ctx = NULL;
-	mutex_unlock(&ctx->teedev->mutex);
-	kfree(ctx);
-	tee_device_put(teedev);
 	return 0;
 }
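
tee_context lifetime now follows the usual kref idiom, with the releasing flag breaking the cycle where releasing a context frees shared-memory objects that themselves hold context references. A minimal sketch of the same shape (demo_* names invented):

	#include <linux/kref.h>
	#include <linux/slab.h>

	struct demo_obj {
		struct kref refcount;
		bool releasing;
	};

	static void demo_release(struct kref *ref)
	{
		struct demo_obj *obj =
			container_of(ref, struct demo_obj, refcount);

		/* Teardown run here may itself call demo_put(); the flag
		 * turns those nested get/put calls into no-ops. */
		obj->releasing = true;
		/* ... free resources that may hold back-references ... */
		kfree(obj);
	}

	static void demo_put(struct demo_obj *obj)
	{
		if (obj->releasing)
			return;
		kref_put(&obj->refcount, demo_release);
	}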
@@ -114,8 +136,6 @@ static int tee_ioctl_shm_alloc(struct tee_context *ctx,
 	if (data.flags)
 		return -EINVAL;
 
-	data.id = -1;
-
 	shm = tee_shm_alloc(ctx, data.size, TEE_SHM_MAPPED | TEE_SHM_DMA_BUF);
 	if (IS_ERR(shm))
 		return PTR_ERR(shm);
@@ -138,6 +158,43 @@
 	return ret;
 }
 
+static int
+tee_ioctl_shm_register(struct tee_context *ctx,
+		       struct tee_ioctl_shm_register_data __user *udata)
+{
+	long ret;
+	struct tee_ioctl_shm_register_data data;
+	struct tee_shm *shm;
+
+	if (copy_from_user(&data, udata, sizeof(data)))
+		return -EFAULT;
+
+	/* Currently no input flags are supported */
+	if (data.flags)
+		return -EINVAL;
+
+	shm = tee_shm_register(ctx, data.addr, data.length,
+			       TEE_SHM_DMA_BUF | TEE_SHM_USER_MAPPED);
+	if (IS_ERR(shm))
+		return PTR_ERR(shm);
+
+	data.id = shm->id;
+	data.flags = shm->flags;
+	data.length = shm->size;
+
+	if (copy_to_user(udata, &data, sizeof(data)))
+		ret = -EFAULT;
+	else
+		ret = tee_shm_get_fd(shm);
+
+	/*
+	 * When user space closes the file descriptor the shared memory
+	 * should be freed or if tee_shm_get_fd() failed then it will
+	 * be freed immediately.
+	 */
+	tee_shm_put(shm);
+	return ret;
+}
+
 static int params_from_user(struct tee_context *ctx, struct tee_param *params,
 			    size_t num_params,
 			    struct tee_ioctl_param __user *uparams)
@@ -152,11 +209,11 @@ static int params_from_user(struct tee_context *ctx, struct tee_param *params,
 			return -EFAULT;
 
 		/* All unused attribute bits have to be zero */
-		if (ip.attr & ~TEE_IOCTL_PARAM_ATTR_TYPE_MASK)
+		if (ip.attr & ~TEE_IOCTL_PARAM_ATTR_MASK)
 			return -EINVAL;
 
 		params[n].attr = ip.attr;
-		switch (ip.attr) {
+		switch (ip.attr & TEE_IOCTL_PARAM_ATTR_TYPE_MASK) {
 		case TEE_IOCTL_PARAM_ATTR_TYPE_NONE:
 		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT:
 			break;
@@ -221,18 +278,6 @@ static int params_to_user(struct tee_ioctl_param __user *uparams,
 	return 0;
 }
 
-static bool param_is_memref(struct tee_param *param)
-{
-	switch (param->attr & TEE_IOCTL_PARAM_ATTR_TYPE_MASK) {
-	case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT:
-	case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
-	case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
-		return true;
-	default:
-		return false;
-	}
-}
-
 static int tee_ioctl_open_session(struct tee_context *ctx,
 				  struct tee_ioctl_buf_data __user *ubuf)
 {
@@ -296,7 +341,7 @@ out:
 	if (params) {
 		/* Decrease ref count for all valid shared memory pointers */
 		for (n = 0; n < arg.num_params; n++)
-			if (param_is_memref(params + n) &&
+			if (tee_param_is_memref(params + n) &&
 			    params[n].u.memref.shm)
 				tee_shm_put(params[n].u.memref.shm);
 		kfree(params);
@@ -358,7 +403,7 @@ out:
 	if (params) {
 		/* Decrease ref count for all valid shared memory pointers */
 		for (n = 0; n < arg.num_params; n++)
-			if (param_is_memref(params + n) &&
+			if (tee_param_is_memref(params + n) &&
 			    params[n].u.memref.shm)
 				tee_shm_put(params[n].u.memref.shm);
 		kfree(params);
@@ -406,8 +451,8 @@ static int params_to_supp(struct tee_context *ctx,
 		struct tee_ioctl_param ip;
 		struct tee_param *p = params + n;
 
-		ip.attr = p->attr & TEE_IOCTL_PARAM_ATTR_TYPE_MASK;
-		switch (p->attr) {
+		ip.attr = p->attr;
+		switch (p->attr & TEE_IOCTL_PARAM_ATTR_TYPE_MASK) {
 		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT:
 		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT:
 			ip.a = p->u.value.a;
@@ -471,6 +516,10 @@ static int tee_ioctl_supp_recv(struct tee_context *ctx,
 	if (!params)
 		return -ENOMEM;
 
+	rc = params_from_user(ctx, params, num_params, uarg->params);
+	if (rc)
+		goto out;
+
 	rc = ctx->teedev->desc->ops->supp_recv(ctx, &func, &num_params, params);
 	if (rc)
 		goto out;
@@ -500,11 +549,11 @@ static int params_from_supp(struct tee_param *params, size_t num_params,
 			return -EFAULT;
 
 		/* All unused attribute bits have to be zero */
-		if (ip.attr & ~TEE_IOCTL_PARAM_ATTR_TYPE_MASK)
+		if (ip.attr & ~TEE_IOCTL_PARAM_ATTR_MASK)
 			return -EINVAL;
 
 		p->attr = ip.attr;
-		switch (ip.attr) {
+		switch (ip.attr & TEE_IOCTL_PARAM_ATTR_TYPE_MASK) {
 		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT:
 		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT:
 			/* Only out and in/out values can be updated */
@@ -586,6 +635,8 @@ static long tee_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 		return tee_ioctl_version(ctx, uarg);
 	case TEE_IOC_SHM_ALLOC:
 		return tee_ioctl_shm_alloc(ctx, uarg);
+	case TEE_IOC_SHM_REGISTER:
+		return tee_ioctl_shm_register(ctx, uarg);
 	case TEE_IOC_OPEN_SESSION:
 		return tee_ioctl_open_session(ctx, uarg);
 	case TEE_IOC_INVOKE:
View file
@@ -21,68 +21,15 @@
 #include <linux/mutex.h>
 #include <linux/types.h>
 
-struct tee_device;
-
-/**
- * struct tee_shm - shared memory object
- * @teedev:	device used to allocate the object
- * @ctx:	context using the object, if NULL the context is gone
- * @link:	link element
- * @paddr:	physical address of the shared memory
- * @kaddr:	virtual address of the shared memory
- * @size:	size of shared memory
- * @dmabuf:	dmabuf used for exporting to user space
- * @flags:	defined by TEE_SHM_* in tee_drv.h
- * @id:		unique id of a shared memory object on this device
- */
-struct tee_shm {
-	struct tee_device *teedev;
-	struct tee_context *ctx;
-	struct list_head link;
-	phys_addr_t paddr;
-	void *kaddr;
-	size_t size;
-	struct dma_buf *dmabuf;
-	u32 flags;
-	int id;
-};
-
-struct tee_shm_pool_mgr;
-
-/**
- * struct tee_shm_pool_mgr_ops - shared memory pool manager operations
- * @alloc:	called when allocating shared memory
- * @free:	called when freeing shared memory
- */
-struct tee_shm_pool_mgr_ops {
-	int (*alloc)(struct tee_shm_pool_mgr *poolmgr, struct tee_shm *shm,
-		     size_t size);
-	void (*free)(struct tee_shm_pool_mgr *poolmgr, struct tee_shm *shm);
-};
-
-/**
- * struct tee_shm_pool_mgr - shared memory manager
- * @ops:		operations
- * @private_data:	private data for the shared memory manager
- */
-struct tee_shm_pool_mgr {
-	const struct tee_shm_pool_mgr_ops *ops;
-	void *private_data;
-};
-
 /**
  * struct tee_shm_pool - shared memory pool
  * @private_mgr:	pool manager for shared memory only between kernel
  *			and secure world
  * @dma_buf_mgr:	pool manager for shared memory exported to user space
- * @destroy:		called when destroying the pool
- * @private_data:	private data for the pool
  */
 struct tee_shm_pool {
-	struct tee_shm_pool_mgr private_mgr;
-	struct tee_shm_pool_mgr dma_buf_mgr;
-	void (*destroy)(struct tee_shm_pool *pool);
-	void *private_data;
+	struct tee_shm_pool_mgr *private_mgr;
+	struct tee_shm_pool_mgr *dma_buf_mgr;
 };
 
 #define TEE_DEVICE_FLAG_REGISTERED	0x1
@@ -126,4 +73,7 @@ int tee_shm_get_fd(struct tee_shm *shm);
 bool tee_device_get(struct tee_device *teedev);
 void tee_device_put(struct tee_device *teedev);
 
+void teedev_ctx_get(struct tee_context *ctx);
+void teedev_ctx_put(struct tee_context *ctx);
+
 #endif /*TEE_PRIVATE_H*/
View file
@@ -23,7 +23,6 @@
 static void tee_shm_release(struct tee_shm *shm)
 {
 	struct tee_device *teedev = shm->teedev;
-	struct tee_shm_pool_mgr *poolm;
 
 	mutex_lock(&teedev->mutex);
 	idr_remove(&teedev->idr, shm->id);
@@ -31,12 +30,32 @@ static void tee_shm_release(struct tee_shm *shm)
 		list_del(&shm->link);
 	mutex_unlock(&teedev->mutex);
 
-	if (shm->flags & TEE_SHM_DMA_BUF)
-		poolm = &teedev->pool->dma_buf_mgr;
-	else
-		poolm = &teedev->pool->private_mgr;
+	if (shm->flags & TEE_SHM_POOL) {
+		struct tee_shm_pool_mgr *poolm;
+
+		if (shm->flags & TEE_SHM_DMA_BUF)
+			poolm = teedev->pool->dma_buf_mgr;
+		else
+			poolm = teedev->pool->private_mgr;
+
+		poolm->ops->free(poolm, shm);
+	} else if (shm->flags & TEE_SHM_REGISTER) {
+		size_t n;
+		int rc = teedev->desc->ops->shm_unregister(shm->ctx, shm);
+
+		if (rc)
+			dev_err(teedev->dev.parent,
+				"unregister shm %p failed: %d", shm, rc);
+
+		for (n = 0; n < shm->num_pages; n++)
+			put_page(shm->pages[n]);
+
+		kfree(shm->pages);
+	}
 
-	poolm->ops->free(poolm, shm);
+	if (shm->ctx)
+		teedev_ctx_put(shm->ctx);
+
 	kfree(shm);
 
 	tee_device_put(teedev);
@@ -76,6 +95,10 @@ static int tee_shm_op_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
 	struct tee_shm *shm = dmabuf->priv;
 	size_t size = vma->vm_end - vma->vm_start;
 
+	/* Refuse sharing shared memory provided by application */
+	if (shm->flags & TEE_SHM_REGISTER)
+		return -EINVAL;
+
 	return remap_pfn_range(vma, vma->vm_start, shm->paddr >> PAGE_SHIFT,
 			       size, vma->vm_page_prot);
 }
@@ -89,26 +112,20 @@ static const struct dma_buf_ops tee_shm_dma_buf_ops = {
 	.mmap = tee_shm_op_mmap,
 };
 
-/**
- * tee_shm_alloc() - Allocate shared memory
- * @ctx:	Context that allocates the shared memory
- * @size:	Requested size of shared memory
- * @flags:	Flags setting properties for the requested shared memory.
- *
- * Memory allocated as global shared memory is automatically freed when the
- * TEE file pointer is closed. The @flags field uses the bits defined by
- * TEE_SHM_* in <linux/tee_drv.h>. TEE_SHM_MAPPED must currently always be
- * set. If TEE_SHM_DMA_BUF global shared memory will be allocated and
- * associated with a dma-buf handle, else driver private memory.
- */
-struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags)
+static struct tee_shm *__tee_shm_alloc(struct tee_context *ctx,
+				       struct tee_device *teedev,
+				       size_t size, u32 flags)
 {
-	struct tee_device *teedev = ctx->teedev;
 	struct tee_shm_pool_mgr *poolm = NULL;
 	struct tee_shm *shm;
 	void *ret;
 	int rc;
 
+	if (ctx && ctx->teedev != teedev) {
+		dev_err(teedev->dev.parent, "ctx and teedev mismatch\n");
+		return ERR_PTR(-EINVAL);
+	}
+
 	if (!(flags & TEE_SHM_MAPPED)) {
 		dev_err(teedev->dev.parent,
 			"only mapped allocations supported\n");
@@ -135,13 +152,13 @@ struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags)
 		goto err_dev_put;
 	}
 
-	shm->flags = flags;
+	shm->flags = flags | TEE_SHM_POOL;
 	shm->teedev = teedev;
 	shm->ctx = ctx;
 	if (flags & TEE_SHM_DMA_BUF)
-		poolm = &teedev->pool->dma_buf_mgr;
+		poolm = teedev->pool->dma_buf_mgr;
 	else
-		poolm = &teedev->pool->private_mgr;
+		poolm = teedev->pool->private_mgr;
 
 	rc = poolm->ops->alloc(poolm, shm, size);
 	if (rc) {
@@ -171,9 +188,13 @@ struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags)
 			goto err_rem;
 		}
 	}
-	mutex_lock(&teedev->mutex);
-	list_add_tail(&shm->link, &ctx->list_shm);
-	mutex_unlock(&teedev->mutex);
+
+	if (ctx) {
+		teedev_ctx_get(ctx);
+		mutex_lock(&teedev->mutex);
+		list_add_tail(&shm->link, &ctx->list_shm);
+		mutex_unlock(&teedev->mutex);
+	}
 
 	return shm;
 err_rem:
@@ -188,8 +209,145 @@ err_dev_put:
 	tee_device_put(teedev);
 	return ret;
 }
+
+/**
+ * tee_shm_alloc() - Allocate shared memory
+ * @ctx:	Context that allocates the shared memory
+ * @size:	Requested size of shared memory
+ * @flags:	Flags setting properties for the requested shared memory.
+ *
+ * Memory allocated as global shared memory is automatically freed when the
+ * TEE file pointer is closed. The @flags field uses the bits defined by
+ * TEE_SHM_* in <linux/tee_drv.h>. TEE_SHM_MAPPED must currently always be
+ * set. If TEE_SHM_DMA_BUF global shared memory will be allocated and
+ * associated with a dma-buf handle, else driver private memory.
+ */
+struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags)
+{
+	return __tee_shm_alloc(ctx, ctx->teedev, size, flags);
+}
 EXPORT_SYMBOL_GPL(tee_shm_alloc);
 
+struct tee_shm *tee_shm_priv_alloc(struct tee_device *teedev, size_t size)
+{
+	return __tee_shm_alloc(NULL, teedev, size, TEE_SHM_MAPPED);
+}
+EXPORT_SYMBOL_GPL(tee_shm_priv_alloc);
+
+struct tee_shm *tee_shm_register(struct tee_context *ctx, unsigned long addr,
+				 size_t length, u32 flags)
+{
+	struct tee_device *teedev = ctx->teedev;
+	const u32 req_flags = TEE_SHM_DMA_BUF | TEE_SHM_USER_MAPPED;
+	struct tee_shm *shm;
+	void *ret;
+	int rc;
+	int num_pages;
+	unsigned long start;
+
+	if (flags != req_flags)
+		return ERR_PTR(-ENOTSUPP);
+
+	if (!tee_device_get(teedev))
+		return ERR_PTR(-EINVAL);
+
+	if (!teedev->desc->ops->shm_register ||
+	    !teedev->desc->ops->shm_unregister) {
+		tee_device_put(teedev);
+		return ERR_PTR(-ENOTSUPP);
+	}
+
+	teedev_ctx_get(ctx);
+
+	shm = kzalloc(sizeof(*shm), GFP_KERNEL);
+	if (!shm) {
+		ret = ERR_PTR(-ENOMEM);
+		goto err;
+	}
+
+	shm->flags = flags | TEE_SHM_REGISTER;
+	shm->teedev = teedev;
+	shm->ctx = ctx;
+	shm->id = -1;
+	start = rounddown(addr, PAGE_SIZE);
+	shm->offset = addr - start;
+	shm->size = length;
+	num_pages = (roundup(addr + length, PAGE_SIZE) - start) / PAGE_SIZE;
+	shm->pages = kcalloc(num_pages, sizeof(*shm->pages), GFP_KERNEL);
+	if (!shm->pages) {
+		ret = ERR_PTR(-ENOMEM);
+		goto err;
+	}
+
+	rc = get_user_pages_fast(start, num_pages, 1, shm->pages);
+	if (rc > 0)
+		shm->num_pages = rc;
+	if (rc != num_pages) {
+		if (rc >= 0)
+			rc = -ENOMEM;
+		ret = ERR_PTR(rc);
+		goto err;
+	}
+
+	mutex_lock(&teedev->mutex);
+	shm->id = idr_alloc(&teedev->idr, shm, 1, 0, GFP_KERNEL);
+	mutex_unlock(&teedev->mutex);
+
+	if (shm->id < 0) {
+		ret = ERR_PTR(shm->id);
+		goto err;
+	}
+
+	rc = teedev->desc->ops->shm_register(ctx, shm, shm->pages,
+					     shm->num_pages, start);
+	if (rc) {
+		ret = ERR_PTR(rc);
+		goto err;
+	}
+
+	if (flags & TEE_SHM_DMA_BUF) {
+		DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
+
+		exp_info.ops = &tee_shm_dma_buf_ops;
+		exp_info.size = shm->size;
+		exp_info.flags = O_RDWR;
+		exp_info.priv = shm;
+
+		shm->dmabuf = dma_buf_export(&exp_info);
+		if (IS_ERR(shm->dmabuf)) {
+			ret = ERR_CAST(shm->dmabuf);
+			teedev->desc->ops->shm_unregister(ctx, shm);
+			goto err;
+		}
+	}
+
+	mutex_lock(&teedev->mutex);
+	list_add_tail(&shm->link, &ctx->list_shm);
+	mutex_unlock(&teedev->mutex);
+
+	return shm;
+err:
+	if (shm) {
+		size_t n;
+
+		if (shm->id >= 0) {
+			mutex_lock(&teedev->mutex);
+			idr_remove(&teedev->idr, shm->id);
+			mutex_unlock(&teedev->mutex);
+		}
+		if (shm->pages) {
+			for (n = 0; n < shm->num_pages; n++)
+				put_page(shm->pages[n]);
+			kfree(shm->pages);
+		}
+	}
+	kfree(shm);
+	teedev_ctx_put(ctx);
+	tee_device_put(teedev);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(tee_shm_register);
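
The registration path pins whole pages, so the byte address and length are first widened to a page-aligned window. A worked example of that arithmetic, with invented values and assuming 4 KiB pages:

	/* addr = 0x40001800, length = 0x2000, PAGE_SIZE = 0x1000:
	 *
	 *   start     = rounddown(0x40001800, 0x1000)              = 0x40001000
	 *   offset    = 0x40001800 - 0x40001000                    = 0x800
	 *   num_pages = (roundup(0x40003800, 0x1000) - start) / 0x1000
	 *             = (0x40004000 - 0x40001000) / 0x1000         = 3
	 *
	 * An unaligned 8 KiB buffer therefore pins three pages, and
	 * shm->offset records where the data starts inside the first page. */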
 
 /**
  * tee_shm_get_fd() - Increase reference count and return file descriptor
  * @shm:	Shared memory handle
@@ -197,10 +355,9 @@ EXPORT_SYMBOL_GPL(tee_shm_alloc);
  */
 int tee_shm_get_fd(struct tee_shm *shm)
 {
-	u32 req_flags = TEE_SHM_MAPPED | TEE_SHM_DMA_BUF;
 	int fd;
 
-	if ((shm->flags & req_flags) != req_flags)
+	if (!(shm->flags & TEE_SHM_DMA_BUF))
 		return -EINVAL;
 
 	fd = dma_buf_fd(shm->dmabuf, O_CLOEXEC);
@@ -238,6 +395,8 @@ EXPORT_SYMBOL_GPL(tee_shm_free);
  */
 int tee_shm_va2pa(struct tee_shm *shm, void *va, phys_addr_t *pa)
 {
+	if (!(shm->flags & TEE_SHM_MAPPED))
+		return -EINVAL;
 	/* Check that we're in the range of the shm */
 	if ((char *)va < (char *)shm->kaddr)
 		return -EINVAL;
@@ -258,6 +417,8 @@ EXPORT_SYMBOL_GPL(tee_shm_va2pa);
  */
 int tee_shm_pa2va(struct tee_shm *shm, phys_addr_t pa, void **va)
 {
+	if (!(shm->flags & TEE_SHM_MAPPED))
+		return -EINVAL;
 	/* Check that we're in the range of the shm */
 	if (pa < shm->paddr)
 		return -EINVAL;
@@ -284,6 +445,8 @@ EXPORT_SYMBOL_GPL(tee_shm_pa2va);
  */
 void *tee_shm_get_va(struct tee_shm *shm, size_t offs)
 {
+	if (!(shm->flags & TEE_SHM_MAPPED))
+		return ERR_PTR(-EINVAL);
 	if (offs >= shm->size)
 		return ERR_PTR(-EINVAL);
 	return (char *)shm->kaddr + offs;
@@ -335,17 +498,6 @@ struct tee_shm *tee_shm_get_from_id(struct tee_context *ctx, int id)
 }
 EXPORT_SYMBOL_GPL(tee_shm_get_from_id);
 
-/**
- * tee_shm_get_id() - Get id of a shared memory object
- * @shm:	Shared memory handle
- * @returns id
- */
-int tee_shm_get_id(struct tee_shm *shm)
-{
-	return shm->id;
-}
-EXPORT_SYMBOL_GPL(tee_shm_get_id);
-
 /**
  * tee_shm_put() - Decrease reference count on a shared memory handle
  * @shm:	Shared memory handle
View file
@@ -44,49 +44,18 @@ static void pool_op_gen_free(struct tee_shm_pool_mgr *poolm,
 	shm->kaddr = NULL;
 }
 
+static void pool_op_gen_destroy_poolmgr(struct tee_shm_pool_mgr *poolm)
+{
+	gen_pool_destroy(poolm->private_data);
+	kfree(poolm);
+}
+
 static const struct tee_shm_pool_mgr_ops pool_ops_generic = {
 	.alloc = pool_op_gen_alloc,
 	.free = pool_op_gen_free,
+	.destroy_poolmgr = pool_op_gen_destroy_poolmgr,
 };
 
-static void pool_res_mem_destroy(struct tee_shm_pool *pool)
-{
-	gen_pool_destroy(pool->private_mgr.private_data);
-	gen_pool_destroy(pool->dma_buf_mgr.private_data);
-}
-
-static int pool_res_mem_mgr_init(struct tee_shm_pool_mgr *mgr,
-				 struct tee_shm_pool_mem_info *info,
-				 int min_alloc_order)
-{
-	size_t page_mask = PAGE_SIZE - 1;
-	struct gen_pool *genpool = NULL;
-	int rc;
-
-	/*
-	 * Start and end must be page aligned
-	 */
-	if ((info->vaddr & page_mask) || (info->paddr & page_mask) ||
-	    (info->size & page_mask))
-		return -EINVAL;
-
-	genpool = gen_pool_create(min_alloc_order, -1);
-	if (!genpool)
-		return -ENOMEM;
-
-	gen_pool_set_algo(genpool, gen_pool_best_fit, NULL);
-	rc = gen_pool_add_virt(genpool, info->vaddr, info->paddr, info->size,
-			       -1);
-	if (rc) {
-		gen_pool_destroy(genpool);
-		return rc;
-	}
-
-	mgr->private_data = genpool;
-	mgr->ops = &pool_ops_generic;
-
-	return 0;
-}
-
 /**
  * tee_shm_pool_alloc_res_mem() - Create a shared memory pool from reserved
  * memory range
@@ -104,43 +73,110 @@ struct tee_shm_pool *
 tee_shm_pool_alloc_res_mem(struct tee_shm_pool_mem_info *priv_info,
			   struct tee_shm_pool_mem_info *dmabuf_info)
 {
-	struct tee_shm_pool *pool = NULL;
-	int ret;
-
-	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
-	if (!pool) {
-		ret = -ENOMEM;
-		goto err;
-	}
+	struct tee_shm_pool_mgr *priv_mgr;
+	struct tee_shm_pool_mgr *dmabuf_mgr;
+	void *rc;
 
 	/*
 	 * Create the pool for driver private shared memory
 	 */
-	ret = pool_res_mem_mgr_init(&pool->private_mgr, priv_info,
-				    3 /* 8 byte aligned */);
-	if (ret)
-		goto err;
+	rc = tee_shm_pool_mgr_alloc_res_mem(priv_info->vaddr, priv_info->paddr,
+					    priv_info->size,
+					    3 /* 8 byte aligned */);
+	if (IS_ERR(rc))
+		return rc;
+	priv_mgr = rc;
 
 	/*
 	 * Create the pool for dma_buf shared memory
 	 */
-	ret = pool_res_mem_mgr_init(&pool->dma_buf_mgr, dmabuf_info,
-				    PAGE_SHIFT);
-	if (ret)
-		goto err;
+	rc = tee_shm_pool_mgr_alloc_res_mem(dmabuf_info->vaddr,
+					    dmabuf_info->paddr,
+					    dmabuf_info->size, PAGE_SHIFT);
+	if (IS_ERR(rc))
+		goto err_free_priv_mgr;
+	dmabuf_mgr = rc;
 
-	pool->destroy = pool_res_mem_destroy;
-	return pool;
-err:
-	if (ret == -ENOMEM)
-		pr_err("%s: can't allocate memory for res_mem shared memory pool\n", __func__);
-	if (pool && pool->private_mgr.private_data)
-		gen_pool_destroy(pool->private_mgr.private_data);
-	kfree(pool);
-	return ERR_PTR(ret);
+	rc = tee_shm_pool_alloc(priv_mgr, dmabuf_mgr);
+	if (IS_ERR(rc))
+		goto err_free_dmabuf_mgr;
+
+	return rc;
+
+err_free_dmabuf_mgr:
+	tee_shm_pool_mgr_destroy(dmabuf_mgr);
+err_free_priv_mgr:
+	tee_shm_pool_mgr_destroy(priv_mgr);
+
+	return rc;
 }
 EXPORT_SYMBOL_GPL(tee_shm_pool_alloc_res_mem);
 
+struct tee_shm_pool_mgr *tee_shm_pool_mgr_alloc_res_mem(unsigned long vaddr,
+							phys_addr_t paddr,
+							size_t size,
+							int min_alloc_order)
+{
+	const size_t page_mask = PAGE_SIZE - 1;
+	struct tee_shm_pool_mgr *mgr;
+	int rc;
+
+	/* Start and end must be page aligned */
+	if (vaddr & page_mask || paddr & page_mask || size & page_mask)
+		return ERR_PTR(-EINVAL);
+
+	mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);
+	if (!mgr)
+		return ERR_PTR(-ENOMEM);
+
+	mgr->private_data = gen_pool_create(min_alloc_order, -1);
+	if (!mgr->private_data) {
+		rc = -ENOMEM;
+		goto err;
+	}
+
+	gen_pool_set_algo(mgr->private_data, gen_pool_best_fit, NULL);
+	rc = gen_pool_add_virt(mgr->private_data, vaddr, paddr, size, -1);
+	if (rc) {
+		gen_pool_destroy(mgr->private_data);
+		goto err;
+	}
+
+	mgr->ops = &pool_ops_generic;
+
+	return mgr;
+err:
+	kfree(mgr);
+
+	return ERR_PTR(rc);
+}
+EXPORT_SYMBOL_GPL(tee_shm_pool_mgr_alloc_res_mem);
+
+static bool check_mgr_ops(struct tee_shm_pool_mgr *mgr)
+{
+	return mgr && mgr->ops && mgr->ops->alloc && mgr->ops->free &&
+		mgr->ops->destroy_poolmgr;
+}
+
+struct tee_shm_pool *tee_shm_pool_alloc(struct tee_shm_pool_mgr *priv_mgr,
+					struct tee_shm_pool_mgr *dmabuf_mgr)
+{
+	struct tee_shm_pool *pool;
+
+	if (!check_mgr_ops(priv_mgr) || !check_mgr_ops(dmabuf_mgr))
+		return ERR_PTR(-EINVAL);
+
+	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
+	if (!pool)
+		return ERR_PTR(-ENOMEM);
+
+	pool->private_mgr = priv_mgr;
+	pool->dma_buf_mgr = dmabuf_mgr;
+
+	return pool;
+}
+EXPORT_SYMBOL_GPL(tee_shm_pool_alloc);
+
 /**
  * tee_shm_pool_free() - Free a shared memory pool
  * @pool:	The shared memory pool to free
@@ -150,7 +186,10 @@ EXPORT_SYMBOL_GPL(tee_shm_pool_alloc_res_mem);
  */
 void tee_shm_pool_free(struct tee_shm_pool *pool)
 {
-	pool->destroy(pool);
+	if (pool->private_mgr)
+		tee_shm_pool_mgr_destroy(pool->private_mgr);
+	if (pool->dma_buf_mgr)
+		tee_shm_pool_mgr_destroy(pool->dma_buf_mgr);
 	kfree(pool);
 }
 EXPORT_SYMBOL_GPL(tee_shm_pool_free);
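
With destroy moved into the manager ops, a driver now builds its pool by allocating one manager per memory range and combining them. A hedged sketch of that composition (the demo_ name, addresses, and the 1 MiB split are invented; SZ_1M comes from <linux/sizes.h>):

	#include <linux/err.h>
	#include <linux/mm.h>
	#include <linux/sizes.h>
	#include <linux/tee_drv.h>

	/* Carve one reserved-memory window (va/pa mapping established
	 * elsewhere) into a private and a dma-buf manager, then combine
	 * them into a pool. */
	static struct tee_shm_pool *demo_make_pool(unsigned long va,
						   phys_addr_t pa)
	{
		struct tee_shm_pool_mgr *priv_mgr, *dmabuf_mgr;
		struct tee_shm_pool *pool;

		priv_mgr = tee_shm_pool_mgr_alloc_res_mem(va, pa, SZ_1M,
							  3 /* 8 byte aligned */);
		if (IS_ERR(priv_mgr))
			return ERR_CAST(priv_mgr);

		dmabuf_mgr = tee_shm_pool_mgr_alloc_res_mem(va + SZ_1M,
							    pa + SZ_1M, SZ_1M,
							    PAGE_SHIFT);
		if (IS_ERR(dmabuf_mgr)) {
			tee_shm_pool_mgr_destroy(priv_mgr);
			return ERR_CAST(dmabuf_mgr);
		}

		pool = tee_shm_pool_alloc(priv_mgr, dmabuf_mgr);
		if (IS_ERR(pool)) {
			tee_shm_pool_mgr_destroy(dmabuf_mgr);
			tee_shm_pool_mgr_destroy(priv_mgr);
		}
		return pool;
	}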
View file
@@ -68,7 +68,6 @@ const struct consw dummy_con = {
 	.con_switch =		DUMMY,
 	.con_blank =		DUMMY,
 	.con_font_set =		DUMMY,
-	.con_font_get =		DUMMY,
 	.con_font_default =	DUMMY,
 	.con_font_copy =	DUMMY,
 	.con_set_palette =	DUMMY,
View file
@@ -1120,7 +1120,7 @@ static int atmel_lcdfb_of_init(struct atmel_lcdfb_info *sinfo)
 		goto put_display_node;
 	}
 
-	timings_np = of_find_node_by_name(display_np, "display-timings");
+	timings_np = of_get_child_by_name(display_np, "display-timings");
 	if (!timings_np) {
 		dev_err(dev, "failed to find display-timings node\n");
 		ret = -ENODEV;
@@ -1141,6 +1141,12 @@ static int atmel_lcdfb_of_init(struct atmel_lcdfb_info *sinfo)
 		fb_add_videomode(&fb_vm, &info->modelist);
 	}
 
+	/*
+	 * FIXME: Make sure we are not referencing any fields in display_np
+	 * and timings_np and drop our references to them before returning to
+	 * avoid leaking the nodes on probe deferral and driver unbind.
+	 */
+
 	return 0;
 
 put_timings_node:
View file
@@ -1292,8 +1292,11 @@ next_slot:
 		leaf = path->nodes[0];
 		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
 			ret = btrfs_next_leaf(root, path);
-			if (ret < 0)
+			if (ret < 0) {
+				if (cow_start != (u64)-1)
+					cur_offset = cow_start;
 				goto error;
+			}
 			if (ret > 0)
 				break;
 			leaf = path->nodes[0];
View file
@@ -26,6 +26,7 @@
 #include "print-tree.h"
 #include "backref.h"
 #include "hash.h"
+#include "inode-map.h"
 
 /* magic values for the inode_only field in btrfs_log_inode:
  *
@@ -2445,6 +2446,9 @@ static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans,
 							 next);
 				btrfs_wait_tree_block_writeback(next);
 				btrfs_tree_unlock(next);
+			} else {
+				if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &next->bflags))
+					clear_extent_buffer_dirty(next);
 			}
 
 			WARN_ON(root_owner !=
@@ -2524,6 +2528,9 @@ static noinline int walk_up_log_tree(struct btrfs_trans_handle *trans,
 							 next);
 				btrfs_wait_tree_block_writeback(next);
 				btrfs_tree_unlock(next);
+			} else {
+				if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &next->bflags))
+					clear_extent_buffer_dirty(next);
 			}
 
 			WARN_ON(root_owner != BTRFS_TREE_LOG_OBJECTID);
@@ -2600,6 +2607,9 @@ static int walk_log_tree(struct btrfs_trans_handle *trans,
 			clean_tree_block(trans, log->fs_info, next);
 			btrfs_wait_tree_block_writeback(next);
 			btrfs_tree_unlock(next);
+		} else {
+			if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &next->bflags))
+				clear_extent_buffer_dirty(next);
 		}
 
 		WARN_ON(log->root_key.objectid !=
@@ -5514,6 +5524,23 @@ again:
 						      path);
 		}
 
+		if (!ret && wc.stage == LOG_WALK_REPLAY_ALL) {
+			struct btrfs_root *root = wc.replay_dest;
+
+			btrfs_release_path(path);
+
+			/*
+			 * We have just replayed everything, and the highest
+			 * objectid of fs roots probably has changed in case
+			 * some inode_item's got replayed.
+			 *
+			 * root->objectid_mutex is not acquired as log replay
+			 * could only happen during mount.
+			 */
+			ret = btrfs_find_highest_objectid(root,
+						  &root->highest_objectid);
+		}
+
 		key.offset = found_key.offset - 1;
 		wc.replay_dest->log_root = NULL;
 		free_extent_buffer(log->node);
View file
@@ -688,6 +688,7 @@ __acquires(bitlock)
 	}
 
 	ext4_unlock_group(sb, grp);
+	ext4_commit_super(sb, 1);
 	ext4_handle_error(sb);
 	/*
 	 * We only get here in the ERRORS_RO case; relocking the group
View file
@@ -169,11 +169,6 @@ static inline bool wq_has_sleeper(wait_queue_head_t *wq)
 	return waitqueue_active(wq);
 }
 
-static inline void inode_nohighmem(struct inode *inode)
-{
-	mapping_set_gfp_mask(inode->i_mapping, GFP_USER);
-}
-
 /**
  * current_time - Return FS time
  * @inode: inode.
View file
@@ -2015,6 +2015,9 @@ static const char *path_init(struct nameidata *nd, unsigned flags)
 	int retval = 0;
 	const char *s = nd->name->name;
 
+	if (!*s)
+		flags &= ~LOOKUP_RCU;
+
 	nd->last_type = LAST_ROOT; /* if there are only slashes... */
 	nd->flags = flags | LOOKUP_JUMPED | LOOKUP_PARENT;
 	nd->depth = 0;
View file
@@ -32,7 +32,7 @@ static inline void kaiser_init(void)
 {
 }
 static inline int kaiser_add_mapping(unsigned long addr,
-				     unsigned long size, unsigned long flags)
+				     unsigned long size, u64 flags)
 {
 	return 0;
 }
View file
@@ -17,6 +17,7 @@
 
 #include <linux/types.h>
 #include <linux/idr.h>
+#include <linux/kref.h>
 #include <linux/list.h>
 #include <linux/tee.h>
 
@@ -25,8 +26,12 @@
  * specific TEE driver.
  */
 
-#define TEE_SHM_MAPPED		0x1	/* Memory mapped by the kernel */
-#define TEE_SHM_DMA_BUF		0x2	/* Memory with dma-buf handle */
+#define TEE_SHM_MAPPED		BIT(0)	/* Memory mapped by the kernel */
+#define TEE_SHM_DMA_BUF		BIT(1)	/* Memory with dma-buf handle */
+#define TEE_SHM_EXT_DMA_BUF	BIT(2)	/* Memory with dma-buf handle */
+#define TEE_SHM_REGISTER	BIT(3)	/* Memory registered in secure world */
+#define TEE_SHM_USER_MAPPED	BIT(4)	/* Memory mapped in user space */
+#define TEE_SHM_POOL		BIT(5)	/* Memory allocated from pool */
 
 struct device;
 struct tee_device;
@@ -38,11 +43,17 @@ struct tee_shm_pool;
  * @teedev:	pointer to this driver's struct tee_device
  * @list_shm:	List of shared memory objects owned by this context
  * @data:	driver specific context data, managed by the driver
+ * @refcount:	reference counter for this structure
+ * @releasing:	flag that indicates if context is being released right now.
+ *		It is needed to break circular dependency on context during
+ *		shared memory release.
  */
 struct tee_context {
 	struct tee_device *teedev;
 	struct list_head list_shm;
 	void *data;
+	struct kref refcount;
+	bool releasing;
 };
 
 struct tee_param_memref {
@@ -76,6 +87,8 @@ struct tee_param {
  * @cancel_req:	request cancel of an ongoing invoke or open
  * @supp_recv:	called for supplicant to get a command
  * @supp_send:	called for supplicant to send a response
+ * @shm_register: register shared memory buffer in TEE
+ * @shm_unregister: unregister shared memory buffer in TEE
  */
 struct tee_driver_ops {
 	void (*get_version)(struct tee_device *teedev,
@@ -94,6 +107,10 @@ struct tee_driver_ops {
 			  struct tee_param *param);
 	int (*supp_send)(struct tee_context *ctx, u32 ret, u32 num_params,
 			 struct tee_param *param);
+	int (*shm_register)(struct tee_context *ctx, struct tee_shm *shm,
+			    struct page **pages, size_t num_pages,
+			    unsigned long start);
+	int (*shm_unregister)(struct tee_context *ctx, struct tee_shm *shm);
 };
 
 /**
@@ -149,6 +166,97 @@ int tee_device_register(struct tee_device *teedev);
  */
 void tee_device_unregister(struct tee_device *teedev);
 
+/**
+ * struct tee_shm - shared memory object
+ * @teedev:	device used to allocate the object
+ * @ctx:	context using the object, if NULL the context is gone
+ * @link:	link element
+ * @paddr:	physical address of the shared memory
+ * @kaddr:	virtual address of the shared memory
+ * @size:	size of shared memory
+ * @offset:	offset of buffer in user space
+ * @pages:	locked pages from userspace
+ * @num_pages:	number of locked pages
+ * @dmabuf:	dmabuf used for exporting to user space
+ * @flags:	defined by TEE_SHM_* in tee_drv.h
+ * @id:		unique id of a shared memory object on this device
+ *
+ * This structure is only supposed to be accessed directly from the TEE
+ * subsystem and from drivers that implement their own shm pool manager.
+ */
+struct tee_shm {
+	struct tee_device *teedev;
+	struct tee_context *ctx;
+	struct list_head link;
+	phys_addr_t paddr;
+	void *kaddr;
+	size_t size;
+	unsigned int offset;
+	struct page **pages;
+	size_t num_pages;
+	struct dma_buf *dmabuf;
+	u32 flags;
+	int id;
+};
+
+/**
+ * struct tee_shm_pool_mgr - shared memory manager
+ * @ops:		operations
+ * @private_data:	private data for the shared memory manager
+ */
+struct tee_shm_pool_mgr {
+	const struct tee_shm_pool_mgr_ops *ops;
+	void *private_data;
+};
+
+/**
+ * struct tee_shm_pool_mgr_ops - shared memory pool manager operations
+ * @alloc:		called when allocating shared memory
+ * @free:		called when freeing shared memory
+ * @destroy_poolmgr:	called when destroying the pool manager
+ */
+struct tee_shm_pool_mgr_ops {
+	int (*alloc)(struct tee_shm_pool_mgr *poolmgr, struct tee_shm *shm,
+		     size_t size);
+	void (*free)(struct tee_shm_pool_mgr *poolmgr, struct tee_shm *shm);
+	void (*destroy_poolmgr)(struct tee_shm_pool_mgr *poolmgr);
+};
+
+/**
+ * tee_shm_pool_alloc() - Create a shared memory pool from shm managers
+ * @priv_mgr:	manager for driver private shared memory allocations
+ * @dmabuf_mgr:	manager for dma-buf shared memory allocations
+ *
+ * Allocation with the flag TEE_SHM_DMA_BUF set will use the range supplied
+ * in @dmabuf_mgr, others will use the range provided by @priv_mgr.
+ *
+ * @returns pointer to a 'struct tee_shm_pool' or an ERR_PTR on failure.
+ */
+struct tee_shm_pool *tee_shm_pool_alloc(struct tee_shm_pool_mgr *priv_mgr,
+					struct tee_shm_pool_mgr *dmabuf_mgr);
+
+/*
+ * tee_shm_pool_mgr_alloc_res_mem() - Create a shm manager for reserved
+ * memory
+ * @vaddr:	Virtual address of start of pool
+ * @paddr:	Physical address of start of pool
+ * @size:	Size in bytes of the pool
+ *
+ * @returns pointer to a 'struct tee_shm_pool_mgr' or an ERR_PTR on failure.
+ */
+struct tee_shm_pool_mgr *tee_shm_pool_mgr_alloc_res_mem(unsigned long vaddr,
+							phys_addr_t paddr,
+							size_t size,
+							int min_alloc_order);
+
+/**
+ * tee_shm_pool_mgr_destroy() - Free a shared memory manager
+ */
+static inline void tee_shm_pool_mgr_destroy(struct tee_shm_pool_mgr *poolm)
+{
+	poolm->ops->destroy_poolmgr(poolm);
+}
+
 /**
  * struct tee_shm_pool_mem_info - holds information needed to create a shared
  * memory pool
@@ -210,6 +318,40 @@ void *tee_get_drvdata(struct tee_device *teedev);
  */
 struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags);
 
+/**
+ * tee_shm_priv_alloc() - Allocate shared memory privately
+ * @teedev:	Device that allocates the shared memory
+ * @size:	Requested size of shared memory
+ *
+ * Allocates shared memory buffer that is not associated with any client
+ * context. Such buffers are owned by TEE driver and used for internal calls.
+ *
+ * @returns a pointer to 'struct tee_shm'
+ */
+struct tee_shm *tee_shm_priv_alloc(struct tee_device *teedev, size_t size);
+
+/**
+ * tee_shm_register() - Register shared memory buffer
+ * @ctx:	Context that registers the shared memory
+ * @addr:	Address in userspace of the shared buffer
+ * @length:	Length of the shared buffer
+ * @flags:	Flags setting properties for the requested shared memory.
+ *
+ * @returns a pointer to 'struct tee_shm'
+ */
+struct tee_shm *tee_shm_register(struct tee_context *ctx, unsigned long addr,
+				 size_t length, u32 flags);
+
+/**
+ * tee_shm_is_registered() - Check if shared memory object is registered in TEE
+ * @shm:	Shared memory handle
+ * @returns true if object is registered in TEE
+ */
+static inline bool tee_shm_is_registered(struct tee_shm *shm)
+{
+	return shm && (shm->flags & TEE_SHM_REGISTER);
+}
+
 /**
  * tee_shm_free() - Free shared memory
  * @shm:	Handle to shared memory to free
@@ -259,12 +401,48 @@ void *tee_shm_get_va(struct tee_shm *shm, size_t offs);
  */
 int tee_shm_get_pa(struct tee_shm *shm, size_t offs, phys_addr_t *pa);
 
+/**
+ * tee_shm_get_size() - Get size of shared memory buffer
+ * @shm:	Shared memory handle
+ * @returns size of shared memory
+ */
+static inline size_t tee_shm_get_size(struct tee_shm *shm)
+{
+	return shm->size;
+}
+
+/**
+ * tee_shm_get_pages() - Get list of pages that hold shared buffer
+ * @shm:	Shared memory handle
+ * @num_pages:	Where the number of pages is stored
+ * @returns pointer to pages array
+ */
+static inline struct page **tee_shm_get_pages(struct tee_shm *shm,
+					      size_t *num_pages)
+{
+	*num_pages = shm->num_pages;
+	return shm->pages;
+}
+
+/**
+ * tee_shm_get_page_offset() - Get shared buffer offset from page start
+ * @shm:	Shared memory handle
+ * @returns page offset of shared buffer
+ */
+static inline size_t tee_shm_get_page_offset(struct tee_shm *shm)
+{
+	return shm->offset;
+}
+
 /**
  * tee_shm_get_id() - Get id of a shared memory object
  * @shm:	Shared memory handle
  * @returns id
  */
-int tee_shm_get_id(struct tee_shm *shm);
+static inline int tee_shm_get_id(struct tee_shm *shm)
+{
+	return shm->id;
+}
 
 /**
  * tee_shm_get_from_id() - Find shared memory object and increase reference
@@ -275,4 +453,16 @@ int tee_shm_get_id(struct tee_shm *shm);
  */
 struct tee_shm *tee_shm_get_from_id(struct tee_context *ctx, int id);
 
+static inline bool tee_param_is_memref(struct tee_param *param)
+{
+	switch (param->attr & TEE_IOCTL_PARAM_ATTR_TYPE_MASK) {
+	case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT:
+	case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
+	case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
+		return true;
+	default:
+		return false;
+	}
+}
+
 #endif /*__TEE_DRV_H*/
View file
@@ -50,6 +50,7 @@
 
 #define TEE_GEN_CAP_GP		(1 << 0)/* GlobalPlatform compliant TEE */
 #define TEE_GEN_CAP_PRIVILEGED	(1 << 1)/* Privileged device (for supplicant) */
+#define TEE_GEN_CAP_REG_MEM	(1 << 2)/* Supports registering shared memory */
 
 /*
  * TEE Implementation ID
@@ -154,6 +155,13 @@ struct tee_ioctl_buf_data {
  */
 #define TEE_IOCTL_PARAM_ATTR_TYPE_MASK		0xff
 
+/* Meta parameter carrying extra information about the message. */
+#define TEE_IOCTL_PARAM_ATTR_META		0x100
+
+/* Mask of all known attr bits */
+#define TEE_IOCTL_PARAM_ATTR_MASK \
+	(TEE_IOCTL_PARAM_ATTR_TYPE_MASK | TEE_IOCTL_PARAM_ATTR_META)
+
 /*
  * Matches TEEC_LOGIN_* in GP TEE Client API
  * Are only defined for GP compliant TEEs
@@ -332,6 +340,35 @@ struct tee_iocl_supp_send_arg {
 #define TEE_IOC_SUPPL_SEND	_IOR(TEE_IOC_MAGIC, TEE_IOC_BASE + 7, \
				     struct tee_ioctl_buf_data)
 
+/**
+ * struct tee_ioctl_shm_register_data - Shared memory register argument
+ * @addr:	[in] Start address of shared memory to register
+ * @length:	[in/out] Length of shared memory to register
+ * @flags:	[in/out] Flags to/from registration.
+ * @id:		[out] Identifier of the shared memory
+ *
+ * The flags field should currently be zero as input. Updated by the call
+ * with actual flags as defined by TEE_IOCTL_SHM_* above.
+ * This structure is used as argument for TEE_IOC_SHM_REGISTER below.
+ */
+struct tee_ioctl_shm_register_data {
+	__u64 addr;
+	__u64 length;
+	__u32 flags;
+	__s32 id;
+};
+
+/**
+ * TEE_IOC_SHM_REGISTER - Register shared memory argument
+ *
+ * Registers shared memory between the user space process and secure OS.
+ *
+ * Returns a file descriptor on success or < 0 on failure
+ *
+ * The shared memory is unregistered when the descriptor is closed.
+ */
+#define TEE_IOC_SHM_REGISTER	_IOWR(TEE_IOC_MAGIC, TEE_IOC_BASE + 9, \
+				      struct tee_ioctl_shm_register_data)
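
A hedged user-space sketch of the new ioctl follows; the structure and ioctl come from this header, while the device path and the minimal error handling are invented for illustration:

	#include <stddef.h>
	#include <stdint.h>
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/tee.h>

	/* Register a user buffer with the TEE; returns a dma-buf fd whose
	 * close() unregisters the memory, or -1 on error. */
	int register_buf(void *buf, size_t len)
	{
		struct tee_ioctl_shm_register_data data = {
			.addr = (uintptr_t)buf,
			.length = len,
			.flags = 0,	/* must be zero on input */
		};
		int fd = open("/dev/tee0", O_RDWR); /* assumed device node */
		int shm_fd;

		if (fd < 0)
			return -1;
		shm_fd = ioctl(fd, TEE_IOC_SHM_REGISTER, &data);
		/* On success data.id identifies the registration. */
		close(fd);
		return shm_fd;
	}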
 
 /*
  * Five syscalls are used when communicating with the TEE driver.
  * open(): opens the device associated with the driver
View file
@@ -72,7 +72,7 @@
 
 #include "internal.h"
 
-#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
+#if defined(LAST_CPUPID_NOT_IN_PAGE_FLAGS) && !defined(CONFIG_COMPILE_TEST)
 #warning Unfortunate NUMA and NUMA Balancing config, growing page-frame for last_cpupid.
 #endif
View file
@@ -1012,7 +1012,7 @@ static ssize_t snd_seq_write(struct file *file, const char __user *buf,
 {
 	struct snd_seq_client *client = file->private_data;
 	int written = 0, len;
-	int err = -EINVAL;
+	int err;
 	struct snd_seq_event event;
 
 	if (!(snd_seq_file_flags(file) & SNDRV_SEQ_LFLG_OUTPUT))
@@ -1027,11 +1027,15 @@ static ssize_t snd_seq_write(struct file *file, const char __user *buf,
 
 	/* allocate the pool now if the pool is not allocated yet */ 
 	if (client->pool->size > 0 && !snd_seq_write_pool_allocated(client)) {
-		if (snd_seq_pool_init(client->pool) < 0)
+		mutex_lock(&client->ioctl_mutex);
+		err = snd_seq_pool_init(client->pool);
+		mutex_unlock(&client->ioctl_mutex);
+		if (err < 0)
 			return -ENOMEM;
 	}
 
 	/* only process whole events */
+	err = -EINVAL;
 	while (count >= sizeof(struct snd_seq_event)) {
 		/* Read in the event header from the user */
 		len = sizeof(event);
@@ -2196,7 +2200,6 @@ static int snd_seq_do_ioctl(struct snd_seq_client *client, unsigned int cmd,
 			     void __user *arg)
 {
 	struct seq_ioctl_table *p;
-	int ret;
 
 	switch (cmd) {
 	case SNDRV_SEQ_IOCTL_PVERSION:
@@ -2210,12 +2213,8 @@ static int snd_seq_do_ioctl(struct snd_seq_client *client, unsigned int cmd,
 	if (! arg)
 		return -EFAULT;
 	for (p = ioctl_tables; p->cmd; p++) {
-		if (p->cmd == cmd) {
-			mutex_lock(&client->ioctl_mutex);
-			ret = p->func(client, arg);
-			mutex_unlock(&client->ioctl_mutex);
-			return ret;
-		}
+		if (p->cmd == cmd)
+			return p->func(client, arg);
 	}
 	pr_debug("ALSA: seq unknown ioctl() 0x%x (type='%c', number=0x%02x)\n",
 		 cmd, _IOC_TYPE(cmd), _IOC_NR(cmd));
@@ -2226,11 +2225,15 @@ static int snd_seq_do_ioctl(struct snd_seq_client *client, unsigned int cmd,
 static long snd_seq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 {
 	struct snd_seq_client *client = file->private_data;
+	long ret;
 
 	if (snd_BUG_ON(!client))
 		return -ENXIO;
 
-	return snd_seq_do_ioctl(client, cmd, (void __user *) arg);
+	mutex_lock(&client->ioctl_mutex);
+	ret = snd_seq_do_ioctl(client, cmd, (void __user *) arg);
+	mutex_unlock(&client->ioctl_mutex);
+	return ret;
 }
 
 #ifdef CONFIG_COMPAT
View file
@@ -3130,6 +3130,19 @@ static void alc269_fixup_pincfg_no_hp_to_lineout(struct hda_codec *codec,
 		spec->parse_flags = HDA_PINCFG_NO_HP_FIXUP;
 }
 
+static void alc269_fixup_pincfg_U7x7_headset_mic(struct hda_codec *codec,
+						 const struct hda_fixup *fix,
+						 int action)
+{
+	unsigned int cfg_headphone = snd_hda_codec_get_pincfg(codec, 0x21);
+	unsigned int cfg_headset_mic = snd_hda_codec_get_pincfg(codec, 0x19);
+
+	if (cfg_headphone && cfg_headset_mic == 0x411111f0)
+		snd_hda_codec_set_pincfg(codec, 0x19,
+			(cfg_headphone & ~AC_DEFCFG_DEVICE) |
+			(AC_JACK_MIC_IN << AC_DEFCFG_DEVICE_SHIFT));
+}
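
A worked example of the rewrite above, using the jack-default definitions from hda_codec.h (AC_DEFCFG_DEVICE is bits 23:20 and AC_JACK_MIC_IN is 0xa, both assumptions stated for illustration):

	/* A headphone pin config of 0x0221101f gives
	 *   (0x0221101f & ~(0xf << 20)) | (0xa << 20) = 0x02a1101f
	 * i.e. the same jack location and connection with the device type
	 * flipped from HP-out to mic-in, applied to the 0x19 headset-mic
	 * pin when it still carries the "not connected" default 0x411111f0. */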
 
 static void alc269_fixup_hweq(struct hda_codec *codec,
			       const struct hda_fixup *fix, int action)
 {
@@ -4782,6 +4795,7 @@ enum {
 	ALC269_FIXUP_LIFEBOOK_EXTMIC,
 	ALC269_FIXUP_LIFEBOOK_HP_PIN,
 	ALC269_FIXUP_LIFEBOOK_NO_HP_TO_LINEOUT,
+	ALC255_FIXUP_LIFEBOOK_U7x7_HEADSET_MIC,
 	ALC269_FIXUP_AMIC,
 	ALC269_FIXUP_DMIC,
 	ALC269VB_FIXUP_AMIC,
@@ -4972,6 +4986,10 @@ static const struct hda_fixup alc269_fixups[] = {
 		.type = HDA_FIXUP_FUNC,
 		.v.func = alc269_fixup_pincfg_no_hp_to_lineout,
 	},
+	[ALC255_FIXUP_LIFEBOOK_U7x7_HEADSET_MIC] = {
+		.type = HDA_FIXUP_FUNC,
+		.v.func = alc269_fixup_pincfg_U7x7_headset_mic,
+	},
 	[ALC269_FIXUP_AMIC] = {
 		.type = HDA_FIXUP_PINS,
 		.v.pins = (const struct hda_pintbl[]) {
@@ -5687,6 +5705,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
 	SND_PCI_QUIRK(0x10cf, 0x159f, "Lifebook E780", ALC269_FIXUP_LIFEBOOK_NO_HP_TO_LINEOUT),
 	SND_PCI_QUIRK(0x10cf, 0x15dc, "Lifebook T731", ALC269_FIXUP_LIFEBOOK_HP_PIN),
 	SND_PCI_QUIRK(0x10cf, 0x1757, "Lifebook E752", ALC269_FIXUP_LIFEBOOK_HP_PIN),
+	SND_PCI_QUIRK(0x10cf, 0x1629, "Lifebook U7x7", ALC255_FIXUP_LIFEBOOK_U7x7_HEADSET_MIC),
 	SND_PCI_QUIRK(0x10cf, 0x1845, "Lifebook U904", ALC269_FIXUP_LIFEBOOK_EXTMIC),
 	SND_PCI_QUIRK(0x144d, 0xc109, "Samsung Ativ book 9 (NP900X3G)", ALC269_FIXUP_INV_DMIC),
 	SND_PCI_QUIRK(0x1458, 0xfa53, "Gigabyte BXBT-2807", ALC283_FIXUP_BXBT2807_MIC),
@@ -5975,6 +5994,11 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
 		{0x12, 0xb7a60130},
 		{0x14, 0x90170110},
 		{0x21, 0x02211020}),
+	SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+		{0x12, 0x90a60130},
+		{0x14, 0x90170110},
+		{0x14, 0x01011020},
+		{0x21, 0x0221101f}),
 	SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
 		ALC256_STANDARD_PINS),
 	SND_HDA_PIN_QUIRK(0x10ec0280, 0x103c, "HP", ALC280_FIXUP_HP_GPIO4,
@@ -6031,6 +6055,10 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
 		{0x12, 0x90a60120},
 		{0x14, 0x90170110},
 		{0x21, 0x0321101f}),
+	SND_HDA_PIN_QUIRK(0x10ec0289, 0x1028, "Dell", ALC225_FIXUP_DELL1_MIC_NO_PRESENCE,
+		{0x12, 0xb7a60130},
+		{0x14, 0x90170110},
+		{0x21, 0x04211020}),
 	SND_HDA_PIN_QUIRK(0x10ec0290, 0x103c, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1,
 		ALC290_STANDARD_PINS,
 		{0x15, 0x04211040},
View file
@@ -355,17 +355,20 @@ static int get_ctl_value_v2(struct usb_mixer_elem_info *cval, int request,
			    int validx, int *value_ret)
 {
 	struct snd_usb_audio *chip = cval->head.mixer->chip;
-	unsigned char buf[4 + 3 * sizeof(__u32)]; /* enough space for one range */
+	/* enough space for one range */
+	unsigned char buf[sizeof(__u16) + 3 * sizeof(__u32)];
 	unsigned char *val;
-	int idx = 0, ret, size;
+	int idx = 0, ret, val_size, size;
 	__u8 bRequest;
 
+	val_size = uac2_ctl_value_size(cval->val_type);
+
 	if (request == UAC_GET_CUR) {
 		bRequest = UAC2_CS_CUR;
-		size = uac2_ctl_value_size(cval->val_type);
+		size = val_size;
 	} else {
 		bRequest = UAC2_CS_RANGE;
-		size = sizeof(buf);
+		size = sizeof(__u16) + 3 * val_size;
 	}
 
 	memset(buf, 0, sizeof(buf));
@@ -398,16 +401,17 @@ error:
 		val = buf + sizeof(__u16);
 		break;
 	case UAC_GET_MAX:
-		val = buf + sizeof(__u16) * 2;
+		val = buf + sizeof(__u16) + val_size;
 		break;
 	case UAC_GET_RES:
-		val = buf + sizeof(__u16) * 3;
+		val = buf + sizeof(__u16) + val_size * 2;
 		break;
 	default:
 		return -EINVAL;
 	}
 
-	*value_ret = convert_signed_value(cval, snd_usb_combine_bytes(val, sizeof(__u16)));
+	*value_ret = convert_signed_value(cval,
					  snd_usb_combine_bytes(val, val_size));
 
 	return 0;
 }
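
The bug being fixed is that the UAC2 RANGE reply was indexed as if every value were two bytes wide. A worked example for a 4-byte control, following the layout the code above assumes:

	/* The RANGE reply is laid out as
	 *   __u16 wNumSubRanges | MIN | MAX | RES
	 * with each of MIN/MAX/RES val_size bytes wide, so for val_size = 4:
	 *   UAC_GET_MIN -> buf + 2
	 *   UAC_GET_MAX -> buf + 2 + 4 = buf + 6   (old code read buf + 4)
	 *   UAC_GET_RES -> buf + 2 + 8 = buf + 10  (old code read buf + 6)
	 * and the request size becomes 2 + 3 * 4 = 14 bytes instead of 8. */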
View file
@@ -343,6 +343,15 @@ static int set_sync_ep_implicit_fb_quirk(struct snd_usb_substream *subs,
 		ep = 0x81;
 		iface = usb_ifnum_to_if(dev, 2);
 
+		if (!iface || iface->num_altsetting == 0)
+			return -EINVAL;
+
+		alts = &iface->altsetting[1];
+		goto add_sync_ep;
+	case USB_ID(0x1397, 0x0002):
+		ep = 0x81;
+		iface = usb_ifnum_to_if(dev, 1);
+
 		if (!iface || iface->num_altsetting == 0)
 			return -EINVAL;