PFK: fix race between key set and key invalidate in TZ

When working with multiple files and multiple threads, the following
scenario can occur:
1. File Close -> Key cache removal -> context switch
2. Open new file -> occupy the entry cleaned in 1
   -> TZ_ES_SET_ICE_KEY -> context switch
3. Back to 1 -> TZ_ES_INVALIDATE_ICE_KEY
4. Back to 2 -> ICE uses the key that is already invalid
5. Crash due to PARTIALLY_SET_KEY_USED

To fix this, PFK must know when the requests that use a given key have
completed. Only then may the key be removed; until then, key invalidation
must wait.
A new callback was added to let clients inform PFK when requests end.
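
Illustrative client-side sketch of the pairing the new API enforces
(editorial, not part of the patch; error handling elided, names as
introduced below):

    struct ice_crypto_setting setting;
    bool is_pfe = false;
    int ret;

    /* pin the key cache entry before ICE is programmed */
    ret = pfk_load_key_start(req->bio, &setting, &is_pfe, false);
    if (ret)
            return ret;     /* -EBUSY/-EAGAIN: reschedule the request */

    /* ... program ICE with setting.key_index, run the request ... */

    /* request done: unpin the entry, invalidation may now proceed */
    pfk_load_key_end(req->bio, &is_pfe);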

Change-Id: Id7f8a3302fac9fafd1203d8d56ca13d59b45bbd5
Signed-off-by: Gilad Broner <gbroner@codeaurora.org>
Signed-off-by: Andrey Markovytch <andreym@codeaurora.org>
Andrey Markovytch authored on 2016-04-20 17:40:00 +03:00; committed by Jeevan Shriram
parent 03a29a9ee5
commit e29851c910
7 changed files with 679 additions and 234 deletions

View file

@@ -97,19 +97,16 @@ struct ice_device {
static int qti_ice_setting_config(struct request *req,
struct platform_device *pdev,
struct ice_crypto_setting *crypto_data,
struct ice_data_setting *setting,
bool *configured)
struct ice_data_setting *setting)
{
struct ice_device *ice_dev = NULL;
*configured = false;
ice_dev = platform_get_drvdata(pdev);
if (!ice_dev) {
pr_debug("%s no ICE device\n", __func__);
/* make the caller finish peacefully */
*configured = true;
return 0;
}
@@ -120,8 +117,6 @@ static int qti_ice_setting_config(struct request *req,
if ((short)(crypto_data->key_index) >= 0) {
*configured = true;
memcpy(&setting->crypto_data, crypto_data,
sizeof(setting->crypto_data));
@@ -1375,7 +1370,7 @@ static int qcom_ice_config(struct platform_device *pdev, struct request *req,
struct ice_crypto_setting pfk_crypto_data = {0};
union map_info *info;
int ret = 0;
bool configured = 0;
bool is_pfe = false;
if (!pdev || !req || !setting) {
pr_err("%s: Invalid params passed\n", __func__);
@@ -1397,25 +1392,17 @@ static int qcom_ice_config(struct platform_device *pdev, struct request *req,
return 0;
}
ret = pfk_load_key(req->bio, &pfk_crypto_data);
if (0 == ret) {
ret = qti_ice_setting_config(req, pdev, &pfk_crypto_data,
setting, &configured);
if (0 == ret) {
/**
* if configuration was complete, we are done, no need
* to go further with FDE
*/
if (configured)
return 0;
} else {
/**
* there was an error with configuring the setting,
* exit with error
*/
ret = pfk_load_key_start(req->bio, &pfk_crypto_data, &is_pfe, false);
if (is_pfe) {
if (ret) {
if (ret != -EBUSY && ret != -EAGAIN)
pr_err("%s error %d while configuring ice key for PFE\n",
__func__, ret);
return ret;
}
return qti_ice_setting_config(req, pdev,
&pfk_crypto_data, setting);
}
/*
@@ -1438,8 +1425,8 @@ static int qcom_ice_config(struct platform_device *pdev, struct request *req,
return -EINVAL;
}
return qti_ice_setting_config(req, pdev, crypto_data,
setting, &configured);
return qti_ice_setting_config(req, pdev,
crypto_data, setting);
}
/*
@@ -1450,6 +1437,35 @@ static int qcom_ice_config(struct platform_device *pdev, struct request *req,
return 0;
}
static int qcom_ice_config_end(struct request *req)
{
int ret = 0;
bool is_pfe = false;
if (!req) {
pr_err("%s: Invalid params passed\n", __func__);
return -EINVAL;
}
if (!req->bio) {
/* It is not an error to have a request with no bio */
return 0;
}
ret = pfk_load_key_end(req->bio, &is_pfe);
if (is_pfe) {
if (ret != 0)
pr_err("%s error %d while end configuring ice key for PFE\n",
__func__, ret);
return ret;
}
return 0;
}
EXPORT_SYMBOL(qcom_ice_config_end);
static int qcom_ice_status(struct platform_device *pdev)
{
struct ice_device *ice_dev;
@@ -1481,7 +1497,8 @@ struct qcom_ice_variant_ops qcom_ice_ops = {
.reset = qcom_ice_reset,
.resume = qcom_ice_resume,
.suspend = qcom_ice_suspend,
.config = qcom_ice_config,
.config = qcom_ice_config,
.config_end = qcom_ice_config_end,
.status = qcom_ice_status,
.debug = qcom_ice_debug,
};

View file

@@ -366,6 +366,36 @@ out:
return err;
}
/**
* ufs_qcom_ice_cfg_end() - finishes configuring UFS's ICE registers
* for an ICE transaction
* @qcom_host: Pointer to a UFS QCom internal host structure.
* qcom_host, qcom_host->hba and
* qcom_host->hba->dev should all
* be valid pointers.
* @req: Pointer to a valid request. req->bio should also be
* a valid pointer.
*
* Return: -EINVAL in case of an error
* 0 otherwise
*/
int ufs_qcom_ice_cfg_end(struct ufs_qcom_host *qcom_host, struct request *req)
{
int err = 0;
struct device *dev = qcom_host->hba->dev;
if (qcom_host->ice.vops->config_end) {
err = qcom_host->ice.vops->config_end(req);
if (err) {
dev_err(dev, "%s: error in ice_vops->config_end %d\n",
__func__, err);
return err;
}
}
return 0;
}
/**
* ufs_qcom_ice_reset() - resets UFS-ICE interface and ICE device
* @qcom_host: Pointer to a UFS QCom internal host structure.

View file

@@ -69,8 +69,9 @@ struct qcom_ice_variant_ops {
int (*reset)(struct platform_device *);
int (*resume)(struct platform_device *);
int (*suspend)(struct platform_device *);
int (*config)(struct platform_device *, struct request* ,
struct ice_data_setting*);
int (*config)(struct platform_device *, struct request *,
struct ice_data_setting *);
int (*config_end)(struct request *);
int (*status)(struct platform_device *);
void (*debug)(struct platform_device *);
};

View file

@@ -1,4 +1,4 @@
/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -19,13 +19,20 @@ struct ice_crypto_setting;
#ifdef CONFIG_PFK
int pfk_load_key(const struct bio *bio, struct ice_crypto_setting *ice_setting);
int pfk_load_key_start(const struct bio *bio,
struct ice_crypto_setting *ice_setting, bool *is_pfe, bool);
int pfk_load_key_end(const struct bio *bio, bool *is_pfe);
int pfk_remove_key(const unsigned char *key, size_t key_size);
bool pfk_allow_merge_bio(struct bio *bio1, struct bio *bio2);
#else
static inline int pfk_load_key(const struct bio *bio,
struct ice_crypto_setting *ice_setting)
static inline int pfk_load_key_start(const struct bio *bio,
struct ice_crypto_setting *ice_setting, bool *is_pfe, bool async)
{
return -ENODEV;
}
static inline int pfk_load_key_end(const struct bio *bio, bool *is_pfe)
{
return -ENODEV;
}

View file

@@ -164,7 +164,7 @@ static inline bool pfk_is_ready(void)
static int pfk_get_page_index(const struct bio *bio, pgoff_t *page_index)
{
if (!bio || !page_index)
return -EPERM;
return -EINVAL;
/* check bio bi_size > 0 before using the bio->bi_io_vec[] array */
if (!(bio->bi_iter).bi_size)
@@ -260,7 +260,7 @@ static int pfk_set_ecryptfs_data(struct inode *inode, void *ecryptfs_data)
struct inode_security_struct *isec = NULL;
if (!inode)
return -EPERM;
return -EINVAL;
isec = inode->i_security;
@@ -333,54 +333,40 @@ static int pfk_key_size_to_key_type(size_t key_size,
return 0;
}
/**
* pfk_load_key() - loads the encryption key to the ICE
* @bio: Pointer to the BIO structure
* @ice_setting: Pointer to ice setting structure that will be filled with
* ice configuration values, including the index to which the key was loaded
*
* Via bio gets access to ecryptfs key stored in auxiliary structure inside
* inode and loads it to encryption hw.
* Returns the index where the key is stored in encryption hw and additional
* information that will be used later for configuration of the encryption hw.
*
*/
int pfk_load_key(const struct bio *bio, struct ice_crypto_setting *ice_setting)
static int pfk_bio_to_key(const struct bio *bio, unsigned char const **key,
size_t *key_size, unsigned char const **salt, size_t *salt_size,
bool *is_pfe)
{
struct inode *inode = NULL;
int ret = 0;
const unsigned char *key = NULL;
const unsigned char *salt = NULL;
const unsigned char *cipher = NULL;
void *ecryptfs_data = NULL;
u32 key_index = 0;
enum ice_cryto_algo_mode algo_mode = 0;
enum ice_crpto_key_size key_size_type = 0;
size_t key_size = 0;
size_t salt_size = 0;
pgoff_t offset;
bool is_metadata = false;
if (!pfk_is_ready())
return -ENODEV;
/*
* only a few errors below can indicate that
* this function was not invoked within PFE context,
* otherwise we will consider it PFE
*/
*is_pfe = true;
if (!bio)
return -EPERM;
if (!ice_setting) {
pr_err("ice setting is NULL\n");
return -EPERM;
}
inode = pfk_bio_get_inode(bio);
if (!inode)
return -EINVAL;
ecryptfs_data = pfk_get_ecryptfs_data(inode);
if (!key || !salt || !key_size || !salt_size)
return -EINVAL;
inode = pfk_bio_get_inode(bio);
if (!inode) {
*is_pfe = false;
return -EINVAL;
}
ecryptfs_data = pfk_get_ecryptfs_data(inode);
if (!ecryptfs_data) {
ret = -EINVAL;
goto end;
*is_pfe = false;
return -EPERM;
}
pr_debug("loading key for file %s\n", inode_to_filename(inode));
@@ -388,49 +374,118 @@ int pfk_load_key(const struct bio *bio, struct ice_crypto_setting *ice_setting)
ret = pfk_get_page_index(bio, &offset);
if (ret != 0) {
pr_err("could not get page index from bio, probably bug %d\n",
ret);
ret = -EINVAL;
goto end;
ret);
return -EINVAL;
}
is_metadata = ecryptfs_is_page_in_metadata(ecryptfs_data, offset);
if (is_metadata == true) {
pr_debug("ecryptfs metadata, bypassing ICE\n");
ret = -ESPIPE;
goto end;
*is_pfe = false;
return -EPERM;
}
key = ecryptfs_get_key(ecryptfs_data);
*key = ecryptfs_get_key(ecryptfs_data);
if (!*key) {
pr_err("could not parse key from ecryptfs\n");
ret = -EINVAL;
goto end;
return -EINVAL;
}
key_size = ecryptfs_get_key_size(ecryptfs_data);
if (!key_size) {
*key_size = ecryptfs_get_key_size(ecryptfs_data);
if (!(*key_size)) {
pr_err("could not parse key size from ecryptfs\n");
ret = -EINVAL;
goto end;
return -EINVAL;
}
salt = ecryptfs_get_salt(ecryptfs_data);
*salt = ecryptfs_get_salt(ecryptfs_data);
if (!*salt) {
pr_err("could not parse salt from ecryptfs\n");
ret = -EINVAL;
goto end;
return -EINVAL;
}
salt_size = ecryptfs_get_salt_size(ecryptfs_data);
if (!salt_size) {
*salt_size = ecryptfs_get_salt_size(ecryptfs_data);
if (!(*salt_size)) {
pr_err("could not parse salt size from ecryptfs\n");
ret = -EINVAL;
goto end;
return -EINVAL;
}
ret = pfk_parse_cipher(cipher, &algo_mode);
return 0;
}
/**
* pfk_load_key_start() - loads PFE encryption key to the ICE
* Can also be invoked from a non-PFE
* context, in which case it is not
* relevant and the is_pfe flag is
* set to false
* @bio: Pointer to the BIO structure
* @ice_setting: Pointer to ice setting structure that will be filled with
* ice configuration values, including the index to which the key was loaded
* @is_pfe: Pointer to is_pfe flag, which will be true if function was invoked
* from PFE context
*
* Via bio gets access to ecryptfs key stored in auxiliary structure inside
* inode and loads it to encryption hw.
* Returns the index where the key is stored in encryption hw and additional
* information that will be used later for configuration of the encryption hw.
*
* Must be followed by pfk_load_key_end when the key is no longer used by ICE
*
*/
int pfk_load_key_start(const struct bio *bio,
struct ice_crypto_setting *ice_setting, bool *is_pfe,
bool async)
{
int ret = 0;
const unsigned char *key = NULL;
const unsigned char *salt = NULL;
size_t key_size = 0;
size_t salt_size = 0;
enum ice_cryto_algo_mode algo_mode = 0;
enum ice_crpto_key_size key_size_type = 0;
void *ecryptfs_data = NULL;
u32 key_index = 0;
struct inode *inode = NULL;
if (!is_pfe) {
pr_err("is_pfe is NULL\n");
return -EINVAL;
}
/*
* only a few errors below can indicate that
* this function was not invoked within PFE context,
* otherwise we will consider it PFE
*/
*is_pfe = true;
if (!pfk_is_ready())
return -ENODEV;
if (!ice_setting) {
pr_err("ice setting is NULL\n");
return -EINVAL;
}
ret = pfk_bio_to_key(bio, &key, &key_size, &salt, &salt_size, is_pfe);
if (ret != 0)
return ret;
inode = pfk_bio_get_inode(bio);
if (!inode) {
*is_pfe = false;
return -EINVAL;
}
ecryptfs_data = pfk_get_ecryptfs_data(inode);
if (!ecryptfs_data) {
*is_pfe = false;
return -EPERM;
}
ret = pfk_parse_cipher(ecryptfs_data, &algo_mode);
if (ret != 0) {
pr_debug("not supported cipher\n");
pr_err("not supported cipher\n");
return ret;
}
@@ -438,11 +493,14 @@ int pfk_load_key(const struct bio *bio, struct ice_crypto_setting *ice_setting)
if (ret != 0)
return ret;
ret = pfk_kc_load_key(key, key_size, salt, salt_size, &key_index);
if (ret != 0) {
pr_err("could not load key into pfk key cache, error %d\n",
ret);
return -EINVAL;
ret = pfk_kc_load_key_start(key, key_size, salt, salt_size, &key_index,
async);
if (ret) {
if (ret != -EBUSY && ret != -EAGAIN)
pr_err("start: could not load key into pfk key cache, error %d\n",
ret);
return ret;
}
ice_setting->key_size = key_size_type;
@@ -452,10 +510,51 @@ int pfk_load_key(const struct bio *bio, struct ice_crypto_setting *ice_setting)
ice_setting->key_index = key_index;
return 0;
}
end:
/**
* pfk_load_key_end() - marks the PFE key as no longer used by ICE
* Can also be invoked from a non-PFE
* context, in which case it is not
* relevant and the is_pfe flag is
* set to false
* @bio: Pointer to the BIO structure
* @is_pfe: Pointer to is_pfe flag, which will be true if function was invoked
* from PFE context
*
* Via the bio, gets access to the ecryptfs key stored in the auxiliary
* structure inside the inode and marks it as no longer in use by the
* encryption hw.
*
*/
int pfk_load_key_end(const struct bio *bio, bool *is_pfe)
{
int ret = 0;
const unsigned char *key = NULL;
const unsigned char *salt = NULL;
size_t key_size = 0;
size_t salt_size = 0;
return ret;
if (!is_pfe) {
pr_err("is_pfe is NULL\n");
return -EINVAL;
}
/* only a few errors below can indicate that
* this function was not invoked within PFE context,
* otherwise we will consider it PFE
*/
*is_pfe = true;
if (!pfk_is_ready())
return -ENODEV;
ret = pfk_bio_to_key(bio, &key, &key_size, &salt, &salt_size, is_pfe);
if (ret != 0)
return ret;
pfk_kc_load_key_end(key, key_size, salt, salt_size);
return 0;
}
/**
@@ -477,7 +576,7 @@ int pfk_remove_key(const unsigned char *key, size_t key_size)
return -ENODEV;
if (!key)
return -EPERM;
return -EINVAL;
ret = pfk_kc_remove_key(key, key_size);

View file

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, The Linux Foundation. All rights reserved.
* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -15,14 +15,12 @@
* PFK Key Cache
*
* Key Cache used internally in PFK.
* The purpose of the cache is to save access time to QSEE
* when loading the keys.
* The purpose of the cache is to save access time to QSEE when loading keys.
* Currently the cache is the same size as the total number of keys that can
* be loaded to ICE. Since this number is relatively small, the alghoritms for
* be loaded to ICE. Since this number is relatively small, the algorithms for
* cache eviction are simple, linear and based on last usage timestamp, i.e.
* the node that will be evicted is the one with the oldest timestamp.
* Empty entries always have the oldest timestamp.
*
*/
#include <linux/mutex.h>
@@ -54,8 +52,40 @@
#define PFK_MAX_SALT_SIZE PFK_KC_SALT_SIZE
static DEFINE_SPINLOCK(kc_lock);
static unsigned long flags;
static bool kc_ready;
/**
* enum pfk_kc_entry_state - state of the entry inside kc table
*
* @FREE: entry is free
* @ACTIVE_ICE_PRELOAD: entry is actively used by ICE engine
* and cannot be used by others. SCM call
* to load key to ICE is pending to be performed
* @ACTIVE_ICE_LOADED: entry is actively used by ICE engine and
* cannot be used by others. SCM call to load the
* key to ICE was successfully executed and key is
* now loaded
* @INACTIVE_INVALIDATING: entry is being invalidated during file close
* and cannot be used by others until invalidation
* is complete
* @INACTIVE: entry's key is already loaded, but is not
* currently being used. It can be re-used for
* optimization and to avoid SCM call cost or
* it can be taken by another key if there are
* no FREE entries
* @SCM_ERROR: error occurred while scm call was performed to
* load the key to ICE
*/
enum pfk_kc_entry_state {
FREE,
ACTIVE_ICE_PRELOAD,
ACTIVE_ICE_LOADED,
INACTIVE_INVALIDATING,
INACTIVE,
SCM_ERROR
};
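
Editorial sketch of the legal transitions implied by the states above;
kc_state_next_ok is a hypothetical helper, not part of this patch:

	/* hypothetical: legal transitions of the key cache state machine */
	static bool kc_state_next_ok(enum pfk_kc_entry_state from,
		enum pfk_kc_entry_state to)
	{
		switch (from) {
		case FREE:
			return to == ACTIVE_ICE_PRELOAD;
		case ACTIVE_ICE_PRELOAD:
			/* SCM call finished: key loaded, or call failed */
			return to == ACTIVE_ICE_LOADED || to == SCM_ERROR;
		case ACTIVE_ICE_LOADED:
			/* pfk_kc_load_key_end() releases the entry */
			return to == INACTIVE;
		case INACTIVE:
			return to == ACTIVE_ICE_LOADED ||	/* same key re-used */
				to == ACTIVE_ICE_PRELOAD ||	/* evicted for new key */
				to == INACTIVE_INVALIDATING;	/* file close */
		case INACTIVE_INVALIDATING:
		case SCM_ERROR:
			return to == FREE;
		default:
			return false;
		}
	}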
struct kc_entry {
unsigned char key[PFK_MAX_KEY_SIZE];
size_t key_size;
@@ -65,31 +95,14 @@ struct kc_entry {
u64 time_stamp;
u32 key_index;
struct task_struct *thread_pending;
enum pfk_kc_entry_state state;
int scm_error;
};
static struct kc_entry kc_table[PFK_KC_TABLE_SIZE] = {{{0}, 0, {0}, 0, 0, 0} };
/**
* pfk_min_time_entry() - update min time and update min entry
* @min_time: pointer to current min_time, might be updated with new value
* @time: time to compare minimum with
* @min_entry: ptr to ptr to current min_entry, might be updated with
* ptr to new entry
* @entry: will be the new min_entry if the time was updated
*
*
* Calculates the minimum between min_time and time. Replaces the min_time
* if time is less and replaces min_entry with entry
*
*/
static inline void pfk_min_time_entry(u64 *min_time, u64 time,
struct kc_entry **min_entry, struct kc_entry *entry)
{
if (time_before64(time, *min_time)) {
*min_time = time;
*min_entry = entry;
}
}
static struct kc_entry kc_table[PFK_KC_TABLE_SIZE];
/**
* kc_is_ready() - driver is initialized and ready.
@@ -98,7 +111,137 @@ static inline void pfk_min_time_entry(u64 *min_time, u64 time,
*/
static inline bool kc_is_ready(void)
{
return kc_ready == true;
return kc_ready;
}
static inline void kc_spin_lock(void)
{
spin_lock_irqsave(&kc_lock, flags);
}
static inline void kc_spin_unlock(void)
{
spin_unlock_irqrestore(&kc_lock, flags);
}
/**
* kc_entry_is_available() - checks whether the entry is available
*
* Return true if it is, false otherwise or if the entry is invalid
* Should be invoked under spinlock
*/
static bool kc_entry_is_available(const struct kc_entry *entry)
{
if (!entry)
return false;
return (entry->state == FREE || entry->state == INACTIVE);
}
/**
* kc_entry_wait_till_available() - waits till entry is available
*
* Returns 0 in case of success or -ERESTARTSYS if the wait was interrupted
* by a signal
*
* Should be invoked under spinlock
*/
static int kc_entry_wait_till_available(struct kc_entry *entry)
{
int res = 0;
while (!kc_entry_is_available(entry)) {
set_current_state(TASK_INTERRUPTIBLE);
if (signal_pending(current)) {
res = -ERESTARTSYS;
break;
}
/* assuming only one thread can try to invalidate
* the same entry
*/
entry->thread_pending = current;
kc_spin_unlock();
schedule();
kc_spin_lock();
}
set_current_state(TASK_RUNNING);
return res;
}
/**
* kc_entry_start_invalidating() - moves entry to state
* INACTIVE_INVALIDATING
* If entry is in use, waits till
* it gets available
* @entry: pointer to entry
*
* Return 0 in case of success, otherwise error
* Should be invoked under spinlock
*/
static int kc_entry_start_invalidating(struct kc_entry *entry)
{
int res;
res = kc_entry_wait_till_available(entry);
if (res)
return res;
entry->state = INACTIVE_INVALIDATING;
return 0;
}
/**
* kc_entry_finish_invalidating() - moves entry to state FREE
* and wakes up all the tasks waiting
* on it
*
* @entry: pointer to entry
*
* Should be invoked under spinlock
*/
static void kc_entry_finish_invalidating(struct kc_entry *entry)
{
if (!entry)
return;
if (entry->state != INACTIVE_INVALIDATING)
return;
entry->state = FREE;
}
/**
* kc_min_entry() - compare two entries to find one with minimal time
* @a: ptr to the first entry. If NULL the other entry will be returned
* @b: pointer to the second entry
*
* Return the entry whose timestamp is minimal, or b if a is NULL
*/
static inline struct kc_entry *kc_min_entry(struct kc_entry *a,
struct kc_entry *b)
{
if (!a)
return b;
if (time_before64(b->time_stamp, a->time_stamp))
return b;
return a;
}
/**
* kc_entry_at_index() - return entry at specific index
* @index: index of entry to be accessed
*
* Return entry
* Should be invoked under spinlock
*/
static struct kc_entry *kc_entry_at_index(int index)
{
return &(kc_table[index]);
}
/**
@@ -111,7 +254,7 @@ static inline bool kc_is_ready(void)
* index of that entry
*
* Return entry or NULL in case of error
* Should be invoked under lock
* Should be invoked under spinlock
*/
static struct kc_entry *kc_find_key_at_index(const unsigned char *key,
size_t key_size, const unsigned char *salt, size_t salt_size,
@@ -121,7 +264,7 @@ static struct kc_entry *kc_find_key_at_index(const unsigned char *key,
int i = 0;
for (i = *starting_index; i < PFK_KC_TABLE_SIZE; i++) {
entry = &(kc_table[i]);
entry = kc_entry_at_index(i);
if (NULL != salt) {
if (entry->salt_size != salt_size)
@@ -151,7 +294,7 @@ static struct kc_entry *kc_find_key_at_index(const unsigned char *key,
* @salt_size: the salt size
*
* Return entry or NULL in case of error
* Should be invoked under lock
* Should be invoked under spinlock
*/
static struct kc_entry *kc_find_key(const unsigned char *key, size_t key_size,
const unsigned char *salt, size_t salt_size)
@@ -162,29 +305,28 @@ static struct kc_entry *kc_find_key(const unsigned char *key, size_t key_size,
}
/**
* kc_find_oldest_entry() - finds the entry with minimal timestamp
* kc_find_oldest_entry_non_locked() - finds the entry with minimal timestamp
* that is not locked
*
* Returns entry with minimal timestamp. Empty entries have timestamp
* of 0, therefore they are returned first.
* Should always succeed, the returned entry should never be NULL
* Should be invoked under lock
* If all the entries are locked, will return NULL
* Should be invoked under spin lock
*/
static struct kc_entry *kc_find_oldest_entry(void)
static struct kc_entry *kc_find_oldest_entry_non_locked(void)
{
struct kc_entry *curr_min_entry = NULL;
struct kc_entry *entry = NULL;
u64 min_time = 0;
int i = 0;
min_time = kc_table[0].time_stamp;
curr_min_entry = &(kc_table[0]);
for (i = 0; i < PFK_KC_TABLE_SIZE; i++) {
entry = &(kc_table[i]);
if (!entry->time_stamp)
entry = kc_entry_at_index(i);
if (entry->state == FREE)
return entry;
pfk_min_time_entry(&min_time, entry->time_stamp,
&curr_min_entry, entry);
if (entry->state == INACTIVE)
curr_min_entry = kc_min_entry(curr_min_entry, entry);
}
return curr_min_entry;
@@ -195,8 +337,6 @@ static struct kc_entry *kc_find_oldest_entry(void)
*
* @entry: entry to update
*
* If system time can't be retrieved, timestamp will not be updated
* Should be invoked under lock
*/
static void kc_update_timestamp(struct kc_entry *entry)
{
@@ -207,12 +347,11 @@
}
/**
* kc_clear_entry() - clear the key from entry and remove the key from ICE
* kc_clear_entry() - clear the key from entry and mark entry not in use
*
* @entry: pointer to entry
*
* Securely wipe and release the key memory, remove the key from ICE
* Should be invoked under lock
* Should be invoked under spinlock
*/
static void kc_clear_entry(struct kc_entry *entry)
{
@@ -222,11 +361,15 @@
memset(entry->key, 0, entry->key_size);
memset(entry->salt, 0, entry->salt_size);
entry->key_size = 0;
entry->salt_size = 0;
entry->time_stamp = 0;
entry->scm_error = 0;
}
/**
* kc_replace_entry() - replaces the key in given entry and
* kc_update_entry() - replaces the key in given entry and
* loads the new key to ICE
*
* @entry: entry to replace key in
@@ -237,12 +380,12 @@ static void kc_clear_entry(struct kc_entry *entry)
*
* The previous key is securely released and wiped, the new one is loaded
* to ICE.
* Should be invoked under lock
* Should be invoked under spinlock
*/
static int kc_replace_entry(struct kc_entry *entry, const unsigned char *key,
static int kc_update_entry(struct kc_entry *entry, const unsigned char *key,
size_t key_size, const unsigned char *salt, size_t salt_size)
{
int ret = 0;
int ret;
kc_clear_entry(entry);
@@ -252,23 +395,15 @@ static int kc_replace_entry(struct kc_entry *entry, const unsigned char *key,
memcpy(entry->salt, salt, salt_size);
entry->salt_size = salt_size;
ret = qti_pfk_ice_set_key(entry->key_index, (uint8_t *) key,
(uint8_t *) salt);
if (ret != 0) {
ret = -EINVAL;
goto err;
}
/* Mark entry as no longer free before releasing the lock */
entry->state = ACTIVE_ICE_PRELOAD;
kc_spin_unlock();
kc_update_timestamp(entry);
return 0;
err:
kc_clear_entry(entry);
ret = qti_pfk_ice_set_key(entry->key_index, entry->key,
entry->salt);
kc_spin_lock();
return ret;
}
/**
@@ -279,19 +414,19 @@ err:
int pfk_kc_init(void)
{
int i = 0;
struct kc_entry *entry = NULL;
spin_lock(&kc_lock);
for (i = 0; i < PFK_KC_TABLE_SIZE; i++)
kc_table[i].key_index = PFK_KC_STARTING_INDEX + i;
spin_unlock(&kc_lock);
kc_spin_lock();
for (i = 0; i < PFK_KC_TABLE_SIZE; i++) {
entry = kc_entry_at_index(i);
entry->key_index = PFK_KC_STARTING_INDEX + i;
}
kc_ready = true;
kc_spin_unlock();
return 0;
}
/**
* pfk_kc_deinit() - deinit function
*
@@ -299,74 +434,172 @@ int pfk_kc_init(void)
*/
int pfk_kc_deinit(void)
{
pfk_kc_clear();
int res = pfk_kc_clear();
kc_ready = false;
return 0;
return res;
}
/**
* pfk_kc_load_key() - retrieve the key from cache or add it if it's not there
* return the ICE hw key index
* pfk_kc_load_key_start() - retrieve the key from cache or add it if
* it's not there and return the ICE hw key index in @key_index.
* @key: pointer to the key
* @key_size: the size of the key
* @salt: pointer to the salt
* @salt_size: the size of the salt
* @key_index: the pointer to key_index where the output will be stored
* @async: whether scm calls are allowed in the caller context
*
* If key is present in cache, then the key_index will be retrieved from cache.
* If it is not present, the oldest entry from kc table will be evicted,
* the key will be loaded to ICE via QSEE to the index that is the evicted
* entry number and stored in cache
* entry number and stored in cache.
* The entry that is going to be used is marked as being used; it will be
* marked as not being used when ICE finishes with it and
* pfk_kc_load_key_end is invoked.
* As QSEE calls can only be made from a non-atomic context, @async set to
* 'false' specifies that it is OK to make the calls in the current context.
* Otherwise, when @async is set, -EAGAIN will be returned and the caller
* should retry the call from a different, non-atomic context.
*
* Return 0 in case of success, error otherwise
*/
int pfk_kc_load_key(const unsigned char *key, size_t key_size,
const unsigned char *salt, size_t salt_size, u32 *key_index)
int pfk_kc_load_key_start(const unsigned char *key, size_t key_size,
const unsigned char *salt, size_t salt_size, u32 *key_index,
bool async)
{
int ret = 0;
struct kc_entry *entry = NULL;
bool entry_exists = false;
if (!kc_is_ready())
return -ENODEV;
if (!key || !salt || !key_index)
return -EPERM;
return -EINVAL;
if (key_size != PFK_KC_KEY_SIZE)
return -EPERM;
if (key_size != PFK_KC_KEY_SIZE) {
pr_err("unsupported key size %lu\n", key_size);
return -EINVAL;
}
if (salt_size != PFK_KC_SALT_SIZE)
return -EPERM;
if (salt_size != PFK_KC_SALT_SIZE) {
pr_err("unsupported salt size %lu\n", salt_size);
return -EINVAL;
}
kc_spin_lock();
spin_lock(&kc_lock);
entry = kc_find_key(key, key_size, salt, salt_size);
if (!entry) {
entry = kc_find_oldest_entry();
if (async) {
kc_spin_unlock();
return -EAGAIN;
}
entry = kc_find_oldest_entry_non_locked();
if (!entry) {
pr_err("internal error, there should always be an oldest entry\n");
spin_unlock(&kc_lock);
return -EINVAL;
/* could not find a single non-locked entry,
* return EBUSY to upper layers so that the
* request will be rescheduled
*/
kc_spin_unlock();
return -EBUSY;
}
pr_debug("didn't found key in cache, replacing entry with index %d\n",
entry->key_index);
ret = kc_replace_entry(entry, key, key_size, salt, salt_size);
if (ret) {
spin_unlock(&kc_lock);
return -EINVAL;
}
} else {
pr_debug("found key in cache, index %d\n", entry->key_index);
entry_exists = true;
}
pr_debug("entry with index %d is in state %d\n",
entry->key_index, entry->state);
switch (entry->state) {
case (INACTIVE):
if (entry_exists) {
kc_update_timestamp(entry);
entry->state = ACTIVE_ICE_LOADED;
break;
}
case (FREE):
ret = kc_update_entry(entry, key, key_size, salt, salt_size);
if (ret) {
entry->state = SCM_ERROR;
entry->scm_error = ret;
pr_err("%s: key load error (%d)\n", __func__, ret);
} else {
entry->state = ACTIVE_ICE_LOADED;
kc_update_timestamp(entry);
}
break;
case (ACTIVE_ICE_PRELOAD):
case (INACTIVE_INVALIDATING):
ret = -EAGAIN;
break;
case (ACTIVE_ICE_LOADED):
kc_update_timestamp(entry);
break;
case (SCM_ERROR):
ret = entry->scm_error;
kc_clear_entry(entry);
entry->state = FREE;
break;
default:
pr_err("invalid state %d for entry with key index %d\n",
entry->state, entry->key_index);
ret = -EINVAL;
}
*key_index = entry->key_index;
spin_unlock(&kc_lock);
kc_spin_unlock();
return 0;
return ret;
}
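
Caller-side sketch of the @async contract documented above (editorial,
not part of the patch):

	u32 key_index;
	int err;

	/* async == true: caller may be atomic, no SCM calls allowed */
	err = pfk_kc_load_key_start(key, key_size, salt, salt_size,
			&key_index, true);
	if (err == -EAGAIN)
		return err;	/* needs an SCM call: retry from a non-atomic context */
	if (err == -EBUSY)
		return err;	/* every entry is pinned: reschedule the request */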
/**
* pfk_kc_load_key_end() - finish the process of key loading that was started
* by pfk_kc_load_key_start
* by marking the entry as not
* being in use
* @key: pointer to the key
* @key_size: the size of the key
* @salt: pointer to the salt
* @salt_size: the size of the salt
*
*/
void pfk_kc_load_key_end(const unsigned char *key, size_t key_size,
const unsigned char *salt, size_t salt_size)
{
struct kc_entry *entry = NULL;
if (!kc_is_ready())
return;
if (!key || !salt)
return;
if (key_size != PFK_KC_KEY_SIZE)
return;
if (salt_size != PFK_KC_SALT_SIZE)
return;
kc_spin_lock();
entry = kc_find_key(key, key_size, salt, salt_size);
if (!entry) {
kc_spin_unlock();
pr_err("internal error, there should an entry to unlock\n");
return;
}
entry->state = INACTIVE;
/* wake-up invalidation if it's waiting for the entry to be released */
if (entry->thread_pending) {
wake_up_process(entry->thread_pending);
entry->thread_pending = NULL;
}
kc_spin_unlock();
}
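
The wake-up above pairs with kc_entry_wait_till_available(); condensed
from the code in this patch, the handshake between the two sides is:

	/* invalidating side, under kc_spin_lock() */
	while (!kc_entry_is_available(entry)) {
		set_current_state(TASK_INTERRUPTIBLE);
		entry->thread_pending = current;	/* register for wake-up */
		kc_spin_unlock();
		schedule();				/* sleep until released */
		kc_spin_lock();
	}

	/* releasing side (pfk_kc_load_key_end), under kc_spin_lock() */
	entry->state = INACTIVE;
	if (entry->thread_pending) {
		wake_up_process(entry->thread_pending);
		entry->thread_pending = NULL;
	}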
/**
@@ -383,35 +616,47 @@ int pfk_kc_remove_key_with_salt(const unsigned char *key, size_t key_size,
const unsigned char *salt, size_t salt_size)
{
struct kc_entry *entry = NULL;
int res = 0;
if (!kc_is_ready())
return -ENODEV;
if (!key)
return -EPERM;
return -EINVAL;
if (!salt)
return -EPERM;
return -EINVAL;
if (key_size != PFK_KC_KEY_SIZE)
return -EPERM;
return -EINVAL;
if (salt_size != PFK_KC_SALT_SIZE)
return -EPERM;
return -EINVAL;
kc_spin_lock();
spin_lock(&kc_lock);
entry = kc_find_key(key, key_size, salt, salt_size);
if (!entry) {
pr_err("key does not exist\n");
spin_unlock(&kc_lock);
pr_debug("%s: key does not exist\n", __func__);
kc_spin_unlock();
return -EINVAL;
}
res = kc_entry_start_invalidating(entry);
if (res != 0) {
kc_spin_unlock();
return res;
}
kc_clear_entry(entry);
spin_unlock(&kc_lock);
kc_spin_unlock();
qti_pfk_ice_invalidate_key(entry->key_index);
kc_spin_lock();
kc_entry_finish_invalidating(entry);
kc_spin_unlock();
return 0;
}
@@ -423,80 +668,123 @@ int pfk_kc_remove_key_with_salt(const unsigned char *key, size_t key_size,
* @key: pointer to the key
* @key_size: the size of the key
*
* Return 0 in case of success, error otherwise (also in case of non
* (existing key)
* Return 0 in case of success, error otherwise (also for non-existing key)
*/
int pfk_kc_remove_key(const unsigned char *key, size_t key_size)
{
struct kc_entry *entry = NULL;
int index = 0;
int temp_indexes[PFK_KC_TABLE_SIZE] = {0};
int temp_indexes_size = 0;
int i = 0;
int res = 0;
if (!kc_is_ready())
return -ENODEV;
if (!key)
return -EPERM;
return -EINVAL;
if (key_size != PFK_KC_KEY_SIZE)
return -EPERM;
return -EINVAL;
memset(temp_indexes, -1, sizeof(temp_indexes));
spin_lock(&kc_lock);
kc_spin_lock();
entry = kc_find_key_at_index(key, key_size, NULL, 0, &index);
if (!entry) {
pr_debug("key does not exist\n");
spin_unlock(&kc_lock);
pr_err("%s: key does not exist\n", __func__);
kc_spin_unlock();
return -EINVAL;
}
temp_indexes[i++] = entry->key_index;
res = kc_entry_start_invalidating(entry);
if (res != 0) {
kc_spin_unlock();
return res;
}
temp_indexes[temp_indexes_size++] = index;
kc_clear_entry(entry);
/* let's clean additional entries with the same key if there are any */
do {
index++;
entry = kc_find_key_at_index(key, key_size, NULL, 0, &index);
if (!entry)
break;
temp_indexes[i++] = entry->key_index;
res = kc_entry_start_invalidating(entry);
if (res != 0) {
kc_spin_unlock();
/* only temp_indexes[0..temp_indexes_size-1] were marked invalidating */
temp_indexes_size--;
goto out;
}
temp_indexes[temp_indexes_size++] = index;
kc_clear_entry(entry);
} while (true);
spin_unlock(&kc_lock);
kc_spin_unlock();
for (i--; i >= 0 ; i--)
qti_pfk_ice_invalidate_key(temp_indexes[i]);
temp_indexes_size--;
for (i = temp_indexes_size; i >= 0 ; i--)
qti_pfk_ice_invalidate_key(
kc_entry_at_index(temp_indexes[i])->key_index);
return 0;
/* fall through */
res = 0;
out:
kc_spin_lock();
for (i = temp_indexes_size; i >= 0 ; i--)
kc_entry_finish_invalidating(
kc_entry_at_index(temp_indexes[i]));
kc_spin_unlock();
return res;
}
/**
* pfk_kc_clear() - clear the table and remove all keys from ICE
*
* Return 0 on success, error otherwise
*
*/
void pfk_kc_clear(void)
int pfk_kc_clear(void)
{
struct kc_entry *entry = NULL;
int i = 0;
int res = 0;
if (!kc_is_ready())
return;
return -ENODEV;
spin_lock(&kc_lock);
kc_spin_lock();
for (i = 0; i < PFK_KC_TABLE_SIZE; i++) {
entry = &(kc_table[i]);
entry = kc_entry_at_index(i);
res = kc_entry_start_invalidating(entry);
if (res != 0) {
kc_spin_unlock();
goto out;
}
kc_clear_entry(entry);
}
spin_unlock(&kc_lock);
kc_spin_unlock();
for (i = 0; i < PFK_KC_TABLE_SIZE; i++)
qti_pfk_ice_invalidate_key(entry->key_index);
qti_pfk_ice_invalidate_key(kc_entry_at_index(i)->key_index);
/* fall through */
res = 0;
out:
kc_spin_lock();
for (i = 0; i < PFK_KC_TABLE_SIZE; i++)
kc_entry_finish_invalidating(kc_entry_at_index(i));
kc_spin_unlock();
return res;
}

View file

@@ -1,4 +1,4 @@
/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -17,12 +17,15 @@
int pfk_kc_init(void);
int pfk_kc_deinit(void);
int pfk_kc_load_key(const unsigned char *key, size_t key_size,
const unsigned char *salt, size_t salt_size, u32 *key_index);
int pfk_kc_load_key_start(const unsigned char *key, size_t key_size,
const unsigned char *salt, size_t salt_size, u32 *key_index,
bool async);
void pfk_kc_load_key_end(const unsigned char *key, size_t key_size,
const unsigned char *salt, size_t salt_size);
int pfk_kc_remove_key_with_salt(const unsigned char *key, size_t key_size,
const unsigned char *salt, size_t salt_size);
int pfk_kc_remove_key(const unsigned char *key, size_t key_size);
void pfk_kc_clear(void);
int pfk_kc_clear(void);