ufs: fixed bugs in ice related to key synchronization

1. Added a reference count for requests in the HW queue for a particular key
2. Fixed a race between blocking/unblocking requests and the asynchronous job
for key configuration in ice

Change-Id: Iaefc25739b420b2e5feae1895c7c2495b4850539
Signed-off-by: Andrey Markovytch <andreym@codeaurora.org>
This commit is contained in:
Andrey Markovytch 2017-01-19 19:53:03 +02:00
parent 33eba0ae11
commit d751a8d90f
5 changed files with 125 additions and 23 deletions

View file

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved. * Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
* *
* This program is free software; you can redistribute it and/or modify * This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and * it under the terms of the GNU General Public License version 2 and
@ -14,6 +14,7 @@
#include <linux/io.h> #include <linux/io.h>
#include <linux/of.h> #include <linux/of.h>
#include <linux/blkdev.h> #include <linux/blkdev.h>
#include <linux/spinlock.h>
#include <crypto/ice.h> #include <crypto/ice.h>
#include "ufs-qcom-ice.h" #include "ufs-qcom-ice.h"
@ -168,6 +169,7 @@ out:
static void ufs_qcom_ice_cfg_work(struct work_struct *work) static void ufs_qcom_ice_cfg_work(struct work_struct *work)
{ {
unsigned long flags;
struct ice_data_setting ice_set; struct ice_data_setting ice_set;
struct ufs_qcom_host *qcom_host = struct ufs_qcom_host *qcom_host =
container_of(work, struct ufs_qcom_host, ice_cfg_work); container_of(work, struct ufs_qcom_host, ice_cfg_work);
@ -185,12 +187,17 @@ static void ufs_qcom_ice_cfg_work(struct work_struct *work)
qcom_host->ice.vops->config_start(qcom_host->ice.pdev, qcom_host->ice.vops->config_start(qcom_host->ice.pdev,
qcom_host->req_pending, &ice_set, false); qcom_host->req_pending, &ice_set, false);
spin_lock_irqsave(&qcom_host->ice_work_lock, flags);
qcom_host->req_pending = NULL;
spin_unlock_irqrestore(&qcom_host->ice_work_lock, flags);
/* /*
* Resume with requests processing. We assume config_start has been * Resume with requests processing. We assume config_start has been
* successful, but even if it wasn't we still must resume in order to * successful, but even if it wasn't we still must resume in order to
* allow for the request to be retried. * allow for the request to be retried.
*/ */
ufshcd_scsi_unblock_requests(qcom_host->hba); ufshcd_scsi_unblock_requests(qcom_host->hba);
} }
/** /**
@ -246,6 +253,7 @@ int ufs_qcom_ice_req_setup(struct ufs_qcom_host *qcom_host,
struct ice_data_setting ice_set; struct ice_data_setting ice_set;
char cmd_op = cmd->cmnd[0]; char cmd_op = cmd->cmnd[0];
int err; int err;
unsigned long flags;
if (!qcom_host->ice.pdev || !qcom_host->ice.vops) { if (!qcom_host->ice.pdev || !qcom_host->ice.vops) {
dev_dbg(qcom_host->hba->dev, "%s: ice device is not enabled\n", dev_dbg(qcom_host->hba->dev, "%s: ice device is not enabled\n",
@ -272,14 +280,36 @@ int ufs_qcom_ice_req_setup(struct ufs_qcom_host *qcom_host,
dev_dbg(qcom_host->hba->dev, dev_dbg(qcom_host->hba->dev,
"%s: scheduling task for ice setup\n", "%s: scheduling task for ice setup\n",
__func__); __func__);
qcom_host->req_pending = cmd->request;
if (schedule_work(&qcom_host->ice_cfg_work)) spin_lock_irqsave(
&qcom_host->ice_work_lock, flags);
if (!qcom_host->req_pending) {
ufshcd_scsi_block_requests( ufshcd_scsi_block_requests(
qcom_host->hba); qcom_host->hba);
qcom_host->req_pending = cmd->request;
if (!schedule_work(
&qcom_host->ice_cfg_work)) {
qcom_host->req_pending = NULL;
spin_unlock_irqrestore(
&qcom_host->ice_work_lock,
flags);
ufshcd_scsi_unblock_requests(
qcom_host->hba);
return err;
}
}
spin_unlock_irqrestore(
&qcom_host->ice_work_lock, flags);
} else { } else {
dev_err(qcom_host->hba->dev, if (err != -EBUSY)
"%s: error in ice_vops->config %d\n", dev_err(qcom_host->hba->dev,
__func__, err); "%s: error in ice_vops->config %d\n",
__func__, err);
} }
return err; return err;
@ -320,6 +350,7 @@ int ufs_qcom_ice_cfg_start(struct ufs_qcom_host *qcom_host,
unsigned int bypass = 0; unsigned int bypass = 0;
struct request *req; struct request *req;
char cmd_op; char cmd_op;
unsigned long flags;
if (!qcom_host->ice.pdev || !qcom_host->ice.vops) { if (!qcom_host->ice.pdev || !qcom_host->ice.vops) {
dev_dbg(dev, "%s: ice device is not enabled\n", __func__); dev_dbg(dev, "%s: ice device is not enabled\n", __func__);
@ -365,12 +396,43 @@ int ufs_qcom_ice_cfg_start(struct ufs_qcom_host *qcom_host,
* request processing. * request processing.
*/ */
if (err == -EAGAIN) { if (err == -EAGAIN) {
qcom_host->req_pending = req;
if (schedule_work(&qcom_host->ice_cfg_work)) dev_dbg(qcom_host->hba->dev,
"%s: scheduling task for ice setup\n",
__func__);
spin_lock_irqsave(
&qcom_host->ice_work_lock, flags);
if (!qcom_host->req_pending) {
ufshcd_scsi_block_requests( ufshcd_scsi_block_requests(
qcom_host->hba);
qcom_host->req_pending = cmd->request;
if (!schedule_work(
&qcom_host->ice_cfg_work)) {
qcom_host->req_pending = NULL;
spin_unlock_irqrestore(
&qcom_host->ice_work_lock,
flags);
ufshcd_scsi_unblock_requests(
qcom_host->hba); qcom_host->hba);
return err;
}
}
spin_unlock_irqrestore(
&qcom_host->ice_work_lock, flags);
} else {
if (err != -EBUSY)
dev_err(qcom_host->hba->dev,
"%s: error in ice_vops->config %d\n",
__func__, err);
} }
goto out;
return err;
} }
} }

View file

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2013-2016, Linux Foundation. All rights reserved. * Copyright (c) 2013-2017, Linux Foundation. All rights reserved.
* *
* This program is free software; you can redistribute it and/or modify * This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and * it under the terms of the GNU General Public License version 2 and
@ -1981,6 +1981,8 @@ static int ufs_qcom_init(struct ufs_hba *hba)
/* Make a two way bind between the qcom host and the hba */ /* Make a two way bind between the qcom host and the hba */
host->hba = hba; host->hba = hba;
spin_lock_init(&host->ice_work_lock);
ufshcd_set_variant(hba, host); ufshcd_set_variant(hba, host);
err = ufs_qcom_ice_get_dev(host); err = ufs_qcom_ice_get_dev(host);

View file

@ -1,4 +1,4 @@
/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved. /* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
* *
* This program is free software; you can redistribute it and/or modify * This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and * it under the terms of the GNU General Public License version 2 and
@ -370,6 +370,7 @@ struct ufs_qcom_host {
u32 dbg_print_en; u32 dbg_print_en;
struct ufs_qcom_testbus testbus; struct ufs_qcom_testbus testbus;
spinlock_t ice_work_lock;
struct work_struct ice_cfg_work; struct work_struct ice_cfg_work;
struct request *req_pending; struct request *req_pending;
}; };

View file

@ -48,6 +48,7 @@
#include "ufshci.h" #include "ufshci.h"
#include "ufs_quirks.h" #include "ufs_quirks.h"
#include "ufs-debugfs.h" #include "ufs-debugfs.h"
#include "ufs-qcom.h"
#define CREATE_TRACE_POINTS #define CREATE_TRACE_POINTS
#include <trace/events/ufs.h> #include <trace/events/ufs.h>
@ -2877,11 +2878,11 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
"%s: failed to compose upiu %d\n", "%s: failed to compose upiu %d\n",
__func__, err); __func__, err);
lrbp->cmd = NULL; lrbp->cmd = NULL;
clear_bit_unlock(tag, &hba->lrb_in_use); clear_bit_unlock(tag, &hba->lrb_in_use);
ufshcd_release_all(hba); ufshcd_release_all(hba);
ufshcd_vops_pm_qos_req_end(hba, cmd->request, true); ufshcd_vops_pm_qos_req_end(hba, cmd->request, true);
goto out; goto out;
} }
err = ufshcd_map_sg(lrbp); err = ufshcd_map_sg(lrbp);

View file

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved. * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
* *
* This program is free software; you can redistribute it and/or modify * This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and * it under the terms of the GNU General Public License version 2 and
@ -100,6 +100,9 @@ struct kc_entry {
struct task_struct *thread_pending; struct task_struct *thread_pending;
enum pfk_kc_entry_state state; enum pfk_kc_entry_state state;
/* ref count for the number of requests in the HW queue for this key */
int loaded_ref_cnt;
int scm_error; int scm_error;
}; };
@ -520,6 +523,10 @@ int pfk_kc_load_key_start(const unsigned char *key, size_t key_size,
if (entry_exists) { if (entry_exists) {
kc_update_timestamp(entry); kc_update_timestamp(entry);
entry->state = ACTIVE_ICE_LOADED; entry->state = ACTIVE_ICE_LOADED;
if (async)
entry->loaded_ref_cnt++;
break; break;
} }
case (FREE): case (FREE):
@ -529,8 +536,17 @@ int pfk_kc_load_key_start(const unsigned char *key, size_t key_size,
entry->scm_error = ret; entry->scm_error = ret;
pr_err("%s: key load error (%d)\n", __func__, ret); pr_err("%s: key load error (%d)\n", __func__, ret);
} else { } else {
entry->state = ACTIVE_ICE_LOADED;
kc_update_timestamp(entry); kc_update_timestamp(entry);
entry->state = ACTIVE_ICE_LOADED;
/*
* only increase ref cnt for async calls,
* sync calls from within work thread do not pass
* requests further to HW
*/
if (async)
entry->loaded_ref_cnt++;
} }
break; break;
case (ACTIVE_ICE_PRELOAD): case (ACTIVE_ICE_PRELOAD):
@ -539,6 +555,10 @@ int pfk_kc_load_key_start(const unsigned char *key, size_t key_size,
break; break;
case (ACTIVE_ICE_LOADED): case (ACTIVE_ICE_LOADED):
kc_update_timestamp(entry); kc_update_timestamp(entry);
if (async)
entry->loaded_ref_cnt++;
break; break;
case(SCM_ERROR): case(SCM_ERROR):
ret = entry->scm_error; ret = entry->scm_error;
@ -572,6 +592,8 @@ void pfk_kc_load_key_end(const unsigned char *key, size_t key_size,
const unsigned char *salt, size_t salt_size) const unsigned char *salt, size_t salt_size)
{ {
struct kc_entry *entry = NULL; struct kc_entry *entry = NULL;
struct task_struct *tmp_pending = NULL;
int ref_cnt = 0;
if (!kc_is_ready()) if (!kc_is_ready())
return; return;
@ -591,14 +613,28 @@ void pfk_kc_load_key_end(const unsigned char *key, size_t key_size,
if (!entry) { if (!entry) {
kc_spin_unlock(); kc_spin_unlock();
pr_err("internal error, there should an entry to unlock\n"); pr_err("internal error, there should an entry to unlock\n");
return; return;
} }
entry->state = INACTIVE; ref_cnt = --entry->loaded_ref_cnt;
/* wake-up invalidation if it's waiting for the entry to be released */ if (ref_cnt < 0)
if (entry->thread_pending) { pr_err("internal error, ref count should never be negative\n");
wake_up_process(entry->thread_pending);
entry->thread_pending = NULL; if (!ref_cnt) {
entry->state = INACTIVE;
/*
* wake-up invalidation if it's waiting
* for the entry to be released
*/
if (entry->thread_pending) {
tmp_pending = entry->thread_pending;
entry->thread_pending = NULL;
kc_spin_unlock();
wake_up_process(tmp_pending);
return;
}
} }
kc_spin_unlock(); kc_spin_unlock();