scsi: ufs: add Inline Crypto Engine (ICE) support to UFS

In order to enhance storage encryption performance,
an Inline Cryptographic Engine (ICE) is introduced to UFS.
This patch adds inline encryption capabilities to the UFS
driver.

Change-Id: Id3cb913498809b32e1f7eba96395b05a9bf3219f
Signed-off-by: Noa Rubens <noag@codeaurora.org>
Signed-off-by: Yaniv Gardi <ygardi@codeaurora.org>
Signed-off-by: Maya Erez <merez@codeaurora.org>
[subhashj@codeaurora.org: resolved trivial merge conflicts]
Signed-off-by: Subhash Jadavani <subhashj@codeaurora.org>
[venkatg@codeaurora.org: resolved trivial merge conflicts]
Signed-off-by: Venkat Gopalakrishnan <venkatg@codeaurora.org>
This commit is contained in:
Maya Erez 2014-10-27 21:57:45 +02:00 committed by David Keitel
parent 1eabea8b95
commit 89dc95d277
9 changed files with 917 additions and 11 deletions

View file

@ -46,6 +46,7 @@ Optional properties:
4 - UFS device in Power-down state and Link in Hibern8 state 4 - UFS device in Power-down state and Link in Hibern8 state
5 - UFS device in Power-down state and Link in OFF state (Lowest power consumption) 5 - UFS device in Power-down state and Link in OFF state (Lowest power consumption)
- spm-level : UFS System power management level. Allowed PM levels are same as rpm-level. - spm-level : UFS System power management level. Allowed PM levels are same as rpm-level.
- ufs-qcom-crypto : phandle to UFS-QCOM ICE (Inline Cryptographic Engine) node
Note: If above properties are not defined it can be assumed that the supply Note: If above properties are not defined it can be assumed that the supply
regulators or clocks are always on. regulators or clocks are always on.
@ -56,6 +57,7 @@ Example:
reg = <0xfc598000 0x800>; reg = <0xfc598000 0x800>;
interrupts = <0 28 0>; interrupts = <0 28 0>;
ufs-qcom-crypto = <&ufs_ice>;
vdd-hba-supply = <&xxx_reg0>; vdd-hba-supply = <&xxx_reg0>;
vdd-hba-fixed-regulator; vdd-hba-fixed-regulator;
vcc-supply = <&xxx_reg1>; vcc-supply = <&xxx_reg1>;

View file

@ -84,6 +84,19 @@ config SCSI_UFS_QCOM
Select this if you have UFS controller on QCOM chipset. Select this if you have UFS controller on QCOM chipset.
If unsure, say N. If unsure, say N.
config SCSI_UFS_QCOM_ICE
bool "QCOM specific hooks to Inline Crypto Engine for UFS driver"
depends on SCSI_UFS_QCOM && CRYPTO_DEV_QCOM_ICE
help
This selects the QCOM specific additions to support Inline Crypto
Engine (ICE).
ICE accelerates the crypto operations and maintains the high UFS
performance.
Select this if you have ICE supported for UFS on QCOM chipset.
If unsure, say N.
config SCSI_UFS_TEST config SCSI_UFS_TEST
tristate "Universal Flash Storage host controller driver unit-tests" tristate "Universal Flash Storage host controller driver unit-tests"
depends on SCSI_UFSHCD && IOSCHED_TEST depends on SCSI_UFSHCD && IOSCHED_TEST

View file

@ -1,5 +1,6 @@
# UFSHCD makefile # UFSHCD makefile
obj-$(CONFIG_SCSI_UFS_QCOM) += ufs-qcom.o obj-$(CONFIG_SCSI_UFS_QCOM) += ufs-qcom.o
obj-$(CONFIG_SCSI_UFS_QCOM_ICE) += ufs-qcom-ice.o
obj-$(CONFIG_SCSI_UFSHCD) += ufshcd.o ufs_quirks.o obj-$(CONFIG_SCSI_UFSHCD) += ufshcd.o ufs_quirks.o
obj-$(CONFIG_SCSI_UFSHCD_PCI) += ufshcd-pci.o obj-$(CONFIG_SCSI_UFSHCD_PCI) += ufshcd-pci.o
obj-$(CONFIG_SCSI_UFSHCD_PLATFORM) += ufshcd-pltfrm.o obj-$(CONFIG_SCSI_UFSHCD_PLATFORM) += ufshcd-pltfrm.o

View file

@ -0,0 +1,522 @@
/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/io.h>
#include <linux/of.h>
#include <linux/async.h>
#include <linux/blkdev.h>
#include <linux/scsi/ufs/ufshcd.h>
#include <crypto/ice.h>
#include "ufs-qcom-ice.h"
#define UFS_QCOM_CRYPTO_LABEL "ufs-qcom-crypto"
/* Timeout waiting for ICE initialization, that requires TZ access */
#define UFS_QCOM_ICE_COMPLETION_TIMEOUT_MS 500
static void ufs_qcom_ice_success_cb(void *host_ctrl,
enum ice_event_completion evt)
{
struct ufs_qcom_host *qcom_host = (struct ufs_qcom_host *)host_ctrl;
if (qcom_host->ice.state == UFS_QCOM_ICE_STATE_DISABLED &&
evt == ICE_INIT_COMPLETION)
qcom_host->ice.state = UFS_QCOM_ICE_STATE_ACTIVE;
else if (qcom_host->ice.state == UFS_QCOM_ICE_STATE_SUSPENDED &&
evt == ICE_RESUME_COMPLETION)
qcom_host->ice.state = UFS_QCOM_ICE_STATE_ACTIVE;
complete(&qcom_host->ice.async_done);
}
/*
 * Error callback registered with the ICE driver.
 * Logs the ICE error, invalidates an ACTIVE interface by moving it to
 * DISABLED, and wakes any waiter blocked on async_done.
 */
static void ufs_qcom_ice_error_cb(void *host_ctrl, enum ice_error_code evt)
{
	struct ufs_qcom_host *qcom_host = host_ctrl;

	dev_err(qcom_host->hba->dev, "%s: Error in ice operation %d",
		__func__, evt);

	if (qcom_host->ice.state == UFS_QCOM_ICE_STATE_ACTIVE)
		qcom_host->ice.state = UFS_QCOM_ICE_STATE_DISABLED;

	complete(&qcom_host->ice.async_done);
}
/*
 * ufs_qcom_ice_get_pdevice() - find the ICE platform device for this UFS host
 * @ufs_dev: the UFS controller's struct device
 *
 * Resolves the "ufs-qcom-crypto" DT phandle and asks the ICE driver for
 * the matching platform device.
 *
 * Return: the ICE platform device, an ERR_PTR (e.g. -EPROBE_DEFER) from
 * qcom_ice_get_pdevice(), or NULL if the property is absent.
 */
static struct platform_device *ufs_qcom_ice_get_pdevice(struct device *ufs_dev)
{
	struct device_node *node;
	struct platform_device *ice_pdev = NULL;

	node = of_parse_phandle(ufs_dev->of_node, UFS_QCOM_CRYPTO_LABEL, 0);
	if (!node) {
		dev_err(ufs_dev, "%s: ufs-qcom-crypto property not specified\n",
			__func__);
		goto out;
	}

	ice_pdev = qcom_ice_get_pdevice(node);
	/*
	 * Fix: of_parse_phandle() takes a reference on the node; drop it
	 * here (the original leaked it, unlike ufs_qcom_ice_get_vops()).
	 */
	of_node_put(node);
out:
	return ice_pdev;
}
/*
 * ufs_qcom_ice_get_vops() - look up the ICE variant operations table
 * @ufs_dev: the UFS controller's struct device
 *
 * Resolves the "ufs-qcom-crypto" DT phandle and asks the ICE driver for
 * its variant ops. Return: the ops table, or NULL on any failure.
 */
static
struct qcom_ice_variant_ops *ufs_qcom_ice_get_vops(struct device *ufs_dev)
{
	struct device_node *node;
	struct qcom_ice_variant_ops *ice_vops;

	node = of_parse_phandle(ufs_dev->of_node, UFS_QCOM_CRYPTO_LABEL, 0);
	if (!node) {
		dev_err(ufs_dev, "%s: ufs-qcom-crypto property not specified\n",
			__func__);
		return NULL;
	}

	ice_vops = qcom_ice_get_variant_ops(node);
	of_node_put(node);

	if (!ice_vops)
		dev_err(ufs_dev, "%s: invalid ice_vops\n", __func__);

	return ice_vops;
}
/**
 * ufs_qcom_ice_get_dev() - sets pointers to ICE data structs in UFS QCom host
 * @qcom_host: Pointer to a UFS QCom internal host structure.
 *
 * Sets ICE platform device pointer and ICE vops structure
 * corresponding to the current UFS device.
 *
 * Return: -EINVAL in-case of invalid input parameters:
 *  qcom_host, qcom_host->hba or qcom_host->hba->dev
 *         -ENODEV in-case ICE device is not required
 *         -EPROBE_DEFER in-case ICE is required and hasn't been probed yet
 *         0 otherwise
 */
int ufs_qcom_ice_get_dev(struct ufs_qcom_host *qcom_host)
{
	struct device *ufs_dev;

	if (!qcom_host || !qcom_host->hba || !qcom_host->hba->dev) {
		pr_err("%s: invalid qcom_host %p or qcom_host->hba or qcom_host->hba->dev\n",
			__func__, qcom_host);
		return -EINVAL;
	}

	ufs_dev = qcom_host->hba->dev;
	qcom_host->ice.vops = ufs_qcom_ice_get_vops(ufs_dev);
	qcom_host->ice.pdev = ufs_qcom_ice_get_pdevice(ufs_dev);

	/* ICE exists in DT but its driver has not probed yet: defer. */
	if (qcom_host->ice.pdev == ERR_PTR(-EPROBE_DEFER)) {
		dev_err(ufs_dev, "%s: ICE device not probed yet\n",
			__func__);
		qcom_host->ice.pdev = NULL;
		qcom_host->ice.vops = NULL;
		return -EPROBE_DEFER;
	}

	/* Either both pdev and vops are usable, or neither is kept. */
	if (!qcom_host->ice.pdev || !qcom_host->ice.vops) {
		dev_err(ufs_dev, "%s: invalid platform device %p or vops %p\n",
			__func__, qcom_host->ice.pdev, qcom_host->ice.vops);
		qcom_host->ice.pdev = NULL;
		qcom_host->ice.vops = NULL;
		return -ENODEV;
	}

	qcom_host->ice.state = UFS_QCOM_ICE_STATE_DISABLED;
	return 0;
}
/**
 * ufs_qcom_ice_init() - initializes the ICE-UFS interface and ICE device
 * @qcom_host: Pointer to a UFS QCom internal host structure.
 *	qcom_host, qcom_host->hba and qcom_host->hba->dev should all
 *	be valid pointers; ice.vops and ice.pdev must have been set up
 *	by ufs_qcom_ice_get_dev().
 *
 * Kicks off the asynchronous ICE initialization and waits (bounded) for
 * the success callback to mark the interface ACTIVE.
 *
 * Return: -EINVAL in-case of an error
 *         0 otherwise
 */
int ufs_qcom_ice_init(struct ufs_qcom_host *qcom_host)
{
	struct device *ufs_dev = qcom_host->hba->dev;
	unsigned long tmo =
		msecs_to_jiffies(UFS_QCOM_ICE_COMPLETION_TIMEOUT_MS);
	int err;

	init_completion(&qcom_host->ice.async_done);

	err = qcom_host->ice.vops->init(qcom_host->ice.pdev, qcom_host,
					ufs_qcom_ice_success_cb,
					ufs_qcom_ice_error_cb);
	if (err) {
		dev_err(ufs_dev, "%s: ice init failed. err = %d\n",
			__func__, err);
		return err;
	}

	/* Init completes asynchronously (requires TZ access); wait for it. */
	if (!wait_for_completion_timeout(&qcom_host->ice.async_done, tmo)) {
		dev_err(ufs_dev, "%s: error. got timeout after %d ms\n",
			__func__, UFS_QCOM_ICE_COMPLETION_TIMEOUT_MS);
		return -ETIMEDOUT;
	}

	/* The success callback is what moves the state to ACTIVE. */
	if (qcom_host->ice.state != UFS_QCOM_ICE_STATE_ACTIVE) {
		dev_err(ufs_dev,
			"%s: error. ice.state (%d) is not in active state\n",
			__func__, qcom_host->ice.state);
		return -EINVAL;
	}

	return 0;
}
/*
 * ufs_qcom_is_data_cmd() - is this SCSI opcode a data transfer command?
 * @cmd_op: SCSI opcode (first CDB byte)
 * @is_write: true to test for WRITE opcodes, false for READ opcodes
 */
static inline bool ufs_qcom_is_data_cmd(char cmd_op, bool is_write)
{
	switch (cmd_op) {
	case WRITE_6:
	case WRITE_10:
	case WRITE_16:
		return is_write;
	case READ_6:
	case READ_10:
	case READ_16:
		return !is_write;
	default:
		return false;
	}
}
/**
 * ufs_qcom_ice_cfg() - configures UFS's ICE registers for an ICE transaction
 * @qcom_host: Pointer to a UFS QCom internal host structure.
 *	qcom_host, qcom_host->hba and qcom_host->hba->dev should all
 *	be valid pointers.
 * @cmd: Pointer to a valid scsi command. cmd->request should also be
 *      a valid pointer.
 *
 * Programs REG_UFS_QCOM_ICE_CTRL_INFO_{1,2}_n for the request's slot:
 * the LBA, the ICE key index, the crypto data unit size, and whether to
 * bypass encryption/decryption for this transfer.
 *
 * Return: -EINVAL in-case of an error
 *         0 otherwise
 */
int ufs_qcom_ice_cfg(struct ufs_qcom_host *qcom_host, struct scsi_cmnd *cmd)
{
	struct device *dev = qcom_host->hba->dev;
	int err = 0;
	struct ice_data_setting ice_set;
	int slot;
	sector_t lba = 0;
	unsigned int ctrl_info_2_val = 0;
	unsigned int bypass = 0;
	struct request *req;
	char cmd_op;

	if (!qcom_host->ice.pdev || !qcom_host->ice.vops) {
		dev_dbg(dev, "%s: ice device is not enabled\n", __func__);
		goto out;
	}

	if (qcom_host->ice.state != UFS_QCOM_ICE_STATE_ACTIVE) {
		dev_err(dev, "%s: ice state (%d) is not active\n",
			__func__, qcom_host->ice.state);
		return -EINVAL;
	}

	req = cmd->request;
	if (req->bio)
		lba = req->bio->bi_sector;

	/*
	 * Fix: slot must be signed so an unassigned tag (-1) is caught
	 * ("slot < 0" was always false on the original unsigned int), and
	 * valid slots are 0..nutrs-1, so "slot > nutrs" was off by one.
	 */
	slot = req->tag;
	if (slot < 0 || slot >= qcom_host->hba->nutrs) {
		dev_err(dev, "%s: slot (%d) is out of boundaries (0...%d)\n",
			__func__, slot, qcom_host->hba->nutrs);
		return -EINVAL;
	}

	/* Fix: memset() value/size arguments were swapped (cleared 0 bytes) */
	memset(&ice_set, 0, sizeof(ice_set));

	/* Let the ICE driver pick the key/bypass settings for this request */
	if (qcom_host->ice.vops->config) {
		err = qcom_host->ice.vops->config(qcom_host->ice.pdev,
			req, &ice_set);

		if (err) {
			dev_err(dev, "%s: error in ice_vops->config %d\n",
				__func__, err);
			goto out;
		}
	}

	cmd_op = cmd->cmnd[0];

#define UFS_QCOM_DIR_WRITE	true
#define UFS_QCOM_DIR_READ	false
	/* if non data command, bypass shall be enabled */
	if (!ufs_qcom_is_data_cmd(cmd_op, UFS_QCOM_DIR_WRITE) &&
	    !ufs_qcom_is_data_cmd(cmd_op, UFS_QCOM_DIR_READ))
		bypass = UFS_QCOM_ICE_ENABLE_BYPASS;
	/* if writing data command */
	else if (ufs_qcom_is_data_cmd(cmd_op, UFS_QCOM_DIR_WRITE))
		bypass = ice_set.encr_bypass ? UFS_QCOM_ICE_ENABLE_BYPASS :
						UFS_QCOM_ICE_DISABLE_BYPASS;
	/* if reading data command */
	else if (ufs_qcom_is_data_cmd(cmd_op, UFS_QCOM_DIR_READ))
		bypass = ice_set.decr_bypass ? UFS_QCOM_ICE_ENABLE_BYPASS :
						UFS_QCOM_ICE_DISABLE_BYPASS;

	/* Configure ICE index */
	ctrl_info_2_val =
		(ice_set.crypto_data.key_index &
		 MASK_UFS_QCOM_ICE_CTRL_INFO_2_KEY_INDEX)
		 << OFFSET_UFS_QCOM_ICE_CTRL_INFO_2_KEY_INDEX;

	/* Configure data unit size of transfer request */
	ctrl_info_2_val |=
		(UFS_QCOM_ICE_TR_DATA_UNIT_4_KB &
		 MASK_UFS_QCOM_ICE_CTRL_INFO_2_CDU)
		 << OFFSET_UFS_QCOM_ICE_CTRL_INFO_2_CDU;

	/* Configure ICE bypass mode */
	ctrl_info_2_val |=
		(bypass & MASK_UFS_QCOM_ICE_CTRL_INFO_2_BYPASS)
		 << OFFSET_UFS_QCOM_ICE_CTRL_INFO_2_BYPASS;

	ufshcd_writel(qcom_host->hba, lba,
		(REG_UFS_QCOM_ICE_CTRL_INFO_1_n + 8 * slot));

	ufshcd_writel(qcom_host->hba, ctrl_info_2_val,
		(REG_UFS_QCOM_ICE_CTRL_INFO_2_n + 8 * slot));

	/*
	 * Ensure the UFS-ICE registers are written before the doorbell is
	 * rung, otherwise the UFS Host Controller might see stale values
	 * and raise errors.
	 */
	mb();
out:
	return err;
}
/**
 * ufs_qcom_ice_reset() - resets UFS-ICE interface and ICE device
 * @qcom_host: Pointer to a UFS QCom internal host structure.
 *	qcom_host, qcom_host->hba and qcom_host->hba->dev should all
 *	be valid pointers.
 *
 * Return: -EINVAL in-case of an error
 *         0 otherwise
 */
int ufs_qcom_ice_reset(struct ufs_qcom_host *qcom_host)
{
	struct device *dev = qcom_host->hba->dev;
	int err = 0;

	if (!qcom_host->ice.pdev) {
		dev_dbg(dev, "%s: ice device is not enabled\n", __func__);
		return 0;
	}

	if (!qcom_host->ice.vops) {
		dev_err(dev, "%s: invalid ice_vops\n", __func__);
		return -EINVAL;
	}

	/* Only an ACTIVE interface needs (and can take) a reset. */
	if (qcom_host->ice.state != UFS_QCOM_ICE_STATE_ACTIVE)
		return 0;

	init_completion(&qcom_host->ice.async_done);

	if (qcom_host->ice.vops->reset) {
		err = qcom_host->ice.vops->reset(qcom_host->ice.pdev);
		if (err) {
			dev_err(dev, "%s: ice_vops->reset failed. err %d\n",
				__func__, err);
			return err;
		}
	}

	/* The reset completes asynchronously; wait bounded for the callback */
	if (!wait_for_completion_timeout(&qcom_host->ice.async_done,
	     msecs_to_jiffies(UFS_QCOM_ICE_COMPLETION_TIMEOUT_MS))) {
		dev_err(dev, "%s: error. got timeout after %d ms\n",
			__func__, UFS_QCOM_ICE_COMPLETION_TIMEOUT_MS);
		err = -ETIMEDOUT;
	}

	return err;
}
/**
 * ufs_qcom_ice_resume() - resumes UFS-ICE interface and ICE device from power
 * collapse
 * @qcom_host: Pointer to a UFS QCom internal host structure.
 *	qcom_host, qcom_host->hba and qcom_host->hba->dev should all
 *	be valid pointers.
 *
 * Return: 0 on success (or when there is nothing to resume),
 *	-ETIMEDOUT if the resume completion never arrived,
 *	-EINVAL if vops are missing or the ACTIVE state was not reached,
 *	or the error code reported by the ICE resume operation.
 */
int ufs_qcom_ice_resume(struct ufs_qcom_host *qcom_host)
{
	struct device *dev = qcom_host->hba->dev;
	int err = 0;

	if (!qcom_host->ice.pdev) {
		dev_dbg(dev, "%s: ice device is not enabled\n", __func__);
		goto out;
	}

	/* Nothing to do unless we were previously suspended. */
	if (qcom_host->ice.state != UFS_QCOM_ICE_STATE_SUSPENDED)
		goto out;

	if (!qcom_host->ice.vops) {
		dev_err(dev, "%s: invalid ice_vops\n", __func__);
		return -EINVAL;
	}

	init_completion(&qcom_host->ice.async_done);

	if (qcom_host->ice.vops->resume) {
		err = qcom_host->ice.vops->resume(qcom_host->ice.pdev);
		if (err) {
			dev_err(dev, "%s: ice_vops->resume failed. err %d\n",
				__func__, err);
			/*
			 * Fix: propagate the real error code; the original
			 * collapsed every resume failure to -EINVAL.
			 */
			goto out;
		}
	}

	if (!wait_for_completion_timeout(&qcom_host->ice.async_done,
		msecs_to_jiffies(UFS_QCOM_ICE_COMPLETION_TIMEOUT_MS))) {
		dev_err(dev,
			"%s: error. got timeout after %d ms\n",
			__func__, UFS_QCOM_ICE_COMPLETION_TIMEOUT_MS);
		err = -ETIMEDOUT;
		goto out;
	}

	/* The success callback is what moves the state to ACTIVE. */
	if (qcom_host->ice.state != UFS_QCOM_ICE_STATE_ACTIVE)
		err = -EINVAL;
out:
	return err;
}
/**
 * ufs_qcom_ice_suspend() - suspends UFS-ICE interface and ICE device
 * @qcom_host: Pointer to a UFS QCom internal host structure.
 *	qcom_host, qcom_host->hba and qcom_host->hba->dev should all
 *	be valid pointers.
 *
 * Return: -EINVAL in-case of an error
 *         0 otherwise
 */
int ufs_qcom_ice_suspend(struct ufs_qcom_host *qcom_host)
{
	struct device *dev = qcom_host->hba->dev;
	int err = 0;

	if (!qcom_host->ice.pdev) {
		dev_dbg(dev, "%s: ice device is not enabled\n", __func__);
		goto out;
	}

	/*
	 * Fix: validate vops before dereferencing it, as every other
	 * entry point (reset/resume/get_status) does.
	 */
	if (!qcom_host->ice.vops) {
		dev_err(dev, "%s: invalid ice_vops\n", __func__);
		return -EINVAL;
	}

	if (qcom_host->ice.vops->suspend) {
		err = qcom_host->ice.vops->suspend(qcom_host->ice.pdev);
		if (err) {
			dev_err(dev,
				"%s: ice_vops->suspend failed. err %d\n",
				__func__, err);
			return -EINVAL;
		}
	}

	if (qcom_host->ice.state == UFS_QCOM_ICE_STATE_ACTIVE) {
		qcom_host->ice.state = UFS_QCOM_ICE_STATE_SUSPENDED;
	} else if (qcom_host->ice.state == UFS_QCOM_ICE_STATE_DISABLED) {
		/* Suspending a disabled interface indicates a sequencing bug */
		dev_err(dev,
			"%s: ice state is invalid: disabled\n",
			__func__);
		err = -EINVAL;
	}
out:
	return err;
}
/**
 * ufs_qcom_ice_get_status() - returns the status of an ICE transaction
 * @qcom_host: Pointer to a UFS QCom internal host structure.
 *	qcom_host, qcom_host->hba and qcom_host->hba->dev should all
 *	be valid pointers.
 * @ice_status: Pointer to a valid output parameter.
 *	< 0 in case of ICE transaction failure.
 *	0 otherwise.
 *
 * Return: -EINVAL in-case of an error
 *         0 otherwise
 */
int ufs_qcom_ice_get_status(struct ufs_qcom_host *qcom_host, int *ice_status)
{
	struct device *dev;
	int err = 0;
	int stat = -EINVAL;

	/*
	 * Fix: the original did "ice_status = 0;", which NULLed the local
	 * pointer instead of initializing the caller's output value, and
	 * then crashed on "*ice_status = stat" below. Validate the pointer
	 * and initialize *ice_status instead.
	 */
	if (!ice_status)
		return -EINVAL;
	*ice_status = 0;

	dev = qcom_host->hba->dev;
	if (!dev) {
		err = -EINVAL;
		goto out;
	}

	if (!qcom_host->ice.pdev) {
		dev_dbg(dev, "%s: ice device is not enabled\n", __func__);
		goto out;
	}

	if (qcom_host->ice.state != UFS_QCOM_ICE_STATE_ACTIVE) {
		err = -EINVAL;
		goto out;
	}

	if (!qcom_host->ice.vops) {
		dev_err(dev, "%s: invalid ice_vops\n", __func__);
		return -EINVAL;
	}

	if (qcom_host->ice.vops->status) {
		stat = qcom_host->ice.vops->status(qcom_host->ice.pdev);
		if (stat < 0) {
			dev_err(dev, "%s: ice_vops->status failed. stat %d\n",
				__func__, stat);
			err = -EINVAL;
			goto out;
		}

		*ice_status = stat;
	}
out:
	return err;
}

View file

@ -0,0 +1,113 @@
/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#ifndef _UFS_QCOM_ICE_H_
#define _UFS_QCOM_ICE_H_
#include <scsi/scsi_cmnd.h>
#include <linux/scsi/ufs/ufs-qcom.h>
/*
 * UFS host controller ICE registers. There are n [0..31]
 * of each of these registers, one pair per request slot
 * (ufs-qcom-ice.c addresses them with an 8-byte stride: REG + 8 * slot).
 */
enum {
	REG_UFS_QCOM_ICE_CTRL_INFO_1_n = 0x2204,
	REG_UFS_QCOM_ICE_CTRL_INFO_2_n = 0x2208,
};
/* UFS QCOM ICE CTRL Info 2 register offset (bit positions of each field) */
enum {
	OFFSET_UFS_QCOM_ICE_CTRL_INFO_2_BYPASS = 0,
	OFFSET_UFS_QCOM_ICE_CTRL_INFO_2_KEY_INDEX = 0x1,
	OFFSET_UFS_QCOM_ICE_CTRL_INFO_2_CDU = 0x6,
};
/*
 * UFS QCOM ICE CTRL Info 2 register masks, applied to the raw value
 * before shifting by the corresponding OFFSET_* above.
 * NOTE(review): MASK_..._CDU = 0x8 keeps only bit 3, so any CDU value in
 * 1..7 (e.g. UFS_QCOM_ICE_TR_DATA_UNIT_4_KB == 3, which ufs_qcom_ice_cfg()
 * writes) is masked down to 0. A 3-bit field mask of 0x7 looks intended —
 * verify against the ICE hardware programming guide.
 */
enum {
	MASK_UFS_QCOM_ICE_CTRL_INFO_2_BYPASS = 0x1,
	MASK_UFS_QCOM_ICE_CTRL_INFO_2_KEY_INDEX = 0x1F,
	MASK_UFS_QCOM_ICE_CTRL_INFO_2_CDU = 0x8,
};
/* UFS QCOM ICE encryption/decryption bypass state */
enum {
	UFS_QCOM_ICE_DISABLE_BYPASS = 0,
	UFS_QCOM_ICE_ENABLE_BYPASS = 1,
};
/* UFS QCOM ICE Crypto Data Unit of target DUN of Transfer Request */
enum {
	UFS_QCOM_ICE_TR_DATA_UNIT_512_B = 0,
	UFS_QCOM_ICE_TR_DATA_UNIT_1_KB = 1,
	UFS_QCOM_ICE_TR_DATA_UNIT_2_KB = 2,
	UFS_QCOM_ICE_TR_DATA_UNIT_4_KB = 3,
	UFS_QCOM_ICE_TR_DATA_UNIT_8_KB = 4,
	UFS_QCOM_ICE_TR_DATA_UNIT_16_KB = 5,
	UFS_QCOM_ICE_TR_DATA_UNIT_32_KB = 6,
};
/* UFS QCOM ICE internal state (see ufs_qcom_ice_{init,suspend,resume}) */
enum {
	UFS_QCOM_ICE_STATE_DISABLED = 0,
	UFS_QCOM_ICE_STATE_ACTIVE = 1,
	UFS_QCOM_ICE_STATE_SUSPENDED = 2,
};
#ifdef CONFIG_SCSI_UFS_QCOM_ICE
int ufs_qcom_ice_get_dev(struct ufs_qcom_host *qcom_host);
int ufs_qcom_ice_init(struct ufs_qcom_host *qcom_host);
int ufs_qcom_ice_cfg(struct ufs_qcom_host *qcom_host, struct scsi_cmnd *cmd);
int ufs_qcom_ice_reset(struct ufs_qcom_host *qcom_host);
int ufs_qcom_ice_resume(struct ufs_qcom_host *qcom_host);
int ufs_qcom_ice_suspend(struct ufs_qcom_host *qcom_host);
int ufs_qcom_ice_get_status(struct ufs_qcom_host *qcom_host, int *ice_status);
#else
/*
 * ICE support compiled out: no-op fallbacks.
 * Fix: these must be "static inline", not plain "inline". A plain
 * "inline" function defined in a header has external linkage under C99,
 * which causes multiple-definition/undefined-reference link errors when
 * the header is included from more than one translation unit.
 */
static inline int ufs_qcom_ice_get_dev(struct ufs_qcom_host *qcom_host)
{
	/* Report "no ICE device" and leave the ICE pointers in a sane state */
	if (qcom_host) {
		qcom_host->ice.pdev = NULL;
		qcom_host->ice.vops = NULL;
	}
	return -ENODEV;
}
static inline int ufs_qcom_ice_init(struct ufs_qcom_host *qcom_host)
{
	return 0;
}
static inline int ufs_qcom_ice_cfg(struct ufs_qcom_host *qcom_host,
		struct scsi_cmnd *cmd)
{
	return 0;
}
static inline int ufs_qcom_ice_reset(struct ufs_qcom_host *qcom_host)
{
	return 0;
}
static inline int ufs_qcom_ice_resume(struct ufs_qcom_host *qcom_host)
{
	return 0;
}
static inline int ufs_qcom_ice_suspend(struct ufs_qcom_host *qcom_host)
{
	return 0;
}
static inline int ufs_qcom_ice_get_status(struct ufs_qcom_host *qcom_host,
		int *ice_status)
{
	return 0;
}
#endif /* CONFIG_SCSI_UFS_QCOM_ICE */
#endif /* _UFS_QCOM_ICE_H_ */

View file

@ -23,6 +23,7 @@
#include <linux/msm-bus.h> #include <linux/msm-bus.h>
#include <soc/qcom/scm.h> #include <soc/qcom/scm.h>
#include <linux/phy/phy.h>
#include <linux/scsi/ufs/ufshcd.h> #include <linux/scsi/ufs/ufshcd.h>
#include <linux/scsi/ufs/ufs-qcom.h> #include <linux/scsi/ufs/ufs-qcom.h>
@ -32,6 +33,7 @@
#include "unipro.h" #include "unipro.h"
#include "ufs-qcom.h" #include "ufs-qcom.h"
#include "ufshci.h" #include "ufshci.h"
#include "ufs-qcom-ice.h"
static void ufs_qcom_get_speed_mode(struct ufs_pa_layer_attr *p, char *result); static void ufs_qcom_get_speed_mode(struct ufs_pa_layer_attr *p, char *result);
static int ufs_qcom_get_bus_vote(struct ufs_qcom_host *host, static int ufs_qcom_get_bus_vote(struct ufs_qcom_host *host,
@ -301,6 +303,14 @@ static int ufs_qcom_hce_enable_notify(struct ufs_hba *hba, bool status)
/* check if UFS PHY moved from DISABLED to HIBERN8 */ /* check if UFS PHY moved from DISABLED to HIBERN8 */
err = ufs_qcom_check_hibern8(hba); err = ufs_qcom_check_hibern8(hba);
ufs_qcom_enable_hw_clk_gating(hba); ufs_qcom_enable_hw_clk_gating(hba);
if (!err) {
err = ufs_qcom_ice_reset(host);
if (err)
dev_err(hba->dev,
"%s: ufs_qcom_ice_reset() failed %d\n",
__func__, err);
}
break; break;
default: default:
dev_err(hba->dev, "%s: invalid status %d\n", __func__, status); dev_err(hba->dev, "%s: invalid status %d\n", __func__, status);
@ -459,6 +469,10 @@ static int ufs_qcom_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
*/ */
ufs_qcom_disable_lane_clks(host); ufs_qcom_disable_lane_clks(host);
phy_power_off(phy); phy_power_off(phy);
ret = ufs_qcom_ice_suspend(host);
if (ret)
dev_err(hba->dev, "%s: failed ufs_qcom_ice_suspend %d\n",
__func__, ret);
goto out; goto out;
} }
@ -476,6 +490,7 @@ static int ufs_qcom_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
__func__, ret); __func__, ret);
} }
phy_power_off(phy); phy_power_off(phy);
ufs_qcom_ice_suspend(host);
} }
out: out:
@ -545,11 +560,84 @@ static int ufs_qcom_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
} }
} }
err = ufs_qcom_ice_resume(host);
if (err) {
dev_err(hba->dev, "%s: ufs_qcom_ice_resume failed, err = %d\n",
__func__, err);
goto out;
}
hba->is_sys_suspended = false; hba->is_sys_suspended = false;
out: out:
return err; return err;
} }
/*
 * ufs_qcom_crytpo_engine_cfg() - configure ICE for one transfer request slot
 * @hba: per adapter instance
 * @task_tag: slot of the request about to be issued
 *
 * Thin vops adapter around ufs_qcom_ice_cfg(): only SCSI commands with an
 * ICE device present need configuration; everything else is a no-op.
 * NOTE(review): the "crytpo" typo is kept — the symbol is referenced in
 * the ufs_hba_qcom_vops table, so any rename must touch both places.
 */
static
int ufs_qcom_crytpo_engine_cfg(struct ufs_hba *hba, unsigned int task_tag)
{
	struct ufs_qcom_host *host = hba->priv;
	struct ufshcd_lrb *lrbp = &hba->lrb[task_tag];

	if (!host->ice.pdev || !lrbp->cmd ||
	    lrbp->command_type != UTP_CMD_TYPE_SCSI)
		return 0;

	return ufs_qcom_ice_cfg(host, lrbp->cmd);
}
/*
 * ufs_qcom_crypto_engine_eh() - check for and handle crypto engine errors
 * @hba: per adapter instance
 *
 * Refreshes the cached crypto engine error from the ICE status (only when
 * the UFS_QCOM_ICE_QUIRK_HANDLE_CRYPTO_ENGINE_ERRORS quirk is set). On a
 * reported error, blocks the scsi mid-layer and aborts all outstanding
 * transfer requests; the host reset itself is triggered elsewhere (from
 * ufshcd_check_errors via the error-handler work).
 *
 * Return: the cached crypto engine error (nonzero means error).
 * NOTE(review): ice.crypto_engine_err is declared bool in
 * struct ufs_qcom_ice_data, so the int ice_status stored into it collapses
 * to 0/1 — confirm callers never need the exact status value.
 */
static int ufs_qcom_crypto_engine_eh(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = hba->priv;
	int ice_status = 0;
	int err = 0;

	/* Start from a clean slate; re-derive the error below. */
	host->ice.crypto_engine_err = 0;

	if (host->ice.quirks &
	    UFS_QCOM_ICE_QUIRK_HANDLE_CRYPTO_ENGINE_ERRORS) {
		err = ufs_qcom_ice_get_status(host, &ice_status);
		if (!err)
			host->ice.crypto_engine_err = ice_status;

		if (host->ice.crypto_engine_err) {
			dev_err(hba->dev, "%s handling crypto engine error\n",
				__func__);
			/*
			 * block commands from scsi mid-layer.
			 * As crypto error is a fatal error and will result in
			 * a host reset we should leave scsi mid layer blocked
			 * until host reset is completed.
			 * Host reset will be handled in a separate workqueue
			 * and will be triggered from ufshcd_check_errors.
			 */
			scsi_block_requests(hba->host);

			ufshcd_abort_outstanding_transfer_requests(hba,
					DID_TARGET_FAILURE);
		}
	}

	return host->ice.crypto_engine_err;
}
/* Return the cached crypto engine error recorded for this host. */
static int ufs_qcom_crypto_engine_get_err(struct ufs_hba *hba)
{
	struct ufs_qcom_host *qcom_host = hba->priv;

	return qcom_host->ice.crypto_engine_err;
}
/* Forget any previously recorded crypto engine error. */
static void ufs_qcom_crypto_engine_reset_err(struct ufs_hba *hba)
{
	struct ufs_qcom_host *qcom_host = hba->priv;

	qcom_host->ice.crypto_engine_err = 0;
}
struct ufs_qcom_dev_params { struct ufs_qcom_dev_params {
u32 pwm_rx_gear; /* pwm rx gear to work in */ u32 pwm_rx_gear; /* pwm rx gear to work in */
u32 pwm_tx_gear; /* pwm tx gear to work in */ u32 pwm_tx_gear; /* pwm tx gear to work in */
@ -993,16 +1081,40 @@ static int ufs_qcom_init(struct ufs_hba *hba)
} }
host->hba = hba; host->hba = hba;
hba->priv = (void *)host;
err = ufs_qcom_ice_get_dev(host);
if (err == -EPROBE_DEFER) {
/*
* UFS driver might be probed before ICE driver does.
* In that case we would like to return EPROBE_DEFER code
* in order to delay its probing.
*/
dev_err(dev, "%s: required ICE device not probed yet err = %d\n",
__func__, err);
goto out_host_free;
} else if (err == -ENODEV) {
/*
* ICE device is not enabled in DTS file. No need for further
* initialization of ICE driver.
*/
dev_warn(dev, "%s: ICE device is not enabled",
__func__);
} else if (err) {
dev_err(dev, "%s: ufs_qcom_ice_get_dev failed %d\n",
__func__, err);
goto out_host_free;
}
host->generic_phy = devm_phy_get(dev, "ufsphy"); host->generic_phy = devm_phy_get(dev, "ufsphy");
if (IS_ERR(host->generic_phy)) { if (IS_ERR(host->generic_phy)) {
err = PTR_ERR(host->generic_phy); err = PTR_ERR(host->generic_phy);
dev_err(dev, "PHY get failed %d\n", err); dev_err(dev, "%s: PHY get failed %d\n", __func__, err);
goto out; goto out;
} }
hba->priv = (void *)host;
/* restore the secure configuration */ /* restore the secure configuration */
ufs_qcom_update_sec_cfg(hba, true); ufs_qcom_update_sec_cfg(hba, true);
@ -1026,6 +1138,16 @@ static int ufs_qcom_init(struct ufs_hba *hba)
hba->caps |= UFSHCD_CAP_AUTO_BKOPS_SUSPEND; hba->caps |= UFSHCD_CAP_AUTO_BKOPS_SUSPEND;
hba->caps |= UFSHCD_CAP_HIBERN8_ENTER_ON_IDLE; hba->caps |= UFSHCD_CAP_HIBERN8_ENTER_ON_IDLE;
ufs_qcom_setup_clocks(hba, true); ufs_qcom_setup_clocks(hba, true);
if (host->ice.pdev) {
err = ufs_qcom_ice_init(host);
if (err) {
dev_err(dev, "%s: ICE driver initialization failed (%d)\n",
__func__, err);
device_remove_file(dev, &host->bus_vote.max_bus_bw);
goto out_disable_phy;
}
}
goto out; goto out;
out_disable_phy: out_disable_phy:
@ -1145,6 +1267,10 @@ const struct ufs_hba_variant_ops ufs_hba_qcom_vops = {
.resume = ufs_qcom_resume, .resume = ufs_qcom_resume,
.update_sec_cfg = ufs_qcom_update_sec_cfg, .update_sec_cfg = ufs_qcom_update_sec_cfg,
.dbg_register_dump = ufs_qcom_dump_dbg_regs, .dbg_register_dump = ufs_qcom_dump_dbg_regs,
.crypto_engine_cfg = ufs_qcom_crytpo_engine_cfg,
.crypto_engine_eh = ufs_qcom_crypto_engine_eh,
.crypto_engine_get_err = ufs_qcom_crypto_engine_get_err,
.crypto_engine_reset_err = ufs_qcom_crypto_engine_reset_err,
}; };
/** /**

View file

@ -1449,14 +1449,26 @@ static void ufshcd_clk_scaling_update_busy(struct ufs_hba *hba)
* @task_tag: Task tag of the command * @task_tag: Task tag of the command
*/ */
static inline static inline
void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag) int ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
{ {
int ret = 0;
if (hba->vops->crypto_engine_cfg) {
ret = hba->vops->crypto_engine_cfg(hba, task_tag);
if (ret) {
dev_err(hba->dev,
"%s: failed to configure crypto engine %d\n",
__func__, ret);
return ret;
}
}
ufshcd_clk_scaling_start_busy(hba); ufshcd_clk_scaling_start_busy(hba);
__set_bit(task_tag, &hba->outstanding_reqs); __set_bit(task_tag, &hba->outstanding_reqs);
ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL); ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);
/* Make sure that doorbell is commited immediately */ /* Make sure that doorbell is commited immediately */
wmb(); wmb();
UFSHCD_UPDATE_TAG_STATS(hba, task_tag); UFSHCD_UPDATE_TAG_STATS(hba, task_tag);
return ret;
} }
/** /**
@ -2034,7 +2046,18 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
wmb(); wmb();
/* issue command to the controller */ /* issue command to the controller */
spin_lock_irqsave(hba->host->host_lock, flags); spin_lock_irqsave(hba->host->host_lock, flags);
ufshcd_send_command(hba, tag);
err = ufshcd_send_command(hba, tag);
if (err) {
scsi_dma_unmap(lrbp->cmd);
lrbp->cmd = NULL;
clear_bit_unlock(tag, &hba->lrb_in_use);
ufshcd_release_all(hba);
dev_err(hba->dev, "%s: failed sending command, %d\n",
__func__, err);
err = DID_ERROR;
}
out_unlock: out_unlock:
spin_unlock_irqrestore(hba->host->host_lock, flags); spin_unlock_irqrestore(hba->host->host_lock, flags);
out: out:
@ -2236,9 +2259,13 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
/* Make sure descriptors are ready before ringing the doorbell */ /* Make sure descriptors are ready before ringing the doorbell */
wmb(); wmb();
spin_lock_irqsave(hba->host->host_lock, flags); spin_lock_irqsave(hba->host->host_lock, flags);
ufshcd_send_command(hba, tag); err = ufshcd_send_command(hba, tag);
spin_unlock_irqrestore(hba->host->host_lock, flags); spin_unlock_irqrestore(hba->host->host_lock, flags);
if (err) {
dev_err(hba->dev, "%s: failed sending command, %d\n",
__func__, err);
goto out_put_tag;
}
err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout); err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout);
out_put_tag: out_put_tag:
@ -4166,6 +4193,48 @@ static void ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
complete(hba->uic_async_done); complete(hba->uic_async_done);
} }
/**
 * ufshcd_abort_outstanding_transfer_requests - abort all outstanding transfer
 * requests.
 * @hba: per adapter instance
 * @result: error result to inform scsi layer about
 */
void ufshcd_abort_outstanding_transfer_requests(struct ufs_hba *hba, int result)
{
	int index;
	struct ufshcd_lrb *lrbp;
	struct scsi_cmnd *cmd;

	if (!hba->outstanding_reqs)
		return;

	for_each_set_bit(index, &hba->outstanding_reqs, hba->nutrs) {
		lrbp = &hba->lrb[index];
		cmd = lrbp->cmd;
		if (cmd) {
			ufshcd_cond_add_cmd_trace(hba, index, "failed");
			UFSHCD_UPDATE_ERROR_STATS(hba,
					UFS_ERR_INT_FATAL_ERRORS);
			scsi_dma_unmap(cmd);
			cmd->result = result;
			/* Mark completed command as NULL in LRB */
			lrbp->cmd = NULL;
			/* Clear pending transfer requests */
			ufshcd_clear_cmd(hba, index);
			/*
			 * Fix: clear the slot in outstanding_reqs — the
			 * bitmap being iterated. The original cleared
			 * outstanding_tasks (which tracks task-management
			 * requests), leaving the aborted slot marked as a
			 * still-outstanding transfer request.
			 */
			__clear_bit(index, &hba->outstanding_reqs);
			clear_bit_unlock(index, &hba->lrb_in_use);
			/* Do not touch lrbp after scsi done */
			cmd->scsi_done(cmd);
			ufshcd_release_all(hba);
		} else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE) {
			/* Wake a waiter blocked on a device-management cmd */
			if (hba->dev_cmd.complete) {
				ufshcd_cond_add_cmd_trace(hba, index,
							"dev_failed");
				complete(hba->dev_cmd.complete);
			}
		}
	}
}
/** /**
* ufshcd_transfer_req_compl - handle SCSI and query command completion * ufshcd_transfer_req_compl - handle SCSI and query command completion
* @hba: per adapter instance * @hba: per adapter instance
@ -4500,6 +4569,7 @@ static void ufshcd_err_handler(struct work_struct *work)
u32 err_tm = 0; u32 err_tm = 0;
int err = 0; int err = 0;
int tag; int tag;
int crypto_engine_err = 0;
hba = container_of(work, struct ufs_hba, eh_work); hba = container_of(work, struct ufs_hba, eh_work);
@ -4548,12 +4618,16 @@ static void ufshcd_err_handler(struct work_struct *work)
ufshcd_tmc_handler(hba); ufshcd_tmc_handler(hba);
spin_unlock_irqrestore(hba->host->host_lock, flags); spin_unlock_irqrestore(hba->host->host_lock, flags);
if (hba->vops && hba->vops->crypto_engine_get_err)
crypto_engine_err = hba->vops->crypto_engine_get_err(hba);
/* Fatal errors need reset */ /* Fatal errors need reset */
if (err_xfer || err_tm || (hba->saved_err & INT_FATAL_ERRORS) || if (err_xfer || err_tm || (hba->saved_err & INT_FATAL_ERRORS) ||
((hba->saved_err & UIC_ERROR) && ((hba->saved_err & UIC_ERROR) &&
(hba->saved_uic_err & UFSHCD_UIC_DL_PA_INIT_ERROR))) { (hba->saved_uic_err & UFSHCD_UIC_DL_PA_INIT_ERROR)) ||
crypto_engine_err) {
if (hba->saved_err & INT_FATAL_ERRORS) if (hba->saved_err & INT_FATAL_ERRORS || crypto_engine_err)
UFSHCD_UPDATE_ERROR_STATS(hba, UFSHCD_UPDATE_ERROR_STATS(hba,
UFS_ERR_INT_FATAL_ERRORS); UFS_ERR_INT_FATAL_ERRORS);
@ -4578,6 +4652,8 @@ static void ufshcd_err_handler(struct work_struct *work)
scsi_report_bus_reset(hba->host, 0); scsi_report_bus_reset(hba->host, 0);
hba->saved_err = 0; hba->saved_err = 0;
hba->saved_uic_err = 0; hba->saved_uic_err = 0;
if (hba->vops && hba->vops->crypto_engine_reset_err)
hba->vops->crypto_engine_reset_err(hba);
} }
ufshcd_clear_eh_in_progress(hba); ufshcd_clear_eh_in_progress(hba);
@ -4635,8 +4711,12 @@ static void ufshcd_update_uic_error(struct ufs_hba *hba)
static void ufshcd_check_errors(struct ufs_hba *hba) static void ufshcd_check_errors(struct ufs_hba *hba)
{ {
bool queue_eh_work = false; bool queue_eh_work = false;
int crypto_engine_err = 0;
if (hba->errors & INT_FATAL_ERRORS) if (hba->vops && hba->vops->crypto_engine_get_err)
crypto_engine_err = hba->vops->crypto_engine_get_err(hba);
if (hba->errors & INT_FATAL_ERRORS || crypto_engine_err)
queue_eh_work = true; queue_eh_work = true;
if (hba->errors & UIC_ERROR) { if (hba->errors & UIC_ERROR) {
@ -4688,10 +4768,15 @@ static void ufshcd_tmc_handler(struct ufs_hba *hba)
*/ */
static void ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status) static void ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
{ {
bool crypto_engine_err = false;
ufsdbg_fail_request(hba, &intr_status); ufsdbg_fail_request(hba, &intr_status);
if (hba->vops && hba->vops->crypto_engine_eh)
crypto_engine_err = hba->vops->crypto_engine_eh(hba);
hba->errors = UFSHCD_ERROR_MASK & intr_status; hba->errors = UFSHCD_ERROR_MASK & intr_status;
if (hba->errors) if (hba->errors || crypto_engine_err)
ufshcd_check_errors(hba); ufshcd_check_errors(hba);
if (intr_status & UFSHCD_UIC_MASK) if (intr_status & UFSHCD_UIC_MASK)

View file

@ -137,6 +137,33 @@ struct ufs_qcom_bus_vote {
struct device_attribute max_bus_bw; struct device_attribute max_bus_bw;
}; };
/**
 * struct ufs_qcom_ice_data - ICE (Inline Cryptographic Engine) related
 * information kept per UFS-QCOM host
 * @vops: pointer to variant operations of ICE
 * @async_done: completion for supporting ICE's driver asynchronous nature
 * @pdev: pointer to the proper ICE platform device
 * @state: UFS-ICE interface's internal state (see
 *	ufs-qcom-ice.h for possible internal states)
 * @quirks: UFS-ICE interface related quirks
 * @crypto_engine_err: saved crypto engine error status; when set, the UFS
 *	error handler treats it as a fatal error and resets the host
 */
struct ufs_qcom_ice_data {
	struct qcom_ice_variant_ops *vops;
	struct completion async_done;
	struct platform_device *pdev;
	int state;
	/*
	 * If the UFS host controller should handle the cryptographic
	 * engine's errors, enable this quirk.
	 */
#define UFS_QCOM_ICE_QUIRK_HANDLE_CRYPTO_ENGINE_ERRORS	UFS_BIT(0)
	u16 quirks;
	bool crypto_engine_err;
};
struct ufs_qcom_host { struct ufs_qcom_host {
struct phy *generic_phy; struct phy *generic_phy;
struct ufs_hba *hba; struct ufs_hba *hba;
@ -148,6 +175,7 @@ struct ufs_qcom_host {
struct clk *tx_l1_sync_clk; struct clk *tx_l1_sync_clk;
bool is_lane_clks_enabled; bool is_lane_clks_enabled;
bool sec_cfg_updated; bool sec_cfg_updated;
struct ufs_qcom_ice_data ice;
}; };
#define ufs_qcom_is_link_off(hba) ufshcd_is_link_off(hba) #define ufs_qcom_is_link_off(hba) ufshcd_is_link_off(hba)

View file

@ -283,6 +283,16 @@ struct ufs_pwr_mode_info {
* @resume: called during host controller PM callback * @resume: called during host controller PM callback
* @update_sec_cfg: called to restore host controller secure configuration * @update_sec_cfg: called to restore host controller secure configuration
* @dbg_register_dump: used to dump controller debug information * @dbg_register_dump: used to dump controller debug information
* @crypto_engine_cfg: configure cryptographic engine according to tag parameter
* @crypto_engine_eh: cryptographic engine error handling.
 * Returns true if it detects an error, false on
 * success
* @crypto_engine_get_err: returns the saved error status of the
* cryptographic engine.If a positive
* value is returned, host controller
* should be reset.
* @crypto_engine_reset_err: resets the saved error status of
* the cryptographic engine
*/ */
struct ufs_hba_variant_ops { struct ufs_hba_variant_ops {
const char *name; const char *name;
@ -305,6 +315,10 @@ struct ufs_hba_variant_ops {
int (*resume)(struct ufs_hba *, enum ufs_pm_op); int (*resume)(struct ufs_hba *, enum ufs_pm_op);
int (*update_sec_cfg)(struct ufs_hba *hba, bool restore_sec_cfg); int (*update_sec_cfg)(struct ufs_hba *hba, bool restore_sec_cfg);
void (*dbg_register_dump)(struct ufs_hba *hba); void (*dbg_register_dump)(struct ufs_hba *hba);
int (*crypto_engine_cfg)(struct ufs_hba *, unsigned int);
int (*crypto_engine_eh)(struct ufs_hba *);
int (*crypto_engine_get_err)(struct ufs_hba *);
void (*crypto_engine_reset_err)(struct ufs_hba *);
}; };
/* clock gating state */ /* clock gating state */
@ -833,6 +847,8 @@ void ufshcd_release(struct ufs_hba *hba);
int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba, u64 wait_timeout_us); int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba, u64 wait_timeout_us);
int ufshcd_change_power_mode(struct ufs_hba *hba, int ufshcd_change_power_mode(struct ufs_hba *hba,
struct ufs_pa_layer_attr *pwr_mode); struct ufs_pa_layer_attr *pwr_mode);
void ufshcd_abort_outstanding_transfer_requests(struct ufs_hba *hba,
int result);
/* Wrapper functions for safely calling variant operations */ /* Wrapper functions for safely calling variant operations */
static inline const char *ufshcd_get_var_name(struct ufs_hba *hba) static inline const char *ufshcd_get_var_name(struct ufs_hba *hba)