qseecom: add bus scaling support for GP operations

When a GP application is loaded and the crypto engine is accessed,
a NOC error occurs if the CE clock is not enabled. Thus, add bus
scaling support for GP operations.

Change-Id: I59d63be9e08b2c135edc6ec224ec1a355434e1ec
Signed-off-by: Zhen Kong <zkong@codeaurora.org>
Authored by Zhen Kong on 2017-04-28 13:32:32 -07:00, committed by Gerrit.
parent c4a48b7ed2
commit c5d33082ab
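
To make the refactor easier to follow before reading the hunks, here is a minimal sketch of the call pattern this change introduces around each GP command: vote for bandwidth (and, if needed, the CE clock) with the new __qseecom_bus_scaling_enable() helper, issue the command, then release the votes with __qseecom_bus_scaling_disable(). The sketch is condensed from the qseecom_ioctl() hunks below; the surrounding switch/case and error reporting are abbreviated.

	/* Inside the GP command handling of qseecom_ioctl(), condensed: */
	bool perf_enabled = false;

	mutex_lock(&app_access_lock);

	/* Vote for bus bandwidth; if HLOS owns the CE clock and no vote
	 * is held yet, also enable the crypto clock before talking to TZ.
	 */
	ret = __qseecom_bus_scaling_enable(data, &perf_enabled);
	if (ret) {
		mutex_unlock(&app_access_lock);
		break;
	}

	atomic_inc(&data->ioctl_count);
	ret = qseecom_qteec_open_session(data, argp); /* or another GP command */

	/* Arm the bandwidth scale-down timer and drop the clock vote taken above. */
	__qseecom_bus_scaling_disable(data, perf_enabled);
	atomic_dec(&data->ioctl_count);
	wake_up_all(&data->abort_wq);
	mutex_unlock(&app_access_lock);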

@@ -6843,6 +6843,60 @@ static void __qseecom_clean_data_sglistinfo(struct qseecom_dev_handle *data)
}
}

static int __qseecom_bus_scaling_enable(struct qseecom_dev_handle *data,
					bool *perf_enabled)
{
	int ret = 0;
	if (qseecom.support_bus_scaling) {
		if (!data->mode) {
			mutex_lock(&qsee_bw_mutex);
			__qseecom_register_bus_bandwidth_needs(
							data, HIGH);
			mutex_unlock(&qsee_bw_mutex);
		}
		ret = qseecom_scale_bus_bandwidth_timer(INACTIVE);
		if (ret) {
			pr_err("Failed to set bw\n");
			ret = -EINVAL;
			goto exit;
		}
	}
	/*
	 * On targets where crypto clock is handled by HLOS,
	 * if clk_access_cnt is zero and perf_enabled is false,
	 * then the crypto clock was not enabled before sending cmd
	 * to tz, qseecom will enable the clock to avoid service failure.
	 */
	if (!qseecom.no_clock_support &&
		!qseecom.qsee.clk_access_cnt && !data->perf_enabled) {
		pr_debug("ce clock is not enabled\n");
		ret = qseecom_perf_enable(data);
		if (ret) {
			pr_err("Failed to vote for clock with err %d\n",
				ret);
			ret = -EINVAL;
			goto exit;
		}
		*perf_enabled = true;
	}
exit:
	return ret;
}

static void __qseecom_bus_scaling_disable(struct qseecom_dev_handle *data,
					bool perf_enabled)
{
	if (qseecom.support_bus_scaling)
		__qseecom_add_bw_scale_down_timer(
			QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
	if (perf_enabled) {
		qsee_disable_clock_vote(data, CLK_DFAB);
		qsee_disable_clock_vote(data, CLK_SFPB);
	}
}

long qseecom_ioctl(struct file *file, unsigned cmd, unsigned long arg)
{
int ret = 0;
@@ -6909,50 +6963,14 @@ long qseecom_ioctl(struct file *file, unsigned cmd, unsigned long arg)
}
/* Only one client allowed here at a time */
mutex_lock(&app_access_lock);
if (qseecom.support_bus_scaling) {
/* register bus bw in case the client doesn't do it */
if (!data->mode) {
mutex_lock(&qsee_bw_mutex);
__qseecom_register_bus_bandwidth_needs(
data, HIGH);
mutex_unlock(&qsee_bw_mutex);
}
ret = qseecom_scale_bus_bandwidth_timer(INACTIVE);
if (ret) {
pr_err("Failed to set bw.\n");
ret = -EINVAL;
mutex_unlock(&app_access_lock);
break;
}
}
/*
* On targets where crypto clock is handled by HLOS,
* if clk_access_cnt is zero and perf_enabled is false,
* then the crypto clock was not enabled before sending cmd
* to tz, qseecom will enable the clock to avoid service failure.
*/
if (!qseecom.no_clock_support &&
!qseecom.qsee.clk_access_cnt && !data->perf_enabled) {
pr_debug("ce clock is not enabled!\n");
ret = qseecom_perf_enable(data);
if (ret) {
pr_err("Failed to vote for clock with err %d\n",
ret);
mutex_unlock(&app_access_lock);
ret = -EINVAL;
break;
}
perf_enabled = true;
ret = __qseecom_bus_scaling_enable(data, &perf_enabled);
if (ret) {
mutex_unlock(&app_access_lock);
break;
}
atomic_inc(&data->ioctl_count);
ret = qseecom_send_cmd(data, argp);
if (qseecom.support_bus_scaling)
__qseecom_add_bw_scale_down_timer(
QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
if (perf_enabled) {
qsee_disable_clock_vote(data, CLK_DFAB);
qsee_disable_clock_vote(data, CLK_SFPB);
}
__qseecom_bus_scaling_disable(data, perf_enabled);
atomic_dec(&data->ioctl_count);
wake_up_all(&data->abort_wq);
mutex_unlock(&app_access_lock);
@@ -6971,52 +6989,17 @@ long qseecom_ioctl(struct file *file, unsigned cmd, unsigned long arg)
}
/* Only one client allowed here at a time */
mutex_lock(&app_access_lock);
if (qseecom.support_bus_scaling) {
if (!data->mode) {
mutex_lock(&qsee_bw_mutex);
__qseecom_register_bus_bandwidth_needs(
data, HIGH);
mutex_unlock(&qsee_bw_mutex);
}
ret = qseecom_scale_bus_bandwidth_timer(INACTIVE);
if (ret) {
pr_err("Failed to set bw.\n");
mutex_unlock(&app_access_lock);
ret = -EINVAL;
break;
}
}
/*
* On targets where crypto clock is handled by HLOS,
* if clk_access_cnt is zero and perf_enabled is false,
* then the crypto clock was not enabled before sending cmd
* to tz, qseecom will enable the clock to avoid service failure.
*/
if (!qseecom.no_clock_support &&
!qseecom.qsee.clk_access_cnt && !data->perf_enabled) {
pr_debug("ce clock is not enabled!\n");
ret = qseecom_perf_enable(data);
if (ret) {
pr_err("Failed to vote for clock with err %d\n",
ret);
mutex_unlock(&app_access_lock);
ret = -EINVAL;
break;
}
perf_enabled = true;
ret = __qseecom_bus_scaling_enable(data, &perf_enabled);
if (ret) {
mutex_unlock(&app_access_lock);
break;
}
atomic_inc(&data->ioctl_count);
if (cmd == QSEECOM_IOCTL_SEND_MODFD_CMD_REQ)
ret = qseecom_send_modfd_cmd(data, argp);
else
ret = qseecom_send_modfd_cmd_64(data, argp);
if (qseecom.support_bus_scaling)
__qseecom_add_bw_scale_down_timer(
QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
if (perf_enabled) {
qsee_disable_clock_vote(data, CLK_DFAB);
qsee_disable_clock_vote(data, CLK_SFPB);
}
__qseecom_bus_scaling_disable(data, perf_enabled);
atomic_dec(&data->ioctl_count);
wake_up_all(&data->abort_wq);
mutex_unlock(&app_access_lock);
@@ -7418,8 +7401,14 @@ long qseecom_ioctl(struct file *file, unsigned cmd, unsigned long arg)
}
/* Only one client allowed here at a time */
mutex_lock(&app_access_lock);
ret = __qseecom_bus_scaling_enable(data, &perf_enabled);
if (ret) {
mutex_unlock(&app_access_lock);
break;
}
atomic_inc(&data->ioctl_count);
ret = qseecom_qteec_open_session(data, argp);
__qseecom_bus_scaling_disable(data, perf_enabled);
atomic_dec(&data->ioctl_count);
wake_up_all(&data->abort_wq);
mutex_unlock(&app_access_lock);
@@ -7467,8 +7456,14 @@ long qseecom_ioctl(struct file *file, unsigned cmd, unsigned long arg)
}
/* Only one client allowed here at a time */
mutex_lock(&app_access_lock);
ret = __qseecom_bus_scaling_enable(data, &perf_enabled);
if (ret) {
mutex_unlock(&app_access_lock);
break;
}
atomic_inc(&data->ioctl_count);
ret = qseecom_qteec_invoke_modfd_cmd(data, argp);
__qseecom_bus_scaling_disable(data, perf_enabled);
atomic_dec(&data->ioctl_count);
wake_up_all(&data->abort_wq);
mutex_unlock(&app_access_lock);