crypto: msm: qcrypto: Provide fallback for aead aes192

The Crypto 5 hardware does not support AES-192. An AES-192 fallback
is already in place for the ablkcipher algorithms. This change adds
fallbacks for authenc(hmac(sha256),cbc(aes)) and
authenc(hmac(sha1),cbc(aes)) to the qcrypto driver.
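
The fallback follows the existing ablkcipher pattern: at cra_init time,
when the CE core lacks 192-bit AES key support, software cbc(aes) and
hmac(shaX) transforms are allocated, and AES-192 requests are routed to
them. In outline (condensed from the patch below; error handling and
the sha1 variant elided):

	if (!cp->ce_support.aes_key_192) {
		ctx->cipher_aes192_fb = crypto_alloc_ablkcipher(
				"cbc(aes)", 0, 0);
		ctx->ahash_aead_aes192_fb = crypto_alloc_ahash(
				"hmac(sha256)", 0, 0);
	}

	/* in the aead encrypt/decrypt entry points */
	if (ctx->enc_key_len == AES_KEYSIZE_192 && ctx->cipher_aes192_fb &&
			ctx->ahash_aead_aes192_fb)
		return _qcrypto_aead_aes_192_fallback(req, is_encrypt);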

Change-Id: I2c61b6df12f3a4b40e0e13170a803c44270db5cf
Acked-by: Che-Min Hsieh <cheminh@qti.qualcomm.com>
Signed-off-by: Kranthikumar Kurapati <kkurap@codeaurora.org>
@@ -390,10 +390,9 @@ struct qcrypto_cipher_ctx {
u8 ccm4309_nonce[QCRYPTO_CCM4309_NONCE_LEN];
-	union {
-		struct crypto_ablkcipher *cipher_fb;
-		struct crypto_aead *aead_fb;
-	} fallback;
+	struct crypto_ablkcipher *cipher_aes192_fb;
+	struct crypto_ahash *ahash_aead_aes192_fb;
};
struct qcrypto_resp_ctx {
@@ -424,6 +423,17 @@ struct qcrypto_cipher_req_ctx {
struct scatterlist ssg; /* Source Data sg */
unsigned char *data; /* Incoming data pointer*/
struct aead_request *aead_req;
struct ahash_request *fb_hash_req;
uint8_t fb_ahash_digest[SHA256_DIGEST_SIZE];
struct scatterlist fb_ahash_sg[3];
char *fb_ahash_assoc_iv;
char *fb_aes_iv;
unsigned int fb_ahash_length;
struct ablkcipher_request *fb_aes_req;
struct scatterlist *fb_aes_src;
struct scatterlist *fb_aes_dst;
unsigned int fb_aes_cryptlen;
};
#define SHA_MAX_BLOCK_SIZE SHA256_BLOCK_SIZE
@@ -873,15 +883,15 @@ static int _qcrypto_cra_aes_ablkcipher_init(struct crypto_tfm *tfm)
struct crypto_priv *cp = &qcrypto_dev;
if (cp->ce_support.use_sw_aes_cbc_ecb_ctr_algo) {
-		ctx->fallback.cipher_fb = NULL;
+		ctx->cipher_aes192_fb = NULL;
		return _qcrypto_cra_ablkcipher_init(tfm);
	}
-	ctx->fallback.cipher_fb = crypto_alloc_ablkcipher(name, 0,
+	ctx->cipher_aes192_fb = crypto_alloc_ablkcipher(name, 0,
			CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
-	if (IS_ERR(ctx->fallback.cipher_fb)) {
+	if (IS_ERR(ctx->cipher_aes192_fb)) {
		pr_err("Error allocating fallback algo %s\n", name);
-		ret = PTR_ERR(ctx->fallback.cipher_fb);
-		ctx->fallback.cipher_fb = NULL;
+		ret = PTR_ERR(ctx->cipher_aes192_fb);
+		ctx->cipher_aes192_fb = NULL;
return ret;
}
return _qcrypto_cra_ablkcipher_init(tfm);
@@ -910,6 +920,68 @@ static int _qcrypto_cra_aead_sha256_init(struct crypto_tfm *tfm)
return rc;
}
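/*
 * When the CE hardware cannot handle 192-bit AES keys, allocate
 * software cbc(aes) and hmac(sha1) transforms up front so AES-192
 * requests can later be routed to _qcrypto_aead_aes_192_fallback().
 */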
static int _qcrypto_cra_aead_aes_sha1_init(struct crypto_tfm *tfm)
{
int rc;
struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
struct crypto_priv *cp = &qcrypto_dev;
tfm->crt_aead.reqsize = sizeof(struct qcrypto_cipher_req_ctx);
rc = _qcrypto_cipher_cra_init(tfm);
if (rc)
return rc;
ctx->cipher_aes192_fb = NULL;
ctx->ahash_aead_aes192_fb = NULL;
if (!cp->ce_support.aes_key_192) {
ctx->cipher_aes192_fb = crypto_alloc_ablkcipher(
"cbc(aes)", 0, 0);
if (IS_ERR(ctx->cipher_aes192_fb)) {
ctx->cipher_aes192_fb = NULL;
} else {
ctx->ahash_aead_aes192_fb = crypto_alloc_ahash(
"hmac(sha1)", 0, 0);
if (IS_ERR(ctx->ahash_aead_aes192_fb)) {
ctx->ahash_aead_aes192_fb = NULL;
crypto_free_ablkcipher(ctx->cipher_aes192_fb);
ctx->cipher_aes192_fb = NULL;
}
}
}
ctx->auth_alg = QCE_HASH_SHA1_HMAC;
return 0;
}
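/* Same as the sha1 variant above, but pairs cbc(aes) with hmac(sha256). */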
static int _qcrypto_cra_aead_aes_sha256_init(struct crypto_tfm *tfm)
{
int rc;
struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
struct crypto_priv *cp = &qcrypto_dev;
tfm->crt_aead.reqsize = sizeof(struct qcrypto_cipher_req_ctx);
rc = _qcrypto_cipher_cra_init(tfm);
if (rc)
return rc;
ctx->cipher_aes192_fb = NULL;
ctx->ahash_aead_aes192_fb = NULL;
if (!cp->ce_support.aes_key_192) {
ctx->cipher_aes192_fb = crypto_alloc_ablkcipher(
"cbc(aes)", 0, 0);
if (IS_ERR(ctx->cipher_aes192_fb)) {
ctx->cipher_aes192_fb = NULL;
} else {
ctx->ahash_aead_aes192_fb = crypto_alloc_ahash(
"hmac(sha256)", 0, 0);
if (IS_ERR(ctx->ahash_aead_aes192_fb)) {
ctx->ahash_aead_aes192_fb = NULL;
crypto_free_ablkcipher(ctx->cipher_aes192_fb);
ctx->cipher_aes192_fb = NULL;
}
}
}
ctx->auth_alg = QCE_HASH_SHA256_HMAC;
return 0;
}
static int _qcrypto_cra_aead_ccm_init(struct crypto_tfm *tfm)
{
int rc;
@@ -946,9 +1018,9 @@ static void _qcrypto_cra_aes_ablkcipher_exit(struct crypto_tfm *tfm)
struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
_qcrypto_cra_ablkcipher_exit(tfm);
-	if (ctx->fallback.cipher_fb)
-		crypto_free_ablkcipher(ctx->fallback.cipher_fb);
-	ctx->fallback.cipher_fb = NULL;
+	if (ctx->cipher_aes192_fb)
+		crypto_free_ablkcipher(ctx->cipher_aes192_fb);
+	ctx->cipher_aes192_fb = NULL;
}
static void _qcrypto_cra_aead_exit(struct crypto_tfm *tfm)
@@ -957,7 +1029,21 @@ static void _qcrypto_cra_aead_exit(struct crypto_tfm *tfm)
if (!list_empty(&ctx->rsp_queue))
pr_err("_qcrypto__cra_aead_exit: requests still outstanding");
-};
+}
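/* Exit handler for the aes aead variants: also drops the fallback tfms. */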
static void _qcrypto_cra_aead_aes_exit(struct crypto_tfm *tfm)
{
struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
if (!list_empty(&ctx->rsp_queue))
pr_err("_qcrypto__cra_aead_exit: requests still outstanding");
if (ctx->cipher_aes192_fb)
crypto_free_ablkcipher(ctx->cipher_aes192_fb);
if (ctx->ahash_aead_aes192_fb)
crypto_free_ahash(ctx->ahash_aead_aes192_fb);
ctx->cipher_aes192_fb = NULL;
ctx->ahash_aead_aes192_fb = NULL;
}
static int _disp_stats(int id)
{
@@ -1197,10 +1283,10 @@ static int _qcrypto_setkey_aes_192_fallback(struct crypto_ablkcipher *cipher,
int ret;
ctx->enc_key_len = AES_KEYSIZE_192;
-	ctx->fallback.cipher_fb->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
-	ctx->fallback.cipher_fb->base.crt_flags |=
+	ctx->cipher_aes192_fb->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
+	ctx->cipher_aes192_fb->base.crt_flags |=
			(cipher->base.crt_flags & CRYPTO_TFM_REQ_MASK);
-	ret = crypto_ablkcipher_setkey(ctx->fallback.cipher_fb, key,
+	ret = crypto_ablkcipher_setkey(ctx->cipher_aes192_fb, key,
AES_KEYSIZE_192);
if (ret) {
tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
@@ -1221,7 +1307,7 @@ static int _qcrypto_setkey_aes(struct crypto_ablkcipher *cipher, const u8 *key,
return 0;
if ((len == AES_KEYSIZE_192) && (!cp->ce_support.aes_key_192)
-		&& ctx->fallback.cipher_fb)
+		&& ctx->cipher_aes192_fb)
return _qcrypto_setkey_aes_192_fallback(cipher, key);
if (_qcrypto_check_aes_keylen(cipher, cp, len)) {
@@ -2399,7 +2485,7 @@ static int _qcrypto_enc_aes_192_fallback(struct ablkcipher_request *req)
struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
int err;
-	ablkcipher_request_set_tfm(req, ctx->fallback.cipher_fb);
+	ablkcipher_request_set_tfm(req, ctx->cipher_aes192_fb);
err = crypto_ablkcipher_encrypt(req);
ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm));
return err;
@@ -2412,7 +2498,7 @@ static int _qcrypto_dec_aes_192_fallback(struct ablkcipher_request *req)
struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
int err;
-	ablkcipher_request_set_tfm(req, ctx->fallback.cipher_fb);
+	ablkcipher_request_set_tfm(req, ctx->cipher_aes192_fb);
err = crypto_ablkcipher_decrypt(req);
ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm));
return err;
@@ -2436,7 +2522,7 @@ static int _qcrypto_enc_aes_ecb(struct ablkcipher_request *req)
if ((ctx->enc_key_len == AES_KEYSIZE_192) &&
(!cp->ce_support.aes_key_192) &&
-			ctx->fallback.cipher_fb)
+			ctx->cipher_aes192_fb)
return _qcrypto_enc_aes_192_fallback(req);
rctx = ablkcipher_request_ctx(req);
@@ -2466,7 +2552,7 @@ static int _qcrypto_enc_aes_cbc(struct ablkcipher_request *req)
if ((ctx->enc_key_len == AES_KEYSIZE_192) &&
(!cp->ce_support.aes_key_192) &&
-			ctx->fallback.cipher_fb)
+			ctx->cipher_aes192_fb)
return _qcrypto_enc_aes_192_fallback(req);
rctx = ablkcipher_request_ctx(req);
@@ -2496,7 +2582,7 @@ static int _qcrypto_enc_aes_ctr(struct ablkcipher_request *req)
if ((ctx->enc_key_len == AES_KEYSIZE_192) &&
(!cp->ce_support.aes_key_192) &&
-			ctx->fallback.cipher_fb)
+			ctx->cipher_aes192_fb)
return _qcrypto_enc_aes_192_fallback(req);
rctx = ablkcipher_request_ctx(req);
@@ -2680,7 +2766,7 @@ static int _qcrypto_dec_aes_ecb(struct ablkcipher_request *req)
if ((ctx->enc_key_len == AES_KEYSIZE_192) &&
(!cp->ce_support.aes_key_192) &&
-			ctx->fallback.cipher_fb)
+			ctx->cipher_aes192_fb)
return _qcrypto_dec_aes_192_fallback(req);
rctx = ablkcipher_request_ctx(req);
@@ -2710,7 +2796,7 @@ static int _qcrypto_dec_aes_cbc(struct ablkcipher_request *req)
if ((ctx->enc_key_len == AES_KEYSIZE_192) &&
(!cp->ce_support.aes_key_192) &&
-			ctx->fallback.cipher_fb)
+			ctx->cipher_aes192_fb)
return _qcrypto_dec_aes_192_fallback(req);
rctx = ablkcipher_request_ctx(req);
@@ -2740,7 +2826,7 @@ static int _qcrypto_dec_aes_ctr(struct ablkcipher_request *req)
if ((ctx->enc_key_len == AES_KEYSIZE_192) &&
(!cp->ce_support.aes_key_192) &&
-			ctx->fallback.cipher_fb)
+			ctx->cipher_aes192_fb)
return _qcrypto_dec_aes_192_fallback(req);
rctx = ablkcipher_request_ctx(req);
@@ -2962,6 +3048,7 @@ static int _qcrypto_aead_setkey(struct crypto_aead *tfm, const u8 *key,
struct qcrypto_cipher_ctx *ctx = crypto_aead_ctx(tfm);
struct rtattr *rta = (struct rtattr *)key;
struct crypto_authenc_key_param *param;
int ret;
if (!RTA_OK(rta, keylen))
goto badkey;
@@ -2987,6 +3074,20 @@ static int _qcrypto_aead_setkey(struct crypto_aead *tfm, const u8 *key,
memcpy(ctx->enc_key, key + ctx->auth_key_len, ctx->enc_key_len);
memcpy(ctx->auth_key, key, ctx->auth_key_len);
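	/*
	 * Key the software fallback transforms as well, so that an
	 * AES-192 request can be handed off to them later.
	 */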
if (ctx->enc_key_len == AES_KEYSIZE_192 && ctx->cipher_aes192_fb &&
ctx->ahash_aead_aes192_fb) {
crypto_ahash_clear_flags(ctx->ahash_aead_aes192_fb, ~0);
ret = crypto_ahash_setkey(ctx->ahash_aead_aes192_fb,
ctx->auth_key, ctx->auth_key_len);
if (ret)
goto badkey;
crypto_ablkcipher_clear_flags(ctx->cipher_aes192_fb, ~0);
ret = crypto_ablkcipher_setkey(ctx->cipher_aes192_fb,
ctx->enc_key, ctx->enc_key_len);
if (ret)
goto badkey;
}
return 0;
badkey:
ctx->enc_key_len = 0;
@@ -3036,6 +3137,253 @@ static int _qcrypto_aead_rfc4309_ccm_setkey(struct crypto_aead *aead,
return ret;
};
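/*
 * Common completion for the AES-192 AEAD fallback: free the
 * sub-requests and temporary buffers, then complete the original
 * aead request with the given result.
 */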
static void _qcrypto_aead_aes_192_fb_a_cb(struct qcrypto_cipher_req_ctx *rctx,
int res)
{
struct aead_request *req;
struct crypto_async_request *areq;
req = rctx->aead_req;
areq = &req->base;
if (rctx->fb_aes_req)
ablkcipher_request_free(rctx->fb_aes_req);
if (rctx->fb_hash_req)
ahash_request_free(rctx->fb_hash_req);
rctx->fb_aes_req = NULL;
rctx->fb_hash_req = NULL;
kfree(rctx->fb_ahash_assoc_iv);
kfree(rctx->fb_aes_iv);
areq->complete(areq, res);
}
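/* Encrypt stage 2 (hmac) done: on success, append the ICV to dst. */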
static void _aead_aes_fb_stage2_ahash_complete(
struct crypto_async_request *base, int err)
{
struct qcrypto_cipher_req_ctx *rctx;
struct aead_request *req;
struct qcrypto_cipher_ctx *ctx;
rctx = base->data;
req = rctx->aead_req;
ctx = crypto_tfm_ctx(req->base.tfm);
/* copy icv */
if (err == 0)
scatterwalk_map_and_copy(rctx->fb_ahash_digest,
req->dst,
req->cryptlen,
ctx->authsize, 1);
_qcrypto_aead_aes_192_fb_a_cb(rctx, err);
}
static int _start_aead_aes_fb_stage2_hmac(struct qcrypto_cipher_req_ctx *rctx)
{
struct ahash_request *ahash_req;
ahash_req = rctx->fb_hash_req;
ahash_request_set_callback(ahash_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
_aead_aes_fb_stage2_ahash_complete, rctx);
return crypto_ahash_digest(ahash_req);
}
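/* Decrypt stage 2: AES-CBC decrypt of the payload, run once the ICV has checked out. */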
static void _aead_aes_fb_stage2_decrypt_complete(
struct crypto_async_request *base, int err)
{
struct qcrypto_cipher_req_ctx *rctx;
rctx = base->data;
_qcrypto_aead_aes_192_fb_a_cb(rctx, err);
}
static int _start_aead_aes_fb_stage2_decrypt(
struct qcrypto_cipher_req_ctx *rctx)
{
struct ablkcipher_request *aes_req;
aes_req = rctx->fb_aes_req;
ablkcipher_request_set_callback(aes_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
_aead_aes_fb_stage2_decrypt_complete, rctx);
return crypto_ablkcipher_decrypt(aes_req);
}
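/*
 * Decrypt stage 1 (hmac) done: compare the computed digest against the
 * ICV at the tail of src; on a match, kick off the AES-CBC decrypt.
 */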
static void _aead_aes_fb_stage1_ahash_complete(
struct crypto_async_request *base, int err)
{
struct qcrypto_cipher_req_ctx *rctx;
struct aead_request *req;
struct qcrypto_cipher_ctx *ctx;
rctx = base->data;
req = rctx->aead_req;
ctx = crypto_tfm_ctx(req->base.tfm);
/* compare icv */
if (err == 0) {
unsigned char tmp[ctx->authsize];
scatterwalk_map_and_copy(tmp, req->src,
req->cryptlen - ctx->authsize, ctx->authsize, 0);
if (memcmp(rctx->fb_ahash_digest, tmp, ctx->authsize) != 0)
err = -EBADMSG;
}
if (err)
_qcrypto_aead_aes_192_fb_a_cb(rctx, err);
else {
err = _start_aead_aes_fb_stage2_decrypt(rctx);
if (err != -EINPROGRESS && err != -EBUSY)
_qcrypto_aead_aes_192_fb_a_cb(rctx, err);
}
}
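/*
 * Encrypt stage 1 (AES-CBC) done: propagate the updated IV back to the
 * tfm context, then start the hmac over assoc data, IV and ciphertext.
 */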
static void _aead_aes_fb_stage1_encrypt_complete(
struct crypto_async_request *base, int err)
{
struct qcrypto_cipher_req_ctx *rctx;
struct aead_request *req;
struct qcrypto_cipher_ctx *ctx;
rctx = base->data;
req = rctx->aead_req;
ctx = crypto_tfm_ctx(req->base.tfm);
memcpy(ctx->iv, rctx->fb_aes_iv, rctx->ivsize);
if (err) {
_qcrypto_aead_aes_192_fb_a_cb(rctx, err);
return;
}
err = _start_aead_aes_fb_stage2_hmac(rctx);
/* copy icv */
if (err == 0) {
scatterwalk_map_and_copy(rctx->fb_ahash_digest,
req->dst,
req->cryptlen,
ctx->authsize, 1);
}
if (err != -EINPROGRESS && err != -EBUSY)
_qcrypto_aead_aes_192_fb_a_cb(rctx, err);
}
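/*
 * Software fallback for authenc(hmac(shaX),cbc(aes)) with AES-192 keys.
 * Encrypt: cbc(aes) over the payload first, then hmac over
 * assoc || IV || ciphertext, with the resulting ICV appended to dst.
 * Decrypt: hmac and ICV check first, then cbc(aes) decrypt.
 * Each stage may complete synchronously or via its async callback.
 */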
static int _qcrypto_aead_aes_192_fallback(struct aead_request *req,
bool is_encrypt)
{
struct qcrypto_cipher_req_ctx *rctx = aead_request_ctx(req);
struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
struct crypto_aead *aead_tfm = crypto_aead_reqtfm(req);
struct ablkcipher_request *aes_req = NULL;
struct ahash_request *ahash_req = NULL;
int rc = -EINVAL;
int nbytes;
int num_sg;
rctx->fb_ahash_assoc_iv = NULL;
rctx->fb_aes_iv = NULL;
aes_req = ablkcipher_request_alloc(ctx->cipher_aes192_fb, GFP_KERNEL);
if (!aes_req)
return -ENOMEM;
ahash_req = ahash_request_alloc(ctx->ahash_aead_aes192_fb, GFP_KERNEL);
if (!ahash_req)
goto ret;
rctx->fb_aes_req = aes_req;
rctx->fb_hash_req = ahash_req;
rctx->aead_req = req;
num_sg = qcrypto_count_sg(req->assoc, req->assoclen);
rctx->fb_ahash_assoc_iv = kzalloc(req->assoclen +
crypto_aead_ivsize(aead_tfm), GFP_ATOMIC);
if (!rctx->fb_ahash_assoc_iv)
goto ret;
if (req->assoclen)
qcrypto_sg_copy_to_buffer(req->assoc, num_sg,
rctx->fb_ahash_assoc_iv, req->assoclen);
memcpy(rctx->fb_ahash_assoc_iv + req->assoclen,
req->iv, crypto_aead_ivsize(aead_tfm));
memset(rctx->fb_ahash_sg, 0, sizeof(rctx->fb_ahash_sg));
sg_set_buf(&rctx->fb_ahash_sg[0], rctx->fb_ahash_assoc_iv,
req->assoclen + crypto_aead_ivsize(aead_tfm));
sg_mark_end(&rctx->fb_ahash_sg[1]);
nbytes = req->cryptlen;
if (is_encrypt) {
sg_chain(&rctx->fb_ahash_sg[0], 2, req->dst);
} else {
sg_chain(&rctx->fb_ahash_sg[0], 2, req->src);
nbytes -= ctx->authsize;
}
rctx->fb_ahash_length = nbytes + crypto_aead_ivsize(aead_tfm)
+ req->assoclen;
rctx->fb_aes_src = req->src;
rctx->fb_aes_dst = req->dst;
rctx->fb_aes_cryptlen = nbytes;
rctx->ivsize = crypto_aead_ivsize(aead_tfm);
rctx->fb_aes_iv = kzalloc(rctx->ivsize, GFP_ATOMIC);
if (!rctx->fb_aes_iv)
goto ret;
memcpy(rctx->fb_aes_iv, req->iv, rctx->ivsize);
ablkcipher_request_set_crypt(aes_req, rctx->fb_aes_src,
rctx->fb_aes_dst,
rctx->fb_aes_cryptlen, rctx->fb_aes_iv);
ahash_request_set_crypt(ahash_req, &rctx->fb_ahash_sg[0],
rctx->fb_ahash_digest,
rctx->fb_ahash_length);
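	/*
	 * Stage 1 is the cipher for encrypt and the hash for decrypt;
	 * stage 2 is chained here on synchronous completion, or from
	 * the stage-1 callback otherwise.
	 */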
if (is_encrypt) {
ablkcipher_request_set_callback(aes_req,
CRYPTO_TFM_REQ_MAY_BACKLOG,
_aead_aes_fb_stage1_encrypt_complete, rctx);
rc = crypto_ablkcipher_encrypt(aes_req);
if (rc == 0) {
memcpy(ctx->iv, rctx->fb_aes_iv, rctx->ivsize);
rc = _start_aead_aes_fb_stage2_hmac(rctx);
if (rc == 0) {
/* copy icv */
scatterwalk_map_and_copy(rctx->fb_ahash_digest,
req->dst,
req->cryptlen,
ctx->authsize, 1);
}
}
if (rc == -EINPROGRESS || rc == -EBUSY)
return rc;
goto ret;
} else {
ahash_request_set_callback(ahash_req,
CRYPTO_TFM_REQ_MAY_BACKLOG,
_aead_aes_fb_stage1_ahash_complete, rctx);
rc = crypto_ahash_digest(ahash_req);
if (rc == 0) {
unsigned char tmp[ctx->authsize];
/* compare icv */
scatterwalk_map_and_copy(tmp,
req->src, req->cryptlen - ctx->authsize,
ctx->authsize, 0);
if (memcmp(rctx->fb_ahash_digest, tmp,
ctx->authsize) != 0)
rc = -EBADMSG;
else
rc = _start_aead_aes_fb_stage2_decrypt(rctx);
}
if (rc == -EINPROGRESS || rc == -EBUSY)
return rc;
goto ret;
}
ret:
if (aes_req)
ablkcipher_request_free(aes_req);
if (ahash_req)
ahash_request_free(ahash_req);
kfree(rctx->fb_ahash_assoc_iv);
kfree(rctx->fb_aes_iv);
return rc;
}
static int _qcrypto_aead_encrypt_aes_cbc(struct aead_request *req)
{
struct qcrypto_cipher_req_ctx *rctx;
@@ -3056,10 +3404,14 @@ static int _qcrypto_aead_encrypt_aes_cbc(struct aead_request *req)
rctx->dir = QCE_ENCRYPT;
rctx->mode = QCE_MODE_CBC;
rctx->iv = req->iv;
rctx->aead_req = req;
if (ctx->auth_alg == QCE_HASH_SHA1_HMAC)
pstat->aead_sha1_aes_enc++;
else
pstat->aead_sha256_aes_enc++;
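	/* AES-192 without HW support: hand off to the software fallback. */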
if (ctx->enc_key_len == AES_KEYSIZE_192 && ctx->cipher_aes192_fb &&
ctx->ahash_aead_aes192_fb)
return _qcrypto_aead_aes_192_fallback(req, true);
return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
}
@@ -3082,11 +3434,16 @@ static int _qcrypto_aead_decrypt_aes_cbc(struct aead_request *req)
rctx->dir = QCE_DECRYPT;
rctx->mode = QCE_MODE_CBC;
rctx->iv = req->iv;
rctx->aead_req = req;
if (ctx->auth_alg == QCE_HASH_SHA1_HMAC)
pstat->aead_sha1_aes_dec++;
else
pstat->aead_sha256_aes_dec++;
if (ctx->enc_key_len == AES_KEYSIZE_192 && ctx->cipher_aes192_fb &&
ctx->ahash_aead_aes192_fb)
return _qcrypto_aead_aes_192_fallback(req, false);
return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
}
@@ -3107,6 +3464,7 @@ static int _qcrypto_aead_givencrypt_aes_cbc(struct aead_givcrypt_request *req)
rctx->dir = QCE_ENCRYPT;
rctx->mode = QCE_MODE_CBC;
rctx->iv = req->giv; /* generated iv */
rctx->aead_req = areq;
memcpy(req->giv, ctx->iv, crypto_aead_ivsize(authenc));
/* avoid consecutive packets going out with same IV */
@@ -3116,6 +3474,11 @@ static int _qcrypto_aead_givencrypt_aes_cbc(struct aead_givcrypt_request *req)
pstat->aead_sha1_aes_enc++;
else
pstat->aead_sha256_aes_enc++;
if (ctx->enc_key_len == AES_KEYSIZE_192 && ctx->cipher_aes192_fb &&
ctx->ahash_aead_aes192_fb) {
areq->iv = req->giv;
return _qcrypto_aead_aes_192_fallback(areq, true);
}
return _qcrypto_queue_req(cp, ctx->pengine, &areq->base);
}
@@ -3134,6 +3497,7 @@ static int _qcrypto_aead_encrypt_des_cbc(struct aead_request *req)
rctx->dir = QCE_ENCRYPT;
rctx->mode = QCE_MODE_CBC;
rctx->iv = req->iv;
rctx->aead_req = req;
if (ctx->auth_alg == QCE_HASH_SHA1_HMAC)
pstat->aead_sha1_des_enc++;
@@ -3157,6 +3521,7 @@ static int _qcrypto_aead_decrypt_des_cbc(struct aead_request *req)
rctx->dir = QCE_DECRYPT;
rctx->mode = QCE_MODE_CBC;
rctx->iv = req->iv;
rctx->aead_req = req;
if (ctx->auth_alg == QCE_HASH_SHA1_HMAC)
pstat->aead_sha1_des_dec++;
@@ -3182,6 +3547,7 @@ static int _qcrypto_aead_givencrypt_des_cbc(struct aead_givcrypt_request *req)
rctx->dir = QCE_ENCRYPT;
rctx->mode = QCE_MODE_CBC;
rctx->iv = req->giv; /* generated iv */
rctx->aead_req = areq;
memcpy(req->giv, ctx->iv, crypto_aead_ivsize(authenc));
/* avoid consecutive packets going out with same IV */
@@ -3208,6 +3574,7 @@ static int _qcrypto_aead_encrypt_3des_cbc(struct aead_request *req)
rctx->dir = QCE_ENCRYPT;
rctx->mode = QCE_MODE_CBC;
rctx->iv = req->iv;
rctx->aead_req = req;
if (ctx->auth_alg == QCE_HASH_SHA1_HMAC)
pstat->aead_sha1_3des_enc++;
@@ -3231,6 +3598,7 @@ static int _qcrypto_aead_decrypt_3des_cbc(struct aead_request *req)
rctx->dir = QCE_DECRYPT;
rctx->mode = QCE_MODE_CBC;
rctx->iv = req->iv;
rctx->aead_req = req;
if (ctx->auth_alg == QCE_HASH_SHA1_HMAC)
pstat->aead_sha1_3des_dec++;
@@ -3256,6 +3624,7 @@ static int _qcrypto_aead_givencrypt_3des_cbc(struct aead_givcrypt_request *req)
rctx->dir = QCE_ENCRYPT;
rctx->mode = QCE_MODE_CBC;
rctx->iv = req->giv; /* generated iv */
rctx->aead_req = areq;
memcpy(req->giv, ctx->iv, crypto_aead_ivsize(authenc));
/* avoid consecutive packets going out with same IV */
@@ -4483,8 +4852,8 @@ static struct crypto_alg _qcrypto_aead_sha1_hmac_algos[] = {
.cra_alignmask = 0,
.cra_type = &crypto_aead_type,
.cra_module = THIS_MODULE,
-		.cra_init	= _qcrypto_cra_aead_sha1_init,
-		.cra_exit	= _qcrypto_cra_aead_exit,
+		.cra_init	= _qcrypto_cra_aead_aes_sha1_init,
+		.cra_exit	= _qcrypto_cra_aead_aes_exit,
.cra_u = {
.aead = {
.ivsize = AES_BLOCK_SIZE,
@@ -4561,8 +4930,8 @@ static struct crypto_alg _qcrypto_aead_sha256_hmac_algos[] = {
.cra_alignmask = 0,
.cra_type = &crypto_aead_type,
.cra_module = THIS_MODULE,
-		.cra_init	= _qcrypto_cra_aead_sha256_init,
-		.cra_exit	= _qcrypto_cra_aead_exit,
+		.cra_init	= _qcrypto_cra_aead_aes_sha256_init,
+		.cra_exit	= _qcrypto_cra_aead_aes_exit,
.cra_u = {
.aead = {
.ivsize = AES_BLOCK_SIZE,