fscrypt currently only supports AES encryption. However, many low-end mobile devices have older CPUs that don't have AES instructions, e.g. the ARMv8 Cryptography Extensions. Currently, user data on such devices is not encrypted at rest because AES is too slow, even when the NEON bit-sliced implementation of AES is used. Unfortunately, it is infeasible to encrypt these devices at all when AES is the only option. Therefore, this patch updates fscrypt to support the Speck block cipher, which was recently added to the crypto API. The C implementation of Speck is not especially fast, but Speck can be implemented very efficiently with general-purpose vector instructions, e.g. ARM NEON. For example, on an ARMv7 processor, we measured the NEON-accelerated Speck128/256-XTS at 69 MB/s for both encryption and decryption, while AES-256-XTS with the NEON bit-sliced implementation was only 22 MB/s encryption and 19 MB/s decryption. There are multiple variants of Speck. This patch only adds support for Speck128/256, which is the variant with a 128-bit block size and 256-bit key size -- the same as AES-256. This is believed to be the most secure variant of Speck, and it's only about 6% slower than Speck128/128. Speck64/128 would be at least 20% faster because it has 20% fewer rounds, and it can be even faster on CPUs that can't efficiently do the 64-bit operations needed for Speck128. However, Speck64's 64-bit block size is not preferred security-wise. ARM NEON also supports the needed 64-bit operations even on 32-bit CPUs, resulting in Speck128 being fast enough for our targeted use cases so far. The chosen modes of operation are XTS for contents and CTS-CBC for filenames. These are the same modes of operation that fscrypt defaults to for AES. Note that as with the other fscrypt modes, Speck will not be used unless userspace chooses to use it. Nor are any of the existing modes (which are all AES-based) being removed, of course.
We intentionally don't make CONFIG_FS_ENCRYPTION select CONFIG_CRYPTO_SPECK, so people will have to enable Speck support themselves if they need it. This is because we shouldn't bloat the FS_ENCRYPTION dependencies with every new cipher, especially ones that aren't recommended for most users. Moreover, CRYPTO_SPECK is just the generic implementation, which won't be fast enough for many users; in practice, they'll need to enable CRYPTO_SPECK_NEON to get acceptable performance. More details about our choice of Speck can be found in our patches that added Speck to the crypto API, and the follow-on discussion threads. We're planning a publication that explains the choice in more detail. But briefly, we can't use ChaCha20 as we previously proposed, since it would be insecure to use a stream cipher in this context, with potential IV reuse during writes on f2fs and/or on wear-leveling flash storage. We also evaluated many other lightweight and/or ARX-based block ciphers such as Chaskey-LTS, RC5, LEA, CHAM, Threefish, RC6, NOEKEON, SPARX, and XTEA. However, all had disadvantages vs. Speck, such as insufficient performance with NEON, much less published cryptanalysis, or an insufficient security level. Various design choices in Speck make it perform better with NEON than competing ciphers while still having a security margin similar to AES, and in the case of Speck128 also the same available security levels. Unfortunately, Speck does have some political baggage attached -- it's an NSA designed cipher, and was rejected from an ISO standard (though for context, as far as I know none of the above-mentioned alternatives are ISO standards either). Nevertheless, we believe it is a good solution to the problem from a technical perspective. Certain algorithms constructed from ChaCha or the ChaCha permutation, such as MEM (Masked Even-Mansour) or HPolyC, may also meet our performance requirements. 
However, these are new constructions that need more time to receive the cryptographic review and acceptance needed to be confident in their security. HPolyC hasn't been published yet, and we are concerned that MEM makes stronger assumptions about the underlying permutation than the ChaCha stream cipher does. In contrast, the XTS mode of operation is relatively well accepted, and Speck has over 70 cryptanalysis papers. Of course, these ChaCha-based algorithms can still be added later if they become ready. The best known attack on Speck128/256 is a differential cryptanalysis attack on 25 of 34 rounds with 2^253 time complexity and 2^125 chosen plaintexts, i.e. only marginally faster than brute force. There is no known attack on the full 34 rounds. Signed-off-by: Eric Biggers <ebiggers@google.com> Signed-off-by: Theodore Ts'o <tytso@mit.edu> (cherry-picked from commit 12d28f79558f2e987c5f3817f89e1ccc0f11a7b5 https://git.kernel.org/pub/scm/linux/kernel/git/tytso/fscrypt.git master) (dropped Documentation/filesystems/fscrypt.rst change) (fixed merge conflict in fs/crypto/keyinfo.c) (also ported change to fs/ext4/, which isn't using fs/crypto/ in this kernel version) Change-Id: I62c632044dfd06a2c5b74c2fb058f9c3b8af0add Signed-off-by: Eric Biggers <ebiggers@google.com>
465 lines
12 KiB
C
465 lines
12 KiB
C
/*
|
|
* linux/fs/ext4/crypto_fname.c
|
|
*
|
|
* Copyright (C) 2015, Google, Inc.
|
|
*
|
|
* This contains functions for filename crypto management in ext4
|
|
*
|
|
* Written by Uday Savagaonkar, 2014.
|
|
*
|
|
* This has not yet undergone a rigorous security audit.
|
|
*
|
|
*/
|
|
|
|
#include <crypto/hash.h>
|
|
#include <crypto/sha.h>
|
|
#include <keys/encrypted-type.h>
|
|
#include <keys/user-type.h>
|
|
#include <linux/crypto.h>
|
|
#include <linux/gfp.h>
|
|
#include <linux/kernel.h>
|
|
#include <linux/key.h>
|
|
#include <linux/list.h>
|
|
#include <linux/mempool.h>
|
|
#include <linux/random.h>
|
|
#include <linux/scatterlist.h>
|
|
#include <linux/spinlock_types.h>
|
|
|
|
#include "ext4.h"
|
|
#include "ext4_crypto.h"
|
|
#include "xattr.h"
|
|
|
|
/**
|
|
* ext4_dir_crypt_complete() -
|
|
*/
|
|
static void ext4_dir_crypt_complete(struct crypto_async_request *req, int res)
|
|
{
|
|
struct ext4_completion_result *ecr = req->data;
|
|
|
|
if (res == -EINPROGRESS)
|
|
return;
|
|
ecr->res = res;
|
|
complete(&ecr->completion);
|
|
}
|
|
|
|
static unsigned max_name_len(struct inode *inode)
|
|
{
|
|
return S_ISLNK(inode->i_mode) ? inode->i_sb->s_blocksize :
|
|
EXT4_NAME_LEN;
|
|
}
|
|
|
|
/**
 * ext4_fname_encrypt() - encrypt a filename for on-disk storage
 * @inode: directory inode whose key is used for the encryption
 * @iname: plaintext filename supplied by the caller
 * @oname: output string for the ciphertext; the caller must have
 *         allocated sufficient memory for it
 *
 * This function encrypts the input filename and returns the length of the
 * ciphertext.  Errors are returned as negative numbers.  The name is
 * NUL-padded up to the policy's padding multiple (but never beyond the
 * maximum name length) before encryption, and encrypted in one shot with
 * an all-zero IV.
 */
static int ext4_fname_encrypt(struct inode *inode,
			      const struct qstr *iname,
			      struct ext4_str *oname)
{
	u32 ciphertext_len;
	struct ablkcipher_request *req = NULL;
	DECLARE_EXT4_COMPLETION_RESULT(ecr);
	struct ext4_crypt_info *ci = EXT4_I(inode)->i_crypt_info;
	struct crypto_ablkcipher *tfm = ci->ci_ctfm;
	int res = 0;
	char iv[EXT4_CRYPTO_BLOCK_SIZE];
	struct scatterlist src_sg, dst_sg;
	/* Padding multiple (4, 8, 16 or 32) selected by the policy flags */
	int padding = 4 << (ci->ci_flags & EXT4_POLICY_FLAGS_PAD_MASK);
	char *workbuf, buf[32], *alloc_buf = NULL;
	unsigned lim = max_name_len(inode);

	if (iname->len <= 0 || iname->len > lim)
		return -EIO;

	/*
	 * Pad to at least one cipher block, then round up to the padding
	 * multiple, capped at the maximum name length.
	 */
	ciphertext_len = (iname->len < EXT4_CRYPTO_BLOCK_SIZE) ?
		EXT4_CRYPTO_BLOCK_SIZE : iname->len;
	ciphertext_len = ext4_fname_crypto_round_up(ciphertext_len, padding);
	ciphertext_len = (ciphertext_len > lim)
			? lim : ciphertext_len;

	/* Short names fit in the on-stack buffer; otherwise allocate */
	if (ciphertext_len <= sizeof(buf)) {
		workbuf = buf;
	} else {
		alloc_buf = kmalloc(ciphertext_len, GFP_NOFS);
		if (!alloc_buf)
			return -ENOMEM;
		workbuf = alloc_buf;
	}

	/* Allocate request */
	req = ablkcipher_request_alloc(tfm, GFP_NOFS);
	if (!req) {
		printk_ratelimited(
			KERN_ERR "%s: crypto_request_alloc() failed\n", __func__);
		kfree(alloc_buf);
		return -ENOMEM;
	}
	ablkcipher_request_set_callback(req,
		CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
		ext4_dir_crypt_complete, &ecr);

	/* Copy the input, NUL-padding it out to ciphertext_len */
	memcpy(workbuf, iname->name, iname->len);
	if (iname->len < ciphertext_len)
		memset(workbuf + iname->len, 0, ciphertext_len - iname->len);

	/* Initialize IV (all zeroes) */
	memset(iv, 0, EXT4_CRYPTO_BLOCK_SIZE);

	/* Create encryption request */
	sg_init_one(&src_sg, workbuf, ciphertext_len);
	sg_init_one(&dst_sg, oname->name, ciphertext_len);
	ablkcipher_request_set_crypt(req, &src_sg, &dst_sg, ciphertext_len, iv);
	res = crypto_ablkcipher_encrypt(req);
	if (res == -EINPROGRESS || res == -EBUSY) {
		/* Request went asynchronous; wait for the callback */
		wait_for_completion(&ecr.completion);
		res = ecr.res;
	}
	kfree(alloc_buf);
	ablkcipher_request_free(req);
	if (res < 0) {
		printk_ratelimited(
			KERN_ERR "%s: Error (error code %d)\n", __func__, res);
	}
	oname->len = ciphertext_len;
	return res;
}
|
|
|
|
/*
|
|
* ext4_fname_decrypt()
|
|
* This function decrypts the input filename, and returns
|
|
* the length of the plaintext.
|
|
* Errors are returned as negative numbers.
|
|
* We trust the caller to allocate sufficient memory to oname string.
|
|
*/
|
|
static int ext4_fname_decrypt(struct inode *inode,
|
|
const struct ext4_str *iname,
|
|
struct ext4_str *oname)
|
|
{
|
|
struct ext4_str tmp_in[2], tmp_out[1];
|
|
struct ablkcipher_request *req = NULL;
|
|
DECLARE_EXT4_COMPLETION_RESULT(ecr);
|
|
struct scatterlist src_sg, dst_sg;
|
|
struct ext4_crypt_info *ci = EXT4_I(inode)->i_crypt_info;
|
|
struct crypto_ablkcipher *tfm = ci->ci_ctfm;
|
|
int res = 0;
|
|
char iv[EXT4_CRYPTO_BLOCK_SIZE];
|
|
unsigned lim = max_name_len(inode);
|
|
|
|
if (iname->len <= 0 || iname->len > lim)
|
|
return -EIO;
|
|
|
|
tmp_in[0].name = iname->name;
|
|
tmp_in[0].len = iname->len;
|
|
tmp_out[0].name = oname->name;
|
|
|
|
/* Allocate request */
|
|
req = ablkcipher_request_alloc(tfm, GFP_NOFS);
|
|
if (!req) {
|
|
printk_ratelimited(
|
|
KERN_ERR "%s: crypto_request_alloc() failed\n", __func__);
|
|
return -ENOMEM;
|
|
}
|
|
ablkcipher_request_set_callback(req,
|
|
CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
|
|
ext4_dir_crypt_complete, &ecr);
|
|
|
|
/* Initialize IV */
|
|
memset(iv, 0, EXT4_CRYPTO_BLOCK_SIZE);
|
|
|
|
/* Create encryption request */
|
|
sg_init_one(&src_sg, iname->name, iname->len);
|
|
sg_init_one(&dst_sg, oname->name, oname->len);
|
|
ablkcipher_request_set_crypt(req, &src_sg, &dst_sg, iname->len, iv);
|
|
res = crypto_ablkcipher_decrypt(req);
|
|
if (res == -EINPROGRESS || res == -EBUSY) {
|
|
wait_for_completion(&ecr.completion);
|
|
res = ecr.res;
|
|
}
|
|
ablkcipher_request_free(req);
|
|
if (res < 0) {
|
|
printk_ratelimited(
|
|
KERN_ERR "%s: Error in ext4_fname_encrypt (error code %d)\n",
|
|
__func__, res);
|
|
return res;
|
|
}
|
|
|
|
oname->len = strnlen(oname->name, iname->len);
|
|
return oname->len;
|
|
}
|
|
|
|
static const char *lookup_table =
	"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+,";

/**
 * digest_encode() - binary-to-text encode a byte string
 *
 * Encodes @len bytes of @src into @dst using the base64-style alphabet
 * [A-Za-z0-9+,], consuming the input little-endian, 6 bits per output
 * character.  The output is roughly 4/3 the size of the input and is not
 * NUL-terminated.  Returns the number of characters written.
 */
static int digest_encode(const char *src, int len, char *dst)
{
	char *out = dst;
	int acc = 0, nbits = 0, i;

	for (i = 0; i < len; i++) {
		acc += ((unsigned char) src[i]) << nbits;
		for (nbits += 8; nbits >= 6; nbits -= 6) {
			*out++ = lookup_table[acc & 0x3f];
			acc >>= 6;
		}
	}
	/* Flush any leftover bits as one final character */
	if (nbits)
		*out++ = lookup_table[acc & 0x3f];
	return out - dst;
}

/*
 * digest_decode() - invert digest_encode()
 *
 * Decodes @len characters of @src into @dst.  Returns the number of bytes
 * written, -2 if @src contains a character outside the alphabet (or an
 * embedded NUL), or -1 if trailing bits are left over (malformed input).
 */
static int digest_decode(const char *src, int len, char *dst)
{
	char *out = dst;
	const char *pos;
	int acc = 0, nbits = 0, i;

	for (i = 0; i < len; i++) {
		pos = strchr(lookup_table, src[i]);
		/*
		 * strchr() matches the terminating NUL, so reject an
		 * embedded NUL explicitly as well as unknown characters.
		 */
		if (pos == NULL || src[i] == 0)
			return -2;
		acc += (int)(pos - lookup_table) << nbits;
		nbits += 6;
		if (nbits >= 8) {
			*out++ = acc & 0xff;
			acc >>= 8;
			nbits -= 8;
		}
	}
	if (acc)
		return -1;
	return out - dst;
}
|
|
|
|
/**
 * ext4_fname_crypto_round_up() - round @size up to a multiple of @blksize
 *
 * Return: the smallest multiple of @blksize that is >= @size.
 */
u32 ext4_fname_crypto_round_up(u32 size, u32 blksize)
{
	u32 nblocks = (size + blksize - 1) / blksize;

	return nblocks * blksize;
}
|
|
|
|
/*
 * Size of the ciphertext produced for a name of length @ilen: at least one
 * cipher block, rounded up to the policy's padding multiple (32 when no
 * crypt info is available).
 */
unsigned ext4_fname_encrypted_size(struct inode *inode, u32 ilen)
{
	struct ext4_crypt_info *ci = EXT4_I(inode)->i_crypt_info;
	int padding = ci ? 4 << (ci->ci_flags & EXT4_POLICY_FLAGS_PAD_MASK) : 32;
	u32 len = (ilen < EXT4_CRYPTO_BLOCK_SIZE) ? EXT4_CRYPTO_BLOCK_SIZE : ilen;

	return ext4_fname_crypto_round_up(len, padding);
}
|
|
|
|
/*
|
|
* ext4_fname_crypto_alloc_buffer() -
|
|
*
|
|
* Allocates an output buffer that is sufficient for the crypto operation
|
|
* specified by the context and the direction.
|
|
*/
|
|
int ext4_fname_crypto_alloc_buffer(struct inode *inode,
|
|
u32 ilen, struct ext4_str *crypto_str)
|
|
{
|
|
unsigned int olen = ext4_fname_encrypted_size(inode, ilen);
|
|
|
|
crypto_str->len = olen;
|
|
if (olen < EXT4_FNAME_CRYPTO_DIGEST_SIZE*2)
|
|
olen = EXT4_FNAME_CRYPTO_DIGEST_SIZE*2;
|
|
/* Allocated buffer can hold one more character to null-terminate the
|
|
* string */
|
|
crypto_str->name = kmalloc(olen+1, GFP_NOFS);
|
|
if (!(crypto_str->name))
|
|
return -ENOMEM;
|
|
return 0;
|
|
}
|
|
|
|
/**
|
|
* ext4_fname_crypto_free_buffer() -
|
|
*
|
|
* Frees the buffer allocated for crypto operation.
|
|
*/
|
|
void ext4_fname_crypto_free_buffer(struct ext4_str *crypto_str)
|
|
{
|
|
if (!crypto_str)
|
|
return;
|
|
kfree(crypto_str->name);
|
|
crypto_str->name = NULL;
|
|
}
|
|
|
|
/**
 * _ext4_fname_disk_to_usr() - converts a filename from disk space to user space
 * @inode: directory inode the name belongs to
 * @hinfo: optional dirent hash information (used only in the keyless case)
 * @iname: on-disk (possibly encrypted) filename
 * @oname: output string; the caller must have allocated sufficient memory
 *
 * With the key present, decrypts @iname into @oname.  Without the key,
 * produces an encoded "no-key" name: short ciphertexts are encoded in
 * full, while long ones become '_' followed by the encoding of the
 * dirent hash plus a 16-byte tail of the ciphertext.
 *
 * Return: the resulting name length, or a negative error code.
 */
int _ext4_fname_disk_to_usr(struct inode *inode,
			    struct dx_hash_info *hinfo,
			    const struct ext4_str *iname,
			    struct ext4_str *oname)
{
	char buf[24];
	int ret;

	if (iname->len < 3) {
		/* "." and ".." are stored unencrypted; pass them through */
		if (iname->name[0] == '.' && iname->name[iname->len-1] == '.') {
			oname->name[0] = '.';
			oname->name[iname->len-1] = '.';
			oname->len = iname->len;
			return oname->len;
		}
	}
	/* An encrypted name is always at least one cipher block long */
	if (iname->len < EXT4_CRYPTO_BLOCK_SIZE) {
		EXT4_ERROR_INODE(inode, "encrypted inode too small");
		return -EUCLEAN;
	}
	if (EXT4_I(inode)->i_crypt_info)
		return ext4_fname_decrypt(inode, iname, oname);

	/* No key: short ciphertexts are encoded in their entirety */
	if (iname->len <= EXT4_FNAME_CRYPTO_DIGEST_SIZE) {
		ret = digest_encode(iname->name, iname->len, oname->name);
		oname->len = ret;
		return ret;
	}
	/*
	 * Long ciphertext: build a 24-byte summary -- the 8-byte dirent
	 * hash (zeroed when unavailable) plus 16 ciphertext bytes taken at
	 * a 16-aligned offset near the end of the name -- and encode it
	 * after a leading '_' marking the "big name" form.
	 */
	if (hinfo) {
		memcpy(buf, &hinfo->hash, 4);
		memcpy(buf+4, &hinfo->minor_hash, 4);
	} else
		memset(buf, 0, 8);
	memcpy(buf + 8, iname->name + ((iname->len - 17) & ~15), 16);
	oname->name[0] = '_';
	ret = digest_encode(buf, 24, oname->name+1);
	oname->len = ret + 1;
	return ret + 1;
}
|
|
|
|
int ext4_fname_disk_to_usr(struct inode *inode,
|
|
struct dx_hash_info *hinfo,
|
|
const struct ext4_dir_entry_2 *de,
|
|
struct ext4_str *oname)
|
|
{
|
|
struct ext4_str iname = {.name = (unsigned char *) de->name,
|
|
.len = de->name_len };
|
|
|
|
return _ext4_fname_disk_to_usr(inode, hinfo, &iname, oname);
|
|
}
|
|
|
|
|
|
/**
|
|
* ext4_fname_usr_to_disk() - converts a filename from user space to disk space
|
|
*/
|
|
int ext4_fname_usr_to_disk(struct inode *inode,
|
|
const struct qstr *iname,
|
|
struct ext4_str *oname)
|
|
{
|
|
int res;
|
|
struct ext4_crypt_info *ci = EXT4_I(inode)->i_crypt_info;
|
|
|
|
if (iname->len < 3) {
|
|
/*Check for . and .. */
|
|
if (iname->name[0] == '.' &&
|
|
iname->name[iname->len-1] == '.') {
|
|
oname->name[0] = '.';
|
|
oname->name[iname->len-1] = '.';
|
|
oname->len = iname->len;
|
|
return oname->len;
|
|
}
|
|
}
|
|
if (ci) {
|
|
res = ext4_fname_encrypt(inode, iname, oname);
|
|
return res;
|
|
}
|
|
/* Without a proper key, a user is not allowed to modify the filenames
|
|
* in a directory. Consequently, a user space name cannot be mapped to
|
|
* a disk-space name */
|
|
return -EACCES;
|
|
}
|
|
|
|
/**
 * ext4_fname_setup_filename() - prepare a filename for a directory operation
 * @dir: directory the operation takes place in
 * @iname: user-space name being looked up or created
 * @lookup: nonzero if this is a lookup rather than a modification
 * @fname: output; receives the on-disk name and, for keyless "big" names,
 *         the dirent hash to search by
 *
 * For unencrypted directories and for "." / "..", the user name is used as
 * the disk name directly.  With the key available, the name is encrypted
 * into an allocated buffer.  Without the key, a lookup may still proceed
 * by decoding the user-supplied no-key name; modifications are denied
 * with -EACCES.
 *
 * The caller must release @fname with ext4_fname_free_filename().
 *
 * Return: 0 on success or a negative error code.
 */
int ext4_fname_setup_filename(struct inode *dir, const struct qstr *iname,
			      int lookup, struct ext4_filename *fname)
{
	struct ext4_crypt_info *ci;
	int ret = 0, bigname = 0;

	memset(fname, 0, sizeof(struct ext4_filename));
	fname->usr_fname = iname;

	/* Unencrypted directory, ".", "..": use the name unchanged */
	if (!ext4_encrypted_inode(dir) ||
	    ((iname->name[0] == '.') &&
	     ((iname->len == 1) ||
	      ((iname->name[1] == '.') && (iname->len == 2))))) {
		fname->disk_name.name = (unsigned char *) iname->name;
		fname->disk_name.len = iname->len;
		return 0;
	}
	ret = ext4_get_encryption_info(dir);
	if (ret)
		return ret;
	ci = EXT4_I(dir)->i_crypt_info;
	if (ci) {
		/* Key available: encrypt the name into crypto_buf */
		ret = ext4_fname_crypto_alloc_buffer(dir, iname->len,
						     &fname->crypto_buf);
		if (ret < 0)
			return ret;
		ret = ext4_fname_encrypt(dir, iname, &fname->crypto_buf);
		if (ret < 0)
			goto errout;
		fname->disk_name.name = fname->crypto_buf.name;
		fname->disk_name.len = fname->crypto_buf.len;
		return 0;
	}
	if (!lookup)
		return -EACCES;

	/* We don't have the key and we are doing a lookup; decode the
	 * user-supplied name
	 */
	if (iname->name[0] == '_')
		bigname = 1;
	/*
	 * 33 chars = '_' plus a 24-byte value encoded at 6 bits/char;
	 * 43 chars is the encoding of the 32 bytes we allocate below.
	 */
	if ((bigname && (iname->len != 33)) ||
	    (!bigname && (iname->len > 43)))
		return -ENOENT;

	fname->crypto_buf.name = kmalloc(32, GFP_KERNEL);
	if (fname->crypto_buf.name == NULL)
		return -ENOMEM;
	ret = digest_decode(iname->name + bigname, iname->len - bigname,
			    fname->crypto_buf.name);
	if (ret < 0) {
		ret = -ENOENT;
		goto errout;
	}
	fname->crypto_buf.len = ret;
	if (bigname) {
		/* Big name: the first 8 decoded bytes are the dirent hash */
		memcpy(&fname->hinfo.hash, fname->crypto_buf.name, 4);
		memcpy(&fname->hinfo.minor_hash, fname->crypto_buf.name + 4, 4);
	} else {
		/* Short name: the decoded bytes are the full ciphertext */
		fname->disk_name.name = fname->crypto_buf.name;
		fname->disk_name.len = fname->crypto_buf.len;
	}
	return 0;
errout:
	kfree(fname->crypto_buf.name);
	fname->crypto_buf.name = NULL;
	return ret;
}
|
|
|
|
void ext4_fname_free_filename(struct ext4_filename *fname)
|
|
{
|
|
kfree(fname->crypto_buf.name);
|
|
fname->crypto_buf.name = NULL;
|
|
fname->usr_fname = NULL;
|
|
fname->disk_name.name = NULL;
|
|
}
|