android_kernel_oneplus_msm8998/fs/ext4/ext4_crypto.h

/*
* linux/fs/ext4/ext4_crypto.h
*
* Copyright (C) 2015, Google, Inc.
*
* This contains encryption header content for ext4
*
* Written by Michael Halcrow, 2015.
*/
#ifndef _EXT4_CRYPTO_H
#define _EXT4_CRYPTO_H
#include <linux/fs.h>
#include <linux/pfk.h>
#define EXT4_KEY_DESCRIPTOR_SIZE 8
/* Policy provided via an ioctl on the topmost directory */
struct ext4_encryption_policy {
	char version;
	char contents_encryption_mode;
	char filenames_encryption_mode;
	char flags;
	char master_key_descriptor[EXT4_KEY_DESCRIPTOR_SIZE];
} __attribute__((__packed__));
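/*
 * Example (illustrative userspace sketch): a policy is normally installed on
 * an empty directory through the EXT4_IOC_SET_ENCRYPTION_POLICY ioctl
 * declared in ext4.h.  The mode and padding choices, and the dir_fd/key_desc
 * variables, are assumptions made for the example only:
 *
 *	struct ext4_encryption_policy policy = {
 *		.version = 0,
 *		.contents_encryption_mode = EXT4_ENCRYPTION_MODE_AES_256_XTS,
 *		.filenames_encryption_mode = EXT4_ENCRYPTION_MODE_AES_256_CTS,
 *		.flags = EXT4_POLICY_FLAGS_PAD_4,
 *	};
 *	memcpy(policy.master_key_descriptor, key_desc,
 *	       EXT4_KEY_DESCRIPTOR_SIZE);
 *	ioctl(dir_fd, EXT4_IOC_SET_ENCRYPTION_POLICY, &policy);
 */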
#define EXT4_ENCRYPTION_CONTEXT_FORMAT_V1 1
#define EXT4_KEY_DERIVATION_NONCE_SIZE 16
#define EXT4_POLICY_FLAGS_PAD_4 0x00
#define EXT4_POLICY_FLAGS_PAD_8 0x01
#define EXT4_POLICY_FLAGS_PAD_16 0x02
#define EXT4_POLICY_FLAGS_PAD_32 0x03
#define EXT4_POLICY_FLAGS_PAD_MASK 0x03
#define EXT4_POLICY_FLAGS_VALID 0x03
/**
* Encryption context for inode
*
* Protector format:
* 1 byte: Protector format (1 = this version)
* 1 byte: File contents encryption mode
* 1 byte: File names encryption mode
* 1 byte: Reserved
* 8 bytes: Master Key descriptor
* 16 bytes: Encryption Key derivation nonce
*/
struct ext4_encryption_context {
	char format;
	char contents_encryption_mode;
	char filenames_encryption_mode;
	char flags;
	char master_key_descriptor[EXT4_KEY_DESCRIPTOR_SIZE];
	char nonce[EXT4_KEY_DERIVATION_NONCE_SIZE];
} __attribute__((__packed__));
/* Encryption parameters */
#define EXT4_XTS_TWEAK_SIZE 16
#define EXT4_AES_128_ECB_KEY_SIZE 16
#define EXT4_AES_256_GCM_KEY_SIZE 32
#define EXT4_AES_256_ECB_KEY_SIZE 32
#define EXT4_AES_256_CBC_KEY_SIZE 32
#define EXT4_AES_256_CTS_KEY_SIZE 32
#define EXT4_AES_256_HEH_KEY_SIZE 32
#define EXT4_AES_256_XTS_KEY_SIZE 64
#define EXT4_PRIVATE_KEY_SIZE 64
#define EXT4_MAX_KEY_SIZE 64
#define EXT4_KEY_DESC_PREFIX "ext4:"
#define EXT4_KEY_DESC_PREFIX_SIZE 5
/* This is passed in from userspace into the kernel keyring */
struct ext4_encryption_key {
	__u32 mode;
	char raw[EXT4_MAX_KEY_SIZE];
	__u32 size;
} __attribute__((__packed__));
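/*
 * Example (illustrative userspace sketch): the key is added as a "logon" key
 * whose description is EXT4_KEY_DESC_PREFIX followed by the hex form of the
 * 8-byte master_key_descriptor from the policy.  The descriptor string, the
 * raw_key buffer, and the choice of size are assumptions for the example;
 * the size the kernel accepts depends on the encryption mode in use:
 *
 *	struct ext4_encryption_key key = {
 *		.mode = 0,
 *		.size = EXT4_MAX_KEY_SIZE,
 *	};
 *	memcpy(key.raw, raw_key, EXT4_MAX_KEY_SIZE);
 *	add_key("logon", "ext4:0123456789abcdef",
 *		&key, sizeof(key), KEY_SPEC_SESSION_KEYRING);
 */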
struct ext4_crypt_info {
	char ci_data_mode;
	char ci_filename_mode;
	char ci_flags;
	struct crypto_ablkcipher *ci_ctfm;
	struct key *ci_keyring_key;
	char ci_master_key[EXT4_KEY_DESCRIPTOR_SIZE];
	char ci_raw_key[EXT4_MAX_KEY_SIZE];
};
#define EXT4_CTX_REQUIRES_FREE_ENCRYPT_FL 0x00000001
#define EXT4_WRITE_PATH_FL 0x00000002
struct ext4_crypto_ctx {
	union {
		struct {
			struct page *bounce_page;	/* Ciphertext page */
			struct page *control_page;	/* Original page */
		} w;
		struct {
			struct bio *bio;
			struct work_struct work;
		} r;
		struct list_head free_list;	/* Free list */
	};
	char flags;				/* Flags */
	char mode;				/* Encryption mode for tfm */
};
struct ext4_completion_result {
	struct completion completion;
	int res;
};
#define DECLARE_EXT4_COMPLETION_RESULT(ecr) \
	struct ext4_completion_result ecr = { \
		COMPLETION_INITIALIZER((ecr).completion), 0 }
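/*
 * Example: roughly the wait pattern this macro is used with in
 * fs/ext4/crypto.c (request setup and the completion callback,
 * ext4_crypt_complete(), are abbreviated here):
 *
 *	DECLARE_EXT4_COMPLETION_RESULT(ecr);
 *
 *	ablkcipher_request_set_callback(req,
 *		CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
 *		ext4_crypt_complete, &ecr);
 *	res = crypto_ablkcipher_encrypt(req);
 *	if (res == -EINPROGRESS || res == -EBUSY) {
 *		wait_for_completion(&ecr.completion);
 *		res = ecr.res;
 *	}
 */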
static inline int ext4_encryption_key_size(int mode)
{
	switch (mode) {
	case EXT4_ENCRYPTION_MODE_AES_256_XTS:
	case EXT4_ENCRYPTION_MODE_PRIVATE:
		return EXT4_AES_256_XTS_KEY_SIZE;
	case EXT4_ENCRYPTION_MODE_AES_256_GCM:
		return EXT4_AES_256_GCM_KEY_SIZE;
	case EXT4_ENCRYPTION_MODE_AES_256_CBC:
		return EXT4_AES_256_CBC_KEY_SIZE;
	case EXT4_ENCRYPTION_MODE_AES_256_CTS:
		return EXT4_AES_256_CTS_KEY_SIZE;
	case EXT4_ENCRYPTION_MODE_AES_256_HEH:
		return EXT4_AES_256_HEH_KEY_SIZE;
	case EXT4_ENCRYPTION_MODE_SPECK128_256_XTS:
		return 64;
	case EXT4_ENCRYPTION_MODE_SPECK128_256_CTS:
		return 32;
	default:
		BUG();
	}
	return 0;
}
#define EXT4_FNAME_NUM_SCATTER_ENTRIES 4
#define EXT4_CRYPTO_BLOCK_SIZE 16
#define EXT4_FNAME_CRYPTO_DIGEST_SIZE 32
struct ext4_str {
	unsigned char *name;
	u32 len;
};
/**
* For encrypted symlinks, the ciphertext length is stored at the beginning
* of the string in little-endian format.
*/
struct ext4_encrypted_symlink_data {
	__le16 len;
	char encrypted_path[1];
} __attribute__((__packed__));
/**
* This function is used to calculate the disk space required to
* store a filename of length l in encrypted symlink format.
*/
static inline u32 encrypted_symlink_data_len(u32 l)
{
	if (l < EXT4_CRYPTO_BLOCK_SIZE)
		l = EXT4_CRYPTO_BLOCK_SIZE;
	return (l + sizeof(struct ext4_encrypted_symlink_data) - 1);
}
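/*
 * Worked example: with the packed layout above,
 * sizeof(struct ext4_encrypted_symlink_data) is 3 (the 2-byte len plus the
 * 1-byte encrypted_path placeholder), so the result is 2 bytes of header
 * plus the ciphertext length, with short names rounded up to one crypto
 * block: l = 20 gives 20 + 3 - 1 = 22, while l = 5 is first raised to
 * EXT4_CRYPTO_BLOCK_SIZE and gives 16 + 3 - 1 = 18.
 */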
#endif /* _EXT4_CRYPTO_H */