Merge cae5c018df on remote branch

Change-Id: I3b826888f04fa8fa8f34e3e64fbcb19e93a528be

This commit is contained in: c7252b3f79

366 changed files with 7934 additions and 2827 deletions

@@ -276,6 +276,7 @@ What: /sys/devices/system/cpu/vulnerabilities
 		/sys/devices/system/cpu/vulnerabilities/meltdown
 		/sys/devices/system/cpu/vulnerabilities/spectre_v1
 		/sys/devices/system/cpu/vulnerabilities/spectre_v2
+		/sys/devices/system/cpu/vulnerabilities/spec_store_bypass
 Date:		January 2018
 Contact:	Linux kernel mailing list <linux-kernel@vger.kernel.org>
 Description:	Information about CPU vulnerabilities

626	Documentation/filesystems/fscrypt.rst	Normal file
@@ -0,0 +1,626 @@
=====================================
Filesystem-level encryption (fscrypt)
=====================================

Introduction
============

fscrypt is a library which filesystems can hook into to support
transparent encryption of files and directories.

Note: "fscrypt" in this document refers to the kernel-level portion,
implemented in ``fs/crypto/``, as opposed to the userspace tool
`fscrypt <https://github.com/google/fscrypt>`_. This document only
covers the kernel-level portion. For command-line examples of how to
use encryption, see the documentation for the userspace tool `fscrypt
<https://github.com/google/fscrypt>`_. Also, it is recommended to use
the fscrypt userspace tool, or other existing userspace tools such as
`fscryptctl <https://github.com/google/fscryptctl>`_ or `Android's key
management system
<https://source.android.com/security/encryption/file-based>`_, over
using the kernel's API directly. Using existing tools reduces the
chance of introducing your own security bugs. (Nevertheless, for
completeness this documentation covers the kernel's API anyway.)

Unlike dm-crypt, fscrypt operates at the filesystem level rather than
at the block device level. This allows it to encrypt different files
with different keys and to have unencrypted files on the same
filesystem. This is useful for multi-user systems where each user's
data-at-rest needs to be cryptographically isolated from the others.
However, except for filenames, fscrypt does not encrypt filesystem
metadata.

Unlike eCryptfs, which is a stacked filesystem, fscrypt is integrated
directly into supported filesystems --- currently ext4, F2FS, and
UBIFS. This allows encrypted files to be read and written without
caching both the decrypted and encrypted pages in the pagecache,
thereby nearly halving the memory used and bringing it in line with
unencrypted files. Similarly, half as many dentries and inodes are
needed. eCryptfs also limits encrypted filenames to 143 bytes,
causing application compatibility issues; fscrypt allows the full 255
bytes (NAME_MAX). Finally, unlike eCryptfs, the fscrypt API can be
used by unprivileged users, with no need to mount anything.

fscrypt does not support encrypting files in-place. Instead, it
supports marking an empty directory as encrypted. Then, after
userspace provides the key, all regular files, directories, and
symbolic links created in that directory tree are transparently
encrypted.

Threat model
============

Offline attacks
---------------

Provided that userspace chooses a strong encryption key, fscrypt
protects the confidentiality of file contents and filenames in the
event of a single point-in-time permanent offline compromise of the
block device content. fscrypt does not protect the confidentiality of
non-filename metadata, e.g. file sizes, file permissions, file
timestamps, and extended attributes. Also, the existence and location
of holes (unallocated blocks which logically contain all zeroes) in
files is not protected.

fscrypt is not guaranteed to protect confidentiality or authenticity
if an attacker is able to manipulate the filesystem offline prior to
an authorized user later accessing the filesystem.

Online attacks
--------------

fscrypt (and storage encryption in general) can only provide limited
protection, if any at all, against online attacks. In detail:

fscrypt is only resistant to side-channel attacks, such as timing or
electromagnetic attacks, to the extent that the underlying Linux
Cryptographic API algorithms are. If a vulnerable algorithm is used,
such as a table-based implementation of AES, it may be possible for an
attacker to mount a side channel attack against the online system.
Side channel attacks may also be mounted against applications
consuming decrypted data.

After an encryption key has been provided, fscrypt is not designed to
hide the plaintext file contents or filenames from other users on the
same system, regardless of the visibility of the keyring key.
Instead, existing access control mechanisms such as file mode bits,
POSIX ACLs, LSMs, or mount namespaces should be used for this purpose.
Also note that as long as the encryption keys are *anywhere* in
memory, an online attacker can necessarily compromise them by mounting
a physical attack or by exploiting any kernel security vulnerability
which provides an arbitrary memory read primitive.

While it is ostensibly possible to "evict" keys from the system,
recently accessed encrypted files will remain accessible at least
until the filesystem is unmounted or the VFS caches are dropped, e.g.
using ``echo 2 > /proc/sys/vm/drop_caches``. Even after that, if the
RAM is compromised before being powered off, it will likely still be
possible to recover portions of the plaintext file contents, if not
some of the encryption keys as well. (Since Linux v4.12, all
in-kernel keys related to fscrypt are sanitized before being freed.
However, userspace would need to do its part as well.)

Currently, fscrypt does not prevent a user from maliciously providing
an incorrect key for another user's existing encrypted files. A
protection against this is planned.

Key hierarchy
=============

Master Keys
-----------

Each encrypted directory tree is protected by a *master key*. Master
keys can be up to 64 bytes long, and must be at least as long as the
greater of the key lengths needed by the contents and filenames
encryption modes being used. For example, if AES-256-XTS is used for
contents encryption, the master key must be 64 bytes (512 bits). Note
that the XTS mode is defined to require a key twice as long as that
required by the underlying block cipher.

To "unlock" an encrypted directory tree, userspace must provide the
appropriate master key. There can be any number of master keys, each
of which protects any number of directory trees on any number of
filesystems.

Userspace should generate master keys either using a cryptographically
secure random number generator, or by using a KDF (Key Derivation
Function). Note that whenever a KDF is used to "stretch" a
lower-entropy secret such as a passphrase, it is critical that a KDF
designed for this purpose be used, such as scrypt, PBKDF2, or Argon2.

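For instance, a fresh 64-byte master key (the size needed when
AES-256-XTS is used for contents encryption) can be read directly from
the kernel's CSPRNG. A minimal userspace sketch, assuming the glibc
``getrandom()`` wrapper from ``<sys/random.h>``::

    #include <sys/random.h>

    /* Generate a random 64-byte master key; returns 0 on success. */
    static int generate_master_key(unsigned char key[64])
    {
            return getrandom(key, 64, 0) == 64 ? 0 : -1;
    }
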
Per-file keys
-------------

Master keys are not used to encrypt file contents or names directly.
Instead, a unique key is derived for each encrypted file, including
each regular file, directory, and symbolic link. This has several
advantages:

- In cryptosystems, the same key material should never be used for
  different purposes. Using the master key as both an XTS key for
  contents encryption and as a CTS-CBC key for filenames encryption
  would violate this rule.
- Per-file keys simplify the choice of IVs (Initialization Vectors)
  for contents encryption. Without per-file keys, to ensure IV
  uniqueness both the inode and logical block number would need to be
  encoded in the IVs. This would make it impossible to renumber
  inodes, which e.g. ``resize2fs`` can do when resizing an ext4
  filesystem. With per-file keys, it is sufficient to encode just the
  logical block number in the IVs.
- Per-file keys strengthen the encryption of filenames, where IVs are
  reused out of necessity. With a unique key per directory, IV reuse
  is limited to within a single directory.
- Per-file keys allow individual files to be securely erased simply by
  securely erasing their keys. (Not yet implemented.)

A KDF (Key Derivation Function) is used to derive per-file keys from
the master key. This is done instead of wrapping a randomly-generated
key for each file because it reduces the size of the encryption xattr,
which for some filesystems makes the xattr more likely to fit in-line
in the filesystem's inode table. With a KDF, only a 16-byte nonce is
required --- long enough to make key reuse extremely unlikely. A
wrapped key, on the other hand, would need to be up to 64 bytes ---
the length of an AES-256-XTS key. Furthermore, currently there is no
requirement to support unlocking a file with multiple alternative
master keys or to support rotating master keys. Instead, the master
keys may be wrapped in userspace, e.g. as done by the `fscrypt
<https://github.com/google/fscrypt>`_ tool.

The current KDF encrypts the master key using the 16-byte nonce as an
AES-128-ECB key. The output is used as the derived key. If the
output is longer than needed, then it is truncated to the needed
length. Truncation is the norm for directories and symlinks, since
those use the CTS-CBC encryption mode which requires a key half as
long as that required by the XTS encryption mode.

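To illustrate, the following userspace sketch reproduces this KDF with
OpenSSL; the library choice is an assumption for illustration only
(the kernel uses its own crypto API), and the master key length must
be a whole number of AES blocks::

    #include <openssl/evp.h>
    #include <string.h>

    /* Derive derived_len <= master_len key bytes by encrypting the
     * master key with AES-128-ECB, keyed by the 16-byte nonce, then
     * truncating the output to the needed length. */
    static int fscrypt_kdf_sketch(const unsigned char nonce[16],
                                  const unsigned char *master_key,
                                  size_t master_len, /* multiple of 16, <= 64 */
                                  unsigned char *derived, size_t derived_len)
    {
            unsigned char buf[64];
            int outl = 0, fin = 0;
            EVP_CIPHER_CTX *ctx = EVP_CIPHER_CTX_new();

            if (!ctx)
                    return -1;
            if (master_len > sizeof(buf) || derived_len > master_len) {
                    EVP_CIPHER_CTX_free(ctx);
                    return -1;
            }
            EVP_EncryptInit_ex(ctx, EVP_aes_128_ecb(), NULL, nonce, NULL);
            EVP_CIPHER_CTX_set_padding(ctx, 0); /* input is block-aligned */
            EVP_EncryptUpdate(ctx, buf, &outl, master_key, master_len);
            EVP_EncryptFinal_ex(ctx, buf + outl, &fin);
            memcpy(derived, buf, derived_len);  /* truncate */
            EVP_CIPHER_CTX_free(ctx);
            return 0;
    }
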
Note: this KDF meets the primary security requirement, which is to
produce unique derived keys that preserve the entropy of the master
key, assuming that the master key is already a good pseudorandom key.
However, it is nonstandard and has some problems such as being
reversible, so it is generally considered to be a mistake! It may be
replaced with HKDF or another more standard KDF in the future.

Encryption modes and usage
==========================

fscrypt allows one encryption mode to be specified for file contents
and one encryption mode to be specified for filenames. Different
directory trees are permitted to use different encryption modes.
Currently, the following pairs of encryption modes are supported:

- AES-256-XTS for contents and AES-256-CTS-CBC for filenames
- AES-128-CBC for contents and AES-128-CTS-CBC for filenames
- Speck128/256-XTS for contents and Speck128/256-CTS-CBC for filenames

It is strongly recommended to use AES-256-XTS for contents encryption.
AES-128-CBC was added only for low-powered embedded devices with
crypto accelerators such as CAAM or CESA that do not support XTS.

Similarly, Speck128/256 support was only added for older or low-end
CPUs which cannot do AES fast enough -- especially ARM CPUs which have
NEON instructions but not the Cryptography Extensions -- and for which
it would not otherwise be feasible to use encryption at all. It is
not recommended to use Speck on CPUs that have AES instructions.
Speck support is only available if it has been enabled in the crypto
API via CONFIG_CRYPTO_SPECK. Also, on ARM platforms, to get
acceptable performance CONFIG_CRYPTO_SPECK_NEON must be enabled.

New encryption modes can be added relatively easily, without changes
to individual filesystems. However, authenticated encryption (AE)
modes are not currently supported because of the difficulty of dealing
with ciphertext expansion.

For file contents, each filesystem block is encrypted independently.
Currently, only the case where the filesystem block size is equal to
the system's page size (usually 4096 bytes) is supported. With the
XTS mode of operation (recommended), the logical block number within
the file is used as the IV. With the CBC mode of operation (not
recommended), ESSIV is used; specifically, the IV for CBC is the
logical block number encrypted with AES-256, where the AES-256 key is
the SHA-256 hash of the inode's data encryption key.

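As a concrete illustration of the ESSIV construction just described,
here is a userspace sketch (OpenSSL is an assumed stand-in for the
kernel's crypto API, and the little-endian layout of the block number
in the 16-byte IV block is also an assumption)::

    #include <openssl/evp.h>
    #include <openssl/sha.h>

    /* IV = AES-256-ECB(key = SHA-256(file_key), plaintext = lblk). */
    static void essiv_iv_sketch(const unsigned char *file_key, size_t key_len,
                                unsigned long long lblk, unsigned char iv[16])
    {
            unsigned char salt[SHA256_DIGEST_LENGTH]; /* 32-byte AES-256 key */
            unsigned char block[16] = { 0 };
            int outl = 0;
            EVP_CIPHER_CTX *ctx = EVP_CIPHER_CTX_new();

            SHA256(file_key, key_len, salt);
            for (int i = 0; i < 8; i++)
                    block[i] = (lblk >> (8 * i)) & 0xff; /* little-endian */
            EVP_EncryptInit_ex(ctx, EVP_aes_256_ecb(), NULL, salt, NULL);
            EVP_CIPHER_CTX_set_padding(ctx, 0);
            EVP_EncryptUpdate(ctx, iv, &outl, block, sizeof(block));
            EVP_CIPHER_CTX_free(ctx);
    }
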
For filenames, the full filename is encrypted at once. Because of the
requirements to retain support for efficient directory lookups and
filenames of up to 255 bytes, a constant initialization vector (IV) is
used. However, each encrypted directory uses a unique key, which
limits IV reuse to within a single directory. Note that IV reuse in
the context of CTS-CBC encryption means that when the original
filenames share a common prefix at least as long as the cipher block
size (16 bytes for AES), the corresponding encrypted filenames will
also share a common prefix. This is undesirable; it may be fixed in
the future by switching to an encryption mode that is a strong
pseudorandom permutation on arbitrary-length messages, e.g. the HEH
(Hash-Encrypt-Hash) mode.

Since filenames are encrypted with the CTS-CBC mode of operation, the
plaintext and ciphertext filenames need not be multiples of the AES
block size, i.e. 16 bytes. However, the minimum size that can be
encrypted is 16 bytes, so shorter filenames are NUL-padded to 16 bytes
before being encrypted. In addition, to reduce leakage of filename
lengths via their ciphertexts, all filenames are NUL-padded to the
next 4, 8, 16, or 32-byte boundary (configurable). 32 is recommended
since this provides the best confidentiality, at the cost of making
directory entries consume slightly more space. Note that since NUL
(``\0``) is not otherwise a valid character in filenames, the padding
will never produce duplicate plaintexts.

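The padded length is easy to compute: round the name length up to at
least one cipher block, then up to the next multiple of the padding
amount. A small sketch (the helper name is ours)::

    /* pad must be 4, 8, 16, or 32, per the policy's padding flag. */
    static inline int padded_name_len(int len, int pad)
    {
            if (len < 16)           /* CTS-CBC needs >= one AES block */
                    len = 16;
            return (len + pad - 1) & ~(pad - 1);
    }
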
Symbolic link targets are considered a type of filename and are
encrypted in the same way as filenames in directory entries. Each
symlink also uses a unique key; hence, the hardcoded IV is not a
problem for symlinks.

User API
========

Setting an encryption policy
----------------------------

The FS_IOC_SET_ENCRYPTION_POLICY ioctl sets an encryption policy on an
empty directory or verifies that a directory or regular file already
has the specified encryption policy. It takes in a pointer to a
:c:type:`struct fscrypt_policy`, defined as follows::

    #define FS_KEY_DESCRIPTOR_SIZE 8

    struct fscrypt_policy {
            __u8 version;
            __u8 contents_encryption_mode;
            __u8 filenames_encryption_mode;
            __u8 flags;
            __u8 master_key_descriptor[FS_KEY_DESCRIPTOR_SIZE];
    };

This structure must be initialized as follows (a usage sketch putting
the fields together appears after this list):

- ``version`` must be 0.

- ``contents_encryption_mode`` and ``filenames_encryption_mode`` must
  be set to constants from ``<linux/fs.h>`` which identify the
  encryption modes to use. If unsure, use
  FS_ENCRYPTION_MODE_AES_256_XTS (1) for ``contents_encryption_mode``
  and FS_ENCRYPTION_MODE_AES_256_CTS (4) for
  ``filenames_encryption_mode``.

- ``flags`` must be set to a value from ``<linux/fs.h>`` which
  identifies the amount of NUL-padding to use when encrypting
  filenames. If unsure, use FS_POLICY_FLAGS_PAD_32 (0x3).

- ``master_key_descriptor`` specifies how to find the master key in
  the keyring; see `Adding keys`_. It is up to userspace to choose a
  unique ``master_key_descriptor`` for each master key. The e4crypt
  and fscrypt tools use the first 8 bytes of
  ``SHA-512(SHA-512(master_key))``, but this particular scheme is not
  required. Also, the master key need not be in the keyring yet when
  FS_IOC_SET_ENCRYPTION_POLICY is executed. However, it must be added
  before any files can be created in the encrypted directory.

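A minimal userspace sketch, assuming a uapi ``<linux/fs.h>`` that
provides the struct and constants above::

    #include <fcntl.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/fs.h>

    /* Set (or verify) an AES-256 policy on the directory at |path|. */
    static int set_policy(const char *path,
                          const __u8 key_desc[FS_KEY_DESCRIPTOR_SIZE])
    {
            struct fscrypt_policy policy;
            int ret, fd = open(path, O_RDONLY | O_DIRECTORY);

            if (fd < 0)
                    return -1;
            memset(&policy, 0, sizeof(policy));
            policy.version = 0;
            policy.contents_encryption_mode = FS_ENCRYPTION_MODE_AES_256_XTS;
            policy.filenames_encryption_mode = FS_ENCRYPTION_MODE_AES_256_CTS;
            policy.flags = FS_POLICY_FLAGS_PAD_32;
            memcpy(policy.master_key_descriptor, key_desc,
                   FS_KEY_DESCRIPTOR_SIZE);
            ret = ioctl(fd, FS_IOC_SET_ENCRYPTION_POLICY, &policy);
            close(fd);
            return ret;
    }
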
If the file is not yet encrypted, then FS_IOC_SET_ENCRYPTION_POLICY
verifies that the file is an empty directory. If so, the specified
encryption policy is assigned to the directory, turning it into an
encrypted directory. After that, and after providing the
corresponding master key as described in `Adding keys`_, all regular
files, directories (recursively), and symlinks created in the
directory will be encrypted, inheriting the same encryption policy.
The filenames in the directory's entries will be encrypted as well.

Alternatively, if the file is already encrypted, then
FS_IOC_SET_ENCRYPTION_POLICY validates that the specified encryption
policy exactly matches the actual one. If they match, then the ioctl
returns 0. Otherwise, it fails with EEXIST. This works on both
regular files and directories, including nonempty directories.

Note that the ext4 filesystem does not allow the root directory to be
encrypted, even if it is empty. Users who want to encrypt an entire
filesystem with one key should consider using dm-crypt instead.

FS_IOC_SET_ENCRYPTION_POLICY can fail with the following errors:

- ``EACCES``: the file is not owned by the process's uid, nor does the
  process have the CAP_FOWNER capability in a namespace with the file
  owner's uid mapped
- ``EEXIST``: the file is already encrypted with an encryption policy
  different from the one specified
- ``EINVAL``: an invalid encryption policy was specified (invalid
  version, mode(s), or flags)
- ``ENOTDIR``: the file is unencrypted and is a regular file, not a
  directory
- ``ENOTEMPTY``: the file is unencrypted and is a nonempty directory
- ``ENOTTY``: this type of filesystem does not implement encryption
- ``EOPNOTSUPP``: the kernel was not configured with encryption
  support for this filesystem, or the filesystem superblock has not
  had encryption enabled on it. (For example, to use encryption on an
  ext4 filesystem, CONFIG_EXT4_ENCRYPTION must be enabled in the
  kernel config, and the superblock must have had the "encrypt"
  feature flag enabled using ``tune2fs -O encrypt`` or ``mkfs.ext4 -O
  encrypt``.)
- ``EPERM``: this directory may not be encrypted, e.g. because it is
  the root directory of an ext4 filesystem
- ``EROFS``: the filesystem is readonly

Getting an encryption policy
----------------------------

The FS_IOC_GET_ENCRYPTION_POLICY ioctl retrieves the :c:type:`struct
fscrypt_policy`, if any, for a directory or regular file. See above
for the struct definition. No additional permissions are required
beyond the ability to open the file.

FS_IOC_GET_ENCRYPTION_POLICY can fail with the following errors:

- ``EINVAL``: the file is encrypted, but it uses an unrecognized
  encryption context format
- ``ENODATA``: the file is not encrypted
- ``ENOTTY``: this type of filesystem does not implement encryption
- ``EOPNOTSUPP``: the kernel was not configured with encryption
  support for this filesystem

Note: if you only need to know whether a file is encrypted or not, on
most filesystems it is also possible to use the FS_IOC_GETFLAGS ioctl
and check for FS_ENCRYPT_FL, or to use the statx() system call and
check for STATX_ATTR_ENCRYPTED in stx_attributes.

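For example, the FS_IOC_GETFLAGS route can be wrapped in a small
helper (``FS_IOC_GETFLAGS`` and ``FS_ENCRYPT_FL`` come from
``<linux/fs.h>``; this is a sketch, not part of the kernel API)::

    #include <sys/ioctl.h>
    #include <linux/fs.h>

    /* Returns 1 if the open file is encrypted, 0 if not, -1 on error. */
    static int is_encrypted(int fd)
    {
            int flags;

            if (ioctl(fd, FS_IOC_GETFLAGS, &flags) != 0)
                    return -1;
            return (flags & FS_ENCRYPT_FL) != 0;
    }
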
Getting the per-filesystem salt
-------------------------------

Some filesystems, such as ext4 and F2FS, also support the deprecated
ioctl FS_IOC_GET_ENCRYPTION_PWSALT. This ioctl retrieves a randomly
generated 16-byte value stored in the filesystem superblock. This
value is intended to be used as a salt when deriving an encryption key
from a passphrase or other low-entropy user credential.

FS_IOC_GET_ENCRYPTION_PWSALT is deprecated. Instead, prefer to
generate and manage any needed salt(s) in userspace.

Adding keys
-----------

To provide a master key, userspace must add it to an appropriate
keyring using the add_key() system call (see:
``Documentation/security/keys/core.rst``). The key type must be
"logon"; keys of this type are kept in kernel memory and cannot be
read back by userspace. The key description must be "fscrypt:"
followed by the 16-character lower case hex representation of the
``master_key_descriptor`` that was set in the encryption policy. The
key payload must conform to the following structure::

    #define FS_MAX_KEY_SIZE 64

    struct fscrypt_key {
            u32 mode;
            u8 raw[FS_MAX_KEY_SIZE];
            u32 size;
    };

``mode`` is ignored; just set it to 0. The actual key is provided in
``raw`` with ``size`` indicating its size in bytes. That is, the
bytes ``raw[0..size-1]`` (inclusive) are the actual key.

The key description prefix "fscrypt:" may alternatively be replaced
with a filesystem-specific prefix such as "ext4:". However, the
filesystem-specific prefixes are deprecated and should not be used in
new programs.

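A sketch of adding such a key to the session keyring with the raw
add_key() syscall (the wrapper is ours; ``hex_desc`` is the
16-character hex form of the descriptor)::

    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/syscall.h>
    #include <linux/keyctl.h>   /* KEY_SPEC_SESSION_KEYRING */
    #include <linux/types.h>

    #define FS_MAX_KEY_SIZE 64

    struct fscrypt_key {        /* userspace mirror of the payload */
            __u32 mode;
            __u8 raw[FS_MAX_KEY_SIZE];
            __u32 size;
    };

    static long add_fscrypt_key(const char *hex_desc,
                                const __u8 *raw, __u32 size)
    {
            struct fscrypt_key key = { .mode = 0, .size = size };
            char desc[32];

            memcpy(key.raw, raw, size);
            snprintf(desc, sizeof(desc), "fscrypt:%s", hex_desc);
            return syscall(SYS_add_key, "logon", desc, &key, sizeof(key),
                           KEY_SPEC_SESSION_KEYRING);
    }
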
There are several different types of keyrings in which encryption keys
may be placed, such as a session keyring, a user session keyring, or a
user keyring. Each key must be placed in a keyring that is "attached"
to all processes that might need to access files encrypted with it, in
the sense that request_key() will find the key. Generally, if only
processes belonging to a specific user need to access a given
encrypted directory and no session keyring has been installed, then
that directory's key should be placed in that user's user session
keyring or user keyring. Otherwise, a session keyring should be
installed if needed, and the key should be linked into that session
keyring, or in a keyring linked into that session keyring.

Note: introducing the complex visibility semantics of keyrings here
was arguably a mistake --- especially given that by design, after any
process successfully opens an encrypted file (thereby setting up the
per-file key), possessing the keyring key is not actually required for
any process to read/write the file until its in-memory inode is
evicted. In the future there probably should be a way to provide keys
directly to the filesystem instead, which would make the intended
semantics clearer.

Access semantics
================

With the key
------------

With the encryption key, encrypted regular files, directories, and
symlinks behave very similarly to their unencrypted counterparts ---
after all, the encryption is intended to be transparent. However,
astute users may notice some differences in behavior:

- Unencrypted files, or files encrypted with a different encryption
  policy (i.e. different key, modes, or flags), cannot be renamed or
  linked into an encrypted directory; see `Encryption policy
  enforcement`_. Attempts to do so will fail with EPERM. However,
  encrypted files can be renamed within an encrypted directory, or
  into an unencrypted directory.

- Direct I/O is not supported on encrypted files. Attempts to use
  direct I/O on such files will fall back to buffered I/O.

- The fallocate operations FALLOC_FL_COLLAPSE_RANGE,
  FALLOC_FL_INSERT_RANGE, and FALLOC_FL_ZERO_RANGE are not supported
  on encrypted files and will fail with EOPNOTSUPP.

- Online defragmentation of encrypted files is not supported. The
  EXT4_IOC_MOVE_EXT and F2FS_IOC_MOVE_RANGE ioctls will fail with
  EOPNOTSUPP.

- The ext4 filesystem does not support data journaling with encrypted
  regular files. It will fall back to ordered data mode instead.

- DAX (Direct Access) is not supported on encrypted files.

- The st_size of an encrypted symlink will not necessarily give the
  length of the symlink target as required by POSIX. It will actually
  give the length of the ciphertext, which will be slightly longer
  than the plaintext due to NUL-padding and an extra 2-byte overhead.

- The maximum length of an encrypted symlink is 2 bytes shorter than
  the maximum length of an unencrypted symlink. For example, on an
  EXT4 filesystem with a 4K block size, unencrypted symlinks can be up
  to 4095 bytes long, while encrypted symlinks can only be up to 4093
  bytes long (both lengths excluding the terminating null).

Note that mmap *is* supported. This is possible because the pagecache
for an encrypted file contains the plaintext, not the ciphertext.

Without the key
---------------

Some filesystem operations may be performed on encrypted regular
files, directories, and symlinks even before their encryption key has
been provided:

- File metadata may be read, e.g. using stat().

- Directories may be listed, in which case the filenames will be
  listed in an encoded form derived from their ciphertext. The
  current encoding algorithm is described in `Filename hashing and
  encoding`_. The algorithm is subject to change, but it is
  guaranteed that the presented filenames will be no longer than
  NAME_MAX bytes, will not contain the ``/`` or ``\0`` characters, and
  will uniquely identify directory entries.

  The ``.`` and ``..`` directory entries are special. They are always
  present and are not encrypted or encoded.

- Files may be deleted. That is, nondirectory files may be deleted
  with unlink() as usual, and empty directories may be deleted with
  rmdir() as usual. Therefore, ``rm`` and ``rm -r`` will work as
  expected.

- Symlink targets may be read and followed, but they will be presented
  in encrypted form, similar to filenames in directories. Hence, they
  are unlikely to point to anywhere useful.

Without the key, regular files cannot be opened or truncated.
Attempts to do so will fail with ENOKEY. This implies that any
regular file operations that require a file descriptor, such as
read(), write(), mmap(), fallocate(), and ioctl(), are also forbidden.

Also without the key, files of any type (including directories) cannot
be created or linked into an encrypted directory, nor can a name in an
encrypted directory be the source or target of a rename, nor can an
O_TMPFILE temporary file be created in an encrypted directory. All
such operations will fail with ENOKEY.

It is not currently possible to back up and restore encrypted files
without the encryption key. This would require special APIs which
have not yet been implemented.

Encryption policy enforcement
=============================

After an encryption policy has been set on a directory, all regular
files, directories, and symbolic links created in that directory
(recursively) will inherit that encryption policy. Special files ---
that is, named pipes, device nodes, and UNIX domain sockets --- will
not be encrypted.

Except for those special files, it is forbidden to have unencrypted
files, or files encrypted with a different encryption policy, in an
encrypted directory tree. Attempts to link or rename such a file into
an encrypted directory will fail with EPERM. This is also enforced
during ->lookup() to provide limited protection against offline
attacks that try to disable or downgrade encryption in known locations
where applications may later write sensitive data. It is recommended
that systems implementing a form of "verified boot" take advantage of
this by validating all top-level encryption policies prior to access.

Implementation details
======================

Encryption context
------------------

An encryption policy is represented on-disk by a :c:type:`struct
fscrypt_context`. It is up to individual filesystems to decide where
to store it, but normally it would be stored in a hidden extended
attribute. It should *not* be exposed by the xattr-related system
calls such as getxattr() and setxattr() because of the special
semantics of the encryption xattr. (In particular, there would be
much confusion if an encryption policy were to be added to or removed
from anything other than an empty directory.) The struct is defined
as follows::

    #define FS_KEY_DESCRIPTOR_SIZE 8
    #define FS_KEY_DERIVATION_NONCE_SIZE 16

    struct fscrypt_context {
            u8 format;
            u8 contents_encryption_mode;
            u8 filenames_encryption_mode;
            u8 flags;
            u8 master_key_descriptor[FS_KEY_DESCRIPTOR_SIZE];
            u8 nonce[FS_KEY_DERIVATION_NONCE_SIZE];
    };

Note that :c:type:`struct fscrypt_context` contains the same
information as :c:type:`struct fscrypt_policy` (see `Setting an
encryption policy`_), except that :c:type:`struct fscrypt_context`
also contains a nonce. The nonce is randomly generated by the kernel
and is used to derive the inode's encryption key as described in
`Per-file keys`_.

Data path changes
-----------------

For the read path (->readpage()) of regular files, filesystems can
read the ciphertext into the page cache and decrypt it in-place. The
page lock must be held until decryption has finished, to prevent the
page from becoming visible to userspace prematurely.

For the write path (->writepage()) of regular files, filesystems
cannot encrypt data in-place in the page cache, since the cached
plaintext must be preserved. Instead, filesystems must encrypt into a
temporary buffer or "bounce page", then write out the temporary
buffer. Some filesystems, such as UBIFS, already use temporary
buffers regardless of encryption. Other filesystems, such as ext4 and
F2FS, have to allocate bounce pages specially for encryption.

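From a filesystem's perspective, the write path looks roughly like the
sketch below. The helper names follow ``fs/crypto/`` of this era, but
treat the exact signatures, and ``submit_encrypted_io()``, as
assumptions::

    static int write_encrypted_block(struct inode *inode, struct page *page,
                                     unsigned int len, unsigned int offs,
                                     u64 lblk_num)
    {
            struct page *bounce_page;

            /* Encrypt into a bounce page; the pagecache page keeps the
             * plaintext. */
            bounce_page = fscrypt_encrypt_page(inode, page, len, offs,
                                               lblk_num, GFP_NOFS);
            if (IS_ERR(bounce_page))
                    return PTR_ERR(bounce_page);

            submit_encrypted_io(bounce_page);   /* hypothetical I/O helper */

            /* Free the bounce page after I/O completion (completion
             * ordering elided in this sketch). */
            fscrypt_restore_control_page(bounce_page);
            return 0;
    }
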
Filename hashing and encoding
-----------------------------

Modern filesystems accelerate directory lookups by using indexed
directories. An indexed directory is organized as a tree keyed by
filename hashes. When a ->lookup() is requested, the filesystem
normally hashes the filename being looked up so that it can quickly
find the corresponding directory entry, if any.

With encryption, lookups must be supported and efficient both with and
without the encryption key. Clearly, it would not work to hash the
plaintext filenames, since the plaintext filenames are unavailable
without the key. (Hashing the plaintext filenames would also make it
impossible for the filesystem's fsck tool to optimize encrypted
directories.) Instead, filesystems hash the ciphertext filenames,
i.e. the bytes actually stored on-disk in the directory entries. When
asked to do a ->lookup() with the key, the filesystem just encrypts
the user-supplied name to get the ciphertext.

Lookups without the key are more complicated. The raw ciphertext may
contain the ``\0`` and ``/`` characters, which are illegal in
filenames. Therefore, readdir() must base64-encode the ciphertext for
presentation. For most filenames, this works fine; on ->lookup(), the
filesystem just base64-decodes the user-supplied name to get back to
the raw ciphertext.

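The encoding itself can be sketched as a little-endian base64 variant
whose alphabet avoids ``/`` and ``\0``, so the encoded ciphertext is
always a legal filename (the exact alphabet below is an assumption)::

    static const char lookup_table[65] =
            "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+,";

    /* Encode |len| ciphertext bytes; returns the encoded length. */
    static int digest_encode_sketch(const unsigned char *src, int len,
                                    char *dst)
    {
            int i = 0, bits = 0, ac = 0;
            char *cp = dst;

            while (i < len) {
                    ac += src[i++] << bits; /* little-endian bit stream */
                    bits += 8;
                    do {
                            *cp++ = lookup_table[ac & 0x3f];
                            ac >>= 6;
                            bits -= 6;
                    } while (bits >= 6);
            }
            if (bits)
                    *cp++ = lookup_table[ac & 0x3f];
            return cp - dst;
    }
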
However, for very long filenames, base64 encoding would cause the
filename length to exceed NAME_MAX. To prevent this, readdir()
actually presents long filenames in an abbreviated form which encodes
a strong "hash" of the ciphertext filename, along with the optional
filesystem-specific hash(es) needed for directory lookups. This
allows the filesystem to still, with a high degree of confidence, map
the filename given in ->lookup() back to a particular directory entry
that was previously listed by readdir(). See :c:type:`struct
fscrypt_digested_name` in the source for more details.

Note that the precise way that filenames are presented to userspace
without the key is subject to change in the future. It is only meant
as a way to temporarily present valid filenames so that commands like
``rm -r`` work as expected on encrypted directories.

@@ -2530,6 +2530,9 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 			allow data leaks with this option, which is equivalent
 			to spectre_v2=off.

+	nospec_store_bypass_disable
+			[HW] Disable all mitigations for the Speculative Store Bypass vulnerability
+
 	noxsave		[BUGS=X86] Disables x86 extended register state save
 			and restore using xsave. The kernel will fallback to
 			enabling legacy floating-point and sse state.

@@ -3702,6 +3705,48 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 			Not specifying this option is equivalent to
 			spectre_v2=auto.

+	spec_store_bypass_disable=
+			[HW] Control Speculative Store Bypass (SSB) Disable mitigation
+			(Speculative Store Bypass vulnerability)
+
+			Certain CPUs are vulnerable to an exploit against a
+			common industry wide performance optimization known
+			as "Speculative Store Bypass" in which recent stores
+			to the same memory location may not be observed by
+			later loads during speculative execution. The idea
+			is that such stores are unlikely and that they can
+			be detected prior to instruction retirement at the
+			end of a particular speculation execution window.
+
+			In vulnerable processors, the speculatively forwarded
+			store can be used in a cache side channel attack, for
+			example to read memory to which the attacker does not
+			directly have access (e.g. inside sandboxed code).
+
+			This parameter controls whether the Speculative Store
+			Bypass optimization is used.
+
+			on      - Unconditionally disable Speculative Store Bypass
+			off     - Unconditionally enable Speculative Store Bypass
+			auto    - Kernel detects whether the CPU model contains an
+			          implementation of Speculative Store Bypass and
+			          picks the most appropriate mitigation. If the
+			          CPU is not vulnerable, "off" is selected. If the
+			          CPU is vulnerable the default mitigation is
+			          architecture and Kconfig dependent. See below.
+			prctl   - Control Speculative Store Bypass per thread
+			          via prctl. Speculative Store Bypass is enabled
+			          for a process by default. The state of the control
+			          is inherited on fork.
+			seccomp - Same as "prctl" above, but all seccomp threads
+			          will disable SSB unless they explicitly opt out.
+
+			Not specifying this option is equivalent to
+			spec_store_bypass_disable=auto.
+
+			Default mitigations:
+			X86:	If CONFIG_SECCOMP=y "seccomp", otherwise "prctl"
+
 	spia_io_base=	[HW,MTD]
 	spia_fio_base=
 	spia_pedr=

94	Documentation/spec_ctrl.txt	Normal file
@@ -0,0 +1,94 @@
===================
Speculation Control
===================

Quite a few CPUs have speculation-related misfeatures which are in
fact vulnerabilities causing data leaks in various forms even across
privilege domains.

The kernel provides mitigation for such vulnerabilities in various
forms. Some of these mitigations are compile-time configurable and some
can be supplied on the kernel command line.

There is also a class of mitigations which are very expensive, but they can
be restricted to a certain set of processes or tasks in controlled
environments. The mechanism to control these mitigations is via
:manpage:`prctl(2)`.

There are two prctl options which are related to this:

 * PR_GET_SPECULATION_CTRL

 * PR_SET_SPECULATION_CTRL

PR_GET_SPECULATION_CTRL
-----------------------

PR_GET_SPECULATION_CTRL returns the state of the speculation misfeature
which is selected with arg2 of prctl(2). The return value uses bits 0-3 with
the following meaning:

==== ===================== ===================================================
Bit  Define                Description
==== ===================== ===================================================
0    PR_SPEC_PRCTL         Mitigation can be controlled per task by
                           PR_SET_SPECULATION_CTRL.
1    PR_SPEC_ENABLE        The speculation feature is enabled, mitigation is
                           disabled.
2    PR_SPEC_DISABLE       The speculation feature is disabled, mitigation is
                           enabled.
3    PR_SPEC_FORCE_DISABLE Same as PR_SPEC_DISABLE, but cannot be undone. A
                           subsequent prctl(..., PR_SPEC_ENABLE) will fail.
==== ===================== ===================================================

If all bits are 0 the CPU is not affected by the speculation misfeature.

If PR_SPEC_PRCTL is set, then the per-task control of the mitigation is
available. If not set, prctl(PR_SET_SPECULATION_CTRL) for the speculation
misfeature will fail.

PR_SET_SPECULATION_CTRL
-----------------------

PR_SET_SPECULATION_CTRL allows controlling the speculation misfeature, which
is selected by arg2 of :manpage:`prctl(2)` per task. arg3 is used to hand
in the control value, i.e. either PR_SPEC_ENABLE or PR_SPEC_DISABLE or
PR_SPEC_FORCE_DISABLE.

Common error codes
------------------
======= =================================================================
Value   Meaning
======= =================================================================
EINVAL  The prctl is not implemented by the architecture or unused
        prctl(2) arguments are not 0.

ENODEV  arg2 is selecting a not supported speculation misfeature.
======= =================================================================

PR_SET_SPECULATION_CTRL error codes
-----------------------------------
======= =================================================================
Value   Meaning
======= =================================================================
0       Success

ERANGE  arg3 is incorrect, i.e. it's neither PR_SPEC_ENABLE nor
        PR_SPEC_DISABLE nor PR_SPEC_FORCE_DISABLE.

ENXIO   Control of the selected speculation misfeature is not possible.
        See PR_GET_SPECULATION_CTRL.

EPERM   Speculation was disabled with PR_SPEC_FORCE_DISABLE and caller
        tried to enable it again.
======= =================================================================

Speculation misfeature controls
-------------------------------
- PR_SPEC_STORE_BYPASS: Speculative Store Bypass

  Invocations:
   * prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, 0, 0, 0);
   * prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, PR_SPEC_ENABLE, 0, 0);
   * prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, PR_SPEC_DISABLE, 0, 0);
   * prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, PR_SPEC_FORCE_DISABLE, 0, 0);

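For example, a task can check for per-task control and then opt in to
the SSB mitigation. A userspace sketch; the constants come from
``<linux/prctl.h>``, and the fallback values below are assumptions for
older headers::

    #include <stdio.h>
    #include <sys/prctl.h>

    #ifndef PR_SET_SPECULATION_CTRL
    # define PR_GET_SPECULATION_CTRL 52
    # define PR_SET_SPECULATION_CTRL 53
    # define PR_SPEC_STORE_BYPASS    0
    # define PR_SPEC_PRCTL           (1UL << 0)
    # define PR_SPEC_DISABLE         (1UL << 2)
    #endif

    int main(void)
    {
            int state = prctl(PR_GET_SPECULATION_CTRL,
                              PR_SPEC_STORE_BYPASS, 0, 0, 0);

            if (state < 0 || !(state & PR_SPEC_PRCTL)) {
                    fprintf(stderr, "no per-task SSB control\n");
                    return 1;
            }
            /* Disable the misfeature, i.e. enable the mitigation. */
            if (prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
                      PR_SPEC_DISABLE, 0, 0) != 0) {
                    perror("PR_SET_SPECULATION_CTRL");
                    return 1;
            }
            return 0;
    }
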
3	Makefile
@@ -1,6 +1,6 @@
 VERSION = 4
 PATCHLEVEL = 4
-SUBLEVEL = 141
+SUBLEVEL = 146
 EXTRAVERSION =
 NAME = Blurry Fish Butt

@@ -631,6 +631,7 @@ KBUILD_CFLAGS += $(call cc-disable-warning,frame-address,)
 KBUILD_CFLAGS += $(call cc-disable-warning, format-truncation)
 KBUILD_CFLAGS += $(call cc-disable-warning, format-overflow)
 KBUILD_CFLAGS += $(call cc-disable-warning, int-in-bool-context)
+KBUILD_CFLAGS += $(call cc-disable-warning, attribute-alias)

 ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE
 KBUILD_CFLAGS += $(call cc-option,-Oz,-Os)

@@ -102,7 +102,7 @@ typedef pte_t * pgtable_t;
 #define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)

 /* Default Permissions for stack/heaps pages (Non Executable) */
-#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE)
+#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)

 #define WANT_PAGE_VIRTUAL 1

@@ -372,7 +372,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,

 /* Decode a PTE containing swap "identifier "into constituents */
 #define __swp_type(pte_lookalike)	(((pte_lookalike).val) & 0x1f)
-#define __swp_offset(pte_lookalike)	((pte_lookalike).val << 13)
+#define __swp_offset(pte_lookalike)	((pte_lookalike).val >> 13)

 /* NOPs, to keep generic kernel happy */
 #define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })

@@ -31,6 +31,7 @@
 &soc {
 	/delete-node/ qcom,turing@1a300000;

 	/delete-node/ qcom,msm-cdsp-loader;
+	/delete-node/ cti@7068000;
 	/delete-node/ turing_etm0;
 	funnel@6042000 {

@@ -222,6 +222,7 @@
 	qcom,adc-bit-resolution = <15>;
 	qcom,adc-vdd-reference = <1800>;
 	qcom,vadc-poll-eoc;
 	status = "disabled";

 	chan@8 {
 		label = "die_temp";

@@ -271,6 +272,7 @@
 	qcom,adc-bit-resolution = <15>;
 	qcom,adc-vdd-reference = <1800>;
 	qcom,adc_tm-vadc = <&pm8994_vadc>;
 	status = "disabled";
 };
 };
 };

@@ -124,10 +124,6 @@
 	};
 };

-&blsp1_uart2 {
-	status = "okay";
-};
-
 &reserved_memory {
 	pmem_shared: pmem_shared_region@d0000000 {
 		reg = <0 0xd0000000 0 0x20000000>;

@@ -251,7 +251,7 @@ extern int __put_user_8(void *, unsigned long long);
 	({ \
 		unsigned long __limit = current_thread_info()->addr_limit - 1; \
 		const typeof(*(p)) __user *__tmp_p = (p); \
-		register const typeof(*(p)) __r2 asm("r2") = (x); \
+		register typeof(*(p)) __r2 asm("r2") = (x); \
 		register const typeof(*(p)) __user *__p asm("r0") = __tmp_p; \
 		register unsigned long __l asm("r1") = __limit; \
 		register int __e asm("r0"); \

@@ -36,7 +36,6 @@ generic-y += poll.h
 generic-y += preempt.h
 generic-y += resource.h
 generic-y += rwsem.h
-generic-y += sections.h
 generic-y += segment.h
 generic-y += sembuf.h
 generic-y += serial.h

@@ -232,6 +232,16 @@ static inline pte_t pte_mknoncont(pte_t pte)
 	return clear_pte_bit(pte, __pgprot(PTE_CONT));
 }

+static inline pte_t pte_clear_rdonly(pte_t pte)
+{
+	return clear_pte_bit(pte, __pgprot(PTE_RDONLY));
+}
+
+static inline pte_t pte_mkpresent(pte_t pte)
+{
+	return set_pte_bit(pte, __pgprot(PTE_VALID));
+}
+
 static inline pmd_t pmd_mkcont(pmd_t pmd)
 {
 	return __pmd(pmd_val(pmd) | PMD_SECT_CONT);

28	arch/arm64/include/asm/sections.h	Normal file
@@ -0,0 +1,28 @@
/*
 * Copyright (C) 2016 ARM Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_SECTIONS_H
#define __ASM_SECTIONS_H

#include <asm-generic/sections.h>

extern char __alt_instructions[], __alt_instructions_end[];
extern char __exception_text_start[], __exception_text_end[];
extern char __hibernate_exit_text_start[], __hibernate_exit_text_end[];
extern char __idmap_text_start[], __idmap_text_end[];
extern char __irqentry_text_start[], __irqentry_text_end[];
extern char __mmuoff_data_start[], __mmuoff_data_end[];

#endif /* __ASM_SECTIONS_H */

@@ -19,6 +19,7 @@
 #define __ASM_TRAP_H

 #include <linux/list.h>
+#include <asm/sections.h>

 struct pt_regs;

@@ -36,17 +37,12 @@ void unregister_undef_hook(struct undef_hook *hook);

 static inline int __in_irqentry_text(unsigned long ptr)
 {
-	extern char __irqentry_text_start[];
-	extern char __irqentry_text_end[];
-
 	return ptr >= (unsigned long)&__irqentry_text_start &&
 	       ptr < (unsigned long)&__irqentry_text_end;
 }

 static inline int in_exception_text(unsigned long ptr)
 {
-	extern char __exception_text_start[];
-	extern char __exception_text_end[];
 	int in;

 	in = ptr >= (unsigned long)&__exception_text_start &&

@@ -40,6 +40,7 @@
 #ifndef __ASSEMBLY__

 #include <asm/ptrace.h>
+#include <asm/sections.h>

 /*
  * __boot_cpu_mode records what mode CPUs were booted in.

@@ -25,14 +25,13 @@
 #include <asm/alternative.h>
 #include <asm/cpufeature.h>
 #include <asm/insn.h>
+#include <asm/sections.h>
 #include <linux/stop_machine.h>

 #define __ALT_PTR(a,f)		(u32 *)((void *)&(a)->f + (a)->f)
 #define ALT_ORIG_PTR(a)		__ALT_PTR(a, orig_offset)
 #define ALT_REPL_PTR(a)		__ALT_PTR(a, alt_offset)

-extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
-
 struct alt_region {
 	struct alt_instr *begin;
 	struct alt_instr *end;

@@ -124,8 +123,8 @@ static int __apply_alternatives_multi_stop(void *unused)
 {
 	static int patched = 0;
 	struct alt_region region = {
-		.begin	= __alt_instructions,
-		.end	= __alt_instructions_end,
+		.begin	= (struct alt_instr *)__alt_instructions,
+		.end	= (struct alt_instr *)__alt_instructions_end,
 	};

 	/* We always have a CPU 0 at this point (__init) */

@@ -472,7 +472,7 @@ ENDPROC(__primary_switched)
  * end early head section, begin head code that is also used for
  * hotplug and needs to have the same protections as the text region
  */
-	.section ".text","ax"
+	.section ".idmap.text","ax"

 ENTRY(kimage_vaddr)
 	.quad		_text - TEXT_OFFSET

@@ -594,6 +594,13 @@ set_cpu_boot_mode_flag:
 	ret
 ENDPROC(set_cpu_boot_mode_flag)

+/*
+ * These values are written with the MMU off, but read with the MMU on.
+ * Writers will invalidate the corresponding address, discarding up to a
+ * 'Cache Writeback Granule' (CWG) worth of data. The linker script ensures
+ * sufficient alignment that the CWG doesn't overlap another section.
+ */
+.pushsection ".mmuoff.data.write", "aw"
 /*
  * We need to find out the CPU boot mode long after boot, so we need to
  * store it in a writable variable.

@@ -601,11 +608,16 @@ ENDPROC(set_cpu_boot_mode_flag)
  * This is not in .bss, because we set it sufficiently early that the boot-time
  * zeroing of .bss would clobber it.
  */
-	.pushsection	.data..cacheline_aligned
-	.align	L1_CACHE_SHIFT
 ENTRY(__boot_cpu_mode)
 	.long	BOOT_CPU_MODE_EL2
 	.long	BOOT_CPU_MODE_EL1
+/*
+ * The booting CPU updates the failed status @__early_cpu_boot_status,
+ * with MMU turned off.
+ */
+ENTRY(__early_cpu_boot_status)
+	.long	0
+
 	.popsection

 /*

@@ -679,7 +691,6 @@ ENDPROC(__secondary_switched)
  * Checks if the selected granule size is supported by the CPU.
  * If it isn't, park the CPU
  */
-	.section	".idmap.text", "ax"
 ENTRY(__enable_mmu)
 	mrs	x22, sctlr_el1			// preserve old SCTLR_EL1 value
 	mrs	x1, ID_AA64MMFR0_EL1

@@ -53,12 +53,6 @@ extern int in_suspend;
 /* Do we need to reset el2? */
 #define el2_reset_needed() (is_hyp_mode_available() && !is_kernel_in_hyp_mode())

-/*
- * Start/end of the hibernate exit code, this must be copied to a 'safe'
- * location in memory, and executed from there.
- */
-extern char __hibernate_exit_text_start[], __hibernate_exit_text_end[];
-
 /* temporary el2 vectors in the __hibernate_exit_text section. */
 extern char hibernate_el2_vectors[];

@@ -240,6 +234,7 @@ out:
 	return rc;
 }

+#define dcache_clean_range(start, end)	__flush_dcache_area(start, (end - start))

 int swsusp_arch_suspend(void)
 {

@@ -252,8 +247,9 @@ int swsusp_arch_suspend(void)
 	if (__cpu_suspend_enter(&state)) {
 		ret = swsusp_save();
 	} else {
-		/* Clean kernel to PoC for secondary core startup */
-		__flush_dcache_area(LMADDR(KERNEL_START), KERNEL_END - KERNEL_START);
+		/* Clean kernel core startup/idle code to PoC*/
+		dcache_clean_range(__mmuoff_data_start, __mmuoff_data_end);
+		dcache_clean_range(__idmap_text_start, __idmap_text_end);

 		/*
 		 * Tell the hibernation core that we've just restored

|
|||
return ret;
|
||||
}
|
||||
|
||||
static void _copy_pte(pte_t *dst_pte, pte_t *src_pte, unsigned long addr)
|
||||
{
|
||||
pte_t pte = *src_pte;
|
||||
|
||||
if (pte_valid(pte)) {
|
||||
/*
|
||||
* Resume will overwrite areas that may be marked
|
||||
* read only (code, rodata). Clear the RDONLY bit from
|
||||
* the temporary mappings we use during restore.
|
||||
*/
|
||||
set_pte(dst_pte, pte_clear_rdonly(pte));
|
||||
} else if (debug_pagealloc_enabled() && !pte_none(pte)) {
|
||||
/*
|
||||
* debug_pagealloc will removed the PTE_VALID bit if
|
||||
* the page isn't in use by the resume kernel. It may have
|
||||
* been in use by the original kernel, in which case we need
|
||||
* to put it back in our copy to do the restore.
|
||||
*
|
||||
* Before marking this entry valid, check the pfn should
|
||||
* be mapped.
|
||||
*/
|
||||
BUG_ON(!pfn_valid(pte_pfn(pte)));
|
||||
|
||||
set_pte(dst_pte, pte_mkpresent(pte_clear_rdonly(pte)));
|
||||
}
|
||||
}
|
||||
|
||||
static int copy_pte(pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long start,
|
||||
unsigned long end)
|
||||
{
|
||||
|
@ -284,13 +307,7 @@ static int copy_pte(pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long start,
|
|||
|
||||
src_pte = pte_offset_kernel(src_pmd, start);
|
||||
do {
|
||||
if (!pte_none(*src_pte))
|
||||
/*
|
||||
* Resume will overwrite areas that may be marked
|
||||
* read only (code, rodata). Clear the RDONLY bit from
|
||||
* the temporary mappings we use during restore.
|
||||
*/
|
||||
set_pte(dst_pte, __pte(pte_val(*src_pte) & ~PTE_RDONLY));
|
||||
_copy_pte(dst_pte, src_pte, addr);
|
||||
} while (dst_pte++, src_pte++, addr += PAGE_SIZE, addr != end);
|
||||
|
||||
return 0;
|
||||
|
|
|
@@ -31,7 +31,7 @@
 #include <asm/insn.h>
 #include <asm/uaccess.h>
 #include <asm/irq.h>
-#include <asm-generic/sections.h>
+#include <asm/sections.h>

 #include "decode-insn.h"

@@ -97,16 +97,24 @@ ENTRY(__cpu_suspend_enter)
 ENDPROC(__cpu_suspend_enter)
 	.ltorg

+.pushsection ".idmap.text", "ax"
 ENTRY(cpu_resume)
 	bl	el2_setup		// if in EL2 drop to EL1 cleanly
 	/* enable the MMU early - so we can access sleep_save_stash by va */
 	adr_l	lr, __enable_mmu	/* __cpu_setup will return here */
-	ldr	x27, =_cpu_resume	/* __enable_mmu will branch here */
+	adr_l	x27, _resume_switched	/* __enable_mmu will branch here */
 	adrp	x25, idmap_pg_dir
 	adrp	x26, swapper_pg_dir
 	b	__cpu_setup
 ENDPROC(cpu_resume)

+_resume_switched:
+	ldr	x8, =_cpu_resume
+	br	x8
+ENDPROC(_resume_switched)
+	.ltorg
+.popsection
+
 ENTRY(_cpu_resume)
 	mrs	x1, mpidr_el1
 	adrp	x8, mpidr_hash

@@ -30,7 +30,8 @@
 #include <asm/smp_plat.h>

 extern void secondary_holding_pen(void);
-volatile unsigned long secondary_holding_pen_release = INVALID_HWID;
+volatile unsigned long __section(".mmuoff.data.read")
+secondary_holding_pen_release = INVALID_HWID;

 static phys_addr_t cpu_release_addr[NR_CPUS];

@@ -197,6 +197,25 @@ SECTIONS
 	_data = .;
 	_sdata = .;
 	RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)

+	/*
+	 * Data written with the MMU off but read with the MMU on requires
+	 * cache lines to be invalidated, discarding up to a Cache Writeback
+	 * Granule (CWG) of data from the cache. Keep the section that
+	 * requires this type of maintenance to be in its own Cache Writeback
+	 * Granule (CWG) area so the cache maintenance operations don't
+	 * interfere with adjacent data.
+	 */
+	.mmuoff.data.write : ALIGN(SZ_2K) {
+		__mmuoff_data_start = .;
+		*(.mmuoff.data.write)
+	}
+	. = ALIGN(SZ_2K);
+	.mmuoff.data.read : {
+		*(.mmuoff.data.read)
+		__mmuoff_data_end = .;
+	}
+
 	PECOFF_EDATA_PADDING
 	_edata = .;

@@ -146,4 +146,43 @@ void __kernel_map_pages(struct page *page, int numpages, int enable)
					__pgprot(0),
					__pgprot(PTE_VALID));
 }
-#endif
+#ifdef CONFIG_HIBERNATION
+/*
+ * When built with CONFIG_DEBUG_PAGEALLOC and CONFIG_HIBERNATION, this function
+ * is used to determine if a linear map page has been marked as not-valid by
+ * CONFIG_DEBUG_PAGEALLOC. Walk the page table and check the PTE_VALID bit.
+ * This is based on kern_addr_valid(), which almost does what we need.
+ *
+ * Because this is only called on the kernel linear map, p?d_sect() implies
+ * p?d_present(). When debug_pagealloc is enabled, sections mappings are
+ * disabled.
+ */
+bool kernel_page_present(struct page *page)
+{
+	pgd_t *pgd;
+	pud_t *pud;
+	pmd_t *pmd;
+	pte_t *pte;
+	unsigned long addr = (unsigned long)page_address(page);
+
+	pgd = pgd_offset_k(addr);
+	if (pgd_none(*pgd))
+		return false;
+
+	pud = pud_offset(pgd, addr);
+	if (pud_none(*pud))
+		return false;
+	if (pud_sect(*pud))
+		return true;
+
+	pmd = pmd_offset(pud, addr);
+	if (pmd_none(*pmd))
+		return false;
+	if (pmd_sect(*pmd))
+		return true;
+
+	pte = pte_offset_kernel(pmd, addr);
+	return pte_valid(*pte);
+}
+#endif /* CONFIG_HIBERNATION */
+#endif /* CONFIG_DEBUG_PAGEALLOC */

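kernel_page_present() exists so the hibernate core can ask, before touching a linear-map address, whether DEBUG_PAGEALLOC has unmapped it. A hedged sketch of the caller side (illustrative only; the real check lives in the generic snapshot/copy code, not in this hunk):

    static bool page_is_copyable(struct page *page)
    {
        /* Touching a not-present linear-map page would fault, so the
         * snapshot code must consult the page tables first. */
        return kernel_page_present(page);
    }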
@@ -132,6 +132,7 @@ ENDPROC(cpu_do_suspend)
  *
  * x0: Address of context pointer
  */
+	.pushsection ".idmap.text", "ax"
 ENTRY(cpu_do_resume)
	ldp	x2, x3, [x0]
	ldp	x4, x5, [x0, #16]

@@ -163,6 +164,7 @@ ENTRY(cpu_do_resume)
	isb
	ret
 ENDPROC(cpu_do_resume)
+	.popsection
 #endif

 /*

@@ -220,6 +222,7 @@ ENDPROC(idmap_cpu_replace_ttbr1)
  * Initialise the processor for turning the MMU on. Return in x0 the
  * value of the SCTLR_EL1 register.
  */
+	.pushsection ".idmap.text", "ax"
 ENTRY(__cpu_setup)
	tlbi	vmalle1				// Invalidate local TLB
	dsb	nsh

@@ -321,3 +324,4 @@ crval:
 #endif
	.word	0xfcffffff			// clear
	.word	0x34d5d91d | CR_IBIT | CR_CBIT	// set
+	.popsection

@@ -21,17 +21,19 @@ $(obj)/linux.bin.gz: $(obj)/linux.bin FORCE
 quiet_cmd_cp = CP      $< $@$2
       cmd_cp = cat $< >$@$2 || (rm -f $@ && echo false)

-quiet_cmd_strip = STRIP   $@
+quiet_cmd_strip = STRIP   $< $@$2
       cmd_strip = $(STRIP) -K microblaze_start -K _end -K __log_buf \
-				-K _fdt_start vmlinux -o $@
+				-K _fdt_start $< -o $@$2

 UIMAGE_LOADADDR = $(CONFIG_KERNEL_BASE_ADDR)
+UIMAGE_IN = $@
+UIMAGE_OUT = $@.ub

 $(obj)/simpleImage.%: vmlinux FORCE
	$(call if_changed,cp,.unstrip)
	$(call if_changed,objcopy)
	$(call if_changed,uimage)
-	$(call if_changed,strip)
-	@echo 'Kernel: $@ is ready' ' (#'`cat .version`')'
+	$(call if_changed,strip,.strip)
+	@echo 'Kernel: $(UIMAGE_OUT) is ready' ' (#'`cat .version`')'

 clean-files += simpleImage.*.unstrip linux.bin.ub dts/*.dtb

@@ -58,7 +58,7 @@ EXPORT_SYMBOL_GPL(ath79_ddr_ctrl_init);

 void ath79_ddr_wb_flush(u32 reg)
 {
-	void __iomem *flush_reg = ath79_ddr_wb_flush_base + reg;
+	void __iomem *flush_reg = ath79_ddr_wb_flush_base + (reg * 4);

	/* Flush the DDR write buffer. */
	__raw_writel(0x1, flush_reg);

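The fix is an index-to-offset conversion: ath79_ddr_wb_flush_base is a void __iomem *, so the old base + reg advanced only reg bytes, while each flush register is a 32-bit word. Worked through with an illustrative value:

    /* reg = 2 (third flush register):
     *   old: flush_reg = base + 2       -> byte offset 2, lands mid-register
     *   new: flush_reg = base + (2 * 4) -> byte offset 8, the intended word
     */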
@@ -89,7 +89,7 @@ static inline void pci_resource_to_user(const struct pci_dev *dev, int bar,
	phys_addr_t size = resource_size(rsrc);

	*start = fixup_bigphys_addr(rsrc->start, size);
-	*end = rsrc->start + size;
+	*end = rsrc->start + size - 1;
 }

 /*

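Resource end addresses are inclusive, which is what the - 1 accounts for. With a BAR at 0x1000 of size 0x100:

    /*   old: *end = 0x1000 + 0x100     = 0x1100 (first byte past the BAR)
     *   new: *end = 0x1000 + 0x100 - 1 = 0x10ff (last byte of the BAR)
     */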
@@ -633,21 +633,48 @@ unsigned long arch_align_stack(unsigned long sp)
	return sp & ALMASK;
 }

+static DEFINE_PER_CPU(struct call_single_data, backtrace_csd);
+static struct cpumask backtrace_csd_busy;
+
 static void arch_dump_stack(void *info)
 {
	struct pt_regs *regs;
+	static arch_spinlock_t lock = __ARCH_SPIN_LOCK_UNLOCKED;

+	arch_spin_lock(&lock);
	regs = get_irq_regs();

	if (regs)
		show_regs(regs);
+	else
+		dump_stack();
+	arch_spin_unlock(&lock);

-	dump_stack();
+	cpumask_clear_cpu(smp_processor_id(), &backtrace_csd_busy);
 }

 void arch_trigger_all_cpu_backtrace(bool include_self)
 {
-	smp_call_function(arch_dump_stack, NULL, 1);
+	struct call_single_data *csd;
+	int cpu;
+
+	for_each_cpu(cpu, cpu_online_mask) {
+		/*
+		 * If we previously sent an IPI to the target CPU & it hasn't
+		 * cleared its bit in the busy cpumask then it didn't handle
+		 * our previous IPI & it's not safe for us to reuse the
+		 * call_single_data_t.
+		 */
+		if (cpumask_test_and_set_cpu(cpu, &backtrace_csd_busy)) {
+			pr_warn("Unable to send backtrace IPI to CPU%u - perhaps it hung?\n",
+				cpu);
+			continue;
+		}
+
+		csd = &per_cpu(backtrace_csd, cpu);
+		csd->func = arch_dump_stack;
+		smp_call_function_single_async(cpu, csd);
+	}
 }

 int mips_get_process_fp_mode(struct task_struct *task)

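The cpumask acts as a per-destination in-flight marker: a call_single_data must never be requeued while a previous async send is pending. The invariant, distilled into a minimal sketch (same kernel APIs as the patch; names shortened for illustration):

    /* Sender: claim the slot. If it was already claimed, the target CPU
     * never ran our handler, the csd is still live, and reusing it would
     * corrupt the IPI queue - so skip and warn instead. */
    if (cpumask_test_and_set_cpu(cpu, &busy))
        return;
    csd->func = handler;                    /* handler clears the busy bit */
    smp_call_function_single_async(cpu, csd);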
@@ -345,6 +345,7 @@ static void __show_regs(const struct pt_regs *regs)
 void show_regs(struct pt_regs *regs)
 {
	__show_regs((struct pt_regs *)regs);
+	dump_stack();
 }

 void show_registers(struct pt_regs *regs)

@@ -720,7 +720,7 @@ start_here:
	tovirt(r6,r6)
	lis	r5, abatron_pteptrs@h
	ori	r5, r5, abatron_pteptrs@l
-	stw	r5, 0xf0(r0)	/* Must match your Abatron config file */
+	stw	r5, 0xf0(0)	/* Must match your Abatron config file */
	tophys(r5,r5)
	stw	r6, 0(r5)

@@ -11,6 +11,7 @@
 #include <linux/sched.h>
 #include <linux/errno.h>
 #include <linux/bootmem.h>
+#include <linux/syscalls.h>
 #include <linux/irq.h>
 #include <linux/list.h>
 #include <linux/of.h>

@@ -69,14 +69,14 @@ static inline void slb_shadow_update(unsigned long ea, int ssize,
	 * updating it.  No write barriers are needed here, provided
	 * we only update the current CPU's SLB shadow buffer.
	 */
-	p->save_area[index].esid = 0;
-	p->save_area[index].vsid = cpu_to_be64(mk_vsid_data(ea, ssize, flags));
-	p->save_area[index].esid = cpu_to_be64(mk_esid_data(ea, ssize, index));
+	WRITE_ONCE(p->save_area[index].esid, 0);
+	WRITE_ONCE(p->save_area[index].vsid, cpu_to_be64(mk_vsid_data(ea, ssize, flags)));
+	WRITE_ONCE(p->save_area[index].esid, cpu_to_be64(mk_esid_data(ea, ssize, index)));
 }

 static inline void slb_shadow_clear(enum slb_index index)
 {
-	get_slb_shadow()->save_area[index].esid = 0;
+	WRITE_ONCE(get_slb_shadow()->save_area[index].esid, 0);
 }

 static inline void create_shadowed_slbe(unsigned long ea, int ssize,

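WRITE_ONCE() here is about store integrity rather than ordering: the hypervisor may read the SLB shadow at any instant, and a plain assignment permits the compiler to tear, merge, or re-emit the store. A small illustration (not from the patch):

    p->save_area[i].esid = v;              /* may be split into partial stores */
    WRITE_ONCE(p->save_area[i].esid, v);   /* emitted as one untorn access */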
@@ -27,6 +27,8 @@
 #include <asm/sections.h>
 #include <asm/time.h>

+#include <platforms/chrp/chrp.h>
+
 extern spinlock_t rtc_lock;

 #define NVRAM_AS0  0x74

@@ -62,7 +64,7 @@ long __init chrp_time_init(void)
	return 0;
 }

-int chrp_cmos_clock_read(int addr)
+static int chrp_cmos_clock_read(int addr)
 {
	if (nvram_as1 != 0)
		outb(addr>>8, nvram_as1);

@@ -70,7 +72,7 @@ int chrp_cmos_clock_read(int addr)
	return (inb(nvram_data));
 }

-void chrp_cmos_clock_write(unsigned long val, int addr)
+static void chrp_cmos_clock_write(unsigned long val, int addr)
 {
	if (nvram_as1 != 0)
		outb(addr>>8, nvram_as1);

@@ -35,6 +35,8 @@
 */
 #define HW_BROADWAY_ICR		0x00
 #define HW_BROADWAY_IMR		0x04
+#define HW_STARLET_ICR		0x08
+#define HW_STARLET_IMR		0x0c


 /*

@@ -74,6 +76,9 @@ static void hlwd_pic_unmask(struct irq_data *d)
	void __iomem *io_base = irq_data_get_irq_chip_data(d);

	setbits32(io_base + HW_BROADWAY_IMR, 1 << irq);
+
+	/* Make sure the ARM (aka. Starlet) doesn't handle this interrupt. */
+	clrbits32(io_base + HW_STARLET_IMR, 1 << irq);
 }


@@ -467,7 +467,7 @@ void __init bootx_init(unsigned long r3, unsigned long r4)
	boot_infos_t *bi = (boot_infos_t *) r4;
	unsigned long hdr;
	unsigned long space;
-	unsigned long ptr, x;
+	unsigned long ptr;
	char *model;
	unsigned long offset = reloc_offset();

@@ -561,6 +561,8 @@ void __init bootx_init(unsigned long r3, unsigned long r4)
	 * MMU switched OFF, so this should not be useful anymore.
	 */
	if (bi->version < 4) {
+		unsigned long x __maybe_unused;
+
		bootx_printf("Touching pages...\n");

		/*

@@ -359,6 +359,7 @@ static int pmac_late_init(void)
 }
 machine_late_initcall(powermac, pmac_late_init);

+void note_bootable_part(dev_t dev, int part, int goodness);
 /*
  * This is __init_refok because we check for "initializing" before
  * touching any of the __init sensitive things and "initializing"

@@ -113,7 +113,7 @@ struct hws_basic_entry {

 struct hws_diag_entry {
	unsigned int def:16;	    /* 0-15  Data Entry Format		 */
-	unsigned int R:14;	    /* 16-19 and 20-30 reserved		 */
+	unsigned int R:15;	    /* 16-19 and 20-30 reserved		 */
	unsigned int I:1;	    /* 31 entry valid or invalid	 */
	u8	     data[];	    /* Machine-dependent sample data	 */
 } __packed;

@@ -129,7 +129,9 @@ struct hws_trailer_entry {
			unsigned int f:1;	/* 0 - Block Full Indicator   */
			unsigned int a:1;	/* 1 - Alert request control  */
			unsigned int t:1;	/* 2 - Timestamp format	      */
-			unsigned long long:61;	/* 3 - 63: Reserved	      */
+			unsigned int :29;	/* 3 - 31: Reserved	      */
+			unsigned int bsdes:16;	/* 32-47: size of basic SDE   */
+			unsigned int dsdes:16;	/* 48-63: size of diagnostic SDE */
		};
		unsigned long long flags;	/* 0 - 63: All indicators     */
	};

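The R:14 fix is bit-budget arithmetic: the fields of hws_diag_entry must fill one 32-bit word, and 16 + 14 + 1 = 31 left the valid bit I one position short of bit 31, whereas 16 + 15 + 1 = 32 restores the documented layout. The trailer change is the same discipline applied to a 64-bit word, with the anonymous union letting all indicator bits be read at once through flags:

    /* f(1) + a(1) + t(1) + reserved(29) + bsdes(16) + dsdes(16)
     *   = 64 bits = sizeof(flags) * 8                            */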
@@ -214,13 +214,17 @@ CONFIG_SCSI_CONSTANTS=y
 CONFIG_SCSI_SPI_ATTRS=y
 CONFIG_SCSI_VIRTIO=y
 CONFIG_MD=y
+CONFIG_BLK_DEV_MD=y
+CONFIG_MD_LINEAR=y
 CONFIG_BLK_DEV_DM=y
 CONFIG_DM_CRYPT=y
 CONFIG_DM_MIRROR=y
 CONFIG_DM_ZERO=y
 CONFIG_DM_UEVENT=y
 CONFIG_DM_VERITY=y
+CONFIG_DM_VERITY_HASH_PREFETCH_MIN_SIZE=1
 CONFIG_DM_VERITY_FEC=y
+CONFIG_DM_ANDROID_VERITY=y
 CONFIG_NETDEVICES=y
 CONFIG_NETCONSOLE=y
 CONFIG_NETCONSOLE_DYNAMIC=y

@@ -447,3 +451,8 @@ CONFIG_SECURITY_SELINUX_CHECKREQPROT_VALUE=1
 # CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set
 CONFIG_CRYPTO_ECHAINIV=y
 CONFIG_CRYPTO_SHA512=y
+CONFIG_ASYMMETRIC_KEY_TYPE=y
+CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=y
+CONFIG_X509_CERTIFICATE_PARSER=y
+CONFIG_SYSTEM_TRUSTED_KEYRING=y
+CONFIG_SYSTEM_TRUSTED_KEYS="verity_dev_keys.x509"

@@ -79,24 +79,33 @@ ENTRY(entry_SYSENTER_compat)
	ASM_CLAC			/* Clear AC after saving FLAGS */

	pushq	$__USER32_CS		/* pt_regs->cs */
-	xorq	%r8,%r8
-	pushq	%r8			/* pt_regs->ip = 0 (placeholder) */
+	pushq	$0			/* pt_regs->ip = 0 (placeholder) */
	pushq	%rax			/* pt_regs->orig_ax */
	pushq	%rdi			/* pt_regs->di */
	pushq	%rsi			/* pt_regs->si */
	pushq	%rdx			/* pt_regs->dx */
	pushq	%rcx			/* pt_regs->cx */
	pushq	$-ENOSYS		/* pt_regs->ax */
-	pushq	%r8			/* pt_regs->r8  = 0 */
-	pushq	%r8			/* pt_regs->r9  = 0 */
-	pushq	%r8			/* pt_regs->r10 = 0 */
-	pushq	%r8			/* pt_regs->r11 = 0 */
+	pushq	$0			/* pt_regs->r8  = 0 */
+	xorq	%r8, %r8		/* nospec   r8 */
+	pushq	$0			/* pt_regs->r9  = 0 */
+	xorq	%r9, %r9		/* nospec   r9 */
+	pushq	$0			/* pt_regs->r10 = 0 */
+	xorq	%r10, %r10		/* nospec   r10 */
+	pushq	$0			/* pt_regs->r11 = 0 */
+	xorq	%r11, %r11		/* nospec   r11 */
	pushq	%rbx			/* pt_regs->rbx */
+	xorl	%ebx, %ebx		/* nospec   rbx */
	pushq	%rbp			/* pt_regs->rbp (will be overwritten) */
-	pushq	%r8			/* pt_regs->r12 = 0 */
-	pushq	%r8			/* pt_regs->r13 = 0 */
-	pushq	%r8			/* pt_regs->r14 = 0 */
-	pushq	%r8			/* pt_regs->r15 = 0 */
+	xorl	%ebp, %ebp		/* nospec   rbp */
+	pushq	$0			/* pt_regs->r12 = 0 */
+	xorq	%r12, %r12		/* nospec   r12 */
+	pushq	$0			/* pt_regs->r13 = 0 */
+	xorq	%r13, %r13		/* nospec   r13 */
+	pushq	$0			/* pt_regs->r14 = 0 */
+	xorq	%r14, %r14		/* nospec   r14 */
+	pushq	$0			/* pt_regs->r15 = 0 */
+	xorq	%r15, %r15		/* nospec   r15 */
	cld

	/*

@@ -185,17 +194,26 @@ ENTRY(entry_SYSCALL_compat)
	pushq	%rdx			/* pt_regs->dx */
	pushq	%rbp			/* pt_regs->cx (stashed in bp) */
	pushq	$-ENOSYS		/* pt_regs->ax */
-	xorq	%r8,%r8
-	pushq	%r8			/* pt_regs->r8  = 0 */
-	pushq	%r8			/* pt_regs->r9  = 0 */
-	pushq	%r8			/* pt_regs->r10 = 0 */
-	pushq	%r8			/* pt_regs->r11 = 0 */
+	pushq	$0			/* pt_regs->r8  = 0 */
+	xorq	%r8, %r8		/* nospec   r8 */
+	pushq	$0			/* pt_regs->r9  = 0 */
+	xorq	%r9, %r9		/* nospec   r9 */
+	pushq	$0			/* pt_regs->r10 = 0 */
+	xorq	%r10, %r10		/* nospec   r10 */
+	pushq	$0			/* pt_regs->r11 = 0 */
+	xorq	%r11, %r11		/* nospec   r11 */
	pushq	%rbx			/* pt_regs->rbx */
+	xorl	%ebx, %ebx		/* nospec   rbx */
	pushq	%rbp			/* pt_regs->rbp (will be overwritten) */
-	pushq	%r8			/* pt_regs->r12 = 0 */
-	pushq	%r8			/* pt_regs->r13 = 0 */
-	pushq	%r8			/* pt_regs->r14 = 0 */
-	pushq	%r8			/* pt_regs->r15 = 0 */
+	xorl	%ebp, %ebp		/* nospec   rbp */
+	pushq	$0			/* pt_regs->r12 = 0 */
+	xorq	%r12, %r12		/* nospec   r12 */
+	pushq	$0			/* pt_regs->r13 = 0 */
+	xorq	%r13, %r13		/* nospec   r13 */
+	pushq	$0			/* pt_regs->r14 = 0 */
+	xorq	%r14, %r14		/* nospec   r14 */
+	pushq	$0			/* pt_regs->r15 = 0 */
+	xorq	%r15, %r15		/* nospec   r15 */

	/*
	 * User mode is traced as though IRQs are on, and SYSENTER

@@ -292,17 +310,26 @@ ENTRY(entry_INT80_compat)
	pushq	%rdx			/* pt_regs->dx */
	pushq	%rcx			/* pt_regs->cx */
	pushq	$-ENOSYS		/* pt_regs->ax */
-	xorq	%r8,%r8
-	pushq	%r8			/* pt_regs->r8  = 0 */
-	pushq	%r8			/* pt_regs->r9  = 0 */
-	pushq	%r8			/* pt_regs->r10 = 0 */
-	pushq	%r8			/* pt_regs->r11 = 0 */
+	pushq	$0			/* pt_regs->r8  = 0 */
+	xorq	%r8, %r8		/* nospec   r8 */
+	pushq	$0			/* pt_regs->r9  = 0 */
+	xorq	%r9, %r9		/* nospec   r9 */
+	pushq	$0			/* pt_regs->r10 = 0 */
+	xorq	%r10, %r10		/* nospec   r10 */
+	pushq	$0			/* pt_regs->r11 = 0 */
+	xorq	%r11, %r11		/* nospec   r11 */
	pushq	%rbx			/* pt_regs->rbx */
+	xorl	%ebx, %ebx		/* nospec   rbx */
	pushq	%rbp			/* pt_regs->rbp */
+	xorl	%ebp, %ebp		/* nospec   rbp */
	pushq	%r12			/* pt_regs->r12 */
+	xorq	%r12, %r12		/* nospec   r12 */
	pushq	%r13			/* pt_regs->r13 */
+	xorq	%r13, %r13		/* nospec   r13 */
	pushq	%r14			/* pt_regs->r14 */
+	xorq	%r14, %r14		/* nospec   r14 */
	pushq	%r15			/* pt_regs->r15 */
+	xorq	%r15, %r15		/* nospec   r15 */
	cld

	/*

@@ -6,6 +6,8 @@
 #ifndef _ASM_X86_MACH_DEFAULT_APM_H
 #define _ASM_X86_MACH_DEFAULT_APM_H

+#include <asm/nospec-branch.h>
+
 #ifdef APM_ZERO_SEGS
 #	define APM_DO_ZERO_SEGS \
		"pushl %%ds\n\t" \

@@ -31,6 +33,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
	 * N.B. We do NOT need a cld after the BIOS call
	 * because we always save and restore the flags.
	 */
+	firmware_restrict_branch_speculation_start();
	__asm__ __volatile__(APM_DO_ZERO_SEGS
		"pushl %%edi\n\t"
		"pushl %%ebp\n\t"

@@ -43,6 +46,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
		  "=S" (*esi)
		: "a" (func), "b" (ebx_in), "c" (ecx_in)
		: "memory", "cc");
+	firmware_restrict_branch_speculation_end();
 }

 static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in,

@@ -55,6 +59,7 @@ static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in,
	 * N.B. We do NOT need a cld after the BIOS call
	 * because we always save and restore the flags.
	 */
+	firmware_restrict_branch_speculation_start();
	__asm__ __volatile__(APM_DO_ZERO_SEGS
		"pushl %%edi\n\t"
		"pushl %%ebp\n\t"

@@ -67,6 +72,7 @@ static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in,
		  "=S" (si)
		: "a" (func), "b" (ebx_in), "c" (ecx_in)
		: "memory", "cc");
+	firmware_restrict_branch_speculation_end();
	return error;
 }

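Every firmware entry point in this series receives the same IBRS bracket that apm.h shows above. The shape in isolation, as a hedged sketch (fw_call is a hypothetical stand-in for any BIOS/EFI callback):

    static int call_firmware(int (*fw_call)(void))
    {
        int ret;

        firmware_restrict_branch_speculation_start(); /* IBRS on, preempt off */
        ret = fw_call();                              /* untrusted firmware code */
        firmware_restrict_branch_speculation_end();   /* restore SPEC_CTRL */
        return ret;
    }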
@@ -45,6 +45,65 @@
 #define _ASM_SI		__ASM_REG(si)
 #define _ASM_DI		__ASM_REG(di)

+#ifndef __x86_64__
+/* 32 bit */
+
+#define _ASM_ARG1	_ASM_AX
+#define _ASM_ARG2	_ASM_DX
+#define _ASM_ARG3	_ASM_CX
+
+#define _ASM_ARG1L	eax
+#define _ASM_ARG2L	edx
+#define _ASM_ARG3L	ecx
+
+#define _ASM_ARG1W	ax
+#define _ASM_ARG2W	dx
+#define _ASM_ARG3W	cx
+
+#define _ASM_ARG1B	al
+#define _ASM_ARG2B	dl
+#define _ASM_ARG3B	cl
+
+#else
+/* 64 bit */
+
+#define _ASM_ARG1	_ASM_DI
+#define _ASM_ARG2	_ASM_SI
+#define _ASM_ARG3	_ASM_DX
+#define _ASM_ARG4	_ASM_CX
+#define _ASM_ARG5	r8
+#define _ASM_ARG6	r9
+
+#define _ASM_ARG1Q	rdi
+#define _ASM_ARG2Q	rsi
+#define _ASM_ARG3Q	rdx
+#define _ASM_ARG4Q	rcx
+#define _ASM_ARG5Q	r8
+#define _ASM_ARG6Q	r9
+
+#define _ASM_ARG1L	edi
+#define _ASM_ARG2L	esi
+#define _ASM_ARG3L	edx
+#define _ASM_ARG4L	ecx
+#define _ASM_ARG5L	r8d
+#define _ASM_ARG6L	r9d
+
+#define _ASM_ARG1W	di
+#define _ASM_ARG2W	si
+#define _ASM_ARG3W	dx
+#define _ASM_ARG4W	cx
+#define _ASM_ARG5W	r8w
+#define _ASM_ARG6W	r9w
+
+#define _ASM_ARG1B	dil
+#define _ASM_ARG2B	sil
+#define _ASM_ARG3B	dl
+#define _ASM_ARG4B	cl
+#define _ASM_ARG5B	r8b
+#define _ASM_ARG6B	r9b
+
+#endif
+
 /* Exception table entry */
 #ifdef __ASSEMBLY__
 # define _ASM_EXTABLE(from,to)			\

@@ -40,7 +40,7 @@ static inline unsigned long array_index_mask_nospec(unsigned long index,

	asm volatile ("cmp %1,%2; sbb %0,%0;"
			:"=r" (mask)
-			:"r"(size),"r" (index)
+			:"g"(size),"r" (index)
			:"cc");
	return mask;
 }

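The constraint change ("r" to "g") only widens what the compiler may pass for size; the masking trick itself is unchanged. How the cmp/sbb pair behaves, worked through in an illustrative wrapper:

    unsigned long clamped(unsigned long index, unsigned long size)
    {
        /* cmp sets carry iff index < size; sbb mask,mask then yields:
         *   index = 3, size = 8 -> mask = ~0UL, 3 & mask = 3
         *   index = 9, size = 8 -> mask = 0,    9 & mask = 0
         * so a speculatively out-of-bounds index collapses to 0. */
        return index & array_index_mask_nospec(index, size);
    }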
@@ -28,6 +28,7 @@ enum cpuid_leafs
	CPUID_8000_000A_EDX,
	CPUID_7_ECX,
	CPUID_8000_0007_EBX,
+	CPUID_7_EDX,
 };

 #ifdef CONFIG_X86_FEATURE_NAMES

@@ -78,8 +79,9 @@ extern const char * const x86_bug_flags[NBUGINTS*32];
	   CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 15, feature_bit) ||	\
	   CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 16, feature_bit) ||	\
	   CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 17, feature_bit) ||	\
+	   CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 18, feature_bit) ||	\
	   REQUIRED_MASK_CHECK					  ||	\
-	   BUILD_BUG_ON_ZERO(NCAPINTS != 18))
+	   BUILD_BUG_ON_ZERO(NCAPINTS != 19))

 #define DISABLED_MASK_BIT_SET(feature_bit)				\
	( CHECK_BIT_IN_MASK_WORD(DISABLED_MASK,  0, feature_bit) ||	\

@@ -100,8 +102,9 @@ extern const char * const x86_bug_flags[NBUGINTS*32];
	   CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 15, feature_bit) ||	\
	   CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 16, feature_bit) ||	\
	   CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 17, feature_bit) ||	\
+	   CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 18, feature_bit) ||	\
	   DISABLED_MASK_CHECK					  ||	\
-	   BUILD_BUG_ON_ZERO(NCAPINTS != 18))
+	   BUILD_BUG_ON_ZERO(NCAPINTS != 19))

 #define cpu_has(c, bit)							\
	(__builtin_constant_p(bit) && REQUIRED_MASK_BIT_SET(bit) ? 1 :	\

@@ -12,7 +12,7 @@
 /*
  * Defines x86 CPU feature bits
  */
-#define NCAPINTS	18	/* N 32-bit words worth of info */
+#define NCAPINTS	19	/* N 32-bit words worth of info */
 #define NBUGINTS	1	/* N 32-bit bug flags */

 /*

@@ -194,13 +194,28 @@
 #define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */

 #define X86_FEATURE_INTEL_PT	( 7*32+15) /* Intel Processor Trace */
-#define X86_FEATURE_RSB_CTXSW	( 7*32+19) /* Fill RSB on context switches */
+#define X86_FEATURE_RSB_CTXSW	( 7*32+19) /* "" Fill RSB on context switches */

-#define X86_FEATURE_RETPOLINE	( 7*32+29) /* "" Generic Retpoline mitigation for Spectre variant 2 */
-#define X86_FEATURE_RETPOLINE_AMD ( 7*32+30) /* "" AMD Retpoline mitigation for Spectre variant 2 */
+#define X86_FEATURE_MSR_SPEC_CTRL ( 7*32+16) /* "" MSR SPEC_CTRL is implemented */
+#define X86_FEATURE_SSBD	( 7*32+17) /* Speculative Store Bypass Disable */
+
+#define X86_FEATURE_RETPOLINE	( 7*32+29) /* Generic Retpoline mitigation for Spectre variant 2 */
+#define X86_FEATURE_RETPOLINE_AMD ( 7*32+30) /* AMD Retpoline mitigation for Spectre variant 2 */
 /* Because the ALTERNATIVE scheme is for members of the X86_FEATURE club... */
 #define X86_FEATURE_KAISER	( 7*32+31) /* CONFIG_PAGE_TABLE_ISOLATION w/o nokaiser */

 #define X86_FEATURE_USE_IBPB	( 7*32+21) /* "" Indirect Branch Prediction Barrier enabled*/
 #define X86_FEATURE_USE_IBRS_FW	( 7*32+22) /* "" Use IBRS during runtime firmware calls */
+#define X86_FEATURE_SPEC_STORE_BYPASS_DISABLE ( 7*32+23) /* "" Disable Speculative Store Bypass. */
+#define X86_FEATURE_LS_CFG_SSBD	( 7*32+24) /* "" AMD SSBD implementation */
+
+#define X86_FEATURE_IBRS	( 7*32+25) /* Indirect Branch Restricted Speculation */
+#define X86_FEATURE_IBPB	( 7*32+26) /* Indirect Branch Prediction Barrier */
+#define X86_FEATURE_STIBP	( 7*32+27) /* Single Thread Indirect Branch Predictors */
+#define X86_FEATURE_ZEN		( 7*32+28) /* "" CPU is AMD family 0x17 (Zen) */
+

 /* Virtualization flags: Linux defined, word 8 */
 #define X86_FEATURE_TPR_SHADOW	( 8*32+ 0) /* Intel TPR Shadow */
 #define X86_FEATURE_VNMI	( 8*32+ 1) /* Intel Virtual NMI */

@@ -251,6 +266,10 @@
 /* AMD-defined CPU features, CPUID level 0x80000008 (ebx), word 13 */
 #define X86_FEATURE_CLZERO	(13*32+0) /* CLZERO instruction */
+#define X86_FEATURE_AMD_IBPB	(13*32+12) /* Indirect Branch Prediction Barrier */
+#define X86_FEATURE_AMD_IBRS	(13*32+14) /* Indirect Branch Restricted Speculation */
+#define X86_FEATURE_AMD_STIBP	(13*32+15) /* Single Thread Indirect Branch Predictors */
+#define X86_FEATURE_VIRT_SSBD	(13*32+25) /* Virtualized Speculative Store Bypass Disable */

 /* Thermal and Power Management Leaf, CPUID level 0x00000006 (eax), word 14 */
 #define X86_FEATURE_DTHERM	(14*32+ 0) /* Digital Thermal Sensor */

@@ -285,6 +304,15 @@
 #define X86_FEATURE_SUCCOR	(17*32+1) /* Uncorrectable error containment and recovery */
 #define X86_FEATURE_SMCA	(17*32+3) /* Scalable MCA */

+
+/* Intel-defined CPU features, CPUID level 0x00000007:0 (EDX), word 18 */
+#define X86_FEATURE_AVX512_4VNNIW	(18*32+ 2) /* AVX-512 Neural Network Instructions */
+#define X86_FEATURE_AVX512_4FMAPS	(18*32+ 3) /* AVX-512 Multiply Accumulation Single precision */
+#define X86_FEATURE_SPEC_CTRL		(18*32+26) /* "" Speculation Control (IBRS + IBPB) */
+#define X86_FEATURE_INTEL_STIBP		(18*32+27) /* "" Single Thread Indirect Branch Predictors */
+#define X86_FEATURE_ARCH_CAPABILITIES	(18*32+29) /* IA32_ARCH_CAPABILITIES MSR (Intel) */
+#define X86_FEATURE_SPEC_CTRL_SSBD	(18*32+31) /* "" Speculative Store Bypass Disable */
+
 /*
  * BUG word(s)
  */

@@ -302,5 +330,6 @@
 #define X86_BUG_CPU_MELTDOWN	X86_BUG(14) /* CPU is affected by meltdown attack and needs kernel page table isolation */
 #define X86_BUG_SPECTRE_V1	X86_BUG(15) /* CPU is affected by Spectre variant 1 attack with conditional branches */
 #define X86_BUG_SPECTRE_V2	X86_BUG(16) /* CPU is affected by Spectre variant 2 attack with indirect branches */
+#define X86_BUG_SPEC_STORE_BYPASS X86_BUG(17) /* CPU is affected by speculative store bypass attack */

 #endif /* _ASM_X86_CPUFEATURES_H */

@@ -59,6 +59,7 @@
 #define DISABLED_MASK15	0
 #define DISABLED_MASK16	(DISABLE_PKU|DISABLE_OSPKE)
 #define DISABLED_MASK17	0
-#define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 18)
+#define DISABLED_MASK18	0
+#define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 19)

 #endif /* _ASM_X86_DISABLED_FEATURES_H */

@@ -3,6 +3,7 @@

 #include <asm/fpu/api.h>
 #include <asm/pgtable.h>
+#include <asm/nospec-branch.h>

 /*
  * We map the EFI regions needed for runtime services non-contiguously,

@@ -41,8 +42,10 @@ extern unsigned long asmlinkage efi_call_phys(void *, ...);
 ({									\
	efi_status_t __s;						\
	kernel_fpu_begin();						\
+	firmware_restrict_branch_speculation_start();			\
	__s = ((efi_##f##_t __attribute__((regparm(0)))*)		\
		efi.systab->runtime->f)(args);				\
+	firmware_restrict_branch_speculation_end();			\
	kernel_fpu_end();						\
	__s;								\
 })

@@ -51,8 +54,10 @@ extern unsigned long asmlinkage efi_call_phys(void *, ...);
 #define __efi_call_virt(f, args...) \
 ({									\
	kernel_fpu_begin();						\
+	firmware_restrict_branch_speculation_start();			\
	((efi_##f##_t __attribute__((regparm(0)))*)			\
		efi.systab->runtime->f)(args);				\
+	firmware_restrict_branch_speculation_end();			\
	kernel_fpu_end();						\
 })

@@ -73,7 +78,9 @@ extern u64 asmlinkage efi_call(void *fp, ...);
	efi_sync_low_kernel_mappings();					\
	preempt_disable();						\
	__kernel_fpu_begin();						\
+	firmware_restrict_branch_speculation_start();			\
	__s = efi_call((void *)efi.systab->runtime->f, __VA_ARGS__);	\
+	firmware_restrict_branch_speculation_end();			\
	__kernel_fpu_end();						\
	preempt_enable();						\
	__s;								\

@@ -12,6 +12,7 @@
 */

 #define INTEL_FAM6_CORE_YONAH		0x0E
+
 #define INTEL_FAM6_CORE2_MEROM		0x0F
 #define INTEL_FAM6_CORE2_MEROM_L	0x16
 #define INTEL_FAM6_CORE2_PENRYN	0x17

@@ -20,6 +21,7 @@
 #define INTEL_FAM6_NEHALEM		0x1E
 #define INTEL_FAM6_NEHALEM_EP		0x1A
 #define INTEL_FAM6_NEHALEM_EX		0x2E
+
 #define INTEL_FAM6_WESTMERE		0x25
 #define INTEL_FAM6_WESTMERE2		0x1F
 #define INTEL_FAM6_WESTMERE_EP		0x2C

@@ -36,9 +38,9 @@
 #define INTEL_FAM6_HASWELL_GT3E	0x46

 #define INTEL_FAM6_BROADWELL_CORE	0x3D
-#define INTEL_FAM6_BROADWELL_XEON_D	0x56
 #define INTEL_FAM6_BROADWELL_GT3E	0x47
 #define INTEL_FAM6_BROADWELL_X		0x4F
+#define INTEL_FAM6_BROADWELL_XEON_D	0x56

 #define INTEL_FAM6_SKYLAKE_MOBILE	0x4E
 #define INTEL_FAM6_SKYLAKE_DESKTOP	0x5E

@@ -56,13 +58,15 @@
 #define INTEL_FAM6_ATOM_SILVERMONT1	0x37 /* BayTrail/BYT / Valleyview */
 #define INTEL_FAM6_ATOM_SILVERMONT2	0x4D /* Avaton/Rangely */
 #define INTEL_FAM6_ATOM_AIRMONT	0x4C /* CherryTrail / Braswell */
-#define INTEL_FAM6_ATOM_MERRIFIELD1	0x4A /* Tangier */
-#define INTEL_FAM6_ATOM_MERRIFIELD2	0x5A /* Annidale */
+#define INTEL_FAM6_ATOM_MERRIFIELD	0x4A /* Tangier */
+#define INTEL_FAM6_ATOM_MOOREFIELD	0x5A /* Annidale */
 #define INTEL_FAM6_ATOM_GOLDMONT	0x5C
 #define INTEL_FAM6_ATOM_DENVERTON	0x5F /* Goldmont Microserver */
+#define INTEL_FAM6_ATOM_GEMINI_LAKE	0x7A

 /* Xeon Phi */

 #define INTEL_FAM6_XEON_PHI_KNL	0x57 /* Knights Landing */
+#define INTEL_FAM6_XEON_PHI_KNM	0x85 /* Knights Mill */

 #endif /* _ASM_X86_INTEL_FAMILY_H */

@@ -8,7 +8,7 @@
 * Interrupt control:
 */

-static inline unsigned long native_save_fl(void)
+extern inline unsigned long native_save_fl(void)
 {
	unsigned long flags;

@@ -3,12 +3,18 @@

 #include <linux/spinlock.h>
 #include <linux/mutex.h>
+#include <linux/atomic.h>

 /*
- * The x86 doesn't have a mmu context, but
- * we put the segment information here.
+ * x86 has arch-specific MMU state beyond what lives in mm_struct.
 */
 typedef struct {
+	/*
+	 * ctx_id uniquely identifies this mm_struct. A ctx_id will never
+	 * be reused, and zero is not a valid ctx_id.
+	 */
+	u64 ctx_id;
+
 #ifdef CONFIG_MODIFY_LDT_SYSCALL
	struct ldt_struct *ldt;
 #endif

@@ -24,6 +30,11 @@ typedef struct {
	atomic_t perf_rdpmc_allowed;	/* nonzero if rdpmc is allowed */
 } mm_context_t;

+#define INIT_MM_CONTEXT(mm)						\
+	.context = {							\
+		.ctx_id = 1,						\
+	}
+
 void leave_mm(int cpu);

 #endif /* _ASM_X86_MMU_H */

@@ -11,6 +11,9 @@
 #include <asm/tlbflush.h>
 #include <asm/paravirt.h>
 #include <asm/mpx.h>
+
+extern atomic64_t last_mm_ctx_id;
+
 #ifndef CONFIG_PARAVIRT
 static inline void paravirt_activate_mm(struct mm_struct *prev,
					struct mm_struct *next)

@@ -52,15 +55,15 @@ struct ldt_struct {
 /*
  * Used for LDT copy/destruction.
  */
-int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
-void destroy_context(struct mm_struct *mm);
+int init_new_context_ldt(struct task_struct *tsk, struct mm_struct *mm);
+void destroy_context_ldt(struct mm_struct *mm);
 #else	/* CONFIG_MODIFY_LDT_SYSCALL */
-static inline int init_new_context(struct task_struct *tsk,
-				   struct mm_struct *mm)
+static inline int init_new_context_ldt(struct task_struct *tsk,
+				       struct mm_struct *mm)
 {
	return 0;
 }
-static inline void destroy_context(struct mm_struct *mm) {}
+static inline void destroy_context_ldt(struct mm_struct *mm) {}
 #endif

 static inline void load_mm_ldt(struct mm_struct *mm)

@@ -102,6 +105,18 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
	this_cpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
 }

+static inline int init_new_context(struct task_struct *tsk,
+				   struct mm_struct *mm)
+{
+	mm->context.ctx_id = atomic64_inc_return(&last_mm_ctx_id);
+	init_new_context_ldt(tsk, mm);
+	return 0;
+}
+static inline void destroy_context(struct mm_struct *mm)
+{
+	destroy_context_ldt(mm);
+}
+
 extern void switch_mm(struct mm_struct *prev, struct mm_struct *next,
		      struct task_struct *tsk);

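init_new_context() now stamps every mm with a never-reused id drawn from last_mm_ctx_id. The consumer is the context-switch IBPB logic; roughly (a hedged simplification of the switch_mm() check elsewhere in this series, not a verbatim quote):

    /* A differing ctx_id means we are entering a different address space,
     * so anything an attacker left in the branch predictor is flushed. */
    if (this_cpu_read(cpu_tlbstate.last_ctx_id) != next->context.ctx_id)
        indirect_branch_prediction_barrier();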
@@ -32,6 +32,15 @@
 #define EFER_FFXSR		(1<<_EFER_FFXSR)

 /* Intel MSRs. Some also available on other CPUs */
+#define MSR_IA32_SPEC_CTRL		0x00000048 /* Speculation Control */
+#define SPEC_CTRL_IBRS			(1 << 0)   /* Indirect Branch Restricted Speculation */
+#define SPEC_CTRL_STIBP			(1 << 1)   /* Single Thread Indirect Branch Predictors */
+#define SPEC_CTRL_SSBD_SHIFT		2	   /* Speculative Store Bypass Disable bit */
+#define SPEC_CTRL_SSBD			(1 << SPEC_CTRL_SSBD_SHIFT) /* Speculative Store Bypass Disable */
+
+#define MSR_IA32_PRED_CMD		0x00000049 /* Prediction Command */
+#define PRED_CMD_IBPB			(1 << 0)   /* Indirect Branch Prediction Barrier */
+
 #define MSR_IA32_PERFCTR0		0x000000c1
 #define MSR_IA32_PERFCTR1		0x000000c2
 #define MSR_FSB_FREQ			0x000000cd

@@ -45,6 +54,16 @@
 #define SNB_C3_AUTO_UNDEMOTE		(1UL << 28)

 #define MSR_MTRRcap			0x000000fe
+
+#define MSR_IA32_ARCH_CAPABILITIES	0x0000010a
+#define ARCH_CAP_RDCL_NO		(1 << 0)   /* Not susceptible to Meltdown */
+#define ARCH_CAP_IBRS_ALL		(1 << 1)   /* Enhanced IBRS support */
+#define ARCH_CAP_SSB_NO			(1 << 4)   /*
+						    * Not susceptible to Speculative Store Bypass
+						    * attack, so no Speculative Store Bypass
+						    * control required.
+						    */
+
 #define MSR_IA32_BBL_CR_CTL		0x00000119
 #define MSR_IA32_BBL_CR_CTL3		0x0000011e

@@ -132,6 +151,7 @@
 /* DEBUGCTLMSR bits (others vary by model): */
 #define DEBUGCTLMSR_LBR			(1UL <<  0) /* last branch recording */
+#define DEBUGCTLMSR_BTF_SHIFT		1
 #define DEBUGCTLMSR_BTF			(1UL <<  1) /* single-step on branches */
 #define DEBUGCTLMSR_TR			(1UL <<  6)
 #define DEBUGCTLMSR_BTS			(1UL <<  7)

@@ -308,6 +328,8 @@
 #define MSR_AMD64_IBSOPDATA4		0xc001103d
 #define MSR_AMD64_IBS_REG_COUNT_MAX	8 /* includes MSR_AMD64_IBSBRTARGET */

+#define MSR_AMD64_VIRT_SPEC_CTRL	0xc001011f
+
 /* Fam 16h MSRs */
 #define MSR_F16H_L2I_PERF_CTL		0xc0010230
 #define MSR_F16H_L2I_PERF_CTR		0xc0010231

@@ -6,6 +6,7 @@
 #include <asm/alternative.h>
 #include <asm/alternative-asm.h>
 #include <asm/cpufeatures.h>
+#include <asm/msr-index.h>

 /*
  * Fill the CPU return stack buffer.

@@ -171,6 +172,14 @@ enum spectre_v2_mitigation {
	SPECTRE_V2_IBRS,
 };

+/* The Speculative Store Bypass disable variants */
+enum ssb_mitigation {
+	SPEC_STORE_BYPASS_NONE,
+	SPEC_STORE_BYPASS_DISABLE,
+	SPEC_STORE_BYPASS_PRCTL,
+	SPEC_STORE_BYPASS_SECCOMP,
+};
+
 extern char __indirect_thunk_start[];
 extern char __indirect_thunk_end[];

@@ -194,6 +203,51 @@ static inline void vmexit_fill_RSB(void)
 #endif
 }

+static __always_inline
+void alternative_msr_write(unsigned int msr, u64 val, unsigned int feature)
+{
+	asm volatile(ALTERNATIVE("", "wrmsr", %c[feature])
+		: : "c" (msr),
+		    "a" ((u32)val),
+		    "d" ((u32)(val >> 32)),
+		    [feature] "i" (feature)
+		: "memory");
+}
+
+static inline void indirect_branch_prediction_barrier(void)
+{
+	u64 val = PRED_CMD_IBPB;
+
+	alternative_msr_write(MSR_IA32_PRED_CMD, val, X86_FEATURE_USE_IBPB);
+}
+
+/* The Intel SPEC CTRL MSR base value cache */
+extern u64 x86_spec_ctrl_base;
+
+/*
+ * With retpoline, we must use IBRS to restrict branch prediction
+ * before calling into firmware.
+ *
+ * (Implemented as CPP macros due to header hell.)
+ */
+#define firmware_restrict_branch_speculation_start()			\
+do {									\
+	u64 val = x86_spec_ctrl_base | SPEC_CTRL_IBRS;			\
+									\
+	preempt_disable();						\
+	alternative_msr_write(MSR_IA32_SPEC_CTRL, val,			\
+			      X86_FEATURE_USE_IBRS_FW);			\
+} while (0)
+
+#define firmware_restrict_branch_speculation_end()			\
+do {									\
+	u64 val = x86_spec_ctrl_base;					\
+									\
+	alternative_msr_write(MSR_IA32_SPEC_CTRL, val,			\
+			      X86_FEATURE_USE_IBRS_FW);			\
+	preempt_enable();						\
+} while (0)
+
 #endif /* __ASSEMBLY__ */

 /*

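alternative_msr_write() costs nothing on CPUs that lack the feature: the wrmsr is patched in at boot only when the bit is set. Its runtime behaviour is equivalent to this hedged C rendering (the real version uses ALTERNATIVE patching, not a branch):

    static inline void alt_msr_write_sketch(unsigned int msr, u64 val,
                                            unsigned int feature)
    {
        if (boot_cpu_has(feature))  /* really: decided at boot, no runtime test */
            wrmsrl(msr, val);       /* %ecx = msr, %edx:%eax = val */
    }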
@@ -100,6 +100,7 @@
 #define REQUIRED_MASK15	0
 #define REQUIRED_MASK16	0
 #define REQUIRED_MASK17	0
-#define REQUIRED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 18)
+#define REQUIRED_MASK18	0
+#define REQUIRED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 19)

 #endif /* _ASM_X86_REQUIRED_FEATURES_H */

80	arch/x86/include/asm/spec-ctrl.h	Normal file

@@ -0,0 +1,80 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_X86_SPECCTRL_H_
+#define _ASM_X86_SPECCTRL_H_
+
+#include <linux/thread_info.h>
+#include <asm/nospec-branch.h>
+
+/*
+ * On VMENTER we must preserve whatever view of the SPEC_CTRL MSR
+ * the guest has, while on VMEXIT we restore the host view. This
+ * would be easier if SPEC_CTRL were architecturally maskable or
+ * shadowable for guests but this is not (currently) the case.
+ * Takes the guest view of SPEC_CTRL MSR as a parameter and also
+ * the guest's version of VIRT_SPEC_CTRL, if emulated.
+ */
+extern void x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool guest);
+
+/**
+ * x86_spec_ctrl_set_guest - Set speculation control registers for the guest
+ * @guest_spec_ctrl:		The guest content of MSR_SPEC_CTRL
+ * @guest_virt_spec_ctrl:	The guest controlled bits of MSR_VIRT_SPEC_CTRL
+ *				(may get translated to MSR_AMD64_LS_CFG bits)
+ *
+ * Avoids writing to the MSR if the content/bits are the same
+ */
+static inline
+void x86_spec_ctrl_set_guest(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl)
+{
+	x86_virt_spec_ctrl(guest_spec_ctrl, guest_virt_spec_ctrl, true);
+}
+
+/**
+ * x86_spec_ctrl_restore_host - Restore host speculation control registers
+ * @guest_spec_ctrl:		The guest content of MSR_SPEC_CTRL
+ * @guest_virt_spec_ctrl:	The guest controlled bits of MSR_VIRT_SPEC_CTRL
+ *				(may get translated to MSR_AMD64_LS_CFG bits)
+ *
+ * Avoids writing to the MSR if the content/bits are the same
+ */
+static inline
+void x86_spec_ctrl_restore_host(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl)
+{
+	x86_virt_spec_ctrl(guest_spec_ctrl, guest_virt_spec_ctrl, false);
+}
+
+/* AMD specific Speculative Store Bypass MSR data */
+extern u64 x86_amd_ls_cfg_base;
+extern u64 x86_amd_ls_cfg_ssbd_mask;
+
+static inline u64 ssbd_tif_to_spec_ctrl(u64 tifn)
+{
+	BUILD_BUG_ON(TIF_SSBD < SPEC_CTRL_SSBD_SHIFT);
+	return (tifn & _TIF_SSBD) >> (TIF_SSBD - SPEC_CTRL_SSBD_SHIFT);
+}
+
+static inline unsigned long ssbd_spec_ctrl_to_tif(u64 spec_ctrl)
+{
+	BUILD_BUG_ON(TIF_SSBD < SPEC_CTRL_SSBD_SHIFT);
+	return (spec_ctrl & SPEC_CTRL_SSBD) << (TIF_SSBD - SPEC_CTRL_SSBD_SHIFT);
+}
+
+static inline u64 ssbd_tif_to_amd_ls_cfg(u64 tifn)
+{
+	return (tifn & _TIF_SSBD) ? x86_amd_ls_cfg_ssbd_mask : 0ULL;
+}
+
+#ifdef CONFIG_SMP
+extern void speculative_store_bypass_ht_init(void);
+#else
+static inline void speculative_store_bypass_ht_init(void) { }
+#endif
+
+extern void speculative_store_bypass_update(unsigned long tif);
+
+static inline void speculative_store_bypass_update_current(void)
+{
+	speculative_store_bypass_update(current_thread_info()->flags);
+}
+
+#endif

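Both conversion helpers in the new header are pure shifts, and the BUILD_BUG_ON pins their direction. With the values this series uses, TIF_SSBD = 5 and SPEC_CTRL_SSBD_SHIFT = 2, so the shift distance is 5 - 2 = 3:

    /* ssbd_tif_to_spec_ctrl: (tifn & 0x20) >> 3 == 0x04 == SPEC_CTRL_SSBD
     * ssbd_spec_ctrl_to_tif: (ctrl & 0x04) << 3 == 0x20 == _TIF_SSBD     */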
@@ -92,6 +92,7 @@ struct thread_info {
 #define TIF_SIGPENDING		2	/* signal pending */
 #define TIF_NEED_RESCHED	3	/* rescheduling necessary */
 #define TIF_SINGLESTEP		4	/* reenable singlestep on user return*/
+#define TIF_SSBD		5	/* Reduced data speculation */
 #define TIF_SYSCALL_EMU		6	/* syscall emulation active */
 #define TIF_SYSCALL_AUDIT	7	/* syscall auditing active */
 #define TIF_SECCOMP		8	/* secure computing */

@@ -114,8 +115,9 @@ struct thread_info {
 #define _TIF_SYSCALL_TRACE	(1 << TIF_SYSCALL_TRACE)
 #define _TIF_NOTIFY_RESUME	(1 << TIF_NOTIFY_RESUME)
 #define _TIF_SIGPENDING		(1 << TIF_SIGPENDING)
-#define _TIF_SINGLESTEP		(1 << TIF_SINGLESTEP)
 #define _TIF_NEED_RESCHED	(1 << TIF_NEED_RESCHED)
+#define _TIF_SINGLESTEP		(1 << TIF_SINGLESTEP)
+#define _TIF_SSBD		(1 << TIF_SSBD)
 #define _TIF_SYSCALL_EMU	(1 << TIF_SYSCALL_EMU)
 #define _TIF_SYSCALL_AUDIT	(1 << TIF_SYSCALL_AUDIT)
 #define _TIF_SECCOMP		(1 << TIF_SECCOMP)

@@ -147,7 +149,7 @@ struct thread_info {
 /* flags to check in __switch_to() */
 #define _TIF_WORK_CTXSW							\
-	(_TIF_IO_BITMAP|_TIF_NOTSC|_TIF_BLOCKSTEP)
+	(_TIF_IO_BITMAP|_TIF_NOTSC|_TIF_BLOCKSTEP|_TIF_SSBD)

 #define _TIF_WORK_CTXSW_PREV (_TIF_WORK_CTXSW|_TIF_USER_RETURN_NOTIFY)
 #define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW)

@@ -68,6 +68,8 @@ static inline void invpcid_flush_all_nonglobals(void)
 struct tlb_state {
	struct mm_struct *active_mm;
	int state;
+	/* last user mm's ctx id */
+	u64 last_ctx_id;

	/*
	 * Access to this CR4 shadow and to H/W CR4 is protected by

@@ -109,6 +111,16 @@ static inline void cr4_clear_bits(unsigned long mask)
	}
 }

+static inline void cr4_toggle_bits(unsigned long mask)
+{
+	unsigned long cr4;
+
+	cr4 = this_cpu_read(cpu_tlbstate.cr4);
+	cr4 ^= mask;
+	this_cpu_write(cpu_tlbstate.cr4, cr4);
+	__write_cr4(cr4);
+}
+
 /* Read the CR4 shadow. */
 static inline unsigned long cr4_read_shadow(void)
 {

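cr4_toggle_bits() lets __switch_to() flip a CR4 bit with a single read-modify-write of the shadow. A hedged sketch of the call site this series pairs it with (the TIF_NOTSC / CR4.TSD pairing; not shown verbatim in this hunk):

    /* If prev and next disagree on TIF_NOTSC, CR4.TSD must change. */
    if ((tifp ^ tifn) & _TIF_NOTSC)
        cr4_toggle_bits(X86_CR4_TSD);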
@@ -53,6 +53,7 @@ obj-y			+= alternative.o i8253.o pci-nommu.o hw_breakpoint.o
 obj-y			+= tsc.o tsc_msr.o io_delay.o rtc.o
 obj-y			+= pci-iommu_table.o
 obj-y			+= resource.o
+obj-y			+= irqflags.o

 obj-y				+= process.o
 obj-y				+= fpu/

@@ -9,6 +9,7 @@
 #include <asm/processor.h>
 #include <asm/apic.h>
 #include <asm/cpu.h>
+#include <asm/spec-ctrl.h>
 #include <asm/smp.h>
 #include <asm/pci-direct.h>
 #include <asm/delay.h>

@@ -519,6 +520,26 @@ static void bsp_init_amd(struct cpuinfo_x86 *c)

	if (cpu_has(c, X86_FEATURE_MWAITX))
		use_mwaitx_delay();
+
+	if (c->x86 >= 0x15 && c->x86 <= 0x17) {
+		unsigned int bit;
+
+		switch (c->x86) {
+		case 0x15: bit = 54; break;
+		case 0x16: bit = 33; break;
+		case 0x17: bit = 10; break;
+		default: return;
+		}
+		/*
+		 * Try to cache the base value so further operations can
+		 * avoid RMW. If that faults, do not enable SSBD.
+		 */
+		if (!rdmsrl_safe(MSR_AMD64_LS_CFG, &x86_amd_ls_cfg_base)) {
+			setup_force_cpu_cap(X86_FEATURE_LS_CFG_SSBD);
+			setup_force_cpu_cap(X86_FEATURE_SSBD);
+			x86_amd_ls_cfg_ssbd_mask = 1ULL << bit;
+		}
+	}
 }

 static void early_init_amd(struct cpuinfo_x86 *c)

@@ -692,6 +713,17 @@ static void init_amd_bd(struct cpuinfo_x86 *c)
	}
 }

+static void init_amd_zn(struct cpuinfo_x86 *c)
+{
+	set_cpu_cap(c, X86_FEATURE_ZEN);
+	/*
+	 * Fix erratum 1076: CPB feature bit not being set in CPUID. It affects
+	 * all up to and including B1.
+	 */
+	if (c->x86_model <= 1 && c->x86_mask <= 1)
+		set_cpu_cap(c, X86_FEATURE_CPB);
+}
+
 static void init_amd(struct cpuinfo_x86 *c)
 {
	u32 dummy;

@@ -722,6 +754,7 @@ static void init_amd(struct cpuinfo_x86 *c)
	case 0x10: init_amd_gh(c); break;
	case 0x12: init_amd_ln(c); break;
	case 0x15: init_amd_bd(c); break;
+	case 0x17: init_amd_zn(c); break;
	}

	/* Enable workaround for FXSAVE leak */

@@ -791,8 +824,9 @@ static void init_amd(struct cpuinfo_x86 *c)
	if (cpu_has(c, X86_FEATURE_3DNOW) || cpu_has(c, X86_FEATURE_LM))
		set_cpu_cap(c, X86_FEATURE_3DNOWPREFETCH);

-	/* AMD CPUs don't reset SS attributes on SYSRET */
-	set_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);
+	/* AMD CPUs don't reset SS attributes on SYSRET, Xen does. */
+	if (!cpu_has(c, X86_FEATURE_XENPV))
+		set_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);
 }

 #ifdef CONFIG_X86_32

@@ -11,8 +11,10 @@
 #include <linux/utsname.h>
 #include <linux/cpu.h>
 #include <linux/module.h>
+#include <linux/nospec.h>
+#include <linux/prctl.h>

-#include <asm/nospec-branch.h>
+#include <asm/spec-ctrl.h>
 #include <asm/cmdline.h>
 #include <asm/bugs.h>
 #include <asm/processor.h>

@@ -26,6 +28,27 @@
 #include <asm/intel-family.h>

 static void __init spectre_v2_select_mitigation(void);
+static void __init ssb_select_mitigation(void);
+
+/*
+ * Our boot-time value of the SPEC_CTRL MSR. We read it once so that any
+ * writes to SPEC_CTRL contain whatever reserved bits have been set.
+ */
+u64 x86_spec_ctrl_base;
+EXPORT_SYMBOL_GPL(x86_spec_ctrl_base);
+
+/*
+ * The vendor and possibly platform specific bits which can be modified in
+ * x86_spec_ctrl_base.
+ */
+static u64 x86_spec_ctrl_mask = SPEC_CTRL_IBRS;
+
+/*
+ * AMD specific MSR info for Speculative Store Bypass control.
+ * x86_amd_ls_cfg_ssbd_mask is initialized in identify_boot_cpu().
+ */
+u64 x86_amd_ls_cfg_base;
+u64 x86_amd_ls_cfg_ssbd_mask;

 void __init check_bugs(void)
 {

@@ -36,9 +59,27 @@ void __init check_bugs(void)
		print_cpu_info(&boot_cpu_data);
	}

+	/*
+	 * Read the SPEC_CTRL MSR to account for reserved bits which may
+	 * have unknown values. AMD64_LS_CFG MSR is cached in the early AMD
+	 * init code as it is not enumerated and depends on the family.
+	 */
+	if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
+		rdmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
+
+	/* Allow STIBP in MSR_SPEC_CTRL if supported */
+	if (boot_cpu_has(X86_FEATURE_STIBP))
+		x86_spec_ctrl_mask |= SPEC_CTRL_STIBP;
+
	/* Select the proper spectre mitigation before patching alternatives */
	spectre_v2_select_mitigation();

+	/*
+	 * Select proper mitigation for any exposure to the Speculative Store
+	 * Bypass vulnerability.
+	 */
+	ssb_select_mitigation();
+
 #ifdef CONFIG_X86_32
	/*
	 * Check whether we are able to run this kernel safely on SMP.

@@ -94,6 +135,73 @@ static const char *spectre_v2_strings[] = {

 static enum spectre_v2_mitigation spectre_v2_enabled = SPECTRE_V2_NONE;

+void
+x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest)
+{
+	u64 msrval, guestval, hostval = x86_spec_ctrl_base;
+	struct thread_info *ti = current_thread_info();
+
+	/* Is MSR_SPEC_CTRL implemented ? */
+	if (static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL)) {
+		/*
+		 * Restrict guest_spec_ctrl to supported values. Clear the
+		 * modifiable bits in the host base value and or the
+		 * modifiable bits from the guest value.
+		 */
+		guestval = hostval & ~x86_spec_ctrl_mask;
+		guestval |= guest_spec_ctrl & x86_spec_ctrl_mask;
+
+		/* SSBD controlled in MSR_SPEC_CTRL */
+		if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD))
+			hostval |= ssbd_tif_to_spec_ctrl(ti->flags);
+
+		if (hostval != guestval) {
+			msrval = setguest ? guestval : hostval;
+			wrmsrl(MSR_IA32_SPEC_CTRL, msrval);
+		}
+	}
+
+	/*
+	 * If SSBD is not handled in MSR_SPEC_CTRL on AMD, update
+	 * MSR_AMD64_L2_CFG or MSR_VIRT_SPEC_CTRL if supported.
+	 */
+	if (!static_cpu_has(X86_FEATURE_LS_CFG_SSBD) &&
+	    !static_cpu_has(X86_FEATURE_VIRT_SSBD))
+		return;
+
+	/*
+	 * If the host has SSBD mitigation enabled, force it in the host's
+	 * virtual MSR value. If its not permanently enabled, evaluate
+	 * current's TIF_SSBD thread flag.
+	 */
+	if (static_cpu_has(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE))
+		hostval = SPEC_CTRL_SSBD;
+	else
+		hostval = ssbd_tif_to_spec_ctrl(ti->flags);
+
+	/* Sanitize the guest value */
+	guestval = guest_virt_spec_ctrl & SPEC_CTRL_SSBD;
+
+	if (hostval != guestval) {
+		unsigned long tif;
+
+		tif = setguest ? ssbd_spec_ctrl_to_tif(guestval) :
+				 ssbd_spec_ctrl_to_tif(hostval);
+
+		speculative_store_bypass_update(tif);
+	}
+}
+EXPORT_SYMBOL_GPL(x86_virt_spec_ctrl);
+
+static void x86_amd_ssb_disable(void)
+{
+	u64 msrval = x86_amd_ls_cfg_base | x86_amd_ls_cfg_ssbd_mask;
+
+	if (boot_cpu_has(X86_FEATURE_VIRT_SSBD))
+		wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, SPEC_CTRL_SSBD);
+	else if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD))
+		wrmsrl(MSR_AMD64_LS_CFG, msrval);
+}
+
 #ifdef RETPOLINE
 static bool spectre_v2_bad_module;

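A worked pass through the MSR_SPEC_CTRL branch of x86_virt_spec_ctrl(), with hypothetical values:

    /* x86_spec_ctrl_base = 0, x86_spec_ctrl_mask = IBRS|SSBD = 0x5,
     * guest_spec_ctrl    = SSBD = 0x4:
     *
     *   guestval = (0 & ~0x5) | (0x4 & 0x5) = 0x4
     *
     * setguest=true  (VMENTER): wrmsrl(MSR_IA32_SPEC_CTRL, 0x4)
     * setguest=false (VMEXIT) : the host value is written back.
     * Bits outside the mask can never migrate from guest to host.  */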
@@ -162,8 +270,7 @@ static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
	if (cmdline_find_option_bool(boot_command_line, "nospectre_v2"))
		return SPECTRE_V2_CMD_NONE;
	else {
-		ret = cmdline_find_option(boot_command_line, "spectre_v2", arg,
-					  sizeof(arg));
+		ret = cmdline_find_option(boot_command_line, "spectre_v2", arg, sizeof(arg));
		if (ret < 0)
			return SPECTRE_V2_CMD_AUTO;

|
|||
cmd == SPECTRE_V2_CMD_RETPOLINE_AMD ||
|
||||
cmd == SPECTRE_V2_CMD_RETPOLINE_GENERIC) &&
|
||||
!IS_ENABLED(CONFIG_RETPOLINE)) {
|
||||
pr_err("%s selected but not compiled in. Switching to AUTO select\n",
|
||||
mitigation_options[i].option);
|
||||
pr_err("%s selected but not compiled in. Switching to AUTO select\n", mitigation_options[i].option);
|
||||
return SPECTRE_V2_CMD_AUTO;
|
||||
}
|
||||
|
||||
|
@@ -255,14 +361,14 @@ static void __init spectre_v2_select_mitigation(void)
			goto retpoline_auto;
		break;
	}
-	pr_err("kernel not compiled with retpoline; no mitigation available!");
+	pr_err("Spectre mitigation: kernel not compiled with retpoline; no mitigation available!");
	return;

 retpoline_auto:
	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
	retpoline_amd:
		if (!boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) {
-			pr_err("LFENCE not serializing. Switching to generic retpoline\n");
+			pr_err("Spectre mitigation: LFENCE not serializing, switching to generic retpoline\n");
			goto retpoline_generic;
		}
		mode = retp_compiler() ? SPECTRE_V2_RETPOLINE_AMD :

@@ -280,7 +386,7 @@ retpoline_auto:
	pr_info("%s\n", spectre_v2_strings[mode]);

	/*
-	 * If neither SMEP or KPTI are available, there is a risk of
+	 * If neither SMEP nor PTI are available, there is a risk of
	 * hitting userspace addresses in the RSB after a context switch
	 * from a shallow call stack to a deeper one. To prevent this fill
	 * the entire RSB, even when using IBRS.

@ -294,38 +400,309 @@ retpoline_auto:
|
|||
if ((!boot_cpu_has(X86_FEATURE_KAISER) &&
|
||||
!boot_cpu_has(X86_FEATURE_SMEP)) || is_skylake_era()) {
|
||||
setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
|
||||
pr_info("Filling RSB on context switch\n");
|
||||
pr_info("Spectre v2 mitigation: Filling RSB on context switch\n");
|
||||
}
|
||||
|
||||
/* Initialize Indirect Branch Prediction Barrier if supported */
|
||||
if (boot_cpu_has(X86_FEATURE_IBPB)) {
|
||||
setup_force_cpu_cap(X86_FEATURE_USE_IBPB);
|
||||
pr_info("Spectre v2 mitigation: Enabling Indirect Branch Prediction Barrier\n");
|
||||
}
|
||||
|
||||
/*
|
||||
* Retpoline means the kernel is safe because it has no indirect
|
||||
* branches. But firmware isn't, so use IBRS to protect that.
|
||||
*/
|
||||
if (boot_cpu_has(X86_FEATURE_IBRS)) {
|
||||
setup_force_cpu_cap(X86_FEATURE_USE_IBRS_FW);
|
||||
pr_info("Enabling Restricted Speculation for firmware calls\n");
|
||||
}
|
||||
}
|
||||
|
||||
#undef pr_fmt
|
||||
#define pr_fmt(fmt) "Speculative Store Bypass: " fmt
|
||||
|
||||
static enum ssb_mitigation ssb_mode = SPEC_STORE_BYPASS_NONE;
|
||||
|
||||
/* The kernel command line selection */
|
||||
enum ssb_mitigation_cmd {
|
||||
SPEC_STORE_BYPASS_CMD_NONE,
|
||||
SPEC_STORE_BYPASS_CMD_AUTO,
|
||||
SPEC_STORE_BYPASS_CMD_ON,
|
||||
SPEC_STORE_BYPASS_CMD_PRCTL,
|
||||
SPEC_STORE_BYPASS_CMD_SECCOMP,
|
||||
};
|
||||
|
||||
static const char *ssb_strings[] = {
|
||||
[SPEC_STORE_BYPASS_NONE] = "Vulnerable",
|
||||
[SPEC_STORE_BYPASS_DISABLE] = "Mitigation: Speculative Store Bypass disabled",
|
||||
[SPEC_STORE_BYPASS_PRCTL] = "Mitigation: Speculative Store Bypass disabled via prctl",
|
||||
[SPEC_STORE_BYPASS_SECCOMP] = "Mitigation: Speculative Store Bypass disabled via prctl and seccomp",
|
||||
};
|
||||
|
||||
static const struct {
|
||||
const char *option;
|
||||
enum ssb_mitigation_cmd cmd;
|
||||
} ssb_mitigation_options[] = {
|
||||
{ "auto", SPEC_STORE_BYPASS_CMD_AUTO }, /* Platform decides */
|
||||
{ "on", SPEC_STORE_BYPASS_CMD_ON }, /* Disable Speculative Store Bypass */
|
||||
{ "off", SPEC_STORE_BYPASS_CMD_NONE }, /* Don't touch Speculative Store Bypass */
|
||||
{ "prctl", SPEC_STORE_BYPASS_CMD_PRCTL }, /* Disable Speculative Store Bypass via prctl */
|
||||
{ "seccomp", SPEC_STORE_BYPASS_CMD_SECCOMP }, /* Disable Speculative Store Bypass via prctl and seccomp */
|
||||
};
|
||||
|
||||
static enum ssb_mitigation_cmd __init ssb_parse_cmdline(void)
|
||||
{
|
||||
enum ssb_mitigation_cmd cmd = SPEC_STORE_BYPASS_CMD_AUTO;
|
||||
char arg[20];
|
||||
int ret, i;
|
||||
|
||||
if (cmdline_find_option_bool(boot_command_line, "nospec_store_bypass_disable")) {
|
||||
return SPEC_STORE_BYPASS_CMD_NONE;
|
||||
} else {
|
||||
ret = cmdline_find_option(boot_command_line, "spec_store_bypass_disable",
|
||||
arg, sizeof(arg));
|
||||
if (ret < 0)
|
||||
return SPEC_STORE_BYPASS_CMD_AUTO;
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(ssb_mitigation_options); i++) {
|
||||
if (!match_option(arg, ret, ssb_mitigation_options[i].option))
|
||||
continue;
|
||||
|
||||
cmd = ssb_mitigation_options[i].cmd;
|
||||
break;
|
||||
}
|
||||
|
||||
if (i >= ARRAY_SIZE(ssb_mitigation_options)) {
|
||||
pr_err("unknown option (%s). Switching to AUTO select\n", arg);
|
||||
return SPEC_STORE_BYPASS_CMD_AUTO;
|
||||
}
|
||||
}
|
||||
|
||||
return cmd;
|
||||
}
|
||||
|
||||
static enum ssb_mitigation __init __ssb_select_mitigation(void)
|
||||
{
|
||||
enum ssb_mitigation mode = SPEC_STORE_BYPASS_NONE;
|
||||
enum ssb_mitigation_cmd cmd;
|
||||
|
||||
if (!boot_cpu_has(X86_FEATURE_SSBD))
|
||||
return mode;
|
||||
|
||||
cmd = ssb_parse_cmdline();
|
||||
if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS) &&
|
||||
(cmd == SPEC_STORE_BYPASS_CMD_NONE ||
|
||||
cmd == SPEC_STORE_BYPASS_CMD_AUTO))
|
||||
return mode;
|
||||
|
||||
switch (cmd) {
|
||||
case SPEC_STORE_BYPASS_CMD_AUTO:
|
||||
case SPEC_STORE_BYPASS_CMD_SECCOMP:
|
||||
/*
|
||||
* Choose prctl+seccomp as the default mode if seccomp is
|
||||
* enabled.
|
||||
*/
|
||||
if (IS_ENABLED(CONFIG_SECCOMP))
|
||||
mode = SPEC_STORE_BYPASS_SECCOMP;
|
||||
else
|
||||
mode = SPEC_STORE_BYPASS_PRCTL;
|
||||
break;
|
||||
case SPEC_STORE_BYPASS_CMD_ON:
|
||||
mode = SPEC_STORE_BYPASS_DISABLE;
|
||||
break;
|
||||
case SPEC_STORE_BYPASS_CMD_PRCTL:
|
||||
mode = SPEC_STORE_BYPASS_PRCTL;
|
||||
break;
|
||||
case SPEC_STORE_BYPASS_CMD_NONE:
|
||||
break;
|
||||
}
|
||||
|
||||
/*
|
||||
* We have three CPU feature flags that are in play here:
|
||||
* - X86_BUG_SPEC_STORE_BYPASS - CPU is susceptible.
|
||||
* - X86_FEATURE_SSBD - CPU is able to turn off speculative store bypass
|
||||
* - X86_FEATURE_SPEC_STORE_BYPASS_DISABLE - engage the mitigation
|
||||
*/
|
||||
if (mode == SPEC_STORE_BYPASS_DISABLE) {
|
||||
setup_force_cpu_cap(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE);
|
||||
/*
|
||||
* Intel uses the SPEC CTRL MSR Bit(2) for this, while AMD uses
|
||||
* a completely different MSR and bit dependent on family.
|
||||
*/
|
||||
switch (boot_cpu_data.x86_vendor) {
|
||||
case X86_VENDOR_INTEL:
|
||||
x86_spec_ctrl_base |= SPEC_CTRL_SSBD;
|
||||
x86_spec_ctrl_mask |= SPEC_CTRL_SSBD;
|
||||
wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
|
||||
break;
|
||||
case X86_VENDOR_AMD:
|
||||
x86_amd_ssb_disable();
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
return mode;
|
||||
}
|
||||
|
||||
static void ssb_select_mitigation(void)
|
||||
{
|
||||
ssb_mode = __ssb_select_mitigation();
|
||||
|
||||
if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
|
||||
pr_info("%s\n", ssb_strings[ssb_mode]);
|
||||
}

#undef pr_fmt
#define pr_fmt(fmt)     "Speculation prctl: " fmt

static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl)
{
    bool update;

    if (ssb_mode != SPEC_STORE_BYPASS_PRCTL &&
        ssb_mode != SPEC_STORE_BYPASS_SECCOMP)
        return -ENXIO;

    switch (ctrl) {
    case PR_SPEC_ENABLE:
        /* If speculation is force disabled, enable is not allowed */
        if (task_spec_ssb_force_disable(task))
            return -EPERM;
        task_clear_spec_ssb_disable(task);
        update = test_and_clear_tsk_thread_flag(task, TIF_SSBD);
        break;
    case PR_SPEC_DISABLE:
        task_set_spec_ssb_disable(task);
        update = !test_and_set_tsk_thread_flag(task, TIF_SSBD);
        break;
    case PR_SPEC_FORCE_DISABLE:
        task_set_spec_ssb_disable(task);
        task_set_spec_ssb_force_disable(task);
        update = !test_and_set_tsk_thread_flag(task, TIF_SSBD);
        break;
    default:
        return -ERANGE;
    }

    /*
     * If being set on non-current task, delay setting the CPU
     * mitigation until it is next scheduled.
     */
    if (task == current && update)
        speculative_store_bypass_update_current();

    return 0;
}

int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
                             unsigned long ctrl)
{
    switch (which) {
    case PR_SPEC_STORE_BYPASS:
        return ssb_prctl_set(task, ctrl);
    default:
        return -ENODEV;
    }
}

#ifdef CONFIG_SECCOMP
void arch_seccomp_spec_mitigate(struct task_struct *task)
{
    if (ssb_mode == SPEC_STORE_BYPASS_SECCOMP)
        ssb_prctl_set(task, PR_SPEC_FORCE_DISABLE);
}
#endif

static int ssb_prctl_get(struct task_struct *task)
{
    switch (ssb_mode) {
    case SPEC_STORE_BYPASS_DISABLE:
        return PR_SPEC_DISABLE;
    case SPEC_STORE_BYPASS_SECCOMP:
    case SPEC_STORE_BYPASS_PRCTL:
        if (task_spec_ssb_force_disable(task))
            return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
        if (task_spec_ssb_disable(task))
            return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
        return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
    default:
        if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
            return PR_SPEC_ENABLE;
        return PR_SPEC_NOT_AFFECTED;
    }
}

int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
{
    switch (which) {
    case PR_SPEC_STORE_BYPASS:
        return ssb_prctl_get(task);
    default:
        return -ENODEV;
    }
}
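
A minimal userspace sketch (not part of this patch) of how a process would exercise the prctl interface above. The PR_* values match the uapi prctl constants introduced with this series; the fallback defines are only for building against older headers:

    #include <stdio.h>
    #include <sys/prctl.h>

    #ifndef PR_SET_SPECULATION_CTRL
    #define PR_GET_SPECULATION_CTRL 52
    #define PR_SET_SPECULATION_CTRL 53
    #define PR_SPEC_STORE_BYPASS    0
    #define PR_SPEC_PRCTL           (1UL << 0)
    #define PR_SPEC_ENABLE          (1UL << 1)
    #define PR_SPEC_DISABLE         (1UL << 2)
    #define PR_SPEC_FORCE_DISABLE   (1UL << 3)
    #endif

    int main(void)
    {
        /* Query the task's current SSB state; routed to ssb_prctl_get(). */
        long state = prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, 0, 0, 0);

        if (state < 0) {
            perror("PR_GET_SPECULATION_CTRL");
            return 1;
        }
        printf("ssb state: 0x%lx (per-task control available: %s)\n",
               state, (state & PR_SPEC_PRCTL) ? "yes" : "no");

        /* Opt this task into the mitigation; routed to ssb_prctl_set(). */
        if (prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
                  PR_SPEC_DISABLE, 0, 0))
            perror("PR_SET_SPECULATION_CTRL");
        return 0;
    }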

void x86_spec_ctrl_setup_ap(void)
{
    if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
        wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);

    if (ssb_mode == SPEC_STORE_BYPASS_DISABLE)
        x86_amd_ssb_disable();
}

#ifdef CONFIG_SYSFS
ssize_t cpu_show_meltdown(struct device *dev,
                          struct device_attribute *attr, char *buf)

static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
                               char *buf, unsigned int bug)
{
    if (!boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
    if (!boot_cpu_has_bug(bug))
        return sprintf(buf, "Not affected\n");
    if (boot_cpu_has(X86_FEATURE_KAISER))
        return sprintf(buf, "Mitigation: PTI\n");

    switch (bug) {
    case X86_BUG_CPU_MELTDOWN:
        if (boot_cpu_has(X86_FEATURE_KAISER))
            return sprintf(buf, "Mitigation: PTI\n");

        break;

    case X86_BUG_SPECTRE_V1:
        return sprintf(buf, "Mitigation: __user pointer sanitization\n");

    case X86_BUG_SPECTRE_V2:
        return sprintf(buf, "%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled],
                       boot_cpu_has(X86_FEATURE_USE_IBPB) ? ", IBPB" : "",
                       boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "",
                       spectre_v2_module_string());

    case X86_BUG_SPEC_STORE_BYPASS:
        return sprintf(buf, "%s\n", ssb_strings[ssb_mode]);

    default:
        break;
    }

    return sprintf(buf, "Vulnerable\n");
}

ssize_t cpu_show_spectre_v1(struct device *dev,
                            struct device_attribute *attr, char *buf)
ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
{
    if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V1))
        return sprintf(buf, "Not affected\n");
    return sprintf(buf, "Mitigation: __user pointer sanitization\n");
    return cpu_show_common(dev, attr, buf, X86_BUG_CPU_MELTDOWN);
}

ssize_t cpu_show_spectre_v2(struct device *dev,
                            struct device_attribute *attr, char *buf)
ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf)
{
    if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
        return sprintf(buf, "Not affected\n");
    return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V1);
}

    return sprintf(buf, "%s%s\n", spectre_v2_strings[spectre_v2_enabled],
                   spectre_v2_module_string());
ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf)
{
    return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V2);
}

ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *attr, char *buf)
{
    return cpu_show_common(dev, attr, buf, X86_BUG_SPEC_STORE_BYPASS);
}
#endif
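
A short userspace sketch (not from this patch) of reading the sysfs file that cpu_show_spec_store_bypass() above serves, under the vulnerabilities directory registered for this merge:

    #include <stdio.h>

    int main(void)
    {
        char buf[128];
        FILE *f = fopen("/sys/devices/system/cpu/vulnerabilities/spec_store_bypass", "r");

        if (!f) {
            perror("fopen");
            return 1;
        }
        /* Prints one of the ssb_strings[] values, e.g. "Mitigation: ...". */
        if (fgets(buf, sizeof(buf), f))
            printf("spec_store_bypass: %s", buf);
        fclose(f);
        return 0;
    }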

@@ -43,6 +43,8 @@
#include <asm/pat.h>
#include <asm/microcode.h>
#include <asm/microcode_intel.h>
#include <asm/intel-family.h>
#include <asm/cpu_device_id.h>

#ifdef CONFIG_X86_LOCAL_APIC
#include <asm/uv/uv.h>

@@ -674,6 +676,40 @@ static void apply_forced_caps(struct cpuinfo_x86 *c)
    }
}

static void init_speculation_control(struct cpuinfo_x86 *c)
{
    /*
     * The Intel SPEC_CTRL CPUID bit implies IBRS and IBPB support,
     * and they also have a different bit for STIBP support. Also,
     * a hypervisor might have set the individual AMD bits even on
     * Intel CPUs, for finer-grained selection of what's available.
     */
    if (cpu_has(c, X86_FEATURE_SPEC_CTRL)) {
        set_cpu_cap(c, X86_FEATURE_IBRS);
        set_cpu_cap(c, X86_FEATURE_IBPB);
        set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
    }

    if (cpu_has(c, X86_FEATURE_INTEL_STIBP))
        set_cpu_cap(c, X86_FEATURE_STIBP);

    if (cpu_has(c, X86_FEATURE_SPEC_CTRL_SSBD))
        set_cpu_cap(c, X86_FEATURE_SSBD);

    if (cpu_has(c, X86_FEATURE_AMD_IBRS)) {
        set_cpu_cap(c, X86_FEATURE_IBRS);
        set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
    }

    if (cpu_has(c, X86_FEATURE_AMD_IBPB))
        set_cpu_cap(c, X86_FEATURE_IBPB);

    if (cpu_has(c, X86_FEATURE_AMD_STIBP)) {
        set_cpu_cap(c, X86_FEATURE_STIBP);
        set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
    }
}

void get_cpu_cap(struct cpuinfo_x86 *c)
{
    u32 eax, ebx, ecx, edx;

@@ -686,14 +722,16 @@ void get_cpu_cap(struct cpuinfo_x86 *c)
        c->x86_capability[CPUID_1_EDX] = edx;
    }

    /* Thermal and Power Management Leaf: level 0x00000006 (eax) */
    if (c->cpuid_level >= 0x00000006)
        c->x86_capability[CPUID_6_EAX] = cpuid_eax(0x00000006);

    /* Additional Intel-defined flags: level 0x00000007 */
    if (c->cpuid_level >= 0x00000007) {
        cpuid_count(0x00000007, 0, &eax, &ebx, &ecx, &edx);

        c->x86_capability[CPUID_7_0_EBX] = ebx;

        c->x86_capability[CPUID_6_EAX] = cpuid_eax(0x00000006);
        c->x86_capability[CPUID_7_ECX] = ecx;
        c->x86_capability[CPUID_7_EDX] = edx;
    }

    /* Extended state features: level 0x0000000d */

@@ -764,6 +802,14 @@ void get_cpu_cap(struct cpuinfo_x86 *c)
        c->x86_capability[CPUID_8000_000A_EDX] = cpuid_edx(0x8000000a);

    init_scattered_cpuid_features(c);
    init_speculation_control(c);

    /*
     * Clear/Set all flags overridden by options, after probe.
     * This needs to happen each time we re-probe, which may happen
     * several times during CPU initialization.
     */
    apply_forced_caps(c);
}

static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c)

@@ -792,6 +838,75 @@ static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
#endif
}

static const __initconst struct x86_cpu_id cpu_no_speculation[] = {
    { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_CEDARVIEW,  X86_FEATURE_ANY },
    { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_CLOVERVIEW, X86_FEATURE_ANY },
    { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_LINCROFT,   X86_FEATURE_ANY },
    { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_PENWELL,    X86_FEATURE_ANY },
    { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_PINEVIEW,   X86_FEATURE_ANY },
    { X86_VENDOR_CENTAUR, 5 },
    { X86_VENDOR_INTEL, 5 },
    { X86_VENDOR_NSC, 5 },
    { X86_VENDOR_ANY, 4 },
    {}
};

static const __initconst struct x86_cpu_id cpu_no_meltdown[] = {
    { X86_VENDOR_AMD },
    {}
};

static const __initconst struct x86_cpu_id cpu_no_spec_store_bypass[] = {
    { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_PINEVIEW    },
    { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_LINCROFT    },
    { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_PENWELL     },
    { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_CLOVERVIEW  },
    { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_CEDARVIEW   },
    { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT1 },
    { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_AIRMONT     },
    { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT2 },
    { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_MERRIFIELD  },
    { X86_VENDOR_INTEL, 6, INTEL_FAM6_CORE_YONAH       },
    { X86_VENDOR_INTEL, 6, INTEL_FAM6_XEON_PHI_KNL     },
    { X86_VENDOR_INTEL, 6, INTEL_FAM6_XEON_PHI_KNM     },
    { X86_VENDOR_CENTAUR, 5,  },
    { X86_VENDOR_INTEL,   5,  },
    { X86_VENDOR_NSC,     5,  },
    { X86_VENDOR_AMD,  0x12,  },
    { X86_VENDOR_AMD,  0x11,  },
    { X86_VENDOR_AMD,  0x10,  },
    { X86_VENDOR_AMD,   0xf,  },
    { X86_VENDOR_ANY,     4,  },
    {}
};

static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
{
    u64 ia32_cap = 0;

    if (cpu_has(c, X86_FEATURE_ARCH_CAPABILITIES))
        rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap);

    if (!x86_match_cpu(cpu_no_spec_store_bypass) &&
        !(ia32_cap & ARCH_CAP_SSB_NO))
        setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS);

    if (x86_match_cpu(cpu_no_speculation))
        return;

    setup_force_cpu_bug(X86_BUG_SPECTRE_V1);
    setup_force_cpu_bug(X86_BUG_SPECTRE_V2);

    if (x86_match_cpu(cpu_no_meltdown))
        return;

    /* Rogue Data Cache Load? No! */
    if (ia32_cap & ARCH_CAP_RDCL_NO)
        return;

    setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN);
}

/*
 * Do minimum CPU detection early.
 * Fields really needed: vendor, cpuid_level, family, model, mask,

@@ -838,11 +953,7 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)

    setup_force_cpu_cap(X86_FEATURE_ALWAYS);

    if (c->x86_vendor != X86_VENDOR_AMD)
        setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN);

    setup_force_cpu_bug(X86_BUG_SPECTRE_V1);
    setup_force_cpu_bug(X86_BUG_SPECTRE_V2);
    cpu_set_bug_bits(c);

    fpu__init_system(c);

@@ -1131,6 +1242,7 @@ void identify_secondary_cpu(struct cpuinfo_x86 *c)
    enable_sep_cpu();
#endif
    mtrr_ap_init();
    x86_spec_ctrl_setup_ap();
}

struct msr_range {


@@ -46,4 +46,7 @@ extern const struct cpu_dev *const __x86_cpu_dev_start[],

extern void get_cpu_cap(struct cpuinfo_x86 *c);
extern void cpu_detect_cache_sizes(struct cpuinfo_x86 *c);

extern void x86_spec_ctrl_setup_ap(void);

#endif /* ARCH_X86_CPU_H */


@@ -13,6 +13,7 @@
#include <asm/msr.h>
#include <asm/bugs.h>
#include <asm/cpu.h>
#include <asm/intel-family.h>

#ifdef CONFIG_X86_64
#include <linux/topology.h>

@@ -25,6 +26,62 @@
#include <asm/apic.h>
#endif

/*
 * Early microcode releases for the Spectre v2 mitigation were broken.
 * Information taken from:
 * - https://newsroom.intel.com/wp-content/uploads/sites/11/2018/03/microcode-update-guidance.pdf
 * - https://kb.vmware.com/s/article/52345
 * - Microcode revisions observed in the wild
 * - Release note from 20180108 microcode release
 */
struct sku_microcode {
    u8  model;
    u8  stepping;
    u32 microcode;
};
static const struct sku_microcode spectre_bad_microcodes[] = {
    { INTEL_FAM6_KABYLAKE_DESKTOP,  0x0B, 0x80 },
    { INTEL_FAM6_KABYLAKE_DESKTOP,  0x0A, 0x80 },
    { INTEL_FAM6_KABYLAKE_DESKTOP,  0x09, 0x80 },
    { INTEL_FAM6_KABYLAKE_MOBILE,   0x0A, 0x80 },
    { INTEL_FAM6_KABYLAKE_MOBILE,   0x09, 0x80 },
    { INTEL_FAM6_SKYLAKE_X,         0x03, 0x0100013e },
    { INTEL_FAM6_SKYLAKE_X,         0x04, 0x0200003c },
    { INTEL_FAM6_BROADWELL_CORE,    0x04, 0x28 },
    { INTEL_FAM6_BROADWELL_GT3E,    0x01, 0x1b },
    { INTEL_FAM6_BROADWELL_XEON_D,  0x02, 0x14 },
    { INTEL_FAM6_BROADWELL_XEON_D,  0x03, 0x07000011 },
    { INTEL_FAM6_BROADWELL_X,       0x01, 0x0b000025 },
    { INTEL_FAM6_HASWELL_ULT,       0x01, 0x21 },
    { INTEL_FAM6_HASWELL_GT3E,      0x01, 0x18 },
    { INTEL_FAM6_HASWELL_CORE,      0x03, 0x23 },
    { INTEL_FAM6_HASWELL_X,         0x02, 0x3b },
    { INTEL_FAM6_HASWELL_X,         0x04, 0x10 },
    { INTEL_FAM6_IVYBRIDGE_X,       0x04, 0x42a },
    /* Observed in the wild */
    { INTEL_FAM6_SANDYBRIDGE_X,     0x06, 0x61b },
    { INTEL_FAM6_SANDYBRIDGE_X,     0x07, 0x712 },
};

static bool bad_spectre_microcode(struct cpuinfo_x86 *c)
{
    int i;

    /*
     * We know that hypervisors lie to us about the microcode version,
     * so we may as well hope that the one they run is the correct one.
     */
    if (cpu_has(c, X86_FEATURE_HYPERVISOR))
        return false;

    for (i = 0; i < ARRAY_SIZE(spectre_bad_microcodes); i++) {
        if (c->x86_model == spectre_bad_microcodes[i].model &&
            c->x86_mask == spectre_bad_microcodes[i].stepping)
            return (c->microcode <= spectre_bad_microcodes[i].microcode);
    }
    return false;
}

static void early_init_intel(struct cpuinfo_x86 *c)
{
    u64 misc_enable;

@@ -51,6 +108,22 @@ static void early_init_intel(struct cpuinfo_x86 *c)
        rdmsr(MSR_IA32_UCODE_REV, lower_word, c->microcode);
    }

    /* Now if any of them are set, check the blacklist and clear the lot */
    if ((cpu_has(c, X86_FEATURE_SPEC_CTRL) ||
         cpu_has(c, X86_FEATURE_INTEL_STIBP) ||
         cpu_has(c, X86_FEATURE_IBRS) || cpu_has(c, X86_FEATURE_IBPB) ||
         cpu_has(c, X86_FEATURE_STIBP)) && bad_spectre_microcode(c)) {
        pr_warn("Intel Spectre v2 broken microcode detected; disabling Speculation Control\n");
        setup_clear_cpu_cap(X86_FEATURE_IBRS);
        setup_clear_cpu_cap(X86_FEATURE_IBPB);
        setup_clear_cpu_cap(X86_FEATURE_STIBP);
        setup_clear_cpu_cap(X86_FEATURE_SPEC_CTRL);
        setup_clear_cpu_cap(X86_FEATURE_MSR_SPEC_CTRL);
        setup_clear_cpu_cap(X86_FEATURE_INTEL_STIBP);
        setup_clear_cpu_cap(X86_FEATURE_SSBD);
        setup_clear_cpu_cap(X86_FEATURE_SPEC_CTRL_SSBD);
    }

    /*
     * Atom erratum AAE44/AAF40/AAG38/AAH41:
     *


@@ -2294,9 +2294,6 @@ static ssize_t store_int_with_restart(struct device *s,
    if (check_interval == old_check_interval)
        return ret;

    if (check_interval < 1)
        check_interval = 1;

    mutex_lock(&mce_sysfs_mutex);
    mce_restart();
    mutex_unlock(&mce_sysfs_mutex);


@@ -229,7 +229,7 @@ void uncore_perf_event_update(struct intel_uncore_box *box, struct perf_event *e
    u64 prev_count, new_count, delta;
    int shift;

    if (event->hw.idx >= UNCORE_PMC_IDX_FIXED)
    if (event->hw.idx == UNCORE_PMC_IDX_FIXED)
        shift = 64 - uncore_fixed_ctr_bits(box);
    else
        shift = 64 - uncore_perf_ctr_bits(box);


@@ -240,7 +240,7 @@ static void nhmex_uncore_msr_enable_event(struct intel_uncore_box *box, struct p
{
    struct hw_perf_event *hwc = &event->hw;

    if (hwc->idx >= UNCORE_PMC_IDX_FIXED)
    if (hwc->idx == UNCORE_PMC_IDX_FIXED)
        wrmsrl(hwc->config_base, NHMEX_PMON_CTL_EN_BIT0);
    else if (box->pmu->type->event_mask & NHMEX_PMON_CTL_EN_BIT0)
        wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT22);

26
arch/x86/kernel/irqflags.S
Normal file

@@ -0,0 +1,26 @@
/* SPDX-License-Identifier: GPL-2.0 */

#include <asm/asm.h>
#include <asm-generic/export.h>
#include <linux/linkage.h>

/*
 * unsigned long native_save_fl(void)
 */
ENTRY(native_save_fl)
    pushf
    pop %_ASM_AX
    ret
ENDPROC(native_save_fl)
EXPORT_SYMBOL(native_save_fl)

/*
 * void native_restore_fl(unsigned long flags)
 * %eax/%rdi: flags
 */
ENTRY(native_restore_fl)
    push %_ASM_ARG1
    popf
    ret
ENDPROC(native_restore_fl)
EXPORT_SYMBOL(native_restore_fl)
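
For context, a hedged C sketch (not part of the patch) of the inline-asm equivalents these out-of-line stubs replace, shown for x86-64:

    /* Reads RFLAGS via pushf/pop, mirroring native_save_fl above. */
    static inline unsigned long save_fl_sketch(void)
    {
        unsigned long flags;

        asm volatile("pushf ; pop %0" : "=rm" (flags) : : "memory");
        return flags;
    }

    /* Writes RFLAGS via push/popf, mirroring native_restore_fl above. */
    static inline void restore_fl_sketch(unsigned long flags)
    {
        asm volatile("push %0 ; popf" : : "g" (flags) : "memory", "cc");
    }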

@@ -119,7 +119,7 @@ static void free_ldt_struct(struct ldt_struct *ldt)
 * we do not have to muck with descriptors here, that is
 * done in switch_mm() as needed.
 */
int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
int init_new_context_ldt(struct task_struct *tsk, struct mm_struct *mm)
{
    struct ldt_struct *new_ldt;
    struct mm_struct *old_mm;

@@ -160,7 +160,7 @@ out_unlock:
 *
 * 64bit: Don't touch the LDT register - we're already in the next thread.
 */
void destroy_context(struct mm_struct *mm)
void destroy_context_ldt(struct mm_struct *mm)
{
    free_ldt_struct(mm->context.ldt);
    mm->context.ldt = NULL;


@@ -31,6 +31,7 @@
#include <asm/tlbflush.h>
#include <asm/mce.h>
#include <asm/vm86.h>
#include <asm/spec-ctrl.h>

/*
 * per-CPU TSS segments. Threads are completely 'soft' on Linux,

@@ -116,11 +117,6 @@ void flush_thread(void)
    fpu__clear(&tsk->thread.fpu);
}

static void hard_disable_TSC(void)
{
    cr4_set_bits(X86_CR4_TSD);
}

void disable_TSC(void)
{
    preempt_disable();

@@ -129,15 +125,10 @@ void disable_TSC(void)
     * Must flip the CPU state synchronously with
     * TIF_NOTSC in the current running context.
     */
    hard_disable_TSC();
    cr4_set_bits(X86_CR4_TSD);
    preempt_enable();
}

static void hard_enable_TSC(void)
{
    cr4_clear_bits(X86_CR4_TSD);
}

static void enable_TSC(void)
{
    preempt_disable();

@@ -146,7 +137,7 @@ static void enable_TSC(void)
     * Must flip the CPU state synchronously with
     * TIF_NOTSC in the current running context.
     */
    hard_enable_TSC();
    cr4_clear_bits(X86_CR4_TSD);
    preempt_enable();
}

@@ -174,48 +165,199 @@ int set_tsc_mode(unsigned int val)
    return 0;
}

void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
                      struct tss_struct *tss)
static inline void switch_to_bitmap(struct tss_struct *tss,
                                    struct thread_struct *prev,
                                    struct thread_struct *next,
                                    unsigned long tifp, unsigned long tifn)
{
    struct thread_struct *prev, *next;

    prev = &prev_p->thread;
    next = &next_p->thread;

    if (test_tsk_thread_flag(prev_p, TIF_BLOCKSTEP) ^
        test_tsk_thread_flag(next_p, TIF_BLOCKSTEP)) {
        unsigned long debugctl = get_debugctlmsr();

        debugctl &= ~DEBUGCTLMSR_BTF;
        if (test_tsk_thread_flag(next_p, TIF_BLOCKSTEP))
            debugctl |= DEBUGCTLMSR_BTF;

        update_debugctlmsr(debugctl);
    }

    if (test_tsk_thread_flag(prev_p, TIF_NOTSC) ^
        test_tsk_thread_flag(next_p, TIF_NOTSC)) {
        /* prev and next are different */
        if (test_tsk_thread_flag(next_p, TIF_NOTSC))
            hard_disable_TSC();
        else
            hard_enable_TSC();
    }

    if (test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) {
    if (tifn & _TIF_IO_BITMAP) {
        /*
         * Copy the relevant range of the IO bitmap.
         * Normally this is 128 bytes or less:
         */
        memcpy(tss->io_bitmap, next->io_bitmap_ptr,
               max(prev->io_bitmap_max, next->io_bitmap_max));
    } else if (test_tsk_thread_flag(prev_p, TIF_IO_BITMAP)) {
    } else if (tifp & _TIF_IO_BITMAP) {
        /*
         * Clear any possible leftover bits:
         */
        memset(tss->io_bitmap, 0xff, prev->io_bitmap_max);
    }
}

#ifdef CONFIG_SMP

struct ssb_state {
    struct ssb_state    *shared_state;
    raw_spinlock_t      lock;
    unsigned int        disable_state;
    unsigned long       local_state;
};

#define LSTATE_SSB  0

static DEFINE_PER_CPU(struct ssb_state, ssb_state);

void speculative_store_bypass_ht_init(void)
{
    struct ssb_state *st = this_cpu_ptr(&ssb_state);
    unsigned int this_cpu = smp_processor_id();
    unsigned int cpu;

    st->local_state = 0;

    /*
     * Shared state setup happens once on the first bringup
     * of the CPU. It's not destroyed on CPU hotunplug.
     */
    if (st->shared_state)
        return;

    raw_spin_lock_init(&st->lock);

    /*
     * Go over HT siblings and check whether one of them has set up the
     * shared state pointer already.
     */
    for_each_cpu(cpu, topology_sibling_cpumask(this_cpu)) {
        if (cpu == this_cpu)
            continue;

        if (!per_cpu(ssb_state, cpu).shared_state)
            continue;

        /* Link it to the state of the sibling: */
        st->shared_state = per_cpu(ssb_state, cpu).shared_state;
        return;
    }

    /*
     * First HT sibling to come up on the core. Link shared state of
     * the first HT sibling to itself. The siblings on the same core
     * which come up later will see the shared state pointer and link
     * themselves to the state of this CPU.
     */
    st->shared_state = st;
}

/*
 * Logic is: the first HT sibling to enable SSBD enables it for both siblings
 * in the core, and the last sibling to disable it, disables it for the whole
 * core. This is how MSR_SPEC_CTRL works in "hardware":
 *
 *  CORE_SPEC_CTRL = THREAD0_SPEC_CTRL | THREAD1_SPEC_CTRL
 */
static __always_inline void amd_set_core_ssb_state(unsigned long tifn)
{
    struct ssb_state *st = this_cpu_ptr(&ssb_state);
    u64 msr = x86_amd_ls_cfg_base;

    if (!static_cpu_has(X86_FEATURE_ZEN)) {
        msr |= ssbd_tif_to_amd_ls_cfg(tifn);
        wrmsrl(MSR_AMD64_LS_CFG, msr);
        return;
    }

    if (tifn & _TIF_SSBD) {
        /*
         * Since this can race with prctl(), block reentry on the
         * same CPU.
         */
        if (__test_and_set_bit(LSTATE_SSB, &st->local_state))
            return;

        msr |= x86_amd_ls_cfg_ssbd_mask;

        raw_spin_lock(&st->shared_state->lock);
        /* First sibling enables SSBD: */
        if (!st->shared_state->disable_state)
            wrmsrl(MSR_AMD64_LS_CFG, msr);
        st->shared_state->disable_state++;
        raw_spin_unlock(&st->shared_state->lock);
    } else {
        if (!__test_and_clear_bit(LSTATE_SSB, &st->local_state))
            return;

        raw_spin_lock(&st->shared_state->lock);
        st->shared_state->disable_state--;
        if (!st->shared_state->disable_state)
            wrmsrl(MSR_AMD64_LS_CFG, msr);
        raw_spin_unlock(&st->shared_state->lock);
    }
}
#else
static __always_inline void amd_set_core_ssb_state(unsigned long tifn)
{
    u64 msr = x86_amd_ls_cfg_base | ssbd_tif_to_amd_ls_cfg(tifn);

    wrmsrl(MSR_AMD64_LS_CFG, msr);
}
#endif
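
A standalone model (not kernel code, names invented) of the sibling counting amd_set_core_ssb_state() implements: the first thread on a core to request SSBD flips the core-wide state, the last one to drop it flips it back.

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static unsigned int disable_state;  /* models shared_state->disable_state */

    /* Stands in for wrmsrl(MSR_AMD64_LS_CFG, ...) on the shared core. */
    static void write_msr_sketch(int on)
    {
        printf("core-wide SSBD %s\n", on ? "enabled" : "disabled");
    }

    void ssbd_enable_sketch(void)
    {
        pthread_mutex_lock(&lock);
        if (!disable_state++)           /* first sibling flips the core */
            write_msr_sketch(1);
        pthread_mutex_unlock(&lock);
    }

    void ssbd_disable_sketch(void)
    {
        pthread_mutex_lock(&lock);
        if (!--disable_state)           /* last sibling flips it back */
            write_msr_sketch(0);
        pthread_mutex_unlock(&lock);
    }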

static __always_inline void amd_set_ssb_virt_state(unsigned long tifn)
{
    /*
     * SSBD has the same definition in SPEC_CTRL and VIRT_SPEC_CTRL,
     * so ssbd_tif_to_spec_ctrl() just works.
     */
    wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, ssbd_tif_to_spec_ctrl(tifn));
}

static __always_inline void intel_set_ssb_state(unsigned long tifn)
{
    u64 msr = x86_spec_ctrl_base | ssbd_tif_to_spec_ctrl(tifn);

    wrmsrl(MSR_IA32_SPEC_CTRL, msr);
}

static __always_inline void __speculative_store_bypass_update(unsigned long tifn)
{
    if (static_cpu_has(X86_FEATURE_VIRT_SSBD))
        amd_set_ssb_virt_state(tifn);
    else if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD))
        amd_set_core_ssb_state(tifn);
    else
        intel_set_ssb_state(tifn);
}

void speculative_store_bypass_update(unsigned long tif)
{
    preempt_disable();
    __speculative_store_bypass_update(tif);
    preempt_enable();
}

void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
                      struct tss_struct *tss)
{
    struct thread_struct *prev, *next;
    unsigned long tifp, tifn;

    prev = &prev_p->thread;
    next = &next_p->thread;

    tifn = READ_ONCE(task_thread_info(next_p)->flags);
    tifp = READ_ONCE(task_thread_info(prev_p)->flags);
    switch_to_bitmap(tss, prev, next, tifp, tifn);

    propagate_user_return_notify(prev_p, next_p);

    if ((tifp & _TIF_BLOCKSTEP || tifn & _TIF_BLOCKSTEP) &&
        arch_has_block_step()) {
        unsigned long debugctl, msk;

        rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
        debugctl &= ~DEBUGCTLMSR_BTF;
        msk = tifn & _TIF_BLOCKSTEP;
        debugctl |= (msk >> TIF_BLOCKSTEP) << DEBUGCTLMSR_BTF_SHIFT;
        wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
    }

    if ((tifp ^ tifn) & _TIF_NOTSC)
        cr4_toggle_bits(X86_CR4_TSD);

    if ((tifp ^ tifn) & _TIF_SSBD)
        __speculative_store_bypass_update(tifn);
}

/*


@@ -75,6 +75,7 @@
#include <asm/i8259.h>
#include <asm/realmode.h>
#include <asm/misc.h>
#include <asm/spec-ctrl.h>

/* Number of siblings per CPU package */
int smp_num_siblings = 1;

@@ -217,6 +218,8 @@ static void notrace start_secondary(void *unused)
     */
    check_tsc_sync_target();

    speculative_store_bypass_ht_init();

    /*
     * Lock vector_lock and initialize the vectors on this cpu
     * before setting the cpu online. We must set it online with

@@ -1209,6 +1212,8 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
    set_mtrr_aps_delayed_init();

    smp_quirk_init_udelay();

    speculative_store_bypass_ht_init();
}

void arch_enable_nonboot_cpus_begin(void)

@@ -37,7 +37,7 @@
#include <asm/desc.h>
#include <asm/debugreg.h>
#include <asm/kvm_para.h>
#include <asm/nospec-branch.h>
#include <asm/spec-ctrl.h>

#include <asm/virtext.h>
#include "trace.h"


@@ -48,7 +48,7 @@
#include <asm/kexec.h>
#include <asm/apic.h>
#include <asm/irq_remapping.h>
#include <asm/nospec-branch.h>
#include <asm/spec-ctrl.h>

#include "trace.h"
#include "pmu.h"

@@ -6843,6 +6843,8 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
                 HRTIMER_MODE_REL);
    vmx->nested.preemption_timer.function = vmx_preemption_timer_fn;

    vmx->nested.vpid02 = allocate_vpid();

    vmx->nested.vmxon = true;

    skip_emulated_instruction(vcpu);

@@ -8887,10 +8889,8 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
        goto free_vmcs;
    }

    if (nested) {
    if (nested)
        nested_vmx_setup_ctls_msrs(vmx);
        vmx->nested.vpid02 = allocate_vpid();
    }

    vmx->nested.posted_intr_nv = -1;
    vmx->nested.current_vmptr = -1ull;

@@ -8899,7 +8899,6 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
    return &vmx->vcpu;

free_vmcs:
    free_vpid(vmx->nested.vpid02);
    free_loaded_vmcs(vmx->loaded_vmcs);
free_msrs:
    kfree(vmx->guest_msrs);


@@ -10,6 +10,7 @@

#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/nospec-branch.h>
#include <asm/cache.h>
#include <asm/apic.h>
#include <asm/uv/uv.h>

@@ -29,6 +30,8 @@
 *  Implement flush IPI by CALL_FUNCTION_VECTOR, Alex Shi
 */

atomic64_t last_mm_ctx_id = ATOMIC64_INIT(1);

struct flush_tlb_info {
    struct mm_struct *flush_mm;
    unsigned long flush_start;

@@ -104,6 +107,36 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
    unsigned cpu = smp_processor_id();

    if (likely(prev != next)) {
        u64 last_ctx_id = this_cpu_read(cpu_tlbstate.last_ctx_id);

        /*
         * Avoid user/user BTB poisoning by flushing the branch
         * predictor when switching between processes. This stops
         * one process from doing Spectre-v2 attacks on another.
         *
         * As an optimization, flush indirect branches only when
         * switching into processes that disable dumping. This
         * protects high value processes like gpg, without having
         * too high performance overhead. IBPB is *expensive*!
         *
         * This will not flush branches when switching into kernel
         * threads. It will also not flush if we switch to idle
         * thread and back to the same process. It will flush if we
         * switch to a different non-dumpable process.
         */
        if (tsk && tsk->mm &&
            tsk->mm->context.ctx_id != last_ctx_id &&
            get_dumpable(tsk->mm) != SUID_DUMP_USER)
            indirect_branch_prediction_barrier();

        /*
         * Record last user mm's context id, so we can avoid
         * flushing branch buffer with IBPB if we switch back
         * to the same user.
         */
        if (next != &init_mm)
            this_cpu_write(cpu_tlbstate.last_ctx_id, next->context.ctx_id);

        this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
        this_cpu_write(cpu_tlbstate.active_mm, next);
        cpumask_set_cpu(cpu, mm_cpumask(next));


@@ -40,6 +40,7 @@
#include <asm/fixmap.h>
#include <asm/realmode.h>
#include <asm/time.h>
#include <asm/nospec-branch.h>

/*
 * We allocate runtime services regions bottom-up, starting from -4G, i.e.

@@ -347,6 +348,7 @@ extern efi_status_t efi64_thunk(u32, ...);
    \
    efi_sync_low_kernel_mappings();                                 \
    local_irq_save(flags);                                          \
    firmware_restrict_branch_speculation_start();                   \
    \
    efi_scratch.prev_cr3 = read_cr3();                              \
    write_cr3((unsigned long)efi_scratch.efi_pgt);                  \

@@ -357,6 +359,7 @@ extern efi_status_t efi64_thunk(u32, ...);
    \
    write_cr3(efi_scratch.prev_cr3);                                \
    __flush_tlb_all();                                              \
    firmware_restrict_branch_speculation_end();                     \
    local_irq_restore(flags);                                       \
    \
    __s;                                                            \


@@ -460,6 +460,12 @@ static void __init xen_init_cpuid_mask(void)
        cpuid_leaf1_ecx_set_mask = (1 << (X86_FEATURE_MWAIT % 32));
}

static void __init xen_init_capabilities(void)
{
    if (xen_pv_domain())
        setup_force_cpu_cap(X86_FEATURE_XENPV);
}

static void xen_set_debugreg(int reg, unsigned long val)
{
    HYPERVISOR_set_debugreg(reg, val);

@@ -1587,6 +1593,7 @@ asmlinkage __visible void __init xen_start_kernel(void)

    xen_init_irq_ops();
    xen_init_cpuid_mask();
    xen_init_capabilities();

#ifdef CONFIG_X86_LOCAL_APIC
    /*

@@ -1883,14 +1890,6 @@ bool xen_hvm_need_lapic(void)
}
EXPORT_SYMBOL_GPL(xen_hvm_need_lapic);

static void xen_set_cpu_features(struct cpuinfo_x86 *c)
{
    if (xen_pv_domain()) {
        clear_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);
        set_cpu_cap(c, X86_FEATURE_XENPV);
    }
}

const struct hypervisor_x86 x86_hyper_xen = {
    .name = "Xen",
    .detect = xen_platform,

@@ -1898,7 +1897,6 @@ const struct hypervisor_x86 x86_hyper_xen = {
    .init_platform = xen_hvm_guest_init,
#endif
    .x2apic_available = xen_x2apic_para_available,
    .set_cpu_features = xen_set_cpu_features,
};
EXPORT_SYMBOL(x86_hyper_xen);


@@ -28,6 +28,7 @@
#include <xen/interface/vcpu.h>
#include <xen/interface/xenpmu.h>

#include <asm/spec-ctrl.h>
#include <asm/xen/interface.h>
#include <asm/xen/hypercall.h>

@@ -87,6 +88,8 @@ static void cpu_bringup(void)
    cpu_data(cpu).x86_max_cores = 1;
    set_cpu_sibling_map(cpu);

    speculative_store_bypass_ht_init();

    xen_setup_cpu_clockevents();

    notify_cpu_starting(cpu);

@@ -357,6 +360,8 @@ static void __init xen_smp_prepare_cpus(unsigned int max_cpus)
    }
    set_cpu_sibling_map(0);

    speculative_store_bypass_ht_init();

    xen_pmu_init(0);

    if (xen_smp_intr_init(0))


@@ -1,11 +1,14 @@
#include <linux/types.h>
#include <linux/tick.h>
#include <linux/percpu-defs.h>

#include <xen/xen.h>
#include <xen/interface/xen.h>
#include <xen/grant_table.h>
#include <xen/events.h>

#include <asm/cpufeatures.h>
#include <asm/msr-index.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/page.h>
#include <asm/fixmap.h>

@@ -68,6 +71,8 @@ static void xen_pv_post_suspend(int suspend_cancelled)
    xen_mm_unpin_all();
}

static DEFINE_PER_CPU(u64, spec_ctrl);

void xen_arch_pre_suspend(void)
{
    if (xen_pv_domain())

@@ -84,6 +89,9 @@ void xen_arch_post_suspend(int cancelled)

static void xen_vcpu_notify_restore(void *data)
{
    if (xen_pv_domain() && boot_cpu_has(X86_FEATURE_SPEC_CTRL))
        wrmsrl(MSR_IA32_SPEC_CTRL, this_cpu_read(spec_ctrl));

    /* Boot processor notified via generic timekeeping_resume() */
    if (smp_processor_id() == 0)
        return;

@@ -93,7 +101,15 @@ static void xen_vcpu_notify_restore(void *data)

static void xen_vcpu_notify_suspend(void *data)
{
    u64 tmp;

    tick_suspend_local();

    if (xen_pv_domain() && boot_cpu_has(X86_FEATURE_SPEC_CTRL)) {
        rdmsrl(MSR_IA32_SPEC_CTRL, tmp);
        this_cpu_write(spec_ctrl, tmp);
        wrmsrl(MSR_IA32_SPEC_CTRL, 0);
    }
}

void xen_arch_resume(void)


@@ -661,21 +661,17 @@ EXPORT_SYMBOL(blk_alloc_queue);
int blk_queue_enter(struct request_queue *q, gfp_t gfp)
{
    while (true) {
        int ret;

        if (percpu_ref_tryget_live(&q->q_usage_counter))
            return 0;

        if (!gfpflags_allow_blocking(gfp))
            return -EBUSY;

        ret = wait_event_interruptible(q->mq_freeze_wq,
                !atomic_read(&q->mq_freeze_depth) ||
                blk_queue_dying(q));
        wait_event(q->mq_freeze_wq,
                   !atomic_read(&q->mq_freeze_depth) ||
                   blk_queue_dying(q));
        if (blk_queue_dying(q))
            return -ENODEV;
        if (ret)
            return ret;
    }
}

@@ -1001,6 +1001,21 @@ unsigned int crypto_alg_extsize(struct crypto_alg *alg)
}
EXPORT_SYMBOL_GPL(crypto_alg_extsize);

int crypto_type_has_alg(const char *name, const struct crypto_type *frontend,
                        u32 type, u32 mask)
{
    int ret = 0;
    struct crypto_alg *alg = crypto_find_alg(name, frontend, type, mask);

    if (!IS_ERR(alg)) {
        crypto_mod_put(alg);
        ret = 1;
    }

    return ret;
}
EXPORT_SYMBOL_GPL(crypto_type_has_alg);

static int __init crypto_algapi_init(void)
{
    crypto_init_proc();


@@ -108,6 +108,7 @@ static int crypto_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
                   CRYPTO_TFM_RES_MASK);

out:
    memzero_explicit(&keys, sizeof(keys));
    return err;

badkey:


@@ -90,6 +90,7 @@ static int crypto_authenc_esn_setkey(struct crypto_aead *authenc_esn, const u8 *
                   CRYPTO_TFM_RES_MASK);

out:
    memzero_explicit(&keys, sizeof(keys));
    return err;

badkey:


@@ -104,6 +104,9 @@ int crypto_probing_notify(unsigned long val, void *v);

unsigned int crypto_alg_extsize(struct crypto_alg *alg);

int crypto_type_has_alg(const char *name, const struct crypto_type *frontend,
                        u32 type, u32 mask);

static inline struct crypto_alg *crypto_alg_get(struct crypto_alg *alg)
{
    atomic_inc(&alg->cra_refcnt);


@@ -16,7 +16,11 @@

#include <crypto/internal/skcipher.h>
#include <linux/bug.h>
#include <linux/cryptouser.h>
#include <linux/module.h>
#include <linux/rtnetlink.h>
#include <linux/seq_file.h>
#include <net/netlink.h>

#include "internal.h"

@@ -25,10 +29,11 @@ static unsigned int crypto_skcipher_extsize(struct crypto_alg *alg)
    if (alg->cra_type == &crypto_blkcipher_type)
        return sizeof(struct crypto_blkcipher *);

    BUG_ON(alg->cra_type != &crypto_ablkcipher_type &&
           alg->cra_type != &crypto_givcipher_type);
    if (alg->cra_type == &crypto_ablkcipher_type ||
        alg->cra_type == &crypto_givcipher_type)
        return sizeof(struct crypto_ablkcipher *);

    return sizeof(struct crypto_ablkcipher *);
    return crypto_alg_extsize(alg);
}

static int skcipher_setkey_blkcipher(struct crypto_skcipher *tfm,

@@ -118,7 +123,7 @@ static int crypto_init_skcipher_ops_blkcipher(struct crypto_tfm *tfm)
    skcipher->decrypt = skcipher_decrypt_blkcipher;

    skcipher->ivsize = crypto_blkcipher_ivsize(blkcipher);
    skcipher->has_setkey = calg->cra_blkcipher.max_keysize;
    skcipher->keysize = calg->cra_blkcipher.max_keysize;

    return 0;
}

@@ -211,31 +216,123 @@ static int crypto_init_skcipher_ops_ablkcipher(struct crypto_tfm *tfm)
    skcipher->ivsize = crypto_ablkcipher_ivsize(ablkcipher);
    skcipher->reqsize = crypto_ablkcipher_reqsize(ablkcipher) +
                        sizeof(struct ablkcipher_request);
    skcipher->has_setkey = calg->cra_ablkcipher.max_keysize;
    skcipher->keysize = calg->cra_ablkcipher.max_keysize;

    return 0;
}

static void crypto_skcipher_exit_tfm(struct crypto_tfm *tfm)
{
    struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
    struct skcipher_alg *alg = crypto_skcipher_alg(skcipher);

    alg->exit(skcipher);
}

static int crypto_skcipher_init_tfm(struct crypto_tfm *tfm)
{
    struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
    struct skcipher_alg *alg = crypto_skcipher_alg(skcipher);

    if (tfm->__crt_alg->cra_type == &crypto_blkcipher_type)
        return crypto_init_skcipher_ops_blkcipher(tfm);

    BUG_ON(tfm->__crt_alg->cra_type != &crypto_ablkcipher_type &&
           tfm->__crt_alg->cra_type != &crypto_givcipher_type);
    if (tfm->__crt_alg->cra_type == &crypto_ablkcipher_type ||
        tfm->__crt_alg->cra_type == &crypto_givcipher_type)
        return crypto_init_skcipher_ops_ablkcipher(tfm);

    return crypto_init_skcipher_ops_ablkcipher(tfm);
    skcipher->setkey = alg->setkey;
    skcipher->encrypt = alg->encrypt;
    skcipher->decrypt = alg->decrypt;
    skcipher->ivsize = alg->ivsize;
    skcipher->keysize = alg->max_keysize;

    if (alg->exit)
        skcipher->base.exit = crypto_skcipher_exit_tfm;

    if (alg->init)
        return alg->init(skcipher);

    return 0;
}

static void crypto_skcipher_free_instance(struct crypto_instance *inst)
{
    struct skcipher_instance *skcipher =
        container_of(inst, struct skcipher_instance, s.base);

    skcipher->free(skcipher);
}

static void crypto_skcipher_show(struct seq_file *m, struct crypto_alg *alg)
    __attribute__ ((unused));
static void crypto_skcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
    struct skcipher_alg *skcipher = container_of(alg, struct skcipher_alg,
                                                 base);

    seq_printf(m, "type         : skcipher\n");
    seq_printf(m, "async        : %s\n",
               alg->cra_flags & CRYPTO_ALG_ASYNC ? "yes" : "no");
    seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
    seq_printf(m, "min keysize  : %u\n", skcipher->min_keysize);
    seq_printf(m, "max keysize  : %u\n", skcipher->max_keysize);
    seq_printf(m, "ivsize       : %u\n", skcipher->ivsize);
    seq_printf(m, "chunksize    : %u\n", skcipher->chunksize);
}

#ifdef CONFIG_NET
static int crypto_skcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
    struct crypto_report_blkcipher rblkcipher;
    struct skcipher_alg *skcipher = container_of(alg, struct skcipher_alg,
                                                 base);

    strlcpy(rblkcipher.type, "skcipher", sizeof(rblkcipher.type));
    strlcpy(rblkcipher.geniv, "<none>", sizeof(rblkcipher.geniv));

    rblkcipher.blocksize = alg->cra_blocksize;
    rblkcipher.min_keysize = skcipher->min_keysize;
    rblkcipher.max_keysize = skcipher->max_keysize;
    rblkcipher.ivsize = skcipher->ivsize;

    if (nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
                sizeof(struct crypto_report_blkcipher), &rblkcipher))
        goto nla_put_failure;
    return 0;

nla_put_failure:
    return -EMSGSIZE;
}
#else
static int crypto_skcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
    return -ENOSYS;
}
#endif

static const struct crypto_type crypto_skcipher_type2 = {
    .extsize = crypto_skcipher_extsize,
    .init_tfm = crypto_skcipher_init_tfm,
    .free = crypto_skcipher_free_instance,
#ifdef CONFIG_PROC_FS
    .show = crypto_skcipher_show,
#endif
    .report = crypto_skcipher_report,
    .maskclear = ~CRYPTO_ALG_TYPE_MASK,
    .maskset = CRYPTO_ALG_TYPE_BLKCIPHER_MASK,
    .type = CRYPTO_ALG_TYPE_BLKCIPHER,
    .type = CRYPTO_ALG_TYPE_SKCIPHER,
    .tfmsize = offsetof(struct crypto_skcipher, base),
};

int crypto_grab_skcipher2(struct crypto_skcipher_spawn *spawn,
                          const char *name, u32 type, u32 mask)
{
    spawn->base.frontend = &crypto_skcipher_type2;
    return crypto_grab_spawn(&spawn->base, name, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_grab_skcipher2);

struct crypto_skcipher *crypto_alloc_skcipher(const char *alg_name,
                                              u32 type, u32 mask)
{

@@ -243,5 +340,90 @@ struct crypto_skcipher *crypto_alloc_skcipher(const char *alg_name,
}
EXPORT_SYMBOL_GPL(crypto_alloc_skcipher);

int crypto_has_skcipher2(const char *alg_name, u32 type, u32 mask)
{
    return crypto_type_has_alg(alg_name, &crypto_skcipher_type2,
                               type, mask);
}
EXPORT_SYMBOL_GPL(crypto_has_skcipher2);

static int skcipher_prepare_alg(struct skcipher_alg *alg)
{
    struct crypto_alg *base = &alg->base;

    if (alg->ivsize > PAGE_SIZE / 8 || alg->chunksize > PAGE_SIZE / 8)
        return -EINVAL;

    if (!alg->chunksize)
        alg->chunksize = base->cra_blocksize;

    base->cra_type = &crypto_skcipher_type2;
    base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
    base->cra_flags |= CRYPTO_ALG_TYPE_SKCIPHER;

    return 0;
}

int crypto_register_skcipher(struct skcipher_alg *alg)
{
    struct crypto_alg *base = &alg->base;
    int err;

    err = skcipher_prepare_alg(alg);
    if (err)
        return err;

    return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_skcipher);

void crypto_unregister_skcipher(struct skcipher_alg *alg)
{
    crypto_unregister_alg(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_skcipher);

int crypto_register_skciphers(struct skcipher_alg *algs, int count)
{
    int i, ret;

    for (i = 0; i < count; i++) {
        ret = crypto_register_skcipher(&algs[i]);
        if (ret)
            goto err;
    }

    return 0;

err:
    for (--i; i >= 0; --i)
        crypto_unregister_skcipher(&algs[i]);

    return ret;
}
EXPORT_SYMBOL_GPL(crypto_register_skciphers);

void crypto_unregister_skciphers(struct skcipher_alg *algs, int count)
{
    int i;

    for (i = count - 1; i >= 0; --i)
        crypto_unregister_skcipher(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_unregister_skciphers);

int skcipher_register_instance(struct crypto_template *tmpl,
                               struct skcipher_instance *inst)
{
    int err;

    err = skcipher_prepare_alg(&inst->alg);
    if (err)
        return err;

    return crypto_register_instance(tmpl, skcipher_crypto_instance(inst));
}
EXPORT_SYMBOL_GPL(skcipher_register_instance);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Symmetric key cipher type");
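
A hedged sketch (not in this patch) of the minimal shape of a driver using the crypto_register_skcipher() API added above. The algorithm is a do-nothing placeholder; all names and handler bodies are invented for illustration:

    #include <crypto/internal/skcipher.h>
    #include <linux/module.h>

    static int sketch_setkey(struct crypto_skcipher *tfm, const u8 *key,
                             unsigned int keylen)
    {
        return 0;   /* a real driver would expand the key schedule here */
    }

    static int sketch_crypt(struct skcipher_request *req)
    {
        return -EOPNOTSUPP;   /* placeholder: no actual transform */
    }

    static struct skcipher_alg sketch_alg = {
        .base = {
            .cra_name        = "ecb(sketch)",
            .cra_driver_name = "ecb-sketch",
            .cra_priority    = 100,
            .cra_blocksize   = 16,
            .cra_module      = THIS_MODULE,
        },
        .min_keysize = 16,
        .max_keysize = 32,
        .setkey      = sketch_setkey,
        .encrypt     = sketch_crypt,
        .decrypt     = sketch_crypt,
    };

    static int __init sketch_init(void)
    {
        return crypto_register_skcipher(&sketch_alg);
    }

    static void __exit sketch_exit(void)
    {
        crypto_unregister_skcipher(&sketch_alg);
    }

    module_init(sketch_init);
    module_exit(sketch_exit);
    MODULE_LICENSE("GPL");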
|
||||
|
|
|
@ -472,9 +472,11 @@ static void negotiate_os_control(struct acpi_pci_root *root, int *no_aspm)
|
|||
}
|
||||
|
||||
control = OSC_PCI_EXPRESS_CAPABILITY_CONTROL
|
||||
| OSC_PCI_EXPRESS_NATIVE_HP_CONTROL
|
||||
| OSC_PCI_EXPRESS_PME_CONTROL;
|
||||
|
||||
if (IS_ENABLED(CONFIG_HOTPLUG_PCI_PCIE))
|
||||
control |= OSC_PCI_EXPRESS_NATIVE_HP_CONTROL;
|
||||
|
||||
if (pci_aer_available()) {
|
||||
if (aer_acpi_firmware_first())
|
||||
dev_info(&device->dev,
|
||||
|
|
|
@ -9,7 +9,7 @@ if ANDROID
|
|||
|
||||
config ANDROID_BINDER_IPC
|
||||
bool "Android Binder IPC Driver"
|
||||
depends on MMU
|
||||
depends on MMU && !M68K
|
||||
default n
|
||||
---help---
|
||||
Binder is used in Android for both communication between processes,
|
||||
|
@ -31,19 +31,6 @@ config ANDROID_BINDER_DEVICES
|
|||
created. Each binder device has its own context manager, and is
|
||||
therefore logically separated from the other devices.
|
||||
|
||||
config ANDROID_BINDER_IPC_32BIT
|
||||
bool
|
||||
depends on !64BIT && ANDROID_BINDER_IPC
|
||||
default y
|
||||
---help---
|
||||
The Binder API has been changed to support both 32 and 64bit
|
||||
applications in a mixed environment.
|
||||
|
||||
Enable this to support an old 32-bit Android user-space (v4.4 and
|
||||
earlier).
|
||||
|
||||
Note that enabling this will break newer Android user-space.
|
||||
|
||||
config ANDROID_BINDER_IPC_SELFTEST
|
||||
bool "Android Binder IPC Driver Selftest"
|
||||
depends on ANDROID_BINDER_IPC
|
||||
|
|
|
@ -71,10 +71,6 @@
|
|||
#include <linux/security.h>
|
||||
#include <linux/spinlock.h>
|
||||
|
||||
#ifdef CONFIG_ANDROID_BINDER_IPC_32BIT
|
||||
#define BINDER_IPC_32BIT 1
|
||||
#endif
|
||||
|
||||
#include <uapi/linux/android/binder.h>
|
||||
#include "binder_alloc.h"
|
||||
#include "binder_trace.h"
|
||||
|
@ -143,7 +139,7 @@ enum {
|
|||
};
|
||||
static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
|
||||
BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
|
||||
module_param_named(debug_mask, binder_debug_mask, uint, S_IWUSR | S_IRUGO);
|
||||
module_param_named(debug_mask, binder_debug_mask, uint, 0644);
|
||||
|
||||
static char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
|
||||
module_param_named(devices, binder_devices_param, charp, S_IRUGO);
|
||||
|
@ -162,7 +158,7 @@ static int binder_set_stop_on_user_error(const char *val,
|
|||
return ret;
|
||||
}
|
||||
module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
|
||||
param_get_int, &binder_stop_on_user_error, S_IWUSR | S_IRUGO);
|
||||
param_get_int, &binder_stop_on_user_error, 0644);
|
||||
|
||||
#define binder_debug(mask, x...) \
|
||||
do { \
|
||||
|
@ -251,7 +247,7 @@ static struct binder_transaction_log_entry *binder_transaction_log_add(
|
|||
unsigned int cur = atomic_inc_return(&log->cur);
|
||||
|
||||
if (cur >= ARRAY_SIZE(log->entry))
|
||||
log->full = 1;
|
||||
log->full = true;
|
||||
e = &log->entry[cur % ARRAY_SIZE(log->entry)];
|
||||
WRITE_ONCE(e->debug_id_done, 0);
|
||||
/*
|
||||
|
@ -466,8 +462,9 @@ struct binder_ref {
|
|||
};
|
||||
|
||||
enum binder_deferred_state {
|
||||
BINDER_DEFERRED_FLUSH = 0x01,
|
||||
BINDER_DEFERRED_RELEASE = 0x02,
|
||||
BINDER_DEFERRED_PUT_FILES = 0x01,
|
||||
BINDER_DEFERRED_FLUSH = 0x02,
|
||||
BINDER_DEFERRED_RELEASE = 0x04,
|
||||
};
|
||||
|
||||
/**
|
||||
|
@ -504,6 +501,9 @@ struct binder_priority {
|
|||
* (invariant after initialized)
|
||||
* @tsk task_struct for group_leader of process
|
||||
* (invariant after initialized)
|
||||
* @files files_struct for process
|
||||
* (protected by @files_lock)
|
||||
* @files_lock mutex to protect @files
|
||||
* @deferred_work_node: element for binder_deferred_list
|
||||
* (protected by binder_deferred_lock)
|
||||
* @deferred_work: bitmap of deferred work to perform
|
||||
|
@ -548,6 +548,8 @@ struct binder_proc {
|
|||
struct list_head waiting_threads;
|
||||
int pid;
|
||||
struct task_struct *tsk;
|
||||
struct files_struct *files;
|
||||
struct mutex files_lock;
|
||||
struct hlist_node deferred_work_node;
|
||||
int deferred_work;
|
||||
bool is_dead;
|
||||
|
@ -942,33 +944,27 @@ static void binder_free_thread(struct binder_thread *thread);
|
|||
static void binder_free_proc(struct binder_proc *proc);
|
||||
static void binder_inc_node_tmpref_ilocked(struct binder_node *node);
|
||||
|
||||
struct files_struct *binder_get_files_struct(struct binder_proc *proc)
|
||||
{
|
||||
return get_files_struct(proc->tsk);
|
||||
}
|
||||
|
||||
static int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
|
||||
{
|
||||
struct files_struct *files;
|
||||
unsigned long rlim_cur;
|
||||
unsigned long irqs;
|
||||
int ret;
|
||||
|
||||
files = binder_get_files_struct(proc);
|
||||
if (files == NULL)
|
||||
return -ESRCH;
|
||||
|
||||
mutex_lock(&proc->files_lock);
|
||||
if (proc->files == NULL) {
|
||||
ret = -ESRCH;
|
||||
goto err;
|
||||
}
|
||||
if (!lock_task_sighand(proc->tsk, &irqs)) {
|
||||
ret = -EMFILE;
|
||||
goto err;
|
||||
}
|
||||
|
||||
rlim_cur = task_rlimit(proc->tsk, RLIMIT_NOFILE);
|
||||
unlock_task_sighand(proc->tsk, &irqs);
|
||||
|
||||
ret = __alloc_fd(files, 0, rlim_cur, flags);
|
||||
ret = __alloc_fd(proc->files, 0, rlim_cur, flags);
|
||||
err:
|
||||
put_files_struct(files);
|
||||
mutex_unlock(&proc->files_lock);
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
@ -978,12 +974,10 @@ err:
|
|||
static void task_fd_install(
|
||||
struct binder_proc *proc, unsigned int fd, struct file *file)
|
||||
{
|
||||
struct files_struct *files = binder_get_files_struct(proc);
|
||||
|
||||
if (files) {
|
||||
__fd_install(files, fd, file);
|
||||
put_files_struct(files);
|
||||
}
|
||||
mutex_lock(&proc->files_lock);
|
||||
if (proc->files)
|
||||
__fd_install(proc->files, fd, file);
|
||||
mutex_unlock(&proc->files_lock);
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -991,21 +985,22 @@ static void task_fd_install(
|
|||
*/
|
||||
static long task_close_fd(struct binder_proc *proc, unsigned int fd)
|
||||
{
|
||||
struct files_struct *files = binder_get_files_struct(proc);
|
||||
int retval;
|
||||
|
||||
if (files == NULL)
|
||||
return -ESRCH;
|
||||
|
||||
retval = __close_fd(files, fd);
|
||||
mutex_lock(&proc->files_lock);
|
||||
if (proc->files == NULL) {
|
||||
retval = -ESRCH;
|
||||
goto err;
|
||||
}
|
||||
retval = __close_fd(proc->files, fd);
|
||||
/* can't restart close syscall because file table entry was cleared */
|
||||
if (unlikely(retval == -ERESTARTSYS ||
|
||||
retval == -ERESTARTNOINTR ||
|
||||
retval == -ERESTARTNOHAND ||
|
||||
retval == -ERESTART_RESTARTBLOCK))
|
||||
retval = -EINTR;
|
||||
put_files_struct(files);
|
||||
|
||||
err:
|
||||
mutex_unlock(&proc->files_lock);
|
||||
return retval;
|
||||
}
|
||||
|
||||
|
@ -2215,8 +2210,8 @@ static size_t binder_validate_object(struct binder_buffer *buffer, u64 offset)
|
|||
struct binder_object_header *hdr;
|
||||
size_t object_size = 0;
|
||||
|
||||
if (offset > buffer->data_size - sizeof(*hdr) ||
|
||||
buffer->data_size < sizeof(*hdr) ||
|
||||
if (buffer->data_size < sizeof(*hdr) ||
|
||||
offset > buffer->data_size - sizeof(*hdr) ||
|
||||
!IS_ALIGNED(offset, sizeof(u32)))
|
||||
return 0;
|
||||
|
||||
|
@@ -2356,7 +2351,7 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
 	int debug_id = buffer->debug_id;
 
 	binder_debug(BINDER_DEBUG_TRANSACTION,
-		     "%d buffer release %d, size %zd-%zd, failed at %p\n",
+		     "%d buffer release %d, size %zd-%zd, failed at %pK\n",
 		     proc->pid, buffer->debug_id,
 		     buffer->data_size, buffer->offsets_size, failed_at);
 
@@ -2805,7 +2800,7 @@ static bool binder_proc_transaction(struct binder_transaction *t,
 		if (node->has_async_transaction) {
 			pending_async = true;
 		} else {
-			node->has_async_transaction = 1;
+			node->has_async_transaction = true;
 		}
 	}
 
@@ -3670,7 +3665,7 @@ static int binder_thread_write(struct binder_proc *proc,
 			w = binder_dequeue_work_head_ilocked(
 					&buf_node->async_todo);
 			if (!w) {
-				buf_node->has_async_transaction = 0;
+				buf_node->has_async_transaction = false;
 			} else {
 				binder_enqueue_work_ilocked(
 						w, &proc->todo);
@@ -3892,7 +3887,7 @@ static int binder_thread_write(struct binder_proc *proc,
 			}
 		}
 		binder_debug(BINDER_DEBUG_DEAD_BINDER,
-			     "%d:%d BC_DEAD_BINDER_DONE %016llx found %p\n",
+			     "%d:%d BC_DEAD_BINDER_DONE %016llx found %pK\n",
 			     proc->pid, thread->pid, (u64)cookie,
 			     death);
 		if (death == NULL) {
@@ -4098,6 +4093,7 @@ retry:
 		binder_inner_proc_unlock(proc);
 		if (put_user(e->cmd, (uint32_t __user *)ptr))
 			return -EFAULT;
+		cmd = e->cmd;
 		e->cmd = BR_OK;
 		ptr += sizeof(uint32_t);
 
@@ -4866,6 +4862,7 @@ static void binder_vma_close(struct vm_area_struct *vma)
 		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
 		     (unsigned long)pgprot_val(vma->vm_page_prot));
 	binder_alloc_vma_close(&proc->alloc);
+	binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
 }
 
 static int binder_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
@@ -4902,16 +4899,22 @@ static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
 		failure_string = "bad vm_flags";
 		goto err_bad_arg;
 	}
-	vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE;
+	vma->vm_flags |= VM_DONTCOPY | VM_MIXEDMAP;
+	vma->vm_flags &= ~VM_MAYWRITE;
+
 	vma->vm_ops = &binder_vm_ops;
 	vma->vm_private_data = proc;
 
 	ret = binder_alloc_mmap_handler(&proc->alloc, vma);
-
-	return ret;
+	if (ret)
+		return ret;
+	mutex_lock(&proc->files_lock);
+	proc->files = get_files_struct(current);
+	mutex_unlock(&proc->files_lock);
+	return 0;
 
 err_bad_arg:
-	pr_err("binder_mmap: %d %lx-%lx %s failed %d\n",
+	pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
 	       proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
 	return ret;
 }
@@ -4921,7 +4924,7 @@ static int binder_open(struct inode *nodp, struct file *filp)
 	struct binder_proc *proc;
 	struct binder_device *binder_dev;
 
-	binder_debug(BINDER_DEBUG_OPEN_CLOSE, "binder_open: %d:%d\n",
+	binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%s: %d:%d\n", __func__,
 		     current->group_leader->pid, current->pid);
 
 	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
@@ -4931,6 +4934,7 @@ static int binder_open(struct inode *nodp, struct file *filp)
 	spin_lock_init(&proc->outer_lock);
 	get_task_struct(current->group_leader);
 	proc->tsk = current->group_leader;
+	mutex_init(&proc->files_lock);
 	INIT_LIST_HEAD(&proc->todo);
 	if (binder_supported_policy(current->policy)) {
 		proc->default_priority.sched_policy = current->policy;
@@ -4966,7 +4970,7 @@ static int binder_open(struct inode *nodp, struct file *filp)
 	 * anyway print all contexts that a given PID has, so this
 	 * is not a problem.
 	 */
-	proc->debugfs_entry = debugfs_create_file(strbuf, S_IRUGO,
+	proc->debugfs_entry = debugfs_create_file(strbuf, 0444,
 		binder_debugfs_dir_entry_proc,
 		(void *)(unsigned long)proc->pid,
 		&binder_proc_fops);
@@ -5087,6 +5091,8 @@ static void binder_deferred_release(struct binder_proc *proc)
 	struct rb_node *n;
 	int threads, nodes, incoming_refs, outgoing_refs, active_transactions;
 
+	BUG_ON(proc->files);
+
 	mutex_lock(&binder_procs_lock);
 	hlist_del(&proc->proc_node);
 	mutex_unlock(&binder_procs_lock);
@@ -5168,6 +5174,8 @@ static void binder_deferred_release(struct binder_proc *proc)
 static void binder_deferred_func(struct work_struct *work)
 {
 	struct binder_proc *proc;
+	struct files_struct *files;
+
 	int defer;
 
 	do {
@@ -5184,11 +5192,23 @@ static void binder_deferred_func(struct work_struct *work)
 		}
 		mutex_unlock(&binder_deferred_lock);
 
+		files = NULL;
+		if (defer & BINDER_DEFERRED_PUT_FILES) {
+			mutex_lock(&proc->files_lock);
+			files = proc->files;
+			if (files)
+				proc->files = NULL;
+			mutex_unlock(&proc->files_lock);
+		}
+
 		if (defer & BINDER_DEFERRED_FLUSH)
 			binder_deferred_flush(proc);
 
 		if (defer & BINDER_DEFERRED_RELEASE)
 			binder_deferred_release(proc); /* frees proc */
+
+		if (files)
+			put_files_struct(files);
 	} while (proc);
 }
 static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
@@ -5217,7 +5237,7 @@ static void print_binder_transaction_ilocked(struct seq_file *m,
 	spin_lock(&t->lock);
 	to_proc = t->to_proc;
 	seq_printf(m,
-		   "%s %d: %p from %d:%d to %d:%d code %x flags %x pri %d:%d r%d",
+		   "%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %d:%d r%d",
 		   prefix, t->debug_id, t,
 		   t->from ? t->from->proc->pid : 0,
 		   t->from ? t->from->pid : 0,
@@ -5242,7 +5262,7 @@ static void print_binder_transaction_ilocked(struct seq_file *m,
 	}
 	if (buffer->target_node)
 		seq_printf(m, " node %d", buffer->target_node->debug_id);
-	seq_printf(m, " size %zd:%zd data %p\n",
+	seq_printf(m, " size %zd:%zd data %pK\n",
 		   buffer->data_size, buffer->offsets_size,
 		   buffer->data);
 }
@@ -5777,11 +5797,13 @@ static int __init init_binder_device(const char *name)
 static int __init binder_init(void)
 {
 	int ret;
-	char *device_name, *device_names;
+	char *device_name, *device_names, *device_tmp;
 	struct binder_device *device;
 	struct hlist_node *tmp;
 
-	binder_alloc_shrinker_init();
+	ret = binder_alloc_shrinker_init();
+	if (ret)
+		return ret;
 
 	atomic_set(&binder_transaction_log.cur, ~0U);
 	atomic_set(&binder_transaction_log_failed.cur, ~0U);
@@ -5796,27 +5818,27 @@ static int __init binder_init(void)
 
 	if (binder_debugfs_dir_entry_root) {
 		debugfs_create_file("state",
-				    S_IRUGO,
+				    0444,
 				    binder_debugfs_dir_entry_root,
 				    NULL,
 				    &binder_state_fops);
 		debugfs_create_file("stats",
-				    S_IRUGO,
+				    0444,
 				    binder_debugfs_dir_entry_root,
 				    NULL,
 				    &binder_stats_fops);
 		debugfs_create_file("transactions",
-				    S_IRUGO,
+				    0444,
 				    binder_debugfs_dir_entry_root,
 				    NULL,
 				    &binder_transactions_fops);
 		debugfs_create_file("transaction_log",
-				    S_IRUGO,
+				    0444,
 				    binder_debugfs_dir_entry_root,
 				    &binder_transaction_log,
 				    &binder_transaction_log_fops);
 		debugfs_create_file("failed_transaction_log",
-				    S_IRUGO,
+				    0444,
 				    binder_debugfs_dir_entry_root,
 				    &binder_transaction_log_failed,
 				    &binder_transaction_log_fops);
@@ -5833,7 +5855,8 @@ static int __init binder_init(void)
 	}
 	strcpy(device_names, binder_devices_param);
 
-	while ((device_name = strsep(&device_names, ","))) {
+	device_tmp = device_names;
+	while ((device_name = strsep(&device_tmp, ","))) {
 		ret = init_binder_device(device_name);
 		if (ret)
 			goto err_init_binder_device_failed;
@@ -5847,6 +5870,9 @@ err_init_binder_device_failed:
 		hlist_del(&device->hlist);
 		kfree(device);
 	}
+
+	kfree(device_names);
+
 err_alloc_device_names_failed:
 	debugfs_remove_recursive(binder_debugfs_dir_entry_root);
@@ -220,7 +220,7 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
 	mm = alloc->vma_vm_mm;
 
 	if (mm) {
-		down_write(&mm->mmap_sem);
+		down_read(&mm->mmap_sem);
 		vma = alloc->vma;
 	}
 
@@ -289,7 +289,7 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
 		/* vm_insert_page does not seem to increment the refcount */
 	}
 	if (mm) {
-		up_write(&mm->mmap_sem);
+		up_read(&mm->mmap_sem);
 		mmput(mm);
 	}
 	return 0;
@@ -322,17 +322,18 @@ err_page_ptr_cleared:
 	}
 err_no_vma:
 	if (mm) {
-		up_write(&mm->mmap_sem);
+		up_read(&mm->mmap_sem);
 		mmput(mm);
 	}
 	return vma ? -ENOMEM : -ESRCH;
 }
 
-struct binder_buffer *binder_alloc_new_buf_locked(struct binder_alloc *alloc,
-						  size_t data_size,
-						  size_t offsets_size,
-						  size_t extra_buffers_size,
-						  int is_async)
+static struct binder_buffer *binder_alloc_new_buf_locked(
+				struct binder_alloc *alloc,
+				size_t data_size,
+				size_t offsets_size,
+				size_t extra_buffers_size,
+				int is_async)
 {
 	struct rb_node *n = alloc->free_buffers.rb_node;
 	struct binder_buffer *buffer;
@@ -670,7 +671,7 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
 		goto err_already_mapped;
 	}
 
-	area = get_vm_area(vma->vm_end - vma->vm_start, VM_IOREMAP);
+	area = get_vm_area(vma->vm_end - vma->vm_start, VM_ALLOC);
 	if (area == NULL) {
 		ret = -ENOMEM;
 		failure_string = "get_vm_area";
@@ -1010,8 +1011,14 @@ void binder_alloc_init(struct binder_alloc *alloc)
 	INIT_LIST_HEAD(&alloc->buffers);
 }
 
-void binder_alloc_shrinker_init(void)
+int binder_alloc_shrinker_init(void)
 {
-	list_lru_init(&binder_alloc_lru);
-	register_shrinker(&binder_shrinker);
+	int ret = list_lru_init(&binder_alloc_lru);
+
+	if (ret == 0) {
+		ret = register_shrinker(&binder_shrinker);
+		if (ret)
+			list_lru_destroy(&binder_alloc_lru);
+	}
+	return ret;
 }
@@ -130,7 +130,7 @@ extern struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
 					   size_t extra_buffers_size,
 					   int is_async);
 extern void binder_alloc_init(struct binder_alloc *alloc);
-void binder_alloc_shrinker_init(void);
+extern int binder_alloc_shrinker_init(void);
 extern void binder_alloc_vma_close(struct binder_alloc *alloc);
 extern struct binder_buffer *
 binder_alloc_prepare_to_free(struct binder_alloc *alloc,
@@ -2198,12 +2198,16 @@ static void ata_eh_link_autopsy(struct ata_link *link)
 		if (qc->err_mask & ~AC_ERR_OTHER)
 			qc->err_mask &= ~AC_ERR_OTHER;
 
-		/* SENSE_VALID trumps dev/unknown error and revalidation */
+		/*
+		 * SENSE_VALID trumps dev/unknown error and revalidation. Upper
+		 * layers will determine whether the command is worth retrying
+		 * based on the sense data and device class/type. Otherwise,
+		 * determine directly if the command is worth retrying using its
+		 * error mask and flags.
+		 */
 		if (qc->flags & ATA_QCFLAG_SENSE_VALID)
 			qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_OTHER);
-
-		/* determine whether the command is worth retrying */
-		if (ata_eh_worth_retry(qc))
+		else if (ata_eh_worth_retry(qc))
 			qc->flags |= ATA_QCFLAG_RETRY;
 
 		/* accumulate error info */
@@ -1481,6 +1481,8 @@ static int zatm_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
 				return -EFAULT;
 			if (pool < 0 || pool > ZATM_LAST_POOL)
 				return -EINVAL;
+			pool = array_index_nospec(pool,
+						  ZATM_LAST_POOL + 1);
 			if (copy_from_user(&info,
 			    &((struct zatm_pool_req __user *) arg)->info,
 			    sizeof(info))) return -EFAULT;
@@ -693,14 +693,22 @@ ssize_t __weak cpu_show_spectre_v2(struct device *dev,
 	return sprintf(buf, "Not affected\n");
 }
 
+ssize_t __weak cpu_show_spec_store_bypass(struct device *dev,
+					  struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "Not affected\n");
+}
+
 static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL);
 static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL);
 static DEVICE_ATTR(spectre_v2, 0444, cpu_show_spectre_v2, NULL);
+static DEVICE_ATTR(spec_store_bypass, 0444, cpu_show_spec_store_bypass, NULL);
 
 static struct attribute *cpu_root_vulnerabilities_attrs[] = {
 	&dev_attr_meltdown.attr,
 	&dev_attr_spectre_v1.attr,
 	&dev_attr_spectre_v2.attr,
+	&dev_attr_spec_store_bypass.attr,
 	NULL
 };
 
@@ -330,14 +330,6 @@ static int really_probe(struct device *dev, struct device_driver *drv)
 		goto probe_failed;
 	}
 
-	/*
-	 * Ensure devices are listed in devices_kset in correct order
-	 * It's important to move Dev to the end of devices_kset before
-	 * calling .probe, because it could be recursive and parent Dev
-	 * should always go first
-	 */
-	devices_kset_move_last(dev);
-
 	if (dev->bus->probe) {
 		ret = dev->bus->probe(dev);
 		if (ret)
@@ -339,6 +339,9 @@ static const struct usb_device_id blacklist_table[] = {
 	/* Additional Realtek 8723BU Bluetooth devices */
 	{ USB_DEVICE(0x7392, 0xa611), .driver_info = BTUSB_REALTEK },
 
+	/* Additional Realtek 8723DE Bluetooth devices */
+	{ USB_DEVICE(0x2ff8, 0xb011), .driver_info = BTUSB_REALTEK },
+
 	/* Additional Realtek 8821AE Bluetooth devices */
 	{ USB_DEVICE(0x0b05, 0x17dc), .driver_info = BTUSB_REALTEK },
 	{ USB_DEVICE(0x13d3, 0x3414), .driver_info = BTUSB_REALTEK },
@@ -884,7 +884,7 @@ static int qca_set_baudrate(struct hci_dev *hdev, uint8_t baudrate)
 	 */
 	set_current_state(TASK_UNINTERRUPTIBLE);
 	schedule_timeout(msecs_to_jiffies(BAUDRATE_SETTLE_TIMEOUT_MS));
-	set_current_state(TASK_INTERRUPTIBLE);
+	set_current_state(TASK_RUNNING);
 
 	return 0;
 }
@@ -27,10 +27,6 @@
 
 #define DIAG_SET_FEATURE_MASK(x) (feature_bytes[(x)/8] |= (1 << (x & 0x7)))
 
-#define diag_check_update(x) \
-	(!info || (info && (info->peripheral_mask & MD_PERIPHERAL_MASK(x))) \
-	|| (info && (info->peripheral_mask & MD_PERIPHERAL_PD_MASK(x)))) \
-
 struct diag_mask_info msg_mask;
 struct diag_mask_info msg_bt_mask;
 struct diag_mask_info log_mask;
@@ -65,6 +61,22 @@ static const struct diag_ssid_range_t msg_mask_tbl[] = {
 	{ .ssid_first = MSG_SSID_25, .ssid_last = MSG_SSID_25_LAST }
 };
 
+static int diag_check_update(int md_peripheral, int pid)
+{
+	int ret;
+	struct diag_md_session_t *info = NULL;
+
+	mutex_lock(&driver->md_session_lock);
+	info = diag_md_session_get_pid(pid);
+	ret = (!info || (info &&
+		(info->peripheral_mask & MD_PERIPHERAL_MASK(md_peripheral))) ||
+		(info && (info->peripheral_mask &
+		MD_PERIPHERAL_PD_MASK(md_peripheral))));
+	mutex_unlock(&driver->md_session_lock);
+
+	return ret;
+}
+
 static int diag_apps_responds(void)
 {
 	/*
@@ -159,6 +171,9 @@ static void diag_send_log_mask_update(uint8_t peripheral, int equip_id)
 
 	mutex_lock(&mask_info->lock);
 	for (i = 0; i < MAX_EQUIP_ID; i++, mask++) {
+		if (!mask->ptr)
+			continue;
+
 		if (equip_id != i && equip_id != ALL_EQUIP_ID)
 			continue;
 
@@ -388,6 +403,8 @@ static void diag_send_msg_mask_update(uint8_t peripheral, int first, int last)
 	}
 
 	for (i = 0; i < msg_mask_tbl_count_local; i++, mask++) {
+		if (!mask->ptr)
+			continue;
 		mutex_lock(&driver->msg_mask_lock);
 		if (((mask->ssid_first > first) ||
 		    (mask->ssid_last_tools < last)) && first != ALL_SSID) {
@@ -632,6 +649,8 @@ static int diag_cmd_get_build_mask(unsigned char *src_buf, int src_len,
 	rsp.padding = 0;
 	build_mask = (struct diag_msg_mask_t *)msg_bt_mask.ptr;
 	for (i = 0; i < driver->bt_msg_mask_tbl_count; i++, build_mask++) {
+		if (!build_mask->ptr)
+			continue;
 		if (build_mask->ssid_first != req->ssid_first)
 			continue;
 		num_entries = req->ssid_last - req->ssid_first + 1;
@@ -708,6 +727,8 @@ static int diag_cmd_get_msg_mask(unsigned char *src_buf, int src_len,
 		return -EINVAL;
 	}
 	for (i = 0; i < driver->msg_mask_tbl_count; i++, mask++) {
+		if (!mask->ptr)
+			continue;
 		if ((req->ssid_first < mask->ssid_first) ||
 		    (req->ssid_first > mask->ssid_last_tools)) {
 			continue;
@@ -777,6 +798,8 @@ static int diag_cmd_set_msg_mask(unsigned char *src_buf, int src_len,
 		return -EINVAL;
 	}
 	for (i = 0; i < driver->msg_mask_tbl_count; i++, mask++) {
+		if (!mask->ptr)
+			continue;
 		if (i < (driver->msg_mask_tbl_count - 1)) {
 			mask_next = mask;
 			mask_next++;
@@ -838,7 +861,7 @@ static int diag_cmd_set_msg_mask(unsigned char *src_buf, int src_len,
 	mutex_unlock(&driver->msg_mask_lock);
 	mutex_unlock(&mask_info->lock);
 	mutex_unlock(&driver->md_session_lock);
-	if (diag_check_update(APPS_DATA))
+	if (diag_check_update(APPS_DATA, pid))
 		diag_update_userspace_clients(MSG_MASKS_TYPE);
 
 	/*
@@ -860,7 +883,7 @@ static int diag_cmd_set_msg_mask(unsigned char *src_buf, int src_len,
 	memcpy(dest_buf + write_len, src_buf + header_len, mask_size);
 	write_len += mask_size;
 	for (i = 0; i < NUM_PERIPHERALS; i++) {
-		if (!diag_check_update(i))
+		if (!diag_check_update(i, pid))
 			continue;
 		mutex_lock(&driver->md_session_lock);
 		diag_send_msg_mask_update(i, req->ssid_first, req->ssid_last);
@@ -928,7 +951,7 @@ static int diag_cmd_set_all_msg_mask(unsigned char *src_buf, int src_len,
 	mutex_unlock(&driver->msg_mask_lock);
 	mutex_unlock(&mask_info->lock);
 	mutex_unlock(&driver->md_session_lock);
-	if (diag_check_update(APPS_DATA))
+	if (diag_check_update(APPS_DATA, pid))
 		diag_update_userspace_clients(MSG_MASKS_TYPE);
 
 	/*
@@ -944,7 +967,7 @@ static int diag_cmd_set_all_msg_mask(unsigned char *src_buf, int src_len,
 	write_len += header_len;
 
 	for (i = 0; i < NUM_PERIPHERALS; i++) {
-		if (!diag_check_update(i))
+		if (!diag_check_update(i, pid))
 			continue;
 		mutex_lock(&driver->md_session_lock);
 		diag_send_msg_mask_update(i, ALL_SSID, ALL_SSID);
@@ -1032,7 +1055,7 @@ static int diag_cmd_update_event_mask(unsigned char *src_buf, int src_len,
 	mask_info->status = DIAG_CTRL_MASK_VALID;
 	mutex_unlock(&mask_info->lock);
 	mutex_unlock(&driver->md_session_lock);
-	if (diag_check_update(APPS_DATA))
+	if (diag_check_update(APPS_DATA, pid))
 		diag_update_userspace_clients(EVENT_MASKS_TYPE);
 
 	/*
@@ -1049,7 +1072,7 @@ static int diag_cmd_update_event_mask(unsigned char *src_buf, int src_len,
 	write_len += mask_len;
 
 	for (i = 0; i < NUM_PERIPHERALS; i++) {
-		if (!diag_check_update(i))
+		if (!diag_check_update(i, pid))
 			continue;
 		mutex_lock(&driver->md_session_lock);
 		diag_send_event_mask_update(i);
@@ -1098,7 +1121,7 @@ static int diag_cmd_toggle_events(unsigned char *src_buf, int src_len,
 	}
 	mutex_unlock(&mask_info->lock);
 	mutex_unlock(&driver->md_session_lock);
-	if (diag_check_update(APPS_DATA))
+	if (diag_check_update(APPS_DATA, pid))
 		diag_update_userspace_clients(EVENT_MASKS_TYPE);
 
 	/*
@@ -1108,7 +1131,7 @@ static int diag_cmd_toggle_events(unsigned char *src_buf, int src_len,
 	header.cmd_code = DIAG_CMD_EVENT_TOGGLE;
 	header.padding = 0;
 	for (i = 0; i < NUM_PERIPHERALS; i++) {
-		if (!diag_check_update(i))
+		if (!diag_check_update(i, pid))
 			continue;
 		mutex_lock(&driver->md_session_lock);
 		diag_send_event_mask_update(i);
@@ -1371,7 +1394,7 @@ static int diag_cmd_set_log_mask(unsigned char *src_buf, int src_len,
 	}
 	mutex_unlock(&mask_info->lock);
 	mutex_unlock(&driver->md_session_lock);
-	if (diag_check_update(APPS_DATA))
+	if (diag_check_update(APPS_DATA, pid))
 		diag_update_userspace_clients(LOG_MASKS_TYPE);
 
 	/*
@@ -1400,7 +1423,7 @@ static int diag_cmd_set_log_mask(unsigned char *src_buf, int src_len,
 	write_len += payload_len;
 
 	for (i = 0; i < NUM_PERIPHERALS; i++) {
-		if (!diag_check_update(i))
+		if (!diag_check_update(i, pid))
 			continue;
 		mutex_lock(&driver->md_session_lock);
 		diag_send_log_mask_update(i, req->equip_id);
@@ -1453,7 +1476,7 @@ static int diag_cmd_disable_log_mask(unsigned char *src_buf, int src_len,
 	}
 	mask_info->status = DIAG_CTRL_MASK_ALL_DISABLED;
 	mutex_unlock(&driver->md_session_lock);
-	if (diag_check_update(APPS_DATA))
+	if (diag_check_update(APPS_DATA, pid))
 		diag_update_userspace_clients(LOG_MASKS_TYPE);
 
 	/*
@@ -1469,7 +1492,7 @@ static int diag_cmd_disable_log_mask(unsigned char *src_buf, int src_len,
 	memcpy(dest_buf, &header, sizeof(struct diag_log_config_rsp_t));
 	write_len += sizeof(struct diag_log_config_rsp_t);
 	for (i = 0; i < NUM_PERIPHERALS; i++) {
-		if (!diag_check_update(i))
+		if (!diag_check_update(i, pid))
 			continue;
 		mutex_lock(&driver->md_session_lock);
 		diag_send_log_mask_update(i, ALL_EQUIP_ID);
@@ -1514,7 +1537,8 @@ static int diag_create_msg_mask_table(void)
 	mutex_lock(&msg_mask.lock);
 	mutex_lock(&driver->msg_mask_lock);
 	driver->msg_mask_tbl_count = MSG_MASK_TBL_CNT;
-	for (i = 0; i < driver->msg_mask_tbl_count; i++, mask++) {
+	for (i = 0; (i < driver->msg_mask_tbl_count) && mask;
+		i++, mask++) {
 		range.ssid_first = msg_mask_tbl[i].ssid_first;
 		range.ssid_last = msg_mask_tbl[i].ssid_last;
 		err = diag_create_msg_mask_table_entry(mask, &range);
@@ -1539,7 +1563,8 @@ static int diag_create_build_time_mask(void)
 	mutex_lock(&driver->msg_mask_lock);
 	driver->bt_msg_mask_tbl_count = MSG_MASK_TBL_CNT;
 	build_mask = (struct diag_msg_mask_t *)msg_bt_mask.ptr;
-	for (i = 0; i < driver->bt_msg_mask_tbl_count; i++, build_mask++) {
+	for (i = 0; (i < driver->bt_msg_mask_tbl_count) && build_mask;
+		i++, build_mask++) {
 		range.ssid_first = msg_mask_tbl[i].ssid_first;
 		range.ssid_last = msg_mask_tbl[i].ssid_last;
 		err = diag_create_msg_mask_table_entry(build_mask, &range);
@@ -1662,7 +1687,7 @@ static int diag_create_log_mask_table(void)
 
 	mutex_lock(&log_mask.lock);
 	mask = (struct diag_log_mask_t *)(log_mask.ptr);
-	for (i = 0; i < MAX_EQUIP_ID; i++, mask++) {
+	for (i = 0; (i < MAX_EQUIP_ID) && mask; i++, mask++) {
 		mask->equip_id = i;
 		mask->num_items = LOG_GET_ITEM_NUM(log_code_last_tbl[i]);
 		mask->num_items_tools = mask->num_items;
@@ -2057,6 +2082,8 @@ int diag_copy_to_user_msg_mask(char __user *buf, size_t count,
 		return -EINVAL;
 	}
 	for (i = 0; i < driver->msg_mask_tbl_count; i++, mask++) {
+		if (!mask->ptr)
+			continue;
 		ptr = mask_info->update_buf;
 		len = 0;
 		mutex_lock(&mask->lock);
@@ -2131,6 +2158,8 @@ int diag_copy_to_user_log_mask(char __user *buf, size_t count,
 		return -EINVAL;
 	}
 	for (i = 0; i < MAX_EQUIP_ID; i++, mask++) {
+		if (!mask->ptr)
+			continue;
 		ptr = mask_info->update_buf;
 		len = 0;
 		mutex_lock(&mask->lock);
@@ -164,11 +164,12 @@ int diag_md_write(int id, unsigned char *buf, int len, int ctx)
 		return -EIO;
 	}
 	pid = session_info->pid;
-	mutex_unlock(&driver->md_session_lock);
 
 	ch = &diag_md[id];
-	if (!ch || !ch->md_info_inited)
+	if (!ch || !ch->md_info_inited) {
+		mutex_unlock(&driver->md_session_lock);
 		return -EINVAL;
+	}
 
 	spin_lock_irqsave(&ch->lock, flags);
 	for (i = 0; i < ch->num_tbl_entries && !found; i++) {
@@ -184,8 +185,10 @@ int diag_md_write(int id, unsigned char *buf, int len, int ctx)
 	}
 	spin_unlock_irqrestore(&ch->lock, flags);
 
-	if (found)
+	if (found) {
+		mutex_unlock(&driver->md_session_lock);
 		return -ENOMEM;
+	}
 
 	spin_lock_irqsave(&ch->lock, flags);
 	for (i = 0; i < ch->num_tbl_entries && !found; i++) {
@@ -198,6 +201,7 @@ int diag_md_write(int id, unsigned char *buf, int len, int ctx)
 		}
 	}
 	spin_unlock_irqrestore(&ch->lock, flags);
+	mutex_unlock(&driver->md_session_lock);
 
 	if (!found) {
 		pr_err_ratelimited("diag: Unable to find an empty space in table, please reduce logging rate, proc: %d\n",
@@ -662,7 +662,7 @@ static void process_ssid_range_report(uint8_t *buf, uint32_t len,
 		mask_ptr = (struct diag_msg_mask_t *)msg_mask.ptr;
 		found = 0;
 		for (j = 0; j < driver->msg_mask_tbl_count; j++, mask_ptr++) {
-			if (!mask_ptr || !ssid_range) {
+			if (!mask_ptr->ptr || !ssid_range) {
 				found = 1;
 				break;
 			}
@@ -741,7 +741,7 @@ static void diag_build_time_mask_update(uint8_t *buf,
 	num_items = range->ssid_last - range->ssid_first + 1;
 
 	for (i = 0; i < driver->bt_msg_mask_tbl_count; i++, build_mask++) {
-		if (!build_mask) {
+		if (!build_mask->ptr) {
 			found = 1;
 			break;
 		}
@@ -191,6 +191,7 @@ static int check_bufsize_for_encoding(struct diagfwd_buf_t *buf, uint32_t len)
 {
 	int i, ctx = 0;
 	uint32_t max_size = 0;
+	unsigned long flags;
 	unsigned char *temp_buf = NULL;
 	struct diag_md_info *ch = NULL;
 
@@ -205,11 +206,16 @@ static int check_bufsize_for_encoding(struct diagfwd_buf_t *buf, uint32_t len)
 		max_size = MAX_PERIPHERAL_HDLC_BUF_SZ;
 	}
 
+	mutex_lock(&driver->md_session_lock);
 	if (buf->len < max_size) {
 		if (driver->logging_mode == DIAG_MEMORY_DEVICE_MODE) {
 			ch = &diag_md[DIAG_LOCAL_PROC];
-			for (i = 0; ch != NULL &&
-					i < ch->num_tbl_entries; i++) {
+			if (!ch || !ch->md_info_inited) {
+				mutex_unlock(&driver->md_session_lock);
+				return -EINVAL;
+			}
+			spin_lock_irqsave(&ch->lock, flags);
+			for (i = 0; i < ch->num_tbl_entries; i++) {
 				if (ch->tbl[i].buf == buf->data) {
 					ctx = ch->tbl[i].ctx;
 					ch->tbl[i].buf = NULL;
@@ -222,18 +228,22 @@ static int check_bufsize_for_encoding(struct diagfwd_buf_t *buf, uint32_t len)
 					break;
 				}
 			}
+			spin_unlock_irqrestore(&ch->lock, flags);
 		}
 		temp_buf = krealloc(buf->data, max_size +
 					APF_DIAG_PADDING,
 				    GFP_KERNEL);
-		if (!temp_buf)
+		if (!temp_buf) {
+			mutex_unlock(&driver->md_session_lock);
 			return -ENOMEM;
+		}
 		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
 			 "Reallocated data buffer: %pK with size: %d\n",
 			 temp_buf, max_size);
 		buf->data = temp_buf;
 		buf->len = max_size;
 	}
+	mutex_unlock(&driver->md_session_lock);
 
 	return buf->len;
 }
@@ -1503,14 +1503,22 @@ static int
 write_pool(struct entropy_store *r, const char __user *buffer, size_t count)
 {
 	size_t bytes;
-	__u32 buf[16];
+	__u32 t, buf[16];
 	const char __user *p = buffer;
 
 	while (count > 0) {
+		int b, i = 0;
+
 		bytes = min(count, sizeof(buf));
 		if (copy_from_user(&buf, p, bytes))
 			return -EFAULT;
 
+		for (b = bytes ; b > 0 ; b -= sizeof(__u32), i++) {
+			if (!arch_get_random_int(&t))
+				break;
+			buf[i] ^= t;
+		}
+
 		count -= bytes;
 		p += bytes;
@@ -333,11 +333,11 @@ static struct pdiv_map pllu_p[] = {
 };
 
 static struct tegra_clk_pll_freq_table pll_u_freq_table[] = {
-	{ 12000000, 480000000, 960, 12, 0, 12},
-	{ 13000000, 480000000, 960, 13, 0, 12},
-	{ 16800000, 480000000, 400, 7, 0, 5},
-	{ 19200000, 480000000, 200, 4, 0, 3},
-	{ 26000000, 480000000, 960, 26, 0, 12},
+	{ 12000000, 480000000, 960, 12, 2, 12 },
+	{ 13000000, 480000000, 960, 13, 2, 12 },
+	{ 16800000, 480000000, 400, 7, 2, 5 },
+	{ 19200000, 480000000, 200, 4, 2, 3 },
+	{ 26000000, 480000000, 960, 26, 2, 12 },
 	{ 0, 0, 0, 0, 0, 0 },
 };
 
@@ -1372,6 +1372,7 @@ static struct tegra_clk_init_table init_table[] __initdata = {
 	{TEGRA30_CLK_GR2D, TEGRA30_CLK_PLL_C, 300000000, 0},
 	{TEGRA30_CLK_GR3D, TEGRA30_CLK_PLL_C, 300000000, 0},
 	{TEGRA30_CLK_GR3D2, TEGRA30_CLK_PLL_C, 300000000, 0},
+	{ TEGRA30_CLK_PLL_U, TEGRA30_CLK_CLK_MAX, 480000000, 0 },
 	{TEGRA30_CLK_CLK_MAX, TEGRA30_CLK_CLK_MAX, 0, 0}, /* This MUST be the last entry. */
 };