Merge "Merge android-4.4@d68ba9f (v4.4.89) into msm-4.4"
This commit is contained in:
commit
f2b5c20a20
189 changed files with 10798 additions and 5631 deletions
Documentation/filesystems/f2fs.txt
@@ -102,14 +102,16 @@ background_gc=%s Turn on/off cleaning operations, namely garbage
collection, triggered in background when I/O subsystem is
idle. If background_gc=on, it will turn on the garbage
collection and if background_gc=off, garbage collection
will be truned off. If background_gc=sync, it will turn
will be turned off. If background_gc=sync, it will turn
on synchronous garbage collection running in background.
Default value for this option is on. So garbage
collection is on by default.
disable_roll_forward Disable the roll-forward recovery routine
norecovery Disable the roll-forward recovery routine, mounted read-
only (i.e., -o ro,disable_roll_forward)
discard Issue discard/TRIM commands when a segment is cleaned.
discard/nodiscard Enable/disable real-time discard in f2fs, if discard is
enabled, f2fs will issue discard/TRIM commands when a
segment is cleaned.
no_heap Disable heap-style segment allocation which finds free
segments for data from the beginning of main area, while
for node from the end of main area.
@@ -129,6 +131,7 @@ inline_dentry Enable the inline dir feature: data in new created
directory entries can be written into inode block. The
space of inode block which is used to store inline
dentries is limited to ~3.4k.
noinline_dentry Diable the inline dentry feature.
flush_merge Merge concurrent cache_flush commands as much as possible
to eliminate redundant command issues. If the underlying
device handles the cache_flush command relatively slowly,
@@ -145,10 +148,15 @@ extent_cache Enable an extent cache based on rb-tree, it can cache
as many as extent which map between contiguous logical
address and physical address per inode, resulting in
increasing the cache hit ratio. Set by default.
noextent_cache Diable an extent cache based on rb-tree explicitly, see
noextent_cache Disable an extent cache based on rb-tree explicitly, see
the above extent_cache mount option.
noinline_data Disable the inline data feature, inline data feature is
enabled by default.
data_flush Enable data flushing before checkpoint in order to
persist data of regular and symlink.
mode=%s Control block allocation mode which supports "adaptive"
and "lfs". In "lfs" mode, there should be no random
writes towards main area.

================================================================================
DEBUGFS ENTRIES

@@ -192,7 +200,7 @@ Files in /sys/fs/f2fs/<devname>
policy for garbage collection. Setting gc_idle = 0
(default) will disable this option. Setting
gc_idle = 1 will select the Cost Benefit approach
& setting gc_idle = 2 will select the greedy aproach.
& setting gc_idle = 2 will select the greedy approach.

reclaim_segments This parameter controls the number of prefree
segments to be reclaimed. If the number of prefree
@@ -298,7 +306,7 @@ The dump.f2fs shows the information of specific inode and dumps SSA and SIT to
file. Each file is dump_ssa and dump_sit.

The dump.f2fs is used to debug on-disk data structures of the f2fs filesystem.
It shows on-disk inode information reconized by a given inode number, and is
It shows on-disk inode information recognized by a given inode number, and is
able to dump all the SSA and SIT entries into predefined files, ./dump_ssa and
./dump_sit respectively.

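As a quick illustration of the mount options documented above, a minimal sketch follows; the device, mount point, and chosen options are hypothetical and not part of this commit:

# Assumes /dev/sdb1 holds an f2fs image and /mnt/f2fs exists.
mount -t f2fs -o background_gc=sync,discard /dev/sdb1 /mnt/f2fs
# gc_idle tunable from the sysfs section: 0 = off, 1 = cost-benefit, 2 = greedy
cat /sys/fs/f2fs/sdb1/gc_idle
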
Makefile
@@ -1,6 +1,6 @@
VERSION = 4
PATCHLEVEL = 4
SUBLEVEL = 85
SUBLEVEL = 89
EXTRAVERSION =
NAME = Blurry Fish Butt

android/configs/README
@@ -1,15 +0,0 @@
The files in this directory are meant to be used as a base for an Android
kernel config. All devices should have the options in android-base.cfg enabled.
While not mandatory, the options in android-recommended.cfg enable advanced
Android features.

Assuming you already have a minimalist defconfig for your device, a possible
way to enable these options would be:

ARCH=<arch> scripts/kconfig/merge_config.sh <path_to>/<device>_defconfig android/configs/android-base.cfg android/configs/android-recommended.cfg

This will generate a .config that can then be used to save a new defconfig or
compile a new kernel with Android features enabled.

Because there is no tool to consistently generate these config fragments,
lets keep them alphabetically sorted instead of random.

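A hedged sketch of the flow the (now removed) README describes; the ARCH value, defconfig path, and savedefconfig step are illustrative placeholders, not values from this commit:

ARCH=arm64 scripts/kconfig/merge_config.sh arch/arm64/configs/my_defconfig \
    android/configs/android-base.cfg android/configs/android-recommended.cfg
# Optionally turn the generated .config back into a checked-in defconfig:
make ARCH=arm64 savedefconfig
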
android/configs/android-base-arm64.cfg
@@ -1,5 +0,0 @@
# KEEP ALPHABETICALLY SORTED
CONFIG_ARMV8_DEPRECATED=y
CONFIG_CP15_BARRIER_EMULATION=y
CONFIG_SETEND_EMULATION=y
CONFIG_SWP_EMULATION=y

android/configs/android-base.cfg
@@ -1,164 +0,0 @@
# KEEP ALPHABETICALLY SORTED
# CONFIG_DEVKMEM is not set
# CONFIG_DEVMEM is not set
# CONFIG_FHANDLE is not set
# CONFIG_INET_LRO is not set
# CONFIG_NFSD is not set
# CONFIG_NFS_FS is not set
# CONFIG_OABI_COMPAT is not set
# CONFIG_SYSVIPC is not set
# CONFIG_USELIB is not set
CONFIG_ANDROID=y
CONFIG_ANDROID_BINDER_IPC=y
CONFIG_ANDROID_BINDER_DEVICES=binder,hwbinder,vndbinder
CONFIG_ANDROID_LOW_MEMORY_KILLER=y
CONFIG_ASHMEM=y
CONFIG_AUDIT=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_CGROUPS=y
CONFIG_CGROUP_CPUACCT=y
CONFIG_CGROUP_FREEZER=y
CONFIG_CGROUP_SCHED=y
CONFIG_DEFAULT_SECURITY_SELINUX=y
CONFIG_EMBEDDED=y
CONFIG_FB=y
CONFIG_HARDENED_USERCOPY=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_INET6_AH=y
CONFIG_INET6_ESP=y
CONFIG_INET6_IPCOMP=y
CONFIG_INET=y
CONFIG_INET_DIAG_DESTROY=y
CONFIG_INET_ESP=y
CONFIG_INET_XFRM_MODE_TUNNEL=y
CONFIG_IP6_NF_FILTER=y
CONFIG_IP6_NF_IPTABLES=y
CONFIG_IP6_NF_MANGLE=y
CONFIG_IP6_NF_RAW=y
CONFIG_IP6_NF_TARGET_REJECT=y
CONFIG_IPV6=y
CONFIG_IPV6_MIP6=y
CONFIG_IPV6_MULTIPLE_TABLES=y
CONFIG_IPV6_OPTIMISTIC_DAD=y
CONFIG_IPV6_ROUTER_PREF=y
CONFIG_IPV6_ROUTE_INFO=y
CONFIG_IP_ADVANCED_ROUTER=y
CONFIG_IP_MULTICAST=y
CONFIG_IP_MULTIPLE_TABLES=y
CONFIG_IP_NF_ARPFILTER=y
CONFIG_IP_NF_ARPTABLES=y
CONFIG_IP_NF_ARP_MANGLE=y
CONFIG_IP_NF_FILTER=y
CONFIG_IP_NF_IPTABLES=y
CONFIG_IP_NF_MANGLE=y
CONFIG_IP_NF_MATCH_AH=y
CONFIG_IP_NF_MATCH_ECN=y
CONFIG_IP_NF_MATCH_TTL=y
CONFIG_IP_NF_NAT=y
CONFIG_IP_NF_RAW=y
CONFIG_IP_NF_SECURITY=y
CONFIG_IP_NF_TARGET_MASQUERADE=y
CONFIG_IP_NF_TARGET_NETMAP=y
CONFIG_IP_NF_TARGET_REDIRECT=y
CONFIG_IP_NF_TARGET_REJECT=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODVERSIONS=y
CONFIG_NET=y
CONFIG_NETDEVICES=y
CONFIG_NETFILTER=y
CONFIG_NETFILTER_XT_MATCH_COMMENT=y
CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y
CONFIG_NETFILTER_XT_MATCH_CONNMARK=y
CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y
CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y
CONFIG_NETFILTER_XT_MATCH_HELPER=y
CONFIG_NETFILTER_XT_MATCH_IPRANGE=y
CONFIG_NETFILTER_XT_MATCH_LENGTH=y
CONFIG_NETFILTER_XT_MATCH_LIMIT=y
CONFIG_NETFILTER_XT_MATCH_MAC=y
CONFIG_NETFILTER_XT_MATCH_MARK=y
CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y
CONFIG_NETFILTER_XT_MATCH_POLICY=y
CONFIG_NETFILTER_XT_MATCH_QTAGUID=y
CONFIG_NETFILTER_XT_MATCH_QUOTA2=y
CONFIG_NETFILTER_XT_MATCH_QUOTA=y
CONFIG_NETFILTER_XT_MATCH_SOCKET=y
CONFIG_NETFILTER_XT_MATCH_STATE=y
CONFIG_NETFILTER_XT_MATCH_STATISTIC=y
CONFIG_NETFILTER_XT_MATCH_STRING=y
CONFIG_NETFILTER_XT_MATCH_TIME=y
CONFIG_NETFILTER_XT_MATCH_U32=y
CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y
CONFIG_NETFILTER_XT_TARGET_CONNMARK=y
CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y
CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y
CONFIG_NETFILTER_XT_TARGET_MARK=y
CONFIG_NETFILTER_XT_TARGET_NFLOG=y
CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y
CONFIG_NETFILTER_XT_TARGET_SECMARK=y
CONFIG_NETFILTER_XT_TARGET_TCPMSS=y
CONFIG_NETFILTER_XT_TARGET_TPROXY=y
CONFIG_NETFILTER_XT_TARGET_TRACE=y
CONFIG_NET_CLS_ACT=y
CONFIG_NET_CLS_U32=y
CONFIG_NET_EMATCH=y
CONFIG_NET_EMATCH_U32=y
CONFIG_NET_KEY=y
CONFIG_NET_SCHED=y
CONFIG_NET_SCH_HTB=y
CONFIG_NF_CONNTRACK=y
CONFIG_NF_CONNTRACK_AMANDA=y
CONFIG_NF_CONNTRACK_EVENTS=y
CONFIG_NF_CONNTRACK_FTP=y
CONFIG_NF_CONNTRACK_H323=y
CONFIG_NF_CONNTRACK_IPV4=y
CONFIG_NF_CONNTRACK_IPV6=y
CONFIG_NF_CONNTRACK_IRC=y
CONFIG_NF_CONNTRACK_NETBIOS_NS=y
CONFIG_NF_CONNTRACK_PPTP=y
CONFIG_NF_CONNTRACK_SANE=y
CONFIG_NF_CONNTRACK_SECMARK=y
CONFIG_NF_CONNTRACK_TFTP=y
CONFIG_NF_CT_NETLINK=y
CONFIG_NF_CT_PROTO_DCCP=y
CONFIG_NF_CT_PROTO_SCTP=y
CONFIG_NF_CT_PROTO_UDPLITE=y
CONFIG_NF_NAT=y
CONFIG_NO_HZ=y
CONFIG_PACKET=y
CONFIG_PM_AUTOSLEEP=y
CONFIG_PM_WAKELOCKS=y
CONFIG_PPP=y
CONFIG_PPPOLAC=y
CONFIG_PPPOPNS=y
CONFIG_PPP_BSDCOMP=y
CONFIG_PPP_DEFLATE=y
CONFIG_PPP_MPPE=y
CONFIG_PREEMPT=y
CONFIG_PROFILING=y
CONFIG_RANDOMIZE_BASE=y
CONFIG_RTC_CLASS=y
CONFIG_RT_GROUP_SCHED=y
CONFIG_SECCOMP=y
CONFIG_SECURITY=y
CONFIG_SECURITY_NETWORK=y
CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
CONFIG_SECURITY_SELINUX=y
CONFIG_STAGING=y
CONFIG_SYNC=y
CONFIG_TUN=y
CONFIG_UID_SYS_STATS=y
CONFIG_UNIX=y
CONFIG_USB_CONFIGFS=y
CONFIG_USB_CONFIGFS_F_ACC=y
CONFIG_USB_CONFIGFS_F_AUDIO_SRC=y
CONFIG_USB_CONFIGFS_F_FS=y
CONFIG_USB_CONFIGFS_F_MIDI=y
CONFIG_USB_CONFIGFS_F_MTP=y
CONFIG_USB_CONFIGFS_F_PTP=y
CONFIG_USB_CONFIGFS_UEVENT=y
CONFIG_USB_GADGET=y
CONFIG_XFRM_USER=y

android/configs/android-recommended.cfg
@@ -1,140 +0,0 @@
# KEEP ALPHABETICALLY SORTED
# CONFIG_AIO is not set
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
# CONFIG_INPUT_MOUSE is not set
# CONFIG_LEGACY_PTYS is not set
# CONFIG_NF_CONNTRACK_SIP is not set
# CONFIG_PM_WAKELOCKS_GC is not set
# CONFIG_VT is not set
CONFIG_ANDROID_TIMED_GPIO=y
CONFIG_ARM64_SW_TTBR0_PAN=y
CONFIG_ARM_KERNMEM_PERMS=y
CONFIG_ARM64_SW_TTBR0_PAN=y
CONFIG_BACKLIGHT_LCD_SUPPORT=y
CONFIG_BLK_DEV_DM=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=8192
CONFIG_CC_STACKPROTECTOR_STRONG=y
CONFIG_COMPACTION=y
CONFIG_CPU_SW_DOMAIN_PAN=y
CONFIG_DEBUG_RODATA=y
CONFIG_DM_CRYPT=y
CONFIG_DM_UEVENT=y
CONFIG_DM_VERITY=y
CONFIG_DM_VERITY_FEC=y
CONFIG_DRAGONRISE_FF=y
CONFIG_ENABLE_DEFAULT_TRACERS=y
CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_SECURITY=y
CONFIG_FUSE_FS=y
CONFIG_GREENASIA_FF=y
CONFIG_HIDRAW=y
CONFIG_HID_A4TECH=y
CONFIG_HID_ACRUX=y
CONFIG_HID_ACRUX_FF=y
CONFIG_HID_APPLE=y
CONFIG_HID_BELKIN=y
CONFIG_HID_CHERRY=y
CONFIG_HID_CHICONY=y
CONFIG_HID_CYPRESS=y
CONFIG_HID_DRAGONRISE=y
CONFIG_HID_ELECOM=y
CONFIG_HID_EMS_FF=y
CONFIG_HID_EZKEY=y
CONFIG_HID_GREENASIA=y
CONFIG_HID_GYRATION=y
CONFIG_HID_HOLTEK=y
CONFIG_HID_KENSINGTON=y
CONFIG_HID_KEYTOUCH=y
CONFIG_HID_KYE=y
CONFIG_HID_LCPOWER=y
CONFIG_HID_LOGITECH=y
CONFIG_HID_LOGITECH_DJ=y
CONFIG_HID_MAGICMOUSE=y
CONFIG_HID_MICROSOFT=y
CONFIG_HID_MONTEREY=y
CONFIG_HID_MULTITOUCH=y
CONFIG_HID_NTRIG=y
CONFIG_HID_ORTEK=y
CONFIG_HID_PANTHERLORD=y
CONFIG_HID_PETALYNX=y
CONFIG_HID_PICOLCD=y
CONFIG_HID_PRIMAX=y
CONFIG_HID_PRODIKEYS=y
CONFIG_HID_ROCCAT=y
CONFIG_HID_SAITEK=y
CONFIG_HID_SAMSUNG=y
CONFIG_HID_SMARTJOYPLUS=y
CONFIG_HID_SONY=y
CONFIG_HID_SPEEDLINK=y
CONFIG_HID_SUNPLUS=y
CONFIG_HID_THRUSTMASTER=y
CONFIG_HID_TIVO=y
CONFIG_HID_TOPSEED=y
CONFIG_HID_TWINHAN=y
CONFIG_HID_UCLOGIC=y
CONFIG_HID_WACOM=y
CONFIG_HID_WALTOP=y
CONFIG_HID_WIIMOTE=y
CONFIG_HID_ZEROPLUS=y
CONFIG_HID_ZYDACRON=y
CONFIG_INPUT_EVDEV=y
CONFIG_INPUT_GPIO=y
CONFIG_INPUT_JOYSTICK=y
CONFIG_INPUT_KEYCHORD=y
CONFIG_INPUT_KEYRESET=y
CONFIG_INPUT_MISC=y
CONFIG_INPUT_TABLET=y
CONFIG_INPUT_UINPUT=y
CONFIG_ION=y
CONFIG_JOYSTICK_XPAD=y
CONFIG_JOYSTICK_XPAD_FF=y
CONFIG_JOYSTICK_XPAD_LEDS=y
CONFIG_KALLSYMS_ALL=y
CONFIG_KSM=y
CONFIG_LOGIG940_FF=y
CONFIG_LOGIRUMBLEPAD2_FF=y
CONFIG_LOGITECH_FF=y
CONFIG_MD=y
CONFIG_MEDIA_SUPPORT=y
CONFIG_MEMORY_STATE_TIME=y
CONFIG_MSDOS_FS=y
CONFIG_PANIC_TIMEOUT=5
CONFIG_PANTHERLORD_FF=y
CONFIG_PERF_EVENTS=y
CONFIG_PM_DEBUG=y
CONFIG_PM_RUNTIME=y
CONFIG_PM_WAKELOCKS_LIMIT=0
CONFIG_POWER_SUPPLY=y
CONFIG_PSTORE=y
CONFIG_PSTORE_CONSOLE=y
CONFIG_PSTORE_RAM=y
CONFIG_QFMT_V2=y
CONFIG_QUOTA=y
CONFIG_QUOTACTL=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
CONFIG_QUOTA_TREE=y
CONFIG_SCHEDSTATS=y
CONFIG_SMARTJOYPLUS_FF=y
CONFIG_SND=y
CONFIG_SOUND=y
CONFIG_SUSPEND_TIME=y
CONFIG_TABLET_USB_ACECAD=y
CONFIG_TABLET_USB_AIPTEK=y
CONFIG_TABLET_USB_GTCO=y
CONFIG_TABLET_USB_HANWANG=y
CONFIG_TABLET_USB_KBTAB=y
CONFIG_TASKSTATS=y
CONFIG_TASK_DELAY_ACCT=y
CONFIG_TASK_IO_ACCOUNTING=y
CONFIG_TASK_XACCT=y
CONFIG_TIMER_STATS=y
CONFIG_TMPFS=y
CONFIG_TMPFS_POSIX_ACL=y
CONFIG_UHID=y
CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
CONFIG_USB_EHCI_HCD=y
CONFIG_USB_HIDDEV=y
CONFIG_USB_USBNET=y
CONFIG_VFAT_FS=y

arch/alpha/include/asm/types.h
@@ -1,6 +1,6 @@
#ifndef _ALPHA_TYPES_H
#define _ALPHA_TYPES_H

#include <asm-generic/int-ll64.h>
#include <uapi/asm/types.h>

#endif /* _ALPHA_TYPES_H */

arch/alpha/include/uapi/asm/types.h
@@ -9,8 +9,18 @@
 * need to be careful to avoid a name clashes.
 */

#ifndef __KERNEL__
/*
 * This is here because we used to use l64 for alpha
 * and we don't want to impact user mode with our change to ll64
 * in the kernel.
 *
 * However, some user programs are fine with this. They can
 * flag __SANE_USERSPACE_TYPES__ to get int-ll64.h here.
 */
#if !defined(__SANE_USERSPACE_TYPES__) && !defined(__KERNEL__)
#include <asm-generic/int-l64.h>
#else
#include <asm-generic/int-ll64.h>
#endif

#endif /* _UAPI_ALPHA_TYPES_H */

@@ -104,6 +104,12 @@ ENTRY(EV_MachineCheck)
lr r0, [efa]
mov r1, sp

; hardware auto-disables MMU, re-enable it to allow kernel vaddr
; access for say stack unwinding of modules for crash dumps
lr r3, [ARC_REG_PID]
or r3, r3, MMU_ENABLE
sr r3, [ARC_REG_PID]

lsr r3, r2, 8
bmsk r3, r3, 7
brne r3, ECR_C_MCHK_DUP_TLB, 1f

arch/arc/mm/tlb.c
@@ -885,9 +885,6 @@ void do_tlb_overlap_fault(unsigned long cause, unsigned long address,

local_irq_save(flags);

/* re-enable the MMU */
write_aux_reg(ARC_REG_PID, MMU_ENABLE | read_aux_reg(ARC_REG_PID));

/* loop thru all sets of TLB */
for (set = 0; set < mmu->sets; set++) {

arch/arm/kvm/mmu.c
@@ -831,24 +831,25 @@ void stage2_unmap_vm(struct kvm *kvm)
 * Walks the level-1 page table pointed to by kvm->arch.pgd and frees all
 * underlying level-2 and level-3 tables before freeing the actual level-1 table
 * and setting the struct pointer to NULL.
 *
 * Note we don't need locking here as this is only called when the VM is
 * destroyed, which can only be done once.
 */
void kvm_free_stage2_pgd(struct kvm *kvm)
{
if (kvm->arch.pgd == NULL)
return;
void *pgd = NULL;
void *hwpgd = NULL;

spin_lock(&kvm->mmu_lock);
unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE);
if (kvm->arch.pgd) {
unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE);
pgd = READ_ONCE(kvm->arch.pgd);
hwpgd = kvm_get_hwpgd(kvm);
kvm->arch.pgd = NULL;
}
spin_unlock(&kvm->mmu_lock);

kvm_free_hwpgd(kvm_get_hwpgd(kvm));
if (KVM_PREALLOC_LEVEL > 0)
kfree(kvm->arch.pgd);

kvm->arch.pgd = NULL;
if (hwpgd)
kvm_free_hwpgd(hwpgd);
if (KVM_PREALLOC_LEVEL > 0 && pgd)
kfree(pgd);
}

static pud_t *stage2_get_pud(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,

arch/arm/mm/fault.c
@@ -314,8 +314,11 @@ retry:
 * signal first. We do not need to release the mmap_sem because
 * it would already be released in __lock_page_or_retry in
 * mm/filemap.c. */
if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) {
if (!user_mode(regs))
goto no_context;
return 0;
}

/*
 * Major/minor page fault accounting is only done on the

arch/arm64/kernel/fpsimd.c
@@ -202,9 +202,11 @@ void fpsimd_thread_switch(struct task_struct *next)

void fpsimd_flush_thread(void)
{
preempt_disable();
memset(&current->thread.fpsimd_state, 0, sizeof(struct fpsimd_state));
fpsimd_flush_task_state(current);
set_thread_flag(TIF_FOREIGN_FPSTATE);
preempt_enable();
}

/*

arch/arm64/mm/fault.c
@@ -370,8 +370,11 @@ retry:
 * signal first. We do not need to release the mmap_sem because it
 * would already be released in __lock_page_or_retry in mm/filemap.c.
 */
if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) {
if (!user_mode(regs))
goto no_context;
return 0;
}

/*
 * Major/minor page fault accounting is only done on the initial

arch/mips/math-emu/dp_fmax.c
@@ -47,14 +47,26 @@ union ieee754dp ieee754dp_fmax(union ieee754dp x, union ieee754dp y)
case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
return ieee754dp_nanxcpt(x);

/* numbers are preferred to NaNs */
/*
 * Quiet NaN handling
 */

/*
 * The case of both inputs quiet NaNs
 */
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
return x;

/*
 * The cases of exactly one input quiet NaN (numbers
 * are here preferred as returned values to NaNs)
 */
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
return x;

case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
@@ -80,9 +92,7 @@ union ieee754dp ieee754dp_fmax(union ieee754dp x, union ieee754dp y)
return ys ? x : y;

case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
if (xs == ys)
return x;
return ieee754dp_zero(1);
return ieee754dp_zero(xs & ys);

case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
DPDNORMX;
@@ -106,16 +116,32 @@ union ieee754dp ieee754dp_fmax(union ieee754dp x, union ieee754dp y)
else if (xs < ys)
return x;

/* Compare exponent */
if (xe > ye)
return x;
else if (xe < ye)
return y;
/* Signs of inputs are equal, let's compare exponents */
if (xs == 0) {
/* Inputs are both positive */
if (xe > ye)
return x;
else if (xe < ye)
return y;
} else {
/* Inputs are both negative */
if (xe > ye)
return y;
else if (xe < ye)
return x;
}

/* Compare mantissa */
/* Signs and exponents of inputs are equal, let's compare mantissas */
if (xs == 0) {
/* Inputs are both positive, with equal signs and exponents */
if (xm <= ym)
return y;
return x;
}
/* Inputs are both negative, with equal signs and exponents */
if (xm <= ym)
return y;
return x;
return x;
return y;
}

union ieee754dp ieee754dp_fmaxa(union ieee754dp x, union ieee754dp y)
@@ -147,14 +173,26 @@ union ieee754dp ieee754dp_fmaxa(union ieee754dp x, union ieee754dp y)
case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
return ieee754dp_nanxcpt(x);

/* numbers are preferred to NaNs */
/*
 * Quiet NaN handling
 */

/*
 * The case of both inputs quiet NaNs
 */
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
return x;

/*
 * The cases of exactly one input quiet NaN (numbers
 * are here preferred as returned values to NaNs)
 */
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
return x;

case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
@@ -164,6 +202,9 @@ union ieee754dp ieee754dp_fmaxa(union ieee754dp x, union ieee754dp y)
/*
 * Infinity and zero handling
 */
case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
return ieee754dp_inf(xs & ys);

case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO):
case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM):
case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM):
@@ -171,7 +212,6 @@ union ieee754dp ieee754dp_fmaxa(union ieee754dp x, union ieee754dp y)
case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO):
return x;

case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF):
case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF):
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF):
@@ -180,9 +220,7 @@ union ieee754dp ieee754dp_fmaxa(union ieee754dp x, union ieee754dp y)
return y;

case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
if (xs == ys)
return x;
return ieee754dp_zero(1);
return ieee754dp_zero(xs & ys);

case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
DPDNORMX;
@@ -207,7 +245,11 @@ union ieee754dp ieee754dp_fmaxa(union ieee754dp x, union ieee754dp y)
return y;

/* Compare mantissa */
if (xm <= ym)
if (xm < ym)
return y;
return x;
else if (xm > ym)
return x;
else if (xs == 0)
return x;
return y;
}

arch/mips/math-emu/dp_fmin.c
@@ -47,14 +47,26 @@ union ieee754dp ieee754dp_fmin(union ieee754dp x, union ieee754dp y)
case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
return ieee754dp_nanxcpt(x);

/* numbers are preferred to NaNs */
/*
 * Quiet NaN handling
 */

/*
 * The case of both inputs quiet NaNs
 */
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
return x;

/*
 * The cases of exactly one input quiet NaN (numbers
 * are here preferred as returned values to NaNs)
 */
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
return x;

case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
@@ -80,9 +92,7 @@ union ieee754dp ieee754dp_fmin(union ieee754dp x, union ieee754dp y)
return ys ? y : x;

case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
if (xs == ys)
return x;
return ieee754dp_zero(1);
return ieee754dp_zero(xs | ys);

case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
DPDNORMX;
@@ -106,16 +116,32 @@ union ieee754dp ieee754dp_fmin(union ieee754dp x, union ieee754dp y)
else if (xs < ys)
return y;

/* Compare exponent */
if (xe > ye)
return y;
else if (xe < ye)
return x;
/* Signs of inputs are the same, let's compare exponents */
if (xs == 0) {
/* Inputs are both positive */
if (xe > ye)
return y;
else if (xe < ye)
return x;
} else {
/* Inputs are both negative */
if (xe > ye)
return x;
else if (xe < ye)
return y;
}

/* Compare mantissa */
/* Signs and exponents of inputs are equal, let's compare mantissas */
if (xs == 0) {
/* Inputs are both positive, with equal signs and exponents */
if (xm <= ym)
return x;
return y;
}
/* Inputs are both negative, with equal signs and exponents */
if (xm <= ym)
return x;
return y;
return y;
return x;
}

union ieee754dp ieee754dp_fmina(union ieee754dp x, union ieee754dp y)
@@ -147,14 +173,26 @@ union ieee754dp ieee754dp_fmina(union ieee754dp x, union ieee754dp y)
case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
return ieee754dp_nanxcpt(x);

/* numbers are preferred to NaNs */
/*
 * Quiet NaN handling
 */

/*
 * The case of both inputs quiet NaNs
 */
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
return x;

/*
 * The cases of exactly one input quiet NaN (numbers
 * are here preferred as returned values to NaNs)
 */
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
return x;

case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
@@ -164,25 +202,25 @@ union ieee754dp ieee754dp_fmina(union ieee754dp x, union ieee754dp y)
/*
 * Infinity and zero handling
 */
case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
return ieee754dp_inf(xs | ys);

case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO):
case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM):
case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM):
case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_ZERO):
case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO):
return x;
return y;

case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF):
case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF):
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF):
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_NORM):
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_DNORM):
return y;
return x;

case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
if (xs == ys)
return x;
return ieee754dp_zero(1);
return ieee754dp_zero(xs | ys);

case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
DPDNORMX;
@@ -207,7 +245,11 @@ union ieee754dp ieee754dp_fmina(union ieee754dp x, union ieee754dp y)
return x;

/* Compare mantissa */
if (xm <= ym)
if (xm < ym)
return x;
else if (xm > ym)
return y;
else if (xs == 1)
return x;
return y;
}

arch/mips/math-emu/sp_fmax.c
@@ -47,14 +47,26 @@ union ieee754sp ieee754sp_fmax(union ieee754sp x, union ieee754sp y)
case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
return ieee754sp_nanxcpt(x);

/* numbers are preferred to NaNs */
/*
 * Quiet NaN handling
 */

/*
 * The case of both inputs quiet NaNs
 */
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
return x;

/*
 * The cases of exactly one input quiet NaN (numbers
 * are here preferred as returned values to NaNs)
 */
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
return x;

case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
@@ -80,9 +92,7 @@ union ieee754sp ieee754sp_fmax(union ieee754sp x, union ieee754sp y)
return ys ? x : y;

case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
if (xs == ys)
return x;
return ieee754sp_zero(1);
return ieee754sp_zero(xs & ys);

case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
SPDNORMX;
@@ -106,16 +116,32 @@ union ieee754sp ieee754sp_fmax(union ieee754sp x, union ieee754sp y)
else if (xs < ys)
return x;

/* Compare exponent */
if (xe > ye)
return x;
else if (xe < ye)
return y;
/* Signs of inputs are equal, let's compare exponents */
if (xs == 0) {
/* Inputs are both positive */
if (xe > ye)
return x;
else if (xe < ye)
return y;
} else {
/* Inputs are both negative */
if (xe > ye)
return y;
else if (xe < ye)
return x;
}

/* Compare mantissa */
/* Signs and exponents of inputs are equal, let's compare mantissas */
if (xs == 0) {
/* Inputs are both positive, with equal signs and exponents */
if (xm <= ym)
return y;
return x;
}
/* Inputs are both negative, with equal signs and exponents */
if (xm <= ym)
return y;
return x;
return x;
return y;
}

union ieee754sp ieee754sp_fmaxa(union ieee754sp x, union ieee754sp y)
@@ -147,14 +173,26 @@ union ieee754sp ieee754sp_fmaxa(union ieee754sp x, union ieee754sp y)
case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
return ieee754sp_nanxcpt(x);

/* numbers are preferred to NaNs */
/*
 * Quiet NaN handling
 */

/*
 * The case of both inputs quiet NaNs
 */
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
return x;

/*
 * The cases of exactly one input quiet NaN (numbers
 * are here preferred as returned values to NaNs)
 */
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
return x;

case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
@@ -164,6 +202,9 @@ union ieee754sp ieee754sp_fmaxa(union ieee754sp x, union ieee754sp y)
/*
 * Infinity and zero handling
 */
case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
return ieee754sp_inf(xs & ys);

case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO):
case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM):
case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM):
@@ -171,7 +212,6 @@ union ieee754sp ieee754sp_fmaxa(union ieee754sp x, union ieee754sp y)
case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO):
return x;

case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF):
case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF):
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF):
@@ -180,9 +220,7 @@ union ieee754sp ieee754sp_fmaxa(union ieee754sp x, union ieee754sp y)
return y;

case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
if (xs == ys)
return x;
return ieee754sp_zero(1);
return ieee754sp_zero(xs & ys);

case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
SPDNORMX;
@@ -207,7 +245,11 @@ union ieee754sp ieee754sp_fmaxa(union ieee754sp x, union ieee754sp y)
return y;

/* Compare mantissa */
if (xm <= ym)
if (xm < ym)
return y;
return x;
else if (xm > ym)
return x;
else if (xs == 0)
return x;
return y;
}

arch/mips/math-emu/sp_fmin.c
@@ -47,14 +47,26 @@ union ieee754sp ieee754sp_fmin(union ieee754sp x, union ieee754sp y)
case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
return ieee754sp_nanxcpt(x);

/* numbers are preferred to NaNs */
/*
 * Quiet NaN handling
 */

/*
 * The case of both inputs quiet NaNs
 */
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
return x;

/*
 * The cases of exactly one input quiet NaN (numbers
 * are here preferred as returned values to NaNs)
 */
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
return x;

case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
@@ -80,9 +92,7 @@ union ieee754sp ieee754sp_fmin(union ieee754sp x, union ieee754sp y)
return ys ? y : x;

case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
if (xs == ys)
return x;
return ieee754sp_zero(1);
return ieee754sp_zero(xs | ys);

case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
SPDNORMX;
@@ -106,16 +116,32 @@ union ieee754sp ieee754sp_fmin(union ieee754sp x, union ieee754sp y)
else if (xs < ys)
return y;

/* Compare exponent */
if (xe > ye)
return y;
else if (xe < ye)
return x;
/* Signs of inputs are the same, let's compare exponents */
if (xs == 0) {
/* Inputs are both positive */
if (xe > ye)
return y;
else if (xe < ye)
return x;
} else {
/* Inputs are both negative */
if (xe > ye)
return x;
else if (xe < ye)
return y;
}

/* Compare mantissa */
/* Signs and exponents of inputs are equal, let's compare mantissas */
if (xs == 0) {
/* Inputs are both positive, with equal signs and exponents */
if (xm <= ym)
return x;
return y;
}
/* Inputs are both negative, with equal signs and exponents */
if (xm <= ym)
return x;
return y;
return y;
return x;
}

union ieee754sp ieee754sp_fmina(union ieee754sp x, union ieee754sp y)
@@ -147,14 +173,26 @@ union ieee754sp ieee754sp_fmina(union ieee754sp x, union ieee754sp y)
case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
return ieee754sp_nanxcpt(x);

/* numbers are preferred to NaNs */
/*
 * Quiet NaN handling
 */

/*
 * The case of both inputs quiet NaNs
 */
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
return x;

/*
 * The cases of exactly one input quiet NaN (numbers
 * are here preferred as returned values to NaNs)
 */
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
return x;

case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
@@ -164,25 +202,25 @@ union ieee754sp ieee754sp_fmina(union ieee754sp x, union ieee754sp y)
/*
 * Infinity and zero handling
 */
case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
return ieee754sp_inf(xs | ys);

case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO):
case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM):
case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM):
case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_ZERO):
case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO):
return x;
return y;

case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF):
case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF):
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF):
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_NORM):
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_DNORM):
return y;
return x;

case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
if (xs == ys)
return x;
return ieee754sp_zero(1);
return ieee754sp_zero(xs | ys);

case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
SPDNORMX;
@@ -207,7 +245,11 @@ union ieee754sp ieee754sp_fmina(union ieee754sp x, union ieee754sp y)
return x;

/* Compare mantissa */
if (xm <= ym)
if (xm < ym)
return x;
else if (xm > ym)
return y;
else if (xs == 1)
return x;
return y;
}

arch/powerpc/kernel/align.c
@@ -236,6 +236,28 @@ static int emulate_dcbz(struct pt_regs *regs, unsigned char __user *addr)

#define SWIZ_PTR(p) ((unsigned char __user *)((p) ^ swiz))

#define __get_user_or_set_dar(_regs, _dest, _addr) \
({ \
int rc = 0; \
typeof(_addr) __addr = (_addr); \
if (__get_user_inatomic(_dest, __addr)) { \
_regs->dar = (unsigned long)__addr; \
rc = -EFAULT; \
} \
rc; \
})

#define __put_user_or_set_dar(_regs, _src, _addr) \
({ \
int rc = 0; \
typeof(_addr) __addr = (_addr); \
if (__put_user_inatomic(_src, __addr)) { \
_regs->dar = (unsigned long)__addr; \
rc = -EFAULT; \
} \
rc; \
})

static int emulate_multiple(struct pt_regs *regs, unsigned char __user *addr,
unsigned int reg, unsigned int nb,
unsigned int flags, unsigned int instr,
@@ -264,9 +286,10 @@ static int emulate_multiple(struct pt_regs *regs, unsigned char __user *addr,
} else {
unsigned long pc = regs->nip ^ (swiz & 4);

if (__get_user_inatomic(instr,
(unsigned int __user *)pc))
if (__get_user_or_set_dar(regs, instr,
(unsigned int __user *)pc))
return -EFAULT;

if (swiz == 0 && (flags & SW))
instr = cpu_to_le32(instr);
nb = (instr >> 11) & 0x1f;
@@ -310,31 +333,31 @@ static int emulate_multiple(struct pt_regs *regs, unsigned char __user *addr,
((nb0 + 3) / 4) * sizeof(unsigned long));

for (i = 0; i < nb; ++i, ++p)
if (__get_user_inatomic(REG_BYTE(rptr, i ^ bswiz),
SWIZ_PTR(p)))
if (__get_user_or_set_dar(regs, REG_BYTE(rptr, i ^ bswiz),
SWIZ_PTR(p)))
return -EFAULT;
if (nb0 > 0) {
rptr = &regs->gpr[0];
addr += nb;
for (i = 0; i < nb0; ++i, ++p)
if (__get_user_inatomic(REG_BYTE(rptr,
i ^ bswiz),
SWIZ_PTR(p)))
if (__get_user_or_set_dar(regs,
REG_BYTE(rptr, i ^ bswiz),
SWIZ_PTR(p)))
return -EFAULT;
}

} else {
for (i = 0; i < nb; ++i, ++p)
if (__put_user_inatomic(REG_BYTE(rptr, i ^ bswiz),
SWIZ_PTR(p)))
if (__put_user_or_set_dar(regs, REG_BYTE(rptr, i ^ bswiz),
SWIZ_PTR(p)))
return -EFAULT;
if (nb0 > 0) {
rptr = &regs->gpr[0];
addr += nb;
for (i = 0; i < nb0; ++i, ++p)
if (__put_user_inatomic(REG_BYTE(rptr,
i ^ bswiz),
SWIZ_PTR(p)))
if (__put_user_or_set_dar(regs,
REG_BYTE(rptr, i ^ bswiz),
SWIZ_PTR(p)))
return -EFAULT;
}
}
@@ -346,29 +369,32 @@ static int emulate_multiple(struct pt_regs *regs, unsigned char __user *addr,
 * Only POWER6 has these instructions, and it does true little-endian,
 * so we don't need the address swizzling.
 */
static int emulate_fp_pair(unsigned char __user *addr, unsigned int reg,
unsigned int flags)
static int emulate_fp_pair(struct pt_regs *regs, unsigned char __user *addr,
unsigned int reg, unsigned int flags)
{
char *ptr0 = (char *) &current->thread.TS_FPR(reg);
char *ptr1 = (char *) &current->thread.TS_FPR(reg+1);
int i, ret, sw = 0;
int i, sw = 0;

if (reg & 1)
return 0; /* invalid form: FRS/FRT must be even */
if (flags & SW)
sw = 7;
ret = 0;

for (i = 0; i < 8; ++i) {
if (!(flags & ST)) {
ret |= __get_user(ptr0[i^sw], addr + i);
ret |= __get_user(ptr1[i^sw], addr + i + 8);
if (__get_user_or_set_dar(regs, ptr0[i^sw], addr + i))
return -EFAULT;
if (__get_user_or_set_dar(regs, ptr1[i^sw], addr + i + 8))
return -EFAULT;
} else {
ret |= __put_user(ptr0[i^sw], addr + i);
ret |= __put_user(ptr1[i^sw], addr + i + 8);
if (__put_user_or_set_dar(regs, ptr0[i^sw], addr + i))
return -EFAULT;
if (__put_user_or_set_dar(regs, ptr1[i^sw], addr + i + 8))
return -EFAULT;
}
}
if (ret)
return -EFAULT;

return 1; /* exception handled and fixed up */
}

@@ -378,24 +404,27 @@ static int emulate_lq_stq(struct pt_regs *regs, unsigned char __user *addr,
{
char *ptr0 = (char *)&regs->gpr[reg];
char *ptr1 = (char *)&regs->gpr[reg+1];
int i, ret, sw = 0;
int i, sw = 0;

if (reg & 1)
return 0; /* invalid form: GPR must be even */
if (flags & SW)
sw = 7;
ret = 0;

for (i = 0; i < 8; ++i) {
if (!(flags & ST)) {
ret |= __get_user(ptr0[i^sw], addr + i);
ret |= __get_user(ptr1[i^sw], addr + i + 8);
if (__get_user_or_set_dar(regs, ptr0[i^sw], addr + i))
return -EFAULT;
if (__get_user_or_set_dar(regs, ptr1[i^sw], addr + i + 8))
return -EFAULT;
} else {
ret |= __put_user(ptr0[i^sw], addr + i);
ret |= __put_user(ptr1[i^sw], addr + i + 8);
if (__put_user_or_set_dar(regs, ptr0[i^sw], addr + i))
return -EFAULT;
if (__put_user_or_set_dar(regs, ptr1[i^sw], addr + i + 8))
return -EFAULT;
}
}
if (ret)
return -EFAULT;

return 1; /* exception handled and fixed up */
}
#endif /* CONFIG_PPC64 */
@@ -688,9 +717,14 @@ static int emulate_vsx(unsigned char __user *addr, unsigned int reg,
for (j = 0; j < length; j += elsize) {
for (i = 0; i < elsize; ++i) {
if (flags & ST)
ret |= __put_user(ptr[i^sw], addr + i);
ret = __put_user_or_set_dar(regs, ptr[i^sw],
addr + i);
else
ret |= __get_user(ptr[i^sw], addr + i);
ret = __get_user_or_set_dar(regs, ptr[i^sw],
addr + i);

if (ret)
return ret;
}
ptr += elsize;
#ifdef __LITTLE_ENDIAN__
@@ -740,7 +774,7 @@ int fix_alignment(struct pt_regs *regs)
unsigned int dsisr;
unsigned char __user *addr;
unsigned long p, swiz;
int ret, i;
int i;
union data {
u64 ll;
double dd;
@@ -923,7 +957,7 @@ int fix_alignment(struct pt_regs *regs)
if (flags & F) {
/* Special case for 16-byte FP loads and stores */
PPC_WARN_ALIGNMENT(fp_pair, regs);
return emulate_fp_pair(addr, reg, flags);
return emulate_fp_pair(regs, addr, reg, flags);
} else {
#ifdef CONFIG_PPC64
/* Special case for 16-byte loads and stores */
@@ -953,15 +987,12 @@ int fix_alignment(struct pt_regs *regs)
}

data.ll = 0;
ret = 0;
p = (unsigned long)addr;

for (i = 0; i < nb; i++)
ret |= __get_user_inatomic(data.v[start + i],
SWIZ_PTR(p++));

if (unlikely(ret))
return -EFAULT;
if (__get_user_or_set_dar(regs, data.v[start + i],
SWIZ_PTR(p++)))
return -EFAULT;

} else if (flags & F) {
data.ll = current->thread.TS_FPR(reg);
@@ -1031,15 +1062,13 @@ int fix_alignment(struct pt_regs *regs)
break;
}

ret = 0;
p = (unsigned long)addr;

for (i = 0; i < nb; i++)
ret |= __put_user_inatomic(data.v[start + i],
SWIZ_PTR(p++));
if (__put_user_or_set_dar(regs, data.v[start + i],
SWIZ_PTR(p++)))
return -EFAULT;

if (unlikely(ret))
return -EFAULT;
} else if (flags & F)
current->thread.TS_FPR(reg) = data.ll;
else

arch/x86/include/asm/elf.h
@@ -204,6 +204,7 @@ void set_personality_ia32(bool);

#define ELF_CORE_COPY_REGS(pr_reg, regs) \
do { \
unsigned long base; \
unsigned v; \
(pr_reg)[0] = (regs)->r15; \
(pr_reg)[1] = (regs)->r14; \
@@ -226,8 +227,8 @@ do { \
(pr_reg)[18] = (regs)->flags; \
(pr_reg)[19] = (regs)->sp; \
(pr_reg)[20] = (regs)->ss; \
(pr_reg)[21] = current->thread.fs; \
(pr_reg)[22] = current->thread.gs; \
rdmsrl(MSR_FS_BASE, base); (pr_reg)[21] = base; \
rdmsrl(MSR_KERNEL_GS_BASE, base); (pr_reg)[22] = base; \
asm("movl %%ds,%0" : "=r" (v)); (pr_reg)[23] = v; \
asm("movl %%es,%0" : "=r" (v)); (pr_reg)[24] = v; \
asm("movl %%fs,%0" : "=r" (v)); (pr_reg)[25] = v; \

arch/x86/include/asm/io.h
@@ -304,13 +304,13 @@ static inline unsigned type in##bwl##_p(int port) \
static inline void outs##bwl(int port, const void *addr, unsigned long count) \
{ \
asm volatile("rep; outs" #bwl \
: "+S"(addr), "+c"(count) : "d"(port)); \
: "+S"(addr), "+c"(count) : "d"(port) : "memory"); \
} \
\
static inline void ins##bwl(int port, void *addr, unsigned long count) \
{ \
asm volatile("rep; ins" #bwl \
: "+D"(addr), "+c"(count) : "d"(port)); \
: "+D"(addr), "+c"(count) : "d"(port) : "memory"); \
}

BUILDIO(b, b, char)

block/blk-core.c
@@ -247,7 +247,7 @@ EXPORT_SYMBOL(blk_start_queue_async);
 **/
void blk_start_queue(struct request_queue *q)
{
WARN_ON(!irqs_disabled());
WARN_ON(!in_interrupt() && !irqs_disabled());

queue_flag_clear(QUEUE_FLAG_STOPPED, q);
__blk_run_queue(q);

crypto/algif_skcipher.c
@@ -86,8 +86,13 @@ static void skcipher_free_async_sgls(struct skcipher_async_req *sreq)
}
sgl = sreq->tsg;
n = sg_nents(sgl);
for_each_sg(sgl, sg, n, i)
put_page(sg_page(sg));
for_each_sg(sgl, sg, n, i) {
struct page *page = sg_page(sg);

/* some SGs may not have a page mapped */
if (page && atomic_read(&page->_count))
put_page(page);
}

kfree(sreq->tsg);
}
@@ -138,8 +143,10 @@ static int skcipher_alloc_sgl(struct sock *sk)
sg_init_table(sgl->sg, MAX_SGL_ENTS + 1);
sgl->cur = 0;

if (sg)
if (sg) {
sg_chain(sg, MAX_SGL_ENTS + 1, sgl->sg);
sg_unmark_end(sg + (MAX_SGL_ENTS - 1));
}

list_add_tail(&sgl->list, &ctx->tsgl);
}

drivers/android/Kconfig
@@ -44,6 +44,16 @@ config ANDROID_BINDER_IPC_32BIT

Note that enabling this will break newer Android user-space.

config ANDROID_BINDER_IPC_SELFTEST
bool "Android Binder IPC Driver Selftest"
depends on ANDROID_BINDER_IPC
---help---
This feature allows binder selftest to run.

Binder selftest checks the allocation and free of binder buffers
exhaustively with combinations of various buffer sizes and
alignments.

endif # if ANDROID

endmenu

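A hedged sketch of how the new selftest option could be switched on in an existing build tree, using the in-tree scripts/config helper; the exact workflow is an assumption, not part of this commit:

# Enable CONFIG_ANDROID_BINDER_IPC_SELFTEST in the current .config
scripts/config --enable ANDROID_BINDER_IPC_SELFTEST
make olddefconfig
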
drivers/android/Makefile
@@ -1,3 +1,4 @@
ccflags-y += -I$(src) # needed for trace events

obj-$(CONFIG_ANDROID_BINDER_IPC) += binder.o binder_alloc.o
obj-$(CONFIG_ANDROID_BINDER_IPC_SELFTEST) += binder_alloc_selftest.o

drivers/android/binder.c
@@ -2481,7 +2481,6 @@ static int binder_translate_handle(struct flat_binder_object *fp,
(u64)node->ptr);
binder_node_unlock(node);
} else {
int ret;
struct binder_ref_data dest_rdata;

binder_node_unlock(node);
@@ -4581,6 +4580,8 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
/*pr_info("binder_ioctl: %d:%d %x %lx\n",
proc->pid, current->pid, cmd, arg);*/

binder_selftest_alloc(&proc->alloc);

trace_binder_ioctl(cmd, arg);

ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
@@ -5426,6 +5427,8 @@ static void print_binder_proc_stats(struct seq_file *m,
count = binder_alloc_get_allocated_count(&proc->alloc);
seq_printf(m, " buffers: %d\n", count);

binder_alloc_print_pages(m, &proc->alloc);

count = 0;
binder_inner_proc_lock(proc);
list_for_each_entry(w, &proc->todo, entry) {
@@ -5622,6 +5625,8 @@ static int __init binder_init(void)
struct binder_device *device;
struct hlist_node *tmp;

binder_alloc_shrinker_init();

atomic_set(&binder_transaction_log.cur, ~0U);
atomic_set(&binder_transaction_log_failed.cur, ~0U);
binder_deferred_workqueue = create_singlethread_workqueue("binder");

@ -27,9 +27,12 @@
|
|||
#include <linux/vmalloc.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/sched.h>
|
||||
#include <linux/list_lru.h>
|
||||
#include "binder_alloc.h"
|
||||
#include "binder_trace.h"
|
||||
|
||||
struct list_lru binder_alloc_lru;
|
||||
|
||||
static DEFINE_MUTEX(binder_alloc_mmap_lock);
|
||||
|
||||
enum {
|
||||
|
@ -48,14 +51,23 @@ module_param_named(debug_mask, binder_alloc_debug_mask,
|
|||
pr_info(x); \
|
||||
} while (0)
|
||||
|
||||
static struct binder_buffer *binder_buffer_next(struct binder_buffer *buffer)
|
||||
{
|
||||
return list_entry(buffer->entry.next, struct binder_buffer, entry);
|
||||
}
|
||||
|
||||
static struct binder_buffer *binder_buffer_prev(struct binder_buffer *buffer)
|
||||
{
|
||||
return list_entry(buffer->entry.prev, struct binder_buffer, entry);
|
||||
}
|
||||
|
||||
static size_t binder_alloc_buffer_size(struct binder_alloc *alloc,
|
||||
struct binder_buffer *buffer)
|
||||
{
|
||||
if (list_is_last(&buffer->entry, &alloc->buffers))
|
||||
return alloc->buffer +
|
||||
alloc->buffer_size - (void *)buffer->data;
|
||||
return (size_t)list_entry(buffer->entry.next,
|
||||
struct binder_buffer, entry) - (size_t)buffer->data;
|
||||
return (u8 *)alloc->buffer +
|
||||
alloc->buffer_size - (u8 *)buffer->data;
|
||||
return (u8 *)binder_buffer_next(buffer)->data - (u8 *)buffer->data;
|
||||
}
|
||||
|
||||
static void binder_insert_free_buffer(struct binder_alloc *alloc,
|
||||
|
@ -105,9 +117,9 @@ static void binder_insert_allocated_buffer_locked(
|
|||
buffer = rb_entry(parent, struct binder_buffer, rb_node);
|
||||
BUG_ON(buffer->free);
|
||||
|
||||
if (new_buffer < buffer)
|
||||
if (new_buffer->data < buffer->data)
|
||||
p = &parent->rb_left;
|
||||
else if (new_buffer > buffer)
|
||||
else if (new_buffer->data > buffer->data)
|
||||
p = &parent->rb_right;
|
||||
else
|
||||
BUG();
|
||||
|
@ -122,18 +134,17 @@ static struct binder_buffer *binder_alloc_prepare_to_free_locked(
|
|||
{
|
||||
struct rb_node *n = alloc->allocated_buffers.rb_node;
|
||||
struct binder_buffer *buffer;
|
||||
struct binder_buffer *kern_ptr;
|
||||
void *kern_ptr;
|
||||
|
||||
kern_ptr = (struct binder_buffer *)(user_ptr - alloc->user_buffer_offset
|
||||
- offsetof(struct binder_buffer, data));
|
||||
kern_ptr = (void *)(user_ptr - alloc->user_buffer_offset);
|
||||
|
||||
while (n) {
|
||||
buffer = rb_entry(n, struct binder_buffer, rb_node);
|
||||
BUG_ON(buffer->free);
|
||||
|
||||
if (kern_ptr < buffer)
|
||||
if (kern_ptr < buffer->data)
|
||||
n = n->rb_left;
|
||||
else if (kern_ptr > buffer)
|
||||
else if (kern_ptr > buffer->data)
|
||||
n = n->rb_right;
|
||||
else {
|
||||
/*
|
||||
|
@@ -175,13 +186,14 @@ struct binder_buffer *binder_alloc_prepare_to_free(struct binder_alloc *alloc,
 }
 
 static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
-				    void *start, void *end,
-				    struct vm_area_struct *vma)
+				    void *start, void *end)
 {
 	void *page_addr;
 	unsigned long user_page_addr;
-	struct page **page;
-	struct mm_struct *mm;
+	struct binder_lru_page *page;
+	struct vm_area_struct *vma = NULL;
+	struct mm_struct *mm = NULL;
+	bool need_mm = false;
 
 	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
 		     "%d: %s pages %pK-%pK\n", alloc->pid,

@@ -192,25 +204,27 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
 
 	trace_binder_update_page_range(alloc, allocate, start, end);
 
-	if (vma)
-		mm = NULL;
-	else
-		mm = get_task_mm(alloc->tsk);
+	if (allocate == 0)
+		goto free_range;
+
+	for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
+		page = &alloc->pages[(page_addr - alloc->buffer) / PAGE_SIZE];
+		if (!page->page_ptr) {
+			need_mm = true;
+			break;
+		}
+	}
+
+	/* Same as mmget_not_zero() in later kernel versions */
+	if (need_mm && atomic_inc_not_zero(&alloc->vma_vm_mm->mm_users))
+		mm = alloc->vma_vm_mm;
 
 	if (mm) {
 		down_write(&mm->mmap_sem);
 		vma = alloc->vma;
 		if (vma && mm != alloc->vma_vm_mm) {
 			pr_err("%d: vma mm and task mm mismatch\n",
 				alloc->pid);
 			vma = NULL;
 		}
 	}
 
-	if (allocate == 0)
-		goto free_range;
-
-	if (vma == NULL) {
+	if (!vma && need_mm) {
 		pr_err("%d: binder_alloc_buf failed to map pages in userspace, no vma\n",
 		       alloc->pid);
 		goto err_no_vma;

@@ -218,18 +232,40 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
 
 	for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
 		int ret;
+		bool on_lru;
+		size_t index;
 
-		page = &alloc->pages[(page_addr - alloc->buffer) / PAGE_SIZE];
+		index = (page_addr - alloc->buffer) / PAGE_SIZE;
+		page = &alloc->pages[index];
 
-		BUG_ON(*page);
-		*page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
-		if (*page == NULL) {
+		if (page->page_ptr) {
+			trace_binder_alloc_lru_start(alloc, index);
+
+			on_lru = list_lru_del(&binder_alloc_lru, &page->lru);
+			WARN_ON(!on_lru);
+
+			trace_binder_alloc_lru_end(alloc, index);
+			continue;
+		}
+
+		if (WARN_ON(!vma))
+			goto err_page_ptr_cleared;
+
+		trace_binder_alloc_page_start(alloc, index);
+		page->page_ptr = alloc_page(GFP_KERNEL |
+					    __GFP_HIGHMEM |
+					    __GFP_ZERO);
+		if (!page->page_ptr) {
 			pr_err("%d: binder_alloc_buf failed for page at %pK\n",
 			       alloc->pid, page_addr);
 			goto err_alloc_page_failed;
 		}
+		page->alloc = alloc;
+		INIT_LIST_HEAD(&page->lru);
+
 		ret = map_kernel_range_noflush((unsigned long)page_addr,
-					       PAGE_SIZE, PAGE_KERNEL, page);
+					       PAGE_SIZE, PAGE_KERNEL,
+					       &page->page_ptr);
 		flush_cache_vmap((unsigned long)page_addr,
 				 (unsigned long)page_addr + PAGE_SIZE);
 		if (ret != 1) {

@@ -239,12 +275,14 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
 		}
 		user_page_addr =
 			(uintptr_t)page_addr + alloc->user_buffer_offset;
-		ret = vm_insert_page(vma, user_page_addr, page[0]);
+		ret = vm_insert_page(vma, user_page_addr, page[0].page_ptr);
 		if (ret) {
 			pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n",
 			       alloc->pid, user_page_addr);
 			goto err_vm_insert_page_failed;
 		}
+
+		trace_binder_alloc_page_end(alloc, index);
 		/* vm_insert_page does not seem to increment the refcount */
 	}
 	if (mm) {

@@ -256,16 +294,27 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
 free_range:
 	for (page_addr = end - PAGE_SIZE; page_addr >= start;
 	     page_addr -= PAGE_SIZE) {
-		page = &alloc->pages[(page_addr - alloc->buffer) / PAGE_SIZE];
-		if (vma)
-			zap_page_range(vma, (uintptr_t)page_addr +
-				alloc->user_buffer_offset, PAGE_SIZE, NULL);
+		bool ret;
+		size_t index;
+
+		index = (page_addr - alloc->buffer) / PAGE_SIZE;
+		page = &alloc->pages[index];
+
+		trace_binder_free_lru_start(alloc, index);
+
+		ret = list_lru_add(&binder_alloc_lru, &page->lru);
+		WARN_ON(!ret);
+
+		trace_binder_free_lru_end(alloc, index);
+		continue;
+
 err_vm_insert_page_failed:
 		unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
 err_map_kernel_failed:
-		__free_page(*page);
-		*page = NULL;
+		__free_page(page->page_ptr);
+		page->page_ptr = NULL;
 err_alloc_page_failed:
+err_page_ptr_cleared:
 		;
 	}
 err_no_vma:
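With this change the free path no longer unmaps pages: they are parked on a global list_lru and reclaimed lazily by the shrinker added later in this commit. A hedged sketch of the two list_lru calls involved — example_lru and both helpers are hypothetical names for illustration only; binder itself uses binder_alloc_lru directly as shown above:

#include <linux/list_lru.h>

static struct list_lru example_lru;

/* Freeing a buffer parks its backing page on the lru. */
static void park_page(struct binder_lru_page *page)
{
	bool added = list_lru_add(&example_lru, &page->lru);

	WARN_ON(!added);	/* false would mean it was already on a lru */
}

/* A later allocation tries to reclaim the parked page before
 * falling back to alloc_page(); true means it was still there.
 */
static bool reuse_page(struct binder_lru_page *page)
{
	return list_lru_del(&example_lru, &page->lru);
}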
@@ -321,6 +370,9 @@ struct binder_buffer *binder_alloc_new_buf_locked(struct binder_alloc *alloc,
 		return ERR_PTR(-ENOSPC);
 	}
 
+	/* Pad 0-size buffers so they get assigned unique addresses */
+	size = max(size, sizeof(void *));
+
 	while (n) {
 		buffer = rb_entry(n, struct binder_buffer, rb_node);
 		BUG_ON(!buffer->free);

@@ -380,32 +432,35 @@ struct binder_buffer *binder_alloc_new_buf_locked(struct binder_alloc *alloc,
 
 	has_page_addr =
 		(void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK);
-	if (n == NULL) {
-		if (size + sizeof(struct binder_buffer) + 4 >= buffer_size)
-			buffer_size = size; /* no room for other buffers */
-		else
-			buffer_size = size + sizeof(struct binder_buffer);
-	}
+	WARN_ON(n && buffer_size != size);
 	end_page_addr =
-		(void *)PAGE_ALIGN((uintptr_t)buffer->data + buffer_size);
+		(void *)PAGE_ALIGN((uintptr_t)buffer->data + size);
 	if (end_page_addr > has_page_addr)
 		end_page_addr = has_page_addr;
 	ret = binder_update_page_range(alloc, 1,
-	    (void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr, NULL);
+	    (void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr);
 	if (ret)
 		return ERR_PTR(ret);
 
+	if (buffer_size != size) {
+		struct binder_buffer *new_buffer;
+
+		new_buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
+		if (!new_buffer) {
+			pr_err("%s: %d failed to alloc new buffer struct\n",
+			       __func__, alloc->pid);
+			goto err_alloc_buf_struct_failed;
+		}
+		new_buffer->data = (u8 *)buffer->data + size;
+		list_add(&new_buffer->entry, &buffer->entry);
+		new_buffer->free = 1;
+		binder_insert_free_buffer(alloc, new_buffer);
+	}
+
 	rb_erase(best_fit, &alloc->free_buffers);
 	buffer->free = 0;
 	buffer->free_in_progress = 0;
 	binder_insert_allocated_buffer_locked(alloc, buffer);
-	if (buffer_size != size) {
-		struct binder_buffer *new_buffer = (void *)buffer->data + size;
-
-		list_add(&new_buffer->entry, &buffer->entry);
-		new_buffer->free = 1;
-		binder_insert_free_buffer(alloc, new_buffer);
-	}
 	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
 		     "%d: binder_alloc_buf size %zd got %pK\n",
 		      alloc->pid, size, buffer);

@@ -420,6 +475,12 @@ struct binder_buffer *binder_alloc_new_buf_locked(struct binder_alloc *alloc,
 			      alloc->pid, size, alloc->free_async_space);
 	}
 	return buffer;
+
+err_alloc_buf_struct_failed:
+	binder_update_page_range(alloc, 0,
+				 (void *)PAGE_ALIGN((uintptr_t)buffer->data),
+				 end_page_addr);
+	return ERR_PTR(-ENOMEM);
 }
 
 /**
@@ -454,57 +515,58 @@ struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
 
 static void *buffer_start_page(struct binder_buffer *buffer)
 {
-	return (void *)((uintptr_t)buffer & PAGE_MASK);
+	return (void *)((uintptr_t)buffer->data & PAGE_MASK);
 }
 
-static void *buffer_end_page(struct binder_buffer *buffer)
+static void *prev_buffer_end_page(struct binder_buffer *buffer)
 {
-	return (void *)(((uintptr_t)(buffer + 1) - 1) & PAGE_MASK);
+	return (void *)(((uintptr_t)(buffer->data) - 1) & PAGE_MASK);
 }
 
 static void binder_delete_free_buffer(struct binder_alloc *alloc,
				      struct binder_buffer *buffer)
 {
 	struct binder_buffer *prev, *next = NULL;
-	int free_page_end = 1;
-	int free_page_start = 1;
-
+	bool to_free = true;
 	BUG_ON(alloc->buffers.next == &buffer->entry);
-	prev = list_entry(buffer->entry.prev, struct binder_buffer, entry);
+	prev = binder_buffer_prev(buffer);
 	BUG_ON(!prev->free);
-	if (buffer_end_page(prev) == buffer_start_page(buffer)) {
-		free_page_start = 0;
-		if (buffer_end_page(prev) == buffer_end_page(buffer))
-			free_page_end = 0;
+	if (prev_buffer_end_page(prev) == buffer_start_page(buffer)) {
+		to_free = false;
 		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
-			"%d: merge free, buffer %pK share page with %pK\n",
-			alloc->pid, buffer, prev);
+				   "%d: merge free, buffer %pK share page with %pK\n",
+				   alloc->pid, buffer->data, prev->data);
 	}
 
 	if (!list_is_last(&buffer->entry, &alloc->buffers)) {
-		next = list_entry(buffer->entry.next,
-				  struct binder_buffer, entry);
-		if (buffer_start_page(next) == buffer_end_page(buffer)) {
-			free_page_end = 0;
-			if (buffer_start_page(next) ==
-			    buffer_start_page(buffer))
-				free_page_start = 0;
+		next = binder_buffer_next(buffer);
+		if (buffer_start_page(next) == buffer_start_page(buffer)) {
+			to_free = false;
 			binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
-				"%d: merge free, buffer %pK share page with %pK\n",
-				alloc->pid, buffer, prev);
+					   "%d: merge free, buffer %pK share page with %pK\n",
+					   alloc->pid,
+					   buffer->data,
+					   next->data);
 		}
 	}
-	list_del(&buffer->entry);
-	if (free_page_start || free_page_end) {
+
+	if (PAGE_ALIGNED(buffer->data)) {
 		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
-			"%d: merge free, buffer %pK do not share page%s%s with %pK or %pK\n",
-			alloc->pid, buffer, free_page_start ? "" : " end",
-			free_page_end ? "" : " start", prev, next);
-		binder_update_page_range(alloc, 0, free_page_start ?
-			buffer_start_page(buffer) : buffer_end_page(buffer),
-			(free_page_end ? buffer_end_page(buffer) :
-			buffer_start_page(buffer)) + PAGE_SIZE, NULL);
+				   "%d: merge free, buffer start %pK is page aligned\n",
+				   alloc->pid, buffer->data);
+		to_free = false;
 	}
+
+	if (to_free) {
+		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
+				   "%d: merge free, buffer %pK do not share page with %pK or %pK\n",
+				   alloc->pid, buffer->data,
+				   prev->data, next->data);
+		binder_update_page_range(alloc, 0, buffer_start_page(buffer),
+					 buffer_start_page(buffer) + PAGE_SIZE);
+	}
+	list_del(&buffer->entry);
+	kfree(buffer);
 }
 
 static void binder_free_buf_locked(struct binder_alloc *alloc,
@@ -525,8 +587,8 @@ static void binder_free_buf_locked(struct binder_alloc *alloc,
 	BUG_ON(buffer->free);
 	BUG_ON(size > buffer_size);
 	BUG_ON(buffer->transaction != NULL);
-	BUG_ON((void *)buffer < alloc->buffer);
-	BUG_ON((void *)buffer > alloc->buffer + alloc->buffer_size);
+	BUG_ON(buffer->data < alloc->buffer);
+	BUG_ON(buffer->data > alloc->buffer + alloc->buffer_size);
 
 	if (buffer->async_transaction) {
 		alloc->free_async_space += size + sizeof(struct binder_buffer);

@@ -538,14 +600,12 @@ static void binder_free_buf_locked(struct binder_alloc *alloc,
 
 	binder_update_page_range(alloc, 0,
 		(void *)PAGE_ALIGN((uintptr_t)buffer->data),
-		(void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK),
-		NULL);
+		(void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK));
 
 	rb_erase(&buffer->rb_node, &alloc->allocated_buffers);
 	buffer->free = 1;
 	if (!list_is_last(&buffer->entry, &alloc->buffers)) {
-		struct binder_buffer *next = list_entry(buffer->entry.next,
-						struct binder_buffer, entry);
+		struct binder_buffer *next = binder_buffer_next(buffer);
 
 		if (next->free) {
 			rb_erase(&next->rb_node, &alloc->free_buffers);

@@ -553,8 +613,7 @@ static void binder_free_buf_locked(struct binder_alloc *alloc,
 		}
 	}
 	if (alloc->buffers.next != &buffer->entry) {
-		struct binder_buffer *prev = list_entry(buffer->entry.prev,
-						struct binder_buffer, entry);
+		struct binder_buffer *prev = binder_buffer_prev(buffer);
 
 		if (prev->free) {
 			binder_delete_free_buffer(alloc, buffer);
@@ -640,14 +699,14 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
 	}
 	alloc->buffer_size = vma->vm_end - vma->vm_start;
 
-	if (binder_update_page_range(alloc, 1, alloc->buffer,
-				     alloc->buffer + PAGE_SIZE, vma)) {
+	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
+	if (!buffer) {
 		ret = -ENOMEM;
-		failure_string = "alloc small buf";
-		goto err_alloc_small_buf_failed;
+		failure_string = "alloc buffer struct";
+		goto err_alloc_buf_struct_failed;
 	}
-	buffer = alloc->buffer;
-	INIT_LIST_HEAD(&alloc->buffers);
+
+	buffer->data = alloc->buffer;
 	list_add(&buffer->entry, &alloc->buffers);
 	buffer->free = 1;
 	binder_insert_free_buffer(alloc, buffer);

@@ -655,10 +714,12 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
 	barrier();
 	alloc->vma = vma;
 	alloc->vma_vm_mm = vma->vm_mm;
+	/* Same as mmgrab() in later kernel versions */
+	atomic_inc(&alloc->vma_vm_mm->mm_count);
 
 	return 0;
 
-err_alloc_small_buf_failed:
+err_alloc_buf_struct_failed:
 	kfree(alloc->pages);
 	alloc->pages = NULL;
 err_alloc_pages_failed:
@@ -678,14 +739,13 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
 {
 	struct rb_node *n;
 	int buffers, page_count;
+	struct binder_buffer *buffer;
 
 	BUG_ON(alloc->vma);
 
 	buffers = 0;
 	mutex_lock(&alloc->mutex);
 	while ((n = rb_first(&alloc->allocated_buffers))) {
-		struct binder_buffer *buffer;
-
 		buffer = rb_entry(n, struct binder_buffer, rb_node);
 
 		/* Transaction should already have been freed */

@@ -695,28 +755,44 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
 		buffers++;
 	}
 
+	while (!list_empty(&alloc->buffers)) {
+		buffer = list_first_entry(&alloc->buffers,
+					  struct binder_buffer, entry);
+		WARN_ON(!buffer->free);
+
+		list_del(&buffer->entry);
+		WARN_ON_ONCE(!list_empty(&alloc->buffers));
+		kfree(buffer);
+	}
+
 	page_count = 0;
 	if (alloc->pages) {
 		int i;
 
 		for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
 			void *page_addr;
+			bool on_lru;
 
-			if (!alloc->pages[i])
+			if (!alloc->pages[i].page_ptr)
 				continue;
 
+			on_lru = list_lru_del(&binder_alloc_lru,
+					      &alloc->pages[i].lru);
 			page_addr = alloc->buffer + i * PAGE_SIZE;
 			binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
-				     "%s: %d: page %d at %pK not freed\n",
-				     __func__, alloc->pid, i, page_addr);
+				     "%s: %d: page %d at %pK %s\n",
+				     __func__, alloc->pid, i, page_addr,
+				     on_lru ? "on lru" : "active");
 			unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
-			__free_page(alloc->pages[i]);
+			__free_page(alloc->pages[i].page_ptr);
 			page_count++;
 		}
 		kfree(alloc->pages);
 		vfree(alloc->buffer);
 	}
 	mutex_unlock(&alloc->mutex);
+	if (alloc->vma_vm_mm)
+		mmdrop(alloc->vma_vm_mm);
 
 	binder_alloc_debug(BINDER_DEBUG_OPEN_CLOSE,
 		     "%s: %d buffers %d, pages %d\n",
@@ -753,6 +829,34 @@ void binder_alloc_print_allocated(struct seq_file *m,
 	mutex_unlock(&alloc->mutex);
 }
 
+/**
+ * binder_alloc_print_pages() - print page usage
+ * @m:     seq_file for output via seq_printf()
+ * @alloc: binder_alloc for this proc
+ */
+void binder_alloc_print_pages(struct seq_file *m,
+			      struct binder_alloc *alloc)
+{
+	struct binder_lru_page *page;
+	int i;
+	int active = 0;
+	int lru = 0;
+	int free = 0;
+
+	mutex_lock(&alloc->mutex);
+	for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
+		page = &alloc->pages[i];
+		if (!page->page_ptr)
+			free++;
+		else if (list_empty(&page->lru))
+			active++;
+		else
+			lru++;
+	}
+	mutex_unlock(&alloc->mutex);
+	seq_printf(m, " pages: %d:%d:%d\n", active, lru, free);
+}
+
 /**
  * binder_alloc_get_allocated_count() - return count of buffers
  * @alloc: binder_alloc for this proc
@@ -783,9 +887,111 @@ int binder_alloc_get_allocated_count(struct binder_alloc *alloc)
 void binder_alloc_vma_close(struct binder_alloc *alloc)
 {
 	WRITE_ONCE(alloc->vma, NULL);
-	WRITE_ONCE(alloc->vma_vm_mm, NULL);
 }
 
+/**
+ * binder_alloc_free_page() - shrinker callback to free pages
+ * @item:   item to free
+ * @lock:   lock protecting the item
+ * @cb_arg: callback argument
+ *
+ * Called from list_lru_walk() in binder_shrink_scan() to free
+ * up pages when the system is under memory pressure.
+ */
+enum lru_status binder_alloc_free_page(struct list_head *item,
+				       struct list_lru_one *lru,
+				       spinlock_t *lock,
+				       void *cb_arg)
+{
+	struct mm_struct *mm = NULL;
+	struct binder_lru_page *page = container_of(item,
+						    struct binder_lru_page,
+						    lru);
+	struct binder_alloc *alloc;
+	uintptr_t page_addr;
+	size_t index;
+	struct vm_area_struct *vma;
+
+	alloc = page->alloc;
+	if (!mutex_trylock(&alloc->mutex))
+		goto err_get_alloc_mutex_failed;
+
+	if (!page->page_ptr)
+		goto err_page_already_freed;
+
+	index = page - alloc->pages;
+	page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE;
+	vma = alloc->vma;
+	if (vma) {
+		/* Same as mmget_not_zero() in later kernel versions */
+		if (!atomic_inc_not_zero(&alloc->vma_vm_mm->mm_users))
+			goto err_mmget;
+		mm = alloc->vma_vm_mm;
+		if (!down_write_trylock(&mm->mmap_sem))
+			goto err_down_write_mmap_sem_failed;
+	}
+
+	list_lru_isolate(lru, item);
+	spin_unlock(lock);
+
+	if (vma) {
+		trace_binder_unmap_user_start(alloc, index);
+
+		zap_page_range(vma,
+			       page_addr +
+			       alloc->user_buffer_offset,
+			       PAGE_SIZE, NULL);
+
+		trace_binder_unmap_user_end(alloc, index);
+
+		up_write(&mm->mmap_sem);
+		mmput(mm);
+	}
+
+	trace_binder_unmap_kernel_start(alloc, index);
+
+	unmap_kernel_range(page_addr, PAGE_SIZE);
+	__free_page(page->page_ptr);
+	page->page_ptr = NULL;
+
+	trace_binder_unmap_kernel_end(alloc, index);
+
+	spin_lock(lock);
+	mutex_unlock(&alloc->mutex);
+	return LRU_REMOVED_RETRY;
+
+err_down_write_mmap_sem_failed:
+	mmput_async(mm);
+err_mmget:
+err_page_already_freed:
+	mutex_unlock(&alloc->mutex);
+err_get_alloc_mutex_failed:
+	return LRU_SKIP;
+}
+
+static unsigned long
+binder_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
+{
+	unsigned long ret = list_lru_count(&binder_alloc_lru);
+	return ret;
+}
+
+static unsigned long
+binder_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
+{
+	unsigned long ret;
+
+	ret = list_lru_walk(&binder_alloc_lru, binder_alloc_free_page,
+			    NULL, sc->nr_to_scan);
+	return ret;
+}
+
+struct shrinker binder_shrinker = {
+	.count_objects = binder_shrink_count,
+	.scan_objects = binder_shrink_scan,
+	.seeks = DEFAULT_SEEKS,
+};
+
 /**
  * binder_alloc_init() - called by binder_open() for per-proc initialization
  * @alloc: binder_alloc for this proc
@@ -795,8 +1001,13 @@ void binder_alloc_vma_close(struct binder_alloc *alloc)
  */
 void binder_alloc_init(struct binder_alloc *alloc)
 {
-	alloc->tsk = current->group_leader;
 	alloc->pid = current->group_leader->pid;
 	mutex_init(&alloc->mutex);
+	INIT_LIST_HEAD(&alloc->buffers);
+}
+
+void binder_alloc_shrinker_init(void)
+{
+	list_lru_init(&binder_alloc_lru);
+	register_shrinker(&binder_shrinker);
 }
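Taken together, binder_alloc.c now exposes its page cache to the VM: binder_alloc_shrinker_init() is presumably called once from the driver's module init (binder_alloc_init() stays per-proc), and the "pages: %d:%d:%d" triple printed by binder_alloc_print_pages() reports active, lru-parked, and unallocated pages. Returning LRU_REMOVED_RETRY after re-taking the lru lock tells list_lru_walk() the item was removed and the lock was dropped in between, which is the standard contract for shrinker callbacks that may sleep.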
@@ -21,7 +21,9 @@
 #include <linux/rtmutex.h>
 #include <linux/vmalloc.h>
 #include <linux/slab.h>
+#include <linux/list_lru.h>
 
+extern struct list_lru binder_alloc_lru;
 struct binder_transaction;
 
 /**

@@ -57,7 +59,19 @@ struct binder_buffer {
 	size_t data_size;
 	size_t offsets_size;
 	size_t extra_buffers_size;
-	uint8_t data[0];
+	void *data;
 };
 
+/**
+ * struct binder_lru_page - page object used for binder shrinker
+ * @page_ptr: pointer to physical page in mmap'd space
+ * @lru:      entry in binder_alloc_lru
+ * @alloc:    binder_alloc for a proc
+ */
+struct binder_lru_page {
+	struct list_head lru;
+	struct page *page_ptr;
+	struct binder_alloc *alloc;
+};
+
 /**

@@ -75,8 +89,7 @@ struct binder_buffer {
 * @allocated_buffers:  rb tree of allocated buffers sorted by address
 * @free_async_space:   VA space available for async buffers. This is
 *                      initialized at mmap time to 1/2 the full VA space
- * @pages:              array of physical page addresses for each
- *                      page of mmap'd space
+ * @pages:              array of binder_lru_page
 * @buffer_size:        size of address space specified via mmap
 * @pid:                pid for associated binder_proc (invariant after init)
 *

@@ -87,7 +100,6 @@ struct binder_buffer {
 */
 struct binder_alloc {
 	struct mutex mutex;
-	struct task_struct *tsk;
 	struct vm_area_struct *vma;
 	struct mm_struct *vma_vm_mm;
 	void *buffer;

@@ -96,18 +108,27 @@ struct binder_alloc {
 	struct rb_root free_buffers;
 	struct rb_root allocated_buffers;
 	size_t free_async_space;
-	struct page **pages;
+	struct binder_lru_page *pages;
 	size_t buffer_size;
 	uint32_t buffer_free;
 	int pid;
 };
 
+#ifdef CONFIG_ANDROID_BINDER_IPC_SELFTEST
+void binder_selftest_alloc(struct binder_alloc *alloc);
+#else
+static inline void binder_selftest_alloc(struct binder_alloc *alloc) {}
+#endif
+enum lru_status binder_alloc_free_page(struct list_head *item,
+				       struct list_lru_one *lru,
+				       spinlock_t *lock, void *cb_arg);
 extern struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
 						  size_t data_size,
 						  size_t offsets_size,
 						  size_t extra_buffers_size,
 						  int is_async);
 extern void binder_alloc_init(struct binder_alloc *alloc);
+void binder_alloc_shrinker_init(void);
 extern void binder_alloc_vma_close(struct binder_alloc *alloc);
 extern struct binder_buffer *
 binder_alloc_prepare_to_free(struct binder_alloc *alloc,

@@ -120,6 +141,8 @@ extern void binder_alloc_deferred_release(struct binder_alloc *alloc);
 extern int binder_alloc_get_allocated_count(struct binder_alloc *alloc);
 extern void binder_alloc_print_allocated(struct seq_file *m,
 					 struct binder_alloc *alloc);
+void binder_alloc_print_pages(struct seq_file *m,
+			      struct binder_alloc *alloc);
 
 /**
 * binder_alloc_get_free_async_space() - get free space available for async
drivers/android/binder_alloc_selftest.c (new file, 310 lines)
@@ -0,0 +1,310 @@
/* binder_alloc_selftest.c
 *
 * Android IPC Subsystem
 *
 * Copyright (C) 2017 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mm_types.h>
#include <linux/err.h>
#include "binder_alloc.h"

#define BUFFER_NUM 5
#define BUFFER_MIN_SIZE (PAGE_SIZE / 8)

static bool binder_selftest_run = true;
static int binder_selftest_failures;
static DEFINE_MUTEX(binder_selftest_lock);

/**
 * enum buf_end_align_type - Page alignment of a buffer
 * end with regard to the end of the previous buffer.
 *
 * In the pictures below, buf2 refers to the buffer we
 * are aligning. buf1 refers to previous buffer by addr.
 * Symbol [ means the start of a buffer, ] means the end
 * of a buffer, and | means page boundaries.
 */
enum buf_end_align_type {
	/**
	 * @SAME_PAGE_UNALIGNED: The end of this buffer is on
	 * the same page as the end of the previous buffer and
	 * is not page aligned. Examples:
	 * buf1 ][ buf2 ][ ...
	 * buf1 ]|[ buf2 ][ ...
	 */
	SAME_PAGE_UNALIGNED = 0,
	/**
	 * @SAME_PAGE_ALIGNED: When the end of the previous buffer
	 * is not page aligned, the end of this buffer is on the
	 * same page as the end of the previous buffer and is page
	 * aligned. When the previous buffer is page aligned, the
	 * end of this buffer is aligned to the next page boundary.
	 * Examples:
	 * buf1 ][ buf2 ]| ...
	 * buf1 ]|[ buf2 ]| ...
	 */
	SAME_PAGE_ALIGNED,
	/**
	 * @NEXT_PAGE_UNALIGNED: The end of this buffer is on
	 * the page next to the end of the previous buffer and
	 * is not page aligned. Examples:
	 * buf1 ][ buf2 | buf2 ][ ...
	 * buf1 ]|[ buf2 | buf2 ][ ...
	 */
	NEXT_PAGE_UNALIGNED,
	/**
	 * @NEXT_PAGE_ALIGNED: The end of this buffer is on
	 * the page next to the end of the previous buffer and
	 * is page aligned. Examples:
	 * buf1 ][ buf2 | buf2 ]| ...
	 * buf1 ]|[ buf2 | buf2 ]| ...
	 */
	NEXT_PAGE_ALIGNED,
	/**
	 * @NEXT_NEXT_UNALIGNED: The end of this buffer is on
	 * the page that follows the page after the end of the
	 * previous buffer and is not page aligned. Examples:
	 * buf1 ][ buf2 | buf2 | buf2 ][ ...
	 * buf1 ]|[ buf2 | buf2 | buf2 ][ ...
	 */
	NEXT_NEXT_UNALIGNED,
	LOOP_END,
};

static void pr_err_size_seq(size_t *sizes, int *seq)
{
	int i;

	pr_err("alloc sizes: ");
	for (i = 0; i < BUFFER_NUM; i++)
		pr_cont("[%zu]", sizes[i]);
	pr_cont("\n");
	pr_err("free seq: ");
	for (i = 0; i < BUFFER_NUM; i++)
		pr_cont("[%d]", seq[i]);
	pr_cont("\n");
}

static bool check_buffer_pages_allocated(struct binder_alloc *alloc,
					 struct binder_buffer *buffer,
					 size_t size)
{
	void *page_addr, *end;
	int page_index;

	end = (void *)PAGE_ALIGN((uintptr_t)buffer->data + size);
	page_addr = buffer->data;
	for (; page_addr < end; page_addr += PAGE_SIZE) {
		page_index = (page_addr - alloc->buffer) / PAGE_SIZE;
		if (!alloc->pages[page_index].page_ptr ||
		    !list_empty(&alloc->pages[page_index].lru)) {
			pr_err("expect alloc but is %s at page index %d\n",
			       alloc->pages[page_index].page_ptr ?
			       "lru" : "free", page_index);
			return false;
		}
	}
	return true;
}

static void binder_selftest_alloc_buf(struct binder_alloc *alloc,
				      struct binder_buffer *buffers[],
				      size_t *sizes, int *seq)
{
	int i;

	for (i = 0; i < BUFFER_NUM; i++) {
		buffers[i] = binder_alloc_new_buf(alloc, sizes[i], 0, 0, 0);
		if (IS_ERR(buffers[i]) ||
		    !check_buffer_pages_allocated(alloc, buffers[i],
						  sizes[i])) {
			pr_err_size_seq(sizes, seq);
			binder_selftest_failures++;
		}
	}
}

static void binder_selftest_free_buf(struct binder_alloc *alloc,
				     struct binder_buffer *buffers[],
				     size_t *sizes, int *seq, size_t end)
{
	int i;

	for (i = 0; i < BUFFER_NUM; i++)
		binder_alloc_free_buf(alloc, buffers[seq[i]]);

	for (i = 0; i < end / PAGE_SIZE; i++) {
		/**
		 * Error message on a free page can be false positive
		 * if binder shrinker ran during binder_alloc_free_buf
		 * calls above.
		 */
		if (list_empty(&alloc->pages[i].lru)) {
			pr_err_size_seq(sizes, seq);
			pr_err("expect lru but is %s at page index %d\n",
			       alloc->pages[i].page_ptr ? "alloc" : "free", i);
			binder_selftest_failures++;
		}
	}
}

static void binder_selftest_free_page(struct binder_alloc *alloc)
{
	int i;
	unsigned long count;

	while ((count = list_lru_count(&binder_alloc_lru))) {
		list_lru_walk(&binder_alloc_lru, binder_alloc_free_page,
			      NULL, count);
	}

	for (i = 0; i < (alloc->buffer_size / PAGE_SIZE); i++) {
		if (alloc->pages[i].page_ptr) {
			pr_err("expect free but is %s at page index %d\n",
			       list_empty(&alloc->pages[i].lru) ?
			       "alloc" : "lru", i);
			binder_selftest_failures++;
		}
	}
}

static void binder_selftest_alloc_free(struct binder_alloc *alloc,
				       size_t *sizes, int *seq, size_t end)
{
	struct binder_buffer *buffers[BUFFER_NUM];

	binder_selftest_alloc_buf(alloc, buffers, sizes, seq);
	binder_selftest_free_buf(alloc, buffers, sizes, seq, end);

	/* Allocate from lru. */
	binder_selftest_alloc_buf(alloc, buffers, sizes, seq);
	if (list_lru_count(&binder_alloc_lru))
		pr_err("lru list should be empty but is not\n");

	binder_selftest_free_buf(alloc, buffers, sizes, seq, end);
	binder_selftest_free_page(alloc);
}

static bool is_dup(int *seq, int index, int val)
{
	int i;

	for (i = 0; i < index; i++) {
		if (seq[i] == val)
			return true;
	}
	return false;
}

/* Generate BUFFER_NUM factorial free orders. */
static void binder_selftest_free_seq(struct binder_alloc *alloc,
				     size_t *sizes, int *seq,
				     int index, size_t end)
{
	int i;

	if (index == BUFFER_NUM) {
		binder_selftest_alloc_free(alloc, sizes, seq, end);
		return;
	}
	for (i = 0; i < BUFFER_NUM; i++) {
		if (is_dup(seq, index, i))
			continue;
		seq[index] = i;
		binder_selftest_free_seq(alloc, sizes, seq, index + 1, end);
	}
}

static void binder_selftest_alloc_size(struct binder_alloc *alloc,
				       size_t *end_offset)
{
	int i;
	int seq[BUFFER_NUM] = {0};
	size_t front_sizes[BUFFER_NUM];
	size_t back_sizes[BUFFER_NUM];
	size_t last_offset, offset = 0;

	for (i = 0; i < BUFFER_NUM; i++) {
		last_offset = offset;
		offset = end_offset[i];
		front_sizes[i] = offset - last_offset;
		back_sizes[BUFFER_NUM - i - 1] = front_sizes[i];
	}
	/*
	 * Buffers share the first or last few pages.
	 * Only BUFFER_NUM - 1 buffer sizes are adjustable since
	 * we need one giant buffer before getting to the last page.
	 */
	back_sizes[0] += alloc->buffer_size - end_offset[BUFFER_NUM - 1];
	binder_selftest_free_seq(alloc, front_sizes, seq, 0,
				 end_offset[BUFFER_NUM - 1]);
	binder_selftest_free_seq(alloc, back_sizes, seq, 0, alloc->buffer_size);
}

static void binder_selftest_alloc_offset(struct binder_alloc *alloc,
					 size_t *end_offset, int index)
{
	int align;
	size_t end, prev;

	if (index == BUFFER_NUM) {
		binder_selftest_alloc_size(alloc, end_offset);
		return;
	}
	prev = index == 0 ? 0 : end_offset[index - 1];
	end = prev;

	BUILD_BUG_ON(BUFFER_MIN_SIZE * BUFFER_NUM >= PAGE_SIZE);

	for (align = SAME_PAGE_UNALIGNED; align < LOOP_END; align++) {
		if (align % 2)
			end = ALIGN(end, PAGE_SIZE);
		else
			end += BUFFER_MIN_SIZE;
		end_offset[index] = end;
		binder_selftest_alloc_offset(alloc, end_offset, index + 1);
	}
}

/**
 * binder_selftest_alloc() - Test alloc and free of buffer pages.
 * @alloc: Pointer to alloc struct.
 *
 * Allocate BUFFER_NUM buffers to cover all page alignment cases,
 * then free them in all orders possible. Check that pages are
 * correctly allocated, put onto lru when buffers are freed, and
 * are freed when binder_alloc_free_page is called.
 */
void binder_selftest_alloc(struct binder_alloc *alloc)
{
	size_t end_offset[BUFFER_NUM];

	if (!binder_selftest_run)
		return;
	mutex_lock(&binder_selftest_lock);
	if (!binder_selftest_run || !alloc->vma)
		goto done;
	pr_info("STARTED\n");
	binder_selftest_alloc_offset(alloc, end_offset, 0);
	binder_selftest_run = false;
	if (binder_selftest_failures > 0)
		pr_info("%d tests FAILED\n", binder_selftest_failures);
	else
		pr_info("PASSED\n");

done:
	mutex_unlock(&binder_selftest_lock);
}
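The recursive seq/is_dup scheme above enumerates every free order (BUFFER_NUM! = 120 of them) without any extra bookkeeping, for each of the 5^5 offset layouts produced by binder_selftest_alloc_offset(). A standalone userspace sketch of the same permutation idea — a hypothetical harness, not part of the patch:

#include <stdio.h>

#define N 3

static int seq[N];

/* Same role as the selftest's is_dup(): skip indices already used. */
static int is_dup(int index, int val)
{
	for (int i = 0; i < index; i++)
		if (seq[i] == val)
			return 1;
	return 0;
}

static void permute(int index)
{
	if (index == N) {
		for (int i = 0; i < N; i++)
			printf("%d ", seq[i]);
		printf("\n");
		return;
	}
	for (int i = 0; i < N; i++) {
		if (is_dup(index, i))
			continue;
		seq[index] = i;
		permute(index + 1);
	}
}

int main(void)
{
	permute(0);	/* prints all N! = 6 orders for N == 3 */
	return 0;
}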
@@ -291,6 +291,61 @@ TRACE_EVENT(binder_update_page_range,
 		  __entry->offset, __entry->size)
 );
 
+DECLARE_EVENT_CLASS(binder_lru_page_class,
+	TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
+	TP_ARGS(alloc, page_index),
+	TP_STRUCT__entry(
+		__field(int, proc)
+		__field(size_t, page_index)
+	),
+	TP_fast_assign(
+		__entry->proc = alloc->pid;
+		__entry->page_index = page_index;
+	),
+	TP_printk("proc=%d page_index=%zu",
+		  __entry->proc, __entry->page_index)
+);
+
+DEFINE_EVENT(binder_lru_page_class, binder_alloc_lru_start,
+	TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
+	TP_ARGS(alloc, page_index));
+
+DEFINE_EVENT(binder_lru_page_class, binder_alloc_lru_end,
+	TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
+	TP_ARGS(alloc, page_index));
+
+DEFINE_EVENT(binder_lru_page_class, binder_free_lru_start,
+	TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
+	TP_ARGS(alloc, page_index));
+
+DEFINE_EVENT(binder_lru_page_class, binder_free_lru_end,
+	TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
+	TP_ARGS(alloc, page_index));
+
+DEFINE_EVENT(binder_lru_page_class, binder_alloc_page_start,
+	TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
+	TP_ARGS(alloc, page_index));
+
+DEFINE_EVENT(binder_lru_page_class, binder_alloc_page_end,
+	TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
+	TP_ARGS(alloc, page_index));
+
+DEFINE_EVENT(binder_lru_page_class, binder_unmap_user_start,
+	TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
+	TP_ARGS(alloc, page_index));
+
+DEFINE_EVENT(binder_lru_page_class, binder_unmap_user_end,
+	TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
+	TP_ARGS(alloc, page_index));
+
+DEFINE_EVENT(binder_lru_page_class, binder_unmap_kernel_start,
+	TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
+	TP_ARGS(alloc, page_index));
+
+DEFINE_EVENT(binder_lru_page_class, binder_unmap_kernel_end,
+	TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
+	TP_ARGS(alloc, page_index));
+
 TRACE_EVENT(binder_command,
 	TP_PROTO(uint32_t cmd),
 	TP_ARGS(cmd),
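All ten events above share a single event class, so they print identically ("proc=%d page_index=%zu") and differ only in name; the matching trace_binder_alloc_lru_start(alloc, index)-style call sites appear in the binder_alloc.c hunks earlier in this commit.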
@@ -616,6 +616,7 @@ static const struct pci_device_id amd[] = {
 	{ PCI_VDEVICE(NVIDIA,	PCI_DEVICE_ID_NVIDIA_NFORCE_MCP73_IDE),	8 },
 	{ PCI_VDEVICE(NVIDIA,	PCI_DEVICE_ID_NVIDIA_NFORCE_MCP77_IDE),	8 },
 	{ PCI_VDEVICE(AMD,	PCI_DEVICE_ID_AMD_CS5536_IDE),		9 },
+	{ PCI_VDEVICE(AMD,	PCI_DEVICE_ID_AMD_CS5536_DEV_IDE),	9 },
 
 	{ },
 };
@@ -289,6 +289,7 @@ static int cs5536_init_one(struct pci_dev *dev, const struct pci_device_id *id)
 
 static const struct pci_device_id cs5536[] = {
 	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_CS5536_IDE), },
+	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_CS5536_DEV_IDE), },
 	{ },
 };
 
@@ -737,7 +737,7 @@ int bus_add_driver(struct device_driver *drv)
 
 out_unregister:
 	kobject_put(&priv->kobj);
-	kfree(drv->p);
+	/* drv->p is freed in driver_release()  */
 	drv->p = NULL;
 out_put_bus:
 	bus_put(bus);
@@ -2214,6 +2214,9 @@ static void skd_send_fitmsg(struct skd_device *skdev,
 	 */
 	qcmd |= FIT_QCMD_MSGSIZE_64;
 
+	/* Make sure skd_msg_buf is written before the doorbell is triggered. */
+	smp_wmb();
+
 	SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND);
 
 }

@@ -2260,6 +2263,9 @@ static void skd_send_special_fitmsg(struct skd_device *skdev,
 	qcmd = skspcl->mb_dma_address;
 	qcmd |= FIT_QCMD_QID_NORMAL + FIT_QCMD_MSGSIZE_128;
 
+	/* Make sure skd_msg_buf is written before the doorbell is triggered. */
+	smp_wmb();
+
 	SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND);
 }
 

@@ -4679,15 +4685,16 @@ static void skd_free_disk(struct skd_device *skdev)
 {
 	struct gendisk *disk = skdev->disk;
 
-	if (disk != NULL) {
-		struct request_queue *q = disk->queue;
+	if (disk && (disk->flags & GENHD_FL_UP))
+		del_gendisk(disk);
 
-		if (disk->flags & GENHD_FL_UP)
-			del_gendisk(disk);
-		if (q)
-			blk_cleanup_queue(q);
-		put_disk(disk);
+	if (skdev->queue) {
+		blk_cleanup_queue(skdev->queue);
+		skdev->queue = NULL;
+		disk->queue = NULL;
 	}
+
+	put_disk(disk);
 	skdev->disk = NULL;
 }
@@ -333,6 +333,7 @@ static const struct usb_device_id blacklist_table[] = {
 	{ USB_DEVICE(0x13d3, 0x3410), .driver_info = BTUSB_REALTEK },
 	{ USB_DEVICE(0x13d3, 0x3416), .driver_info = BTUSB_REALTEK },
 	{ USB_DEVICE(0x13d3, 0x3459), .driver_info = BTUSB_REALTEK },
+	{ USB_DEVICE(0x13d3, 0x3494), .driver_info = BTUSB_REALTEK },
 
 	/* Additional Realtek 8821AE Bluetooth devices */
 	{ USB_DEVICE(0x0b05, 0x17dc), .driver_info = BTUSB_REALTEK },
@@ -279,6 +279,13 @@ static int cpufreq_init(struct cpufreq_policy *policy)
 
 	policy->cpuinfo.transition_latency = transition_latency;
 
+	/*
+	 * Android: set default parameters for parity between schedutil and
+	 * schedfreq
+	 */
+	policy->up_transition_delay_us = transition_latency / NSEC_PER_USEC;
+	policy->down_transition_delay_us = 50000; /* 50ms */
+
 	return 0;
 
 out_free_cpufreq_table:
@@ -1825,6 +1825,7 @@ struct cpufreq_governor cpufreq_gov_interactive = {
 static int __init cpufreq_interactive_init(void)
 {
 	struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
+	int ret = 0;
 
 	spin_lock_init(&speedchange_cpumask_lock);
 	mutex_init(&gov_lock);

@@ -1841,7 +1842,12 @@ static int __init cpufreq_interactive_init(void)
 	/* NB: wake up so the thread does not look hung to the freezer */
 	wake_up_process_no_notif(speedchange_task);
 
-	return cpufreq_register_governor(&cpufreq_gov_interactive);
+	ret = cpufreq_register_governor(&cpufreq_gov_interactive);
+	if (ret) {
+		kthread_stop(speedchange_task);
+		put_task_struct(speedchange_task);
+	}
+	return ret;
 }
 
 #ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
@@ -36,7 +36,10 @@ struct adv7511 {
 	bool edid_read;
 
 	wait_queue_head_t wq;
+	struct work_struct hpd_work;
+
 	struct drm_encoder *encoder;
+	struct drm_connector connector;
 
 	bool embedded_sync;
 	enum adv7511_sync_polarity vsync_polarity;

@@ -48,6 +51,10 @@ struct adv7511 {
 	struct gpio_desc *gpio_pd;
 };
 
+static const int edid_i2c_addr = 0x7e;
+static const int packet_i2c_addr = 0x70;
+static const int cec_i2c_addr = 0x78;
+
 static struct adv7511 *encoder_to_adv7511(struct drm_encoder *encoder)
 {
 	return to_encoder_slave(encoder)->slave_priv;

@@ -362,12 +369,19 @@ static void adv7511_power_on(struct adv7511 *adv7511)
 {
 	adv7511->current_edid_segment = -1;
 
-	regmap_write(adv7511->regmap, ADV7511_REG_INT(0),
-		     ADV7511_INT0_EDID_READY);
-	regmap_write(adv7511->regmap, ADV7511_REG_INT(1),
-		     ADV7511_INT1_DDC_ERROR);
 	regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER,
 			   ADV7511_POWER_POWER_DOWN, 0);
+	if (adv7511->i2c_main->irq) {
+		/*
+		 * Documentation says the INT_ENABLE registers are reset in
+		 * POWER_DOWN mode. My 7511w preserved the bits, however.
+		 * Still, let's be safe and stick to the documentation.
+		 */
+		regmap_write(adv7511->regmap, ADV7511_REG_INT_ENABLE(0),
+			     ADV7511_INT0_EDID_READY);
+		regmap_write(adv7511->regmap, ADV7511_REG_INT_ENABLE(1),
+			     ADV7511_INT1_DDC_ERROR);
+	}
 
 	/*
	 * Per spec it is allowed to pulse the HDP signal to indicate that the

@@ -422,7 +436,27 @@ static bool adv7511_hpd(struct adv7511 *adv7511)
 	return false;
 }
 
-static int adv7511_irq_process(struct adv7511 *adv7511)
+static void adv7511_hpd_work(struct work_struct *work)
+{
+	struct adv7511 *adv7511 = container_of(work, struct adv7511, hpd_work);
+	enum drm_connector_status status;
+	unsigned int val;
+	int ret;
+
+	ret = regmap_read(adv7511->regmap, ADV7511_REG_STATUS, &val);
+	if (ret < 0)
+		status = connector_status_disconnected;
+	else if (val & ADV7511_STATUS_HPD)
+		status = connector_status_connected;
+	else
+		status = connector_status_disconnected;
+
+	if (adv7511->connector.status != status) {
+		adv7511->connector.status = status;
+		drm_kms_helper_hotplug_event(adv7511->connector.dev);
+	}
+}
+
+static int adv7511_irq_process(struct adv7511 *adv7511, bool process_hpd)
 {
 	unsigned int irq0, irq1;
 	int ret;

@@ -438,8 +472,8 @@ static int adv7511_irq_process(struct adv7511 *adv7511)
 	regmap_write(adv7511->regmap, ADV7511_REG_INT(0), irq0);
 	regmap_write(adv7511->regmap, ADV7511_REG_INT(1), irq1);
 
-	if (irq0 & ADV7511_INT0_HDP && adv7511->encoder)
-		drm_helper_hpd_irq_event(adv7511->encoder->dev);
+	if (process_hpd && irq0 & ADV7511_INT0_HDP && adv7511->encoder)
+		schedule_work(&adv7511->hpd_work);
 
 	if (irq0 & ADV7511_INT0_EDID_READY || irq1 & ADV7511_INT1_DDC_ERROR) {
 		adv7511->edid_read = true;

@@ -456,7 +490,7 @@ static irqreturn_t adv7511_irq_handler(int irq, void *devid)
 	struct adv7511 *adv7511 = devid;
 	int ret;
 
-	ret = adv7511_irq_process(adv7511);
+	ret = adv7511_irq_process(adv7511, true);
 	return ret < 0 ? IRQ_NONE : IRQ_HANDLED;
 }
 

@@ -473,7 +507,7 @@ static int adv7511_wait_for_edid(struct adv7511 *adv7511, int timeout)
 			adv7511->edid_read, msecs_to_jiffies(timeout));
 	} else {
 		for (; timeout > 0; timeout -= 25) {
-			ret = adv7511_irq_process(adv7511);
+			ret = adv7511_irq_process(adv7511, false);
 			if (ret < 0)
 				break;
 

@@ -567,13 +601,18 @@ static int adv7511_get_modes(struct drm_encoder *encoder,
 
 	/* Reading the EDID only works if the device is powered */
 	if (!adv7511->powered) {
-		regmap_write(adv7511->regmap, ADV7511_REG_INT(0),
-			     ADV7511_INT0_EDID_READY);
-		regmap_write(adv7511->regmap, ADV7511_REG_INT(1),
-			     ADV7511_INT1_DDC_ERROR);
 		regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER,
 				   ADV7511_POWER_POWER_DOWN, 0);
+		if (adv7511->i2c_main->irq) {
+			regmap_write(adv7511->regmap, ADV7511_REG_INT_ENABLE(0),
+				     ADV7511_INT0_EDID_READY);
+			regmap_write(adv7511->regmap, ADV7511_REG_INT_ENABLE(1),
+				     ADV7511_INT1_DDC_ERROR);
+		}
 		adv7511->current_edid_segment = -1;
+		/* Reset the EDID_I2C_ADDR register as it might be cleared */
+		regmap_write(adv7511->regmap, ADV7511_REG_EDID_I2C_ADDR,
+			     edid_i2c_addr);
 	}
 
 	edid = drm_do_get_edid(connector, adv7511_get_edid_block, adv7511);

@@ -849,10 +888,6 @@ static int adv7511_parse_dt(struct device_node *np,
 	return 0;
 }
 
-static const int edid_i2c_addr = 0x7e;
-static const int packet_i2c_addr = 0x70;
-static const int cec_i2c_addr = 0x78;
-
 static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
 {
 	struct adv7511_link_config link_config;

@@ -913,6 +948,8 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
 	if (!adv7511->i2c_edid)
 		return -ENOMEM;
 
+	INIT_WORK(&adv7511->hpd_work, adv7511_hpd_work);
+
 	if (i2c->irq) {
 		init_waitqueue_head(&adv7511->wq);
 
@@ -635,7 +635,8 @@ hsw_unclaimed_reg_detect(struct drm_i915_private *dev_priv)
 			  "enabling oneshot unclaimed register reporting. "
 			  "Please use i915.mmio_debug=N for more information.\n");
 		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
-		i915.mmio_debug = mmio_debug_once--;
+		i915.mmio_debug = mmio_debug_once;
+		mmio_debug_once = false;
 	}
 }
 
@@ -180,6 +180,10 @@ nvkm_pci_new_(const struct nvkm_pci_func *func, struct nvkm_device *device,
 		}
 	}
 
+#ifdef __BIG_ENDIAN
+	pci->msi = false;
+#endif
+
 	pci->msi = nvkm_boolopt(device->cfgopt, "NvMSI", pci->msi);
 	if (pci->msi && func->msi_rearm) {
 		pci->msi = pci_enable_msi(pci->pdev) == 0;
@@ -612,7 +612,7 @@ static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
 		} else {
 			pr_err("Failed to fill pool (%p)\n", pool);
 			/* If we have any pages left put them to the pool. */
-			list_for_each_entry(p, &pool->list, lru) {
+			list_for_each_entry(p, &new_pages, lru) {
 				++cpages;
 			}
 			list_splice(&new_pages, &pool->list);
@@ -72,6 +72,16 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa2a6),
 		.driver_data = (kernel_ulong_t)0,
 	},
+	{
+		/* Cannon Lake H */
+		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa326),
+		.driver_data = (kernel_ulong_t)0,
+	},
+	{
+		/* Cannon Lake LP */
+		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x9da6),
+		.driver_data = (kernel_ulong_t)0,
+	},
 	{ 0 },
 };
 
@@ -339,8 +339,10 @@ static int ismt_process_desc(const struct ismt_desc *desc,
 		break;
 	case I2C_SMBUS_BLOCK_DATA:
 	case I2C_SMBUS_I2C_BLOCK_DATA:
-		memcpy(&data->block[1], dma_buffer, desc->rxbytes);
-		data->block[0] = desc->rxbytes;
+		if (desc->rxbytes != dma_buffer[0] + 1)
+			return -EMSGSIZE;
+
+		memcpy(data->block, dma_buffer, desc->rxbytes);
 		break;
 	}
 	return 0;
@@ -786,10 +786,6 @@ static int jz4780_i2c_probe(struct platform_device *pdev)
 
 	jz4780_i2c_writew(i2c, JZ4780_I2C_INTM, 0x0);
 
-	i2c->cmd = 0;
-	memset(i2c->cmd_buf, 0, BUFSIZE);
-	memset(i2c->data_buf, 0, BUFSIZE);
-
 	i2c->irq = platform_get_irq(pdev, 0);
 	ret = devm_request_irq(&pdev->dev, i2c->irq, jz4780_i2c_irq, 0,
 			       dev_name(&pdev->dev), i2c);
@@ -381,8 +381,8 @@ int trackpoint_detect(struct psmouse *psmouse, bool set_properties)
 		return 0;
 
 	if (trackpoint_read(&psmouse->ps2dev, TP_EXT_BTN, &button_info)) {
-		psmouse_warn(psmouse, "failed to get extended button data\n");
-		button_info = 0;
+		psmouse_warn(psmouse, "failed to get extended button data, assuming 3 buttons\n");
+		button_info = 0x33;
 	}
 
 	psmouse->private = kzalloc(sizeof(struct trackpoint_data), GFP_KERNEL);
@@ -904,6 +904,13 @@ static const struct dmi_system_id __initconst i8042_dmi_kbdreset_table[] = {
 			DMI_MATCH(DMI_PRODUCT_NAME, "P34"),
 		},
 	},
+	{
+		/* Gigabyte P57 - Elantech touchpad */
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "P57"),
+		},
+	},
 	{
 		/* Schenker XMG C504 - Elantech touchpad */
 		.matches = {
@@ -915,8 +915,11 @@ static int __init gic_of_init(struct device_node *node,
 		gic_len = resource_size(&res);
 	}
 
-	if (mips_cm_present())
+	if (mips_cm_present()) {
 		write_gcr_gic_base(gic_base | CM_GCR_GIC_BASE_GICEN_MSK);
+		/* Ensure GIC region is enabled before trying to access it */
+		__sync();
+	}
 	gic_present = true;
 
 	__gic_init(gic_base, gic_len, cpu_vec, 0, node);
@@ -333,6 +333,7 @@ struct cached_dev {
 	/* Limit number of writeback bios in flight */
 	struct semaphore	in_flight;
 	struct task_struct	*writeback_thread;
+	struct workqueue_struct	*writeback_write_wq;
 
 	struct keybuf		writeback_keys;
 
@@ -196,12 +196,12 @@ static void bch_data_insert_start(struct closure *cl)
 	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
 	struct bio *bio = op->bio, *n;
 
-	if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0)
-		wake_up_gc(op->c);
-
 	if (op->bypass)
 		return bch_data_invalidate(cl);
 
+	if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0)
+		wake_up_gc(op->c);
+
 	/*
	 * Journal writes are marked REQ_FLUSH; if the original write was a
	 * flush, it'll wait on the journal write.
@@ -1023,7 +1023,7 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c)
 	}
 
 	if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) {
-		bch_sectors_dirty_init(dc);
+		bch_sectors_dirty_init(&dc->disk);
 		atomic_set(&dc->has_dirty, 1);
 		atomic_inc(&dc->count);
 		bch_writeback_queue(dc);

@@ -1056,6 +1056,8 @@ static void cached_dev_free(struct closure *cl)
 	cancel_delayed_work_sync(&dc->writeback_rate_update);
 	if (!IS_ERR_OR_NULL(dc->writeback_thread))
 		kthread_stop(dc->writeback_thread);
+	if (dc->writeback_write_wq)
+		destroy_workqueue(dc->writeback_write_wq);
 
 	mutex_lock(&bch_register_lock);
 

@@ -1227,6 +1229,7 @@ static int flash_dev_run(struct cache_set *c, struct uuid_entry *u)
 		goto err;
 
 	bcache_device_attach(d, c, u - c->uuids);
+	bch_sectors_dirty_init(d);
 	bch_flash_dev_request_init(d);
 	add_disk(d->disk);
 

@@ -1959,6 +1962,8 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
 		else
 			err = "device busy";
 		mutex_unlock(&bch_register_lock);
+		if (!IS_ERR(bdev))
+			bdput(bdev);
 		if (attr == &ksysfs_register_quiet)
 			goto out;
 	}
@@ -191,7 +191,7 @@ STORE(__cached_dev)
 {
 	struct cached_dev *dc = container_of(kobj, struct cached_dev,
 					     disk.kobj);
-	unsigned v = size;
+	ssize_t v = size;
 	struct cache_set *c;
 	struct kobj_uevent_env *env;
 

@@ -226,7 +226,7 @@ STORE(__cached_dev)
 		bch_cached_dev_run(dc);
 
 	if (attr == &sysfs_cache_mode) {
-		ssize_t v = bch_read_string_list(buf, bch_cache_modes + 1);
+		v = bch_read_string_list(buf, bch_cache_modes + 1);
 
 		if (v < 0)
 			return v;
@@ -73,24 +73,44 @@ STRTO_H(strtouint, unsigned int)
 STRTO_H(strtoll, long long)
 STRTO_H(strtoull, unsigned long long)
 
+/**
+ * bch_hprint() - formats @v to human readable string for sysfs.
+ *
+ * @v - signed 64 bit integer
+ * @buf - the (at least 8 byte) buffer to format the result into.
+ *
+ * Returns the number of bytes used by format.
+ */
 ssize_t bch_hprint(char *buf, int64_t v)
 {
 	static const char units[] = "?kMGTPEZY";
-	char dec[4] = "";
-	int u, t = 0;
-
-	for (u = 0; v >= 1024 || v <= -1024; u++) {
-		t = v & ~(~0 << 10);
-		v >>= 10;
-	}
-
-	if (!u)
-		return sprintf(buf, "%llu", v);
-
-	if (v < 100 && v > -100)
-		snprintf(dec, sizeof(dec), ".%i", t / 100);
-
-	return sprintf(buf, "%lli%s%c", v, dec, units[u]);
+	int u = 0, t;
+
+	uint64_t q;
+
+	if (v < 0)
+		q = -v;
+	else
+		q = v;
+
+	/* For as long as the number is more than 3 digits, but at least
+	 * once, shift right / divide by 1024.  Keep the remainder for
+	 * a digit after the decimal point.
+	 */
+	do {
+		u++;
+
+		t = q & ~(~0 << 10);
+		q >>= 10;
+	} while (q >= 1000);
+
+	if (v < 0)
+		/* '-', up to 3 digits, '.', 1 digit, 1 character, null;
+		 * yields 8 bytes.
+		 */
+		return sprintf(buf, "-%llu.%i%c", q, t * 10 / 1024, units[u]);
+	else
+		return sprintf(buf, "%llu.%i%c", q, t * 10 / 1024, units[u]);
 }
 
 ssize_t bch_snprint_string_list(char *buf, size_t size, const char * const list[],
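The rewritten bch_hprint() always divides at least once and keeps three significant digits plus one decimal digit, fixing the old version's unit-overrun crash on very large values. A hypothetical userspace harness (not part of the patch) that mirrors its loop and rounding:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	static const char units[] = "?kMGTPEZY";
	int64_t v = 1536;	/* expect "1.5k": 1536 = 1024 + 512 */
	uint64_t q = v < 0 ? (uint64_t)-v : (uint64_t)v;
	int u = 0, t;

	do {
		u++;
		t = q & ~(~0 << 10);	/* remainder mod 1024 */
		q >>= 10;		/* divide by 1024 */
	} while (q >= 1000);

	/* t * 10 / 1024 maps the remainder onto a single decimal digit */
	printf("%llu.%i%c\n", (unsigned long long)q, t * 10 / 1024, units[u]);
	return 0;
}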
@@ -21,7 +21,8 @@
 static void __update_writeback_rate(struct cached_dev *dc)
 {
 	struct cache_set *c = dc->disk.c;
-	uint64_t cache_sectors = c->nbuckets * c->sb.bucket_size;
+	uint64_t cache_sectors = c->nbuckets * c->sb.bucket_size -
+				bcache_flash_devs_sectors_dirty(c);
 	uint64_t cache_dirty_target =
 		div_u64(cache_sectors * dc->writeback_percent, 100);
 

@@ -190,7 +191,7 @@ static void write_dirty(struct closure *cl)
 
 	closure_bio_submit(&io->bio, cl);
 
-	continue_at(cl, write_dirty_finish, system_wq);
+	continue_at(cl, write_dirty_finish, io->dc->writeback_write_wq);
 }
 
 static void read_dirty_endio(struct bio *bio)

@@ -210,7 +211,7 @@ static void read_dirty_submit(struct closure *cl)
 
 	closure_bio_submit(&io->bio, cl);
 
-	continue_at(cl, write_dirty, system_wq);
+	continue_at(cl, write_dirty, io->dc->writeback_write_wq);
 }
 
 static void read_dirty(struct cached_dev *dc)

@@ -488,17 +489,17 @@ static int sectors_dirty_init_fn(struct btree_op *_op, struct btree *b,
 	return MAP_CONTINUE;
 }
 
-void bch_sectors_dirty_init(struct cached_dev *dc)
+void bch_sectors_dirty_init(struct bcache_device *d)
 {
 	struct sectors_dirty_init op;
 
 	bch_btree_op_init(&op.op, -1);
-	op.inode = dc->disk.id;
+	op.inode = d->id;
 
-	bch_btree_map_keys(&op.op, dc->disk.c, &KEY(op.inode, 0, 0),
+	bch_btree_map_keys(&op.op, d->c, &KEY(op.inode, 0, 0),
 			   sectors_dirty_init_fn, 0);
 
-	dc->disk.sectors_dirty_last = bcache_dev_sectors_dirty(&dc->disk);
+	d->sectors_dirty_last = bcache_dev_sectors_dirty(d);
 }
 
 void bch_cached_dev_writeback_init(struct cached_dev *dc)

@@ -522,6 +523,11 @@ void bch_cached_dev_writeback_init(struct cached_dev *dc)
 
 int bch_cached_dev_writeback_start(struct cached_dev *dc)
 {
+	dc->writeback_write_wq = alloc_workqueue("bcache_writeback_wq",
+						WQ_MEM_RECLAIM, 0);
+	if (!dc->writeback_write_wq)
+		return -ENOMEM;
+
 	dc->writeback_thread = kthread_create(bch_writeback_thread, dc,
 					      "bcache_writeback");
 	if (IS_ERR(dc->writeback_thread))
@@ -14,6 +14,25 @@ static inline uint64_t bcache_dev_sectors_dirty(struct bcache_device *d)
 	return ret;
 }
 
+static inline uint64_t bcache_flash_devs_sectors_dirty(struct cache_set *c)
+{
+	uint64_t i, ret = 0;
+
+	mutex_lock(&bch_register_lock);
+
+	for (i = 0; i < c->nr_uuids; i++) {
+		struct bcache_device *d = c->devices[i];
+
+		if (!d || !UUID_FLASH_ONLY(&c->uuids[i]))
+			continue;
+		ret += bcache_dev_sectors_dirty(d);
+	}
+
+	mutex_unlock(&bch_register_lock);
+
+	return ret;
+}
+
 static inline unsigned offset_to_stripe(struct bcache_device *d,
 					uint64_t offset)
 {

@@ -85,7 +104,7 @@ static inline void bch_writeback_add(struct cached_dev *dc)
 
 void bcache_dev_sectors_dirty_add(struct cache_set *, unsigned, uint64_t, int);
 
-void bch_sectors_dirty_init(struct cached_dev *dc);
+void bch_sectors_dirty_init(struct bcache_device *);
 void bch_cached_dev_writeback_init(struct cached_dev *);
 int bch_cached_dev_writeback_start(struct cached_dev *);
 
@@ -1960,6 +1960,11 @@ int bitmap_resize(struct bitmap *bitmap, sector_t blocks,
long pages;
struct bitmap_page *new_bp;

if (bitmap->storage.file && !init) {
pr_info("md: cannot resize file-based bitmap\n");
return -EINVAL;
}

if (chunksize == 0) {
/* If there is enough space, leave the chunk size unchanged,
* else increase by factor of two until there is enough space.

@@ -5822,6 +5822,8 @@ static void raid5_do_work(struct work_struct *work)

spin_unlock_irq(&conf->device_lock);

r5l_flush_stripe_to_raid(conf->log);

async_tx_issue_pending_all();
blk_finish_plug(&plug);

@@ -2004,6 +2004,13 @@ int uvc_ctrl_add_mapping(struct uvc_video_chain *chain,
goto done;
}

/* Validate the user-provided bit-size and offset */
if (mapping->size > 32 ||
mapping->offset + mapping->size > ctrl->info.size * 8) {
ret = -EINVAL;
goto done;
}

list_for_each_entry(map, &ctrl->info.mappings, list) {
if (mapping->id == map->id) {
uvc_trace(UVC_TRACE_CONTROL, "Can't add mapping '%s', "

@@ -788,7 +788,8 @@ static int put_v4l2_event32(struct v4l2_event *kp, struct v4l2_event32 __user *u
copy_to_user(&up->u, &kp->u, sizeof(kp->u)) ||
put_user(kp->pending, &up->pending) ||
put_user(kp->sequence, &up->sequence) ||
compat_put_timespec(&kp->timestamp, &up->timestamp) ||
put_user(kp->timestamp.tv_sec, &up->timestamp.tv_sec) ||
put_user(kp->timestamp.tv_nsec, &up->timestamp.tv_nsec) ||
put_user(kp->id, &up->id) ||
copy_to_user(up->reserved, kp->reserved, 8 * sizeof(__u32)))
return -EFAULT;

@@ -3676,7 +3676,7 @@ static noinline void gfar_update_link_state(struct gfar_private *priv)
u32 tempval1 = gfar_read(&regs->maccfg1);
u32 tempval = gfar_read(&regs->maccfg2);
u32 ecntrl = gfar_read(&regs->ecntrl);
u32 tx_flow_oldval = (tempval & MACCFG1_TX_FLOW);
u32 tx_flow_oldval = (tempval1 & MACCFG1_TX_FLOW);

if (phydev->duplex != priv->oldduplex) {
if (!(phydev->duplex))

@@ -724,7 +724,7 @@ static void ql_build_coredump_seg_header(
seg_hdr->cookie = MPI_COREDUMP_COOKIE;
seg_hdr->segNum = seg_number;
seg_hdr->segSize = seg_size;
memcpy(seg_hdr->description, desc, (sizeof(seg_hdr->description)) - 1);
strncpy(seg_hdr->description, desc, (sizeof(seg_hdr->description)) - 1);
}

/*

@@ -541,9 +541,6 @@ void phy_stop_machine(struct phy_device *phydev)
if (phydev->state > PHY_UP && phydev->state != PHY_HALTED)
phydev->state = PHY_UP;
mutex_unlock(&phydev->lock);

/* Now we can run the state machine synchronously */
phy_state_machine(&phydev->state_queue.work);
}

/**

@@ -3740,7 +3740,7 @@ int mwifiex_init_channel_scan_gap(struct mwifiex_adapter *adapter)
if (adapter->config_bands & BAND_A)
n_channels_a = mwifiex_band_5ghz.n_channels;

adapter->num_in_chan_stats = max_t(u32, n_channels_bg, n_channels_a);
adapter->num_in_chan_stats = n_channels_bg + n_channels_a;
adapter->chan_stats = vmalloc(sizeof(*adapter->chan_stats) *
adapter->num_in_chan_stats);

@@ -2170,6 +2170,12 @@ mwifiex_update_chan_statistics(struct mwifiex_private *priv,
sizeof(struct mwifiex_chan_stats);

for (i = 0 ; i < num_chan; i++) {
if (adapter->survey_idx >= adapter->num_in_chan_stats) {
mwifiex_dbg(adapter, WARN,
"FW reported too many channel results (max %d)\n",
adapter->num_in_chan_stats);
return;
}
chan_stats.chan_num = fw_chan_stats->chan_num;
chan_stats.bandcfg = fw_chan_stats->bandcfg;
chan_stats.flags = fw_chan_stats->flags;

@@ -488,7 +488,7 @@ int p54_scan(struct p54_common *priv, u16 mode, u16 dwell)

entry += sizeof(__le16);
chan->pa_points_per_curve = 8;
memset(chan->curve_data, 0, sizeof(*chan->curve_data));
memset(chan->curve_data, 0, sizeof(chan->curve_data));
memcpy(chan->curve_data, entry,
sizeof(struct p54_pa_curve_data_sample) *
min((u8)8, curve_data->points_per_channel));

@@ -2269,7 +2269,7 @@ int rtl_pci_probe(struct pci_dev *pdev,
/* find adapter */
if (!_rtl_pci_find_adapter(pdev, hw)) {
err = -ENODEV;
goto fail3;
goto fail2;
}

/* Init IO handler */

@@ -2339,10 +2339,10 @@ fail3:
pci_set_drvdata(pdev, NULL);
rtl_deinit_core(hw);

fail2:
if (rtlpriv->io.pci_mem_start != 0)
pci_iounmap(pdev, (void __iomem *)rtlpriv->io.pci_mem_start);

fail2:
pci_release_regions(pdev);
complete(&rtlpriv->firmware_loading_complete);

@@ -1567,6 +1567,7 @@ struct ieee80211_hw *wl1251_alloc_hw(void)

wl->state = WL1251_STATE_OFF;
mutex_init(&wl->mutex);
spin_lock_init(&wl->wl_lock);

wl->tx_mgmt_frm_rate = DEFAULT_HW_GEN_TX_RATE;
wl->tx_mgmt_frm_mod = DEFAULT_HW_GEN_MODULATION_TYPE;

@@ -1062,6 +1062,8 @@ int shpc_init(struct controller *ctrl, struct pci_dev *pdev)
if (rc) {
ctrl_info(ctrl, "Can't get msi for the hotplug controller\n");
ctrl_info(ctrl, "Use INTx for the hotplug controller\n");
} else {
pci_set_master(pdev);
}

rc = request_irq(ctrl->pci_dev->irq, shpc_isr, IRQF_SHARED,

@@ -3,7 +3,7 @@
*
* Debug traces for zfcp.
*
* Copyright IBM Corp. 2002, 2016
* Copyright IBM Corp. 2002, 2017
*/

#define KMSG_COMPONENT "zfcp"

@@ -447,6 +447,7 @@ static u16 zfcp_dbf_san_res_cap_len_if_gpn_ft(char *tag,
struct fc_ct_hdr *reqh = sg_virt(ct_els->req);
struct fc_ns_gid_ft *reqn = (struct fc_ns_gid_ft *)(reqh + 1);
struct scatterlist *resp_entry = ct_els->resp;
struct fc_ct_hdr *resph;
struct fc_gpn_ft_resp *acc;
int max_entries, x, last = 0;

@@ -473,6 +474,13 @@
return len; /* not GPN_FT response so do not cap */

acc = sg_virt(resp_entry);

/* cap all but accept CT responses to at least the CT header */
resph = (struct fc_ct_hdr *)acc;
if ((ct_els->status) ||
(resph->ct_cmd != cpu_to_be16(FC_FS_ACC)))
return max(FC_CT_HDR_LEN, ZFCP_DBF_SAN_MAX_PAYLOAD);

max_entries = (reqh->ct_mr_size * 4 / sizeof(struct fc_gpn_ft_resp))
+ 1 /* zfcp_fc_scan_ports: bytes correct, entries off-by-one
* to account for header as 1st pseudo "entry" */;

@@ -555,8 +563,8 @@ void zfcp_dbf_scsi(char *tag, int level, struct scsi_cmnd *sc,
rec->scsi_retries = sc->retries;
rec->scsi_allowed = sc->allowed;
rec->scsi_id = sc->device->id;
/* struct zfcp_dbf_scsi needs to be updated to handle 64bit LUNs */
rec->scsi_lun = (u32)sc->device->lun;
rec->scsi_lun_64_hi = (u32)(sc->device->lun >> 32);
rec->host_scribble = (unsigned long)sc->host_scribble;

memcpy(rec->scsi_opcode, sc->cmnd,

@@ -564,19 +572,32 @@

if (fsf) {
rec->fsf_req_id = fsf->req_id;
rec->pl_len = FCP_RESP_WITH_EXT;
fcp_rsp = (struct fcp_resp_with_ext *)
&(fsf->qtcb->bottom.io.fcp_rsp);
/* mandatory parts of FCP_RSP IU in this SCSI record */
memcpy(&rec->fcp_rsp, fcp_rsp, FCP_RESP_WITH_EXT);
if (fcp_rsp->resp.fr_flags & FCP_RSP_LEN_VAL) {
fcp_rsp_info = (struct fcp_resp_rsp_info *) &fcp_rsp[1];
rec->fcp_rsp_info = fcp_rsp_info->rsp_code;
rec->pl_len += be32_to_cpu(fcp_rsp->ext.fr_rsp_len);
}
if (fcp_rsp->resp.fr_flags & FCP_SNS_LEN_VAL) {
rec->pl_len = min((u16)SCSI_SENSE_BUFFERSIZE,
(u16)ZFCP_DBF_PAY_MAX_REC);
zfcp_dbf_pl_write(dbf, sc->sense_buffer, rec->pl_len,
"fcp_sns", fsf->req_id);
rec->pl_len += be32_to_cpu(fcp_rsp->ext.fr_sns_len);
}
/* complete FCP_RSP IU in associated PAYload record
* but only if there are optional parts
*/
if (fcp_rsp->resp.fr_flags != 0)
zfcp_dbf_pl_write(
dbf, fcp_rsp,
/* at least one full PAY record
* but not beyond hardware response field
*/
min_t(u16, max_t(u16, rec->pl_len,
ZFCP_DBF_PAY_MAX_REC),
FSF_FCP_RSP_SIZE),
"fcp_riu", fsf->req_id);
}

debug_event(dbf->scsi, level, rec, sizeof(*rec));

@@ -2,7 +2,7 @@
* zfcp device driver
* debug feature declarations
*
* Copyright IBM Corp. 2008, 2016
* Copyright IBM Corp. 2008, 2017
*/

#ifndef ZFCP_DBF_H

@@ -204,7 +204,7 @@ enum zfcp_dbf_scsi_id {
* @id: unique number of recovery record type
* @tag: identifier string specifying the location of initiation
* @scsi_id: scsi device id
* @scsi_lun: scsi device logical unit number
* @scsi_lun: scsi device logical unit number, low part of 64 bit, old 32 bit
* @scsi_result: scsi result
* @scsi_retries: current retry number of scsi request
* @scsi_allowed: allowed retries

@@ -214,6 +214,7 @@ enum zfcp_dbf_scsi_id {
* @host_scribble: LLD specific data attached to SCSI request
* @pl_len: length of paload stored as zfcp_dbf_pay
* @fsf_rsp: response for fsf request
* @scsi_lun_64_hi: scsi device logical unit number, high part of 64 bit
*/
struct zfcp_dbf_scsi {
u8 id;

@@ -230,6 +231,7 @@ struct zfcp_dbf_scsi {
u64 host_scribble;
u16 pl_len;
struct fcp_resp_with_ext fcp_rsp;
u32 scsi_lun_64_hi;
} __packed;

/**

@@ -323,7 +325,11 @@ void zfcp_dbf_hba_fsf_response(struct zfcp_fsf_req *req)
{
struct fsf_qtcb *qtcb = req->qtcb;

if ((qtcb->prefix.prot_status != FSF_PROT_GOOD) &&
if (unlikely(req->status & (ZFCP_STATUS_FSFREQ_DISMISSED |
ZFCP_STATUS_FSFREQ_ERROR))) {
zfcp_dbf_hba_fsf_resp("fs_rerr", 3, req);

} else if ((qtcb->prefix.prot_status != FSF_PROT_GOOD) &&
(qtcb->prefix.prot_status != FSF_PROT_FSF_STATUS_PRESENTED)) {
zfcp_dbf_hba_fsf_resp("fs_perr", 1, req);

@@ -401,7 +407,8 @@ void zfcp_dbf_scsi_abort(char *tag, struct scsi_cmnd *scmd,
* @flag: indicates type of reset (Target Reset, Logical Unit Reset)
*/
static inline
void zfcp_dbf_scsi_devreset(char *tag, struct scsi_cmnd *scmnd, u8 flag)
void zfcp_dbf_scsi_devreset(char *tag, struct scsi_cmnd *scmnd, u8 flag,
struct zfcp_fsf_req *fsf_req)
{
char tmp_tag[ZFCP_DBF_TAG_LEN];

@@ -411,7 +418,7 @@ void zfcp_dbf_scsi_devreset(char *tag, struct scsi_cmnd *scmnd, u8 flag)
memcpy(tmp_tag, "lr_", 3);

memcpy(&tmp_tag[3], tag, 4);
_zfcp_dbf_scsi(tmp_tag, 1, scmnd, NULL);
_zfcp_dbf_scsi(tmp_tag, 1, scmnd, fsf_req);
}

/**

@@ -4,7 +4,7 @@
* Fibre Channel related definitions and inline functions for the zfcp
* device driver
*
* Copyright IBM Corp. 2009
* Copyright IBM Corp. 2009, 2017
*/

#ifndef ZFCP_FC_H

@@ -279,6 +279,10 @@ void zfcp_fc_eval_fcp_rsp(struct fcp_resp_with_ext *fcp_rsp,
!(rsp_flags & FCP_SNS_LEN_VAL) &&
fcp_rsp->resp.fr_status == SAM_STAT_GOOD)
set_host_byte(scsi, DID_ERROR);
} else if (unlikely(rsp_flags & FCP_RESID_OVER)) {
/* FCP_DL was not sufficient for SCSI data length */
if (fcp_rsp->resp.fr_status == SAM_STAT_GOOD)
set_host_byte(scsi, DID_ERROR);
}
}

@@ -928,8 +928,8 @@ static void zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *req)

switch (header->fsf_status) {
case FSF_GOOD:
zfcp_dbf_san_res("fsscth2", req);
ct->status = 0;
zfcp_dbf_san_res("fsscth2", req);
break;
case FSF_SERVICE_CLASS_NOT_SUPPORTED:
zfcp_fsf_class_not_supp(req);

@@ -1109,8 +1109,8 @@ static void zfcp_fsf_send_els_handler(struct zfcp_fsf_req *req)

switch (header->fsf_status) {
case FSF_GOOD:
zfcp_dbf_san_res("fsselh1", req);
send_els->status = 0;
zfcp_dbf_san_res("fsselh1", req);
break;
case FSF_SERVICE_CLASS_NOT_SUPPORTED:
zfcp_fsf_class_not_supp(req);

@@ -2258,7 +2258,8 @@ int zfcp_fsf_fcp_cmnd(struct scsi_cmnd *scsi_cmnd)
fcp_cmnd = (struct fcp_cmnd *) &req->qtcb->bottom.io.fcp_cmnd;
zfcp_fc_scsi_to_fcp(fcp_cmnd, scsi_cmnd, 0);

if (scsi_prot_sg_count(scsi_cmnd)) {
if ((scsi_get_prot_op(scsi_cmnd) != SCSI_PROT_NORMAL) &&
scsi_prot_sg_count(scsi_cmnd)) {
zfcp_qdio_set_data_div(qdio, &req->qdio_req,
scsi_prot_sg_count(scsi_cmnd));
retval = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req,

@@ -3,7 +3,7 @@
*
* Interface to Linux SCSI midlayer.
*
* Copyright IBM Corp. 2002, 2016
* Copyright IBM Corp. 2002, 2017
*/

#define KMSG_COMPONENT "zfcp"

@@ -273,25 +273,29 @@ static int zfcp_task_mgmt_function(struct scsi_cmnd *scpnt, u8 tm_flags)

zfcp_erp_wait(adapter);
ret = fc_block_scsi_eh(scpnt);
if (ret)
if (ret) {
zfcp_dbf_scsi_devreset("fiof", scpnt, tm_flags, NULL);
return ret;
}

if (!(atomic_read(&adapter->status) &
ZFCP_STATUS_COMMON_RUNNING)) {
zfcp_dbf_scsi_devreset("nres", scpnt, tm_flags);
zfcp_dbf_scsi_devreset("nres", scpnt, tm_flags, NULL);
return SUCCESS;
}
}
if (!fsf_req)
if (!fsf_req) {
zfcp_dbf_scsi_devreset("reqf", scpnt, tm_flags, NULL);
return FAILED;
}

wait_for_completion(&fsf_req->completion);

if (fsf_req->status & ZFCP_STATUS_FSFREQ_TMFUNCFAILED) {
zfcp_dbf_scsi_devreset("fail", scpnt, tm_flags);
zfcp_dbf_scsi_devreset("fail", scpnt, tm_flags, fsf_req);
retval = FAILED;
} else {
zfcp_dbf_scsi_devreset("okay", scpnt, tm_flags);
zfcp_dbf_scsi_devreset("okay", scpnt, tm_flags, fsf_req);
zfcp_scsi_forget_cmnds(zfcp_sdev, tm_flags);
}

@@ -66,6 +66,9 @@ const char *rnc_state_name(enum scis_sds_remote_node_context_states state)
{
static const char * const strings[] = RNC_STATES;

if (state >= ARRAY_SIZE(strings))
return "UNKNOWN";

return strings[state];
}
#undef C

@@ -1054,7 +1054,10 @@ stop_rr_fcf_flogi:
lpfc_sli4_unreg_all_rpis(vport);
}
}
lpfc_issue_reg_vfi(vport);

/* Do not register VFI if the driver aborted FLOGI */
if (!lpfc_error_lost_link(irsp))
lpfc_issue_reg_vfi(vport);
lpfc_nlp_put(ndlp);
goto out;
}

@@ -1824,9 +1824,12 @@ static void megasas_complete_outstanding_ioctls(struct megasas_instance *instanc
if (cmd_fusion->sync_cmd_idx != (u32)ULONG_MAX) {
cmd_mfi = instance->cmd_list[cmd_fusion->sync_cmd_idx];
if (cmd_mfi->sync_cmd &&
cmd_mfi->frame->hdr.cmd != MFI_CMD_ABORT)
(cmd_mfi->frame->hdr.cmd != MFI_CMD_ABORT)) {
cmd_mfi->frame->hdr.cmd_status =
MFI_STAT_WRONG_STATE;
megasas_complete_cmd(instance,
cmd_mfi, DID_OK);
}
}
}
} else {

@@ -5094,6 +5097,14 @@ megasas_register_aen(struct megasas_instance *instance, u32 seq_num,
prev_aen.word =
le32_to_cpu(instance->aen_cmd->frame->dcmd.mbox.w[1]);

if ((curr_aen.members.class < MFI_EVT_CLASS_DEBUG) ||
(curr_aen.members.class > MFI_EVT_CLASS_DEAD)) {
dev_info(&instance->pdev->dev,
"%s %d out of range class %d send by application\n",
__func__, __LINE__, curr_aen.members.class);
return 0;
}

/*
* A class whose enum value is smaller is inclusive of all
* higher values. If a PROGRESS (= -1) was previously

@@ -404,6 +404,8 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
return -EINVAL;
if (start > ha->optrom_size)
return -EINVAL;
if (size > ha->optrom_size - start)
size = ha->optrom_size - start;

mutex_lock(&ha->optrom_mutex);
switch (val) {

@@ -429,8 +431,7 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
}

ha->optrom_region_start = start;
ha->optrom_region_size = start + size > ha->optrom_size ?
ha->optrom_size - start : size;
ha->optrom_region_size = start + size;

ha->optrom_state = QLA_SREADING;
ha->optrom_buffer = vmalloc(ha->optrom_region_size);

@@ -503,8 +504,7 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
}

ha->optrom_region_start = start;
ha->optrom_region_size = start + size > ha->optrom_size ?
ha->optrom_size - start : size;
ha->optrom_region_size = start + size;

ha->optrom_state = QLA_SWRITING;
ha->optrom_buffer = vmalloc(ha->optrom_region_size);

@@ -133,7 +133,7 @@ struct sg_device; /* forward declarations */
struct sg_fd;

typedef struct sg_request { /* SG_MAX_QUEUE requests outstanding per file */
struct sg_request *nextrp; /* NULL -> tail request (slist) */
struct list_head entry; /* list entry */
struct sg_fd *parentfp; /* NULL -> not in use */
Sg_scatter_hold data; /* hold buffer, perhaps scatter list */
sg_io_hdr_t header; /* scsi command+info, see <scsi/sg.h> */

@@ -153,11 +153,11 @@ typedef struct sg_fd { /* holds the state of a file descriptor */
struct sg_device *parentdp; /* owning device */
wait_queue_head_t read_wait; /* queue read until command done */
rwlock_t rq_list_lock; /* protect access to list in req_arr */
struct mutex f_mutex; /* protect against changes in this fd */
int timeout; /* defaults to SG_DEFAULT_TIMEOUT */
int timeout_user; /* defaults to SG_DEFAULT_TIMEOUT_USER */
Sg_scatter_hold reserve; /* buffer held for this file descriptor */
unsigned save_scat_len; /* original length of trunc. scat. element */
Sg_request *headrp; /* head of request slist, NULL->empty */
struct list_head rq_list; /* head of request list */
struct fasync_struct *async_qp; /* used by asynchronous notification */
Sg_request req_arr[SG_MAX_QUEUE]; /* used as singly-linked list */
char low_dma; /* as in parent but possibly overridden to 1 */

@@ -166,6 +166,7 @@ typedef struct sg_fd { /* holds the state of a file descriptor */
unsigned char next_cmd_len; /* 0: automatic, >0: use on next write() */
char keep_orphan; /* 0 -> drop orphan (def), 1 -> keep for read() */
char mmap_called; /* 0 -> mmap() never called on this fd */
char res_in_use; /* 1 -> 'reserve' array in use */
struct kref f_ref;
struct execute_work ew;
} Sg_fd;

@@ -209,7 +210,6 @@ static void sg_remove_sfp(struct kref *);
static Sg_request *sg_get_rq_mark(Sg_fd * sfp, int pack_id);
static Sg_request *sg_add_request(Sg_fd * sfp);
static int sg_remove_request(Sg_fd * sfp, Sg_request * srp);
static int sg_res_in_use(Sg_fd * sfp);
static Sg_device *sg_get_dev(int dev);
static void sg_device_destroy(struct kref *kref);

@@ -625,6 +625,7 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
}
buf += SZ_SG_HEADER;
__get_user(opcode, buf);
mutex_lock(&sfp->f_mutex);
if (sfp->next_cmd_len > 0) {
cmd_size = sfp->next_cmd_len;
sfp->next_cmd_len = 0; /* reset so only this write() effected */

@@ -633,6 +634,7 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
if ((opcode >= 0xc0) && old_hdr.twelve_byte)
cmd_size = 12;
}
mutex_unlock(&sfp->f_mutex);
SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sdp,
"sg_write: scsi opcode=0x%02x, cmd_size=%d\n", (int) opcode, cmd_size));
/* Determine buffer size. */

@@ -732,7 +734,7 @@ sg_new_write(Sg_fd *sfp, struct file *file, const char __user *buf,
sg_remove_request(sfp, srp);
return -EINVAL; /* either MMAP_IO or DIRECT_IO (not both) */
}
if (sg_res_in_use(sfp)) {
if (sfp->res_in_use) {
sg_remove_request(sfp, srp);
return -EBUSY; /* reserve buffer already being used */
}

@@ -837,6 +839,39 @@ static int max_sectors_bytes(struct request_queue *q)
return max_sectors << 9;
}

static void
sg_fill_request_table(Sg_fd *sfp, sg_req_info_t *rinfo)
{
Sg_request *srp;
int val;
unsigned int ms;

val = 0;
list_for_each_entry(srp, &sfp->rq_list, entry) {
if (val > SG_MAX_QUEUE)
break;
rinfo[val].req_state = srp->done + 1;
rinfo[val].problem =
srp->header.masked_status &
srp->header.host_status &
srp->header.driver_status;
if (srp->done)
rinfo[val].duration =
srp->header.duration;
else {
ms = jiffies_to_msecs(jiffies);
rinfo[val].duration =
(ms > srp->header.duration) ?
(ms - srp->header.duration) : 0;
}
rinfo[val].orphan = srp->orphan;
rinfo[val].sg_io_owned = srp->sg_io_owned;
rinfo[val].pack_id = srp->header.pack_id;
rinfo[val].usr_ptr = srp->header.usr_ptr;
val++;
}
}

static long
sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
{

@@ -902,7 +937,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
return result;
if (val) {
sfp->low_dma = 1;
if ((0 == sfp->low_dma) && (0 == sg_res_in_use(sfp))) {
if ((0 == sfp->low_dma) && !sfp->res_in_use) {
val = (int) sfp->reserve.bufflen;
sg_remove_scat(sfp, &sfp->reserve);
sg_build_reserve(sfp, val);

@@ -948,7 +983,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
if (!access_ok(VERIFY_WRITE, ip, sizeof (int)))
return -EFAULT;
read_lock_irqsave(&sfp->rq_list_lock, iflags);
for (srp = sfp->headrp; srp; srp = srp->nextrp) {
list_for_each_entry(srp, &sfp->rq_list, entry) {
if ((1 == srp->done) && (!srp->sg_io_owned)) {
read_unlock_irqrestore(&sfp->rq_list_lock,
iflags);

@@ -961,7 +996,8 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
return 0;
case SG_GET_NUM_WAITING:
read_lock_irqsave(&sfp->rq_list_lock, iflags);
for (val = 0, srp = sfp->headrp; srp; srp = srp->nextrp) {
val = 0;
list_for_each_entry(srp, &sfp->rq_list, entry) {
if ((1 == srp->done) && (!srp->sg_io_owned))
++val;
}

@@ -977,12 +1013,18 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
return -EINVAL;
val = min_t(int, val,
max_sectors_bytes(sdp->device->request_queue));
mutex_lock(&sfp->f_mutex);
if (val != sfp->reserve.bufflen) {
if (sg_res_in_use(sfp) || sfp->mmap_called)
if (sfp->mmap_called ||
sfp->res_in_use) {
mutex_unlock(&sfp->f_mutex);
return -EBUSY;
}

sg_remove_scat(sfp, &sfp->reserve);
sg_build_reserve(sfp, val);
}
mutex_unlock(&sfp->f_mutex);
return 0;
case SG_GET_RESERVED_SIZE:
val = min_t(int, sfp->reserve.bufflen,

@@ -1023,42 +1065,15 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
return -EFAULT;
else {
sg_req_info_t *rinfo;
unsigned int ms;

rinfo = kmalloc(SZ_SG_REQ_INFO * SG_MAX_QUEUE,
GFP_KERNEL);
rinfo = kzalloc(SZ_SG_REQ_INFO * SG_MAX_QUEUE,
GFP_KERNEL);
if (!rinfo)
return -ENOMEM;
read_lock_irqsave(&sfp->rq_list_lock, iflags);
for (srp = sfp->headrp, val = 0; val < SG_MAX_QUEUE;
++val, srp = srp ? srp->nextrp : srp) {
memset(&rinfo[val], 0, SZ_SG_REQ_INFO);
if (srp) {
rinfo[val].req_state = srp->done + 1;
rinfo[val].problem =
srp->header.masked_status &
srp->header.host_status &
srp->header.driver_status;
if (srp->done)
rinfo[val].duration =
srp->header.duration;
else {
ms = jiffies_to_msecs(jiffies);
rinfo[val].duration =
(ms > srp->header.duration) ?
(ms - srp->header.duration) : 0;
}
rinfo[val].orphan = srp->orphan;
rinfo[val].sg_io_owned =
srp->sg_io_owned;
rinfo[val].pack_id =
srp->header.pack_id;
rinfo[val].usr_ptr =
srp->header.usr_ptr;
}
}
sg_fill_request_table(sfp, rinfo);
read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
result = __copy_to_user(p, rinfo,
result = __copy_to_user(p, rinfo,
SZ_SG_REQ_INFO * SG_MAX_QUEUE);
result = result ? -EFAULT : 0;
kfree(rinfo);

@@ -1164,7 +1179,7 @@ sg_poll(struct file *filp, poll_table * wait)
return POLLERR;
poll_wait(filp, &sfp->read_wait, wait);
read_lock_irqsave(&sfp->rq_list_lock, iflags);
for (srp = sfp->headrp; srp; srp = srp->nextrp) {
list_for_each_entry(srp, &sfp->rq_list, entry) {
/* if any read waiting, flag it */
if ((0 == res) && (1 == srp->done) && (!srp->sg_io_owned))
res = POLLIN | POLLRDNORM;

@@ -1245,6 +1260,7 @@ sg_mmap(struct file *filp, struct vm_area_struct *vma)
unsigned long req_sz, len, sa;
Sg_scatter_hold *rsv_schp;
int k, length;
int ret = 0;

if ((!filp) || (!vma) || (!(sfp = (Sg_fd *) filp->private_data)))
return -ENXIO;

@@ -1255,8 +1271,11 @@ sg_mmap(struct file *filp, struct vm_area_struct *vma)
if (vma->vm_pgoff)
return -EINVAL; /* want no offset */
rsv_schp = &sfp->reserve;
if (req_sz > rsv_schp->bufflen)
return -ENOMEM; /* cannot map more than reserved buffer */
mutex_lock(&sfp->f_mutex);
if (req_sz > rsv_schp->bufflen) {
ret = -ENOMEM; /* cannot map more than reserved buffer */
goto out;
}

sa = vma->vm_start;
length = 1 << (PAGE_SHIFT + rsv_schp->page_order);

@@ -1270,7 +1289,9 @@ sg_mmap(struct file *filp, struct vm_area_struct *vma)
vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
vma->vm_private_data = sfp;
vma->vm_ops = &sg_mmap_vm_ops;
return 0;
out:
mutex_unlock(&sfp->f_mutex);
return ret;
}

static void

@@ -1734,13 +1755,25 @@ sg_start_req(Sg_request *srp, unsigned char *cmd)
md = &map_data;

if (md) {
if (!sg_res_in_use(sfp) && dxfer_len <= rsv_schp->bufflen)
mutex_lock(&sfp->f_mutex);
if (dxfer_len <= rsv_schp->bufflen &&
!sfp->res_in_use) {
sfp->res_in_use = 1;
sg_link_reserve(sfp, srp, dxfer_len);
else {
} else if (hp->flags & SG_FLAG_MMAP_IO) {
res = -EBUSY; /* sfp->res_in_use == 1 */
if (dxfer_len > rsv_schp->bufflen)
res = -ENOMEM;
mutex_unlock(&sfp->f_mutex);
return res;
} else {
res = sg_build_indirect(req_schp, sfp, dxfer_len);
if (res)
if (res) {
mutex_unlock(&sfp->f_mutex);
return res;
}
}
mutex_unlock(&sfp->f_mutex);

md->pages = req_schp->pages;
md->page_order = req_schp->page_order;

@@ -2029,8 +2062,9 @@ sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp)
req_schp->pages = NULL;
req_schp->page_order = 0;
req_schp->sglist_len = 0;
sfp->save_scat_len = 0;
srp->res_used = 0;
/* Called without mutex lock to avoid deadlock */
sfp->res_in_use = 0;
}

static Sg_request *

@@ -2040,7 +2074,7 @@ sg_get_rq_mark(Sg_fd * sfp, int pack_id)
unsigned long iflags;

write_lock_irqsave(&sfp->rq_list_lock, iflags);
for (resp = sfp->headrp; resp; resp = resp->nextrp) {
list_for_each_entry(resp, &sfp->rq_list, entry) {
/* look for requests that are ready + not SG_IO owned */
if ((1 == resp->done) && (!resp->sg_io_owned) &&
((-1 == pack_id) || (resp->header.pack_id == pack_id))) {

@@ -2058,70 +2092,45 @@ sg_add_request(Sg_fd * sfp)
{
int k;
unsigned long iflags;
Sg_request *resp;
Sg_request *rp = sfp->req_arr;

write_lock_irqsave(&sfp->rq_list_lock, iflags);
resp = sfp->headrp;
if (!resp) {
memset(rp, 0, sizeof (Sg_request));
rp->parentfp = sfp;
resp = rp;
sfp->headrp = resp;
} else {
if (0 == sfp->cmd_q)
resp = NULL; /* command queuing disallowed */
else {
for (k = 0; k < SG_MAX_QUEUE; ++k, ++rp) {
if (!rp->parentfp)
break;
}
if (k < SG_MAX_QUEUE) {
memset(rp, 0, sizeof (Sg_request));
rp->parentfp = sfp;
while (resp->nextrp)
resp = resp->nextrp;
resp->nextrp = rp;
resp = rp;
} else
resp = NULL;
if (!list_empty(&sfp->rq_list)) {
if (!sfp->cmd_q)
goto out_unlock;

for (k = 0; k < SG_MAX_QUEUE; ++k, ++rp) {
if (!rp->parentfp)
break;
}
if (k >= SG_MAX_QUEUE)
goto out_unlock;
}
if (resp) {
resp->nextrp = NULL;
resp->header.duration = jiffies_to_msecs(jiffies);
}
memset(rp, 0, sizeof (Sg_request));
rp->parentfp = sfp;
rp->header.duration = jiffies_to_msecs(jiffies);
list_add_tail(&rp->entry, &sfp->rq_list);
write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
return resp;
return rp;
out_unlock:
write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
return NULL;
}

/* Return of 1 for found; 0 for not found */
static int
sg_remove_request(Sg_fd * sfp, Sg_request * srp)
{
Sg_request *prev_rp;
Sg_request *rp;
unsigned long iflags;
int res = 0;

if ((!sfp) || (!srp) || (!sfp->headrp))
if (!sfp || !srp || list_empty(&sfp->rq_list))
return res;
write_lock_irqsave(&sfp->rq_list_lock, iflags);
prev_rp = sfp->headrp;
if (srp == prev_rp) {
sfp->headrp = prev_rp->nextrp;
prev_rp->parentfp = NULL;
if (!list_empty(&srp->entry)) {
list_del(&srp->entry);
srp->parentfp = NULL;
res = 1;
} else {
while ((rp = prev_rp->nextrp)) {
if (srp == rp) {
prev_rp->nextrp = rp->nextrp;
rp->parentfp = NULL;
res = 1;
break;
}
prev_rp = rp;
}
}
write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
return res;

@@ -2140,8 +2149,9 @@ sg_add_sfp(Sg_device * sdp)

init_waitqueue_head(&sfp->read_wait);
rwlock_init(&sfp->rq_list_lock);

INIT_LIST_HEAD(&sfp->rq_list);
kref_init(&sfp->f_ref);
mutex_init(&sfp->f_mutex);
sfp->timeout = SG_DEFAULT_TIMEOUT;
sfp->timeout_user = SG_DEFAULT_TIMEOUT_USER;
sfp->force_packid = SG_DEF_FORCE_PACK_ID;

@@ -2180,10 +2190,13 @@ sg_remove_sfp_usercontext(struct work_struct *work)
{
struct sg_fd *sfp = container_of(work, struct sg_fd, ew.work);
struct sg_device *sdp = sfp->parentdp;
Sg_request *srp;

/* Cleanup any responses which were never read(). */
while (sfp->headrp)
sg_finish_rem_req(sfp->headrp);
while (!list_empty(&sfp->rq_list)) {
srp = list_first_entry(&sfp->rq_list, Sg_request, entry);
sg_finish_rem_req(srp);
}

if (sfp->reserve.bufflen > 0) {
SCSI_LOG_TIMEOUT(6, sg_printk(KERN_INFO, sdp,

@@ -2217,20 +2230,6 @@ sg_remove_sfp(struct kref *kref)
schedule_work(&sfp->ew.work);
}

static int
sg_res_in_use(Sg_fd * sfp)
{
const Sg_request *srp;
unsigned long iflags;

read_lock_irqsave(&sfp->rq_list_lock, iflags);
for (srp = sfp->headrp; srp; srp = srp->nextrp)
if (srp->res_used)
break;
read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
return srp ? 1 : 0;
}

#ifdef CONFIG_SCSI_PROC_FS
static int
sg_idr_max_id(int id, void *p, void *data)

@@ -2600,7 +2599,7 @@ static int sg_proc_seq_show_devstrs(struct seq_file *s, void *v)
/* must be called while holding sg_index_lock */
static void sg_proc_debug_helper(struct seq_file *s, Sg_device * sdp)
{
int k, m, new_interface, blen, usg;
int k, new_interface, blen, usg;
Sg_request *srp;
Sg_fd *fp;
const sg_io_hdr_t *hp;

@@ -2620,13 +2619,11 @@ static void sg_proc_debug_helper(struct seq_file *s, Sg_device * sdp)
seq_printf(s, " cmd_q=%d f_packid=%d k_orphan=%d closed=0\n",
(int) fp->cmd_q, (int) fp->force_packid,
(int) fp->keep_orphan);
for (m = 0, srp = fp->headrp;
srp != NULL;
++m, srp = srp->nextrp) {
list_for_each_entry(srp, &fp->rq_list, entry) {
hp = &srp->header;
new_interface = (hp->interface_id == '\0') ? 0 : 1;
if (srp->res_used) {
if (new_interface &&
if (new_interface &&
(SG_FLAG_MMAP_IO & hp->flags))
cp = " mmap>> ";
else

@@ -2657,7 +2654,7 @@ static void sg_proc_debug_helper(struct seq_file *s, Sg_device * sdp)
seq_printf(s, "ms sgat=%d op=0x%02x\n", usg,
(int) srp->data.cmd_opcode);
}
if (0 == m)
if (list_empty(&fp->rq_list))
seq_puts(s, " No requests active\n");
read_unlock(&fp->rq_list_lock);
}

@@ -1511,6 +1511,8 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
ret = storvsc_do_io(dev, cmd_request);

if (ret == -EAGAIN) {
if (payload_sz > sizeof(cmd_request->mpb))
kfree(payload);
/* no more space */
return SCSI_MLQUEUE_DEVICE_BUSY;
}

@@ -402,7 +402,7 @@ static void fiq_debugger_work(struct work_struct *work)
cmd += 6;
while (*cmd == ' ')
cmd++;
if ((cmd != '\0') && sysrq_on())
if ((*cmd != '\0') && sysrq_on())
kernel_restart(cmd);
else
kernel_restart(NULL);

@@ -414,7 +414,7 @@ void set_sense_data(struct rtsx_chip *chip, unsigned int lun, u8 err_code,
sense->ascq = ascq;
if (sns_key_info0 != 0) {
sense->sns_key_info[0] = SKSV | sns_key_info0;
sense->sns_key_info[1] = (sns_key_info1 & 0xf0) >> 8;
sense->sns_key_info[1] = (sns_key_info1 & 0xf0) >> 4;
sense->sns_key_info[2] = sns_key_info1 & 0x0f;
}
}

@@ -361,6 +361,32 @@ int tty_insert_flip_string_flags(struct tty_port *port,
}
EXPORT_SYMBOL(tty_insert_flip_string_flags);

/**
* __tty_insert_flip_char - Add one character to the tty buffer
* @port: tty port
* @ch: character
* @flag: flag byte
*
* Queue a single byte to the tty buffering, with an optional flag.
* This is the slow path of tty_insert_flip_char.
*/
int __tty_insert_flip_char(struct tty_port *port, unsigned char ch, char flag)
{
struct tty_buffer *tb;
int flags = (flag == TTY_NORMAL) ? TTYB_NORMAL : 0;

if (!__tty_buffer_request_room(port, 1, flags))
return 0;

tb = port->buf.tail;
if (~tb->flags & TTYB_NORMAL)
*flag_buf_ptr(tb, tb->used) = flag;
*char_buf_ptr(tb, tb->used++) = ch;

return 1;
}
EXPORT_SYMBOL(__tty_insert_flip_char);

/**
* tty_schedule_flip - push characters to ldisc
* @port: tty port to push from

@@ -519,6 +519,8 @@ static void async_completed(struct urb *urb)
if (as->status < 0 && as->bulk_addr && as->status != -ECONNRESET &&
as->status != -ENOENT)
cancel_bulk_urbs(ps, as->bulk_addr);

wake_up(&ps->wait);
spin_unlock(&ps->lock);

if (signr) {

@@ -526,8 +528,6 @@ static void async_completed(struct urb *urb)
put_pid(pid);
put_cred(cred);
}

wake_up(&ps->wait);
}

static void destroy_async(struct usb_dev_state *ps, struct list_head *list)

@@ -57,8 +57,9 @@ static const struct usb_device_id usb_quirk_list[] = {
/* Microsoft LifeCam-VX700 v2.0 */
{ USB_DEVICE(0x045e, 0x0770), .driver_info = USB_QUIRK_RESET_RESUME },

/* Logitech HD Pro Webcams C920 and C930e */
/* Logitech HD Pro Webcams C920, C920-C and C930e */
{ USB_DEVICE(0x046d, 0x082d), .driver_info = USB_QUIRK_DELAY_INIT },
{ USB_DEVICE(0x046d, 0x0841), .driver_info = USB_QUIRK_DELAY_INIT },
{ USB_DEVICE(0x046d, 0x0843), .driver_info = USB_QUIRK_DELAY_INIT },

/* Logitech ConferenceCam CC3000e */

@@ -217,6 +218,9 @@ static const struct usb_device_id usb_quirk_list[] = {
{ USB_DEVICE(0x1a0a, 0x0200), .driver_info =
USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL },

/* Corsair Strafe RGB */
{ USB_DEVICE(0x1b1c, 0x1b20), .driver_info = USB_QUIRK_DELAY_INIT },

/* Acer C120 LED Projector */
{ USB_DEVICE(0x1de1, 0xc102), .driver_info = USB_QUIRK_NO_LPM },

@@ -133,29 +133,30 @@ static int amd_chipset_sb_type_init(struct amd_chipset_info *pinfo)
pinfo->sb_type.gen = AMD_CHIPSET_SB700;
else if (rev >= 0x40 && rev <= 0x4f)
pinfo->sb_type.gen = AMD_CHIPSET_SB800;
}
pinfo->smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD,
0x145c, NULL);
if (pinfo->smbus_dev) {
pinfo->sb_type.gen = AMD_CHIPSET_TAISHAN;
} else {
pinfo->smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD,
PCI_DEVICE_ID_AMD_HUDSON2_SMBUS, NULL);

if (!pinfo->smbus_dev) {
pinfo->sb_type.gen = NOT_AMD_CHIPSET;
return 0;
if (pinfo->smbus_dev) {
rev = pinfo->smbus_dev->revision;
if (rev >= 0x11 && rev <= 0x14)
pinfo->sb_type.gen = AMD_CHIPSET_HUDSON2;
else if (rev >= 0x15 && rev <= 0x18)
pinfo->sb_type.gen = AMD_CHIPSET_BOLTON;
else if (rev >= 0x39 && rev <= 0x3a)
pinfo->sb_type.gen = AMD_CHIPSET_YANGTZE;
} else {
pinfo->smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD,
0x145c, NULL);
if (pinfo->smbus_dev) {
rev = pinfo->smbus_dev->revision;
pinfo->sb_type.gen = AMD_CHIPSET_TAISHAN;
} else {
pinfo->sb_type.gen = NOT_AMD_CHIPSET;
return 0;
}
}

rev = pinfo->smbus_dev->revision;
if (rev >= 0x11 && rev <= 0x14)
pinfo->sb_type.gen = AMD_CHIPSET_HUDSON2;
else if (rev >= 0x15 && rev <= 0x18)
pinfo->sb_type.gen = AMD_CHIPSET_BOLTON;
else if (rev >= 0x39 && rev <= 0x3a)
pinfo->sb_type.gen = AMD_CHIPSET_YANGTZE;
}

pinfo->sb_type.rev = rev;
return 1;
}

@@ -2023,6 +2023,7 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x02, 0x01) },
{ USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x00, 0x00) },
{ USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7d04, 0xff) }, /* D-Link DWM-158 */
{ USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7d0e, 0xff) }, /* D-Link DWM-157 C1 */
{ USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e19, 0xff), /* D-Link DWM-221 B1 */
.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
{ USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e35, 0xff), /* D-Link DWM-222 */

@@ -73,6 +73,8 @@ config FILE_LOCKING
for filesystems like NFS and for the flock() system
call. Disabling this option saves about 11k.

source "fs/crypto/Kconfig"

source "fs/notify/Kconfig"

source "fs/quota/Kconfig"

@@ -30,6 +30,7 @@ obj-$(CONFIG_EVENTFD) += eventfd.o
obj-$(CONFIG_USERFAULTFD) += userfaultfd.o
obj-$(CONFIG_AIO) += aio.o
obj-$(CONFIG_FS_DAX) += dax.o
obj-$(CONFIG_FS_ENCRYPTION) += crypto/
obj-$(CONFIG_FILE_LOCKING) += locks.o
obj-$(CONFIG_COMPAT) += compat.o compat_ioctl.o
obj-$(CONFIG_BINFMT_AOUT) += binfmt_aout.o

@@ -1727,6 +1727,8 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data)
goto restore;
}

btrfs_qgroup_rescan_resume(fs_info);

if (!fs_info->uuid_root) {
btrfs_info(fs_info, "creating UUID tree");
ret = btrfs_create_uuid_tree(fs_info);

@@ -108,7 +108,7 @@ const struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = {
},
};

const u64 const btrfs_raid_group[BTRFS_NR_RAID_TYPES] = {
const u64 btrfs_raid_group[BTRFS_NR_RAID_TYPES] = {
[BTRFS_RAID_RAID10] = BTRFS_BLOCK_GROUP_RAID10,
[BTRFS_RAID_RAID1] = BTRFS_BLOCK_GROUP_RAID1,
[BTRFS_RAID_DUP] = BTRFS_BLOCK_GROUP_DUP,

@@ -189,7 +189,7 @@ static int ceph_releasepage(struct page *page, gfp_t g)
/*
* read a single page, without unlocking it.
*/
static int readpage_nounlock(struct file *filp, struct page *page)
static int ceph_do_readpage(struct file *filp, struct page *page)
{
struct inode *inode = file_inode(filp);
struct ceph_inode_info *ci = ceph_inode(inode);

@@ -219,7 +219,7 @@ static int readpage_nounlock(struct file *filp, struct page *page)

err = ceph_readpage_from_fscache(inode, page);
if (err == 0)
goto out;
return -EINPROGRESS;

dout("readpage inode %p file %p page %p index %lu\n",
inode, filp, page, page->index);

@@ -249,8 +249,11 @@ out:

static int ceph_readpage(struct file *filp, struct page *page)
{
int r = readpage_nounlock(filp, page);
unlock_page(page);
int r = ceph_do_readpage(filp, page);
if (r != -EINPROGRESS)
unlock_page(page);
else
r = 0;
return r;
}

@@ -1094,7 +1097,7 @@ retry_locked:
goto retry_locked;
r = writepage_nounlock(page, NULL);
if (r < 0)
goto fail_nosnap;
goto fail_unlock;
goto retry_locked;
}

@@ -1122,11 +1125,14 @@ retry_locked:
}

/* we need to read it. */
r = readpage_nounlock(file, page);
if (r < 0)
goto fail_nosnap;
r = ceph_do_readpage(file, page);
if (r < 0) {
if (r == -EINPROGRESS)
return -EAGAIN;
goto fail_unlock;
}
goto retry_locked;
fail_nosnap:
fail_unlock:
unlock_page(page);
return r;
}

@@ -224,13 +224,7 @@ void ceph_fscache_unregister_inode_cookie(struct ceph_inode_info* ci)
fscache_relinquish_cookie(cookie, 0);
}

static void ceph_vfs_readpage_complete(struct page *page, void *data, int error)
{
if (!error)
SetPageUptodate(page);
}

static void ceph_vfs_readpage_complete_unlock(struct page *page, void *data, int error)
static void ceph_readpage_from_fscache_complete(struct page *page, void *data, int error)
{
if (!error)
SetPageUptodate(page);

@@ -259,7 +253,7 @@ int ceph_readpage_from_fscache(struct inode *inode, struct page *page)
return -ENOBUFS;

ret = fscache_read_or_alloc_page(ci->fscache, page,
ceph_vfs_readpage_complete, NULL,
ceph_readpage_from_fscache_complete, NULL,
GFP_KERNEL);

switch (ret) {

@@ -288,7 +282,7 @@ int ceph_readpages_from_fscache(struct inode *inode,
return -ENOBUFS;

ret = fscache_read_or_alloc_pages(ci->fscache, mapping, pages, nr_pages,
ceph_vfs_readpage_complete_unlock,
ceph_readpage_from_fscache_complete,
NULL, mapping_gfp_mask(mapping));

switch (ret) {

@@ -194,7 +194,7 @@ check_name(struct dentry *direntry, struct cifs_tcon *tcon)
int i;

if (unlikely(direntry->d_name.len >
tcon->fsAttrInfo.MaxPathNameComponentLength))
le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength)))
return -ENAMETOOLONG;

if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS)) {

@@ -82,8 +82,8 @@

#define NUMBER_OF_SMB2_COMMANDS 0x0013

/* BB FIXME - analyze following length BB */
#define MAX_SMB2_HDR_SIZE 0x78 /* 4 len + 64 hdr + (2*24 wct) + 2 bct + 2 pad */
/* 4 len + 52 transform hdr + 64 hdr + 56 create rsp */
#define MAX_SMB2_HDR_SIZE 0x00b0

#define SMB2_PROTO_NUMBER cpu_to_le32(0x424d53fe)

18	fs/crypto/Kconfig	Normal file

@@ -0,0 +1,18 @@
config FS_ENCRYPTION
tristate "FS Encryption (Per-file encryption)"
depends on BLOCK
select CRYPTO
select CRYPTO_AES
select CRYPTO_CBC
select CRYPTO_ECB
select CRYPTO_XTS
select CRYPTO_CTS
select CRYPTO_CTR
select CRYPTO_SHA256
select KEYS
select ENCRYPTED_KEYS
help
Enable encryption of files and directories. This
feature is similar to ecryptfs, but it is more memory
efficient since it avoids caching the encrypted and
decrypted pages in the page cache.

3	fs/crypto/Makefile	Normal file

@@ -0,0 +1,3 @@
obj-$(CONFIG_FS_ENCRYPTION) += fscrypto.o

fscrypto-y := crypto.o fname.o policy.o keyinfo.o

568
fs/crypto/crypto.c
Normal file
568
fs/crypto/crypto.c
Normal file
|
@ -0,0 +1,568 @@
|
|||
/*
|
||||
* This contains encryption functions for per-file encryption.
|
||||
*
|
||||
* Copyright (C) 2015, Google, Inc.
|
||||
* Copyright (C) 2015, Motorola Mobility
|
||||
*
|
||||
* Written by Michael Halcrow, 2014.
|
||||
*
|
||||
* Filename encryption additions
|
||||
* Uday Savagaonkar, 2014
|
||||
* Encryption policy handling additions
|
||||
* Ildar Muslukhov, 2014
|
||||
* Add fscrypt_pullback_bio_page()
|
||||
* Jaegeuk Kim, 2015.
|
||||
*
|
||||
* This has not yet undergone a rigorous security audit.
|
||||
*
|
||||
* The usage of AES-XTS should conform to recommendations in NIST
|
||||
* Special Publication 800-38E and IEEE P1619/D16.
|
||||
*/
|
||||
|
||||
#include <linux/pagemap.h>
|
||||
#include <linux/mempool.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/scatterlist.h>
|
||||
#include <linux/ratelimit.h>
|
||||
#include <linux/bio.h>
|
||||
#include <linux/dcache.h>
|
||||
#include <linux/namei.h>
|
||||
#include <linux/fscrypto.h>
|
||||
|
||||
static unsigned int num_prealloc_crypto_pages = 32;
|
||||
static unsigned int num_prealloc_crypto_ctxs = 128;
|
||||
|
||||
module_param(num_prealloc_crypto_pages, uint, 0444);
|
||||
MODULE_PARM_DESC(num_prealloc_crypto_pages,
|
||||
"Number of crypto pages to preallocate");
|
||||
module_param(num_prealloc_crypto_ctxs, uint, 0444);
|
||||
MODULE_PARM_DESC(num_prealloc_crypto_ctxs,
|
||||
"Number of crypto contexts to preallocate");
|
||||
|
||||
static mempool_t *fscrypt_bounce_page_pool = NULL;
|
||||
|
||||
static LIST_HEAD(fscrypt_free_ctxs);
|
||||
static DEFINE_SPINLOCK(fscrypt_ctx_lock);
|
||||
|
||||
static struct workqueue_struct *fscrypt_read_workqueue;
|
||||
static DEFINE_MUTEX(fscrypt_init_mutex);
|
||||
|
||||
static struct kmem_cache *fscrypt_ctx_cachep;
|
||||
struct kmem_cache *fscrypt_info_cachep;
|
||||
|
||||
/**
|
||||
* fscrypt_release_ctx() - Releases an encryption context
|
||||
* @ctx: The encryption context to release.
|
||||
*
|
||||
* If the encryption context was allocated from the pre-allocated pool, returns
|
||||
* it to that pool. Else, frees it.
|
||||
*
|
||||
* If there's a bounce page in the context, this frees that.
|
||||
*/
|
||||
void fscrypt_release_ctx(struct fscrypt_ctx *ctx)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
||||
if (ctx->flags & FS_WRITE_PATH_FL && ctx->w.bounce_page) {
|
||||
mempool_free(ctx->w.bounce_page, fscrypt_bounce_page_pool);
|
||||
ctx->w.bounce_page = NULL;
|
||||
}
|
||||
ctx->w.control_page = NULL;
|
||||
if (ctx->flags & FS_CTX_REQUIRES_FREE_ENCRYPT_FL) {
|
||||
kmem_cache_free(fscrypt_ctx_cachep, ctx);
|
||||
} else {
|
||||
spin_lock_irqsave(&fscrypt_ctx_lock, flags);
|
||||
list_add(&ctx->free_list, &fscrypt_free_ctxs);
|
||||
spin_unlock_irqrestore(&fscrypt_ctx_lock, flags);
|
||||
}
|
||||
}
|
||||
EXPORT_SYMBOL(fscrypt_release_ctx);
|
||||
|
||||
/**
|
||||
* fscrypt_get_ctx() - Gets an encryption context
|
||||
* @inode: The inode for which we are doing the crypto
|
||||
* @gfp_flags: The gfp flag for memory allocation
|
||||
*
|
||||
* Allocates and initializes an encryption context.
|
||||
*
|
||||
* Return: An allocated and initialized encryption context on success; error
|
||||
* value or NULL otherwise.
|
||||
*/
|
||||
struct fscrypt_ctx *fscrypt_get_ctx(struct inode *inode, gfp_t gfp_flags)
|
||||
{
|
||||
struct fscrypt_ctx *ctx = NULL;
|
||||
struct fscrypt_info *ci = inode->i_crypt_info;
|
||||
unsigned long flags;
|
||||
|
||||
if (ci == NULL)
|
||||
return ERR_PTR(-ENOKEY);
|
||||
|
||||
/*
|
||||
* We first try getting the ctx from a free list because in
|
||||
* the common case the ctx will have an allocated and
|
||||
* initialized crypto tfm, so it's probably a worthwhile
|
||||
* optimization. For the bounce page, we first try getting it
|
||||
* from the kernel allocator because that's just about as fast
|
||||
* as getting it from a list and because a cache of free pages
|
||||
* should generally be a "last resort" option for a filesystem
|
||||
* to be able to do its job.
|
||||
*/
|
||||
spin_lock_irqsave(&fscrypt_ctx_lock, flags);
|
||||
ctx = list_first_entry_or_null(&fscrypt_free_ctxs,
|
||||
struct fscrypt_ctx, free_list);
|
||||
if (ctx)
|
||||
list_del(&ctx->free_list);
|
||||
spin_unlock_irqrestore(&fscrypt_ctx_lock, flags);
|
||||
if (!ctx) {
|
||||
ctx = kmem_cache_zalloc(fscrypt_ctx_cachep, gfp_flags);
|
||||
if (!ctx)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
ctx->flags |= FS_CTX_REQUIRES_FREE_ENCRYPT_FL;
|
||||
} else {
|
||||
ctx->flags &= ~FS_CTX_REQUIRES_FREE_ENCRYPT_FL;
|
||||
}
|
||||
ctx->flags &= ~FS_WRITE_PATH_FL;
|
||||
return ctx;
|
||||
}
|
||||
EXPORT_SYMBOL(fscrypt_get_ctx);
|
||||
|
||||
/**
|
||||
* page_crypt_complete() - completion callback for page crypto
|
||||
* @req: The asynchronous cipher request context
|
||||
* @res: The result of the cipher operation
|
||||
*/
|
||||
static void page_crypt_complete(struct crypto_async_request *req, int res)
|
||||
{
|
||||
struct fscrypt_completion_result *ecr = req->data;
|
||||
|
||||
if (res == -EINPROGRESS)
|
||||
return;
|
||||
ecr->res = res;
|
||||
complete(&ecr->completion);
|
||||
}
|
||||
|
||||
typedef enum {
|
||||
FS_DECRYPT = 0,
|
||||
FS_ENCRYPT,
|
||||
} fscrypt_direction_t;
|
||||
|
||||
static int do_page_crypto(struct inode *inode,
|
||||
fscrypt_direction_t rw, pgoff_t index,
|
||||
struct page *src_page, struct page *dest_page,
|
||||
gfp_t gfp_flags)
|
||||
{
|
||||
struct {
|
||||
__le64 index;
|
||||
u8 padding[FS_XTS_TWEAK_SIZE - sizeof(__le64)];
|
||||
} xts_tweak;
|
||||
struct skcipher_request *req = NULL;
|
||||
DECLARE_FS_COMPLETION_RESULT(ecr);
|
||||
struct scatterlist dst, src;
|
||||
struct fscrypt_info *ci = inode->i_crypt_info;
|
||||
struct crypto_skcipher *tfm = ci->ci_ctfm;
|
||||
int res = 0;
|
||||
|
||||
req = skcipher_request_alloc(tfm, gfp_flags);
|
||||
if (!req) {
|
||||
printk_ratelimited(KERN_ERR
|
||||
"%s: crypto_request_alloc() failed\n",
|
||||
__func__);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
skcipher_request_set_callback(
|
||||
req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
|
||||
page_crypt_complete, &ecr);
|
||||
|
||||
BUILD_BUG_ON(sizeof(xts_tweak) != FS_XTS_TWEAK_SIZE);
|
||||
xts_tweak.index = cpu_to_le64(index);
|
||||
memset(xts_tweak.padding, 0, sizeof(xts_tweak.padding));
|
||||
|
||||
sg_init_table(&dst, 1);
|
||||
sg_set_page(&dst, dest_page, PAGE_SIZE, 0);
|
||||
sg_init_table(&src, 1);
|
||||
sg_set_page(&src, src_page, PAGE_SIZE, 0);
|
||||
skcipher_request_set_crypt(req, &src, &dst, PAGE_SIZE, &xts_tweak);
|
||||
if (rw == FS_DECRYPT)
|
||||
res = crypto_skcipher_decrypt(req);
|
||||
else
|
||||
res = crypto_skcipher_encrypt(req);
|
||||
if (res == -EINPROGRESS || res == -EBUSY) {
|
||||
BUG_ON(req->base.data != &ecr);
|
||||
wait_for_completion(&ecr.completion);
|
||||
res = ecr.res;
|
||||
}
|
||||
skcipher_request_free(req);
|
||||
if (res) {
|
||||
printk_ratelimited(KERN_ERR
|
||||
"%s: crypto_skcipher_encrypt() returned %d\n",
|
||||
__func__, res);
|
||||
return res;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct page *alloc_bounce_page(struct fscrypt_ctx *ctx, gfp_t gfp_flags)
|
||||
{
|
||||
ctx->w.bounce_page = mempool_alloc(fscrypt_bounce_page_pool, gfp_flags);
|
||||
if (ctx->w.bounce_page == NULL)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
ctx->flags |= FS_WRITE_PATH_FL;
|
||||
return ctx->w.bounce_page;
|
||||
}
|
||||
|
||||
/**
|
||||
* fscypt_encrypt_page() - Encrypts a page
|
||||
* @inode: The inode for which the encryption should take place
|
||||
* @plaintext_page: The page to encrypt. Must be locked.
|
||||
* @gfp_flags: The gfp flag for memory allocation
|
||||
*
|
||||
* Allocates a ciphertext page and encrypts plaintext_page into it using the ctx
|
||||
* encryption context.
|
||||
*
|
||||
* Called on the page write path. The caller must call
|
||||
* fscrypt_restore_control_page() on the returned ciphertext page to
|
||||
* release the bounce buffer and the encryption context.
|
||||
*
|
||||
* Return: An allocated page with the encrypted content on success. Else, an
|
||||
* error value or NULL.
|
||||
*/
|
||||
struct page *fscrypt_encrypt_page(struct inode *inode,
|
||||
struct page *plaintext_page, gfp_t gfp_flags)
|
||||
{
|
||||
struct fscrypt_ctx *ctx;
|
||||
struct page *ciphertext_page = NULL;
|
||||
int err;
|
||||
|
||||
BUG_ON(!PageLocked(plaintext_page));
|
||||
|
||||
ctx = fscrypt_get_ctx(inode, gfp_flags);
|
||||
if (IS_ERR(ctx))
|
||||
return (struct page *)ctx;
|
||||
|
||||
/* The encryption operation will require a bounce page. */
|
||||
ciphertext_page = alloc_bounce_page(ctx, gfp_flags);
|
||||
if (IS_ERR(ciphertext_page))
|
||||
goto errout;
|
||||
|
||||
ctx->w.control_page = plaintext_page;
|
||||
err = do_page_crypto(inode, FS_ENCRYPT, plaintext_page->index,
|
||||
plaintext_page, ciphertext_page,
|
||||
gfp_flags);
|
||||
if (err) {
|
||||
ciphertext_page = ERR_PTR(err);
|
||||
goto errout;
|
||||
}
|
||||
SetPagePrivate(ciphertext_page);
|
||||
set_page_private(ciphertext_page, (unsigned long)ctx);
|
||||
lock_page(ciphertext_page);
|
||||
return ciphertext_page;
|
||||
|
||||
errout:
|
||||
fscrypt_release_ctx(ctx);
|
||||
return ciphertext_page;
|
||||
}
|
||||
EXPORT_SYMBOL(fscrypt_encrypt_page);

/**
 * fscrypt_decrypt_page() - Decrypts a page in-place
 * @page: The page to decrypt. Must be locked.
 *
 * Decrypts page in-place using the ctx encryption context.
 *
 * Called from the read completion callback.
 *
 * Return: Zero on success, non-zero otherwise.
 */
int fscrypt_decrypt_page(struct page *page)
{
	BUG_ON(!PageLocked(page));

	return do_page_crypto(page->mapping->host,
			FS_DECRYPT, page->index, page, page, GFP_NOFS);
}
EXPORT_SYMBOL(fscrypt_decrypt_page);

int fscrypt_zeroout_range(struct inode *inode, pgoff_t lblk,
				sector_t pblk, unsigned int len)
{
	struct fscrypt_ctx *ctx;
	struct page *ciphertext_page = NULL;
	struct bio *bio;
	int ret, err = 0;

	BUG_ON(inode->i_sb->s_blocksize != PAGE_SIZE);

	ctx = fscrypt_get_ctx(inode, GFP_NOFS);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ciphertext_page = alloc_bounce_page(ctx, GFP_NOWAIT);
	if (IS_ERR(ciphertext_page)) {
		err = PTR_ERR(ciphertext_page);
		goto errout;
	}

	while (len--) {
		err = do_page_crypto(inode, FS_ENCRYPT, lblk,
					ZERO_PAGE(0), ciphertext_page,
					GFP_NOFS);
		if (err)
			goto errout;

		bio = bio_alloc(GFP_NOWAIT, 1);
		if (!bio) {
			err = -ENOMEM;
			goto errout;
		}
		bio->bi_bdev = inode->i_sb->s_bdev;
		bio->bi_iter.bi_sector =
			pblk << (inode->i_sb->s_blocksize_bits - 9);
		ret = bio_add_page(bio, ciphertext_page,
					inode->i_sb->s_blocksize, 0);
		if (ret != inode->i_sb->s_blocksize) {
			/* should never happen! */
			WARN_ON(1);
			bio_put(bio);
			err = -EIO;
			goto errout;
		}
		err = submit_bio_wait(WRITE, bio);
		if ((err == 0) && bio->bi_error)
			err = -EIO;
		bio_put(bio);
		if (err)
			goto errout;
		lblk++;
		pblk++;
	}
	err = 0;
errout:
	fscrypt_release_ctx(ctx);
	return err;
}
EXPORT_SYMBOL(fscrypt_zeroout_range);

/*
 * Validate dentries for encrypted directories to make sure we aren't
 * potentially caching stale data after a key has been added or
 * removed.
 */
static int fscrypt_d_revalidate(struct dentry *dentry, unsigned int flags)
{
	struct dentry *dir;
	struct fscrypt_info *ci;
	int dir_has_key, cached_with_key;

	if (flags & LOOKUP_RCU)
		return -ECHILD;

	dir = dget_parent(dentry);
	if (!d_inode(dir)->i_sb->s_cop->is_encrypted(d_inode(dir))) {
		dput(dir);
		return 0;
	}

	ci = d_inode(dir)->i_crypt_info;
	if (ci && ci->ci_keyring_key &&
	    (ci->ci_keyring_key->flags & ((1 << KEY_FLAG_INVALIDATED) |
					  (1 << KEY_FLAG_REVOKED) |
					  (1 << KEY_FLAG_DEAD))))
		ci = NULL;

	/* this should eventually be a flag in d_flags */
	spin_lock(&dentry->d_lock);
	cached_with_key = dentry->d_flags & DCACHE_ENCRYPTED_WITH_KEY;
	spin_unlock(&dentry->d_lock);
	dir_has_key = (ci != NULL);
	dput(dir);

	/*
	 * If the dentry was cached without the key, and it is a
	 * negative dentry, it might be a valid name. We can't check
	 * if the key has since been made available due to locking
	 * reasons, so we fail the validation so ext4_lookup() can do
	 * this check.
	 *
	 * We also fail the validation if the dentry was created with
	 * the key present, but we no longer have the key, or vice versa.
	 */
	if ((!cached_with_key && d_is_negative(dentry)) ||
			(!cached_with_key && dir_has_key) ||
			(cached_with_key && !dir_has_key))
		return 0;
	return 1;
}

const struct dentry_operations fscrypt_d_ops = {
	.d_revalidate = fscrypt_d_revalidate,
};
EXPORT_SYMBOL(fscrypt_d_ops);

/*
 * Call fscrypt_decrypt_page on every single page, reusing the encryption
 * context.
 */
static void completion_pages(struct work_struct *work)
{
	struct fscrypt_ctx *ctx =
		container_of(work, struct fscrypt_ctx, r.work);
	struct bio *bio = ctx->r.bio;
	struct bio_vec *bv;
	int i;

	bio_for_each_segment_all(bv, bio, i) {
		struct page *page = bv->bv_page;
		int ret = fscrypt_decrypt_page(page);

		if (ret) {
			WARN_ON_ONCE(1);
			SetPageError(page);
		} else {
			SetPageUptodate(page);
		}
		unlock_page(page);
	}
	fscrypt_release_ctx(ctx);
	bio_put(bio);
}

void fscrypt_decrypt_bio_pages(struct fscrypt_ctx *ctx, struct bio *bio)
{
	INIT_WORK(&ctx->r.work, completion_pages);
	ctx->r.bio = bio;
	queue_work(fscrypt_read_workqueue, &ctx->r.work);
}
EXPORT_SYMBOL(fscrypt_decrypt_bio_pages);
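
/*
 * Illustrative read-completion sketch (hypothetical filesystem code,
 * assuming the fscrypt_ctx was stashed in bi_private at submit time):
 */
static void my_fs_read_end_io(struct bio *bio)
{
	struct fscrypt_ctx *ctx = bio->bi_private;

	if (bio->bi_error) {
		/* per-page SetPageError()/unlock_page() handling elided */
		fscrypt_release_ctx(ctx);
		bio_put(bio);
		return;
	}
	/* Success: decrypt every segment off the fscrypt workqueue. */
	fscrypt_decrypt_bio_pages(ctx, bio);
}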

void fscrypt_pullback_bio_page(struct page **page, bool restore)
{
	struct fscrypt_ctx *ctx;
	struct page *bounce_page;

	/* Bounce pages are unmapped, so a mapped page is not one of ours. */
	if ((*page)->mapping)
		return;

	/* The bounce data page is unmapped. */
	bounce_page = *page;
	ctx = (struct fscrypt_ctx *)page_private(bounce_page);

	/* restore control page */
	*page = ctx->w.control_page;

	if (restore)
		fscrypt_restore_control_page(bounce_page);
}
EXPORT_SYMBOL(fscrypt_pullback_bio_page);

void fscrypt_restore_control_page(struct page *page)
{
	struct fscrypt_ctx *ctx;

	ctx = (struct fscrypt_ctx *)page_private(page);
	set_page_private(page, (unsigned long)NULL);
	ClearPagePrivate(page);
	unlock_page(page);
	fscrypt_release_ctx(ctx);
}
EXPORT_SYMBOL(fscrypt_restore_control_page);

static void fscrypt_destroy(void)
{
	struct fscrypt_ctx *pos, *n;

	list_for_each_entry_safe(pos, n, &fscrypt_free_ctxs, free_list)
		kmem_cache_free(fscrypt_ctx_cachep, pos);
	INIT_LIST_HEAD(&fscrypt_free_ctxs);
	mempool_destroy(fscrypt_bounce_page_pool);
	fscrypt_bounce_page_pool = NULL;
}

/**
 * fscrypt_initialize() - allocate major buffers for fs encryption.
 *
 * We only call this when we start accessing encrypted files, since it
 * results in memory getting allocated that wouldn't otherwise be used.
 *
 * Return: Zero on success, non-zero otherwise.
 */
int fscrypt_initialize(void)
{
	int i, res = -ENOMEM;

	if (fscrypt_bounce_page_pool)
		return 0;

	mutex_lock(&fscrypt_init_mutex);
	if (fscrypt_bounce_page_pool)
		goto already_initialized;

	for (i = 0; i < num_prealloc_crypto_ctxs; i++) {
		struct fscrypt_ctx *ctx;

		ctx = kmem_cache_zalloc(fscrypt_ctx_cachep, GFP_NOFS);
		if (!ctx)
			goto fail;
		list_add(&ctx->free_list, &fscrypt_free_ctxs);
	}

	fscrypt_bounce_page_pool =
		mempool_create_page_pool(num_prealloc_crypto_pages, 0);
	if (!fscrypt_bounce_page_pool)
		goto fail;

already_initialized:
	mutex_unlock(&fscrypt_init_mutex);
	return 0;
fail:
	fscrypt_destroy();
	mutex_unlock(&fscrypt_init_mutex);
	return res;
}
EXPORT_SYMBOL(fscrypt_initialize);

/**
 * fscrypt_init() - Set up for fs encryption.
 */
static int __init fscrypt_init(void)
{
	fscrypt_read_workqueue = alloc_workqueue("fscrypt_read_queue",
							WQ_HIGHPRI, 0);
	if (!fscrypt_read_workqueue)
		goto fail;

	fscrypt_ctx_cachep = KMEM_CACHE(fscrypt_ctx, SLAB_RECLAIM_ACCOUNT);
	if (!fscrypt_ctx_cachep)
		goto fail_free_queue;

	fscrypt_info_cachep = KMEM_CACHE(fscrypt_info, SLAB_RECLAIM_ACCOUNT);
	if (!fscrypt_info_cachep)
		goto fail_free_ctx;

	return 0;

fail_free_ctx:
	kmem_cache_destroy(fscrypt_ctx_cachep);
fail_free_queue:
	destroy_workqueue(fscrypt_read_workqueue);
fail:
	return -ENOMEM;
}
module_init(fscrypt_init)

/**
 * fscrypt_exit() - Shutdown the fs encryption system
 */
static void __exit fscrypt_exit(void)
{
	fscrypt_destroy();

	if (fscrypt_read_workqueue)
		destroy_workqueue(fscrypt_read_workqueue);
	kmem_cache_destroy(fscrypt_ctx_cachep);
	kmem_cache_destroy(fscrypt_info_cachep);
}
module_exit(fscrypt_exit);

MODULE_LICENSE("GPL");
fs/crypto/fname.c: new file, 414 lines
@@ -0,0 +1,414 @@
/*
 * This contains functions for filename crypto management
 *
 * Copyright (C) 2015, Google, Inc.
 * Copyright (C) 2015, Motorola Mobility
 *
 * Written by Uday Savagaonkar, 2014.
 * Modified by Jaegeuk Kim, 2015.
 *
 * This has not yet undergone a rigorous security audit.
 */

#include <linux/scatterlist.h>
#include <linux/ratelimit.h>
#include <linux/fscrypto.h>

/**
 * fname_crypt_complete() - completion callback for filename crypto
 * @req: The asynchronous cipher request context
 * @res: The result of the cipher operation
 */
static void fname_crypt_complete(struct crypto_async_request *req, int res)
{
	struct fscrypt_completion_result *ecr = req->data;

	if (res == -EINPROGRESS)
		return;
	ecr->res = res;
	complete(&ecr->completion);
}

/**
 * fname_encrypt() - encrypt a filename
 *
 * The caller must have allocated sufficient memory for the @oname string.
 *
 * Return: 0 on success, -errno on failure
 */
static int fname_encrypt(struct inode *inode,
			const struct qstr *iname, struct fscrypt_str *oname)
{
	struct skcipher_request *req = NULL;
	DECLARE_FS_COMPLETION_RESULT(ecr);
	struct fscrypt_info *ci = inode->i_crypt_info;
	struct crypto_skcipher *tfm = ci->ci_ctfm;
	int res = 0;
	char iv[FS_CRYPTO_BLOCK_SIZE];
	struct scatterlist sg;
	int padding = 4 << (ci->ci_flags & FS_POLICY_FLAGS_PAD_MASK);
	unsigned int lim;
	unsigned int cryptlen;

	lim = inode->i_sb->s_cop->max_namelen(inode);
	if (iname->len <= 0 || iname->len > lim)
		return -EIO;

	/*
	 * Copy the filename to the output buffer for encrypting in-place and
	 * pad it with the needed number of NUL bytes.
	 */
	cryptlen = max_t(unsigned int, iname->len, FS_CRYPTO_BLOCK_SIZE);
	cryptlen = round_up(cryptlen, padding);
	cryptlen = min(cryptlen, lim);
	memcpy(oname->name, iname->name, iname->len);
	memset(oname->name + iname->len, 0, cryptlen - iname->len);

	/* Initialize the IV */
	memset(iv, 0, FS_CRYPTO_BLOCK_SIZE);

	/* Set up the encryption request */
	req = skcipher_request_alloc(tfm, GFP_NOFS);
	if (!req) {
		printk_ratelimited(KERN_ERR
			"%s: skcipher_request_alloc() failed\n", __func__);
		return -ENOMEM;
	}
	skcipher_request_set_callback(req,
			CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
			fname_crypt_complete, &ecr);
	sg_init_one(&sg, oname->name, cryptlen);
	skcipher_request_set_crypt(req, &sg, &sg, cryptlen, iv);

	/* Do the encryption */
	res = crypto_skcipher_encrypt(req);
	if (res == -EINPROGRESS || res == -EBUSY) {
		/* Request is being completed asynchronously; wait for it */
		wait_for_completion(&ecr.completion);
		res = ecr.res;
	}
	skcipher_request_free(req);
	if (res < 0) {
		printk_ratelimited(KERN_ERR
			"%s: Error (error code %d)\n", __func__, res);
		return res;
	}

	oname->len = cryptlen;
	return 0;
}

/**
 * fname_decrypt() - decrypt a filename
 *
 * The caller must have allocated sufficient memory for the @oname string.
 *
 * Return: 0 on success, -errno on failure
 */
static int fname_decrypt(struct inode *inode,
				const struct fscrypt_str *iname,
				struct fscrypt_str *oname)
{
	struct skcipher_request *req = NULL;
	DECLARE_FS_COMPLETION_RESULT(ecr);
	struct scatterlist src_sg, dst_sg;
	struct fscrypt_info *ci = inode->i_crypt_info;
	struct crypto_skcipher *tfm = ci->ci_ctfm;
	int res = 0;
	char iv[FS_CRYPTO_BLOCK_SIZE];
	unsigned lim;

	lim = inode->i_sb->s_cop->max_namelen(inode);
	if (iname->len <= 0 || iname->len > lim)
		return -EIO;

	/* Allocate request */
	req = skcipher_request_alloc(tfm, GFP_NOFS);
	if (!req) {
		printk_ratelimited(KERN_ERR
			"%s: skcipher_request_alloc() failed\n", __func__);
		return -ENOMEM;
	}
	skcipher_request_set_callback(req,
		CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
		fname_crypt_complete, &ecr);

	/* Initialize IV */
	memset(iv, 0, FS_CRYPTO_BLOCK_SIZE);

	/* Create decryption request */
	sg_init_one(&src_sg, iname->name, iname->len);
	sg_init_one(&dst_sg, oname->name, oname->len);
	skcipher_request_set_crypt(req, &src_sg, &dst_sg, iname->len, iv);
	res = crypto_skcipher_decrypt(req);
	if (res == -EINPROGRESS || res == -EBUSY) {
		wait_for_completion(&ecr.completion);
		res = ecr.res;
	}
	skcipher_request_free(req);
	if (res < 0) {
		printk_ratelimited(KERN_ERR
			"%s: Error (error code %d)\n", __func__, res);
		return res;
	}

	oname->len = strnlen(oname->name, iname->len);
	return 0;
}

static const char *lookup_table =
	"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+,";

/**
 * digest_encode() - encode a digest as a filename-safe string
 *
 * Encodes the input digest using characters from the set [A-Za-z0-9+,].
 * The encoded string is roughly 4/3 times the size of the input string.
 */
static int digest_encode(const char *src, int len, char *dst)
{
	int i = 0, bits = 0, ac = 0;
	char *cp = dst;

	while (i < len) {
		ac += (((unsigned char) src[i]) << bits);
		bits += 8;
		do {
			*cp++ = lookup_table[ac & 0x3f];
			ac >>= 6;
			bits -= 6;
		} while (bits >= 6);
		i++;
	}
	if (bits)
		*cp++ = lookup_table[ac & 0x3f];
	return cp - dst;
}

static int digest_decode(const char *src, int len, char *dst)
{
	int i = 0, bits = 0, ac = 0;
	const char *p;
	char *cp = dst;

	while (i < len) {
		p = strchr(lookup_table, src[i]);
		if (p == NULL || src[i] == 0)
			return -2;
		ac += (p - lookup_table) << bits;
		bits += 6;
		if (bits >= 8) {
			*cp++ = ac & 0xff;
			ac >>= 8;
			bits -= 8;
		}
		i++;
	}
	if (ac)
		return -1;
	return cp - dst;
}
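
/*
 * Quick self-test of the 6-bit packing above (illustrative only, not
 * part of this change): every 3 input bytes become 4 output characters,
 * so a 24-byte buffer encodes to exactly 32 characters and round-trips
 * cleanly.
 */
static void __maybe_unused digest_codec_selftest(void)
{
	static const char digest[24] = "0123456789abcdefghijklmn";
	char enc[33], dec[24];
	int elen, dlen;

	elen = digest_encode(digest, sizeof(digest), enc);
	dlen = digest_decode(enc, elen, dec);
	WARN_ON(elen != 32 || dlen != sizeof(digest) ||
		memcmp(digest, dec, sizeof(digest)) != 0);
}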

u32 fscrypt_fname_encrypted_size(struct inode *inode, u32 ilen)
{
	int padding = 32;
	struct fscrypt_info *ci = inode->i_crypt_info;

	if (ci)
		padding = 4 << (ci->ci_flags & FS_POLICY_FLAGS_PAD_MASK);
	ilen = max(ilen, (u32)FS_CRYPTO_BLOCK_SIZE);
	return round_up(ilen, padding);
}
EXPORT_SYMBOL(fscrypt_fname_encrypted_size);
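
/*
 * Worked example of the padding arithmetic above: the policy flag selects
 * a granule of 4 << (flags & FS_POLICY_FLAGS_PAD_MASK), i.e. 4, 8, 16 or
 * 32 bytes. A 13-byte name is first raised to FS_CRYPTO_BLOCK_SIZE (16)
 * and is already aligned under pad-16, so it occupies 16 bytes on disk;
 * a 20-byte name rounds up to 32. With no i_crypt_info attached, the
 * worst-case granule of 32 is assumed.
 */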

/**
 * fscrypt_fname_alloc_buffer() - allocate a filename crypto output buffer
 *
 * Allocates an output buffer that is sufficient for the crypto operation
 * specified by the context and the direction.
 */
int fscrypt_fname_alloc_buffer(struct inode *inode,
				u32 ilen, struct fscrypt_str *crypto_str)
{
	unsigned int olen = fscrypt_fname_encrypted_size(inode, ilen);

	crypto_str->len = olen;
	if (olen < FS_FNAME_CRYPTO_DIGEST_SIZE * 2)
		olen = FS_FNAME_CRYPTO_DIGEST_SIZE * 2;
	/*
	 * Allocated buffer can hold one more character to null-terminate the
	 * string
	 */
	crypto_str->name = kmalloc(olen + 1, GFP_NOFS);
	if (!(crypto_str->name))
		return -ENOMEM;
	return 0;
}
EXPORT_SYMBOL(fscrypt_fname_alloc_buffer);

/**
 * fscrypt_fname_free_buffer() - free a filename crypto buffer
 *
 * Frees the buffer allocated for crypto operation.
 */
void fscrypt_fname_free_buffer(struct fscrypt_str *crypto_str)
{
	if (!crypto_str)
		return;
	kfree(crypto_str->name);
	crypto_str->name = NULL;
}
EXPORT_SYMBOL(fscrypt_fname_free_buffer);

/**
 * fscrypt_fname_disk_to_usr() - converts a filename from disk space to user
 * space
 *
 * The caller must have allocated sufficient memory for the @oname string.
 *
 * Return: 0 on success, -errno on failure
 */
int fscrypt_fname_disk_to_usr(struct inode *inode,
			u32 hash, u32 minor_hash,
			const struct fscrypt_str *iname,
			struct fscrypt_str *oname)
{
	const struct qstr qname = FSTR_TO_QSTR(iname);
	char buf[24];

	if (fscrypt_is_dot_dotdot(&qname)) {
		oname->name[0] = '.';
		oname->name[iname->len - 1] = '.';
		oname->len = iname->len;
		return 0;
	}

	if (iname->len < FS_CRYPTO_BLOCK_SIZE)
		return -EUCLEAN;

	if (inode->i_crypt_info)
		return fname_decrypt(inode, iname, oname);

	if (iname->len <= FS_FNAME_CRYPTO_DIGEST_SIZE) {
		oname->len = digest_encode(iname->name, iname->len,
					   oname->name);
		return 0;
	}
	if (hash) {
		memcpy(buf, &hash, 4);
		memcpy(buf + 4, &minor_hash, 4);
	} else {
		memset(buf, 0, 8);
	}
	memcpy(buf + 8, iname->name + iname->len - 16, 16);
	oname->name[0] = '_';
	oname->len = 1 + digest_encode(buf, 24, oname->name + 1);
	return 0;
}
EXPORT_SYMBOL(fscrypt_fname_disk_to_usr);
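
/*
 * This is where the magic lengths checked in fscrypt_setup_filename()
 * below come from: a keyless "big name" is '_' plus digest_encode() of a
 * 24-byte buffer (4-byte hash, 4-byte minor hash, last 16 ciphertext
 * bytes), i.e. exactly 1 + 32 = 33 characters, while a directly encoded
 * name of at most FS_FNAME_CRYPTO_DIGEST_SIZE (32) bytes encodes to at
 * most DIV_ROUND_UP(32 * 8, 6) = 43 characters.
 */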

/**
 * fscrypt_fname_usr_to_disk() - converts a filename from user space to disk
 * space
 *
 * The caller must have allocated sufficient memory for the @oname string.
 *
 * Return: 0 on success, -errno on failure
 */
int fscrypt_fname_usr_to_disk(struct inode *inode,
			const struct qstr *iname,
			struct fscrypt_str *oname)
{
	if (fscrypt_is_dot_dotdot(iname)) {
		oname->name[0] = '.';
		oname->name[iname->len - 1] = '.';
		oname->len = iname->len;
		return 0;
	}
	if (inode->i_crypt_info)
		return fname_encrypt(inode, iname, oname);
	/*
	 * Without a proper key, a user is not allowed to modify the filenames
	 * in a directory. Consequently, a user space name cannot be mapped to
	 * a disk-space name.
	 */
	return -EACCES;
}
EXPORT_SYMBOL(fscrypt_fname_usr_to_disk);

int fscrypt_setup_filename(struct inode *dir, const struct qstr *iname,
			  int lookup, struct fscrypt_name *fname)
{
	int ret = 0, bigname = 0;

	memset(fname, 0, sizeof(struct fscrypt_name));
	fname->usr_fname = iname;

	if (!dir->i_sb->s_cop->is_encrypted(dir) ||
				fscrypt_is_dot_dotdot(iname)) {
		fname->disk_name.name = (unsigned char *)iname->name;
		fname->disk_name.len = iname->len;
		return 0;
	}
	ret = get_crypt_info(dir);
	if (ret && ret != -EOPNOTSUPP)
		return ret;

	if (dir->i_crypt_info) {
		ret = fscrypt_fname_alloc_buffer(dir, iname->len,
							&fname->crypto_buf);
		if (ret)
			return ret;
		ret = fname_encrypt(dir, iname, &fname->crypto_buf);
		if (ret)
			goto errout;
		fname->disk_name.name = fname->crypto_buf.name;
		fname->disk_name.len = fname->crypto_buf.len;
		return 0;
	}
	if (!lookup)
		return -EACCES;

	/*
	 * We don't have the key and we are doing a lookup; decode the
	 * user-supplied name
	 */
	if (iname->name[0] == '_')
		bigname = 1;
	if ((bigname && (iname->len != 33)) || (!bigname && (iname->len > 43)))
		return -ENOENT;

	fname->crypto_buf.name = kmalloc(32, GFP_KERNEL);
	if (fname->crypto_buf.name == NULL)
		return -ENOMEM;

	ret = digest_decode(iname->name + bigname, iname->len - bigname,
				fname->crypto_buf.name);
	if (ret < 0) {
		ret = -ENOENT;
		goto errout;
	}
	fname->crypto_buf.len = ret;
	if (bigname) {
		memcpy(&fname->hash, fname->crypto_buf.name, 4);
		memcpy(&fname->minor_hash, fname->crypto_buf.name + 4, 4);
	} else {
		fname->disk_name.name = fname->crypto_buf.name;
		fname->disk_name.len = fname->crypto_buf.len;
	}
	return 0;

errout:
	fscrypt_fname_free_buffer(&fname->crypto_buf);
	return ret;
}
EXPORT_SYMBOL(fscrypt_setup_filename);
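
/*
 * Lookup-path usage sketch for the helpers above; my_fs_find_entry() is
 * a hypothetical directory-search helper, not part of this change.
 */
static struct inode *my_fs_lookup_locked(struct inode *dir,
					 const struct qstr *name)
{
	struct fscrypt_name fname;
	struct inode *inode;
	int err;

	err = fscrypt_setup_filename(dir, name, 1 /* lookup */, &fname);
	if (err)
		return ERR_PTR(err);

	/*
	 * Search with fname.disk_name when it is set; for a keyless big
	 * name only fname.hash/fname.minor_hash are available.
	 */
	inode = my_fs_find_entry(dir, &fname);

	fscrypt_free_filename(&fname);
	return inode;
}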

void fscrypt_free_filename(struct fscrypt_name *fname)
{
	kfree(fname->crypto_buf.name);
	fname->crypto_buf.name = NULL;
	fname->usr_fname = NULL;
	fname->disk_name.name = NULL;
}
EXPORT_SYMBOL(fscrypt_free_filename);
fs/crypto/keyinfo.c: new file, 333 lines
@@ -0,0 +1,333 @@
/*
 * key management facility for FS encryption support.
 *
 * Copyright (C) 2015, Google, Inc.
 *
 * This contains encryption key functions.
 *
 * Written by Michael Halcrow, Ildar Muslukhov, and Uday Savagaonkar, 2015.
 */

#include <keys/user-type.h>
#include <linux/scatterlist.h>
#include <linux/fscrypto.h>

static void derive_crypt_complete(struct crypto_async_request *req, int rc)
{
	struct fscrypt_completion_result *ecr = req->data;

	if (rc == -EINPROGRESS)
		return;

	ecr->res = rc;
	complete(&ecr->completion);
}

/**
 * derive_key_aes() - Derive a key using AES-128-ECB
 * @deriving_key: Encryption key used for derivation.
 * @source_key:   Source key to which to apply derivation.
 * @derived_key:  Derived key.
 *
 * Return: Zero on success; non-zero otherwise.
 */
static int derive_key_aes(u8 deriving_key[FS_AES_128_ECB_KEY_SIZE],
				u8 source_key[FS_AES_256_XTS_KEY_SIZE],
				u8 derived_key[FS_AES_256_XTS_KEY_SIZE])
{
	int res = 0;
	struct skcipher_request *req = NULL;
	DECLARE_FS_COMPLETION_RESULT(ecr);
	struct scatterlist src_sg, dst_sg;
	struct crypto_skcipher *tfm = crypto_alloc_skcipher("ecb(aes)", 0, 0);

	if (IS_ERR(tfm)) {
		res = PTR_ERR(tfm);
		tfm = NULL;
		goto out;
	}
	crypto_skcipher_set_flags(tfm, CRYPTO_TFM_REQ_WEAK_KEY);
	req = skcipher_request_alloc(tfm, GFP_NOFS);
	if (!req) {
		res = -ENOMEM;
		goto out;
	}
	skcipher_request_set_callback(req,
			CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
			derive_crypt_complete, &ecr);
	res = crypto_skcipher_setkey(tfm, deriving_key,
					FS_AES_128_ECB_KEY_SIZE);
	if (res < 0)
		goto out;

	sg_init_one(&src_sg, source_key, FS_AES_256_XTS_KEY_SIZE);
	sg_init_one(&dst_sg, derived_key, FS_AES_256_XTS_KEY_SIZE);
	skcipher_request_set_crypt(req, &src_sg, &dst_sg,
					FS_AES_256_XTS_KEY_SIZE, NULL);
	res = crypto_skcipher_encrypt(req);
	if (res == -EINPROGRESS || res == -EBUSY) {
		wait_for_completion(&ecr.completion);
		res = ecr.res;
	}
out:
	skcipher_request_free(req);
	crypto_free_skcipher(tfm);
	return res;
}
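
/*
 * The derivation above is plain AES-128-ECB with the 16-byte per-file
 * nonce as the AES key, applied blockwise to the 64-byte master key.
 * Below, a userspace equivalent for illustration only, using OpenSSL's
 * EVP API (link with -lcrypto; not part of this change):
 */
#include <openssl/evp.h>

static int derive_key_aes_user(const unsigned char nonce[16],
			       const unsigned char master[64],
			       unsigned char derived[64])
{
	EVP_CIPHER_CTX *c = EVP_CIPHER_CTX_new();
	int outl = 0, ok;

	if (!c)
		return -1;
	ok = EVP_EncryptInit_ex(c, EVP_aes_128_ecb(), NULL, nonce, NULL) &&
	     EVP_CIPHER_CTX_set_padding(c, 0) &&	/* 64 bytes = 4 AES blocks */
	     EVP_EncryptUpdate(c, derived, &outl, master, 64);
	EVP_CIPHER_CTX_free(c);
	return (ok && outl == 64) ? 0 : -1;
}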

static int validate_user_key(struct fscrypt_info *crypt_info,
			struct fscrypt_context *ctx, u8 *raw_key,
			u8 *prefix, int prefix_size)
{
	u8 *full_key_descriptor;
	struct key *keyring_key;
	struct fscrypt_key *master_key;
	const struct user_key_payload *ukp;
	int full_key_len = prefix_size + (FS_KEY_DESCRIPTOR_SIZE * 2) + 1;
	int res;

	full_key_descriptor = kmalloc(full_key_len, GFP_NOFS);
	if (!full_key_descriptor)
		return -ENOMEM;

	memcpy(full_key_descriptor, prefix, prefix_size);
	sprintf(full_key_descriptor + prefix_size,
			"%*phN", FS_KEY_DESCRIPTOR_SIZE,
			ctx->master_key_descriptor);
	full_key_descriptor[full_key_len - 1] = '\0';
	keyring_key = request_key(&key_type_logon, full_key_descriptor, NULL);
	kfree(full_key_descriptor);
	if (IS_ERR(keyring_key))
		return PTR_ERR(keyring_key);

	if (keyring_key->type != &key_type_logon) {
		printk_once(KERN_WARNING
				"%s: key type must be logon\n", __func__);
		res = -ENOKEY;
		goto out;
	}
	down_read(&keyring_key->sem);
	ukp = user_key_payload(keyring_key);
	if (ukp->datalen != sizeof(struct fscrypt_key)) {
		res = -EINVAL;
		up_read(&keyring_key->sem);
		goto out;
	}
	master_key = (struct fscrypt_key *)ukp->data;
	BUILD_BUG_ON(FS_AES_128_ECB_KEY_SIZE != FS_KEY_DERIVATION_NONCE_SIZE);

	if (master_key->size != FS_AES_256_XTS_KEY_SIZE) {
		printk_once(KERN_WARNING
				"%s: key size incorrect: %d\n",
				__func__, master_key->size);
		res = -ENOKEY;
		up_read(&keyring_key->sem);
		goto out;
	}
	res = derive_key_aes(ctx->nonce, master_key->raw, raw_key);
	up_read(&keyring_key->sem);
	if (res)
		goto out;

	crypt_info->ci_keyring_key = keyring_key;
	return 0;
out:
	key_put(keyring_key);
	return res;
}
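
/*
 * The matching userspace step: the master key must already be present in
 * a keyring as a "logon" key named "fscrypt:" plus the hex descriptor
 * (or a filesystem-specific prefix). A sketch using libkeyutils (link
 * with -lkeyutils); the struct below mirrors struct fscrypt_key and is
 * an assumption of this example, not an API introduced here:
 */
#include <keyutils.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct fscrypt_key_user {	/* mirrors struct fscrypt_key */
	uint32_t mode;
	uint8_t raw[64];
	uint32_t size;
};

static key_serial_t install_master_key(const char *hex_desc,
				       const uint8_t raw[64])
{
	struct fscrypt_key_user key = { .mode = 0, .size = 64 };
	char desc[64];

	memcpy(key.raw, raw, 64);
	snprintf(desc, sizeof(desc), "fscrypt:%s", hex_desc);
	return add_key("logon", desc, &key, sizeof(key),
		       KEY_SPEC_SESSION_KEYRING);
}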

static int determine_cipher_type(struct fscrypt_info *ci, struct inode *inode,
				 const char **cipher_str_ret, int *keysize_ret)
{
	if (S_ISREG(inode->i_mode)) {
		if (ci->ci_data_mode == FS_ENCRYPTION_MODE_AES_256_XTS) {
			*cipher_str_ret = "xts(aes)";
			*keysize_ret = FS_AES_256_XTS_KEY_SIZE;
			return 0;
		}
		pr_warn_once("fscrypto: unsupported contents encryption mode "
			     "%d for inode %lu\n",
			     ci->ci_data_mode, inode->i_ino);
		return -ENOKEY;
	}

	if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) {
		if (ci->ci_filename_mode == FS_ENCRYPTION_MODE_AES_256_CTS) {
			*cipher_str_ret = "cts(cbc(aes))";
			*keysize_ret = FS_AES_256_CTS_KEY_SIZE;
			return 0;
		}
		pr_warn_once("fscrypto: unsupported filenames encryption mode "
			     "%d for inode %lu\n",
			     ci->ci_filename_mode, inode->i_ino);
		return -ENOKEY;
	}

	pr_warn_once("fscrypto: unsupported file type %d for inode %lu\n",
		     (inode->i_mode & S_IFMT), inode->i_ino);
	return -ENOKEY;
}

static void put_crypt_info(struct fscrypt_info *ci)
{
	if (!ci)
		return;

	key_put(ci->ci_keyring_key);
	crypto_free_skcipher(ci->ci_ctfm);
	kmem_cache_free(fscrypt_info_cachep, ci);
}

int get_crypt_info(struct inode *inode)
{
	struct fscrypt_info *crypt_info;
	struct fscrypt_context ctx;
	struct crypto_skcipher *ctfm;
	const char *cipher_str;
	int keysize;
	u8 *raw_key = NULL;
	int res;

	res = fscrypt_initialize();
	if (res)
		return res;

	if (!inode->i_sb->s_cop->get_context)
		return -EOPNOTSUPP;
retry:
	crypt_info = ACCESS_ONCE(inode->i_crypt_info);
	if (crypt_info) {
		if (!crypt_info->ci_keyring_key ||
				key_validate(crypt_info->ci_keyring_key) == 0)
			return 0;
		fscrypt_put_encryption_info(inode, crypt_info);
		goto retry;
	}

	res = inode->i_sb->s_cop->get_context(inode, &ctx, sizeof(ctx));
	if (res < 0) {
		if (!fscrypt_dummy_context_enabled(inode))
			return res;
		ctx.format = FS_ENCRYPTION_CONTEXT_FORMAT_V1;
		ctx.contents_encryption_mode = FS_ENCRYPTION_MODE_AES_256_XTS;
		ctx.filenames_encryption_mode = FS_ENCRYPTION_MODE_AES_256_CTS;
		ctx.flags = 0;
	} else if (res != sizeof(ctx)) {
		return -EINVAL;
	}

	if (ctx.format != FS_ENCRYPTION_CONTEXT_FORMAT_V1)
		return -EINVAL;

	if (ctx.flags & ~FS_POLICY_FLAGS_VALID)
		return -EINVAL;

	crypt_info = kmem_cache_alloc(fscrypt_info_cachep, GFP_NOFS);
	if (!crypt_info)
		return -ENOMEM;

	crypt_info->ci_flags = ctx.flags;
	crypt_info->ci_data_mode = ctx.contents_encryption_mode;
	crypt_info->ci_filename_mode = ctx.filenames_encryption_mode;
	crypt_info->ci_ctfm = NULL;
	crypt_info->ci_keyring_key = NULL;
	memcpy(crypt_info->ci_master_key, ctx.master_key_descriptor,
				sizeof(crypt_info->ci_master_key));

	res = determine_cipher_type(crypt_info, inode, &cipher_str, &keysize);
	if (res)
		goto out;

	/*
	 * This cannot be a stack buffer because it is passed to the scatterlist
	 * crypto API as part of key derivation.
	 */
	res = -ENOMEM;
	raw_key = kmalloc(FS_MAX_KEY_SIZE, GFP_NOFS);
	if (!raw_key)
		goto out;

	if (fscrypt_dummy_context_enabled(inode)) {
		memset(raw_key, 0x42, FS_AES_256_XTS_KEY_SIZE);
		goto got_key;
	}

	res = validate_user_key(crypt_info, &ctx, raw_key,
			FS_KEY_DESC_PREFIX, FS_KEY_DESC_PREFIX_SIZE);
	if (res && inode->i_sb->s_cop->key_prefix) {
		u8 *prefix = NULL;
		int prefix_size, res2;

		prefix_size = inode->i_sb->s_cop->key_prefix(inode, &prefix);
		res2 = validate_user_key(crypt_info, &ctx, raw_key,
							prefix, prefix_size);
		if (res2) {
			if (res2 == -ENOKEY)
				res = -ENOKEY;
			goto out;
		}
	} else if (res) {
		goto out;
	}
got_key:
	ctfm = crypto_alloc_skcipher(cipher_str, 0, 0);
	if (!ctfm || IS_ERR(ctfm)) {
		res = ctfm ? PTR_ERR(ctfm) : -ENOMEM;
		printk(KERN_DEBUG
		       "%s: error %d (inode %u) allocating crypto tfm\n",
		       __func__, res, (unsigned) inode->i_ino);
		goto out;
	}
	crypt_info->ci_ctfm = ctfm;
	crypto_skcipher_clear_flags(ctfm, ~0);
	crypto_skcipher_set_flags(ctfm, CRYPTO_TFM_REQ_WEAK_KEY);
	res = crypto_skcipher_setkey(ctfm, raw_key, keysize);
	if (res)
		goto out;

	kzfree(raw_key);
	raw_key = NULL;
	if (cmpxchg(&inode->i_crypt_info, NULL, crypt_info) != NULL) {
		put_crypt_info(crypt_info);
		goto retry;
	}
	return 0;

out:
	if (res == -ENOKEY)
		res = 0;
	put_crypt_info(crypt_info);
	kzfree(raw_key);
	return res;
}

void fscrypt_put_encryption_info(struct inode *inode, struct fscrypt_info *ci)
{
	struct fscrypt_info *prev;

	if (ci == NULL)
		ci = ACCESS_ONCE(inode->i_crypt_info);
	if (ci == NULL)
		return;

	prev = cmpxchg(&inode->i_crypt_info, ci, NULL);
	if (prev != ci)
		return;

	put_crypt_info(ci);
}
EXPORT_SYMBOL(fscrypt_put_encryption_info);

int fscrypt_get_encryption_info(struct inode *inode)
{
	struct fscrypt_info *ci = inode->i_crypt_info;

	if (!ci ||
		(ci->ci_keyring_key &&
		 (ci->ci_keyring_key->flags & ((1 << KEY_FLAG_INVALIDATED) |
					       (1 << KEY_FLAG_REVOKED) |
					       (1 << KEY_FLAG_DEAD)))))
		return get_crypt_info(inode);
	return 0;
}
EXPORT_SYMBOL(fscrypt_get_encryption_info);
fs/crypto/policy.c: new file, 250 lines
@@ -0,0 +1,250 @@
/*
 * Encryption policy functions for per-file encryption support.
 *
 * Copyright (C) 2015, Google, Inc.
 * Copyright (C) 2015, Motorola Mobility.
 *
 * Written by Michael Halcrow, 2015.
 * Modified by Jaegeuk Kim, 2015.
 */

#include <linux/random.h>
#include <linux/string.h>
#include <linux/fscrypto.h>
#include <linux/mount.h>

static int inode_has_encryption_context(struct inode *inode)
{
	if (!inode->i_sb->s_cop->get_context)
		return 0;
	return (inode->i_sb->s_cop->get_context(inode, NULL, 0L) > 0);
}

/*
 * check whether the policy is consistent with the encryption context
 * for the inode
 */
static int is_encryption_context_consistent_with_policy(struct inode *inode,
				const struct fscrypt_policy *policy)
{
	struct fscrypt_context ctx;
	int res;

	if (!inode->i_sb->s_cop->get_context)
		return 0;

	res = inode->i_sb->s_cop->get_context(inode, &ctx, sizeof(ctx));
	if (res != sizeof(ctx))
		return 0;

	return (memcmp(ctx.master_key_descriptor, policy->master_key_descriptor,
			FS_KEY_DESCRIPTOR_SIZE) == 0 &&
			(ctx.flags == policy->flags) &&
			(ctx.contents_encryption_mode ==
			 policy->contents_encryption_mode) &&
			(ctx.filenames_encryption_mode ==
			 policy->filenames_encryption_mode));
}

static int create_encryption_context_from_policy(struct inode *inode,
				const struct fscrypt_policy *policy)
{
	struct fscrypt_context ctx;
	int res;

	if (!inode->i_sb->s_cop->set_context)
		return -EOPNOTSUPP;

	if (inode->i_sb->s_cop->prepare_context) {
		res = inode->i_sb->s_cop->prepare_context(inode);
		if (res)
			return res;
	}

	ctx.format = FS_ENCRYPTION_CONTEXT_FORMAT_V1;
	memcpy(ctx.master_key_descriptor, policy->master_key_descriptor,
					FS_KEY_DESCRIPTOR_SIZE);

	if (!fscrypt_valid_contents_enc_mode(
				policy->contents_encryption_mode)) {
		printk(KERN_WARNING
		       "%s: Invalid contents encryption mode %d\n", __func__,
			policy->contents_encryption_mode);
		return -EINVAL;
	}

	if (!fscrypt_valid_filenames_enc_mode(
				policy->filenames_encryption_mode)) {
		printk(KERN_WARNING
			"%s: Invalid filenames encryption mode %d\n", __func__,
			policy->filenames_encryption_mode);
		return -EINVAL;
	}

	if (policy->flags & ~FS_POLICY_FLAGS_VALID)
		return -EINVAL;

	ctx.contents_encryption_mode = policy->contents_encryption_mode;
	ctx.filenames_encryption_mode = policy->filenames_encryption_mode;
	ctx.flags = policy->flags;
	BUILD_BUG_ON(sizeof(ctx.nonce) != FS_KEY_DERIVATION_NONCE_SIZE);
	get_random_bytes(ctx.nonce, FS_KEY_DERIVATION_NONCE_SIZE);

	return inode->i_sb->s_cop->set_context(inode, &ctx, sizeof(ctx), NULL);
}

int fscrypt_process_policy(struct file *filp,
				const struct fscrypt_policy *policy)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	if (policy->version != 0)
		return -EINVAL;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	if (!inode_has_encryption_context(inode)) {
		if (!S_ISDIR(inode->i_mode))
			ret = -EINVAL;
		else if (!inode->i_sb->s_cop->empty_dir)
			ret = -EOPNOTSUPP;
		else if (!inode->i_sb->s_cop->empty_dir(inode))
			ret = -ENOTEMPTY;
		else
			ret = create_encryption_context_from_policy(inode,
								    policy);
	} else if (!is_encryption_context_consistent_with_policy(inode,
								 policy)) {
		printk(KERN_WARNING
		       "%s: Policy inconsistent with encryption context\n",
		       __func__);
		ret = -EINVAL;
	}

	inode_unlock(inode);

	mnt_drop_write_file(filp);
	return ret;
}
EXPORT_SYMBOL(fscrypt_process_policy);
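
/*
 * Userspace reaches this through the set-policy ioctl on an empty
 * directory; ext4 and f2fs both encode it as _IOR('f', 19, ...). A
 * hedged sketch, illustration only: the struct mirrors struct
 * fscrypt_policy, and the mode/flag values follow this series'
 * definitions.
 */
#include <fcntl.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

struct fscrypt_policy_v1 {	/* mirrors struct fscrypt_policy */
	uint8_t version;
	uint8_t contents_encryption_mode;
	uint8_t filenames_encryption_mode;
	uint8_t flags;
	uint8_t master_key_descriptor[8];
};
#define FS_IOC_SET_ENCRYPTION_POLICY _IOR('f', 19, struct fscrypt_policy_v1)

static int set_policy(const char *dir, const uint8_t desc[8])
{
	struct fscrypt_policy_v1 p = {
		.version = 0,
		.contents_encryption_mode = 1,	/* AES-256-XTS */
		.filenames_encryption_mode = 4,	/* AES-256-CTS */
		.flags = 0x2,			/* pad-16 */
	};
	int fd = open(dir, O_RDONLY);
	int err;

	if (fd < 0)
		return -1;
	memcpy(p.master_key_descriptor, desc, 8);
	err = ioctl(fd, FS_IOC_SET_ENCRYPTION_POLICY, &p);
	close(fd);
	return err;
}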

int fscrypt_get_policy(struct inode *inode, struct fscrypt_policy *policy)
{
	struct fscrypt_context ctx;
	int res;

	if (!inode->i_sb->s_cop->get_context ||
			!inode->i_sb->s_cop->is_encrypted(inode))
		return -ENODATA;

	res = inode->i_sb->s_cop->get_context(inode, &ctx, sizeof(ctx));
	if (res != sizeof(ctx))
		return -ENODATA;
	if (ctx.format != FS_ENCRYPTION_CONTEXT_FORMAT_V1)
		return -EINVAL;

	policy->version = 0;
	policy->contents_encryption_mode = ctx.contents_encryption_mode;
	policy->filenames_encryption_mode = ctx.filenames_encryption_mode;
	policy->flags = ctx.flags;
	memcpy(&policy->master_key_descriptor, ctx.master_key_descriptor,
				FS_KEY_DESCRIPTOR_SIZE);
	return 0;
}
EXPORT_SYMBOL(fscrypt_get_policy);

int fscrypt_has_permitted_context(struct inode *parent, struct inode *child)
{
	struct fscrypt_info *parent_ci, *child_ci;
	int res;

	if ((parent == NULL) || (child == NULL)) {
		printk(KERN_ERR "parent %p child %p\n", parent, child);
		BUG_ON(1);
	}

	/* no restrictions if the parent directory is not encrypted */
	if (!parent->i_sb->s_cop->is_encrypted(parent))
		return 1;
	/* if the child directory is not encrypted, this is always a problem */
	if (!parent->i_sb->s_cop->is_encrypted(child))
		return 0;
	res = fscrypt_get_encryption_info(parent);
	if (res)
		return 0;
	res = fscrypt_get_encryption_info(child);
	if (res)
		return 0;
	parent_ci = parent->i_crypt_info;
	child_ci = child->i_crypt_info;
	if (!parent_ci && !child_ci)
		return 1;
	if (!parent_ci || !child_ci)
		return 0;

	return (memcmp(parent_ci->ci_master_key,
			child_ci->ci_master_key,
			FS_KEY_DESCRIPTOR_SIZE) == 0 &&
		(parent_ci->ci_data_mode == child_ci->ci_data_mode) &&
		(parent_ci->ci_filename_mode == child_ci->ci_filename_mode) &&
		(parent_ci->ci_flags == child_ci->ci_flags));
}
EXPORT_SYMBOL(fscrypt_has_permitted_context);

/**
 * fscrypt_inherit_context() - Sets a child context from its parent
 * @parent:  Parent inode from which the context is inherited.
 * @child:   Child inode that inherits the context from @parent.
 * @fs_data: private data given by FS.
 * @preload: preload child i_crypt_info
 *
 * Return: Zero on success, non-zero otherwise
 */
int fscrypt_inherit_context(struct inode *parent, struct inode *child,
						void *fs_data, bool preload)
{
	struct fscrypt_context ctx;
	struct fscrypt_info *ci;
	int res;

	if (!parent->i_sb->s_cop->set_context)
		return -EOPNOTSUPP;

	res = fscrypt_get_encryption_info(parent);
	if (res < 0)
		return res;

	ci = parent->i_crypt_info;
	if (ci == NULL)
		return -ENOKEY;

	ctx.format = FS_ENCRYPTION_CONTEXT_FORMAT_V1;
	if (fscrypt_dummy_context_enabled(parent)) {
		ctx.contents_encryption_mode = FS_ENCRYPTION_MODE_AES_256_XTS;
		ctx.filenames_encryption_mode = FS_ENCRYPTION_MODE_AES_256_CTS;
		ctx.flags = 0;
		memset(ctx.master_key_descriptor, 0x42, FS_KEY_DESCRIPTOR_SIZE);
		res = 0;
	} else {
		ctx.contents_encryption_mode = ci->ci_data_mode;
		ctx.filenames_encryption_mode = ci->ci_filename_mode;
		ctx.flags = ci->ci_flags;
		memcpy(ctx.master_key_descriptor, ci->ci_master_key,
				FS_KEY_DESCRIPTOR_SIZE);
	}
	get_random_bytes(ctx.nonce, FS_KEY_DERIVATION_NONCE_SIZE);
	res = parent->i_sb->s_cop->set_context(child, &ctx,
						sizeof(ctx), fs_data);
	if (res)
		return res;
	return preload ? fscrypt_get_encryption_info(child) : 0;
}
EXPORT_SYMBOL(fscrypt_inherit_context);

@@ -355,6 +355,10 @@ static int dlm_device_register(struct dlm_ls *ls, char *name)

	error = misc_register(&ls->ls_device);
	if (error) {
		kfree(ls->ls_device.name);
		/* this has to be set to NULL
		 * to avoid a double-free in dlm_device_deregister
		 */
		ls->ls_device.name = NULL;
	}
fail:
	return error;

Some files were not shown because too many files have changed in this diff.