Merge android-4.4@4b8fc9f (v4.4.82) into msm-4.4
* refs/heads/tmp-4b8fc9f:
  UPSTREAM: locking: avoid passing around 'thread_info' in mutex debugging code
  ANDROID: arm64: fix undeclared 'init_thread_info' error
  UPSTREAM: kdb: use task_cpu() instead of task_thread_info()->cpu
  Linux 4.4.82
  net: account for current skb length when deciding about UFO
  ipv4: Should use consistent conditional judgement for ip fragment in __ip_append_data and ip_finish_output
  mm/mempool: avoid KASAN marking mempool poison checks as use-after-free
  KVM: arm/arm64: Handle hva aging while destroying the vm
  sparc64: Prevent perf from running during super critical sections
  udp: consistently apply ufo or fragmentation
  revert "ipv4: Should use consistent conditional judgement for ip fragment in __ip_append_data and ip_finish_output"
  revert "net: account for current skb length when deciding about UFO"
  packet: fix tp_reserve race in packet_set_ring
  net: avoid skb_warn_bad_offload false positives on UFO
  tcp: fastopen: tcp_connect() must refresh the route
  net: sched: set xt_tgchk_param par.nft_compat as 0 in ipt_init_target
  bpf, s390: fix jit branch offset related to ldimm64
  net: fix keepalive code vs TCP_FASTOPEN_CONNECT
  tcp: avoid setting cwnd to invalid ssthresh after cwnd reduction states
  ANDROID: keychord: Fix for a memory leak in keychord.
  ANDROID: keychord: Fix races in keychord_write.
  Use %zu to print resid (size_t).
  ANDROID: keychord: Fix a slab out-of-bounds read.
  Linux 4.4.81
  workqueue: implicit ordered attribute should be overridable
  net: account for current skb length when deciding about UFO
  ipv4: Should use consistent conditional judgement for ip fragment in __ip_append_data and ip_finish_output
  mm: don't dereference struct page fields of invalid pages
  signal: protect SIGNAL_UNKILLABLE from unintentional clearing.
  lib/Kconfig.debug: fix frv build failure
  mm, slab: make sure that KMALLOC_MAX_SIZE will fit into MAX_ORDER
  ARM: 8632/1: ftrace: fix syscall name matching
  virtio_blk: fix panic in initialization error path
  drm/virtio: fix framebuffer sparse warning
  scsi: qla2xxx: Get mutex lock before checking optrom_state
  phy state machine: failsafe leave invalid RUNNING state
  x86/boot: Add missing declaration of string functions
  tg3: Fix race condition in tg3_get_stats64().
  net: phy: dp83867: fix irq generation
  sh_eth: R8A7740 supports packet shecksumming
  wext: handle NULL extra data in iwe_stream_add_point better
  sparc64: Measure receiver forward progress to avoid send mondo timeout
  xen-netback: correctly schedule rate-limited queues
  net: phy: Fix PHY unbind crash
  net: phy: Correctly process PHY_HALTED in phy_stop_machine()
  net/mlx5: Fix command bad flow on command entry allocation failure
  sctp: fix the check for _sctp_walk_params and _sctp_walk_errors
  sctp: don't dereference ptr before leaving _sctp_walk_{params, errors}()
  dccp: fix a memleak for dccp_feat_init err process
  dccp: fix a memleak that dccp_ipv4 doesn't put reqsk properly
  dccp: fix a memleak that dccp_ipv6 doesn't put reqsk properly
  net: ethernet: nb8800: Handle all 4 RGMII modes identically
  ipv6: Don't increase IPSTATS_MIB_FRAGFAILS twice in ip6_fragment()
  packet: fix use-after-free in prb_retire_rx_blk_timer_expired()
  openvswitch: fix potential out of bound access in parse_ct
  mcs7780: Fix initialization when CONFIG_VMAP_STACK is enabled
  rtnetlink: allocate more memory for dev_set_mac_address()
  ipv4: initialize fib_trie prior to register_netdev_notifier call.
  ipv6: avoid overflow of offset in ip6_find_1stfragopt
  net: Zero terminate ifr_name in dev_ifname().
  ipv4: ipv6: initialize treq->txhash in cookie_v[46]_check()
  saa7164: fix double fetch PCIe access condition
  drm: rcar-du: fix backport bug
  f2fs: sanity check checkpoint segno and blkoff
  media: lirc: LIRC_GET_REC_RESOLUTION should return microseconds
  mm, mprotect: flush TLB if potentially racing with a parallel reclaim leaving stale TLB entries
  iser-target: Avoid isert_conn->cm_id dereference in isert_login_recv_done
  iscsi-target: Fix delayed logout processing greater than SECONDS_FOR_LOGOUT_COMP
  iscsi-target: Fix initial login PDU asynchronous socket close OOPs
  iscsi-target: Fix early sk_data_ready LOGIN_FLAGS_READY race
  iscsi-target: Always wait for kthread_should_stop() before kthread exit
  target: Avoid mappedlun symlink creation during lun shutdown
  media: platform: davinci: return -EINVAL for VPFE_CMD_S_CCDC_RAW_PARAMS ioctl
  ARM: dts: armada-38x: Fix irq type for pca955
  ext4: fix overflow caused by missing cast in ext4_resize_fs()
  ext4: fix SEEK_HOLE/SEEK_DATA for blocksize < pagesize
  mm/page_alloc: Remove kernel address exposure in free_reserved_area()
  KVM: async_pf: make rcu irq exit if not triggered from idle task
  ASoC: do not close shared backend dailink
  ALSA: hda - Fix speaker output from VAIO VPCL14M1R
  workqueue: restore WQ_UNBOUND/max_active==1 to be ordered
  libata: array underflow in ata_find_dev()
  ANDROID: binder: don't queue async transactions to thread.
  ANDROID: binder: don't enqueue death notifications to thread todo.
  ANDROID: binder: call poll_wait() unconditionally.
  android: configs: move quota-related configs to recommended
  BACKPORT: arm64: split thread_info from task stack
  UPSTREAM: arm64: assembler: introduce ldr_this_cpu
  UPSTREAM: arm64: make cpu number a percpu variable
  UPSTREAM: arm64: smp: prepare for smp_processor_id() rework
  BACKPORT: arm64: move sp_el0 and tpidr_el1 into cpu_suspend_ctx
  UPSTREAM: arm64: prep stack walkers for THREAD_INFO_IN_TASK
  UPSTREAM: arm64: unexport walk_stackframe
  UPSTREAM: arm64: traps: simplify die() and __die()
  UPSTREAM: arm64: factor out current_stack_pointer
  BACKPORT: arm64: asm-offsets: remove unused definitions
  UPSTREAM: arm64: thread_info remove stale items
  UPSTREAM: thread_info: include <current.h> for THREAD_INFO_IN_TASK
  UPSTREAM: thread_info: factor out restart_block
  UPSTREAM: kthread: Pin the stack via try_get_task_stack()/put_task_stack() in to_live_kthread() function
  UPSTREAM: sched/core: Add try_get_task_stack() and put_task_stack()
  UPSTREAM: sched/core: Allow putting thread_info into task_struct
  UPSTREAM: printk: when dumping regs, show the stack, not thread_info
  UPSTREAM: fix up initial thread stack pointer vs thread_info confusion
  UPSTREAM: Clarify naming of thread info/stack allocators
  ANDROID: sdcardfs: override credential for ioctl to lower fs

Conflicts:
	android/configs/android-base.cfg
	arch/arm64/Kconfig
	arch/arm64/include/asm/suspend.h
	arch/arm64/kernel/head.S
	arch/arm64/kernel/smp.c
	arch/arm64/kernel/suspend.c
	arch/arm64/kernel/traps.c
	arch/arm64/mm/proc.S
	kernel/fork.c
	sound/soc/soc-pcm.c

Change-Id: I273e216c94899a838bbd208391c6cbe20b2bf683
Signed-off-by: Blagovest Kolenichev <bkolenichev@codeaurora.org>
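The centerpiece of this merge is the THREAD_INFO_IN_TASK backport. As a minimal orientation sketch only (illustrative structs, not the kernel's real definitions): thread_info moves from the base of the kernel stack into task_struct itself, which is why the diff below repeatedly swaps stack-pointer masking for direct task-pointer accesses.

	/* Minimal sketch of the layout change; field names mirror the kernel's
	 * but these structs are illustrative, not the real definitions. */
	struct thread_info {
		unsigned long flags;		/* low level flags */
		int preempt_count;
	};

	struct task_struct {
		struct thread_info thread_info;	/* kept first so entry code can
						 * reach it at offset TSK_TI_*
						 * from the task pointer */
		void *stack;			/* kernel stack, now allocated
						 * separately from the task */
	};

	/* Before: thread_info was found by masking sp with ~(THREAD_SIZE - 1).
	 * After: the task pointer itself (held in sp_el0 on arm64) is the
	 * anchor, and the stack can be freed independently of the task. */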
commit 901bf6ddcc
135 changed files with 1236 additions and 470 deletions

Makefile (2 lines changed)
@@ -1,6 +1,6 @@
 VERSION = 4
 PATCHLEVEL = 4
-SUBLEVEL = 80
+SUBLEVEL = 82
 EXTRAVERSION =
 NAME = Blurry Fish Butt
@@ -139,11 +139,6 @@ CONFIG_PPP_DEFLATE=y
 CONFIG_PPP_MPPE=y
 CONFIG_PREEMPT=y
 CONFIG_PROFILING=y
-CONFIG_QFMT_V2=y
-CONFIG_QUOTA=y
-CONFIG_QUOTA_NETLINK_INTERFACE=y
-CONFIG_QUOTA_TREE=y
-CONFIG_QUOTACTL=y
 CONFIG_RANDOMIZE_BASE=y
 CONFIG_RTC_CLASS=y
 CONFIG_RT_GROUP_SCHED=y
@@ -110,6 +110,11 @@ CONFIG_POWER_SUPPLY=y
 CONFIG_PSTORE=y
 CONFIG_PSTORE_CONSOLE=y
 CONFIG_PSTORE_RAM=y
+CONFIG_QFMT_V2=y
+CONFIG_QUOTA=y
+CONFIG_QUOTACTL=y
+CONFIG_QUOTA_NETLINK_INTERFACE=y
+CONFIG_QUOTA_TREE=y
 CONFIG_SCHEDSTATS=y
 CONFIG_SMARTJOYPLUS_FF=y
 CONFIG_SND=y
@@ -225,8 +225,8 @@ config ARCH_INIT_TASK
 config ARCH_TASK_STRUCT_ALLOCATOR
 	bool
 
-# Select if arch has its private alloc_thread_info() function
-config ARCH_THREAD_INFO_ALLOCATOR
+# Select if arch has its private alloc_thread_stack() function
+config ARCH_THREAD_STACK_ALLOCATOR
 	bool
 
 # Select if arch wants to size task_struct dynamically via arch_task_struct_size:
@@ -89,7 +89,7 @@
 			pinctrl-names = "default";
 			pinctrl-0 = <&pca0_pins>;
 			interrupt-parent = <&gpio0>;
-			interrupts = <18 IRQ_TYPE_EDGE_FALLING>;
+			interrupts = <18 IRQ_TYPE_LEVEL_LOW>;
 			gpio-controller;
 			#gpio-cells = <2>;
 			interrupt-controller;
@@ -101,7 +101,7 @@
 			compatible = "nxp,pca9555";
 			pinctrl-names = "default";
 			interrupt-parent = <&gpio0>;
-			interrupts = <18 IRQ_TYPE_EDGE_FALLING>;
+			interrupts = <18 IRQ_TYPE_LEVEL_LOW>;
 			gpio-controller;
 			#gpio-cells = <2>;
 			interrupt-controller;
@@ -54,6 +54,24 @@ static inline void *return_address(unsigned int level)
 
 #define ftrace_return_address(n) return_address(n)
 
+#define ARCH_HAS_SYSCALL_MATCH_SYM_NAME
+
+static inline bool arch_syscall_match_sym_name(const char *sym,
+					       const char *name)
+{
+	if (!strcmp(sym, "sys_mmap2"))
+		sym = "sys_mmap_pgoff";
+	else if (!strcmp(sym, "sys_statfs64_wrapper"))
+		sym = "sys_statfs64";
+	else if (!strcmp(sym, "sys_fstatfs64_wrapper"))
+		sym = "sys_fstatfs64";
+	else if (!strcmp(sym, "sys_arm_fadvise64_64"))
+		sym = "sys_fadvise64_64";
+
+	/* Ignore case since sym may start with "SyS" instead of "sys" */
+	return !strcasecmp(sym, name);
+}
+
 #endif /* ifndef __ASSEMBLY__ */
 
 #endif /* _ASM_ARM_FTRACE */
@@ -1636,12 +1636,16 @@ static int kvm_test_age_hva_handler(struct kvm *kvm, gpa_t gpa, void *data)
 
 int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
 {
+	if (!kvm->arch.pgd)
+		return 0;
 	trace_kvm_age_hva(start, end);
 	return handle_hva_to_gpa(kvm, start, end, kvm_age_hva_handler, NULL);
 }
 
 int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
 {
+	if (!kvm->arch.pgd)
+		return 0;
 	trace_kvm_test_age_hva(hva);
 	return handle_hva_to_gpa(kvm, hva, hva, kvm_test_age_hva_handler, NULL);
 }
@@ -103,6 +103,7 @@ config ARM64
 	select SYSCTL_EXCEPTION_TRACE
 	select HAVE_CONTEXT_TRACKING
 	select HAVE_ARM_SMCCC
+	select THREAD_INFO_IN_TASK
 	help
 	  ARM 64-bit (AArch64) Linux support.
 
@@ -235,14 +235,25 @@ lr	.req	x30		// link register
 	.endm
 
 	/*
+	 * @dst: Result of per_cpu(sym, smp_processor_id())
 	 * @sym: The name of the per-cpu variable
-	 * @reg: Result of per_cpu(sym, smp_processor_id())
 	 * @tmp: scratch register
 	 */
-	.macro this_cpu_ptr, sym, reg, tmp
-	adr_l	\reg, \sym
+	.macro adr_this_cpu, dst, sym, tmp
+	adr_l	\dst, \sym
 	mrs	\tmp, tpidr_el1
-	add	\reg, \reg, \tmp
+	add	\dst, \dst, \tmp
 	.endm
 
+	/*
+	 * @dst: Result of READ_ONCE(per_cpu(sym, smp_processor_id()))
+	 * @sym: The name of the per-cpu variable
+	 * @tmp: scratch register
+	 */
+	.macro ldr_this_cpu dst, sym, tmp
+	adr_l	\dst, \sym
+	mrs	\tmp, tpidr_el1
+	ldr	\dst, [\dst, \tmp]
+	.endm
+
 	/*
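For orientation, a hedged C model of what the two macros above compute, assuming (as the surrounding code arranges) that TPIDR_EL1 holds this CPU's per-cpu offset; this is illustrative only, not kernel code:

	/* C model of adr_this_cpu / ldr_this_cpu; illustrative only. */
	unsigned long adr_this_cpu(unsigned long sym, unsigned long tpidr_el1)
	{
		/* adr_l \dst, \sym ; mrs \tmp, tpidr_el1 ; add \dst, \dst, \tmp */
		return sym + tpidr_el1;		/* address of this CPU's copy */
	}

	unsigned long ldr_this_cpu(unsigned long sym, unsigned long tpidr_el1)
	{
		/* same address computation, plus a load of the value itself */
		return *(unsigned long *)(sym + tpidr_el1);
	}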
arch/arm64/include/asm/current.h (new file, 27 lines)
@@ -0,0 +1,27 @@
+#ifndef __ASM_CURRENT_H
+#define __ASM_CURRENT_H
+
+#include <linux/compiler.h>
+
+#include <asm/sysreg.h>
+
+#ifndef __ASSEMBLY__
+
+#ifdef CONFIG_THREAD_INFO_IN_TASK
+struct task_struct;
+
+static __always_inline struct task_struct *get_current(void)
+{
+	return (struct task_struct *)read_sysreg(sp_el0);
+}
+#define current get_current()
+#else
+#include <linux/thread_info.h>
+#define get_current() (current_thread_info()->task)
+#define current get_current()
+#endif
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __ASM_CURRENT_H */
@@ -16,6 +16,8 @@
 #ifndef __ASM_PERCPU_H
 #define __ASM_PERCPU_H
 
+#include <asm/stack_pointer.h>
+
 static inline void set_my_cpu_offset(unsigned long off)
 {
 	asm volatile("msr tpidr_el1, %0" :: "r" (off) : "memory");
@@ -17,6 +17,8 @@
 #ifndef __ASM_PERF_EVENT_H
 #define __ASM_PERF_EVENT_H
 
+#include <asm/stack_pointer.h>
+
 #ifdef CONFIG_PERF_EVENTS
 struct pt_regs;
 extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
@@ -16,11 +16,20 @@
 #ifndef __ASM_SMP_H
 #define __ASM_SMP_H
 
+#include <asm/percpu.h>
+
 #include <linux/threads.h>
 #include <linux/cpumask.h>
 #include <linux/thread_info.h>
 
-#define raw_smp_processor_id() (current_thread_info()->cpu)
+DECLARE_PER_CPU_READ_MOSTLY(int, cpu_number);
+
+/*
+ * We don't use this_cpu_read(cpu_number) as that has implicit writes to
+ * preempt_count, and associated (compiler) barriers, that we'd like to avoid
+ * the expense of. If we're preemptible, the value can be stale at use anyway.
+ */
+#define raw_smp_processor_id() (*this_cpu_ptr(&cpu_number))
 
 struct seq_file;
 
@@ -57,6 +66,9 @@ asmlinkage void secondary_start_kernel(void);
  */
 struct secondary_data {
 	void *stack;
+#ifdef CONFIG_THREAD_INFO_IN_TASK
+	struct task_struct *task;
+#endif
 };
 extern struct secondary_data secondary_data;
 extern void secondary_entry(void);
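A hedged sketch of the mechanism the new raw_smp_processor_id() relies on: each CPU's id is stored in its own per-cpu slot at boot (see the smp.c hunk later in this diff), so reading it is a single load through the CPU's offset. Standalone plain-C model with an explicit offset, not kernel code:

	#include <stdio.h>

	#define NR_CPUS 4
	static int cpu_number[NR_CPUS];	/* stands in for the per-cpu variable */

	int main(void)
	{
		/* boot: per_cpu(cpu_number, cpu) = cpu; (smp_prepare_cpus hunk) */
		for (int cpu = 0; cpu < NR_CPUS; cpu++)
			cpu_number[cpu] = cpu;

		/* later: raw_smp_processor_id() is one load through the
		 * offset this CPU carries (tpidr_el1 on arm64) */
		int my_offset = 2;
		printf("%d\n", cpu_number[my_offset]);
		return 0;
	}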
arch/arm64/include/asm/stack_pointer.h (new file, 9 lines)
@@ -0,0 +1,9 @@
+#ifndef __ASM_STACK_POINTER_H
+#define __ASM_STACK_POINTER_H
+
+/*
+ * how to get the current stack pointer from C
+ */
+register unsigned long current_stack_pointer asm ("sp");
+
+#endif /* __ASM_STACK_POINTER_H */
@@ -1,7 +1,7 @@
 #ifndef __ASM_SUSPEND_H
 #define __ASM_SUSPEND_H
 
-#define NR_CTX_REGS 10
+#define NR_CTX_REGS 12
 #define NR_CALLEE_SAVED_REGS 12
 
 /*
@@ -36,25 +36,36 @@
 
 struct task_struct;
 
+#include <asm/stack_pointer.h>
 #include <asm/types.h>
 
 typedef unsigned long mm_segment_t;
 
 /*
  * low level task data that entry.S needs immediate access to.
- * __switch_to() assumes cpu_context follows immediately after cpu_domain.
  */
 struct thread_info {
 	unsigned long		flags;		/* low level flags */
 	mm_segment_t		addr_limit;	/* address limit */
+#ifndef CONFIG_THREAD_INFO_IN_TASK
 	struct task_struct	*task;		/* main task structure */
+#endif
 #ifdef CONFIG_ARM64_SW_TTBR0_PAN
 	u64			ttbr0;		/* saved TTBR0_EL1 */
 #endif
 	int			preempt_count;	/* 0 => preemptable, <0 => bug */
+#ifndef CONFIG_THREAD_INFO_IN_TASK
 	int			cpu;		/* cpu */
+#endif
 };
 
+#ifdef CONFIG_THREAD_INFO_IN_TASK
+#define INIT_THREAD_INFO(tsk)						\
+{									\
+	.preempt_count	= INIT_PREEMPT_COUNT,				\
+	.addr_limit	= KERNEL_DS,					\
+}
+#else
 #define INIT_THREAD_INFO(tsk)						\
 {									\
 	.task		= &tsk,						\
@@ -63,14 +74,6 @@ struct thread_info {
 	.addr_limit	= KERNEL_DS,					\
 }
 
-#define init_thread_info	(init_thread_union.thread_info)
-#define init_stack		(init_thread_union.stack)
-
-/*
- * how to get the current stack pointer from C
- */
-register unsigned long current_stack_pointer asm ("sp");
-
 /*
  * how to get the thread information struct from C
  */
@@ -88,6 +91,11 @@ static inline struct thread_info *current_thread_info(void)
 	return (struct thread_info *)sp_el0;
 }
 
+#define init_thread_info	(init_thread_union.thread_info)
+#endif
+
+#define init_stack		(init_thread_union.stack)
+
 #define thread_saved_pc(tsk)	\
 	((unsigned long)(tsk->thread.cpu_context.pc))
 #define thread_saved_sp(tsk)	\
@@ -35,11 +35,16 @@ int main(void)
 {
   DEFINE(TSK_ACTIVE_MM,		offsetof(struct task_struct, active_mm));
   BLANK();
+#ifdef CONFIG_THREAD_INFO_IN_TASK
+  DEFINE(TSK_TI_FLAGS,		offsetof(struct task_struct, thread_info.flags));
+  DEFINE(TSK_TI_PREEMPT,	offsetof(struct task_struct, thread_info.preempt_count));
+  DEFINE(TSK_TI_ADDR_LIMIT,	offsetof(struct task_struct, thread_info.addr_limit));
+  DEFINE(TSK_STACK,		offsetof(struct task_struct, stack));
+#else
   DEFINE(TI_FLAGS,		offsetof(struct thread_info, flags));
   DEFINE(TI_PREEMPT,		offsetof(struct thread_info, preempt_count));
   DEFINE(TI_ADDR_LIMIT,		offsetof(struct thread_info, addr_limit));
-  DEFINE(TI_TASK,		offsetof(struct thread_info, task));
-  DEFINE(TI_CPU,		offsetof(struct thread_info, cpu));
+#endif
 #ifdef CONFIG_ARM64_SW_TTBR0_PAN
   DEFINE(TSK_TI_TTBR0,		offsetof(struct thread_info, ttbr0));
 #endif
@@ -124,6 +129,11 @@ int main(void)
   DEFINE(TZ_MINWEST,		offsetof(struct timezone, tz_minuteswest));
   DEFINE(TZ_DSTTIME,		offsetof(struct timezone, tz_dsttime));
   BLANK();
+#ifdef CONFIG_THREAD_INFO_IN_TASK
+  DEFINE(CPU_BOOT_STACK,	offsetof(struct secondary_data, stack));
+  DEFINE(CPU_BOOT_TASK,		offsetof(struct secondary_data, task));
+  BLANK();
+#endif
 #ifdef CONFIG_KVM_ARM_HOST
   DEFINE(VCPU_CONTEXT,		offsetof(struct kvm_vcpu, arch.ctxt));
   DEFINE(CPU_GP_REGS,		offsetof(struct kvm_cpu_context, gp_regs));
@@ -93,9 +93,14 @@
 
 	.if	\el == 0
 	mrs	x21, sp_el0
+#ifdef CONFIG_THREAD_INFO_IN_TASK
+	ldr_this_cpu	tsk, __entry_task, x20	// Ensure MDSCR_EL1.SS is clear,
+	ldr	x19, [tsk, #TSK_TI_FLAGS]	// since we can unmask debug
+#else
 	mov	tsk, sp
 	and	tsk, tsk, #~(THREAD_SIZE - 1)	// Ensure MDSCR_EL1.SS is clear,
 	ldr	x19, [tsk, #TI_FLAGS]		// since we can unmask debug
+#endif
 	disable_step_tsk x19, x20		// exceptions when scheduling.
 
 	mov	x29, xzr			// fp pointed to user-space
@@ -103,10 +108,18 @@
 	add	x21, sp, #S_FRAME_SIZE
 	get_thread_info tsk
 	/* Save the task's original addr_limit and set USER_DS (TASK_SIZE_64) */
+#ifdef CONFIG_THREAD_INFO_IN_TASK
+	ldr	x20, [tsk, #TSK_TI_ADDR_LIMIT]
+#else
 	ldr	x20, [tsk, #TI_ADDR_LIMIT]
+#endif
 	str	x20, [sp, #S_ORIG_ADDR_LIMIT]
 	mov	x20, #TASK_SIZE_64
+#ifdef CONFIG_THREAD_INFO_IN_TASK
+	str	x20, [tsk, #TSK_TI_ADDR_LIMIT]
+#else
 	str	x20, [tsk, #TI_ADDR_LIMIT]
+#endif
 	ALTERNATIVE(nop, SET_PSTATE_UAO(0), ARM64_HAS_UAO, CONFIG_ARM64_UAO)
 	.endif /* \el == 0 */
 	mrs	x22, elr_el1
@@ -168,7 +181,11 @@ alternative_else_nop_endif
 	.if	\el != 0
 	/* Restore the task's original addr_limit. */
 	ldr	x20, [sp, #S_ORIG_ADDR_LIMIT]
+#ifdef CONFIG_THREAD_INFO_IN_TASK
+	str	x20, [tsk, #TSK_TI_ADDR_LIMIT]
+#else
 	str	x20, [tsk, #TI_ADDR_LIMIT]
+#endif
 
 	/* No need to restore UAO, it will be restored from SPSR_EL1 */
 	.endif
@@ -258,15 +275,22 @@ alternative_endif
 	mov	x19, sp			// preserve the original sp
 
 	/*
-	 * Compare sp with the current thread_info, if the top
-	 * ~(THREAD_SIZE - 1) bits match, we are on a task stack, and
-	 * should switch to the irq stack.
+	 * Compare sp with the base of the task stack.
+	 * If the top ~(THREAD_SIZE - 1) bits match, we are on a task stack,
+	 * and should switch to the irq stack.
 	 */
+#ifdef CONFIG_THREAD_INFO_IN_TASK
+	ldr	x25, [tsk, TSK_STACK]
+	eor	x25, x25, x19
+	and	x25, x25, #~(THREAD_SIZE - 1)
+	cbnz	x25, 9998f
+#else
 	and	x25, x19, #~(THREAD_SIZE - 1)
 	cmp	x25, tsk
 	b.ne	9998f
+#endif
 
-	this_cpu_ptr irq_stack, x25, x26
+	adr_this_cpu x25, irq_stack, x26
 	mov	x26, #IRQ_STACK_START_SP
 	add	x26, x25, x26
 
@@ -498,9 +522,17 @@ el1_irq:
 	irq_handler
 
 #ifdef CONFIG_PREEMPT
+#ifdef CONFIG_THREAD_INFO_IN_TASK
+	ldr	w24, [tsk, #TSK_TI_PREEMPT]	// get preempt count
+#else
 	ldr	w24, [tsk, #TI_PREEMPT]		// get preempt count
+#endif
 	cbnz	w24, 1f				// preempt count != 0
+#ifdef CONFIG_THREAD_INFO_IN_TASK
+	ldr	x0, [tsk, #TSK_TI_FLAGS]	// get flags
+#else
 	ldr	x0, [tsk, #TI_FLAGS]		// get flags
+#endif
 	tbz	x0, #TIF_NEED_RESCHED, 1f	// needs rescheduling?
 	bl	el1_preempt
 1:
@@ -515,7 +547,11 @@ ENDPROC(el1_irq)
 el1_preempt:
 	mov	x24, lr
 1:	bl	preempt_schedule_irq		// irq en/disable is done inside
+#ifdef CONFIG_THREAD_INFO_IN_TASK
+	ldr	x0, [tsk, #TSK_TI_FLAGS]	// get new tasks TI_FLAGS
+#else
 	ldr	x0, [tsk, #TI_FLAGS]		// get new tasks TI_FLAGS
+#endif
 	tbnz	x0, #TIF_NEED_RESCHED, 1b	// needs rescheduling?
 	ret	x24
 #endif
@@ -773,8 +809,12 @@ ENTRY(cpu_switch_to)
 	mov	v15.16b, v15.16b
 #endif
 	mov	sp, x9
+#ifdef CONFIG_THREAD_INFO_IN_TASK
+	msr	sp_el0, x1
+#else
 	and	x9, x9, #~(THREAD_SIZE - 1)
 	msr	sp_el0, x9
+#endif
 	ret
 ENDPROC(cpu_switch_to)
 
@@ -785,7 +825,11 @@ ENDPROC(cpu_switch_to)
 ret_fast_syscall:
 	disable_irq				// disable interrupts
 	str	x0, [sp, #S_X0]			// returned x0
+#ifdef CONFIG_THREAD_INFO_IN_TASK
+	ldr	x1, [tsk, #TSK_TI_FLAGS]	// re-check for syscall tracing
+#else
 	ldr	x1, [tsk, #TI_FLAGS]		// re-check for syscall tracing
+#endif
 	and	x2, x1, #_TIF_SYSCALL_WORK
 	cbnz	x2, ret_fast_syscall_trace
 	and	x2, x1, #_TIF_WORK_MASK
@@ -817,7 +861,11 @@ work_resched:
  */
 ret_to_user:
 	disable_irq				// disable interrupts
+#ifdef CONFIG_THREAD_INFO_IN_TASK
+	ldr	x1, [tsk, #TSK_TI_FLAGS]
+#else
 	ldr	x1, [tsk, #TI_FLAGS]
+#endif
 	and	x2, x1, #_TIF_WORK_MASK
 	cbnz	x2, work_pending
 	enable_step_tsk x1, x2
@@ -849,7 +897,11 @@ el0_svc_naked:				// compat entry point
 	enable_dbg_and_irq
 	ct_user_exit 1
 
+#ifdef CONFIG_THREAD_INFO_IN_TASK
+	ldr	x16, [tsk, #TSK_TI_FLAGS]	// check for syscall hooks
+#else
 	ldr	x16, [tsk, #TI_FLAGS]		// check for syscall hooks
+#endif
 	tst	x16, #_TIF_SYSCALL_WORK
 	b.ne	__sys_trace
 	cmp     scno, sc_nr			// check upper syscall limit
@@ -418,6 +418,7 @@ ENDPROC(__create_page_tables)
 	.set	initial_sp, init_thread_union + THREAD_START_SP
 __primary_switched:
+	mov	x28, lr				// preserve LR
 
 	adr_l	x8, vectors			// load VBAR_EL1 with virtual
 	msr	vbar_el1, x8			// vector table address
 	isb
@@ -430,10 +431,18 @@ __primary_switched:
 	bl	__pi_memset
 	dsb	ishst				// Make zero page visible to PTW
 
+#ifdef CONFIG_THREAD_INFO_IN_TASK
+	adrp	x4, init_thread_union
+	add	sp, x4, #THREAD_SIZE
+	adr_l	x5, init_task
+	msr	sp_el0, x5			// Save thread_info
+#else
 	adr_l	sp, initial_sp, x4
 	mov	x4, sp
 	and	x4, x4, #~(THREAD_SIZE - 1)
 	msr	sp_el0, x4			// Save thread_info
+#endif
 
 	str_l	x21, __fdt_pointer, x5		// Save FDT pointer
 
 	ldr_l	x4, kimage_vaddr		// Save the offset between
@@ -642,11 +651,18 @@ __secondary_switched:
 	adr_l	x5, vectors
 	msr	vbar_el1, x5
 	isb
 
+#ifdef CONFIG_THREAD_INFO_IN_TASK
+	adr_l	x0, secondary_data
+	ldr	x1, [x0, #CPU_BOOT_STACK]	// get secondary_data.stack
+	mov	sp, x1
+	ldr	x2, [x0, #CPU_BOOT_TASK]
+	msr	sp_el0, x2
+#else
 	ldr_l	x0, secondary_data		// get secondary_data.stack
 	mov	sp, x0
 	and	x0, x0, #~(THREAD_SIZE - 1)
 	msr	sp_el0, x0			// save thread_info
+#endif
 	mov	x29, #0
 	b	secondary_start_kernel
 ENDPROC(__secondary_switched)
@@ -65,7 +65,7 @@ void tracectr_notifier(void *ignore, bool preempt,
 {
 	u32 cnten_val;
 	int current_pid;
-	u32 cpu = task_thread_info(next)->cpu;
+	u32 cpu = task_cpu(next);
 
 	if (tp_pid_state != 1)
 		return;
@@ -45,6 +45,9 @@
 #include <linux/personality.h>
 #include <linux/notifier.h>
 #include <trace/events/power.h>
+#ifdef CONFIG_THREAD_INFO_IN_TASK
+#include <linux/percpu.h>
+#endif
 
 #include <asm/alternative.h>
 #include <asm/compat.h>
@@ -394,6 +397,22 @@ void uao_thread_switch(struct task_struct *next)
 	}
 }
 
+#ifdef CONFIG_THREAD_INFO_IN_TASK
+/*
+ * We store our current task in sp_el0, which is clobbered by userspace. Keep a
+ * shadow copy so that we can restore this upon entry from userspace.
+ *
+ * This is *only* for exception entry from EL0, and is not valid until we
+ * __switch_to() a user task.
+ */
+DEFINE_PER_CPU(struct task_struct *, __entry_task);
+
+static void entry_task_switch(struct task_struct *next)
+{
+	__this_cpu_write(__entry_task, next);
+}
+#endif
+
 /*
  * Thread switching.
  */
@@ -406,6 +425,9 @@ struct task_struct *__switch_to(struct task_struct *prev,
 	tls_thread_switch(next);
 	hw_breakpoint_thread_switch(next);
 	contextidr_thread_switch(next);
+#ifdef CONFIG_THREAD_INFO_IN_TASK
+	entry_task_switch(next);
+#endif
 	uao_thread_switch(next);
 
 	/*
@@ -423,27 +445,35 @@ struct task_struct *__switch_to(struct task_struct *prev,
 unsigned long get_wchan(struct task_struct *p)
 {
 	struct stackframe frame;
-	unsigned long stack_page;
+	unsigned long stack_page, ret = 0;
 	int count = 0;
 	if (!p || p == current || p->state == TASK_RUNNING)
 		return 0;
 
+	stack_page = (unsigned long)try_get_task_stack(p);
+	if (!stack_page)
+		return 0;
+
 	frame.fp = thread_saved_fp(p);
 	frame.sp = thread_saved_sp(p);
 	frame.pc = thread_saved_pc(p);
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 	frame.graph = p->curr_ret_stack;
 #endif
-	stack_page = (unsigned long)task_stack_page(p);
 	do {
 		if (frame.sp < stack_page ||
 		    frame.sp >= stack_page + THREAD_SIZE ||
 		    unwind_frame(p, &frame))
-			return 0;
-		if (!in_sched_functions(frame.pc))
-			return frame.pc;
+			goto out;
+		if (!in_sched_functions(frame.pc)) {
+			ret = frame.pc;
+			goto out;
+		}
 	} while (count ++ < 16);
-	return 0;
+
+out:
+	put_task_stack(p);
+	return ret;
 }
 
 unsigned long arch_align_stack(unsigned long sp)
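The get_wchan rewrite above is one instance of a general pattern this series introduces: pin the target task's stack before walking it, and release the pin on every exit path. A hedged kernel-context sketch of the pattern (the walker function is hypothetical, not the patch's code):

	/* Sketch of the pin/walk/unpin pattern; examine_frames() is a
	 * hypothetical helper standing in for the actual stack walk. */
	static unsigned long walk_pinned_stack(struct task_struct *p)
	{
		unsigned long ret = 0;
		unsigned long stack = (unsigned long)try_get_task_stack(p);

		if (!stack)			/* task exited, stack already freed */
			return 0;

		ret = examine_frames(p, stack);	/* no early return may skip... */

		put_task_stack(p);		/* ...this unpin */
		return ret;
	}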
@@ -12,6 +12,7 @@
 #include <linux/export.h>
 #include <linux/ftrace.h>
 
+#include <asm/stack_pointer.h>
 #include <asm/stacktrace.h>
 
 struct return_address_data {
@@ -362,11 +362,15 @@ void __init setup_arch(char **cmdline_p)
 
 #ifdef CONFIG_ARM64_SW_TTBR0_PAN
 	/*
-	 * Make sure init_thread_info.ttbr0 always generates translation
+	 * Make sure thread_info.ttbr0 always generates translation
 	 * faults in case uaccess_enable() is inadvertently called by the init
 	 * thread.
 	 */
+#ifdef CONFIG_THREAD_INFO_IN_TASK
+	init_task.thread_info.ttbr0 = virt_to_phys(empty_zero_page);
+#else
 	init_thread_info.ttbr0 = virt_to_phys(empty_zero_page);
+#endif
 #endif
 
 #ifdef CONFIG_VT
@@ -124,9 +124,6 @@ ENTRY(_cpu_resume)
 	/* load sp from context */
 	ldr	x2, [x0, #CPU_CTX_SP]
 	mov	sp, x2
-	/* save thread_info */
-	and	x2, x2, #~(THREAD_SIZE - 1)
-	msr	sp_el0, x2
 	/*
 	 * cpu_do_resume expects x0 to contain context address pointer
 	 */
@@ -58,6 +58,9 @@
 #define CREATE_TRACE_POINTS
 #include <trace/events/ipi.h>
 
+DEFINE_PER_CPU_READ_MOSTLY(int, cpu_number);
+EXPORT_PER_CPU_SYMBOL(cpu_number);
+
 /*
  * as from 2.5, kernels no longer have an init_tasks structure
  * so we need some other way of telling a new secondary core
@@ -97,6 +100,9 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
 	 * We need to tell the secondary core where to find its stack and the
 	 * page tables.
 	 */
+#ifdef CONFIG_THREAD_INFO_IN_TASK
+	secondary_data.task = idle;
+#endif
 	secondary_data.stack = task_stack_page(idle) + THREAD_START_SP;
 	__flush_dcache_area(&secondary_data, sizeof(secondary_data));
 
@@ -120,6 +126,9 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
 		pr_err("CPU%u: failed to boot: %d\n", cpu, ret);
 	}
 
+#ifdef CONFIG_THREAD_INFO_IN_TASK
+	secondary_data.task = NULL;
+#endif
 	secondary_data.stack = NULL;
 
 	return ret;
@@ -137,7 +146,12 @@ static void smp_store_cpu_info(unsigned int cpuid)
 asmlinkage void secondary_start_kernel(void)
 {
 	struct mm_struct *mm = &init_mm;
-	unsigned int cpu = smp_processor_id();
+	unsigned int cpu;
+
+	cpu = task_cpu(current);
+	set_my_cpu_offset(per_cpu_offset(cpu));
+
+	pr_debug("CPU%u: Booted secondary processor\n", cpu);
 
 	/*
 	 * All kernel threads share the same mm context; grab a
@@ -146,10 +160,6 @@ asmlinkage void secondary_start_kernel(void)
 	atomic_inc(&mm->mm_count);
 	current->active_mm = mm;
 
-	set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
-
-	pr_debug("CPU%u: Booted secondary processor\n", cpu);
-
 	/*
 	 * TTBR0 is only used for the identity mapping at this stage. Make it
 	 * point to zero page to avoid speculatively fetching new entries.
@@ -632,6 +642,8 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 		if (max_cpus == 0)
 			break;
 
+		per_cpu(cpu_number, cpu) = cpu;
+
 		if (cpu == smp_processor_id())
 			continue;
 
@@ -23,6 +23,7 @@
 #include <linux/stacktrace.h>
 
 #include <asm/irq.h>
+#include <asm/stack_pointer.h>
 #include <asm/stacktrace.h>
 
 /*
@@ -130,7 +131,6 @@ void notrace walk_stackframe(struct task_struct *tsk, struct stackframe *frame,
 			break;
 	}
 }
-EXPORT_SYMBOL(walk_stackframe);
 
 #ifdef CONFIG_STACKTRACE
 struct stack_trace_data {
@@ -162,6 +162,9 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
 	struct stack_trace_data data;
 	struct stackframe frame;
 
+	if (!try_get_task_stack(tsk))
+		return;
+
 	data.trace = trace;
 	data.skip = trace->skip;
 
@@ -183,6 +186,8 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
 	walk_stackframe(tsk, &frame, save_trace, &data);
 	if (trace->nr_entries < trace->max_entries)
 		trace->entries[trace->nr_entries++] = ULONG_MAX;
+
+	put_task_stack(tsk);
 }
 EXPORT_SYMBOL(save_stack_trace_tsk);
 
@@ -44,12 +44,6 @@ void notrace __cpu_suspend_exit(void)
 	 */
 	cpu_uninstall_idmap();
 
-	/*
-	 * Restore per-cpu offset before any kernel
-	 * subsystem relying on it has a chance to run.
-	 */
-	set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
-
 	/*
 	 * Restore HW breakpoint registers to sane values
 	 * before debug exceptions are possibly reenabled
@@ -38,6 +38,7 @@
 #include <asm/esr.h>
 #include <asm/insn.h>
 #include <asm/traps.h>
+#include <asm/stack_pointer.h>
 #include <asm/stacktrace.h>
 #include <asm/exception.h>
 #include <asm/system_misc.h>
@@ -153,6 +154,14 @@ static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
 	unsigned long irq_stack_ptr;
 	int skip;
 
+	pr_debug("%s(regs = %pK tsk = %pK)\n", __func__, regs, tsk);
+
+	if (!tsk)
+		tsk = current;
+
+	if (!try_get_task_stack(tsk))
+		return;
+
 	/*
 	 * Switching between stacks is valid when tracing current and in
 	 * non-preemptible context.
@@ -223,6 +232,8 @@ static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
 					 stack + sizeof(struct pt_regs), false);
 		}
 	}
+
+	put_task_stack(tsk);
 }
 
 void show_stack(struct task_struct *tsk, unsigned long *sp)
@@ -238,10 +249,9 @@ void show_stack(struct task_struct *tsk, unsigned long *sp)
 #endif
 #define S_SMP " SMP"
 
-static int __die(const char *str, int err, struct thread_info *thread,
-		 struct pt_regs *regs)
+static int __die(const char *str, int err, struct pt_regs *regs)
 {
-	struct task_struct *tsk = thread->task;
+	struct task_struct *tsk = current;
 	static int die_counter;
 	int ret;
 
@@ -256,7 +266,8 @@ static int __die(const char *str, int err, struct pt_regs *regs)
 	print_modules();
 	__show_regs(regs);
 	pr_emerg("Process %.*s (pid: %d, stack limit = 0x%p)\n",
-		 TASK_COMM_LEN, tsk->comm, task_pid_nr(tsk), thread + 1);
+		 TASK_COMM_LEN, tsk->comm, task_pid_nr(tsk),
+		 end_of_stack(tsk));
 
 	if (!user_mode(regs) || in_interrupt()) {
 		dump_backtrace(regs, tsk);
@@ -321,7 +332,6 @@ static void oops_end(unsigned long flags, struct pt_regs *regs, int notify)
  */
 void die(const char *str, struct pt_regs *regs, int err)
 {
-	struct thread_info *thread = current_thread_info();
 	enum bug_trap_type bug_type = BUG_TRAP_TYPE_NONE;
 	unsigned long flags = oops_begin();
 	int ret;
@@ -331,7 +341,7 @@ void die(const char *str, struct pt_regs *regs, int err)
 	if (bug_type != BUG_TRAP_TYPE_NONE)
 		str = "Oops - BUG";
 
-	ret = __die(str, err, thread, regs);
+	ret = __die(str, err, regs);
 
 	oops_end(flags, regs, ret);
 }
@@ -116,11 +116,14 @@ ENTRY(cpu_do_suspend)
 	mrs	x8, mdscr_el1
 	mrs	x9, oslsr_el1
 	mrs	x10, sctlr_el1
+	mrs	x11, tpidr_el1
+	mrs	x12, sp_el0
 	stp	x2, x3, [x0]
 	stp	x4, xzr, [x0, #16]
 	stp	x5, x6, [x0, #32]
 	stp	x7, x8, [x0, #48]
 	stp	x9, x10, [x0, #64]
+	stp	x11, x12, [x0, #80]
 	ret
 ENDPROC(cpu_do_suspend)
 
@@ -135,6 +138,7 @@ ENTRY(cpu_do_resume)
 	ldp	x6, x8, [x0, #32]
 	ldp	x9, x10, [x0, #48]
 	ldp	x11, x12, [x0, #64]
+	ldp	x13, x14, [x0, #80]
 	msr	tpidr_el0, x2
 	msr	tpidrro_el0, x3
 	msr	contextidr_el1, x4
@@ -148,6 +152,8 @@ ENTRY(cpu_do_resume)
 	msr	vbar_el1, x9
 	msr	mdscr_el1, x10
 	msr	sctlr_el1, x12
+	msr	tpidr_el1, x13
+	msr	sp_el0, x14
 	/*
 	 * Restore oslsr_el1 by writing oslar_el1
 	 */
@@ -45,7 +45,7 @@ config IA64
 	select GENERIC_SMP_IDLE_THREAD
 	select ARCH_INIT_TASK
 	select ARCH_TASK_STRUCT_ALLOCATOR
-	select ARCH_THREAD_INFO_ALLOCATOR
+	select ARCH_THREAD_STACK_ALLOCATOR
 	select ARCH_CLOCKSOURCE_DATA
 	select GENERIC_TIME_VSYSCALL_OLD
 	select SYSCTL_ARCH_UNALIGN_NO_WARN
@@ -48,15 +48,15 @@ struct thread_info {
 #ifndef ASM_OFFSETS_C
 /* how to get the thread information struct from C */
 #define current_thread_info()	((struct thread_info *) ((char *) current + IA64_TASK_SIZE))
-#define alloc_thread_info_node(tsk, node)	\
-		((struct thread_info *) ((char *) (tsk) + IA64_TASK_SIZE))
+#define alloc_thread_stack_node(tsk, node)	\
+		((unsigned long *) ((char *) (tsk) + IA64_TASK_SIZE))
 #define task_thread_info(tsk)	((struct thread_info *) ((char *) (tsk) + IA64_TASK_SIZE))
 #else
 #define current_thread_info()	((struct thread_info *) 0)
-#define alloc_thread_info_node(tsk, node)	((struct thread_info *) 0)
+#define alloc_thread_stack_node(tsk, node)	((unsigned long *) 0)
 #define task_thread_info(tsk)	((struct thread_info *) 0)
 #endif
-#define free_thread_info(ti)	/* nothing */
+#define free_thread_stack(ti)	/* nothing */
 #define task_stack_page(tsk)	((void *)(tsk))
 
 #define __HAVE_THREAD_FUNCTIONS
@@ -26,6 +26,7 @@ static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
  * handled. This is done by having a special ".data..init_task" section...
  */
 #define init_thread_info	init_task_mem.s.thread_info
+#define init_stack		init_task_mem.stack
 
 union {
 	struct {
@@ -115,7 +115,7 @@ static inline unsigned long current_stack_pointer(void)
 }
 
 #ifndef CONFIG_KGDB
-void arch_release_thread_info(struct thread_info *ti);
+void arch_release_thread_stack(unsigned long *stack);
 #endif
 #define get_thread_info(ti)	get_task_struct((ti)->task)
 #define put_thread_info(ti)	put_task_struct((ti)->task)
@@ -397,8 +397,9 @@ static bool kgdb_arch_undo_singlestep(struct pt_regs *regs)
  * single-step state is cleared. At this point the breakpoints should have
  * been removed by __switch_to().
  */
-void arch_release_thread_info(struct thread_info *ti)
+void arch_release_thread_stack(unsigned long *stack)
 {
+	struct thread_info *ti = (void *)stack;
 	if (kgdb_sstep_thread == ti) {
 		kgdb_sstep_thread = NULL;
 
@@ -1250,7 +1250,8 @@ static int bpf_jit_prog(struct bpf_jit *jit, struct bpf_prog *fp)
 		insn_count = bpf_jit_insn(jit, fp, i);
 		if (insn_count < 0)
 			return -1;
-		jit->addrs[i + 1] = jit->prg; /* Next instruction address */
+		/* Next instruction address */
+		jit->addrs[i + insn_count] = jit->prg;
 	}
 	bpf_jit_epilogue(jit);
 
@@ -25,9 +25,11 @@ void destroy_context(struct mm_struct *mm);
 void __tsb_context_switch(unsigned long pgd_pa,
 			  struct tsb_config *tsb_base,
 			  struct tsb_config *tsb_huge,
-			  unsigned long tsb_descr_pa);
+			  unsigned long tsb_descr_pa,
+			  unsigned long secondary_ctx);
 
-static inline void tsb_context_switch(struct mm_struct *mm)
+static inline void tsb_context_switch_ctx(struct mm_struct *mm,
+					  unsigned long ctx)
 {
 	__tsb_context_switch(__pa(mm->pgd),
 			     &mm->context.tsb_block[0],
@@ -38,9 +40,12 @@ static inline void tsb_context_switch(struct mm_struct *mm)
 #else
 			     NULL
 #endif
-			     , __pa(&mm->context.tsb_descr[0]));
+			     , __pa(&mm->context.tsb_descr[0]),
+			     ctx);
 }
 
+#define tsb_context_switch(X) tsb_context_switch_ctx(X, 0)
+
 void tsb_grow(struct mm_struct *mm,
 	      unsigned long tsb_index,
 	      unsigned long mm_rss);
@@ -110,8 +115,7 @@ static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, str
 	 * cpu0 to update it's TSB because at that point the cpu_vm_mask
 	 * only had cpu1 set in it.
 	 */
-	load_secondary_context(mm);
-	tsb_context_switch(mm);
+	tsb_context_switch_ctx(mm, CTX_HWBITS(mm->context));
 
 	/* Any time a processor runs a context on an address space
 	 * for the first time, we must flush that context out of the
@@ -54,6 +54,7 @@ extern struct trap_per_cpu trap_block[NR_CPUS];
 void init_cur_cpu_trap(struct thread_info *);
 void setup_tba(void);
 extern int ncpus_probed;
+extern u64 cpu_mondo_counter[NR_CPUS];
 
 unsigned long real_hard_smp_processor_id(void);
 
@@ -617,22 +617,48 @@ retry:
 	}
 }
 
-/* Multi-cpu list version.  */
+#define	CPU_MONDO_COUNTER(cpuid)	(cpu_mondo_counter[cpuid])
+#define	MONDO_USEC_WAIT_MIN		2
+#define	MONDO_USEC_WAIT_MAX		100
+#define	MONDO_RETRY_LIMIT		500000
+
+/* Multi-cpu list version.
+ *
+ * Deliver xcalls to 'cnt' number of cpus in 'cpu_list'.
+ * Sometimes not all cpus receive the mondo, requiring us to re-send
+ * the mondo until all cpus have received, or cpus are truly stuck
+ * unable to receive mondo, and we timeout.
+ * Occasionally a target cpu strand is borrowed briefly by hypervisor to
+ * perform guest service, such as PCIe error handling. Consider the
+ * service time, 1 second overall wait is reasonable for 1 cpu.
+ * Here two in-between mondo check wait time are defined: 2 usec for
+ * single cpu quick turn around and up to 100usec for large cpu count.
+ * Deliver mondo to large number of cpus could take longer, we adjusts
+ * the retry count as long as target cpus are making forward progress.
+ */
 static void hypervisor_xcall_deliver(struct trap_per_cpu *tb, int cnt)
 {
-	int retries, this_cpu, prev_sent, i, saw_cpu_error;
+	int this_cpu, tot_cpus, prev_sent, i, rem;
+	int usec_wait, retries, tot_retries;
+	u16 first_cpu = 0xffff;
+	unsigned long xc_rcvd = 0;
 	unsigned long status;
+	int ecpuerror_id = 0;
+	int enocpu_id = 0;
 	u16 *cpu_list;
+	u16 cpu;
 
 	this_cpu = smp_processor_id();
 
 	cpu_list = __va(tb->cpu_list_pa);
 
-	saw_cpu_error = 0;
-	retries = 0;
+	usec_wait = cnt * MONDO_USEC_WAIT_MIN;
+	if (usec_wait > MONDO_USEC_WAIT_MAX)
+		usec_wait = MONDO_USEC_WAIT_MAX;
+	retries = tot_retries = 0;
+	tot_cpus = cnt;
 	prev_sent = 0;
 
 	do {
-		int forward_progress, n_sent;
+		int n_sent, mondo_delivered, target_cpu_busy;
 
 		status = sun4v_cpu_mondo_send(cnt,
 					      tb->cpu_list_pa,
@@ -640,94 +666,113 @@ static void hypervisor_xcall_deliver(struct trap_per_cpu *tb, int cnt)
 
 		/* HV_EOK means all cpus received the xcall, we're done.  */
 		if (likely(status == HV_EOK))
-			break;
+			goto xcall_done;
+
+		/* If not these non-fatal errors, panic */
+		if (unlikely((status != HV_EWOULDBLOCK) &&
+			     (status != HV_ECPUERROR) &&
+			     (status != HV_ENOCPU)))
+			goto fatal_errors;
 
 		/* First, see if we made any forward progress.
 		 *
+		 * Go through the cpu_list, count the target cpus that have
+		 * received our mondo (n_sent), and those that did not (rem).
+		 * Re-pack cpu_list with the cpus remain to be retried in the
+		 * front - this simplifies tracking the truly stalled cpus.
+		 *
 		 * The hypervisor indicates successful sends by setting
 		 * cpu list entries to the value 0xffff.
+		 *
+		 * EWOULDBLOCK means some target cpus did not receive the
+		 * mondo and retry usually helps.
+		 *
+		 * ECPUERROR means at least one target cpu is in error state,
+		 * it's usually safe to skip the faulty cpu and retry.
+		 *
+		 * ENOCPU means one of the target cpu doesn't belong to the
+		 * domain, perhaps offlined which is unexpected, but not
+		 * fatal and it's okay to skip the offlined cpu.
 		 */
+		rem = 0;
 		n_sent = 0;
 		for (i = 0; i < cnt; i++) {
-			if (likely(cpu_list[i] == 0xffff))
+			cpu = cpu_list[i];
+			if (likely(cpu == 0xffff)) {
 				n_sent++;
+			} else if ((status == HV_ECPUERROR) &&
+				(sun4v_cpu_state(cpu) == HV_CPU_STATE_ERROR)) {
+				ecpuerror_id = cpu + 1;
+			} else if (status == HV_ENOCPU && !cpu_online(cpu)) {
+				enocpu_id = cpu + 1;
+			} else {
+				cpu_list[rem++] = cpu;
+			}
 		}
 
-		forward_progress = 0;
-		if (n_sent > prev_sent)
-			forward_progress = 1;
+		/* No cpu remained, we're done. */
+		if (rem == 0)
+			break;
 
+		/* Otherwise, update the cpu count for retry. */
+		cnt = rem;
+
+		/* Record the overall number of mondos received by the
+		 * first of the remaining cpus.
+		 */
+		if (first_cpu != cpu_list[0]) {
+			first_cpu = cpu_list[0];
+			xc_rcvd = CPU_MONDO_COUNTER(first_cpu);
+		}
+
+		/* Was any mondo delivered successfully? */
+		mondo_delivered = (n_sent > prev_sent);
 		prev_sent = n_sent;
 
-		/* If we get a HV_ECPUERROR, then one or more of the cpus
-		 * in the list are in error state.  Use the cpu_state()
-		 * hypervisor call to find out which cpus are in error state.
+		/* or, was any target cpu busy processing other mondos? */
+		target_cpu_busy = (xc_rcvd < CPU_MONDO_COUNTER(first_cpu));
+		xc_rcvd = CPU_MONDO_COUNTER(first_cpu);
+
+		/* Retry count is for no progress. If we're making progress,
+		 * reset the retry count.
 		 */
-		if (unlikely(status == HV_ECPUERROR)) {
-			for (i = 0; i < cnt; i++) {
-				long err;
-				u16 cpu;
-
-				cpu = cpu_list[i];
-				if (cpu == 0xffff)
-					continue;
-
-				err = sun4v_cpu_state(cpu);
-				if (err == HV_CPU_STATE_ERROR) {
-					saw_cpu_error = (cpu + 1);
-					cpu_list[i] = 0xffff;
-				}
-			}
-		} else if (unlikely(status != HV_EWOULDBLOCK))
-			goto fatal_mondo_error;
-
-		/* Don't bother rewriting the CPU list, just leave the
-		 * 0xffff and non-0xffff entries in there and the
-		 * hypervisor will do the right thing.
-		 *
-		 * Only advance timeout state if we didn't make any
-		 * forward progress.
-		 */
-		if (unlikely(!forward_progress)) {
-			if (unlikely(++retries > 10000))
-				goto fatal_mondo_timeout;
-
-			/* Delay a little bit to let other cpus catch up
-			 * on their cpu mondo queue work.
-			 */
-			udelay(2 * cnt);
+		if (likely(mondo_delivered || target_cpu_busy)) {
+			tot_retries += retries;
+			retries = 0;
+		} else if (unlikely(retries > MONDO_RETRY_LIMIT)) {
+			goto fatal_mondo_timeout;
 		}
+
+		/* Delay a little bit to let other cpus catch up on
+		 * their cpu mondo queue work.
+		 */
+		if (!mondo_delivered)
+			udelay(usec_wait);
+
+		retries++;
 	} while (1);
 
-	if (unlikely(saw_cpu_error))
-		goto fatal_mondo_cpu_error;
-
+xcall_done:
+	if (unlikely(ecpuerror_id > 0)) {
+		pr_crit("CPU[%d]: SUN4V mondo cpu error, target cpu(%d) was in error state\n",
+			this_cpu, ecpuerror_id - 1);
+	} else if (unlikely(enocpu_id > 0)) {
+		pr_crit("CPU[%d]: SUN4V mondo cpu error, target cpu(%d) does not belong to the domain\n",
+			this_cpu, enocpu_id - 1);
+	}
 	return;
 
-fatal_mondo_cpu_error:
-	printk(KERN_CRIT "CPU[%d]: SUN4V mondo cpu error, some target cpus "
-	       "(including %d) were in error state\n",
-	       this_cpu, saw_cpu_error - 1);
-	return;
+fatal_errors:
+	/* fatal errors include bad alignment, etc */
+	pr_crit("CPU[%d]: Args were cnt(%d) cpulist_pa(%lx) mondo_block_pa(%lx)\n",
+		this_cpu, tot_cpus, tb->cpu_list_pa, tb->cpu_mondo_block_pa);
+	panic("Unexpected SUN4V mondo error %lu\n", status);
 
 fatal_mondo_timeout:
-	printk(KERN_CRIT "CPU[%d]: SUN4V mondo timeout, no forward "
-	       " progress after %d retries.\n",
-	       this_cpu, retries);
-	goto dump_cpu_list_and_out;
-
-fatal_mondo_error:
-	printk(KERN_CRIT "CPU[%d]: Unexpected SUN4V mondo error %lu\n",
-	       this_cpu, status);
-	printk(KERN_CRIT "CPU[%d]: Args were cnt(%d) cpulist_pa(%lx) "
-	       "mondo_block_pa(%lx)\n",
-	       this_cpu, cnt, tb->cpu_list_pa, tb->cpu_mondo_block_pa);
-
-dump_cpu_list_and_out:
-	printk(KERN_CRIT "CPU[%d]: CPU list [ ", this_cpu);
-	for (i = 0; i < cnt; i++)
-		printk("%u ", cpu_list[i]);
-	printk("]\n");
+	/* some cpus being non-responsive to the cpu mondo */
+	pr_crit("CPU[%d]: SUN4V mondo timeout, cpu(%d) made no forward progress after %d retries. Total target cpus(%d).\n",
+		this_cpu, first_cpu, (tot_retries + retries), tot_cpus);
+	panic("SUN4V mondo timeout panic\n");
 }
 
 static void (*xcall_deliver_impl)(struct trap_per_cpu *, int);
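The rewritten sender above changes the timeout policy: the retry budget (MONDO_RETRY_LIMIT) now charges only iterations with no forward progress, and any sign of progress resets it. A hedged standalone model of that policy, with hypothetical callbacks standing in for the hypervisor interaction:

	/* Standalone model of the forward-progress retry policy; illustrative
	 * only, not the kernel's code. */
	int send_until_done(int (*send_all)(void), int (*made_progress)(void),
			    int retry_limit)
	{
		int retries = 0, tot_retries = 0;

		for (;;) {
			if (send_all() == 0)
				return tot_retries;	/* all targets received */
			if (made_progress()) {
				tot_retries += retries;	/* progress: reset budget */
				retries = 0;
			} else if (++retries > retry_limit) {
				return -1;		/* genuinely stuck */
			}
		}
	}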
@@ -26,6 +26,21 @@ sun4v_cpu_mondo:
 	ldxa	[%g0] ASI_SCRATCHPAD, %g4
 	sub	%g4, TRAP_PER_CPU_FAULT_INFO, %g4
 
+	/* Get smp_processor_id() into %g3 */
+	sethi	%hi(trap_block), %g5
+	or	%g5, %lo(trap_block), %g5
+	sub	%g4, %g5, %g3
+	srlx	%g3, TRAP_BLOCK_SZ_SHIFT, %g3
+
+	/* Increment cpu_mondo_counter[smp_processor_id()] */
+	sethi	%hi(cpu_mondo_counter), %g5
+	or	%g5, %lo(cpu_mondo_counter), %g5
+	sllx	%g3, 3, %g3
+	add	%g5, %g3, %g5
+	ldx	[%g5], %g3
+	add	%g3, 1, %g3
+	stx	%g3, [%g5]
+
 	/* Get CPU mondo queue base phys address into %g7.  */
 	ldx	[%g4 + TRAP_PER_CPU_CPU_MONDO_PA], %g7
 
@@ -2659,6 +2659,7 @@ void do_getpsr(struct pt_regs *regs)
 	}
 }
 
+u64 cpu_mondo_counter[NR_CPUS] = {0};
 struct trap_per_cpu trap_block[NR_CPUS];
 EXPORT_SYMBOL(trap_block);
 
@@ -375,6 +375,7 @@ tsb_flush:
  * %o1:	TSB base config pointer
  * %o2:	TSB huge config pointer, or NULL if none
  * %o3:	Hypervisor TSB descriptor physical address
+ * %o4:	Secondary context to load, if non-zero
  *
  * We have to run this whole thing with interrupts
  * disabled so that the current cpu doesn't change
@@ -387,6 +388,17 @@ __tsb_context_switch:
 	rdpr	%pstate, %g1
 	wrpr	%g1, PSTATE_IE, %pstate
 
+	brz,pn	%o4, 1f
+	 mov	SECONDARY_CONTEXT, %o5
+
+661:	stxa	%o4, [%o5] ASI_DMMU
+	.section .sun4v_1insn_patch, "ax"
+	.word	661b
+	stxa	%o4, [%o5] ASI_MMU
+	.previous
+	flush	%g6
+
+1:
 	TRAP_LOAD_TRAP_BLOCK(%g2, %g3)
 
 	stx	%o0, [%g2 + TRAP_PER_CPU_PGD_PADDR]
@@ -35,6 +35,5 @@ void restore_processor_state(void)
 {
 	struct mm_struct *mm = current->active_mm;
 
-	load_secondary_context(mm);
-	tsb_context_switch(mm);
+	tsb_context_switch_ctx(mm, CTX_HWBITS(mm->context));
 }
@@ -78,7 +78,7 @@ struct thread_info {
 
 #ifndef __ASSEMBLY__
 
-void arch_release_thread_info(struct thread_info *info);
+void arch_release_thread_stack(unsigned long *stack);
 
 /* How to get the thread information struct from C. */
 register unsigned long stack_pointer __asm__("sp");
@@ -73,8 +73,9 @@ void arch_cpu_idle(void)
 /*
  * Release a thread_info structure
  */
-void arch_release_thread_info(struct thread_info *info)
+void arch_release_thread_stack(unsigned long *stack)
 {
+	struct thread_info *info = (void *)stack;
 	struct single_step_state *step_state = info->step_state;
 
 	if (step_state) {
@@ -14,6 +14,7 @@
 
 #include <linux/types.h>
 #include "ctype.h"
+#include "string.h"
 
 int memcmp(const void *s1, const void *s2, size_t len)
 {
@@ -18,4 +18,13 @@ int memcmp(const void *s1, const void *s2, size_t len);
 #define memset(d,c,l) __builtin_memset(d,c,l)
 #define memcmp	__builtin_memcmp
 
+extern int strcmp(const char *str1, const char *str2);
+extern int strncmp(const char *cs, const char *ct, size_t count);
+extern size_t strlen(const char *s);
+extern char *strstr(const char *s1, const char *s2);
+extern size_t strnlen(const char *s, size_t maxlen);
+extern unsigned int atou(const char *s);
+extern unsigned long long simple_strtoull(const char *cp, char **endp,
+					  unsigned int base);
+
 #endif /* BOOT_STRING_H */
@@ -151,6 +151,8 @@ void kvm_async_pf_task_wait(u32 token)
 		if (hlist_unhashed(&n.link))
 			break;
 
+		rcu_irq_exit();
+
 		if (!n.halted) {
 			local_irq_enable();
 			schedule();
@@ -159,11 +161,11 @@ void kvm_async_pf_task_wait(u32 token)
 			/*
 			 * We cannot reschedule. So halt.
 			 */
-			rcu_irq_exit();
 			native_safe_halt();
 			local_irq_disable();
-			rcu_irq_enter();
 		}
+
+		rcu_irq_enter();
 	}
 	if (!n.halted)
 		finish_wait(&n.wq, &wait);
@@ -3522,11 +3522,13 @@ static int binder_thread_write(struct binder_proc *proc,
 			BUG_ON(buf_node->proc != proc);
 			w = binder_dequeue_work_head_ilocked(
 				&buf_node->async_todo);
-			if (!w)
+			if (!w) {
 				buf_node->has_async_transaction = 0;
-			else
+			} else {
 				binder_enqueue_work_ilocked(
-						w, &thread->todo);
+						w, &proc->todo);
+				binder_wakeup_proc_ilocked(proc);
+			}
 			binder_node_inner_unlock(buf_node);
 		}
 		trace_binder_transaction_buffer_release(buffer);
@@ -3670,22 +3672,12 @@ static int binder_thread_write(struct binder_proc *proc,
 				ref->death = death;
 				if (ref->node->proc == NULL) {
 					ref->death->work.type = BINDER_WORK_DEAD_BINDER;
-					if (thread->looper &
-					    (BINDER_LOOPER_STATE_REGISTERED |
-					     BINDER_LOOPER_STATE_ENTERED))
-						binder_enqueue_work(
-							proc,
-							&ref->death->work,
-							&thread->todo);
-					else {
-						binder_inner_proc_lock(proc);
-						binder_enqueue_work_ilocked(
-							&ref->death->work,
-							&proc->todo);
-						binder_wakeup_proc_ilocked(
-							proc);
-						binder_inner_proc_unlock(proc);
-					}
+
+					binder_inner_proc_lock(proc);
+					binder_enqueue_work_ilocked(
+						&ref->death->work, &proc->todo);
+					binder_wakeup_proc_ilocked(proc);
+					binder_inner_proc_unlock(proc);
 				}
 			} else {
 				if (ref->death == NULL) {
@@ -3802,12 +3794,6 @@ static void binder_stat_br(struct binder_proc *proc,
 	}
 }
 
-static int binder_has_thread_work(struct binder_thread *thread)
-{
-	return !binder_worklist_empty(thread->proc, &thread->todo) ||
-		thread->looper_need_return;
-}
-
 static int binder_put_node_cmd(struct binder_proc *proc,
 			       struct binder_thread *thread,
 			       void __user **ptrp,
@@ -4438,12 +4424,9 @@ static unsigned int binder_poll(struct file *filp,
 
 	binder_inner_proc_unlock(thread->proc);
 
-	if (binder_has_work(thread, wait_for_proc_work))
-		return POLLIN;
-
 	poll_wait(filp, &thread->wait, wait);
 
-	if (binder_has_thread_work(thread))
+	if (binder_has_work(thread, wait_for_proc_work))
 		return POLLIN;
 
 	return 0;
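The binder_poll change above follows the standard poll contract: register on the waitqueue first, then test for work, so a wakeup that fires between the test and the sleep cannot be lost. A generic hedged sketch of the pattern (the device struct, waitqueue, and predicate here are hypothetical):

	/* Canonical poll pattern the fix adopts; names are hypothetical. */
	static unsigned int example_poll(struct file *filp, poll_table *wait)
	{
		struct example_dev *dev = filp->private_data;

		poll_wait(filp, &dev->waitq, wait);	/* 1) always register */
		if (example_has_work(dev))		/* 2) then check state */
			return POLLIN;
		return 0;
	}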
@@ -2832,10 +2832,12 @@ static unsigned int atapi_xlat(struct ata_queued_cmd *qc)
 static struct ata_device *ata_find_dev(struct ata_port *ap, int devno)
 {
 	if (!sata_pmp_attached(ap)) {
-		if (likely(devno < ata_link_max_devices(&ap->link)))
+		if (likely(devno >= 0 &&
+			   devno < ata_link_max_devices(&ap->link)))
 			return &ap->link.device[devno];
 	} else {
-		if (likely(devno < ap->nr_pmp_links))
+		if (likely(devno >= 0 &&
+			   devno < ap->nr_pmp_links))
 			return &ap->pmp_link[devno].device[0];
 	}
 
@@ -641,11 +641,12 @@ static int virtblk_probe(struct virtio_device *vdev)
 	if (err)
 		goto out_put_disk;
 
-	q = vblk->disk->queue = blk_mq_init_queue(&vblk->tag_set);
+	q = blk_mq_init_queue(&vblk->tag_set);
 	if (IS_ERR(q)) {
 		err = -ENOMEM;
 		goto out_free_tags;
 	}
+	vblk->disk->queue = q;
 
 	q->queuedata = vblk;
 
@@ -296,7 +296,7 @@ static int rcar_du_probe(struct platform_device *pdev)
 	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	rcdu->mmio = devm_ioremap_resource(&pdev->dev, mem);
 	if (IS_ERR(rcdu->mmio))
-		ret = PTR_ERR(rcdu->mmio);
+		return PTR_ERR(rcdu->mmio);
 
 	/* DRM/KMS objects */
 	ddev = drm_dev_alloc(&rcar_du_driver, &pdev->dev);
@@ -338,7 +338,7 @@ static int virtio_gpufb_create(struct drm_fb_helper *helper,
 	info->fbops = &virtio_gpufb_ops;
 	info->pixmap.flags = FB_PIXMAP_SYSTEM;

-	info->screen_base = obj->vmap;
+	info->screen_buffer = obj->vmap;
 	info->screen_size = obj->gem_base.size;
 	drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
 	drm_fb_helper_fill_var(info, &vfbdev->helper,
@@ -1581,7 +1581,7 @@ isert_rcv_completion(struct iser_rx_desc *desc,
 		     struct isert_conn *isert_conn,
 		     u32 xfer_len)
 {
-	struct ib_device *ib_dev = isert_conn->cm_id->device;
+	struct ib_device *ib_dev = isert_conn->device->ib_device;
 	struct iscsi_hdr *hdr;
 	u64 rx_dma;
 	int rx_buflen;
@@ -60,6 +60,10 @@ struct keychord_device {
 	unsigned char head;
 	unsigned char tail;
 	__u16 buff[BUFFER_SIZE];
+	/* Bit to serialize writes to this device */
+#define KEYCHORD_BUSY		0x01
+	unsigned long flags;
+	wait_queue_head_t write_waitq;
 };

 static int check_keychord(struct keychord_device *kdev,

@@ -172,7 +176,6 @@ static int keychord_connect(struct input_handler *handler,
 		goto err_input_open_device;

-	pr_info("keychord: using input dev %s for fevent\n", dev->name);

 	return 0;

 err_input_open_device:

@@ -224,6 +227,41 @@ static ssize_t keychord_read(struct file *file, char __user *buffer,
 	return count;
 }

+/*
+ * serializes writes on a device. can use mutex_lock_interruptible()
+ * for this particular use case as well - a matter of preference.
+ */
+static int
+keychord_write_lock(struct keychord_device *kdev)
+{
+	int ret;
+	unsigned long flags;
+
+	spin_lock_irqsave(&kdev->lock, flags);
+	while (kdev->flags & KEYCHORD_BUSY) {
+		spin_unlock_irqrestore(&kdev->lock, flags);
+		ret = wait_event_interruptible(kdev->write_waitq,
+			       ((kdev->flags & KEYCHORD_BUSY) == 0));
+		if (ret)
+			return ret;
+		spin_lock_irqsave(&kdev->lock, flags);
+	}
+	kdev->flags |= KEYCHORD_BUSY;
+	spin_unlock_irqrestore(&kdev->lock, flags);
+	return 0;
+}
+
+static void
+keychord_write_unlock(struct keychord_device *kdev)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&kdev->lock, flags);
+	kdev->flags &= ~KEYCHORD_BUSY;
+	spin_unlock_irqrestore(&kdev->lock, flags);
+	wake_up_interruptible(&kdev->write_waitq);
+}
+
 /*
  * keychord_write is used to configure the driver
  */

@@ -232,9 +270,11 @@ static ssize_t keychord_write(struct file *file, const char __user *buffer,
 {
 	struct keychord_device *kdev = file->private_data;
 	struct input_keychord *keychords = 0;
-	struct input_keychord *keychord, *next, *end;
+	struct input_keychord *keychord;
 	int ret, i, key;
 	unsigned long flags;
+	size_t resid = count;
+	size_t key_bytes;

 	if (count < sizeof(struct input_keychord))
 		return -EINVAL;

@@ -248,6 +288,22 @@ static ssize_t keychord_write(struct file *file, const char __user *buffer,
 		return -EFAULT;
 	}

+	/*
+	 * Serialize writes to this device to prevent various races.
+	 * 1) writers racing here could do duplicate input_unregister_handler()
+	 *    calls, resulting in attempting to unlink a node from a list that
+	 *    does not exist.
+	 * 2) writers racing here could do duplicate input_register_handler() calls
+	 *    below, resulting in a duplicate insertion of a node into the list.
+	 * 3) a double kfree of keychords can occur (in the event that
+	 *    input_register_handler() fails below.
+	 */
+	ret = keychord_write_lock(kdev);
+	if (ret) {
+		kfree(keychords);
+		return ret;
+	}
+
 	/* unregister handler before changing configuration */
 	if (kdev->registered) {
 		input_unregister_handler(&kdev->input_handler);

@@ -265,15 +321,29 @@ static ssize_t keychord_write(struct file *file, const char __user *buffer,
 	kdev->head = kdev->tail = 0;

 	keychord = keychords;
-	end = (struct input_keychord *)((char *)keychord + count);

-	while (keychord < end) {
-		next = NEXT_KEYCHORD(keychord);
-		if (keychord->count <= 0 || next > end) {
+	while (resid > 0) {
+		/* Is the entire keychord entry header present ? */
+		if (resid < sizeof(struct input_keychord)) {
+			pr_err("keychord: Insufficient bytes present for header %zu\n",
+			       resid);
+			goto err_unlock_return;
+		}
+		resid -= sizeof(struct input_keychord);
+		if (keychord->count <= 0) {
 			pr_err("keychord: invalid keycode count %d\n",
 				keychord->count);
 			goto err_unlock_return;
 		}
+		key_bytes = keychord->count * sizeof(keychord->keycodes[0]);
+		/* Do we have all the expected keycodes ? */
+		if (resid < key_bytes) {
+			pr_err("keychord: Insufficient bytes present for keycount %zu\n",
+			       resid);
+			goto err_unlock_return;
+		}
+		resid -= key_bytes;

 		if (keychord->version != KEYCHORD_VERSION) {
 			pr_err("keychord: unsupported version %d\n",

@@ -292,7 +362,7 @@ static ssize_t keychord_write(struct file *file, const char __user *buffer,
 		}

 		kdev->keychord_count++;
-		keychord = next;
+		keychord = NEXT_KEYCHORD(keychord);
 	}

 	kdev->keychords = keychords;

@@ -302,15 +372,19 @@ static ssize_t keychord_write(struct file *file, const char __user *buffer,
 	if (ret) {
 		kfree(keychords);
 		kdev->keychords = 0;
+		keychord_write_unlock(kdev);
 		return ret;
 	}
 	kdev->registered = 1;

+	keychord_write_unlock(kdev);
+
 	return count;

 err_unlock_return:
 	spin_unlock_irqrestore(&kdev->lock, flags);
 	kfree(keychords);
+	keychord_write_unlock(kdev);
 	return -EINVAL;
 }

@@ -336,6 +410,7 @@ static int keychord_open(struct inode *inode, struct file *file)

 	spin_lock_init(&kdev->lock);
 	init_waitqueue_head(&kdev->waitq);
+	init_waitqueue_head(&kdev->write_waitq);

 	kdev->input_handler.event = keychord_event;
 	kdev->input_handler.connect = keychord_connect;

@@ -357,6 +432,7 @@ static int keychord_release(struct inode *inode, struct file *file)

 	if (kdev->registered)
 		input_unregister_handler(&kdev->input_handler);
+	kfree(kdev->keychords);
 	kfree(kdev);

 	return 0;
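The keychord races are all closed by one device-wide busy bit: a writer sleeps until KEYCHORD_BUSY clears, sets it, and wakes waiters on release, so unregister/register/kfree can never run concurrently for the same device. The same discipline with a pthread condition variable (the kernel version uses a spinlock plus wait_event_interruptible(); names invented):

	#include <pthread.h>
	#include <stdbool.h>

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	static pthread_cond_t  waitq = PTHREAD_COND_INITIALIZER;
	static bool busy;

	void write_lock_dev(void)
	{
		pthread_mutex_lock(&lock);
		while (busy)
			pthread_cond_wait(&waitq, &lock);
		busy = true;			/* we own the device now */
		pthread_mutex_unlock(&lock);
	}

	void write_unlock_dev(void)
	{
		pthread_mutex_lock(&lock);
		busy = false;
		pthread_mutex_unlock(&lock);
		pthread_cond_broadcast(&waitq);	/* wake the next writer */
	}
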
@@ -393,11 +393,11 @@ int saa7164_bus_get(struct saa7164_dev *dev, struct tmComResInfo* msg,
 	msg_tmp.size = le16_to_cpu((__force __le16)msg_tmp.size);
 	msg_tmp.command = le32_to_cpu((__force __le32)msg_tmp.command);
 	msg_tmp.controlselector = le16_to_cpu((__force __le16)msg_tmp.controlselector);
+	memcpy(msg, &msg_tmp, sizeof(*msg));

-	/* No need to update the read positions, because this was a peek */
+	/* If the caller specifically want to peek, return */
 	if (peekonly) {
-		memcpy(msg, &msg_tmp, sizeof(*msg));
 		goto peekout;
 	}

@@ -442,21 +442,15 @@ int saa7164_bus_get(struct saa7164_dev *dev, struct tmComResInfo* msg,
 		space_rem = bus->m_dwSizeGetRing - curr_grp;

 		if (space_rem < sizeof(*msg)) {
-			/* msg wraps around the ring */
-			memcpy_fromio(msg, bus->m_pdwGetRing + curr_grp, space_rem);
-			memcpy_fromio((u8 *)msg + space_rem, bus->m_pdwGetRing,
-				sizeof(*msg) - space_rem);
 			if (buf)
 				memcpy_fromio(buf, bus->m_pdwGetRing + sizeof(*msg) -
 					space_rem, buf_size);

 		} else if (space_rem == sizeof(*msg)) {
-			memcpy_fromio(msg, bus->m_pdwGetRing + curr_grp, sizeof(*msg));
 			if (buf)
 				memcpy_fromio(buf, bus->m_pdwGetRing, buf_size);
 		} else {
 			/* Additional data wraps around the ring */
-			memcpy_fromio(msg, bus->m_pdwGetRing + curr_grp, sizeof(*msg));
 			if (buf) {
 				memcpy_fromio(buf, bus->m_pdwGetRing + curr_grp +
 					sizeof(*msg), space_rem - sizeof(*msg));

@@ -469,15 +463,10 @@ int saa7164_bus_get(struct saa7164_dev *dev, struct tmComResInfo* msg,

 	} else {
 		/* No wrapping */
-		memcpy_fromio(msg, bus->m_pdwGetRing + curr_grp, sizeof(*msg));
 		if (buf)
 			memcpy_fromio(buf, bus->m_pdwGetRing + curr_grp + sizeof(*msg),
 				buf_size);
 	}
-	/* Convert from little endian to CPU */
-	msg->size = le16_to_cpu((__force __le16)msg->size);
-	msg->command = le32_to_cpu((__force __le32)msg->command);
-	msg->controlselector = le16_to_cpu((__force __le16)msg->controlselector);

 	/* Update the read positions, adjusting the ring */
 	saa7164_writel(bus->m_dwGetReadPos, new_grp);
@@ -1709,27 +1709,9 @@ static long vpfe_param_handler(struct file *file, void *priv,

 	switch (cmd) {
 	case VPFE_CMD_S_CCDC_RAW_PARAMS:
+		ret = -EINVAL;
 		v4l2_warn(&vpfe_dev->v4l2_dev,
-			"VPFE_CMD_S_CCDC_RAW_PARAMS: experimental ioctl\n");
-		if (ccdc_dev->hw_ops.set_params) {
-			ret = ccdc_dev->hw_ops.set_params(param);
-			if (ret) {
-				v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev,
-					"Error setting parameters in CCDC\n");
-				goto unlock_out;
-			}
-			ret = vpfe_get_ccdc_image_format(vpfe_dev,
-							 &vpfe_dev->fmt);
-			if (ret < 0) {
-				v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev,
-					"Invalid image format at CCDC\n");
-				goto unlock_out;
-			}
-		} else {
-			ret = -EINVAL;
-			v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev,
-				"VPFE_CMD_S_CCDC_RAW_PARAMS not supported\n");
-		}
+			"VPFE_CMD_S_CCDC_RAW_PARAMS not supported\n");
 		break;
 	default:
 		ret = -ENOTTY;
@@ -254,7 +254,7 @@ static long ir_lirc_ioctl(struct file *filep, unsigned int cmd,
 		return 0;

 	case LIRC_GET_REC_RESOLUTION:
-		val = dev->rx_resolution;
+		val = dev->rx_resolution / 1000;
 		break;

 	case LIRC_SET_WIDEBAND_RECEIVER:
@@ -608,7 +608,7 @@ static void nb8800_mac_config(struct net_device *dev)
 		mac_mode |= HALF_DUPLEX;

 	if (gigabit) {
-		if (priv->phy_mode == PHY_INTERFACE_MODE_RGMII)
+		if (phy_interface_is_rgmii(dev->phydev))
 			mac_mode |= RGMII_MODE;

 		mac_mode |= GMAC_MODE;

@@ -1295,11 +1295,10 @@ static int nb8800_tangox_init(struct net_device *dev)
 		break;

 	case PHY_INTERFACE_MODE_RGMII:
-		pad_mode = PAD_MODE_RGMII;
-		break;
-
 	case PHY_INTERFACE_MODE_RGMII_ID:
+	case PHY_INTERFACE_MODE_RGMII_RXID:
 	case PHY_INTERFACE_MODE_RGMII_TXID:
-		pad_mode = PAD_MODE_RGMII | PAD_MODE_GTX_CLK_DELAY;
+		pad_mode = PAD_MODE_RGMII;
 		break;

 	default:
@@ -8722,11 +8722,14 @@ static void tg3_free_consistent(struct tg3 *tp)
 	tg3_mem_rx_release(tp);
 	tg3_mem_tx_release(tp);

+	/* Protect tg3_get_stats64() from reading freed tp->hw_stats. */
+	tg3_full_lock(tp, 0);
 	if (tp->hw_stats) {
 		dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
 				  tp->hw_stats, tp->stats_mapping);
 		tp->hw_stats = NULL;
 	}
+	tg3_full_unlock(tp);
 }

 /*
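The tg3 hunk is the standard fix for a free/reader race: the free path takes the same lock the stats reader holds, NULLs the pointer, and readers re-check it under the lock. A userspace sketch of the idiom, with a pthread mutex standing in for tg3_full_lock() (all names invented):

	#include <pthread.h>
	#include <stdlib.h>

	static pthread_mutex_t stats_lock = PTHREAD_MUTEX_INITIALIZER;
	static long *hw_stats;

	void free_stats(void)
	{
		pthread_mutex_lock(&stats_lock);
		free(hw_stats);
		hw_stats = NULL;		/* readers re-check under the lock */
		pthread_mutex_unlock(&stats_lock);
	}

	long read_stat(int idx)
	{
		long v = 0;

		pthread_mutex_lock(&stats_lock);
		if (hw_stats)			/* guarded, as in tg3_get_stats64() */
			v = hw_stats[idx];
		pthread_mutex_unlock(&stats_lock);
		return v;
	}
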
@@ -630,6 +630,10 @@ static void dump_command(struct mlx5_core_dev *dev,
 		pr_debug("\n");
 }

+static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg);
+static void mlx5_free_cmd_msg(struct mlx5_core_dev *dev,
+			      struct mlx5_cmd_msg *msg);
+
 static void cmd_work_handler(struct work_struct *work)
 {
 	struct mlx5_cmd_work_ent *ent = container_of(work, struct mlx5_cmd_work_ent, work);

@@ -638,16 +642,27 @@ static void cmd_work_handler(struct work_struct *work)
 	struct mlx5_cmd_layout *lay;
 	struct semaphore *sem;
 	unsigned long flags;
+	int alloc_ret;

 	sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem;
 	down(sem);
 	if (!ent->page_queue) {
-		ent->idx = alloc_ent(cmd);
-		if (ent->idx < 0) {
+		alloc_ret = alloc_ent(cmd);
+		if (alloc_ret < 0) {
+			if (ent->callback) {
+				ent->callback(-EAGAIN, ent->context);
+				mlx5_free_cmd_msg(dev, ent->out);
+				free_msg(dev, ent->in);
+				free_cmd(ent);
+			} else {
+				ent->ret = -EAGAIN;
+				complete(&ent->done);
+			}
 			mlx5_core_err(dev, "failed to allocate command entry\n");
 			up(sem);
 			return;
 		}
+		ent->idx = alloc_ret;
 	} else {
 		ent->idx = cmd->max_reg_cmds;
 		spin_lock_irqsave(&cmd->alloc_lock, flags);
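The mlx5 change follows a general rule for command interfaces: when submission fails before the command is ever queued, the failure must travel through the same completion paths a finished command would use, or the submitter waits forever. A compact sketch of that rule (all types invented for illustration):

	struct cmd_ent {
		void (*callback)(int err, void *ctx);	/* set for async users */
		void *context;
		int ret;
		int done;				/* stands in for a completion */
	};

	void fail_before_queueing(struct cmd_ent *ent)
	{
		if (ent->callback) {
			ent->callback(-1 /* -EAGAIN */, ent->context);
			/* async path also frees the messages and the entry */
		} else {
			ent->ret = -1;
			ent->done = 1;		/* complete(&ent->done) equivalent */
		}
	}
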
@@ -819,6 +819,7 @@ static struct sh_eth_cpu_data r8a7740_data = {
 	.rpadir_value   = 2 << 16,
 	.no_trimd	= 1,
 	.no_ade		= 1,
+	.hw_crc		= 1,
 	.tsu		= 1,
 	.select_mii	= 1,
 	.shift_rd0	= 1,
@@ -141,9 +141,19 @@ static int mcs_set_reg(struct mcs_cb *mcs, __u16 reg, __u16 val)
 static int mcs_get_reg(struct mcs_cb *mcs, __u16 reg, __u16 * val)
 {
 	struct usb_device *dev = mcs->usbdev;
-	int ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), MCS_RDREQ,
-				  MCS_RD_RTYPE, 0, reg, val, 2,
-				  msecs_to_jiffies(MCS_CTRL_TIMEOUT));
+	void *dmabuf;
+	int ret;
+
+	dmabuf = kmalloc(sizeof(__u16), GFP_KERNEL);
+	if (!dmabuf)
+		return -ENOMEM;
+
+	ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), MCS_RDREQ,
+			      MCS_RD_RTYPE, 0, reg, dmabuf, 2,
+			      msecs_to_jiffies(MCS_CTRL_TIMEOUT));
+
+	memcpy(val, dmabuf, sizeof(__u16));
+	kfree(dmabuf);

 	return ret;
 }
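The mcs7780 hunk applies the USB rule that control-transfer buffers must be heap-allocated (DMA-able); with CONFIG_VMAP_STACK a stack variable lives in vmalloc space and cannot be DMA-mapped. A userspace sketch of the bounce-buffer shape, with the transfer itself stubbed out:

	#include <stdint.h>
	#include <stdlib.h>
	#include <string.h>

	/* stand-in for usb_control_msg() writing into the buffer */
	static int do_transfer(void *buf, size_t len)
	{
		memset(buf, 0, len);
		return 0;
	}

	int get_reg(uint16_t *val)
	{
		void *dmabuf = malloc(sizeof(*val));	/* heap, never the stack */
		int ret;

		if (!dmabuf)
			return -1;
		ret = do_transfer(dmabuf, sizeof(*val));
		memcpy(val, dmabuf, sizeof(*val));	/* copy out to the caller */
		free(dmabuf);
		return ret;
	}
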
@@ -29,6 +29,7 @@
 #define MII_DP83867_MICR	0x12
 #define MII_DP83867_ISR		0x13
 #define DP83867_CTRL		0x1f
+#define DP83867_CFG3		0x1e

 /* Extended Registers */
 #define DP83867_RGMIICTL	0x0032

@@ -89,6 +90,8 @@ static int dp83867_config_intr(struct phy_device *phydev)
 		micr_status |=
 			(MII_DP83867_MICR_AN_ERR_INT_EN |
 			MII_DP83867_MICR_SPEED_CHNG_INT_EN |
+			MII_DP83867_MICR_AUTONEG_COMP_INT_EN |
+			MII_DP83867_MICR_LINK_STS_CHNG_INT_EN |
 			MII_DP83867_MICR_DUP_MODE_CHNG_INT_EN |
 			MII_DP83867_MICR_SLEEP_MODE_CHNG_INT_EN);

@@ -184,6 +187,13 @@ static int dp83867_config_init(struct phy_device *phydev)
 				      DP83867_DEVADDR, phydev->addr, delay);
 	}

+	/* Enable Interrupt output INT_OE in CFG3 register */
+	if (phy_interrupt_is_valid(phydev)) {
+		val = phy_read(phydev, DP83867_CFG3);
+		val |= BIT(7);
+		phy_write(phydev, DP83867_CFG3, val);
+	}
+
 	return 0;
 }
@@ -541,6 +541,9 @@ void phy_stop_machine(struct phy_device *phydev)
 	if (phydev->state > PHY_UP && phydev->state != PHY_HALTED)
 		phydev->state = PHY_UP;
 	mutex_unlock(&phydev->lock);
+
+	/* Now we can run the state machine synchronously */
+	phy_state_machine(&phydev->state_queue.work);
 }

 /**

@@ -918,6 +921,15 @@ void phy_state_machine(struct work_struct *work)
 			if (old_link != phydev->link)
 				phydev->state = PHY_CHANGELINK;
 		}
+		/*
+		 * Failsafe: check that nobody set phydev->link=0 between two
+		 * poll cycles, otherwise we won't leave RUNNING state as long
+		 * as link remains down.
+		 */
+		if (!phydev->link && phydev->state == PHY_RUNNING) {
+			phydev->state = PHY_CHANGELINK;
+			dev_err(&phydev->dev, "no link in PHY_RUNNING\n");
+		}
 		break;
 	case PHY_CHANGELINK:
 		err = phy_read_status(phydev);

@@ -1368,6 +1368,8 @@ static int phy_remove(struct device *dev)
 {
 	struct phy_device *phydev = to_phy_device(dev);

+	cancel_delayed_work_sync(&phydev->state_queue);
+
 	mutex_lock(&phydev->lock);
 	phydev->state = PHY_DOWN;
 	mutex_unlock(&phydev->lock);
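The failsafe added to phy_state_machine() is a self-healing check for polled state machines: if another context zeroed the cached link flag between polls, force a transition so the machine re-reads reality instead of idling in RUNNING. Reduced to its shape (fields invented):

	enum phy_state { RUNNING, CHANGELINK };

	struct phy { int link; enum phy_state state; };

	void poll_cycle(struct phy *p)
	{
		if (!p->link && p->state == RUNNING)
			p->state = CHANGELINK;	/* re-evaluate on the next pass */
	}
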
@@ -201,6 +201,7 @@ struct xenvif_queue { /* Per-queue data for xenvif */
 	unsigned long   remaining_credit;
 	struct timer_list credit_timeout;
 	u64 credit_window_start;
+	bool rate_limited;

 	/* Statistics */
 	struct xenvif_stats stats;

@@ -105,7 +105,11 @@ static int xenvif_poll(struct napi_struct *napi, int budget)

 	if (work_done < budget) {
 		napi_complete(napi);
-		xenvif_napi_schedule_or_enable_events(queue);
+		/* If the queue is rate-limited, it shall be
+		 * rescheduled in the timer callback.
+		 */
+		if (likely(!queue->rate_limited))
+			xenvif_napi_schedule_or_enable_events(queue);
 	}

 	return work_done;

@@ -687,6 +687,7 @@ static void tx_add_credit(struct xenvif_queue *queue)
 		max_credit = ULONG_MAX; /* wrapped: clamp to ULONG_MAX */

 	queue->remaining_credit = min(max_credit, max_burst);
+	queue->rate_limited = false;
 }

 void xenvif_tx_credit_callback(unsigned long data)

@@ -1184,8 +1185,10 @@ static bool tx_credit_exceeded(struct xenvif_queue *queue, unsigned size)
 		msecs_to_jiffies(queue->credit_usec / 1000);

 	/* Timer could already be pending in rare cases. */
-	if (timer_pending(&queue->credit_timeout))
+	if (timer_pending(&queue->credit_timeout)) {
+		queue->rate_limited = true;
 		return true;
+	}

 	/* Passed the point where we can replenish credit? */
 	if (time_after_eq64(now, next_credit)) {

@@ -1200,6 +1203,7 @@ static bool tx_credit_exceeded(struct xenvif_queue *queue, unsigned size)
 		mod_timer(&queue->credit_timeout,
 			  next_credit);
 		queue->credit_window_start = next_credit;
+		queue->rate_limited = true;

 		return true;
 	}
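The xen-netback hunks hand scheduling ownership to the credit timer: whenever the timer is left pending, rate_limited is set and the NAPI poll loop refrains from rescheduling itself, so only the timer callback restarts the queue. The handshake in miniature (fields invented):

	struct queue { int timer_pending; int rate_limited; };

	/* tx path: timer already armed => mark the queue and back off */
	int credit_exceeded(struct queue *q)
	{
		if (q->timer_pending) {
			q->rate_limited = 1;
			return 1;
		}
		return 0;
	}

	/* timer callback: replenish credit and clear the mark */
	void add_credit(struct queue *q)
	{
		q->rate_limited = 0;
	}
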
@@ -329,12 +329,15 @@ qla2x00_sysfs_read_optrom(struct file *filp, struct kobject *kobj,
 	struct qla_hw_data *ha = vha->hw;
 	ssize_t rval = 0;

-	if (ha->optrom_state != QLA_SREADING)
-		return 0;
-
 	mutex_lock(&ha->optrom_mutex);

+	if (ha->optrom_state != QLA_SREADING)
+		goto out;
+
 	rval = memory_read_from_buffer(buf, count, &off, ha->optrom_buffer,
 	    ha->optrom_region_size);

+out:
 	mutex_unlock(&ha->optrom_mutex);

 	return rval;

@@ -349,14 +352,19 @@ qla2x00_sysfs_write_optrom(struct file *filp, struct kobject *kobj,
 	    struct device, kobj)));
 	struct qla_hw_data *ha = vha->hw;

-	if (ha->optrom_state != QLA_SWRITING)
+	mutex_lock(&ha->optrom_mutex);
+
+	if (ha->optrom_state != QLA_SWRITING) {
+		mutex_unlock(&ha->optrom_mutex);
 		return -EINVAL;
-	if (off > ha->optrom_region_size)
+	}
+	if (off > ha->optrom_region_size) {
+		mutex_unlock(&ha->optrom_mutex);
 		return -ERANGE;
+	}
 	if (off + count > ha->optrom_region_size)
 		count = ha->optrom_region_size - off;

-	mutex_lock(&ha->optrom_mutex);
 	memcpy(&ha->optrom_buffer[off], buf, count);
 	mutex_unlock(&ha->optrom_mutex);
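Both qla2xxx hunks enforce check-under-lock: the optrom_state test and the buffer access must form a single critical section, otherwise the state can flip between an unlocked test and the later lock. Userspace sketch (names invented):

	#include <pthread.h>

	static pthread_mutex_t optrom_mutex = PTHREAD_MUTEX_INITIALIZER;
	static int optrom_state;
	static char optrom_buffer[64];

	int read_optrom(char *dst, int len)
	{
		int rval = 0;

		pthread_mutex_lock(&optrom_mutex);
		if (optrom_state == 1) {	/* QLA_SREADING stand-in */
			for (int i = 0; i < len && i < 64; i++)
				dst[i] = optrom_buffer[i];
			rval = len;
		}
		pthread_mutex_unlock(&optrom_mutex);
		return rval;
	}
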
@@ -3965,6 +3965,8 @@ int iscsi_target_tx_thread(void *arg)
 {
 	int ret = 0;
 	struct iscsi_conn *conn = arg;
+	bool conn_freed = false;
+
 	/*
 	 * Allow ourselves to be interrupted by SIGINT so that a
 	 * connection recovery / failure event can be triggered externally.

@@ -3990,12 +3992,14 @@ get_immediate:
 			goto transport_err;

 		ret = iscsit_handle_response_queue(conn);
-		if (ret == 1)
+		if (ret == 1) {
 			goto get_immediate;
-		else if (ret == -ECONNRESET)
+		} else if (ret == -ECONNRESET) {
+			conn_freed = true;
 			goto out;
-		else if (ret < 0)
+		} else if (ret < 0) {
 			goto transport_err;
+		}
 	}

 transport_err:

@@ -4005,8 +4009,13 @@ transport_err:
 	 * responsible for cleaning up the early connection failure.
 	 */
 	if (conn->conn_state != TARG_CONN_STATE_IN_LOGIN)
-		iscsit_take_action_for_connection_exit(conn);
+		iscsit_take_action_for_connection_exit(conn, &conn_freed);
 out:
+	if (!conn_freed) {
+		while (!kthread_should_stop()) {
+			msleep(100);
+		}
+	}
 	return 0;
 }

@@ -4105,6 +4114,7 @@ int iscsi_target_rx_thread(void *arg)
 	u32 checksum = 0, digest = 0;
 	struct iscsi_conn *conn = arg;
 	struct kvec iov;
+	bool conn_freed = false;
 	/*
 	 * Allow ourselves to be interrupted by SIGINT so that a
 	 * connection recovery / failure event can be triggered externally.

@@ -4116,7 +4126,7 @@ int iscsi_target_rx_thread(void *arg)
 	 */
 	rc = wait_for_completion_interruptible(&conn->rx_login_comp);
 	if (rc < 0 || iscsi_target_check_conn_state(conn))
-		return 0;
+		goto out;

 	if (conn->conn_transport->transport_type == ISCSI_INFINIBAND) {
 		struct completion comp;

@@ -4201,7 +4211,13 @@ int iscsi_target_rx_thread(void *arg)
 transport_err:
 	if (!signal_pending(current))
 		atomic_set(&conn->transport_failed, 1);
-	iscsit_take_action_for_connection_exit(conn);
+	iscsit_take_action_for_connection_exit(conn, &conn_freed);
+out:
+	if (!conn_freed) {
+		while (!kthread_should_stop()) {
+			msleep(100);
+		}
+	}
 	return 0;
 }

@@ -4575,8 +4591,11 @@ static void iscsit_logout_post_handler_closesession(
 	 * always sleep waiting for RX/TX thread shutdown to complete
 	 * within iscsit_close_connection().
 	 */
-	if (conn->conn_transport->transport_type == ISCSI_TCP)
+	if (conn->conn_transport->transport_type == ISCSI_TCP) {
 		sleep = cmpxchg(&conn->tx_thread_active, true, false);
+		if (!sleep)
+			return;
+	}

 	atomic_set(&conn->conn_logout_remove, 0);
 	complete(&conn->conn_logout_comp);

@@ -4592,8 +4611,11 @@ static void iscsit_logout_post_handler_samecid(
 {
 	int sleep = 1;

-	if (conn->conn_transport->transport_type == ISCSI_TCP)
+	if (conn->conn_transport->transport_type == ISCSI_TCP) {
 		sleep = cmpxchg(&conn->tx_thread_active, true, false);
+		if (!sleep)
+			return;
+	}

 	atomic_set(&conn->conn_logout_remove, 0);
 	complete(&conn->conn_logout_comp);
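The logout-post-handler hunks rely on cmpxchg() as a single-winner handoff: flipping tx_thread_active from true to false succeeds for exactly one of the racing contexts, and only the winner proceeds to complete the logout. The C11 equivalent of that claim:

	#include <stdatomic.h>
	#include <stdbool.h>

	static _Atomic bool tx_thread_active = true;

	/* true for exactly one caller; losers must return early */
	bool claim_logout(void)
	{
		bool expected = true;

		return atomic_compare_exchange_strong(&tx_thread_active,
						      &expected, false);
	}
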
@@ -930,8 +930,10 @@ static void iscsit_handle_connection_cleanup(struct iscsi_conn *conn)
 	}
 }

-void iscsit_take_action_for_connection_exit(struct iscsi_conn *conn)
+void iscsit_take_action_for_connection_exit(struct iscsi_conn *conn, bool *conn_freed)
 {
+	*conn_freed = false;
+
 	spin_lock_bh(&conn->state_lock);
 	if (atomic_read(&conn->connection_exit)) {
 		spin_unlock_bh(&conn->state_lock);

@@ -942,6 +944,7 @@ void iscsit_take_action_for_connection_exit(struct iscsi_conn *conn)
 	if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT) {
 		spin_unlock_bh(&conn->state_lock);
 		iscsit_close_connection(conn);
+		*conn_freed = true;
 		return;
 	}

@@ -955,4 +958,5 @@ void iscsit_take_action_for_connection_exit(struct iscsi_conn *conn)
 	spin_unlock_bh(&conn->state_lock);

 	iscsit_handle_connection_cleanup(conn);
+	*conn_freed = true;
 }
@@ -9,6 +9,6 @@ extern int iscsit_stop_time2retain_timer(struct iscsi_session *);
 extern void iscsit_connection_reinstatement_rcfr(struct iscsi_conn *);
 extern void iscsit_cause_connection_reinstatement(struct iscsi_conn *, int);
 extern void iscsit_fall_back_to_erl0(struct iscsi_session *);
-extern void iscsit_take_action_for_connection_exit(struct iscsi_conn *);
+extern void iscsit_take_action_for_connection_exit(struct iscsi_conn *, bool *);

 #endif   /*** ISCSI_TARGET_ERL0_H ***/
@@ -1436,5 +1436,9 @@ int iscsi_target_login_thread(void *arg)
 			break;
 	}

+	while (!kthread_should_stop()) {
+		msleep(100);
+	}
+
 	return 0;
 }
@@ -489,14 +489,60 @@ static void iscsi_target_restore_sock_callbacks(struct iscsi_conn *conn)

 static int iscsi_target_do_login(struct iscsi_conn *, struct iscsi_login *);

-static bool iscsi_target_sk_state_check(struct sock *sk)
+static bool __iscsi_target_sk_check_close(struct sock *sk)
 {
 	if (sk->sk_state == TCP_CLOSE_WAIT || sk->sk_state == TCP_CLOSE) {
-		pr_debug("iscsi_target_sk_state_check: TCP_CLOSE_WAIT|TCP_CLOSE,"
+		pr_debug("__iscsi_target_sk_check_close: TCP_CLOSE_WAIT|TCP_CLOSE,"
			"returning FALSE\n");
-		return false;
+		return true;
 	}
-	return true;
+	return false;
+}
+
+static bool iscsi_target_sk_check_close(struct iscsi_conn *conn)
+{
+	bool state = false;
+
+	if (conn->sock) {
+		struct sock *sk = conn->sock->sk;
+
+		read_lock_bh(&sk->sk_callback_lock);
+		state = (__iscsi_target_sk_check_close(sk) ||
+			 test_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags));
+		read_unlock_bh(&sk->sk_callback_lock);
+	}
+	return state;
+}
+
+static bool iscsi_target_sk_check_flag(struct iscsi_conn *conn, unsigned int flag)
+{
+	bool state = false;
+
+	if (conn->sock) {
+		struct sock *sk = conn->sock->sk;
+
+		read_lock_bh(&sk->sk_callback_lock);
+		state = test_bit(flag, &conn->login_flags);
+		read_unlock_bh(&sk->sk_callback_lock);
+	}
+	return state;
+}
+
+static bool iscsi_target_sk_check_and_clear(struct iscsi_conn *conn, unsigned int flag)
+{
+	bool state = false;
+
+	if (conn->sock) {
+		struct sock *sk = conn->sock->sk;
+
+		write_lock_bh(&sk->sk_callback_lock);
+		state = (__iscsi_target_sk_check_close(sk) ||
+			 test_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags));
+		if (!state)
+			clear_bit(flag, &conn->login_flags);
+		write_unlock_bh(&sk->sk_callback_lock);
+	}
+	return state;
+}
+
 static void iscsi_target_login_drop(struct iscsi_conn *conn, struct iscsi_login *login)

@@ -536,6 +582,20 @@ static void iscsi_target_do_login_rx(struct work_struct *work)

 	pr_debug("entering iscsi_target_do_login_rx, conn: %p, %s:%d\n",
 			conn, current->comm, current->pid);
+	/*
+	 * If iscsi_target_do_login_rx() has been invoked by ->sk_data_ready()
+	 * before initial PDU processing in iscsi_target_start_negotiation()
+	 * has completed, go ahead and retry until it's cleared.
+	 *
+	 * Otherwise if the TCP connection drops while this is occuring,
+	 * iscsi_target_start_negotiation() will detect the failure, call
+	 * cancel_delayed_work_sync(&conn->login_work), and cleanup the
+	 * remaining iscsi connection resources from iscsi_np process context.
+	 */
+	if (iscsi_target_sk_check_flag(conn, LOGIN_FLAGS_INITIAL_PDU)) {
+		schedule_delayed_work(&conn->login_work, msecs_to_jiffies(10));
+		return;
+	}

 	spin_lock(&tpg->tpg_state_lock);
 	state = (tpg->tpg_state == TPG_STATE_ACTIVE);

@@ -543,26 +603,12 @@ static void iscsi_target_do_login_rx(struct work_struct *work)

 	if (!state) {
 		pr_debug("iscsi_target_do_login_rx: tpg_state != TPG_STATE_ACTIVE\n");
-		iscsi_target_restore_sock_callbacks(conn);
-		iscsi_target_login_drop(conn, login);
-		iscsit_deaccess_np(np, tpg, tpg_np);
-		return;
+		goto err;
 	}

-	if (conn->sock) {
-		struct sock *sk = conn->sock->sk;
-
-		read_lock_bh(&sk->sk_callback_lock);
-		state = iscsi_target_sk_state_check(sk);
-		read_unlock_bh(&sk->sk_callback_lock);
-
-		if (!state) {
-			pr_debug("iscsi_target_do_login_rx, TCP state CLOSE\n");
-			iscsi_target_restore_sock_callbacks(conn);
-			iscsi_target_login_drop(conn, login);
-			iscsit_deaccess_np(np, tpg, tpg_np);
-			return;
-		}
+	if (iscsi_target_sk_check_close(conn)) {
+		pr_debug("iscsi_target_do_login_rx, TCP state CLOSE\n");
+		goto err;
 	}

 	conn->login_kworker = current;

@@ -580,34 +626,29 @@ static void iscsi_target_do_login_rx(struct work_struct *work)
 	flush_signals(current);
 	conn->login_kworker = NULL;

-	if (rc < 0) {
-		iscsi_target_restore_sock_callbacks(conn);
-		iscsi_target_login_drop(conn, login);
-		iscsit_deaccess_np(np, tpg, tpg_np);
-		return;
-	}
+	if (rc < 0)
+		goto err;

 	pr_debug("iscsi_target_do_login_rx after rx_login_io, %p, %s:%d\n",
 			conn, current->comm, current->pid);

 	rc = iscsi_target_do_login(conn, login);
 	if (rc < 0) {
-		iscsi_target_restore_sock_callbacks(conn);
-		iscsi_target_login_drop(conn, login);
-		iscsit_deaccess_np(np, tpg, tpg_np);
+		goto err;
 	} else if (!rc) {
-		if (conn->sock) {
-			struct sock *sk = conn->sock->sk;
-
-			write_lock_bh(&sk->sk_callback_lock);
-			clear_bit(LOGIN_FLAGS_READ_ACTIVE, &conn->login_flags);
-			write_unlock_bh(&sk->sk_callback_lock);
-		}
+		if (iscsi_target_sk_check_and_clear(conn, LOGIN_FLAGS_READ_ACTIVE))
+			goto err;
 	} else if (rc == 1) {
 		iscsi_target_nego_release(conn);
 		iscsi_post_login_handler(np, conn, zero_tsih);
 		iscsit_deaccess_np(np, tpg, tpg_np);
 	}
+	return;
+
+err:
+	iscsi_target_restore_sock_callbacks(conn);
+	iscsi_target_login_drop(conn, login);
+	iscsit_deaccess_np(np, tpg, tpg_np);
 }

 static void iscsi_target_do_cleanup(struct work_struct *work)

@@ -655,31 +696,54 @@ static void iscsi_target_sk_state_change(struct sock *sk)
 		orig_state_change(sk);
 		return;
 	}
+	state = __iscsi_target_sk_check_close(sk);
+	pr_debug("__iscsi_target_sk_close_change: state: %d\n", state);
+
 	if (test_bit(LOGIN_FLAGS_READ_ACTIVE, &conn->login_flags)) {
 		pr_debug("Got LOGIN_FLAGS_READ_ACTIVE=1 sk_state_change"
			 " conn: %p\n", conn);
+		if (state)
+			set_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags);
 		write_unlock_bh(&sk->sk_callback_lock);
 		orig_state_change(sk);
 		return;
 	}
-	if (test_and_set_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags)) {
+	if (test_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags)) {
 		pr_debug("Got LOGIN_FLAGS_CLOSED=1 sk_state_change conn: %p\n",
			 conn);
 		write_unlock_bh(&sk->sk_callback_lock);
 		orig_state_change(sk);
 		return;
 	}
+	/*
+	 * If the TCP connection has dropped, go ahead and set LOGIN_FLAGS_CLOSED,
+	 * but only queue conn->login_work -> iscsi_target_do_login_rx()
+	 * processing if LOGIN_FLAGS_INITIAL_PDU has already been cleared.
+	 *
+	 * When iscsi_target_do_login_rx() runs, iscsi_target_sk_check_close()
+	 * will detect the dropped TCP connection from delayed workqueue context.
+	 *
+	 * If LOGIN_FLAGS_INITIAL_PDU is still set, which means the initial
+	 * iscsi_target_start_negotiation() is running, iscsi_target_do_login()
+	 * via iscsi_target_sk_check_close() or iscsi_target_start_negotiation()
+	 * via iscsi_target_sk_check_and_clear() is responsible for detecting the
+	 * dropped TCP connection in iscsi_np process context, and cleaning up
+	 * the remaining iscsi connection resources.
+	 */
+	if (state) {
+		pr_debug("iscsi_target_sk_state_change got failed state\n");
+		set_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags);
+		state = test_bit(LOGIN_FLAGS_INITIAL_PDU, &conn->login_flags);
+		write_unlock_bh(&sk->sk_callback_lock);

-	state = iscsi_target_sk_state_check(sk);
-	write_unlock_bh(&sk->sk_callback_lock);
-
-	pr_debug("iscsi_target_sk_state_change: state: %d\n", state);
+		orig_state_change(sk);

-	if (!state) {
-		pr_debug("iscsi_target_sk_state_change got failed state\n");
-		schedule_delayed_work(&conn->login_cleanup_work, 0);
+		if (!state)
+			schedule_delayed_work(&conn->login_work, 0);
 		return;
 	}
+	write_unlock_bh(&sk->sk_callback_lock);
+
 	orig_state_change(sk);
 }

@@ -944,6 +1008,15 @@ static int iscsi_target_do_login(struct iscsi_conn *conn, struct iscsi_login *lo
 			if (iscsi_target_handle_csg_one(conn, login) < 0)
 				return -1;
 			if (login_rsp->flags & ISCSI_FLAG_LOGIN_TRANSIT) {
+				/*
+				 * Check to make sure the TCP connection has not
+				 * dropped asynchronously while session reinstatement
+				 * was occuring in this kthread context, before
+				 * transitioning to full feature phase operation.
+				 */
+				if (iscsi_target_sk_check_close(conn))
+					return -1;
+
 				login->tsih = conn->sess->tsih;
 				login->login_complete = 1;
 				iscsi_target_restore_sock_callbacks(conn);

@@ -970,21 +1043,6 @@ static int iscsi_target_do_login(struct iscsi_conn *conn, struct iscsi_login *lo
 			break;
 	}

-	if (conn->sock) {
-		struct sock *sk = conn->sock->sk;
-		bool state;
-
-		read_lock_bh(&sk->sk_callback_lock);
-		state = iscsi_target_sk_state_check(sk);
-		read_unlock_bh(&sk->sk_callback_lock);
-
-		if (!state) {
-			pr_debug("iscsi_target_do_login() failed state for"
-				 " conn: %p\n", conn);
-			return -1;
-		}
-	}
-
 	return 0;
 }

@@ -1248,16 +1306,28 @@ int iscsi_target_start_negotiation(
 {
 	int ret;

-	ret = iscsi_target_do_login(conn, login);
-	if (!ret) {
-		if (conn->sock) {
-			struct sock *sk = conn->sock->sk;
+	if (conn->sock) {
+		struct sock *sk = conn->sock->sk;

-			write_lock_bh(&sk->sk_callback_lock);
-			set_bit(LOGIN_FLAGS_READY, &conn->login_flags);
-			write_unlock_bh(&sk->sk_callback_lock);
-		}
-	} else if (ret < 0) {
+		write_lock_bh(&sk->sk_callback_lock);
+		set_bit(LOGIN_FLAGS_READY, &conn->login_flags);
+		set_bit(LOGIN_FLAGS_INITIAL_PDU, &conn->login_flags);
+		write_unlock_bh(&sk->sk_callback_lock);
+	}
+	/*
+	 * If iscsi_target_do_login returns zero to signal more PDU
+	 * exchanges are required to complete the login, go ahead and
+	 * clear LOGIN_FLAGS_INITIAL_PDU but only if the TCP connection
+	 * is still active.
+	 *
+	 * Otherwise if TCP connection dropped asynchronously, go ahead
+	 * and perform connection cleanup now.
+	 */
+	ret = iscsi_target_do_login(conn, login);
+	if (!ret && iscsi_target_sk_check_and_clear(conn, LOGIN_FLAGS_INITIAL_PDU))
+		ret = -1;
+
+	if (ret < 0) {
 		cancel_delayed_work_sync(&conn->login_work);
 		cancel_delayed_work_sync(&conn->login_cleanup_work);
 		iscsi_target_restore_sock_callbacks(conn);
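The login rework pivots on touching login_flags only under sk_callback_lock, so the socket's state-change callback and the login kthread observe one coherent order: a racing close either sees LOGIN_FLAGS_INITIAL_PDU still set (and leaves cleanup to the login path) or finds it cleared (and queues the work itself). The test-and-clear in miniature, with a pthread rwlock standing in for sk_callback_lock (names invented):

	#include <pthread.h>

	#define FLAG_CLOSED	(1UL << 0)

	static pthread_rwlock_t cb_lock = PTHREAD_RWLOCK_INITIALIZER;
	static unsigned long login_flags;

	/* returns nonzero if the connection closed; clears the flag otherwise */
	int check_and_clear(unsigned long flag)
	{
		int closed;

		pthread_rwlock_wrlock(&cb_lock);
		closed = !!(login_flags & FLAG_CLOSED);
		if (!closed)
			login_flags &= ~flag;
		pthread_rwlock_unlock(&cb_lock);
		return closed;
	}
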
@@ -92,6 +92,11 @@ static int target_fabric_mappedlun_link(
 		pr_err("Source se_lun->lun_se_dev does not exist\n");
 		return -EINVAL;
 	}
+	if (lun->lun_shutdown) {
+		pr_err("Unable to create mappedlun symlink because"
+			" lun->lun_shutdown=true\n");
+		return -EINVAL;
+	}
 	se_tpg = lun->lun_tpg;

 	nacl_ci = &lun_acl_ci->ci_parent->ci_group->cg_item;
@@ -673,6 +673,8 @@ void core_tpg_remove_lun(
 	 */
 	struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);

+	lun->lun_shutdown = true;
+
 	core_clear_lun_from_tpg(lun, tpg);
 	/*
 	 * Wait for any active I/O references to percpu se_lun->lun_ref to

@@ -694,6 +696,8 @@ void core_tpg_remove_lun(
 	}
 	if (!(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
 		hlist_del_rcu(&lun->link);
+
+	lun->lun_shutdown = false;
 	mutex_unlock(&tpg->tpg_lun_mutex);

 	percpu_ref_exit(&lun->lun_ref);
@@ -500,6 +500,8 @@ static int ext4_find_unwritten_pgoff(struct inode *inode,
 			lastoff = page_offset(page);
 			bh = head = page_buffers(page);
 			do {
+				if (lastoff + bh->b_size <= startoff)
+					goto next;
 				if (buffer_uptodate(bh) ||
 				    buffer_unwritten(bh)) {
 					if (whence == SEEK_DATA)

@@ -514,6 +516,7 @@ static int ext4_find_unwritten_pgoff(struct inode *inode,
 					unlock_page(page);
 					goto out;
 				}
+next:
 				lastoff += bh->b_size;
 				bh = bh->b_this_page;
 			} while (bh != head);
@@ -1926,7 +1926,8 @@ retry:
 		n_desc_blocks = o_desc_blocks +
 			le16_to_cpu(es->s_reserved_gdt_blocks);
 		n_group = n_desc_blocks * EXT4_DESC_PER_BLOCK(sb);
-		n_blocks_count = n_group * EXT4_BLOCKS_PER_GROUP(sb);
+		n_blocks_count = (ext4_fsblk_t)n_group *
+			EXT4_BLOCKS_PER_GROUP(sb);
 		n_group--; /* set to last group number */
 	}
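The resize hunk is a one-operand widening cast: both factors are 32-bit, so the product must be promoted before the multiply, not after. The rule in isolation:

	#include <stdint.h>

	uint64_t blocks_count(uint32_t n_group, uint32_t blocks_per_group)
	{
		/* cast one operand first; (uint64_t)(a * b) would truncate */
		return (uint64_t)n_group * blocks_per_group;
	}
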
@@ -104,12 +104,19 @@ static long sdcardfs_unlocked_ioctl(struct file *file, unsigned int cmd,
 {
 	long err = -ENOTTY;
 	struct file *lower_file;
+	const struct cred *saved_cred = NULL;
+	struct dentry *dentry = file->f_path.dentry;
+	struct sdcardfs_sb_info *sbi = SDCARDFS_SB(dentry->d_sb);

 	lower_file = sdcardfs_lower_file(file);

 	/* XXX: use vfs_ioctl if/when VFS exports it */
 	if (!lower_file || !lower_file->f_op)
 		goto out;

+	/* save current_cred and override it */
+	OVERRIDE_CRED(sbi, saved_cred, SDCARDFS_I(file_inode(file)));
+
 	if (lower_file->f_op->unlocked_ioctl)
 		err = lower_file->f_op->unlocked_ioctl(lower_file, cmd, arg);

@@ -117,6 +124,7 @@ static long sdcardfs_unlocked_ioctl(struct file *file, unsigned int cmd,
 	if (!err)
 		sdcardfs_copy_and_fix_attrs(file_inode(file),
				      file_inode(lower_file));
+	REVERT_CRED(saved_cred);
 out:
 	return err;
 }

@@ -127,15 +135,23 @@ static long sdcardfs_compat_ioctl(struct file *file, unsigned int cmd,
 {
 	long err = -ENOTTY;
 	struct file *lower_file;
+	const struct cred *saved_cred = NULL;
+	struct dentry *dentry = file->f_path.dentry;
+	struct sdcardfs_sb_info *sbi = SDCARDFS_SB(dentry->d_sb);

 	lower_file = sdcardfs_lower_file(file);

 	/* XXX: use vfs_ioctl if/when VFS exports it */
 	if (!lower_file || !lower_file->f_op)
 		goto out;

+	/* save current_cred and override it */
+	OVERRIDE_CRED(sbi, saved_cred, SDCARDFS_I(file_inode(file)));
+
 	if (lower_file->f_op->compat_ioctl)
 		err = lower_file->f_op->compat_ioctl(lower_file, cmd, arg);

+	REVERT_CRED(saved_cred);
 out:
 	return err;
 }
@@ -15,6 +15,8 @@
 #include <net/net_namespace.h>
 #include <linux/sched/rt.h>

+#include <asm/thread_info.h>
+
 #ifdef CONFIG_SMP
 # define INIT_PUSHABLE_TASKS(tsk)					\
	.pushable_tasks = PLIST_NODE_INIT(tsk.pushable_tasks, MAX_PRIO),

@@ -183,14 +185,21 @@ extern struct task_group root_task_group;
 # define INIT_KASAN(tsk)
 #endif

+#ifdef CONFIG_THREAD_INFO_IN_TASK
+# define INIT_TASK_TI(tsk) .thread_info = INIT_THREAD_INFO(tsk),
+#else
+# define INIT_TASK_TI(tsk)
+#endif
+
 /*
  *  INIT_TASK is used to set up the first task table, touch at
  * your own risk!. Base=0, limit=0x1fffff (=2MB)
  */
 #define INIT_TASK(tsk)	\
 {									\
+	INIT_TASK_TI(tsk)						\
	.state		= 0,						\
-	.stack		= &init_thread_info,				\
+	.stack		= init_stack,					\
	.usage		= ATOMIC_INIT(2),				\
	.flags		= PF_KTHREAD,					\
	.prio		= MAX_PRIO-20,					\
@@ -177,7 +177,7 @@ extern int kdb_get_kbd_char(void);
 static inline
 int kdb_process_cpu(const struct task_struct *p)
 {
-	unsigned int cpu = task_thread_info(p)->cpu;
+	unsigned int cpu = task_cpu(p);
 	if (cpu > num_possible_cpus())
 		cpu = 0;
 	return cpu;
@@ -514,6 +514,10 @@ struct mm_struct {
	 * PROT_NONE or PROT_NUMA mapped page.
	 */
	bool tlb_flush_pending;
+#endif
+#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
+	/* See flush_tlb_batched_pending() */
+	bool tlb_flush_batched;
 #endif
	struct uprobes_state uprobes_state;
 #ifdef CONFIG_X86_INTEL_MPX
include/linux/restart_block.h (new file, 51 lines)

@@ -0,0 +1,51 @@
+/*
+ * Common syscall restarting data
+ */
+#ifndef __LINUX_RESTART_BLOCK_H
+#define __LINUX_RESTART_BLOCK_H
+
+#include <linux/compiler.h>
+#include <linux/types.h>
+
+struct timespec;
+struct compat_timespec;
+struct pollfd;
+
+/*
+ * System call restart block.
+ */
+struct restart_block {
+	long (*fn)(struct restart_block *);
+	union {
+		/* For futex_wait and futex_wait_requeue_pi */
+		struct {
+			u32 __user *uaddr;
+			u32 val;
+			u32 flags;
+			u32 bitset;
+			u64 time;
+			u32 __user *uaddr2;
+		} futex;
+		/* For nanosleep */
+		struct {
+			clockid_t clockid;
+			struct timespec __user *rmtp;
+#ifdef CONFIG_COMPAT
+			struct compat_timespec __user *compat_rmtp;
+#endif
+			u64 expires;
+		} nanosleep;
+		/* For poll */
+		struct {
+			struct pollfd __user *ufds;
+			int nfds;
+			int has_timeout;
+			unsigned long tv_sec;
+			unsigned long tv_nsec;
+		} poll;
+	};
+};
+
+extern long do_no_restart_syscall(struct restart_block *parm);
+
+#endif /* __LINUX_RESTART_BLOCK_H */
@@ -882,6 +882,16 @@ struct signal_struct {

 #define SIGNAL_UNKILLABLE	0x00000040 /* for init: ignore fatal signals */

+#define SIGNAL_STOP_MASK (SIGNAL_CLD_MASK | SIGNAL_STOP_STOPPED | \
+			  SIGNAL_STOP_CONTINUED)
+
+static inline void signal_set_stop_flags(struct signal_struct *sig,
+					 unsigned int flags)
+{
+	WARN_ON(sig->flags & (SIGNAL_GROUP_EXIT|SIGNAL_GROUP_COREDUMP));
+	sig->flags = (sig->flags & ~SIGNAL_STOP_MASK) | flags;
+}
+
 /* If true, all threads except ->group_exit_task have pending SIGKILL */
 static inline int signal_group_exit(const struct signal_struct *sig)
 {

@@ -1601,6 +1611,13 @@ struct tlbflush_unmap_batch {
 };

 struct task_struct {
+#ifdef CONFIG_THREAD_INFO_IN_TASK
+	/*
+	 * For reasons of header soup (see current_thread_info()), this
+	 * must be the first element of task_struct.
+	 */
+	struct thread_info thread_info;
+#endif
	volatile long state;	/* -1 unrunnable, 0 runnable, >0 stopped */
	void *stack;
	atomic_t usage;

@@ -1610,6 +1627,9 @@ struct task_struct {
 #ifdef CONFIG_SMP
	struct llist_node wake_entry;
	int on_cpu;
+#ifdef CONFIG_THREAD_INFO_IN_TASK
+	unsigned int cpu;	/* current CPU */
+#endif
	unsigned int wakee_flips;
	unsigned long wakee_flip_decay_ts;
	struct task_struct *last_wakee;

@@ -2753,7 +2773,9 @@ extern void set_curr_task(int cpu, struct task_struct *p);
 void yield(void);

 union thread_union {
+#ifndef CONFIG_THREAD_INFO_IN_TASK
	struct thread_info thread_info;
+#endif
	unsigned long stack[THREAD_SIZE/sizeof(long)];
 };

@@ -3149,10 +3171,34 @@ static inline void threadgroup_change_end(struct task_struct *tsk)
	cgroup_threadgroup_change_end(tsk);
 }

-#ifndef __HAVE_THREAD_FUNCTIONS
+#ifdef CONFIG_THREAD_INFO_IN_TASK
+
+static inline struct thread_info *task_thread_info(struct task_struct *task)
+{
+	return &task->thread_info;
+}
+
+/*
+ * When accessing the stack of a non-current task that might exit, use
+ * try_get_task_stack() instead.  task_stack_page will return a pointer
+ * that could get freed out from under you.
+ */
+static inline void *task_stack_page(const struct task_struct *task)
+{
+	return task->stack;
+}
+
+#define setup_thread_stack(new,old)	do { } while(0)
+
+static inline unsigned long *end_of_stack(const struct task_struct *task)
+{
+	return task->stack;
+}
+
+#elif !defined(__HAVE_THREAD_FUNCTIONS)

 #define task_thread_info(task)	((struct thread_info *)(task)->stack)
-#define task_stack_page(task)	((task)->stack)
+#define task_stack_page(task)	((void *)(task)->stack)

 static inline void setup_thread_stack(struct task_struct *p, struct task_struct *org)
 {

@@ -3179,6 +3225,14 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
 }

 #endif
+
+static inline void *try_get_task_stack(struct task_struct *tsk)
+{
+	return task_stack_page(tsk);
+}
+
+static inline void put_task_stack(struct task_struct *tsk) {}
+
 #define task_stack_end_corrupted(task) \
		(*(end_of_stack(task)) != STACK_END_MAGIC)

@@ -3189,7 +3243,7 @@ static inline int object_is_on_stack(void *obj)
	return (obj >= stack) && (obj < (stack + THREAD_SIZE));
 }

-extern void thread_info_cache_init(void);
+extern void thread_stack_cache_init(void);

 #ifdef CONFIG_DEBUG_STACK_USAGE
 static inline unsigned long stack_not_used(struct task_struct *p)

@@ -3453,7 +3507,11 @@ static inline void ptrace_signal_wake_up(struct task_struct *t, bool resume)

 static inline unsigned int task_cpu(const struct task_struct *p)
 {
+#ifdef CONFIG_THREAD_INFO_IN_TASK
+	return p->cpu;
+#else
	return task_thread_info(p)->cpu;
+#endif
 }

 static inline int task_node(const struct task_struct *p)
@@ -215,7 +215,7 @@ static inline const char *__check_heap_object(const void *ptr,
  * (PAGE_SIZE*2).  Larger requests are passed to the page allocator.
  */
 #define KMALLOC_SHIFT_HIGH	(PAGE_SHIFT + 1)
-#define KMALLOC_SHIFT_MAX	(MAX_ORDER + PAGE_SHIFT)
+#define KMALLOC_SHIFT_MAX	(MAX_ORDER + PAGE_SHIFT - 1)
 #ifndef KMALLOC_SHIFT_LOW
 #define KMALLOC_SHIFT_LOW	3
 #endif

@@ -228,7 +228,7 @@ static inline const char *__check_heap_object(const void *ptr,
  * be allocated from the same page.
  */
 #define KMALLOC_SHIFT_HIGH	PAGE_SHIFT
-#define KMALLOC_SHIFT_MAX	30
+#define KMALLOC_SHIFT_MAX	(MAX_ORDER + PAGE_SHIFT - 1)
 #ifndef KMALLOC_SHIFT_LOW
 #define KMALLOC_SHIFT_LOW	3
 #endif
@@ -9,46 +9,17 @@

 #include <linux/types.h>
 #include <linux/bug.h>
+#include <linux/restart_block.h>

-struct timespec;
-struct compat_timespec;
-
+#ifdef CONFIG_THREAD_INFO_IN_TASK
 /*
- * System call restart block.
+ * For CONFIG_THREAD_INFO_IN_TASK kernels we need <asm/current.h> for the
+ * definition of current, but for !CONFIG_THREAD_INFO_IN_TASK kernels,
+ * including <asm/current.h> can cause a circular dependency on some platforms.
  */
-struct restart_block {
-	long (*fn)(struct restart_block *);
-	union {
-		/* For futex_wait and futex_wait_requeue_pi */
-		struct {
-			u32 __user *uaddr;
-			u32 val;
-			u32 flags;
-			u32 bitset;
-			u64 time;
-			u32 __user *uaddr2;
-		} futex;
-		/* For nanosleep */
-		struct {
-			clockid_t clockid;
-			struct timespec __user *rmtp;
-#ifdef CONFIG_COMPAT
-			struct compat_timespec __user *compat_rmtp;
-#endif
-			u64 expires;
-		} nanosleep;
-		/* For poll */
-		struct {
-			struct pollfd __user *ufds;
-			int nfds;
-			int has_timeout;
-			unsigned long tv_sec;
-			unsigned long tv_nsec;
-		} poll;
-	};
-};
-
-extern long do_no_restart_syscall(struct restart_block *parm);
+#include <asm/current.h>
+#define current_thread_info() ((struct thread_info *)current)
+#endif

 #include <linux/bitops.h>
 #include <asm/thread_info.h>
@@ -311,6 +311,7 @@ enum {

	__WQ_DRAINING		= 1 << 16, /* internal: workqueue is draining */
	__WQ_ORDERED		= 1 << 17, /* internal: workqueue is ordered */
+	__WQ_ORDERED_EXPLICIT	= 1 << 18, /* internal: alloc_ordered_workqueue() */

	WQ_MAX_ACTIVE		= 512,	  /* I like 512, better ideas? */
	WQ_MAX_UNBOUND_PER_CPU	= 4,	  /* 4 * #cpus for unbound wq */

@@ -408,7 +409,8 @@ __alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active,
 * Pointer to the allocated workqueue on success, %NULL on failure.
 */
 #define alloc_ordered_workqueue(fmt, flags, args...)			\
-	alloc_workqueue(fmt, WQ_UNBOUND | __WQ_ORDERED | (flags), 1, ##args)
+	alloc_workqueue(fmt, WQ_UNBOUND | __WQ_ORDERED |		\
+			__WQ_ORDERED_EXPLICIT | (flags), 1, ##args)

 #define create_workqueue(name)						\
	alloc_workqueue("%s", WQ_MEM_RECLAIM, 1, (name))
@@ -556,7 +556,8 @@ iwe_stream_add_point(struct iw_request_info *info, char *stream, char *ends,
		memcpy(stream + lcp_len,
		       ((char *) &iwe->u) + IW_EV_POINT_OFF,
		       IW_EV_POINT_PK_LEN - IW_EV_LCP_PK_LEN);
-		memcpy(stream + point_len, extra, iwe->u.data.length);
+		if (iwe->u.data.length && extra)
+			memcpy(stream + point_len, extra, iwe->u.data.length);
		stream += event_len;
	}
	return stream;
@@ -444,6 +444,8 @@ _sctp_walk_params((pos), (chunk), ntohs((chunk)->chunk_hdr.length), member)

 #define _sctp_walk_params(pos, chunk, end, member)\
 for (pos.v = chunk->member;\
+     (pos.v + offsetof(struct sctp_paramhdr, length) + sizeof(pos.p->length) <=\
+      (void *)chunk + end) &&\
      pos.v <= (void *)chunk + end - ntohs(pos.p->length) &&\
      ntohs(pos.p->length) >= sizeof(sctp_paramhdr_t);\
      pos.v += WORD_ROUND(ntohs(pos.p->length)))

@@ -454,6 +456,8 @@ _sctp_walk_errors((err), (chunk_hdr), ntohs((chunk_hdr)->length))
 #define _sctp_walk_errors(err, chunk_hdr, end)\
 for (err = (sctp_errhdr_t *)((void *)chunk_hdr + \
	    sizeof(sctp_chunkhdr_t));\
+     ((void *)err + offsetof(sctp_errhdr_t, length) + sizeof(err->length) <=\
+      (void *)chunk_hdr + end) &&\
      (void *)err <= (void *)chunk_hdr + end - ntohs(err->length) &&\
      ntohs(err->length) >= sizeof(sctp_errhdr_t); \
     err = (sctp_errhdr_t *)((void *)err + WORD_ROUND(ntohs(err->length))))
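The sctp walker macros gain the canonical two-stage TLV check: first prove the length field itself lies inside the buffer, only then read it and prove the whole element fits. The same walk as a standalone function (struct invented for the sketch):

	#include <stddef.h>
	#include <stdint.h>

	struct tlv { uint16_t type; uint16_t len; };

	const struct tlv *tlv_check(const uint8_t *p, const uint8_t *end)
	{
		const struct tlv *t = (const struct tlv *)p;

		/* stage 1: can the length field even be read? */
		if (p + offsetof(struct tlv, len) + sizeof(t->len) > end)
			return NULL;
		/* stage 2: does the element it describes fit? */
		if (t->len < sizeof(*t) || t->len > (size_t)(end - p))
			return NULL;
		return t;
	}
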
@@ -562,6 +562,7 @@ struct iscsi_conn {
 #define LOGIN_FLAGS_READ_ACTIVE		1
 #define LOGIN_FLAGS_CLOSED		2
 #define LOGIN_FLAGS_READY		4
+#define LOGIN_FLAGS_INITIAL_PDU		8
	unsigned long		login_flags;
	struct delayed_work	login_work;
	struct delayed_work	login_cleanup_work;
@@ -714,6 +714,7 @@ struct se_lun {
 #define SE_LUN_LINK_MAGIC			0xffff7771
	u32			lun_link_magic;
	u32			lun_access;
+	bool			lun_shutdown;
	u32			lun_index;

	/* RELATIVE TARGET PORT IDENTIFER */
init/Kconfig

@@ -26,6 +26,16 @@ config IRQ_WORK
 config BUILDTIME_EXTABLE_SORT
	bool

+config THREAD_INFO_IN_TASK
+	bool
+	help
+	  Select this to move thread_info off the stack into task_struct.  To
+	  make this work, an arch will need to remove all thread_info fields
+	  except flags and fix any runtime bugs.
+
+	  One subtle change that will be needed is to use try_get_task_stack()
+	  and put_task_stack() in save_thread_stack_tsk() and get_wchan().
+
 menu "General setup"

 config BROKEN
@@ -22,5 +22,8 @@ EXPORT_SYMBOL(init_task);
 * Initial thread structure. Alignment of this is handled by a special
 * linker map entry.
 */
-union thread_union init_thread_union __init_task_data =
-	{ INIT_THREAD_INFO(init_task) };
+union thread_union init_thread_union __init_task_data = {
+#ifndef CONFIG_THREAD_INFO_IN_TASK
+	INIT_THREAD_INFO(init_task)
+#endif
+};
@@ -468,7 +468,7 @@ void __init __weak smp_setup_processor_id(void)
 }

 # if THREAD_SIZE >= PAGE_SIZE
-void __init __weak thread_info_cache_init(void)
+void __init __weak thread_stack_cache_init(void)
 {
 }
 #endif

@@ -644,7 +644,7 @@ asmlinkage __visible void __init start_kernel(void)
	/* Should be run before the first non-init thread is created */
	init_espfix_bsp();
 #endif
-	thread_info_cache_init();
+	thread_stack_cache_init();
	cred_init();
	fork_init();
	proc_caches_init();
@ -148,18 +148,18 @@ static inline void free_task_struct(struct task_struct *tsk)
|
|||
}
|
||||
#endif
|
||||
|
||||
void __weak arch_release_thread_info(struct thread_info *ti)
|
||||
void __weak arch_release_thread_stack(unsigned long *stack)
|
||||
{
|
||||
}
|
||||
|
||||
#ifndef CONFIG_ARCH_THREAD_INFO_ALLOCATOR
|
||||
#ifndef CONFIG_ARCH_THREAD_STACK_ALLOCATOR
|
||||
|
||||
/*
|
||||
* Allocate pages if THREAD_SIZE is >= PAGE_SIZE, otherwise use a
|
||||
* kmemcache based allocator.
|
||||
*/
|
||||
# if THREAD_SIZE >= PAGE_SIZE
|
||||
static struct thread_info *alloc_thread_info_node(struct task_struct *tsk,
|
||||
static unsigned long *alloc_thread_stack_node(struct task_struct *tsk,
|
||||
int node)
|
||||
{
|
||||
struct page *page = alloc_kmem_pages_node(node, THREADINFO_GFP,
|
||||
|
@ -168,30 +168,32 @@ static struct thread_info *alloc_thread_info_node(struct task_struct *tsk,
|
|||
return page ? page_address(page) : NULL;
|
||||
}
|
||||
|
||||
static inline void free_thread_info(struct thread_info *ti)
|
||||
static inline void free_thread_stack(unsigned long *stack)
|
||||
{
|
||||
kasan_alloc_pages(virt_to_page(ti), THREAD_SIZE_ORDER);
|
||||
free_kmem_pages((unsigned long)ti, THREAD_SIZE_ORDER);
|
||||
struct page *page = virt_to_page(stack);
|
||||
|
||||
kasan_alloc_pages(page, THREAD_SIZE_ORDER);
|
||||
__free_kmem_pages(page, THREAD_SIZE_ORDER);
|
||||
}
|
||||
# else
|
||||
static struct kmem_cache *thread_info_cache;
|
||||
static struct kmem_cache *thread_stack_cache;
|
||||
|
||||
static struct thread_info *alloc_thread_info_node(struct task_struct *tsk,
|
||||
static struct thread_info *alloc_thread_stack_node(struct task_struct *tsk,
|
||||
int node)
|
||||
{
|
||||
return kmem_cache_alloc_node(thread_info_cache, THREADINFO_GFP, node);
|
||||
return kmem_cache_alloc_node(thread_stack_cache, THREADINFO_GFP, node);
|
||||
}
|
||||
|
||||
static void free_thread_info(struct thread_info *ti)
|
||||
static void free_stack(unsigned long *stack)
|
||||
{
|
||||
kmem_cache_free(thread_info_cache, ti);
|
||||
kmem_cache_free(thread_stack_cache, stack);
|
||||
}
|
||||
|
||||
void thread_info_cache_init(void)
|
||||
void thread_stack_cache_init(void)
|
||||
{
|
||||
thread_info_cache = kmem_cache_create("thread_info", THREAD_SIZE,
|
||||
thread_stack_cache = kmem_cache_create("thread_stack", THREAD_SIZE,
|
||||
THREAD_SIZE, 0, NULL);
|
||||
BUG_ON(thread_info_cache == NULL);
|
||||
BUG_ON(thread_stack_cache == NULL);
|
||||
}
|
||||
# endif
|
||||
#endif
|
||||
|
@ -214,9 +216,9 @@ struct kmem_cache *vm_area_cachep;
|
|||
/* SLAB cache for mm_struct structures (tsk->mm) */
|
||||
static struct kmem_cache *mm_cachep;
|
||||
|
||||
static void account_kernel_stack(struct thread_info *ti, int account)
|
||||
static void account_kernel_stack(unsigned long *stack, int account)
|
||||
{
|
||||
struct zone *zone = page_zone(virt_to_page(ti));
|
||||
struct zone *zone = page_zone(virt_to_page(stack));
|
||||
|
||||
mod_zone_page_state(zone, NR_KERNEL_STACK, account);
|
||||
}
|
||||
|
@@ -224,8 +226,8 @@ static void account_kernel_stack(struct thread_info *ti, int account)
 void free_task(struct task_struct *tsk)
 {
 	account_kernel_stack(tsk->stack, -1);
-	arch_release_thread_info(tsk->stack);
-	free_thread_info(tsk->stack);
+	arch_release_thread_stack(tsk->stack);
+	free_thread_stack(tsk->stack);
 	rt_mutex_debug_task_free(tsk);
 	ftrace_graph_exit_task(tsk);
 	put_seccomp_filter(tsk);
@@ -336,7 +338,7 @@ void set_task_stack_end_magic(struct task_struct *tsk)
 static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
 {
 	struct task_struct *tsk;
-	struct thread_info *ti;
+	unsigned long *stack;
 	int err;

 	if (node == NUMA_NO_NODE)
@@ -345,15 +347,15 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
 	if (!tsk)
 		return NULL;

-	ti = alloc_thread_info_node(tsk, node);
-	if (!ti)
+	stack = alloc_thread_stack_node(tsk, node);
+	if (!stack)
 		goto free_tsk;

 	err = arch_dup_task_struct(tsk, orig);
 	if (err)
-		goto free_ti;
+		goto free_stack;

-	tsk->stack = ti;
+	tsk->stack = stack;
 #ifdef CONFIG_SECCOMP
 	/*
 	 * We must handle setting up seccomp filters once we're under
@@ -385,12 +387,12 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
 	tsk->task_frag.page = NULL;
 	tsk->wake_q.next = NULL;

-	account_kernel_stack(ti, 1);
+	account_kernel_stack(stack, 1);

 	return tsk;

-free_ti:
-	free_thread_info(ti);
+free_stack:
+	free_thread_stack(stack);
 free_tsk:
 	free_task_struct(tsk);
 	return NULL;
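The dup_task_struct() hunks are a mechanical rename (ti becomes stack, free_ti becomes free_stack), but they also show the kernel's standard goto-unwind error handling: each allocation gains a cleanup label, and a failure jumps to the label of the last step that succeeded so everything unwinds in reverse order. A stand-alone sketch of that shape, with malloc standing in for the real allocators:

/* unwind_demo.c -- the relabelled error path in miniature. */
#include <stdlib.h>

struct task { unsigned long *stack; };

static struct task *dup_task(void)
{
	struct task *tsk;
	unsigned long *stack;
	int err = 0;			/* stands in for arch_dup_task_struct() */

	tsk = malloc(sizeof(*tsk));
	if (!tsk)
		return NULL;

	stack = malloc(16384);		/* stands in for alloc_thread_stack_node() */
	if (!stack)
		goto free_tsk;

	if (err)
		goto free_stack;	/* label was "free_ti" before the rename */

	tsk->stack = stack;
	return tsk;

free_stack:
	free(stack);
free_tsk:
	free(tsk);
	return NULL;
}

int main(void)
{
	struct task *t = dup_task();

	if (t) {
		free(t->stack);
		free(t);
	}
	return 0;
}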
kernel/kthread.c
@@ -65,7 +65,7 @@ static inline struct kthread *to_kthread(struct task_struct *k)
 static struct kthread *to_live_kthread(struct task_struct *k)
 {
 	struct completion *vfork = ACCESS_ONCE(k->vfork_done);
-	if (likely(vfork))
+	if (likely(vfork) && try_get_task_stack(k))
 		return __to_kthread(vfork);
 	return NULL;
 }
@@ -427,8 +427,10 @@ void kthread_unpark(struct task_struct *k)
 {
 	struct kthread *kthread = to_live_kthread(k);

-	if (kthread)
+	if (kthread) {
 		__kthread_unpark(k, kthread);
+		put_task_stack(k);
+	}
 }
 EXPORT_SYMBOL_GPL(kthread_unpark);

@@ -457,6 +459,7 @@ int kthread_park(struct task_struct *k)
 				wait_for_completion(&kthread->parked);
 			}
 		}
+		put_task_stack(k);
 		ret = 0;
 	}
 	return ret;
@@ -492,6 +495,7 @@ int kthread_stop(struct task_struct *k)
 		__kthread_unpark(k, kthread);
 		wake_up_process(k);
 		wait_for_completion(&kthread->exited);
+		put_task_stack(k);
 	}
 	ret = k->exit_code;
 	put_task_struct(k);
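The kthread.c hunks pair each successful to_live_kthread() call, which now takes a reference on the task's stack via try_get_task_stack(), with a put_task_stack() once the caller is done touching the struct kthread that lives on that stack; this keeps the stack alive even if the task exits meanwhile. A user-space sketch of the try-get/put refcount discipline, using C11 atomics as a stand-in for the kernel's refcounting (names are illustrative):

/* stackref_demo.c -- the try_get/put pairing in miniature. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct stack_ref { atomic_int usage; };

static bool try_get_stack(struct stack_ref *s)
{
	int old = atomic_load(&s->usage);

	/* Only succeed while the stack is still live (usage > 0). */
	while (old > 0) {
		if (atomic_compare_exchange_weak(&s->usage, &old, old + 1))
			return true;
	}
	return false;
}

static void put_stack(struct stack_ref *s)
{
	if (atomic_fetch_sub(&s->usage, 1) == 1)
		puts("last reference dropped: stack may be freed");
}

int main(void)
{
	struct stack_ref s = { .usage = 1 };

	if (try_get_stack(&s)) {	/* like to_live_kthread() */
		/* ... safely read kthread state on the stack ... */
		put_stack(&s);		/* like kthread_park()/stop()/unpark() */
	}
	put_stack(&s);			/* original owner's reference */
	return 0;
}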
kernel/locking/mutex-debug.c
@@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mutex_waiter *waiter)
 }

 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
-			    struct thread_info *ti)
+			    struct task_struct *task)
 {
 	SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));

 	/* Mark the current thread as blocked on the lock: */
-	ti->task->blocked_on = waiter;
+	task->blocked_on = waiter;
 }

 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
-			 struct thread_info *ti)
+			 struct task_struct *task)
 {
 	DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
-	DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
-	DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
-	ti->task->blocked_on = NULL;
+	DEBUG_LOCKS_WARN_ON(waiter->task != task);
+	DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
+	task->blocked_on = NULL;

 	list_del_init(&waiter->list);
 	waiter->task = NULL;
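Passing the task_struct directly removes the ti->task pointer chase from the mutex debugging hooks. A minimal stand-alone model of the blocked_on bookkeeping these hooks maintain; the struct layouts are illustrative, not the kernel's:

/* blocked_on_demo.c -- the add/remove waiter bookkeeping in miniature. */
#include <assert.h>
#include <stddef.h>

struct task;
struct mutex_waiter { struct task *task; };
struct task { struct mutex_waiter *blocked_on; };

static void debug_mutex_add_waiter(struct task *task, struct mutex_waiter *w)
{
	task->blocked_on = w;		/* was: ti->task->blocked_on = w */
}

static void mutex_remove_waiter(struct task *task, struct mutex_waiter *w)
{
	assert(w->task == task);	/* DEBUG_LOCKS_WARN_ON analogues */
	assert(task->blocked_on == w);
	task->blocked_on = NULL;
	w->task = NULL;
}

int main(void)
{
	struct task t = { NULL };
	struct mutex_waiter w = { .task = &t };

	debug_mutex_add_waiter(&t, &w);
	mutex_remove_waiter(&t, &w);
	return 0;
}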
kernel/locking/mutex-debug.h
@@ -20,9 +20,9 @@ extern void debug_mutex_wake_waiter(struct mutex *lock,
 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
 extern void debug_mutex_add_waiter(struct mutex *lock,
 				   struct mutex_waiter *waiter,
-				   struct thread_info *ti);
+				   struct task_struct *task);
 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
-				struct thread_info *ti);
+				struct task_struct *task);
 extern void debug_mutex_unlock(struct mutex *lock);
 extern void debug_mutex_init(struct mutex *lock, const char *name,
 			     struct lock_class_key *key);
kernel/locking/mutex.c
@@ -549,7 +549,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 		goto skip_wait;

 	debug_mutex_lock_common(lock, &waiter);
-	debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
+	debug_mutex_add_waiter(lock, &waiter, task);

 	/* add waiting tasks to the end of the waitqueue (FIFO): */
 	list_add_tail(&waiter.list, &lock->wait_list);
@@ -596,7 +596,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 	}
 	__set_task_state(task, TASK_RUNNING);

-	mutex_remove_waiter(lock, &waiter, current_thread_info());
+	mutex_remove_waiter(lock, &waiter, task);
 	/* set it to 0 if there are no waiters left: */
 	if (likely(list_empty(&lock->wait_list)))
 		atomic_set(&lock->count, 0);
@@ -617,7 +617,7 @@ skip_wait:
 	return 0;

 err:
-	mutex_remove_waiter(lock, &waiter, task_thread_info(task));
+	mutex_remove_waiter(lock, &waiter, task);
 	spin_unlock_mutex(&lock->wait_lock, flags);
 	debug_mutex_free_waiter(&waiter);
 	mutex_release(&lock->dep_map, 1, ip);
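__mutex_lock_common() already holds current in a local task variable, so wrapping it in task_thread_info()/current_thread_info() only for the callee to unwrap it again via ti->task was a pure round-trip; the hunks above pass task straight through. A compilable sketch of the before/after shapes; the struct layouts here are stand-ins, not the kernel's:

/* roundtrip_demo.c -- the indirection being removed, in miniature. */
struct task_struct;
struct thread_info { struct task_struct *task; };
struct task_struct { struct thread_info info; };

static struct thread_info *task_thread_info(struct task_struct *t)
{
	return &t->info;
}

/* old shape: callee recovers the task from thread_info */
static struct task_struct *owner_old(struct thread_info *ti)
{
	return ti->task;
}

/* new shape: the caller passes the task it already has */
static struct task_struct *owner_new(struct task_struct *task)
{
	return task;
}

int main(void)
{
	struct task_struct task = { .info.task = &task };

	/* both shapes name the same task; exit code 0 confirms it */
	return owner_old(task_thread_info(&task)) != owner_new(&task);
}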
kernel/locking/mutex.h
@@ -13,7 +13,7 @@
 		do { spin_lock(lock); (void)(flags); } while (0)
 #define spin_unlock_mutex(lock, flags) \
 		do { spin_unlock(lock); (void)(flags); } while (0)
-#define mutex_remove_waiter(lock, waiter, ti) \
+#define mutex_remove_waiter(lock, waiter, task) \
 		__list_del((waiter)->list.prev, (waiter)->list.next)

 #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
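In non-debug builds (this mutex.h), mutex_remove_waiter() degrades to a bare list removal and its renamed task parameter is simply ignored, which is what lets the debug and non-debug variants share one signature at every call site. A stand-alone sketch; the list helpers mimic, but are not, the kernel's:

/* stub_macro_demo.c -- the non-debug macro shape in miniature. */
#include <stdio.h>

struct list_head { struct list_head *prev, *next; };

static void __list_del(struct list_head *prev, struct list_head *next)
{
	next->prev = prev;
	prev->next = next;
}

/* The task argument documents intent but is unused here. */
#define mutex_remove_waiter(lock, waiter, task) \
		__list_del((waiter)->list.prev, (waiter)->list.next)

struct mutex_waiter { struct list_head list; };

int main(void)
{
	struct list_head head = { &head, &head };
	struct mutex_waiter w;

	/* insert w after head */
	w.list.prev = &head;
	w.list.next = head.next;
	head.next->prev = &w.list;
	head.next = &w.list;

	mutex_remove_waiter(NULL, &w, NULL);	/* task arg ignored */
	printf("list empty again: %d\n", head.next == &head);
	return 0;
}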
Some files were not shown because too many files have changed in this diff.