Merge tag 'v4.4.31' into linux-linaro-lsk-v4.4

This is the 4.4.31 stable release

commit 17d454ca33
67 changed files with 384 additions and 185 deletions
Makefile | 3

@@ -1,6 +1,6 @@
 VERSION = 4
 PATCHLEVEL = 4
-SUBLEVEL = 30
+SUBLEVEL = 31
 EXTRAVERSION =
 NAME = Blurry Fish Butt
 
@@ -617,6 +617,7 @@ include arch/$(SRCARCH)/Makefile
 
 KBUILD_CFLAGS += $(call cc-option,-fno-delete-null-pointer-checks,)
 KBUILD_CFLAGS += $(call cc-disable-warning,maybe-uninitialized,)
+KBUILD_CFLAGS += $(call cc-disable-warning,frame-address,)
 
 ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE
 KBUILD_CFLAGS += -Os
@@ -17,7 +17,7 @@
 
 #define fd_outb(val,port) \
 do { \
-if ((port) == FD_DOR) \
+if ((port) == (u32)FD_DOR) \
 fd_setdor((val)); \
 else \
 outb((val),(port)); \
@@ -31,7 +31,6 @@ struct thread_info {
 int cpu; /* cpu we're on */
 int preempt_count; /* 0 => preemptable, <0 => BUG */
 mm_segment_t addr_limit;
-struct restart_block restart_block;
 };
 
 /*
@@ -44,9 +43,6 @@ struct thread_info {
 .cpu = 0, \
 .preempt_count = INIT_PREEMPT_COUNT, \
 .addr_limit = KERNEL_DS, \
-.restart_block = { \
-.fn = do_no_restart_syscall, \
-}, \
 }
 
 #define init_thread_info (init_thread_union.thread_info)
@@ -79,7 +79,7 @@ restore_sigcontext(struct sigcontext *usc, int *pd0)
 unsigned int er0;
 
 /* Always make any pending restarted system calls return -EINTR */
-current_thread_info()->restart_block.fn = do_no_restart_syscall;
+current->restart_block.fn = do_no_restart_syscall;
 
 /* restore passed registers */
 #define COPY(r) do { err |= get_user(regs->r, &usc->sc_##r); } while (0)
@@ -752,15 +752,15 @@ enum emulation_result kvm_mips_emul_eret(struct kvm_vcpu *vcpu)
 struct mips_coproc *cop0 = vcpu->arch.cop0;
 enum emulation_result er = EMULATE_DONE;
 
-if (kvm_read_c0_guest_status(cop0) & ST0_EXL) {
+if (kvm_read_c0_guest_status(cop0) & ST0_ERL) {
+kvm_clear_c0_guest_status(cop0, ST0_ERL);
+vcpu->arch.pc = kvm_read_c0_guest_errorepc(cop0);
+} else if (kvm_read_c0_guest_status(cop0) & ST0_EXL) {
 kvm_debug("[%#lx] ERET to %#lx\n", vcpu->arch.pc,
 kvm_read_c0_guest_epc(cop0));
 kvm_clear_c0_guest_status(cop0, ST0_EXL);
 vcpu->arch.pc = kvm_read_c0_guest_epc(cop0);
 
-} else if (kvm_read_c0_guest_status(cop0) & ST0_ERL) {
-kvm_clear_c0_guest_status(cop0, ST0_ERL);
-vcpu->arch.pc = kvm_read_c0_guest_errorepc(cop0);
-
 } else {
 kvm_err("[%#lx] ERET when MIPS_SR_EXL|MIPS_SR_ERL == 0\n",
 vcpu->arch.pc);
@@ -106,8 +106,6 @@ linux_gateway_entry:
 mtsp %r0,%sr4 /* get kernel space into sr4 */
 mtsp %r0,%sr5 /* get kernel space into sr5 */
 mtsp %r0,%sr6 /* get kernel space into sr6 */
-mfsp %sr7,%r1 /* save user sr7 */
-mtsp %r1,%sr3 /* and store it in sr3 */
 
 #ifdef CONFIG_64BIT
 /* for now we can *always* set the W bit on entry to the syscall
@@ -133,6 +131,14 @@ linux_gateway_entry:
 depdi 0, 31, 32, %r21
 1:
 #endif
+
+/* We use a rsm/ssm pair to prevent sr3 from being clobbered
+* by external interrupts.
+*/
+mfsp %sr7,%r1 /* save user sr7 */
+rsm PSW_SM_I, %r0 /* disable interrupts */
+mtsp %r1,%sr3 /* and store it in sr3 */
+
 mfctl %cr30,%r1
 xor %r1,%r30,%r30 /* ye olde xor trick */
 xor %r1,%r30,%r1
@@ -147,6 +153,7 @@ linux_gateway_entry:
 */
 
 mtsp %r0,%sr7 /* get kernel space into sr7 */
+ssm PSW_SM_I, %r0 /* enable interrupts */
 STREGM %r1,FRAME_SIZE(%r30) /* save r1 (usp) here for now */
 mfctl %cr30,%r1 /* get task ptr in %r1 */
 LDREG TI_TASK(%r1),%r1
@@ -376,7 +376,7 @@ static int fpr_get(struct task_struct *target, const struct user_regset *regset,
 
 #else
 BUILD_BUG_ON(offsetof(struct thread_fp_state, fpscr) !=
-offsetof(struct thread_fp_state, fpr[32][0]));
+offsetof(struct thread_fp_state, fpr[32]));
 
 return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
 &target->thread.fp_state, 0, -1);
@@ -404,7 +404,7 @@ static int fpr_set(struct task_struct *target, const struct user_regset *regset,
 return 0;
 #else
 BUILD_BUG_ON(offsetof(struct thread_fp_state, fpscr) !=
-offsetof(struct thread_fp_state, fpr[32][0]));
+offsetof(struct thread_fp_state, fpr[32]));
 
 return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
 &target->thread.fp_state, 0, -1);
@@ -339,7 +339,7 @@ do { \
 #define __get_user_asm_u64(x, ptr, retval, errret) \
 __get_user_asm(x, ptr, retval, "q", "", "=r", errret)
 #define __get_user_asm_ex_u64(x, ptr) \
-__get_user_asm_ex(x, ptr, "q", "", "=r")
+__get_user_asm_ex(x, ptr, "q", "", "=&r")
 #endif
 
 #define __get_user_size(x, ptr, size, retval, errret) \
@@ -386,13 +386,13 @@ do { \
 __chk_user_ptr(ptr); \
 switch (size) { \
 case 1: \
-__get_user_asm_ex(x, ptr, "b", "b", "=q"); \
+__get_user_asm_ex(x, ptr, "b", "b", "=&q"); \
 break; \
 case 2: \
-__get_user_asm_ex(x, ptr, "w", "w", "=r"); \
+__get_user_asm_ex(x, ptr, "w", "w", "=&r"); \
 break; \
 case 4: \
-__get_user_asm_ex(x, ptr, "l", "k", "=r"); \
+__get_user_asm_ex(x, ptr, "l", "k", "=&r"); \
 break; \
 case 8: \
 __get_user_asm_ex_u64(x, ptr); \
@@ -406,7 +406,7 @@ do { \
 asm volatile("1: mov"itype" %1,%"rtype"0\n" \
 "2:\n" \
 _ASM_EXTABLE_EX(1b, 2b) \
-: ltype(x) : "m" (__m(addr)))
+: ltype(x) : "m" (__m(addr)), "0" (0))
 
 #define __put_user_nocheck(x, ptr, size) \
 ({ \
@@ -5033,7 +5033,7 @@ done_prefixes:
 /* Decode and fetch the destination operand: register or memory. */
 rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask);
 
-if (ctxt->rip_relative)
+if (ctxt->rip_relative && likely(ctxt->memopp))
 ctxt->memopp->addr.mem.ea = address_mask(ctxt,
 ctxt->memopp->addr.mem.ea + ctxt->_eip);
 
@@ -7252,10 +7252,12 @@ void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
 
 void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
 {
+void *wbinvd_dirty_mask = vcpu->arch.wbinvd_dirty_mask;
+
 kvmclock_reset(vcpu);
 
-free_cpumask_var(vcpu->arch.wbinvd_dirty_mask);
 kvm_x86_ops->vcpu_free(vcpu);
+free_cpumask_var(wbinvd_dirty_mask);
 }
 
 struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
@@ -1113,7 +1113,7 @@ static void __init xen_cleanhighmap(unsigned long vaddr,
 
 /* NOTE: The loop is more greedy than the cleanup_highmap variant.
 * We include the PMD passed in on _both_ boundaries. */
-for (; vaddr <= vaddr_end && (pmd < (level2_kernel_pgt + PAGE_SIZE));
+for (; vaddr <= vaddr_end && (pmd < (level2_kernel_pgt + PTRS_PER_PMD));
 pmd++, vaddr += PMD_SIZE) {
 if (pmd_none(*pmd))
 continue;
@@ -1003,7 +1003,7 @@ static int binder_dec_node(struct binder_node *node, int strong, int internal)
 
 
 static struct binder_ref *binder_get_ref(struct binder_proc *proc,
-uint32_t desc)
+u32 desc, bool need_strong_ref)
 {
 struct rb_node *n = proc->refs_by_desc.rb_node;
 struct binder_ref *ref;
@@ -1011,12 +1011,16 @@ static struct binder_ref *binder_get_ref(struct binder_proc *proc,
 while (n) {
 ref = rb_entry(n, struct binder_ref, rb_node_desc);
 
-if (desc < ref->desc)
+if (desc < ref->desc) {
 n = n->rb_left;
-else if (desc > ref->desc)
+} else if (desc > ref->desc) {
 n = n->rb_right;
-else
+} else if (need_strong_ref && !ref->strong) {
+binder_user_error("tried to use weak ref as strong ref\n");
+return NULL;
+} else {
 return ref;
+}
 }
 return NULL;
 }
@@ -1286,7 +1290,10 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
 } break;
 case BINDER_TYPE_HANDLE:
 case BINDER_TYPE_WEAK_HANDLE: {
-struct binder_ref *ref = binder_get_ref(proc, fp->handle);
+struct binder_ref *ref;
 
+ref = binder_get_ref(proc, fp->handle,
+fp->type == BINDER_TYPE_HANDLE);
+
 if (ref == NULL) {
 pr_err("transaction release %d bad handle %d\n",
@@ -1380,7 +1387,7 @@ static void binder_transaction(struct binder_proc *proc,
 if (tr->target.handle) {
 struct binder_ref *ref;
 
-ref = binder_get_ref(proc, tr->target.handle);
+ref = binder_get_ref(proc, tr->target.handle, true);
 if (ref == NULL) {
 binder_user_error("%d:%d got transaction to invalid handle\n",
 proc->pid, thread->pid);
@@ -1571,7 +1578,9 @@ static void binder_transaction(struct binder_proc *proc,
 fp->type = BINDER_TYPE_HANDLE;
 else
 fp->type = BINDER_TYPE_WEAK_HANDLE;
+fp->binder = 0;
 fp->handle = ref->desc;
+fp->cookie = 0;
 binder_inc_ref(ref, fp->type == BINDER_TYPE_HANDLE,
 &thread->todo);
 
@@ -1583,7 +1592,10 @@ static void binder_transaction(struct binder_proc *proc,
 } break;
 case BINDER_TYPE_HANDLE:
 case BINDER_TYPE_WEAK_HANDLE: {
-struct binder_ref *ref = binder_get_ref(proc, fp->handle);
+struct binder_ref *ref;
 
+ref = binder_get_ref(proc, fp->handle,
+fp->type == BINDER_TYPE_HANDLE);
+
 if (ref == NULL) {
 binder_user_error("%d:%d got transaction with invalid handle, %d\n",
@@ -1618,7 +1630,9 @@ static void binder_transaction(struct binder_proc *proc,
 return_error = BR_FAILED_REPLY;
 goto err_binder_get_ref_for_node_failed;
 }
+fp->binder = 0;
 fp->handle = new_ref->desc;
+fp->cookie = 0;
 binder_inc_ref(new_ref, fp->type == BINDER_TYPE_HANDLE, NULL);
 trace_binder_transaction_ref_to_ref(t, ref,
 new_ref);
@@ -1672,6 +1686,7 @@ static void binder_transaction(struct binder_proc *proc,
 binder_debug(BINDER_DEBUG_TRANSACTION,
 " fd %d -> %d\n", fp->handle, target_fd);
 /* TODO: fput? */
+fp->binder = 0;
 fp->handle = target_fd;
 } break;
 
@@ -1794,7 +1809,9 @@ static int binder_thread_write(struct binder_proc *proc,
 ref->desc);
 }
 } else
-ref = binder_get_ref(proc, target);
+ref = binder_get_ref(proc, target,
+cmd == BC_ACQUIRE ||
+cmd == BC_RELEASE);
 if (ref == NULL) {
 binder_user_error("%d:%d refcount change on invalid ref %d\n",
 proc->pid, thread->pid, target);
@@ -1990,7 +2007,7 @@ static int binder_thread_write(struct binder_proc *proc,
 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
 return -EFAULT;
 ptr += sizeof(binder_uintptr_t);
-ref = binder_get_ref(proc, target);
+ref = binder_get_ref(proc, target, false);
 if (ref == NULL) {
 binder_user_error("%d:%d %s invalid ref %d\n",
 proc->pid, thread->pid,
@@ -1533,19 +1533,29 @@ static void remove_port_data(struct port *port)
 spin_lock_irq(&port->inbuf_lock);
 /* Remove unused data this port might have received. */
 discard_port_data(port);
+spin_unlock_irq(&port->inbuf_lock);
 
 /* Remove buffers we queued up for the Host to send us data in. */
-while ((buf = virtqueue_detach_unused_buf(port->in_vq)))
-free_buf(buf, true);
-spin_unlock_irq(&port->inbuf_lock);
+do {
+spin_lock_irq(&port->inbuf_lock);
+buf = virtqueue_detach_unused_buf(port->in_vq);
+spin_unlock_irq(&port->inbuf_lock);
+if (buf)
+free_buf(buf, true);
+} while (buf);
 
 spin_lock_irq(&port->outvq_lock);
 reclaim_consumed_buffers(port);
+spin_unlock_irq(&port->outvq_lock);
 
 /* Free pending buffers from the out-queue. */
-while ((buf = virtqueue_detach_unused_buf(port->out_vq)))
-free_buf(buf, true);
-spin_unlock_irq(&port->outvq_lock);
+do {
+spin_lock_irq(&port->outvq_lock);
+buf = virtqueue_detach_unused_buf(port->out_vq);
+spin_unlock_irq(&port->outvq_lock);
+if (buf)
+free_buf(buf, true);
+} while (buf);
 }
 
 /*
@@ -73,13 +73,13 @@ struct rfc2734_header {
 
 #define fwnet_get_hdr_lf(h) (((h)->w0 & 0xc0000000) >> 30)
 #define fwnet_get_hdr_ether_type(h) (((h)->w0 & 0x0000ffff))
-#define fwnet_get_hdr_dg_size(h) (((h)->w0 & 0x0fff0000) >> 16)
+#define fwnet_get_hdr_dg_size(h) ((((h)->w0 & 0x0fff0000) >> 16) + 1)
 #define fwnet_get_hdr_fg_off(h) (((h)->w0 & 0x00000fff))
 #define fwnet_get_hdr_dgl(h) (((h)->w1 & 0xffff0000) >> 16)
 
 #define fwnet_set_hdr_lf(lf) ((lf) << 30)
 #define fwnet_set_hdr_ether_type(et) (et)
-#define fwnet_set_hdr_dg_size(dgs) ((dgs) << 16)
+#define fwnet_set_hdr_dg_size(dgs) (((dgs) - 1) << 16)
 #define fwnet_set_hdr_fg_off(fgo) (fgo)
 
 #define fwnet_set_hdr_dgl(dgl) ((dgl) << 16)
@@ -578,6 +578,9 @@ static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len,
 int retval;
 u16 ether_type;
 
+if (len <= RFC2374_UNFRAG_HDR_SIZE)
+return 0;
+
 hdr.w0 = be32_to_cpu(buf[0]);
 lf = fwnet_get_hdr_lf(&hdr);
 if (lf == RFC2374_HDR_UNFRAG) {
@@ -602,7 +605,12 @@ static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len,
 return fwnet_finish_incoming_packet(net, skb, source_node_id,
 is_broadcast, ether_type);
 }
+
 /* A datagram fragment has been received, now the fun begins. */
+
+if (len <= RFC2374_FRAG_HDR_SIZE)
+return 0;
+
 hdr.w1 = ntohl(buf[1]);
 buf += 2;
 len -= RFC2374_FRAG_HDR_SIZE;
@@ -614,7 +622,10 @@ static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len,
 fg_off = fwnet_get_hdr_fg_off(&hdr);
 }
 datagram_label = fwnet_get_hdr_dgl(&hdr);
-dg_size = fwnet_get_hdr_dg_size(&hdr); /* ??? + 1 */
+dg_size = fwnet_get_hdr_dg_size(&hdr);
 
+if (fg_off + len > dg_size)
+return 0;
+
 spin_lock_irqsave(&dev->lock, flags);
 
@@ -722,6 +733,22 @@ static void fwnet_receive_packet(struct fw_card *card, struct fw_request *r,
 fw_send_response(card, r, rcode);
 }
 
+static int gasp_source_id(__be32 *p)
+{
+return be32_to_cpu(p[0]) >> 16;
+}
+
+static u32 gasp_specifier_id(__be32 *p)
+{
+return (be32_to_cpu(p[0]) & 0xffff) << 8 |
+(be32_to_cpu(p[1]) & 0xff000000) >> 24;
+}
+
+static u32 gasp_version(__be32 *p)
+{
+return be32_to_cpu(p[1]) & 0xffffff;
+}
+
 static void fwnet_receive_broadcast(struct fw_iso_context *context,
 u32 cycle, size_t header_length, void *header, void *data)
 {
@@ -731,9 +758,6 @@ static void fwnet_receive_broadcast(struct fw_iso_context *context,
 __be32 *buf_ptr;
 int retval;
 u32 length;
-u16 source_node_id;
-u32 specifier_id;
-u32 ver;
 unsigned long offset;
 unsigned long flags;
 
@@ -750,22 +774,17 @@ static void fwnet_receive_broadcast(struct fw_iso_context *context,
 
 spin_unlock_irqrestore(&dev->lock, flags);
 
-specifier_id = (be32_to_cpu(buf_ptr[0]) & 0xffff) << 8
-| (be32_to_cpu(buf_ptr[1]) & 0xff000000) >> 24;
-ver = be32_to_cpu(buf_ptr[1]) & 0xffffff;
-source_node_id = be32_to_cpu(buf_ptr[0]) >> 16;
-
-if (specifier_id == IANA_SPECIFIER_ID &&
-(ver == RFC2734_SW_VERSION
+if (length > IEEE1394_GASP_HDR_SIZE &&
+gasp_specifier_id(buf_ptr) == IANA_SPECIFIER_ID &&
+(gasp_version(buf_ptr) == RFC2734_SW_VERSION
 #if IS_ENABLED(CONFIG_IPV6)
-|| ver == RFC3146_SW_VERSION
+|| gasp_version(buf_ptr) == RFC3146_SW_VERSION
 #endif
-)) {
-buf_ptr += 2;
-length -= IEEE1394_GASP_HDR_SIZE;
-fwnet_incoming_packet(dev, buf_ptr, length, source_node_id,
+))
+fwnet_incoming_packet(dev, buf_ptr + 2,
+length - IEEE1394_GASP_HDR_SIZE,
+gasp_source_id(buf_ptr),
 context->card->generation, true);
-}
 
 packet.payload_length = dev->rcv_buffer_size;
 packet.interrupt = 1;
@@ -909,6 +909,7 @@ static void drm_dp_destroy_port(struct kref *kref)
 /* no need to clean up vcpi
 * as if we have no connector we never setup a vcpi */
 drm_dp_port_teardown_pdt(port, port->pdt);
+port->pdt = DP_PEER_DEVICE_NONE;
 }
 kfree(port);
 }
@@ -1154,7 +1155,9 @@ static void drm_dp_add_port(struct drm_dp_mst_branch *mstb,
 drm_dp_put_port(port);
 goto out;
 }
-if (port->port_num >= DP_MST_LOGICAL_PORT_0) {
+if ((port->pdt == DP_PEER_DEVICE_DP_LEGACY_CONV ||
+port->pdt == DP_PEER_DEVICE_SST_SINK) &&
+port->port_num >= DP_MST_LOGICAL_PORT_0) {
 port->cached_edid = drm_get_edid(port->connector, &port->aux.ddc);
 drm_mode_connector_set_tile_property(port->connector);
 }
@@ -2872,6 +2875,7 @@ static void drm_dp_destroy_connector_work(struct work_struct *work)
 mgr->cbs->destroy_connector(mgr, port->connector);
 
 drm_dp_port_teardown_pdt(port, port->pdt);
+port->pdt = DP_PEER_DEVICE_NONE;
 
 if (!port->input && port->vcpi.vcpi > 0) {
 drm_dp_mst_reset_vcpi_slots(mgr, port);
@@ -101,7 +101,7 @@ int exynos_drm_subdrv_open(struct drm_device *dev, struct drm_file *file)
 return 0;
 
 err:
-list_for_each_entry_reverse(subdrv, &subdrv->list, list) {
+list_for_each_entry_continue_reverse(subdrv, &exynos_drm_subdrv_list, list) {
 if (subdrv->close)
 subdrv->close(dev, subdrv->dev, file);
 }
@@ -1396,9 +1396,7 @@ static void cayman_pcie_gart_fini(struct radeon_device *rdev)
 void cayman_cp_int_cntl_setup(struct radeon_device *rdev,
 int ring, u32 cp_int_cntl)
 {
-u32 srbm_gfx_cntl = RREG32(SRBM_GFX_CNTL) & ~3;
-
-WREG32(SRBM_GFX_CNTL, srbm_gfx_cntl | (ring & 3));
+WREG32(SRBM_GFX_CNTL, RINGID(ring));
 WREG32(CP_INT_CNTL, cp_int_cntl);
 }
 
@@ -105,7 +105,7 @@ radeon_dp_aux_transfer_native(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg
 
 tmp &= AUX_HPD_SEL(0x7);
 tmp |= AUX_HPD_SEL(chan->rec.hpd);
-tmp |= AUX_EN | AUX_LS_READ_EN | AUX_HPD_DISCON(0x1);
+tmp |= AUX_EN | AUX_LS_READ_EN;
 
 WREG32(AUX_CONTROL + aux_offset[instance], tmp);
 
@@ -2999,6 +2999,49 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
 int i;
 struct si_dpm_quirk *p = si_dpm_quirk_list;
 
+/* limit all SI kickers */
+if (rdev->family == CHIP_PITCAIRN) {
+if ((rdev->pdev->revision == 0x81) ||
+(rdev->pdev->device == 0x6810) ||
+(rdev->pdev->device == 0x6811) ||
+(rdev->pdev->device == 0x6816) ||
+(rdev->pdev->device == 0x6817) ||
+(rdev->pdev->device == 0x6806))
+max_mclk = 120000;
+} else if (rdev->family == CHIP_VERDE) {
+if ((rdev->pdev->revision == 0x81) ||
+(rdev->pdev->revision == 0x83) ||
+(rdev->pdev->revision == 0x87) ||
+(rdev->pdev->device == 0x6820) ||
+(rdev->pdev->device == 0x6821) ||
+(rdev->pdev->device == 0x6822) ||
+(rdev->pdev->device == 0x6823) ||
+(rdev->pdev->device == 0x682A) ||
+(rdev->pdev->device == 0x682B)) {
+max_sclk = 75000;
+max_mclk = 80000;
+}
+} else if (rdev->family == CHIP_OLAND) {
+if ((rdev->pdev->revision == 0xC7) ||
+(rdev->pdev->revision == 0x80) ||
+(rdev->pdev->revision == 0x81) ||
+(rdev->pdev->revision == 0x83) ||
+(rdev->pdev->device == 0x6604) ||
+(rdev->pdev->device == 0x6605)) {
+max_sclk = 75000;
+max_mclk = 80000;
+}
+} else if (rdev->family == CHIP_HAINAN) {
+if ((rdev->pdev->revision == 0x81) ||
+(rdev->pdev->revision == 0x83) ||
+(rdev->pdev->revision == 0xC3) ||
+(rdev->pdev->device == 0x6664) ||
+(rdev->pdev->device == 0x6665) ||
+(rdev->pdev->device == 0x6667)) {
+max_sclk = 75000;
+max_mclk = 80000;
+}
+}
 /* Apply dpm quirks */
 while (p && p->chip_device != 0) {
 if (rdev->pdev->vendor == p->chip_vendor &&
@@ -3011,16 +3054,6 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
 }
 ++p;
 }
-/* limit mclk on all R7 370 parts for stability */
-if (rdev->pdev->device == 0x6811 &&
-rdev->pdev->revision == 0x81)
-max_mclk = 120000;
-/* limit sclk/mclk on Jet parts for stability */
-if (rdev->pdev->device == 0x6665 &&
-rdev->pdev->revision == 0xc3) {
-max_sclk = 75000;
-max_mclk = 80000;
-}
 
 if (rps->vce_active) {
 rps->evclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].evclk;
@@ -168,6 +168,7 @@
 #define USB_DEVICE_ID_ATEN_4PORTKVM 0x2205
 #define USB_DEVICE_ID_ATEN_4PORTKVMC 0x2208
 #define USB_DEVICE_ID_ATEN_CS682 0x2213
+#define USB_DEVICE_ID_ATEN_CS692 0x8021
 
 #define USB_VENDOR_ID_ATMEL 0x03eb
 #define USB_DEVICE_ID_ATMEL_MULTITOUCH 0x211c
@@ -61,6 +61,7 @@ static const struct hid_blacklist {
 { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_4PORTKVM, HID_QUIRK_NOGET },
 { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_4PORTKVMC, HID_QUIRK_NOGET },
 { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_CS682, HID_QUIRK_NOGET },
+{ USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_CS692, HID_QUIRK_NOGET },
 { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_FIGHTERSTICK, HID_QUIRK_NOGET },
 { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_COMBATSTICK, HID_QUIRK_NOGET },
 { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_FLIGHT_SIM_ECLIPSE_YOKE, HID_QUIRK_NOGET },
@@ -283,10 +283,14 @@ static void heartbeat_onchannelcallback(void *context)
 u8 *hbeat_txf_buf = util_heartbeat.recv_buffer;
 struct icmsg_negotiate *negop = NULL;
 
-vmbus_recvpacket(channel, hbeat_txf_buf,
-PAGE_SIZE, &recvlen, &requestid);
+while (1) {
+
+vmbus_recvpacket(channel, hbeat_txf_buf,
+PAGE_SIZE, &recvlen, &requestid);
+
+if (!recvlen)
+break;
 
-if (recvlen > 0) {
 icmsghdrp = (struct icmsg_hdr *)&hbeat_txf_buf[
 sizeof(struct vmbuspipe_hdr)];
 
@@ -105,7 +105,7 @@ struct slimpro_i2c_dev {
 struct mbox_chan *mbox_chan;
 struct mbox_client mbox_client;
 struct completion rd_complete;
-u8 dma_buffer[I2C_SMBUS_BLOCK_MAX];
+u8 dma_buffer[I2C_SMBUS_BLOCK_MAX + 1]; /* dma_buffer[0] is used for length */
 u32 *resp_msg;
 };
 
@@ -1876,6 +1876,7 @@ int i2c_register_driver(struct module *owner, struct i2c_driver *driver)
 /* add the driver to the list of i2c drivers in the driver core */
 driver->driver.owner = owner;
 driver->driver.bus = &i2c_bus_type;
+INIT_LIST_HEAD(&driver->clients);
 
 /* When registration returns, the driver core
 * will have called probe() for all matching-but-unbound devices.
@@ -1886,7 +1887,6 @@ int i2c_register_driver(struct module *owner, struct i2c_driver *driver)
 
 pr_debug("i2c-core: driver [%s] registered\n", driver->driver.name);
 
-INIT_LIST_HEAD(&driver->clients);
 /* Walk the adapters that are already present */
 i2c_for_each_dev(driver, __process_new_driver);
 
@@ -877,6 +877,13 @@ static const struct dmi_system_id __initconst i8042_dmi_kbdreset_table[] = {
 DMI_MATCH(DMI_PRODUCT_NAME, "P34"),
 },
 },
+{
+/* Schenker XMG C504 - Elantech touchpad */
+.matches = {
+DMI_MATCH(DMI_SYS_VENDOR, "XMG"),
+DMI_MATCH(DMI_PRODUCT_NAME, "C504"),
+},
+},
 { }
 };
 
@@ -1288,6 +1288,7 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio, int error)
 
 dm_bio_restore(bd, bio);
 bio_record->details.bi_bdev = NULL;
+bio->bi_error = 0;
 
 queue_bio(ms, bio, rw);
 return DM_ENDIO_INCOMPLETE;
@@ -2260,8 +2260,6 @@ static void cleanup_mapped_device(struct mapped_device *md)
 if (md->bs)
 bioset_free(md->bs);
 
-cleanup_srcu_struct(&md->io_barrier);
-
 if (md->disk) {
 spin_lock(&_minor_lock);
 md->disk->private_data = NULL;
@@ -2273,6 +2271,8 @@ static void cleanup_mapped_device(struct mapped_device *md)
 if (md->queue)
 blk_cleanup_queue(md->queue);
 
+cleanup_srcu_struct(&md->io_barrier);
+
 if (md->bdev) {
 bdput(md->bdev);
 md->bdev = NULL;
@@ -352,17 +352,27 @@ int genwqe_alloc_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl,
 if (copy_from_user(sgl->lpage, user_addr + user_size -
 sgl->lpage_size, sgl->lpage_size)) {
 rc = -EFAULT;
-goto err_out1;
+goto err_out2;
 }
 }
 return 0;
 
+err_out2:
+__genwqe_free_consistent(cd, PAGE_SIZE, sgl->lpage,
+sgl->lpage_dma_addr);
+sgl->lpage = NULL;
+sgl->lpage_dma_addr = 0;
 err_out1:
 __genwqe_free_consistent(cd, PAGE_SIZE, sgl->fpage,
 sgl->fpage_dma_addr);
+sgl->fpage = NULL;
+sgl->fpage_dma_addr = 0;
 err_out:
 __genwqe_free_consistent(cd, sgl->sgl_size, sgl->sgl,
 sgl->sgl_dma_addr);
+sgl->sgl = NULL;
+sgl->sgl_dma_addr = 0;
+sgl->sgl_size = 0;
 return -ENOMEM;
 }
 
@@ -972,11 +972,13 @@ static bool mei_txe_check_and_ack_intrs(struct mei_device *dev, bool do_ack)
 hisr = mei_txe_br_reg_read(hw, HISR_REG);
 
 aliveness = mei_txe_aliveness_get(dev);
-if (hhisr & IPC_HHIER_SEC && aliveness)
+if (hhisr & IPC_HHIER_SEC && aliveness) {
 ipc_isr = mei_txe_sec_reg_read_silent(hw,
 SEC_IPC_HOST_INT_STATUS_REG);
-else
+} else {
 ipc_isr = 0;
+hhisr &= ~IPC_HHIER_SEC;
+}
 
 generated = generated ||
 (hisr & HISR_INT_STS_MSK) ||
@@ -59,12 +59,13 @@ int dw_mci_pltfm_register(struct platform_device *pdev,
 host->pdata = pdev->dev.platform_data;
 
 regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-/* Get registers' physical base address */
-host->phy_regs = regs->start;
 host->regs = devm_ioremap_resource(&pdev->dev, regs);
 if (IS_ERR(host->regs))
 return PTR_ERR(host->regs);
 
+/* Get registers' physical base address */
+host->phy_regs = regs->start;
+
 platform_set_drvdata(pdev, host);
 return dw_mci_probe(host);
 }
@@ -513,10 +513,11 @@ static int scan_pool(struct ubi_device *ubi, struct ubi_attach_info *ai,
 unsigned long long ec = be64_to_cpu(ech->ec);
 unmap_peb(ai, pnum);
 dbg_bld("Adding PEB to free: %i", pnum);
+
 if (err == UBI_IO_FF_BITFLIPS)
-add_aeb(ai, free, pnum, ec, 1);
-else
-add_aeb(ai, free, pnum, ec, 0);
+scrub = 1;
+
+add_aeb(ai, free, pnum, ec, scrub);
 continue;
 } else if (err == 0 || err == UBI_IO_BITFLIPS) {
 dbg_bld("Found non empty PEB:%i in pool", pnum);
@@ -748,11 +749,11 @@ static int ubi_attach_fastmap(struct ubi_device *ubi,
 fmvhdr->vol_type,
 be32_to_cpu(fmvhdr->last_eb_bytes));
 
-if (!av)
-goto fail_bad;
-if (PTR_ERR(av) == -EINVAL) {
-ubi_err(ubi, "volume (ID %i) already exists",
-fmvhdr->vol_id);
+if (IS_ERR(av)) {
+if (PTR_ERR(av) == -EEXIST)
+ubi_err(ubi, "volume (ID %i) already exists",
+fmvhdr->vol_id);
+
 goto fail_bad;
 }
 
@@ -540,7 +540,7 @@ static inline void smc_rcv(struct net_device *dev)
 #define smc_special_lock(lock, flags) spin_lock_irqsave(lock, flags)
 #define smc_special_unlock(lock, flags) spin_unlock_irqrestore(lock, flags)
 #else
-#define smc_special_trylock(lock, flags) (flags == flags)
+#define smc_special_trylock(lock, flags) ((void)flags, true)
 #define smc_special_lock(lock, flags) do { flags = 0; } while (0)
 #define smc_special_unlock(lock, flags) do { flags = 0; } while (0)
 #endif
@@ -321,6 +321,8 @@ int pwmchip_remove(struct pwm_chip *chip)
 unsigned int i;
 int ret = 0;
 
+pwmchip_sysfs_unexport_children(chip);
+
 mutex_lock(&pwm_lock);
 
 for (i = 0; i < chip->npwm; i++) {
@@ -350,6 +350,24 @@ void pwmchip_sysfs_unexport(struct pwm_chip *chip)
 }
 }
 
+void pwmchip_sysfs_unexport_children(struct pwm_chip *chip)
+{
+struct device *parent;
+unsigned int i;
+
+parent = class_find_device(&pwm_class, NULL, chip,
+pwmchip_sysfs_match);
+if (!parent)
+return;
+
+for (i = 0; i < chip->npwm; i++) {
+struct pwm_device *pwm = &chip->pwms[i];
+
+if (test_bit(PWMF_EXPORTED, &pwm->flags))
+pwm_unexport_child(parent, pwm);
+}
+}
+
 static int __init pwm_sysfs_init(void)
 {
 return class_register(&pwm_class);
@@ -2545,18 +2545,9 @@ static int arcmsr_queue_command_lck(struct scsi_cmnd *cmd,
 struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata;
 struct CommandControlBlock *ccb;
 int target = cmd->device->id;
-int lun = cmd->device->lun;
-uint8_t scsicmd = cmd->cmnd[0];
 cmd->scsi_done = done;
 cmd->host_scribble = NULL;
 cmd->result = 0;
-if ((scsicmd == SYNCHRONIZE_CACHE) ||(scsicmd == SEND_DIAGNOSTIC)){
-if(acb->devstate[target][lun] == ARECA_RAID_GONE) {
-cmd->result = (DID_NO_CONNECT << 16);
-}
-cmd->scsi_done(cmd);
-return 0;
-}
 if (target == 16) {
 /* virtual device for iop message transfer */
 arcmsr_handle_virtual_command(acb, cmd);
@@ -1688,16 +1688,13 @@ megasas_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
 goto out_done;
 }
 
-switch (scmd->cmnd[0]) {
-case SYNCHRONIZE_CACHE:
-/*
-* FW takes care of flush cache on its own
-* No need to send it down
-*/
+/*
+* FW takes care of flush cache on its own for Virtual Disk.
+* No need to send it down for VD. For JBOD send SYNCHRONIZE_CACHE to FW.
+*/
+if ((scmd->cmnd[0] == SYNCHRONIZE_CACHE) && MEGASAS_IS_LOGICAL(scmd)) {
 scmd->result = DID_OK << 16;
 goto out_done;
-default:
-break;
 }
 
 if (instance->instancet->build_and_issue_cmd(instance, scmd)) {
@@ -4981,6 +4981,7 @@ static void __exit scsi_debug_exit(void)
 bus_unregister(&pseudo_lld_bus);
 root_device_unregister(pseudo_primary);
 
+vfree(map_storep);
 vfree(dif_storep);
 vfree(fake_storep);
 }
@@ -872,10 +872,15 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc,
 if (new_cols == vc->vc_cols && new_rows == vc->vc_rows)
 return 0;
 
+if (new_screen_size > (4 << 20))
+return -EINVAL;
 newscreen = kmalloc(new_screen_size, GFP_USER);
 if (!newscreen)
 return -ENOMEM;
 
+if (vc == sel_cons)
+clear_selection();
+
 old_rows = vc->vc_rows;
 old_row_size = vc->vc_size_row;
 
@@ -1173,7 +1178,7 @@ static void csi_J(struct vc_data *vc, int vpar)
 break;
 case 3: /* erase scroll-back buffer (and whole display) */
 scr_memsetw(vc->vc_screenbuf, vc->vc_video_erase_char,
-vc->vc_screenbuf_size >> 1);
+vc->vc_screenbuf_size);
 set_origin(vc);
 if (CON_IS_VISIBLE(vc))
 update_screen(vc);
@@ -2845,7 +2845,7 @@ err3:
 kfree(dwc->setup_buf);
 
 err2:
-dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb),
+dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb) * 2,
 dwc->ep0_trb, dwc->ep0_trb_addr);
 
 err1:
@@ -2869,7 +2869,7 @@ void dwc3_gadget_exit(struct dwc3 *dwc)
 
 kfree(dwc->setup_buf);
 
-dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb),
+dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb) * 2,
 dwc->ep0_trb, dwc->ep0_trb_addr);
 
 dma_free_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
@@ -596,8 +596,9 @@ static netdev_tx_t eth_start_xmit(struct sk_buff *skb,
 
 /* throttle high/super speed IRQ rate back slightly */
 if (gadget_is_dualspeed(dev->gadget))
-req->no_interrupt = (dev->gadget->speed == USB_SPEED_HIGH ||
-dev->gadget->speed == USB_SPEED_SUPER)
+req->no_interrupt = (((dev->gadget->speed == USB_SPEED_HIGH ||
+dev->gadget->speed == USB_SPEED_SUPER)) &&
+!list_empty(&dev->tx_reqs))
 ? ((atomic_read(&dev->tx_qlen) % dev->qmult) != 0)
 : 0;
 
@@ -72,7 +72,7 @@
 static const char hcd_name [] = "ohci_hcd";
 
 #define STATECHANGE_DELAY msecs_to_jiffies(300)
-#define IO_WATCHDOG_DELAY msecs_to_jiffies(250)
+#define IO_WATCHDOG_DELAY msecs_to_jiffies(275)
 
 #include "ohci.h"
 #include "pci-quirks.h"
@@ -1157,7 +1157,7 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
 xhci_set_link_state(xhci, port_array, wIndex,
 XDEV_RESUME);
 spin_unlock_irqrestore(&xhci->lock, flags);
-msleep(20);
+msleep(USB_RESUME_TIMEOUT);
 spin_lock_irqsave(&xhci->lock, flags);
 xhci_set_link_state(xhci, port_array, wIndex,
 XDEV_U0);
@@ -1401,7 +1401,7 @@ int xhci_bus_resume(struct usb_hcd *hcd)
 
 if (need_usb2_u3_exit) {
 spin_unlock_irqrestore(&xhci->lock, flags);
-msleep(20);
+msleep(USB_RESUME_TIMEOUT);
 spin_lock_irqsave(&xhci->lock, flags);
 }
 
@@ -45,6 +45,7 @@
 
 #define PCI_DEVICE_ID_INTEL_LYNXPOINT_XHCI 0x8c31
 #define PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI 0x9c31
+#define PCI_DEVICE_ID_INTEL_WILDCATPOINT_LP_XHCI 0x9cb1
 #define PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI 0x22b5
 #define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI 0xa12f
 #define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI 0x9d2f
@@ -154,7 +155,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
 xhci->quirks |= XHCI_SPURIOUS_REBOOT;
 }
 if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
-pdev->device == PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI) {
+(pdev->device == PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI ||
+pdev->device == PCI_DEVICE_ID_INTEL_WILDCATPOINT_LP_XHCI)) {
 xhci->quirks |= XHCI_SPURIOUS_REBOOT;
 xhci->quirks |= XHCI_SPURIOUS_WAKEUP;
 }
@@ -845,7 +845,9 @@ static int cp210x_tiocmget(struct tty_struct *tty)
 unsigned int control;
 int result;
 
-cp210x_get_config(port, CP210X_GET_MDMSTS, &control, 1);
+result = cp210x_get_config(port, CP210X_GET_MDMSTS, &control, 1);
+if (result)
+return result;
 
 result = ((control & CONTROL_DTR) ? TIOCM_DTR : 0)
 |((control & CONTROL_RTS) ? TIOCM_RTS : 0)
@@ -986,7 +986,8 @@ static const struct usb_device_id id_table_combined[] = {
 /* ekey Devices */
 { USB_DEVICE(FTDI_VID, FTDI_EKEY_CONV_USB_PID) },
 /* Infineon Devices */
-{ USB_DEVICE_INTERFACE_NUMBER(INFINEON_VID, INFINEON_TRIBOARD_PID, 1) },
+{ USB_DEVICE_INTERFACE_NUMBER(INFINEON_VID, INFINEON_TRIBOARD_TC1798_PID, 1) },
+{ USB_DEVICE_INTERFACE_NUMBER(INFINEON_VID, INFINEON_TRIBOARD_TC2X7_PID, 1) },
 /* GE Healthcare devices */
 { USB_DEVICE(GE_HEALTHCARE_VID, GE_HEALTHCARE_NEMO_TRACKER_PID) },
 /* Active Research (Actisense) devices */
@@ -626,8 +626,9 @@
 /*
 * Infineon Technologies
 */
 #define INFINEON_VID 0x058b
-#define INFINEON_TRIBOARD_PID 0x0028 /* DAS JTAG TriBoard TC1798 V1.0 */
+#define INFINEON_TRIBOARD_TC1798_PID 0x0028 /* DAS JTAG TriBoard TC1798 V1.0 */
+#define INFINEON_TRIBOARD_TC2X7_PID 0x0043 /* DAS JTAG TriBoard TC2X7 V1.0 */
 
 /*
 * Acton Research Corp.
@@ -1077,7 +1077,8 @@ static int usb_serial_probe(struct usb_interface *interface,
 
 serial->disconnected = 0;
 
-usb_serial_console_init(serial->port[0]->minor);
+if (num_ports > 0)
+usb_serial_console_init(serial->port[0]->minor);
 exit:
 module_put(type->driver.owner);
 return 0;
@@ -548,7 +548,8 @@ void virtqueue_disable_cb(struct virtqueue *_vq)
 
 if (!(vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT)) {
 vq->avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
-vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
+if (!vq->event)
+vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
 }
 
 }
@@ -580,7 +581,8 @@ unsigned virtqueue_enable_cb_prepare(struct virtqueue *_vq)
 * entry. Always do both to keep code simple. */
 if (vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
 vq->avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
-vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
+if (!vq->event)
+vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
 }
 vring_used_event(&vq->vring) = cpu_to_virtio16(_vq->vdev, last_used_idx = vq->last_used_idx);
 END_USE(vq);
@@ -648,10 +650,11 @@ bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
 * more to do. */
 /* Depending on the VIRTIO_RING_F_USED_EVENT_IDX feature, we need to
 * either clear the flags bit or point the event index at the next
-* entry. Always do both to keep code simple. */
+* entry. Always update the event index to keep code simple. */
 if (vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
 vq->avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
-vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
+if (!vq->event)
+vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
 }
 /* TODO: tune this threshold */
 bufs = (u16)(vq->avail_idx_shadow - vq->last_used_idx) * 3 / 4;
@@ -770,7 +773,8 @@ struct virtqueue *vring_new_virtqueue(unsigned int index,
 /* No callback? Tell other side not to bother us. */
 if (!callback) {
 vq->avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
-vq->vring.avail->flags = cpu_to_virtio16(vdev, vq->avail_flags_shadow);
+if (!vq->event)
+vq->vring.avail->flags = cpu_to_virtio16(vdev, vq->avail_flags_shadow);
 }
 
 /* Put everything in free lists. */
@@ -2696,14 +2696,12 @@ static inline void btrfs_remove_all_log_ctxs(struct btrfs_root *root,
                                             int index, int error)
 {
        struct btrfs_log_ctx *ctx;
+       struct btrfs_log_ctx *safe;
 
-       if (!error) {
-               INIT_LIST_HEAD(&root->log_ctxs[index]);
-               return;
-       }
-
-       list_for_each_entry(ctx, &root->log_ctxs[index], list)
+       list_for_each_entry_safe(ctx, safe, &root->log_ctxs[index], list) {
+               list_del_init(&ctx->list);
                ctx->log_ret = error;
+       }
 
        INIT_LIST_HEAD(&root->log_ctxs[index]);
 }
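
The rewritten helper deletes entries from the list it is walking, which is why it moves to the _safe iterator. A self-contained C sketch of the same idiom on a toy singly linked list, caching the next pointer before a node is detached:

#include <stdio.h>

struct node {
        int log_ret;
        struct node *next;
};

/* Walk the list, record the error in every entry and unlink it.
 * The "safe" part: cache n->next before the node is detached. */
static void remove_all(struct node **head, int error)
{
        struct node *n = *head;

        while (n) {
                struct node *next = n->next;  /* saved before unlinking */

                n->log_ret = error;
                n->next = NULL;               /* detach, as list_del_init() would */
                n = next;
        }
        *head = NULL;                         /* list is now empty */
}

int main(void)
{
        struct node c = { 0, NULL }, b = { 0, &c }, a = { 0, &b };
        struct node *head = &a;

        remove_all(&head, -5);
        printf("a=%d b=%d c=%d head=%p\n", a.log_ret, b.log_ret, c.log_ret, (void *)head);
        return 0;
}
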
@@ -2944,13 +2942,9 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
        mutex_unlock(&root->log_mutex);
 
 out_wake_log_root:
-       /*
-        * We needn't get log_mutex here because we are sure all
-        * the other tasks are blocked.
-        */
+       mutex_lock(&log_root_tree->log_mutex);
        btrfs_remove_all_log_ctxs(log_root_tree, index2, ret);
 
-       mutex_lock(&log_root_tree->log_mutex);
        log_root_tree->log_transid_committed++;
        atomic_set(&log_root_tree->log_commit[index2], 0);
        mutex_unlock(&log_root_tree->log_mutex);
@@ -2961,10 +2955,8 @@ out_wake_log_root:
        if (waitqueue_active(&log_root_tree->log_commit_wait[index2]))
                wake_up(&log_root_tree->log_commit_wait[index2]);
 out:
-       /* See above. */
-       btrfs_remove_all_log_ctxs(root, index1, ret);
-
        mutex_lock(&root->log_mutex);
+       btrfs_remove_all_log_ctxs(root, index1, ret);
        root->log_transid_committed++;
        atomic_set(&root->log_commit[index1], 0);
        mutex_unlock(&root->log_mutex);
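
Both btrfs_sync_log hunks move the ctx-list teardown inside the matching log_mutex section instead of relying on "all the other tasks are blocked". A tiny pthread sketch of that ordering, with made-up names; it only illustrates "touch the shared state while holding its lock", not the btrfs locking scheme itself:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t log_mutex = PTHREAD_MUTEX_INITIALIZER;
static int pending_ctxs = 3;      /* stand-in for the shared ctx list */

/* Teardown path: take the lock first, then empty the shared state and
 * update the commit bookkeeping, all inside one critical section. */
static void *teardown(void *arg)
{
        (void)arg;
        pthread_mutex_lock(&log_mutex);
        pending_ctxs = 0;         /* models btrfs_remove_all_log_ctxs() */
        pthread_mutex_unlock(&log_mutex);
        return NULL;
}

int main(void)
{
        pthread_t t;

        pthread_create(&t, NULL, teardown, NULL);
        pthread_join(t, NULL);
        printf("pending ctxs: %d\n", pending_ctxs);
        return 0;
}
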
@@ -139,6 +139,8 @@ static int ovl_copy_up_data(struct path *old, struct path *new, loff_t len)
                len -= bytes;
        }
 
+       if (!error)
+               error = vfs_fsync(new_file, 0);
        fput(new_file);
 out_fput:
        fput(old_file);
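
The copy-up path now fsyncs the new file before returning success. The same copy-then-flush pattern in plain POSIX C, with hypothetical file names:

#include <fcntl.h>
#include <unistd.h>

/* Copy src to dst, then flush dst to stable storage before returning. */
static int copy_file(const char *src, const char *dst)
{
        char buf[4096];
        ssize_t n;
        int err = 0;
        int in = open(src, O_RDONLY);
        int out = open(dst, O_WRONLY | O_CREAT | O_TRUNC, 0644);

        if (in < 0 || out < 0) {
                if (in >= 0)
                        close(in);
                if (out >= 0)
                        close(out);
                return -1;
        }

        while ((n = read(in, buf, sizeof(buf))) > 0)
                if (write(out, buf, (size_t)n) != n) {
                        err = -1;
                        break;
                }
        if (n < 0)
                err = -1;

        /* The step the hunk above adds: flush before trusting the copy. */
        if (!err && fsync(out) != 0)
                err = -1;

        close(in);
        close(out);
        return err;
}

int main(void)
{
        return copy_file("/etc/hostname", "/tmp/hostname.copy") ? 1 : 0;
}
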
@@ -350,7 +350,7 @@ static unsigned int vfs_dent_type(uint8_t type)
  */
 static int ubifs_readdir(struct file *file, struct dir_context *ctx)
 {
-       int err;
+       int err = 0;
        struct qstr nm;
        union ubifs_key key;
        struct ubifs_dent_node *dent;
@@ -452,14 +452,20 @@ out:
        kfree(file->private_data);
        file->private_data = NULL;
 
-       if (err != -ENOENT) {
+       if (err != -ENOENT)
                ubifs_err(c, "cannot find next direntry, error %d", err);
-               return err;
-       }
+       else
+               /*
+                * -ENOENT is a non-fatal error in this context, the TNC uses
+                * it to indicate that the cursor moved past the current directory
+                * and readdir() has to stop.
+                */
+               err = 0;
+
 
        /* 2 is a special value indicating that there are no more direntries */
        ctx->pos = 2;
-       return 0;
+       return err;
 }
 
 /* Free saved readdir() state when the directory is closed */
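
The exit path now treats -ENOENT as the normal end-of-directory sentinel and only reports other errors. A small C sketch of that normalization, with an invented next_entry() iterator:

#include <errno.h>
#include <stdio.h>

/* Pretend iterator: returns 0 while entries remain, -ENOENT at the end,
 * and some other negative errno on a real failure. */
static int next_entry(int cursor)
{
        if (cursor < 3)
                return 0;
        return -ENOENT;
}

static int read_all(void)
{
        int err = 0;
        int cursor = 0;

        while ((err = next_entry(cursor)) == 0)
                cursor++;

        if (err != -ENOENT)
                fprintf(stderr, "cannot find next entry, error %d\n", err);
        else
                err = 0;   /* the sentinel only means "iteration finished" */

        return err;
}

int main(void)
{
        printf("read_all() = %d\n", read_all());
        return 0;
}
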
@@ -191,8 +191,7 @@ xfs_dquot_buf_verify_crc(
        if (mp->m_quotainfo)
                ndquots = mp->m_quotainfo->qi_dqperchunk;
        else
-               ndquots = xfs_calc_dquots_per_chunk(
-                                       XFS_BB_TO_FSB(mp, bp->b_length));
+               ndquots = xfs_calc_dquots_per_chunk(bp->b_length);
 
        for (i = 0; i < ndquots; i++, d++) {
                if (!xfs_verify_cksum((char *)d, sizeof(struct xfs_dqblk),
@@ -331,6 +331,7 @@ static inline void pwm_remove_table(struct pwm_lookup *table, size_t num)
 #ifdef CONFIG_PWM_SYSFS
 void pwmchip_sysfs_export(struct pwm_chip *chip);
 void pwmchip_sysfs_unexport(struct pwm_chip *chip);
+void pwmchip_sysfs_unexport_children(struct pwm_chip *chip);
 #else
 static inline void pwmchip_sysfs_export(struct pwm_chip *chip)
 {
@@ -339,6 +340,10 @@ static inline void pwmchip_sysfs_export(struct pwm_chip *chip)
 static inline void pwmchip_sysfs_unexport(struct pwm_chip *chip)
 {
 }
+
+static inline void pwmchip_sysfs_unexport_children(struct pwm_chip *chip)
+{
+}
 #endif /* CONFIG_PWM_SYSFS */
 
 #endif /* __LINUX_PWM_H */
@@ -236,6 +236,9 @@ static int cgroup_addrm_files(struct cgroup_subsys_state *css,
  */
 static bool cgroup_ssid_enabled(int ssid)
 {
+       if (CGROUP_SUBSYS_COUNT == 0)
+               return false;
+
        return static_key_enabled(cgroup_subsys_enabled_key[ssid]);
 }
 
7 mm/cma.c
@@ -183,7 +183,8 @@ int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
                return -EINVAL;
 
        /* ensure minimal alignment required by mm core */
-       alignment = PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order);
+       alignment = PAGE_SIZE <<
+                       max_t(unsigned long, MAX_ORDER - 1, pageblock_order);
 
        /* alignment should be aligned with order_per_bit */
        if (!IS_ALIGNED(alignment >> PAGE_SHIFT, 1 << order_per_bit))
|
@ -266,8 +267,8 @@ int __init cma_declare_contiguous(phys_addr_t base,
|
||||||
* migratetype page by page allocator's buddy algorithm. In the case,
|
* migratetype page by page allocator's buddy algorithm. In the case,
|
||||||
* you couldn't get a contiguous memory, which is not what we want.
|
* you couldn't get a contiguous memory, which is not what we want.
|
||||||
*/
|
*/
|
||||||
alignment = max(alignment,
|
alignment = max(alignment, (phys_addr_t)PAGE_SIZE <<
|
||||||
(phys_addr_t)PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order));
|
max_t(unsigned long, MAX_ORDER - 1, pageblock_order));
|
||||||
base = ALIGN(base, alignment);
|
base = ALIGN(base, alignment);
|
||||||
size = ALIGN(size, alignment);
|
size = ALIGN(size, alignment);
|
||||||
limit &= ~(alignment - 1);
|
limit &= ~(alignment - 1);
|
||||||
|
|
|
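
Both mm/cma.c hunks replace max() with max_t() because MAX_ORDER - 1 is a signed int while pageblock_order may have a different type, and the kernel's type-checked max() rejects mixed operands. A userspace illustration with a stand-in max_t macro (an assumption for the sketch, not the kernel's definition):

#include <stdio.h>

/* Illustrative stand-in: cast both operands to one type before comparing. */
#define max_t(type, a, b) ((type)(a) > (type)(b) ? (type)(a) : (type)(b))

int main(void)
{
        int max_order = 11;                 /* plays the role of MAX_ORDER */
        unsigned long pageblock_order = 9;  /* plays the role of pageblock_order */
        unsigned long page_size = 4096;

        /* Casting both sides to unsigned long avoids the signed/unsigned
         * mixed comparison a type-checked max() would refuse to compile. */
        unsigned long alignment =
                page_size << max_t(unsigned long, max_order - 1, pageblock_order);

        printf("alignment = %lu\n", alignment);  /* 4096 << 10 = 4194304 */
        return 0;
}
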
@@ -554,6 +554,8 @@ int __list_lru_init(struct list_lru *lru, bool memcg_aware,
        err = memcg_init_list_lru(lru, memcg_aware);
        if (err) {
                kfree(lru->node);
+               /* Do this so a list_lru_destroy() doesn't crash: */
+               lru->node = NULL;
                goto out;
        }
 
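
Clearing lru->node after the kfree() is what keeps a later list_lru_destroy() from freeing the pointer twice. The same defensive reset in a short C sketch with invented names:

#include <stdio.h>
#include <stdlib.h>

struct lru {
        int *node;
};

static void lru_destroy(struct lru *lru)
{
        free(lru->node);     /* free(NULL) is a defined no-op */
        lru->node = NULL;
}

static int lru_init(struct lru *lru)
{
        lru->node = malloc(sizeof(*lru->node));
        if (!lru->node)
                return -1;

        /* pretend a later init step failed */
        free(lru->node);
        lru->node = NULL;    /* so lru_destroy() won't double-free */
        return -1;
}

int main(void)
{
        struct lru lru = { NULL };

        if (lru_init(&lru) != 0)
                fprintf(stderr, "init failed\n");
        lru_destroy(&lru);   /* safe even after the failed init */
        return 0;
}
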
@@ -2055,6 +2055,15 @@ retry:
                     current->flags & PF_EXITING))
                goto force;
 
+       /*
+        * Prevent unbounded recursion when reclaim operations need to
+        * allocate memory. This might exceed the limits temporarily,
+        * but we prefer facilitating memory reclaim and getting back
+        * under the limit over triggering OOM kills in these cases.
+        */
+       if (unlikely(current->flags & PF_MEMALLOC))
+               goto force;
+
        if (unlikely(task_in_memcg_oom(current)))
                goto nomem;
 
@@ -2910,7 +2910,9 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
                                            sc.may_writepage,
                                            sc.gfp_mask);
 
+       current->flags |= PF_MEMALLOC;
        nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
+       current->flags &= ~PF_MEMALLOC;
 
        trace_mm_vmscan_memcg_reclaim_end(nr_reclaimed);
 
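
The vmscan hunk sets PF_MEMALLOC around the reclaim call, and the memcontrol hunk above bails out of try_charge() when it sees that flag, breaking the reclaim-allocates-memory recursion. A rough single-threaded C model of that flag protocol; the names are illustrative only:

#include <stdbool.h>
#include <stdio.h>

static bool in_reclaim;   /* models PF_MEMALLOC on the current task */

static void reclaim(void);

/* Models try_charge(): an allocation path that may trigger reclaim. */
static void charge(void)
{
        if (in_reclaim) {
                /* Already inside reclaim: don't recurse, just let it through. */
                printf("charge: forced while reclaiming\n");
                return;
        }
        printf("charge: over limit, entering reclaim\n");
        reclaim();
}

/* Models try_to_free_mem_cgroup_pages(): sets the flag for its duration. */
static void reclaim(void)
{
        in_reclaim = true;
        charge();          /* reclaim itself needs memory; the guard stops recursion */
        in_reclaim = false;
}

int main(void)
{
        charge();
        return 0;
}
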
@@ -2203,16 +2203,22 @@ ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx)
        if (!(status->rx_flags & IEEE80211_RX_AMSDU))
                return RX_CONTINUE;
 
-       if (ieee80211_has_a4(hdr->frame_control) &&
-           rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
-           !rx->sdata->u.vlan.sta)
-               return RX_DROP_UNUSABLE;
+       if (unlikely(ieee80211_has_a4(hdr->frame_control))) {
+               switch (rx->sdata->vif.type) {
+               case NL80211_IFTYPE_AP_VLAN:
+                       if (!rx->sdata->u.vlan.sta)
+                               return RX_DROP_UNUSABLE;
+                       break;
+               case NL80211_IFTYPE_STATION:
+                       if (!rx->sdata->u.mgd.use_4addr)
+                               return RX_DROP_UNUSABLE;
+                       break;
+               default:
+                       return RX_DROP_UNUSABLE;
+               }
+       }
 
-       if (is_multicast_ether_addr(hdr->addr1) &&
-           ((rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
-             rx->sdata->u.vlan.sta) ||
-            (rx->sdata->vif.type == NL80211_IFTYPE_STATION &&
-             rx->sdata->u.mgd.use_4addr)))
+       if (is_multicast_ether_addr(hdr->addr1))
                return RX_DROP_UNUSABLE;
 
        skb->dev = dev;
@@ -181,7 +181,7 @@ static int proc_keys_show(struct seq_file *m, void *v)
        struct timespec now;
        unsigned long timo;
        key_ref_t key_ref, skey_ref;
-       char xbuf[12];
+       char xbuf[16];
        int rc;
 
        struct keyring_search_context ctx = {
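
Growing xbuf from 12 to 16 bytes leaves room for the longest string the show routine can format. As a generic reminder, snprintf()'s return value says whether a stack buffer was big enough; the format and values below are made up for illustration:

#include <stdio.h>

int main(void)
{
        char xbuf[16];
        /* Hypothetical worst case: five flag characters, a space and a state word. */
        int n = snprintf(xbuf, sizeof(xbuf), "%c%c%c%c%c %s",
                         'I', 'R', 'D', 'Q', 'U', "revoked");

        if (n < 0 || (size_t)n >= sizeof(xbuf))
                fprintf(stderr, "xbuf too small: need %d bytes plus NUL\n", n);
        else
                printf("formatted: \"%s\" (%d chars)\n", xbuf, n);
        return 0;
}
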
@@ -956,7 +956,7 @@ irqreturn_t azx_interrupt(int irq, void *dev_id)
        status = azx_readb(chip, RIRBSTS);
        if (status & RIRB_INT_MASK) {
                if (status & RIRB_INT_RESPONSE) {
-                       if (chip->driver_caps & AZX_DCAPS_RIRB_PRE_DELAY)
+                       if (chip->driver_caps & AZX_DCAPS_CTX_WORKAROUND)
                                udelay(80);
                        snd_hdac_bus_update_rirb(bus);
                }
@@ -1055,11 +1055,6 @@ int azx_bus_init(struct azx *chip, const char *model,
        if (chip->driver_caps & AZX_DCAPS_CORBRP_SELF_CLEAR)
                bus->core.corbrp_self_clear = true;
 
-       if (chip->driver_caps & AZX_DCAPS_RIRB_DELAY) {
-               dev_dbg(chip->card->dev, "Enable delay in RIRB handling\n");
-               bus->needs_damn_long_delay = 1;
-       }
-
        if (chip->driver_caps & AZX_DCAPS_4K_BDLE_BOUNDARY)
                bus->core.align_bdle_4k = true;
 
@@ -32,8 +32,8 @@
 #define AZX_DCAPS_NO_MSI        (1 << 9)        /* No MSI support */
 #define AZX_DCAPS_SNOOP_MASK    (3 << 10)       /* snoop type mask */
 #define AZX_DCAPS_SNOOP_OFF     (1 << 12)       /* snoop default off */
-#define AZX_DCAPS_RIRB_DELAY    (1 << 13)       /* Long delay in read loop */
-#define AZX_DCAPS_RIRB_PRE_DELAY (1 << 14)      /* Put a delay before read */
+/* 13 unused */
+/* 14 unused */
 #define AZX_DCAPS_CTX_WORKAROUND (1 << 15)      /* X-Fi workaround */
 #define AZX_DCAPS_POSFIX_LPIB   (1 << 16)       /* Use LPIB as default */
 #define AZX_DCAPS_POSFIX_VIA    (1 << 17)       /* Use VIACOMBO as default */
@@ -334,8 +334,7 @@ enum {
 
 /* quirks for Nvidia */
 #define AZX_DCAPS_PRESET_NVIDIA \
-       (AZX_DCAPS_RIRB_DELAY | AZX_DCAPS_NO_MSI | /*AZX_DCAPS_ALIGN_BUFSIZE |*/ \
-        AZX_DCAPS_NO_64BIT | AZX_DCAPS_CORBRP_SELF_CLEAR |\
+       (AZX_DCAPS_NO_MSI | AZX_DCAPS_CORBRP_SELF_CLEAR |\
         AZX_DCAPS_SNOOP_TYPE(NVIDIA))
 
 #define AZX_DCAPS_PRESET_CTHDA \
@@ -1637,6 +1636,11 @@ static int azx_create(struct snd_card *card, struct pci_dev *pci,
                return err;
        }
 
+       if (chip->driver_type == AZX_DRIVER_NVIDIA) {
+               dev_dbg(chip->card->dev, "Enable delay in RIRB handling\n");
+               chip->bus.needs_damn_long_delay = 1;
+       }
+
        err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops);
        if (err < 0) {
                dev_err(card->dev, "Error creating device [card]!\n");
@@ -1720,6 +1724,10 @@ static int azx_first_init(struct azx *chip)
                }
        }
 
+       /* NVidia hardware normally only supports up to 40 bits of DMA */
+       if (chip->pci->vendor == PCI_VENDOR_ID_NVIDIA)
+               dma_bits = 40;
+
        /* disable 64bit DMA address on some devices */
        if (chip->driver_caps & AZX_DCAPS_NO_64BIT) {
                dev_dbg(card->dev, "Disabling 64bit DMA\n");
@@ -2406,14 +2414,12 @@ static const struct pci_device_id azx_ids[] = {
          .class = PCI_CLASS_MULTIMEDIA_HD_AUDIO << 8,
          .class_mask = 0xffffff,
          .driver_data = AZX_DRIVER_CTX | AZX_DCAPS_CTX_WORKAROUND |
-         AZX_DCAPS_NO_64BIT |
-         AZX_DCAPS_RIRB_PRE_DELAY | AZX_DCAPS_POSFIX_LPIB },
+         AZX_DCAPS_NO_64BIT | AZX_DCAPS_POSFIX_LPIB },
 #else
        /* this entry seems still valid -- i.e. without emu20kx chip */
        { PCI_DEVICE(0x1102, 0x0009),
          .driver_data = AZX_DRIVER_CTX | AZX_DCAPS_CTX_WORKAROUND |
-         AZX_DCAPS_NO_64BIT |
-         AZX_DCAPS_RIRB_PRE_DELAY | AZX_DCAPS_POSFIX_LPIB },
+         AZX_DCAPS_NO_64BIT | AZX_DCAPS_POSFIX_LPIB },
 #endif
        /* CM8888 */
        { PCI_DEVICE(0x13f6, 0x5011),
@@ -464,6 +464,8 @@ static int hda_tegra_create(struct snd_card *card,
        if (err < 0)
                return err;
 
+       chip->bus.needs_damn_long_delay = 1;
+
        err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops);
        if (err < 0) {
                dev_err(card->dev, "Error creating device\n");
@@ -481,8 +483,7 @@ MODULE_DEVICE_TABLE(of, hda_tegra_match);
 
 static int hda_tegra_probe(struct platform_device *pdev)
 {
-       const unsigned int driver_flags = AZX_DCAPS_RIRB_DELAY |
-                                         AZX_DCAPS_CORBRP_SELF_CLEAR;
+       const unsigned int driver_flags = AZX_DCAPS_CORBRP_SELF_CLEAR;
        struct snd_card *card;
        struct azx *chip;
        struct hda_tegra *hda;
@@ -5793,8 +5793,6 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
 #define ALC295_STANDARD_PINS \
        {0x12, 0xb7a60130}, \
        {0x14, 0x90170110}, \
-       {0x17, 0x21014020}, \
-       {0x18, 0x21a19030}, \
        {0x21, 0x04211020}
 
 #define ALC298_STANDARD_PINS \
@@ -5840,10 +5838,18 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
                {0x14, 0x90170110},
                {0x1b, 0x02011020},
                {0x21, 0x0221101f}),
+       SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+               {0x14, 0x90170110},
+               {0x1b, 0x01011020},
+               {0x21, 0x0221101f}),
        SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
                {0x14, 0x90170130},
                {0x1b, 0x01014020},
                {0x21, 0x0221103f}),
+       SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+               {0x14, 0x90170130},
+               {0x1b, 0x01011020},
+               {0x21, 0x0221103f}),
        SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
                {0x14, 0x90170130},
                {0x1b, 0x02011020},
|
||||||
ALC292_STANDARD_PINS,
|
ALC292_STANDARD_PINS,
|
||||||
{0x13, 0x90a60140}),
|
{0x13, 0x90a60140}),
|
||||||
SND_HDA_PIN_QUIRK(0x10ec0295, 0x1028, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE,
|
SND_HDA_PIN_QUIRK(0x10ec0295, 0x1028, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE,
|
||||||
ALC295_STANDARD_PINS),
|
ALC295_STANDARD_PINS,
|
||||||
|
{0x17, 0x21014020},
|
||||||
|
{0x18, 0x21a19030}),
|
||||||
|
SND_HDA_PIN_QUIRK(0x10ec0295, 0x1028, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE,
|
||||||
|
ALC295_STANDARD_PINS,
|
||||||
|
{0x17, 0x21014040},
|
||||||
|
{0x18, 0x21a19050}),
|
||||||
SND_HDA_PIN_QUIRK(0x10ec0298, 0x1028, "Dell", ALC298_FIXUP_DELL1_MIC_NO_PRESENCE,
|
SND_HDA_PIN_QUIRK(0x10ec0298, 0x1028, "Dell", ALC298_FIXUP_DELL1_MIC_NO_PRESENCE,
|
||||||
ALC298_STANDARD_PINS,
|
ALC298_STANDARD_PINS,
|
||||||
{0x17, 0x90170110}),
|
{0x17, 0x90170110}),
|
||||||
|
|
|
@@ -2907,6 +2907,23 @@ AU0828_DEVICE(0x2040, 0x7260, "Hauppauge", "HVR-950Q"),
 AU0828_DEVICE(0x2040, 0x7213, "Hauppauge", "HVR-950Q"),
 AU0828_DEVICE(0x2040, 0x7270, "Hauppauge", "HVR-950Q"),
 
+/* Syntek STK1160 */
+{
+       .match_flags = USB_DEVICE_ID_MATCH_DEVICE |
+                      USB_DEVICE_ID_MATCH_INT_CLASS |
+                      USB_DEVICE_ID_MATCH_INT_SUBCLASS,
+       .idVendor = 0x05e1,
+       .idProduct = 0x0408,
+       .bInterfaceClass = USB_CLASS_AUDIO,
+       .bInterfaceSubClass = USB_SUBCLASS_AUDIOCONTROL,
+       .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
+               .vendor_name = "Syntek",
+               .product_name = "STK1160",
+               .ifnum = QUIRK_ANY_INTERFACE,
+               .type = QUIRK_AUDIO_ALIGN_TRANSFER
+       }
+},
+
 /* Digidesign Mbox */
 {
        /* Thanks to Clemens Ladisch <clemens@ladisch.de> */
@@ -423,7 +423,7 @@ $(LIBTRACEEVENT)-clean:
        $(call QUIET_CLEAN, libtraceevent)
        $(Q)$(MAKE) -C $(TRACE_EVENT_DIR) O=$(OUTPUT) clean >/dev/null
 
-install-traceevent-plugins: $(LIBTRACEEVENT)
+install-traceevent-plugins: libtraceevent_plugins
        $(Q)$(MAKE) -C $(TRACE_EVENT_DIR) $(LIBTRACEEVENT_FLAGS) O=$(OUTPUT) install_plugins
 
 $(LIBAPI): fixdep FORCE