KVM: s390: remove _bh locking from local_int.lock
local_int.lock is not used in a bottom-half handler anymore, therefore we can turn it into an ordinary spin_lock at all occurrences.

Signed-off-by: David Hildenbrand <dahi@linux.vnet.ibm.com>
Reviewed-by: Christian Borntraeger <borntraeger@de.ibm.com>
Acked-by: Cornelia Huck <cornelia.huck@de.ibm.com>
Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
commit 4ae3c0815f
parent 0759d0681c
3 changed files with 28 additions and 28 deletions
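For background (this note and sketch are not part of the patch): spin_lock_bh() takes the lock and additionally disables bottom halves (softirqs) on the local CPU, which is only required while a bottom-half handler can contend for the same lock. Once no bottom-half user remains, plain spin_lock() is sufficient and avoids the extra softirq masking. A minimal sketch of the pattern, using hypothetical demo_* names:

/* Hedged sketch -- not from this patch; all demo_* names are hypothetical. */
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);
static unsigned long demo_events;

/* Called from bottom-half (softirq) context, e.g. a tasklet. */
static void demo_bh_handler(void)
{
	spin_lock(&demo_lock);		/* BHs are already disabled here */
	demo_events++;
	spin_unlock(&demo_lock);
}

/*
 * Process-context path while demo_bh_handler() can still run: the _bh
 * variant must be used, otherwise the softirq could run on this CPU
 * while the lock is held and spin on it forever (self-deadlock).
 */
static void demo_update_contended_by_bh(void)
{
	spin_lock_bh(&demo_lock);
	demo_events++;
	spin_unlock_bh(&demo_lock);
}

/*
 * Once no bottom-half user of the lock remains -- the situation this
 * patch establishes for local_int.lock -- an ordinary spin_lock() is
 * enough and avoids needlessly disabling softirqs.
 */
static void demo_update_no_bh_users(void)
{
	spin_lock(&demo_lock);
	demo_events++;
	spin_unlock(&demo_lock);
}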
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -544,13 +544,13 @@ int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu)
 	int rc = 0;
 
 	if (atomic_read(&li->active)) {
-		spin_lock_bh(&li->lock);
+		spin_lock(&li->lock);
 		list_for_each_entry(inti, &li->list, list)
 			if (__interrupt_is_deliverable(vcpu, inti)) {
 				rc = 1;
 				break;
 			}
-		spin_unlock_bh(&li->lock);
+		spin_unlock(&li->lock);
 	}
 
 	if ((!rc) && atomic_read(&fi->active)) {
@@ -645,13 +645,13 @@ void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu)
 	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
 	struct kvm_s390_interrupt_info *n, *inti = NULL;
 
-	spin_lock_bh(&li->lock);
+	spin_lock(&li->lock);
 	list_for_each_entry_safe(inti, n, &li->list, list) {
 		list_del(&inti->list);
 		kfree(inti);
 	}
 	atomic_set(&li->active, 0);
-	spin_unlock_bh(&li->lock);
+	spin_unlock(&li->lock);
 
 	/* clear pending external calls set by sigp interpretation facility */
 	atomic_clear_mask(CPUSTAT_ECALL_PEND, &vcpu->arch.sie_block->cpuflags);
@@ -670,7 +670,7 @@ void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
 	if (atomic_read(&li->active)) {
 		do {
 			deliver = 0;
-			spin_lock_bh(&li->lock);
+			spin_lock(&li->lock);
 			list_for_each_entry_safe(inti, n, &li->list, list) {
 				if (__interrupt_is_deliverable(vcpu, inti)) {
 					list_del(&inti->list);
@@ -681,7 +681,7 @@ void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
 			}
 			if (list_empty(&li->list))
 				atomic_set(&li->active, 0);
-			spin_unlock_bh(&li->lock);
+			spin_unlock(&li->lock);
 			if (deliver) {
 				__do_deliver_interrupt(vcpu, inti);
 				kfree(inti);
@@ -727,7 +727,7 @@ void kvm_s390_deliver_pending_machine_checks(struct kvm_vcpu *vcpu)
 	if (atomic_read(&li->active)) {
 		do {
 			deliver = 0;
-			spin_lock_bh(&li->lock);
+			spin_lock(&li->lock);
 			list_for_each_entry_safe(inti, n, &li->list, list) {
 				if ((inti->type == KVM_S390_MCHK) &&
 				    __interrupt_is_deliverable(vcpu, inti)) {
@@ -739,7 +739,7 @@ void kvm_s390_deliver_pending_machine_checks(struct kvm_vcpu *vcpu)
 			}
 			if (list_empty(&li->list))
 				atomic_set(&li->active, 0);
-			spin_unlock_bh(&li->lock);
+			spin_unlock(&li->lock);
 			if (deliver) {
 				__do_deliver_interrupt(vcpu, inti);
 				kfree(inti);
@@ -786,11 +786,11 @@ int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code)
 
 	VCPU_EVENT(vcpu, 3, "inject: program check %d (from kernel)", code);
 	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, inti->type, code, 0, 1);
-	spin_lock_bh(&li->lock);
+	spin_lock(&li->lock);
 	list_add(&inti->list, &li->list);
 	atomic_set(&li->active, 1);
 	BUG_ON(waitqueue_active(li->wq));
-	spin_unlock_bh(&li->lock);
+	spin_unlock(&li->lock);
 	return 0;
 }
 
@@ -811,11 +811,11 @@ int kvm_s390_inject_prog_irq(struct kvm_vcpu *vcpu,
 
 	inti->type = KVM_S390_PROGRAM_INT;
 	memcpy(&inti->pgm, pgm_info, sizeof(inti->pgm));
-	spin_lock_bh(&li->lock);
+	spin_lock(&li->lock);
 	list_add(&inti->list, &li->list);
 	atomic_set(&li->active, 1);
 	BUG_ON(waitqueue_active(li->wq));
-	spin_unlock_bh(&li->lock);
+	spin_unlock(&li->lock);
 	return 0;
 }
 
@@ -903,12 +903,12 @@ static int __inject_vm(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
 	}
 	dst_vcpu = kvm_get_vcpu(kvm, sigcpu);
 	li = &dst_vcpu->arch.local_int;
-	spin_lock_bh(&li->lock);
+	spin_lock(&li->lock);
 	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
 	if (waitqueue_active(li->wq))
 		wake_up_interruptible(li->wq);
 	kvm_get_vcpu(kvm, sigcpu)->preempted = true;
-	spin_unlock_bh(&li->lock);
+	spin_unlock(&li->lock);
 unlock_fi:
 	spin_unlock(&fi->lock);
 	mutex_unlock(&kvm->lock);
@@ -1050,7 +1050,7 @@ int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
 
 	mutex_lock(&vcpu->kvm->lock);
 	li = &vcpu->arch.local_int;
-	spin_lock_bh(&li->lock);
+	spin_lock(&li->lock);
 	if (inti->type == KVM_S390_PROGRAM_INT)
 		list_add(&inti->list, &li->list);
 	else
@@ -1062,7 +1062,7 @@ int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
 	if (waitqueue_active(&vcpu->wq))
 		wake_up_interruptible(&vcpu->wq);
 	vcpu->preempted = true;
-	spin_unlock_bh(&li->lock);
+	spin_unlock(&li->lock);
 	mutex_unlock(&vcpu->kvm->lock);
 	return 0;
 }
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -1522,13 +1522,13 @@ void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
 	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
 
 	/* Need to lock access to action_bits to avoid a SIGP race condition */
-	spin_lock_bh(&vcpu->arch.local_int.lock);
+	spin_lock(&vcpu->arch.local_int.lock);
 	atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
 
 	/* SIGP STOP and SIGP STOP AND STORE STATUS has been fully processed */
 	vcpu->arch.local_int.action_bits &=
 		~(ACTION_STOP_ON_STOP | ACTION_STORE_ON_STOP);
-	spin_unlock_bh(&vcpu->arch.local_int.lock);
+	spin_unlock(&vcpu->arch.local_int.lock);
 
 	__disable_ibs_on_vcpu(vcpu);
 
diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/sigp.c
--- a/arch/s390/kvm/sigp.c
+++ b/arch/s390/kvm/sigp.c
@@ -135,7 +135,7 @@ static int __inject_sigp_stop(struct kvm_s390_local_interrupt *li, int action)
 		return -ENOMEM;
 	inti->type = KVM_S390_SIGP_STOP;
 
-	spin_lock_bh(&li->lock);
+	spin_lock(&li->lock);
 	if (li->action_bits & ACTION_STOP_ON_STOP) {
 		/* another SIGP STOP is pending */
 		rc = SIGP_CC_BUSY;
@@ -154,7 +154,7 @@ static int __inject_sigp_stop(struct kvm_s390_local_interrupt *li, int action)
 	if (waitqueue_active(li->wq))
 		wake_up_interruptible(li->wq);
 out:
-	spin_unlock_bh(&li->lock);
+	spin_unlock(&li->lock);
 
 	return rc;
 }
@@ -243,7 +243,7 @@ static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address,
 	if (!inti)
 		return SIGP_CC_BUSY;
 
-	spin_lock_bh(&li->lock);
+	spin_lock(&li->lock);
 	/* cpu must be in stopped state */
 	if (!(atomic_read(li->cpuflags) & CPUSTAT_STOPPED)) {
 		*reg &= 0xffffffff00000000UL;
@@ -264,7 +264,7 @@ static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address,
 
 	VCPU_EVENT(vcpu, 4, "set prefix of cpu %02x to %x", cpu_addr, address);
 out_li:
-	spin_unlock_bh(&li->lock);
+	spin_unlock(&li->lock);
 	return rc;
 }
 
@@ -280,9 +280,9 @@ static int __sigp_store_status_at_addr(struct kvm_vcpu *vcpu, u16 cpu_id,
 	if (!dst_vcpu)
 		return SIGP_CC_NOT_OPERATIONAL;
 
-	spin_lock_bh(&dst_vcpu->arch.local_int.lock);
+	spin_lock(&dst_vcpu->arch.local_int.lock);
 	flags = atomic_read(dst_vcpu->arch.local_int.cpuflags);
-	spin_unlock_bh(&dst_vcpu->arch.local_int.lock);
+	spin_unlock(&dst_vcpu->arch.local_int.lock);
 	if (!(flags & CPUSTAT_STOPPED)) {
 		*reg &= 0xffffffff00000000UL;
 		*reg |= SIGP_STATUS_INCORRECT_STATE;
@@ -343,10 +343,10 @@ static int sigp_check_callable(struct kvm_vcpu *vcpu, u16 cpu_addr)
 	if (!dst_vcpu)
 		return SIGP_CC_NOT_OPERATIONAL;
 	li = &dst_vcpu->arch.local_int;
-	spin_lock_bh(&li->lock);
+	spin_lock(&li->lock);
 	if (li->action_bits & ACTION_STOP_ON_STOP)
 		rc = SIGP_CC_BUSY;
-	spin_unlock_bh(&li->lock);
+	spin_unlock(&li->lock);
 
 	return rc;
 }
@@ -466,11 +466,11 @@ int kvm_s390_handle_sigp_pei(struct kvm_vcpu *vcpu)
 	dest_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
 	BUG_ON(dest_vcpu == NULL);
 
-	spin_lock_bh(&dest_vcpu->arch.local_int.lock);
+	spin_lock(&dest_vcpu->arch.local_int.lock);
 	if (waitqueue_active(&dest_vcpu->wq))
 		wake_up_interruptible(&dest_vcpu->wq);
 	dest_vcpu->preempted = true;
-	spin_unlock_bh(&dest_vcpu->arch.local_int.lock);
+	spin_unlock(&dest_vcpu->arch.local_int.lock);
 
 	kvm_s390_set_psw_cc(vcpu, SIGP_CC_ORDER_CODE_ACCEPTED);
 	return 0;