KVM: x86: Remove redundant definitions
Some constants are redefined in emulate.c. Avoid it.

s/SELECTOR_RPL_MASK/SEGMENT_RPL_MASK
s/SELECTOR_TI_MASK/SEGMENT_TI_MASK

No functional change.

Signed-off-by: Nadav Amit <namit@cs.technion.ac.il>
Message-Id: <1427635984-8113-3-git-send-email-namit@cs.technion.ac.il>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 0efb04406d
commit b32a991800
3 changed files with 12 additions and 15 deletions
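
For context (not part of the commit): the SEGMENT_* names the patch switches to are the generic x86 selector masks already provided by the kernel headers. A minimal sketch of the equivalent definitions, assuming the asm/segment.h layout of that era, with values matching the duplicates removed below:

    #define SEGMENT_RPL_MASK        0x3     /* bits 1:0: requested privilege level (RPL) */
    #define SEGMENT_TI_MASK         0x4     /* bit 2: table indicator, 0 = GDT, 1 = LDT  */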
@@ -81,9 +81,6 @@ static inline gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)
                 (base_gfn >> KVM_HPAGE_GFN_SHIFT(level));
 }
 
-#define SELECTOR_TI_MASK (1 << 2)
-#define SELECTOR_RPL_MASK 0x03
-
 #define KVM_PERMILLE_MMU_PAGES 20
 #define KVM_MIN_ALLOC_MMU_PAGES 64
 #define KVM_MMU_HASH_SHIFT 10
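
As an aside (illustrative only, not part of the patch): the two masks just removed isolate the low bits of a segment selector. A small standalone C sketch of how a selector value decomposes, using a made-up example selector:

    #include <stdio.h>

    #define SEGMENT_RPL_MASK 0x3    /* bits 1:0 - requested privilege level */
    #define SEGMENT_TI_MASK  0x4    /* bit 2    - 0 = GDT, 1 = LDT          */

    int main(void)
    {
            unsigned short sel = 0x2b;                      /* arbitrary example selector */
            unsigned rpl = sel & SEGMENT_RPL_MASK;          /* -> 3 */
            unsigned ti  = (sel & SEGMENT_TI_MASK) >> 2;    /* -> 1 (LDT) */
            unsigned idx = sel >> 3;                        /* descriptor index -> 5 */

            printf("selector 0x%04x: index=%u ti=%u rpl=%u\n", sel, idx, ti, rpl);
            return 0;
    }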
@@ -2435,7 +2435,7 @@ static int em_sysenter(struct x86_emulate_ctxt *ctxt)
                 return emulate_gp(ctxt, 0);
 
         ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF);
-        cs_sel = (u16)msr_data & ~SELECTOR_RPL_MASK;
+        cs_sel = (u16)msr_data & ~SEGMENT_RPL_MASK;
         ss_sel = cs_sel + 8;
         if (efer & EFER_LMA) {
                 cs.d = 0;
@@ -2502,8 +2502,8 @@ static int em_sysexit(struct x86_emulate_ctxt *ctxt)
                         return emulate_gp(ctxt, 0);
                 break;
         }
-        cs_sel |= SELECTOR_RPL_MASK;
-        ss_sel |= SELECTOR_RPL_MASK;
+        cs_sel |= SEGMENT_RPL_MASK;
+        ss_sel |= SEGMENT_RPL_MASK;
 
         ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
         ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
@@ -3263,8 +3263,8 @@ static void fix_pmode_seg(struct kvm_vcpu *vcpu, int seg,
                  * default value.
                  */
                 if (seg == VCPU_SREG_CS || seg == VCPU_SREG_SS)
-                        save->selector &= ~SELECTOR_RPL_MASK;
-                save->dpl = save->selector & SELECTOR_RPL_MASK;
+                        save->selector &= ~SEGMENT_RPL_MASK;
+                save->dpl = save->selector & SEGMENT_RPL_MASK;
                 save->s = 1;
         }
         vmx_set_segment(vcpu, save, seg);
@@ -3837,7 +3837,7 @@ static bool code_segment_valid(struct kvm_vcpu *vcpu)
         unsigned int cs_rpl;
 
         vmx_get_segment(vcpu, &cs, VCPU_SREG_CS);
-        cs_rpl = cs.selector & SELECTOR_RPL_MASK;
+        cs_rpl = cs.selector & SEGMENT_RPL_MASK;
 
         if (cs.unusable)
                 return false;
@@ -3865,7 +3865,7 @@ static bool stack_segment_valid(struct kvm_vcpu *vcpu)
         unsigned int ss_rpl;
 
         vmx_get_segment(vcpu, &ss, VCPU_SREG_SS);
-        ss_rpl = ss.selector & SELECTOR_RPL_MASK;
+        ss_rpl = ss.selector & SEGMENT_RPL_MASK;
 
         if (ss.unusable)
                 return true;
@@ -3887,7 +3887,7 @@ static bool data_segment_valid(struct kvm_vcpu *vcpu, int seg)
         unsigned int rpl;
 
         vmx_get_segment(vcpu, &var, seg);
-        rpl = var.selector & SELECTOR_RPL_MASK;
+        rpl = var.selector & SEGMENT_RPL_MASK;
 
         if (var.unusable)
                 return true;
@@ -3914,7 +3914,7 @@ static bool tr_valid(struct kvm_vcpu *vcpu)
 
         if (tr.unusable)
                 return false;
-        if (tr.selector & SELECTOR_TI_MASK)     /* TI = 1 */
+        if (tr.selector & SEGMENT_TI_MASK)      /* TI = 1 */
                 return false;
         if (tr.type != 3 && tr.type != 11) /* TODO: Check if guest is in IA32e mode */
                 return false;
@@ -3932,7 +3932,7 @@ static bool ldtr_valid(struct kvm_vcpu *vcpu)
 
         if (ldtr.unusable)
                 return true;
-        if (ldtr.selector & SELECTOR_TI_MASK)   /* TI = 1 */
+        if (ldtr.selector & SEGMENT_TI_MASK)    /* TI = 1 */
                 return false;
         if (ldtr.type != 2)
                 return false;
@@ -3949,8 +3949,8 @@ static bool cs_ss_rpl_check(struct kvm_vcpu *vcpu)
         vmx_get_segment(vcpu, &cs, VCPU_SREG_CS);
         vmx_get_segment(vcpu, &ss, VCPU_SREG_SS);
 
-        return ((cs.selector & SELECTOR_RPL_MASK) ==
-                 (ss.selector & SELECTOR_RPL_MASK));
+        return ((cs.selector & SEGMENT_RPL_MASK) ==
+                 (ss.selector & SEGMENT_RPL_MASK));
 }
 
 /*