KVM: selftests: Verify SEV+ guests can read and write EFER, CR0, CR4, and CR8
Add "do no harm" testing of EFER, CR0, CR4, and CR8 for SEV+ guests to verify that the guest can read and write the registers, without hitting e.g. a #VC on SEV-ES guests due to KVM incorrectly trying to intercept a register. Signed-off-by: Sean Christopherson <seanjc@google.com> Message-ID: <20260310211841.2552361-3-seanjc@google.com> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
This commit is contained in:
parent
dca01b0a26
commit
d2ea4ff1ce
|
|
@@ -557,6 +557,11 @@ static inline uint64_t get_cr0(void)
|
|||
return cr0;
|
||||
}
|
||||
|
||||
/* Load @val into CR0.  Volatile + "memory" clobber: writing CR0 can toggle
 * paging/caching behavior, so the compiler must not reorder or elide it.
 */
static inline void set_cr0(uint64_t val)
{
	__asm__ __volatile__("mov %[cr0], %%cr0" : : [cr0]"r"(val) : "memory");
}
|
||||
|
||||
static inline uint64_t get_cr3(void)
|
||||
{
|
||||
uint64_t cr3;
|
||||
|
|
@@ -566,6 +571,11 @@ static inline uint64_t get_cr3(void)
|
|||
return cr3;
|
||||
}
|
||||
|
||||
/* Load @val into CR3.  Volatile + "memory" clobber: a CR3 write switches page
 * tables (flushing non-global TLB entries), so memory accesses must not be
 * reordered across it.
 */
static inline void set_cr3(uint64_t val)
{
	__asm__ __volatile__("mov %[cr3], %%cr3" : : [cr3]"r"(val) : "memory");
}
|
||||
|
||||
static inline uint64_t get_cr4(void)
|
||||
{
|
||||
uint64_t cr4;
|
||||
|
|
@@ -580,6 +590,19 @@ static inline void set_cr4(uint64_t val)
|
|||
__asm__ __volatile__("mov %0, %%cr4" : : "r" (val) : "memory");
|
||||
}
|
||||
|
||||
/* Read and return the current value of CR8 (the task-priority register). */
static inline uint64_t get_cr8(void)
{
	uint64_t cr8;

	__asm__ __volatile__("mov %%cr8, %0" : "=r"(cr8));

	return cr8;
}
|
||||
|
||||
/* Load @val into CR8.  Volatile + "memory" clobber so the priority change is
 * not reordered relative to surrounding accesses or optimized away.
 */
static inline void set_cr8(uint64_t val)
{
	__asm__ __volatile__("mov %[cr8], %%cr8" : : [cr8]"r"(val) : "memory");
}
|
||||
|
||||
static inline void set_idt(const struct desc_ptr *idt_desc)
|
||||
{
|
||||
__asm__ __volatile__("lidt %0"::"m"(*idt_desc));
|
||||
|
|
|
|||
|
|
@@ -13,6 +13,30 @@
|
|||
#include "linux/psp-sev.h"
|
||||
#include "sev.h"
|
||||
|
||||
/*
 * "Do no harm" check of @msr: read the current value, write it back, and
 * assert that a re-read returns the same value, i.e. that the accesses
 * complete without faulting and without corrupting the MSR.
 */
static void guest_sev_test_msr(uint32_t msr)
{
	const uint64_t orig = rdmsr(msr);

	wrmsr(msr, orig);
	GUEST_ASSERT(rdmsr(msr) == orig);
}
|
||||
|
||||
/*
 * "Do no harm" check of register @reg (e.g. cr0): read it via get_<reg>(),
 * write the value straight back via set_<reg>(), and assert the register
 * still holds the original value.  do-while(0) so the macro behaves as a
 * single statement.
 */
#define guest_sev_test_reg(reg)						\
do {									\
	uint64_t orig = get_##reg();					\
									\
	set_##reg(orig);						\
	GUEST_ASSERT(get_##reg() == orig);				\
} while (0)
|
||||
|
||||
/*
 * Verify the guest can read and write EFER, CR0, CR3, CR4, and CR8 without
 * harm: each helper reads the register, writes the value back, and asserts
 * a re-read matches.  Per the commit description, this guards against KVM
 * incorrectly intercepting a register for SEV+ guests (e.g. a spurious #VC
 * on SEV-ES).
 */
static void guest_sev_test_regs(void)
{
	guest_sev_test_msr(MSR_EFER);
	guest_sev_test_reg(cr0);
	guest_sev_test_reg(cr3);
	guest_sev_test_reg(cr4);
	guest_sev_test_reg(cr8);
}
|
||||
|
||||
#define XFEATURE_MASK_X87_AVX (XFEATURE_MASK_FP | XFEATURE_MASK_SSE | XFEATURE_MASK_YMM)
|
||||
|
||||
|
|
@@ -24,6 +48,8 @@ static void guest_snp_code(void)
|
|||
GUEST_ASSERT(sev_msr & MSR_AMD64_SEV_ES_ENABLED);
|
||||
GUEST_ASSERT(sev_msr & MSR_AMD64_SEV_SNP_ENABLED);
|
||||
|
||||
guest_sev_test_regs();
|
||||
|
||||
wrmsr(MSR_AMD64_SEV_ES_GHCB, GHCB_MSR_TERM_REQ);
|
||||
vmgexit();
|
||||
}
|
||||
|
|
@@ -34,6 +60,8 @@ static void guest_sev_es_code(void)
|
|||
GUEST_ASSERT(rdmsr(MSR_AMD64_SEV) & MSR_AMD64_SEV_ENABLED);
|
||||
GUEST_ASSERT(rdmsr(MSR_AMD64_SEV) & MSR_AMD64_SEV_ES_ENABLED);
|
||||
|
||||
guest_sev_test_regs();
|
||||
|
||||
/*
|
||||
* TODO: Add GHCB and ucall support for SEV-ES guests. For now, simply
|
||||
* force "termination" to signal "done" via the GHCB MSR protocol.
|
||||
|
|
@@ -47,6 +75,8 @@ static void guest_sev_code(void)
|
|||
GUEST_ASSERT(this_cpu_has(X86_FEATURE_SEV));
|
||||
GUEST_ASSERT(rdmsr(MSR_AMD64_SEV) & MSR_AMD64_SEV_ENABLED);
|
||||
|
||||
guest_sev_test_regs();
|
||||
|
||||
GUEST_DONE();
|
||||
}
|
||||
|
||||
|
|
|
|||
Loading…
Reference in New Issue