Merge: kvm/aarch64: Pick up fixes up to v6.14
MR: https://gitlab.com/redhat/centos-stream/src/kernel/centos-stream-9/-/merge_requests/6574
JIRA: https://issues.redhat.com/browse/RHEL-82298
Brew: https://brewweb.engineering.redhat.com/brew/taskinfo?taskID=67019166
Upstream Status: v6.14.rc7
Tested: Passed. kselftests, kvm-unit-tests, virt-install and live migration (4KB/64KB)

This series picks up the following fixes, all of which landed upstream no later than v6.14.rc7. Note that features, enhancements and their follow-up fixes are not included.

3855a7b91d42 KVM: arm64: Initialize SCTLR_EL1 in __kvm_hyp_init_cpu()
7a68b55ff39b KVM: arm64: Initialize HCR_EL2.E2H early
afa9b48f327c KVM: arm64: Shave a few bytes from the EL2 idmap code
fa808ed4e199 KVM: arm64: Ensure a VMID is allocated before programming VTTBR_EL2
102c51c50db8 KVM: arm64: Fix tcr_el2 initialisation in hVHE mode
b3aa9283c0c5 KVM: arm64: vgic: Hoist SGI/PPI alloc from vgic_init() to kvm_create_vgic()
e6e3e0022ef8 KVM: arm64: timer: Drop warning on failed interrupt signalling
b938731ed2d4 KVM: arm64: Fix alignment of kvm_hyp_memcache allocations
332b7e6d62b7 KVM: arm64: Simplify warning in kvm_arch_vcpu_load_fp()
b450dcce93bc KVM: arm64: timer: Always evaluate the need for a soft timer
5417a2e9b130 KVM: arm64: Fix nested S2 MMU structures reallocation
32392e04cb50 KVM: arm64: Fail protected mode init if no vgic hardware is present
59419f10045b KVM: arm64: Eagerly switch ZCR_EL{1,2}
f9dd00de1e53 KVM: arm64: Mark some header functions as inline
9b66195063c5 KVM: arm64: Refactor exit handlers
407a99c4654e KVM: arm64: Remove VHE host restore of CPACR_EL1.SMEN
459f059be702 KVM: arm64: Remove VHE host restore of CPACR_EL1.ZEN
8eca7f6d5100 KVM: arm64: Remove host FPSIMD saving for non-protected KVM
d4db98791aa5 KVM: arm64: Add predicate for FPMR support in a VM
9bcbb6104a34 KVM: arm64: Flush hyp bss section after initialization of variables in bss
9fb4267a759c KVM: arm64: Fix selftests after sysreg field name update
a7f1fa5564be KVM: arm64: Explicitly handle BRBE traps as UNDEFINED
dea8838128c5 KVM: arm64: vgic: Use str_enabled_disabled() in vgic_v3_probe()
68344037b764 KVM: arm64: Fix nVHE stacktrace VA bits mask
b7f345fbc32a KVM: arm64: Fix FEAT_MTE in pKVM
e891432cf717 KVM: arm64: nv: Advertise the lack of AArch32 EL0 support
1eccad35c926 KVM: arm64: Fix the value of the CPTR_EL2 RES1 bitmask for nVHE
2fd5b4b0e7b4 KVM: arm64: Calculate cptr_el2 traps on activating traps
9df9186f8df5 KVM: arm64: Fix RAS trapping in pKVM for protected VMs
8ca19c40c47d KVM: arm64: Drop MDSCR_EL1_DEBUG_MASK
9d86c3c97434 arm64/kvm: Avoid invalid physical addresses to signal owner updates
f0da16992aef arm64/kvm: Configure HYP TCR.PS/DS based on host stage1
d3ba35b69eae KVM: arm64: nv: Reload PMU events upon MDCR_EL2.HPME change
adf8623b3f51 KVM: arm64: Use KVM_REQ_RELOAD_PMU to handle PMCR_EL0.E change
e22c369520d0 KVM: arm64: Add unified helper for reprogramming counters by mask
985bb51f17ab KVM: arm64: Always check the state from hyp_ack_unshare()

Changes in v4:
  Improved commit logs for the following upstream commits (Eric):
    d3ba35b69eae KVM: arm64: nv: Reload PMU events upon MDCR_EL2.HPME change
    59419f10045b KVM: arm64: Eagerly switch ZCR_EL{1,2}
  Added the following upstream commit, as a dependency of upstream commits 7a68b55ff39b and 3855a7b91d42 (Eric):
    afa9b48f327c KVM: arm64: Shave a few bytes from the EL2 idmap code

Changes in v3:
  7 upstream commits added per Eric's comments to include 59419f10045b. All other commits are dependencies.
    59419f10045b KVM: arm64: Eagerly switch ZCR_EL{1,2}
    f9dd00de1e53 KVM: arm64: Mark some header functions as inline
    9b66195063c5 KVM: arm64: Refactor exit handlers
    407a99c4654e KVM: arm64: Remove VHE host restore of CPACR_EL1.SMEN
    459f059be702 KVM: arm64: Remove VHE host restore of CPACR_EL1.ZEN
    8eca7f6d5100 KVM: arm64: Remove host FPSIMD saving for non-protected KVM
    d4db98791aa5 KVM: arm64: Add predicate for FPMR support in a VM

Changes in v2:
  3 upstream commits added per Eric's comments:
    1eccad35c926 KVM: arm64: Fix the value of the CPTR_EL2 RES1 bitmask for nVHE
    2fd5b4b0e7b4 KVM: arm64: Calculate cptr_el2 traps on activating traps
    9df9186f8df5 KVM: arm64: Fix RAS trapping in pKVM for protected VMs
  59419f10045b skipped due to too many dependencies

Signed-off-by: Gavin Shan <gshan@redhat.com>
Approved-by: Shaoqin Huang <shahuang@redhat.com>
Approved-by: Eric Auger <eric.auger@redhat.com>
Approved-by: Cornelia Huck <cohuck@redhat.com>
Approved-by: CKI KWF Bot <cki-ci-bot+kwf-gitlab-com@redhat.com>
Merged-by: Augusto Caringi <acaringi@redhat.com>
commit 70b67efabd
@@ -16,6 +16,32 @@
#include <asm/sysreg.h>
|
||||
#include <linux/irqchip/arm-gic-v3.h>
|
||||
|
||||
.macro init_el2_hcr val
|
||||
mov_q x0, \val
|
||||
|
||||
/*
|
||||
* Compliant CPUs advertise their VHE-onlyness with
|
||||
* ID_AA64MMFR4_EL1.E2H0 < 0. On such CPUs HCR_EL2.E2H is RES1, but it
|
||||
* can reset into an UNKNOWN state and might not read as 1 until it has
|
||||
* been initialized explicitly.
|
||||
*
|
||||
* Fruity CPUs seem to have HCR_EL2.E2H set to RAO/WI, but
|
||||
* don't advertise it (they predate this relaxation).
|
||||
*
|
||||
* Initalize HCR_EL2.E2H so that later code can rely upon HCR_EL2.E2H
|
||||
* indicating whether the CPU is running in E2H mode.
|
||||
*/
|
||||
mrs_s x1, SYS_ID_AA64MMFR4_EL1
|
||||
sbfx x1, x1, #ID_AA64MMFR4_EL1_E2H0_SHIFT, #ID_AA64MMFR4_EL1_E2H0_WIDTH
|
||||
cmp x1, #0
|
||||
b.ge .LnVHE_\@
|
||||
|
||||
orr x0, x0, #HCR_E2H
|
||||
.LnVHE_\@:
|
||||
msr hcr_el2, x0
|
||||
isb
|
||||
.endm
|
||||
|
||||
.macro __init_el2_sctlr
|
||||
mov_q x0, INIT_SCTLR_EL2_MMU_OFF
|
||||
msr sctlr_el2, x0
|
||||
|
@@ -204,11 +230,6 @@
.Lskip_fgt_\@:
|
||||
.endm
|
||||
|
||||
.macro __init_el2_nvhe_prepare_eret
|
||||
mov x0, #INIT_PSTATE_EL1
|
||||
msr spsr_el2, x0
|
||||
.endm
|
||||
|
||||
/**
|
||||
* Initialize EL2 registers to sane values. This should be called early on all
|
||||
* cores that were booted in EL2. Note that everything gets initialised as
|
||||
|
|
|
@@ -117,7 +117,7 @@
#define TCR_EL2_IRGN0_MASK TCR_IRGN0_MASK
|
||||
#define TCR_EL2_T0SZ_MASK 0x3f
|
||||
#define TCR_EL2_MASK (TCR_EL2_TG0_MASK | TCR_EL2_SH0_MASK | \
|
||||
TCR_EL2_ORGN0_MASK | TCR_EL2_IRGN0_MASK | TCR_EL2_T0SZ_MASK)
|
||||
TCR_EL2_ORGN0_MASK | TCR_EL2_IRGN0_MASK)
|
||||
|
||||
/* VTCR_EL2 Registers bits */
|
||||
#define VTCR_EL2_DS TCR_EL2_DS
|
||||
|
@@ -298,7 +298,7 @@
#define CPTR_EL2_TSM (1 << 12)
|
||||
#define CPTR_EL2_TFP (1 << CPTR_EL2_TFP_SHIFT)
|
||||
#define CPTR_EL2_TZ (1 << 8)
|
||||
#define CPTR_NVHE_EL2_RES1 0x000032ff /* known RES1 bits in CPTR_EL2 (nVHE) */
|
||||
#define CPTR_NVHE_EL2_RES1 (BIT(13) | BIT(9) | GENMASK(7, 0))
|
||||
#define CPTR_NVHE_EL2_RES0 (GENMASK(63, 32) | \
|
||||
GENMASK(29, 21) | \
|
||||
GENMASK(19, 14) | \
|
||||
|
|
|
@@ -176,6 +176,7 @@ struct kvm_nvhe_init_params {
unsigned long hcr_el2;
|
||||
unsigned long vttbr;
|
||||
unsigned long vtcr;
|
||||
unsigned long tmp;
|
||||
};
|
||||
|
||||
/*
|
||||
|
|
|
@@ -634,8 +634,8 @@ static __always_inline u64 kvm_get_reset_cptr_el2(struct kvm_vcpu *vcpu)
|
||||
if (vcpu_has_sve(vcpu) && guest_owns_fp_regs())
|
||||
val |= CPTR_EL2_TZ;
|
||||
if (cpus_have_final_cap(ARM64_SME))
|
||||
val &= ~CPTR_EL2_TSM;
|
||||
if (!cpus_have_final_cap(ARM64_SME))
|
||||
val |= CPTR_EL2_TSM;
|
||||
}
|
||||
|
||||
return val;
|
||||
|
|
|
@@ -98,7 +98,7 @@ static inline void push_hyp_memcache(struct kvm_hyp_memcache *mc,
static inline void *pop_hyp_memcache(struct kvm_hyp_memcache *mc,
|
||||
void *(*to_va)(phys_addr_t phys))
|
||||
{
|
||||
phys_addr_t *p = to_va(mc->head);
|
||||
phys_addr_t *p = to_va(mc->head & PAGE_MASK);
|
||||
|
||||
if (!mc->nr_pages)
|
||||
return NULL;
|
||||
|
@@ -595,13 +595,13 @@ struct kvm_host_data {
struct kvm_cpu_context host_ctxt;
|
||||
|
||||
/*
|
||||
* All pointers in this union are hyp VA.
|
||||
* Hyp VA.
|
||||
* sve_state is only used in pKVM and if system_supports_sve().
|
||||
*/
|
||||
union {
|
||||
struct user_fpsimd_state *fpsimd_state;
|
||||
struct cpu_sve_state *sve_state;
|
||||
};
|
||||
struct cpu_sve_state *sve_state;
|
||||
|
||||
/* Used by pKVM only. */
|
||||
u64 fpmr;
|
||||
|
||||
/* Ownership of the FP regs */
|
||||
enum {
|
||||
|
@@ -682,7 +682,6 @@ struct kvm_vcpu_arch {
u64 hcr_el2;
|
||||
u64 hcrx_el2;
|
||||
u64 mdcr_el2;
|
||||
u64 cptr_el2;
|
||||
|
||||
/* Exception Information */
|
||||
struct kvm_vcpu_fault_info fault;
|
||||
|
@@ -1234,7 +1233,7 @@ int kvm_arm_pvtime_has_attr(struct kvm_vcpu *vcpu,
extern unsigned int __ro_after_init kvm_arm_vmid_bits;
|
||||
int __init kvm_arm_vmid_alloc_init(void);
|
||||
void __init kvm_arm_vmid_alloc_free(void);
|
||||
bool kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid);
|
||||
void kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid);
|
||||
void kvm_arm_vmid_clear_active(void);
|
||||
|
||||
static inline void kvm_arm_pvtime_vcpu_init(struct kvm_vcpu_arch *vcpu_arch)
|
||||
|
@@ -1489,4 +1488,8 @@ void kvm_set_vm_id_reg(struct kvm *kvm, u32 reg, u64 val);
(pa + pi + pa3) == 1; \
|
||||
})
|
||||
|
||||
#define kvm_has_fpmr(k) \
|
||||
(system_supports_fpmr() && \
|
||||
kvm_has_feat((k), ID_AA64PFR2_EL1, FPMR, IMP))
|
||||
|
||||
#endif /* __ARM64_KVM_HOST_H__ */
|
||||
|
|
|
@@ -139,6 +139,8 @@ static __always_inline unsigned long __kern_hyp_va(unsigned long v)
|
||||
#define kern_hyp_va(v) ((typeof(v))(__kern_hyp_va((unsigned long)(v))))
|
||||
|
||||
extern u32 __hyp_va_bits;
|
||||
|
||||
/*
|
||||
* We currently support using a VM-specified IPA size. For backward
|
||||
* compatibility, the default IPA size is fixed to 40bits.
|
||||
|
|
|
@@ -130,6 +130,7 @@ int main(void)
DEFINE(NVHE_INIT_HCR_EL2, offsetof(struct kvm_nvhe_init_params, hcr_el2));
|
||||
DEFINE(NVHE_INIT_VTTBR, offsetof(struct kvm_nvhe_init_params, vttbr));
|
||||
DEFINE(NVHE_INIT_VTCR, offsetof(struct kvm_nvhe_init_params, vtcr));
|
||||
DEFINE(NVHE_INIT_TMP, offsetof(struct kvm_nvhe_init_params, tmp));
|
||||
#endif
|
||||
#ifdef CONFIG_CPU_PM
|
||||
DEFINE(CPU_CTX_SP, offsetof(struct cpu_suspend_ctx, sp));
|
||||
|
|
|
@@ -295,25 +295,8 @@ SYM_INNER_LABEL(init_el2, SYM_L_LOCAL)
msr sctlr_el2, x0
|
||||
isb
|
||||
0:
|
||||
mov_q x0, HCR_HOST_NVHE_FLAGS
|
||||
|
||||
/*
|
||||
* Compliant CPUs advertise their VHE-onlyness with
|
||||
* ID_AA64MMFR4_EL1.E2H0 < 0. HCR_EL2.E2H can be
|
||||
* RES1 in that case. Publish the E2H bit early so that
|
||||
* it can be picked up by the init_el2_state macro.
|
||||
*
|
||||
* Fruity CPUs seem to have HCR_EL2.E2H set to RAO/WI, but
|
||||
* don't advertise it (they predate this relaxation).
|
||||
*/
|
||||
mrs_s x1, SYS_ID_AA64MMFR4_EL1
|
||||
tbz x1, #(ID_AA64MMFR4_EL1_E2H0_SHIFT + ID_AA64MMFR4_EL1_E2H0_WIDTH - 1), 1f
|
||||
|
||||
orr x0, x0, #HCR_E2H
|
||||
1:
|
||||
msr hcr_el2, x0
|
||||
isb
|
||||
|
||||
init_el2_hcr HCR_HOST_NVHE_FLAGS
|
||||
init_el2_state
|
||||
|
||||
/* Hypervisor stub */
|
||||
|
@@ -336,7 +319,8 @@ SYM_INNER_LABEL(init_el2, SYM_L_LOCAL)
msr sctlr_el1, x1
|
||||
mov x2, xzr
|
||||
3:
|
||||
__init_el2_nvhe_prepare_eret
|
||||
mov x0, #INIT_PSTATE_EL1
|
||||
msr spsr_el2, x0
|
||||
|
||||
mov w0, #BOOT_CPU_MODE_EL2
|
||||
orr x0, x0, x2
|
||||
|
|
|
@@ -444,19 +444,17 @@ void kvm_timer_update_run(struct kvm_vcpu *vcpu)
static void kvm_timer_update_irq(struct kvm_vcpu *vcpu, bool new_level,
|
||||
struct arch_timer_context *timer_ctx)
|
||||
{
|
||||
int ret;
|
||||
|
||||
timer_ctx->irq.level = new_level;
|
||||
trace_kvm_timer_update_irq(vcpu->vcpu_id, timer_irq(timer_ctx),
|
||||
timer_ctx->irq.level);
|
||||
|
||||
if (!userspace_irqchip(vcpu->kvm)) {
|
||||
ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu,
|
||||
timer_irq(timer_ctx),
|
||||
timer_ctx->irq.level,
|
||||
timer_ctx);
|
||||
WARN_ON(ret);
|
||||
}
|
||||
if (userspace_irqchip(vcpu->kvm))
|
||||
return;
|
||||
|
||||
kvm_vgic_inject_irq(vcpu->kvm, vcpu,
|
||||
timer_irq(timer_ctx),
|
||||
timer_ctx->irq.level,
|
||||
timer_ctx);
|
||||
}
|
||||
|
||||
/* Only called for a fully emulated timer */
|
||||
|
@@ -466,10 +464,8 @@ static void timer_emulate(struct arch_timer_context *ctx)
|
||||
trace_kvm_timer_emulate(ctx, should_fire);
|
||||
|
||||
if (should_fire != ctx->irq.level) {
|
||||
if (should_fire != ctx->irq.level)
|
||||
kvm_timer_update_irq(ctx->vcpu, should_fire, ctx);
|
||||
return;
|
||||
}
|
||||
|
||||
/*
|
||||
* If the timer can fire now, we don't need to have a soft timer
|
||||
|
|
|
@@ -577,6 +577,16 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
mmu = vcpu->arch.hw_mmu;
|
||||
last_ran = this_cpu_ptr(mmu->last_vcpu_ran);
|
||||
|
||||
/*
|
||||
* Ensure a VMID is allocated for the MMU before programming VTTBR_EL2,
|
||||
* which happens eagerly in VHE.
|
||||
*
|
||||
* Also, the VMID allocator only preserves VMIDs that are active at the
|
||||
* time of rollover, so KVM might need to grab a new VMID for the MMU if
|
||||
* this is called from kvm_sched_in().
|
||||
*/
|
||||
kvm_arm_vmid_update(&mmu->vmid);
|
||||
|
||||
/*
|
||||
* We guarantee that both TLBs and I-cache are private to each
|
||||
* vcpu. If detecting that a vcpu from the same VM has
|
||||
|
@@ -1144,18 +1154,6 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
*/
|
||||
preempt_disable();
|
||||
|
||||
/*
|
||||
* The VMID allocator only tracks active VMIDs per
|
||||
* physical CPU, and therefore the VMID allocated may not be
|
||||
* preserved on VMID roll-over if the task was preempted,
|
||||
* making a thread's VMID inactive. So we need to call
|
||||
* kvm_arm_vmid_update() in non-premptible context.
|
||||
*/
|
||||
if (kvm_arm_vmid_update(&vcpu->arch.hw_mmu->vmid) &&
|
||||
has_vhe())
|
||||
__load_stage2(vcpu->arch.hw_mmu,
|
||||
vcpu->arch.hw_mmu->arch);
|
||||
|
||||
kvm_pmu_flush_hwstate(vcpu);
|
||||
|
||||
local_irq_disable();
|
||||
|
@@ -1568,7 +1566,6 @@ static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu,
}
|
||||
|
||||
vcpu_reset_hcr(vcpu);
|
||||
vcpu->arch.cptr_el2 = kvm_get_reset_cptr_el2(vcpu);
|
||||
|
||||
/*
|
||||
* Handle the "start in power-off" case.
|
||||
|
@@ -1987,7 +1984,6 @@ static int kvm_init_vector_slots(void)
static void __init cpu_prepare_hyp_mode(int cpu, u32 hyp_va_bits)
|
||||
{
|
||||
struct kvm_nvhe_init_params *params = per_cpu_ptr_nvhe_sym(kvm_init_params, cpu);
|
||||
u64 mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
|
||||
unsigned long tcr;
|
||||
|
||||
/*
|
||||
|
@@ -2003,17 +1999,17 @@ static void __init cpu_prepare_hyp_mode(int cpu, u32 hyp_va_bits)
|
||||
tcr = read_sysreg(tcr_el1);
|
||||
if (cpus_have_final_cap(ARM64_KVM_HVHE)) {
|
||||
tcr &= ~(TCR_HD | TCR_HA | TCR_A1 | TCR_T0SZ_MASK);
|
||||
tcr |= TCR_EPD1_MASK;
|
||||
} else {
|
||||
unsigned long ips = FIELD_GET(TCR_IPS_MASK, tcr);
|
||||
|
||||
tcr &= TCR_EL2_MASK;
|
||||
tcr |= TCR_EL2_RES1;
|
||||
tcr |= TCR_EL2_RES1 | FIELD_PREP(TCR_EL2_PS_MASK, ips);
|
||||
if (lpa2_is_enabled())
|
||||
tcr |= TCR_EL2_DS;
|
||||
}
|
||||
tcr &= ~TCR_T0SZ_MASK;
|
||||
tcr |= TCR_T0SZ(hyp_va_bits);
|
||||
tcr &= ~TCR_EL2_PS_MASK;
|
||||
tcr |= FIELD_PREP(TCR_EL2_PS_MASK, kvm_get_parange(mmfr0));
|
||||
if (kvm_lpa2_is_enabled())
|
||||
tcr |= TCR_EL2_DS;
|
||||
params->tcr_el2 = tcr;
|
||||
|
||||
params->pgd_pa = kvm_mmu_get_httbr();
|
||||
|
@@ -2297,6 +2293,19 @@ static int __init init_subsystems(void)
break;
|
||||
case -ENODEV:
|
||||
case -ENXIO:
|
||||
/*
|
||||
* No VGIC? No pKVM for you.
|
||||
*
|
||||
* Protected mode assumes that VGICv3 is present, so no point
|
||||
* in trying to hobble along if vgic initialization fails.
|
||||
*/
|
||||
if (is_protected_kvm_enabled())
|
||||
goto out;
|
||||
|
||||
/*
|
||||
* Otherwise, userspace could choose to implement a GIC for its
|
||||
* guest on non-cooperative hardware.
|
||||
*/
|
||||
vgic_present = false;
|
||||
err = 0;
|
||||
break;
|
||||
|
@@ -2407,6 +2416,13 @@ static void kvm_hyp_init_symbols(void)
kvm_nvhe_sym(id_aa64smfr0_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64SMFR0_EL1);
|
||||
kvm_nvhe_sym(__icache_flags) = __icache_flags;
|
||||
kvm_nvhe_sym(kvm_arm_vmid_bits) = kvm_arm_vmid_bits;
|
||||
|
||||
/*
|
||||
* Flush entire BSS since part of its data containing init symbols is read
|
||||
* while the MMU is off.
|
||||
*/
|
||||
kvm_flush_dcache_to_poc(kvm_ksym_ref(__hyp_bss_start),
|
||||
kvm_ksym_ref(__hyp_bss_end) - kvm_ksym_ref(__hyp_bss_start));
|
||||
}
|
||||
|
||||
static int __init kvm_hyp_init_protection(u32 hyp_va_bits)
|
||||
|
@@ -2468,14 +2484,6 @@ static void finalize_init_hyp_mode(void)
per_cpu_ptr_nvhe_sym(kvm_host_data, cpu)->sve_state =
|
||||
kern_hyp_va(sve_state);
|
||||
}
|
||||
} else {
|
||||
for_each_possible_cpu(cpu) {
|
||||
struct user_fpsimd_state *fpsimd_state;
|
||||
|
||||
fpsimd_state = &per_cpu_ptr_nvhe_sym(kvm_host_data, cpu)->host_ctxt.fp_regs;
|
||||
per_cpu_ptr_nvhe_sym(kvm_host_data, cpu)->fpsimd_state =
|
||||
kern_hyp_va(fpsimd_state);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@@ -16,11 +16,6 @@
|
||||
#include "trace.h"
|
||||
|
||||
/* These are the bits of MDSCR_EL1 we may manipulate */
|
||||
#define MDSCR_EL1_DEBUG_MASK (DBG_MDSCR_SS | \
|
||||
DBG_MDSCR_KDE | \
|
||||
DBG_MDSCR_MDE)
|
||||
|
||||
static DEFINE_PER_CPU(u64, mdcr_el2);
|
||||
|
||||
/*
|
||||
|
|
|
@@ -64,24 +64,8 @@ void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu)
*/
|
||||
fpsimd_save_and_flush_cpu_state();
|
||||
*host_data_ptr(fp_owner) = FP_STATE_FREE;
|
||||
*host_data_ptr(fpsimd_state) = NULL;
|
||||
|
||||
vcpu_clear_flag(vcpu, HOST_SVE_ENABLED);
|
||||
if (read_sysreg(cpacr_el1) & CPACR_EL1_ZEN_EL0EN)
|
||||
vcpu_set_flag(vcpu, HOST_SVE_ENABLED);
|
||||
|
||||
if (system_supports_sme()) {
|
||||
vcpu_clear_flag(vcpu, HOST_SME_ENABLED);
|
||||
if (read_sysreg(cpacr_el1) & CPACR_EL1_SMEN_EL0EN)
|
||||
vcpu_set_flag(vcpu, HOST_SME_ENABLED);
|
||||
}
|
||||
|
||||
/*
|
||||
* If normal guests gain SME support, maintain this behavior for pKVM
|
||||
* guests, which don't support SME.
|
||||
*/
|
||||
WARN_ON(is_protected_kvm_enabled() && system_supports_sme() &&
|
||||
read_sysreg_s(SYS_SVCR));
|
||||
WARN_ON_ONCE(system_supports_sme() && read_sysreg_s(SYS_SVCR));
|
||||
}
|
||||
|
||||
/*
|
||||
|
@@ -146,52 +130,7 @@ void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu)
|
||||
local_irq_save(flags);
|
||||
|
||||
/*
|
||||
* If we have VHE then the Hyp code will reset CPACR_EL1 to
|
||||
* the default value and we need to reenable SME.
|
||||
*/
|
||||
if (has_vhe() && system_supports_sme()) {
|
||||
/* Also restore EL0 state seen on entry */
|
||||
if (vcpu_get_flag(vcpu, HOST_SME_ENABLED))
|
||||
sysreg_clear_set(CPACR_EL1, 0, CPACR_ELx_SMEN);
|
||||
else
|
||||
sysreg_clear_set(CPACR_EL1,
|
||||
CPACR_EL1_SMEN_EL0EN,
|
||||
CPACR_EL1_SMEN_EL1EN);
|
||||
isb();
|
||||
}
|
||||
|
||||
if (guest_owns_fp_regs()) {
|
||||
if (vcpu_has_sve(vcpu)) {
|
||||
u64 zcr = read_sysreg_el1(SYS_ZCR);
|
||||
|
||||
/*
|
||||
* If the vCPU is in the hyp context then ZCR_EL1 is
|
||||
* loaded with its vEL2 counterpart.
|
||||
*/
|
||||
__vcpu_sys_reg(vcpu, vcpu_sve_zcr_elx(vcpu)) = zcr;
|
||||
|
||||
/*
|
||||
* Restore the VL that was saved when bound to the CPU,
|
||||
* which is the maximum VL for the guest. Because the
|
||||
* layout of the data when saving the sve state depends
|
||||
* on the VL, we need to use a consistent (i.e., the
|
||||
* maximum) VL.
|
||||
* Note that this means that at guest exit ZCR_EL1 is
|
||||
* not necessarily the same as on guest entry.
|
||||
*
|
||||
* ZCR_EL2 holds the guest hypervisor's VL when running
|
||||
* a nested guest, which could be smaller than the
|
||||
* max for the vCPU. Similar to above, we first need to
|
||||
* switch to a VL consistent with the layout of the
|
||||
* vCPU's SVE state. KVM support for NV implies VHE, so
|
||||
* using the ZCR_EL1 alias is safe.
|
||||
*/
|
||||
if (!has_vhe() || (vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu)))
|
||||
sve_cond_update_zcr_vq(vcpu_sve_max_vq(vcpu) - 1,
|
||||
SYS_ZCR_EL1);
|
||||
}
|
||||
|
||||
/*
|
||||
* Flush (save and invalidate) the fpsimd/sve state so that if
|
||||
* the host tries to use fpsimd/sve, it's not using stale data
|
||||
|
@@ -203,18 +142,6 @@ void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu)
* when needed.
|
||||
*/
|
||||
fpsimd_save_and_flush_cpu_state();
|
||||
} else if (has_vhe() && system_supports_sve()) {
|
||||
/*
|
||||
* The FPSIMD/SVE state in the CPU has not been touched, and we
|
||||
* have SVE (and VHE): CPACR_EL1 (alias CPTR_EL2) has been
|
||||
* reset by kvm_reset_cptr_el2() in the Hyp code, disabling SVE
|
||||
* for EL0. To avoid spurious traps, restore the trap state
|
||||
* seen by kvm_arch_vcpu_load_fp():
|
||||
*/
|
||||
if (vcpu_get_flag(vcpu, HOST_SVE_ENABLED))
|
||||
sysreg_clear_set(CPACR_EL1, 0, CPACR_EL1_ZEN_EL0EN);
|
||||
else
|
||||
sysreg_clear_set(CPACR_EL1, CPACR_EL1_ZEN_EL0EN, 0);
|
||||
}
|
||||
|
||||
local_irq_restore(flags);
|
||||
|
|
|
@@ -44,6 +44,11 @@ alternative_if ARM64_HAS_RAS_EXTN
alternative_else_nop_endif
|
||||
mrs x1, isr_el1
|
||||
cbz x1, 1f
|
||||
|
||||
// Ensure that __guest_enter() always provides a context
|
||||
// synchronization event so that callers don't need ISBs for anything
|
||||
// that would usually be synchonized by the ERET.
|
||||
isb
|
||||
mov x0, #ARM_EXCEPTION_IRQ
|
||||
ret
|
||||
|
||||
|
|
|
@@ -295,7 +295,7 @@ static inline bool __populate_fault_info(struct kvm_vcpu *vcpu)
return __get_fault_info(vcpu->arch.fault.esr_el2, &vcpu->arch.fault);
|
||||
}
|
||||
|
||||
static bool kvm_hyp_handle_mops(struct kvm_vcpu *vcpu, u64 *exit_code)
|
||||
static inline bool kvm_hyp_handle_mops(struct kvm_vcpu *vcpu, u64 *exit_code)
|
||||
{
|
||||
*vcpu_pc(vcpu) = read_sysreg_el2(SYS_ELR);
|
||||
arm64_mops_reset_regs(vcpu_gp_regs(vcpu), vcpu->arch.fault.esr_el2);
|
||||
|
@@ -344,7 +344,87 @@ static inline void __hyp_sve_save_host(void)
true);
|
||||
}
|
||||
|
||||
static void kvm_hyp_save_fpsimd_host(struct kvm_vcpu *vcpu);
|
||||
static inline void fpsimd_lazy_switch_to_guest(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
u64 zcr_el1, zcr_el2;
|
||||
|
||||
if (!guest_owns_fp_regs())
|
||||
return;
|
||||
|
||||
if (vcpu_has_sve(vcpu)) {
|
||||
/* A guest hypervisor may restrict the effective max VL. */
|
||||
if (vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu))
|
||||
zcr_el2 = __vcpu_sys_reg(vcpu, ZCR_EL2);
|
||||
else
|
||||
zcr_el2 = vcpu_sve_max_vq(vcpu) - 1;
|
||||
|
||||
write_sysreg_el2(zcr_el2, SYS_ZCR);
|
||||
|
||||
zcr_el1 = __vcpu_sys_reg(vcpu, vcpu_sve_zcr_elx(vcpu));
|
||||
write_sysreg_el1(zcr_el1, SYS_ZCR);
|
||||
}
|
||||
}
|
||||
|
||||
static inline void fpsimd_lazy_switch_to_host(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
u64 zcr_el1, zcr_el2;
|
||||
|
||||
if (!guest_owns_fp_regs())
|
||||
return;
|
||||
|
||||
/*
|
||||
* When the guest owns the FP regs, we know that guest+hyp traps for
|
||||
* any FPSIMD/SVE/SME features exposed to the guest have been disabled
|
||||
* by either fpsimd_lazy_switch_to_guest() or kvm_hyp_handle_fpsimd()
|
||||
* prior to __guest_entry(). As __guest_entry() guarantees a context
|
||||
* synchronization event, we don't need an ISB here to avoid taking
|
||||
* traps for anything that was exposed to the guest.
|
||||
*/
|
||||
if (vcpu_has_sve(vcpu)) {
|
||||
zcr_el1 = read_sysreg_el1(SYS_ZCR);
|
||||
__vcpu_sys_reg(vcpu, vcpu_sve_zcr_elx(vcpu)) = zcr_el1;
|
||||
|
||||
/*
|
||||
* The guest's state is always saved using the guest's max VL.
|
||||
* Ensure that the host has the guest's max VL active such that
|
||||
* the host can save the guest's state lazily, but don't
|
||||
* artificially restrict the host to the guest's max VL.
|
||||
*/
|
||||
if (has_vhe()) {
|
||||
zcr_el2 = vcpu_sve_max_vq(vcpu) - 1;
|
||||
write_sysreg_el2(zcr_el2, SYS_ZCR);
|
||||
} else {
|
||||
zcr_el2 = sve_vq_from_vl(kvm_host_sve_max_vl) - 1;
|
||||
write_sysreg_el2(zcr_el2, SYS_ZCR);
|
||||
|
||||
zcr_el1 = vcpu_sve_max_vq(vcpu) - 1;
|
||||
write_sysreg_el1(zcr_el1, SYS_ZCR);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static void kvm_hyp_save_fpsimd_host(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
/*
|
||||
* Non-protected kvm relies on the host restoring its sve state.
|
||||
* Protected kvm restores the host's sve state as not to reveal that
|
||||
* fpsimd was used by a guest nor leak upper sve bits.
|
||||
*/
|
||||
if (system_supports_sve()) {
|
||||
__hyp_sve_save_host();
|
||||
|
||||
/* Re-enable SVE traps if not supported for the guest vcpu. */
|
||||
if (!vcpu_has_sve(vcpu))
|
||||
cpacr_clear_set(CPACR_ELx_ZEN, 0);
|
||||
|
||||
} else {
|
||||
__fpsimd_save_state(host_data_ptr(host_ctxt.fp_regs));
|
||||
}
|
||||
|
||||
if (kvm_has_fpmr(kern_hyp_va(vcpu->kvm)))
|
||||
*host_data_ptr(fpmr) = read_sysreg_s(SYS_FPMR);
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
* We trap the first access to the FP/SIMD to save the host context and
|
||||
|
@@ -352,7 +432,7 @@ static void kvm_hyp_save_fpsimd_host(struct kvm_vcpu *vcpu);
* If FP/SIMD is not implemented, handle the trap and inject an undefined
|
||||
* instruction exception to the guest. Similarly for trapped SVE accesses.
|
||||
*/
|
||||
static bool kvm_hyp_handle_fpsimd(struct kvm_vcpu *vcpu, u64 *exit_code)
|
||||
static inline bool kvm_hyp_handle_fpsimd(struct kvm_vcpu *vcpu, u64 *exit_code)
|
||||
{
|
||||
bool sve_guest;
|
||||
u8 esr_ec;
|
||||
|
@@ -394,7 +474,7 @@ static bool kvm_hyp_handle_fpsimd(struct kvm_vcpu *vcpu, u64 *exit_code)
isb();
|
||||
|
||||
/* Write out the host state if it's in the registers */
|
||||
if (host_owns_fp_regs())
|
||||
if (is_protected_kvm_enabled() && host_owns_fp_regs())
|
||||
kvm_hyp_save_fpsimd_host(vcpu);
|
||||
|
||||
/* Restore the guest state */
|
||||
|
@@ -540,7 +620,7 @@ static bool handle_ampere1_tcr(struct kvm_vcpu *vcpu)
return true;
|
||||
}
|
||||
|
||||
static bool kvm_hyp_handle_sysreg(struct kvm_vcpu *vcpu, u64 *exit_code)
|
||||
static inline bool kvm_hyp_handle_sysreg(struct kvm_vcpu *vcpu, u64 *exit_code)
|
||||
{
|
||||
if (cpus_have_final_cap(ARM64_WORKAROUND_CAVIUM_TX2_219_TVM) &&
|
||||
handle_tx2_tvm(vcpu))
|
||||
|
@@ -560,7 +640,7 @@ static bool kvm_hyp_handle_sysreg(struct kvm_vcpu *vcpu, u64 *exit_code)
return false;
|
||||
}
|
||||
|
||||
static bool kvm_hyp_handle_cp15_32(struct kvm_vcpu *vcpu, u64 *exit_code)
|
||||
static inline bool kvm_hyp_handle_cp15_32(struct kvm_vcpu *vcpu, u64 *exit_code)
|
||||
{
|
||||
if (static_branch_unlikely(&vgic_v3_cpuif_trap) &&
|
||||
__vgic_v3_perform_cpuif_access(vcpu) == 1)
|
||||
|
@@ -569,19 +649,18 @@ static bool kvm_hyp_handle_cp15_32(struct kvm_vcpu *vcpu, u64 *exit_code)
return false;
|
||||
}
|
||||
|
||||
static bool kvm_hyp_handle_memory_fault(struct kvm_vcpu *vcpu, u64 *exit_code)
|
||||
static inline bool kvm_hyp_handle_memory_fault(struct kvm_vcpu *vcpu,
|
||||
u64 *exit_code)
|
||||
{
|
||||
if (!__populate_fault_info(vcpu))
|
||||
return true;
|
||||
|
||||
return false;
|
||||
}
|
||||
static bool kvm_hyp_handle_iabt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
|
||||
__alias(kvm_hyp_handle_memory_fault);
|
||||
static bool kvm_hyp_handle_watchpt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
|
||||
__alias(kvm_hyp_handle_memory_fault);
|
||||
#define kvm_hyp_handle_iabt_low kvm_hyp_handle_memory_fault
|
||||
#define kvm_hyp_handle_watchpt_low kvm_hyp_handle_memory_fault
|
||||
|
||||
static bool kvm_hyp_handle_dabt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
|
||||
static inline bool kvm_hyp_handle_dabt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
|
||||
{
|
||||
if (kvm_hyp_handle_memory_fault(vcpu, exit_code))
|
||||
return true;
|
||||
|
@@ -611,23 +690,16 @@ static bool kvm_hyp_handle_dabt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
|
||||
typedef bool (*exit_handler_fn)(struct kvm_vcpu *, u64 *);
|
||||
|
||||
static const exit_handler_fn *kvm_get_exit_handler_array(struct kvm_vcpu *vcpu);
|
||||
|
||||
static void early_exit_filter(struct kvm_vcpu *vcpu, u64 *exit_code);
|
||||
|
||||
/*
|
||||
* Allow the hypervisor to handle the exit with an exit handler if it has one.
|
||||
*
|
||||
* Returns true if the hypervisor handled the exit, and control should go back
|
||||
* to the guest, or false if it hasn't.
|
||||
*/
|
||||
static inline bool kvm_hyp_handle_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
|
||||
static inline bool kvm_hyp_handle_exit(struct kvm_vcpu *vcpu, u64 *exit_code,
|
||||
const exit_handler_fn *handlers)
|
||||
{
|
||||
const exit_handler_fn *handlers = kvm_get_exit_handler_array(vcpu);
|
||||
exit_handler_fn fn;
|
||||
|
||||
fn = handlers[kvm_vcpu_trap_get_class(vcpu)];
|
||||
|
||||
exit_handler_fn fn = handlers[kvm_vcpu_trap_get_class(vcpu)];
|
||||
if (fn)
|
||||
return fn(vcpu, exit_code);
|
||||
|
||||
|
@@ -657,20 +729,9 @@ static inline void synchronize_vcpu_pstate(struct kvm_vcpu *vcpu, u64 *exit_code
* the guest, false when we should restore the host state and return to the
|
||||
* main run loop.
|
||||
*/
|
||||
static inline bool fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
|
||||
static inline bool __fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code,
|
||||
const exit_handler_fn *handlers)
|
||||
{
|
||||
/*
|
||||
* Save PSTATE early so that we can evaluate the vcpu mode
|
||||
* early on.
|
||||
*/
|
||||
synchronize_vcpu_pstate(vcpu, exit_code);
|
||||
|
||||
/*
|
||||
* Check whether we want to repaint the state one way or
|
||||
* another.
|
||||
*/
|
||||
early_exit_filter(vcpu, exit_code);
|
||||
|
||||
if (ARM_EXCEPTION_CODE(*exit_code) != ARM_EXCEPTION_IRQ)
|
||||
vcpu->arch.fault.esr_el2 = read_sysreg_el2(SYS_ESR);
|
||||
|
||||
|
@@ -700,7 +761,7 @@ static inline bool fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
goto exit;
|
||||
|
||||
/* Check if there's an exit handler and allow it to handle the exit. */
|
||||
if (kvm_hyp_handle_exit(vcpu, exit_code))
|
||||
if (kvm_hyp_handle_exit(vcpu, exit_code, handlers))
|
||||
goto guest;
|
||||
exit:
|
||||
/* Return to the host kernel and handle the exit */
|
||||
|
|
|
@@ -24,28 +24,25 @@
.align 11
|
||||
|
||||
SYM_CODE_START(__kvm_hyp_init)
|
||||
ventry __invalid // Synchronous EL2t
|
||||
ventry __invalid // IRQ EL2t
|
||||
ventry __invalid // FIQ EL2t
|
||||
ventry __invalid // Error EL2t
|
||||
ventry . // Synchronous EL2t
|
||||
ventry . // IRQ EL2t
|
||||
ventry . // FIQ EL2t
|
||||
ventry . // Error EL2t
|
||||
|
||||
ventry __invalid // Synchronous EL2h
|
||||
ventry __invalid // IRQ EL2h
|
||||
ventry __invalid // FIQ EL2h
|
||||
ventry __invalid // Error EL2h
|
||||
ventry . // Synchronous EL2h
|
||||
ventry . // IRQ EL2h
|
||||
ventry . // FIQ EL2h
|
||||
ventry . // Error EL2h
|
||||
|
||||
ventry __do_hyp_init // Synchronous 64-bit EL1
|
||||
ventry __invalid // IRQ 64-bit EL1
|
||||
ventry __invalid // FIQ 64-bit EL1
|
||||
ventry __invalid // Error 64-bit EL1
|
||||
ventry . // IRQ 64-bit EL1
|
||||
ventry . // FIQ 64-bit EL1
|
||||
ventry . // Error 64-bit EL1
|
||||
|
||||
ventry __invalid // Synchronous 32-bit EL1
|
||||
ventry __invalid // IRQ 32-bit EL1
|
||||
ventry __invalid // FIQ 32-bit EL1
|
||||
ventry __invalid // Error 32-bit EL1
|
||||
|
||||
__invalid:
|
||||
b .
|
||||
ventry . // Synchronous 32-bit EL1
|
||||
ventry . // IRQ 32-bit EL1
|
||||
ventry . // FIQ 32-bit EL1
|
||||
ventry . // Error 32-bit EL1
|
||||
|
||||
/*
|
||||
* Only uses x0..x3 so as to not clobber callee-saved SMCCC registers.
|
||||
|
@@ -76,6 +73,17 @@ __do_hyp_init:
eret
|
||||
SYM_CODE_END(__kvm_hyp_init)
|
||||
|
||||
/*
|
||||
* Initialize EL2 CPU state to sane values.
|
||||
*
|
||||
* HCR_EL2.E2H must have been initialized already.
|
||||
*/
|
||||
SYM_CODE_START_LOCAL(__kvm_init_el2_state)
|
||||
init_el2_state // Clobbers x0..x2
|
||||
finalise_el2_state
|
||||
ret
|
||||
SYM_CODE_END(__kvm_init_el2_state)
|
||||
|
||||
/*
|
||||
* Initialize the hypervisor in EL2.
|
||||
*
|
||||
|
@@ -102,9 +110,12 @@ SYM_CODE_START_LOCAL(___kvm_hyp_init)
// TPIDR_EL2 is used to preserve x0 across the macro maze...
|
||||
isb
|
||||
msr tpidr_el2, x0
|
||||
init_el2_state
|
||||
finalise_el2_state
|
||||
str lr, [x0, #NVHE_INIT_TMP]
|
||||
|
||||
bl __kvm_init_el2_state
|
||||
|
||||
mrs x0, tpidr_el2
|
||||
ldr lr, [x0, #NVHE_INIT_TMP]
|
||||
|
||||
1:
|
||||
ldr x1, [x0, #NVHE_INIT_TPIDR_EL2]
|
||||
|
@@ -199,10 +210,9 @@ SYM_CODE_START_LOCAL(__kvm_hyp_init_cpu)
|
||||
2: msr SPsel, #1 // We want to use SP_EL{1,2}
|
||||
|
||||
/* Initialize EL2 CPU state to sane values. */
|
||||
init_el2_state // Clobbers x0..x2
|
||||
finalise_el2_state
|
||||
__init_el2_nvhe_prepare_eret
|
||||
init_el2_hcr 0
|
||||
|
||||
bl __kvm_init_el2_state
|
||||
|
||||
/* Enable MMU, set vectors and stack. */
|
||||
mov x0, x28
|
||||
|
|
|
@@ -5,6 +5,7 @@
*/
|
||||
|
||||
#include <hyp/adjust_pc.h>
|
||||
#include <hyp/switch.h>
|
||||
|
||||
#include <asm/pgtable-types.h>
|
||||
#include <asm/kvm_asm.h>
|
||||
|
@@ -77,7 +78,7 @@ static void fpsimd_sve_sync(struct kvm_vcpu *vcpu)
if (system_supports_sve())
|
||||
__hyp_sve_restore_host();
|
||||
else
|
||||
__fpsimd_restore_state(*host_data_ptr(fpsimd_state));
|
||||
__fpsimd_restore_state(host_data_ptr(host_ctxt.fp_regs));
|
||||
|
||||
*host_data_ptr(fp_owner) = FP_STATE_HOST_OWNED;
|
||||
}
|
||||
|
@@ -169,8 +170,9 @@ static void handle___kvm_vcpu_run(struct kvm_cpu_context *host_ctxt)
sync_hyp_vcpu(hyp_vcpu);
|
||||
pkvm_put_hyp_vcpu(hyp_vcpu);
|
||||
} else {
|
||||
/* The host is fully trusted, run its vCPU directly. */
|
||||
fpsimd_lazy_switch_to_guest(host_vcpu);
|
||||
ret = __kvm_vcpu_run(host_vcpu);
|
||||
fpsimd_lazy_switch_to_host(host_vcpu);
|
||||
}
|
||||
|
||||
out:
|
||||
|
@@ -471,12 +473,6 @@ void handle_trap(struct kvm_cpu_context *host_ctxt)
case ESR_ELx_EC_SMC64:
|
||||
handle_host_smc(host_ctxt);
|
||||
break;
|
||||
case ESR_ELx_EC_SVE:
|
||||
cpacr_clear_set(0, CPACR_ELx_ZEN);
|
||||
isb();
|
||||
sve_cond_update_zcr_vq(sve_vq_from_vl(kvm_host_sve_max_vl) - 1,
|
||||
SYS_ZCR_EL2);
|
||||
break;
|
||||
case ESR_ELx_EC_IABT_LOW:
|
||||
case ESR_ELx_EC_DABT_LOW:
|
||||
handle_host_mem_abort(host_ctxt);
|
||||
|
|
|
@@ -783,9 +783,6 @@ static int hyp_ack_unshare(u64 addr, const struct pkvm_mem_transition *tx)
if (tx->initiator.id == PKVM_ID_HOST && hyp_page_count((void *)addr))
|
||||
return -EBUSY;
|
||||
|
||||
if (__hyp_ack_skip_pgtable_check(tx))
|
||||
return 0;
|
||||
|
||||
return __hyp_check_page_state_range(addr, size,
|
||||
PKVM_PAGE_SHARED_BORROWED);
|
||||
}
|
||||
|
|
|
@@ -31,8 +31,6 @@ static void pvm_init_traps_aa64pfr0(struct kvm_vcpu *vcpu)
const u64 feature_ids = pvm_read_id_reg(vcpu, SYS_ID_AA64PFR0_EL1);
|
||||
u64 hcr_set = HCR_RW;
|
||||
u64 hcr_clear = 0;
|
||||
u64 cptr_set = 0;
|
||||
u64 cptr_clear = 0;
|
||||
|
||||
/* Protected KVM does not support AArch32 guests. */
|
||||
BUILD_BUG_ON(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL0),
|
||||
|
@@ -52,31 +50,18 @@ static void pvm_init_traps_aa64pfr0(struct kvm_vcpu *vcpu)
if (has_hvhe())
|
||||
hcr_set |= HCR_E2H;
|
||||
|
||||
/* Trap RAS unless all current versions are supported */
|
||||
if (FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_RAS), feature_ids) <
|
||||
ID_AA64PFR0_EL1_RAS_V1P1) {
|
||||
/* Trap RAS */
|
||||
if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_RAS), feature_ids)) {
|
||||
hcr_set |= HCR_TERR | HCR_TEA;
|
||||
hcr_clear |= HCR_FIEN;
|
||||
}
|
||||
|
||||
/* Trap AMU */
|
||||
if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_AMU), feature_ids)) {
|
||||
if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_AMU), feature_ids))
|
||||
hcr_clear |= HCR_AMVOFFEN;
|
||||
cptr_set |= CPTR_EL2_TAM;
|
||||
}
|
||||
|
||||
/* Trap SVE */
|
||||
if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_SVE), feature_ids)) {
|
||||
if (has_hvhe())
|
||||
cptr_clear |= CPACR_ELx_ZEN;
|
||||
else
|
||||
cptr_set |= CPTR_EL2_TZ;
|
||||
}
|
||||
|
||||
vcpu->arch.hcr_el2 |= hcr_set;
|
||||
vcpu->arch.hcr_el2 &= ~hcr_clear;
|
||||
vcpu->arch.cptr_el2 |= cptr_set;
|
||||
vcpu->arch.cptr_el2 &= ~cptr_clear;
|
||||
}
|
||||
|
||||
/*
|
||||
|
@@ -106,7 +91,6 @@ static void pvm_init_traps_aa64dfr0(struct kvm_vcpu *vcpu)
const u64 feature_ids = pvm_read_id_reg(vcpu, SYS_ID_AA64DFR0_EL1);
|
||||
u64 mdcr_set = 0;
|
||||
u64 mdcr_clear = 0;
|
||||
u64 cptr_set = 0;
|
||||
|
||||
/* Trap/constrain PMU */
|
||||
if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer), feature_ids)) {
|
||||
|
@@ -133,21 +117,12 @@ static void pvm_init_traps_aa64dfr0(struct kvm_vcpu *vcpu)
if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_TraceFilt), feature_ids))
|
||||
mdcr_set |= MDCR_EL2_TTRF;
|
||||
|
||||
/* Trap Trace */
|
||||
if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_TraceVer), feature_ids)) {
|
||||
if (has_hvhe())
|
||||
cptr_set |= CPACR_EL1_TTA;
|
||||
else
|
||||
cptr_set |= CPTR_EL2_TTA;
|
||||
}
|
||||
|
||||
/* Trap External Trace */
|
||||
if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_ExtTrcBuff), feature_ids))
|
||||
mdcr_clear |= MDCR_EL2_E2TB_MASK << MDCR_EL2_E2TB_SHIFT;
|
||||
|
||||
vcpu->arch.mdcr_el2 |= mdcr_set;
|
||||
vcpu->arch.mdcr_el2 &= ~mdcr_clear;
|
||||
vcpu->arch.cptr_el2 |= cptr_set;
|
||||
}
|
||||
|
||||
/*
|
||||
|
@@ -198,10 +173,6 @@ static void pvm_init_trap_regs(struct kvm_vcpu *vcpu)
/* Clear res0 and set res1 bits to trap potential new features. */
|
||||
vcpu->arch.hcr_el2 &= ~(HCR_RES0);
|
||||
vcpu->arch.mdcr_el2 &= ~(MDCR_EL2_RES0);
|
||||
if (!has_hvhe()) {
|
||||
vcpu->arch.cptr_el2 |= CPTR_NVHE_EL2_RES1;
|
||||
vcpu->arch.cptr_el2 &= ~(CPTR_NVHE_EL2_RES0);
|
||||
}
|
||||
}
|
||||
|
||||
static void pkvm_vcpu_reset_hcr(struct kvm_vcpu *vcpu)
|
||||
|
@@ -229,6 +200,9 @@ static void pkvm_vcpu_reset_hcr(struct kvm_vcpu *vcpu)
|
||||
if (vcpu_has_ptrauth(vcpu))
|
||||
vcpu->arch.hcr_el2 |= (HCR_API | HCR_APK);
|
||||
|
||||
if (kvm_has_mte(vcpu->kvm))
|
||||
vcpu->arch.hcr_el2 |= HCR_ATA;
|
||||
}
|
||||
|
||||
/*
|
||||
|
@@ -236,7 +210,6 @@ static void pkvm_vcpu_reset_hcr(struct kvm_vcpu *vcpu)
*/
|
||||
static void pkvm_vcpu_init_traps(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
vcpu->arch.cptr_el2 = kvm_get_reset_cptr_el2(vcpu);
|
||||
vcpu->arch.mdcr_el2 = 0;
|
||||
|
||||
pkvm_vcpu_reset_hcr(vcpu);
|
||||
|
@@ -332,6 +305,9 @@ static void pkvm_init_features_from_host(struct pkvm_hyp_vm *hyp_vm, const struc
struct kvm *kvm = &hyp_vm->kvm;
|
||||
DECLARE_BITMAP(allowed_features, KVM_VCPU_MAX_FEATURES);
|
||||
|
||||
if (test_bit(KVM_ARCH_FLAG_MTE_ENABLED, &host_kvm->arch.flags))
|
||||
set_bit(KVM_ARCH_FLAG_MTE_ENABLED, &kvm->arch.flags);
|
||||
|
||||
/* No restrictions for non-protected VMs. */
|
||||
if (!kvm_vm_is_protected(kvm)) {
|
||||
bitmap_copy(kvm->arch.vcpu_features,
|
||||
|
@@ -693,8 +669,6 @@ unlock:
return ret;
|
||||
}
|
||||
|
||||
hyp_vcpu->vcpu.arch.cptr_el2 = kvm_get_reset_cptr_el2(&hyp_vcpu->vcpu);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
|
@@ -218,6 +218,9 @@ asmlinkage void __noreturn __kvm_host_psci_cpu_entry(bool is_cpu_on)
if (is_cpu_on)
|
||||
release_boot_args(boot_args);
|
||||
|
||||
write_sysreg_el1(INIT_SCTLR_EL1_MMU_OFF, SYS_SCTLR);
|
||||
write_sysreg(INIT_PSTATE_EL1, SPSR_EL2);
|
||||
|
||||
__host_enter(host_ctxt);
|
||||
}
|
||||
|
||||
|
|
|
@@ -36,33 +36,41 @@ DEFINE_PER_CPU(unsigned long, kvm_hyp_vector);
|
||||
extern void kvm_nvhe_prepare_backtrace(unsigned long fp, unsigned long pc);
|
||||
|
||||
static void __activate_cptr_traps(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
u64 val = CPTR_EL2_TAM; /* Same bit irrespective of E2H */
|
||||
|
||||
if (has_hvhe()) {
|
||||
val |= CPACR_ELx_TTA;
|
||||
|
||||
if (guest_owns_fp_regs()) {
|
||||
val |= CPACR_ELx_FPEN;
|
||||
if (vcpu_has_sve(vcpu))
|
||||
val |= CPACR_ELx_ZEN;
|
||||
}
|
||||
} else {
|
||||
val |= CPTR_EL2_TTA | CPTR_NVHE_EL2_RES1;
|
||||
|
||||
/*
|
||||
* Always trap SME since it's not supported in KVM.
|
||||
* TSM is RES1 if SME isn't implemented.
|
||||
*/
|
||||
val |= CPTR_EL2_TSM;
|
||||
|
||||
if (!vcpu_has_sve(vcpu) || !guest_owns_fp_regs())
|
||||
val |= CPTR_EL2_TZ;
|
||||
|
||||
if (!guest_owns_fp_regs())
|
||||
val |= CPTR_EL2_TFP;
|
||||
}
|
||||
}
|
||||
|
||||
static void __activate_traps(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
u64 val;
|
||||
|
||||
___activate_traps(vcpu, vcpu->arch.hcr_el2);
|
||||
__activate_traps_common(vcpu);
|
||||
__activate_cptr_traps(vcpu);
|
||||
|
||||
val = vcpu->arch.cptr_el2;
|
||||
val |= CPTR_EL2_TAM; /* Same bit irrespective of E2H */
|
||||
val |= has_hvhe() ? CPACR_EL1_TTA : CPTR_EL2_TTA;
|
||||
if (cpus_have_final_cap(ARM64_SME)) {
|
||||
if (has_hvhe())
|
||||
val &= ~CPACR_ELx_SMEN;
|
||||
else
|
||||
val |= CPTR_EL2_TSM;
|
||||
}
|
||||
|
||||
if (!guest_owns_fp_regs()) {
|
||||
if (has_hvhe())
|
||||
val &= ~(CPACR_ELx_FPEN | CPACR_ELx_ZEN);
|
||||
else
|
||||
val |= CPTR_EL2_TFP | CPTR_EL2_TZ;
|
||||
|
||||
__activate_traps_fpsimd32(vcpu);
|
||||
}
|
||||
|
||||
kvm_write_cptr_el2(val);
|
||||
write_sysreg(__this_cpu_read(kvm_hyp_vector), vbar_el2);
|
||||
|
||||
if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
|
||||
|
@@ -180,25 +188,6 @@ static bool kvm_handle_pvm_sys64(struct kvm_vcpu *vcpu, u64 *exit_code)
kvm_handle_pvm_sysreg(vcpu, exit_code));
|
||||
}
|
||||
|
||||
static void kvm_hyp_save_fpsimd_host(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
/*
|
||||
* Non-protected kvm relies on the host restoring its sve state.
|
||||
* Protected kvm restores the host's sve state as not to reveal that
|
||||
* fpsimd was used by a guest nor leak upper sve bits.
|
||||
*/
|
||||
if (unlikely(is_protected_kvm_enabled() && system_supports_sve())) {
|
||||
__hyp_sve_save_host();
|
||||
|
||||
/* Re-enable SVE traps if not supported for the guest vcpu. */
|
||||
if (!vcpu_has_sve(vcpu))
|
||||
cpacr_clear_set(CPACR_ELx_ZEN, 0);
|
||||
|
||||
} else {
|
||||
__fpsimd_save_state(*host_data_ptr(fpsimd_state));
|
||||
}
|
||||
}
|
||||
|
||||
static const exit_handler_fn hyp_exit_handlers[] = {
|
||||
[0 ... ESR_ELx_EC_MAX] = NULL,
|
||||
[ESR_ELx_EC_CP15_32] = kvm_hyp_handle_cp15_32,
|
||||
|
@@ -230,19 +219,21 @@ static const exit_handler_fn *kvm_get_exit_handler_array(struct kvm_vcpu *vcpu)
return hyp_exit_handlers;
|
||||
}
|
||||
|
||||
/*
|
||||
* Some guests (e.g., protected VMs) are not be allowed to run in AArch32.
|
||||
* The ARMv8 architecture does not give the hypervisor a mechanism to prevent a
|
||||
* guest from dropping to AArch32 EL0 if implemented by the CPU. If the
|
||||
* hypervisor spots a guest in such a state ensure it is handled, and don't
|
||||
* trust the host to spot or fix it. The check below is based on the one in
|
||||
* kvm_arch_vcpu_ioctl_run().
|
||||
*
|
||||
* Returns false if the guest ran in AArch32 when it shouldn't have, and
|
||||
* thus should exit to the host, or true if a the guest run loop can continue.
|
||||
*/
|
||||
static void early_exit_filter(struct kvm_vcpu *vcpu, u64 *exit_code)
|
||||
static inline bool fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
|
||||
{
|
||||
const exit_handler_fn *handlers = kvm_get_exit_handler_array(vcpu);
|
||||
|
||||
synchronize_vcpu_pstate(vcpu, exit_code);
|
||||
|
||||
/*
|
||||
* Some guests (e.g., protected VMs) are not be allowed to run in
|
||||
* AArch32. The ARMv8 architecture does not give the hypervisor a
|
||||
* mechanism to prevent a guest from dropping to AArch32 EL0 if
|
||||
* implemented by the CPU. If the hypervisor spots a guest in such a
|
||||
* state ensure it is handled, and don't trust the host to spot or fix
|
||||
* it. The check below is based on the one in
|
||||
* kvm_arch_vcpu_ioctl_run().
|
||||
*/
|
||||
if (unlikely(vcpu_is_protected(vcpu) && vcpu_mode_is_32bit(vcpu))) {
|
||||
/*
|
||||
* As we have caught the guest red-handed, decide that it isn't
|
||||
|
@@ -255,6 +246,8 @@ static void early_exit_filter(struct kvm_vcpu *vcpu, u64 *exit_code)
*exit_code &= BIT(ARM_EXIT_WITH_SERROR_BIT);
|
||||
*exit_code |= ARM_EXCEPTION_IL;
|
||||
}
|
||||
|
||||
return __fixup_guest_exit(vcpu, exit_code, handlers);
|
||||
}
|
||||
|
||||
/* Switch to the guest for legacy non-VHE systems */
|
||||
|
|
|
@@ -77,14 +77,6 @@ static bool kvm_pgtable_walk_skip_cmo(const struct kvm_pgtable_visit_ctx *ctx)
return unlikely(ctx->flags & KVM_PGTABLE_WALK_SKIP_CMO);
|
||||
}
|
||||
|
||||
static bool kvm_phys_is_valid(u64 phys)
|
||||
{
|
||||
u64 parange_max = kvm_get_parange_max();
|
||||
u8 shift = id_aa64mmfr0_parange_to_phys_shift(parange_max);
|
||||
|
||||
return phys < BIT(shift);
|
||||
}
|
||||
|
||||
static bool kvm_block_mapping_supported(const struct kvm_pgtable_visit_ctx *ctx, u64 phys)
|
||||
{
|
||||
u64 granule = kvm_granule_size(ctx->level);
|
||||
|
@@ -95,7 +87,7 @@ static bool kvm_block_mapping_supported(const struct kvm_pgtable_visit_ctx *ctx,
if (granule > (ctx->end - ctx->addr))
|
||||
return false;
|
||||
|
||||
if (kvm_phys_is_valid(phys) && !IS_ALIGNED(phys, granule))
|
||||
if (!IS_ALIGNED(phys, granule))
|
||||
return false;
|
||||
|
||||
return IS_ALIGNED(ctx->addr, granule);
|
||||
|
@@ -629,6 +621,9 @@ struct stage2_map_data {
|
||||
/* Force mappings to page granularity */
|
||||
bool force_pte;
|
||||
|
||||
/* Walk should update owner_id only */
|
||||
bool annotation;
|
||||
};
|
||||
|
||||
u64 kvm_get_vtcr(u64 mmfr0, u64 mmfr1, u32 phys_shift)
|
||||
|
@@ -927,18 +922,7 @@ static u64 stage2_map_walker_phys_addr(const struct kvm_pgtable_visit_ctx *ctx,
{
|
||||
u64 phys = data->phys;
|
||||
|
||||
/*
|
||||
* Stage-2 walks to update ownership data are communicated to the map
|
||||
* walker using an invalid PA. Avoid offsetting an already invalid PA,
|
||||
* which could overflow and make the address valid again.
|
||||
*/
|
||||
if (!kvm_phys_is_valid(phys))
|
||||
return phys;
|
||||
|
||||
/*
|
||||
* Otherwise, work out the correct PA based on how far the walk has
|
||||
* gotten.
|
||||
*/
|
||||
/* Work out the correct PA based on how far the walk has gotten */
|
||||
return phys + (ctx->addr - ctx->start);
|
||||
}
|
||||
|
||||
|
@@ -950,6 +934,9 @@ static bool stage2_leaf_mapping_allowed(const struct kvm_pgtable_visit_ctx *ctx,
if (data->force_pte && ctx->level < KVM_PGTABLE_LAST_LEVEL)
|
||||
return false;
|
||||
|
||||
if (data->annotation)
|
||||
return true;
|
||||
|
||||
return kvm_block_mapping_supported(ctx, phys);
|
||||
}
|
||||
|
||||
|
@@ -965,7 +952,7 @@ static int stage2_map_walker_try_leaf(const struct kvm_pgtable_visit_ctx *ctx,
if (!stage2_leaf_mapping_allowed(ctx, data))
|
||||
return -E2BIG;
|
||||
|
||||
if (kvm_phys_is_valid(phys))
|
||||
if (!data->annotation)
|
||||
new = kvm_init_valid_leaf_pte(phys, data->attr, ctx->level);
|
||||
else
|
||||
new = kvm_init_invalid_leaf_owner(data->owner_id);
|
||||
|
@@ -1127,11 +1114,11 @@ int kvm_pgtable_stage2_set_owner(struct kvm_pgtable *pgt, u64 addr, u64 size,
{
|
||||
int ret;
|
||||
struct stage2_map_data map_data = {
|
||||
.phys = KVM_PHYS_INVALID,
|
||||
.mmu = pgt->mmu,
|
||||
.memcache = mc,
|
||||
.owner_id = owner_id,
|
||||
.force_pte = true,
|
||||
.annotation = true,
|
||||
};
|
||||
struct kvm_pgtable_walker walker = {
|
||||
.cb = stage2_map_walker,
|
||||
|
|
|
@@ -308,11 +308,6 @@ static bool kvm_hyp_handle_eret(struct kvm_vcpu *vcpu, u64 *exit_code)
return true;
|
||||
}
|
||||
|
||||
static void kvm_hyp_save_fpsimd_host(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
__fpsimd_save_state(*host_data_ptr(fpsimd_state));
|
||||
}
|
||||
|
||||
static bool kvm_hyp_handle_tlbi_el2(struct kvm_vcpu *vcpu, u64 *exit_code)
|
||||
{
|
||||
int ret = -EINVAL;
|
||||
|
@@ -427,13 +422,10 @@ static const exit_handler_fn hyp_exit_handlers[] = {
[ESR_ELx_EC_MOPS] = kvm_hyp_handle_mops,
|
||||
};
|
||||
|
||||
static const exit_handler_fn *kvm_get_exit_handler_array(struct kvm_vcpu *vcpu)
|
||||
static inline bool fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
|
||||
{
|
||||
return hyp_exit_handlers;
|
||||
}
|
||||
synchronize_vcpu_pstate(vcpu, exit_code);
|
||||
|
||||
static void early_exit_filter(struct kvm_vcpu *vcpu, u64 *exit_code)
|
||||
{
|
||||
/*
|
||||
* If we were in HYP context on entry, adjust the PSTATE view
|
||||
* so that the usual helpers work correctly.
|
||||
|
@@ -453,6 +445,8 @@ static void early_exit_filter(struct kvm_vcpu *vcpu, u64 *exit_code)
*vcpu_cpsr(vcpu) &= ~(PSR_MODE_MASK | PSR_MODE32_BIT);
|
||||
*vcpu_cpsr(vcpu) |= mode;
|
||||
}
|
||||
|
||||
return __fixup_guest_exit(vcpu, exit_code, hyp_exit_handlers);
|
||||
}
|
||||
|
||||
/* Switch to the guest for VHE systems running in EL2 */
|
||||
|
@@ -467,6 +461,8 @@ static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
|
||||
sysreg_save_host_state_vhe(host_ctxt);
|
||||
|
||||
fpsimd_lazy_switch_to_guest(vcpu);
|
||||
|
||||
/*
|
||||
* Note that ARM erratum 1165522 requires us to configure both stage 1
|
||||
* and stage 2 translation for the guest context before we clear
|
||||
|
@@ -491,6 +487,8 @@ static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
|
||||
__deactivate_traps(vcpu);
|
||||
|
||||
fpsimd_lazy_switch_to_host(vcpu);
|
||||
|
||||
sysreg_restore_host_state_vhe(host_ctxt);
|
||||
|
||||
if (guest_owns_fp_regs())
|
||||
|
|
|
@@ -29,6 +29,8 @@ static unsigned long __ro_after_init hyp_idmap_start;
static unsigned long __ro_after_init hyp_idmap_end;
|
||||
static phys_addr_t __ro_after_init hyp_idmap_vector;
|
||||
|
||||
u32 __ro_after_init __hyp_va_bits;
|
||||
|
||||
static unsigned long __ro_after_init io_map_base;
|
||||
|
||||
static phys_addr_t __stage2_range_addr_end(phys_addr_t addr, phys_addr_t end,
|
||||
|
@@ -2049,6 +2051,7 @@ int __init kvm_mmu_init(u32 *hyp_va_bits)
goto out_destroy_pgtable;
|
||||
|
||||
io_map_base = hyp_idmap_start;
|
||||
__hyp_va_bits = *hyp_va_bits;
|
||||
return 0;
|
||||
|
||||
out_destroy_pgtable:
|
||||
|
|
|
@@ -68,26 +68,27 @@ int kvm_vcpu_init_nested(struct kvm_vcpu *vcpu)
if (!tmp)
|
||||
return -ENOMEM;
|
||||
|
||||
swap(kvm->arch.nested_mmus, tmp);
|
||||
|
||||
/*
|
||||
* If we went through a realocation, adjust the MMU back-pointers in
|
||||
* the previously initialised kvm_pgtable structures.
|
||||
*/
|
||||
if (kvm->arch.nested_mmus != tmp)
|
||||
for (int i = 0; i < kvm->arch.nested_mmus_size; i++)
|
||||
tmp[i].pgt->mmu = &tmp[i];
|
||||
kvm->arch.nested_mmus[i].pgt->mmu = &kvm->arch.nested_mmus[i];
|
||||
|
||||
for (int i = kvm->arch.nested_mmus_size; !ret && i < num_mmus; i++)
|
||||
ret = init_nested_s2_mmu(kvm, &tmp[i]);
|
||||
ret = init_nested_s2_mmu(kvm, &kvm->arch.nested_mmus[i]);
|
||||
|
||||
if (ret) {
|
||||
for (int i = kvm->arch.nested_mmus_size; i < num_mmus; i++)
|
||||
kvm_free_stage2_pgd(&tmp[i]);
|
||||
kvm_free_stage2_pgd(&kvm->arch.nested_mmus[i]);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
kvm->arch.nested_mmus_size = num_mmus;
|
||||
kvm->arch.nested_mmus = tmp;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@@ -820,8 +821,10 @@ static void limit_nv_id_regs(struct kvm *kvm)
NV_FTR(PFR0, RAS) |
|
||||
NV_FTR(PFR0, EL3) |
|
||||
NV_FTR(PFR0, EL2) |
|
||||
NV_FTR(PFR0, EL1));
|
||||
/* 64bit EL1/EL2/EL3 only */
|
||||
NV_FTR(PFR0, EL1) |
|
||||
NV_FTR(PFR0, EL0));
|
||||
/* 64bit only at any EL */
|
||||
val |= FIELD_PREP(NV_FTR(PFR0, EL0), 0b0001);
|
||||
val |= FIELD_PREP(NV_FTR(PFR0, EL1), 0b0001);
|
||||
val |= FIELD_PREP(NV_FTR(PFR0, EL2), 0b0001);
|
||||
val |= FIELD_PREP(NV_FTR(PFR0, EL3), 0b0001);
|
||||
|
|
|
@@ -24,6 +24,7 @@ static DEFINE_MUTEX(arm_pmus_lock);
|
||||
static void kvm_pmu_create_perf_event(struct kvm_pmc *pmc);
|
||||
static void kvm_pmu_release_perf_event(struct kvm_pmc *pmc);
|
||||
static bool kvm_pmu_counter_is_enabled(struct kvm_pmc *pmc);
|
||||
|
||||
static struct kvm_vcpu *kvm_pmc_to_vcpu(const struct kvm_pmc *pmc)
|
||||
{
|
||||
|
@@ -275,48 +276,25 @@ u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu)
return GENMASK(val - 1, 0) | BIT(ARMV8_PMU_CYCLE_IDX);
|
||||
}
|
||||
|
||||
/**
|
||||
* kvm_pmu_enable_counter_mask - enable selected PMU counters
|
||||
* @vcpu: The vcpu pointer
|
||||
* @val: the value guest writes to PMCNTENSET register
|
||||
*
|
||||
* Call perf_event_enable to start counting the perf event
|
||||
*/
|
||||
void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val)
|
||||
static void kvm_pmc_enable_perf_event(struct kvm_pmc *pmc)
|
||||
{
|
||||
int i;
|
||||
if (!kvm_vcpu_has_pmu(vcpu))
|
||||
if (!pmc->perf_event) {
|
||||
kvm_pmu_create_perf_event(pmc);
|
||||
return;
|
||||
|
||||
if (!(kvm_vcpu_read_pmcr(vcpu) & ARMV8_PMU_PMCR_E) || !val)
|
||||
return;
|
||||
|
||||
for (i = 0; i < KVM_ARMV8_PMU_MAX_COUNTERS; i++) {
|
||||
struct kvm_pmc *pmc;
|
||||
|
||||
if (!(val & BIT(i)))
|
||||
continue;
|
||||
|
||||
pmc = kvm_vcpu_idx_to_pmc(vcpu, i);
|
||||
|
||||
if (!pmc->perf_event) {
|
||||
kvm_pmu_create_perf_event(pmc);
|
||||
} else {
|
||||
perf_event_enable(pmc->perf_event);
|
||||
if (pmc->perf_event->state != PERF_EVENT_STATE_ACTIVE)
|
||||
kvm_debug("fail to enable perf event\n");
|
||||
}
|
||||
}
|
||||
|
||||
perf_event_enable(pmc->perf_event);
|
||||
if (pmc->perf_event->state != PERF_EVENT_STATE_ACTIVE)
|
||||
kvm_debug("fail to enable perf event\n");
|
||||
}
|
||||
|
||||
/**
|
||||
* kvm_pmu_disable_counter_mask - disable selected PMU counters
|
||||
* @vcpu: The vcpu pointer
|
||||
* @val: the value guest writes to PMCNTENCLR register
|
||||
*
|
||||
* Call perf_event_disable to stop counting the perf event
|
||||
*/
|
||||
void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val)
|
||||
static void kvm_pmc_disable_perf_event(struct kvm_pmc *pmc)
|
||||
{
|
||||
if (pmc->perf_event)
|
||||
perf_event_disable(pmc->perf_event);
|
||||
}
|
||||
|
||||
void kvm_pmu_reprogram_counter_mask(struct kvm_vcpu *vcpu, u64 val)
|
||||
{
|
||||
int i;
|
||||
|
||||
|
@@ -324,16 +302,18 @@ void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val)
return;
|
||||
|
||||
for (i = 0; i < KVM_ARMV8_PMU_MAX_COUNTERS; i++) {
|
||||
struct kvm_pmc *pmc;
|
||||
struct kvm_pmc *pmc = kvm_vcpu_idx_to_pmc(vcpu, i);
|
||||
|
||||
if (!(val & BIT(i)))
|
||||
continue;
|
||||
|
||||
pmc = kvm_vcpu_idx_to_pmc(vcpu, i);
|
||||
|
||||
if (pmc->perf_event)
|
||||
perf_event_disable(pmc->perf_event);
|
||||
if (kvm_pmu_counter_is_enabled(pmc))
|
||||
kvm_pmc_enable_perf_event(pmc);
|
||||
else
|
||||
kvm_pmc_disable_perf_event(pmc);
|
||||
}
|
||||
|
||||
kvm_vcpu_pmu_restore_guest(vcpu);
|
||||
}
|
||||
|
||||
static u64 kvm_pmu_overflow_status(struct kvm_vcpu *vcpu)
|
||||
|
@@ -558,17 +538,13 @@ void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
if (!kvm_has_feat(vcpu->kvm, ID_AA64DFR0_EL1, PMUVer, V3P5))
|
||||
val &= ~ARMV8_PMU_PMCR_LP;
|
||||
|
||||
/* Request a reload of the PMU to enable/disable affected counters */
|
||||
if ((__vcpu_sys_reg(vcpu, PMCR_EL0) ^ val) & ARMV8_PMU_PMCR_E)
|
||||
kvm_make_request(KVM_REQ_RELOAD_PMU, vcpu);
|
||||
|
||||
/* The reset bits don't indicate any state, and shouldn't be saved. */
|
||||
__vcpu_sys_reg(vcpu, PMCR_EL0) = val & ~(ARMV8_PMU_PMCR_C | ARMV8_PMU_PMCR_P);
|
||||
|
||||
if (val & ARMV8_PMU_PMCR_E) {
|
||||
kvm_pmu_enable_counter_mask(vcpu,
|
||||
__vcpu_sys_reg(vcpu, PMCNTENSET_EL0));
|
||||
} else {
|
||||
kvm_pmu_disable_counter_mask(vcpu,
|
||||
__vcpu_sys_reg(vcpu, PMCNTENSET_EL0));
|
||||
}
|
||||
|
||||
if (val & ARMV8_PMU_PMCR_C)
|
||||
kvm_pmu_set_counter_value(vcpu, ARMV8_PMU_CYCLE_IDX, 0);
|
||||
|
||||
|
@@ -578,7 +554,6 @@ void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
for_each_set_bit(i, &mask, 32)
|
||||
kvm_pmu_set_pmc_value(kvm_vcpu_idx_to_pmc(vcpu, i), 0, true);
|
||||
}
|
||||
kvm_vcpu_pmu_restore_guest(vcpu);
|
||||
}
|
||||
|
||||
static bool kvm_pmu_counter_is_enabled(struct kvm_pmc *pmc)
|
||||
|
@@ -805,11 +780,11 @@ void kvm_vcpu_reload_pmu(struct kvm_vcpu *vcpu)
{
|
||||
u64 mask = kvm_pmu_valid_counter_mask(vcpu);
|
||||
|
||||
kvm_pmu_handle_pmcr(vcpu, kvm_vcpu_read_pmcr(vcpu));
|
||||
|
||||
__vcpu_sys_reg(vcpu, PMOVSSET_EL0) &= mask;
|
||||
__vcpu_sys_reg(vcpu, PMINTENSET_EL1) &= mask;
|
||||
__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) &= mask;
|
||||
|
||||
kvm_pmu_reprogram_counter_mask(vcpu, mask);
|
||||
}
|
||||
|
||||
int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu)
|
||||
|
|
|
@@ -19,6 +19,7 @@
#include <linux/kvm.h>
|
||||
#include <linux/kvm_host.h>
|
||||
|
||||
#include <asm/kvm_mmu.h>
|
||||
#include <asm/stacktrace/nvhe.h>
|
||||
|
||||
static struct stack_info stackinfo_get_overflow(void)
|
||||
|
@@ -145,7 +146,7 @@ static void unwind(struct unwind_state *state,
*/
|
||||
static bool kvm_nvhe_dump_backtrace_entry(void *arg, unsigned long where)
|
||||
{
|
||||
unsigned long va_mask = GENMASK_ULL(vabits_actual - 1, 0);
|
||||
unsigned long va_mask = GENMASK_ULL(__hyp_va_bits - 1, 0);
|
||||
unsigned long hyp_offset = (unsigned long)arg;
|
||||
|
||||
/* Mask tags and convert to kern addr */
|
||||
|
|
|
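
A small standalone illustration of why the mask width matters in the hunk above: masking a hypervisor return address with the kernel's VA width can keep high bits that the hyp VA space never carries. GENMASK_ULL() is re-implemented locally, and the bit widths and the address are invented purely for the demonstration.

/* Userspace model of the VA-mask fix; values below are illustrative only. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t genmask_ull(unsigned int h, unsigned int l)
{
        return (~0ULL >> (63 - h)) & (~0ULL << l);
}

int main(void)
{
        unsigned int vabits_actual = 48;        /* kernel VA bits (assumed)     */
        unsigned int hyp_va_bits = 44;          /* hypervisor VA bits (assumed) */
        uint64_t where = 0x0000f91234567890ULL; /* fake hyp return address      */

        uint64_t kern_mask = genmask_ull(vabits_actual - 1, 0);
        uint64_t hyp_mask  = genmask_ull(hyp_va_bits - 1, 0);

        /* The wider kernel mask keeps bits [47:44], which are not hyp VA bits. */
        printf("kern mask 0x%016" PRIx64 " -> 0x%016" PRIx64 "\n",
               kern_mask, where & kern_mask);
        printf("hyp  mask 0x%016" PRIx64 " -> 0x%016" PRIx64 "\n",
               hyp_mask, where & hyp_mask);
        return 0;
}
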
@@ -1175,16 +1175,14 @@ static bool access_pmcnten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
         mask = kvm_pmu_valid_counter_mask(vcpu);
         if (p->is_write) {
                 val = p->regval & mask;
-                if (r->Op2 & 0x1) {
+                if (r->Op2 & 0x1)
                         /* accessing PMCNTENSET_EL0 */
                         __vcpu_sys_reg(vcpu, PMCNTENSET_EL0) |= val;
-                        kvm_pmu_enable_counter_mask(vcpu, val);
-                        kvm_vcpu_pmu_restore_guest(vcpu);
-                } else {
+                else
                         /* accessing PMCNTENCLR_EL0 */
                         __vcpu_sys_reg(vcpu, PMCNTENSET_EL0) &= ~val;
-                        kvm_pmu_disable_counter_mask(vcpu, val);
-                }
+
+                kvm_pmu_reprogram_counter_mask(vcpu, val);
         } else {
                 p->regval = __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
         }
@@ -1747,6 +1745,9 @@ static u64 read_sanitised_id_aa64dfr0_el1(struct kvm_vcpu *vcpu,
         /* Hide SPE from guests */
         val &= ~ID_AA64DFR0_EL1_PMSVer_MASK;
 
+        /* Hide BRBE from guests */
+        val &= ~ID_AA64DFR0_EL1_BRBE_MASK;
+
         return val;
 }
@@ -2270,6 +2271,26 @@ static bool access_zcr_el2(struct kvm_vcpu *vcpu,
         return true;
 }
 
+static bool access_mdcr(struct kvm_vcpu *vcpu,
+                        struct sys_reg_params *p,
+                        const struct sys_reg_desc *r)
+{
+        u64 old = __vcpu_sys_reg(vcpu, MDCR_EL2);
+
+        if (!access_rw(vcpu, p, r))
+                return false;
+
+        /*
+         * Request a reload of the PMU to enable/disable the counters affected
+         * by HPME.
+         */
+        if ((old ^ __vcpu_sys_reg(vcpu, MDCR_EL2)) & MDCR_EL2_HPME)
+                kvm_make_request(KVM_REQ_RELOAD_PMU, vcpu);
+
+        return true;
+}
+
 /*
  * Architected system registers.
  * Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2
@@ -2781,7 +2802,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {
         EL2_REG(SCTLR_EL2, access_rw, reset_val, SCTLR_EL2_RES1),
         EL2_REG(ACTLR_EL2, access_rw, reset_val, 0),
         EL2_REG_VNCR(HCR_EL2, reset_hcr, 0),
-        EL2_REG(MDCR_EL2, access_rw, reset_val, 0),
+        EL2_REG(MDCR_EL2, access_mdcr, reset_val, 0),
         EL2_REG(CPTR_EL2, access_rw, reset_val, CPTR_NVHE_EL2_RES1),
         EL2_REG_VNCR(HSTR_EL2, reset_val, 0),
         EL2_REG_VNCR(HFGRTR_EL2, reset_val, 0),
@@ -4650,6 +4671,14 @@ void kvm_calculate_traps(struct kvm_vcpu *vcpu)
                 kvm->arch.fgu[HAFGRTR_GROUP] |= ~(HAFGRTR_EL2_RES0 |
                                                   HAFGRTR_EL2_RES1);
 
+        if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, BRBE, IMP)) {
+                kvm->arch.fgu[HDFGRTR_GROUP] |= (HDFGRTR_EL2_nBRBDATA |
+                                                 HDFGRTR_EL2_nBRBCTL |
+                                                 HDFGRTR_EL2_nBRBIDR);
+                kvm->arch.fgu[HFGITR_GROUP] |= (HFGITR_EL2_nBRBINJ |
+                                                HFGITR_EL2_nBRBIALL);
+        }
+
         set_bit(KVM_ARCH_FLAG_FGU_INITIALIZED, &kvm->arch.flags);
 out:
         mutex_unlock(&kvm->arch.config_lock);
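
The access_mdcr() handler added above relies on a simple old-XOR-new test to spot an MDCR_EL2.HPME flip before requesting a PMU reload. A minimal sketch of that test, with MDCR_EL2_HPME defined locally (bit 7, per the architecture) and the reload request reduced to a printf:

/* Userspace model of the (old ^ new) & HPME check; names are stand-ins. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MDCR_EL2_HPME        (1ULL << 7)

static bool hpme_changed(uint64_t old, uint64_t new)
{
        /* XOR isolates the bits that differ; the mask picks out HPME. */
        return (old ^ new) & MDCR_EL2_HPME;
}

int main(void)
{
        uint64_t old = 0x0;
        uint64_t new = old | MDCR_EL2_HPME;   /* guest hypervisor sets HPME */

        if (hpme_changed(old, new))
                printf("HPME flipped: request a PMU reload\n");
        else
                printf("HPME unchanged: nothing to do\n");
        return 0;
}

The same idiom appears in kvm_pmu_handle_pmcr() above for PMCR_EL0.E, so both the EL1 and the virtual EL2 enable bits funnel into one KVM_REQ_RELOAD_PMU path.
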
@@ -34,9 +34,9 @@
  *
  * CPU Interface:
  *
- * - kvm_vgic_vcpu_init(): initialization of static data that
- *   doesn't depend on any sizing information or emulation type. No
- *   allocation is allowed there.
+ * - kvm_vgic_vcpu_init(): initialization of static data that doesn't depend
+ *   on any sizing information. Private interrupts are allocated if not
+ *   already allocated at vgic-creation time.
  */
 
 /* EARLY INIT */
@@ -58,6 +58,8 @@ void kvm_vgic_early_init(struct kvm *kvm)
 
 /* CREATION */
 
+static int vgic_allocate_private_irqs_locked(struct kvm_vcpu *vcpu, u32 type);
+
 /**
  * kvm_vgic_create: triggered by the instantiation of the VGIC device by
  * user space, either through the legacy KVM_CREATE_IRQCHIP ioctl (v2 only)
@@ -112,6 +114,22 @@ int kvm_vgic_create(struct kvm *kvm, u32 type)
                 goto out_unlock;
         }
 
+        kvm_for_each_vcpu(i, vcpu, kvm) {
+                ret = vgic_allocate_private_irqs_locked(vcpu, type);
+                if (ret)
+                        break;
+        }
+
+        if (ret) {
+                kvm_for_each_vcpu(i, vcpu, kvm) {
+                        struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
+                        kfree(vgic_cpu->private_irqs);
+                        vgic_cpu->private_irqs = NULL;
+                }
+
+                goto out_unlock;
+        }
+
         kvm->arch.vgic.in_kernel = true;
         kvm->arch.vgic.vgic_model = type;
@@ -180,7 +198,7 @@ static int kvm_vgic_dist_init(struct kvm *kvm, unsigned int nr_spis)
         return 0;
 }
 
-static int vgic_allocate_private_irqs_locked(struct kvm_vcpu *vcpu)
+static int vgic_allocate_private_irqs_locked(struct kvm_vcpu *vcpu, u32 type)
 {
         struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
         int i;
@@ -218,17 +236,28 @@ static int vgic_allocate_private_irqs_locked(struct kvm_vcpu *vcpu)
                         /* PPIs */
                         irq->config = VGIC_CONFIG_LEVEL;
                 }
+
+                switch (type) {
+                case KVM_DEV_TYPE_ARM_VGIC_V3:
+                        irq->group = 1;
+                        irq->mpidr = kvm_vcpu_get_mpidr_aff(vcpu);
+                        break;
+                case KVM_DEV_TYPE_ARM_VGIC_V2:
+                        irq->group = 0;
+                        irq->targets = BIT(vcpu->vcpu_id);
+                        break;
+                }
         }
 
         return 0;
 }
 
-static int vgic_allocate_private_irqs(struct kvm_vcpu *vcpu)
+static int vgic_allocate_private_irqs(struct kvm_vcpu *vcpu, u32 type)
 {
         int ret;
 
         mutex_lock(&vcpu->kvm->arch.config_lock);
-        ret = vgic_allocate_private_irqs_locked(vcpu);
+        ret = vgic_allocate_private_irqs_locked(vcpu, type);
         mutex_unlock(&vcpu->kvm->arch.config_lock);
 
         return ret;
@@ -258,7 +287,7 @@ int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
         if (!irqchip_in_kernel(vcpu->kvm))
                 return 0;
 
-        ret = vgic_allocate_private_irqs(vcpu);
+        ret = vgic_allocate_private_irqs(vcpu, dist->vgic_model);
         if (ret)
                 return ret;
 
@@ -295,7 +324,7 @@ int vgic_init(struct kvm *kvm)
 {
         struct vgic_dist *dist = &kvm->arch.vgic;
         struct kvm_vcpu *vcpu;
-        int ret = 0, i;
+        int ret = 0;
         unsigned long idx;
 
         lockdep_assert_held(&kvm->arch.config_lock);
@@ -315,35 +344,6 @@ int vgic_init(struct kvm *kvm)
         if (ret)
                 goto out;
 
-        /* Initialize groups on CPUs created before the VGIC type was known */
-        kvm_for_each_vcpu(idx, vcpu, kvm) {
-                ret = vgic_allocate_private_irqs_locked(vcpu);
-                if (ret)
-                        goto out;
-
-                for (i = 0; i < VGIC_NR_PRIVATE_IRQS; i++) {
-                        struct vgic_irq *irq = vgic_get_vcpu_irq(vcpu, i);
-
-                        switch (dist->vgic_model) {
-                        case KVM_DEV_TYPE_ARM_VGIC_V3:
-                                irq->group = 1;
-                                irq->mpidr = kvm_vcpu_get_mpidr_aff(vcpu);
-                                break;
-                        case KVM_DEV_TYPE_ARM_VGIC_V2:
-                                irq->group = 0;
-                                irq->targets = 1U << idx;
-                                break;
-                        default:
-                                ret = -EINVAL;
-                        }
-
-                        vgic_put_irq(kvm, irq);
-
-                        if (ret)
-                                goto out;
-                }
-        }
-
         /*
          * If we have GICv4.1 enabled, unconditionally request enable the
          * v4 support so that we get HW-accelerated vSGIs. Otherwise, only
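
A standalone model of the per-model private-IRQ setup that now runs once at vgic creation time instead of in vgic_init(): GICv3 SGIs/PPIs get group 1 and the owning vCPU's MPIDR affinity, GICv2 ones get group 0 and a target bitmap. The enum and struct names below are stand-ins for the kernel's.

/* Userspace model of the creation-time private-IRQ configuration. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

enum vgic_model { VGIC_V2, VGIC_V3 };   /* stand-ins for KVM_DEV_TYPE_ARM_VGIC_V2/V3 */

struct virq {
        uint8_t group;
        uint32_t targets;       /* v2: target CPU bitmap        */
        uint64_t mpidr;         /* v3: affinity of the owner    */
};

static void configure_private_irq(struct virq *irq, enum vgic_model model,
                                  unsigned int vcpu_id, uint64_t vcpu_mpidr)
{
        switch (model) {
        case VGIC_V3:
                irq->group = 1;
                irq->mpidr = vcpu_mpidr;
                break;
        case VGIC_V2:
                irq->group = 0;
                irq->targets = 1u << vcpu_id;
                break;
        }
}

int main(void)
{
        struct virq sgi0 = { 0 };

        configure_private_irq(&sgi0, VGIC_V2, /*vcpu_id=*/2, /*mpidr=*/0);
        printf("v2: group=%u targets=0x%" PRIx32 "\n", sgi0.group, sgi0.targets);
        return 0;
}

Doing this at KVM_CREATE_DEVICE time is possible because the vgic model (the `type` argument threaded through above) is already known there, which is what lets the late "initialize groups" loop be dropped from vgic_init().
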
@@ -6,6 +6,7 @@
 #include <linux/kstrtox.h>
 #include <linux/kvm.h>
 #include <linux/kvm_host.h>
+#include <linux/string_choices.h>
 #include <kvm/arm_vgic.h>
 #include <asm/kvm_hyp.h>
 #include <asm/kvm_mmu.h>
@@ -651,9 +652,9 @@ int vgic_v3_probe(const struct gic_kvm_info *info)
         if (info->has_v4) {
                 kvm_vgic_global_state.has_gicv4 = gicv4_enable;
                 kvm_vgic_global_state.has_gicv4_1 = info->has_v4_1 && gicv4_enable;
-                kvm_info("GICv4%s support %sabled\n",
+                kvm_info("GICv4%s support %s\n",
                          kvm_vgic_global_state.has_gicv4_1 ? ".1" : "",
-                         gicv4_enable ? "en" : "dis");
+                         str_enabled_disabled(gicv4_enable));
         }
 
         kvm_vgic_global_state.vcpu_base = 0;
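
str_enabled_disabled() comes from <linux/string_choices.h> and replaces the hand-rolled "%sabled" trick. A userspace stand-in showing the resulting message format:

/* Local re-implementation of the kernel helper, for illustration only. */
#include <stdbool.h>
#include <stdio.h>

static const char *str_enabled_disabled(bool v)
{
        return v ? "enabled" : "disabled";
}

int main(void)
{
        bool gicv4_enable = true, has_gicv4_1 = false;

        /* Mirrors the reworked kvm_info() format string above. */
        printf("GICv4%s support %s\n", has_gicv4_1 ? ".1" : "",
               str_enabled_disabled(gicv4_enable));
        return 0;
}
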
@@ -135,11 +135,10 @@ void kvm_arm_vmid_clear_active(void)
         atomic64_set(this_cpu_ptr(&active_vmids), VMID_ACTIVE_INVALID);
 }
 
-bool kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid)
+void kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid)
 {
         unsigned long flags;
         u64 vmid, old_active_vmid;
-        bool updated = false;
 
         vmid = atomic64_read(&kvm_vmid->id);
 
@@ -157,21 +156,17 @@ bool kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid)
         if (old_active_vmid != 0 && vmid_gen_match(vmid) &&
             0 != atomic64_cmpxchg_relaxed(this_cpu_ptr(&active_vmids),
                                           old_active_vmid, vmid))
-                return false;
+                return;
 
         raw_spin_lock_irqsave(&cpu_vmid_lock, flags);
 
         /* Check that our VMID belongs to the current generation. */
         vmid = atomic64_read(&kvm_vmid->id);
-        if (!vmid_gen_match(vmid)) {
+        if (!vmid_gen_match(vmid))
                 vmid = new_vmid(kvm_vmid);
-                updated = true;
-        }
 
         atomic64_set(this_cpu_ptr(&active_vmids), vmid);
         raw_spin_unlock_irqrestore(&cpu_vmid_lock, flags);
-
-        return updated;
 }
 
 /*
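
A rough userspace model of the kvm_arm_vmid_update() flow after the change above: a lock-free fast path re-claims the still-valid active VMID, and the slow path allocates a fresh one when the generation has rolled over. It uses C11 atomics rather than the kernel's atomic64/percpu APIs, omits the spinlock, and all names plus the generation encoding are invented for the sketch; the point is only that the caller no longer consumes a "was it updated?" return value.

/* Simplified, assumption-laden model of the void kvm_arm_vmid_update() shape. */
#include <inttypes.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic uint64_t active_vmid;    /* stands in for this_cpu_ptr(&active_vmids) */
static uint64_t current_gen = 1;

static bool vmid_gen_match(uint64_t vmid)
{
        return (vmid >> 48) == current_gen;     /* illustrative generation encoding */
}

static uint64_t new_vmid(void)
{
        return (current_gen << 48) | 42;        /* fake allocation */
}

static void vmid_update(_Atomic uint64_t *kvm_vmid)
{
        uint64_t vmid = atomic_load(kvm_vmid);
        uint64_t old_active = atomic_load(&active_vmid);

        /* Fast path: same generation and we can re-claim the slot atomically. */
        if (old_active != 0 && vmid_gen_match(vmid) &&
            atomic_compare_exchange_strong(&active_vmid, &old_active, vmid))
                return;

        /* Slow path (the real code takes cpu_vmid_lock here). */
        if (!vmid_gen_match(vmid)) {
                vmid = new_vmid();
                atomic_store(kvm_vmid, vmid);
        }
        atomic_store(&active_vmid, vmid);
}

int main(void)
{
        _Atomic uint64_t vmid = 0;      /* never allocated yet */

        vmid_update(&vmid);
        printf("vmid now 0x%" PRIx64 "\n", atomic_load(&vmid));
        return 0;
}
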
@@ -52,8 +52,7 @@ u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1);
 void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu);
 void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu);
 void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu);
-void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val);
-void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val);
+void kvm_pmu_reprogram_counter_mask(struct kvm_vcpu *vcpu, u64 val);
 void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu);
 void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu);
 bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu);
@@ -120,8 +119,7 @@ static inline u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu)
 static inline void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu) {}
 static inline void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu) {}
 static inline void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu) {}
-static inline void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val) {}
-static inline void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val) {}
+static inline void kvm_pmu_reprogram_counter_mask(struct kvm_vcpu *vcpu, u64 val) {}
 static inline void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu) {}
 static inline void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu) {}
 static inline bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu)
@@ -147,7 +147,7 @@ static bool vcpu_aarch64_only(struct kvm_vcpu *vcpu)
         vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR0_EL1), &val);
 
         el0 = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL0), val);
-        return el0 == ID_AA64PFR0_EL1_ELx_64BIT_ONLY;
+        return el0 == ID_AA64PFR0_EL1_EL0_IMP;
 }
 
 int main(void)

@@ -570,7 +570,7 @@ int main(void)
         /* Check for AARCH64 only system */
         vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR0_EL1), &val);
         el0 = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL0), val);
-        aarch64_only = (el0 == ID_AA64PFR0_EL1_ELx_64BIT_ONLY);
+        aarch64_only = (el0 == ID_AA64PFR0_EL1_EL0_IMP);
 
         ksft_print_header();
 
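
The selftest hunks above only track the rename of the "AArch64-only" field value after the sysreg field name update. A small sketch of the check itself, extracting ID_AA64PFR0_EL1.EL0 (bits [3:0], where 0b0001 means AArch64 only); the macro names are local stand-ins rather than the tools/ headers:

/* Userspace model of the aarch64-only test; field layout per the Arm ARM. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ID_AA64PFR0_EL0_SHIFT   0
#define ID_AA64PFR0_EL0_MASK    (0xfULL << ID_AA64PFR0_EL0_SHIFT)
#define ID_AA64PFR0_EL0_IMP     1       /* AArch64 only, no AArch32 at EL0 */

static uint64_t field_get(uint64_t mask, unsigned int shift, uint64_t reg)
{
        return (reg & mask) >> shift;
}

static bool vcpu_aarch64_only(uint64_t id_aa64pfr0)
{
        return field_get(ID_AA64PFR0_EL0_MASK, ID_AA64PFR0_EL0_SHIFT,
                         id_aa64pfr0) == ID_AA64PFR0_EL0_IMP;
}

int main(void)
{
        uint64_t pfr0 = 0x0000000000000011ULL;  /* EL0 = EL1 = AArch64 only */

        printf("aarch64_only = %d\n", vcpu_aarch64_only(pfr0));
        return 0;
}
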