KVM: arm64: Resolve vLPI by host IRQ in vgic_v4_unset_forwarding()
JIRA: https://issues.redhat.com/browse/RHEL-93666

The virtual mapping and "GSI" routing of a particular vLPI is subject to
change in response to the guest / userspace. This can be pretty annoying
to deal with when KVM needs to track the physical state that's managed
for vLPI direct injection.

Make vgic_v4_unset_forwarding() resilient by using the host IRQ to
resolve the vgic IRQ. Since this uses the LPI xarray directly, finding
the ITS by doorbell address and grabbing its its_lock is no longer
necessary. Note that matching the right ITS / ITE is already handled in
vgic_v4_set_forwarding(), and unless there's a bug in KVM's VGIC ITS
emulation the virtual mapping should remain stable for the lifetime of
the vLPI mapping.

Tested-by: Sweet Tea Dorminy <sweettea-kernel@dorminy.me>
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
Link: https://lore.kernel.org/r/20250523194722.4066715-4-oliver.upton@linux.dev
Signed-off-by: Marc Zyngier <maz@kernel.org>
(cherry picked from commit 05b9405f2fa1848e984f231708fa1e5d385e4d27)
Signed-off-by: Gavin Shan <gshan@redhat.com>
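For context, the new __vgic_host_irq_get_vlpi() helper introduced in the
diff below is an instance of the kernel's usual RCU-guarded xarray scan,
where a match only counts if a reference can still be taken before the
RCU read section ends. A minimal sketch of the same idiom follows; the
demo_obj type and demo_find_by_host_irq() function are hypothetical
stand-ins for the vgic types, while guard(rcu)(), xa_for_each() and
refcount_inc_not_zero() are the real kernel APIs:

#include <linux/xarray.h>
#include <linux/rcupdate.h>
#include <linux/refcount.h>

/* Hypothetical refcounted object stored in an xarray. */
struct demo_obj {
	unsigned int host_irq;
	refcount_t ref;
};

static struct demo_obj *demo_find_by_host_irq(struct xarray *xa,
					      unsigned int host_irq)
{
	struct demo_obj *obj;
	unsigned long idx;

	/* guard(rcu)() holds rcu_read_lock() until the function returns. */
	guard(rcu)();
	xa_for_each(xa, idx, obj) {
		if (obj->host_irq != host_irq)
			continue;

		/*
		 * The entry may be freed once the RCU read section ends,
		 * so only return it if a reference can still be taken.
		 */
		if (!refcount_inc_not_zero(&obj->ref))
			return NULL;

		return obj;
	}

	return NULL;
}

As in the diff, a failed reference grab means the object is already
being torn down, so the lookup reports "not found" and the caller has
nothing left to unmap.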
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -2751,8 +2751,7 @@ void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons,
 	if (irq_entry->type != KVM_IRQ_ROUTING_MSI)
 		return;
 
-	kvm_vgic_v4_unset_forwarding(irqfd->kvm, prod->irq,
-				     &irqfd->irq_entry);
+	kvm_vgic_v4_unset_forwarding(irqfd->kvm, prod->irq);
 }
 
 void kvm_arch_irq_bypass_stop(struct irq_bypass_consumer *cons)
--- a/arch/arm64/kvm/vgic/vgic-v4.c
+++ b/arch/arm64/kvm/vgic/vgic-v4.c
@@ -492,10 +492,27 @@ out_unlock_irq:
 	return ret;
 }
 
-int kvm_vgic_v4_unset_forwarding(struct kvm *kvm, int virq,
-				 struct kvm_kernel_irq_routing_entry *irq_entry)
+static struct vgic_irq *__vgic_host_irq_get_vlpi(struct kvm *kvm, int host_irq)
+{
+	struct vgic_irq *irq;
+	unsigned long idx;
+
+	guard(rcu)();
+	xa_for_each(&kvm->arch.vgic.lpi_xa, idx, irq) {
+		if (!irq->hw || irq->host_irq != host_irq)
+			continue;
+
+		if (!vgic_try_get_irq_kref(irq))
+			return NULL;
+
+		return irq;
+	}
+
+	return NULL;
+}
+
+int kvm_vgic_v4_unset_forwarding(struct kvm *kvm, int host_irq)
 {
-	struct vgic_its *its;
 	struct vgic_irq *irq;
 	unsigned long flags;
 	int ret = 0;
@@ -503,31 +520,19 @@ int kvm_vgic_v4_unset_forwarding(struct kvm *kvm, int virq,
 	if (!vgic_supports_direct_msis(kvm))
 		return 0;
 
-	/*
-	 * Get the ITS, and escape early on error (not a valid
-	 * doorbell for any of our vITSs).
-	 */
-	its = vgic_get_its(kvm, irq_entry);
-	if (IS_ERR(its))
+	irq = __vgic_host_irq_get_vlpi(kvm, host_irq);
+	if (!irq)
 		return 0;
 
-	mutex_lock(&its->its_lock);
-
-	ret = vgic_its_resolve_lpi(kvm, its, irq_entry->msi.devid,
-				   irq_entry->msi.data, &irq);
-	if (ret)
-		goto out;
-
 	raw_spin_lock_irqsave(&irq->irq_lock, flags);
-	WARN_ON(irq->hw && irq->host_irq != virq);
+	WARN_ON(irq->hw && irq->host_irq != host_irq);
 	if (irq->hw) {
 		atomic_dec(&irq->target_vcpu->arch.vgic_cpu.vgic_v3.its_vpe.vlpi_count);
 		irq->hw = false;
-		ret = its_unmap_vlpi(virq);
+		ret = its_unmap_vlpi(host_irq);
 	}
 
 	raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
-out:
-	mutex_unlock(&its->its_lock);
 	vgic_put_irq(kvm, irq);
 	return ret;
 }
--- a/include/kvm/arm_vgic.h
+++ b/include/kvm/arm_vgic.h
@@ -426,8 +426,7 @@ struct kvm_kernel_irq_routing_entry;
 int kvm_vgic_v4_set_forwarding(struct kvm *kvm, int irq,
 			       struct kvm_kernel_irq_routing_entry *irq_entry);
 
-int kvm_vgic_v4_unset_forwarding(struct kvm *kvm, int irq,
-				 struct kvm_kernel_irq_routing_entry *irq_entry);
+int kvm_vgic_v4_unset_forwarding(struct kvm *kvm, int host_irq);
 
 int vgic_v4_load(struct kvm_vcpu *vcpu);
 void vgic_v4_commit(struct kvm_vcpu *vcpu);