KVM: xen: split up kvm_xen_set_evtchn_fast()
author    Paul Durrant <pdurrant@amazon.com>  Wed, 15 Nov 2023 21:03:32 +0000 (21:03 +0000)
committer Paul Durrant <pdurrant@amazon.com>  Tue, 21 Nov 2023 17:38:34 +0000 (17:38 +0000)
The implementation of kvm_xen_set_evtchn_fast() is a rather lengthy piece
of code that performs two operations: updating the shared_info
evtchn_pending mask, and updating the vcpu_info evtchn_pending_sel
mask. Introduce a separate function to perform each of those operations
and re-work kvm_xen_set_evtchn_fast() to use them.

No functional change intended.

Signed-off-by: Paul Durrant <pdurrant@amazon.com>
---
Cc: Sean Christopherson <seanjc@google.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: David Woodhouse <dwmw2@infradead.org>
Cc: x86@kernel.org
v8:
 - New in this version.
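For reviewers, a standalone userspace sketch (not part of the patch) of the
three-way outcome that set_shinfo_evtchn_pending() reports for a port:
0 if the event was already pending (coalesced), -ENOTCONN if the port is
masked, 1 if it is newly raised. The bitmaps, the raise_port() helper and
MAX_PORTS are invented for illustration, and the non-atomic
test_and_set_bit()/test_bit() below stand in for the kernel primitives:

/*
 * Illustration only: models the outcome logic of
 * set_shinfo_evtchn_pending() on plain bitmaps.
 */
#include <errno.h>
#include <limits.h>
#include <stdio.h>

#define MAX_PORTS 4096 /* arbitrary for this sketch */
#define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

static unsigned long pending_bits[MAX_PORTS / BITS_PER_LONG];
static unsigned long mask_bits[MAX_PORTS / BITS_PER_LONG];

/* Non-atomic stand-ins for the kernel's bitmap primitives. */
static int test_and_set_bit(unsigned int nr, unsigned long *addr)
{
	unsigned long bit = 1UL << (nr % BITS_PER_LONG);
	unsigned long old = addr[nr / BITS_PER_LONG];

	addr[nr / BITS_PER_LONG] = old | bit;
	return !!(old & bit);
}

static int test_bit(unsigned int nr, const unsigned long *addr)
{
	return !!(addr[nr / BITS_PER_LONG] & (1UL << (nr % BITS_PER_LONG)));
}

/* Mirrors the pending/mask decision made under the shinfo_cache lock. */
static int raise_port(unsigned int port)
{
	if (test_and_set_bit(port, pending_bits))
		return 0;		/* already pending: coalesced */
	if (test_bit(port, mask_bits))
		return -ENOTCONN;	/* masked (pending bit still set) */
	return 1;			/* newly raised */
}

int main(void)
{
	printf("%d\n", raise_port(3));	/* 1: newly raised */
	printf("%d\n", raise_port(3));	/* 0: coalesced */
	mask_bits[0] |= 1UL << 5;
	printf("%d\n", raise_port(5));	/* -ENOTCONN: masked */
	return 0;
}

Note that, as in the patch, a masked port still has its pending bit set
before -ENOTCONN is returned, so the event is not lost if the guest later
unmasks the port.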

arch/x86/kvm/xen.c

index 42a9f1ea25b3ac32b346c81453a3c5442ec78710..eff405eead1ca5a9dedd202a85d92289734dad05 100644
--- a/arch/x86/kvm/xen.c
+++ b/arch/x86/kvm/xen.c
@@ -1623,60 +1623,28 @@ static void kvm_xen_check_poller(struct kvm_vcpu *vcpu, int port)
        }
 }
 
-/*
- * The return value from this function is propagated to kvm_set_irq() API,
- * so it returns:
- *  < 0   Interrupt was ignored (masked or not delivered for other reasons)
- *  = 0   Interrupt was coalesced (previous irq is still pending)
- *  > 0   Number of CPUs interrupt was delivered to
- *
- * It is also called directly from kvm_arch_set_irq_inatomic(), where the
- * only check on its return value is a comparison with -EWOULDBLOCK'.
- */
-int kvm_xen_set_evtchn_fast(struct kvm_xen_evtchn *xe, struct kvm *kvm)
+static int set_shinfo_evtchn_pending(struct kvm_vcpu *vcpu, u32 port, u32 *port_word_bit)
 {
+       struct kvm *kvm = vcpu->kvm;
        struct gfn_to_pfn_cache *gpc = &kvm->arch.xen.shinfo_cache;
-       struct kvm_vcpu *vcpu;
        unsigned long *pending_bits, *mask_bits;
        unsigned long flags;
-       int port_word_bit;
-       bool kick_vcpu = false;
-       int vcpu_idx, idx, rc;
-
-       vcpu_idx = READ_ONCE(xe->vcpu_idx);
-       if (vcpu_idx >= 0)
-               vcpu = kvm_get_vcpu(kvm, vcpu_idx);
-       else {
-               vcpu = kvm_get_vcpu_by_id(kvm, xe->vcpu_id);
-               if (!vcpu)
-                       return -EINVAL;
-               WRITE_ONCE(xe->vcpu_idx, vcpu->vcpu_idx);
-       }
-
-       if (!vcpu->arch.xen.vcpu_info_cache.active)
-               return -EINVAL;
-
-       if (xe->port >= max_evtchn_port(kvm))
-               return -EINVAL;
-
-       rc = -EWOULDBLOCK;
-
-       idx = srcu_read_lock(&kvm->srcu);
+       int rc = -EWOULDBLOCK;
 
        read_lock_irqsave(&gpc->lock, flags);
        if (!kvm_gpc_check(gpc, PAGE_SIZE))
-               goto out_rcu;
+               goto out;
 
        if (IS_ENABLED(CONFIG_64BIT) && kvm->arch.xen.long_mode) {
                struct shared_info *shinfo = gpc->khva;
                pending_bits = (unsigned long *)&shinfo->evtchn_pending;
                mask_bits = (unsigned long *)&shinfo->evtchn_mask;
-               port_word_bit = xe->port / 64;
+               *port_word_bit = port / 64;
        } else {
                struct compat_shared_info *shinfo = gpc->khva;
                pending_bits = (unsigned long *)&shinfo->evtchn_pending;
                mask_bits = (unsigned long *)&shinfo->evtchn_mask;
-               port_word_bit = xe->port / 32;
+               *port_word_bit = port / 32;
        }
 
        /*
@@ -1686,52 +1654,106 @@ int kvm_xen_set_evtchn_fast(struct kvm_xen_evtchn *xe, struct kvm *kvm)
         * already set, then we kick the vCPU in question to write to the
         * *real* evtchn_pending_sel in its own guest vcpu_info struct.
         */
-       if (test_and_set_bit(xe->port, pending_bits)) {
+       if (test_and_set_bit(port, pending_bits)) {
                rc = 0; /* It was already raised */
-       } else if (test_bit(xe->port, mask_bits)) {
-               rc = -ENOTCONN; /* Masked */
-               kvm_xen_check_poller(vcpu, xe->port);
+       } else if (test_bit(port, mask_bits)) {
+               rc = -ENOTCONN; /* It is masked */
+               kvm_xen_check_poller(vcpu, port);
        } else {
-               rc = 1; /* Delivered to the bitmap in shared_info. */
-               /* Now switch to the vCPU's vcpu_info to set the index and pending_sel */
-               read_unlock_irqrestore(&gpc->lock, flags);
-               gpc = &vcpu->arch.xen.vcpu_info_cache;
+               rc = 1; /* It is newly raised */
+       }
 
-               read_lock_irqsave(&gpc->lock, flags);
-               if (!kvm_gpc_check(gpc, sizeof(struct vcpu_info))) {
-                       /*
-                        * Could not access the vcpu_info. Set the bit in-kernel
-                        * and prod the vCPU to deliver it for itself.
-                        */
-                       if (!test_and_set_bit(port_word_bit, &vcpu->arch.xen.evtchn_pending_sel))
-                               kick_vcpu = true;
-                       goto out_rcu;
-               }
+ out:
+       read_unlock_irqrestore(&gpc->lock, flags);
+       return rc;
+}
 
-               if (IS_ENABLED(CONFIG_64BIT) && kvm->arch.xen.long_mode) {
-                       struct vcpu_info *vcpu_info = gpc->khva;
-                       if (!test_and_set_bit(port_word_bit, &vcpu_info->evtchn_pending_sel)) {
-                               WRITE_ONCE(vcpu_info->evtchn_upcall_pending, 1);
-                               kick_vcpu = true;
-                       }
-               } else {
-                       struct compat_vcpu_info *vcpu_info = gpc->khva;
-                       if (!test_and_set_bit(port_word_bit,
-                                             (unsigned long *)&vcpu_info->evtchn_pending_sel)) {
-                               WRITE_ONCE(vcpu_info->evtchn_upcall_pending, 1);
-                               kick_vcpu = true;
-                       }
+static bool set_vcpu_info_evtchn_pending(struct kvm_vcpu *vcpu, u32 port_word_bit)
+{
+       struct kvm *kvm = vcpu->kvm;
+       struct gfn_to_pfn_cache *gpc = &vcpu->arch.xen.vcpu_info_cache;
+       unsigned long flags;
+       bool kick_vcpu = false;
+
+       read_lock_irqsave(&gpc->lock, flags);
+       if (!kvm_gpc_check(gpc, sizeof(struct vcpu_info))) {
+               /*
+                * Could not access the vcpu_info. Set the bit in-kernel
+                * and prod the vCPU to deliver it for itself.
+                */
+               if (!test_and_set_bit(port_word_bit, &vcpu->arch.xen.evtchn_pending_sel))
+                       kick_vcpu = true;
+               goto out;
+       }
+
+       if (IS_ENABLED(CONFIG_64BIT) && kvm->arch.xen.long_mode) {
+               struct vcpu_info *vcpu_info = gpc->khva;
+
+               if (!test_and_set_bit(port_word_bit, &vcpu_info->evtchn_pending_sel)) {
+                       WRITE_ONCE(vcpu_info->evtchn_upcall_pending, 1);
+                       kick_vcpu = true;
                }
+       } else {
+               struct compat_vcpu_info *vcpu_info = gpc->khva;
 
-               /* For the per-vCPU lapic vector, deliver it as MSI. */
-               if (kick_vcpu && vcpu->arch.xen.upcall_vector) {
-                       kvm_xen_inject_vcpu_vector(vcpu);
-                       kick_vcpu = false;
+               if (!test_and_set_bit(port_word_bit,
+                                     (unsigned long *)&vcpu_info->evtchn_pending_sel)) {
+                       WRITE_ONCE(vcpu_info->evtchn_upcall_pending, 1);
+                       kick_vcpu = true;
                }
        }
 
- out_rcu:
+       /* For the per-vCPU lapic vector, deliver it as MSI. */
+       if (kick_vcpu && vcpu->arch.xen.upcall_vector) {
+               kvm_xen_inject_vcpu_vector(vcpu);
+               kick_vcpu = false;
+       }
+
+ out:
        read_unlock_irqrestore(&gpc->lock, flags);
+       return kick_vcpu;
+}
+
+/*
+ * The return value from this function is propagated to kvm_set_irq() API,
+ * so it returns:
+ *  < 0   Interrupt was ignored (masked or not delivered for other reasons)
+ *  = 0   Interrupt was coalesced (previous irq is still pending)
+ *  > 0   Number of CPUs interrupt was delivered to
+ *
+ * It is also called directly from kvm_arch_set_irq_inatomic(), where the
+ * only check on its return value is a comparison with -EWOULDBLOCK
+ * (which may be returned by set_shinfo_evtchn_pending()).
+ */
+int kvm_xen_set_evtchn_fast(struct kvm_xen_evtchn *xe, struct kvm *kvm)
+{
+       struct kvm_vcpu *vcpu;
+       u32 port_word_bit;
+       bool kick_vcpu = false;
+       int vcpu_idx, idx, rc;
+
+       vcpu_idx = READ_ONCE(xe->vcpu_idx);
+       if (vcpu_idx >= 0)
+               vcpu = kvm_get_vcpu(kvm, vcpu_idx);
+       else {
+               vcpu = kvm_get_vcpu_by_id(kvm, xe->vcpu_id);
+               if (!vcpu)
+                       return -EINVAL;
+               WRITE_ONCE(xe->vcpu_idx, vcpu->vcpu_idx);
+       }
+
+       if (!vcpu->arch.xen.vcpu_info_cache.active)
+               return -EINVAL;
+
+       if (xe->port >= max_evtchn_port(kvm))
+               return -EINVAL;
+
+       idx = srcu_read_lock(&kvm->srcu);
+
+       rc = set_shinfo_evtchn_pending(vcpu, xe->port, &port_word_bit);
+       if (rc == 1) /* Delivered to the bitmap in shared_info. */
+               kick_vcpu = set_vcpu_info_evtchn_pending(vcpu, port_word_bit);
+
        srcu_read_unlock(&kvm->srcu, idx);
 
        if (kick_vcpu) {