xenbits.xensource.com Git - people/pauldu/linux.git/commitdiff
KVM: x86/xen: split up kvm_xen_set_evtchn_fast()
author Paul Durrant <pdurrant@amazon.com>
	Wed, 15 Nov 2023 21:03:32 +0000 (21:03 +0000)
committer Paul Durrant <pdurrant@amazon.com>
	Mon, 20 Nov 2023 18:16:17 +0000 (18:16 +0000)
Signed-off-by: Paul Durrant <pdurrant@amazon.com>
arch/x86/kvm/xen.c

index 42a9f1ea25b3ac32b346c81453a3c5442ec78710..c49441b444772d2e40458616f51a4e3aed4327c1 100644
@@ -1623,60 +1623,26 @@ static void kvm_xen_check_poller(struct kvm_vcpu *vcpu, int port)
        }
 }
 
-/*
- * The return value from this function is propagated to kvm_set_irq() API,
- * so it returns:
- *  < 0   Interrupt was ignored (masked or not delivered for other reasons)
- *  = 0   Interrupt was coalesced (previous irq is still pending)
- *  > 0   Number of CPUs interrupt was delivered to
- *
- * It is also called directly from kvm_arch_set_irq_inatomic(), where the
- * only check on its return value is a comparison with -EWOULDBLOCK'.
- */
-int kvm_xen_set_evtchn_fast(struct kvm_xen_evtchn *xe, struct kvm *kvm)
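+/*
+ * Set the given port pending in the shared_info evtchn_pending bitmap.
+ * Returns 1 if the port was newly set, 0 if it was already pending,
+ * -ENOTCONN if the port is masked, or -EWOULDBLOCK if the shared_info
+ * page could not be accessed.
+ */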
+static int set_shinfo_evtchn_pending(struct kvm_vcpu *vcpu, u32 port)
 {
+       struct kvm *kvm = vcpu->kvm;
        struct gfn_to_pfn_cache *gpc = &kvm->arch.xen.shinfo_cache;
-       struct kvm_vcpu *vcpu;
        unsigned long *pending_bits, *mask_bits;
        unsigned long flags;
-       int port_word_bit;
-       bool kick_vcpu = false;
-       int vcpu_idx, idx, rc;
-
-       vcpu_idx = READ_ONCE(xe->vcpu_idx);
-       if (vcpu_idx >= 0)
-               vcpu = kvm_get_vcpu(kvm, vcpu_idx);
-       else {
-               vcpu = kvm_get_vcpu_by_id(kvm, xe->vcpu_id);
-               if (!vcpu)
-                       return -EINVAL;
-               WRITE_ONCE(xe->vcpu_idx, vcpu->vcpu_idx);
-       }
-
-       if (!vcpu->arch.xen.vcpu_info_cache.active)
-               return -EINVAL;
-
-       if (xe->port >= max_evtchn_port(kvm))
-               return -EINVAL;
-
-       rc = -EWOULDBLOCK;
-
-       idx = srcu_read_lock(&kvm->srcu);
+       int rc = -EWOULDBLOCK;
 
        read_lock_irqsave(&gpc->lock, flags);
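+       /*
+        * If the shared_info cache is invalid, tell the caller to retry
+        * from a context in which the cache can be refreshed.
+        */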
        if (!kvm_gpc_check(gpc, PAGE_SIZE))
-               goto out_rcu;
+               goto out;
 
        if (IS_ENABLED(CONFIG_64BIT) && kvm->arch.xen.long_mode) {
                struct shared_info *shinfo = gpc->khva;
                pending_bits = (unsigned long *)&shinfo->evtchn_pending;
                mask_bits = (unsigned long *)&shinfo->evtchn_mask;
-               port_word_bit = xe->port / 64;
        } else {
                struct compat_shared_info *shinfo = gpc->khva;
                pending_bits = (unsigned long *)&shinfo->evtchn_pending;
                mask_bits = (unsigned long *)&shinfo->evtchn_mask;
-               port_word_bit = xe->port / 32;
        }
 
        /*
@@ -1686,28 +1652,42 @@ int kvm_xen_set_evtchn_fast(struct kvm_xen_evtchn *xe, struct kvm *kvm)
         * already set, then we kick the vCPU in question to write to the
         * *real* evtchn_pending_sel in its own guest vcpu_info struct.
         */
-       if (test_and_set_bit(xe->port, pending_bits)) {
+       if (test_and_set_bit(port, pending_bits)) {
                rc = 0; /* It was already raised */
-       } else if (test_bit(xe->port, mask_bits)) {
+       } else if (test_bit(port, mask_bits)) {
                rc = -ENOTCONN; /* Masked */
-               kvm_xen_check_poller(vcpu, xe->port);
+               kvm_xen_check_poller(vcpu, port);
        } else {
                rc = 1; /* Delivered to the bitmap in shared_info. */
-               /* Now switch to the vCPU's vcpu_info to set the index and pending_sel */
-               read_unlock_irqrestore(&gpc->lock, flags);
-               gpc = &vcpu->arch.xen.vcpu_info_cache;
+       }
 
-               read_lock_irqsave(&gpc->lock, flags);
-               if (!kvm_gpc_check(gpc, sizeof(struct vcpu_info))) {
-                       /*
-                        * Could not access the vcpu_info. Set the bit in-kernel
-                        * and prod the vCPU to deliver it for itself.
-                        */
-                       if (!test_and_set_bit(port_word_bit, &vcpu->arch.xen.evtchn_pending_sel))
-                               kick_vcpu = true;
-                       goto out_rcu;
-               }
+ out:
+       read_unlock_irqrestore(&gpc->lock, flags);
+
+       return rc;
+}
+
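+/*
+ * Mark the port's selector bit pending in the vCPU's vcpu_info. If the
+ * vcpu_info page cannot be accessed, fall back to the in-kernel shadow
+ * of evtchn_pending_sel. Returns true if the vCPU needs to be kicked
+ * to deliver the event to itself.
+ */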
+static bool set_vcpu_info_evtchn_pending(struct kvm_vcpu *vcpu, u32 port)
+{
+       struct kvm *kvm = vcpu->kvm;
+       struct gfn_to_pfn_cache *gpc = &vcpu->arch.xen.vcpu_info_cache;
+       unsigned long flags;
+       int port_word_bit;
+       bool kick_vcpu = false;
 
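+       /*
+        * Each bit in evtchn_pending_sel selects one word of the
+        * evtchn_pending bitmap, so its index depends on the word
+        * size of the guest ABI.
+        */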
+       if (IS_ENABLED(CONFIG_64BIT) && kvm->arch.xen.long_mode)
+               port_word_bit = port / 64;
+       else
+               port_word_bit = port / 32;
+
+       read_lock_irqsave(&gpc->lock, flags);
+       if (!kvm_gpc_check(gpc, sizeof(struct vcpu_info))) {
+               /*
+                * Could not access the vcpu_info. Set the bit in-kernel
+                * and prod the vCPU to deliver it for itself.
+                */
+               if (!test_and_set_bit(port_word_bit, &vcpu->arch.xen.evtchn_pending_sel))
+                       kick_vcpu = true;
+       } else {
                if (IS_ENABLED(CONFIG_64BIT) && kvm->arch.xen.long_mode) {
                        struct vcpu_info *vcpu_info = gpc->khva;
                        if (!test_and_set_bit(port_word_bit, &vcpu_info->evtchn_pending_sel)) {
@@ -1730,8 +1710,47 @@ int kvm_xen_set_evtchn_fast(struct kvm_xen_evtchn *xe, struct kvm *kvm)
                }
        }
 
- out_rcu:
        read_unlock_irqrestore(&gpc->lock, flags);
+       return kick_vcpu;
+}
+
+/*
+ * The return value from this function is propagated to kvm_set_irq() API,
+ * so it returns:
+ *  < 0   Interrupt was ignored (masked or not delivered for other reasons)
+ *  = 0   Interrupt was coalesced (previous irq is still pending)
+ *  > 0   Number of CPUs interrupt was delivered to
+ *
+ * It is also called directly from kvm_arch_set_irq_inatomic(), where the
+ * only check on its return value is a comparison with -EWOULDBLOCK.
+ */
+int kvm_xen_set_evtchn_fast(struct kvm_xen_evtchn *xe, struct kvm *kvm)
+{
+       int vcpu_idx, idx, rc;
+       struct kvm_vcpu *vcpu;
+       bool kick_vcpu = false;
+
+       vcpu_idx = READ_ONCE(xe->vcpu_idx);
+       if (vcpu_idx >= 0)
+               vcpu = kvm_get_vcpu(kvm, vcpu_idx);
+       else {
+               vcpu = kvm_get_vcpu_by_id(kvm, xe->vcpu_id);
+               if (!vcpu)
+                       return -EINVAL;
+               WRITE_ONCE(xe->vcpu_idx, vcpu->vcpu_idx);
+       }
+
+       if (!vcpu->arch.xen.vcpu_info_cache.active)
+               return -EINVAL;
+
+       if (xe->port >= max_evtchn_port(kvm))
+               return -EINVAL;
+
+       idx = srcu_read_lock(&kvm->srcu);
+
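+       /*
+        * Deliver to the shared_info bitmap first; only a fresh delivery
+        * (rc == 1) requires the selector bit in the vCPU's vcpu_info to
+        * be set as well.
+        */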
+       rc = set_shinfo_evtchn_pending(vcpu, xe->port);
+       if (rc == 1)
+               kick_vcpu = set_vcpu_info_evtchn_pending(vcpu, xe->port);
+
        srcu_read_unlock(&kvm->srcu, idx);
 
        if (kick_vcpu) {