xenbits.xensource.com Git - people/pauldu/linux.git/commitdiff
KVM: pfncache: add a helper to get the gpa
author: Paul Durrant <pdurrant@amazon.com>
Thu, 7 Sep 2023 16:40:54 +0000 (16:40 +0000)
committer: Paul Durrant <pdurrant@amazon.com>
Thu, 2 Nov 2023 18:03:02 +0000 (18:03 +0000)
A subsequent patch will rename this field since it will become overloaded.
To avoid churn in places that currently retrieve the gpa, add a helper for
that purpose now.

No functional change intended.

Signed-off-by: Paul Durrant <pdurrant@amazon.com>
Reviewed-by: David Woodhouse <dwmw@amazon.co.uk>
---
Cc: David Woodhouse <dwmw2@infradead.org>
Cc: Sean Christopherson <seanjc@google.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: x86@kernel.org
arch/x86/kvm/xen.c
include/linux/kvm_host.h
virt/kvm/pfncache.c

index 41a7c03f7204c57e1640d0f9b9e8a40ec93f21bc..b74bdf991e92a8246ae1ef1ccd1c637dc576affc 100644 (file)
@@ -283,8 +283,8 @@ static void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, bool atomic)
         * alignment (and the 32-bit ABI doesn't align the 64-bit integers
         * anyway, even if the overall struct had been 64-bit aligned).
         */
-       if ((gpc1->gpa & ~PAGE_MASK) + user_len >= PAGE_SIZE) {
-               user_len1 = PAGE_SIZE - (gpc1->gpa & ~PAGE_MASK);
+       if ((kvm_gpc_gpa(gpc1) & ~PAGE_MASK) + user_len >= PAGE_SIZE) {
+               user_len1 = PAGE_SIZE - (kvm_gpc_gpa(gpc1) & ~PAGE_MASK);
                user_len2 = user_len - user_len1;
        } else {
                user_len1 = user_len;
@@ -365,7 +365,7 @@ static void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, bool atomic)
                         * to the second page now because the guest changed to
                         * 64-bit mode, the second GPC won't have been set up.
                         */
-                       if (kvm_gpc_activate(gpc2, gpc1->gpa + user_len1,
+                       if (kvm_gpc_activate(gpc2, kvm_gpc_gpa(gpc1) + user_len1,
                                             user_len2))
                                return;
 
@@ -699,7 +699,8 @@ int kvm_xen_hvm_get_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data)
 
        case KVM_XEN_ATTR_TYPE_SHARED_INFO:
                if (kvm->arch.xen.shinfo_cache.active)
-                       data->u.shared_info.gfn = gpa_to_gfn(kvm->arch.xen.shinfo_cache.gpa);
+                       data->u.shared_info.gfn =
+                               gpa_to_gfn(kvm_gpc_gpa(&kvm->arch.xen.shinfo_cache));
                else
                        data->u.shared_info.gfn = KVM_XEN_INVALID_GFN;
                r = 0;
@@ -977,7 +978,7 @@ int kvm_xen_vcpu_get_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data)
        switch (data->type) {
        case KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO:
                if (vcpu->arch.xen.vcpu_info_cache.active)
-                       data->u.gpa = vcpu->arch.xen.vcpu_info_cache.gpa;
+                       data->u.gpa = kvm_gpc_gpa(&vcpu->arch.xen.vcpu_info_cache);
                else
                        data->u.gpa = KVM_XEN_INVALID_GPA;
                r = 0;
@@ -985,7 +986,7 @@ int kvm_xen_vcpu_get_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data)
 
        case KVM_XEN_VCPU_ATTR_TYPE_VCPU_TIME_INFO:
                if (vcpu->arch.xen.vcpu_time_info_cache.active)
-                       data->u.gpa = vcpu->arch.xen.vcpu_time_info_cache.gpa;
+                       data->u.gpa = kvm_gpc_gpa(&vcpu->arch.xen.vcpu_time_info_cache);
                else
                        data->u.gpa = KVM_XEN_INVALID_GPA;
                r = 0;
@@ -997,7 +998,7 @@ int kvm_xen_vcpu_get_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data)
                        break;
                }
                if (vcpu->arch.xen.runstate_cache.active) {
-                       data->u.gpa = vcpu->arch.xen.runstate_cache.gpa;
+                       data->u.gpa = kvm_gpc_gpa(&vcpu->arch.xen.runstate_cache);
                        r = 0;
                }
                break;
index c71e8fbccaaff96104fbbff98ab88ab7963f652d..4d8027fe9928be5d6417142f856f7d043b99489a 100644 (file)
@@ -1374,6 +1374,13 @@ void kvm_gpc_deactivate(struct gfn_to_pfn_cache *gpc);
  */
 void kvm_gpc_mark_dirty(struct gfn_to_pfn_cache *gpc);
 
+/**
+ * kvm_gpc_gpa - retrieve the guest physical address of a cached mapping
+ *
+ * @gpc:          struct gfn_to_pfn_cache object.
+ */
+gpa_t kvm_gpc_gpa(struct gfn_to_pfn_cache *gpc);
+
 void kvm_sigset_activate(struct kvm_vcpu *vcpu);
 void kvm_sigset_deactivate(struct kvm_vcpu *vcpu);
 
index a8ac60599af69a26ff5f875ac0250b296bfcf5d3..23dfdb112d7e9e36fc17190d26ed9345c7785560 100644 (file)
@@ -390,6 +390,12 @@ int kvm_gpc_activate(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long len)
 }
 EXPORT_SYMBOL_GPL(kvm_gpc_activate);
 
+gpa_t kvm_gpc_gpa(struct gfn_to_pfn_cache *gpc)
+{
+       return gpc->gpa;
+}
+EXPORT_SYMBOL_GPL(kvm_gpc_gpa);
+
 void kvm_gpc_mark_dirty(struct gfn_to_pfn_cache *gpc)
 {
        mark_page_dirty_in_slot(gpc->kvm, gpc->memslot, gpc->gpa >> PAGE_SHIFT);