xenbits.xensource.com Git - people/royger/xen.git/commitdiff
x86/mem-sharing: copy GADDR based shared guest areas
author     Jan Beulich <jbeulich@suse.com>
           Wed, 4 Oct 2023 13:53:31 +0000 (15:53 +0200)
committer  Julien Grall <jgrall@amazon.com>
           Thu, 5 Oct 2023 13:10:37 +0000 (14:10 +0100)
In preparation for the introduction of new vCPU operations that allow
registering the respective areas (one of the two is x86-specific) by
guest-physical address, add the necessary fork handling (with the
backing function yet to be filled in).

Signed-off-by: Jan Beulich <jbeulich@suse.com>
Signed-off-by: Roger Pau Monné <roger.pau@citrix.com>
Acked-by: Tamas K Lengyel <tamas@tklengyel.com>
Release-acked-by: Henry Wang <Henry.Wang@arm.com>
xen/arch/x86/mm/mem_sharing.c
xen/common/domain.c

diff --git a/xen/arch/x86/mm/mem_sharing.c b/xen/arch/x86/mm/mem_sharing.c
index 5f8f1fb4d87186c11578635f4e8473d1f64718a2..445947b6a918973d13112966c43a0b1487851a31 100644
--- a/xen/arch/x86/mm/mem_sharing.c
+++ b/xen/arch/x86/mm/mem_sharing.c
@@ -1641,6 +1641,24 @@ static void copy_vcpu_nonreg_state(struct vcpu *d_vcpu, struct vcpu *cd_vcpu)
     hvm_set_nonreg_state(cd_vcpu, &nrs);
 }
 
+static int copy_guest_area(struct guest_area *cd_area,
+                           const struct guest_area *d_area,
+                           struct vcpu *cd_vcpu,
+                           const struct domain *d)
+{
+    unsigned int offset;
+
+    /* Check if no area to map, or already mapped. */
+    if ( !d_area->pg || cd_area->pg )
+        return 0;
+
+    offset = PAGE_OFFSET(d_area->map);
+    return map_guest_area(cd_vcpu, gfn_to_gaddr(
+                                       mfn_to_gfn(d, page_to_mfn(d_area->pg))) +
+                                   offset,
+                          PAGE_SIZE - offset, cd_area, NULL);
+}
+
 static int copy_vpmu(struct vcpu *d_vcpu, struct vcpu *cd_vcpu)
 {
     struct vpmu_struct *d_vpmu = vcpu_vpmu(d_vcpu);
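[Note] The guest-physical address handed to map_guest_area() is reconstructed from the
parent's state: the backing page (d_area->pg) is translated back to a GFN with
mfn_to_gfn(), shifted up to an address, and the sub-page offset of the parent's mapping
(d_area->map) is re-applied, so the fork registers the same byte range; the size passed
is simply the space left in that page (PAGE_SIZE - offset). Below is a minimal
standalone sketch of that arithmetic, using illustrative stand-ins for PAGE_OFFSET and
the GFN value rather than Xen's definitions.

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT      12
    #define PAGE_SIZE       (1u << PAGE_SHIFT)
    /* Stand-in for Xen's PAGE_OFFSET(): sub-page offset of a mapping. */
    #define PAGE_OFFSET(p)  ((uintptr_t)(p) & (PAGE_SIZE - 1))

    int main(void)
    {
        uintptr_t map = 0xffff8300000012c0UL; /* parent's in-page mapping (example) */
        uint64_t gfn = 0x1234;                /* GFN of the backing page (example) */

        unsigned int offset = PAGE_OFFSET(map);
        uint64_t gaddr = (gfn << PAGE_SHIFT) + offset;
        unsigned int size = PAGE_SIZE - offset; /* space left in the page */

        printf("gaddr = %#llx, size = %u\n", (unsigned long long)gaddr, size);
        return 0;
    }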
@@ -1709,6 +1727,16 @@ static int copy_vcpu_settings(struct domain *cd, const struct domain *d)
                 return ret;
         }
 
+        /* Same for the (physically registered) runstate and time info areas. */
+        ret = copy_guest_area(&cd_vcpu->runstate_guest_area,
+                              &d_vcpu->runstate_guest_area, cd_vcpu, d);
+        if ( ret )
+            return ret;
+        ret = copy_guest_area(&cd_vcpu->arch.time_guest_area,
+                              &d_vcpu->arch.time_guest_area, cd_vcpu, d);
+        if ( ret )
+            return ret;
+
         ret = copy_vpmu(d_vcpu, cd_vcpu);
         if ( ret )
             return ret;
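[Note] copy_guest_area() is written to be idempotent: it returns success both when the
parent has nothing registered and when the fork's area is already mapped, so re-running
copy_vcpu_settings() (for example after a continuation) does not remap anything. A
standalone sketch of that guard, with simplified stand-in types rather than Xen's
struct guest_area:

    #include <assert.h>
    #include <stddef.h>

    struct area { void *pg; };   /* stand-in for struct guest_area's backing page */

    static int copy_area(struct area *child, const struct area *parent)
    {
        /* Nothing registered in the parent, or already copied into the child. */
        if ( !parent->pg || child->pg )
            return 0;

        child->pg = parent->pg;  /* placeholder for the real map_guest_area() call */
        return 0;
    }

    int main(void)
    {
        struct area parent = { .pg = (void *)1 }, child = { .pg = NULL };

        assert(copy_area(&child, &parent) == 0 && child.pg); /* first pass maps */
        assert(copy_area(&child, &parent) == 0);             /* retry is a no-op */
        return 0;
    }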
@@ -1950,7 +1978,15 @@ int mem_sharing_fork_reset(struct domain *d, bool reset_state,
 
  state:
     if ( reset_state )
+    {
         rc = copy_settings(d, pd);
+        if ( rc == -ERESTART )
+            /*
+             * Translate to -EAGAIN, see TODO comment at top of function about
+             * hypercall continuations.
+             */
+            rc = -EAGAIN;
+    }
 
     domain_unpause(d);
 
diff --git a/xen/common/domain.c b/xen/common/domain.c
index d4958ec5e14921b190be741741939df25e17758a..47fc90271901931ca0700a7944a2b41b5839631d 100644
--- a/xen/common/domain.c
+++ b/xen/common/domain.c
@@ -1601,6 +1601,13 @@ void unmap_vcpu_info(struct vcpu *v)
     put_page_and_type(mfn_to_page(mfn));
 }
 
+int map_guest_area(struct vcpu *v, paddr_t gaddr, unsigned int size,
+                   struct guest_area *area,
+                   void (*populate)(void *dst, struct vcpu *v))
+{
+    return -EOPNOTSUPP;
+}
+
 /*
  * This is only intended to be used for domain cleanup (or more generally only
  * with at least the respective vCPU, if it's not the current one, reliably
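[Note] The common-code map_guest_area() is only a stub returning -EOPNOTSUPP for now;
per the commit message, the backing function is yet to be filled in. Its signature
already carries an optional populate() callback, presumably used to fill the area
before it is exposed to the guest. A rough, assumed sketch of how such a hook could be
driven, with simplified stand-in types (not the eventual Xen implementation):

    #include <stdio.h>
    #include <string.h>

    struct vcpu { int id; };   /* simplified stand-in */

    static void populate_runstate(void *dst, struct vcpu *v)
    {
        /* A real callback would write the initial runstate/time contents. */
        memset(dst, 0, 64);
        printf("populated area for vCPU %d\n", v->id);
    }

    static int map_area_sketch(struct vcpu *v, void *mapping,
                               void (*populate)(void *dst, struct vcpu *v))
    {
        if ( populate )
            populate(mapping, v); /* initialize before publishing to the guest */
        return 0;
    }

    int main(void)
    {
        struct vcpu v = { .id = 0 };
        char buf[64];

        return map_area_sketch(&v, buf, populate_runstate);
    }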