x86: package up context switch hook pointers
author      Jan Beulich <jbeulich@suse.com>
            Fri, 17 Feb 2017 14:51:03 +0000 (15:51 +0100)
committer   Jan Beulich <jbeulich@suse.com>
            Fri, 17 Feb 2017 14:51:03 +0000 (15:51 +0100)
These hooks all depend solely on guest type, so there's no need to repeat
the same three pointers in every vCPU control structure. Instead, use
static const structures and store pointers to them in the domain control
structure.
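
For reference, the hooks end up packaged like this in struct arch_domain
(this simply mirrors the asm-x86/domain.h hunk further down, shown here to
illustrate the new layout):

    const struct arch_csw {
        void (*from)(struct vcpu *);  /* save state of the outgoing vCPU */
        void (*to)(struct vcpu *);    /* load state of the incoming vCPU */
        void (*tail)(struct vcpu *);  /* resume the vCPU; never returns  */
    } *ctxt_switch;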

Since this code is being touched anyway, take the opportunity to expand
schedule_tail() at its only two call sites, allowing the macro to be
dropped.
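
At each of the two former call sites the macro body is now open-coded,
e.g. in context_switch() (taken from the xen/arch/x86/domain.c hunk below):

    nextd->arch.ctxt_switch->tail(next);
    BUG();  /* the tail hook is expected never to return */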

Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
Reviewed-by: Kevin Tian <kevin.tian@intel.com>
xen/arch/x86/domain.c
xen/arch/x86/hvm/svm/svm.c
xen/arch/x86/hvm/vmx/vmx.c
xen/include/asm-x86/current.h
xen/include/asm-x86/domain.h

diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index 130da296851fb48e4822fa790107952ee2978adb..7d3071eb5f533bff9ecd856be2443314b9d3b79d 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -425,16 +425,8 @@ int vcpu_initialise(struct vcpu *v)
         /* PV guests by default have a 100Hz ticker. */
         v->periodic_period = MILLISECS(10);
     }
-
-    v->arch.schedule_tail = continue_nonidle_domain;
-    v->arch.ctxt_switch_from = paravirt_ctxt_switch_from;
-    v->arch.ctxt_switch_to   = paravirt_ctxt_switch_to;
-
-    if ( is_idle_domain(d) )
-    {
-        v->arch.schedule_tail = continue_idle_domain;
-        v->arch.cr3           = __pa(idle_pg_table);
-    }
+    else
+        v->arch.cr3 = __pa(idle_pg_table);
 
     v->arch.pv_vcpu.ctrlreg[4] = real_cr4_to_pv_guest_cr4(mmu_cr4_features);
 
@@ -641,8 +633,23 @@ int arch_domain_create(struct domain *d, unsigned int domcr_flags,
             goto fail;
     }
     else
+    {
+        static const struct arch_csw pv_csw = {
+            .from = paravirt_ctxt_switch_from,
+            .to   = paravirt_ctxt_switch_to,
+            .tail = continue_nonidle_domain,
+        };
+        static const struct arch_csw idle_csw = {
+            .from = paravirt_ctxt_switch_from,
+            .to   = paravirt_ctxt_switch_to,
+            .tail = continue_idle_domain,
+        };
+
+        d->arch.ctxt_switch = is_idle_domain(d) ? &idle_csw : &pv_csw;
+
         /* 64-bit PV guest by default. */
         d->arch.is_32bit_pv = d->arch.has_32bit_shinfo = 0;
+    }
 
     /* initialize default tsc behavior in case tools don't */
     tsc_set_info(d, TSC_MODE_DEFAULT, 0UL, 0, 0);
@@ -1996,7 +2003,7 @@ static void __context_switch(void)
     {
         memcpy(&p->arch.user_regs, stack_regs, CTXT_SWITCH_STACK_BYTES);
         vcpu_save_fpu(p);
-        p->arch.ctxt_switch_from(p);
+        pd->arch.ctxt_switch->from(p);
     }
 
     /*
@@ -2022,7 +2029,7 @@ static void __context_switch(void)
                 set_msr_xss(n->arch.hvm_vcpu.msr_xss);
         }
         vcpu_restore_fpu_eager(n);
-        n->arch.ctxt_switch_to(n);
+        nd->arch.ctxt_switch->to(n);
     }
 
     psr_ctxt_switch_to(nd);
@@ -2141,12 +2148,20 @@ void context_switch(struct vcpu *prev, struct vcpu *next)
     /* Ensure that the vcpu has an up-to-date time base. */
     update_vcpu_system_time(next);
 
-    schedule_tail(next);
+    /*
+     * Schedule tail *should* be a terminal function pointer, but leave a
+     * bug frame around just in case it returns, to save going back into the
+     * context switching code and leaving a far more subtle crash to diagnose.
+     */
+    nextd->arch.ctxt_switch->tail(next);
+    BUG();
 }
 
 void continue_running(struct vcpu *same)
 {
-    schedule_tail(same);
+    /* See the comment above. */
+    same->domain->arch.ctxt_switch->tail(same);
+    BUG();
 }
 
 int __sync_local_execstate(void)
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index f9bfbdd5f710da9daf9a6af4e399942eb20ffde2..894c4575af6f775e0bb23242c40d7d738a779744 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -1143,6 +1143,14 @@ void svm_host_osvw_init()
 
 static int svm_domain_initialise(struct domain *d)
 {
+    static const struct arch_csw csw = {
+        .from = svm_ctxt_switch_from,
+        .to   = svm_ctxt_switch_to,
+        .tail = svm_do_resume,
+    };
+
+    d->arch.ctxt_switch = &csw;
+
     return 0;
 }
 
@@ -1154,10 +1162,6 @@ static int svm_vcpu_initialise(struct vcpu *v)
 {
     int rc;
 
-    v->arch.schedule_tail    = svm_do_resume;
-    v->arch.ctxt_switch_from = svm_ctxt_switch_from;
-    v->arch.ctxt_switch_to   = svm_ctxt_switch_to;
-
     v->arch.hvm_svm.launch_core = -1;
 
     if ( (rc = svm_create_vmcb(v)) != 0 )
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index 074939edf23a499821e21f62a9a59f1ab8206862..597d7ac47194d1d539ba3029bb03cf5e3e62d00b 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -267,8 +267,15 @@ void vmx_pi_hooks_deassign(struct domain *d)
 
 static int vmx_domain_initialise(struct domain *d)
 {
+    static const struct arch_csw csw = {
+        .from = vmx_ctxt_switch_from,
+        .to   = vmx_ctxt_switch_to,
+        .tail = vmx_do_resume,
+    };
     int rc;
 
+    d->arch.ctxt_switch = &csw;
+
     if ( !has_vlapic(d) )
         return 0;
 
@@ -294,10 +301,6 @@ static int vmx_vcpu_initialise(struct vcpu *v)
 
     INIT_LIST_HEAD(&v->arch.hvm_vmx.pi_blocking.list);
 
-    v->arch.schedule_tail    = vmx_do_resume;
-    v->arch.ctxt_switch_from = vmx_ctxt_switch_from;
-    v->arch.ctxt_switch_to   = vmx_ctxt_switch_to;
-
     if ( (rc = vmx_create_vmcs(v)) != 0 )
     {
         dprintk(XENLOG_WARNING,
diff --git a/xen/include/asm-x86/current.h b/xen/include/asm-x86/current.h
index d2ee5986053b417131a7c10d82aad828dec3b2f1..89849929ebf98e9ba04659f59b5521982a718b2d 100644
--- a/xen/include/asm-x86/current.h
+++ b/xen/include/asm-x86/current.h
@@ -101,16 +101,6 @@ unsigned long get_stack_dump_bottom (unsigned long sp);
         unreachable();                                                  \
     })
 
-/*
- * Schedule tail *should* be a terminal function pointer, but leave a bugframe
- * around just incase it returns, to save going back into the context
- * switching code and leaving a far more subtle crash to diagnose.
- */
-#define schedule_tail(vcpu) do {                \
-        (((vcpu)->arch.schedule_tail)(vcpu));   \
-        BUG();                                  \
-    } while (0)
-
 /*
  * Which VCPU's state is currently running on each CPU?
  * This is not necessarily the same as 'current' as a CPU may be
diff --git a/xen/include/asm-x86/domain.h b/xen/include/asm-x86/domain.h
index 9bb070feccbed2dc61fe5fcd3789b2b4016d0f31..2839a732c2f1633ae9be4af510bc5b0a91ace776 100644
--- a/xen/include/asm-x86/domain.h
+++ b/xen/include/asm-x86/domain.h
@@ -313,6 +313,12 @@ struct arch_domain
     } relmem;
     struct page_list_head relmem_list;
 
+    const struct arch_csw {
+        void (*from)(struct vcpu *);
+        void (*to)(struct vcpu *);
+        void (*tail)(struct vcpu *);
+    } *ctxt_switch;
+
     /* nestedhvm: translate l2 guest physical to host physical */
     struct p2m_domain *nested_p2m[MAX_NESTEDP2M];
     mm_lock_t nested_p2m_lock;
@@ -509,11 +515,6 @@ struct arch_vcpu
 
     unsigned long      flags; /* TF_ */
 
-    void (*schedule_tail) (struct vcpu *);
-
-    void (*ctxt_switch_from) (struct vcpu *);
-    void (*ctxt_switch_to) (struct vcpu *);
-
     struct vpmu_struct vpmu;
 
     /* Virtual Machine Extensions */