xen: Move async_exception_* infrastructure into x86
author     Andrew Cooper <andrew.cooper3@citrix.com>
           Thu, 13 Feb 2020 12:58:35 +0000 (12:58 +0000)
committer  Andrew Cooper <andrew.cooper3@citrix.com>
           Thu, 20 Feb 2020 18:24:51 +0000 (18:24 +0000)
The async_exception_{state,mask} infrastructure is implemented in common code,
but is limited to x86 because of the VCPU_TRAP_LAST ifdef-ary.

The internals are very x86-specific (and even then, in need of correction),
and won't be of interest to other architectures.  Move it all into
x86-specific code.

No functional change.

Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
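
For readers following the diff: the moved fields keep their slightly unusual
indexing scheme.  Trap numbers start at VCPU_TRAP_NMI == 1, while the
async_exception_state[] array is zero-based, so the identically named
function-like macro shifts the index down by one; old_mask lets do_iret()'s
async_exception_cleanup() restore the mask that was in force before delivery.
Below is a minimal standalone sketch of that mechanism, not Xen source:
struct arch_vcpu_sketch, deliver(), complete() and main() are illustrative
stand-ins, and only the field layout and macro are taken from the diff.

/*
 * Standalone sketch of the async_exception_{state,mask} bookkeeping.
 * Illustrative only; the real code lives in struct arch_vcpu and
 * xen/arch/x86/pv/iret.c.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define VCPU_TRAP_NONE 0
#define VCPU_TRAP_NMI  1
#define VCPU_TRAP_MCE  2
#define VCPU_TRAP_LAST VCPU_TRAP_MCE

struct arch_vcpu_sketch {
    struct {
        bool    pending;
        uint8_t old_mask;
    } async_exception_state[VCPU_TRAP_LAST];
/* Trap numbers are 1-based; map them onto the 0-based array. */
#define async_exception_state(t) async_exception_state[(t) - 1]
    uint8_t async_exception_mask;
};

/* Deliver one async exception: latch the old mask, then mask this trap. */
static void deliver(struct arch_vcpu_sketch *v, unsigned int trap)
{
    v->async_exception_state(trap).old_mask = v->async_exception_mask;
    v->async_exception_mask |= 1u << trap;
    v->async_exception_state(trap).pending = false;
}

/* Counterpart of async_exception_cleanup(): restore the previous mask. */
static void complete(struct arch_vcpu_sketch *v, unsigned int trap)
{
    v->async_exception_mask = v->async_exception_state(trap).old_mask;
}

int main(void)
{
    struct arch_vcpu_sketch v = { .async_exception_mask = 0 };

    v.async_exception_state(VCPU_TRAP_NMI).pending = true;
    deliver(&v, VCPU_TRAP_NMI);
    printf("mask during delivery: %#x\n", v.async_exception_mask); /* 0x2 */
    complete(&v, VCPU_TRAP_NMI);
    printf("mask after iret:      %#x\n", v.async_exception_mask); /* 0 */
    return 0;
}

Compiled with a C11 compiler, this prints 0x2 while the NMI is being
delivered and 0 once the restore has run, mirroring the
(1 << VCPU_TRAP_NMI) bit that do_nmi_stats() tests below.
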
16 files changed:
xen/arch/x86/cpu/mcheck/vmce.c
xen/arch/x86/cpu/vpmu.c
xen/arch/x86/domain.c
xen/arch/x86/domctl.c
xen/arch/x86/hvm/irq.c
xen/arch/x86/hvm/vioapic.c
xen/arch/x86/hvm/vlapic.c
xen/arch/x86/nmi.c
xen/arch/x86/oprofile/nmi_int.c
xen/arch/x86/pv/callback.c
xen/arch/x86/pv/iret.c
xen/arch/x86/pv/traps.c
xen/arch/x86/x86_64/asm-offsets.c
xen/common/domain.c
xen/include/asm-x86/domain.h
xen/include/xen/sched.h

diff --git a/xen/arch/x86/cpu/mcheck/vmce.c b/xen/arch/x86/cpu/mcheck/vmce.c
index 4f5de07e01bc0ae467b7788260238f3d6a4b32c0..816ef61ad407ee4599d76f02d25c264d6a9dd4a8 100644
@@ -412,7 +412,7 @@ int inject_vmce(struct domain *d, int vcpu)
 
         if ( (is_hvm_domain(d) ||
               pv_trap_callback_registered(v, TRAP_machine_check)) &&
-             !test_and_set_bool(v->mce_pending) )
+             !test_and_set_bool(v->arch.mce_pending) )
         {
             mce_printk(MCE_VERBOSE, "MCE: inject vMCE to %pv\n", v);
             vcpu_kick(v);
diff --git a/xen/arch/x86/cpu/vpmu.c b/xen/arch/x86/cpu/vpmu.c
index 3c778450ac7eb29cccc1e3eb66af6d3c1291113a..e50d478d23becace4cb3cd1817a3c9b069274255 100644
@@ -329,7 +329,7 @@ void vpmu_do_interrupt(struct cpu_user_regs *regs)
         vlapic_set_irq(vlapic, vlapic_lvtpc & APIC_VECTOR_MASK, 0);
         break;
     case APIC_MODE_NMI:
-        sampling->nmi_pending = 1;
+        sampling->arch.nmi_pending = true;
         break;
     }
 #endif
diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index 66150abf4c6086b7644061d0f6591f0163d400e3..fe63c23676b63039878041929e50416d3714ae71 100644
@@ -1246,6 +1246,10 @@ int arch_initialise_vcpu(struct vcpu *v, XEN_GUEST_HANDLE_PARAM(void) arg)
 
 int arch_vcpu_reset(struct vcpu *v)
 {
+    v->arch.async_exception_mask = 0;
+    memset(v->arch.async_exception_state, 0,
+           sizeof(v->arch.async_exception_state));
+
     if ( is_pv_vcpu(v) )
     {
         pv_destroy_gdt(v);
@@ -1264,6 +1268,14 @@ arch_do_vcpu_op(
 
     switch ( cmd )
     {
+    case VCPUOP_send_nmi:
+        if ( !guest_handle_is_null(arg) )
+            return -EINVAL;
+
+        if ( !test_and_set_bool(v->arch.nmi_pending) )
+            vcpu_kick(v);
+        break;
+
     case VCPUOP_register_vcpu_time_memory_area:
     {
         struct vcpu_register_time_memory_area area;
diff --git a/xen/arch/x86/domctl.c b/xen/arch/x86/domctl.c
index ce76d6d77641f9c6a3a64dd4620c29d212bc88ef..ed86762fa6edb2abff9324cbe4e56e7629844b05 100644
@@ -614,7 +614,7 @@ long arch_do_domctl(
         {
         case XEN_DOMCTL_SENDTRIGGER_NMI:
             ret = 0;
-            if ( !test_and_set_bool(v->nmi_pending) )
+            if ( !test_and_set_bool(v->arch.nmi_pending) )
                 vcpu_kick(v);
             break;
 
diff --git a/xen/arch/x86/hvm/irq.c b/xen/arch/x86/hvm/irq.c
index c684422b249cc51ffdafc608a52f77b0b6772ef5..dd202aab5a67d501bbf6019d1da2f2711381fac6 100644
@@ -526,10 +526,10 @@ struct hvm_intack hvm_vcpu_has_pending_irq(struct vcpu *v)
      */
     vlapic_sync_pir_to_irr(v);
 
-    if ( unlikely(v->nmi_pending) )
+    if ( unlikely(v->arch.nmi_pending) )
         return hvm_intack_nmi;
 
-    if ( unlikely(v->mce_pending) )
+    if ( unlikely(v->arch.mce_pending) )
         return hvm_intack_mce;
 
     if ( (plat->irq->callback_via_type == HVMIRQ_callback_vector)
@@ -554,11 +554,11 @@ struct hvm_intack hvm_vcpu_ack_pending_irq(
     switch ( intack.source )
     {
     case hvm_intsrc_nmi:
-        if ( !test_and_clear_bool(v->nmi_pending) )
+        if ( !test_and_clear_bool(v->arch.nmi_pending) )
             intack = hvm_intack_none;
         break;
     case hvm_intsrc_mce:
-        if ( !test_and_clear_bool(v->mce_pending) )
+        if ( !test_and_clear_bool(v->arch.mce_pending) )
             intack = hvm_intack_none;
         break;
     case hvm_intsrc_pic:
diff --git a/xen/arch/x86/hvm/vioapic.c b/xen/arch/x86/hvm/vioapic.c
index 9aeef32a149034873be58678cfd624b39eac673b..b87facb0e04cbe8206621e8df06d7c0319a54674 100644
@@ -469,7 +469,7 @@ static void vioapic_deliver(struct hvm_vioapic *vioapic, unsigned int pin)
         for_each_vcpu ( d, v )
             if ( vlapic_match_dest(vcpu_vlapic(v), NULL,
                                    0, dest, dest_mode) &&
-                 !test_and_set_bool(v->nmi_pending) )
+                 !test_and_set_bool(v->arch.nmi_pending) )
                 vcpu_kick(v);
         break;
     }
diff --git a/xen/arch/x86/hvm/vlapic.c b/xen/arch/x86/hvm/vlapic.c
index acb9ddf46f088e0887ae62b9e16e4d4c64731f69..26726a4312ab3afb1ea29d54268815bf14b5f115 100644
@@ -355,7 +355,7 @@ static void vlapic_accept_irq(struct vcpu *v, uint32_t icr_low)
         break;
 
     case APIC_DM_NMI:
-        if ( !test_and_set_bool(v->nmi_pending) )
+        if ( !test_and_set_bool(v->arch.nmi_pending) )
         {
             bool_t wake = 0;
             domain_lock(v->domain);
diff --git a/xen/arch/x86/nmi.c b/xen/arch/x86/nmi.c
index af1d1d52c7d95ddc76019be612efe2cf6ea192af..a69b91a924025dec8c946e6617b9402b8bf15f11 100644
@@ -598,8 +598,8 @@ static void do_nmi_stats(unsigned char key)
     if ( !hardware_domain || !(v = domain_vcpu(hardware_domain, 0)) )
         return;
 
-    pend = v->nmi_pending;
-    mask = v->async_exception_mask & (1 << VCPU_TRAP_NMI);
+    pend = v->arch.nmi_pending;
+    mask = v->arch.async_exception_mask & (1 << VCPU_TRAP_NMI);
     if ( pend || mask )
         printk("%pv: NMI%s%s\n",
                v, pend ? " pending" : "", mask ? " masked" : "");
diff --git a/xen/arch/x86/oprofile/nmi_int.c b/xen/arch/x86/oprofile/nmi_int.c
index 8f97f7522cfd52c2778684dd9a314e57fdf49248..2969db47fcd6b194c5bf078bf1fa7cc7a297565e 100644
@@ -93,7 +93,7 @@ static int nmi_callback(const struct cpu_user_regs *regs, int cpu)
                send_guest_vcpu_virq(current, VIRQ_XENOPROF);
 
        if ( ovf == 2 )
-                current->nmi_pending = 1;
+               current->arch.nmi_pending = true;
        return 1;
 }
 
diff --git a/xen/arch/x86/pv/callback.c b/xen/arch/x86/pv/callback.c
index 1178efddb69d0883a832e62c59da3702142549b0..106c16ed0138328fe89e7e544e6c8400a2a1abad 100644
@@ -52,7 +52,7 @@ static int register_guest_nmi_callback(unsigned long address)
      * now.
      */
     if ( curr->vcpu_id == 0 && arch_get_nmi_reason(d) != 0 )
-        curr->nmi_pending = 1;
+        curr->arch.nmi_pending = true;
 
     return 0;
 }
diff --git a/xen/arch/x86/pv/iret.c b/xen/arch/x86/pv/iret.c
index 16b449ff645594cf1f895013e177e3cf3d140f5c..9e34b616f92166072b3e579a1c6a2a7ebe4cc5e4 100644
@@ -27,15 +27,15 @@ static void async_exception_cleanup(struct vcpu *curr)
 {
     unsigned int trap;
 
-    if ( !curr->async_exception_mask )
+    if ( !curr->arch.async_exception_mask )
         return;
 
-    if ( !(curr->async_exception_mask & (curr->async_exception_mask - 1)) )
-        trap = __scanbit(curr->async_exception_mask, VCPU_TRAP_NONE);
+    if ( !(curr->arch.async_exception_mask & (curr->arch.async_exception_mask - 1)) )
+        trap = __scanbit(curr->arch.async_exception_mask, VCPU_TRAP_NONE);
     else
         for ( trap = VCPU_TRAP_NONE + 1; trap <= VCPU_TRAP_LAST; ++trap )
-            if ( (curr->async_exception_mask ^
-                  curr->async_exception_state(trap).old_mask) == (1u << trap) )
+            if ( (curr->arch.async_exception_mask ^
+                  curr->arch.async_exception_state(trap).old_mask) == (1u << trap) )
                 break;
     if ( unlikely(trap > VCPU_TRAP_LAST) )
     {
@@ -44,7 +44,8 @@ static void async_exception_cleanup(struct vcpu *curr)
     }
 
     /* Restore previous asynchronous exception mask. */
-    curr->async_exception_mask = curr->async_exception_state(trap).old_mask;
+    curr->arch.async_exception_mask =
+        curr->arch.async_exception_state(trap).old_mask;
 }
 
 unsigned long do_iret(void)
diff --git a/xen/arch/x86/pv/traps.c b/xen/arch/x86/pv/traps.c
index 950cf25b4a99199ad59107797617f32e1396ceed..d97ebf7890a568dc7272946edc14ccbb20933e73 100644
@@ -176,7 +176,7 @@ int pv_raise_nmi(struct vcpu *v)
 
     if ( cmpxchgptr(v_ptr, NULL, v) )
         return -EBUSY;
-    if ( !test_and_set_bool(v->nmi_pending) )
+    if ( !test_and_set_bool(v->arch.nmi_pending) )
     {
         /* Not safe to wake up a vcpu here */
         raise_softirq(NMI_SOFTIRQ);
diff --git a/xen/arch/x86/x86_64/asm-offsets.c b/xen/arch/x86/x86_64/asm-offsets.c
index 07d2155bf5446edec34115a1cc81ff248c021e3e..b8e85104390c28e2d09c6acbfbee5a890091b578 100644
@@ -72,11 +72,11 @@ void __dummy__(void)
     OFFSET(VCPU_guest_context_flags, struct vcpu, arch.pv.vgc_flags);
     OFFSET(VCPU_cr3, struct vcpu, arch.cr3);
     OFFSET(VCPU_arch_msrs, struct vcpu, arch.msrs);
-    OFFSET(VCPU_nmi_pending, struct vcpu, nmi_pending);
-    OFFSET(VCPU_mce_pending, struct vcpu, mce_pending);
-    OFFSET(VCPU_nmi_old_mask, struct vcpu, nmi_state.old_mask);
-    OFFSET(VCPU_mce_old_mask, struct vcpu, mce_state.old_mask);
-    OFFSET(VCPU_async_exception_mask, struct vcpu, async_exception_mask);
+    OFFSET(VCPU_nmi_pending, struct vcpu, arch.nmi_pending);
+    OFFSET(VCPU_mce_pending, struct vcpu, arch.mce_pending);
+    OFFSET(VCPU_nmi_old_mask, struct vcpu, arch.nmi_state.old_mask);
+    OFFSET(VCPU_mce_old_mask, struct vcpu, arch.mce_state.old_mask);
+    OFFSET(VCPU_async_exception_mask, struct vcpu, arch.async_exception_mask);
     DEFINE(VCPU_TRAP_NMI, VCPU_TRAP_NMI);
     DEFINE(VCPU_TRAP_MCE, VCPU_TRAP_MCE);
     DEFINE(_VGCF_syscall_disables_events,  _VGCF_syscall_disables_events);
diff --git a/xen/common/domain.c b/xen/common/domain.c
index 0ae04d5bb94c2b6bd405cfbc077a5cace19f4aad..6ad458fa6be13ad6127cda6e751429c0ffc4979e 100644
@@ -1199,10 +1199,6 @@ int vcpu_reset(struct vcpu *v)
     v->fpu_initialised = 0;
     v->fpu_dirtied     = 0;
     v->is_initialised  = 0;
-#ifdef VCPU_TRAP_LAST
-    v->async_exception_mask = 0;
-    memset(v->async_exception_state, 0, sizeof(v->async_exception_state));
-#endif
     if ( v->affinity_broken & VCPU_AFFINITY_OVERRIDE )
         vcpu_temporary_affinity(v, NR_CPUS, VCPU_AFFINITY_OVERRIDE);
     if ( v->affinity_broken & VCPU_AFFINITY_WAIT )
@@ -1511,17 +1507,6 @@ long do_vcpu_op(int cmd, unsigned int vcpuid, XEN_GUEST_HANDLE_PARAM(void) arg)
         break;
     }
 
-#ifdef VCPU_TRAP_NMI
-    case VCPUOP_send_nmi:
-        if ( !guest_handle_is_null(arg) )
-            return -EINVAL;
-
-        if ( !test_and_set_bool(v->nmi_pending) )
-            vcpu_kick(v);
-
-        break;
-#endif
-
     default:
         rc = arch_do_vcpu_op(cmd, v, arg);
         break;
diff --git a/xen/include/asm-x86/domain.h b/xen/include/asm-x86/domain.h
index 1843c76d1a0b9030d0325997adc345b3c5e4b46d..105adf96ebff192c8a4cfc70931757d211e361d1 100644
@@ -19,6 +19,7 @@
 #define is_hvm_pv_evtchn_vcpu(v) (is_hvm_pv_evtchn_domain(v->domain))
 #define is_domain_direct_mapped(d) ((void)(d), 0)
 
+#define VCPU_TRAP_NONE         0
 #define VCPU_TRAP_NMI          1
 #define VCPU_TRAP_MCE          2
 #define VCPU_TRAP_LAST         VCPU_TRAP_MCE
@@ -556,6 +557,13 @@ struct arch_vcpu
 
     struct vpmu_struct vpmu;
 
+    struct {
+        bool    pending;
+        uint8_t old_mask;
+    } async_exception_state[VCPU_TRAP_LAST];
+#define async_exception_state(t) async_exception_state[(t)-1]
+    uint8_t async_exception_mask;
+
     /* Virtual Machine Extensions */
     union {
         struct pv_vcpu pv;
diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
index 21b5f4cebd982f26f2196b0352af6b759e45184c..3a4f43098c39e0032e67a4edf2c0415dcf58f9fa 100644
@@ -191,17 +191,6 @@ struct vcpu
     bool             is_urgent;
     /* VCPU must context_switch without scheduling unit. */
     bool             force_context_switch;
-
-#ifdef VCPU_TRAP_LAST
-#define VCPU_TRAP_NONE    0
-    struct {
-        bool             pending;
-        uint8_t          old_mask;
-    }                async_exception_state[VCPU_TRAP_LAST];
-#define async_exception_state(t) async_exception_state[(t)-1]
-    uint8_t          async_exception_mask;
-#endif
-
     /* Require shutdown to be deferred for some asynchronous operation? */
     bool             defer_shutdown;
     /* VCPU is paused following shutdown request (d->is_shutting_down)? */