ia64/xen-unstable

changeset 17067:8e89261a8308

[IA64] Add support for hvm live migration

This is a naive implementation of log-dirty mode for HVM.
(I gave up on writing the dirty-bit fault handler in assembler; the fault
is instead dispatched to a C handler, vmx_ia64_shadow_fault.)

An HVM domain with PV drivers can't be migrated yet.

Signed-off-by: Kouya Shimura <kouya@jp.fujitsu.com>
author Alex Williamson <alex.williamson@hp.com>
date Thu Feb 14 11:50:55 2008 -0700 (2008-02-14)
parents 9e9ba5185ef1
children d8fbfd8ef184
files xen/arch/ia64/vmx/vmx_fault.c xen/arch/ia64/vmx/vmx_ivt.S xen/arch/ia64/vmx/vtlb.c
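Taken together, the changes to vmx_fault.c, vmx_ivt.S and vtlb.c below implement log-dirty tracking by virtualizing the PTE dirty bit: translate_phy_pte() remembers a guest-set D bit in a software-only flag (_PAGE_VIRT_D) and clears the hardware D bit whenever the page is not yet recorded in the dirty bitmap, so the guest's first write to such a page raises a dirty-bit fault; vmx_ia64_shadow_fault() then logs the page, restores the D bit and lets the guest retry. The following is a minimal stand-alone sketch of that two-step flow; PAGE_D, PAGE_VIRT_D and the helper names are simplified stand-ins, not the real Xen definitions.

    #include <stdint.h>
    #include <stdbool.h>

    #define PAGE_D       (1u << 0)   /* stand-in for the hardware dirty bit (_PAGE_D) */
    #define PAGE_VIRT_D  (1u << 1)   /* stand-in for the software flag (_PAGE_VIRT_D) */

    /* Insert path (cf. translate_phy_pte): remember that the guest set D, then
     * hide the hardware D bit so the first write to an unlogged page faults. */
    static uint32_t prepare_pte(uint32_t pte, bool log_dirty_enabled,
                                bool page_already_logged)
    {
        if (log_dirty_enabled && (pte & PAGE_D)) {
            pte |= PAGE_VIRT_D;
            if (!page_already_logged)
                pte &= ~PAGE_D;
        }
        return pte;
    }

    /* Fault path (cf. vmx_ia64_shadow_fault): if the fault exists only because
     * we cleared D, log the page and let the guest retry; otherwise it is a
     * genuine guest fault and must be reflected (IA64_DIRTY_BIT_VECTOR). */
    static bool handle_dirty_bit_fault(uint32_t *pte, void (*log_page)(void))
    {
        if (!(*pte & PAGE_VIRT_D))
            return false;    /* reflect the fault to the guest */
        log_page();          /* shadow_mark_page_dirty() in the real code */
        *pte |= PAGE_D;      /* restore D; the real code also purges the TLB entry */
        return true;
    }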
line diff
     1.1 --- a/xen/arch/ia64/vmx/vmx_fault.c	Thu Feb 14 09:34:27 2008 -0700
     1.2 +++ b/xen/arch/ia64/vmx/vmx_fault.c	Thu Feb 14 11:50:55 2008 -0700
     1.3 @@ -52,6 +52,7 @@
     1.4  #include <asm/vmx_phy_mode.h>
     1.5  #include <xen/mm.h>
     1.6  #include <asm/vmx_pal.h>
     1.7 +#include <asm/shadow.h>
     1.8  /* reset all PSR field to 0, except up,mfl,mfh,pk,dt,rt,mc,it */
     1.9  #define INITIAL_PSR_VALUE_AT_INTERRUPTION 0x0000001808028034
    1.10  
    1.11 @@ -520,3 +521,47 @@ try_again:
    1.12      itlb_fault(v, vadr);
    1.13      return IA64_FAULT;
    1.14  }
    1.15 +
    1.16 +void
    1.17 +vmx_ia64_shadow_fault(u64 ifa, u64 isr, u64 mpa, REGS *regs)
    1.18 +{
    1.19 +    struct vcpu *v = current;
    1.20 +    struct domain *d = v->domain;
    1.21 +    u64 gpfn, pte;
    1.22 +    thash_data_t *data;
    1.23 +
    1.24 +    if (!shadow_mode_enabled(d))
    1.25 +        goto inject_dirty_bit;
    1.26 +
    1.27 +    gpfn = get_gpfn_from_mfn(mpa >> PAGE_SHIFT);
    1.28 +    data = vhpt_lookup(ifa);
    1.29 +    if (data) {
    1.30 +        pte = data->page_flags;
    1.31 +        // BUG_ON((pte ^ mpa) & (_PAGE_PPN_MASK & PAGE_MASK));
    1.32 +        if (!(pte & _PAGE_VIRT_D))
    1.33 +            goto inject_dirty_bit;
    1.34 +        data->page_flags = pte | _PAGE_D;
    1.35 +    } else {
    1.36 +        data = vtlb_lookup(v, ifa, DSIDE_TLB);
    1.37 +        if (data) {
    1.38 +            if (!(data->page_flags & _PAGE_VIRT_D))
    1.39 +                goto inject_dirty_bit;
    1.40 +        }
    1.41 +        pte = 0;
    1.42 +    }
    1.43 +
    1.44 +    /* Set the dirty bit in the bitmap.  */
    1.45 +    shadow_mark_page_dirty(d, gpfn);
    1.46 +
    1.47 +    /* Retry */
    1.48 +    atomic64_inc(&d->arch.shadow_fault_count);
    1.49 +    ia64_ptcl(ifa, PAGE_SHIFT << 2);
    1.50 +    return;
    1.51 +
    1.52 +inject_dirty_bit:
    1.53 +    /* Reflect. no need to purge.  */
    1.54 +    VCPU(v, isr) = isr;
    1.55 +    set_ifa_itir_iha (v, ifa, 1, 1, 1);
    1.56 +    inject_guest_interruption(v, IA64_DIRTY_BIT_VECTOR);
    1.57 +    return;
    1.58 +}
     2.1 --- a/xen/arch/ia64/vmx/vmx_ivt.S	Thu Feb 14 09:34:27 2008 -0700
     2.2 +++ b/xen/arch/ia64/vmx/vmx_ivt.S	Thu Feb 14 11:50:55 2008 -0700
     2.3 @@ -433,8 +433,16 @@ END(vmx_dkey_miss)
     2.4  /////////////////////////////////////////////////////////////////////////////////////////
     2.5  // 0x2000 Entry 8 (size 64 bundles) Dirty-bit (54)
     2.6  ENTRY(vmx_dirty_bit)
     2.7 -    VMX_DBG_FAULT(8)
     2.8 -    VMX_REFLECT(8)
     2.9 +    mov r29=cr.ipsr
    2.10 +    mov r31=pr
    2.11 +    ;;
    2.12 +    mov r19=cr.ifa
    2.13 +    tbit.z p6,p0=r29,IA64_PSR_VM_BIT
     2.14 +(p6) br.spnt.many vmx_fault_8
    2.15 +    ;;
    2.16 +    tpa r19=r19
    2.17 +    br.sptk vmx_dispatch_shadow_fault
    2.18 +    VMX_FAULT(8)
    2.19  END(vmx_dirty_bit)
    2.20  
    2.21      .org vmx_ia64_ivt+0x2400
    2.22 @@ -1332,6 +1340,30 @@ ENTRY(vmx_dispatch_interrupt)
    2.23      br.call.sptk.many b6=ia64_handle_irq
    2.24  END(vmx_dispatch_interrupt)
    2.25  
    2.26 +
    2.27 +ENTRY(vmx_dispatch_shadow_fault)
    2.28 +    VMX_SAVE_MIN_WITH_COVER_R19
    2.29 +    alloc r14=ar.pfs,0,0,4,0
    2.30 +    mov out0=cr.ifa
    2.31 +    mov out1=cr.isr
    2.32 +    mov out2=r15
    2.33 +    adds r3=8,r2                // set up second base pointer
    2.34 +    ;;
    2.35 +    ssm psr.ic
    2.36 +    ;;
    2.37 +    srlz.i                  // guarantee that interruption collection is on
    2.38 +    ;;
    2.39 +    (p15) ssm psr.i               // restore psr.i
    2.40 +    movl r14=ia64_leave_hypervisor
    2.41 +    ;;
    2.42 +    VMX_SAVE_REST
    2.43 +    mov rp=r14
    2.44 +    ;;
    2.45 +    P6_BR_CALL_PANIC(.Lvmx_dispatch_shadow_fault_string)
    2.46 +    adds out3=16,r12
    2.47 +    br.call.sptk.many b6=vmx_ia64_shadow_fault
    2.48 +END(vmx_dispatch_shadow_fault)
    2.49 +
    2.50  .Lvmx_dispatch_reflection_string:
    2.51      .asciz "vmx_dispatch_reflection\n"
    2.52  .Lvmx_dispatch_virtualization_fault_string:
    2.53 @@ -1340,3 +1372,5 @@ END(vmx_dispatch_interrupt)
    2.54      .asciz "vmx_dispatch_vexirq\n"
    2.55  .Lvmx_dispatch_tlb_miss_string:
    2.56      .asciz "vmx_dispatch_tlb_miss\n"
    2.57 +.Lvmx_dispatch_shadow_fault_string:
    2.58 +    .asciz "vmx_dispatch_shadow_fault\n"
     3.1 --- a/xen/arch/ia64/vmx/vtlb.c	Thu Feb 14 09:34:27 2008 -0700
     3.2 +++ b/xen/arch/ia64/vmx/vtlb.c	Thu Feb 14 11:50:55 2008 -0700
     3.3 @@ -22,6 +22,7 @@
     3.4  
     3.5  #include <asm/vmx_vcpu.h>
     3.6  #include <asm/vmx_phy_mode.h>
     3.7 +#include <asm/shadow.h>
     3.8  
     3.9  static thash_data_t *__alloc_chain(thash_cb_t *);
    3.10  
    3.11 @@ -132,7 +133,7 @@ static void vmx_vhpt_insert(thash_cb_t *
    3.12      ia64_rr rr;
    3.13      thash_data_t *head, *cch;
    3.14  
    3.15 -    pte = pte & ~PAGE_FLAGS_RV_MASK;
    3.16 +    pte &= ((~PAGE_FLAGS_RV_MASK)|_PAGE_VIRT_D);
    3.17      rr.rrval = ia64_get_rr(ifa);
    3.18      head = (thash_data_t *)ia64_thash(ifa);
    3.19      tag = ia64_ttag(ifa);
    3.20 @@ -514,13 +515,14 @@ u64 translate_phy_pte(VCPU *v, u64 *pte,
    3.21      u64 ps, ps_mask, paddr, maddr;
    3.22  //    ia64_rr rr;
    3.23      union pte_flags phy_pte;
    3.24 +    struct domain *d = v->domain;
    3.25  
    3.26      ps = itir_ps(itir);
    3.27      ps_mask = ~((1UL << ps) - 1);
    3.28      phy_pte.val = *pte;
    3.29      paddr = *pte;
    3.30      paddr = ((paddr & _PAGE_PPN_MASK) & ps_mask) | (va & ~ps_mask);
    3.31 -    maddr = lookup_domain_mpa(v->domain, paddr, NULL);
    3.32 +    maddr = lookup_domain_mpa(d, paddr, NULL);
    3.33      if (maddr & GPFN_IO_MASK) {
    3.34          *pte |= VTLB_PTE_IO;
    3.35          return -1;
    3.36 @@ -536,6 +538,18 @@ u64 translate_phy_pte(VCPU *v, u64 *pte,
    3.37  //    ps = rr.ps;
    3.38      maddr = ((maddr & _PAGE_PPN_MASK) & PAGE_MASK) | (paddr & ~PAGE_MASK);
    3.39      phy_pte.ppn = maddr >> ARCH_PAGE_SHIFT;
    3.40 +
    3.41 +    /* If shadow mode is enabled, virtualize dirty bit.  */
    3.42 +    if (shadow_mode_enabled(d) && phy_pte.d) {
    3.43 +        u64 gpfn = paddr >> PAGE_SHIFT;
    3.44 +        phy_pte.val |= _PAGE_VIRT_D;
    3.45 +
    3.46 +        /* If the page is not already dirty, don't set the dirty bit! */
    3.47 +        if (gpfn < d->arch.shadow_bitmap_size * 8
    3.48 +            && !test_bit(gpfn, d->arch.shadow_bitmap))
    3.49 +            phy_pte.d = 0;
    3.50 +    }
    3.51 +
    3.52      return phy_pte.val;
    3.53  }
    3.54
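The bitmap that shadow_mark_page_dirty() fills in is what the migration tools poll during pre-copy rounds; this changeset only provides the hypervisor side. Purely as an illustration of that consumer, with hypothetical helper names (fetch_and_clear_dirty_bitmap, send_page) that do not appear in this changeset, a pre-copy loop driven by such a bitmap looks roughly like this:

    #include <stdint.h>
    #include <stddef.h>

    #define MAX_GPFNS  (1UL << 18)            /* illustration only */
    #define MAX_ROUNDS 30                     /* give up and stop-and-copy after this */

    /* Hypothetical helpers: atomically fetch-and-clear the domain's log-dirty
     * bitmap, and transmit one guest page to the destination host. */
    extern unsigned long fetch_and_clear_dirty_bitmap(uint8_t *bitmap, size_t bytes);
    extern void send_page(uint64_t gpfn);

    static void precopy_rounds(void)
    {
        static uint8_t bitmap[MAX_GPFNS / 8];

        for (int round = 0; round < MAX_ROUNDS; round++) {
            unsigned long dirty = fetch_and_clear_dirty_bitmap(bitmap, sizeof(bitmap));
            if (dirty == 0)
                break;                        /* converged: suspend and send the rest */
            for (uint64_t gpfn = 0; gpfn < MAX_GPFNS; gpfn++)
                if (bitmap[gpfn / 8] & (1u << (gpfn % 8)))
                    send_page(gpfn);          /* resend pages dirtied since last round */
        }
    }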