ia64/xen-unstable

changeset 10786:86e5d8458c08

[IA64] live migration

Shadow mode and live migration.

Virtualize Dirty bit.

Signed-off-by: Tristan Gingold <tristan.gingold@bull.net>
author awilliam@xenbuild.aw
date Wed Jul 26 09:36:36 2006 -0600 (2006-07-26)
parents e585c2dade14
children 4ebb9c91c886
files tools/libxc/ia64/xc_ia64_linux_restore.c tools/libxc/ia64/xc_ia64_linux_save.c xen/arch/ia64/asm-offsets.c xen/arch/ia64/xen/dom0_ops.c xen/arch/ia64/xen/domain.c xen/arch/ia64/xen/faults.c xen/arch/ia64/xen/ivt.S xen/arch/ia64/xen/mm.c xen/arch/ia64/xen/privop.c xen/arch/ia64/xen/vhpt.c xen/include/asm-ia64/domain.h xen/include/asm-ia64/linux-xen/asm/pgtable.h xen/include/asm-ia64/shadow.h xen/include/asm-ia64/tlbflush.h
line diff
     1.1 --- a/tools/libxc/ia64/xc_ia64_linux_restore.c	Wed Jul 26 09:02:43 2006 -0600
     1.2 +++ b/tools/libxc/ia64/xc_ia64_linux_restore.c	Wed Jul 26 09:36:36 2006 -0600
     1.3 @@ -163,7 +163,7 @@ xc_linux_restore(int xc_handle, int io_f
     1.4  
     1.5  	pfn = page_array[mfn];
     1.6  
     1.7 -        DPRINTF ("xc_linux_restore: page %lu/%lu at %lx\n", mfn, max_pfn, pfn);
     1.8 +        //DPRINTF("xc_linux_restore: page %lu/%lu at %lx\n", mfn, max_pfn, pfn);
     1.9  
    1.10  	if (read_page(xc_handle, io_fd, dom, page_array[mfn]) < 0)
    1.11  		goto out;
     2.1 --- a/tools/libxc/ia64/xc_ia64_linux_save.c	Wed Jul 26 09:02:43 2006 -0600
     2.2 +++ b/tools/libxc/ia64/xc_ia64_linux_save.c	Wed Jul 26 09:36:36 2006 -0600
     2.3 @@ -15,9 +15,73 @@
     2.4  
     2.5  #include "xg_private.h"
     2.6  
     2.7 +/*
     2.8 +** Default values for important tuning parameters. Can override by passing
     2.9 +** non-zero replacement values to xc_linux_save().
    2.10 +**
    2.11 +** XXX SMH: should consider if want to be able to override MAX_MBIT_RATE too.
    2.12 +**
    2.13 +*/
    2.14 +#define DEF_MAX_ITERS    (4 - 1)	/* limit us to 4 times round loop  */
    2.15 +#define DEF_MAX_FACTOR   3		/* never send more than 3x nr_pfns */
    2.16 +
    2.17 +/*
    2.18 +** During (live) save/migrate, we maintain a number of bitmaps to track
    2.19 +** which pages we have to send, and to skip.
    2.20 +*/
    2.21 +
    2.22 +#define BITS_PER_LONG (sizeof(unsigned long) * 8)
    2.23 +
    2.24 +#define BITMAP_ENTRY(_nr,_bmap) \
    2.25 +   ((unsigned long *)(_bmap))[(_nr)/BITS_PER_LONG]
    2.26 +
    2.27 +#define BITMAP_SHIFT(_nr) ((_nr) % BITS_PER_LONG)
    2.28 +
    2.29 +static inline int test_bit (int nr, volatile void * addr)
    2.30 +{
    2.31 +    return (BITMAP_ENTRY(nr, addr) >> BITMAP_SHIFT(nr)) & 1;
    2.32 +}
    2.33 +
    2.34 +static inline void clear_bit (int nr, volatile void * addr)
    2.35 +{
    2.36 +    BITMAP_ENTRY(nr, addr) &= ~(1UL << BITMAP_SHIFT(nr));
    2.37 +}
    2.38 +
    2.39 +static inline void set_bit ( int nr, volatile void * addr)
    2.40 +{
    2.41 +    BITMAP_ENTRY(nr, addr) |= (1UL << BITMAP_SHIFT(nr));
    2.42 +}
    2.43 +
    2.44  /* total number of pages used by the current guest */
    2.45  static unsigned long max_pfn;
    2.46  
    2.47 +static int xc_ia64_shadow_control(int xc_handle,
    2.48 +                                  uint32_t domid,
    2.49 +                                  unsigned int sop,
    2.50 +                                  unsigned long *dirty_bitmap,
    2.51 +                                  unsigned long pages,
    2.52 +                                  xc_shadow_control_stats_t *stats)
    2.53 +{
    2.54 +    if (dirty_bitmap != NULL && pages > 0) {
    2.55 +        int i;
    2.56 +        unsigned char *bmap = (unsigned char *)dirty_bitmap;
    2.57 +        unsigned long bmap_bytes =
    2.58 +            ((pages + BITS_PER_LONG - 1) & ~(BITS_PER_LONG - 1)) / 8;
    2.59 +        unsigned int bmap_pages = (bmap_bytes + PAGE_SIZE - 1) / PAGE_SIZE; 
    2.60 +
    2.61 +        /* Touch the page so that it is in the TC.
    2.62 +           FIXME: use a more reliable method.  */
    2.63 +        for (i = 0 ; i < bmap_pages ; i++)
    2.64 +            bmap[i * PAGE_SIZE] = 0;
    2.65 +        /* Because bmap is not page aligned (allocated by malloc), be sure the
    2.66 +           last page is touched.  */
    2.67 +        bmap[bmap_bytes - 1] = 0;
    2.68 +    }
    2.69 +
    2.70 +    return xc_shadow_control(xc_handle, domid, sop,
    2.71 +                             dirty_bitmap, pages, stats);
    2.72 +}
    2.73 +
    2.74  static inline ssize_t
    2.75  write_exact(int fd, void *buf, size_t count)
    2.76  {
    2.77 @@ -77,10 +141,10 @@ xc_linux_save(int xc_handle, int io_fd, 
    2.78      xc_dominfo_t info;
    2.79  
    2.80      int rc = 1;
    2.81 -    unsigned long N;
    2.82  
    2.83      //int live  = (flags & XCFLAGS_LIVE);
    2.84      int debug = (flags & XCFLAGS_DEBUG);
    2.85 +    int live  = (flags & XCFLAGS_LIVE);
    2.86  
    2.87      /* The new domain's shared-info frame number. */
    2.88      unsigned long shared_info_frame;
    2.89 @@ -93,11 +157,39 @@ xc_linux_save(int xc_handle, int io_fd, 
    2.90      /* Live mapping of shared info structure */
    2.91      shared_info_t *live_shinfo = NULL;
    2.92  
    2.93 +    /* Iteration number.  */
    2.94 +    int iter;
    2.95 +
    2.96 +    /* Number of pages sent in the last iteration (live only).  */
    2.97 +    unsigned int sent_last_iter;
    2.98 +
    2.99 +    /* Number of pages sent (live only).  */
   2.100 +    unsigned int total_sent;
   2.101 +
   2.102 +    /* Size of the shadow bitmap (live only).  */
   2.103 +    unsigned int bitmap_size = 0;
   2.104 +
   2.105 +    /* True if last iteration.  */
   2.106 +    int last_iter;
   2.107 +
   2.108 +    /* Bitmap of pages to be sent.  */
   2.109 +    unsigned long *to_send = NULL;
   2.110 +    /* Bitmap of pages not to be sent (because dirtied).  */
   2.111 +    unsigned long *to_skip = NULL;
   2.112 +
   2.113      char *mem;
   2.114  
   2.115      if (debug)
   2.116          fprintf (stderr, "xc_linux_save (ia64): started dom=%d\n", dom);
   2.117  
   2.118 +    /* If no explicit control parameters given, use defaults */
   2.119 +    if (!max_iters)
   2.120 +        max_iters = DEF_MAX_ITERS;
   2.121 +    if (!max_factor)
   2.122 +        max_factor = DEF_MAX_FACTOR;
   2.123 +
   2.124 +    //initialize_mbit_rate();
   2.125 +
   2.126      if (xc_domain_getinfo(xc_handle, dom, 1, &info) != 1) {
   2.127          ERR("Could not get domain info");
   2.128          return 1;
   2.129 @@ -124,27 +216,12 @@ xc_linux_save(int xc_handle, int io_fd, 
   2.130  
   2.131      max_pfn = info.max_memkb >> (PAGE_SHIFT - 10);
   2.132  
   2.133 -
   2.134 -    /* This is a non-live suspend. Issue the call back to get the
   2.135 -       domain suspended */
   2.136 -
   2.137 -    if (suspend_and_state(suspend, xc_handle, io_fd, dom, &info)) {
   2.138 -        ERR("Domain appears not to have suspended");
   2.139 -        goto out;
   2.140 -    }
   2.141 -
   2.142      page_array = malloc(max_pfn * sizeof(unsigned long));
   2.143      if (page_array == NULL) {
   2.144          ERR("Could not allocate memory");
   2.145          goto out;
   2.146      }
   2.147  
   2.148 -    if (xc_ia64_get_pfn_list(xc_handle, dom, page_array,
   2.149 -                             0, max_pfn) != max_pfn) {
   2.150 -        ERR("Could not get the page frame list");
   2.151 -        goto out;
   2.152 -    }
   2.153 -
   2.154      /* This is expected by xm restore.  */
   2.155      if (!write_exact(io_fd, &max_pfn, sizeof(unsigned long))) {
   2.156          ERR("write: max_pfn");
   2.157 @@ -156,10 +233,13 @@ xc_linux_save(int xc_handle, int io_fd, 
   2.158         if the format change.
   2.159         The version is hard-coded, don't forget to change the restore code
   2.160         too!  */
   2.161 -    N = 1;
   2.162 -    if (!write_exact(io_fd, &N, sizeof(unsigned long))) {
   2.163 -        ERR("write: version");
   2.164 -        goto out;
   2.165 +    {
   2.166 +        unsigned long version = 1;
   2.167 +
   2.168 +        if (!write_exact(io_fd, &version, sizeof(unsigned long))) {
   2.169 +            ERR("write: version");
   2.170 +            goto out;
   2.171 +        }
   2.172      }
   2.173  
   2.174      op.cmd = DOM0_DOMAIN_SETUP;
   2.175 @@ -175,39 +255,165 @@ xc_linux_save(int xc_handle, int io_fd, 
   2.176          goto out;
   2.177      }
   2.178  
   2.179 -    /* Start writing out the saved-domain record. */
   2.180 -    for (N = 0; N < max_pfn; N++) {
   2.181 -        if (page_array[N] == INVALID_MFN)
   2.182 -            continue;
   2.183 -        if (debug)
   2.184 -            fprintf (stderr, "xc_linux_save: page %lx (%lu/%lu)\n",
   2.185 -                     page_array[N], N, max_pfn);
   2.186 +    /* Domain is still running at this point */
   2.187 +    if (live) {
   2.188  
   2.189 -        if (!write_exact(io_fd, &N, sizeof(N))) {
   2.190 -            ERR("write: max_pfn");
   2.191 +        if (xc_ia64_shadow_control(xc_handle, dom,
   2.192 +                                   DOM0_SHADOW_CONTROL_OP_ENABLE_LOGDIRTY,
   2.193 +                                   NULL, 0, NULL ) < 0) {
   2.194 +            ERR("Couldn't enable shadow mode");
   2.195              goto out;
   2.196          }
   2.197  
   2.198 -        mem = xc_map_foreign_range(xc_handle, dom, PAGE_SIZE,
   2.199 -                                   PROT_READ|PROT_WRITE, page_array[N]);
   2.200 -        if (mem == NULL) {
   2.201 -            ERR("cannot map page");
   2.202 +        last_iter = 0;
   2.203 +
   2.204 +        bitmap_size = ((max_pfn + BITS_PER_LONG-1) & ~(BITS_PER_LONG-1)) / 8;
   2.205 +        to_send = malloc(bitmap_size);
   2.206 +        to_skip = malloc(bitmap_size);
   2.207 +
   2.208 +        if (!to_send || !to_skip) {
   2.209 +            ERR("Couldn't allocate bitmap array");
   2.210              goto out;
   2.211          }
   2.212 -        if (write(io_fd, mem, PAGE_SIZE) != PAGE_SIZE) {
   2.213 -            ERR("Error when writing to state file (5)");
   2.214 +
   2.215 +        /* Initially all the pages must be sent.  */
   2.216 +        memset(to_send, 0xff, bitmap_size);
   2.217 +
   2.218 +        if (mlock(to_send, bitmap_size)) {
   2.219 +            ERR("Unable to mlock to_send");
   2.220              goto out;
   2.221          }
   2.222 -        munmap(mem, PAGE_SIZE);
   2.223 +        if (mlock(to_skip, bitmap_size)) {
   2.224 +            ERR("Unable to mlock to_skip");
   2.225 +            goto out;
   2.226 +        }
   2.227 +        
   2.228 +    } else {
   2.229 +
   2.230 +        /* This is a non-live suspend. Issue the call back to get the
   2.231 +           domain suspended */
   2.232 +
   2.233 +        last_iter = 1;
   2.234 +
   2.235 +        if (suspend_and_state(suspend, xc_handle, io_fd, dom, &info)) {
   2.236 +            ERR("Domain appears not to have suspended");
   2.237 +            goto out;
   2.238 +        }
   2.239 +
   2.240 +    }
   2.241 +
   2.242 +    sent_last_iter = max_pfn;
   2.243 +    total_sent = 0;
   2.244 +
   2.245 +    for (iter = 1; ; iter++) {
   2.246 +        unsigned int sent_this_iter, skip_this_iter;
   2.247 +        unsigned long N;
   2.248 +
   2.249 +        sent_this_iter = 0;
   2.250 +        skip_this_iter = 0;
   2.251 +
   2.252 +        /* Get the pfn list, as it may change.  */
   2.253 +        if (xc_ia64_get_pfn_list(xc_handle, dom, page_array,
   2.254 +                                 0, max_pfn) != max_pfn) {
   2.255 +            ERR("Could not get the page frame list");
   2.256 +            goto out;
   2.257 +        }
   2.258 +
   2.259 +        /* Dirtied pages won't be saved.
    2.260 +           slightly wasteful to peek the whole array every time,
   2.261 +           but this is fast enough for the moment. */
   2.262 +        if (!last_iter) {
   2.263 +            if (xc_ia64_shadow_control(xc_handle, dom,
   2.264 +                                       DOM0_SHADOW_CONTROL_OP_PEEK,
   2.265 +                                       to_skip, max_pfn, NULL) != max_pfn) {
   2.266 +                ERR("Error peeking shadow bitmap");
   2.267 +                goto out;
   2.268 +            }
   2.269 +        }
   2.270 +
   2.271 +        /* Start writing out the saved-domain record. */
   2.272 +        for (N = 0; N < max_pfn; N++) {
   2.273 +            if (page_array[N] == INVALID_MFN)
   2.274 +                continue;
   2.275 +            if (!last_iter) {
   2.276 +                if (test_bit(N, to_skip) && test_bit(N, to_send))
   2.277 +                    skip_this_iter++;
   2.278 +                if (test_bit(N, to_skip) || !test_bit(N, to_send))
   2.279 +                    continue;
   2.280 +            }
   2.281 +
   2.282 +            if (debug)
   2.283 +                fprintf(stderr, "xc_linux_save: page %lx (%lu/%lu)\n",
   2.284 +                        page_array[N], N, max_pfn);
   2.285 +
   2.286 +            mem = xc_map_foreign_range(xc_handle, dom, PAGE_SIZE,
   2.287 +                                       PROT_READ|PROT_WRITE, page_array[N]);
   2.288 +            if (mem == NULL) {
    2.289 +                /* The page may have moved.
   2.290 +                   It will be remarked dirty.
   2.291 +                   FIXME: to be tracked.  */
   2.292 +                fprintf(stderr, "cannot map page %lx: %s\n",
   2.293 +                        page_array[N], strerror (errno));
   2.294 +                continue;
   2.295 +            }
   2.296 +
   2.297 +            if (!write_exact(io_fd, &N, sizeof(N))) {
   2.298 +                ERR("write: max_pfn");
   2.299 +                goto out;
   2.300 +            }
   2.301 +
   2.302 +            if (write(io_fd, mem, PAGE_SIZE) != PAGE_SIZE) {
   2.303 +                ERR("Error when writing to state file (5)");
   2.304 +                goto out;
   2.305 +            }
   2.306 +            munmap(mem, PAGE_SIZE);
   2.307 +            sent_this_iter++;
   2.308 +            total_sent++;
   2.309 +        }
   2.310 +
   2.311 +        if (last_iter)
   2.312 +            break;
   2.313 +
   2.314 +        DPRINTF(" %d: sent %d, skipped %d\n",
   2.315 +                iter, sent_this_iter, skip_this_iter );
   2.316 +
   2.317 +        if (live) {
   2.318 +            if ( /* ((sent_this_iter > sent_last_iter) && RATE_IS_MAX()) || */
   2.319 +                (iter >= max_iters) || (sent_this_iter+skip_this_iter < 50) ||
   2.320 +                (total_sent > max_pfn*max_factor)) {
   2.321 +                DPRINTF("Start last iteration\n");
   2.322 +                last_iter = 1;
   2.323 +
   2.324 +                if (suspend_and_state(suspend, xc_handle, io_fd, dom, &info)) {
   2.325 +                    ERR("Domain appears not to have suspended");
   2.326 +                    goto out;
   2.327 +                }
   2.328 +            }
   2.329 +
   2.330 +            /* Pages to be sent are pages which were dirty.  */
   2.331 +            if (xc_ia64_shadow_control(xc_handle, dom,
   2.332 +                                       DOM0_SHADOW_CONTROL_OP_CLEAN,
   2.333 +                                       to_send, max_pfn, NULL ) != max_pfn) {
   2.334 +                ERR("Error flushing shadow PT");
   2.335 +                goto out;
   2.336 +            }
   2.337 +
   2.338 +            sent_last_iter = sent_this_iter;
   2.339 +
   2.340 +            //print_stats(xc_handle, dom, sent_this_iter, &stats, 1);
   2.341 +        }
   2.342 +
   2.343      }
   2.344  
   2.345      fprintf (stderr, "All memory is saved\n");
   2.346  
   2.347      /* terminate */
   2.348 -    N = INVALID_MFN;
   2.349 -    if (!write_exact(io_fd, &N, sizeof(N))) {
   2.350 -        ERR("Error when writing to state file (6)");
   2.351 -        goto out;
   2.352 +    {
   2.353 +        unsigned long pfn = INVALID_MFN;
   2.354 +        if (!write_exact(io_fd, &pfn, sizeof(pfn))) {
   2.355 +            ERR("Error when writing to state file (6)");
   2.356 +            goto out;
   2.357 +        }
   2.358      }
   2.359  
   2.360      /* Send through a list of all the PFNs that were not in map at the close */
   2.361 @@ -274,8 +480,16 @@ xc_linux_save(int xc_handle, int io_fd, 
   2.362  
   2.363   out:
   2.364  
   2.365 -    free (page_array);
   2.366 +    if (live) {
   2.367 +        if (xc_ia64_shadow_control(xc_handle, dom, DOM0_SHADOW_CONTROL_OP_OFF,
   2.368 +                                   NULL, 0, NULL ) < 0) {
   2.369 +            DPRINTF("Warning - couldn't disable shadow mode");
   2.370 +        }
   2.371 +    }
   2.372  
   2.373 +    free(page_array);
   2.374 +    free(to_send);
   2.375 +    free(to_skip);
   2.376      if (live_shinfo)
   2.377          munmap(live_shinfo, PAGE_SIZE);
   2.378  
     3.1 --- a/xen/arch/ia64/asm-offsets.c	Wed Jul 26 09:02:43 2006 -0600
     3.2 +++ b/xen/arch/ia64/asm-offsets.c	Wed Jul 26 09:36:36 2006 -0600
     3.3 @@ -65,6 +65,11 @@ void foo(void)
     3.4  	DEFINE(IA64_VCPU_DTLB_OFFSET, offsetof (struct vcpu, arch.dtlb));
     3.5  
     3.6  	BLANK();
     3.7 +
     3.8 +	DEFINE(IA64_DOMAIN_SHADOW_BITMAP_OFFSET, offsetof (struct domain, arch.shadow_bitmap));
     3.9 +
    3.10 +	BLANK();
    3.11 +
    3.12  	DEFINE(IA64_CPUINFO_ITM_NEXT_OFFSET, offsetof (struct cpuinfo_ia64, itm_next));
    3.13  	DEFINE(IA64_CPUINFO_KSOFTIRQD_OFFSET, offsetof (struct cpuinfo_ia64, ksoftirqd));
    3.14  
     4.1 --- a/xen/arch/ia64/xen/dom0_ops.c	Wed Jul 26 09:02:43 2006 -0600
     4.2 +++ b/xen/arch/ia64/xen/dom0_ops.c	Wed Jul 26 09:36:36 2006 -0600
     4.3 @@ -265,6 +265,20 @@ long arch_do_dom0_op(dom0_op_t *op, XEN_
     4.4      }
     4.5      break;
     4.6  
     4.7 +    case DOM0_SHADOW_CONTROL:
     4.8 +    {
     4.9 +        struct domain *d; 
    4.10 +        ret = -ESRCH;
    4.11 +        d = find_domain_by_id(op->u.shadow_control.domain);
    4.12 +        if ( d != NULL )
    4.13 +        {
    4.14 +            ret = shadow_mode_control(d, &op->u.shadow_control);
    4.15 +            put_domain(d);
    4.16 +            copy_to_guest(u_dom0_op, op, 1);
    4.17 +        } 
    4.18 +    }
    4.19 +    break;
    4.20 +
    4.21      default:
    4.22          printf("arch_do_dom0_op: unrecognized dom0 op: %d!!!\n",op->cmd);
    4.23          ret = -ENOSYS;
     5.1 --- a/xen/arch/ia64/xen/domain.c	Wed Jul 26 09:02:43 2006 -0600
     5.2 +++ b/xen/arch/ia64/xen/domain.c	Wed Jul 26 09:36:36 2006 -0600
     5.3 @@ -25,26 +25,15 @@
     5.4  #include <xen/mm.h>
     5.5  #include <xen/iocap.h>
     5.6  #include <asm/asm-xsi-offsets.h>
     5.7 -#include <asm/ptrace.h>
     5.8  #include <asm/system.h>
     5.9  #include <asm/io.h>
    5.10  #include <asm/processor.h>
    5.11 -#include <asm/desc.h>
    5.12 -#include <asm/hw_irq.h>
    5.13 -#include <asm/setup.h>
    5.14 -//#include <asm/mpspec.h>
    5.15 -#include <xen/irq.h>
    5.16  #include <xen/event.h>
    5.17 -//#include <xen/shadow.h>
    5.18  #include <xen/console.h>
    5.19  #include <xen/compile.h>
    5.20 -
    5.21  #include <xen/elf.h>
    5.22 -//#include <asm/page.h>
    5.23  #include <asm/pgalloc.h>
    5.24 -
    5.25  #include <asm/offsets.h>  /* for IA64_THREAD_INFO_SIZE */
    5.26 -
    5.27  #include <asm/vcpu.h>   /* for function declarations */
    5.28  #include <public/arch-ia64.h>
    5.29  #include <xen/domain.h>
    5.30 @@ -52,13 +41,12 @@
    5.31  #include <asm/vmx_vcpu.h>
    5.32  #include <asm/vmx_vpd.h>
    5.33  #include <asm/vmx_phy_mode.h>
    5.34 -#include <asm/pal.h>
    5.35  #include <asm/vhpt.h>
    5.36 -#include <public/hvm/ioreq.h>
    5.37  #include <public/arch-ia64.h>
    5.38  #include <asm/tlbflush.h>
    5.39  #include <asm/regionreg.h>
    5.40  #include <asm/dom_fw.h>
    5.41 +#include <asm/shadow.h>
    5.42  #include <asm/privop_stat.h>
    5.43  
    5.44  #ifndef CONFIG_XEN_IA64_DOM0_VP
    5.45 @@ -388,8 +376,11 @@ void arch_domain_destroy(struct domain *
    5.46  	BUG_ON(d->arch.mm.pgd != NULL);
    5.47  	if (d->shared_info != NULL)
    5.48  	    free_xenheap_pages(d->shared_info, get_order_from_shift(XSI_SHIFT));
    5.49 +	if (d->arch.shadow_bitmap != NULL)
    5.50 +		xfree(d->arch.shadow_bitmap);
    5.51  
    5.52 -	domain_flush_destroy (d);
    5.53 +	/* Clear vTLB for the next domain.  */
    5.54 +	domain_flush_tlb_vhpt(d);
    5.55  
    5.56  	deallocate_rid_range(d);
    5.57  }
    5.58 @@ -594,6 +585,148 @@ domain_set_shared_info_va (unsigned long
    5.59  	return 0;
    5.60  }
    5.61  
    5.62 +/* Transfer and clear the shadow bitmap in 1kB chunks for L1 cache. */
    5.63 +#define SHADOW_COPY_CHUNK (1024 / sizeof (unsigned long))
    5.64 +
    5.65 +int shadow_mode_control(struct domain *d, dom0_shadow_control_t *sc)
    5.66 +{
    5.67 +	unsigned int op = sc->op;
    5.68 +	int          rc = 0;
    5.69 +	int i;
    5.70 +	//struct vcpu *v;
    5.71 +
    5.72 +	if (unlikely(d == current->domain)) {
    5.73 +		DPRINTK("Don't try to do a shadow op on yourself!\n");
    5.74 +		return -EINVAL;
    5.75 +	}   
    5.76 +
    5.77 +	domain_pause(d);
    5.78 +
    5.79 +	switch (op)
    5.80 +	{
    5.81 +	case DOM0_SHADOW_CONTROL_OP_OFF:
    5.82 +		if (shadow_mode_enabled (d)) {
    5.83 +			u64 *bm = d->arch.shadow_bitmap;
    5.84 +
    5.85 +			/* Flush vhpt and tlb to restore dirty bit usage.  */
    5.86 +			domain_flush_tlb_vhpt(d);
    5.87 +
    5.88 +			/* Free bitmap.  */
    5.89 +			d->arch.shadow_bitmap_size = 0;
    5.90 +			d->arch.shadow_bitmap = NULL;
    5.91 +			xfree(bm);
    5.92 +		}
    5.93 +		break;
    5.94 +
    5.95 +	case DOM0_SHADOW_CONTROL_OP_ENABLE_TEST:
    5.96 +	case DOM0_SHADOW_CONTROL_OP_ENABLE_TRANSLATE:
    5.97 +		rc = -EINVAL;
    5.98 +		break;
    5.99 +
   5.100 +	case DOM0_SHADOW_CONTROL_OP_ENABLE_LOGDIRTY:
   5.101 +		if (shadow_mode_enabled(d)) {
   5.102 +			rc = -EINVAL;
   5.103 +			break;
   5.104 +		}
   5.105 +
   5.106 +		atomic64_set(&d->arch.shadow_fault_count, 0);
   5.107 +		atomic64_set(&d->arch.shadow_dirty_count, 0);
   5.108 +
   5.109 +		d->arch.shadow_bitmap_size = (d->max_pages + BITS_PER_LONG-1) &
   5.110 +		                             ~(BITS_PER_LONG-1);
   5.111 +		d->arch.shadow_bitmap = xmalloc_array(unsigned long,
   5.112 +		                   d->arch.shadow_bitmap_size / BITS_PER_LONG);
   5.113 +		if (d->arch.shadow_bitmap == NULL) {
   5.114 +			d->arch.shadow_bitmap_size = 0;
   5.115 +			rc = -ENOMEM;
   5.116 +		}
   5.117 +		else {
   5.118 +			memset(d->arch.shadow_bitmap, 0, 
   5.119 +			       d->arch.shadow_bitmap_size / 8);
   5.120 +			
    5.121 +			/* Flush vhpt and tlb to enable dirty bit
   5.122 +			   virtualization.  */
   5.123 +			domain_flush_tlb_vhpt(d);
   5.124 +		}
   5.125 +		break;
   5.126 +
   5.127 +	case DOM0_SHADOW_CONTROL_OP_FLUSH:
   5.128 +		atomic64_set(&d->arch.shadow_fault_count, 0);
   5.129 +		atomic64_set(&d->arch.shadow_dirty_count, 0);
   5.130 +		break;
   5.131 +   
   5.132 +	case DOM0_SHADOW_CONTROL_OP_CLEAN:
   5.133 +	  {
   5.134 +		int nbr_longs;
   5.135 +
   5.136 +		sc->stats.fault_count = atomic64_read(&d->arch.shadow_fault_count);
   5.137 +		sc->stats.dirty_count = atomic64_read(&d->arch.shadow_dirty_count);
   5.138 +
   5.139 +		atomic64_set(&d->arch.shadow_fault_count, 0);
   5.140 +		atomic64_set(&d->arch.shadow_dirty_count, 0);
   5.141 + 
   5.142 +		if (guest_handle_is_null(sc->dirty_bitmap) ||
   5.143 +		    (d->arch.shadow_bitmap == NULL)) {
   5.144 +			rc = -EINVAL;
   5.145 +			break;
   5.146 +		}
   5.147 +
   5.148 +		if (sc->pages > d->arch.shadow_bitmap_size)
   5.149 +			sc->pages = d->arch.shadow_bitmap_size; 
   5.150 +
   5.151 +		nbr_longs = (sc->pages + BITS_PER_LONG - 1) / BITS_PER_LONG;
   5.152 +
   5.153 +		for (i = 0; i < nbr_longs; i += SHADOW_COPY_CHUNK) {
   5.154 +			int size = (nbr_longs - i) > SHADOW_COPY_CHUNK ?
   5.155 +			           SHADOW_COPY_CHUNK : nbr_longs - i;
   5.156 +     
   5.157 +			if (copy_to_guest_offset(sc->dirty_bitmap, i,
   5.158 +			                         d->arch.shadow_bitmap + i,
   5.159 +			                         size)) {
   5.160 +				rc = -EFAULT;
   5.161 +				break;
   5.162 +			}
   5.163 +
   5.164 +			memset(d->arch.shadow_bitmap + i,
   5.165 +			       0, size * sizeof(unsigned long));
   5.166 +		}
   5.167 +		
   5.168 +		break;
   5.169 +	  }
   5.170 +
   5.171 +	case DOM0_SHADOW_CONTROL_OP_PEEK:
   5.172 +	{
   5.173 +		unsigned long size;
   5.174 +
   5.175 +		sc->stats.fault_count = atomic64_read(&d->arch.shadow_fault_count);
   5.176 +		sc->stats.dirty_count = atomic64_read(&d->arch.shadow_dirty_count);
   5.177 +
   5.178 +		if (guest_handle_is_null(sc->dirty_bitmap) ||
   5.179 +		    (d->arch.shadow_bitmap == NULL)) {
   5.180 +			rc = -EINVAL;
   5.181 +			break;
   5.182 +		}
   5.183 + 
   5.184 +		if (sc->pages > d->arch.shadow_bitmap_size)
   5.185 +			sc->pages = d->arch.shadow_bitmap_size; 
   5.186 +
   5.187 +		size = (sc->pages + BITS_PER_LONG - 1) / BITS_PER_LONG;
   5.188 +		if (copy_to_guest(sc->dirty_bitmap, 
   5.189 +		                  d->arch.shadow_bitmap, size)) {
   5.190 +			rc = -EFAULT;
   5.191 +			break;
   5.192 +		}
   5.193 +		break;
   5.194 +	}
   5.195 +	default:
   5.196 +		rc = -EINVAL;
   5.197 +		break;
   5.198 +	}
   5.199 +	
   5.200 +	domain_unpause(d);
   5.201 +	
   5.202 +	return rc;
   5.203 +}
   5.204  
   5.205  // remove following line if not privifying in memory
   5.206  //#define HAVE_PRIVIFY_MEMORY
     6.1 --- a/xen/arch/ia64/xen/faults.c	Wed Jul 26 09:02:43 2006 -0600
     6.2 +++ b/xen/arch/ia64/xen/faults.c	Wed Jul 26 09:36:36 2006 -0600
     6.3 @@ -1,4 +1,3 @@
     6.4 -
     6.5  /*
     6.6   * Miscellaneous process/domain related routines
     6.7   * 
     6.8 @@ -29,6 +28,7 @@
     6.9  #include <asm/bundle.h>
    6.10  #include <asm/privop_stat.h>
    6.11  #include <asm/asm-xsi-offsets.h>
    6.12 +#include <asm/shadow.h>
    6.13  
    6.14  extern void die_if_kernel(char *str, struct pt_regs *regs, long err);
    6.15  /* FIXME: where these declarations shold be there ? */
    6.16 @@ -648,3 +648,92 @@ ia64_handle_reflection (unsigned long if
    6.17  	reflect_interruption(isr,regs,vector);
    6.18  }
    6.19  
    6.20 +void
    6.21 +ia64_shadow_fault(unsigned long ifa, unsigned long itir,
    6.22 +                  unsigned long isr, struct pt_regs *regs)
    6.23 +{
    6.24 +	struct vcpu *v = current;
    6.25 +	struct domain *d = current->domain;
    6.26 +	unsigned long gpfn;
    6.27 +	unsigned long pte = 0;
    6.28 +	struct vhpt_lf_entry *vlfe;
    6.29 +
    6.30 +	/* There are 2 jobs to do:
    6.31 +	   -  marking the page as dirty (the metaphysical address must be
    6.32 +	      extracted to do that).
    6.33 +	   -  reflecting or not the fault (the virtual Dirty bit must be
    6.34 +	      extracted to decide).
     6.35 +	   Unfortunately this information is not immediately available!
    6.36 +	*/
    6.37 +
    6.38 +	/* Extract the metaphysical address.
    6.39 +	   Try to get it from VHPT and M2P as we need the flags.  */
    6.40 +	vlfe = (struct vhpt_lf_entry *)ia64_thash(ifa);
    6.41 +	pte = vlfe->page_flags;
    6.42 +	if (vlfe->ti_tag == ia64_ttag(ifa)) {
    6.43 +		/* The VHPT entry is valid.  */
    6.44 +		gpfn = get_gpfn_from_mfn((pte & _PAGE_PPN_MASK) >> PAGE_SHIFT);
    6.45 +		BUG_ON(gpfn == INVALID_M2P_ENTRY);
    6.46 +	}
    6.47 +	else {
    6.48 +		unsigned long itir, iha;
    6.49 +		IA64FAULT fault;
    6.50 +
    6.51 +		/* The VHPT entry is not valid.  */
    6.52 +		vlfe = NULL;
    6.53 +
    6.54 +		/* FIXME: gives a chance to tpa, as the TC was valid.  */
    6.55 +
    6.56 +		fault = vcpu_translate(v, ifa, 1, &pte, &itir, &iha);
    6.57 +
    6.58 +		/* Try again!  */
    6.59 +		if (fault != IA64_NO_FAULT) {
    6.60 +			/* This will trigger a dtlb miss.  */
    6.61 +			ia64_ptcl(ifa, PAGE_SHIFT << 2);
    6.62 +			return;
    6.63 +		}
    6.64 +		gpfn = ((pte & _PAGE_PPN_MASK) >> PAGE_SHIFT);
    6.65 +		if (pte & _PAGE_D)
    6.66 +			pte |= _PAGE_VIRT_D;
    6.67 +	}
    6.68 +
    6.69 +	/* Set the dirty bit in the bitmap.  */
    6.70 +	shadow_mark_page_dirty (d, gpfn);
    6.71 +
     6.72 +	/* Update the local TC/VHPT and decide whether or not the fault should
     6.73 +	   be reflected.
     6.74 +	   SMP note: we almost ignore the other processors.  The shadow_bitmap
     6.75 +	   has been atomically updated.  If the dirty fault happens on another
     6.76 +	   processor, it will do its job.
    6.77 +	*/
    6.78 +
    6.79 +	if (pte != 0) {
    6.80 +		/* We will know how to handle the fault.  */
    6.81 +
    6.82 +		if (pte & _PAGE_VIRT_D) {
    6.83 +			/* Rewrite VHPT entry.
    6.84 +			   There is no race here because only the
    6.85 +			   cpu VHPT owner can write page_flags.  */
    6.86 +			if (vlfe)
    6.87 +				vlfe->page_flags = pte | _PAGE_D;
    6.88 +			
    6.89 +			/* Purge the TC locally.
    6.90 +			   It will be reloaded from the VHPT iff the
    6.91 +			   VHPT entry is still valid.  */
    6.92 +			ia64_ptcl(ifa, PAGE_SHIFT << 2);
    6.93 +
    6.94 +			atomic64_inc(&d->arch.shadow_fault_count);
    6.95 +		}
    6.96 +		else {
    6.97 +			/* Reflect.
    6.98 +			   In this case there is no need to purge.  */
    6.99 +			ia64_handle_reflection(ifa, regs, isr, 0, 8);
   6.100 +		}
   6.101 +	}
   6.102 +	else {
    6.103 +		/* We don't know whether or not the fault must be
   6.104 +		   reflected.  The VHPT entry is not valid.  */
   6.105 +		/* FIXME: in metaphysical mode, we could do an ITC now.  */
   6.106 +		ia64_ptcl(ifa, PAGE_SHIFT << 2);
   6.107 +	}
   6.108 +}
     7.1 --- a/xen/arch/ia64/xen/ivt.S	Wed Jul 26 09:02:43 2006 -0600
     7.2 +++ b/xen/arch/ia64/xen/ivt.S	Wed Jul 26 09:36:36 2006 -0600
     7.3 @@ -746,7 +746,48 @@ END(dkey_miss)
     7.4  ENTRY(dirty_bit)
     7.5  	DBG_FAULT(8)
     7.6  #ifdef XEN
     7.7 -	FAULT_OR_REFLECT(8)
     7.8 +	mov r20=cr.ipsr
     7.9 +	mov r31=pr;;
    7.10 +	extr.u r20=r20,IA64_PSR_CPL0_BIT,2;;
    7.11 +	mov r19=8	/* prepare to save predicates */
    7.12 +	cmp.eq p6,p0=r0,r20 	/* cpl == 0?*/
    7.13 +(p6)	br.sptk.few dispatch_to_fault_handler
    7.14 +	/* If shadow mode is not enabled, reflect the fault.  */
    7.15 +	movl r22=THIS_CPU(cpu_kr)+IA64_KR_CURRENT_OFFSET
    7.16 +	;;
    7.17 +	ld8 r22=[r22]
    7.18 +	;;
    7.19 +	add r22=IA64_VCPU_DOMAIN_OFFSET,r22
    7.20 +	;;
    7.21 +	/* Read domain.  */
    7.22 +	ld8 r22=[r22]
    7.23 +	;;
    7.24 +	add r22=IA64_DOMAIN_SHADOW_BITMAP_OFFSET,r22
    7.25 +	;;
    7.26 +	ld8 r22=[r22]
    7.27 +	;;
    7.28 +	cmp.eq p6,p0=r0,r22 	/* !shadow_bitmap ?*/
    7.29 +(p6)	br.dptk.many dispatch_reflection
    7.30 +
    7.31 +	SAVE_MIN_WITH_COVER
    7.32 +	alloc r14=ar.pfs,0,0,4,0
    7.33 +	mov out0=cr.ifa
    7.34 +	mov out1=cr.itir
    7.35 +	mov out2=cr.isr
    7.36 +	adds out3=16,sp
    7.37 +
    7.38 +	ssm psr.ic | PSR_DEFAULT_BITS
    7.39 +	;;
    7.40 +	srlz.i					// guarantee that interruption collection is on
    7.41 +	;;
    7.42 +(p15)	ssm psr.i				// restore psr.i
    7.43 +	adds r3=8,r2				// set up second base pointer
    7.44 +	;;
    7.45 +	SAVE_REST
    7.46 +	movl r14=ia64_leave_kernel
    7.47 +	;;
    7.48 +	mov rp=r14
    7.49 +	br.call.sptk.many b6=ia64_shadow_fault
    7.50  #else
    7.51  	/*
    7.52  	 * What we do here is to simply turn on the dirty bit in the PTE.  We need to
     8.1 --- a/xen/arch/ia64/xen/mm.c	Wed Jul 26 09:02:43 2006 -0600
     8.2 +++ b/xen/arch/ia64/xen/mm.c	Wed Jul 26 09:36:36 2006 -0600
     8.3 @@ -170,6 +170,7 @@
     8.4  #include <asm/pgalloc.h>
     8.5  #include <asm/vhpt.h>
     8.6  #include <asm/vcpu.h>
     8.7 +#include <asm/shadow.h>
     8.8  #include <linux/efi.h>
     8.9  
    8.10  #ifndef CONFIG_XEN_IA64_DOM0_VP
    8.11 @@ -470,7 +471,7 @@ u64 translate_domain_pte(u64 pteval, u64
    8.12  	pteval2 &= _PAGE_PPN_MASK; // ignore non-addr bits
    8.13  	pteval2 |= (pteval & _PAGE_ED);
    8.14  	pteval2 |= _PAGE_PL_2; // force PL0->2 (PL3 is unaffected)
    8.15 -	pteval2 = (pteval & ~_PAGE_PPN_MASK) | pteval2;
    8.16 +	pteval2 |= (pteval & ~_PAGE_PPN_MASK);
    8.17  	/*
    8.18  	 * Don't let non-dom0 domains map uncached addresses.  This can
    8.19  	 * happen when domU tries to touch i/o port space.  Also prevents
    8.20 @@ -482,6 +483,18 @@ u64 translate_domain_pte(u64 pteval, u64
    8.21  	if (d != dom0 && (pteval2 & _PAGE_MA_MASK) != _PAGE_MA_NAT)
    8.22  		pteval2 &= ~_PAGE_MA_MASK;
    8.23  
    8.24 +    /* If shadow mode is enabled, virtualize dirty bit.  */
    8.25 +    if (shadow_mode_enabled(d) && (pteval2 & _PAGE_D)) {
    8.26 +        u64 mp_page = mpaddr >> PAGE_SHIFT;
    8.27 +        pteval2 |= _PAGE_VIRT_D;
    8.28 +
    8.29 +        /* If the page is not already dirty, don't set the dirty bit.
    8.30 +           This is a small optimization!  */
    8.31 +        if (mp_page < d->arch.shadow_bitmap_size * 8
    8.32 +            && !test_bit(mp_page, d->arch.shadow_bitmap))
    8.33 +            pteval2 = (pteval2 & ~_PAGE_D);
    8.34 +    }
    8.35 +
    8.36  	return pteval2;
    8.37  }
    8.38  
    8.39 @@ -1418,10 +1431,13 @@ guest_physmap_remove_page(struct domain 
    8.40  
    8.41  //XXX sledgehammer.
    8.42  //    flush finer range.
    8.43 -void
    8.44 +static void
    8.45  domain_page_flush(struct domain* d, unsigned long mpaddr,
    8.46                    unsigned long old_mfn, unsigned long new_mfn)
    8.47  {
    8.48 +    if (shadow_mode_enabled(d))
    8.49 +        shadow_mark_page_dirty(d, mpaddr >> PAGE_SHIFT);
    8.50 +
    8.51      domain_flush_vtlb_all();
    8.52  }
    8.53  
     9.1 --- a/xen/arch/ia64/xen/privop.c	Wed Jul 26 09:02:43 2006 -0600
     9.2 +++ b/xen/arch/ia64/xen/privop.c	Wed Jul 26 09:36:36 2006 -0600
     9.3 @@ -686,7 +686,8 @@ priv_emulate(VCPU *vcpu, REGS *regs, UIN
     9.4  		(void)vcpu_increment_iip(vcpu);
     9.5  	}
     9.6  	if (fault == IA64_ILLOP_FAULT)
     9.7 -		printf("priv_emulate: priv_handle_op fails, isr=0x%lx\n",isr);
     9.8 +		printf("priv_emulate: priv_handle_op fails, "
     9.9 +		       "isr=0x%lx iip=%lx\n",isr, regs->cr_iip);
    9.10  	return fault;
    9.11  }
    9.12  
    10.1 --- a/xen/arch/ia64/xen/vhpt.c	Wed Jul 26 09:02:43 2006 -0600
    10.2 +++ b/xen/arch/ia64/xen/vhpt.c	Wed Jul 26 09:36:36 2006 -0600
    10.3 @@ -236,7 +236,7 @@ static void flush_tlb_vhpt_all (struct d
    10.4  	local_flush_tlb_all ();
    10.5  }
    10.6  
    10.7 -void domain_flush_destroy (struct domain *d)
    10.8 +void domain_flush_tlb_vhpt(struct domain *d)
    10.9  {
   10.10  	/* Very heavy...  */
   10.11  	on_each_cpu ((void (*)(void *))flush_tlb_vhpt_all, d, 1, 1);
    11.1 --- a/xen/include/asm-ia64/domain.h	Wed Jul 26 09:02:43 2006 -0600
    11.2 +++ b/xen/include/asm-ia64/domain.h	Wed Jul 26 09:36:36 2006 -0600
    11.3 @@ -49,6 +49,9 @@ extern unsigned long domain_set_shared_i
    11.4     if false, flush and invalidate caches.  */
    11.5  extern void domain_cache_flush (struct domain *d, int sync_only);
    11.6  
    11.7 +/* Control the shadow mode.  */
    11.8 +extern int shadow_mode_control(struct domain *d, dom0_shadow_control_t *sc);
    11.9 +
   11.10  /* Cleanly crash the current domain with a message.  */
   11.11  extern void panic_domain(struct pt_regs *, const char *, ...)
   11.12       __attribute__ ((noreturn, format (printf, 2, 3)));
   11.13 @@ -117,6 +120,16 @@ struct arch_domain {
   11.14      /* Address of fpswa_interface_t (placed in domain memory)  */
   11.15      void *fpswa_inf;
   11.16  
   11.17 +    /* Bitmap of shadow dirty bits.
   11.18 +       Set iff shadow mode is enabled.  */
   11.19 +    u64 *shadow_bitmap;
   11.20 +    /* Length (in bits!) of shadow bitmap.  */
   11.21 +    unsigned long shadow_bitmap_size;
   11.22 +    /* Number of bits set in bitmap.  */
   11.23 +    atomic64_t shadow_dirty_count;
   11.24 +    /* Number of faults.  */
   11.25 +    atomic64_t shadow_fault_count;
   11.26 +
   11.27      struct last_vcpu last_vcpu[NR_CPUS];
   11.28  };
   11.29  #define INT_ENABLE_OFFSET(v) 		  \
    12.1 --- a/xen/include/asm-ia64/linux-xen/asm/pgtable.h	Wed Jul 26 09:02:43 2006 -0600
    12.2 +++ b/xen/include/asm-ia64/linux-xen/asm/pgtable.h	Wed Jul 26 09:36:36 2006 -0600
    12.3 @@ -62,7 +62,12 @@
    12.4  #define _PAGE_D			(1 << _PAGE_D_BIT)	/* page dirty bit */
    12.5  #define _PAGE_PPN_MASK		(((__IA64_UL(1) << IA64_MAX_PHYS_BITS) - 1) & ~0xfffUL)
    12.6  #define _PAGE_ED		(__IA64_UL(1) << 52)	/* exception deferral */
    12.7 +#ifdef XEN
    12.8 +#define _PAGE_VIRT_D		(__IA64_UL(1) << 53)	/* Virtual dirty bit */
    12.9 +#define _PAGE_PROTNONE		0
   12.10 +#else
   12.11  #define _PAGE_PROTNONE		(__IA64_UL(1) << 63)
   12.12 +#endif
   12.13  
   12.14  /* Valid only for a PTE with the present bit cleared: */
   12.15  #define _PAGE_FILE		(1 << 1)		/* see swap & file pte remarks below */
    13.1 --- a/xen/include/asm-ia64/shadow.h	Wed Jul 26 09:02:43 2006 -0600
    13.2 +++ b/xen/include/asm-ia64/shadow.h	Wed Jul 26 09:36:36 2006 -0600
    13.3 @@ -45,6 +45,24 @@ void guest_physmap_add_page(struct domai
    13.4  void guest_physmap_remove_page(struct domain *d, unsigned long gpfn, unsigned long mfn);
    13.5  #endif
    13.6  
    13.7 +static inline int
    13.8 +shadow_mode_enabled(struct domain *d)
    13.9 +{
   13.10 +    return d->arch.shadow_bitmap != NULL;
   13.11 +}
   13.12 +
   13.13 +static inline int
   13.14 +shadow_mark_page_dirty(struct domain *d, unsigned long gpfn)
   13.15 +{
   13.16 +    if (gpfn < d->arch.shadow_bitmap_size * 8
   13.17 +        && !test_and_set_bit(gpfn, d->arch.shadow_bitmap)) {
   13.18 +        /* The page was not dirty.  */
   13.19 +        atomic64_inc(&d->arch.shadow_dirty_count);
   13.20 +        return 1;
   13.21 +    } else
   13.22 +        return 0;
   13.23 +}
   13.24 +
   13.25  #endif // _XEN_SHADOW_H
   13.26  
   13.27  /*
    14.1 --- a/xen/include/asm-ia64/tlbflush.h	Wed Jul 26 09:02:43 2006 -0600
    14.2 +++ b/xen/include/asm-ia64/tlbflush.h	Wed Jul 26 09:36:36 2006 -0600
    14.3 @@ -22,8 +22,8 @@ void domain_flush_vtlb_all (void);
    14.4  /* Global range-flush of vTLB.  */
    14.5  void domain_flush_vtlb_range (struct domain *d, u64 vadr, u64 addr_range);
    14.6  
    14.7 -/* Final vTLB flush on every dirty cpus.  */
    14.8 -void domain_flush_destroy (struct domain *d);
    14.9 +/* Flush vhpt and mTLB on every dirty cpu.  */
   14.10 +void domain_flush_tlb_vhpt(struct domain *d);
   14.11  
   14.12  /* Flush v-tlb on cpus set in mask for current domain.  */
   14.13  void flush_tlb_mask(cpumask_t mask);