ia64/xen-unstable

changeset 16173:c1e272707063

[IA64] vti domain save/restore: libxc: implement vti domain save/restore

Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
author Alex Williamson <alex.williamson@hp.com>
date Sun Oct 21 14:57:13 2007 -0600 (2007-10-21)
parents d251f99b55e7
children 1e27eb0c9f22
files tools/libxc/ia64/xc_ia64_linux_restore.c tools/libxc/ia64/xc_ia64_linux_save.c tools/libxc/ia64/xc_ia64_save_restore.h
line diff
     1.1 --- a/tools/libxc/ia64/xc_ia64_linux_restore.c	Sun Oct 21 14:45:20 2007 -0600
     1.2 +++ b/tools/libxc/ia64/xc_ia64_linux_restore.c	Sun Oct 21 14:57:13 2007 -0600
     1.3 @@ -8,6 +8,7 @@
     1.4   *
     1.5   * Copyright (c) 2007 Isaku Yamahata <yamahata@valinux.co.jp>
     1.6   *   Use foreign p2m exposure.
     1.7 + *   VTi domain support
     1.8   */
     1.9  
    1.10  #include <stdlib.h>
    1.11 @@ -17,6 +18,7 @@
    1.12  #include "xc_ia64_save_restore.h"
    1.13  #include "xc_ia64.h"
    1.14  #include "xc_efi.h"
    1.15 +#include "xen/hvm/params.h"
    1.16  
    1.17  #define PFN_TO_KB(_pfn) ((_pfn) << (PAGE_SHIFT - 10))
    1.18  
    1.19 @@ -75,6 +77,354 @@ read_page(int xc_handle, int io_fd, uint
    1.20      return 0;
    1.21  }
    1.22  
    1.23 +/*
     1.24 + * Get the list of PFNs that are not in the pseudo-phys map.
     1.25 + * Although we allocate pages on demand, the balloon driver may
     1.26 + * have decreased them simultaneously. So we have to free the freed
     1.27 + * pages here.
    1.28 + */
    1.29 +static int
    1.30 +xc_ia64_recv_unallocated_list(int xc_handle, int io_fd, uint32_t dom,
    1.31 +                              struct xen_ia64_p2m_table *p2m_table)
    1.32 +{
    1.33 +    int rc = -1;
    1.34 +    unsigned int i;
    1.35 +    unsigned int count;
    1.36 +    unsigned long *pfntab = NULL;
    1.37 +    unsigned int nr_frees;
    1.38 +
    1.39 +    if (!read_exact(io_fd, &count, sizeof(count))) {
    1.40 +        ERROR("Error when reading pfn count");
    1.41 +        goto out;
    1.42 +    }
    1.43 +
    1.44 +    pfntab = malloc(sizeof(unsigned long) * count);
    1.45 +    if (pfntab == NULL) {
    1.46 +        ERROR("Out of memory");
    1.47 +        goto out;
    1.48 +    }
    1.49 +
    1.50 +    if (!read_exact(io_fd, pfntab, sizeof(unsigned long)*count)) {
    1.51 +        ERROR("Error when reading pfntab");
    1.52 +        goto out;
    1.53 +    }
    1.54 +
    1.55 +    nr_frees = 0;
    1.56 +    for (i = 0; i < count; i++) {
    1.57 +        if (xc_ia64_p2m_allocated(p2m_table, pfntab[i])) {
    1.58 +            pfntab[nr_frees] = pfntab[i];
    1.59 +            nr_frees++;
    1.60 +        }
    1.61 +    }
    1.62 +    if (nr_frees > 0) {
    1.63 +        if (xc_domain_memory_decrease_reservation(xc_handle, dom, nr_frees,
    1.64 +                                                  0, pfntab) < 0) {
    1.65 +            PERROR("Could not decrease reservation");
    1.66 +            goto out;
    1.67 +        } else
    1.68 +            DPRINTF("Decreased reservation by %d / %d pages\n",
    1.69 +                    nr_frees, count);
    1.70 +    }
    1.71 +
    1.72 +    rc = 0;
    1.73 +    
    1.74 + out:
    1.75 +    if (pfntab != NULL)
    1.76 +        free(pfntab);
    1.77 +    return rc;
    1.78 +}
    1.79 +
    1.80 +static int
    1.81 +xc_ia64_recv_vcpu_context(int xc_handle, int io_fd, uint32_t dom,
    1.82 +                          uint32_t vcpu, vcpu_guest_context_t *ctxt)
    1.83 +{
    1.84 +    if (!read_exact(io_fd, ctxt, sizeof(*ctxt))) {
    1.85 +        ERROR("Error when reading ctxt");
    1.86 +        return -1;
    1.87 +    }
    1.88 +
    1.89 +    fprintf(stderr, "ip=%016lx, b0=%016lx\n", ctxt->regs.ip, ctxt->regs.b[0]);
    1.90 +
    1.91 +    /* Initialize and set registers.  */
    1.92 +    ctxt->flags = VGCF_EXTRA_REGS;
    1.93 +    if (xc_vcpu_setcontext(xc_handle, dom, vcpu, ctxt) != 0) {
    1.94 +        ERROR("Couldn't set vcpu context");
    1.95 +        return -1;
    1.96 +    }
    1.97 +
    1.98 +    /* Just a check.  */
    1.99 +    ctxt->flags = 0;
   1.100 +    if (xc_vcpu_getcontext(xc_handle, dom, vcpu, ctxt)) {
   1.101 +        ERROR("Could not get vcpu context");
   1.102 +        return -1;
   1.103 +    }
   1.104 +
   1.105 +    return 0;
   1.106 +}
   1.107 +
   1.108 +/* Read shared info.  */
   1.109 +static int
   1.110 +xc_ia64_recv_shared_info(int xc_handle, int io_fd, uint32_t dom,
   1.111 +                         unsigned long shared_info_frame,
   1.112 +                         unsigned long *start_info_pfn)
   1.113 +{
   1.114 +    unsigned int i;
   1.115 +
   1.116 +    /* The new domain's shared-info frame. */
   1.117 +    shared_info_t *shared_info;
   1.118 +    
   1.119 +    /* Read shared info.  */
   1.120 +    shared_info = xc_map_foreign_range(xc_handle, dom, PAGE_SIZE,
   1.121 +                                       PROT_READ|PROT_WRITE,
   1.122 +                                       shared_info_frame);
   1.123 +    if (shared_info == NULL) {
   1.124 +        ERROR("cannot map page");
   1.125 +        return -1;
   1.126 +    }
   1.127 +
   1.128 +    if (!read_exact(io_fd, shared_info, PAGE_SIZE)) {
   1.129 +        ERROR("Error when reading shared_info page");
   1.130 +        munmap(shared_info, PAGE_SIZE);
   1.131 +        return -1;
   1.132 +    }
   1.133 +
   1.134 +    /* clear any pending events and the selector */
   1.135 +    memset(&(shared_info->evtchn_pending[0]), 0,
   1.136 +           sizeof (shared_info->evtchn_pending));
   1.137 +    for (i = 0; i < MAX_VIRT_CPUS; i++)
   1.138 +        shared_info->vcpu_info[i].evtchn_pending_sel = 0;
   1.139 +
   1.140 +    if (start_info_pfn != NULL)
   1.141 +        *start_info_pfn = shared_info->arch.start_info_pfn;
   1.142 +
   1.143 +    munmap (shared_info, PAGE_SIZE);
   1.144 +
   1.145 +    return 0;
   1.146 +}
   1.147 +
   1.148 +static int
   1.149 +xc_ia64_pv_recv_context(int xc_handle, int io_fd, uint32_t dom,
   1.150 +                        unsigned long shared_info_frame,
   1.151 +                        struct xen_ia64_p2m_table *p2m_table,
   1.152 +                        unsigned int store_evtchn, unsigned long *store_mfn,
   1.153 +                        unsigned int console_evtchn,
   1.154 +                        unsigned long *console_mfn)
   1.155 +{
   1.156 +    int rc = -1;
   1.157 +    unsigned long gmfn;
   1.158 +
   1.159 +    /* A copy of the CPU context of the guest. */
   1.160 +    vcpu_guest_context_t ctxt;
   1.161 +
   1.162 +    /* A temporary mapping of the guest's start_info page. */
   1.163 +    start_info_t *start_info;
   1.164 +
   1.165 +    if (lock_pages(&ctxt, sizeof(ctxt))) {
   1.166 +        /* needed for build domctl, but might as well do early */
   1.167 +        ERROR("Unable to lock_pages ctxt");
   1.168 +        return -1;
   1.169 +    }
   1.170 +
   1.171 +    if (xc_ia64_recv_vcpu_context(xc_handle, io_fd, dom, 0, &ctxt))
   1.172 +        goto out;
   1.173 +
   1.174 +    /* Then get privreg page.  */
   1.175 +    if (read_page(xc_handle, io_fd, dom, ctxt.privregs_pfn) < 0) {
   1.176 +        ERROR("Could not read vcpu privregs");
   1.177 +        goto out;
   1.178 +    }
   1.179 +
   1.180 +    /* Read shared info.  */
   1.181 +    if (xc_ia64_recv_shared_info(xc_handle, io_fd, dom,
   1.182 +                                 shared_info_frame, &gmfn))
   1.183 +        goto out;
   1.184 +
   1.185 +    /* Uncanonicalise the suspend-record frame number and poke resume rec. */
   1.186 +    if (populate_page_if_necessary(xc_handle, dom, gmfn, p2m_table)) {
   1.187 +        ERROR("cannot populate page 0x%lx", gmfn);
   1.188 +        goto out;
   1.189 +    }
   1.190 +    start_info = xc_map_foreign_range(xc_handle, dom, PAGE_SIZE,
   1.191 +                                      PROT_READ | PROT_WRITE, gmfn);
   1.192 +    if (start_info == NULL) {
   1.193 +        ERROR("cannot map start_info page");
   1.194 +        goto out;
   1.195 +    }
   1.196 +    start_info->nr_pages = p2m_size;
   1.197 +    start_info->shared_info = shared_info_frame << PAGE_SHIFT;
   1.198 +    start_info->flags = 0;
   1.199 +    *store_mfn = start_info->store_mfn;
   1.200 +    start_info->store_evtchn = store_evtchn;
   1.201 +    *console_mfn = start_info->console.domU.mfn;
   1.202 +    start_info->console.domU.evtchn = console_evtchn;
   1.203 +    munmap(start_info, PAGE_SIZE);
   1.204 +
   1.205 +    rc = 0;
   1.206 +
   1.207 + out:
   1.208 +    unlock_pages(&ctxt, sizeof(ctxt));
   1.209 +    return rc;
   1.210 +}
   1.211 +
   1.212 +static int
   1.213 +xc_ia64_hvm_recv_context(int xc_handle, int io_fd, uint32_t dom,
   1.214 +                         unsigned long shared_info_frame,
   1.215 +                         struct xen_ia64_p2m_table *p2m_table,
   1.216 +                         unsigned int store_evtchn, unsigned long *store_mfn,
   1.217 +                         unsigned int console_evtchn,
   1.218 +                         unsigned long *console_mfn)
   1.219 +{
   1.220 +    int rc = -1;
   1.221 +    xc_dominfo_t info;
   1.222 +    unsigned int i;
   1.223 +    
   1.224 +    /* cpu */
   1.225 +    uint64_t max_virt_cpus;
   1.226 +    unsigned long vcpumap_size;
   1.227 +    uint64_t *vcpumap = NULL;
   1.228 +
   1.229 +    /* HVM: magic frames for ioreqs and xenstore comms */
   1.230 +    const int hvm_params[] = {
   1.231 +        HVM_PARAM_IOREQ_PFN,
   1.232 +        HVM_PARAM_BUFIOREQ_PFN,
   1.233 +        HVM_PARAM_STORE_PFN,
   1.234 +    };
   1.235 +    const int NR_PARAMS = sizeof(hvm_params) / sizeof(hvm_params[0]);
   1.236 +    /* ioreq_pfn, bufioreq_pfn, store_pfn */
   1.237 +    uint64_t magic_pfns[NR_PARAMS];
   1.238 +
     1.239 +    /* HVM: a buffer for holding HVM context */
   1.240 +    uint64_t rec_size = 0;
   1.241 +    uint8_t *hvm_buf = NULL;
   1.242 +
   1.243 +    /* Read shared info.  */
   1.244 +    if (xc_ia64_recv_shared_info(xc_handle, io_fd, dom, shared_info_frame,
   1.245 +                                 NULL))
   1.246 +        goto out;
   1.247 +
   1.248 +    /* vcpu map */
   1.249 +    if (xc_domain_getinfo(xc_handle, dom, 1, &info) != 1) {
   1.250 +        ERROR("Could not get domain info");
   1.251 +        goto out;
   1.252 +    }
   1.253 +    if (!read_exact(io_fd, &max_virt_cpus, sizeof(max_virt_cpus))) {
   1.254 +        ERROR("error reading max_virt_cpus");
   1.255 +        goto out;
   1.256 +    }
   1.257 +    if (max_virt_cpus < info.max_vcpu_id) {
   1.258 +        ERROR("too large max_virt_cpus %i < %i\n",
   1.259 +              max_virt_cpus, info.max_vcpu_id);
   1.260 +        goto out;
   1.261 +    }
   1.262 +    vcpumap_size = (max_virt_cpus + 1 + sizeof(vcpumap[0]) - 1) /
   1.263 +        sizeof(vcpumap[0]);
   1.264 +    vcpumap = malloc(vcpumap_size);
   1.265 +    if (vcpumap == NULL) {
   1.266 +        ERROR("memory alloc for vcpumap");
   1.267 +        goto out;
   1.268 +    }
   1.269 +    memset(vcpumap, 0, vcpumap_size);
   1.270 +    if (!read_exact(io_fd, vcpumap, vcpumap_size)) {
   1.271 +        ERROR("read vcpumap");
   1.272 +        goto out;
   1.273 +    }
   1.274 +    
   1.275 +    /* vcpu context */
   1.276 +    for (i = 0; i <= info.max_vcpu_id; i++) {
   1.277 +        /* A copy of the CPU context of the guest. */
   1.278 +        vcpu_guest_context_t ctxt;
   1.279 +
   1.280 +        if (!__test_bit(i, vcpumap))
   1.281 +            continue;
   1.282 +
   1.283 +        if (xc_ia64_recv_vcpu_context(xc_handle, io_fd, dom, i, &ctxt))
   1.284 +            goto out;
   1.285 +
     1.286 +        // system context of vcpu is received as hvm context.
   1.287 +    }    
   1.288 +
   1.289 +    /* Set HVM-specific parameters */
   1.290 +    if (!read_exact(io_fd, magic_pfns, sizeof(magic_pfns))) {
   1.291 +        ERROR("error reading magic page addresses");
   1.292 +        goto out;
   1.293 +    }
   1.294 +
   1.295 +    /* These comms pages need to be zeroed at the start of day */
   1.296 +    for (i = 0; i < NR_PARAMS; i++) {
   1.297 +        rc = xc_clear_domain_page(xc_handle, dom, magic_pfns[i]);
   1.298 +        if (rc != 0) {
   1.299 +            ERROR("error zeroing magic pages: %i", rc);
   1.300 +            goto out;
   1.301 +        }
   1.302 +        rc = xc_set_hvm_param(xc_handle, dom, hvm_params[i], magic_pfns[i]);
   1.303 +        if (rc != 0) {
   1.304 +            ERROR("error setting HVM params: %i", rc);
   1.305 +            goto out;
   1.306 +        }
   1.307 +    }
   1.308 +    rc = xc_set_hvm_param(xc_handle, dom,
   1.309 +                          HVM_PARAM_STORE_EVTCHN, store_evtchn);
   1.310 +    if (rc != 0) {
   1.311 +        ERROR("error setting HVM params: %i", rc);
   1.312 +        goto out;
   1.313 +    }
   1.314 +    *store_mfn = magic_pfns[2];
   1.315 +
   1.316 +    /* Read HVM context */
   1.317 +    if (!read_exact(io_fd, &rec_size, sizeof(rec_size))) {
   1.318 +        ERROR("error read hvm context size!\n");
   1.319 +        goto out;
   1.320 +    }
   1.321 +
   1.322 +    hvm_buf = malloc(rec_size);
   1.323 +    if (hvm_buf == NULL) {
   1.324 +        ERROR("memory alloc for hvm context buffer failed");
   1.325 +        errno = ENOMEM;
   1.326 +        goto out;
   1.327 +    }
   1.328 +
   1.329 +    if (!read_exact(io_fd, hvm_buf, rec_size)) {
   1.330 +        ERROR("error loading the HVM context");
   1.331 +        goto out;
   1.332 +    }
   1.333 +
   1.334 +    rc = xc_domain_hvm_setcontext(xc_handle, dom, hvm_buf, rec_size);
   1.335 +    if (rc != 0) {
   1.336 +        ERROR("error setting the HVM context");
   1.337 +        goto out;
   1.338 +    }
   1.339 +       
   1.340 +    rc = 0;
   1.341 +
   1.342 +out:
   1.343 +    if (vcpumap != NULL)
   1.344 +        free(vcpumap);
   1.345 +    if (hvm_buf != NULL)
   1.346 +        free(hvm_buf);
   1.347 +    return rc;
   1.348 +}
   1.349 +
   1.350 +/*
   1.351 + * hvm domain requires IO pages allocated when XEN_DOMCTL_arch_setup
   1.352 + */
   1.353 +static int
   1.354 +xc_ia64_hvm_domain_setup(int xc_handle, uint32_t dom)
   1.355 +{
   1.356 +    int rc;
   1.357 +    xen_pfn_t pfn_list[] = {
   1.358 +        IO_PAGE_START >> PAGE_SHIFT,
   1.359 +        BUFFER_IO_PAGE_START >> PAGE_SHIFT,
   1.360 +        BUFFER_PIO_PAGE_START >> PAGE_SHIFT,
   1.361 +    };
   1.362 +    unsigned long nr_pages = sizeof(pfn_list) / sizeof(pfn_list[0]);
   1.363 +
   1.364 +    rc = xc_domain_memory_populate_physmap(xc_handle, dom, nr_pages,
   1.365 +                                           0, 0, &pfn_list[0]);
   1.366 +    if (rc != 0)
   1.367 +        PERROR("Could not allocate IO page or buffer io page.\n");
   1.368 +    return rc;
   1.369 +}
   1.370 +
   1.371  int
   1.372  xc_domain_restore(int xc_handle, int io_fd, uint32_t dom,
   1.373                   unsigned int store_evtchn, unsigned long *store_mfn,
   1.374 @@ -83,29 +433,14 @@ xc_domain_restore(int xc_handle, int io_
   1.375  {
   1.376      DECLARE_DOMCTL;
   1.377      int rc = 1;
   1.378 -    unsigned int i;
   1.379 -    unsigned long gmfn;
   1.380      unsigned long ver;
   1.381  
   1.382      /* The new domain's shared-info frame number. */
   1.383      unsigned long shared_info_frame;
   1.384 -    unsigned char shared_info_page[PAGE_SIZE]; /* saved contents from file */
   1.385 -    shared_info_t *shared_info = (shared_info_t *)shared_info_page;
   1.386 -
   1.387 -    /* A copy of the CPU context of the guest. */
   1.388 -    vcpu_guest_context_t ctxt;
   1.389 -
   1.390 -    /* A temporary mapping of the guest's start_info page. */
   1.391 -    start_info_t *start_info;
   1.392  
   1.393      struct xen_ia64_p2m_table p2m_table;
   1.394      xc_ia64_p2m_init(&p2m_table);
   1.395  
   1.396 -    if (hvm) {
   1.397 -        ERROR("HVM Restore is unsupported");
   1.398 -        goto out;
   1.399 -    }
   1.400 -
   1.401      /* For info only */
   1.402      nr_pfns = 0;
   1.403  
   1.404 @@ -125,17 +460,14 @@ xc_domain_restore(int xc_handle, int io_
   1.405          goto out;
   1.406      }
   1.407  
   1.408 -    if (lock_pages(&ctxt, sizeof(ctxt))) {
   1.409 -        /* needed for build domctl, but might as well do early */
   1.410 -        ERROR("Unable to lock_pages ctxt");
   1.411 -        return 1;
   1.412 -    }
   1.413 -
   1.414      if (!read_exact(io_fd, &domctl.u.arch_setup, sizeof(domctl.u.arch_setup))) {
   1.415          ERROR("read: domain setup");
   1.416          goto out;
   1.417      }
   1.418  
   1.419 +    if (hvm && xc_ia64_hvm_domain_setup(xc_handle, dom) != 0)
   1.420 +        goto out;
   1.421 +    
   1.422      /* Build firmware (will be overwritten).  */
   1.423      domctl.domain = (domid_t)dom;
   1.424      domctl.u.arch_setup.flags &= ~XEN_DOMAINSETUP_query;
   1.425 @@ -212,6 +544,7 @@ xc_domain_restore(int xc_handle, int io_
   1.426      DPRINTF("Reloading memory pages:   0%%\n");
   1.427  
   1.428      while (1) {
   1.429 +        unsigned long gmfn;
   1.430          if (!read_exact(io_fd, &gmfn, sizeof(unsigned long))) {
   1.431              ERROR("Error when reading batch size");
   1.432              goto out;
   1.433 @@ -229,127 +562,19 @@ xc_domain_restore(int xc_handle, int io_
   1.434  
   1.435      DPRINTF("Received all pages\n");
   1.436  
   1.437 -    /*
   1.438 -     * Get the list of PFNs that are not in the psuedo-phys map.
   1.439 -     * Although we allocate pages on demand, balloon driver may 
   1.440 -     * decreased simaltenously. So we have to free the freed
   1.441 -     * pages here.
   1.442 -     */
   1.443 -    {
   1.444 -        unsigned int count;
   1.445 -        unsigned long *pfntab;
   1.446 -        unsigned int nr_frees;
   1.447 -
   1.448 -        if (!read_exact(io_fd, &count, sizeof(count))) {
   1.449 -            ERROR("Error when reading pfn count");
   1.450 -            goto out;
   1.451 -        }
   1.452 -
   1.453 -        pfntab = malloc(sizeof(unsigned long) * count);
   1.454 -        if (!pfntab) {
   1.455 -            ERROR("Out of memory");
   1.456 -            goto out;
   1.457 -        }
   1.458 -
   1.459 -        if (!read_exact(io_fd, pfntab, sizeof(unsigned long)*count)) {
   1.460 -            ERROR("Error when reading pfntab");
   1.461 -            free(pfntab);
   1.462 -            goto out;
   1.463 -        }
   1.464 -
   1.465 -        nr_frees = 0;
   1.466 -        for (i = 0; i < count; i++) {
   1.467 -            if (xc_ia64_p2m_allocated(&p2m_table, pfntab[i])) {
   1.468 -                pfntab[nr_frees] = pfntab[i];
   1.469 -                nr_frees++;
   1.470 -            }
   1.471 -        }
   1.472 -        if (nr_frees > 0) {
   1.473 -            if (xc_domain_memory_decrease_reservation(xc_handle, dom, nr_frees,
   1.474 -                                                      0, pfntab) < 0) {
   1.475 -                ERROR("Could not decrease reservation : %d", rc);
   1.476 -                free(pfntab);
   1.477 -                goto out;
   1.478 -            }
   1.479 -            else
   1.480 -                DPRINTF("Decreased reservation by %d / %d pages\n",
   1.481 -                        nr_frees, count);
   1.482 -        }
   1.483 -        free(pfntab);
   1.484 -    }
   1.485 -
   1.486 -    if (!read_exact(io_fd, &ctxt, sizeof(ctxt))) {
   1.487 -        ERROR("Error when reading ctxt");
   1.488 +    if (xc_ia64_recv_unallocated_list(xc_handle, io_fd, dom, &p2m_table))
   1.489          goto out;
   1.490 -    }
   1.491 -
   1.492 -    fprintf(stderr, "ip=%016lx, b0=%016lx\n", ctxt.regs.ip, ctxt.regs.b[0]);
   1.493  
   1.494 -    /* Initialize and set registers.  */
   1.495 -    ctxt.flags = VGCF_EXTRA_REGS;
   1.496 -    domctl.cmd = XEN_DOMCTL_setvcpucontext;
   1.497 -    domctl.domain = (domid_t)dom;
   1.498 -    domctl.u.vcpucontext.vcpu   = 0;
   1.499 -    set_xen_guest_handle(domctl.u.vcpucontext.ctxt, &ctxt);
   1.500 -    if (xc_domctl(xc_handle, &domctl) != 0) {
   1.501 -        ERROR("Couldn't set vcpu context");
   1.502 -        goto out;
   1.503 -    }
   1.504 -
   1.505 -    /* Just a check.  */
   1.506 -    if (xc_vcpu_getcontext(xc_handle, dom, 0 /* XXX */, &ctxt)) {
   1.507 -        ERROR("Could not get vcpu context");
   1.508 -        goto out;
   1.509 -    }
   1.510 -
   1.511 -    /* Then get privreg page.  */
   1.512 -    if (read_page(xc_handle, io_fd, dom, ctxt.privregs_pfn) < 0) {
   1.513 -        ERROR("Could not read vcpu privregs");
   1.514 +    if (!hvm)
   1.515 +        rc = xc_ia64_pv_recv_context(xc_handle, io_fd, dom, shared_info_frame,
   1.516 +                                     &p2m_table, store_evtchn, store_mfn,
   1.517 +                                     console_evtchn, console_mfn);
   1.518 +    else
   1.519 +        rc = xc_ia64_hvm_recv_context(xc_handle, io_fd, dom, shared_info_frame,
   1.520 +                                      &p2m_table, store_evtchn, store_mfn,
   1.521 +                                      console_evtchn, console_mfn);
   1.522 +    if (rc)
   1.523          goto out;
   1.524 -    }
   1.525 -
   1.526 -    /* Read shared info.  */
   1.527 -    shared_info = xc_map_foreign_range(xc_handle, dom, PAGE_SIZE,
   1.528 -                                       PROT_READ|PROT_WRITE, shared_info_frame);
   1.529 -    if (shared_info == NULL) {
   1.530 -            ERROR("cannot map page");
   1.531 -            goto out;
   1.532 -    }
   1.533 -    if (!read_exact(io_fd, shared_info, PAGE_SIZE)) {
   1.534 -            ERROR("Error when reading shared_info page");
   1.535 -            munmap(shared_info, PAGE_SIZE);
   1.536 -            goto out;
   1.537 -    }
   1.538 -
   1.539 -    /* clear any pending events and the selector */
   1.540 -    memset(&(shared_info->evtchn_pending[0]), 0,
   1.541 -           sizeof (shared_info->evtchn_pending));
   1.542 -    for (i = 0; i < MAX_VIRT_CPUS; i++)
   1.543 -        shared_info->vcpu_info[i].evtchn_pending_sel = 0;
   1.544 -
   1.545 -    gmfn = shared_info->arch.start_info_pfn;
   1.546 -
   1.547 -    munmap (shared_info, PAGE_SIZE);
   1.548 -
   1.549 -    /* Uncanonicalise the suspend-record frame number and poke resume rec. */
   1.550 -    if (populate_page_if_necessary(xc_handle, dom, gmfn, &p2m_table)) {
   1.551 -        ERROR("cannot populate page 0x%lx", gmfn);
   1.552 -        goto out;
   1.553 -    }
   1.554 -    start_info = xc_map_foreign_range(xc_handle, dom, PAGE_SIZE,
   1.555 -                                      PROT_READ | PROT_WRITE, gmfn);
   1.556 -    if (start_info == NULL) {
   1.557 -        ERROR("cannot map start_info page");
   1.558 -        goto out;
   1.559 -    }
   1.560 -    start_info->nr_pages = p2m_size;
   1.561 -    start_info->shared_info = shared_info_frame << PAGE_SHIFT;
   1.562 -    start_info->flags = 0;
   1.563 -    *store_mfn = start_info->store_mfn;
   1.564 -    start_info->store_evtchn = store_evtchn;
   1.565 -    *console_mfn = start_info->console.domU.mfn;
   1.566 -    start_info->console.domU.evtchn = console_evtchn;
   1.567 -    munmap(start_info, PAGE_SIZE);
   1.568  
   1.569      /*
   1.570       * Safety checking of saved context:
   1.571 @@ -368,13 +593,11 @@ xc_domain_restore(int xc_handle, int io_
   1.572      rc = 0;
   1.573  
   1.574   out:
   1.575 +    xc_ia64_p2m_unmap(&p2m_table);
   1.576 +
   1.577      if ((rc != 0) && (dom != 0))
   1.578          xc_domain_destroy(xc_handle, dom);
   1.579  
   1.580 -    xc_ia64_p2m_unmap(&p2m_table);
   1.581 -
   1.582 -    unlock_pages(&ctxt, sizeof(ctxt));
   1.583 -
   1.584      DPRINTF("Restore exit with rc=%d\n", rc);
   1.585  
   1.586      return rc;
     2.1 --- a/tools/libxc/ia64/xc_ia64_linux_save.c	Sun Oct 21 14:45:20 2007 -0600
     2.2 +++ b/tools/libxc/ia64/xc_ia64_linux_save.c	Sun Oct 21 14:57:13 2007 -0600
     2.3 @@ -8,6 +8,7 @@
     2.4   *
     2.5   * Copyright (c) 2007 Isaku Yamahata <yamahata@valinux.co.jp>
     2.6   *   Use foreign p2m exposure.
     2.7 + *   VTi domain support.
     2.8   */
     2.9  
    2.10  #include <inttypes.h>
    2.11 @@ -20,6 +21,7 @@
    2.12  #include "xc_ia64.h"
    2.13  #include "xc_ia64_save_restore.h"
    2.14  #include "xc_efi.h"
    2.15 +#include "xen/hvm/params.h"
    2.16  
    2.17  /*
    2.18  ** Default values for important tuning parameters. Can override by passing
    2.19 @@ -35,14 +37,6 @@
    2.20  ** During (live) save/migrate, we maintain a number of bitmaps to track
    2.21  ** which pages we have to send, and to skip.
    2.22  */
    2.23 -
    2.24 -#define BITS_PER_LONG (sizeof(unsigned long) * 8)
    2.25 -
    2.26 -#define BITMAP_ENTRY(_nr,_bmap) \
    2.27 -   ((unsigned long *)(_bmap))[(_nr)/BITS_PER_LONG]
    2.28 -
    2.29 -#define BITMAP_SHIFT(_nr) ((_nr) % BITS_PER_LONG)
    2.30 -
    2.31  static inline int test_bit(int nr, volatile void * addr)
    2.32  {
    2.33      return (BITMAP_ENTRY(nr, addr) >> BITMAP_SHIFT(nr)) & 1;
    2.34 @@ -136,6 +130,271 @@ retry:
    2.35      return -1;
    2.36  }
    2.37  
    2.38 +static inline int
    2.39 +md_is_not_ram(const efi_memory_desc_t *md)
    2.40 +{
    2.41 +    return ((md->type != EFI_CONVENTIONAL_MEMORY) ||
    2.42 +            (md->attribute != EFI_MEMORY_WB) ||
    2.43 +            (md->num_pages == 0));
    2.44 +}
    2.45 +
    2.46 +/*
     2.47 + * Send through a list of all the PFNs that were not in the map at the close.
     2.48 + * We send the pages which were allocated. However the balloon driver may
     2.49 + * have decreased them after the pages were sent. So we have to check for
     2.50 + * freed pages after pausing the domain.
    2.51 + */
    2.52 +static int
    2.53 +xc_ia64_send_unallocated_list(int xc_handle, int io_fd, 
    2.54 +                              struct xen_ia64_p2m_table *p2m_table,
    2.55 +                              xen_ia64_memmap_info_t *memmap_info, 
    2.56 +                              void *memmap_desc_start, void *memmap_desc_end)
    2.57 +{
    2.58 +    void *p;
    2.59 +    efi_memory_desc_t *md;
    2.60 +
    2.61 +    unsigned long N;
    2.62 +    unsigned long pfntab[1024];
    2.63 +    unsigned int j;
    2.64 +
    2.65 +    j = 0;
    2.66 +    for (p = memmap_desc_start;
    2.67 +         p < memmap_desc_end;
    2.68 +         p += memmap_info->efi_memdesc_size) {
    2.69 +        md = p;
    2.70 +
    2.71 +        if (md_is_not_ram(md))
    2.72 +            continue;
    2.73 +
    2.74 +        for (N = md->phys_addr >> PAGE_SHIFT;
    2.75 +             N < (md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT)) >>
    2.76 +                 PAGE_SHIFT;
    2.77 +             N++) {
    2.78 +            if (!xc_ia64_p2m_allocated(p2m_table, N))
    2.79 +                j++;
    2.80 +        }
    2.81 +    }
    2.82 +    if (!write_exact(io_fd, &j, sizeof(unsigned int))) {
    2.83 +        ERROR("Error when writing to state file (6a)");
    2.84 +        return -1;
    2.85 +    }
    2.86 +        
    2.87 +    j = 0;
    2.88 +    for (p = memmap_desc_start;
    2.89 +         p < memmap_desc_end;
    2.90 +         p += memmap_info->efi_memdesc_size) {
    2.91 +        md = p;
    2.92 +
    2.93 +        if (md_is_not_ram(md))
    2.94 +            continue;
    2.95 +
    2.96 +        for (N = md->phys_addr >> PAGE_SHIFT;
    2.97 +             N < (md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT)) >>
    2.98 +                 PAGE_SHIFT;
    2.99 +             N++) {
   2.100 +            if (!xc_ia64_p2m_allocated(p2m_table, N))
   2.101 +                pfntab[j++] = N;
   2.102 +            if (j == sizeof(pfntab)/sizeof(pfntab[0])) {
   2.103 +                if (!write_exact(io_fd, &pfntab, sizeof(pfntab[0]) * j)) {
   2.104 +                    ERROR("Error when writing to state file (6b)");
   2.105 +                    return -1;
   2.106 +                }
   2.107 +                j = 0;
   2.108 +            }
   2.109 +        }
   2.110 +    }
   2.111 +    if (j > 0) {
   2.112 +        if (!write_exact(io_fd, &pfntab, sizeof(pfntab[0]) * j)) {
   2.113 +            ERROR("Error when writing to state file (6c)");
   2.114 +            return -1;
   2.115 +        }
   2.116 +    }
   2.117 +
   2.118 +    return 0;
   2.119 +}
   2.120 +
   2.121 +static int
   2.122 +xc_ia64_send_vcpu_context(int xc_handle, int io_fd, uint32_t dom,
   2.123 +                          uint32_t vcpu, vcpu_guest_context_t *ctxt)
   2.124 +{
   2.125 +    if (xc_vcpu_getcontext(xc_handle, dom, vcpu, ctxt)) {
   2.126 +        ERROR("Could not get vcpu context");
   2.127 +        return -1;
   2.128 +    }
   2.129 +
   2.130 +    if (!write_exact(io_fd, ctxt, sizeof(*ctxt))) {
   2.131 +        ERROR("Error when writing to state file (1)");
   2.132 +        return -1;
   2.133 +    }
   2.134 +
   2.135 +    fprintf(stderr, "ip=%016lx, b0=%016lx\n", ctxt->regs.ip, ctxt->regs.b[0]);
   2.136 +    return 0;
   2.137 +}
   2.138 +
   2.139 +static int
   2.140 +xc_ia64_send_shared_info(int xc_handle, int io_fd, shared_info_t *live_shinfo)
   2.141 +{
   2.142 +    if (!write_exact(io_fd, live_shinfo, PAGE_SIZE)) {
   2.143 +        ERROR("Error when writing to state file (1)");
   2.144 +        return -1;
   2.145 +    }
   2.146 +    return 0;
   2.147 +}
   2.148 +
   2.149 +static int
   2.150 +xc_ia64_pv_send_context(int xc_handle, int io_fd, uint32_t dom,
   2.151 +                        shared_info_t *live_shinfo)
   2.152 +{
   2.153 +    /* A copy of the CPU context of the guest. */
   2.154 +    vcpu_guest_context_t ctxt;
   2.155 +    char *mem;
   2.156 +
   2.157 +    if (xc_ia64_send_vcpu_context(xc_handle, io_fd, dom, 0, &ctxt))
   2.158 +        return -1;
   2.159 +
   2.160 +    mem = xc_map_foreign_range(xc_handle, dom, PAGE_SIZE,
   2.161 +                               PROT_READ|PROT_WRITE, ctxt.privregs_pfn);
   2.162 +    if (mem == NULL) {
   2.163 +        ERROR("cannot map privreg page");
   2.164 +        return -1;
   2.165 +    }
   2.166 +    if (!write_exact(io_fd, mem, PAGE_SIZE)) {
   2.167 +        ERROR("Error when writing privreg to state file (5)");
   2.168 +        munmap(mem, PAGE_SIZE);
   2.169 +        return -1;
   2.170 +    }
   2.171 +    munmap(mem, PAGE_SIZE);
   2.172 +
   2.173 +    if (xc_ia64_send_shared_info(xc_handle, io_fd, live_shinfo))
   2.174 +        return -1;
   2.175 +
   2.176 +    return 0;
   2.177 +}
   2.178 +
   2.179 +static int
   2.180 +xc_ia64_hvm_send_context(int xc_handle, int io_fd, uint32_t dom,
   2.181 +                         const xc_dominfo_t *info, shared_info_t *live_shinfo)
   2.182 +{
   2.183 +    int rc = -1;
   2.184 +    unsigned int i;
   2.185 +
   2.186 +    /* vcpu map */
   2.187 +    uint64_t max_virt_cpus;
   2.188 +    unsigned long vcpumap_size;
   2.189 +    uint64_t *vcpumap = NULL;
   2.190 +
   2.191 +    /* HVM: magic frames for ioreqs and xenstore comms */
   2.192 +    const int hvm_params[] = {
   2.193 +        HVM_PARAM_IOREQ_PFN,
   2.194 +        HVM_PARAM_BUFIOREQ_PFN,
   2.195 +        HVM_PARAM_STORE_PFN,
   2.196 +    };
   2.197 +    const int NR_PARAMS = sizeof(hvm_params) / sizeof(hvm_params[0]);
   2.198 +    /* ioreq_pfn, bufioreq_pfn, store_pfn */
   2.199 +    uint64_t magic_pfns[NR_PARAMS];
   2.200 +
     2.201 +    /* HVM: a buffer for holding HVM context */
   2.202 +    uint64_t rec_size;
   2.203 +    uint64_t hvm_buf_size = 0;
   2.204 +    uint8_t *hvm_buf = NULL;
   2.205 +
   2.206 +    if (xc_ia64_send_shared_info(xc_handle, io_fd, live_shinfo))
   2.207 +        return -1;
   2.208 +
   2.209 +    /* vcpu map */
   2.210 +    max_virt_cpus = MAX_VIRT_CPUS;
   2.211 +    vcpumap_size = (max_virt_cpus + 1 + sizeof(vcpumap[0]) - 1) /
   2.212 +        sizeof(vcpumap[0]);
   2.213 +    vcpumap = malloc(vcpumap_size);
   2.214 +    if (vcpumap == NULL) {
   2.215 +        ERROR("memory alloc for vcpumap");
   2.216 +        goto out;
   2.217 +    }
   2.218 +    memset(vcpumap, 0, vcpumap_size);
   2.219 +
   2.220 +    for (i = 0; i <= info->max_vcpu_id; i++) {
   2.221 +        xc_vcpuinfo_t vinfo;
   2.222 +        if ((xc_vcpu_getinfo(xc_handle, dom, i, &vinfo) == 0) && vinfo.online)
   2.223 +            __set_bit(i, vcpumap);
   2.224 +    }
   2.225 +
   2.226 +    if (!write_exact(io_fd, &max_virt_cpus, sizeof(max_virt_cpus))) {
   2.227 +        ERROR("write max_virt_cpus");
   2.228 +        goto out;
   2.229 +    }
   2.230 +
   2.231 +    if (!write_exact(io_fd, vcpumap, vcpumap_size)) {
   2.232 +        ERROR("write vcpumap");
   2.233 +        goto out;
   2.234 +    }
   2.235 +
   2.236 +    /* vcpu context */
   2.237 +    for (i = 0; i <= info->max_vcpu_id; i++) {
   2.238 +        /* A copy of the CPU context of the guest. */
   2.239 +        vcpu_guest_context_t ctxt;
   2.240 +
   2.241 +        if (!__test_bit(i, vcpumap))
   2.242 +            continue;
   2.243 +
   2.244 +        if (xc_ia64_send_vcpu_context(xc_handle, io_fd, dom, i, &ctxt))
   2.245 +            goto out;
   2.246 +
    2.247 +        // The system context of each vcpu is sent as part of the HVM context.
   2.248 +    }    
   2.249 +
   2.250 +    /* Save magic-page locations. */
   2.251 +    memset(magic_pfns, 0, sizeof(magic_pfns));
   2.252 +    for (i = 0; i < NR_PARAMS; i++) {
   2.253 +        if (xc_get_hvm_param(xc_handle, dom, hvm_params[i], &magic_pfns[i])) {
   2.254 +            PERROR("Error when xc_get_hvm_param");
   2.255 +            goto out;
   2.256 +        }
   2.257 +    }
   2.258 +
   2.259 +    if (!write_exact(io_fd, magic_pfns, sizeof(magic_pfns))) {
   2.260 +        ERROR("Error when writing to state file (7)");
   2.261 +        goto out;
   2.262 +    }
   2.263 +
   2.264 +    /* Need another buffer for HVM context */
   2.265 +    hvm_buf_size = xc_domain_hvm_getcontext(xc_handle, dom, 0, 0);
   2.266 +    if (hvm_buf_size == -1) {
   2.267 +        ERROR("Couldn't get HVM context size from Xen");
   2.268 +        goto out;
   2.269 +    }
   2.270 +
   2.271 +    hvm_buf = malloc(hvm_buf_size);
   2.272 +    if (!hvm_buf) {
   2.273 +        ERROR("Couldn't allocate memory");
   2.274 +        goto out;
   2.275 +    }
   2.276 +
   2.277 +    /* Get HVM context from Xen and save it too */
   2.278 +    rec_size = xc_domain_hvm_getcontext(xc_handle, dom, hvm_buf, hvm_buf_size);
   2.279 +    if (rec_size == -1) {
   2.280 +        ERROR("HVM:Could not get hvm buffer");
   2.281 +        goto out;
   2.282 +    }
   2.283 +        
   2.284 +    if (!write_exact(io_fd, &rec_size, sizeof(rec_size))) {
   2.285 +        ERROR("error write hvm buffer size");
   2.286 +        goto out;
   2.287 +    }
   2.288 +        
   2.289 +    if (!write_exact(io_fd, hvm_buf, rec_size)) {
   2.290 +        ERROR("write HVM info failed!\n");
   2.291 +        goto out;
   2.292 +    }
   2.293 +
   2.294 +    rc = 0;
   2.295 +out:
   2.296 +    if (hvm_buf != NULL)
   2.297 +        free(hvm_buf);
   2.298 +    if (vcpumap != NULL)
   2.299 +        free(vcpumap);
   2.300 +    return rc;
   2.301 +}
   2.302 +
   2.303  int
   2.304  xc_domain_save(int xc_handle, int io_fd, uint32_t dom, uint32_t max_iters,
   2.305                 uint32_t max_factor, uint32_t flags, int (*suspend)(int),
   2.306 @@ -147,16 +406,12 @@ xc_domain_save(int xc_handle, int io_fd,
   2.307  
   2.308      int rc = 1;
   2.309  
   2.310 -    //int live  = (flags & XCFLAGS_LIVE);
   2.311      int debug = (flags & XCFLAGS_DEBUG);
   2.312      int live  = (flags & XCFLAGS_LIVE);
   2.313  
   2.314      /* The new domain's shared-info frame number. */
   2.315      unsigned long shared_info_frame;
   2.316  
   2.317 -    /* A copy of the CPU context of the guest. */
   2.318 -    vcpu_guest_context_t ctxt;
   2.319 -
   2.320      /* Live mapping of shared info structure */
   2.321      shared_info_t *live_shinfo = NULL;
   2.322  
   2.323 @@ -185,6 +440,12 @@ xc_domain_save(int xc_handle, int io_fd,
   2.324  
   2.325      char *mem;
   2.326  
   2.327 +    /* HVM: shared-memory bitmaps for getting log-dirty bits from qemu-dm */
   2.328 +    unsigned long *qemu_bitmaps[2];
   2.329 +    int qemu_active = 0;
   2.330 +    int qemu_non_active = 1;
   2.331 +
   2.332 +    /* for foreign p2m exposure */
   2.333      unsigned int memmap_info_num_pages;
   2.334      unsigned long memmap_size = 0;
   2.335      xen_ia64_memmap_info_t *memmap_info_live = NULL;
   2.336 @@ -299,6 +560,14 @@ xc_domain_save(int xc_handle, int io_fd,
   2.337              goto out;
   2.338          }
   2.339  
   2.340 +        if (hvm) {
   2.341 +            /* Get qemu-dm logging dirty pages too */
   2.342 +            void *seg = init_qemu_maps(dom, bitmap_size);
   2.343 +            qemu_bitmaps[0] = seg;
   2.344 +            qemu_bitmaps[1] = seg + bitmap_size;
   2.345 +            qemu_active = 0;
   2.346 +            qemu_non_active = 1;
   2.347 +        }
   2.348      } else {
   2.349  
   2.350          /* This is a non-live suspend. Issue the call back to get the
   2.351 @@ -374,9 +643,7 @@ xc_domain_save(int xc_handle, int io_fd,
   2.352               p < memmap_desc_end;
   2.353               p += memmap_info->efi_memdesc_size) {
   2.354              md = p;
   2.355 -            if (md->type != EFI_CONVENTIONAL_MEMORY ||
   2.356 -                md->attribute != EFI_MEMORY_WB ||
   2.357 -                md->num_pages == 0)
   2.358 +            if (md_is_not_ram(md))
   2.359                  continue;
   2.360              
   2.361              for (N = md->phys_addr >> PAGE_SHIFT;
   2.362 @@ -455,11 +722,27 @@ xc_domain_save(int xc_handle, int io_fd,
   2.363                  goto out;
   2.364              }
   2.365  
   2.366 +            if (hvm) {
   2.367 +                unsigned int j;
   2.368 +                /* Pull in the dirty bits from qemu-dm too */
   2.369 +                if (!last_iter) {
   2.370 +                    qemu_active = qemu_non_active;
   2.371 +                    qemu_non_active = qemu_active ? 0 : 1;
   2.372 +                    qemu_flip_buffer(dom, qemu_active);
   2.373 +                    for (j = 0; j < bitmap_size / sizeof(unsigned long); j++) {
   2.374 +                        to_send[j] |= qemu_bitmaps[qemu_non_active][j];
   2.375 +                        qemu_bitmaps[qemu_non_active][j] = 0;
   2.376 +                    }
   2.377 +                } else {
   2.378 +                    for (j = 0; j < bitmap_size / sizeof(unsigned long); j++)
   2.379 +                        to_send[j] |= qemu_bitmaps[qemu_active][j];
   2.380 +                }
   2.381 +            }
   2.382 +
   2.383              sent_last_iter = sent_this_iter;
   2.384  
   2.385              //print_stats(xc_handle, dom, sent_this_iter, &stats, 1);
   2.386          }
   2.387 -
   2.388      }
   2.389  
   2.390      fprintf(stderr, "All memory is saved\n");
   2.391 @@ -473,100 +756,18 @@ xc_domain_save(int xc_handle, int io_fd,
   2.392          }
   2.393      }
   2.394  
   2.395 -    /*
   2.396 -     * Send through a list of all the PFNs that were not in map at the close.
   2.397 -     * We send pages which was allocated. However balloon driver may 
   2.398 -     * decreased after sending page. So we have to check the freed
   2.399 -     * page after pausing the domain.
   2.400 -     */
   2.401 -    {
   2.402 -        unsigned long N;
   2.403 -        unsigned long pfntab[1024];
   2.404 -        unsigned int j;
   2.405 +    if (xc_ia64_send_unallocated_list(xc_handle, io_fd, &p2m_table,
   2.406 +                                      memmap_info,
   2.407 +                                      memmap_desc_start, memmap_desc_end))
   2.408 +        goto out;
   2.409  
   2.410 -        j = 0;
   2.411 -        for (p = memmap_desc_start;
   2.412 -             p < memmap_desc_end;
   2.413 -             p += memmap_info->efi_memdesc_size) {
   2.414 -            md = p;
   2.415 -            if (md->type != EFI_CONVENTIONAL_MEMORY ||
   2.416 -                md->attribute != EFI_MEMORY_WB ||
   2.417 -                md->num_pages == 0)
   2.418 -                continue;
   2.419 -            for (N = md->phys_addr >> PAGE_SHIFT;
   2.420 -                 N < (md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT)) >>
   2.421 -                     PAGE_SHIFT;
   2.422 -                 N++) {
   2.423 -                if (!xc_ia64_p2m_allocated(&p2m_table, N))
   2.424 -                    j++;
   2.425 -            }
   2.426 -        }
   2.427 -        if (!write_exact(io_fd, &j, sizeof(unsigned int))) {
   2.428 -            ERROR("Error when writing to state file (6a)");
   2.429 -            goto out;
   2.430 -        }
   2.431 -        
   2.432 -        j = 0;
   2.433 -        for (p = memmap_desc_start;
   2.434 -             p < memmap_desc_end;
   2.435 -             p += memmap_info->efi_memdesc_size) {
   2.436 -            md = p;
   2.437 -            if (md->type != EFI_CONVENTIONAL_MEMORY ||
   2.438 -                md->attribute != EFI_MEMORY_WB ||
   2.439 -                md->num_pages == 0)
   2.440 -                continue;
   2.441 -            for (N = md->phys_addr >> PAGE_SHIFT;
   2.442 -                 N < (md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT)) >>
   2.443 -                     PAGE_SHIFT;
   2.444 -                 N++) {
   2.445 -                if (!xc_ia64_p2m_allocated(&p2m_table, N))
   2.446 -                    pfntab[j++] = N;
   2.447 -                if (j == sizeof(pfntab)/sizeof(pfntab[0])) {
   2.448 -                    if (!write_exact(io_fd, &pfntab, sizeof(pfntab[0]) * j)) {
   2.449 -                        ERROR("Error when writing to state file (6b)");
   2.450 -                        goto out;
   2.451 -                    }
   2.452 -                    j = 0;
   2.453 -                }
   2.454 -            }
   2.455 -        }
   2.456 -        if (j > 0) {
   2.457 -            if (!write_exact(io_fd, &pfntab, sizeof(pfntab[0]) * j)) {
   2.458 -                ERROR("Error when writing to state file (6b)");
   2.459 -                goto out;
   2.460 -            }
   2.461 -        }
   2.462 -    }
   2.463 -
   2.464 -    if (xc_vcpu_getcontext(xc_handle, dom, 0, &ctxt)) {
   2.465 -        ERROR("Could not get vcpu context");
   2.466 +    if (!hvm)
   2.467 +        rc = xc_ia64_pv_send_context(xc_handle, io_fd, dom, live_shinfo);
   2.468 +    else
   2.469 +        rc = xc_ia64_hvm_send_context(xc_handle, io_fd,
   2.470 +                                      dom, &info, live_shinfo);
   2.471 +    if (rc)
   2.472          goto out;
   2.473 -    }
   2.474 -
   2.475 -    if (!write_exact(io_fd, &ctxt, sizeof(ctxt))) {
   2.476 -        ERROR("Error when writing to state file (1)");
   2.477 -        goto out;
   2.478 -    }
   2.479 -
   2.480 -    fprintf(stderr, "ip=%016lx, b0=%016lx\n", ctxt.regs.ip, ctxt.regs.b[0]);
   2.481 -
   2.482 -    mem = xc_map_foreign_range(xc_handle, dom, PAGE_SIZE,
   2.483 -                               PROT_READ|PROT_WRITE, ctxt.privregs_pfn);
   2.484 -    if (mem == NULL) {
   2.485 -        ERROR("cannot map privreg page");
   2.486 -        goto out;
   2.487 -    }
   2.488 -    if (write(io_fd, mem, PAGE_SIZE) != PAGE_SIZE) {
   2.489 -        ERROR("Error when writing privreg to state file (5)");
   2.490 -        munmap(mem, PAGE_SIZE);
   2.491 -        goto out;
   2.492 -    }
   2.493 -    munmap(mem, PAGE_SIZE);
   2.494 -
   2.495 -    if (!write_exact(io_fd, live_shinfo, PAGE_SIZE)) {
   2.496 -        ERROR("Error when writing to state file (1)");
   2.497 -        goto out;
   2.498 -    }
   2.499  
   2.500      /* Success! */
   2.501      rc = 0;
     3.1 --- a/tools/libxc/ia64/xc_ia64_save_restore.h	Sun Oct 21 14:45:20 2007 -0600
     3.2 +++ b/tools/libxc/ia64/xc_ia64_save_restore.h	Sun Oct 21 14:57:13 2007 -0600
     3.3 @@ -31,6 +31,27 @@
     3.4  
     3.5  #define XC_IA64_SR_FORMAT_VER_CURRENT   XC_IA64_SR_FORMAT_VER_TWO
     3.6  
     3.7 +/*
     3.8 +** During (live) save/migrate, we maintain a number of bitmaps to track
     3.9 +** which pages we have to send, and to skip.
    3.10 +*/
    3.11 +#define BITS_PER_LONG (sizeof(unsigned long) * 8)
    3.12 +
    3.13 +#define BITMAP_ENTRY(_nr,_bmap) \
    3.14 +   ((unsigned long *)(_bmap))[(_nr)/BITS_PER_LONG]
    3.15 +
    3.16 +#define BITMAP_SHIFT(_nr) ((_nr) % BITS_PER_LONG)
    3.17 +
    3.18 +static inline int __test_bit(int nr, void * addr)
    3.19 +{
    3.20 +    return (BITMAP_ENTRY(nr, addr) >> BITMAP_SHIFT(nr)) & 1;
    3.21 +}
    3.22 +
    3.23 +static inline void __set_bit(int nr, void * addr)
    3.24 +{
    3.25 +    BITMAP_ENTRY(nr, addr) |= (1UL << BITMAP_SHIFT(nr));
    3.26 +}
    3.27 +
    3.28  #endif /* XC_IA64_SAVE_RESTORE_H */
    3.29  
    3.30  /*