/* callbacks provided by xc_domain_save */
struct save_callbacks {
- /* Called after expiration of checkpoint interval,
+ /*
+ * Called after expiration of checkpoint interval,
* to suspend the guest.
*/
- int (*suspend)(void* data);
+ int (*suspend)(void *data);
/*
* Called before and after every batch of page data sent during
* xc_domain_save then flushes the output buffer, while the
* guest continues to run.
*/
- int (*postcopy)(void* data);
+ int (*postcopy)(void *data);
/*
* Called after the memory checkpoint has been flushed
* 0: terminate checkpointing gracefully
* 1: take another checkpoint
*/
- int (*checkpoint)(void* data);
+ int (*checkpoint)(void *data);
/*
* Called after the checkpoint callback.
* 0: terminate checkpointing gracefully
* 1: take another checkpoint
*/
- int (*wait_checkpoint)(void* data);
+ int (*wait_checkpoint)(void *data);
/* Enable qemu-dm logging dirty pages to xen */
int (*switch_qemu_logdirty)(uint32_t domid, unsigned enable, void *data); /* HVM only */
/* to be provided as the last argument to each callback function */
- void* data;
+ void *data;
};
/* Type of stream. Plain, or using a continuous replication protocol? */
/* callbacks provided by xc_domain_restore */
struct restore_callbacks {
- /* Called after a new checkpoint to suspend the guest.
- */
- int (*suspend)(void* data);
+ /* Called after a new checkpoint to suspend the guest. */
+ int (*suspend)(void *data);
- /* Called after the secondary vm is ready to resume.
+ /*
+ * Called after the secondary vm is ready to resume.
* Callback function resumes the guest & the device model,
* returns to xc_domain_restore.
*/
- int (*postcopy)(void* data);
+ int (*postcopy)(void *data);
- /* A checkpoint record has been found in the stream.
- * returns: */
+ /*
+ * A checkpoint record has been found in the stream.
+ * returns:
+ */
#define XGR_CHECKPOINT_ERROR 0 /* Terminate processing */
#define XGR_CHECKPOINT_SUCCESS 1 /* Continue reading more data from the stream */
#define XGR_CHECKPOINT_FAILOVER 2 /* Failover and resume VM */
- int (*checkpoint)(void* data);
+ int (*checkpoint)(void *data);
/*
* Called after the checkpoint callback.
* 0: terminate checkpointing gracefully
* 1: take another checkpoint
*/
- int (*wait_checkpoint)(void* data);
+ int (*wait_checkpoint)(void *data);
/*
* callback to send store gfn and console gfn to xl
void *data);
/* to be provided as the last argument to each callback function */
- void* data;
+ void *data;
};
/**
#include <xen-tools/libs.h>
-static const char *dhdr_types[] =
+static const char *const dhdr_types[] =
{
[DHDR_TYPE_X86_PV] = "x86 PV",
[DHDR_TYPE_X86_HVM] = "x86 HVM",
return "Reserved";
}
-static const char *mandatory_rec_types[] =
+static const char *const mandatory_rec_types[] =
{
[REC_TYPE_END] = "End",
[REC_TYPE_PAGE_DATA] = "Page data",
xc_interface *xch = ctx->xch;
typeof(rec->length) combined_length = rec->length + sz;
size_t record_length = ROUNDUP(combined_length, REC_ALIGN_ORDER);
- struct iovec parts[] =
- {
+ struct iovec parts[] = {
{ &rec->type, sizeof(rec->type) },
{ &combined_length, sizeof(combined_length) },
{ rec->data, rec->length },
{ buf, sz },
- { (void*)zeroes, record_length - combined_length },
+ { (void *)zeroes, record_length - combined_length },
};
if ( record_length > REC_LENGTH_MAX )
struct precopy_stats stats;
xen_pfn_t *batch_pfns;
- unsigned nr_batch_pfns;
+ unsigned int nr_batch_pfns;
unsigned long *deferred_pages;
unsigned long nr_deferred_pages;
xc_hypercall_buffer_t dirty_bitmap_hbuf;
*/
#define DEFAULT_BUF_RECORDS 1024
struct xc_sr_record *buffered_records;
- unsigned allocated_rec_num;
- unsigned buffered_rec_num;
+ unsigned int allocated_rec_num;
+ unsigned int buffered_rec_num;
/*
* Xenstore and Console parameters.
{
struct xc_sr_blob basic, extd, xsave, msr;
} *vcpus;
- unsigned nr_vcpus;
+ unsigned int nr_vcpus;
} restore;
};
} x86_pv;
* x86_pv_localise_page() if we receive pagetables frames ahead of the
* contents of the frames they point at.
*/
-int populate_pfns(struct xc_sr_context *ctx, unsigned count,
+int populate_pfns(struct xc_sr_context *ctx, unsigned int count,
const xen_pfn_t *original_pfns, const uint32_t *types);
#endif
{
xc_interface *xch = ctx->xch;
struct xc_sr_rec_x86_tsc_info tsc = {};
- struct xc_sr_record rec =
- {
+ struct xc_sr_record rec = {
.type = REC_TYPE_X86_TSC_INFO,
.length = sizeof(tsc),
- .data = &tsc
+ .data = &tsc,
};
if ( xc_domain_get_tsc_info(xch, ctx->domid, &tsc.mode,
bool mfn_in_pseudophysmap(struct xc_sr_context *ctx, xen_pfn_t mfn)
{
- return ( (mfn <= ctx->x86_pv.max_mfn) &&
- (mfn_to_pfn(ctx, mfn) <= ctx->x86_pv.max_pfn) &&
- (xc_pfn_to_mfn(mfn_to_pfn(ctx, mfn), ctx->x86_pv.p2m,
- ctx->x86_pv.width) == mfn) );
+ return ((mfn <= ctx->x86_pv.max_mfn) &&
+ (mfn_to_pfn(ctx, mfn) <= ctx->x86_pv.max_pfn) &&
+ (xc_pfn_to_mfn(mfn_to_pfn(ctx, mfn), ctx->x86_pv.p2m,
+ ctx->x86_pv.width) == mfn));
}
void dump_bad_pseudophysmap_entry(struct xc_sr_context *ctx, xen_pfn_t mfn)
{
struct xen_machphys_mfn_list xmml = {
.max_extents = 1,
- .extent_start = { &ctx->x86_pv.compat_m2p_mfn0 }
+ .extent_start = { &ctx->x86_pv.compat_m2p_mfn0 },
};
rc = do_memory_op(xch, XENMEM_machphys_compat_mfn_list,
rc = 0;
DPRINTF("max_mfn %#lx", ctx->x86_pv.max_mfn);
-err:
+ err:
free(entries);
free(extents_start);
* unpopulated subset. If types is NULL, no page type checking is performed
* and all unpopulated pfns are populated.
*/
-int populate_pfns(struct xc_sr_context *ctx, unsigned count,
+int populate_pfns(struct xc_sr_context *ctx, unsigned int count,
const xen_pfn_t *original_pfns, const uint32_t *types)
{
xc_interface *xch = ctx->xch;
xen_pfn_t *mfns = malloc(count * sizeof(*mfns)),
*pfns = malloc(count * sizeof(*pfns));
- unsigned i, nr_pfns = 0;
+ unsigned int i, nr_pfns = 0;
int rc = -1;
if ( !mfns || !pfns )
* stream, populate and record their types, map the relevant subset and copy
* the data into the guest.
*/
-static int process_page_data(struct xc_sr_context *ctx, unsigned count,
+static int process_page_data(struct xc_sr_context *ctx, unsigned int count,
xen_pfn_t *pfns, uint32_t *types, void *page_data)
{
xc_interface *xch = ctx->xch;
int *map_errs = malloc(count * sizeof(*map_errs));
int rc;
void *mapping = NULL, *guest_page = NULL;
- unsigned i, /* i indexes the pfns from the record. */
- j, /* j indexes the subset of pfns we decide to map. */
+ unsigned int i, /* i indexes the pfns from the record. */
+ j, /* j indexes the subset of pfns we decide to map. */
nr_pages = 0;
if ( !mfns || !map_errs )
if ( nr_pages == 0 )
goto done;
- mapping = guest_page = xenforeignmemory_map(xch->fmem,
- ctx->domid, PROT_READ | PROT_WRITE,
+ mapping = guest_page = xenforeignmemory_map(
+ xch->fmem, ctx->domid, PROT_READ | PROT_WRITE,
nr_pages, mfns, map_errs);
if ( !mapping )
{
{
xc_interface *xch = ctx->xch;
struct xc_sr_rec_page_data_header *pages = rec->data;
- unsigned i, pages_of_data = 0;
+ unsigned int i, pages_of_data = 0;
int rc = -1;
xen_pfn_t *pfns = NULL, pfn;
{
xc_interface *xch = ctx->xch;
int rc = -1;
- unsigned count, written;
+ unsigned int count, written;
uint64_t i, *pfns = NULL;
struct iovec *iov = NULL;
xc_shadow_op_stats_t stats = { 0, ctx->restore.p2m_size };
- struct xc_sr_record rec =
- {
+ struct xc_sr_record rec = {
.type = REC_TYPE_CHECKPOINT_DIRTY_PFN_LIST,
};
DECLARE_HYPERCALL_BUFFER_SHADOW(unsigned long, dirty_bitmap,
{
xc_interface *xch = ctx->xch;
int rc = 0, ret;
- unsigned i;
+ unsigned int i;
if ( ctx->stream_type == XC_STREAM_PLAIN )
{
/* Wait for a new checkpoint */
ret = ctx->restore.callbacks->wait_checkpoint(
- ctx->restore.callbacks->data);
+ ctx->restore.callbacks->data);
HANDLE_CALLBACK_RETURN_VALUE(ret);
/* suspend secondary vm */
static int buffer_record(struct xc_sr_context *ctx, struct xc_sr_record *rec)
{
xc_interface *xch = ctx->xch;
- unsigned new_alloc_num;
+ unsigned int new_alloc_num;
struct xc_sr_record *p;
if ( ctx->restore.buffered_rec_num >= ctx->restore.allocated_rec_num )
if ( ctx->stream_type == XC_STREAM_COLO )
{
- dirty_bitmap = xc_hypercall_buffer_alloc_pages(xch, dirty_bitmap,
- NRPAGES(bitmap_size(ctx->restore.p2m_size)));
+ dirty_bitmap = xc_hypercall_buffer_alloc_pages(
+ xch, dirty_bitmap, NRPAGES(bitmap_size(ctx->restore.p2m_size)));
if ( !dirty_bitmap )
{
static void cleanup(struct xc_sr_context *ctx)
{
xc_interface *xch = ctx->xch;
- unsigned i;
+ unsigned int i;
DECLARE_HYPERCALL_BUFFER_SHADOW(unsigned long, dirty_bitmap,
&ctx->restore.dirty_bitmap_hbuf);
free(ctx->restore.buffered_records[i].data);
if ( ctx->stream_type == XC_STREAM_COLO )
- xc_hypercall_buffer_free_pages(xch, dirty_bitmap,
- NRPAGES(bitmap_size(ctx->restore.p2m_size)));
+ xc_hypercall_buffer_free_pages(
+ xch, dirty_bitmap, NRPAGES(bitmap_size(ctx->restore.p2m_size)));
+
free(ctx->restore.buffered_records);
free(ctx->restore.populated_pfns);
+
if ( ctx->restore.ops.cleanup(ctx) )
PERROR("Failed to clean up");
}
rc = 0;
-err:
+ err:
if ( guest_start_info )
munmap(guest_start_info, PAGE_SIZE);
xc_interface *xch = ctx->xch;
vcpu_guest_context_any_t *vcpu = ctx->x86_pv.restore.vcpus[vcpuid].basic.ptr;
xen_pfn_t pfn, mfn;
- unsigned i, gdt_count;
+ unsigned int i, gdt_count;
int rc = -1;
/* Vcpu 0 is special: Convert the suspend record to an mfn. */
{
xc_interface *xch = ctx->xch;
struct xc_sr_x86_pv_restore_vcpu *vcpu;
- unsigned i;
+ unsigned int i;
int rc = 0;
for ( i = 0; i < ctx->x86_pv.restore.nr_vcpus; ++i )
{
xc_interface *xch = ctx->xch;
xen_pfn_t mfn, pfn, *guest_p2m = NULL;
- unsigned i;
+ unsigned int i;
int rc = -1;
for ( i = 0; i < ctx->x86_pv.p2m_frames; ++i )
guest_p2m = xc_map_foreign_pages(xch, ctx->domid, PROT_WRITE,
ctx->x86_pv.p2m_pfns,
- ctx->x86_pv.p2m_frames );
+ ctx->x86_pv.p2m_frames);
if ( !guest_p2m )
{
PERROR("Failed to map p2m frames");
memcpy(guest_p2m, ctx->x86_pv.p2m,
(ctx->x86_pv.max_pfn + 1) * ctx->x86_pv.width);
rc = 0;
+
err:
if ( guest_p2m )
munmap(guest_p2m, ctx->x86_pv.p2m_frames * PAGE_SIZE);
{
xc_interface *xch = ctx->xch;
struct xc_sr_rec_x86_pv_p2m_frames *data = rec->data;
- unsigned start, end, x, fpp = PAGE_SIZE / ctx->x86_pv.width;
+ unsigned int start, end, x, fpp = PAGE_SIZE / ctx->x86_pv.width;
int rc;
if ( !ctx->x86_pv.restore.seen_pv_info )
struct xc_sr_record *rec)
{
xc_interface *xch = ctx->xch;
- unsigned i;
+ unsigned int i;
int rc = -1;
shared_info_any_t *guest_shinfo = NULL;
const shared_info_any_t *old_shinfo = rec->data;
MEMSET_ARRAY_FIELD(guest_shinfo, evtchn_mask, 0xff, ctx->x86_pv.width);
rc = 0;
- err:
+ err:
if ( guest_shinfo )
munmap(guest_shinfo, PAGE_SIZE);
xc_interface *xch = ctx->xch;
uint64_t *table = page;
uint64_t pte;
- unsigned i, to_populate;
+ unsigned int i, to_populate;
xen_pfn_t pfns[(PAGE_SIZE / sizeof(uint64_t))];
type &= XEN_DOMCTL_PFINFO_LTABTYPE_MASK;
if ( ctx->x86_pv.restore.vcpus )
{
- unsigned i;
+ unsigned int i;
for ( i = 0; i < ctx->x86_pv.restore.nr_vcpus; ++i )
{
{
xc_interface *xch = ctx->xch;
int32_t xen_version = xc_version(xch, XENVER_version, NULL);
- struct xc_sr_ihdr ihdr =
- {
- .marker = IHDR_MARKER,
- .id = htonl(IHDR_ID),
- .version = htonl(IHDR_VERSION),
- .options = htons(IHDR_OPT_LITTLE_ENDIAN),
- };
- struct xc_sr_dhdr dhdr =
- {
- .type = guest_type,
- .page_shift = XC_PAGE_SHIFT,
- .xen_major = (xen_version >> 16) & 0xffff,
- .xen_minor = (xen_version) & 0xffff,
- };
+ struct xc_sr_ihdr ihdr = {
+ .marker = IHDR_MARKER,
+ .id = htonl(IHDR_ID),
+ .version = htonl(IHDR_VERSION),
+ .options = htons(IHDR_OPT_LITTLE_ENDIAN),
+ };
+ struct xc_sr_dhdr dhdr = {
+ .type = guest_type,
+ .page_shift = XC_PAGE_SHIFT,
+ .xen_major = (xen_version >> 16) & 0xffff,
+ .xen_minor = (xen_version) & 0xffff,
+ };
if ( xen_version < 0 )
{
*/
static int write_end_record(struct xc_sr_context *ctx)
{
- struct xc_sr_record end = { REC_TYPE_END, 0, NULL };
+ struct xc_sr_record end = { .type = REC_TYPE_END };
return write_record(ctx, &end);
}
*/
static int write_checkpoint_record(struct xc_sr_context *ctx)
{
- struct xc_sr_record checkpoint = { REC_TYPE_CHECKPOINT, 0, NULL };
+ struct xc_sr_record checkpoint = { .type = REC_TYPE_CHECKPOINT };
return write_record(ctx, &checkpoint);
}
void **guest_data = NULL;
void **local_pages = NULL;
int *errors = NULL, rc = -1;
- unsigned i, p, nr_pages = 0, nr_pages_mapped = 0;
- unsigned nr_pfns = ctx->save.nr_batch_pfns;
+ unsigned int i, p, nr_pages = 0, nr_pages_mapped = 0;
+ unsigned int nr_pfns = ctx->save.nr_batch_pfns;
void *page, *orig_page;
uint64_t *rec_pfns = NULL;
struct iovec *iov = NULL; int iovcnt = 0;
struct xc_sr_rec_page_data_header hdr = { 0 };
- struct xc_sr_record rec =
- {
+ struct xc_sr_record rec = {
.type = REC_TYPE_PAGE_DATA,
};
if ( nr_pages > 0 )
{
- guest_mapping = xenforeignmemory_map(xch->fmem,
- ctx->domid, PROT_READ, nr_pages, mfns, errors);
+ guest_mapping = xenforeignmemory_map(
+ xch->fmem, ctx->domid, PROT_READ, nr_pages, mfns, errors);
if ( !guest_mapping )
{
PERROR("Failed to map guest pages");
static int simple_precopy_policy(struct precopy_stats stats, void *user)
{
return ((stats.dirty_count >= 0 &&
- stats.dirty_count < SPP_TARGET_DIRTY_COUNT) ||
+ stats.dirty_count < SPP_TARGET_DIRTY_COUNT) ||
stats.iteration >= SPP_MAX_ITERATIONS)
? XGS_POLICY_STOP_AND_COPY
: XGS_POLICY_CONTINUE_PRECOPY;
if ( rc )
goto out;
- ctx->save.stats = (struct precopy_stats)
- { .dirty_count = ctx->save.p2m_size };
+ ctx->save.stats = (struct precopy_stats){
+ .dirty_count = ctx->save.p2m_size,
+ };
policy_stats = &ctx->save.stats;
if ( precopy_policy == NULL )
- precopy_policy = simple_precopy_policy;
+ precopy_policy = simple_precopy_policy;
bitmap_set(dirty_bitmap, ctx->save.p2m_size);
policy_decision = precopy_policy(*policy_stats, data);
if ( policy_decision != XGS_POLICY_CONTINUE_PRECOPY )
- break;
+ break;
if ( xc_shadow_control(
xch, ctx->domid, XEN_DOMCTL_SHADOW_OP_CLEAN,
static int colo_merge_secondary_dirty_bitmap(struct xc_sr_context *ctx)
{
xc_interface *xch = ctx->xch;
- struct xc_sr_record rec = { 0, 0, NULL };
+ struct xc_sr_record rec;
uint64_t *pfns = NULL;
uint64_t pfn;
- unsigned count, i;
+ unsigned int count, i;
int rc;
DECLARE_HYPERCALL_BUFFER_SHADOW(unsigned long, dirty_bitmap,
&ctx->save.dirty_bitmap_hbuf);
if ( rec.type != REC_TYPE_CHECKPOINT_DIRTY_PFN_LIST )
{
- PERROR("Expect dirty bitmap record, but received %u", rec.type );
+ PERROR("Expect dirty bitmap record, but received %u", rec.type);
rc = -1;
goto err;
}
if ( rec.length % sizeof(*pfns) )
{
- PERROR("Invalid dirty pfn list record length %u", rec.length );
+ PERROR("Invalid dirty pfn list record length %u", rec.length);
rc = -1;
goto err;
}
for ( i = 0; i < count; i++ )
{
pfn = pfns[i];
- if (pfn > ctx->save.p2m_size)
+ if ( pfn > ctx->save.p2m_size )
{
PERROR("Invalid pfn 0x%" PRIx64, pfn);
rc = -1;
xc_interface *xch = ctx->xch;
xc_shadow_op_stats_t stats = { 0, ctx->save.p2m_size };
int rc;
- struct xc_sr_record rec =
- {
- .type = REC_TYPE_VERIFY,
- .length = 0,
- };
+ struct xc_sr_record rec = { .type = REC_TYPE_VERIFY };
DPRINTF("Enabling verify mode");
goto out;
}
- out:
+ out:
return rc;
}
goto err;
dirty_bitmap = xc_hypercall_buffer_alloc_pages(
- xch, dirty_bitmap, NRPAGES(bitmap_size(ctx->save.p2m_size)));
+ xch, dirty_bitmap, NRPAGES(bitmap_size(ctx->save.p2m_size)));
ctx->save.batch_pfns = malloc(MAX_BATCH_SIZE *
sizeof(*ctx->save.batch_pfns));
ctx->save.deferred_pages = calloc(1, bitmap_size(ctx->save.p2m_size));
};
int xc_domain_save(xc_interface *xch, int io_fd, uint32_t dom,
- uint32_t flags, struct save_callbacks* callbacks,
+ uint32_t flags, struct save_callbacks *callbacks,
xc_stream_type_t stream_type, int recv_fd)
{
struct xc_sr_context ctx = {
{
xc_interface *xch = ctx->xch;
int rc, hvm_buf_size;
- struct xc_sr_record hvm_rec =
- {
+ struct xc_sr_record hvm_rec = {
.type = REC_TYPE_HVM_CONTEXT,
};
static int x86_hvm_normalise_page(struct xc_sr_context *ctx,
xen_pfn_t type, void **page)
{
- /* no-op */
return 0;
}
static int x86_hvm_start_of_stream(struct xc_sr_context *ctx)
{
- /* no-op */
return 0;
}
static int x86_hvm_start_of_checkpoint(struct xc_sr_context *ctx)
{
- /* no-op */
return 0;
}
static int x86_hvm_check_vm_state(struct xc_sr_context *ctx)
{
- /* no-op */
return 0;
}
size_t n_mfns)
{
xc_interface *xch = ctx->xch;
- unsigned x;
+ unsigned int x;
ctx->x86_pv.p2m = xc_map_foreign_pages(xch, ctx->domid, PROT_READ,
mfns, n_mfns);
*/
xc_interface *xch = ctx->xch;
int rc = -1;
- unsigned x, saved_x, fpp, fll_entries, fl_entries;
+ unsigned int x, saved_x, fpp, fll_entries, fl_entries;
xen_pfn_t fll_mfn, saved_mfn, max_pfn;
xen_pfn_t *local_fll = NULL;
/* Map the p2m leaves themselves. */
rc = map_p2m_leaves(ctx, local_fl, fl_entries);
-err:
-
+ err:
free(local_fl);
if ( guest_fl )
munmap(guest_fl, fll_entries * PAGE_SIZE);
xen_pfn_t p2m_mfn, mfn, saved_mfn, max_pfn;
uint64_t *ptes = NULL;
xen_pfn_t *mfns = NULL;
- unsigned fpp, n_pages, level, shift, idx_start, idx_end, idx, saved_idx;
+ unsigned int fpp, n_pages, level, shift, idx_start, idx_end, idx, saved_idx;
int rc = -1;
p2m_mfn = cr3_to_mfn(ctx, p2m_cr3);
/* Map the p2m leaves themselves. */
rc = map_p2m_leaves(ctx, mfns, idx_end - idx_start + 1);
-err:
+ err:
free(mfns);
if ( ptes )
munmap(ptes, n_pages * PAGE_SIZE);
{
xc_interface *xch = ctx->xch;
xen_pfn_t mfn, pfn;
- unsigned i, gdt_count;
+ unsigned int i, gdt_count;
int rc = -1;
vcpu_guest_context_any_t vcpu;
- struct xc_sr_rec_x86_pv_vcpu_hdr vhdr =
- {
+ struct xc_sr_rec_x86_pv_vcpu_hdr vhdr = {
.vcpu_id = id,
};
- struct xc_sr_record rec =
- {
+ struct xc_sr_record rec = {
.type = REC_TYPE_X86_PV_VCPU_BASIC,
.length = sizeof(vhdr),
.data = &vhdr,
static int write_one_vcpu_extended(struct xc_sr_context *ctx, uint32_t id)
{
xc_interface *xch = ctx->xch;
- struct xc_sr_rec_x86_pv_vcpu_hdr vhdr =
- {
+ struct xc_sr_rec_x86_pv_vcpu_hdr vhdr = {
.vcpu_id = id,
};
- struct xc_sr_record rec =
- {
+ struct xc_sr_record rec = {
.type = REC_TYPE_X86_PV_VCPU_EXTENDED,
.length = sizeof(vhdr),
.data = &vhdr,
};
- struct xen_domctl domctl =
- {
+ struct xen_domctl domctl = {
.cmd = XEN_DOMCTL_get_ext_vcpucontext,
.domain = ctx->domid,
.u.ext_vcpucontext.vcpu = id,
xc_interface *xch = ctx->xch;
int rc = -1;
DECLARE_HYPERCALL_BUFFER(void, buffer);
- struct xc_sr_rec_x86_pv_vcpu_hdr vhdr =
- {
+ struct xc_sr_rec_x86_pv_vcpu_hdr vhdr = {
.vcpu_id = id,
};
- struct xc_sr_record rec =
- {
+ struct xc_sr_record rec = {
.type = REC_TYPE_X86_PV_VCPU_XSAVE,
.length = sizeof(vhdr),
.data = &vhdr,
};
- struct xen_domctl domctl =
- {
+ struct xen_domctl domctl = {
.cmd = XEN_DOMCTL_getvcpuextstate,
.domain = ctx->domid,
.u.vcpuextstate.vcpu = id,
int rc = -1;
size_t buffersz;
DECLARE_HYPERCALL_BUFFER(void, buffer);
- struct xc_sr_rec_x86_pv_vcpu_hdr vhdr =
- {
+ struct xc_sr_rec_x86_pv_vcpu_hdr vhdr = {
.vcpu_id = id,
};
- struct xc_sr_record rec =
- {
+ struct xc_sr_record rec = {
.type = REC_TYPE_X86_PV_VCPU_MSRS,
.length = sizeof(vhdr),
.data = &vhdr,
};
- struct xen_domctl domctl =
- {
+ struct xen_domctl domctl = {
.cmd = XEN_DOMCTL_get_vcpu_msrs,
.domain = ctx->domid,
.u.vcpu_msrs.vcpu = id,
*/
static int write_x86_pv_info(struct xc_sr_context *ctx)
{
- struct xc_sr_rec_x86_pv_info info =
- {
- .guest_width = ctx->x86_pv.width,
- .pt_levels = ctx->x86_pv.levels,
- };
- struct xc_sr_record rec =
- {
- .type = REC_TYPE_X86_PV_INFO,
- .length = sizeof(info),
- .data = &info
- };
+ struct xc_sr_rec_x86_pv_info info = {
+ .guest_width = ctx->x86_pv.width,
+ .pt_levels = ctx->x86_pv.levels,
+ };
+ struct xc_sr_record rec = {
+ .type = REC_TYPE_X86_PV_INFO,
+ .length = sizeof(info),
+ .data = &info,
+ };
return write_record(ctx, &rec);
}
static int write_x86_pv_p2m_frames(struct xc_sr_context *ctx)
{
xc_interface *xch = ctx->xch;
- int rc; unsigned i;
+ int rc; unsigned int i;
size_t datasz = ctx->x86_pv.p2m_frames * sizeof(uint64_t);
uint64_t *data = NULL;
- struct xc_sr_rec_x86_pv_p2m_frames hdr =
- {
- .start_pfn = 0,
- .end_pfn = ctx->x86_pv.max_pfn,
- };
- struct xc_sr_record rec =
- {
- .type = REC_TYPE_X86_PV_P2M_FRAMES,
- .length = sizeof(hdr),
- .data = &hdr,
- };
+ struct xc_sr_rec_x86_pv_p2m_frames hdr = {
+ .end_pfn = ctx->x86_pv.max_pfn,
+ };
+ struct xc_sr_record rec = {
+ .type = REC_TYPE_X86_PV_P2M_FRAMES,
+ .length = sizeof(hdr),
+ .data = &hdr,
+ };
/* No need to translate if sizeof(uint64_t) == sizeof(xen_pfn_t). */
if ( sizeof(uint64_t) != sizeof(*ctx->x86_pv.p2m_pfns) )
*/
static int write_shared_info(struct xc_sr_context *ctx)
{
- struct xc_sr_record rec =
- {
+ struct xc_sr_record rec = {
.type = REC_TYPE_SHARED_INFO,
.length = PAGE_SIZE,
.data = ctx->x86_pv.shinfo,
{
xc_interface *xch = ctx->xch;
uint64_t pte;
- unsigned i, xen_first = -1, xen_last = -1; /* Indices of Xen mappings. */
+ unsigned int i, xen_first = -1, xen_last = -1; /* Indices of Xen mappings. */
type &= XEN_DOMCTL_PFINFO_LTABTYPE_MASK;
return 0;
}
-/* save_ops function. */
static xen_pfn_t x86_pv_pfn_to_gfn(const struct xc_sr_context *ctx,
xen_pfn_t pfn)
{
rc = normalise_pagetable(ctx, *page, local_page, type);
*page = local_page;
- out:
+ out:
return rc;
}
return 0;
}
-/*
- * save_ops function. Writes PV header records into the stream.
- */
static int x86_pv_start_of_stream(struct xc_sr_context *ctx)
{
int rc;
return x86_pv_check_vm_state_p2m_list(ctx);
}
-/*
- * save_ops function. Cleanup.
- */
static int x86_pv_cleanup(struct xc_sr_context *ctx)
{
free(ctx->x86_pv.p2m_pfns);
if so_far != total_length:
- raise StreamError("Overshot Extended Info size by %d bytes"
- % (so_far - total_length,))
+ raise StreamError("Overshot Extended Info size by %d bytes" %
+ (so_far - total_length, ))
def read_pv_p2m_frames(vm):
fpp = 4096 / vm.width
elif marker > 0:
if marker > legacy.MAX_BATCH:
- raise StreamError("Page batch (%d) exceeded MAX_BATCH (%d)"
- % (marker, legacy.MAX_BATCH))
+ raise StreamError("Page batch (%d) exceeded MAX_BATCH (%d)" %
+ (marker, legacy.MAX_BATCH))
pfns = unpack_ulongs(marker)
# xc_domain_save() leaves many XEN_DOMCTL_PFINFO_XTAB records for
max_id, = unpack_exact("i")
if max_id > legacy.MAX_VCPU_ID:
- raise StreamError("Vcpu max_id out of range: %d > %d"
- % (max_id, legacy.MAX_VCPU_ID))
+ raise StreamError("Vcpu max_id out of range: %d > %d" %
+ (max_id, legacy.MAX_VCPU_ID))
vm.max_vcpu_id = max_id
bitmap = unpack_exact("Q" * ((max_id/64) + 1))
bit_idx += 1
word >>= 1
- info(" Vcpu info: max_id %d, online map %s"
- % (vm.max_vcpu_id, vm.online_vcpu_map))
+ info(" Vcpu info: max_id %d, online map %s" %
+ (vm.max_vcpu_id, vm.online_vcpu_map))
elif marker == legacy.CHUNK_hvm_ident_pt:
_, ident_pt = unpack_exact("=IQ")
[public.HVM_PARAM_NR_IOREQ_SERVER_PAGES, nr_pages])
else:
- raise StreamError("Unrecognised chunk %d" % (marker,))
+ raise StreamError("Unrecognised chunk %d" % (marker, ))
def read_hvm_tail(vm):
try:
vm.p2m_size, = unpack_ulongs(1)
- info("P2M Size: 0x%x" % (vm.p2m_size,))
+ info("P2M Size: 0x%x" % (vm.p2m_size, ))
if vm.libxl:
write_libxl_hdr()
REC_TYPE_x86_pv_vcpu_msrs : "x86 PV vcpu msrs",
REC_TYPE_verify : "Verify",
REC_TYPE_checkpoint : "Checkpoint",
- REC_TYPE_checkpoint_dirty_pfn_list : "Checkpoint dirty pfn list"
+ REC_TYPE_checkpoint_dirty_pfn_list : "Checkpoint dirty pfn list",
}
# page_data
self.unpack_exact(IHDR_FORMAT)
if marker != IHDR_MARKER:
- raise StreamError("Bad image marker: Expected 0x%x, got 0x%x"
- % (IHDR_MARKER, marker))
+ raise StreamError("Bad image marker: Expected 0x%x, got 0x%x" %
+ (IHDR_MARKER, marker))
if ident != IHDR_IDENT:
- raise StreamError("Bad image id: Expected 0x%x, got 0x%x"
- % (IHDR_IDENT, ident))
+ raise StreamError("Bad image id: Expected 0x%x, got 0x%x" %
+ (IHDR_IDENT, ident))
if version != IHDR_VERSION:
- raise StreamError("Unknown image version: Expected %d, got %d"
- % (IHDR_VERSION, version))
+ raise StreamError("Unknown image version: Expected %d, got %d" %
+ (IHDR_VERSION, version))
if options & IHDR_OPT_RESZ_MASK:
- raise StreamError("Reserved bits set in image options field: 0x%x"
- % (options & IHDR_OPT_RESZ_MASK))
+ raise StreamError("Reserved bits set in image options field: 0x%x" %
+ (options & IHDR_OPT_RESZ_MASK))
if res1 != 0 or res2 != 0:
- raise StreamError("Reserved bits set in image header: 0x%04x:0x%08x"
- % (res1, res2))
+ raise StreamError(
+ "Reserved bits set in image header: 0x%04x:0x%08x" %
+ (res1, res2))
if ( (sys.byteorder == "little") and
((options & IHDR_OPT_BIT_ENDIAN) != IHDR_OPT_LE) ):
raise StreamError("Unrecognised domain type 0x%x" % (gtype, ))
if res1 != 0:
- raise StreamError("Reserved bits set in domain header 0x%04x"
- % (res1, ))
+ raise StreamError("Reserved bits set in domain header 0x%04x" %
+ (res1, ))
if page_shift != 12:
- raise StreamError("Page shift expected to be 12. Got %d"
- % (page_shift, ))
+ raise StreamError("Page shift expected to be 12. Got %d" %
+ (page_shift, ))
if major == 0:
- self.info("Domain Header: legacy converted %s"
- % (dhdr_type_to_str[gtype], ))
+ self.info("Domain Header: legacy converted %s" %
+ (dhdr_type_to_str[gtype], ))
else:
- self.info("Domain Header: %s from Xen %d.%d"
- % (dhdr_type_to_str[gtype], major, minor))
+ self.info("Domain Header: %s from Xen %d.%d" %
+ (dhdr_type_to_str[gtype], major, minor))
def verify_record(self):
if rtype != REC_TYPE_page_data:
if self.squashed_pagedata_records > 0:
- self.info("Squashed %d Page Data records together"
- % (self.squashed_pagedata_records, ))
+ self.info("Squashed %d Page Data records together" %
+ (self.squashed_pagedata_records, ))
self.squashed_pagedata_records = 0
- self.info("Libxc Record: %s, length %d"
- % (rec_type_to_str[rtype], length))
+ self.info("Libxc Record: %s, length %d" %
+ (rec_type_to_str[rtype], length))
else:
self.squashed_pagedata_records += 1
raise StreamError("Padding containing non0 bytes found")
if rtype not in record_verifiers:
- raise RuntimeError("No verification function for libxc record '%s'"
- % rec_type_to_str[rtype])
+ raise RuntimeError(
+ "No verification function for libxc record '%s'" %
+ rec_type_to_str[rtype])
else:
record_verifiers[rtype](self, content[:length])
minsz = calcsize(PAGE_DATA_FORMAT)
if len(content) <= minsz:
- raise RecordError("PAGE_DATA record must be at least %d bytes long"
- % (minsz, ))
+ raise RecordError(
+ "PAGE_DATA record must be at least %d bytes long" % (minsz, ))
count, res1 = unpack(PAGE_DATA_FORMAT, content[:minsz])
if res1 != 0:
- raise StreamError("Reserved bits set in PAGE_DATA record 0x%04x"
- % (res1, ))
+ raise StreamError(
+ "Reserved bits set in PAGE_DATA record 0x%04x" % (res1, ))
pfnsz = count * 8
if (len(content) - minsz) < pfnsz:
- raise RecordError("PAGE_DATA record must contain a pfn record for "
- "each count")
+ raise RecordError(
+ "PAGE_DATA record must contain a pfn record for each count")
- pfns = list(unpack("=%dQ" % (count,), content[minsz:minsz + pfnsz]))
+ pfns = list(unpack("=%dQ" % (count, ), content[minsz:minsz + pfnsz]))
nr_pages = 0
for idx, pfn in enumerate(pfns):
if pfn & PAGE_DATA_PFN_RESZ_MASK:
- raise RecordError("Reserved bits set in pfn[%d]: 0x%016x",
- idx, pfn & PAGE_DATA_PFN_RESZ_MASK)
+ raise RecordError("Reserved bits set in pfn[%d]: 0x%016x" %
+ (idx, pfn & PAGE_DATA_PFN_RESZ_MASK))
if pfn >> PAGE_DATA_TYPE_SHIFT in (5, 6, 7, 8):
- raise RecordError("Invalid type value in pfn[%d]: 0x%016x",
- idx, pfn & PAGE_DATA_TYPE_LTAB_MASK)
+ raise RecordError("Invalid type value in pfn[%d]: 0x%016x" %
+ (idx, pfn & PAGE_DATA_TYPE_LTAB_MASK))
# We expect page data for each normal page or pagetable
if PAGE_DATA_TYPE_NOTAB <= (pfn & PAGE_DATA_TYPE_LTABTYPE_MASK) \
pagesz = nr_pages * 4096
if len(content) != minsz + pfnsz + pagesz:
- raise RecordError("Expected %u + %u + %u, got %u"
- % (minsz, pfnsz, pagesz, len(content)))
+ raise RecordError("Expected %u + %u + %u, got %u" %
+ (minsz, pfnsz, pagesz, len(content)))
def verify_record_x86_pv_info(self, content):
expectedsz = calcsize(X86_PV_INFO_FORMAT)
if len(content) != expectedsz:
- raise RecordError("x86_pv_info: expected length of %d, got %d"
- % (expectedsz, len(content)))
+ raise RecordError("x86_pv_info: expected length of %d, got %d" %
+ (expectedsz, len(content)))
width, levels, res1, res2 = unpack(X86_PV_INFO_FORMAT, content)
raise RecordError("Expected levels of 3 or 4, got %d" % (levels, ))
if res1 != 0 or res2 != 0:
- raise StreamError("Reserved bits set in X86_PV_INFO: 0x%04x 0x%08x"
- % (res1, res2))
+ raise StreamError(
+ "Reserved bits set in X86_PV_INFO: 0x%04x 0x%08x" %
+ (res1, res2))
bitness = {4:32, 8:64}[width]
self.info(" %sbit guest, %d levels of pagetables" % (bitness, levels))
" least 8 bytes long")
if len(content) % 8 != 0:
- raise RecordError("Length expected to be a multiple of 8, not %d"
- % (len(content), ))
+ raise RecordError("Length expected to be a multiple of 8, not %d" %
+ (len(content), ))
start, end = unpack("=II", content[:8])
self.info(" Start pfn 0x%x, End 0x%x" % (start, end))
minsz = calcsize(X86_PV_VCPU_HDR_FORMAT)
if len(content) < minsz:
- raise RecordError("X86_PV_VCPU_%s record length must be at least %d"
- " bytes long" % (name, minsz))
+ raise RecordError(
+ "X86_PV_VCPU_%s record length must be at least %d bytes long" %
+ (name, minsz))
if len(content) == minsz:
- self.info("Warning: X86_PV_VCPU_%s record with zero content"
- % (name, ))
+ self.info("Warning: X86_PV_VCPU_%s record with zero content" %
+ (name, ))
vcpuid, res1 = unpack(X86_PV_VCPU_HDR_FORMAT, content[:minsz])
if res1 != 0:
raise StreamError(
- "Reserved bits set in x86_pv_vcpu_%s record 0x%04x"
- % (name, res1))
+ "Reserved bits set in x86_pv_vcpu_%s record 0x%04x" %
+ (name, res1))
- self.info(" vcpu%d %s context, %d bytes"
- % (vcpuid, name, len(content) - minsz))
+ self.info(" vcpu%d %s context, %d bytes" %
+ (vcpuid, name, len(content) - minsz))
def verify_record_shared_info(self, content):
""" shared info record """
- if len(content) != 4096:
- raise RecordError("Length expected to be 4906 bytes, not %d"
- % (len(content), ))
+ contentsz = len(content)
+ if contentsz != 4096:
+ raise RecordError("Length expected to be 4096 bytes, not %d" %
+ (contentsz, ))
def verify_record_tsc_info(self, content):
mode, khz, nsec, incarn, res1 = unpack(X86_TSC_INFO_FORMAT, content)
if res1 != 0:
- raise StreamError("Reserved bits set in X86_TSC_INFO: 0x%08x"
- % (res1, ))
+ raise StreamError("Reserved bits set in X86_TSC_INFO: 0x%08x" %
+ (res1, ))
- self.info(" Mode %u, %u kHz, %u ns, incarnation %d"
- % (mode, khz, nsec, incarn))
+ self.info(" Mode %u, %u kHz, %u ns, incarnation %d" %
+ (mode, khz, nsec, incarn))
def verify_record_hvm_context(self, content):
if len(content) != 0:
raise RecordError("Checkpoint record with non-zero length")
+
def verify_record_checkpoint_dirty_pfn_list(self, content):
""" checkpoint dirty pfn list """
raise RecordError("Found checkpoint dirty pfn list record in stream")
REC_TYPE_emulator_xenstore_data : "Emulator xenstore data",
REC_TYPE_emulator_context : "Emulator context",
REC_TYPE_checkpoint_end : "Checkpoint end",
- REC_TYPE_checkpoint_state : "Checkpoint state"
+ REC_TYPE_checkpoint_state : "Checkpoint state",
}
# emulator_* header
ident, version, options = self.unpack_exact(HDR_FORMAT)
if ident != HDR_IDENT:
- raise StreamError("Bad image id: Expected 0x%x, got 0x%x"
- % (HDR_IDENT, ident))
+ raise StreamError("Bad image id: Expected 0x%x, got 0x%x" %
+ (HDR_IDENT, ident))
if version != HDR_VERSION:
- raise StreamError("Unknown image version: Expected %d, got %d"
- % (HDR_VERSION, version))
+ raise StreamError("Unknown image version: Expected %d, got %d" %
+ (HDR_VERSION, version))
if options & HDR_OPT_RESZ_MASK:
- raise StreamError("Reserved bits set in image options field: 0x%x"
- % (options & HDR_OPT_RESZ_MASK))
+ raise StreamError("Reserved bits set in image options field: 0x%x" %
+ (options & HDR_OPT_RESZ_MASK))
if ( (sys.byteorder == "little") and
((options & HDR_OPT_BIT_ENDIAN) != HDR_OPT_LE) ):
if rtype not in rec_type_to_str:
raise StreamError("Unrecognised record type %x" % (rtype, ))
- self.info("Libxl Record: %s, length %d"
- % (rec_type_to_str[rtype], length))
+ self.info("Libxl Record: %s, length %d" %
+ (rec_type_to_str[rtype], length))
contentsz = (length + 7) & ~7
content = self.rdexact(contentsz)
raise StreamError("Padding containing non0 bytes found")
if rtype not in record_verifiers:
- raise RuntimeError("No verification function for libxl record '%s'"
- % rec_type_to_str[rtype])
+ raise RuntimeError(
+ "No verification function for libxl record '%s'" %
+ rec_type_to_str[rtype])
else:
record_verifiers[rtype](self, content[:length])
minsz = calcsize(EMULATOR_HEADER_FORMAT)
if len(content) < minsz:
- raise RecordError("Length must be at least %d bytes, got %d"
- % (minsz, len(content)))
+ raise RecordError("Length must be at least %d bytes, got %d" %
+ (minsz, len(content)))
emu_id, emu_idx = unpack(EMULATOR_HEADER_FORMAT, content[:minsz])
if emu_id not in emulator_id_to_str:
raise RecordError("Unrecognised emulator id 0x%x" % (emu_id, ))
- self.info("Emulator Xenstore Data (%s, idx %d)"
- % (emulator_id_to_str[emu_id], emu_idx))
+ self.info("Emulator Xenstore Data (%s, idx %d)" %
+ (emulator_id_to_str[emu_id], emu_idx))
# Chop off the emulator header
content = content[minsz:]
parts = content[:-1].split("\x00")
if (len(parts) % 2) != 0:
- raise RecordError("Expected an even number of strings, got %d"
- % (len(parts), ))
+ raise RecordError("Expected an even number of strings, got %d" %
+ (len(parts), ))
for key, val in zip(parts[0::2], parts[1::2]):
self.info(" '%s' = '%s'" % (key, val))
minsz = calcsize(EMULATOR_HEADER_FORMAT)
if len(content) < minsz:
- raise RecordError("Length must be at least %d bytes, got %d"
- % (minsz, len(content)))
+ raise RecordError("Length must be at least %d bytes, got %d" %
+ (minsz, len(content)))
emu_id, emu_idx = unpack(EMULATOR_HEADER_FORMAT, content[:minsz])