direct-io.hg
changeset 13740:ffcd586dbaae
[HVM] Save/restore: clean up marshalling code
- All entries are now defined as structs and saved/restored
in self-contained operations.
- Save/restore operations are type-safe, to tie each entry's
typecode to a particular struct and its length.
- Save/restore handlers are registered once per host instead of
per domain.
- Detect buffer overrun before it happens and abort.
Signed-off-by: Tim Deegan <Tim.Deegan@xensource.com>
author:   Tim Deegan <Tim.Deegan@xensource.com>
date:     Wed Jan 31 10:27:10 2007 +0000 (2007-01-31)
parents:  7d3bb465e938
children: 9130206e27f8
files:    xen/arch/x86/domctl.c xen/arch/x86/hvm/hvm.c xen/arch/x86/hvm/i8254.c xen/arch/x86/hvm/intercept.c xen/arch/x86/hvm/svm/svm.c xen/arch/x86/hvm/vioapic.c xen/arch/x86/hvm/vlapic.c xen/arch/x86/hvm/vmx/vmx.c xen/arch/x86/hvm/vpic.c xen/include/asm-x86/hvm/domain.h xen/include/asm-x86/hvm/hvm.h xen/include/asm-x86/hvm/support.h xen/include/public/hvm/save.h
line diff
1.1 --- a/xen/arch/x86/domctl.c Wed Jan 31 10:11:26 2007 +0000 1.2 +++ b/xen/arch/x86/domctl.c Wed Jan 31 10:27:10 2007 +0000 1.3 @@ -302,6 +302,8 @@ long arch_do_domctl( 1.4 ret = -EFAULT; 1.5 if ( copy_from_guest(c, domctl->u.hvmcontext.ctxt, 1) != 0 ) 1.6 goto sethvmcontext_out; 1.7 + c->size = sizeof (c->data); 1.8 + c->cur = 0; 1.9 1.10 ret = -EINVAL; 1.11 if ( !is_hvm_domain(d) ) 1.12 @@ -330,6 +332,7 @@ long arch_do_domctl( 1.13 if ( (c = xmalloc(struct hvm_domain_context)) == NULL ) 1.14 goto gethvmcontext_out; 1.15 memset(c, 0, sizeof(*c)); 1.16 + c->size = sizeof (c->data); 1.17 1.18 ret = -ENODATA; 1.19 if ( !is_hvm_domain(d) )
2.1 --- a/xen/arch/x86/hvm/hvm.c Wed Jan 31 10:11:26 2007 +0000 2.2 +++ b/xen/arch/x86/hvm/hvm.c Wed Jan 31 10:27:10 2007 +0000 2.3 @@ -168,19 +168,11 @@ int hvm_domain_initialise(struct domain 2.4 2.5 void hvm_domain_destroy(struct domain *d) 2.6 { 2.7 - HVMStateEntry *se, *dse; 2.8 pit_deinit(d); 2.9 rtc_deinit(d); 2.10 pmtimer_deinit(d); 2.11 hpet_deinit(d); 2.12 2.13 - se = d->arch.hvm_domain.first_se; 2.14 - while (se) { 2.15 - dse = se; 2.16 - se = se->next; 2.17 - xfree(dse); 2.18 - } 2.19 - 2.20 if ( d->arch.hvm_domain.shared_page_va ) 2.21 unmap_domain_page_global( 2.22 (void *)d->arch.hvm_domain.shared_page_va); 2.23 @@ -189,23 +181,43 @@ void hvm_domain_destroy(struct domain *d 2.24 unmap_domain_page_global((void *)d->arch.hvm_domain.buffered_io_va); 2.25 } 2.26 2.27 -void hvm_save_cpu_ctxt(hvm_domain_context_t *h, void *opaque) 2.28 +static int hvm_save_cpu_ctxt(struct domain *d, hvm_domain_context_t *h) 2.29 { 2.30 - struct vcpu *v = opaque; 2.31 + struct vcpu *v; 2.32 + struct hvm_hw_cpu ctxt; 2.33 2.34 - /* We don't need to save state for a vcpu that is down; the restore 2.35 - * code will leave it down if there is nothing saved. */ 2.36 - if ( test_bit(_VCPUF_down, &v->vcpu_flags) ) 2.37 - return; 2.38 + for_each_vcpu(d, v) 2.39 + { 2.40 + /* We don't need to save state for a vcpu that is down; the restore 2.41 + * code will leave it down if there is nothing saved. 
*/ 2.42 + if ( test_bit(_VCPUF_down, &v->vcpu_flags) ) 2.43 + continue; 2.44 2.45 - hvm_funcs.save_cpu_ctxt(h, opaque); 2.46 + hvm_funcs.save_cpu_ctxt(v, &ctxt); 2.47 + if ( hvm_save_entry(CPU, v->vcpu_id, h, &ctxt) != 0 ) 2.48 + return 1; 2.49 + } 2.50 + return 0; 2.51 } 2.52 2.53 -int hvm_load_cpu_ctxt(hvm_domain_context_t *h, void *opaque, int version) 2.54 +static int hvm_load_cpu_ctxt(struct domain *d, hvm_domain_context_t *h) 2.55 { 2.56 - struct vcpu *v = opaque; 2.57 + int vcpuid; 2.58 + struct vcpu *v; 2.59 + struct hvm_hw_cpu ctxt; 2.60 2.61 - if ( hvm_funcs.load_cpu_ctxt(h, opaque, version) < 0 ) 2.62 + /* Which vcpu is this? */ 2.63 + vcpuid = hvm_load_instance(h); 2.64 + if ( vcpuid > MAX_VIRT_CPUS || (v = d->vcpu[vcpuid]) == NULL ) 2.65 + { 2.66 + gdprintk(XENLOG_ERR, "HVM restore: domain has no vcpu %u\n", vcpuid); 2.67 + return -EINVAL; 2.68 + } 2.69 + 2.70 + if ( hvm_load_entry(CPU, h, &ctxt) != 0 ) 2.71 + return -EINVAL; 2.72 + 2.73 + if ( hvm_funcs.load_cpu_ctxt(v, &ctxt) < 0 ) 2.74 return -EINVAL; 2.75 2.76 /* Auxiliary processors should be woken immediately. */ 2.77 @@ -215,14 +227,12 @@ int hvm_load_cpu_ctxt(hvm_domain_context 2.78 return 0; 2.79 } 2.80 2.81 +HVM_REGISTER_SAVE_RESTORE(CPU, hvm_save_cpu_ctxt, hvm_load_cpu_ctxt); 2.82 + 2.83 int hvm_vcpu_initialise(struct vcpu *v) 2.84 { 2.85 int rc; 2.86 2.87 - hvm_register_savevm(v->domain, "xen_hvm_cpu", v->vcpu_id, 1, 2.88 - hvm_save_cpu_ctxt, hvm_load_cpu_ctxt, 2.89 - (void *)v); 2.90 - 2.91 if ( (rc = vlapic_init(v)) != 0 ) 2.92 return rc; 2.93 2.94 @@ -248,9 +258,6 @@ int hvm_vcpu_initialise(struct vcpu *v) 2.95 pmtimer_init(v, ACPI_PM_TMR_BLK_ADDRESS); 2.96 hpet_init(v); 2.97 2.98 - /* init hvm sharepage */ 2.99 - shpage_init(v->domain, get_sp(v->domain)); 2.100 - 2.101 /* Init guest TSC to start from zero. */ 2.102 hvm_set_guest_time(v, 0); 2.103
3.1 --- a/xen/arch/x86/hvm/i8254.c Wed Jan 31 10:11:26 2007 +0000 3.2 +++ b/xen/arch/x86/hvm/i8254.c Wed Jan 31 10:27:10 2007 +0000 3.3 @@ -411,28 +411,24 @@ static void pit_info(PITState *pit) 3.4 } 3.5 #endif 3.6 3.7 -static void pit_save(hvm_domain_context_t *h, void *opaque) 3.8 +static int pit_save(struct domain *d, hvm_domain_context_t *h) 3.9 { 3.10 - struct domain *d = opaque; 3.11 PITState *pit = &d->arch.hvm_domain.pl_time.vpit; 3.12 3.13 pit_info(pit); 3.14 3.15 /* Save the PIT hardware state */ 3.16 - hvm_put_struct(h, &pit->hw); 3.17 + return hvm_save_entry(PIT, 0, h, &pit->hw); 3.18 } 3.19 3.20 -static int pit_load(hvm_domain_context_t *h, void *opaque, int version_id) 3.21 +static int pit_load(struct domain *d, hvm_domain_context_t *h) 3.22 { 3.23 - struct domain *d = opaque; 3.24 PITState *pit = &d->arch.hvm_domain.pl_time.vpit; 3.25 int i; 3.26 3.27 - if (version_id != 1) 3.28 - return -EINVAL; 3.29 - 3.30 /* Restore the PIT hardware state */ 3.31 - hvm_get_struct(h, &pit->hw); 3.32 + if ( hvm_load_entry(PIT, h, &pit->hw) ) 3.33 + return 1; 3.34 3.35 /* Recreate platform timers from hardware state. There will be some 3.36 * time jitter here, but the wall-clock will have jumped massively, so 3.37 @@ -447,6 +443,8 @@ static int pit_load(hvm_domain_context_t 3.38 return 0; 3.39 } 3.40 3.41 +HVM_REGISTER_SAVE_RESTORE(PIT, pit_save, pit_load); 3.42 + 3.43 static void pit_reset(void *opaque) 3.44 { 3.45 PITState *pit = opaque; 3.46 @@ -474,7 +472,6 @@ void pit_init(struct vcpu *v, unsigned l 3.47 pt++; pt->vcpu = v; 3.48 pt++; pt->vcpu = v; 3.49 3.50 - hvm_register_savevm(v->domain, "xen_hvm_i8254", PIT_BASE, 1, pit_save, pit_load, v->domain); 3.51 register_portio_handler(v->domain, PIT_BASE, 4, handle_pit_io); 3.52 /* register the speaker port */ 3.53 register_portio_handler(v->domain, 0x61, 1, handle_speaker_io);
4.1 --- a/xen/arch/x86/hvm/intercept.c Wed Jan 31 10:11:26 2007 +0000 4.2 +++ b/xen/arch/x86/hvm/intercept.c Wed Jan 31 10:27:10 2007 +0000 4.3 @@ -157,287 +157,174 @@ static inline void hvm_mmio_access(struc 4.4 } 4.5 } 4.6 4.7 - 4.8 -int hvm_register_savevm(struct domain *d, 4.9 - const char *idstr, 4.10 - int instance_id, 4.11 - int version_id, 4.12 - SaveStateHandler *save_state, 4.13 - LoadStateHandler *load_state, 4.14 - void *opaque) 4.15 -{ 4.16 - HVMStateEntry *se, **pse; 4.17 - 4.18 - if ( (se = xmalloc(struct HVMStateEntry)) == NULL ){ 4.19 - printk("allocat hvmstate entry fail.\n"); 4.20 - return -1; 4.21 - } 4.22 +/* List of handlers for various HVM save and restore types */ 4.23 +static struct { 4.24 + hvm_save_handler save; 4.25 + hvm_load_handler load; 4.26 +} hvm_sr_handlers [HVM_SAVE_CODE_MAX + 1] = {{NULL, NULL},}; 4.27 4.28 - safe_strcpy(se->idstr, idstr); 4.29 +/* Init-time function to add entries to that list */ 4.30 +void hvm_register_savevm(uint16_t typecode, 4.31 + hvm_save_handler save_state, 4.32 + hvm_load_handler load_state) 4.33 +{ 4.34 + ASSERT(typecode <= HVM_SAVE_CODE_MAX); 4.35 + ASSERT(hvm_sr_handlers[typecode].save == NULL); 4.36 + ASSERT(hvm_sr_handlers[typecode].load == NULL); 4.37 + hvm_sr_handlers[typecode].save = save_state; 4.38 + hvm_sr_handlers[typecode].load = load_state; 4.39 +} 4.40 4.41 - se->instance_id = instance_id; 4.42 - se->version_id = version_id; 4.43 - se->save_state = save_state; 4.44 - se->load_state = load_state; 4.45 - se->opaque = opaque; 4.46 - se->next = NULL; 4.47 - 4.48 - /* add at the end of list */ 4.49 - pse = &d->arch.hvm_domain.first_se; 4.50 - while (*pse != NULL) 4.51 - pse = &(*pse)->next; 4.52 - *pse = se; 4.53 - return 0; 4.54 -} 4.55 4.56 int hvm_save(struct domain *d, hvm_domain_context_t *h) 4.57 { 4.58 - uint32_t len, len_pos, cur_pos; 4.59 uint32_t eax, ebx, ecx, edx; 4.60 - HVMStateEntry *se; 4.61 - char *chgset; 4.62 + char *c; 4.63 struct hvm_save_header hdr; 4.64 + struct 
hvm_save_end end; 4.65 + hvm_save_handler handler; 4.66 + uint16_t i; 4.67 4.68 hdr.magic = HVM_FILE_MAGIC; 4.69 hdr.version = HVM_FILE_VERSION; 4.70 + 4.71 + /* Save some CPUID bits */ 4.72 cpuid(1, &eax, &ebx, &ecx, &edx); 4.73 hdr.cpuid = eax; 4.74 - hvm_put_struct(h, &hdr); 4.75 4.76 - /* save xen changeset */ 4.77 - chgset = strrchr(XEN_CHANGESET, ' '); 4.78 - if ( chgset ) 4.79 - chgset++; 4.80 - else 4.81 - chgset = XEN_CHANGESET; 4.82 + /* Save xen changeset */ 4.83 + c = strrchr(XEN_CHANGESET, ':'); 4.84 + if ( c ) 4.85 + hdr.changeset = simple_strtoll(c, NULL, 16); 4.86 + else 4.87 + hdr.changeset = -1ULL; /* Unknown */ 4.88 4.89 - len = strlen(chgset); 4.90 - hvm_put_8u(h, len); 4.91 - hvm_put_buffer(h, chgset, len); 4.92 + if ( hvm_save_entry(HEADER, 0, h, &hdr) != 0 ) 4.93 + { 4.94 + gdprintk(XENLOG_ERR, "HVM save: failed to write header\n"); 4.95 + return -1; 4.96 + } 4.97 4.98 - for(se = d->arch.hvm_domain.first_se; se != NULL; se = se->next) { 4.99 - /* ID string */ 4.100 - len = strnlen(se->idstr, sizeof(se->idstr)); 4.101 - hvm_put_8u(h, len); 4.102 - hvm_put_buffer(h, se->idstr, len); 4.103 - 4.104 - hvm_put_32u(h, se->instance_id); 4.105 - hvm_put_32u(h, se->version_id); 4.106 - 4.107 - /* record size */ 4.108 - len_pos = hvm_ctxt_tell(h); 4.109 - hvm_put_32u(h, 0); 4.110 - 4.111 - se->save_state(h, se->opaque); 4.112 - 4.113 - cur_pos = hvm_ctxt_tell(h); 4.114 - len = cur_pos - len_pos - 4; 4.115 - hvm_ctxt_seek(h, len_pos); 4.116 - hvm_put_32u(h, len); 4.117 - hvm_ctxt_seek(h, cur_pos); 4.118 - 4.119 + /* Save all available kinds of state */ 4.120 + for ( i = 0; i <= HVM_SAVE_CODE_MAX; i++ ) 4.121 + { 4.122 + handler = hvm_sr_handlers[i].save; 4.123 + if ( handler != NULL ) 4.124 + { 4.125 + if ( handler(d, h) != 0 ) 4.126 + { 4.127 + gdprintk(XENLOG_ERR, 4.128 + "HVM save: failed to save type %"PRIu16"\n", i); 4.129 + return -1; 4.130 + } 4.131 + } 4.132 } 4.133 4.134 - h->size = hvm_ctxt_tell(h); 4.135 - hvm_ctxt_seek(h, 0); 4.136 - 4.137 - 
if (h->size >= HVM_CTXT_SIZE) { 4.138 - printk("hvm_domain_context overflow when hvm_save! need %"PRId32" bytes for use.\n", h->size); 4.139 + /* Save an end-of-file marker */ 4.140 + if ( hvm_save_entry(END, 0, h, &end) != 0 ) 4.141 + { 4.142 + /* Run out of data */ 4.143 + gdprintk(XENLOG_ERR, "HVM save: no room for end marker.\n"); 4.144 return -1; 4.145 } 4.146 4.147 + /* Save macros should not have let us overrun */ 4.148 + ASSERT(h->cur <= h->size); 4.149 return 0; 4.150 - 4.151 -} 4.152 - 4.153 -static HVMStateEntry *find_se(struct domain *d, const char *idstr, int instance_id) 4.154 -{ 4.155 - HVMStateEntry *se; 4.156 - 4.157 - for(se = d->arch.hvm_domain.first_se; se != NULL; se = se->next) { 4.158 - if (!strncmp(se->idstr, idstr, sizeof(se->idstr)) && 4.159 - instance_id == se->instance_id){ 4.160 - return se; 4.161 - } 4.162 - } 4.163 - return NULL; 4.164 } 4.165 4.166 int hvm_load(struct domain *d, hvm_domain_context_t *h) 4.167 { 4.168 - uint32_t len, rec_len, rec_pos, instance_id, version_id; 4.169 uint32_t eax, ebx, ecx, edx; 4.170 - HVMStateEntry *se; 4.171 - char idstr[HVM_SE_IDSTR_LEN]; 4.172 - xen_changeset_info_t chgset; 4.173 - char *cur_chgset; 4.174 - int ret; 4.175 + char *c; 4.176 + uint64_t cset; 4.177 struct hvm_save_header hdr; 4.178 + struct hvm_save_descriptor *desc; 4.179 + hvm_load_handler handler; 4.180 struct vcpu *v; 4.181 - 4.182 - if (h->size >= HVM_CTXT_SIZE) { 4.183 - printk("hvm_load fail! seems hvm_domain_context overflow when hvm_save! 
need %"PRId32" bytes.\n", h->size); 4.184 + 4.185 + /* Read the save header, which must be first */ 4.186 + if ( hvm_load_entry(HEADER, h, &hdr) != 0 ) 4.187 return -1; 4.188 - } 4.189 - 4.190 - hvm_ctxt_seek(h, 0); 4.191 - 4.192 - hvm_get_struct(h, &hdr); 4.193 4.194 if (hdr.magic != HVM_FILE_MAGIC) { 4.195 - printk("HVM restore magic dismatch!\n"); 4.196 + gdprintk(XENLOG_ERR, 4.197 + "HVM restore: bad magic number %#"PRIx32"\n", hdr.magic); 4.198 return -1; 4.199 } 4.200 4.201 if (hdr.version != HVM_FILE_VERSION) { 4.202 - printk("HVM restore version dismatch!\n"); 4.203 + gdprintk(XENLOG_ERR, 4.204 + "HVM restore: unsupported version %u\n", hdr.version); 4.205 return -1; 4.206 } 4.207 4.208 - /* check cpuid */ 4.209 cpuid(1, &eax, &ebx, &ecx, &edx); 4.210 - /*TODO: need difine how big difference is acceptable */ 4.211 + /*TODO: need to define how big a difference is acceptable */ 4.212 if (hdr.cpuid != eax) 4.213 - printk("warnings: try to restore hvm guest(0x%"PRIx32") " 4.214 - "on a different type processor(0x%"PRIx32").\n", 4.215 - hdr.cpuid, 4.216 - eax); 4.217 + gdprintk(XENLOG_WARNING, "HVM restore: saved CPUID (%#"PRIx32") " 4.218 + "does not match host (%#"PRIx32").\n", hdr.cpuid, eax); 4.219 4.220 4.221 - /* check xen change set */ 4.222 - cur_chgset = strrchr(XEN_CHANGESET, ' '); 4.223 - if ( cur_chgset ) 4.224 - cur_chgset++; 4.225 + c = strrchr(XEN_CHANGESET, ':'); 4.226 + if ( hdr.changeset == -1ULL ) 4.227 + gdprintk(XENLOG_WARNING, 4.228 + "HVM restore: Xen changeset was not saved.\n"); 4.229 + else if ( c == NULL ) 4.230 + gdprintk(XENLOG_WARNING, 4.231 + "HVM restore: Xen changeset is not available.\n"); 4.232 else 4.233 - cur_chgset = XEN_CHANGESET; 4.234 - 4.235 - len = hvm_get_8u(h); 4.236 - if (len > 20) { /*typical length is 18 -- "revision number:changeset id" */ 4.237 - printk("wrong change set length %d when hvm restore!\n", len); 4.238 - return -1; 4.239 + { 4.240 + cset = simple_strtoll(c, NULL, 16); 4.241 + if ( hdr.changeset != 
cset ) 4.242 + gdprintk(XENLOG_WARNING, "HVM restore: saved Xen changeset (%#"PRIx64 4.243 + ") does not match host (%#"PRIx64").\n", hdr.changeset, cset); 4.244 } 4.245 4.246 - hvm_get_buffer(h, chgset, len); 4.247 - chgset[len] = '\0'; 4.248 - if (strncmp(cur_chgset, chgset, len + 1)) 4.249 - printk("warnings: try to restore hvm guest(%s) on a different changeset %s.\n", 4.250 - chgset, cur_chgset); 4.251 - 4.252 - 4.253 - if ( !strcmp(cur_chgset, "unavailable") ) 4.254 - printk("warnings: try to restore hvm guest when changeset is unavailable.\n"); 4.255 - 4.256 - 4.257 /* Down all the vcpus: we only re-enable the ones that had state saved. */ 4.258 for_each_vcpu(d, v) 4.259 if ( test_and_set_bit(_VCPUF_down, &v->vcpu_flags) ) 4.260 vcpu_sleep_nosync(v); 4.261 4.262 while(1) { 4.263 - if (hvm_ctxt_end(h)) { 4.264 - break; 4.265 + 4.266 + if ( h->size - h->cur < sizeof(struct hvm_save_descriptor) ) 4.267 + { 4.268 + /* Run out of data */ 4.269 + gdprintk(XENLOG_ERR, 4.270 + "HVM restore: save did not end with a null entry\n"); 4.271 + return -1; 4.272 } 4.273 - 4.274 - /* ID string */ 4.275 - len = hvm_get_8u(h); 4.276 - if (len > HVM_SE_IDSTR_LEN) { 4.277 - printk("wrong HVM save entry idstr len %d!", len); 4.278 + 4.279 + /* Read the typecode of the next entry and check for the end-marker */ 4.280 + desc = (struct hvm_save_descriptor *)(&h->data[h->cur]); 4.281 + if ( desc->typecode == 0 ) 4.282 + return 0; 4.283 + 4.284 + /* Find the handler for this entry */ 4.285 + if ( desc->typecode > HVM_SAVE_CODE_MAX 4.286 + || (handler = hvm_sr_handlers[desc->typecode].load) == NULL ) 4.287 + { 4.288 + gdprintk(XENLOG_ERR, 4.289 + "HVM restore: unknown entry typecode %u\n", 4.290 + desc->typecode); 4.291 return -1; 4.292 } 4.293 4.294 - hvm_get_buffer(h, idstr, len); 4.295 - idstr[len] = '\0'; 4.296 - 4.297 - instance_id = hvm_get_32u(h); 4.298 - version_id = hvm_get_32u(h); 4.299 - 4.300 - printk("HVM S/R Loading \"%s\" instance %#x\n", idstr, instance_id); 4.301 - 
4.302 - rec_len = hvm_get_32u(h); 4.303 - rec_pos = hvm_ctxt_tell(h); 4.304 - 4.305 - se = find_se(d, idstr, instance_id); 4.306 - if (se == NULL) { 4.307 - printk("warnings: hvm load can't find device %s's instance %d!\n", 4.308 - idstr, instance_id); 4.309 - } else { 4.310 - ret = se->load_state(h, se->opaque, version_id); 4.311 - if (ret < 0) 4.312 - printk("warnings: loading state fail for device %s instance %d!\n", 4.313 - idstr, instance_id); 4.314 + /* Load the entry */ 4.315 + if ( handler(d, h) != 0 ) 4.316 + { 4.317 + gdprintk(XENLOG_ERR, 4.318 + "HVM restore: failed to load entry %u/%u\n", 4.319 + desc->typecode, desc->instance); 4.320 + return -1; 4.321 } 4.322 - 4.323 - 4.324 - /* make sure to jump end of record */ 4.325 - if ( hvm_ctxt_tell(h) - rec_pos != rec_len) { 4.326 - printk("wrong hvm record size, maybe some dismatch between save&restore handler!\n"); 4.327 - } 4.328 - hvm_ctxt_seek(h, rec_pos + rec_len); 4.329 } 4.330 4.331 - return 0; 4.332 + /* Not reached */ 4.333 } 4.334 4.335 4.336 -#ifdef HVM_DEBUG_SUSPEND 4.337 -static void shpage_info(shared_iopage_t *sh) 4.338 -{ 4.339 - 4.340 - vcpu_iodata_t *p = &sh->vcpu_iodata[0]; 4.341 - ioreq_t *req = &p->vp_ioreq; 4.342 - printk("*****sharepage_info******!\n"); 4.343 - printk("vp_eport=%d\n", p->vp_eport); 4.344 - printk("io packet: " 4.345 - "state:%x, pvalid: %x, dir:%x, port: %"PRIx64", " 4.346 - "data: %"PRIx64", count: %"PRIx64", size: %"PRIx64"\n", 4.347 - req->state, req->data_is_ptr, req->dir, req->addr, 4.348 - req->data, req->count, req->size); 4.349 -} 4.350 -#else 4.351 -static void shpage_info(shared_iopage_t *sh) 4.352 -{ 4.353 -} 4.354 -#endif 4.355 - 4.356 -static void shpage_save(hvm_domain_context_t *h, void *opaque) 4.357 -{ 4.358 - /* XXX:no action required for shpage save/restore, since it's in guest memory 4.359 - * keep it for debug purpose only */ 4.360 - 4.361 -#if 0 4.362 - struct shared_iopage *s = opaque; 4.363 - /* XXX:smp */ 4.364 - struct ioreq *req = 
&s->vcpu_iodata[0].vp_ioreq; 4.365 - 4.366 - shpage_info(s); 4.367 - 4.368 - hvm_put_buffer(h, (char*)req, sizeof(struct ioreq)); 4.369 -#endif 4.370 -} 4.371 - 4.372 -static int shpage_load(hvm_domain_context_t *h, void *opaque, int version_id) 4.373 -{ 4.374 - struct shared_iopage *s = opaque; 4.375 -#if 0 4.376 - /* XXX:smp */ 4.377 - struct ioreq *req = &s->vcpu_iodata[0].vp_ioreq; 4.378 - 4.379 - if (version_id != 1) 4.380 - return -EINVAL; 4.381 - 4.382 - hvm_get_buffer(h, (char*)req, sizeof(struct ioreq)); 4.383 - 4.384 - 4.385 -#endif 4.386 - shpage_info(s); 4.387 - return 0; 4.388 -} 4.389 - 4.390 -void shpage_init(struct domain *d, shared_iopage_t *sp) 4.391 -{ 4.392 - hvm_register_savevm(d, "xen_hvm_shpage", 0x10, 1, shpage_save, shpage_load, sp); 4.393 -} 4.394 - 4.395 int hvm_buffered_io_intercept(ioreq_t *p) 4.396 { 4.397 struct vcpu *v = current;
5.1 --- a/xen/arch/x86/hvm/svm/svm.c Wed Jan 31 10:11:26 2007 +0000 5.2 +++ b/xen/arch/x86/hvm/svm/svm.c Wed Jan 31 10:27:10 2007 +0000 5.3 @@ -603,29 +603,16 @@ void svm_load_cpu_state(struct vcpu *v, 5.4 // dump_msr_state(guest_state); 5.5 } 5.6 5.7 -void svm_save_vmcb_ctxt(hvm_domain_context_t *h, void *opaque) 5.8 +void svm_save_vmcb_ctxt(struct vcpu *v, struct hvm_hw_cpu *ctxt) 5.9 { 5.10 - struct vcpu *v = opaque; 5.11 - struct hvm_hw_cpu ctxt; 5.12 - 5.13 - svm_save_cpu_state(v, &ctxt); 5.14 - 5.15 - svm_vmcs_save(v, &ctxt); 5.16 - 5.17 - hvm_put_struct(h, &ctxt); 5.18 + svm_save_cpu_state(v, ctxt); 5.19 + svm_vmcs_save(v, ctxt); 5.20 } 5.21 5.22 -int svm_load_vmcb_ctxt(hvm_domain_context_t *h, void *opaque, int version) 5.23 +int svm_load_vmcb_ctxt(struct vcpu *v, struct hvm_hw_cpu *ctxt) 5.24 { 5.25 - struct vcpu *v = opaque; 5.26 - struct hvm_hw_cpu ctxt; 5.27 - 5.28 - if (version != 1) 5.29 - return -EINVAL; 5.30 - 5.31 - hvm_get_struct(h, &ctxt); 5.32 - svm_load_cpu_state(v, &ctxt); 5.33 - if (svm_vmcb_restore(v, &ctxt)) { 5.34 + svm_load_cpu_state(v, ctxt); 5.35 + if (svm_vmcb_restore(v, ctxt)) { 5.36 printk("svm_vmcb restore failed!\n"); 5.37 domain_crash(v->domain); 5.38 return -EINVAL;
6.1 --- a/xen/arch/x86/hvm/vioapic.c Wed Jan 31 10:11:26 2007 +0000 6.2 +++ b/xen/arch/x86/hvm/vioapic.c Wed Jan 31 10:27:10 2007 +0000 6.3 @@ -523,50 +523,58 @@ static void hvmirq_info(struct hvm_hw_ir 6.4 } 6.5 #endif 6.6 6.7 -static void ioapic_save(hvm_domain_context_t *h, void *opaque) 6.8 + 6.9 +static int ioapic_save(struct domain *d, hvm_domain_context_t *h) 6.10 { 6.11 - struct domain *d = opaque; 6.12 struct hvm_hw_vioapic *s = domain_vioapic(d); 6.13 - struct hvm_hw_irq *hvm_irq = &d->arch.hvm_domain.irq; 6.14 + ioapic_info(s); 6.15 6.16 - ioapic_info(s); 6.17 + /* save io-apic state*/ 6.18 + return ( hvm_save_entry(IOAPIC, 0, h, s) ); 6.19 +} 6.20 + 6.21 +static int ioapic_save_irqs(struct domain *d, hvm_domain_context_t *h) 6.22 +{ 6.23 + struct hvm_hw_irq *hvm_irq = &d->arch.hvm_domain.irq; 6.24 hvmirq_info(hvm_irq); 6.25 6.26 - /* save io-apic state*/ 6.27 - hvm_put_struct(h, s); 6.28 - 6.29 - /* save hvm irq state */ 6.30 - hvm_put_struct(h, hvm_irq); 6.31 + /* save IRQ state*/ 6.32 + return ( hvm_save_entry(IRQ, 0, h, hvm_irq) ); 6.33 } 6.34 6.35 -static int ioapic_load(hvm_domain_context_t *h, void *opaque, int version_id) 6.36 + 6.37 +static int ioapic_load(struct domain *d, hvm_domain_context_t *h) 6.38 { 6.39 - struct domain *d = opaque; 6.40 struct hvm_hw_vioapic *s = domain_vioapic(d); 6.41 - struct hvm_hw_irq *hvm_irq = &d->arch.hvm_domain.irq; 6.42 6.43 - if (version_id != 1) 6.44 + /* restore ioapic state */ 6.45 + if ( hvm_load_entry(IOAPIC, h, s) != 0 ) 6.46 return -EINVAL; 6.47 6.48 - /* restore ioapic state */ 6.49 - hvm_get_struct(h, s); 6.50 + ioapic_info(s); 6.51 + return 0; 6.52 +} 6.53 + 6.54 +static int ioapic_load_irqs(struct domain *d, hvm_domain_context_t *h) 6.55 +{ 6.56 + struct hvm_hw_irq *hvm_irq = &d->arch.hvm_domain.irq; 6.57 6.58 /* restore irq state */ 6.59 - hvm_get_struct(h, hvm_irq); 6.60 + if ( hvm_load_entry(IRQ, h, hvm_irq) != 0 ) 6.61 + return -EINVAL; 6.62 6.63 - ioapic_info(s); 6.64 hvmirq_info(hvm_irq); 6.65 
- 6.66 return 0; 6.67 } 6.68 6.69 +HVM_REGISTER_SAVE_RESTORE(IOAPIC, ioapic_save, ioapic_load); 6.70 +HVM_REGISTER_SAVE_RESTORE(IRQ, ioapic_save_irqs, ioapic_load_irqs); 6.71 + 6.72 void vioapic_init(struct domain *d) 6.73 { 6.74 struct hvm_hw_vioapic *vioapic = domain_vioapic(d); 6.75 int i; 6.76 6.77 - hvm_register_savevm(d, "xen_hvm_ioapic", 0, 1, ioapic_save, ioapic_load, d); 6.78 - 6.79 memset(vioapic, 0, sizeof(*vioapic)); 6.80 for ( i = 0; i < VIOAPIC_NUM_PINS; i++ ) 6.81 vioapic->redirtbl[i].fields.mask = 1;
7.1 --- a/xen/arch/x86/hvm/vlapic.c Wed Jan 31 10:11:26 2007 +0000 7.2 +++ b/xen/arch/x86/hvm/vlapic.c Wed Jan 31 10:27:10 2007 +0000 7.3 @@ -810,51 +810,106 @@ static void lapic_info(struct vlapic *s) 7.4 } 7.5 #endif 7.6 7.7 -static void lapic_save(hvm_domain_context_t *h, void *opaque) 7.8 +/* rearm the actimer if needed, after a HVM restore */ 7.9 +static void lapic_rearm(struct vlapic *s) 7.10 { 7.11 - struct vlapic *s = opaque; 7.12 - 7.13 - lapic_info(s); 7.14 - 7.15 - hvm_put_struct(h, &s->hw); 7.16 - hvm_put_struct(h, s->regs); 7.17 -} 7.18 - 7.19 -static int lapic_load(hvm_domain_context_t *h, void *opaque, int version_id) 7.20 -{ 7.21 - struct vlapic *s = opaque; 7.22 - struct vcpu *v = vlapic_vcpu(s); 7.23 unsigned long tmict; 7.24 7.25 - if (version_id != 1) 7.26 - return -EINVAL; 7.27 - 7.28 - hvm_get_struct(h, &s->hw); 7.29 - hvm_get_struct(h, s->regs); 7.30 - 7.31 - /* rearm the actiemr if needed */ 7.32 tmict = vlapic_get_reg(s, APIC_TMICT); 7.33 if (tmict > 0) { 7.34 uint64_t period = APIC_BUS_CYCLE_NS * (uint32_t)tmict * s->hw.timer_divisor; 7.35 uint32_t lvtt = vlapic_get_reg(s, APIC_LVTT); 7.36 7.37 s->pt.irq = lvtt & APIC_VECTOR_MASK; 7.38 - create_periodic_time(v, &s->pt, period, s->pt.irq, 7.39 + create_periodic_time(vlapic_vcpu(s), &s->pt, period, s->pt.irq, 7.40 vlapic_lvtt_period(s), NULL, s); 7.41 7.42 printk("lapic_load to rearm the actimer:" 7.43 "bus cycle is %uns, " 7.44 "saved tmict count %lu, period %"PRIu64"ns, irq=%"PRIu8"\n", 7.45 APIC_BUS_CYCLE_NS, tmict, period, s->pt.irq); 7.46 - 7.47 } 7.48 7.49 + lapic_info(s); 7.50 +} 7.51 + 7.52 +static int lapic_save_hidden(struct domain *d, hvm_domain_context_t *h) 7.53 +{ 7.54 + struct vcpu *v; 7.55 + struct vlapic *s; 7.56 + 7.57 + for_each_vcpu(d, v) 7.58 + { 7.59 + s = vcpu_vlapic(v); 7.60 + lapic_info(s); 7.61 + 7.62 + if ( hvm_save_entry(LAPIC, v->vcpu_id, h, &s->hw) != 0 ) 7.63 + return 1; 7.64 + } 7.65 + return 0; 7.66 +} 7.67 + 7.68 +static int lapic_save_regs(struct domain *d, 
hvm_domain_context_t *h) 7.69 +{ 7.70 + struct vcpu *v; 7.71 + struct vlapic *s; 7.72 + 7.73 + for_each_vcpu(d, v) 7.74 + { 7.75 + s = vcpu_vlapic(v); 7.76 + if ( hvm_save_entry(LAPIC_REGS, v->vcpu_id, h, s->regs) != 0 ) 7.77 + return 1; 7.78 + } 7.79 + return 0; 7.80 +} 7.81 + 7.82 +static int lapic_load_hidden(struct domain *d, hvm_domain_context_t *h) 7.83 +{ 7.84 + uint16_t vcpuid; 7.85 + struct vcpu *v; 7.86 + struct vlapic *s; 7.87 + 7.88 + /* Which vlapic to load? */ 7.89 + vcpuid = hvm_load_instance(h); 7.90 + if ( vcpuid > MAX_VIRT_CPUS || (v = d->vcpu[vcpuid]) == NULL ) 7.91 + { 7.92 + gdprintk(XENLOG_ERR, "HVM restore: domain has no vlapic %u\n", vcpuid); 7.93 + return -EINVAL; 7.94 + } 7.95 + s = vcpu_vlapic(v); 7.96 + 7.97 + if ( hvm_load_entry(LAPIC, h, &s->hw) != 0 ) 7.98 + return -EINVAL; 7.99 7.100 lapic_info(s); 7.101 - 7.102 return 0; 7.103 } 7.104 7.105 +static int lapic_load_regs(struct domain *d, hvm_domain_context_t *h) 7.106 +{ 7.107 + uint16_t vcpuid; 7.108 + struct vcpu *v; 7.109 + struct vlapic *s; 7.110 + 7.111 + /* Which vlapic to load? 
*/ 7.112 + vcpuid = hvm_load_instance(h); 7.113 + if ( vcpuid > MAX_VIRT_CPUS || (v = d->vcpu[vcpuid]) == NULL ) 7.114 + { 7.115 + gdprintk(XENLOG_ERR, "HVM restore: domain has no vlapic %u\n", vcpuid); 7.116 + return -EINVAL; 7.117 + } 7.118 + s = vcpu_vlapic(v); 7.119 + 7.120 + if ( hvm_load_entry(LAPIC_REGS, h, s->regs) != 0 ) 7.121 + return -EINVAL; 7.122 + 7.123 + lapic_rearm(s); 7.124 + return 0; 7.125 +} 7.126 + 7.127 +HVM_REGISTER_SAVE_RESTORE(LAPIC, lapic_save_hidden, lapic_load_hidden); 7.128 +HVM_REGISTER_SAVE_RESTORE(LAPIC_REGS, lapic_save_regs, lapic_load_regs); 7.129 + 7.130 int vlapic_init(struct vcpu *v) 7.131 { 7.132 struct vlapic *vlapic = vcpu_vlapic(v); 7.133 @@ -873,7 +928,6 @@ int vlapic_init(struct vcpu *v) 7.134 vlapic->regs = map_domain_page_global(page_to_mfn(vlapic->regs_page)); 7.135 memset(vlapic->regs, 0, PAGE_SIZE); 7.136 7.137 - hvm_register_savevm(v->domain, "xen_hvm_lapic", v->vcpu_id, 1, lapic_save, lapic_load, vlapic); 7.138 vlapic_reset(vlapic); 7.139 7.140 vlapic->hw.apic_base_msr = MSR_IA32_APICBASE_ENABLE | APIC_DEFAULT_PHYS_BASE;
8.1 --- a/xen/arch/x86/hvm/vmx/vmx.c Wed Jan 31 10:11:26 2007 +0000 8.2 +++ b/xen/arch/x86/hvm/vmx/vmx.c Wed Jan 31 10:27:10 2007 +0000 8.3 @@ -364,19 +364,9 @@ static inline void __restore_debug_regis 8.4 /* DR7 is loaded from the VMCS. */ 8.5 } 8.6 8.7 -static int __get_instruction_length(void); 8.8 int vmx_vmcs_save(struct vcpu *v, struct hvm_hw_cpu *c) 8.9 -{ 8.10 - unsigned long inst_len; 8.11 - 8.12 - inst_len = __get_instruction_length(); 8.13 +{ 8.14 c->eip = __vmread(GUEST_RIP); 8.15 - 8.16 -#ifdef HVM_DEBUG_SUSPEND 8.17 - printk("vmx_vmcs_save: inst_len=0x%lx, eip=0x%"PRIx64".\n", 8.18 - inst_len, c->eip); 8.19 -#endif 8.20 - 8.21 c->esp = __vmread(GUEST_RSP); 8.22 c->eflags = __vmread(GUEST_RFLAGS); 8.23 8.24 @@ -632,30 +622,18 @@ void vmx_load_cpu_state(struct vcpu *v, 8.25 } 8.26 8.27 8.28 -void vmx_save_vmcs_ctxt(hvm_domain_context_t *h, void *opaque) 8.29 +void vmx_save_vmcs_ctxt(struct vcpu *v, struct hvm_hw_cpu *ctxt) 8.30 { 8.31 - struct vcpu *v = opaque; 8.32 - struct hvm_hw_cpu ctxt; 8.33 - 8.34 - vmx_save_cpu_state(v, &ctxt); 8.35 + vmx_save_cpu_state(v, ctxt); 8.36 vmx_vmcs_enter(v); 8.37 - vmx_vmcs_save(v, &ctxt); 8.38 + vmx_vmcs_save(v, ctxt); 8.39 vmx_vmcs_exit(v); 8.40 - 8.41 - hvm_put_struct(h, &ctxt); 8.42 } 8.43 8.44 -int vmx_load_vmcs_ctxt(hvm_domain_context_t *h, void *opaque, int version) 8.45 +int vmx_load_vmcs_ctxt(struct vcpu *v, struct hvm_hw_cpu *ctxt) 8.46 { 8.47 - struct vcpu *v = opaque; 8.48 - struct hvm_hw_cpu ctxt; 8.49 - 8.50 - if (version != 1) 8.51 - return -EINVAL; 8.52 - 8.53 - hvm_get_struct(h, &ctxt); 8.54 - vmx_load_cpu_state(v, &ctxt); 8.55 - if (vmx_vmcs_restore(v, &ctxt)) { 8.56 + vmx_load_cpu_state(v, ctxt); 8.57 + if (vmx_vmcs_restore(v, ctxt)) { 8.58 printk("vmx_vmcs restore failed!\n"); 8.59 domain_crash(v->domain); 8.60 return -EINVAL;
9.1 --- a/xen/arch/x86/hvm/vpic.c Wed Jan 31 10:11:26 2007 +0000 9.2 +++ b/xen/arch/x86/hvm/vpic.c Wed Jan 31 10:27:10 2007 +0000 9.3 @@ -404,27 +404,44 @@ static void vpic_info(struct hvm_hw_vpic 9.4 } 9.5 #endif 9.6 9.7 -static void vpic_save(hvm_domain_context_t *h, void *opaque) 9.8 +static int vpic_save(struct domain *d, hvm_domain_context_t *h) 9.9 { 9.10 - struct hvm_hw_vpic *s = opaque; 9.11 - 9.12 - vpic_info(s); 9.13 - hvm_put_struct(h, s); 9.14 -} 9.15 + struct hvm_hw_vpic *s; 9.16 + int i; 9.17 9.18 -static int vpic_load(hvm_domain_context_t *h, void *opaque, int version_id) 9.19 -{ 9.20 - struct hvm_hw_vpic *s = opaque; 9.21 - 9.22 - if (version_id != 1) 9.23 - return -EINVAL; 9.24 - 9.25 - hvm_get_struct(h, s); 9.26 - vpic_info(s); 9.27 + /* Save the state of both PICs */ 9.28 + for ( i = 0; i < 2 ; i++ ) 9.29 + { 9.30 + s = &d->arch.hvm_domain.vpic[i]; 9.31 + vpic_info(s); 9.32 + if ( hvm_save_entry(PIC, i, h, s) ) 9.33 + return 1; 9.34 + } 9.35 9.36 return 0; 9.37 } 9.38 9.39 +static int vpic_load(struct domain *d, hvm_domain_context_t *h) 9.40 +{ 9.41 + struct hvm_hw_vpic *s; 9.42 + uint16_t inst; 9.43 + 9.44 + /* Which PIC is this? */ 9.45 + inst = hvm_load_instance(h); 9.46 + if ( inst > 1 ) 9.47 + return -EINVAL; 9.48 + s = &d->arch.hvm_domain.vpic[inst]; 9.49 + 9.50 + /* Load the state */ 9.51 + if ( hvm_load_entry(PIC, h, s) != 0 ) 9.52 + return -EINVAL; 9.53 + 9.54 + vpic_info(s); 9.55 + return 0; 9.56 +} 9.57 + 9.58 +HVM_REGISTER_SAVE_RESTORE(PIC, vpic_save, vpic_load); 9.59 + 9.60 void vpic_init(struct domain *d) 9.61 { 9.62 struct hvm_hw_vpic *vpic; 9.63 @@ -434,14 +451,12 @@ void vpic_init(struct domain *d) 9.64 memset(vpic, 0, sizeof(*vpic)); 9.65 vpic->is_master = 1; 9.66 vpic->elcr = 1 << 2; 9.67 - hvm_register_savevm(d, "xen_hvm_i8259", 0x20, 1, vpic_save, vpic_load, vpic); 9.68 register_portio_handler(d, 0x20, 2, vpic_intercept_pic_io); 9.69 register_portio_handler(d, 0x4d0, 1, vpic_intercept_elcr_io); 9.70 9.71 /* Slave PIC. 
*/ 9.72 vpic++; 9.73 memset(vpic, 0, sizeof(*vpic)); 9.74 - hvm_register_savevm(d, "xen_hvm_i8259", 0xa0, 1, vpic_save, vpic_load, vpic); 9.75 register_portio_handler(d, 0xa0, 2, vpic_intercept_pic_io); 9.76 register_portio_handler(d, 0x4d1, 1, vpic_intercept_elcr_io); 9.77 }
10.1 --- a/xen/include/asm-x86/hvm/domain.h Wed Jan 31 10:11:26 2007 +0000 10.2 +++ b/xen/include/asm-x86/hvm/domain.h Wed Jan 31 10:27:10 2007 +0000 10.3 @@ -28,20 +28,6 @@ 10.4 #include <public/hvm/params.h> 10.5 #include <public/hvm/save.h> 10.6 10.7 -typedef void SaveStateHandler(hvm_domain_context_t *h, void *opaque); 10.8 -typedef int LoadStateHandler(hvm_domain_context_t *h, void *opaque, int version_id); 10.9 - 10.10 -#define HVM_SE_IDSTR_LEN 32 10.11 -typedef struct HVMStateEntry { 10.12 - char idstr[HVM_SE_IDSTR_LEN]; 10.13 - int instance_id; 10.14 - int version_id; 10.15 - SaveStateHandler *save_state; 10.16 - LoadStateHandler *load_state; 10.17 - void *opaque; 10.18 - struct HVMStateEntry *next; 10.19 -} HVMStateEntry; 10.20 - 10.21 struct hvm_domain { 10.22 unsigned long shared_page_va; 10.23 unsigned long buffered_io_va; 10.24 @@ -65,7 +51,6 @@ struct hvm_domain { 10.25 uint64_t params[HVM_NR_PARAMS]; 10.26 10.27 struct hvm_domain_context *hvm_ctxt; 10.28 - HVMStateEntry *first_se; 10.29 }; 10.30 10.31 #endif /* __ASM_X86_HVM_DOMAIN_H__ */
11.1 --- a/xen/include/asm-x86/hvm/hvm.h Wed Jan 31 10:11:26 2007 +0000 11.2 +++ b/xen/include/asm-x86/hvm/hvm.h Wed Jan 31 10:27:10 2007 +0000 11.3 @@ -83,8 +83,8 @@ struct hvm_function_table { 11.4 struct vcpu *v, struct cpu_user_regs *r); 11.5 11.6 /* save and load hvm guest cpu context for save/restore */ 11.7 - void (*save_cpu_ctxt)(hvm_domain_context_t *h, void *opaque); 11.8 - int (*load_cpu_ctxt)(hvm_domain_context_t *h, void *opaque, int version); 11.9 + void (*save_cpu_ctxt)(struct vcpu *v, struct hvm_hw_cpu *ctxt); 11.10 + int (*load_cpu_ctxt)(struct vcpu *v, struct hvm_hw_cpu *ctxt); 11.11 11.12 /* 11.13 * Examine specifics of the guest state:
12.1 --- a/xen/include/asm-x86/hvm/support.h Wed Jan 31 10:11:26 2007 +0000 12.2 +++ b/xen/include/asm-x86/hvm/support.h Wed Jan 31 10:27:10 2007 +0000 12.3 @@ -119,133 +119,121 @@ extern unsigned int opt_hvm_debug_level; 12.4 #define TRACE_VMEXIT(index, value) \ 12.5 current->arch.hvm_vcpu.hvm_trace_values[index] = (value) 12.6 12.7 -/* save/restore support */ 12.8 - 12.9 -//#define HVM_DEBUG_SUSPEND 12.10 - 12.11 -extern int hvm_register_savevm(struct domain *d, 12.12 - const char *idstr, 12.13 - int instance_id, 12.14 - int version_id, 12.15 - SaveStateHandler *save_state, 12.16 - LoadStateHandler *load_state, 12.17 - void *opaque); 12.18 +/* 12.19 + * Save/restore support 12.20 + */ 12.21 12.22 -static inline void hvm_ctxt_seek(hvm_domain_context_t *h, unsigned int pos) 12.23 -{ 12.24 - h->cur = pos; 12.25 -} 12.26 - 12.27 -static inline uint32_t hvm_ctxt_tell(hvm_domain_context_t *h) 12.28 -{ 12.29 - return h->cur; 12.30 -} 12.31 - 12.32 -static inline int hvm_ctxt_end(hvm_domain_context_t *h) 12.33 -{ 12.34 - return (h->cur >= h->size || h->cur >= HVM_CTXT_SIZE); 12.35 -} 12.36 - 12.37 -static inline void hvm_put_byte(hvm_domain_context_t *h, unsigned int i) 12.38 +/* Marshalling an entry: check space and fill in the header */ 12.39 +static inline int _hvm_init_entry(struct hvm_domain_context *h, 12.40 + uint16_t tc, uint16_t inst, uint32_t len) 12.41 { 12.42 - if (h->cur >= HVM_CTXT_SIZE) { 12.43 - h->cur++; 12.44 - return; 12.45 + struct hvm_save_descriptor *d 12.46 + = (struct hvm_save_descriptor *)&h->data[h->cur]; 12.47 + if ( h->size - h->cur < len + sizeof (*d) ) 12.48 + { 12.49 + gdprintk(XENLOG_WARNING, 12.50 + "HVM save: no room for %"PRIu32" + %u bytes " 12.51 + "for typecode %"PRIu16"\n", 12.52 + len, (unsigned) sizeof (*d), tc); 12.53 + return -1; 12.54 } 12.55 - h->data[h->cur++] = (char)i; 12.56 -} 12.57 - 12.58 -static inline void hvm_put_8u(hvm_domain_context_t *h, uint8_t b) 12.59 -{ 12.60 - hvm_put_byte(h, b); 12.61 -} 12.62 - 12.63 -static 
inline void hvm_put_16u(hvm_domain_context_t *h, uint16_t b) 12.64 -{ 12.65 - hvm_put_8u(h, b >> 8); 12.66 - hvm_put_8u(h, b); 12.67 -} 12.68 - 12.69 -static inline void hvm_put_32u(hvm_domain_context_t *h, uint32_t b) 12.70 -{ 12.71 - hvm_put_16u(h, b >> 16); 12.72 - hvm_put_16u(h, b); 12.73 -} 12.74 - 12.75 -static inline void hvm_put_64u(hvm_domain_context_t *h, uint64_t b) 12.76 -{ 12.77 - hvm_put_32u(h, b >> 32); 12.78 - hvm_put_32u(h, b); 12.79 + d->typecode = tc; 12.80 + d->instance = inst; 12.81 + d->length = len; 12.82 + h->cur += sizeof (*d); 12.83 + return 0; 12.84 } 12.85 12.86 -static inline void hvm_put_buffer(hvm_domain_context_t *h, const char *buf, int len) 12.87 +/* Marshalling: copy the contents in a type-safe way */ 12.88 +#define _hvm_write_entry(_x, _h, _src) do { \ 12.89 + *(HVM_SAVE_TYPE(_x) *)(&(_h)->data[(_h)->cur]) = *(_src); \ 12.90 + (_h)->cur += HVM_SAVE_LENGTH(_x); \ 12.91 +} while (0) 12.92 + 12.93 +/* Marshalling: init and copy; evaluates to zero on success */ 12.94 +#define hvm_save_entry(_x, _inst, _h, _src) ({ \ 12.95 + int r; \ 12.96 + r = _hvm_init_entry((_h), HVM_SAVE_CODE(_x), \ 12.97 + (_inst), HVM_SAVE_LENGTH(_x)); \ 12.98 + if ( r == 0 ) \ 12.99 + _hvm_write_entry(_x, (_h), (_src)); \ 12.100 + r; }) 12.101 + 12.102 +/* Unmarshalling: test an entry's size and typecode and record the instance */ 12.103 +static inline int _hvm_check_entry(struct hvm_domain_context *h, 12.104 + uint16_t type, uint32_t len) 12.105 { 12.106 - memcpy(&h->data[h->cur], buf, len); 12.107 - h->cur += len; 12.108 -} 12.109 - 12.110 -static inline char hvm_get_byte(hvm_domain_context_t *h) 12.111 -{ 12.112 - if (h->cur >= HVM_CTXT_SIZE) { 12.113 - printk("hvm_get_byte overflow.\n"); 12.114 + struct hvm_save_descriptor *d 12.115 + = (struct hvm_save_descriptor *)&h->data[h->cur]; 12.116 + if ( len + sizeof (*d) > h->size - h->cur) 12.117 + { 12.118 + gdprintk(XENLOG_WARNING, 12.119 + "HVM restore: not enough data left to read %u bytes " 12.120 + "for 
type %u\n", len, type); 12.121 + return -1; 12.122 + } 12.123 + if ( type != d->typecode || len != d->length ) 12.124 + { 12.125 + gdprintk(XENLOG_WARNING, 12.126 + "HVM restore mismatch: expected type %u length %u, " 12.127 + "saw type %u length %u\n", type, len, d->typecode, d->length); 12.128 return -1; 12.129 } 12.130 - 12.131 - if (h->cur >= h->size) { 12.132 - printk("hvm_get_byte exceed data area.\n"); 12.133 - return -1; 12.134 - } 12.135 - 12.136 - return h->data[h->cur++]; 12.137 -} 12.138 - 12.139 -static inline uint8_t hvm_get_8u(hvm_domain_context_t *h) 12.140 -{ 12.141 - return hvm_get_byte(h); 12.142 + h->cur += sizeof (*d); 12.143 + return 0; 12.144 } 12.145 12.146 -static inline uint16_t hvm_get_16u(hvm_domain_context_t *h) 12.147 -{ 12.148 - uint16_t v; 12.149 - v = hvm_get_8u(h) << 8; 12.150 - v |= hvm_get_8u(h); 12.151 +/* Unmarshalling: copy the contents in a type-safe way */ 12.152 +#define _hvm_read_entry(_x, _h, _dst) do { \ 12.153 + *(_dst) = *(HVM_SAVE_TYPE(_x) *) (&(_h)->data[(_h)->cur]); \ 12.154 + (_h)->cur += HVM_SAVE_LENGTH(_x); \ 12.155 +} while (0) 12.156 12.157 - return v; 12.158 -} 12.159 +/* Unmarshalling: check, then copy. Evaluates to zero on success. */ 12.160 +#define hvm_load_entry(_x, _h, _dst) ({ \ 12.161 + int r; \ 12.162 + r = _hvm_check_entry((_h), HVM_SAVE_CODE(_x), HVM_SAVE_LENGTH(_x)); \ 12.163 + if ( r == 0 ) \ 12.164 + _hvm_read_entry(_x, (_h), (_dst)); \ 12.165 + r; }) 12.166 12.167 -static inline uint32_t hvm_get_32u(hvm_domain_context_t *h) 12.168 +/* Unmarshalling: what is the instance ID of the next entry? 
*/ 12.169 +static inline uint16_t hvm_load_instance(struct hvm_domain_context *h) 12.170 { 12.171 - uint32_t v; 12.172 - v = hvm_get_16u(h) << 16; 12.173 - v |= hvm_get_16u(h); 12.174 - 12.175 - return v; 12.176 + struct hvm_save_descriptor *d 12.177 + = (struct hvm_save_descriptor *)&h->data[h->cur]; 12.178 + return d->instance; 12.179 } 12.180 12.181 -static inline uint64_t hvm_get_64u(hvm_domain_context_t *h) 12.182 -{ 12.183 - uint64_t v; 12.184 - v = (uint64_t)hvm_get_32u(h) << 32; 12.185 - v |= hvm_get_32u(h); 12.186 - 12.187 - return v; 12.188 -} 12.189 +/* Handler types for different types of save-file entry. 12.190 + * The save handler may save multiple instances of a type into the buffer; 12.191 + * the load handler will be called once for each instance found when 12.192 + * restoring. Both return non-zero on error. */ 12.193 +typedef int (*hvm_save_handler) (struct domain *d, 12.194 + hvm_domain_context_t *h); 12.195 +typedef int (*hvm_load_handler) (struct domain *d, 12.196 + hvm_domain_context_t *h); 12.197 12.198 -static inline void hvm_get_buffer(hvm_domain_context_t *h, char *buf, int len) 12.199 -{ 12.200 - memcpy(buf, &h->data[h->cur], len); 12.201 - h->cur += len; 12.202 -} 12.203 +/* Init-time function to declare a pair of handlers for a type */ 12.204 +void hvm_register_savevm(uint16_t typecode, 12.205 + hvm_save_handler save_state, 12.206 + hvm_load_handler load_state); 12.207 12.208 -#define hvm_put_struct(_h, _p) \ 12.209 - hvm_put_buffer((_h), (char *)(_p), sizeof(*(_p))) 12.210 -#define hvm_get_struct(_h, _p) \ 12.211 - hvm_get_buffer((_h), (char *)(_p), sizeof(*(_p))) 12.212 +/* Syntactic sugar around that function */ 12.213 +#define HVM_REGISTER_SAVE_RESTORE(_x, _save, _load) \ 12.214 +static int __hvm_register_##_x##_save_and_restore(void) \ 12.215 +{ \ 12.216 + hvm_register_savevm(HVM_SAVE_CODE(_x), &_save, &_load); \ 12.217 + return 0; \ 12.218 +} \ 12.219 +__initcall(__hvm_register_##_x##_save_and_restore); 12.220 12.221 + 12.222 +/* 
Entry points for saving and restoring HVM domain state */ 12.223 int hvm_save(struct domain *d, hvm_domain_context_t *h); 12.224 int hvm_load(struct domain *d, hvm_domain_context_t *h); 12.225 12.226 -void shpage_init(struct domain *d, shared_iopage_t *sp); 12.227 +/* End of save/restore */ 12.228 12.229 extern char hvm_io_bitmap[]; 12.230 extern int hvm_enabled;
13.1 --- a/xen/include/public/hvm/save.h Wed Jan 31 10:11:26 2007 +0000 13.2 +++ b/xen/include/public/hvm/save.h Wed Jan 31 10:27:10 2007 +0000 13.3 @@ -40,24 +40,52 @@ 13.4 */ 13.5 13.6 /* 13.7 - * Save/restore header 13.8 + * Each entry is preceded by a descriptor giving its type and length 13.9 + */ 13.10 +struct hvm_save_descriptor { 13.11 + uint16_t typecode; /* Used to demux the various types below */ 13.12 + uint16_t instance; /* Further demux within a type */ 13.13 + uint32_t length; /* In bytes, *not* including this descriptor */ 13.14 +}; 13.15 + 13.16 + 13.17 +/* 13.18 + * Each entry has a datatype associated with it: for example, the CPU state 13.19 + * is saved as a HVM_SAVE_TYPE(CPU), which has HVM_SAVE_LENGTH(CPU), 13.20 + * and is identified by a descriptor with typecode HVM_SAVE_CODE(CPU). 13.21 + * DECLARE_HVM_SAVE_TYPE binds these things together with some type-system 13.22 + * ugliness. 13.23 */ 13.24 13.25 -#define HVM_SAVE_TYPE_HEADER 0 13.26 +#define DECLARE_HVM_SAVE_TYPE(_x, _code, _type) \ 13.27 + struct __HVM_SAVE_TYPE_##_x { _type t; char c[_code]; } 13.28 + 13.29 +#define HVM_SAVE_TYPE(_x) typeof (((struct __HVM_SAVE_TYPE_##_x *)(0))->t) 13.30 +#define HVM_SAVE_LENGTH(_x) (sizeof (HVM_SAVE_TYPE(_x))) 13.31 +#define HVM_SAVE_CODE(_x) (sizeof (((struct __HVM_SAVE_TYPE_##_x *)(0))->c)) 13.32 + 13.33 + 13.34 +/* 13.35 + * Save/restore header: general info about the save file. 
13.36 + */ 13.37 13.38 #define HVM_FILE_MAGIC 0x54381286 13.39 #define HVM_FILE_VERSION 0x00000001 13.40 13.41 struct hvm_save_header { 13.42 - uint32_t magic; 13.43 - uint32_t version; 13.44 - uint32_t cpuid; 13.45 + uint32_t magic; /* Must be HVM_FILE_MAGIC */ 13.46 + uint32_t version; /* File format version */ 13.47 + uint64_t changeset; /* Version of Xen that saved this file */ 13.48 + uint32_t cpuid; /* CPUID[0x01][%eax] on the saving machine */ 13.49 }; 13.50 13.51 +DECLARE_HVM_SAVE_TYPE(HEADER, 1, struct hvm_save_header); 13.52 + 13.53 + 13.54 /* 13.55 * Processor 13.56 */ 13.57 -#define HVM_SAVE_TYPE_CPU 1 13.58 + 13.59 struct hvm_hw_cpu { 13.60 uint64_t eip; 13.61 uint64_t esp; 13.62 @@ -124,11 +152,13 @@ struct hvm_hw_cpu { 13.63 uint64_t tsc; 13.64 }; 13.65 13.66 +DECLARE_HVM_SAVE_TYPE(CPU, 2, struct hvm_hw_cpu); 13.67 + 13.68 13.69 /* 13.70 * PIT 13.71 */ 13.72 -#define HVM_SAVE_TYPE_PIT 2 13.73 + 13.74 struct hvm_hw_pit { 13.75 struct hvm_hw_pit_channel { 13.76 int64_t count_load_time; 13.77 @@ -148,11 +178,13 @@ struct hvm_hw_pit { 13.78 uint32_t speaker_data_on; 13.79 }; 13.80 13.81 +DECLARE_HVM_SAVE_TYPE(PIT, 3, struct hvm_hw_pit); 13.82 + 13.83 13.84 /* 13.85 * PIC 13.86 */ 13.87 -#define HVM_SAVE_TYPE_PIC 3 13.88 + 13.89 struct hvm_hw_vpic { 13.90 /* IR line bitmasks. */ 13.91 uint8_t irr; 13.92 @@ -201,11 +233,12 @@ struct hvm_hw_vpic { 13.93 uint8_t int_output; 13.94 }; 13.95 13.96 +DECLARE_HVM_SAVE_TYPE(PIC, 4, struct hvm_hw_vpic); 13.97 + 13.98 13.99 /* 13.100 * IO-APIC 13.101 */ 13.102 -#define HVM_SAVE_TYPE_IOAPIC 4 13.103 13.104 #ifdef __ia64__ 13.105 #define VIOAPIC_IS_IOSAPIC 1 13.106 @@ -242,11 +275,13 @@ struct hvm_hw_vioapic { 13.107 } redirtbl[VIOAPIC_NUM_PINS]; 13.108 }; 13.109 13.110 +DECLARE_HVM_SAVE_TYPE(IOAPIC, 5, struct hvm_hw_vioapic); 13.111 + 13.112 13.113 /* 13.114 * IRQ 13.115 */ 13.116 -#define HVM_SAVE_TYPE_IRQ 5 13.117 + 13.118 struct hvm_hw_irq { 13.119 /* 13.120 * Virtual interrupt wires for a single PCI bus. 
13.121 @@ -309,22 +344,40 @@ struct hvm_hw_irq { 13.122 u8 round_robin_prev_vcpu; 13.123 }; 13.124 13.125 +DECLARE_HVM_SAVE_TYPE(IRQ, 6, struct hvm_hw_irq); 13.126 13.127 /* 13.128 * LAPIC 13.129 */ 13.130 -#define HVM_SAVE_TYPE_LAPIC 6 13.131 + 13.132 struct hvm_hw_lapic { 13.133 uint64_t apic_base_msr; 13.134 uint32_t disabled; /* VLAPIC_xx_DISABLED */ 13.135 uint32_t timer_divisor; 13.136 }; 13.137 13.138 -#define HVM_SAVE_TYPE_LAPIC_REGS 7 13.139 +DECLARE_HVM_SAVE_TYPE(LAPIC, 7, struct hvm_hw_lapic); 13.140 13.141 struct hvm_hw_lapic_regs { 13.142 /* A 4k page of register state */ 13.143 uint8_t data[0x400]; 13.144 }; 13.145 13.146 +DECLARE_HVM_SAVE_TYPE(LAPIC_REGS, 8, struct hvm_hw_lapic_regs); 13.147 + 13.148 + 13.149 +/* 13.150 + * Largest type-code in use 13.151 + */ 13.152 +#define HVM_SAVE_CODE_MAX 8 13.153 + 13.154 + 13.155 +/* 13.156 + * The series of save records is terminated by a zero-type, zero-length 13.157 + * descriptor. 13.158 + */ 13.159 + 13.160 +struct hvm_save_end {}; 13.161 +DECLARE_HVM_SAVE_TYPE(END, 0, struct hvm_save_end); 13.162 + 13.163 #endif /* __XEN_PUBLIC_HVM_SAVE_H__ */