xen/arch/ia64/vmx/vmx_vcpu_save.c (ia64/xen-unstable @ 16171:e7d7a4adf357)

[IA64] VT-i domain save/restore: implement hvm_save/load. Work in progress.

Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
Author: Alex Williamson <alex.williamson@hp.com>
Date:   Sun Oct 21 14:39:07 2007 -0600

/******************************************************************************
 * vmx_vcpu_save.c
 *
 * Copyright (c) 2007 Isaku Yamahata <yamahata at valinux co jp>
 *                    VA Linux Systems Japan K.K.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 */

#include <asm/vmx_vcpu.h>
#include <asm/vmx_vcpu_save.h>
#include <asm/hvm/support.h>
#include <public/hvm/save.h>
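
/*
 * Layout of this file: vmx_arch_get_info_guest() and
 * vmx_arch_set_info_guest() copy VT-i specific vcpu state (banked
 * registers, kernel registers, and a subset of the control registers)
 * between the VPD and the vcpu_guest_context used by the context
 * get/set hypercalls.  Below them, vmx_cpu_save/load and
 * vmx_vpd_save/load implement per-vcpu HVM save records.  Many copies
 * are still commented out: this changeset is a work in progress.
 */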

void
vmx_arch_get_info_guest(struct vcpu *v, vcpu_guest_context_u c)
{
    vpd_t *vpd = (void *)v->arch.privregs;
    struct mapped_regs *vpd_low = &vpd->vpd_low;
    unsigned long nats;
    unsigned long bnats;

    union vcpu_ar_regs *ar = &c.nat->regs.ar;
    union vcpu_cr_regs *cr = &c.nat->regs.cr;
    int i;

    // banked registers
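    // The VPD keeps both copies of r16-r31: judging from the
    // commented-out assignments below, vgr[]/vbgr[] hold the two
    // banks and vpsr.bn selects which one is live.  The live bank
    // would go to regs.r[16..31] (not yet wired up); the shadowed
    // bank and its NaT bits go out through regs.bank[]/regs.bnats.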
    if (vpd_low->vpsr & IA64_PSR_BN) {
        for (i = 0; i < 16; i++) {
            //c.nat->regs.r[i + 16] = vpd_low->vgr[i];
            c.nat->regs.bank[i] = vpd_low->vbgr[i];
        }
        nats = vpd_low->vnat;
        bnats = vpd_low->vbnat;
    } else {
        for (i = 0; i < 16; i++) {
            c.nat->regs.bank[i] = vpd_low->vgr[i];
            //c.nat->regs.r[i + 16] = vpd_low->vbgr[i];
        }
        bnats = vpd_low->vnat;
        nats = vpd_low->vbnat;
    }
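    // nats/bnats are bitmasks: bit i is the NaT bit of r_i, so bits
    // 16..31 belong to the banked registers handled here.  MASK() is
    // defined elsewhere; it is assumed to behave like
    //     #define MASK(first, len)  (((~0UL) >> (64 - (len))) << (first))
    // i.e. `len` one-bits starting at bit `first`.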
    // Bits 0..15 of c.nat->regs.nats are already set; don't overwrite them.
    c.nat->regs.nats =
        (c.nat->regs.nats & MASK(0, 16)) | (nats & MASK(16, 16));
    c.nat->regs.bnats = bnats & MASK(16, 16);

    //c.nat->regs.psr = vpd_low->vpsr;
    //c.nat->regs.pr = vpd_low->vpr;

    // ar
    ar->kr[0] = v->arch.arch_vmx.vkr[0];
    ar->kr[1] = v->arch.arch_vmx.vkr[1];
    ar->kr[2] = v->arch.arch_vmx.vkr[2];
    ar->kr[3] = v->arch.arch_vmx.vkr[3];
    ar->kr[4] = v->arch.arch_vmx.vkr[4];
    ar->kr[5] = v->arch.arch_vmx.vkr[5];
    ar->kr[6] = v->arch.arch_vmx.vkr[6];
    ar->kr[7] = v->arch.arch_vmx.vkr[7];
#ifdef CONFIG_IA32_SUPPORT
    // csd and ssd are done by arch_get_info_guest()
    ar->fcr = v->arch._thread.fcr;
    ar->eflag = v->arch._thread.eflag;
    ar->cflg = v->arch._thread.cflg;
    ar->fsr = v->arch._thread.fsr;
    ar->fir = v->arch._thread.fir;
    ar->fdr = v->arch._thread.fdr;
#endif
    //ar->itc = vpd_low->itc;  // see vtime

    // cr
    //cr->dcr = vpd_low->dcr;
    //cr->itm = vpd_low->itm;
    //cr->iva = vpd_low->iva;
    //cr->pta = vpd_low->pta;
    //cr->ipsr = vpd_low->ipsr;
    //cr->isr = vpd_low->isr;
    //cr->iip = vpd_low->iip;
    //cr->ifa = vpd_low->ifa;
    //cr->itir = vpd_low->itir;
    cr->iipa = vpd_low->iipa;
    cr->ifs = vpd_low->ifs;
    //cr->iim = vpd_low->iim;
    //cr->iha = vpd_low->iha;
    cr->lid = vpd_low->lid;
    cr->ivr = vpd_low->ivr;
    //cr->tpr = vpd_low->tpr;
    cr->eoi = vpd_low->eoi;
    //cr->irr[0] = vpd_low->irr[0];
    //cr->irr[1] = vpd_low->irr[1];
    //cr->irr[2] = vpd_low->irr[2];
    //cr->irr[3] = vpd_low->irr[3];
    //cr->itv = vpd_low->itv;
    //cr->pmv = vpd_low->pmv;
    //cr->cmcv = vpd_low->cmcv;
    cr->lrr0 = vpd_low->lrr0;
    cr->lrr1 = vpd_low->lrr1;
}

int
vmx_arch_set_info_guest(struct vcpu *v, vcpu_guest_context_u c)
{
    vpd_t *vpd = (void *)v->arch.privregs;
    struct mapped_regs *vpd_low = &vpd->vpd_low;
    unsigned long vnat;
    unsigned long vbnat;

    union vcpu_ar_regs *ar = &c.nat->regs.ar;
    union vcpu_cr_regs *cr = &c.nat->regs.cr;
    int i;

    // banked registers
    if (c.nat->regs.psr & IA64_PSR_BN) {
        for (i = 0; i < 16; i++) {
            //vpd_low->vgr[i] = c.nat->regs.r[i + 16];
            vpd_low->vbgr[i] = c.nat->regs.bank[i];
        }
        vnat = c.nat->regs.nats;
        vbnat = c.nat->regs.bnats;
    } else {
        for (i = 0; i < 16; i++) {
            vpd_low->vgr[i] = c.nat->regs.bank[i];
            //vpd_low->vbgr[i] = c.nat->regs.r[i + 16];
        }
        vbnat = c.nat->regs.nats;
        vnat = c.nat->regs.bnats;
    }
    vpd_low->vnat = vnat & MASK(16, 16);
    vpd_low->vbnat = vbnat & MASK(16, 16);
    //vpd_low->vpsr = c.nat->regs.psr;
    //vpd_low->vpr = c.nat->regs.pr;

    // ar
    v->arch.arch_vmx.vkr[0] = ar->kr[0];
    v->arch.arch_vmx.vkr[1] = ar->kr[1];
    v->arch.arch_vmx.vkr[2] = ar->kr[2];
    v->arch.arch_vmx.vkr[3] = ar->kr[3];
    v->arch.arch_vmx.vkr[4] = ar->kr[4];
    v->arch.arch_vmx.vkr[5] = ar->kr[5];
    v->arch.arch_vmx.vkr[6] = ar->kr[6];
    v->arch.arch_vmx.vkr[7] = ar->kr[7];
#ifdef CONFIG_IA32_SUPPORT
    v->arch._thread.fcr = ar->fcr;
    v->arch._thread.eflag = ar->eflag;
    v->arch._thread.cflg = ar->cflg;
    v->arch._thread.fsr = ar->fsr;
    v->arch._thread.fir = ar->fir;
    v->arch._thread.fdr = ar->fdr;
#endif
    //vpd_low->itc = ar->itc;  // see vtime

    // cr
    vpd_low->dcr = cr->dcr;
    vpd_low->itm = cr->itm;
    //vpd_low->iva = cr->iva;
    vpd_low->pta = cr->pta;
    vpd_low->ipsr = cr->ipsr;
    vpd_low->isr = cr->isr;
    vpd_low->iip = cr->iip;
    vpd_low->ifa = cr->ifa;
    vpd_low->itir = cr->itir;
    vpd_low->iipa = cr->iipa;
    vpd_low->ifs = cr->ifs;
    vpd_low->iim = cr->iim;
    vpd_low->iha = cr->iha;
    vpd_low->lid = cr->lid;
    vpd_low->ivr = cr->ivr;  // XXX vlsapic
    vpd_low->tpr = cr->tpr;
    vpd_low->eoi = cr->eoi;
    vpd_low->irr[0] = cr->irr[0];
    vpd_low->irr[1] = cr->irr[1];
    vpd_low->irr[2] = cr->irr[2];
    vpd_low->irr[3] = cr->irr[3];
    vpd_low->itv = cr->itv;
    vpd_low->pmv = cr->pmv;
    vpd_low->cmcv = cr->cmcv;
    vpd_low->lrr0 = cr->lrr0;
    vpd_low->lrr1 = cr->lrr1;

    v->arch.irq_new_condition = 1;
    return 0;
}
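
/*
 * HVM save/restore records.  hvm_save_entry() appends a typed,
 * per-instance record to the domain context buffer and
 * hvm_load_entry() pulls the matching record back out;
 * HVM_REGISTER_SAVE_RESTORE() hooks the two handlers into the common
 * framework so the toolstack's save/restore of the domain context
 * invokes them automatically.
 */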

static int vmx_cpu_save(struct domain *d, hvm_domain_context_t *h)
{
    struct vcpu *v;

    for_each_vcpu(d, v) {
        struct pt_regs *regs = vcpu_regs(v);
        struct hvm_hw_ia64_cpu ia64_cpu;

        if (test_bit(_VPF_down, &v->pause_flags))
            continue;

        memset(&ia64_cpu, 0, sizeof(ia64_cpu));

        ia64_cpu.ipsr = regs->cr_ipsr;

        if (hvm_save_entry(CPU, v->vcpu_id, h, &ia64_cpu))
            return -EINVAL;
    }

    return 0;
}

static int vmx_cpu_load(struct domain *d, hvm_domain_context_t *h)
{
    int rc = 0;
    uint16_t vcpuid;
    struct vcpu *v;
    struct hvm_hw_ia64_cpu ia64_cpu;
    struct pt_regs *regs;

    vcpuid = hvm_load_instance(h);
    // d->vcpu[] has MAX_VIRT_CPUS entries, so the index must be
    // strictly less than MAX_VIRT_CPUS (the original '>' was an
    // off-by-one that allowed an out-of-bounds read).
    if (vcpuid >= MAX_VIRT_CPUS || (v = d->vcpu[vcpuid]) == NULL) {
        gdprintk(XENLOG_ERR,
                 "%s: domain has no vcpu %u\n", __func__, vcpuid);
        rc = -EINVAL;
        goto out;
    }

    if (hvm_load_entry(CPU, h, &ia64_cpu) != 0) {
        rc = -EINVAL;
        goto out;
    }

    regs = vcpu_regs(v);
    // Always run the restored vcpu with PSR.vm set, whatever the
    // saved image says: the guest must stay in VT-i virtual mode.
    regs->cr_ipsr = ia64_cpu.ipsr | IA64_PSR_VM;

out:
    return rc;
}

HVM_REGISTER_SAVE_RESTORE(CPU, vmx_cpu_save, vmx_cpu_load, 1, HVMSR_PER_VCPU);
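
/*
 * For reference only: the CPU record carries just the guest ipsr for
 * now.  Its public declaration is assumed to look roughly like the
 * sketch below (the typecode is illustrative, not authoritative; the
 * real definition lives in the public save-format headers):
 *
 *     struct hvm_hw_ia64_cpu {
 *         uint64_t ipsr;   // guest cr.ipsr
 *     };
 *     DECLARE_HVM_SAVE_TYPE(CPU, 2, struct hvm_hw_ia64_cpu);
 */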

static int vmx_vpd_save(struct domain *d, hvm_domain_context_t *h)
{
    struct vcpu *v;

    for_each_vcpu(d, v) {
        vpd_t *vpd = (void *)v->arch.privregs;

        if (test_bit(_VPF_down, &v->pause_flags))
            continue;

        // Currently struct hvm_hw_ia64_vpd == struct vpd.
        // If that ever changes, this cast must be revised.
        if (hvm_save_entry(VPD, v->vcpu_id, h, (struct hvm_hw_ia64_vpd *)vpd))
            return -EINVAL;
    }

    return 0;
}

static int vmx_vpd_load(struct domain *d, hvm_domain_context_t *h)
{
    int rc = 0;
    uint16_t vcpuid;
    struct vcpu *v;
    vpd_t *vpd;
    struct hvm_hw_ia64_vpd *ia64_vpd = NULL;
    int i;

    vcpuid = hvm_load_instance(h);
    // As in vmx_cpu_load(), the bound must be '>=', not '>'.
    if (vcpuid >= MAX_VIRT_CPUS || (v = d->vcpu[vcpuid]) == NULL) {
        gdprintk(XENLOG_ERR,
                 "%s: domain has no vcpu %u\n", __func__, vcpuid);
        rc = -EINVAL;
        goto out;
    }

    ia64_vpd = xmalloc(struct hvm_hw_ia64_vpd);
    if (ia64_vpd == NULL) {
        gdprintk(XENLOG_ERR,
                 "%s: can't allocate memory for vcpu %u\n", __func__, vcpuid);
        rc = -ENOMEM;
        goto out;
    }

    if (hvm_load_entry(VPD, h, ia64_vpd) != 0) {
        rc = -EINVAL;
        goto out;
    }

    vpd = (void *)v->arch.privregs;
#define VPD_COPY(x) vpd->vpd_low.x = ia64_vpd->vpd.vpd_low.x

    for (i = 0; i < 16; i++)
        VPD_COPY(vgr[i]);
    for (i = 0; i < 16; i++)
        VPD_COPY(vbgr[i]);
    VPD_COPY(vnat);
    VPD_COPY(vbnat);
    for (i = 0; i < 5; i++)
        VPD_COPY(vcpuid[i]);
    VPD_COPY(vpsr);
    VPD_COPY(vpr);

    // cr: the field-by-field copy is kept under #if 0 as documentation;
    // the live path block-copies the whole control-register area.
#if 0
    VPD_COPY(dcr);
    VPD_COPY(itm);
    VPD_COPY(iva);
    VPD_COPY(pta);
    VPD_COPY(ipsr);
    VPD_COPY(isr);
    VPD_COPY(iip);
    VPD_COPY(ifa);
    VPD_COPY(itir);
    VPD_COPY(iipa);
    VPD_COPY(ifs);
    VPD_COPY(iim);
    VPD_COPY(iha);
    VPD_COPY(lid);
    VPD_COPY(ivr);
    VPD_COPY(tpr);
    VPD_COPY(eoi);
    VPD_COPY(irr[0]);
    VPD_COPY(irr[1]);
    VPD_COPY(irr[2]);
    VPD_COPY(irr[3]);
    VPD_COPY(itv);
    VPD_COPY(pmv);
    VPD_COPY(cmcv);
    VPD_COPY(lrr0);
    VPD_COPY(lrr1);
#else
    memcpy(&vpd->vpd_low.vcr[0], &ia64_vpd->vpd.vpd_low.vcr[0],
           sizeof(vpd->vpd_low.vcr));
#endif
#undef VPD_COPY

    v->arch.irq_new_condition = 1;

out:
    if (ia64_vpd != NULL)
        xfree(ia64_vpd);
    return rc;
}

HVM_REGISTER_SAVE_RESTORE(VPD, vmx_vpd_save, vmx_vpd_load, 1, HVMSR_PER_VCPU);
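
/*
 * Note that because struct hvm_hw_ia64_vpd currently wraps the whole
 * VPD, this record is large; that is also why vmx_vpd_load() buffers
 * it with xmalloc() rather than on the stack.
 */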

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */