ia64/xen-unstable

view xen/arch/ia64/vmx/vmx_init.c @ 18096:1e7a371cee11

[IA64] Don't perform an implicit sync in vps_save/restore

When calling vps_save/restore, pass 1 as the third parameter so that
no implicit sync is performed.

The third parameter of vps_save/restore indicates whether
vps_save/restore performs an implicit vps_sync_read/vps_sync_write.
When the third parameter is 1, no implicit sync is performed.
This parameter adds flexibility to vps_save/restore.
The feature was newly introduced by the SDM specification update of June 2008.

Signed-off-by: Anthony Xu <anthony.xu@intel.com>
author Isaku Yamahata <yamahata@valinux.co.jp>
date Wed Jul 23 11:21:47 2008 +0900 (2008-07-23)
parents 24cbbd5e3155
children 3f9d904d92c4
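
For reference, the affected calls in vmx_save_state()/vmx_load_state() below now pass
the flag in the third argument position (1 = skip the implicit sync):

    ia64_call_vsa(PAL_VPS_SAVE,    (u64)v->arch.privregs, 1, 0, 0, 0, 0, 0);
    ia64_call_vsa(PAL_VPS_RESTORE, (u64)v->arch.privregs, 1, 0, 0, 0, 0, 0);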
line source
/* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
/*
 * vmx_init.c: initialization work for vt specific domain
 * Copyright (c) 2005, Intel Corporation.
 *	Kun Tian (Kevin Tian) <kevin.tian@intel.com>
 *	Xuefei Xu (Anthony Xu) <anthony.xu@intel.com>
 *	Fred Yang <fred.yang@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 */

/*
 * 05/08/16 Kun Tian (Kevin Tian) <kevin.tian@intel.com>:
 * Disable double mapping
 *
 * 05/03/23 Kun Tian (Kevin Tian) <kevin.tian@intel.com>:
 * Simplified design in the first step:
 * - One virtual environment
 * - Domain is bound to one LP
 * Later to support guest SMP:
 * - Need interface to handle VP scheduled to different LP
 */
#include <xen/config.h>
#include <xen/types.h>
#include <xen/sched.h>
#include <asm/pal.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <asm/vmx_vcpu.h>
#include <xen/lib.h>
#include <asm/vmmu.h>
#include <public/xen.h>
#include <public/hvm/ioreq.h>
#include <public/event_channel.h>
#include <public/arch-ia64/hvm/memmap.h>
#include <asm/vmx_phy_mode.h>
#include <asm/processor.h>
#include <asm/vmx.h>
#include <xen/mm.h>
#include <asm/viosapic.h>
#include <xen/event.h>
#include <asm/vlsapic.h>
#include <asm/vhpt.h>
#include <asm/vmx_pal_vsa.h>
#include <asm/patch.h>

/* Global flag to identify whether Intel vmx feature is on */
u32 vmx_enabled = 0;
static u64 buffer_size;
static u64 vp_env_info;
static u64 vm_buffer = 0;	/* Buffer required to bring up VMX feature */
u64 __vsa_base = 0;	/* Run-time service base of VMX */

/* Check whether vt feature is enabled or not. */

void vmx_vps_patch(void)
{
    u64 addr;

    addr = (u64)&vmx_vps_sync_read;
    ia64_patch_imm64(addr, __vsa_base+PAL_VPS_SYNC_READ);
    ia64_fc((void *)addr);
    addr = (u64)&vmx_vps_sync_write;
    ia64_patch_imm64(addr, __vsa_base+PAL_VPS_SYNC_WRITE);
    ia64_fc((void *)addr);
    addr = (u64)&vmx_vps_resume_normal;
    ia64_patch_imm64(addr, __vsa_base+PAL_VPS_RESUME_NORMAL);
    ia64_fc((void *)addr);
    addr = (u64)&vmx_vps_resume_handler;
    ia64_patch_imm64(addr, __vsa_base+PAL_VPS_RESUME_HANDLER);
    ia64_fc((void *)addr);
    ia64_sync_i();
    ia64_srlz_i();
}

void
identify_vmx_feature(void)
{
    pal_status_t ret;
    u64 avail = 1, status = 1, control = 1;

    vmx_enabled = 0;
    /* Check VT-i feature */
    ret = ia64_pal_proc_get_features(&avail, &status, &control);
    if (ret != PAL_STATUS_SUCCESS) {
        printk("Get proc features failed.\n");
        goto no_vti;
    }

    /* FIXME: do we need to check the status field, to see whether
     * PSR.vm is actually enabled? If yes, another call to
     * ia64_pal_proc_set_features may be required then.
     */
    printk("avail:0x%lx, status:0x%lx, control:0x%lx, vm?0x%lx\n",
           avail, status, control, avail & PAL_PROC_VM_BIT);
    if (!(avail & PAL_PROC_VM_BIT)) {
        printk("No VT feature supported.\n");
        goto no_vti;
    }

    ret = ia64_pal_vp_env_info(&buffer_size, &vp_env_info);
    if (ret != PAL_STATUS_SUCCESS) {
        printk("Get vp environment info failed.\n");
        goto no_vti;
    }

    /* Does xen have the ability to decode itself? */
    if (!(vp_env_info & VP_OPCODE))
        printk("WARNING: no opcode provided from hardware(%lx)!!!\n", vp_env_info);
    printk("vm buffer size: %ld\n", buffer_size);

    vmx_enabled = 1;
no_vti:
    return;
}

/*
 * ** This function must be called on every processor **
 *
 * Init the virtual environment on the current LP.
 * vsa_base indicates whether this is the first LP to be initialized
 * for the current domain.
 */
void*
vmx_init_env(void *start, unsigned long end_in_pa)
{
    u64 status, tmp_base;

    if (!vm_buffer) {
        /* VM buffer must be 4K aligned and
         * must be pinned by both itr and dtr. */
#define VM_BUFFER_ALIGN		(4 * 1024)
#define VM_BUFFER_ALIGN_UP(x)	(((x) + (VM_BUFFER_ALIGN - 1)) & \
                                 ~(VM_BUFFER_ALIGN - 1))
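        /* For example, VM_BUFFER_ALIGN_UP(0x10001) == 0x11000: the address
         * is rounded up to the next 4K boundary, and already-aligned
         * values are left unchanged. */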
        unsigned long s_vm_buffer =
            VM_BUFFER_ALIGN_UP((unsigned long)start);
        unsigned long e_vm_buffer = s_vm_buffer + buffer_size;
        if (__pa(e_vm_buffer) < end_in_pa) {
            init_xenheap_pages(__pa(start), __pa(s_vm_buffer));
            start = (void*)e_vm_buffer;
            vm_buffer = virt_to_xenva(s_vm_buffer);
            printk("vm_buffer: 0x%lx\n", vm_buffer);
        } else {
            printk("Can't allocate vm_buffer "
                   "start 0x%p end_in_pa 0x%lx "
                   "buffer_size 0x%lx\n",
                   start, end_in_pa, buffer_size);
            vmx_enabled = 0;
            return start;
        }
    }

    status = ia64_pal_vp_init_env(__vsa_base ? VP_INIT_ENV : VP_INIT_ENV_INITALIZE,
                                  __pa(vm_buffer),
                                  vm_buffer,
                                  &tmp_base);

    if (status != PAL_STATUS_SUCCESS) {
        printk("ia64_pal_vp_init_env failed.\n");
        vmx_enabled = 0;
        return start;
    }

    if (!__vsa_base) {
        __vsa_base = tmp_base;
        vmx_vps_patch();
    }
    else
        ASSERT(tmp_base == __vsa_base);

    return start;
}

typedef union {
    u64 value;
    struct {
        u64 number : 8;
        u64 revision : 8;
        u64 model : 8;
        u64 family : 8;
        u64 archrev : 8;
        u64 rv : 24;
    };
} cpuid3_t;

/* Allocate vpd from domheap */
static vpd_t *alloc_vpd(void)
{
    int i;
    cpuid3_t cpuid3;
    struct page_info *page;
    vpd_t *vpd;
    mapped_regs_t *mregs;

    page = alloc_domheap_pages(NULL, get_order(VPD_SIZE), 0);
    if (page == NULL) {
        printk("VPD allocation failed.\n");
        return NULL;
    }
    vpd = page_to_virt(page);

    printk(XENLOG_DEBUG "vpd base: 0x%p, vpd size:%ld\n",
           vpd, sizeof(vpd_t));
    memset(vpd, 0, VPD_SIZE);
    mregs = &vpd->vpd_low;

    /* CPUID init */
    for (i = 0; i < 5; i++)
        mregs->vcpuid[i] = ia64_get_cpuid(i);

    /* Limit the CPUID number to 5 */
    cpuid3.value = mregs->vcpuid[3];
    cpuid3.number = 4;	/* 5 - 1 */
    mregs->vcpuid[3] = cpuid3.value;

    mregs->vac.a_from_int_cr = 1;
    mregs->vac.a_to_int_cr = 1;
    mregs->vac.a_from_psr = 1;
    mregs->vac.a_from_cpuid = 1;
    mregs->vac.a_cover = 1;
    mregs->vac.a_bsw = 1;
    mregs->vac.a_int = 1;
    mregs->vdc.d_vmsw = 1;

    return vpd;
}

/* Free vpd to domheap */
static void
free_vpd(struct vcpu *v)
{
    if ( v->arch.privregs )
        free_domheap_pages(virt_to_page(v->arch.privregs),
                           get_order(VPD_SIZE));
}

// This is used for PAL_VP_CREATE and PAL_VPS_SET_PENDING_INTERRUPT
// so that we don't have to pin the vpd down with itr[].
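// Typical usage (as in vmx_create_vp() below):
//     vmx_vpd_pin(v);
//     ret = ia64_pal_vp_create((u64 *)vpd, (u64 *)ivt_base, 0);
//     vmx_vpd_unpin(v);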
void
__vmx_vpd_pin(struct vcpu* v)
{
    unsigned long privregs = (unsigned long)v->arch.privregs;
    u64 psr;

    privregs &= ~(IA64_GRANULE_SIZE - 1);

    // check overlapping with current stack
    if (privregs ==
        ((unsigned long)current & ~(IA64_GRANULE_SIZE - 1)))
        return;

    if (!VMX_DOMAIN(current)) {
        // check overlapping with vhpt
        if (privregs ==
            (vcpu_vhpt_maddr(current) & ~(IA64_GRANULE_SHIFT - 1)))
            return;
    } else {
        // check overlapping with vhpt
        if (privregs ==
            ((unsigned long)current->arch.vhpt.hash &
             ~(IA64_GRANULE_SHIFT - 1)))
            return;

        // check overlapping with privregs
        if (privregs ==
            ((unsigned long)current->arch.privregs &
             ~(IA64_GRANULE_SHIFT - 1)))
            return;
    }

    psr = ia64_clear_ic();
    ia64_ptr(0x2 /*D*/, privregs, IA64_GRANULE_SIZE);
    ia64_srlz_d();
    ia64_itr(0x2 /*D*/, IA64_TR_MAPPED_REGS, privregs,
             pte_val(pfn_pte(__pa(privregs) >> PAGE_SHIFT, PAGE_KERNEL)),
             IA64_GRANULE_SHIFT);
    ia64_set_psr(psr);
    ia64_srlz_d();
}

void
__vmx_vpd_unpin(struct vcpu* v)
{
    if (!VMX_DOMAIN(current)) {
        int rc;
        rc = !set_one_rr(VRN7 << VRN_SHIFT, VCPU(current, rrs[VRN7]));
        BUG_ON(rc);
    } else {
        IA64FAULT fault;
        fault = vmx_vcpu_set_rr(current, VRN7 << VRN_SHIFT,
                                VMX(current, vrr[VRN7]));
        BUG_ON(fault != IA64_NO_FAULT);
    }
}

/*
 * Create a VP on an initialized VMX environment.
 */
static void
vmx_create_vp(struct vcpu *v)
{
    u64 ret;
    vpd_t *vpd = (vpd_t *)v->arch.privregs;
    u64 ivt_base;
    extern char vmx_ia64_ivt;
    /* ia64_ivt is a function pointer, so this translation is needed */
    ivt_base = (u64) &vmx_ia64_ivt;
    printk(XENLOG_DEBUG "ivt_base: 0x%lx\n", ivt_base);

    vmx_vpd_pin(v);
    ret = ia64_pal_vp_create((u64 *)vpd, (u64 *)ivt_base, 0);
    vmx_vpd_unpin(v);

    if (ret != PAL_STATUS_SUCCESS) {
        panic_domain(vcpu_regs(v), "ia64_pal_vp_create failed.\n");
    }
}

/* Other non-context related tasks can be done in context switch */
void
vmx_save_state(struct vcpu *v)
{
    BUG_ON(v != current);

    ia64_call_vsa(PAL_VPS_SAVE, (u64)v->arch.privregs, 1, 0, 0, 0, 0, 0);

    /* Need to save the KRs on a domain switch, though the HV itself
     * doesn't use them.
     */
    v->arch.arch_vmx.vkr[0] = ia64_get_kr(0);
    v->arch.arch_vmx.vkr[1] = ia64_get_kr(1);
    v->arch.arch_vmx.vkr[2] = ia64_get_kr(2);
    v->arch.arch_vmx.vkr[3] = ia64_get_kr(3);
    v->arch.arch_vmx.vkr[4] = ia64_get_kr(4);
    v->arch.arch_vmx.vkr[5] = ia64_get_kr(5);
    v->arch.arch_vmx.vkr[6] = ia64_get_kr(6);
    v->arch.arch_vmx.vkr[7] = ia64_get_kr(7);
}

/* Even if the guest is in physical mode, we still need such a double mapping */
void
vmx_load_state(struct vcpu *v)
{
    BUG_ON(v != current);

    vmx_load_all_rr(v);

    /* vmx_load_all_rr() pins down v->arch.privregs with both dtr/itr */
    ia64_call_vsa(PAL_VPS_RESTORE, (u64)v->arch.privregs, 1, 0, 0, 0, 0, 0);

    ia64_set_kr(0, v->arch.arch_vmx.vkr[0]);
    ia64_set_kr(1, v->arch.arch_vmx.vkr[1]);
    ia64_set_kr(2, v->arch.arch_vmx.vkr[2]);
    ia64_set_kr(3, v->arch.arch_vmx.vkr[3]);
    ia64_set_kr(4, v->arch.arch_vmx.vkr[4]);
    ia64_set_kr(5, v->arch.arch_vmx.vkr[5]);
    ia64_set_kr(6, v->arch.arch_vmx.vkr[6]);
    ia64_set_kr(7, v->arch.arch_vmx.vkr[7]);
    /* The guest vTLB is not required to be switched explicitly, since it
     * is anchored in the vcpu. */

    migrate_timer(&v->arch.arch_vmx.vtm.vtm_timer, v->processor);
}

static int
vmx_vcpu_initialise(struct vcpu *v)
{
    struct vmx_ioreq_page *iorp = &v->domain->arch.hvm_domain.ioreq;

    int rc = alloc_unbound_xen_event_channel(v, 0);
    if (rc < 0)
        return rc;
    v->arch.arch_vmx.xen_port = rc;

    spin_lock(&iorp->lock);
    if (v->domain->arch.vmx_platform.ioreq.va != 0) {
        vcpu_iodata_t *p = get_vio(v);
        p->vp_eport = v->arch.arch_vmx.xen_port;
    }
    spin_unlock(&iorp->lock);

    gdprintk(XENLOG_INFO, "Allocated port %ld for hvm %d vcpu %d.\n",
             v->arch.arch_vmx.xen_port, v->domain->domain_id, v->vcpu_id);

    return 0;
}

static int vmx_create_event_channels(struct vcpu *v)
{
    struct vcpu *o;

    if (v->vcpu_id == 0) {
        /* Ugly: create event channels for every vcpu when vcpu 0
           starts, so that they're available for ioemu to bind to. */
        for_each_vcpu(v->domain, o) {
            int rc = vmx_vcpu_initialise(o);
            if (rc < 0) //XXX error recovery
                return rc;
        }
    }

    return 0;
}

/*
 * The event channel has already been destroyed in domain_kill(), so we
 * needn't do anything here.
 */
static void vmx_release_assist_channel(struct vcpu *v)
{
    return;
}

/* The following three functions are based on hvm_xxx_ioreq_page()
 * in xen/arch/x86/hvm/hvm.c */
static void vmx_init_ioreq_page(
    struct domain *d, struct vmx_ioreq_page *iorp)
{
    memset(iorp, 0, sizeof(*iorp));
    spin_lock_init(&iorp->lock);
    domain_pause(d);
}

static void vmx_destroy_ioreq_page(
    struct domain *d, struct vmx_ioreq_page *iorp)
{
    spin_lock(&iorp->lock);

    ASSERT(d->is_dying);

    if (iorp->va != NULL) {
        put_page(iorp->page);
        iorp->page = NULL;
        iorp->va = NULL;
    }

    spin_unlock(&iorp->lock);
}

int vmx_set_ioreq_page(
    struct domain *d, struct vmx_ioreq_page *iorp, unsigned long gpfn)
{
    struct page_info *page;
    unsigned long mfn;
    pte_t pte;

    pte = *lookup_noalloc_domain_pte(d, gpfn << PAGE_SHIFT);
    if (!pte_present(pte) || !pte_mem(pte))
        return -EINVAL;
    mfn = (pte_val(pte) & _PFN_MASK) >> PAGE_SHIFT;
    ASSERT(mfn_valid(mfn));

    page = mfn_to_page(mfn);
    if (get_page(page, d) == 0)
        return -EINVAL;

    spin_lock(&iorp->lock);

    if ((iorp->va != NULL) || d->is_dying) {
        spin_unlock(&iorp->lock);
        put_page(page);
        return -EINVAL;
    }

    iorp->va = mfn_to_virt(mfn);
    iorp->page = page;

    spin_unlock(&iorp->lock);

    domain_unpause(d);

    return 0;
}

/*
 * Initialize the VMX environment for the guest. Only the 1st vp/vcpu
 * is registered here.
 */
int
vmx_final_setup_guest(struct vcpu *v)
{
    vpd_t *vpd;
    int rc;

    vpd = alloc_vpd();
    ASSERT(vpd);
    if (!vpd)
        return -ENOMEM;

    v->arch.privregs = (mapped_regs_t *)vpd;
    vpd->vpd_low.virt_env_vaddr = vm_buffer;

    /* Per-domain vTLB and vhpt implementation. For now a vmx domain sticks
     * to this solution. Maybe it can be deferred until we know the created
     * domain is a vmx one. */
    rc = init_domain_tlb(v);
    if (rc)
        return rc;

    if (!v->domain->arch.is_sioemu) {
        rc = vmx_create_event_channels(v);
        if (rc)
            return rc;
    }

    /* v->arch.schedule_tail = arch_vmx_do_launch; */
    vmx_create_vp(v);

    /* Physical mode emulation initialization, including
     * emulation ID allocation and related memory requests.
     */
    physical_mode_init(v);

    vlsapic_reset(v);
    vtm_init(v);

    /* Set up the guest's indicator for a VTi domain */
    set_bit(ARCH_VMX_DOMAIN, &v->arch.arch_vmx.flags);

    return 0;
}

void
vmx_relinquish_guest_resources(struct domain *d)
{
    struct vcpu *v;

    if (d->arch.is_sioemu)
        return;

    for_each_vcpu(d, v)
        vmx_release_assist_channel(v);

    vacpi_relinquish_resources(d);

    vmx_destroy_ioreq_page(d, &d->arch.vmx_platform.ioreq);
    vmx_destroy_ioreq_page(d, &d->arch.vmx_platform.buf_ioreq);
    vmx_destroy_ioreq_page(d, &d->arch.vmx_platform.buf_pioreq);
}

void
vmx_relinquish_vcpu_resources(struct vcpu *v)
{
    vtime_t *vtm = &(v->arch.arch_vmx.vtm);

    kill_timer(&vtm->vtm_timer);

    if (v->arch.arch_vmx.sioemu_info_mva)
        put_page(virt_to_page((unsigned long)
                              v->arch.arch_vmx.sioemu_info_mva));

    free_domain_tlb(v);
    free_vpd(v);
}

typedef struct io_range {
    unsigned long start;
    unsigned long size;
    unsigned long type;
} io_range_t;

static const io_range_t io_ranges[] = {
    {VGA_IO_START, VGA_IO_SIZE, GPFN_FRAME_BUFFER << PAGE_SHIFT},
    {MMIO_START, MMIO_SIZE, GPFN_LOW_MMIO << PAGE_SHIFT},
    {LEGACY_IO_START, LEGACY_IO_SIZE, GPFN_LEGACY_IO << PAGE_SHIFT},
    {IO_SAPIC_START, IO_SAPIC_SIZE, GPFN_IOSAPIC << PAGE_SHIFT},
    {PIB_START, PIB_SIZE, GPFN_PIB << PAGE_SHIFT},
};

// The P2M table is built in libxc/ia64/xc_ia64_hvm_build.c @ setup_guest()
// so only mark IO memory space here
static void vmx_build_io_physmap_table(struct domain *d)
{
    unsigned long i, j;

    /* Mark I/O ranges */
    for (i = 0; i < (sizeof(io_ranges) / sizeof(io_range_t)); i++) {
        for (j = io_ranges[i].start;
             j < io_ranges[i].start + io_ranges[i].size; j += PAGE_SIZE)
            (void)__assign_domain_page(d, j, io_ranges[i].type,
                                       ASSIGN_writable | ASSIGN_io);
    }
}

int vmx_setup_platform(struct domain *d)
{
    ASSERT(d != dom0); /* only for non-privileged vti domain */

    if (!d->arch.is_sioemu) {
        vmx_build_io_physmap_table(d);

        vmx_init_ioreq_page(d, &d->arch.vmx_platform.ioreq);
        vmx_init_ioreq_page(d, &d->arch.vmx_platform.buf_ioreq);
        vmx_init_ioreq_page(d, &d->arch.vmx_platform.buf_pioreq);
    }
    /* TEMP */
    d->arch.vmx_platform.pib_base = 0xfee00000UL;

    d->arch.sal_data = xmalloc(struct xen_sal_data);
    if (d->arch.sal_data == NULL)
        return -ENOMEM;

    /* Only open one port for I/O and interrupt emulation */
    memset(&d->shared_info->evtchn_mask[0], 0xff,
           sizeof(d->shared_info->evtchn_mask));

    /* Initialize iosapic model within hypervisor */
    viosapic_init(d);

    if (!d->arch.is_sioemu)
        vacpi_init(d);

    if (d->arch.is_sioemu) {
        int i;
        for (i = 1; i < MAX_VIRT_CPUS; i++)
            d->shared_info->vcpu_info[i].evtchn_upcall_mask = 1;
    }

    return 0;
}

void vmx_do_resume(struct vcpu *v)
{
    ioreq_t *p;

    vmx_load_state(v);

    if (v->domain->arch.is_sioemu)
        return;

    /* stolen from hvm_do_resume() in arch/x86/hvm/hvm.c */
    /* NB. Optimised for common case (p->state == STATE_IOREQ_NONE). */
    p = &get_vio(v)->vp_ioreq;
    while (p->state != STATE_IOREQ_NONE) {
        switch (p->state) {
        case STATE_IORESP_READY: /* IORESP_READY -> NONE */
            vmx_io_assist(v);
            break;
        case STATE_IOREQ_READY:
        case STATE_IOREQ_INPROCESS:
            /* IOREQ_{READY,INPROCESS} -> IORESP_READY */
            wait_on_xen_event_channel(v->arch.arch_vmx.xen_port,
                                      (p->state != STATE_IOREQ_READY) &&
                                      (p->state != STATE_IOREQ_INPROCESS));
            break;
        default:
            gdprintk(XENLOG_ERR,
                     "Weird HVM iorequest state %d.\n", p->state);
            domain_crash_synchronous();
        }
    }
}