int xcHandle;
int sts;
- /* pause domain first, to avoid repeated reboot request*/
- xc_domain_pause(xc_handle, domid);
-
xcHandle = xc_interface_open();
if (xcHandle < 0)
fprintf(logfile, "Cannot acquire xenctrl handle\n");
extern int suspend_requested;
CPUState *env = cpu_single_env;
int evtchn_fd = xc_evtchn_fd(xce_handle);
+ char qemu_file[64]; /* holds "/tmp/xen.qemu-dm.<domid>"; 20 bytes can overflow for large domids */
buffered_io_timer = qemu_new_timer(rt_clock, handle_buffered_io,
cpu_single_env);
qemu_set_fd_handler(evtchn_fd, cpu_handle_ioreq, NULL, env);
- while (1) {
- if (vm_running) {
- if (shutdown_requested)
- break;
- if (reset_requested) {
- qemu_system_reset();
- reset_requested = 0;
- }
- if (suspend_requested) {
- fprintf(logfile, "device model received suspend signal!\n");
- break;
- }
- }
-
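+ /* Run the main loop until the VM is up and a suspend has been requested. */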
+ while (!(vm_running && suspend_requested))
/* Wait up to 10 msec. */
main_loop_wait(10);
- }
- if (!suspend_requested)
- destroy_hvm_domain();
- else {
- char qemu_file[20];
- ioreq_t *req;
- int rc;
-
- sprintf(qemu_file, "/tmp/xen.qemu-dm.%d", domid);
- xc_domain_pause(xc_handle, domid);
-
- /* Pull all outstanding ioreqs through the system */
- handle_buffered_io(env);
- main_loop_wait(1); /* For the select() on events */
-
- /* Stop the IDE thread */
- ide_stop_dma_thread();
-
- /* Make sure that all outstanding IO responses are handled too */
- if ( xc_hvm_drain_io(xc_handle, domid) != 0 )
- {
- fprintf(stderr, "error clearing ioreq rings (%s)\n",
- strerror(errno));
- return -1;
- }
- /* Save the device state */
- if (qemu_savevm(qemu_file) < 0)
- fprintf(stderr, "qemu save fail.\n");
- }
+ fprintf(logfile, "device model received suspend signal!\n");
+
+ /* Pull all outstanding ioreqs through the system */
+ handle_buffered_io(env);
+ main_loop_wait(1); /* For the select() on events */
+
+ /* Stop the IDE thread */
+ ide_stop_dma_thread();
+
+ /* Save the device state */
+ sprintf(qemu_file, "/tmp/xen.qemu-dm.%d", domid);
+ if (qemu_savevm(qemu_file) < 0)
+ fprintf(stderr, "qemu save fail.\n");
return 0;
}
static int qemu_active;
static int qemu_non_active;
-int xc_hvm_drain_io(int handle, domid_t dom)
-{
- DECLARE_HYPERCALL;
- xen_hvm_drain_io_t arg;
- int rc;
-
- hypercall.op = __HYPERVISOR_hvm_op;
- hypercall.arg[0] = HVMOP_drain_io;
- hypercall.arg[1] = (unsigned long)&arg;
- arg.domid = dom;
- if ( lock_pages(&arg, sizeof(arg)) != 0 )
- return -1;
- rc = do_xen_hypercall(handle, &hypercall);
- unlock_pages(&arg, sizeof(arg));
- return rc;
-}
-
/*
** During (live) save/migrate, we maintain a number of bitmaps to track
** which pages we have to send, to fixup, and to skip.
int xc_get_hvm_param(
int handle, domid_t dom, int param, unsigned long *value);
-int xc_hvm_drain_io(int handle, domid_t dom);
-
/* PowerPC specific. */
int xc_prose_build(int xc_handle,
uint32_t domid,
return -ENOSYS;
}
-__attribute__((weak)) int xc_hvm_drain_io(int handle, domid_t dom)
-{
- return -ENOSYS;
-}
-
/*
* Local variables:
* mode: C
}
}
-/* Called from the tools when saving a domain to make sure the io
- * request-response ring is entirely empty. */
-static int hvmop_drain_io(
- XEN_GUEST_HANDLE(xen_hvm_drain_io_t) uop)
-{
- struct xen_hvm_drain_io op;
- struct domain *d;
- struct vcpu *v;
- ioreq_t *p;
- int rc;
-
- if ( copy_from_guest(&op, uop, 1) )
- return -EFAULT;
-
- if ( !IS_PRIV(current->domain) )
- return -EPERM;
-
- d = rcu_lock_domain_by_id(op.domid);
- if ( d == NULL )
- return -ESRCH;
-
- rc = -EINVAL;
- /* Can't do this to yourself, or to a domain without an ioreq ring */
- if ( d == current->domain || !is_hvm_domain(d) || get_sp(d) == NULL )
- goto out;
-
- rc = 0;
-
- domain_pause(d); /* It's not safe to do this to running vcpus */
- for_each_vcpu(d, v)
- {
- p = &get_vio(v->domain, v->vcpu_id)->vp_ioreq;
- if ( p->state == STATE_IORESP_READY )
- hvm_io_assist(v);
- }
- domain_unpause(d);
-
- out:
- rcu_unlock_domain(d);
- return rc;
-}
-
int hvm_domain_initialise(struct domain *d)
{
int rc;
guest_handle_cast(arg, xen_hvm_set_pci_link_route_t));
break;
- case HVMOP_drain_io:
- rc = hvmop_drain_io(
- guest_handle_cast(arg, xen_hvm_drain_io_t));
- break;
-
-
default:
{
gdprintk(XENLOG_WARNING, "Bad HVM op %ld.\n", op);
if ( d == NULL )
break;
+ /* domain_pause() prevents any further execution in guest context. */
+ domain_pause(d);
domain_shutdown(d, (u8)sched_remote_shutdown.reason);
+ domain_unpause(d);
+
rcu_unlock_domain(d);
ret = 0;
typedef struct xen_hvm_set_pci_link_route xen_hvm_set_pci_link_route_t;
DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_pci_link_route_t);
-/* Drain all outstanding qemu-dm IO responses from a domain's ioreq ring. */
-#define HVMOP_drain_io 5
-struct xen_hvm_drain_io {
- domid_t domid;
-};
-typedef struct xen_hvm_drain_io xen_hvm_drain_io_t;
-DEFINE_XEN_GUEST_HANDLE(xen_hvm_drain_io_t);
-
#endif /* __XEN_PUBLIC_HVM_HVM_OP_H__ */