ia64/xen-unstable

changeset 14717:c278b1c580db

Merge with xen-ia64-unstable.hg
author kfraser@localhost.localdomain
date Wed Apr 04 11:49:37 2007 +0100 (2007-04-04)
parents f378c424e0ce 6cfc491631d5
children bc43ac9631e3
files
line diff
     1.1 --- a/tools/ioemu/target-i386-dm/helper2.c	Tue Apr 03 13:04:51 2007 -0600
     1.2 +++ b/tools/ioemu/target-i386-dm/helper2.c	Wed Apr 04 11:49:37 2007 +0100
     1.3 @@ -136,9 +136,6 @@ void cpu_reset(CPUX86State *env)
     1.4      int xcHandle;
     1.5      int sts;
     1.6  
     1.7 -    /* pause domain first, to avoid repeated reboot request*/
     1.8 -    xc_domain_pause(xc_handle, domid);
     1.9 -
    1.10      xcHandle = xc_interface_open();
    1.11      if (xcHandle < 0)
    1.12          fprintf(logfile, "Cannot acquire xenctrl handle\n");
    1.13 @@ -597,6 +594,7 @@ int main_loop(void)
    1.14      extern int suspend_requested;
    1.15      CPUState *env = cpu_single_env;
    1.16      int evtchn_fd = xc_evtchn_fd(xce_handle);
    1.17 +    char qemu_file[20];
    1.18  
    1.19      buffered_io_timer = qemu_new_timer(rt_clock, handle_buffered_io,
    1.20  				       cpu_single_env);
    1.21 @@ -604,52 +602,23 @@ int main_loop(void)
    1.22  
    1.23      qemu_set_fd_handler(evtchn_fd, cpu_handle_ioreq, NULL, env);
    1.24  
    1.25 -    while (1) {
    1.26 -        if (vm_running) {
    1.27 -            if (shutdown_requested)
    1.28 -                break;
    1.29 -            if (reset_requested) {
    1.30 -                qemu_system_reset();
    1.31 -                reset_requested = 0;
    1.32 -            }
    1.33 -            if (suspend_requested) {
    1.34 -                fprintf(logfile, "device model received suspend signal!\n");
    1.35 -                break;
    1.36 -            }
    1.37 -        }
    1.38 -
    1.39 +    while (!(vm_running && suspend_requested))
    1.40          /* Wait up to 10 msec. */
    1.41          main_loop_wait(10);
    1.42 -    }
    1.43 -    if (!suspend_requested)
    1.44 -        destroy_hvm_domain();
    1.45 -    else {
    1.46 -        char qemu_file[20];
    1.47 -        ioreq_t *req;
    1.48 -        int rc;
    1.49 +
    1.50 +    fprintf(logfile, "device model received suspend signal!\n");
    1.51  
    1.52 -        sprintf(qemu_file, "/tmp/xen.qemu-dm.%d", domid);
    1.53 -        xc_domain_pause(xc_handle, domid);
    1.54 +    /* Pull all outstanding ioreqs through the system */
    1.55 +    handle_buffered_io(env);
    1.56 +    main_loop_wait(1); /* For the select() on events */
    1.57  
    1.58 -        /* Pull all outstanding ioreqs through the system */
    1.59 -        handle_buffered_io(env);
    1.60 -        main_loop_wait(1); /* For the select() on events */
    1.61 -        
    1.62 -        /* Stop the IDE thread */
    1.63 -        ide_stop_dma_thread();
    1.64 +    /* Stop the IDE thread */
    1.65 +    ide_stop_dma_thread();
    1.66  
    1.67 -        /* Make sure that all outstanding IO responses are handled too */ 
    1.68 -        if ( xc_hvm_drain_io(xc_handle, domid) != 0 )
    1.69 -        {
    1.70 -            fprintf(stderr, "error clearing ioreq rings (%s)\n", 
    1.71 -                    strerror(errno));
    1.72 -            return -1;
    1.73 -        }
    1.74 -
    1.75 -        /* Save the device state */
    1.76 -        if (qemu_savevm(qemu_file) < 0)
    1.77 -            fprintf(stderr, "qemu save fail.\n");
    1.78 -    }
    1.79 +    /* Save the device state */
    1.80 +    sprintf(qemu_file, "/tmp/xen.qemu-dm.%d", domid);
    1.81 +    if (qemu_savevm(qemu_file) < 0)
    1.82 +        fprintf(stderr, "qemu save fail.\n");
    1.83  
    1.84      return 0;
    1.85  }
     2.1 --- a/tools/libxc/xc_hvm_save.c	Tue Apr 03 13:04:51 2007 -0600
     2.2 +++ b/tools/libxc/xc_hvm_save.c	Wed Apr 04 11:49:37 2007 +0100
     2.3 @@ -59,23 +59,6 @@ static unsigned long *qemu_bitmaps[2];
     2.4  static int qemu_active;
     2.5  static int qemu_non_active;
     2.6  
     2.7 -int xc_hvm_drain_io(int handle, domid_t dom)
     2.8 -{
     2.9 -    DECLARE_HYPERCALL;
    2.10 -    xen_hvm_drain_io_t arg;
    2.11 -    int rc;
    2.12 -
    2.13 -    hypercall.op     = __HYPERVISOR_hvm_op;
    2.14 -    hypercall.arg[0] = HVMOP_drain_io;
    2.15 -    hypercall.arg[1] = (unsigned long)&arg;
    2.16 -    arg.domid = dom;
    2.17 -    if ( lock_pages(&arg, sizeof(arg)) != 0 )
    2.18 -        return -1;
    2.19 -    rc = do_xen_hypercall(handle, &hypercall);
    2.20 -    unlock_pages(&arg, sizeof(arg));
    2.21 -    return rc;
    2.22 -}
    2.23 -
    2.24  /*
    2.25  ** During (live) save/migrate, we maintain a number of bitmaps to track
    2.26  ** which pages we have to send, to fixup, and to skip.
     3.1 --- a/tools/libxc/xenguest.h	Tue Apr 03 13:04:51 2007 -0600
     3.2 +++ b/tools/libxc/xenguest.h	Wed Apr 04 11:49:37 2007 +0100
     3.3 @@ -159,8 +159,6 @@ int xc_set_hvm_param(
     3.4  int xc_get_hvm_param(
     3.5      int handle, domid_t dom, int param, unsigned long *value);
     3.6  
     3.7 -int xc_hvm_drain_io(int handle, domid_t dom);
     3.8 -
     3.9  /* PowerPC specific. */
    3.10  int xc_prose_build(int xc_handle,
    3.11                     uint32_t domid,
     4.1 --- a/tools/libxc/xg_private.c	Tue Apr 03 13:04:51 2007 -0600
     4.2 +++ b/tools/libxc/xg_private.c	Wed Apr 04 11:49:37 2007 +0100
     4.3 @@ -231,11 +231,6 @@ unsigned long csum_page(void *page)
     4.4      return -ENOSYS;
     4.5  }
     4.6  
     4.7 -__attribute__((weak)) int xc_hvm_drain_io(int handle, domid_t dom)
     4.8 -{
     4.9 -    return -ENOSYS;
    4.10 -}
    4.11 -
    4.12  /*
    4.13   * Local variables:
    4.14   * mode: C
     5.1 --- a/xen/arch/x86/hvm/hvm.c	Tue Apr 03 13:04:51 2007 -0600
     5.2 +++ b/xen/arch/x86/hvm/hvm.c	Wed Apr 04 11:49:37 2007 +0100
     5.3 @@ -131,7 +131,7 @@ void hvm_do_resume(struct vcpu *v)
     5.4          switch ( p->state )
     5.5          {
     5.6          case STATE_IORESP_READY: /* IORESP_READY -> NONE */
     5.7 -            hvm_io_assist(v);
     5.8 +            hvm_io_assist();
     5.9              break;
    5.10          case STATE_IOREQ_READY:  /* IOREQ_{READY,INPROCESS} -> IORESP_READY */
    5.11          case STATE_IOREQ_INPROCESS:
    5.12 @@ -146,48 +146,6 @@ void hvm_do_resume(struct vcpu *v)
    5.13      }
    5.14  }
    5.15  
    5.16 -/* Called from the tools when saving a domain to make sure the io
    5.17 - * request-response ring is entirely empty. */
    5.18 -static int hvmop_drain_io(
    5.19 -    XEN_GUEST_HANDLE(xen_hvm_drain_io_t) uop)
    5.20 -{
    5.21 -    struct xen_hvm_drain_io op;
    5.22 -    struct domain *d;
    5.23 -    struct vcpu *v;
    5.24 -    ioreq_t *p;
    5.25 -    int rc;
    5.26 -
    5.27 -    if ( copy_from_guest(&op, uop, 1) )
    5.28 -        return -EFAULT;
    5.29 -
    5.30 -    if ( !IS_PRIV(current->domain) )
    5.31 -        return -EPERM;
    5.32 -
    5.33 -    d = rcu_lock_domain_by_id(op.domid);
    5.34 -    if ( d == NULL )
    5.35 -        return -ESRCH;
    5.36 -
    5.37 -    rc = -EINVAL;
    5.38 -    /* Can't do this to yourself, or to a domain without an ioreq ring */
    5.39 -    if ( d == current->domain || !is_hvm_domain(d) || get_sp(d) == NULL )
    5.40 -        goto out;
    5.41 -
    5.42 -    rc = 0;
    5.43 -
    5.44 -    domain_pause(d);  /* It's not safe to do this to running vcpus */
    5.45 -    for_each_vcpu(d, v)
    5.46 -    {
    5.47 -        p = &get_vio(v->domain, v->vcpu_id)->vp_ioreq;
    5.48 -        if ( p->state == STATE_IORESP_READY )
    5.49 -            hvm_io_assist(v);
    5.50 -    }
    5.51 -    domain_unpause(d);
    5.52 -
    5.53 - out:
    5.54 -    rcu_unlock_domain(d);
    5.55 -    return rc;
    5.56 -}
    5.57 -
    5.58  int hvm_domain_initialise(struct domain *d)
    5.59  {
    5.60      int rc;
    5.61 @@ -963,12 +921,6 @@ long do_hvm_op(unsigned long op, XEN_GUE
    5.62              guest_handle_cast(arg, xen_hvm_set_pci_link_route_t));
    5.63          break;
    5.64  
    5.65 -    case HVMOP_drain_io:
    5.66 -        rc = hvmop_drain_io(
    5.67 -            guest_handle_cast(arg, xen_hvm_drain_io_t));
    5.68 -        break;
    5.69 -
    5.70 -
    5.71      default:
    5.72      {
    5.73          gdprintk(XENLOG_WARNING, "Bad HVM op %ld.\n", op);
     6.1 --- a/xen/arch/x86/hvm/io.c	Tue Apr 03 13:04:51 2007 -0600
     6.2 +++ b/xen/arch/x86/hvm/io.c	Wed Apr 04 11:49:37 2007 +0100
     6.3 @@ -764,13 +764,14 @@ static void hvm_mmio_assist(struct cpu_u
     6.4      }
     6.5  }
     6.6  
     6.7 -void hvm_io_assist(struct vcpu *v)
     6.8 +void hvm_io_assist(void)
     6.9  {
    6.10      vcpu_iodata_t *vio;
    6.11      ioreq_t *p;
    6.12      struct cpu_user_regs *regs;
    6.13      struct hvm_io_op *io_opp;
    6.14      unsigned long gmfn;
    6.15 +    struct vcpu *v = current;
    6.16      struct domain *d = v->domain;
    6.17  
    6.18      io_opp = &v->arch.hvm_vcpu.io_op;
     7.1 --- a/xen/arch/x86/hvm/platform.c	Tue Apr 03 13:04:51 2007 -0600
     7.2 +++ b/xen/arch/x86/hvm/platform.c	Wed Apr 04 11:49:37 2007 +0100
     7.3 @@ -865,7 +865,7 @@ void send_pio_req(unsigned long port, un
     7.4      if ( hvm_portio_intercept(p) )
     7.5      {
     7.6          p->state = STATE_IORESP_READY;
     7.7 -        hvm_io_assist(v);
     7.8 +        hvm_io_assist();
     7.9          return;
    7.10      }
    7.11  
    7.12 @@ -914,7 +914,7 @@ static void send_mmio_req(unsigned char 
    7.13      if ( hvm_mmio_intercept(p) || hvm_buffered_io_intercept(p) )
    7.14      {
    7.15          p->state = STATE_IORESP_READY;
    7.16 -        hvm_io_assist(v);
    7.17 +        hvm_io_assist();
    7.18          return;
    7.19      }
    7.20  
     8.1 --- a/xen/common/schedule.c	Tue Apr 03 13:04:51 2007 -0600
     8.2 +++ b/xen/common/schedule.c	Wed Apr 04 11:49:37 2007 +0100
     8.3 @@ -461,7 +461,11 @@ ret_t do_sched_op(int cmd, XEN_GUEST_HAN
     8.4          if ( d == NULL )
     8.5              break;
     8.6  
     8.7 +        /* domain_pause() prevents any further execution in guest context. */
     8.8 +        domain_pause(d);
     8.9          domain_shutdown(d, (u8)sched_remote_shutdown.reason);
    8.10 +        domain_unpause(d);
    8.11 +
    8.12          rcu_unlock_domain(d);
    8.13          ret = 0;
    8.14  
     9.1 --- a/xen/include/asm-x86/hvm/io.h	Tue Apr 03 13:04:51 2007 -0600
     9.2 +++ b/xen/include/asm-x86/hvm/io.h	Wed Apr 04 11:49:37 2007 +0100
     9.3 @@ -149,7 +149,7 @@ extern void send_pio_req(unsigned long p
     9.4  void send_timeoffset_req(unsigned long timeoff);
     9.5  extern void handle_mmio(unsigned long gpa);
     9.6  extern void hvm_interrupt_post(struct vcpu *v, int vector, int type);
     9.7 -extern void hvm_io_assist(struct vcpu *v);
     9.8 +extern void hvm_io_assist(void);
     9.9  
    9.10  #endif /* __ASM_X86_HVM_IO_H__ */
    9.11  
    10.1 --- a/xen/include/public/foreign/Makefile	Tue Apr 03 13:04:51 2007 -0600
    10.2 +++ b/xen/include/public/foreign/Makefile	Wed Apr 04 11:49:37 2007 +0100
    10.3 @@ -13,7 +13,7 @@ clean:
    10.4  	rm -f checker checker.c $(XEN_TARGET_ARCH).size
    10.5  	rm -f *.pyc *.o *~
    10.6  
    10.7 -ifeq ($(CROSS_COMPILE),)
    10.8 +ifeq ($(CROSS_COMPILE)$(XEN_TARGET_ARCH),$(XEN_COMPILE_ARCH))
    10.9  check-headers: checker
   10.10  	./checker > $(XEN_TARGET_ARCH).size
   10.11  	diff -u reference.size $(XEN_TARGET_ARCH).size
    11.1 --- a/xen/include/public/hvm/hvm_op.h	Tue Apr 03 13:04:51 2007 -0600
    11.2 +++ b/xen/include/public/hvm/hvm_op.h	Wed Apr 04 11:49:37 2007 +0100
    11.3 @@ -70,12 +70,4 @@ struct xen_hvm_set_pci_link_route {
    11.4  typedef struct xen_hvm_set_pci_link_route xen_hvm_set_pci_link_route_t;
    11.5  DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_pci_link_route_t);
    11.6  
    11.7 -/* Drain all outstanding qemu-dm IO responses from a domain's ioreq ring. */
    11.8 -#define HVMOP_drain_io            5
    11.9 -struct xen_hvm_drain_io {
   11.10 -    domid_t  domid;
   11.11 -};
   11.12 -typedef struct xen_hvm_drain_io xen_hvm_drain_io_t;
   11.13 -DEFINE_XEN_GUEST_HANDLE(xen_hvm_drain_io_t);
   11.14 -
   11.15  #endif /* __XEN_PUBLIC_HVM_HVM_OP_H__ */