ia64/xen-unstable

changeset 15366:3b51eebdf9ab

merge with xen-unstable.hg
author Alex Williamson <alex.williamson@hp.com>
date Fri Jun 15 13:33:11 2007 -0600 (2007-06-15)
parents fe42519fb10e f1ba2e652724
children 093bc9dcbbca
files
line diff
     1.1 --- a/buildconfigs/mk.linux-2.6-xen	Fri Jun 15 12:38:35 2007 -0600
     1.2 +++ b/buildconfigs/mk.linux-2.6-xen	Fri Jun 15 13:33:11 2007 -0600
     1.3 @@ -49,7 +49,7 @@ endif
     1.4  	mkdir -p $(INSTALL_BOOT_PATH)
     1.5  	$(MAKE) -C $(LINUX_DIR) ARCH=$(LINUX_ARCH) INSTALL_PATH=$(INSTALL_BOOT_PATH) install
     1.6  
     1.7 -$(LINUX_DIR)/include/linux/autoconf.h: CONFIG_FILE=$(PWD)/$(LINUX_DIR)/.config
     1.8 +$(LINUX_DIR)/include/linux/autoconf.h: CONFIG_FILE=$(CURDIR)/$(LINUX_DIR)/.config
     1.9  $(LINUX_DIR)/include/linux/autoconf.h: $(LINUX_SRCDIR)/.valid-src
    1.10  	rm -rf $(LINUX_DIR)
    1.11  	mkdir -p $(LINUX_DIR)
    1.12 @@ -90,8 +90,8 @@ endif
    1.13  	      echo "VERSION = 2"; \
    1.14  	      echo "PATCHLEVEL = 6"; \
    1.15  	      echo ""; \
    1.16 -	      echo "KERNELSRC    := $(PWD)/$(LINUX_SRCDIR)"; \
    1.17 -	      echo "KERNELOUTPUT := $(PWD)/$(LINUX_DIR)"; \
    1.18 +	      echo "KERNELSRC    := $(CURDIR)/$(LINUX_SRCDIR)"; \
    1.19 +	      echo "KERNELOUTPUT := $(CURDIR)/$(LINUX_DIR)"; \
    1.20  	      echo ""; \
    1.21  	      echo "MAKEFLAGS += --no-print-directory"; \
    1.22  	      echo ""; \
     2.1 --- a/docs/src/user.tex	Fri Jun 15 12:38:35 2007 -0600
     2.2 +++ b/docs/src/user.tex	Fri Jun 15 13:33:11 2007 -0600
     2.3 @@ -3195,6 +3195,8 @@ writing to the VGA console after domain 
     2.4  \item [ edid=no,force ] (x86 only) Either force retrieval of monitor
     2.5    EDID information via VESA DDC, or disable it (edid=no). This option
     2.6    should not normally be required except for debugging purposes.
     2.7 +\item [ edd=off,on,skipmbr ] (x86 only) Control retrieval of Enhanced
     2.8 +  Disk Drive (EDD) information from the BIOS during boot.
     2.9  \item [ console\_to\_ring ] Place guest console output into the
    2.10    hypervisor console ring buffer. This is disabled by default.
    2.11    When enabled, both hypervisor output and guest console output
     3.1 --- a/tools/libxen/src/xen_vbd.c	Fri Jun 15 12:38:35 2007 -0600
     3.2 +++ b/tools/libxen/src/xen_vbd.c	Fri Jun 15 13:33:11 2007 -0600
     3.3 @@ -463,7 +463,7 @@ xen_vbd_set_mode(xen_session *session, x
     3.4              { .type = &abstract_type_string,
     3.5                .u.string_val = vbd },
     3.6              { .type = &xen_vbd_mode_abstract_type_,
     3.7 -              .u.string_val = xen_vbd_mode_to_string(mode) }
     3.8 +              .u.enum_val = mode }
     3.9          };
    3.10  
    3.11      xen_call_(session, "VBD.set_mode", param_values, 2, NULL, NULL);
    3.12 @@ -479,7 +479,7 @@ xen_vbd_set_type(xen_session *session, x
    3.13              { .type = &abstract_type_string,
    3.14                .u.string_val = vbd },
    3.15              { .type = &xen_vbd_type_abstract_type_,
    3.16 -              .u.string_val = xen_vbd_type_to_string(type) }
    3.17 +              .u.enum_val = type }
    3.18          };
    3.19  
    3.20      xen_call_(session, "VBD.set_type", param_values, 2, NULL, NULL);
     4.1 --- a/tools/libxen/src/xen_vm.c	Fri Jun 15 12:38:35 2007 -0600
     4.2 +++ b/tools/libxen/src/xen_vm.c	Fri Jun 15 13:33:11 2007 -0600
     4.3 @@ -1142,7 +1142,7 @@ xen_vm_set_actions_after_shutdown(xen_se
     4.4              { .type = &abstract_type_string,
     4.5                .u.string_val = vm },
     4.6              { .type = &xen_on_normal_exit_abstract_type_,
     4.7 -              .u.string_val = xen_on_normal_exit_to_string(after_shutdown) }
     4.8 +              .u.enum_val = after_shutdown }
     4.9          };
    4.10  
    4.11      xen_call_(session, "VM.set_actions_after_shutdown", param_values, 2, NULL, NULL);
    4.12 @@ -1158,7 +1158,7 @@ xen_vm_set_actions_after_reboot(xen_sess
    4.13              { .type = &abstract_type_string,
    4.14                .u.string_val = vm },
    4.15              { .type = &xen_on_normal_exit_abstract_type_,
    4.16 -              .u.string_val = xen_on_normal_exit_to_string(after_reboot) }
    4.17 +              .u.enum_val = after_reboot }
    4.18          };
    4.19  
    4.20      xen_call_(session, "VM.set_actions_after_reboot", param_values, 2, NULL, NULL);
    4.21 @@ -1174,7 +1174,7 @@ xen_vm_set_actions_after_crash(xen_sessi
    4.22              { .type = &abstract_type_string,
    4.23                .u.string_val = vm },
    4.24              { .type = &xen_on_crash_behaviour_abstract_type_,
    4.25 -              .u.string_val = xen_on_crash_behaviour_to_string(after_crash) }
    4.26 +              .u.enum_val = after_crash }
    4.27          };
    4.28  
    4.29      xen_call_(session, "VM.set_actions_after_crash", param_values, 2, NULL, NULL);
     5.1 --- a/tools/python/xen/xend/XendAPI.py	Fri Jun 15 12:38:35 2007 -0600
     5.2 +++ b/tools/python/xen/xend/XendAPI.py	Fri Jun 15 13:33:11 2007 -0600
     5.3 @@ -1483,6 +1483,12 @@ class XendAPI(object):
     5.4          else:
     5.5              return xen_api_success_void()
     5.6      
     5.7 +    def VM_set_VCPUs_at_startup(self, session, vm_ref, num):
     5.8 +        return self.VM_set('VCPUs_at_startup', session, vm_ref, num)
     5.9 +
    5.10 +    def VM_set_VCPUs_max(self, session, vm_ref, num):
    5.11 +        return self.VM_set('VCPUs_max', session, vm_ref, num)
    5.12 +
    5.13      def VM_set_actions_after_shutdown(self, session, vm_ref, action):
    5.14          if action not in XEN_API_ON_NORMAL_EXIT:
    5.15              return xen_api_error(['VM_ON_NORMAL_EXIT_INVALID', vm_ref])
    5.16 @@ -1887,6 +1893,17 @@ class XendAPI(object):
    5.17          xd.managed_config_save(vm)
    5.18          return xen_api_success_void()
    5.19  
    5.20 +    def VBD_set_mode(self, session, vbd_ref, mode):
    5.21 +        if mode == 'RW':
    5.22 +            mode = 'w'
    5.23 +        else:
    5.24 +            mode = 'r'
    5.25 +        xd = XendDomain.instance()
    5.26 +        vm = xd.get_vm_with_dev_uuid('vbd', vbd_ref)
    5.27 +        vm.set_dev_property('vbd', vbd_ref, 'mode', mode)
    5.28 +        xd.managed_config_save(vm)
    5.29 +        return xen_api_success_void()
    5.30 +
    5.31      def VBD_get_all(self, session):
    5.32          xendom = XendDomain.instance()
    5.33          vbds = [d.get_vbds() for d in XendDomain.instance().list('all')]
     6.1 --- a/tools/python/xen/xend/osdep.py	Fri Jun 15 12:38:35 2007 -0600
     6.2 +++ b/tools/python/xen/xend/osdep.py	Fri Jun 15 13:33:11 2007 -0600
     6.3 @@ -65,11 +65,11 @@ def _solaris_balloon_stat(label):
     6.4      import fcntl
     6.5      import array
     6.6      DEV_XEN_BALLOON = '/dev/xen/balloon'
     6.7 -    BLN_IOCTL_CURRENT = 0x4201
     6.8 -    BLN_IOCTL_TARGET = 0x4202
     6.9 -    BLN_IOCTL_LOW = 0x4203
    6.10 -    BLN_IOCTL_HIGH = 0x4204
    6.11 -    BLN_IOCTL_LIMIT = 0x4205
    6.12 +    BLN_IOCTL_CURRENT = 0x42410001
    6.13 +    BLN_IOCTL_TARGET = 0x42410002
    6.14 +    BLN_IOCTL_LOW = 0x42410003
    6.15 +    BLN_IOCTL_HIGH = 0x42410004
    6.16 +    BLN_IOCTL_LIMIT = 0x42410005
    6.17      label_to_ioctl = {	'Current allocation'	: BLN_IOCTL_CURRENT,
    6.18  			'Requested target'	: BLN_IOCTL_TARGET,
    6.19  			'Low-mem balloon'	: BLN_IOCTL_LOW,
     7.1 --- a/tools/python/xen/xend/server/DevController.py	Fri Jun 15 12:38:35 2007 -0600
     7.2 +++ b/tools/python/xen/xend/server/DevController.py	Fri Jun 15 13:33:11 2007 -0600
     7.3 @@ -213,7 +213,7 @@ class DevController:
     7.4          devid = int(devid)
     7.5  
     7.6          frontpath = self.frontendPath(devid)
     7.7 -	if frontpath:
     7.8 +        if frontpath:
     7.9              backpath = xstransact.Read(frontpath, "backend")
    7.10  
    7.11          # Modify online status /before/ updating state (latter is watched by
    7.12 @@ -224,22 +224,22 @@ class DevController:
    7.13          if force:
    7.14              if backpath:
    7.15                  xstransact.Remove(backpath)
    7.16 -	    if frontpath:
    7.17 +            if frontpath:
    7.18                  xstransact.Remove(frontpath)
    7.19 -	    return
    7.20 +            return
    7.21  
    7.22 -	# Wait till both frontpath and backpath are removed from
    7.23 -	# xenstore, or timed out
    7.24 -	if frontpath:
    7.25 -	    status = self.waitUntilDestroyed(frontpath)
    7.26 -	    if status == Timeout:
    7.27 -	        # Exception will be caught by destroyDevice in XendDomainInfo.py
    7.28 -	        raise EnvironmentError
    7.29 -	if backpath:
    7.30 -	    status = self.waitUntilDestroyed(backpath)
    7.31 -	    if status == Timeout:
    7.32 -	        # Exception will be caught by destroyDevice in XendDomainInfo.py
    7.33 -	        raise EnvironmentError
    7.34 +        # Wait till both frontpath and backpath are removed from
    7.35 +        # xenstore, or timed out
    7.36 +        if frontpath:
    7.37 +            status = self.waitUntilDestroyed(frontpath)
    7.38 +            if status == Timeout:
    7.39 +                # Exception will be caught by destroyDevice in XendDomainInfo.py
    7.40 +                raise EnvironmentError
    7.41 +        if backpath:
    7.42 +            status = self.waitUntilDestroyed(backpath)
    7.43 +            if status == Timeout:
    7.44 +                # Exception will be caught by destroyDevice in XendDomainInfo.py
    7.45 +                raise EnvironmentError
    7.46  
    7.47          self.vm._removeVm("device/%s/%d" % (self.deviceClass, devid))
    7.48  
     8.1 --- a/tools/xenfb/xenfb.c	Fri Jun 15 12:38:35 2007 -0600
     8.2 +++ b/tools/xenfb/xenfb.c	Fri Jun 15 13:33:11 2007 -0600
     8.3 @@ -10,7 +10,6 @@
     8.4  #include <xen/io/protocols.h>
     8.5  #include <sys/select.h>
     8.6  #include <stdbool.h>
     8.7 -#include <xen/linux/evtchn.h>
     8.8  #include <xen/event_channel.h>
     8.9  #include <sys/mman.h>
    8.10  #include <errno.h>
     9.1 --- a/tools/xenstat/xentop/xentop.c	Fri Jun 15 12:38:35 2007 -0600
     9.2 +++ b/tools/xenstat/xentop/xentop.c	Fri Jun 15 13:33:11 2007 -0600
     9.3 @@ -290,12 +290,24 @@ static void print(const char *fmt, ...)
     9.4  	}
     9.5  }
     9.6  
     9.7 +static void xentop_attron(int attr)
     9.8 +{
     9.9 +	if (!batch)
    9.10 +		attron(attr);
    9.11 +}
    9.12 +
    9.13 +static void xentop_attroff(int attr)
    9.14 +{
    9.15 +	if (!batch)
    9.16 +		attroff(attr);
    9.17 +}
    9.18 +
    9.19  /* Print a string with the given attributes set. */
    9.20  static void attr_addstr(int attr, const char *str)
    9.21  {
    9.22 -	attron(attr);
    9.23 +	xentop_attron(attr);
    9.24  	addstr((curses_str_t)str);
    9.25 -	attroff(attr);
    9.26 +	xentop_attroff(attr);
    9.27  }
    9.28  
    9.29  /* Handle setting the delay from the user-supplied value in prompt_val */
    9.30 @@ -780,18 +792,18 @@ void do_header(void)
    9.31  	field_id i;
    9.32  
    9.33  	/* Turn on REVERSE highlight attribute for headings */
    9.34 -	attron(A_REVERSE);
    9.35 +	xentop_attron(A_REVERSE);
    9.36  	for(i = 0; i < NUM_FIELDS; i++) {
    9.37 -		if(i != 0)
    9.38 +		if (i != 0)
    9.39  			print(" ");
    9.40  		/* The BOLD attribute is turned on for the sort column */
    9.41 -		if(i == sort_field)
    9.42 -			attron(A_BOLD);
    9.43 +		if (i == sort_field)
    9.44 +			xentop_attron(A_BOLD);
    9.45  		print("%*s", fields[i].default_width, fields[i].header);
    9.46 -		if(i == sort_field)
    9.47 -			attroff(A_BOLD);
    9.48 +		if (i == sort_field)
    9.49 +			xentop_attroff(A_BOLD);
    9.50  	}
    9.51 -	attroff(A_REVERSE);
    9.52 +	xentop_attroff(A_REVERSE);
    9.53  	print("\n");
    9.54  }
    9.55  
    9.56 @@ -838,14 +850,14 @@ void do_bottom_line(void)
    9.57  void do_domain(xenstat_domain *domain)
    9.58  {
    9.59  	unsigned int i;
    9.60 -	for(i = 0; i < NUM_FIELDS; i++) {
    9.61 -		if(i != 0)
    9.62 +	for (i = 0; i < NUM_FIELDS; i++) {
    9.63 +		if (i != 0)
    9.64  			print(" ");
    9.65 -		if(i == sort_field)
    9.66 -			attron(A_BOLD);
    9.67 +		if (i == sort_field)
    9.68 +			xentop_attron(A_BOLD);
    9.69  		fields[i].print(domain);
    9.70 -		if(i == sort_field)
    9.71 -			attroff(A_BOLD);
    9.72 +		if (i == sort_field)
    9.73 +			xentop_attroff(A_BOLD);
    9.74  	}
    9.75  	print("\n");
    9.76  }
    9.77 @@ -956,7 +968,8 @@ static void top(void)
    9.78  		fail("Failed to retrieve statistics from libxenstat\n");
    9.79  
    9.80  	/* dump summary top information */
    9.81 -	do_summary();
    9.82 +	if (!batch)
    9.83 +		do_summary();
    9.84  
    9.85  	/* Count the number of domains for which to report data */
    9.86  	num_domains = xenstat_node_num_domains(cur_node);
    9.87 @@ -976,7 +989,7 @@ static void top(void)
    9.88  		first_domain_index = num_domains-1;
    9.89  
    9.90  	for (i = first_domain_index; i < num_domains; i++) {
    9.91 -		if(current_row() == lines()-1)
    9.92 +		if(!batch && current_row() == lines()-1)
    9.93  			break;
    9.94  		if (i == first_domain_index || repeat_header)
    9.95  			do_header();
    9.96 @@ -989,8 +1002,8 @@ static void top(void)
    9.97  			do_vbd(domains[i]);
    9.98  	}
    9.99  
   9.100 -	if(!batch)
   9.101 -	do_bottom_line();
   9.102 +	if (!batch)
   9.103 +		do_bottom_line();
   9.104  
   9.105  	free(domains);
   9.106  }
    10.1 --- a/unmodified_drivers/linux-2.6/mkbuildtree	Fri Jun 15 12:38:35 2007 -0600
    10.2 +++ b/unmodified_drivers/linux-2.6/mkbuildtree	Fri Jun 15 13:33:11 2007 -0600
    10.3 @@ -51,6 +51,7 @@ in
    10.4      ln -sf ${XL}/include/asm-x86_64/mach-xen/asm/hypercall.h include/asm
    10.5      ln -sf ${XL}/include/asm-x86_64/mach-xen/asm/synch_bitops.h include/asm
    10.6      ln -sf ${XL}/include/asm-x86_64/mach-xen/asm/maddr.h include/asm
    10.7 +    ln -sf ${XL}/include/asm-x86_64/mach-xen/asm/gnttab_dma.h include/asm
    10.8      mkdir -p include/asm-i386
    10.9      lndir -silent ${XL}/include/asm-i386 include/asm-i386
   10.10    ;;
   10.11 @@ -59,12 +60,14 @@ i[34567]86)
   10.12      ln -sf ${XL}/include/asm-i386/mach-xen/asm/hypercall.h include/asm
   10.13      ln -sf ${XL}/include/asm-i386/mach-xen/asm/synch_bitops.h include/asm
   10.14      ln -sf ${XL}/include/asm-i386/mach-xen/asm/maddr.h include/asm
   10.15 +    ln -sf ${XL}/include/asm-i386/mach-xen/asm/gnttab_dma.h include/asm
   10.16    ;;
   10.17  "ia64")
   10.18      ln -sf ${XL}/include/asm-ia64/hypervisor.h include/asm
   10.19      ln -sf ${XL}/include/asm-ia64/hypercall.h include/asm
   10.20      ln -sf ${XL}/include/asm-ia64/synch_bitops.h include/asm
   10.21      ln -sf ${XL}/include/asm-ia64/maddr.h include/asm
   10.22 +    ln -sf ${XL}/include/asm-ia64/gnttab_dma.h include/asm
   10.23      mkdir -p include/asm/xen
   10.24      ln -sf ${XL}/include/asm-ia64/xen/xcom_hcall.h include/asm/xen
   10.25      ln -sf ${XL}/include/asm-ia64/xen/xencomm.h include/asm/xen
    11.1 --- a/xen/arch/x86/genapic/es7000plat.c	Fri Jun 15 12:38:35 2007 -0600
    11.2 +++ b/xen/arch/x86/genapic/es7000plat.c	Fri Jun 15 13:33:11 2007 -0600
    11.3 @@ -299,12 +299,12 @@ es7000_stop_cpu(int cpu)
    11.4  void __init
    11.5  es7000_sw_apic()
    11.6  {
    11.7 -	if (es7000_plat) {
    11.8 +	if (es7000_plat && (es7000_plat != ES7000_ZORRO)) {
    11.9  		int mip_status;
   11.10  		struct mip_reg es7000_mip_reg;
   11.11  
   11.12  		printk("ES7000: Enabling APIC mode.\n");
   11.13 -        	memset(&es7000_mip_reg, 0, sizeof(struct mip_reg));
   11.14 +		memset(&es7000_mip_reg, 0, sizeof(struct mip_reg));
   11.15          	es7000_mip_reg.off_0 = MIP_SW_APIC;
   11.16          	es7000_mip_reg.off_38 = (MIP_VALID);
   11.17          	while ((mip_status = es7000_mip_write(&es7000_mip_reg)) != 0)
    12.1 --- a/xen/arch/x86/hvm/hpet.c	Fri Jun 15 12:38:35 2007 -0600
    12.2 +++ b/xen/arch/x86/hvm/hpet.c	Fri Jun 15 13:33:11 2007 -0600
    12.3 @@ -381,6 +381,9 @@ void hpet_migrate_timers(struct vcpu *v)
    12.4      struct HPETState *h = &v->domain->arch.hvm_domain.pl_time.vhpet;
    12.5      int i;
    12.6  
    12.7 +    if ( v != h->vcpu )
    12.8 +        return;
    12.9 +
   12.10      for ( i = 0; i < HPET_TIMER_NUM; i++ )
   12.11          migrate_timer(&h->timers[i], v->processor);
   12.12  }
    13.1 --- a/xen/arch/x86/hvm/hvm.c	Fri Jun 15 12:38:35 2007 -0600
    13.2 +++ b/xen/arch/x86/hvm/hvm.c	Fri Jun 15 13:33:11 2007 -0600
    13.3 @@ -109,11 +109,9 @@ u64 hvm_get_guest_time(struct vcpu *v)
    13.4  
    13.5  void hvm_migrate_timers(struct vcpu *v)
    13.6  {
    13.7 -    pit_migrate_timers(v);
    13.8      rtc_migrate_timers(v);
    13.9      hpet_migrate_timers(v);
   13.10 -    if ( vcpu_vlapic(v)->pt.enabled )
   13.11 -        migrate_timer(&vcpu_vlapic(v)->pt.timer, v->processor);
   13.12 +    pt_migrate(v);
   13.13  }
   13.14  
   13.15  void hvm_do_resume(struct vcpu *v)
    14.1 --- a/xen/arch/x86/hvm/i8254.c	Fri Jun 15 12:38:35 2007 -0600
    14.2 +++ b/xen/arch/x86/hvm/i8254.c	Fri Jun 15 13:33:11 2007 -0600
    14.3 @@ -3,12 +3,13 @@
    14.4   * 
    14.5   * Copyright (c) 2003-2004 Fabrice Bellard
    14.6   * Copyright (c) 2006 Intel Corperation
    14.7 + * Copyright (c) 2007 Keir Fraser, XenSource Inc.
    14.8   * 
    14.9   * Permission is hereby granted, free of charge, to any person obtaining a copy
   14.10 - * of this software and associated documentation files (the "Software"), to deal
   14.11 - * in the Software without restriction, including without limitation the rights
   14.12 - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
   14.13 - * copies of the Software, and to permit persons to whom the Software is
   14.14 + * of this software and associated documentation files (the "Software"), to
   14.15 + * deal in the Software without restriction, including without limitation the
   14.16 + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
   14.17 + * sell copies of the Software, and to permit persons to whom the Software is
   14.18   * furnished to do so, subject to the following conditions:
   14.19   *
   14.20   * The above copyright notice and this permission notice shall be included in
   14.21 @@ -18,14 +19,9 @@
   14.22   * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   14.23   * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
   14.24   * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
   14.25 - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
   14.26 - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
   14.27 - * THE SOFTWARE.
   14.28 - */
   14.29 -/* Edwin Zhai <edwin.zhai@intel.com>, Eddie Dong <eddie.dong@intel.com>
   14.30 - * Ported to xen:
   14.31 - * Add a new layer of periodic time on top of PIT;
   14.32 - * move speaker io access to hypervisor;
   14.33 + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
   14.34 + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
   14.35 + * IN THE SOFTWARE.
   14.36   */
   14.37  
   14.38  #include <xen/config.h>
   14.39 @@ -41,8 +37,11 @@
   14.40  #include <asm/hvm/vpt.h>
   14.41  #include <asm/current.h>
   14.42  
   14.43 -/* Enable DEBUG_PIT may cause guest calibration inaccuracy */
   14.44 -/* #define DEBUG_PIT */
   14.45 +#define domain_vpit(d)   (&(d)->arch.hvm_domain.pl_time.vpit)
   14.46 +#define vcpu_vpit(vcpu)  (domain_vpit((vcpu)->domain))
   14.47 +#define vpit_domain(pit) (container_of((pit), struct domain, \
   14.48 +                                       arch.hvm_domain.pl_time.vpit))
   14.49 +#define vpit_vcpu(pit)   (vpit_domain(pit)->vcpu[0])
   14.50  
   14.51  #define RW_STATE_LSB 1
   14.52  #define RW_STATE_MSB 2
   14.53 @@ -52,8 +51,8 @@
   14.54  static int handle_pit_io(ioreq_t *p);
   14.55  static int handle_speaker_io(ioreq_t *p);
   14.56  
   14.57 -/* compute with 96 bit intermediate result: (a*b)/c */
   14.58 -uint64_t muldiv64(uint64_t a, uint32_t b, uint32_t c)
   14.59 +/* Compute with 96 bit intermediate result: (a*b)/c */
   14.60 +static uint64_t muldiv64(uint64_t a, uint32_t b, uint32_t c)
   14.61  {
   14.62      union {
   14.63          uint64_t ll;
   14.64 @@ -76,16 +75,18 @@ uint64_t muldiv64(uint64_t a, uint32_t b
   14.65      return res.ll;
   14.66  }
   14.67  
   14.68 -static int pit_get_count(PITState *s, int channel)
   14.69 +static int pit_get_count(PITState *pit, int channel)
   14.70  {
   14.71      uint64_t d;
   14.72      int  counter;
   14.73 -    struct hvm_hw_pit_channel *c = &s->hw.channels[channel];
   14.74 -    struct periodic_time *pt = &s->pt[channel];
   14.75 +    struct hvm_hw_pit_channel *c = &pit->hw.channels[channel];
   14.76 +    struct vcpu *v = vpit_vcpu(pit);
   14.77  
   14.78 -    d = muldiv64(hvm_get_guest_time(pt->vcpu) - s->count_load_time[channel],
   14.79 -                 PIT_FREQ, ticks_per_sec(pt->vcpu));
   14.80 -    switch(c->mode) {
   14.81 +    d = muldiv64(hvm_get_guest_time(v) - pit->count_load_time[channel],
   14.82 +                 PIT_FREQ, ticks_per_sec(v));
   14.83 +
   14.84 +    switch ( c->mode )
   14.85 +    {
   14.86      case 0:
   14.87      case 1:
   14.88      case 4:
   14.89 @@ -103,16 +104,18 @@ static int pit_get_count(PITState *s, in
   14.90      return counter;
   14.91  }
   14.92  
   14.93 -/* get pit output bit */
   14.94 -int pit_get_out(PITState *pit, int channel, int64_t current_time)
   14.95 +static int pit_get_out(PITState *pit, int channel)
   14.96  {
   14.97      struct hvm_hw_pit_channel *s = &pit->hw.channels[channel];
   14.98      uint64_t d;
   14.99      int out;
  14.100 +    struct vcpu *v = vpit_vcpu(pit);
  14.101  
  14.102 -    d = muldiv64(current_time - pit->count_load_time[channel], 
  14.103 -                 PIT_FREQ, ticks_per_sec(pit->pt[channel].vcpu));
  14.104 -    switch(s->mode) {
  14.105 +    d = muldiv64(hvm_get_guest_time(v) - pit->count_load_time[channel], 
  14.106 +                 PIT_FREQ, ticks_per_sec(v));
  14.107 +
  14.108 +    switch ( s->mode )
  14.109 +    {
  14.110      default:
  14.111      case 0:
  14.112          out = (d >= s->count);
  14.113 @@ -121,29 +124,27 @@ int pit_get_out(PITState *pit, int chann
  14.114          out = (d < s->count);
  14.115          break;
  14.116      case 2:
  14.117 -        if ((d % s->count) == 0 && d != 0)
  14.118 -            out = 1;
  14.119 -        else
  14.120 -            out = 0;
  14.121 +        out = (((d % s->count) == 0) && (d != 0));
  14.122          break;
  14.123      case 3:
  14.124 -        out = (d % s->count) < ((s->count + 1) >> 1);
  14.125 +        out = ((d % s->count) < ((s->count + 1) >> 1));
  14.126          break;
  14.127      case 4:
  14.128      case 5:
  14.129          out = (d == s->count);
  14.130          break;
  14.131      }
  14.132 +
  14.133      return out;
  14.134  }
  14.135  
  14.136 -/* val must be 0 or 1 */
  14.137 -void pit_set_gate(PITState *pit, int channel, int val)
  14.138 +static void pit_set_gate(PITState *pit, int channel, int val)
  14.139  {
  14.140      struct hvm_hw_pit_channel *s = &pit->hw.channels[channel];
  14.141 -    struct periodic_time *pt = &pit->pt[channel];
  14.142 +    struct vcpu *v = vpit_vcpu(pit);
  14.143  
  14.144 -    switch(s->mode) {
  14.145 +    switch ( s->mode )
  14.146 +    {
  14.147      default:
  14.148      case 0:
  14.149      case 4:
  14.150 @@ -151,22 +152,14 @@ void pit_set_gate(PITState *pit, int cha
  14.151          break;
  14.152      case 1:
  14.153      case 5:
  14.154 -        if (s->gate < val) {
  14.155 -            /* restart counting on rising edge */
  14.156 -            pit->count_load_time[channel] = hvm_get_guest_time(pt->vcpu);
  14.157 -//            pit_irq_timer_update(s, s->count_load_time);
  14.158 -        }
  14.159 -        break;
  14.160      case 2:
  14.161      case 3:
  14.162 -        if (s->gate < val) {
  14.163 -            /* restart counting on rising edge */
  14.164 -            pit->count_load_time[channel] = hvm_get_guest_time(pt->vcpu);
  14.165 -//            pit_irq_timer_update(s, s->count_load_time);
  14.166 -        }
  14.167 -        /* XXX: disable/enable counting */
  14.168 +        /* Restart counting on rising edge. */
  14.169 +        if ( s->gate < val )
  14.170 +            pit->count_load_time[channel] = hvm_get_guest_time(v);
  14.171          break;
  14.172      }
  14.173 +
  14.174      s->gate = val;
  14.175  }
  14.176  
  14.177 @@ -175,57 +168,40 @@ int pit_get_gate(PITState *pit, int chan
  14.178      return pit->hw.channels[channel].gate;
  14.179  }
  14.180  
  14.181 -void pit_time_fired(struct vcpu *v, void *priv)
  14.182 +static void pit_time_fired(struct vcpu *v, void *priv)
  14.183  {
  14.184      uint64_t *count_load_time = priv;
  14.185      *count_load_time = hvm_get_guest_time(v);
  14.186  }
  14.187  
  14.188 -static inline void pit_load_count(PITState *pit, int channel, int val)
  14.189 +static void pit_load_count(PITState *pit, int channel, int val)
  14.190  {
  14.191      u32 period;
  14.192      struct hvm_hw_pit_channel *s = &pit->hw.channels[channel];
  14.193      struct periodic_time *pt = &pit->pt[channel];
  14.194 -    struct vcpu *v;
  14.195 +    struct vcpu *v = vpit_vcpu(pit);
  14.196  
  14.197 -    if (val == 0)
  14.198 +    if ( val == 0 )
  14.199          val = 0x10000;
  14.200 +
  14.201      pit->count_load_time[channel] = hvm_get_guest_time(pt->vcpu);
  14.202      s->count = val;
  14.203      period = DIV_ROUND((val * 1000000000ULL), PIT_FREQ);
  14.204  
  14.205 -    if (channel != 0)
  14.206 +    if ( (v == NULL) || !is_hvm_vcpu(v) || (channel != 0) )
  14.207          return;
  14.208  
  14.209 -#ifdef DEBUG_PIT
  14.210 -    printk("HVM_PIT: pit-load-counter(%p), count=0x%x, period=%uns mode=%d, load_time=%lld\n",
  14.211 -            s,
  14.212 -            val,
  14.213 -            period,
  14.214 -            s->mode,
  14.215 -            (long long)pit->count_load_time[channel]);
  14.216 -#endif
  14.217 -
  14.218 -    /* Choose a vcpu to set the timer on: current if appropriate else vcpu 0 */
  14.219 -    if ( likely(pit == &current->domain->arch.hvm_domain.pl_time.vpit) )
  14.220 -        v = current;
  14.221 -    else 
  14.222 -        v = container_of(pit, struct domain, 
  14.223 -                         arch.hvm_domain.pl_time.vpit)->vcpu[0];
  14.224 -
  14.225 -    switch (s->mode) {
  14.226 +    switch ( s->mode )
  14.227 +    {
  14.228          case 2:
  14.229 -            /* create periodic time */
  14.230 +            /* Periodic timer. */
  14.231              create_periodic_time(v, pt, period, 0, 0, pit_time_fired, 
  14.232                                   &pit->count_load_time[channel]);
  14.233              break;
  14.234          case 1:
  14.235 -            /* create one shot time */
  14.236 +            /* One-shot timer. */
  14.237              create_periodic_time(v, pt, period, 0, 1, pit_time_fired,
  14.238                                   &pit->count_load_time[channel]);
  14.239 -#ifdef DEBUG_PIT
  14.240 -            printk("HVM_PIT: create one shot time.\n");
  14.241 -#endif
  14.242              break;
  14.243          default:
  14.244              destroy_periodic_time(pt);
  14.245 @@ -233,63 +209,84 @@ static inline void pit_load_count(PITSta
  14.246      }
  14.247  }
  14.248  
  14.249 -/* if already latched, do not latch again */
  14.250  static void pit_latch_count(PITState *s, int channel)
  14.251  {
  14.252      struct hvm_hw_pit_channel *c = &s->hw.channels[channel];
  14.253 -    if (!c->count_latched) {
  14.254 +    if ( !c->count_latched )
  14.255 +    {
  14.256          c->latched_count = pit_get_count(s, channel);
  14.257          c->count_latched = c->rw_mode;
  14.258      }
  14.259  }
  14.260  
  14.261 -static void pit_ioport_write(void *opaque, uint32_t addr, uint32_t val)
  14.262 +static void pit_latch_status(PITState *s, int channel)
  14.263  {
  14.264 -    PITState *pit = opaque;
  14.265 +    struct hvm_hw_pit_channel *c = &s->hw.channels[channel];
  14.266 +    if ( !c->status_latched )
  14.267 +    {
  14.268 +        /* TODO: Return NULL COUNT (bit 6). */
  14.269 +        c->status = ((pit_get_out(s, channel) << 7) |
  14.270 +                     (c->rw_mode << 4) |
  14.271 +                     (c->mode << 1) |
  14.272 +                     c->bcd);
  14.273 +        c->status_latched = 1;
  14.274 +    }
  14.275 +}
  14.276 +
  14.277 +static void pit_ioport_write(struct PITState *pit, uint32_t addr, uint32_t val)
  14.278 +{
  14.279      int channel, access;
  14.280      struct hvm_hw_pit_channel *s;
  14.281 -    val &= 0xff;
  14.282  
  14.283 +    val  &= 0xff;
  14.284      addr &= 3;
  14.285 -    if (addr == 3) {
  14.286 +
  14.287 +    if ( addr == 3 )
  14.288 +    {
  14.289          channel = val >> 6;
  14.290 -        if (channel == 3) {
  14.291 -            /* read back command */
  14.292 -            for(channel = 0; channel < 3; channel++) {
  14.293 +        if ( channel == 3 )
  14.294 +        {
  14.295 +            /* Read-Back Command. */
  14.296 +            for ( channel = 0; channel < 3; channel++ )
  14.297 +            {
  14.298                  s = &pit->hw.channels[channel];
  14.299 -                if (val & (2 << channel)) {
  14.300 -                    if (!(val & 0x20)) {
  14.301 +                if ( val & (2 << channel) )
  14.302 +                {
  14.303 +                    if ( !(val & 0x20) )
  14.304                          pit_latch_count(pit, channel);
  14.305 -                    }
  14.306 -                    if (!(val & 0x10) && !s->status_latched) {
  14.307 -                        /* status latch */
  14.308 -                        /* XXX: add BCD and null count */
  14.309 -                        s->status = (pit_get_out(pit, channel, hvm_get_guest_time(pit->pt[channel].vcpu)) << 7) |
  14.310 -                            (s->rw_mode << 4) |
  14.311 -                            (s->mode << 1) |
  14.312 -                            s->bcd;
  14.313 -                        s->status_latched = 1;
  14.314 -                    }
  14.315 +                    if ( !(val & 0x10) )
  14.316 +                        pit_latch_status(pit, channel);
  14.317                  }
  14.318              }
  14.319 -        } else {
  14.320 +        }
  14.321 +        else
  14.322 +        {
  14.323 +            /* Select Counter <channel>. */
  14.324              s = &pit->hw.channels[channel];
  14.325              access = (val >> 4) & 3;
  14.326 -            if (access == 0) {
  14.327 +            if ( access == 0 )
  14.328 +            {
  14.329                  pit_latch_count(pit, channel);
  14.330 -            } else {
  14.331 +            }
  14.332 +            else
  14.333 +            {
  14.334                  s->rw_mode = access;
  14.335                  s->read_state = access;
  14.336                  s->write_state = access;
  14.337 -
  14.338                  s->mode = (val >> 1) & 7;
  14.339 +                if ( s->mode > 5 )
  14.340 +                    s->mode -= 4;
  14.341                  s->bcd = val & 1;
  14.342                  /* XXX: update irq timer ? */
  14.343              }
  14.344          }
  14.345 -    } else {
  14.346 +    }
  14.347 +    else
  14.348 +    {
  14.349 +        /* Write Count. */
  14.350          s = &pit->hw.channels[addr];
  14.351 -        switch(s->write_state) {
  14.352 +        switch ( s->write_state )
  14.353 +        {
  14.354          default:
  14.355          case RW_STATE_LSB:
  14.356              pit_load_count(pit, addr, val);
  14.357 @@ -309,19 +306,23 @@ static void pit_ioport_write(void *opaqu
  14.358      }
  14.359  }
  14.360  
  14.361 -static uint32_t pit_ioport_read(void *opaque, uint32_t addr)
  14.362 +static uint32_t pit_ioport_read(struct PITState *pit, uint32_t addr)
  14.363  {
  14.364 -    PITState *pit = opaque;
  14.365      int ret, count;
  14.366      struct hvm_hw_pit_channel *s;
  14.367      
  14.368      addr &= 3;
  14.369      s = &pit->hw.channels[addr];
  14.370 -    if (s->status_latched) {
  14.371 +
  14.372 +    if ( s->status_latched )
  14.373 +    {
  14.374          s->status_latched = 0;
  14.375          ret = s->status;
  14.376 -    } else if (s->count_latched) {
  14.377 -        switch(s->count_latched) {
  14.378 +    }
  14.379 +    else if ( s->count_latched )
  14.380 +    {
  14.381 +        switch ( s->count_latched )
  14.382 +        {
  14.383          default:
  14.384          case RW_STATE_LSB:
  14.385              ret = s->latched_count & 0xff;
  14.386 @@ -336,8 +337,11 @@ static uint32_t pit_ioport_read(void *op
  14.387              s->count_latched = RW_STATE_MSB;
  14.388              break;
  14.389          }
  14.390 -    } else {
  14.391 -        switch(s->read_state) {
  14.392 +    }
  14.393 +    else
  14.394 +    {
  14.395 +        switch ( s->read_state )
  14.396 +        {
  14.397          default:
  14.398          case RW_STATE_LSB:
  14.399              count = pit_get_count(pit, addr);
  14.400 @@ -359,10 +363,11 @@ static uint32_t pit_ioport_read(void *op
  14.401              break;
  14.402          }
  14.403      }
  14.404 +
  14.405      return ret;
  14.406  }
  14.407  
  14.408 -void pit_stop_channel0_irq(PITState * pit)
  14.409 +void pit_stop_channel0_irq(PITState *pit)
  14.410  {
  14.411      destroy_periodic_time(&pit->pt[0]);
  14.412  }
  14.413 @@ -374,7 +379,8 @@ static void pit_info(PITState *pit)
  14.414      struct periodic_time *pt;
  14.415      int i;
  14.416  
  14.417 -    for(i = 0; i < 3; i++) {
  14.418 +    for ( i = 0; i < 3; i++ )
  14.419 +    {
  14.420          printk("*****pit channel %d's state:*****\n", i);
  14.421          s = &pit->hw.channels[i];
  14.422          printk("pit 0x%x.\n", s->count);
  14.423 @@ -392,7 +398,8 @@ static void pit_info(PITState *pit)
  14.424          printk("pit %"PRId64"\n", pit->count_load_time[i]);
  14.425  
  14.426          pt = &pit->pt[i];
  14.427 -        if (pt) {
  14.428 +        if ( pt )
  14.429 +        {
  14.430              printk("pit channel %d has a periodic timer:\n", i);
  14.431              printk("pt %d.\n", pt->enabled);
  14.432              printk("pt %d.\n", pt->one_shot);
  14.433 @@ -405,7 +412,6 @@ static void pit_info(PITState *pit)
  14.434              printk("pt %"PRId64"\n", pt->last_plt_gtime);
  14.435          }
  14.436      }
  14.437 -
  14.438  }
  14.439  #else
  14.440  static void pit_info(PITState *pit)
  14.441 @@ -415,7 +421,7 @@ static void pit_info(PITState *pit)
  14.442  
  14.443  static int pit_save(struct domain *d, hvm_domain_context_t *h)
  14.444  {
  14.445 -    PITState *pit = &d->arch.hvm_domain.pl_time.vpit;
  14.446 +    PITState *pit = domain_vpit(d);
  14.447      
  14.448      pit_info(pit);
  14.449  
  14.450 @@ -425,7 +431,7 @@ static int pit_save(struct domain *d, hv
  14.451  
  14.452  static int pit_load(struct domain *d, hvm_domain_context_t *h)
  14.453  {
  14.454 -    PITState *pit = &d->arch.hvm_domain.pl_time.vpit;
  14.455 +    PITState *pit = domain_vpit(d);
  14.456      int i;
  14.457  
  14.458      /* Restore the PIT hardware state */
  14.459 @@ -435,8 +441,8 @@ static int pit_load(struct domain *d, hv
  14.460      /* Recreate platform timers from hardware state.  There will be some 
  14.461       * time jitter here, but the wall-clock will have jumped massively, so 
  14.462       * we hope the guest can handle it. */
  14.463 -
  14.464 -    for(i = 0; i < 3; i++) {
  14.465 +    for ( i = 0; i < 3; i++ )
  14.466 +    {
  14.467          pit_load_count(pit, i, pit->hw.channels[i].count);
  14.468          pit->pt[i].last_plt_gtime = hvm_get_guest_time(d->vcpu[0]);
  14.469      }
  14.470 @@ -447,121 +453,94 @@ static int pit_load(struct domain *d, hv
  14.471  
  14.472  HVM_REGISTER_SAVE_RESTORE(PIT, pit_save, pit_load, 1, HVMSR_PER_DOM);
  14.473  
  14.474 -static void pit_reset(void *opaque)
  14.475 +void pit_init(struct vcpu *v, unsigned long cpu_khz)
  14.476  {
  14.477 -    PITState *pit = opaque;
  14.478 +    PITState *pit = vcpu_vpit(v);
  14.479 +    struct periodic_time *pt;
  14.480      struct hvm_hw_pit_channel *s;
  14.481      int i;
  14.482  
  14.483 -    for(i = 0;i < 3; i++) {
  14.484 +    pt = &pit->pt[0];  
  14.485 +    pt[0].vcpu = v;
  14.486 +    pt[1].vcpu = v;
  14.487 +    pt[2].vcpu = v;
  14.488 +
  14.489 +    register_portio_handler(v->domain, PIT_BASE, 4, handle_pit_io);
  14.490 +    /* register the speaker port */
  14.491 +    register_portio_handler(v->domain, 0x61, 1, handle_speaker_io);
  14.492 +    ticks_per_sec(v) = cpu_khz * (int64_t)1000;
  14.493 +
  14.494 +    for ( i = 0; i < 3; i++ )
  14.495 +    {
  14.496          s = &pit->hw.channels[i];
  14.497 -        destroy_periodic_time(&pit->pt[i]);
  14.498          s->mode = 0xff; /* the init mode */
  14.499          s->gate = (i != 2);
  14.500          pit_load_count(pit, i, 0);
  14.501      }
  14.502  }
  14.503  
  14.504 -void pit_init(struct vcpu *v, unsigned long cpu_khz)
  14.505 -{
  14.506 -    PITState *pit = &v->domain->arch.hvm_domain.pl_time.vpit;
  14.507 -    struct periodic_time *pt;
  14.508 -
  14.509 -    pt = &pit->pt[0];  
  14.510 -    pt->vcpu = v;
  14.511 -    /* the timer 0 is connected to an IRQ */
  14.512 -    init_timer(&pt->timer, pt_timer_fn, pt, v->processor);
  14.513 -    pt++; pt->vcpu = v;
  14.514 -    pt++; pt->vcpu = v;
  14.515 -
  14.516 -    register_portio_handler(v->domain, PIT_BASE, 4, handle_pit_io);
  14.517 -    /* register the speaker port */
  14.518 -    register_portio_handler(v->domain, 0x61, 1, handle_speaker_io);
  14.519 -    ticks_per_sec(v) = cpu_khz * (int64_t)1000;
  14.520 -#ifdef DEBUG_PIT
  14.521 -    printk("HVM_PIT: guest frequency =%lld\n", (long long)ticks_per_sec(v));
  14.522 -#endif
  14.523 -    pit_reset(pit);
  14.524 -    return;
  14.525 -}
  14.526 -
  14.527 -void pit_migrate_timers(struct vcpu *v)
  14.528 -{
  14.529 -    PITState *pit = &v->domain->arch.hvm_domain.pl_time.vpit;
  14.530 -    struct periodic_time *pt;
  14.531 -
  14.532 -    pt = &pit->pt[0];
  14.533 -    if ( pt->vcpu == v && pt->enabled )
  14.534 -        migrate_timer(&pt->timer, v->processor);
  14.535 -}
  14.536 -
  14.537  void pit_deinit(struct domain *d)
  14.538  {
  14.539 -    PITState *pit = &d->arch.hvm_domain.pl_time.vpit;
  14.540 -
  14.541 -    kill_timer(&pit->pt[0].timer);
  14.542 +    PITState *pit = domain_vpit(d);
  14.543 +    destroy_periodic_time(&pit->pt[0]);
  14.544  }
  14.545  
  14.546  /* the intercept action for PIT DM retval:0--not handled; 1--handled */  
  14.547  static int handle_pit_io(ioreq_t *p)
  14.548  {
  14.549 -    struct vcpu *v = current;
  14.550 -    struct PITState *vpit = &(v->domain->arch.hvm_domain.pl_time.vpit);
  14.551 +    struct PITState *vpit = vcpu_vpit(current);
  14.552  
  14.553 -    if (p->size != 1 ||
  14.554 -        p->data_is_ptr ||
  14.555 -        p->type != IOREQ_TYPE_PIO){
  14.556 -        printk("HVM_PIT:wrong PIT IO!\n");
  14.557 +    if ( (p->size != 1) || p->data_is_ptr || (p->type != IOREQ_TYPE_PIO) )
  14.558 +    {
  14.559 +        gdprintk(XENLOG_WARNING, "HVM_PIT bad access\n");
  14.560          return 1;
  14.561      }
  14.562      
  14.563 -    if (p->dir == 0) {/* write */
  14.564 +    if ( p->dir == IOREQ_WRITE )
  14.565 +    {
  14.566          pit_ioport_write(vpit, p->addr, p->data);
  14.567 -    } else if (p->dir == 1) { /* read */
  14.568 -        if ( (p->addr & 3) != 3 ) {
  14.569 +    }
  14.570 +    else
  14.571 +    {
  14.572 +        if ( (p->addr & 3) != 3 )
  14.573              p->data = pit_ioport_read(vpit, p->addr);
  14.574 -        } else {
  14.575 -            printk("HVM_PIT: read A1:A0=3!\n");
  14.576 -        }
  14.577 +        else
  14.578 +            gdprintk(XENLOG_WARNING, "HVM_PIT: read A1:A0=3!\n");
  14.579      }
  14.580 +
  14.581      return 1;
  14.582  }
  14.583  
  14.584 -static void speaker_ioport_write(void *opaque, uint32_t addr, uint32_t val)
  14.585 +static void speaker_ioport_write(
  14.586 +    struct PITState *pit, uint32_t addr, uint32_t val)
  14.587  {
  14.588 -    PITState *pit = opaque;
  14.589      pit->hw.speaker_data_on = (val >> 1) & 1;
  14.590      pit_set_gate(pit, 2, val & 1);
  14.591  }
  14.592  
  14.593 -static uint32_t speaker_ioport_read(void *opaque, uint32_t addr)
  14.594 +static uint32_t speaker_ioport_read(
  14.595 +    struct PITState *pit, uint32_t addr)
  14.596  {
  14.597 -    PITState *pit = opaque;
  14.598 -    int out = pit_get_out(pit, 2,
  14.599 -                          hvm_get_guest_time(pit->pt[2].vcpu));
  14.600      /* Refresh clock toggles at about 15us. We approximate as 2^14ns. */
  14.601      unsigned int refresh_clock = ((unsigned int)NOW() >> 14) & 1;
  14.602      return ((pit->hw.speaker_data_on << 1) | pit_get_gate(pit, 2) |
  14.603 -            (out << 5) | refresh_clock << 4);
  14.604 +            (pit_get_out(pit, 2) << 5) | (refresh_clock << 4));
  14.605  }
  14.606  
  14.607  static int handle_speaker_io(ioreq_t *p)
  14.608  {
  14.609 -    struct vcpu *v = current;
  14.610 -    struct PITState *vpit = &(v->domain->arch.hvm_domain.pl_time.vpit);
  14.611 +    struct PITState *vpit = vcpu_vpit(current);
  14.612  
  14.613 -    if (p->size != 1 ||
  14.614 -        p->data_is_ptr ||
  14.615 -        p->type != IOREQ_TYPE_PIO){
  14.616 -        printk("HVM_SPEAKER:wrong SPEAKER IO!\n");
  14.617 +    if ( (p->size != 1) || p->data_is_ptr || (p->type != IOREQ_TYPE_PIO) )
  14.618 +    {
  14.619 +        gdprintk(XENLOG_WARNING, "HVM_SPEAKER bad access\n");
  14.620          return 1;
  14.621      }
  14.622  
  14.623 -    if (p->dir == 0) {/* write */
  14.624 +    if ( p->dir == IOREQ_WRITE )
  14.625          speaker_ioport_write(vpit, p->addr, p->data);
  14.626 -    } else if (p->dir == 1) {/* read */
  14.627 +    else
  14.628          p->data = speaker_ioport_read(vpit, p->addr);
  14.629 -    }
  14.630  
  14.631      return 1;
  14.632  }
  14.633 @@ -576,7 +555,7 @@ int pv_pit_handler(int port, int data, i
  14.634          .data = write ? data : 0,
  14.635      };
  14.636  
  14.637 -    if (port == 0x61)
  14.638 +    if ( port == 0x61 )
  14.639          handle_speaker_io(&ioreq);
  14.640      else
  14.641          handle_pit_io(&ioreq);
    15.1 --- a/xen/arch/x86/hvm/pmtimer.c	Fri Jun 15 12:38:35 2007 -0600
    15.2 +++ b/xen/arch/x86/hvm/pmtimer.c	Fri Jun 15 13:33:11 2007 -0600
    15.3 @@ -50,7 +50,6 @@
    15.4  #define TMR_VAL_MASK  (0xffffffff)
    15.5  #define TMR_VAL_MSB   (0x80000000)
    15.6  
    15.7 -
    15.8  /* Dispatch SCIs based on the PM1a_STS and PM1a_EN registers */
    15.9  static void pmt_update_sci(PMTState *s)
   15.10  {
   15.11 @@ -89,19 +88,19 @@ static void pmt_timer_callback(void *opa
   15.12      PMTState *s = opaque;
   15.13      uint32_t pmt_cycles_until_flip;
   15.14      uint64_t time_until_flip;
   15.15 -    
   15.16 +
   15.17      /* Recalculate the timer and make sure we get an SCI if we need one */
   15.18      pmt_update_time(s);
   15.19 -    
   15.20 +
   15.21      /* How close are we to the next MSB flip? */
   15.22      pmt_cycles_until_flip = TMR_VAL_MSB - (s->pm.tmr_val & (TMR_VAL_MSB - 1));
   15.23 -    
   15.24 +
   15.25      /* Overall time between MSB flips */
   15.26 -    time_until_flip = (1000000000ULL << 31) / FREQUENCE_PMTIMER;
   15.27 -    
   15.28 +    time_until_flip = (1000000000ULL << 23) / FREQUENCE_PMTIMER;
   15.29 +
   15.30      /* Reduced appropriately */
   15.31 -    time_until_flip = (time_until_flip * pmt_cycles_until_flip) / (1ULL<<31);
   15.32 -    
   15.33 +    time_until_flip = (time_until_flip * pmt_cycles_until_flip) >> 23;
   15.34 +
   15.35      /* Wake up again near the next bit-flip */
   15.36      set_timer(&s->timer, NOW() + time_until_flip + MILLISECS(1));
   15.37  }
    16.1 --- a/xen/arch/x86/hvm/rtc.c	Fri Jun 15 12:38:35 2007 -0600
    16.2 +++ b/xen/arch/x86/hvm/rtc.c	Fri Jun 15 13:33:11 2007 -0600
    16.3 @@ -28,7 +28,11 @@
    16.4  #include <asm/hvm/support.h>
    16.5  #include <asm/current.h>
    16.6  
    16.7 -/* #define DEBUG_RTC */
    16.8 +#define domain_vrtc(d)   (&(d)->arch.hvm_domain.pl_time.vrtc)
    16.9 +#define vcpu_vrtc(vcpu)  (domain_vrtc((vcpu)->domain))
   16.10 +#define vrtc_domain(rtc) (container_of((rtc), struct domain, \
   16.11 +                                       arch.hvm_domain.pl_time.vrtc))
   16.12 +#define vrtc_vcpu(rtc)   (vrtc_domain(rtc)->vcpu[0])
   16.13  
   16.14  void rtc_periodic_cb(struct vcpu *v, void *opaque)
   16.15  {
   16.16 @@ -41,31 +45,31 @@ int is_rtc_periodic_irq(void *opaque)
   16.17      RTCState *s = opaque;
   16.18  
   16.19      return !(s->hw.cmos_data[RTC_REG_C] & RTC_AF || 
   16.20 -           s->hw.cmos_data[RTC_REG_C] & RTC_UF);
   16.21 +             s->hw.cmos_data[RTC_REG_C] & RTC_UF);
   16.22  }
   16.23  
   16.24  /* Enable/configure/disable the periodic timer based on the RTC_PIE and
   16.25   * RTC_RATE_SELECT settings */
   16.26 -static void rtc_timer_update(RTCState *s, struct vcpu *v)
   16.27 +static void rtc_timer_update(RTCState *s)
   16.28  {
   16.29 -    int period_code; 
   16.30 -    int period;
   16.31 +    int period_code, period;
   16.32 +    struct vcpu *v = vrtc_vcpu(s);
   16.33  
   16.34      period_code = s->hw.cmos_data[RTC_REG_A] & RTC_RATE_SELECT;
   16.35      if ( (period_code != 0) && (s->hw.cmos_data[RTC_REG_B] & RTC_PIE) )
   16.36      {
   16.37          if ( period_code <= 2 )
   16.38              period_code += 7;
   16.39 -        
   16.40 +
   16.41          period = 1 << (period_code - 1); /* period in 32 Khz cycles */
   16.42          period = DIV_ROUND((period * 1000000000ULL), 32768); /* period in ns */
   16.43 -#ifdef DEBUG_RTC
   16.44 -        printk("HVM_RTC: period = %uns\n", period);
   16.45 -#endif
   16.46 -        create_periodic_time(v, &s->pt, period, RTC_IRQ, 0, rtc_periodic_cb, s);
   16.47 -    } 
   16.48 +        create_periodic_time(v, &s->pt, period, RTC_IRQ,
   16.49 +                             0, rtc_periodic_cb, s);
   16.50 +    }
   16.51      else
   16.52 +    {
   16.53          destroy_periodic_time(&s->pt);
   16.54 +    }
   16.55  }
   16.56  
   16.57  static void rtc_set_time(RTCState *s);
   16.58 @@ -80,14 +84,9 @@ static int rtc_ioport_write(void *opaque
   16.59          return (s->hw.cmos_index < RTC_CMOS_SIZE);
   16.60      }
   16.61  
   16.62 -    if (s->hw.cmos_index >= RTC_CMOS_SIZE)
   16.63 +    if ( s->hw.cmos_index >= RTC_CMOS_SIZE )
   16.64          return 0;
   16.65  
   16.66 -#ifdef DEBUG_RTC
   16.67 -    printk("HVM_RTC: write index=0x%02x val=0x%02x\n",
   16.68 -           s->hw.cmos_index, data);
   16.69 -#endif
   16.70 -
   16.71      switch ( s->hw.cmos_index )
   16.72      {
   16.73      case RTC_SECONDS_ALARM:
   16.74 @@ -111,7 +110,7 @@ static int rtc_ioport_write(void *opaque
   16.75          /* UIP bit is read only */
   16.76          s->hw.cmos_data[RTC_REG_A] = (data & ~RTC_UIP) |
   16.77              (s->hw.cmos_data[RTC_REG_A] & RTC_UIP);
   16.78 -        rtc_timer_update(s, current);
   16.79 +        rtc_timer_update(s);
   16.80          break;
   16.81      case RTC_REG_B:
   16.82          if ( data & RTC_SET )
   16.83 @@ -127,7 +126,7 @@ static int rtc_ioport_write(void *opaque
   16.84                  rtc_set_time(s);
   16.85          }
   16.86          s->hw.cmos_data[RTC_REG_B] = data;
   16.87 -        rtc_timer_update(s, current);
   16.88 +        rtc_timer_update(s);
   16.89          break;
   16.90      case RTC_REG_C:
   16.91      case RTC_REG_D:
   16.92 @@ -181,10 +180,12 @@ static void rtc_set_time(RTCState *s)
   16.93  static void rtc_copy_date(RTCState *s)
   16.94  {
   16.95      const struct tm *tm = &s->current_tm;
   16.96 +    struct domain *d = vrtc_domain(s);
   16.97  
   16.98 -    if (s->time_offset_seconds != s->pt.vcpu->domain->time_offset_seconds) {
   16.99 -        s->current_tm = gmtime(get_localtime(s->pt.vcpu->domain));
  16.100 -        s->time_offset_seconds = s->pt.vcpu->domain->time_offset_seconds;
  16.101 +    if ( s->time_offset_seconds != d->time_offset_seconds )
  16.102 +    {
  16.103 +        s->current_tm = gmtime(get_localtime(d));
  16.104 +        s->time_offset_seconds = d->time_offset_seconds;
  16.105      }
  16.106  
  16.107      s->hw.cmos_data[RTC_SECONDS] = to_bcd(s, tm->tm_sec);
  16.108 @@ -228,34 +229,43 @@ static void rtc_next_second(RTCState *s)
  16.109  {
  16.110      struct tm *tm = &s->current_tm;
  16.111      int days_in_month;
  16.112 +    struct domain *d = vrtc_domain(s);
  16.113  
  16.114 -    if (s->time_offset_seconds != s->pt.vcpu->domain->time_offset_seconds) {
  16.115 -        s->current_tm = gmtime(get_localtime(s->pt.vcpu->domain));
  16.116 -        s->time_offset_seconds = s->pt.vcpu->domain->time_offset_seconds;
  16.117 +    if ( s->time_offset_seconds != d->time_offset_seconds )
  16.118 +    {
  16.119 +        s->current_tm = gmtime(get_localtime(d));
  16.120 +        s->time_offset_seconds = d->time_offset_seconds;
  16.121      }
  16.122  
  16.123      tm->tm_sec++;
  16.124 -    if ((unsigned)tm->tm_sec >= 60) {
  16.125 +    if ( (unsigned)tm->tm_sec >= 60 )
  16.126 +    {
  16.127          tm->tm_sec = 0;
  16.128          tm->tm_min++;
  16.129 -        if ((unsigned)tm->tm_min >= 60) {
  16.130 +        if ( (unsigned)tm->tm_min >= 60 )
  16.131 +        {
  16.132              tm->tm_min = 0;
  16.133              tm->tm_hour++;
  16.134 -            if ((unsigned)tm->tm_hour >= 24) {
  16.135 +            if ( (unsigned)tm->tm_hour >= 24 )
  16.136 +            {
  16.137                  tm->tm_hour = 0;
  16.138                  /* next day */
  16.139                  tm->tm_wday++;
  16.140 -                if ((unsigned)tm->tm_wday >= 7)
  16.141 +                if ( (unsigned)tm->tm_wday >= 7 )
  16.142                      tm->tm_wday = 0;
  16.143                  days_in_month = get_days_in_month(tm->tm_mon, 
  16.144                                                    tm->tm_year + 1900);
  16.145                  tm->tm_mday++;
  16.146 -                if (tm->tm_mday < 1) {
  16.147 +                if ( tm->tm_mday < 1 )
  16.148 +                {
  16.149                      tm->tm_mday = 1;
  16.150 -                } else if (tm->tm_mday > days_in_month) {
  16.151 +                }
  16.152 +                else if ( tm->tm_mday > days_in_month )
  16.153 +                {
  16.154                      tm->tm_mday = 1;
  16.155                      tm->tm_mon++;
  16.156 -                    if (tm->tm_mon >= 12) {
  16.157 +                    if ( tm->tm_mon >= 12 )
  16.158 +                    {
  16.159                          tm->tm_mon = 0;
  16.160                          tm->tm_year++;
  16.161                      }
  16.162 @@ -290,6 +300,7 @@ static void rtc_update_second(void *opaq
  16.163  static void rtc_update_second2(void *opaque)
  16.164  {
  16.165      RTCState *s = opaque;
  16.166 +    struct domain *d = vrtc_domain(s);
  16.167  
  16.168      if ( !(s->hw.cmos_data[RTC_REG_B] & RTC_SET) )
  16.169          rtc_copy_date(s);
  16.170 @@ -308,8 +319,8 @@ static void rtc_update_second2(void *opa
  16.171                s->current_tm.tm_hour) )
  16.172          {
  16.173              s->hw.cmos_data[RTC_REG_C] |= 0xa0; 
  16.174 -            hvm_isa_irq_deassert(s->pt.vcpu->domain, RTC_IRQ);
  16.175 -            hvm_isa_irq_assert(s->pt.vcpu->domain, RTC_IRQ);
  16.176 +            hvm_isa_irq_deassert(d, RTC_IRQ);
  16.177 +            hvm_isa_irq_assert(d, RTC_IRQ);
  16.178          }
  16.179      }
  16.180  
  16.181 @@ -317,8 +328,8 @@ static void rtc_update_second2(void *opa
  16.182      if ( s->hw.cmos_data[RTC_REG_B] & RTC_UIE )
  16.183      {
  16.184          s->hw.cmos_data[RTC_REG_C] |= 0x90; 
  16.185 -        hvm_isa_irq_deassert(s->pt.vcpu->domain, RTC_IRQ);
  16.186 -        hvm_isa_irq_assert(s->pt.vcpu->domain, RTC_IRQ);
  16.187 +        hvm_isa_irq_deassert(d, RTC_IRQ);
  16.188 +        hvm_isa_irq_assert(d, RTC_IRQ);
  16.189      }
  16.190  
  16.191      /* clear update in progress bit */
  16.192 @@ -352,39 +363,33 @@ static uint32_t rtc_ioport_read(void *op
  16.193          break;
  16.194      case RTC_REG_C:
  16.195          ret = s->hw.cmos_data[s->hw.cmos_index];
  16.196 -        hvm_isa_irq_deassert(s->pt.vcpu->domain, RTC_IRQ);
  16.197 -        s->hw.cmos_data[RTC_REG_C] = 0x00; 
  16.198 +        hvm_isa_irq_deassert(vrtc_domain(s), RTC_IRQ);
  16.199 +        s->hw.cmos_data[RTC_REG_C] = 0x00;
  16.200          break;
  16.201      default:
  16.202          ret = s->hw.cmos_data[s->hw.cmos_index];
  16.203          break;
  16.204      }
  16.205  
  16.206 -#ifdef DEBUG_RTC
  16.207 -    printk("HVM_RTC: read index=0x%02x val=0x%02x\n",
  16.208 -           s->hw.cmos_index, ret);
  16.209 -#endif
  16.210 -
  16.211      return ret;
  16.212  }
  16.213  
  16.214  static int handle_rtc_io(ioreq_t *p)
  16.215  {
  16.216 -    struct vcpu *v = current;
  16.217 -    struct RTCState *vrtc = &v->domain->arch.hvm_domain.pl_time.vrtc;
  16.218 +    struct RTCState *vrtc = vcpu_vrtc(current);
  16.219  
  16.220      if ( (p->size != 1) || p->data_is_ptr || (p->type != IOREQ_TYPE_PIO) )
  16.221      {
  16.222 -        printk("HVM_RTC: wrong RTC IO!\n");
  16.223 +        gdprintk(XENLOG_WARNING, "HVM_RTC bas access\n");
  16.224          return 1;
  16.225      }
  16.226      
  16.227 -    if ( p->dir == 0 ) /* write */
  16.228 +    if ( p->dir == IOREQ_WRITE )
  16.229      {
  16.230          if ( rtc_ioport_write(vrtc, p->addr, p->data & 0xFF) )
  16.231              return 1;
  16.232      }
  16.233 -    else if ( (p->dir == 1) && (vrtc->hw.cmos_index < RTC_CMOS_SIZE) ) /* read */
  16.234 +    else if ( vrtc->hw.cmos_index < RTC_CMOS_SIZE )
  16.235      {
  16.236          p->data = rtc_ioport_read(vrtc, p->addr);
  16.237          return 1;
  16.238 @@ -393,15 +398,12 @@ static int handle_rtc_io(ioreq_t *p)
  16.239      return 0;
  16.240  }
  16.241  
  16.242 -/* Move the RTC timers on to this vcpu's current cpu */
  16.243  void rtc_migrate_timers(struct vcpu *v)
  16.244  {
  16.245 -    RTCState *s = &v->domain->arch.hvm_domain.pl_time.vrtc;
  16.246 +    RTCState *s = vcpu_vrtc(v);
  16.247  
  16.248 -    if ( s->pt.vcpu == v )
  16.249 +    if ( v->vcpu_id == 0 )
  16.250      {
  16.251 -        if ( s->pt.enabled )
  16.252 -            migrate_timer(&s->pt.timer, v->processor);
  16.253          migrate_timer(&s->second_timer, v->processor);
  16.254          migrate_timer(&s->second_timer2, v->processor);
  16.255      }
  16.256 @@ -410,13 +412,14 @@ void rtc_migrate_timers(struct vcpu *v)
  16.257  /* Save RTC hardware state */
  16.258  static int rtc_save(struct domain *d, hvm_domain_context_t *h)
  16.259  {
  16.260 -    return hvm_save_entry(RTC, 0, h, &d->arch.hvm_domain.pl_time.vrtc.hw);
  16.261 +    RTCState *s = domain_vrtc(d);
  16.262 +    return hvm_save_entry(RTC, 0, h, &s->hw);
  16.263  }
  16.264  
  16.265  /* Reload the hardware state from a saved domain */
  16.266  static int rtc_load(struct domain *d, hvm_domain_context_t *h)
  16.267  {
  16.268 -    RTCState *s = &d->arch.hvm_domain.pl_time.vrtc;    
  16.269 +    RTCState *s = domain_vrtc(d);
  16.270  
  16.271      /* Restore the registers */
  16.272      if ( hvm_load_entry(RTC, h, &s->hw) != 0 )
  16.273 @@ -431,7 +434,7 @@ static int rtc_load(struct domain *d, hv
  16.274      set_timer(&s->second_timer2, s->next_second_time);
  16.275  
  16.276      /* Reset the periodic interrupt timer based on the registers */
  16.277 -    rtc_timer_update(s, d->vcpu[0]);
  16.278 +    rtc_timer_update(s);
  16.279  
  16.280      return 0;
  16.281  }
  16.282 @@ -441,9 +444,8 @@ HVM_REGISTER_SAVE_RESTORE(RTC, rtc_save,
  16.283  
  16.284  void rtc_init(struct vcpu *v, int base)
  16.285  {
  16.286 -    RTCState *s = &v->domain->arch.hvm_domain.pl_time.vrtc;
  16.287 +    RTCState *s = vcpu_vrtc(v);
  16.288  
  16.289 -    s->pt.vcpu = v;
  16.290      s->hw.cmos_data[RTC_REG_A] = RTC_REF_CLCK_32KHZ | 6; /* ~1kHz */
  16.291      s->hw.cmos_data[RTC_REG_B] = RTC_24H;
  16.292      s->hw.cmos_data[RTC_REG_C] = 0;
  16.293 @@ -452,7 +454,6 @@ void rtc_init(struct vcpu *v, int base)
  16.294      s->current_tm = gmtime(get_localtime(v->domain));
  16.295      rtc_copy_date(s);
  16.296  
  16.297 -    init_timer(&s->pt.timer, pt_timer_fn, &s->pt, v->processor);
  16.298      init_timer(&s->second_timer, rtc_update_second, s, v->processor);
  16.299      init_timer(&s->second_timer2, rtc_update_second2, s, v->processor);
  16.300  
  16.301 @@ -464,9 +465,9 @@ void rtc_init(struct vcpu *v, int base)
  16.302  
  16.303  void rtc_deinit(struct domain *d)
  16.304  {
  16.305 -    RTCState *s = &d->arch.hvm_domain.pl_time.vrtc;
  16.306 +    RTCState *s = domain_vrtc(d);
  16.307  
  16.308 -    kill_timer(&s->pt.timer);
  16.309 +    destroy_periodic_time(&s->pt);
  16.310      kill_timer(&s->second_timer);
  16.311      kill_timer(&s->second_timer2);
  16.312  }
    17.1 --- a/xen/arch/x86/hvm/svm/svm.c	Fri Jun 15 12:38:35 2007 -0600
    17.2 +++ b/xen/arch/x86/hvm/svm/svm.c	Fri Jun 15 13:33:11 2007 -0600
    17.3 @@ -2089,6 +2089,15 @@ static inline void svm_do_msr_access(
    17.4              msr_content = 1ULL << 61; /* MC4_MISC.Locked */
    17.5              break;
    17.6  
    17.7 +        case MSR_IA32_EBC_FREQUENCY_ID:
    17.8 +            /*
    17.9 +             * This Intel-only register may be accessed if this HVM guest
   17.10 +             * has been migrated from an Intel host. The value zero is not
   17.11 +             * particularly meaningful, but at least avoids the guest crashing!
   17.12 +             */
   17.13 +            msr_content = 0;
   17.14 +            break;
   17.15 +
   17.16          default:
   17.17              if ( rdmsr_hypervisor_regs(ecx, &eax, &edx) ||
   17.18                   rdmsr_safe(ecx, eax, edx) == 0 )
    18.1 --- a/xen/arch/x86/hvm/vlapic.c	Fri Jun 15 12:38:35 2007 -0600
    18.2 +++ b/xen/arch/x86/hvm/vlapic.c	Fri Jun 15 13:33:11 2007 -0600
    18.3 @@ -944,8 +944,6 @@ int vlapic_init(struct vcpu *v)
    18.4      if ( v->vcpu_id == 0 )
    18.5          vlapic->hw.apic_base_msr |= MSR_IA32_APICBASE_BSP;
    18.6  
    18.7 -    init_timer(&vlapic->pt.timer, pt_timer_fn, &vlapic->pt, v->processor);
    18.8 -
    18.9      return 0;
   18.10  }
   18.11  
   18.12 @@ -953,7 +951,7 @@ void vlapic_destroy(struct vcpu *v)
   18.13  {
   18.14      struct vlapic *vlapic = vcpu_vlapic(v);
   18.15  
   18.16 -    kill_timer(&vlapic->pt.timer);
   18.17 +    destroy_periodic_time(&vlapic->pt);
   18.18      unmap_domain_page_global(vlapic->regs);
   18.19      free_domheap_page(vlapic->regs_page);
   18.20  }
    19.1 --- a/xen/arch/x86/hvm/vmx/intr.c	Fri Jun 15 12:38:35 2007 -0600
    19.2 +++ b/xen/arch/x86/hvm/vmx/intr.c	Fri Jun 15 13:33:11 2007 -0600
    19.3 @@ -1,6 +1,7 @@
    19.4  /*
    19.5 - * io.c: handling I/O, interrupts related VMX entry/exit
    19.6 + * intr.c: handling I/O, interrupts related VMX entry/exit
    19.7   * Copyright (c) 2004, Intel Corporation.
    19.8 + * Copyright (c) 2004-2007, XenSource Inc.
    19.9   *
   19.10   * This program is free software; you can redistribute it and/or modify it
   19.11   * under the terms and conditions of the GNU General Public License,
   19.12 @@ -14,7 +15,6 @@
   19.13   * You should have received a copy of the GNU General Public License along with
   19.14   * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
   19.15   * Place - Suite 330, Boston, MA 02111-1307 USA.
   19.16 - *
   19.17   */
   19.18  
   19.19  #include <xen/config.h>
   19.20 @@ -24,7 +24,6 @@
   19.21  #include <xen/errno.h>
   19.22  #include <xen/trace.h>
   19.23  #include <xen/event.h>
   19.24 -
   19.25  #include <asm/current.h>
   19.26  #include <asm/cpufeature.h>
   19.27  #include <asm/processor.h>
   19.28 @@ -39,34 +38,50 @@
   19.29  #include <public/hvm/ioreq.h>
   19.30  #include <asm/hvm/trace.h>
   19.31  
   19.32 +/*
   19.33 + * A few notes on virtual NMI and INTR delivery, and interactions with
   19.34 + * interruptibility states:
   19.35 + * 
   19.36 + * We can only inject an ExtInt if EFLAGS.IF = 1 and no blocking by
   19.37 + * STI nor MOV SS. Otherwise the VM entry fails. The 'virtual interrupt
   19.38 + * pending' control causes a VM exit when all these checks succeed. It will
   19.39 + * exit immediately after VM entry if the checks succeed at that point.
   19.40 + * 
   19.41 + * We can only inject an NMI if no blocking by MOV SS (also, depending on
   19.42 + * implementation, if no blocking by STI). If pin-based 'virtual NMIs'
   19.43 + * control is specified then the NMI-blocking interruptibility flag is
   19.44 + * also checked. The 'virtual NMI pending' control (available only in
   19.45 + * conjunction with 'virtual NMIs') causes a VM exit when all these checks
   19.46 + * succeed. It will exit immediately after VM entry if the checks succeed
   19.47 + * at that point.
   19.48 + * 
   19.49 + * Because a processor may or may not check blocking-by-STI when injecting
   19.50 + * a virtual NMI, it will be necessary to convert that to block-by-MOV-SS
   19.51 + * before specifying the 'virtual NMI pending' control. Otherwise we could
   19.52 + * enter an infinite loop where we check blocking-by-STI in software and
   19.53 + * thus delay delivery of a virtual NMI, but the processor causes immediate
   19.54 + * VM exit because it does not check blocking-by-STI.
   19.55 + * 
   19.56 + * Injecting a virtual NMI sets the NMI-blocking interruptibility flag only
   19.57 + * if the 'virtual NMIs' control is set. Injecting *any* kind of event clears
   19.58 + * the STI- and MOV-SS-blocking interruptibility-state flags.
   19.59 + * 
   19.60 + * If MOV/POP SS is executed while MOV-SS-blocking is in effect, the effect
   19.61 + * is cleared. If STI is executed while MOV-SS- or STI-blocking is in effect,
   19.62 + * the effect is cleared. (i.e., MOV-SS-blocking 'dominates' STI-blocking).
   19.63 + */
   19.64  
   19.65 -static inline void
   19.66 -enable_irq_window(struct vcpu *v)
   19.67 +static void enable_irq_window(struct vcpu *v)
   19.68  {
   19.69      u32  *cpu_exec_control = &v->arch.hvm_vcpu.u.vmx.exec_control;
   19.70      
   19.71 -    if (!(*cpu_exec_control & CPU_BASED_VIRTUAL_INTR_PENDING)) {
   19.72 +    if ( !(*cpu_exec_control & CPU_BASED_VIRTUAL_INTR_PENDING) )
   19.73 +    {
   19.74          *cpu_exec_control |= CPU_BASED_VIRTUAL_INTR_PENDING;
   19.75          __vmwrite(CPU_BASED_VM_EXEC_CONTROL, *cpu_exec_control);
   19.76      }
   19.77  }
   19.78  
   19.79 -static inline void
   19.80 -disable_irq_window(struct vcpu *v)
   19.81 -{
   19.82 -    u32  *cpu_exec_control = &v->arch.hvm_vcpu.u.vmx.exec_control;
   19.83 -    
   19.84 -    if ( *cpu_exec_control & CPU_BASED_VIRTUAL_INTR_PENDING ) {
   19.85 -        *cpu_exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING;
   19.86 -        __vmwrite(CPU_BASED_VM_EXEC_CONTROL, *cpu_exec_control);
   19.87 -    }
   19.88 -}
   19.89 -
   19.90 -static inline int is_interruptibility_state(void)
   19.91 -{
   19.92 -    return __vmread(GUEST_INTERRUPTIBILITY_INFO);
   19.93 -}
   19.94 -
   19.95  static void update_tpr_threshold(struct vlapic *vlapic)
   19.96  {
   19.97      int max_irr, tpr;
   19.98 @@ -87,13 +102,11 @@ static void update_tpr_threshold(struct 
   19.99  
  19.100  asmlinkage void vmx_intr_assist(void)
  19.101  {
  19.102 -    int intr_type = 0;
  19.103 -    int intr_vector;
  19.104 -    unsigned long eflags;
  19.105 +    int has_ext_irq, intr_vector, intr_type = 0;
  19.106 +    unsigned long eflags, intr_shadow;
  19.107      struct vcpu *v = current;
  19.108      unsigned int idtv_info_field;
  19.109      unsigned long inst_len;
  19.110 -    int    has_ext_irq;
  19.111  
  19.112      pt_update_irq(v);
  19.113  
  19.114 @@ -125,10 +138,10 @@ asmlinkage void vmx_intr_assist(void)
  19.115          inst_len = __vmread(VM_EXIT_INSTRUCTION_LEN); /* Safe */
  19.116          __vmwrite(VM_ENTRY_INSTRUCTION_LEN, inst_len);
  19.117  
  19.118 -        if (unlikely(idtv_info_field & 0x800)) /* valid error code */
  19.119 +        if ( unlikely(idtv_info_field & 0x800) ) /* valid error code */
  19.120              __vmwrite(VM_ENTRY_EXCEPTION_ERROR_CODE,
  19.121                        __vmread(IDT_VECTORING_ERROR_CODE));
  19.122 -        if (unlikely(has_ext_irq))
  19.123 +        if ( unlikely(has_ext_irq) )
  19.124              enable_irq_window(v);
  19.125  
  19.126          HVM_DBG_LOG(DBG_LEVEL_1, "idtv_info_field=%x", idtv_info_field);
  19.127 @@ -138,9 +151,9 @@ asmlinkage void vmx_intr_assist(void)
  19.128      if ( likely(!has_ext_irq) )
  19.129          return;
  19.130  
  19.131 -    if ( unlikely(is_interruptibility_state()) )
  19.132 +    intr_shadow = __vmread(GUEST_INTERRUPTIBILITY_INFO);
  19.133 +    if ( unlikely(intr_shadow & (VMX_INTR_SHADOW_STI|VMX_INTR_SHADOW_MOV_SS)) )
  19.134      {
  19.135 -        /* pre-cleared for emulated instruction */
  19.136          enable_irq_window(v);
  19.137          HVM_DBG_LOG(DBG_LEVEL_1, "interruptibility");
  19.138          return;
    20.1 --- a/xen/arch/x86/hvm/vmx/vmcs.c	Fri Jun 15 12:38:35 2007 -0600
    20.2 +++ b/xen/arch/x86/hvm/vmx/vmcs.c	Fri Jun 15 13:33:11 2007 -0600
    20.3 @@ -70,8 +70,9 @@ void vmx_init_vmcs_config(void)
    20.4      u32 _vmx_vmexit_control;
    20.5      u32 _vmx_vmentry_control;
    20.6  
    20.7 -    min = PIN_BASED_EXT_INTR_MASK | PIN_BASED_NMI_EXITING;
    20.8 -    opt = 0;
    20.9 +    min = (PIN_BASED_EXT_INTR_MASK |
   20.10 +           PIN_BASED_NMI_EXITING);
   20.11 +    opt = 0; /*PIN_BASED_VIRTUAL_NMIS*/
   20.12      _vmx_pin_based_exec_control = adjust_vmx_controls(
   20.13          min, opt, MSR_IA32_VMX_PINBASED_CTLS);
   20.14  
    21.1 --- a/xen/arch/x86/hvm/vmx/vmx.c	Fri Jun 15 12:38:35 2007 -0600
    21.2 +++ b/xen/arch/x86/hvm/vmx/vmx.c	Fri Jun 15 13:33:11 2007 -0600
    21.3 @@ -1300,11 +1300,17 @@ static int __get_instruction_length(void
    21.4  
    21.5  static void inline __update_guest_eip(unsigned long inst_len)
    21.6  {
    21.7 -    unsigned long current_eip;
    21.8 +    unsigned long current_eip, intr_shadow;
    21.9  
   21.10      current_eip = __vmread(GUEST_RIP);
   21.11      __vmwrite(GUEST_RIP, current_eip + inst_len);
   21.12 -    __vmwrite(GUEST_INTERRUPTIBILITY_INFO, 0);
   21.13 +
   21.14 +    intr_shadow = __vmread(GUEST_INTERRUPTIBILITY_INFO);
   21.15 +    if ( intr_shadow & (VMX_INTR_SHADOW_STI | VMX_INTR_SHADOW_MOV_SS) )
   21.16 +    {
   21.17 +        intr_shadow &= ~(VMX_INTR_SHADOW_STI | VMX_INTR_SHADOW_MOV_SS);
   21.18 +        __vmwrite(GUEST_INTERRUPTIBILITY_INFO, intr_shadow);
   21.19 +    }
   21.20  }
   21.21  
   21.22  static void vmx_do_no_device_fault(void)
   21.23 @@ -2902,12 +2908,18 @@ asmlinkage void vmx_vmexit_handler(struc
   21.24      case EXIT_REASON_TRIPLE_FAULT:
   21.25          hvm_triple_fault();
   21.26          break;
   21.27 -    case EXIT_REASON_PENDING_INTERRUPT:
   21.28 +    case EXIT_REASON_PENDING_VIRT_INTR:
   21.29          /* Disable the interrupt window. */
   21.30          v->arch.hvm_vcpu.u.vmx.exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING;
   21.31          __vmwrite(CPU_BASED_VM_EXEC_CONTROL,
   21.32                    v->arch.hvm_vcpu.u.vmx.exec_control);
   21.33          break;
   21.34 +    case EXIT_REASON_PENDING_VIRT_NMI:
   21.35 +        /* Disable the NMI window. */
   21.36 +        v->arch.hvm_vcpu.u.vmx.exec_control &= ~CPU_BASED_VIRTUAL_NMI_PENDING;
   21.37 +        __vmwrite(CPU_BASED_VM_EXEC_CONTROL,
   21.38 +                  v->arch.hvm_vcpu.u.vmx.exec_control);
   21.39 +        break;
   21.40      case EXIT_REASON_TASK_SWITCH:
   21.41          goto exit_and_crash;
   21.42      case EXIT_REASON_CPUID:
    22.1 --- a/xen/arch/x86/hvm/vpt.c	Fri Jun 15 12:38:35 2007 -0600
    22.2 +++ b/xen/arch/x86/hvm/vpt.c	Fri Jun 15 13:33:11 2007 -0600
    22.3 @@ -22,31 +22,31 @@
    22.4  #include <asm/hvm/vpt.h>
    22.5  #include <asm/event.h>
    22.6  
    22.7 -static __inline__ void missed_ticks(struct periodic_time *pt)
    22.8 +static void missed_ticks(struct periodic_time *pt)
    22.9  {
   22.10      s_time_t missed_ticks;
   22.11  
   22.12      missed_ticks = NOW() - pt->scheduled;
   22.13 -    if ( missed_ticks > 0 ) 
   22.14 +    if ( missed_ticks <= 0 )
   22.15 +        return;
   22.16 +
   22.17 +    missed_ticks = missed_ticks / (s_time_t) pt->period + 1;
   22.18 +    if ( missed_ticks > 1000 )
   22.19      {
   22.20 -        missed_ticks = missed_ticks / (s_time_t) pt->period + 1;
   22.21 -        if ( missed_ticks > 1000 )
   22.22 -        {
   22.23 -            /* TODO: Adjust guest time together */
   22.24 -            pt->pending_intr_nr++;
   22.25 -        }
   22.26 -        else
   22.27 -        {
   22.28 -            pt->pending_intr_nr += missed_ticks;
   22.29 -        }
   22.30 -        pt->scheduled += missed_ticks * pt->period;
   22.31 +        /* TODO: Adjust guest time together */
   22.32 +        pt->pending_intr_nr++;
   22.33      }
   22.34 +    else
   22.35 +    {
   22.36 +        pt->pending_intr_nr += missed_ticks;
   22.37 +    }
   22.38 +
   22.39 +    pt->scheduled += missed_ticks * pt->period;
   22.40  }
   22.41  
   22.42  void pt_freeze_time(struct vcpu *v)
   22.43  {
   22.44      struct list_head *head = &v->arch.hvm_vcpu.tm_list;
   22.45 -    struct list_head *list;
   22.46      struct periodic_time *pt;
   22.47  
   22.48      if ( test_bit(_VPF_blocked, &v->pause_flags) )
   22.49 @@ -54,17 +54,13 @@ void pt_freeze_time(struct vcpu *v)
   22.50  
   22.51      v->arch.hvm_vcpu.guest_time = hvm_get_guest_time(v);
   22.52  
   22.53 -    list_for_each( list, head )
   22.54 -    {
   22.55 -        pt = list_entry(list, struct periodic_time, list);
   22.56 +    list_for_each_entry ( pt, head, list )
   22.57          stop_timer(&pt->timer);
   22.58 -    }
   22.59  }
   22.60  
   22.61  void pt_thaw_time(struct vcpu *v)
   22.62  {
   22.63      struct list_head *head = &v->arch.hvm_vcpu.tm_list;
   22.64 -    struct list_head *list;
   22.65      struct periodic_time *pt;
   22.66  
   22.67      if ( v->arch.hvm_vcpu.guest_time )
   22.68 @@ -72,17 +68,15 @@ void pt_thaw_time(struct vcpu *v)
   22.69          hvm_set_guest_time(v, v->arch.hvm_vcpu.guest_time);
   22.70          v->arch.hvm_vcpu.guest_time = 0;
   22.71  
   22.72 -        list_for_each( list, head )
   22.73 +        list_for_each_entry ( pt, head, list )
   22.74          {
   22.75 -            pt = list_entry(list, struct periodic_time, list);
   22.76              missed_ticks(pt);
   22.77              set_timer(&pt->timer, pt->scheduled);
   22.78          }
   22.79      }
   22.80  }
   22.81  
   22.82 -/* Hook function for the platform periodic time */
   22.83 -void pt_timer_fn(void *data)
   22.84 +static void pt_timer_fn(void *data)
   22.85  {
   22.86      struct periodic_time *pt = data;
   22.87  
   22.88 @@ -100,14 +94,12 @@ void pt_timer_fn(void *data)
   22.89  void pt_update_irq(struct vcpu *v)
   22.90  {
   22.91      struct list_head *head = &v->arch.hvm_vcpu.tm_list;
   22.92 -    struct list_head *list;
   22.93      struct periodic_time *pt;
   22.94      uint64_t max_lag = -1ULL;
   22.95      int irq = -1;
   22.96  
   22.97 -    list_for_each( list, head )
   22.98 +    list_for_each_entry ( pt, head, list )
   22.99      {
  22.100 -        pt = list_entry(list, struct periodic_time, list);
  22.101          if ( !is_isa_irq_masked(v, pt->irq) && pt->pending_intr_nr &&
  22.102               ((pt->last_plt_gtime + pt->period_cycles) < max_lag) )
  22.103          {
  22.104 @@ -130,14 +122,12 @@ void pt_update_irq(struct vcpu *v)
  22.105  struct periodic_time *is_pt_irq(struct vcpu *v, int vector, int type)
  22.106  {
  22.107      struct list_head *head = &v->arch.hvm_vcpu.tm_list;
  22.108 -    struct list_head *list;
  22.109      struct periodic_time *pt;
  22.110      struct RTCState *rtc = &v->domain->arch.hvm_domain.pl_time.vrtc;
  22.111      int vec;
  22.112  
  22.113 -    list_for_each( list, head )
  22.114 +    list_for_each_entry ( pt, head, list )
  22.115      {
  22.116 -        pt = list_entry(list, struct periodic_time, list);
  22.117          if ( !pt->pending_intr_nr )
  22.118              continue;
  22.119  
  22.120 @@ -177,17 +167,14 @@ void pt_intr_post(struct vcpu *v, int ve
  22.121          pt->cb(pt->vcpu, pt->priv);
  22.122  }
  22.123  
  22.124 -/* If pt is enabled, discard pending intr */
  22.125  void pt_reset(struct vcpu *v)
  22.126  {
  22.127      struct list_head *head = &v->arch.hvm_vcpu.tm_list;
  22.128 -    struct list_head *list;
  22.129      struct periodic_time *pt;
  22.130  
  22.131 -    list_for_each( list, head )
  22.132 +    list_for_each_entry ( pt, head, list )
  22.133      {
  22.134 -	pt = list_entry(list, struct periodic_time, list);
  22.135 -	if ( pt->enabled )
  22.136 +        if ( pt->enabled )
  22.137          {
  22.138              pt->pending_intr_nr = 0;
  22.139              pt->last_plt_gtime = hvm_get_guest_time(pt->vcpu);
  22.140 @@ -197,11 +184,25 @@ void pt_reset(struct vcpu *v)
  22.141      }
  22.142  }
  22.143  
  22.144 -void create_periodic_time(struct vcpu *v, struct periodic_time *pt, uint64_t period,
  22.145 -                          uint8_t irq, char one_shot, time_cb *cb, void *data)
  22.146 +void pt_migrate(struct vcpu *v)
  22.147 +{
  22.148 +    struct list_head *head = &v->arch.hvm_vcpu.tm_list;
  22.149 +    struct periodic_time *pt;
  22.150 +
  22.151 +    list_for_each_entry ( pt, head, list )
  22.152 +    {
  22.153 +        if ( pt->enabled )
  22.154 +            migrate_timer(&pt->timer, v->processor);
  22.155 +    }
  22.156 +}
  22.157 +
  22.158 +void create_periodic_time(
  22.159 +    struct vcpu *v, struct periodic_time *pt, uint64_t period,
  22.160 +    uint8_t irq, char one_shot, time_cb *cb, void *data)
  22.161  {
  22.162      destroy_periodic_time(pt);
  22.163  
  22.164 +    init_timer(&pt->timer, pt_timer_fn, pt, v->processor);
  22.165      pt->enabled = 1;
  22.166      if ( period < 900000 ) /* < 0.9 ms */
  22.167      {
  22.168 @@ -226,11 +227,11 @@ void create_periodic_time(struct vcpu *v
  22.169  
  22.170  void destroy_periodic_time(struct periodic_time *pt)
  22.171  {
  22.172 -    if ( pt->enabled )
  22.173 -    {
  22.174 -        pt->enabled = 0;
  22.175 -        pt->pending_intr_nr = 0;
  22.176 -        list_del(&pt->list);
  22.177 -        stop_timer(&pt->timer);
  22.178 -    }
  22.179 +    if ( !pt->enabled )
  22.180 +        return;
  22.181 +
  22.182 +    pt->enabled = 0;
  22.183 +    pt->pending_intr_nr = 0;
  22.184 +    list_del(&pt->list);
  22.185 +    kill_timer(&pt->timer);
  22.186  }
    23.1 --- a/xen/arch/x86/io_apic.c	Fri Jun 15 12:38:35 2007 -0600
    23.2 +++ b/xen/arch/x86/io_apic.c	Fri Jun 15 13:33:11 2007 -0600
    23.3 @@ -2114,6 +2114,15 @@ int ioapic_guest_write(unsigned long phy
    23.4          return 0;
    23.5      }
    23.6  
    23.7 +    /* Special delivery modes (SMI,NMI,INIT,ExtInt) should have no vector.  */
    23.8 +    if ( (old_rte.delivery_mode > dest_LowestPrio) && (old_rte.vector != 0) )
    23.9 +    {
   23.10 +        WARN_BOGUS_WRITE("Special delivery mode %d with non-zero vector "
   23.11 +                         "%02x\n", old_rte.delivery_mode, old_rte.vector);
   23.12 +        /* Nobble the vector here as it does not relate to a valid irq. */
   23.13 +        old_rte.vector = 0;
   23.14 +    }
   23.15 +
   23.16      if ( old_rte.vector >= FIRST_DYNAMIC_VECTOR )
   23.17          old_irq = vector_irq[old_rte.vector];
   23.18      if ( new_rte.vector >= FIRST_DYNAMIC_VECTOR )
    24.1 --- a/xen/include/asm-x86/hvm/vmx/vmcs.h	Fri Jun 15 12:38:35 2007 -0600
    24.2 +++ b/xen/include/asm-x86/hvm/vmx/vmcs.h	Fri Jun 15 13:33:11 2007 -0600
    24.3 @@ -104,6 +104,7 @@ void vmx_vmcs_exit(struct vcpu *v);
    24.4  #define CPU_BASED_CR8_LOAD_EXITING            0x00080000
    24.5  #define CPU_BASED_CR8_STORE_EXITING           0x00100000
    24.6  #define CPU_BASED_TPR_SHADOW                  0x00200000
    24.7 +#define CPU_BASED_VIRTUAL_NMI_PENDING         0x00400000
    24.8  #define CPU_BASED_MOV_DR_EXITING              0x00800000
    24.9  #define CPU_BASED_UNCOND_IO_EXITING           0x01000000
   24.10  #define CPU_BASED_ACTIVATE_IO_BITMAP          0x02000000
   24.11 @@ -115,6 +116,7 @@ extern u32 vmx_cpu_based_exec_control;
   24.12  
   24.13  #define PIN_BASED_EXT_INTR_MASK         0x00000001
   24.14  #define PIN_BASED_NMI_EXITING           0x00000008
   24.15 +#define PIN_BASED_VIRTUAL_NMIS          0x00000020
   24.16  extern u32 vmx_pin_based_exec_control;
   24.17  
   24.18  #define VM_EXIT_IA32E_MODE              0x00000200
   24.19 @@ -137,7 +139,13 @@ extern u32 vmx_secondary_exec_control;
   24.20      (vmx_cpu_based_exec_control & CPU_BASED_ACTIVATE_MSR_BITMAP)
   24.21  extern char *vmx_msr_bitmap;
   24.22  
   24.23 -/* VMCS Encordings */
   24.24 +/* GUEST_INTERRUPTIBILITY_INFO flags. */
   24.25 +#define VMX_INTR_SHADOW_STI             0x00000001
   24.26 +#define VMX_INTR_SHADOW_MOV_SS          0x00000002
   24.27 +#define VMX_INTR_SHADOW_SMI             0x00000004
   24.28 +#define VMX_INTR_SHADOW_NMI             0x00000008
   24.29 +
   24.30 +/* VMCS field encodings. */
   24.31  enum vmcs_field {
   24.32      GUEST_ES_SELECTOR               = 0x00000800,
   24.33      GUEST_CS_SELECTOR               = 0x00000802,
    25.1 --- a/xen/include/asm-x86/hvm/vmx/vmx.h	Fri Jun 15 12:38:35 2007 -0600
    25.2 +++ b/xen/include/asm-x86/hvm/vmx/vmx.h	Fri Jun 15 13:33:11 2007 -0600
    25.3 @@ -46,8 +46,8 @@ void vmx_vlapic_msr_changed(struct vcpu 
    25.4  #define EXIT_REASON_SIPI                4
    25.5  #define EXIT_REASON_IO_SMI              5
    25.6  #define EXIT_REASON_OTHER_SMI           6
    25.7 -#define EXIT_REASON_PENDING_INTERRUPT   7
    25.8 -
    25.9 +#define EXIT_REASON_PENDING_VIRT_INTR   7
   25.10 +#define EXIT_REASON_PENDING_VIRT_NMI    8
   25.11  #define EXIT_REASON_TASK_SWITCH         9
   25.12  #define EXIT_REASON_CPUID               10
   25.13  #define EXIT_REASON_HLT                 12
   25.14 @@ -295,7 +295,14 @@ static inline void __vmx_inject_exceptio
   25.15  {
   25.16      unsigned long intr_fields;
   25.17  
   25.18 -    /* Reflect it back into the guest */
   25.19 +    /*
   25.20 +     * NB. Callers do not need to worry about clearing STI/MOV-SS blocking:
   25.21 +     *  "If the VM entry is injecting, there is no blocking by STI or by
   25.22 +     *   MOV SS following the VM entry, regardless of the contents of the
   25.23 +     *   interruptibility-state field [in the guest-state area before the
   25.24 +     *   VM entry]", PRM Vol. 3, 22.6.1 (Interruptibility State).
   25.25 +     */
   25.26 +
   25.27      intr_fields = (INTR_INFO_VALID_MASK | type | trap);
   25.28      if ( error_code != VMX_DELIVER_NO_ERROR_CODE ) {
   25.29          __vmwrite(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code);
   25.30 @@ -332,7 +339,6 @@ static inline void vmx_inject_sw_excepti
   25.31  static inline void vmx_inject_extint(struct vcpu *v, int trap, int error_code)
   25.32  {
   25.33      __vmx_inject_exception(v, trap, INTR_TYPE_EXT_INTR, error_code, 0);
   25.34 -    __vmwrite(GUEST_INTERRUPTIBILITY_INFO, 0);
   25.35  }
   25.36  
   25.37  #endif /* __ASM_X86_HVM_VMX_VMX_H__ */
    26.1 --- a/xen/include/asm-x86/hvm/vpt.h	Fri Jun 15 12:38:35 2007 -0600
    26.2 +++ b/xen/include/asm-x86/hvm/vpt.h	Fri Jun 15 13:33:11 2007 -0600
    26.3 @@ -115,19 +115,19 @@ struct pl_time {    /* platform time */
    26.4  
    26.5  void pt_freeze_time(struct vcpu *v);
    26.6  void pt_thaw_time(struct vcpu *v);
    26.7 -void pt_timer_fn(void *data);
    26.8  void pt_update_irq(struct vcpu *v);
    26.9  struct periodic_time *is_pt_irq(struct vcpu *v, int vector, int type);
   26.10  void pt_intr_post(struct vcpu *v, int vector, int type);
   26.11  void pt_reset(struct vcpu *v);
   26.12 -void create_periodic_time(struct vcpu *v, struct periodic_time *pt, uint64_t period,
   26.13 -                          uint8_t irq, char one_shot, time_cb *cb, void *data);
   26.14 +void pt_migrate(struct vcpu *v);
   26.15 +void create_periodic_time(
   26.16 +    struct vcpu *v, struct periodic_time *pt, uint64_t period,
   26.17 +    uint8_t irq, char one_shot, time_cb *cb, void *data);
   26.18  void destroy_periodic_time(struct periodic_time *pt);
   26.19  
   26.20  int pv_pit_handler(int port, int data, int write);
   26.21  void pit_init(struct vcpu *v, unsigned long cpu_khz);
   26.22  void pit_stop_channel0_irq(PITState * pit);
   26.23 -void pit_migrate_timers(struct vcpu *v);
   26.24  void pit_deinit(struct domain *d);
   26.25  void rtc_init(struct vcpu *v, int base);
   26.26  void rtc_migrate_timers(struct vcpu *v);
    27.1 --- a/xen/include/asm-x86/msr.h	Fri Jun 15 12:38:35 2007 -0600
    27.2 +++ b/xen/include/asm-x86/msr.h	Fri Jun 15 13:33:11 2007 -0600
    27.3 @@ -96,6 +96,7 @@ static inline void wrmsrl(unsigned int m
    27.4  #define MSR_IA32_TIME_STAMP_COUNTER     0x10
    27.5  #define MSR_IA32_PLATFORM_ID		0x17
    27.6  #define MSR_IA32_EBL_CR_POWERON		0x2a
    27.7 +#define MSR_IA32_EBC_FREQUENCY_ID	0x2c
    27.8  
    27.9  #define MSR_IA32_APICBASE		0x1b
   27.10  #define MSR_IA32_APICBASE_BSP		(1<<8)
    28.1 --- a/xen/include/public/io/ring.h	Fri Jun 15 12:38:35 2007 -0600
    28.2 +++ b/xen/include/public/io/ring.h	Fri Jun 15 13:33:11 2007 -0600
    28.3 @@ -133,7 +133,7 @@ typedef struct __name##_back_ring __name
    28.4  #define SHARED_RING_INIT(_s) do {                                       \
    28.5      (_s)->req_prod  = (_s)->rsp_prod  = 0;                              \
    28.6      (_s)->req_event = (_s)->rsp_event = 1;                              \
    28.7 -    memset((_s)->pad, 0, sizeof((_s)->pad));                            \
    28.8 +    (void)memset((_s)->pad, 0, sizeof((_s)->pad));                      \
    28.9  } while(0)
   28.10  
   28.11  #define FRONT_RING_INIT(_r, _s, __size) do {                            \
    29.1 --- a/xen/include/public/io/xs_wire.h	Fri Jun 15 12:38:35 2007 -0600
    29.2 +++ b/xen/include/public/io/xs_wire.h	Fri Jun 15 13:33:11 2007 -0600
    29.3 @@ -60,7 +60,11 @@ struct xsd_errors
    29.4      const char *errstring;
    29.5  };
    29.6  #define XSD_ERROR(x) { x, #x }
    29.7 -static struct xsd_errors xsd_errors[] __attribute__((unused)) = {
    29.8 +static struct xsd_errors xsd_errors[]
    29.9 +#if defined(__GNUC__)
   29.10 +__attribute__((unused))
   29.11 +#endif
   29.12 +    = {
   29.13      XSD_ERROR(EINVAL),
   29.14      XSD_ERROR(EACCES),
   29.15      XSD_ERROR(EEXIST),