ia64/xen-unstable

changeset 12735:f50380324d1c

Merge.
author Steven Smith <ssmith@xensource.com>
date Fri Dec 01 12:03:38 2006 +0000 (2006-12-01)
parents 0536dbde1562 bec95280b565
children fb0a586854c1
files tools/python/xen/xend/image.py xen/arch/x86/hvm/vmx/io.c
line diff
     1.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     1.2 +++ b/tools/python/xen/util/mkdir.py	Fri Dec 01 12:03:38 2006 +0000
     1.3 @@ -0,0 +1,44 @@
     1.4 +#============================================================================
     1.5 +# This library is free software; you can redistribute it and/or modify
     1.6 +# it under the terms of the GNU Lesser General Public License as published by
     1.7 +# the Free Software Foundation; either version 2.1 of the License, or
     1.8 +# (at your option) any later version.
     1.9 +#
    1.10 +# This library is distributed in the hope that it will be useful,
    1.11 +# but WITHOUT ANY WARRANTY; without even the implied warranty of
    1.12 +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    1.13 +# GNU Lesser General Public License for more details.
    1.14 +#
    1.15 +# You should have received a copy of the GNU Lesser General Public License
    1.16 +# along with this library; if not, write to the Free Software
    1.17 +# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
    1.18 +#============================================================================
    1.19 +# Copyright (c) 2006 XenSource Inc.
    1.20 +#============================================================================
    1.21 +
    1.22 +import errno
    1.23 +import os
    1.24 +import os.path
    1.25 +import stat
    1.26 +
    1.27 +
    1.28 +def parents(dir, perms, enforcePermissions = False):
    1.29 +    """
    1.30 +    Ensure that the given directory exists, creating it if necessary, but not
    1.31 +    complaining if it's already there.
    1.32 +    
    1.33 +    @param dir The directory name.
    1.34 +    @param perms One of the stat.S_ constants.
    1.35 +    @param enforcePermissions Enforce our ownership and the given permissions,
    1.36 +    even if the directory pre-existed with different ones.
    1.37 +    """
    1.38 +    # Catch the exception here, rather than checking for the directory's
    1.39 +    # existence first, to avoid races.
    1.40 +    try:
    1.41 +        os.makedirs(dir, perms)
    1.42 +    except OSError, exn:
    1.43 +        if exn.args[0] != errno.EEXIST or not os.path.isdir(dir):
    1.44 +            raise
    1.45 +    if enforcePermissions:
    1.46 +        os.chown(dir, os.geteuid(), os.getegid())
    1.47 +        os.chmod(dir, stat.S_IRWXU)
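
A minimal usage sketch of the new helper (not part of the changeset; the path below is illustrative only). xen.util.mkdir.parents() wraps os.makedirs() so callers no longer have to test for the directory first, and it can optionally force ownership and mode on a directory that already existed, which is how the callers later in this diff (xmlrpclib2.py, web/unix.py, XendDomain.py, XendLogging.py, XendStorageRepository.py, SrvDaemon.py) use it:

    import stat
    from xen.util import mkdir

    # Ensure the directory and any missing parents exist with mode 0700,
    # without racing against concurrent creation.  The path is illustrative.
    mkdir.parents('/var/run/xend-example', stat.S_IRWXU)

    # With enforcePermissions=True the directory is additionally chowned to
    # the current effective uid/gid and chmodded to 0700, even if it already
    # existed with different ownership or permissions.
    mkdir.parents('/var/run/xend-example', stat.S_IRWXU, True)
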
     2.1 --- a/tools/python/xen/util/xmlrpclib2.py	Fri Dec 01 12:03:15 2006 +0000
     2.2 +++ b/tools/python/xen/util/xmlrpclib2.py	Fri Dec 01 12:03:38 2006 +0000
     2.3 @@ -30,6 +30,8 @@ from SimpleXMLRPCServer import SimpleXML
     2.4  import SocketServer
     2.5  import xmlrpclib, socket, os, stat
     2.6  
     2.7 +import mkdir
     2.8 +
     2.9  from xen.web import connection
    2.10  from xen.xend.XendLogging import log
    2.11  
    2.12 @@ -234,14 +236,9 @@ class UnixXMLRPCServer(TCPXMLRPCServer):
    2.13      address_family = socket.AF_UNIX
    2.14  
    2.15      def __init__(self, addr, allowed, logRequests = 1):
    2.16 -        parent = os.path.dirname(addr)
    2.17 -        if os.path.exists(parent):
    2.18 -            os.chown(parent, os.geteuid(), os.getegid())
    2.19 -            os.chmod(parent, stat.S_IRWXU)
    2.20 -            if self.allow_reuse_address and os.path.exists(addr):
    2.21 -                os.unlink(addr)
    2.22 -        else:
    2.23 -            os.makedirs(parent, stat.S_IRWXU)
    2.24 +        mkdir.parents(os.path.dirname(addr), stat.S_IRWXU, True)
    2.25 +        if self.allow_reuse_address and os.path.exists(addr):
    2.26 +            os.unlink(addr)
    2.27  
    2.28          TCPXMLRPCServer.__init__(self, addr, allowed,
    2.29                                   UnixXMLRPCRequestHandler, logRequests)
     3.1 --- a/tools/python/xen/web/unix.py	Fri Dec 01 12:03:15 2006 +0000
     3.2 +++ b/tools/python/xen/web/unix.py	Fri Dec 01 12:03:38 2006 +0000
     3.3 @@ -22,6 +22,8 @@ import os.path
     3.4  import socket
     3.5  import stat
     3.6  
     3.7 +from xen.util import mkdir
     3.8 +
     3.9  import connection
    3.10  
    3.11  
    3.12 @@ -30,13 +32,9 @@ def bind(path):
    3.13  created such that only the current user may access it."""
    3.14  
    3.15      parent = os.path.dirname(path)
    3.16 -    if os.path.exists(parent):
    3.17 -        os.chown(parent, os.geteuid(), os.getegid())
    3.18 -        os.chmod(parent, stat.S_IRWXU)
    3.19 -        if os.path.exists(path):
    3.20 -            os.unlink(path)
    3.21 -    else:
    3.22 -        os.makedirs(parent, stat.S_IRWXU)
    3.23 +    mkdir.parents(parent, stat.S_IRWXU, True)
    3.24 +    if os.path.exists(path):
    3.25 +        os.unlink(path)
    3.26  
    3.27      sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    3.28      sock.bind(path)
     4.1 --- a/tools/python/xen/xend/XendConfig.py	Fri Dec 01 12:03:15 2006 +0000
     4.2 +++ b/tools/python/xen/xend/XendConfig.py	Fri Dec 01 12:03:38 2006 +0000
     4.3 @@ -185,20 +185,22 @@ LEGACY_IMAGE_HVM_CFG = [
     4.4      ('vncconsole', int),
     4.5      ('pae', int),
     4.6      ('apic', int),
     4.7 -    ('acpi', int),
     4.8 -    ('serial', str),
     4.9  ]
    4.10  
    4.11  LEGACY_IMAGE_HVM_DEVICES_CFG = [
    4.12 +    ('acpi', int),    
    4.13      ('boot', str),
    4.14      ('fda', str),
    4.15      ('fdb', str),
    4.16 +    ('isa', str),
    4.17 +    ('keymap', str),    
    4.18 +    ('localtime', str),    
    4.19 +    ('serial', str),
    4.20 +    ('stdvga', int),
    4.21      ('soundhw', str),
    4.22 -    ('isa', str),
    4.23 +    ('usb', str),
    4.24 +    ('usbdevice', str),    
    4.25      ('vcpus', int),
    4.26 -    ('acpi', int),
    4.27 -    ('usb', str),
    4.28 -    ('usbdevice', str),
    4.29  ]
    4.30  
    4.31  
    4.32 @@ -985,12 +987,12 @@ class XendConfig(dict):
    4.33  
    4.34          if 'hvm' in self['image']:
    4.35              for arg, conv in LEGACY_IMAGE_HVM_CFG:
    4.36 -                if self['image']['hvm'].has_key(arg):
    4.37 +                if self['image']['hvm'].get(arg):
    4.38                      image.append([arg, self['image']['hvm'][arg]])
    4.39  
    4.40          if 'hvm' in self['image'] and 'devices' in self['image']['hvm']:
    4.41              for arg, conv in LEGACY_IMAGE_HVM_DEVICES_CFG:
    4.42 -                if self['image']['hvm']['devices'].has_key(arg):
    4.43 +                if self['image']['hvm']['devices'].get(arg):
    4.44                      image.append([arg,
    4.45                                    self['image']['hvm']['devices'][arg]])
    4.46  
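
One semantic detail of the hunk above, sketched for illustration (not part of the changeset): replacing has_key(arg) with get(arg) changes the test from key presence to value truthiness, so a legacy option that is present but false (for example acpi = 0) is no longer copied into the legacy image SXP:

    hvm = {'acpi': 0, 'apic': 1}

    hvm.has_key('acpi')    # True  -- key present, old test emits the entry
    bool(hvm.get('acpi'))  # False -- value 0 is falsy, new test skips it
    bool(hvm.get('apic'))  # True  -- emitted under either test
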
     5.1 --- a/tools/python/xen/xend/XendDomain.py	Fri Dec 01 12:03:15 2006 +0000
     5.2 +++ b/tools/python/xen/xend/XendDomain.py	Fri Dec 01 12:03:38 2006 +0000
     5.3 @@ -23,6 +23,7 @@
     5.4  """
     5.5  
     5.6  import os
     5.7 +import stat
     5.8  import shutil
     5.9  import socket
    5.10  import threading
    5.11 @@ -44,7 +45,7 @@ from xen.xend.XendDevices import XendDev
    5.12  
    5.13  from xen.xend.xenstore.xstransact import xstransact
    5.14  from xen.xend.xenstore.xswatch import xswatch
    5.15 -from xen.util import security
    5.16 +from xen.util import mkdir, security
    5.17  from xen.xend import uuid
    5.18  
    5.19  xc = xen.lowlevel.xc.xc()
    5.20 @@ -99,11 +100,7 @@ class XendDomain:
    5.21          """Singleton initialisation function."""
    5.22  
    5.23          dom_path = self._managed_path()
    5.24 -        try:
    5.25 -            os.stat(dom_path)
    5.26 -        except OSError:
    5.27 -            log.info("Making %s", dom_path)
    5.28 -            os.makedirs(dom_path, 0755)
    5.29 +        mkdir.parents(dom_path, stat.S_IRWXU)
    5.30  
    5.31          xstransact.Mkdir(XS_VMROOT)
    5.32          xstransact.SetPermissions(XS_VMROOT, {'dom': DOM0_ID})
    5.33 @@ -271,25 +268,17 @@ class XendDomain:
    5.34              domains_dir = self._managed_path()
    5.35              dom_uuid = dominfo.get_uuid()            
    5.36              domain_config_dir = self._managed_path(dom_uuid)
    5.37 -        
    5.38 -            # make sure the domain dir exists
    5.39 -            if not os.path.exists(domains_dir):
    5.40 -                os.makedirs(domains_dir, 0755)
    5.41 -            elif not os.path.isdir(domains_dir):
    5.42 -                log.error("xend_domain_dir is not a directory.")
    5.43 -                raise XendError("Unable to save managed configuration "
    5.44 -                                "because %s is not a directory." %
    5.45 -                                domains_dir)
    5.46 -            
    5.47 -            if not os.path.exists(domain_config_dir):
    5.48 +
    5.49 +            def make_or_raise(path):
    5.50                  try:
    5.51 -                    os.makedirs(domain_config_dir, 0755)
    5.52 -                except IOError:
    5.53 -                    log.exception("Failed to create directory: %s" %
    5.54 -                                  domain_config_dir)
    5.55 -                    raise XendError("Failed to create directory: %s" %
    5.56 -                                    domain_config_dir)
    5.57 -                
    5.58 +                    mkdir.parents(path, stat.S_IRWXU)
    5.59 +                except:
    5.60 +                    log.exception("%s could not be created." % path)
    5.61 +                    raise XendError("%s could not be created." % path)
    5.62 +
    5.63 +            make_or_raise(domains_dir)
    5.64 +            make_or_raise(domain_config_dir)
    5.65 +
    5.66              try:
    5.67                  sxp_cache_file = open(self._managed_config_path(dom_uuid),'w')
    5.68                  prettyprint(dominfo.sxpr(), sxp_cache_file, width = 78)
     6.1 --- a/tools/python/xen/xend/XendLogging.py	Fri Dec 01 12:03:15 2006 +0000
     6.2 +++ b/tools/python/xen/xend/XendLogging.py	Fri Dec 01 12:03:38 2006 +0000
     6.3 @@ -16,13 +16,15 @@
     6.4  # Copyright (C) 2005, 2006 XenSource Ltd.
     6.5  #============================================================================
     6.6  
     6.7 -
     6.8 +import os
     6.9 +import stat
    6.10  import tempfile
    6.11  import types
    6.12  import logging
    6.13  import logging.handlers
    6.14  import fcntl
    6.15  
    6.16 +from xen.util import mkdir
    6.17  from xen.xend.server import params
    6.18  
    6.19  
    6.20 @@ -80,6 +82,7 @@ def init(filename, level):
    6.21      global logfilename
    6.22  
    6.23      def openFileHandler(fname):
    6.24 +        mkdir.parents(os.path.dirname(fname), stat.S_IRWXU)
    6.25          return XendRotatingFileHandler(fname, mode = 'a',
    6.26                                         maxBytes = MAX_BYTES,
    6.27                                         backupCount = BACKUP_COUNT)
     7.1 --- a/tools/python/xen/xend/XendStorageRepository.py	Fri Dec 01 12:03:15 2006 +0000
     7.2 +++ b/tools/python/xen/xend/XendStorageRepository.py	Fri Dec 01 12:03:38 2006 +0000
     7.3 @@ -19,10 +19,12 @@
     7.4  # The default QCOW Xen API Storage Repository
     7.5  #
     7.6  
     7.7 +import commands
     7.8  import os
     7.9 -import commands
    7.10 +import stat
    7.11  import threading
    7.12  
    7.13 +from xen.util import mkdir
    7.14  from xen.xend import uuid
    7.15  from xen.xend.XendError import XendError
    7.16  from xen.xend.XendVDI import *
    7.17 @@ -98,10 +100,7 @@ class XendStorageRepository:
    7.18          """
    7.19          self.lock.acquire()
    7.20          try:
    7.21 -            # create directory if /var/lib/xend/storage does not exist
    7.22 -            if not os.path.exists(XEND_STORAGE_DIR):
    7.23 -                os.makedirs(XEND_STORAGE_DIR)
    7.24 -                os.chmod(XEND_STORAGE_DIR, 0700)
    7.25 +            mkdir.parents(XEND_STORAGE_DIR, stat.S_IRWXU)
    7.26  
    7.27              # scan the directory and populate self.images
    7.28              total_used = 0
     8.1 --- a/tools/python/xen/xend/image.py	Fri Dec 01 12:03:15 2006 +0000
     8.2 +++ b/tools/python/xen/xend/image.py	Fri Dec 01 12:03:38 2006 +0000
     8.3 @@ -277,8 +277,9 @@ class HVMImageHandler(ImageHandler):
     8.4          self.dmargs += self.configVNC(imageConfig)
     8.5  
     8.6          self.pae  = imageConfig['hvm'].get('pae', 0)
     8.7 -        self.acpi  = imageConfig['hvm'].get('acpi', 0)
     8.8          self.apic  = imageConfig['hvm'].get('apic', 0)
     8.9 +        self.acpi  = imageConfig['hvm']['devices'].get('acpi', 0)
    8.10 +        
    8.11  
    8.12      def buildDomain(self):
    8.13          store_evtchn = self.vm.getStorePort()
    8.14 @@ -317,8 +318,6 @@ class HVMImageHandler(ImageHandler):
    8.15          
    8.16          for a in dmargs:
    8.17              v = hvmDeviceConfig.get(a)
    8.18 -            if a == 'vcpus':
    8.19 -                v = hvmDeviceConfig.get('vcpus_number')
    8.20  
    8.21              # python doesn't allow '-' in variable names
    8.22              if a == 'stdvga': a = 'std-vga'
     9.1 --- a/tools/python/xen/xend/server/SrvDaemon.py	Fri Dec 01 12:03:15 2006 +0000
     9.2 +++ b/tools/python/xen/xend/server/SrvDaemon.py	Fri Dec 01 12:03:38 2006 +0000
     9.3 @@ -21,6 +21,7 @@ import xen.lowlevel.xc
     9.4  
     9.5  from xen.xend.XendLogging import log
     9.6  from xen.xend import osdep
     9.7 +from xen.util import mkdir
     9.8  
     9.9  import relocate
    9.10  import SrvServer
    9.11 @@ -108,8 +109,7 @@ class Daemon:
    9.12          # so _before_ we close stderr.
    9.13          try:
    9.14              parent = os.path.dirname(XEND_DEBUG_LOG)
    9.15 -            if not os.path.exists(parent):
    9.16 -                os.makedirs(parent, stat.S_IRWXU)
    9.17 +            mkdir.parents(parent, stat.S_IRWXU)
    9.18              fd = os.open(XEND_DEBUG_LOG, os.O_WRONLY|os.O_CREAT|os.O_APPEND)
    9.19          except Exception, exn:
    9.20              print >>sys.stderr, exn
    10.1 --- a/xen/arch/x86/hvm/svm/emulate.c	Fri Dec 01 12:03:15 2006 +0000
    10.2 +++ b/xen/arch/x86/hvm/svm/emulate.c	Fri Dec 01 12:03:38 2006 +0000
    10.3 @@ -128,17 +128,6 @@ static inline unsigned long DECODE_GPR_V
    10.4          return (unsigned long) -1; \
    10.5      }
    10.6  
    10.7 -#if 0
    10.8 -/*
    10.9 - * hv_is_canonical - checks if the given address is canonical
   10.10 - */
   10.11 -static inline u64 hv_is_canonical(u64 addr)
   10.12 -{
   10.13 -    u64 bits = addr & (u64)0xffff800000000000;
   10.14 -    return (u64)((bits == (u64)0xffff800000000000) || (bits == (u64)0x0));
   10.15 -}
   10.16 -#endif
   10.17 -
   10.18  #define modrm operand [0]
   10.19  
   10.20  #define sib operand [1]
    11.1 --- a/xen/arch/x86/hvm/svm/svm.c	Fri Dec 01 12:03:15 2006 +0000
    11.2 +++ b/xen/arch/x86/hvm/svm/svm.c	Fri Dec 01 12:03:38 2006 +0000
    11.3 @@ -269,13 +269,11 @@ static int svm_long_mode_enabled(struct 
    11.4      return test_bit(SVM_CPU_STATE_LMA_ENABLED, &v->arch.hvm_svm.cpu_state);
    11.5  }
    11.6  
    11.7 -#define IS_CANO_ADDRESS(add) 1
    11.8 -
    11.9  static inline int long_mode_do_msr_read(struct cpu_user_regs *regs)
   11.10  {
   11.11      u64 msr_content = 0;
   11.12 -    struct vcpu *vc = current;
   11.13 -    struct vmcb_struct *vmcb = vc->arch.hvm_svm.vmcb;
   11.14 +    struct vcpu *v = current;
   11.15 +    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
   11.16  
   11.17      switch ((u32)regs->ecx)
   11.18      {
   11.19 @@ -284,17 +282,25 @@ static inline int long_mode_do_msr_read(
   11.20          msr_content &= ~EFER_SVME;
   11.21          break;
   11.22  
   11.23 +#ifdef __x86_64__
   11.24      case MSR_FS_BASE:
   11.25          msr_content = vmcb->fs.base;
   11.26 -        break;
   11.27 +        goto check_long_mode;
   11.28  
   11.29      case MSR_GS_BASE:
   11.30          msr_content = vmcb->gs.base;
   11.31 -        break;
   11.32 +        goto check_long_mode;
   11.33  
   11.34      case MSR_SHADOW_GS_BASE:
   11.35          msr_content = vmcb->kerngsbase;
   11.36 +    check_long_mode:
   11.37 +        if ( !svm_long_mode_enabled(v) )
   11.38 +        {
   11.39 +            svm_inject_exception(v, TRAP_gp_fault, 1, 0);
   11.40 +            return 0;
   11.41 +        }
   11.42          break;
   11.43 +#endif
   11.44  
   11.45      case MSR_STAR:
   11.46          msr_content = vmcb->star;
   11.47 @@ -326,25 +332,25 @@ static inline int long_mode_do_msr_read(
   11.48  static inline int long_mode_do_msr_write(struct cpu_user_regs *regs)
   11.49  {
   11.50      u64 msr_content = (u32)regs->eax | ((u64)regs->edx << 32);
   11.51 +    u32 ecx = regs->ecx;
   11.52      struct vcpu *v = current;
   11.53      struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
   11.54  
   11.55      HVM_DBG_LOG(DBG_LEVEL_1, "msr %x msr_content %"PRIx64"\n",
   11.56 -                (u32)regs->ecx, msr_content);
   11.57 -
   11.58 -    switch ( (u32)regs->ecx )
   11.59 +                ecx, msr_content);
   11.60 +
   11.61 +    switch ( ecx )
   11.62      {
   11.63      case MSR_EFER:
   11.64 -#ifdef __x86_64__
   11.65          /* offending reserved bit will cause #GP */
   11.66          if ( msr_content & ~(EFER_LME | EFER_LMA | EFER_NX | EFER_SCE) )
   11.67          {
   11.68 -            printk("Trying to set reserved bit in EFER: %"PRIx64"\n",
   11.69 -                   msr_content);
   11.70 -            svm_inject_exception(v, TRAP_gp_fault, 1, 0);
   11.71 -            return 0;
   11.72 +            gdprintk(XENLOG_WARNING, "Trying to set reserved bit in "
   11.73 +                     "EFER: %"PRIx64"\n", msr_content);
   11.74 +            goto gp_fault;
   11.75          }
   11.76  
   11.77 +#ifdef __x86_64__
   11.78          /* LME: 0 -> 1 */
   11.79          if ( msr_content & EFER_LME &&
   11.80               !test_bit(SVM_CPU_STATE_LME_ENABLED, &v->arch.hvm_svm.cpu_state))
   11.81 @@ -353,10 +359,9 @@ static inline int long_mode_do_msr_write
   11.82                   !test_bit(SVM_CPU_STATE_PAE_ENABLED,
   11.83                             &v->arch.hvm_svm.cpu_state) )
   11.84              {
   11.85 -                printk("Trying to set LME bit when "
   11.86 -                       "in paging mode or PAE bit is not set\n");
   11.87 -                svm_inject_exception(v, TRAP_gp_fault, 1, 0);
   11.88 -                return 0;
   11.89 +                gdprintk(XENLOG_WARNING, "Trying to set LME bit when "
   11.90 +                         "in paging mode or PAE bit is not set\n");
   11.91 +                goto gp_fault;
   11.92              }
   11.93              set_bit(SVM_CPU_STATE_LME_ENABLED, &v->arch.hvm_svm.cpu_state);
   11.94          }
   11.95 @@ -371,37 +376,38 @@ static inline int long_mode_do_msr_write
   11.96          vmcb->efer = msr_content | EFER_SVME;
   11.97          break;
   11.98  
   11.99 +#ifdef __x86_64__
  11.100      case MSR_FS_BASE:
  11.101      case MSR_GS_BASE:
  11.102 +    case MSR_SHADOW_GS_BASE:
  11.103          if ( !svm_long_mode_enabled(v) )
  11.104 -            goto exit_and_crash;
  11.105 -
  11.106 -        if (!IS_CANO_ADDRESS(msr_content))
  11.107 -        {
  11.108 -            HVM_DBG_LOG(DBG_LEVEL_1, "Not cano address of msr write\n");
  11.109 -            svm_inject_exception(v, TRAP_gp_fault, 1, 0);
  11.110 -        }
  11.111 -
  11.112 -        if (regs->ecx == MSR_FS_BASE)
  11.113 +            goto gp_fault;
  11.114 +
  11.115 +        if ( !is_canonical_address(msr_content) )
  11.116 +            goto uncanonical_address;
  11.117 +
  11.118 +        if ( ecx == MSR_FS_BASE )
  11.119              vmcb->fs.base = msr_content;
  11.120 -        else 
  11.121 +        else if ( ecx == MSR_GS_BASE )
  11.122              vmcb->gs.base = msr_content;
  11.123 +        else
  11.124 +            vmcb->kerngsbase = msr_content;
  11.125          break;
  11.126 -
  11.127 -    case MSR_SHADOW_GS_BASE:
  11.128 -        vmcb->kerngsbase = msr_content;
  11.129 -        break;
  11.130 +#endif
  11.131   
  11.132      case MSR_STAR:
  11.133          vmcb->star = msr_content;
  11.134          break;
  11.135   
  11.136      case MSR_LSTAR:
  11.137 -        vmcb->lstar = msr_content;
  11.138 -        break;
  11.139 - 
  11.140      case MSR_CSTAR:
  11.141 -        vmcb->cstar = msr_content;
  11.142 +        if ( !is_canonical_address(msr_content) )
  11.143 +            goto uncanonical_address;
  11.144 +
  11.145 +        if ( ecx == MSR_LSTAR )
  11.146 +            vmcb->lstar = msr_content;
  11.147 +        else
  11.148 +            vmcb->cstar = msr_content;
  11.149          break;
  11.150   
  11.151      case MSR_SYSCALL_MASK:
  11.152 @@ -414,10 +420,11 @@ static inline int long_mode_do_msr_write
  11.153  
  11.154      return 1;
  11.155  
  11.156 - exit_and_crash:
  11.157 -    gdprintk(XENLOG_ERR, "Fatal error writing MSR %lx\n", (long)regs->ecx);
  11.158 -    domain_crash(v->domain);
  11.159 -    return 1; /* handled */
  11.160 + uncanonical_address:
  11.161 +    HVM_DBG_LOG(DBG_LEVEL_1, "Not cano address of msr write %x\n", ecx);
  11.162 + gp_fault:
  11.163 +    svm_inject_exception(v, TRAP_gp_fault, 1, 0);
  11.164 +    return 0;
  11.165  }
  11.166  
  11.167  
  11.168 @@ -1272,7 +1279,7 @@ static inline int svm_get_io_address(
  11.169  #endif
  11.170  
  11.171      /* d field of cs.attr is 1 for 32-bit, 0 for 16 or 64 bit. 
  11.172 -     * l field combined with EFER_LMA -> longmode says whether it's 16 or 64 bit. 
  11.173 +     * l field combined with EFER_LMA says whether it's 16 or 64 bit. 
  11.174       */
  11.175      asize = (long_mode)?64:((vmcb->cs.attr.fields.db)?32:16);
  11.176  
  11.177 @@ -1383,8 +1390,35 @@ static inline int svm_get_io_address(
  11.178  
  11.179          *addr += seg->base;
  11.180      }
  11.181 -    else if (seg == &vmcb->fs || seg == &vmcb->gs)
  11.182 -        *addr += seg->base;
  11.183 +#ifdef __x86_64__
  11.184 +    else
  11.185 +    {
  11.186 +        if (seg == &vmcb->fs || seg == &vmcb->gs)
  11.187 +            *addr += seg->base;
  11.188 +
  11.189 +        if (!is_canonical_address(*addr) ||
  11.190 +            !is_canonical_address(*addr + size - 1))
  11.191 +        {
  11.192 +            svm_inject_exception(v, TRAP_gp_fault, 1, 0);
  11.193 +            return 0;
  11.194 +        }
  11.195 +        if (*count > (1UL << 48) / size)
  11.196 +            *count = (1UL << 48) / size;
  11.197 +        if (!(regs->eflags & EF_DF))
  11.198 +        {
  11.199 +            if (*addr + *count * size - 1 < *addr ||
  11.200 +                !is_canonical_address(*addr + *count * size - 1))
  11.201 +                *count = (*addr & ~((1UL << 48) - 1)) / size;
  11.202 +        }
  11.203 +        else
  11.204 +        {
  11.205 +            if ((*count - 1) * size > *addr ||
  11.206 +                !is_canonical_address(*addr + (*count - 1) * size))
  11.207 +                *count = (*addr & ~((1UL << 48) - 1)) / size + 1;
  11.208 +        }
  11.209 +        ASSERT(*count);
  11.210 +    }
  11.211 +#endif
  11.212  
  11.213      return 1;
  11.214  }
    12.1 --- a/xen/arch/x86/hvm/vlapic.c	Fri Dec 01 12:03:15 2006 +0000
    12.2 +++ b/xen/arch/x86/hvm/vlapic.c	Fri Dec 01 12:03:38 2006 +0000
    12.3 @@ -119,19 +119,16 @@ static int vlapic_find_highest_vector(u3
    12.4  
    12.5  static int vlapic_test_and_set_irr(int vector, struct vlapic *vlapic)
    12.6  {
    12.7 -    vlapic->flush_tpr_threshold = 1;
    12.8      return vlapic_test_and_set_vector(vector, vlapic->regs + APIC_IRR);
    12.9  }
   12.10  
   12.11  static void vlapic_set_irr(int vector, struct vlapic *vlapic)
   12.12  {
   12.13 -    vlapic->flush_tpr_threshold = 1;
   12.14      vlapic_set_vector(vector, vlapic->regs + APIC_IRR);
   12.15  }
   12.16  
   12.17  static void vlapic_clear_irr(int vector, struct vlapic *vlapic)
   12.18  {
   12.19 -    vlapic->flush_tpr_threshold = 1;
   12.20      vlapic_clear_vector(vector, vlapic->regs + APIC_IRR);
   12.21  }
   12.22  
   12.23 @@ -634,7 +631,6 @@ static void vlapic_write(struct vcpu *v,
   12.24      {
   12.25      case APIC_TASKPRI:
   12.26          vlapic_set_reg(vlapic, APIC_TASKPRI, val & 0xff);
   12.27 -        vlapic->flush_tpr_threshold = 1;
   12.28          break;
   12.29  
   12.30      case APIC_EOI:
   12.31 @@ -667,10 +663,7 @@ static void vlapic_write(struct vcpu *v,
   12.32              }
   12.33          }
   12.34          else
   12.35 -        {
   12.36              vlapic->disabled &= ~VLAPIC_SW_DISABLED;
   12.37 -            vlapic->flush_tpr_threshold = 1;
   12.38 -        }
   12.39          break;
   12.40  
   12.41      case APIC_ESR:
   12.42 @@ -730,7 +723,7 @@ static void vlapic_write(struct vcpu *v,
   12.43          break;
   12.44  
   12.45      default:
   12.46 -        gdprintk(XENLOG_WARNING, 
   12.47 +        gdprintk(XENLOG_DEBUG,
   12.48                   "Local APIC Write to read-only register 0x%x\n", offset);
   12.49          break;
   12.50      }
   12.51 @@ -925,8 +918,6 @@ static int vlapic_reset(struct vlapic *v
   12.52      vlapic_set_reg(vlapic, APIC_SPIV, 0xff);
   12.53      vlapic->disabled |= VLAPIC_SW_DISABLED;
   12.54  
   12.55 -    vlapic->flush_tpr_threshold = 1;
   12.56 -
   12.57      return 1;
   12.58  }
   12.59  
    13.1 --- a/xen/arch/x86/hvm/vmx/Makefile	Fri Dec 01 12:03:15 2006 +0000
    13.2 +++ b/xen/arch/x86/hvm/vmx/Makefile	Fri Dec 01 12:03:38 2006 +0000
    13.3 @@ -1,6 +1,6 @@
    13.4  subdir-$(x86_32) += x86_32
    13.5  subdir-$(x86_64) += x86_64
    13.6  
    13.7 -obj-y += io.o
    13.8 +obj-y += intr.o
    13.9  obj-y += vmcs.o
   13.10  obj-y += vmx.o
    14.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    14.2 +++ b/xen/arch/x86/hvm/vmx/intr.c	Fri Dec 01 12:03:38 2006 +0000
    14.3 @@ -0,0 +1,196 @@
    14.4 +/*
    14.5 + * io.c: handling I/O, interrupts related VMX entry/exit
    14.6 + * Copyright (c) 2004, Intel Corporation.
    14.7 + *
    14.8 + * This program is free software; you can redistribute it and/or modify it
    14.9 + * under the terms and conditions of the GNU General Public License,
   14.10 + * version 2, as published by the Free Software Foundation.
   14.11 + *
   14.12 + * This program is distributed in the hope it will be useful, but WITHOUT
   14.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   14.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   14.15 + * more details.
   14.16 + *
   14.17 + * You should have received a copy of the GNU General Public License along with
   14.18 + * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
   14.19 + * Place - Suite 330, Boston, MA 02111-1307 USA.
   14.20 + *
   14.21 + */
   14.22 +
   14.23 +#include <xen/config.h>
   14.24 +#include <xen/init.h>
   14.25 +#include <xen/mm.h>
   14.26 +#include <xen/lib.h>
   14.27 +#include <xen/errno.h>
   14.28 +#include <xen/trace.h>
   14.29 +#include <xen/event.h>
   14.30 +
   14.31 +#include <asm/current.h>
   14.32 +#include <asm/cpufeature.h>
   14.33 +#include <asm/processor.h>
   14.34 +#include <asm/msr.h>
   14.35 +#include <asm/hvm/hvm.h>
   14.36 +#include <asm/hvm/io.h>
   14.37 +#include <asm/hvm/support.h>
   14.38 +#include <asm/hvm/vmx/vmx.h>
   14.39 +#include <asm/hvm/vmx/vmcs.h>
   14.40 +#include <asm/hvm/vpic.h>
   14.41 +#include <asm/hvm/vlapic.h>
   14.42 +#include <public/hvm/ioreq.h>
   14.43 +
   14.44 +
   14.45 +static inline void
   14.46 +enable_irq_window(struct vcpu *v)
   14.47 +{
   14.48 +    u32  *cpu_exec_control = &v->arch.hvm_vcpu.u.vmx.exec_control;
   14.49 +    
   14.50 +    if (!(*cpu_exec_control & CPU_BASED_VIRTUAL_INTR_PENDING)) {
   14.51 +        *cpu_exec_control |= CPU_BASED_VIRTUAL_INTR_PENDING;
   14.52 +        __vmwrite(CPU_BASED_VM_EXEC_CONTROL, *cpu_exec_control);
   14.53 +    }
   14.54 +}
   14.55 +
   14.56 +static inline void
   14.57 +disable_irq_window(struct vcpu *v)
   14.58 +{
   14.59 +    u32  *cpu_exec_control = &v->arch.hvm_vcpu.u.vmx.exec_control;
   14.60 +    
   14.61 +    if ( *cpu_exec_control & CPU_BASED_VIRTUAL_INTR_PENDING ) {
   14.62 +        *cpu_exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING;
   14.63 +        __vmwrite(CPU_BASED_VM_EXEC_CONTROL, *cpu_exec_control);
   14.64 +    }
   14.65 +}
   14.66 +
   14.67 +static inline int is_interruptibility_state(void)
   14.68 +{
   14.69 +    return __vmread(GUEST_INTERRUPTIBILITY_INFO);
   14.70 +}
   14.71 +
   14.72 +#ifdef __x86_64__
   14.73 +static void update_tpr_threshold(struct vlapic *vlapic)
   14.74 +{
   14.75 +    int max_irr, tpr;
   14.76 +
   14.77 +    if ( !vlapic_enabled(vlapic) || 
   14.78 +         ((max_irr = vlapic_find_highest_irr(vlapic)) == -1) )
   14.79 +    {
   14.80 +        __vmwrite(TPR_THRESHOLD, 0);
   14.81 +        return;
   14.82 +    }
   14.83 +
   14.84 +    tpr = vlapic_get_reg(vlapic, APIC_TASKPRI) & 0xF0;
   14.85 +    __vmwrite(TPR_THRESHOLD, (max_irr > tpr) ? (tpr >> 4) : (max_irr >> 4));
   14.86 +}
   14.87 +#else
   14.88 +#define update_tpr_threshold(v) ((void)0)
   14.89 +#endif
   14.90 +
   14.91 +asmlinkage void vmx_intr_assist(void)
   14.92 +{
   14.93 +    int intr_type = 0;
   14.94 +    int highest_vector;
   14.95 +    unsigned long eflags;
   14.96 +    struct vcpu *v = current;
   14.97 +    struct hvm_domain *plat=&v->domain->arch.hvm_domain;
   14.98 +    struct periodic_time *pt = &plat->pl_time.periodic_tm;
   14.99 +    unsigned int idtv_info_field;
  14.100 +    unsigned long inst_len;
  14.101 +    int    has_ext_irq;
  14.102 +
  14.103 +    if ( (v->vcpu_id == 0) && pt->enabled && pt->pending_intr_nr )
  14.104 +    {
  14.105 +        hvm_isa_irq_deassert(current->domain, pt->irq);
  14.106 +        hvm_isa_irq_assert(current->domain, pt->irq);
  14.107 +    }
  14.108 +
  14.109 +    hvm_set_callback_irq_level();
  14.110 +
  14.111 +    update_tpr_threshold(vcpu_vlapic(v));
  14.112 +
  14.113 +    has_ext_irq = cpu_has_pending_irq(v);
  14.114 +
  14.115 +    if ( unlikely(v->arch.hvm_vmx.vector_injected) )
  14.116 +    {
  14.117 +        v->arch.hvm_vmx.vector_injected=0;
  14.118 +        if (unlikely(has_ext_irq)) enable_irq_window(v);
  14.119 +        return;
  14.120 +    }
  14.121 +
  14.122 +    /* This could be moved earlier in the VMX resume sequence. */
  14.123 +    idtv_info_field = __vmread(IDT_VECTORING_INFO_FIELD);
  14.124 +    if ( unlikely(idtv_info_field & INTR_INFO_VALID_MASK) )
  14.125 +    {
  14.126 +        __vmwrite(VM_ENTRY_INTR_INFO_FIELD, idtv_info_field);
  14.127 +
  14.128 +        /*
  14.129 +         * Safe: the length will only be interpreted for software exceptions
  14.130 +         * and interrupts. If we get here then delivery of some event caused a
  14.131 +         * fault, and this always results in defined VM_EXIT_INSTRUCTION_LEN.
  14.132 +         */
  14.133 +        inst_len = __vmread(VM_EXIT_INSTRUCTION_LEN); /* Safe */
  14.134 +        __vmwrite(VM_ENTRY_INSTRUCTION_LEN, inst_len);
  14.135 +
  14.136 +        if (unlikely(idtv_info_field & 0x800)) /* valid error code */
  14.137 +            __vmwrite(VM_ENTRY_EXCEPTION_ERROR_CODE,
  14.138 +                      __vmread(IDT_VECTORING_ERROR_CODE));
  14.139 +        if (unlikely(has_ext_irq))
  14.140 +            enable_irq_window(v);
  14.141 +
  14.142 +        HVM_DBG_LOG(DBG_LEVEL_1, "idtv_info_field=%x", idtv_info_field);
  14.143 +
  14.144 +        return;
  14.145 +    }
  14.146 +
  14.147 +    if ( likely(!has_ext_irq) )
  14.148 +        return;
  14.149 +
  14.150 +    if ( unlikely(is_interruptibility_state()) )
  14.151 +    {
  14.152 +        /* pre-cleared for emulated instruction */
  14.153 +        enable_irq_window(v);
  14.154 +        HVM_DBG_LOG(DBG_LEVEL_1, "interruptibility");
  14.155 +        return;
  14.156 +    }
  14.157 +
  14.158 +    eflags = __vmread(GUEST_RFLAGS);
  14.159 +    if ( irq_masked(eflags) )
  14.160 +    {
  14.161 +        enable_irq_window(v);
  14.162 +        return;
  14.163 +    }
  14.164 +
  14.165 +    highest_vector = cpu_get_interrupt(v, &intr_type);
  14.166 +    if ( highest_vector < 0 )
  14.167 +        return;
  14.168 +
  14.169 +    switch ( intr_type )
  14.170 +    {
  14.171 +    case APIC_DM_EXTINT:
  14.172 +    case APIC_DM_FIXED:
  14.173 +    case APIC_DM_LOWEST:
  14.174 +        vmx_inject_extint(v, highest_vector, VMX_DELIVER_NO_ERROR_CODE);
  14.175 +        TRACE_3D(TRC_VMX_INTR, v->domain->domain_id, highest_vector, 0);
  14.176 +        break;
  14.177 +
  14.178 +    case APIC_DM_SMI:
  14.179 +    case APIC_DM_NMI:
  14.180 +    case APIC_DM_INIT:
  14.181 +    case APIC_DM_STARTUP:
  14.182 +    default:
  14.183 +        printk("Unsupported interrupt type\n");
  14.184 +        BUG();
  14.185 +        break;
  14.186 +    }
  14.187 +    
  14.188 +    hvm_interrupt_post(v, highest_vector, intr_type);
  14.189 +}
  14.190 +
  14.191 +/*
  14.192 + * Local variables:
  14.193 + * mode: C
  14.194 + * c-set-style: "BSD"
  14.195 + * c-basic-offset: 4
  14.196 + * tab-width: 4
  14.197 + * indent-tabs-mode: nil
  14.198 + * End:
  14.199 + */
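
With the flush_tpr_threshold flag removed (see the vlapic.c and vlapic.h hunks below), intr.c recomputes the TPR threshold on every vmx_intr_assist(). The value written to the VMCS reduces to a small calculation; a Python sketch mirroring update_tpr_threshold(), purely illustrative:

    def tpr_threshold(tpr, max_irr):
        # Threshold is the priority class (upper nibble) of the lower of the
        # task-priority register and the highest pending vector in the IRR.
        if max_irr is None:              # vlapic disabled or no IRR bit set
            return 0
        tpr &= 0xF0                      # only the priority class matters
        return (tpr >> 4) if max_irr > tpr else (max_irr >> 4)

    assert tpr_threshold(0x30, 0x51) == 0x3   # pending vector above TPR class
    assert tpr_threshold(0x60, 0x51) == 0x5   # TPR class above pending vector
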
    15.1 --- a/xen/arch/x86/hvm/vmx/io.c	Fri Dec 01 12:03:15 2006 +0000
    15.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    15.3 @@ -1,202 +0,0 @@
    15.4 -/*
    15.5 - * io.c: handling I/O, interrupts related VMX entry/exit
    15.6 - * Copyright (c) 2004, Intel Corporation.
    15.7 - *
    15.8 - * This program is free software; you can redistribute it and/or modify it
    15.9 - * under the terms and conditions of the GNU General Public License,
   15.10 - * version 2, as published by the Free Software Foundation.
   15.11 - *
   15.12 - * This program is distributed in the hope it will be useful, but WITHOUT
   15.13 - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   15.14 - * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   15.15 - * more details.
   15.16 - *
   15.17 - * You should have received a copy of the GNU General Public License along with
   15.18 - * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
   15.19 - * Place - Suite 330, Boston, MA 02111-1307 USA.
   15.20 - *
   15.21 - */
   15.22 -
   15.23 -#include <xen/config.h>
   15.24 -#include <xen/init.h>
   15.25 -#include <xen/mm.h>
   15.26 -#include <xen/lib.h>
   15.27 -#include <xen/errno.h>
   15.28 -#include <xen/trace.h>
   15.29 -#include <xen/event.h>
   15.30 -
   15.31 -#include <asm/current.h>
   15.32 -#include <asm/cpufeature.h>
   15.33 -#include <asm/processor.h>
   15.34 -#include <asm/msr.h>
   15.35 -#include <asm/hvm/hvm.h>
   15.36 -#include <asm/hvm/io.h>
   15.37 -#include <asm/hvm/support.h>
   15.38 -#include <asm/hvm/vmx/vmx.h>
   15.39 -#include <asm/hvm/vmx/vmcs.h>
   15.40 -#include <asm/hvm/vpic.h>
   15.41 -#include <asm/hvm/vlapic.h>
   15.42 -#include <public/hvm/ioreq.h>
   15.43 -
   15.44 -
   15.45 -static inline void
   15.46 -enable_irq_window(struct vcpu *v)
   15.47 -{
   15.48 -    u32  *cpu_exec_control = &v->arch.hvm_vcpu.u.vmx.exec_control;
   15.49 -    
   15.50 -    if (!(*cpu_exec_control & CPU_BASED_VIRTUAL_INTR_PENDING)) {
   15.51 -        *cpu_exec_control |= CPU_BASED_VIRTUAL_INTR_PENDING;
   15.52 -        __vmwrite(CPU_BASED_VM_EXEC_CONTROL, *cpu_exec_control);
   15.53 -    }
   15.54 -}
   15.55 -
   15.56 -static inline void
   15.57 -disable_irq_window(struct vcpu *v)
   15.58 -{
   15.59 -    u32  *cpu_exec_control = &v->arch.hvm_vcpu.u.vmx.exec_control;
   15.60 -    
   15.61 -    if ( *cpu_exec_control & CPU_BASED_VIRTUAL_INTR_PENDING ) {
   15.62 -        *cpu_exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING;
   15.63 -        __vmwrite(CPU_BASED_VM_EXEC_CONTROL, *cpu_exec_control);
   15.64 -    }
   15.65 -}
   15.66 -
   15.67 -static inline int is_interruptibility_state(void)
   15.68 -{
   15.69 -    return __vmread(GUEST_INTERRUPTIBILITY_INFO);
   15.70 -}
   15.71 -
   15.72 -#ifdef __x86_64__
   15.73 -static void update_tpr_threshold(struct vlapic *vlapic)
   15.74 -{
   15.75 -    int max_irr, tpr;
   15.76 -
   15.77 -    /* Clear the work-to-do flag /then/ do the work. */
   15.78 -    vlapic->flush_tpr_threshold = 0;
   15.79 -    mb();
   15.80 -
   15.81 -    if ( !vlapic_enabled(vlapic) || 
   15.82 -         ((max_irr = vlapic_find_highest_irr(vlapic)) == -1) )
   15.83 -    {
   15.84 -        __vmwrite(TPR_THRESHOLD, 0);
   15.85 -        return;
   15.86 -    }
   15.87 -
   15.88 -    tpr = vlapic_get_reg(vlapic, APIC_TASKPRI) & 0xF0;
   15.89 -    __vmwrite(TPR_THRESHOLD, (max_irr > tpr) ? (tpr >> 4) : (max_irr >> 4));
   15.90 -}
   15.91 -#else
   15.92 -#define update_tpr_threshold(v) ((void)0)
   15.93 -#endif
   15.94 -
   15.95 -asmlinkage void vmx_intr_assist(void)
   15.96 -{
   15.97 -    int intr_type = 0;
   15.98 -    int highest_vector;
   15.99 -    unsigned long eflags;
  15.100 -    struct vcpu *v = current;
  15.101 -    struct vlapic *vlapic = vcpu_vlapic(v);
  15.102 -    struct hvm_domain *plat=&v->domain->arch.hvm_domain;
  15.103 -    struct periodic_time *pt = &plat->pl_time.periodic_tm;
  15.104 -    unsigned int idtv_info_field;
  15.105 -    unsigned long inst_len;
  15.106 -    int    has_ext_irq;
  15.107 -
  15.108 -    if ( (v->vcpu_id == 0) && pt->enabled && pt->pending_intr_nr )
  15.109 -    {
  15.110 -        hvm_isa_irq_deassert(current->domain, pt->irq);
  15.111 -        hvm_isa_irq_assert(current->domain, pt->irq);
  15.112 -    }
  15.113 -
  15.114 -    hvm_set_callback_irq_level();
  15.115 -
  15.116 -    if ( vlapic->flush_tpr_threshold )
  15.117 -        update_tpr_threshold(vlapic);
  15.118 -
  15.119 -    has_ext_irq = cpu_has_pending_irq(v);
  15.120 -
  15.121 -    if ( unlikely(v->arch.hvm_vmx.vector_injected) )
  15.122 -    {
  15.123 -        v->arch.hvm_vmx.vector_injected=0;
  15.124 -        if (unlikely(has_ext_irq)) enable_irq_window(v);
  15.125 -        return;
  15.126 -    }
  15.127 -
  15.128 -    /* This could be moved earlier in the VMX resume sequence. */
  15.129 -    idtv_info_field = __vmread(IDT_VECTORING_INFO_FIELD);
  15.130 -    if ( unlikely(idtv_info_field & INTR_INFO_VALID_MASK) )
  15.131 -    {
  15.132 -        __vmwrite(VM_ENTRY_INTR_INFO_FIELD, idtv_info_field);
  15.133 -
  15.134 -        /*
  15.135 -         * Safe: the length will only be interpreted for software exceptions
  15.136 -         * and interrupts. If we get here then delivery of some event caused a
  15.137 -         * fault, and this always results in defined VM_EXIT_INSTRUCTION_LEN.
  15.138 -         */
  15.139 -        inst_len = __vmread(VM_EXIT_INSTRUCTION_LEN); /* Safe */
  15.140 -        __vmwrite(VM_ENTRY_INSTRUCTION_LEN, inst_len);
  15.141 -
  15.142 -        if (unlikely(idtv_info_field & 0x800)) /* valid error code */
  15.143 -            __vmwrite(VM_ENTRY_EXCEPTION_ERROR_CODE,
  15.144 -                      __vmread(IDT_VECTORING_ERROR_CODE));
  15.145 -        if (unlikely(has_ext_irq))
  15.146 -            enable_irq_window(v);
  15.147 -
  15.148 -        HVM_DBG_LOG(DBG_LEVEL_1, "idtv_info_field=%x", idtv_info_field);
  15.149 -
  15.150 -        return;
  15.151 -    }
  15.152 -
  15.153 -    if ( likely(!has_ext_irq) )
  15.154 -        return;
  15.155 -
  15.156 -    if ( unlikely(is_interruptibility_state()) )
  15.157 -    {
  15.158 -        /* pre-cleared for emulated instruction */
  15.159 -        enable_irq_window(v);
  15.160 -        HVM_DBG_LOG(DBG_LEVEL_1, "interruptibility");
  15.161 -        return;
  15.162 -    }
  15.163 -
  15.164 -    eflags = __vmread(GUEST_RFLAGS);
  15.165 -    if ( irq_masked(eflags) )
  15.166 -    {
  15.167 -        enable_irq_window(v);
  15.168 -        return;
  15.169 -    }
  15.170 -
  15.171 -    highest_vector = cpu_get_interrupt(v, &intr_type);
  15.172 -    if ( highest_vector < 0 )
  15.173 -        return;
  15.174 -
  15.175 -    switch ( intr_type )
  15.176 -    {
  15.177 -    case APIC_DM_EXTINT:
  15.178 -    case APIC_DM_FIXED:
  15.179 -    case APIC_DM_LOWEST:
  15.180 -        vmx_inject_extint(v, highest_vector, VMX_DELIVER_NO_ERROR_CODE);
  15.181 -        TRACE_3D(TRC_VMX_INTR, v->domain->domain_id, highest_vector, 0);
  15.182 -        break;
  15.183 -
  15.184 -    case APIC_DM_SMI:
  15.185 -    case APIC_DM_NMI:
  15.186 -    case APIC_DM_INIT:
  15.187 -    case APIC_DM_STARTUP:
  15.188 -    default:
  15.189 -        printk("Unsupported interrupt type\n");
  15.190 -        BUG();
  15.191 -        break;
  15.192 -    }
  15.193 -    
  15.194 -    hvm_interrupt_post(v, highest_vector, intr_type);
  15.195 -}
  15.196 -
  15.197 -/*
  15.198 - * Local variables:
  15.199 - * mode: C
  15.200 - * c-set-style: "BSD"
  15.201 - * c-basic-offset: 4
  15.202 - * tab-width: 4
  15.203 - * indent-tabs-mode: nil
  15.204 - * End:
  15.205 - */
    16.1 --- a/xen/arch/x86/hvm/vmx/vmx.c	Fri Dec 01 12:03:15 2006 +0000
    16.2 +++ b/xen/arch/x86/hvm/vmx/vmx.c	Fri Dec 01 12:03:38 2006 +0000
    16.3 @@ -95,13 +95,7 @@ static void vmx_save_host_msrs(void)
    16.4          rdmsrl(msr_index[i], host_msr_state->msrs[i]);
    16.5  }
    16.6  
    16.7 -#define CASE_READ_MSR(address)                                              \
    16.8 -    case MSR_ ## address:                                                   \
    16.9 -        msr_content = guest_msr_state->msrs[VMX_INDEX_MSR_ ## address];     \
   16.10 -        break
   16.11 -
   16.12 -#define CASE_WRITE_MSR(address)                                             \
   16.13 -    case MSR_ ## address:                                                   \
   16.14 +#define WRITE_MSR(address)                                                  \
   16.15          guest_msr_state->msrs[VMX_INDEX_MSR_ ## address] = msr_content;     \
   16.16          if ( !test_bit(VMX_INDEX_MSR_ ## address, &guest_msr_state->flags) )\
   16.17              set_bit(VMX_INDEX_MSR_ ## address, &guest_msr_state->flags);    \
   16.18 @@ -109,7 +103,6 @@ static void vmx_save_host_msrs(void)
   16.19          set_bit(VMX_INDEX_MSR_ ## address, &host_msr_state->flags);         \
   16.20          break
   16.21  
   16.22 -#define IS_CANO_ADDRESS(add) 1
   16.23  static inline int long_mode_do_msr_read(struct cpu_user_regs *regs)
   16.24  {
   16.25      u64 msr_content = 0;
   16.26 @@ -123,27 +116,38 @@ static inline int long_mode_do_msr_read(
   16.27          break;
   16.28  
   16.29      case MSR_FS_BASE:
   16.30 -        if ( !(vmx_long_mode_enabled(v)) )
   16.31 -            goto exit_and_crash;
   16.32 -
   16.33          msr_content = __vmread(GUEST_FS_BASE);
   16.34 -        break;
   16.35 +        goto check_long_mode;
   16.36  
   16.37      case MSR_GS_BASE:
   16.38 -        if ( !(vmx_long_mode_enabled(v)) )
   16.39 -            goto exit_and_crash;
   16.40 -
   16.41          msr_content = __vmread(GUEST_GS_BASE);
   16.42 -        break;
   16.43 +        goto check_long_mode;
   16.44  
   16.45      case MSR_SHADOW_GS_BASE:
   16.46          msr_content = guest_msr_state->shadow_gs;
   16.47 +    check_long_mode:
   16.48 +        if ( !(vmx_long_mode_enabled(v)) )
   16.49 +        {
   16.50 +            vmx_inject_hw_exception(v, TRAP_gp_fault, 0);
   16.51 +            return 0;
   16.52 +        }
   16.53          break;
   16.54  
   16.55 -    CASE_READ_MSR(STAR);
   16.56 -    CASE_READ_MSR(LSTAR);
   16.57 -    CASE_READ_MSR(CSTAR);
   16.58 -    CASE_READ_MSR(SYSCALL_MASK);
   16.59 +    case MSR_STAR:
   16.60 +        msr_content = guest_msr_state->msrs[VMX_INDEX_MSR_STAR];
   16.61 +        break;
   16.62 +
   16.63 +    case MSR_LSTAR:
   16.64 +        msr_content = guest_msr_state->msrs[VMX_INDEX_MSR_LSTAR];
   16.65 +        break;
   16.66 +
   16.67 +    case MSR_CSTAR:
   16.68 +        msr_content = guest_msr_state->msrs[VMX_INDEX_MSR_CSTAR];
   16.69 +        break;
   16.70 +
   16.71 +    case MSR_SYSCALL_MASK:
   16.72 +        msr_content = guest_msr_state->msrs[VMX_INDEX_MSR_SYSCALL_MASK];
   16.73 +        break;
   16.74  
   16.75      default:
   16.76          return 0;
   16.77 @@ -155,32 +159,28 @@ static inline int long_mode_do_msr_read(
   16.78      regs->edx = (u32)(msr_content >> 32);
   16.79  
   16.80      return 1;
   16.81 -
   16.82 - exit_and_crash:
   16.83 -    gdprintk(XENLOG_ERR, "Fatal error reading MSR %lx\n", (long)regs->ecx);
   16.84 -    domain_crash(v->domain);
   16.85 -    return 1; /* handled */
   16.86  }
   16.87  
   16.88  static inline int long_mode_do_msr_write(struct cpu_user_regs *regs)
   16.89  {
   16.90      u64 msr_content = (u32)regs->eax | ((u64)regs->edx << 32);
   16.91 +    u32 ecx = regs->ecx;
   16.92      struct vcpu *v = current;
   16.93      struct vmx_msr_state *guest_msr_state = &v->arch.hvm_vmx.msr_state;
   16.94      struct vmx_msr_state *host_msr_state = &this_cpu(host_msr_state);
   16.95  
   16.96      HVM_DBG_LOG(DBG_LEVEL_1, "msr 0x%x msr_content 0x%"PRIx64"\n",
   16.97 -                (u32)regs->ecx, msr_content);
   16.98 +                ecx, msr_content);
   16.99  
  16.100 -    switch ( (u32)regs->ecx ) {
  16.101 +    switch ( ecx )
  16.102 +    {
  16.103      case MSR_EFER:
  16.104          /* offending reserved bit will cause #GP */
  16.105          if ( msr_content & ~(EFER_LME | EFER_LMA | EFER_NX | EFER_SCE) )
  16.106          {
  16.107 -            printk("Trying to set reserved bit in EFER: %"PRIx64"\n",
  16.108 -                   msr_content);
  16.109 -            vmx_inject_hw_exception(v, TRAP_gp_fault, 0);
  16.110 -            return 0;
  16.111 +            gdprintk(XENLOG_WARNING, "Trying to set reserved bit in "
  16.112 +                     "EFER: %"PRIx64"\n", msr_content);
  16.113 +            goto gp_fault;
  16.114          }
  16.115  
  16.116          if ( (msr_content & EFER_LME)
  16.117 @@ -188,9 +188,9 @@ static inline int long_mode_do_msr_write
  16.118          {
  16.119              if ( unlikely(vmx_paging_enabled(v)) )
  16.120              {
  16.121 -                printk("Trying to set EFER.LME with paging enabled\n");
  16.122 -                vmx_inject_hw_exception(v, TRAP_gp_fault, 0);
  16.123 -                return 0;
  16.124 +                gdprintk(XENLOG_WARNING,
  16.125 +                         "Trying to set EFER.LME with paging enabled\n");
  16.126 +                goto gp_fault;
  16.127              }
  16.128          }
  16.129          else if ( !(msr_content & EFER_LME)
  16.130 @@ -198,9 +198,9 @@ static inline int long_mode_do_msr_write
  16.131          {
  16.132              if ( unlikely(vmx_paging_enabled(v)) )
  16.133              {
  16.134 -                printk("Trying to clear EFER.LME with paging enabled\n");
  16.135 -                vmx_inject_hw_exception(v, TRAP_gp_fault, 0);
  16.136 -                return 0;
  16.137 +                gdprintk(XENLOG_WARNING,
  16.138 +                         "Trying to clear EFER.LME with paging enabled\n");
  16.139 +                goto gp_fault;
  16.140              }
  16.141          }
  16.142  
  16.143 @@ -209,35 +209,40 @@ static inline int long_mode_do_msr_write
  16.144  
  16.145      case MSR_FS_BASE:
  16.146      case MSR_GS_BASE:
  16.147 +    case MSR_SHADOW_GS_BASE:
  16.148          if ( !vmx_long_mode_enabled(v) )
  16.149 -            goto exit_and_crash;
  16.150 +            goto gp_fault;
  16.151  
  16.152 -        if ( !IS_CANO_ADDRESS(msr_content) )
  16.153 +        if ( !is_canonical_address(msr_content) )
  16.154 +            goto uncanonical_address;
  16.155 +
  16.156 +        if ( ecx == MSR_FS_BASE )
  16.157 +            __vmwrite(GUEST_FS_BASE, msr_content);
  16.158 +        else if ( ecx == MSR_GS_BASE )
  16.159 +            __vmwrite(GUEST_GS_BASE, msr_content);
  16.160 +        else
  16.161          {
  16.162 -            HVM_DBG_LOG(DBG_LEVEL_1, "Not cano address of msr write\n");
  16.163 -            vmx_inject_hw_exception(v, TRAP_gp_fault, 0);
  16.164 -            return 0;
  16.165 +            v->arch.hvm_vmx.msr_state.shadow_gs = msr_content;
  16.166 +            wrmsrl(MSR_SHADOW_GS_BASE, msr_content);
  16.167          }
  16.168  
  16.169 -        if ( regs->ecx == MSR_FS_BASE )
  16.170 -            __vmwrite(GUEST_FS_BASE, msr_content);
  16.171 -        else
  16.172 -            __vmwrite(GUEST_GS_BASE, msr_content);
  16.173 -
  16.174          break;
  16.175  
  16.176 -    case MSR_SHADOW_GS_BASE:
  16.177 -        if ( !(vmx_long_mode_enabled(v)) )
  16.178 -            goto exit_and_crash;
  16.179 +    case MSR_STAR:
  16.180 +        WRITE_MSR(STAR);
  16.181  
  16.182 -        v->arch.hvm_vmx.msr_state.shadow_gs = msr_content;
  16.183 -        wrmsrl(MSR_SHADOW_GS_BASE, msr_content);
  16.184 -        break;
  16.185 +    case MSR_LSTAR:
  16.186 +        if ( !is_canonical_address(msr_content) )
  16.187 +            goto uncanonical_address;
  16.188 +        WRITE_MSR(LSTAR);
  16.189  
  16.190 -    CASE_WRITE_MSR(STAR);
  16.191 -    CASE_WRITE_MSR(LSTAR);
  16.192 -    CASE_WRITE_MSR(CSTAR);
  16.193 -    CASE_WRITE_MSR(SYSCALL_MASK);
  16.194 +    case MSR_CSTAR:
  16.195 +        if ( !is_canonical_address(msr_content) )
  16.196 +            goto uncanonical_address;
  16.197 +        WRITE_MSR(CSTAR);
  16.198 +
  16.199 +    case MSR_SYSCALL_MASK:
  16.200 +        WRITE_MSR(SYSCALL_MASK);
  16.201  
  16.202      default:
  16.203          return 0;
  16.204 @@ -245,10 +250,11 @@ static inline int long_mode_do_msr_write
  16.205  
  16.206      return 1;
  16.207  
  16.208 - exit_and_crash:
  16.209 -    gdprintk(XENLOG_ERR, "Fatal error writing MSR %lx\n", (long)regs->ecx);
  16.210 -    domain_crash(v->domain);
  16.211 -    return 1; /* handled */
  16.212 + uncanonical_address:
  16.213 +    HVM_DBG_LOG(DBG_LEVEL_1, "Not cano address of msr write %x\n", ecx);
  16.214 + gp_fault:
  16.215 +    vmx_inject_hw_exception(v, TRAP_gp_fault, 0);
  16.216 +    return 0;
  16.217  }
  16.218  
  16.219  /*
  16.220 @@ -1283,6 +1289,32 @@ static void vmx_io_instruction(unsigned 
  16.221                  ASSERT(count);
  16.222              }
  16.223          }
  16.224 +#ifdef __x86_64__
  16.225 +        else
  16.226 +        {
  16.227 +            if ( !is_canonical_address(addr) ||
  16.228 +                 !is_canonical_address(addr + size - 1) )
  16.229 +            {
  16.230 +                vmx_inject_hw_exception(current, TRAP_gp_fault, 0);
  16.231 +                return;
  16.232 +            }
  16.233 +            if ( count > (1UL << 48) / size )
  16.234 +                count = (1UL << 48) / size;
  16.235 +            if ( !(regs->eflags & EF_DF) )
  16.236 +            {
  16.237 +                if ( addr + count * size - 1 < addr ||
  16.238 +                     !is_canonical_address(addr + count * size - 1) )
  16.239 +                    count = (addr & ~((1UL << 48) - 1)) / size;
  16.240 +            }
  16.241 +            else
  16.242 +            {
  16.243 +                if ( (count - 1) * size > addr ||
  16.244 +                     !is_canonical_address(addr + (count - 1) * size) )
  16.245 +                    count = (addr & ~((1UL << 48) - 1)) / size + 1;
  16.246 +            }
  16.247 +            ASSERT(count);
  16.248 +        }
  16.249 +#endif
  16.250  
  16.251          /*
  16.252           * Handle string pio instructions that cross pages or that
  16.253 @@ -2500,7 +2532,6 @@ asmlinkage void vmx_vmexit_handler(struc
  16.254          break;
  16.255  
  16.256      case EXIT_REASON_TPR_BELOW_THRESHOLD:
  16.257 -        vcpu_vlapic(v)->flush_tpr_threshold = 1;
  16.258          break;
  16.259  
  16.260      default:
    17.1 --- a/xen/arch/x86/mm/shadow/common.c	Fri Dec 01 12:03:15 2006 +0000
    17.2 +++ b/xen/arch/x86/mm/shadow/common.c	Fri Dec 01 12:03:38 2006 +0000
    17.3 @@ -120,12 +120,17 @@ static int hvm_translate_linear_addr(
    17.4           */
    17.5          addr = (uint32_t)(addr + dreg.base);
    17.6      }
    17.7 -    else if ( (seg == x86_seg_fs) || (seg == x86_seg_gs) )
    17.8 +    else
    17.9      {
   17.10          /*
   17.11 -         * LONG MODE: FS and GS add a segment base.
   17.12 +         * LONG MODE: FS and GS add segment base. Addresses must be canonical.
   17.13           */
   17.14 -        addr += dreg.base;
   17.15 +
   17.16 +        if ( (seg == x86_seg_fs) || (seg == x86_seg_gs) )
   17.17 +            addr += dreg.base;
   17.18 +
   17.19 +        if ( !is_canonical_address(addr) )
   17.20 +            goto gpf;
   17.21      }
   17.22  
   17.23      *paddr = addr;
    18.1 --- a/xen/arch/x86/oprofile/op_model_athlon.c	Fri Dec 01 12:03:15 2006 +0000
    18.2 +++ b/xen/arch/x86/oprofile/op_model_athlon.c	Fri Dec 01 12:03:38 2006 +0000
    18.3 @@ -113,14 +113,15 @@ static int athlon_check_ctrs(unsigned in
    18.4  	unsigned long eip = regs->eip;
    18.5  	int mode = 0;
    18.6  	struct vcpu *v = current;
    18.7 -	struct cpu_user_regs tmp_regs;
    18.8 +	struct cpu_user_regs *guest_regs = guest_cpu_user_regs();
    18.9  
   18.10  	if (!guest_mode(regs) &&
   18.11  	    (regs->eip == (unsigned long)svm_stgi_label)) {
   18.12  		/* SVM guest was running when NMI occurred */
   18.13 -		hvm_store_cpu_guest_regs(v, &tmp_regs, NULL);
   18.14 -		eip = tmp_regs.eip;
   18.15 -		mode = xenoprofile_get_mode(v, &tmp_regs);
   18.16 +		ASSERT(is_hvm_vcpu(v));
   18.17 +		hvm_store_cpu_guest_regs(v, guest_regs, NULL);
   18.18 +		eip = guest_regs->eip;
   18.19 +		mode = xenoprofile_get_mode(v, guest_regs);
   18.20  	} else {
   18.21  		eip = regs->eip;
   18.22  		mode = xenoprofile_get_mode(v, regs);
    19.1 --- a/xen/arch/x86/x86_emulate.c	Fri Dec 01 12:03:15 2006 +0000
    19.2 +++ b/xen/arch/x86/x86_emulate.c	Fri Dec 01 12:03:38 2006 +0000
    19.3 @@ -7,16 +7,14 @@
    19.4   */
    19.5  
    19.6  #ifndef __XEN__
    19.7 -#include <stdio.h>
    19.8 +#include <stddef.h>
    19.9  #include <stdint.h>
   19.10  #include <public/xen.h>
   19.11 -#define dprintf(_f, _a...) printf( _f , ## _a )
   19.12  #else
   19.13  #include <xen/config.h>
   19.14  #include <xen/types.h>
   19.15  #include <xen/lib.h>
   19.16  #include <asm/regs.h>
   19.17 -#define dprintf(_f, _a...) gdprintk(XENLOG_WARNING, _f , ## _a )
   19.18  #undef cmpxchg
   19.19  #endif
   19.20  #include <asm-x86/x86_emulate.h>
   19.21 @@ -440,27 +438,6 @@ decode_register(
   19.22      return p;
   19.23  }
   19.24  
   19.25 -static void
   19.26 -dump_instr(
   19.27 -    struct x86_emulate_ctxt *ctxt,
   19.28 -    struct x86_emulate_ops  *ops)
   19.29 -{
   19.30 -#ifdef __XEN__
   19.31 -    int i;
   19.32 -    unsigned long x, eip = ctxt->regs->eip;
   19.33 -
   19.34 -    dprintf("Instr:");
   19.35 -    for ( i = 0; i < 16; i++, eip++ )
   19.36 -    {
   19.37 -        if ( ops->read(x86_seg_cs, eip, &x, 1, ctxt) != 0 )
   19.38 -            printk(" ??");
   19.39 -        else
   19.40 -            printk(" %02x", (uint8_t)x);
   19.41 -    }
   19.42 -    printk("\n");
   19.43 -#endif
   19.44 -}
   19.45 -
   19.46  int
   19.47  x86_emulate_memop(
   19.48      struct x86_emulate_ctxt *ctxt,
   19.49 @@ -579,10 +556,7 @@ x86_emulate_memop(
   19.50          modrm_rm  = modrm & 0x07;
   19.51  
   19.52          if ( modrm_mod == 3 )
   19.53 -        {
   19.54 -            dprintf("Cannot parse ModRM.mod == 3.\n");
   19.55              goto cannot_emulate;
   19.56 -        }
   19.57  
   19.58          if ( ad_bytes == 2 )
   19.59          {
   19.60 @@ -1206,7 +1180,15 @@ x86_emulate_memop(
   19.61      goto writeback;
   19.62  
   19.63   cannot_emulate:
   19.64 -    dprintf("Cannot emulate %02x\n", b);
   19.65 -    dump_instr(ctxt, ops);
   19.66 +#ifdef __XEN__
   19.67 +    gdprintk(XENLOG_DEBUG, "Instr:");
   19.68 +    for ( ea_off = ctxt->regs->eip; ea_off < _regs.eip; ea_off++ )
   19.69 +    {
   19.70 +        unsigned long x;
   19.71 +        ops->read(x86_seg_cs, ea_off, &x, 1, ctxt);
   19.72 +        printk(" %02x", (uint8_t)x);
   19.73 +    }
   19.74 +    printk("\n");
   19.75 +#endif
   19.76      return -1;
   19.77  }
    20.1 --- a/xen/include/asm-x86/hvm/hvm.h	Fri Dec 01 12:03:15 2006 +0000
    20.2 +++ b/xen/include/asm-x86/hvm/hvm.h	Fri Dec 01 12:03:38 2006 +0000
    20.3 @@ -157,11 +157,15 @@ hvm_paging_enabled(struct vcpu *v)
    20.4      return hvm_funcs.paging_enabled(v);
    20.5  }
    20.6  
    20.7 +#ifdef __x86_64__
    20.8  static inline int
    20.9  hvm_long_mode_enabled(struct vcpu *v)
   20.10  {
   20.11      return hvm_funcs.long_mode_enabled(v);
   20.12  }
   20.13 +#else
   20.14 +#define hvm_long_mode_enabled(v) 0
   20.15 +#endif
   20.16  
   20.17   static inline int
   20.18  hvm_pae_enabled(struct vcpu *v)
    21.1 --- a/xen/include/asm-x86/hvm/vlapic.h	Fri Dec 01 12:03:15 2006 +0000
    21.2 +++ b/xen/include/asm-x86/hvm/vlapic.h	Fri Dec 01 12:03:38 2006 +0000
    21.3 @@ -54,7 +54,6 @@ struct vlapic {
    21.4      uint32_t           timer_divisor;
    21.5      struct timer       vlapic_timer;
    21.6      int                timer_pending_count;
    21.7 -    int                flush_tpr_threshold;
    21.8      s_time_t           timer_last_update;
    21.9      struct page_info   *regs_page;
   21.10      void               *regs;
    22.1 --- a/xen/include/asm-x86/x86_32/page.h	Fri Dec 01 12:03:15 2006 +0000
    22.2 +++ b/xen/include/asm-x86/x86_32/page.h	Fri Dec 01 12:03:38 2006 +0000
    22.3 @@ -7,6 +7,8 @@
    22.4  #define VADDR_BITS              32
    22.5  #define VADDR_MASK              (~0UL)
    22.6  
    22.7 +#define is_canonical_address(x) 1
    22.8 +
    22.9  #include <xen/config.h>
   22.10  #ifdef CONFIG_X86_PAE
   22.11  # include <asm/x86_32/page-3level.h>
    23.1 --- a/xen/include/asm-x86/x86_64/page.h	Fri Dec 01 12:03:15 2006 +0000
    23.2 +++ b/xen/include/asm-x86/x86_64/page.h	Fri Dec 01 12:03:38 2006 +0000
    23.3 @@ -24,6 +24,8 @@
    23.4  #define PADDR_MASK              ((1UL << PADDR_BITS)-1)
    23.5  #define VADDR_MASK              ((1UL << VADDR_BITS)-1)
    23.6  
    23.7 +#define is_canonical_address(x) (((long)(x) >> 47) == ((long)(x) >> 63))
    23.8 +
    23.9  #ifndef __ASSEMBLY__
   23.10  
   23.11  #include <xen/config.h>
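
The new is_canonical_address() macro accepts an address only if bits 63..47 are all copies of bit 47 (48-bit sign extension); the two arithmetic shifts both collapse to 0 for the lower half and to -1 for the upper half, and disagree inside the non-canonical hole. On x86_32 the macro is simply 1, since every 32-bit address is canonical. An equivalent check in Python, for illustration only:

    def is_canonical_address(x):
        # Bits 47..63 (17 bits) must be all zeros or all ones.
        top = (x >> 47) & 0x1FFFF
        return top in (0, 0x1FFFF)

    assert is_canonical_address(0x00007FFFFFFFFFFF)      # top of lower half
    assert is_canonical_address(0xFFFF800000000000)      # bottom of upper half
    assert not is_canonical_address(0x0000800000000000)  # non-canonical hole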