ia64/xen-unstable
changeset 17368:feee6422144f
merge with xen-unstable.hg
| author | Alex Williamson <alex.williamson@hp.com> |
|---|---|
| date | Tue Apr 01 11:29:03 2008 -0600 (2008-04-01) |
| parents | daf16171a05f 59d2638a7243 |
| children | 6cf504b4de7d 2e91c231501d |
| files | .hgignore xen/include/public/arch-ia64.h |
line diff
```diff
--- a/.hgignore	Tue Apr 01 10:30:57 2008 -0600
+++ b/.hgignore	Tue Apr 01 11:29:03 2008 -0600
@@ -184,6 +184,7 @@
 ^tools/tests/blowfish\.bin$
 ^tools/tests/blowfish\.h$
 ^tools/tests/test_x86_emulator$
+^tools/tests/x86_emulate$
 ^tools/vnet/Make.local$
 ^tools/vnet/build/.*$
 ^tools/vnet/gc$
```
```diff
--- a/docs/ChangeLog	Tue Apr 01 10:30:57 2008 -0600
+++ b/docs/ChangeLog	Tue Apr 01 11:29:03 2008 -0600
@@ -16,6 +16,16 @@ http://lists.xensource.com/archives/html
 Xen 3.3 release
 ---------------
 
+17336: Add platform capabilities field to XEN_SYSCTL_physinfo
+http://xenbits.xensource.com/xen-unstable.hg?rev/250606290439
+
+17289: PV framebuffer dynamic resolution facility
+http://xenbits.xensource.com/xen-unstable.hg?rev/d97e61001d81
+
+Guest may send XENFB_TYPE_RESIZE if feature-resize=1 in
+xenstore of the backend VNC server. VNC server code sets
+feature-resize if it can handle the resize request.
+
 16857: XS_SET_TARGET
 http://xenbits.xensource.com/xen-unstable.hg?rev/26fc953a89bb
```
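The ChangeLog describes the resize protocol only in prose. As a rough illustration of the guest side, here is a hedged C sketch of queuing a XENFB_TYPE_RESIZE event on the xenfb out-ring; the exact field layout of struct xenfb_resize in xen/include/public/io/fbif.h varies between versions, and wmb() stands in for whatever write barrier the guest environment provides — treat all of it as assumptions, not the committed code:

```c
#include <string.h>
#include <stdint.h>
#include <xen/io/fbif.h>   /* XENFB_TYPE_RESIZE, XENFB_OUT_RING_REF, ... */

/* Hypothetical helper: only meaningful if the backend advertised
 * feature-resize=1 in xenstore, per the ChangeLog text above. */
static void xenfb_send_resize(struct xenfb_page *page, int width, int height)
{
    union xenfb_out_event event;
    uint32_t prod = page->out_prod;

    /* Caller is assumed to have checked that the ring is not full. */
    memset(&event, 0, sizeof(event));
    event.type = XENFB_TYPE_RESIZE;
    event.resize.width = width;    /* field names assumed from fbif.h */
    event.resize.height = height;

    XENFB_OUT_RING_REF(page, prod) = event;
    wmb();                         /* publish the event before the index */
    page->out_prod = prod + 1;
}
```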
```diff
--- a/docs/src/user.tex	Tue Apr 01 10:30:57 2008 -0600
+++ b/docs/src/user.tex	Tue Apr 01 11:29:03 2008 -0600
@@ -1618,9 +1618,9 @@ if a virtual machine uses only half of i
 really takes up half of the size allocated.
 
 For example, to create a 2GB sparse file-backed virtual block device
-(actually only consumes 1KB of disk):
+(actually only consumes no disk space at all):
 \begin{quote}
-  \verb_# dd if=/dev/zero of=vm1disk bs=1k seek=2048k count=1_
+  \verb_# dd if=/dev/zero of=vm1disk bs=1k seek=2048k count=0_
 \end{quote}
 
 Make a file system in the disk file:
@@ -4306,14 +4306,22 @@ mailing lists and subscription informati
 
 \appendix
 
-\chapter{Unmodified (VMX) guest domains in Xen with Intel\textregistered Virtualization Technology (VT)}
-
-Xen supports guest domains running unmodified Guest operating systems using Virtualization Technology (VT) available on recent Intel Processors. More information about the Intel Virtualization Technology implementing Virtual Machine Extensions (VMX) in the processor is available on the Intel website at \\
+\chapter{Unmodified (HVM) guest domains in Xen with Hardware support for Virtualization}
+
+Xen supports guest domains running unmodified guest operating systems using
+virtualization extensions available on recent processors. Currently processors
+featuring the Intel Virtualization Extension (Intel-VT) or the AMD extension
+(AMD-V) are supported. The technology covering both implementations is
+called HVM (for Hardware Virtual Machine) in Xen. More information about the
+virtualization extensions are available on the respective websites:
 {\small {\tt http://www.intel.com/technology/computing/vptech}}
 
-\section{Building Xen with VT support}
-
-The following packages need to be installed in order to build Xen with VT support. Some Linux distributions do not provide these packages by default.
+
+ {\small {\tt http://www.amd.com/us-en/assets/content\_type/white\_papers\_and\_tech\_docs/24593.pdf}}
+
+\section{Building Xen with HVM support}
+
+The following packages need to be installed in order to build Xen with HVM support. Some Linux distributions do not provide these packages by default.
 
 \begin{tabular}{lp{11.0cm}}
 {\bfseries Package} & {\bfseries Description} \\
@@ -4322,70 +4330,75 @@ dev86 & The dev86 package provides an as
 
 If the dev86 package is not available on the x86\_64 distribution, you can install the i386 version of it. The dev86 rpm package for various distributions can be found at {\scriptsize {\tt http://www.rpmfind.net/linux/rpm2html/search.php?query=dev86\&submit=Search}} \\
 
-LibVNCServer & The unmodified guest's VGA display, keyboard, and mouse can be virtualized by the vncserver library. You can get the sources of libvncserver from {\small {\tt http://sourceforge.net/projects/libvncserver}}. Build and install the sources on the build system to get the libvncserver library. There is a significant performance degradation in 0.8 version. The current sources in the CVS tree have fixed this degradation. So it is highly recommended to download the latest CVS sources and install them.\\
-
 SDL-devel, SDL & Simple DirectMedia Layer (SDL) is another way of virtualizing the unmodified guest console. It provides an X window for the guest console.
 
 If the SDL and SDL-devel packages are not installed by default on the build system, they can be obtained from {\scriptsize {\tt http://www.rpmfind.net/linux/rpm2html/search.php?query=SDL\&submit=Search}}
-, {\scriptsize {\tt http://www.rpmfind.net/linux/rpm2html/search.php?query=SDL-devel\&submit=Search}} \\
+
+
+{\scriptsize {\tt http://www.rpmfind.net/linux/rpm2html/search.php?query=SDL-devel\&submit=Search}} \\
 
 \end{tabular}
 
-\section{Configuration file for unmodified VMX guests}
-
-The Xen installation includes a sample configuration file, {\small {\tt /etc/xen/xmexample.vmx}}. There are comments describing all the options. In addition to the common options that are the same as those for paravirtualized guest configurations, VMX guest configurations have the following settings:
+\section{Configuration file for unmodified HVM guests}
+
+The Xen installation includes a sample configuration file, {\small {\tt /etc/xen/xmexample.hvm}}. There are comments describing all the options. In addition to the common options that are the same as those for paravirtualized guest configurations, HVM guest configurations have the following settings:
 
 \begin{tabular}{lp{11.0cm}}
 
 {\bfseries Parameter} & {\bfseries Description} \\
 
-kernel & The VMX firmware loader, {\small {\tt /usr/lib/xen/boot/vmxloader}}\\
-
-builder & The domain build function. The VMX domain uses the vmx builder.\\
-
-acpi & Enable VMX guest ACPI, default=0 (disabled)\\
-
-apic & Enable VMX guest APIC, default=0 (disabled)\\
-
-pae & Enable VMX guest PAE, default=0 (disabled)\\
-
-vif & Optionally defines MAC address and/or bridge for the network interfaces. Random MACs are assigned if not given. {\small {\tt type=ioemu}} means ioemu is used to virtualize the VMX NIC. If no type is specified, vbd is used, as with paravirtualized guests.\\
-
-disk & Defines the disk devices you want the domain to have access to, and what you want them accessible as. If using a physical device as the VMX guest's disk, each disk entry is of the form
+kernel & The HVM firmware loader, {\small {\tt /usr/lib/xen/boot/hvmloader}}\\
+
+builder & The domain build function. The HVM domain uses the 'hvm' builder.\\
+
+acpi & Enable HVM guest ACPI, default=1 (enabled)\\
+
+apic & Enable HVM guest APIC, default=1 (enabled)\\
+
+pae & Enable HVM guest PAE, default=1 (enabled)\\
+
+hap & Enable hardware-assisted paging support, such as AMD-V's nested paging
+or Intel\textregistered VT's extended paging. If available, Xen will
+use hardware-assisted paging instead of shadow paging for this guest's memory
+management.\\
+
+vif & Optionally defines MAC address and/or bridge for the network interfaces. Random MACs are assigned if not given. {\small {\tt type=ioemu}} means ioemu is used to virtualize the HVM NIC. If no type is specified, vbd is used, as with paravirtualized guests.\\
+
+disk & Defines the disk devices you want the domain to have access to, and what you want them accessible as. If using a physical device as the HVM guest's disk, each disk entry is of the form
 
 {\small {\tt phy:UNAME,ioemu:DEV,MODE,}}
 
-where UNAME is the device, DEV is the device name the domain will see, and MODE is r for read-only, w for read-write. ioemu means the disk will use ioemu to virtualize the VMX disk. If not adding ioemu, it uses vbd like paravirtualized guests.
+where UNAME is the host device file, DEV is the device name the domain will see, and MODE is r for read-only, w for read-write. ioemu means the disk will use ioemu to virtualize the HVM disk. If not adding ioemu, it uses vbd like paravirtualized guests.
 
 If using disk image file, its form should be like
 
 {\small {\tt file:FILEPATH,ioemu:DEV,MODE}}
 
+Optical devices can be emulated by appending cdrom to the device type
+
+{\small {\tt ',hdc:cdrom,r'}}
+
 If using more than one disk, there should be a comma between each disk entry. For example:
 
-{\scriptsize {\tt disk = ['file:/var/images/image1.img,ioemu:hda,w', 'file:/var/images/image2.img,ioemu:hdb,w']}}\\
-
-cdrom & Disk image for CD-ROM. The default is {\small {\tt /dev/cdrom}} for Domain0. Inside the VMX domain, the CD-ROM will available as device {\small {\tt /dev/hdc}}. The entry can also point to an ISO file.\\
-
-boot & Boot from floppy (a), hard disk (c) or CD-ROM (d). For example, to boot from CD-ROM, the entry should be:
-
-boot='d'\\
-
-device\_model & The device emulation tool for VMX guests. This parameter should not be changed.\\
+{\scriptsize {\tt disk = ['file:/var/images/image1.img,ioemu:hda,w', 'phy:hda1,hdb1,w', 'file:/var/images/install1.iso,hdc:cdrom,r']}}\\
+
+boot & Boot from floppy (a), hard disk (c) or CD-ROM (d). For example, to boot from CD-ROM and fallback to HD, the entry should be:
+
+boot='dc'\\
+
+device\_model & The device emulation tool for HVM guests. This parameter should not be changed.\\
 
 sdl & Enable SDL library for graphics, default = 0 (disabled)\\
 
 vnc & Enable VNC library for graphics, default = 1 (enabled)\\
 
-vncviewer & Enable spawning of the vncviewer (only valid when vnc=1), default = 1 (enabled)
-
-If vnc=1 and vncviewer=0, user can use vncviewer to manually connect VMX from remote. For example:
-
-{\small {\tt vncviewer domain0\_IP\_address:VMX\_domain\_id}} \\
-
-ne2000 & Enable ne2000, default = 0 (disabled; use pcnet)\\
-
-serial & Enable redirection of VMX serial output to pty device\\
+vncconsole & Enable spawning of the vncviewer (only valid when vnc=1), default = 0 (disabled)
+
+If vnc=1 and vncconsole=0, user can use vncviewer to manually connect HVM from remote. For example:
+
+{\small {\tt vncviewer domain0\_IP\_address:HVM\_domain\_id}} \\
+
+serial & Enable redirection of HVM serial output to pty device\\
 
 \end{tabular}
 
@@ -4416,9 +4429,9 @@ Details about mouse emulation are provid
 
 localtime & Set the real time clock to local time [default=0, that is, set to UTC].\\
 
-enable-audio & Enable audio support. This is under development.\\
-
-full-screen & Start in full screen. This is under development.\\
+soundhw & Enable sound card support and specify the hardware to emulate. Values can be sb16, es1370 or all. Default is none.\\
+
+full-screen & Start in full screen.\\
 
 nographic & Another way to redirect serial output. If enabled, no 'sdl' or 'vnc' can work. Not recommended.\\
 
@@ -4430,18 +4443,18 @@ nographic & Another way to redirect
 If you are using a physical disk or physical disk partition, you need to install a Linux OS on the disk first. Then the boot loader should be installed in the correct place. For example {\small {\tt dev/sda}} for booting from the whole disk, or {\small {\tt /dev/sda1}} for booting from partition 1.
 
 \subsection{Using disk image files}
-You need to create a large empty disk image file first; then, you need to install a Linux OS onto it. There are two methods you can choose. One is directly installing it using a VMX guest while booting from the OS installation CD-ROM. The other is copying an installed OS into it. The boot loader will also need to be installed.
+You need to create a large empty disk image file first; then, you need to install a Linux OS onto it. There are two methods you can choose. One is directly installing it using a HVM guest while booting from the OS installation CD-ROM. The other is copying an installed OS into it. The boot loader will also need to be installed.
 
 \subsubsection*{To create the image file:}
 The image size should be big enough to accommodate the entire OS. This example assumes the size is 1G (which is probably too small for most OSes).
 
-{\small {\tt \# dd if=/dev/zero of=hd.img bs=1M count=1 seek=1023}}
-
-\subsubsection*{To directly install Linux OS into an image file using a VMX guest:}
-
-Install Xen and create VMX with the original image file with booting from CD-ROM. Then it is just like a normal Linux OS installation. The VMX configuration file should have these two entries before creating:
-
-{\small {\tt cdrom='/dev/cdrom'
+{\small {\tt \# dd if=/dev/zero of=hd.img bs=1M count=0 seek=1024}}
+
+\subsubsection*{To directly install Linux OS into an image file using a HVM guest:}
+
+Install Xen and create HVM with the original image file with booting from CD-ROM. Then it is just like a normal Linux OS installation. The HVM configuration file should have a stanza for the CD-ROM as well as a boot device specification:
+
+{\small {\tt disk=['file:/var/images/your-hd.img,hda,w', ',hdc:cdrom,r' ]
 boot='d'}}
 
 If this method does not succeed, you can choose the following method of copying an installed Linux OS into an image file.
@@ -4509,31 +4522,28 @@ none /sys sysfs
 
 Now, the guest OS image {\small {\tt hd.img}} is ready. You can also reference {\small {\tt http://free.oszoo.org}} for quickstart images. But make sure to install the boot loader.
 
-\subsection{Install Windows into an Image File using a VMX guest}
-In order to install a Windows OS, you should keep {\small {\tt acpi=0}} in your VMX configuration file.
-
-\section{VMX Guests}
-\subsection{Editing the Xen VMX config file}
-Make a copy of the example VMX configuration file {\small {\tt /etc/xen/xmeaxmple.vmx}} and edit the line that reads
-
-{\small {\tt disk = [ 'file:/var/images/\emph{guest.img},ioemu:hda,w' ]}}
-
-replacing \emph{guest.img} with the name of the guest OS image file you just made.
-
-\subsection{Creating VMX guests}
-Simply follow the usual method of creating the guest, using the -f parameter and providing the filename of your VMX configuration file:\\
+\section{HVM Guests}
+\subsection{Editing the Xen HVM config file}
+Make a copy of the example HVM configuration file {\small {\tt /etc/xen/xmexample.hvm}} and edit the line that reads
+
+{\small {\tt disk = [ 'file:/var/images/\emph{min-el3-i386.img},hda,w' ]}}
+
+replacing \emph{min-el3-i386.img} with the name of the guest OS image file you just made.
+
+\subsection{Creating HVM guests}
+Simply follow the usual method of creating the guest, providing the filename of your HVM configuration file:\\
 
 {\small {\tt \# xend start\\
-\# xm create /etc/xen/vmxguest.vmx}}
-
-In the default configuration, VNC is on and SDL is off. Therefore VNC windows will open when VMX guests are created. If you want to use SDL to create VMX guests, set {\small {\tt sdl=1}} in your VMX configuration file. You can also turn off VNC by setting {\small {\tt vnc=0}}.
+\# xm create /etc/xen/hvmguest.hvm}}
+
+In the default configuration, VNC is on and SDL is off. Therefore VNC windows will open when HVM guests are created. If you want to use SDL to create HVM guests, set {\small {\tt sdl=1}} in your HVM configuration file. You can also turn off VNC by setting {\small {\tt vnc=0}}.
 
 \subsection{Mouse issues, especially under VNC}
 Mouse handling when using VNC is a little problematic.
 The problem is that the VNC viewer provides a virtual pointer which is
 located at an absolute location in the VNC window and only absolute
 coordinates are provided.
-The VMX device model converts these absolute mouse coordinates
+The HVM device model converts these absolute mouse coordinates
 into the relative motion deltas that are expected by the PS/2
 mouse driver running in the guest.
 Unfortunately,
@@ -4550,7 +4560,7 @@ there are no longer any left mouse delta
 can be provided by the device model emulation code.)
 
 To deal with these mouse issues there are 4 different
-mouse emulations available from the VMX device model:
+mouse emulations available from the HVM device model:
 
 \begin{description}
 \item[PS/2 mouse over the PS/2 port.]
@@ -4845,7 +4855,7 @@ vendor id
 and product id
 \textbf{310b}.
 This device could be made available
-to the VMX guest by including the
+to the HVM guest by including the
 config file entry
 {\small
 \begin{verbatim}
@@ -4959,7 +4969,7 @@ not the guest can see a USB mouse.}
 will remove the USB mouse
 driver from the Dom0 kernel
 and the mouse will now be
-accessible by the VMX guest.
+accessible by the HVM guest.
 
 Be aware the the Linux USB
 hotplug system will reload
@@ -4981,26 +4991,25 @@ just to make sure it doesn't get
 reloaded.
 \end{description}
 
-\subsection{Destroy VMX guests}
-VMX guests can be destroyed in the same way as can paravirtualized guests. We recommend that you type the command
+\subsection{Destroy HVM guests}
+HVM guests can be destroyed in the same way as can paravirtualized guests. We recommend that you shut-down the guest using the guest OS' provided method, for Linux, type the command
 
 {\small {\tt poweroff}}
 
-in the VMX guest's console first to prevent data loss. Then execute the command
+in the HVM guest's console, for Windows use Start -> Shutdown first to prevent
+data loss. Depending on the configuration the guest will be automatically
+destroyed, otherwise execute the command
 
 {\small {\tt xm destroy \emph{vmx\_guest\_id} }}
 
 at the Domain0 console.
 
-\subsection{VMX window (X or VNC) Hot Key}
-If you are running in the X environment after creating a VMX guest, an X window is created. There are several hot keys for control of the VMX guest that can be used in the window.
+\subsection{HVM window (X or VNC) Hot Key}
+If you are running in the X environment after creating a HVM guest, an X window is created. There are several hot keys for control of the HVM guest that can be used in the window.
 
-{\bfseries Ctrl+Alt+2} switches from guest VGA window to the control window. Typing {\small {\tt help }} shows the control commands help. For example, 'q' is the command to destroy the VMX guest.\\
-{\bfseries Ctrl+Alt+1} switches back to VMX guest's VGA.\\
-{\bfseries Ctrl+Alt+3} switches to serial port output. It captures serial output from the VMX guest. It works only if the VMX guest was configured to use the serial port. \\
-
-\subsection{Save/Restore and Migration}
-VMX guests currently cannot be saved and restored, nor migrated. These features are currently under active development.
+{\bfseries Ctrl+Alt+2} switches from guest VGA window to the control window. Typing {\small {\tt help }} shows the control commands help. For example, 'q' is the command to destroy the HVM guest.\\
+{\bfseries Ctrl+Alt+1} switches back to HVM guest's VGA.\\
+{\bfseries Ctrl+Alt+3} switches to serial port output. It captures serial output from the HVM guest. It works only if the HVM guest was configured to use the serial port. \\
 
 \chapter{Vnets - Domain Virtual Networking}
```
```diff
--- a/docs/xen-api/revision-history.tex	Tue Apr 01 10:30:57 2008 -0600
+++ b/docs/xen-api/revision-history.tex	Tue Apr 01 11:29:03 2008 -0600
@@ -23,12 +23,19 @@
    \end{flushleft}
   \end{minipage}\\
  \hline
-  1.0.2 & 11th Feb. 08 & S. Berger &
+  1.0.3 & 11th Feb. 08 & S. Berger &
   \begin{minipage}[t]{7cm}
    \begin{flushleft}
     Added table of contents and hyperlink cross reference.
    \end{flushleft}
   \end{minipage}\\
  \hline
+  1.0.4 & 23rd March 08 & S. Berger &
+  \begin{minipage}[t]{7cm}
+   \begin{flushleft}
+    Added XSPolicy.can\_run
+   \end{flushleft}
+  \end{minipage}\\
+ \hline
 \end{tabular}
-\end{center}
\ No newline at end of file
+\end{center}
```
```diff
--- a/docs/xen-api/xenapi-coversheet.tex	Tue Apr 01 10:30:57 2008 -0600
+++ b/docs/xen-api/xenapi-coversheet.tex	Tue Apr 01 11:29:03 2008 -0600
@@ -22,7 +22,7 @@
 \newcommand{\releasestatement}{Stable Release}
 
 %% Document revision
-\newcommand{\revstring}{API Revision 1.0.2}
+\newcommand{\revstring}{API Revision 1.0.4}
 
 %% Document authors
 \newcommand{\docauthors}{
```
```diff
--- a/docs/xen-api/xenapi-datamodel.tex	Tue Apr 01 10:30:57 2008 -0600
+++ b/docs/xen-api/xenapi-datamodel.tex	Tue Apr 01 11:29:03 2008 -0600
@@ -14938,6 +14938,41 @@ Currently active instantiation flags.
 \vspace{0.3cm}
 \vspace{0.3cm}
 \vspace{0.3cm}
+\subsubsection{RPC name:~can\_run}
+
+{\bf Overview:}
+Check whether a VM with the given security label could run on the system.
+
+ \noindent {\bf Signature:}
+\begin{verbatim} int can_run (session_id s, string security_label)\end{verbatim}
+
+
+\noindent{\bf Arguments:}
+
+
+\vspace{0.3cm}
+\begin{tabular}{|c|c|p{7cm}|}
+ \hline
+{\bf type} & {\bf name} & {\bf description} \\ \hline
+{\tt string } & security_label & reference to the object \\ \hline
+
+\end{tabular}
+
+\vspace{0.3cm}
+
+ \noindent {\bf Return Type:}
+{\tt
+int
+}
+
+
+Error code indicating whether a VM with the given security label could run.
+If zero, it can run.
+
+\vspace{0.3cm}
+
+\noindent{\bf Possible Error Codes:} {\tt SECURITY\_ERROR}
+
 \subsubsection{RPC name:~get\_all}
 
 {\bf Overview:}
```
```diff
--- a/extras/mini-os/blkfront.c	Tue Apr 01 10:30:57 2008 -0600
+++ b/extras/mini-os/blkfront.c	Tue Apr 01 11:29:03 2008 -0600
@@ -319,6 +319,7 @@ int blkfront_aio_poll(struct blkfront_de
 {
     RING_IDX rp, cons;
     struct blkif_response *rsp;
+    int more;
 
 moretodo:
 #ifdef HAVE_LIBC
@@ -334,6 +335,7 @@ moretodo:
     while ((cons != rp))
     {
         rsp = RING_GET_RESPONSE(&dev->ring, cons);
+        nr_consumed++;
 
         if (rsp->status != BLKIF_RSP_OKAY)
             printk("block error %d for op %d\n", rsp->status, rsp->operation);
@@ -343,29 +345,30 @@ moretodo:
         case BLKIF_OP_WRITE:
         {
             struct blkfront_aiocb *aiocbp = (void*) (uintptr_t) rsp->id;
+            int status = rsp->status;
             int j;
 
             for (j = 0; j < aiocbp->n; j++)
                 gnttab_end_access(aiocbp->gref[j]);
 
+            dev->ring.rsp_cons = ++cons;
             /* Nota: callback frees aiocbp itself */
-            aiocbp->aio_cb(aiocbp, rsp->status ? -EIO : 0);
+            aiocbp->aio_cb(aiocbp, status ? -EIO : 0);
+            if (dev->ring.rsp_cons != cons)
+                /* We reentered, we must not continue here */
+                goto out;
             break;
         }
-        case BLKIF_OP_WRITE_BARRIER:
-        case BLKIF_OP_FLUSH_DISKCACHE:
-            break;
         default:
             printk("unrecognized block operation %d response\n", rsp->operation);
+        case BLKIF_OP_WRITE_BARRIER:
+        case BLKIF_OP_FLUSH_DISKCACHE:
+            dev->ring.rsp_cons = ++cons;
             break;
         }
+    }
 
-        nr_consumed++;
-        ++cons;
-    }
-    dev->ring.rsp_cons = cons;
-
-    int more;
+out:
     RING_FINAL_CHECK_FOR_RESPONSES(&dev->ring, more);
     if (more) goto moretodo;
```
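The interesting part of this blkfront change is the re-entrancy guard: the ring's consumer index is published *before* the callback runs, and the loop then checks whether the callback itself consumed further responses. A minimal toy model of that pattern, with the blkif ring reduced to a plain array and every name invented for illustration:

```c
#include <stdio.h>

#define RING_SIZE 8
static int responses[RING_SIZE] = { 1, 2, 3, 4 };
static unsigned int rsp_cons = 0, rsp_prod = 4;

static void consume(void);

static void callback(int rsp)
{
    printf("handled %d\n", rsp);
    if (rsp == 2)
        consume();                /* re-enters the consumer loop */
}

static void consume(void)
{
    unsigned int cons = rsp_cons;
    while (cons != rsp_prod) {
        int rsp = responses[cons % RING_SIZE];
        rsp_cons = ++cons;        /* publish consumption first ...     */
        callback(rsp);            /* ... because this call may recurse */
        if (rsp_cons != cons)
            return;               /* recursion drained the ring: stop  */
    }
}

int main(void)
{
    consume();                    /* prints each response exactly once */
    return 0;
}
```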
```diff
--- a/extras/mini-os/gnttab.c	Tue Apr 01 10:30:57 2008 -0600
+++ b/extras/mini-os/gnttab.c	Tue Apr 01 11:29:03 2008 -0600
@@ -32,6 +32,9 @@
 
 static grant_entry_t *gnttab_table;
 static grant_ref_t gnttab_list[NR_GRANT_ENTRIES];
+#ifdef GNT_DEBUG
+static char inuse[NR_GRANT_ENTRIES];
+#endif
 static __DECLARE_SEMAPHORE_GENERIC(gnttab_sem, NR_GRANT_ENTRIES);
 
 static void
@@ -39,6 +42,10 @@ put_free_entry(grant_ref_t ref)
 {
     unsigned long flags;
     local_irq_save(flags);
+#ifdef GNT_DEBUG
+    BUG_ON(!inuse[ref]);
+    inuse[ref] = 0;
+#endif
     gnttab_list[ref] = gnttab_list[0];
     gnttab_list[0]  = ref;
     local_irq_restore(flags);
@@ -54,6 +61,10 @@ get_free_entry(void)
     local_irq_save(flags);
     ref = gnttab_list[0];
     gnttab_list[0] = gnttab_list[ref];
+#ifdef GNT_DEBUG
+    BUG_ON(inuse[ref]);
+    inuse[ref] = 1;
+#endif
     local_irq_restore(flags);
     return ref;
 }
@@ -92,10 +103,12 @@ gnttab_end_access(grant_ref_t ref)
 {
     u16 flags, nflags;
 
+    BUG_ON(ref >= NR_GRANT_ENTRIES || ref < NR_RESERVED_ENTRIES);
+
     nflags = gnttab_table[ref].flags;
     do {
         if ((flags = nflags) & (GTF_reading|GTF_writing)) {
-            printk("WARNING: g.e. still in use!\n");
+            printk("WARNING: g.e. still in use! (%x)\n", flags);
             return 0;
         }
     } while ((nflags = synch_cmpxchg(&gnttab_table[ref].flags, flags, 0)) !=
@@ -111,6 +124,8 @@ gnttab_end_transfer(grant_ref_t ref)
     unsigned long frame;
     u16 flags;
 
+    BUG_ON(ref >= NR_GRANT_ENTRIES || ref < NR_RESERVED_ENTRIES);
+
     while (!((flags = gnttab_table[ref].flags) & GTF_transfer_committed)) {
         if (synch_cmpxchg(&gnttab_table[ref].flags, flags, 0) == flags) {
             printk("Release unused transfer grant.\n");
@@ -164,6 +179,9 @@ init_gnttab(void)
     unsigned long frames[NR_GRANT_FRAMES];
     int i;
 
+#ifdef GNT_DEBUG
+    memset(inuse, 1, sizeof(inuse));
+#endif
     for (i = NR_RESERVED_ENTRIES; i < NR_GRANT_ENTRIES; i++)
         put_free_entry(i);
```
```diff
--- a/extras/mini-os/kernel.c	Tue Apr 01 10:30:57 2008 -0600
+++ b/extras/mini-os/kernel.c	Tue Apr 01 11:29:03 2008 -0600
@@ -341,7 +341,7 @@ static void kbdfront_thread(void *p)
 {
     struct kbdfront_dev *kbd_dev;
     DEFINE_WAIT(w);
-    int x = WIDTH / 2, y = HEIGHT / 2, z;
+    int x = WIDTH / 2, y = HEIGHT / 2, z = 0;
 
     kbd_dev = init_kbdfront(NULL, 1);
     if (!kbd_dev)
```
```diff
--- a/extras/mini-os/minios.mk	Tue Apr 01 10:30:57 2008 -0600
+++ b/extras/mini-os/minios.mk	Tue Apr 01 11:29:03 2008 -0600
@@ -16,6 +16,10 @@ DEF_LDFLAGS =
 
 ifeq ($(debug),y)
 DEF_CFLAGS += -g
+#DEF_CFLAGS += -DMM_DEBUG
+#DEF_CFLAGS += -DFS_DEBUG
+#DEF_CFLAGS += -DLIBC_DEBUG
+DEF_CFLAGS += -DGNT_DEBUG
 else
 DEF_CFLAGS += -O3
 endif
```
```diff
--- a/extras/mini-os/netfront.c	Tue Apr 01 10:30:57 2008 -0600
+++ b/extras/mini-os/netfront.c	Tue Apr 01 11:29:03 2008 -0600
@@ -120,6 +120,7 @@ moretodo:
         if (rx->status == NETIF_RSP_NULL) continue;
 
         int id = rx->id;
+        BUG_ON(id >= NET_TX_RING_SIZE);
 
         buf = &dev->rx_buffers[id];
         page = (unsigned char*)buf->page;
@@ -204,6 +205,7 @@ void network_tx_buf_gc(struct netfront_d
             printk("packet error\n");
 
         id = txrsp->id;
+        BUG_ON(id >= NET_TX_RING_SIZE);
         struct net_buffer* buf = &dev->tx_buffers[id];
         gnttab_end_access(buf->gref);
         buf->gref=GRANT_INVALID_REF;
@@ -510,6 +512,8 @@ void netfront_xmit(struct netfront_dev *
     struct net_buffer* buf;
     void* page;
 
+    BUG_ON(len > PAGE_SIZE);
+
     down(&dev->tx_sem);
 
     local_irq_save(flags);
```
```diff
--- a/tools/blktap/drivers/block-qcow.c	Tue Apr 01 10:30:57 2008 -0600
+++ b/tools/blktap/drivers/block-qcow.c	Tue Apr 01 11:29:03 2008 -0600
@@ -76,6 +76,7 @@
 
 #define QCOW_OFLAG_COMPRESSED (1LL << 63)
 #define SPARSE_FILE 0x01
+#define EXTHDR_L1_BIG_ENDIAN 0x02
 
 #ifndef O_BINARY
 #define O_BINARY 0
@@ -147,19 +148,30 @@ static int decompress_cluster(struct tdq
 
 static uint32_t gen_cksum(char *ptr, int len)
 {
+	int i;
 	unsigned char *md;
 	uint32_t ret;
 
 	md = malloc(MD5_DIGEST_LENGTH);
 
 	if(!md) return 0;
-
-	if (MD5((unsigned char *)ptr, len, md) != md) {
-		free(md);
-		return 0;
+
+	/* Convert L1 table to big endian */
+	for(i = 0; i < len / sizeof(uint64_t); i++) {
+		cpu_to_be64s(&((uint64_t*) ptr)[i]);
 	}
 
-	memcpy(&ret, md, sizeof(uint32_t));
+	/* Generate checksum */
+	if (MD5((unsigned char *)ptr, len, md) != md)
+		ret = 0;
+	else
+		memcpy(&ret, md, sizeof(uint32_t));
+
+	/* Convert L1 table back to native endianess */
+	for(i = 0; i < len / sizeof(uint64_t); i++) {
+		be64_to_cpus(&((uint64_t*) ptr)[i]);
+	}
+
 	free(md);
 	return ret;
 }
@@ -354,7 +366,8 @@ static uint64_t get_cluster_offset(struc
 			  int n_start, int n_end)
 {
 	int min_index, i, j, l1_index, l2_index, l2_sector, l1_sector;
-	char *tmp_ptr, *tmp_ptr2, *l2_ptr, *l1_ptr;
+	char *tmp_ptr2, *l2_ptr, *l1_ptr;
+	uint64_t *tmp_ptr;
 	uint64_t l2_offset, *l2_table, cluster_offset, tmp;
 	uint32_t min_count;
 	int new_l2_table;
@@ -401,6 +414,11 @@ static uint64_t get_cluster_offset(struc
 	}
 	memcpy(tmp_ptr, l1_ptr, 4096);
 
+	/* Convert block to write to big endian */
+	for(i = 0; i < 4096 / sizeof(uint64_t); i++) {
+		cpu_to_be64s(&tmp_ptr[i]);
+	}
+
 	/*
 	 * Issue non-asynchronous L1 write.
 	 * For safety, we must ensure that
@@ -777,7 +795,7 @@ int tdqcow_open (struct disk_driver *dd,
 		goto fail;
 
 	for(i = 0; i < s->l1_size; i++) {
-		//be64_to_cpus(&s->l1_table[i]);
+		be64_to_cpus(&s->l1_table[i]);
 		//DPRINTF("L1[%d] => %llu\n", i, s->l1_table[i]);
 		if (s->l1_table[i] > final_cluster)
 			final_cluster = s->l1_table[i];
@@ -810,6 +828,38 @@ int tdqcow_open (struct disk_driver *dd,
 	be32_to_cpus(&exthdr->xmagic);
 	if(exthdr->xmagic != XEN_MAGIC)
 		goto end_xenhdr;
+
+	/* Try to detect old tapdisk images. They have to be fixed because
+	 * they don't use big endian but native endianess for the L1 table */
+	if ((exthdr->flags & EXTHDR_L1_BIG_ENDIAN) == 0) {
+
+		/*
+		   The image is broken. Fix it. The L1 table has already been
+		   byte-swapped, so we can write it to the image file as it is
+		   currently in memory. Then swap it back to native endianess
+		   for operation.
+		 */
+
+		DPRINTF("qcow: Converting image to big endian L1 table\n");
+
+		lseek(fd, s->l1_table_offset, SEEK_SET);
+		if (write(fd, s->l1_table, l1_table_size) != l1_table_size) {
+			DPRINTF("qcow: Failed to write new L1 table\n");
+			goto fail;
+		}
+
+		for(i = 0;i < s->l1_size; i++) {
+			cpu_to_be64s(&s->l1_table[i]);
+		}
+
+		/* Write the big endian flag to the extended header */
+		exthdr->flags |= EXTHDR_L1_BIG_ENDIAN;
+
+		if (write(fd, buf, 512) != 512) {
+			DPRINTF("qcow: Failed to write extended header\n");
+			goto fail;
+		}
+	}
 
 	/*Finally check the L1 table cksum*/
 	be32_to_cpus(&exthdr->cksum);
```
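Both this fix and its ioemu twin below hinge on in-place byte-swap helpers, so that the on-disk L1 table and the MD5 over it are computed in one canonical (big-endian) byte order on every host. A self-contained sketch of what cpu_to_be64s()/be64_to_cpus() do — the real definitions live in the tree's endianness headers, this is just an illustration:

```c
#include <stdint.h>

/* Swap a 64-bit value between host and big-endian order, in place.
 * On a big-endian host the swap is skipped, so the stored table and
 * its checksum come out identical on all hosts. */
static inline void cpu_to_be64s(uint64_t *v)
{
    const union { uint16_t u16; uint8_t u8; } probe = { 1 };
    if (probe.u8) {               /* little-endian host: byte-swap */
        uint64_t x = *v, r = 0;
        int i;
        for (i = 0; i < 8; i++) {
            r = (r << 8) | (x & 0xff);
            x >>= 8;
        }
        *v = r;
    }
}

/* A byte swap is its own inverse. */
#define be64_to_cpus cpu_to_be64s
```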
```diff
--- a/tools/firmware/hvmloader/Makefile	Tue Apr 01 10:30:57 2008 -0600
+++ b/tools/firmware/hvmloader/Makefile	Tue Apr 01 11:29:03 2008 -0600
@@ -42,16 +42,21 @@ OBJS = $(patsubst %.c,%.o,$(SRCS))
 .PHONY: all
 all: hvmloader
 
-hvmloader: roms.h subdirs-all $(SRCS)
-	$(CC) $(CFLAGS) -c $(SRCS)
-	$(LD) $(LDFLAGS_DIRECT) -N -Ttext $(LOADADDR) -o hvmloader.tmp $(OBJS) acpi/acpi.a
+smbios.o: CFLAGS += -D__SMBIOS_DATE__="\"$(shell date +%m/%d/%Y)\""
+
+hvmloader: roms.h subdirs-all $(OBJS)
+	$(LD) $(LDFLAGS_DIRECT) -N -Ttext $(LOADADDR) \
+		-o hvmloader.tmp $(OBJS) acpi/acpi.a
 	$(OBJCOPY) hvmloader.tmp hvmloader
 	rm -f hvmloader.tmp
 
-roms.h: ../rombios/BIOS-bochs-latest ../vgabios/VGABIOS-lgpl-latest.bin ../vgabios/VGABIOS-lgpl-latest.cirrus.bin ../etherboot/eb-roms.h ../extboot/extboot.bin
+roms.h: ../rombios/BIOS-bochs-latest ../vgabios/VGABIOS-lgpl-latest.bin \
+	../vgabios/VGABIOS-lgpl-latest.cirrus.bin ../etherboot/eb-roms.h \
+	../extboot/extboot.bin
 	sh ./mkhex rombios ../rombios/BIOS-bochs-latest > roms.h
 	sh ./mkhex vgabios_stdvga ../vgabios/VGABIOS-lgpl-latest.bin >> roms.h
-	sh ./mkhex vgabios_cirrusvga ../vgabios/VGABIOS-lgpl-latest.cirrus.bin >> roms.h
+	sh ./mkhex vgabios_cirrusvga \
+		../vgabios/VGABIOS-lgpl-latest.cirrus.bin >> roms.h
 	cat ../etherboot/eb-roms.h >> roms.h
 	sh ./mkhex extboot ../extboot/extboot.bin >> roms.h
```
```diff
--- a/tools/firmware/hvmloader/hvmloader.c	Tue Apr 01 10:30:57 2008 -0600
+++ b/tools/firmware/hvmloader/hvmloader.c	Tue Apr 01 11:29:03 2008 -0600
@@ -420,6 +420,8 @@ int main(void)
 
     init_hypercalls();
 
+    printf("CPU speed is %u MHz\n", get_cpu_mhz());
+
     printf("Writing SMBIOS tables ...\n");
     smbios_sz = hvm_write_smbios_tables();
```
```diff
--- a/tools/firmware/hvmloader/smbios.c	Tue Apr 01 10:30:57 2008 -0600
+++ b/tools/firmware/hvmloader/smbios.c	Tue Apr 01 11:29:03 2008 -0600
@@ -21,6 +21,7 @@
  */
 
 #include <stdint.h>
+#include <xen/xen.h>
 #include <xen/version.h>
 #include "smbios_types.h"
 #include "util.h"
@@ -246,13 +247,14 @@ smbios_entry_point_init(void *start,
     int i;
     struct smbios_entry_point *ep = (struct smbios_entry_point *)start;
 
+    memset(ep, 0, sizeof(*ep));
+
     strncpy(ep->anchor_string, "_SM_", 4);
     ep->length = 0x1f;
     ep->smbios_major_version = 2;
     ep->smbios_minor_version = 4;
     ep->max_structure_size = max_structure_size;
     ep->entry_point_revision = 0;
-    memset(ep->formatted_area, 0, 5);
     strncpy(ep->intermediate_anchor_string, "_DMI_", 5);
 
     ep->structure_table_length = structure_table_length;
@@ -260,9 +262,6 @@ smbios_entry_point_init(void *start,
     ep->number_of_structures = number_of_structures;
     ep->smbios_bcd_revision = 0x24;
 
-    ep->checksum = 0;
-    ep->intermediate_checksum = 0;
-
     sum = 0;
     for ( i = 0; i < 0x10; i++ )
         sum += ((int8_t *)start)[i];
@@ -280,22 +279,27 @@ smbios_type_0_init(void *start, const ch
                    uint32_t xen_major_version, uint32_t xen_minor_version)
 {
     struct smbios_type_0 *p = (struct smbios_type_0 *)start;
-
+    static const char *smbios_release_date = __SMBIOS_DATE__;
+
+    memset(p, 0, sizeof(*p));
+
     p->header.type = 0;
     p->header.length = sizeof(struct smbios_type_0);
     p->header.handle = 0;
-
+
     p->vendor_str = 1;
     p->version_str = 2;
     p->starting_address_segment = 0xe800;
-    p->release_date_str = 0;
+    p->release_date_str = 3;
     p->rom_size = 0;
-
-    memset(p->characteristics, 0, 8);
-    p->characteristics[7] = 0x08; /* BIOS characteristics not supported */
-    p->characteristics_extension_bytes[0] = 0;
-    p->characteristics_extension_bytes[1] = 0;
-
+
+    /* BIOS Characteristics. */
+    p->characteristics[0] = 0x80; /* PCI is supported */
+    p->characteristics[2] = 0x08; /* EDD is supported */
+
+    /* Extended Characteristics: Enable Targeted Content Distribution. */
+    p->characteristics_extension_bytes[1] = 0x04;
+
     p->major_release = (uint8_t) xen_major_version;
     p->minor_release = (uint8_t) xen_minor_version;
     p->embedded_controller_major = 0xff;
@@ -306,6 +310,8 @@ smbios_type_0_init(void *start, const ch
     start += strlen("Xen") + 1;
     strcpy((char *)start, xen_version);
     start += strlen(xen_version) + 1;
+    strcpy((char *)start, smbios_release_date);
+    start += strlen(smbios_release_date) + 1;
 
     *((uint8_t *)start) = 0;
     return start + 1;
@@ -318,6 +324,9 @@ smbios_type_1_init(void *start, const ch
 {
     char uuid_str[37];
     struct smbios_type_1 *p = (struct smbios_type_1 *)start;
+
+    memset(p, 0, sizeof(*p));
+
     p->header.type = 1;
     p->header.length = sizeof(struct smbios_type_1);
     p->header.handle = 0x100;
@@ -355,6 +364,8 @@ smbios_type_3_init(void *start)
 {
     struct smbios_type_3 *p = (struct smbios_type_3 *)start;
 
+    memset(p, 0, sizeof(*p));
+
     p->header.type = 3;
     p->header.length = sizeof(struct smbios_type_3);
     p->header.handle = 0x300;
@@ -379,12 +390,15 @@ smbios_type_3_init(void *start)
 
 /* Type 4 -- Processor Information */
 static void *
-smbios_type_4_init(void *start, unsigned int cpu_number, char *cpu_manufacturer)
+smbios_type_4_init(
+    void *start, unsigned int cpu_number, char *cpu_manufacturer)
 {
     char buf[80];
     struct smbios_type_4 *p = (struct smbios_type_4 *)start;
     uint32_t eax, ebx, ecx, edx;
 
+    memset(p, 0, sizeof(*p));
+
     p->header.type = 4;
     p->header.length = sizeof(struct smbios_type_4);
     p->header.handle = 0x400 + cpu_number;
@@ -403,8 +417,7 @@ smbios_type_4_init(void *start, unsigned
     p->voltage = 0;
     p->external_clock = 0;
 
-    p->max_speed = 0; /* unknown */
-    p->current_speed = 0; /* unknown */
+    p->max_speed = p->current_speed = get_cpu_mhz();
 
     p->status = 0x41; /* socket populated, CPU enabled */
     p->upgrade = 0x01; /* other */
@@ -431,6 +444,8 @@ smbios_type_16_init(void *start, uint32_
 {
     struct smbios_type_16 *p = (struct smbios_type_16*)start;
 
+    memset(p, 0, sizeof(*p));
+
     p->header.type = 16;
     p->header.handle = 0x1000;
     p->header.length = sizeof(struct smbios_type_16);
@@ -453,6 +468,8 @@ smbios_type_17_init(void *start, uint32_
 {
     struct smbios_type_17 *p = (struct smbios_type_17 *)start;
 
+    memset(p, 0, sizeof(*p));
+
     p->header.type = 17;
     p->header.length = sizeof(struct smbios_type_17);
     p->header.handle = 0x1100;
@@ -484,6 +501,8 @@ smbios_type_19_init(void *start, uint32_
 {
     struct smbios_type_19 *p = (struct smbios_type_19 *)start;
 
+    memset(p, 0, sizeof(*p));
+
     p->header.type = 19;
     p->header.length = sizeof(struct smbios_type_19);
     p->header.handle = 0x1300;
@@ -504,6 +523,8 @@ smbios_type_20_init(void *start, uint32_
 {
     struct smbios_type_20 *p = (struct smbios_type_20 *)start;
 
+    memset(p, 0, sizeof(*p));
+
     p->header.type = 20;
     p->header.length = sizeof(struct smbios_type_20);
     p->header.handle = 0x1400;
@@ -528,6 +549,8 @@ smbios_type_32_init(void *start)
 {
     struct smbios_type_32 *p = (struct smbios_type_32 *)start;
 
+    memset(p, 0, sizeof(*p));
+
     p->header.type = 32;
     p->header.length = sizeof(struct smbios_type_32);
     p->header.handle = 0x2000;
@@ -545,6 +568,8 @@ smbios_type_127_init(void *start)
 {
     struct smbios_type_127 *p = (struct smbios_type_127 *)start;
 
+    memset(p, 0, sizeof(*p));
+
     p->header.type = 127;
     p->header.length = sizeof(struct smbios_type_127);
     p->header.handle = 0x7f00;
```
```diff
--- a/tools/firmware/hvmloader/util.c	Tue Apr 01 10:30:57 2008 -0600
+++ b/tools/firmware/hvmloader/util.c	Tue Apr 01 11:29:03 2008 -0600
@@ -21,7 +21,10 @@
 #include "util.h"
 #include "config.h"
 #include "e820.h"
+#include "hypercall.h"
 #include <stdint.h>
+#include <xen/xen.h>
+#include <xen/memory.h>
 #include <xen/hvm/hvm_info_table.h>
 
 void outb(uint16_t addr, uint8_t val)
@@ -585,6 +588,56 @@ int get_apic_mode(void)
     return (t ? t->apic_mode : 1);
 }
 
+uint16_t get_cpu_mhz(void)
+{
+    struct xen_add_to_physmap xatp;
+    struct shared_info *shared_info = (struct shared_info *)0xa0000;
+    struct vcpu_time_info *info = &shared_info->vcpu_info[0].time;
+    uint64_t cpu_khz;
+    uint32_t tsc_to_nsec_mul, version;
+    int8_t tsc_shift;
+
+    static uint16_t cpu_mhz;
+    if ( cpu_mhz != 0 )
+        return cpu_mhz;
+
+    /* Map shared-info page to 0xa0000 (i.e., overlap VGA hole). */
+    xatp.domid = DOMID_SELF;
+    xatp.space = XENMAPSPACE_shared_info;
+    xatp.idx   = 0;
+    xatp.gpfn  = (unsigned long)shared_info >> 12;
+    if ( hypercall_memory_op(XENMEM_add_to_physmap, &xatp) != 0 )
+        BUG();
+
+    /* Get a consistent snapshot of scale factor (multiplier and shift). */
+    do {
+        version = info->version;
+        rmb();
+        tsc_to_nsec_mul = info->tsc_to_system_mul;
+        tsc_shift       = info->tsc_shift;
+        rmb();
+    } while ((version & 1) | (version ^ info->version));
+
+    /* Compute CPU speed in kHz. */
+    cpu_khz = 1000000ull << 32;
+    do_div(cpu_khz, tsc_to_nsec_mul);
+    if ( tsc_shift < 0 )
+        cpu_khz = cpu_khz << -tsc_shift;
+    else
+        cpu_khz = cpu_khz >> tsc_shift;
+
+    /* Get the VGA MMIO hole back by remapping shared info to scratch. */
+    xatp.domid = DOMID_SELF;
+    xatp.space = XENMAPSPACE_shared_info;
+    xatp.idx   = 0;
+    xatp.gpfn  = 0xfffff; /* scratch pfn */
+    if ( hypercall_memory_op(XENMEM_add_to_physmap, &xatp) != 0 )
+        BUG();
+
+    cpu_mhz = (uint16_t)(((uint32_t)cpu_khz + 500) / 1000);
+    return cpu_mhz;
+}
+
 /*
  * Local variables:
  * mode: C
```
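The arithmetic in get_cpu_mhz() inverts Xen's time scaling, which is defined so that system-time nanoseconds = ((tsc << tsc_shift) * tsc_to_system_mul) >> 32; hence cpu_khz = (10^6 << 32) / tsc_to_system_mul, adjusted by tsc_shift. A standalone check with an assumed multiplier — note hvmloader itself goes through the do_div() macro from util.h below, presumably because its freestanding 32-bit build cannot pull a 64-bit division helper out of libgcc, while this host-side sketch just uses native division:

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* Assumed snapshot: tsc_to_system_mul ~= 2^32/3 means one TSC tick
     * advances system time by 1/3 ns, i.e. a 3 GHz TSC (tsc_shift = 0). */
    uint32_t tsc_to_nsec_mul = 1431655765u;
    int8_t tsc_shift = 0;

    uint64_t cpu_khz = (1000000ull << 32) / tsc_to_nsec_mul;
    if (tsc_shift < 0)
        cpu_khz <<= -tsc_shift;
    else
        cpu_khz >>= tsc_shift;

    /* Round to the nearest MHz, as get_cpu_mhz() does: prints 3000. */
    printf("%u MHz\n", (uint32_t)((cpu_khz + 500) / 1000));
    return 0;
}
```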
```diff
--- a/tools/firmware/hvmloader/util.h	Tue Apr 01 10:30:57 2008 -0600
+++ b/tools/firmware/hvmloader/util.h	Tue Apr 01 11:29:03 2008 -0600
@@ -10,11 +10,11 @@
 #undef NULL
 #define NULL ((void*)0)
 
-extern void __assert_failed(char *assertion, char *file, int line)
+void __assert_failed(char *assertion, char *file, int line)
     __attribute__((noreturn));
 #define ASSERT(p) \
     do { if (!(p)) __assert_failed(#p, __FILE__, __LINE__); } while (0)
-extern void __bug(char *file, int line) __attribute__((noreturn));
+void __bug(char *file, int line) __attribute__((noreturn));
 #define BUG() __bug(__FILE__, __LINE__)
 #define BUG_ON(p) do { if (p) BUG(); } while (0)
 #define BUILD_BUG_ON(p) ((void)sizeof(char[1 - 2 * !!(p)]))
@@ -49,10 +49,54 @@ void pci_write(uint32_t devfn, uint32_t
 #define pci_writew(devfn, reg, val) (pci_write(devfn, reg, 2, (uint16_t)val))
 #define pci_writel(devfn, reg, val) (pci_write(devfn, reg, 4, (uint32_t)val))
 
+/* Get CPU speed in MHz. */
+uint16_t get_cpu_mhz(void);
+
 /* Do cpuid instruction, with operation 'idx' */
 void cpuid(uint32_t idx, uint32_t *eax, uint32_t *ebx,
            uint32_t *ecx, uint32_t *edx);
 
+/* Read the TSC register. */
+static inline uint64_t rdtsc(void)
+{
+    uint64_t tsc;
+    asm volatile ( "rdtsc" : "=A" (tsc) );
+    return tsc;
+}
+
+/* Relax the CPU and let the compiler know that time passes. */
+static inline void cpu_relax(void)
+{
+    asm volatile ( "rep ; nop" : : : "memory" );
+}
+
+/* Memory barriers. */
+#define barrier() asm volatile ( "" : : : "memory" )
+#define rmb()     barrier()
+#define wmb()     barrier()
+
+/*
+ * Divide a 64-bit dividend by a 32-bit divisor.
+ * (1) Overwrites the 64-bit dividend _in_place_ with the quotient
+ * (2) Returns the 32-bit remainder
+ */
+#define do_div(n, base) ({                                      \
+    unsigned long __upper, __low, __high, __mod, __base;        \
+    __base = (base);                                            \
+    asm ( "" : "=a" (__low), "=d" (__high) : "A" (n) );         \
+    __upper = __high;                                           \
+    if ( __high )                                               \
+    {                                                           \
+        __upper = __high % (__base);                            \
+        __high  = __high / (__base);                            \
+    }                                                           \
+    asm ( "divl %2"                                             \
+          : "=a" (__low), "=d" (__mod)                          \
+          : "rm" (__base), "0" (__low), "1" (__upper) );        \
+    asm ( "" : "=A" (n) : "a" (__low), "d" (__high) );          \
+    __mod;                                                      \
+})
+
 /* HVM-builder info. */
 int get_vcpu_nr(void);
 int get_acpi_enabled(void);
```
```diff
--- a/tools/include/xen-foreign/mkheader.py	Tue Apr 01 10:30:57 2008 -0600
+++ b/tools/include/xen-foreign/mkheader.py	Tue Apr 01 11:29:03 2008 -0600
@@ -37,8 +37,8 @@ inttypes["x86_64"] = {
     "xen_pfn_t" : "__align8__ uint64_t",
 };
 header["x86_64"] = """
-#ifdef __GNUC__
-# define __DECL_REG(name) __extension__ union { uint64_t r ## name, e ## name; }
+#if defined(__GNUC__) && !defined(__STRICT_ANSI__)
+# define __DECL_REG(name) union { uint64_t r ## name, e ## name; }
 # define __align8__ __attribute__((aligned (8)))
 #else
 # define __DECL_REG(name) uint64_t r ## name
```
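For reference, this is what the generated x86_64 header's register declaration expands to under the two branches. Outside strict-ANSI mode, rNAME and eNAME alias the same 64-bit slot through an unnamed union (a GNU extension, later standardized in C11); -ansi builds take the plain-field fallback from the unchanged #else branch. A compilable illustration:

```c
#include <stdint.h>

#if defined(__GNUC__) && !defined(__STRICT_ANSI__)
/* rNAME and eNAME are two names for the same 8 bytes. */
# define __DECL_REG(name) union { uint64_t r ## name, e ## name; }
#else
/* Strict-ANSI fallback: a single 64-bit field, no union extension. */
# define __DECL_REG(name) uint64_t r ## name
#endif

struct example_regs {
    __DECL_REG(ax);
    __DECL_REG(bx);
};

int main(void)
{
    struct example_regs regs;
    regs.rax = 0x1122334455667788ull;
#if defined(__GNUC__) && !defined(__STRICT_ANSI__)
    return (int)(regs.eax & 0xff);   /* eax is a legacy alias for rax */
#else
    return (int)(regs.rax & 0xff);
#endif
}
```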
```diff
--- a/tools/ioemu/block-qcow.c	Tue Apr 01 10:30:57 2008 -0600
+++ b/tools/ioemu/block-qcow.c	Tue Apr 01 11:29:03 2008 -0600
@@ -37,6 +37,11 @@
 
 #define QCOW_OFLAG_COMPRESSED (1LL << 63)
 
+#define XEN_MAGIC (('X' << 24) | ('E' << 16) | ('N' << 8) | 0xfb)
+
+#define EXTHDR_SPARSE_FILE 0x01
+#define EXTHDR_L1_BIG_ENDIAN 0x02
+
 typedef struct QCowHeader {
     uint32_t magic;
     uint32_t version;
@@ -50,6 +55,14 @@ typedef struct QCowHeader {
     uint64_t l1_table_offset;
 } QCowHeader;
 
+/*Extended header for Xen enhancements*/
+typedef struct QCowHeader_ext {
+    uint32_t xmagic;
+    uint32_t cksum;
+    uint32_t min_cluster_alloc;
+    uint32_t flags;
+} QCowHeader_ext;
+
 #define L2_CACHE_SIZE 16
 
 typedef struct BDRVQcowState {
@@ -137,6 +150,51 @@ static int qcow_open(BlockDriverState *b
     if (bdrv_pread(s->hd, s->l1_table_offset, s->l1_table, s->l1_size * sizeof(uint64_t)) !=
         s->l1_size * sizeof(uint64_t))
         goto fail;
+
+    /* Try to detect old tapdisk images. They have to be fixed because they
+     * don't use big endian but native endianess for the L1 table */
+    if (header.backing_file_offset == 0 && s->l1_table_offset % 4096 == 0) {
+
+        QCowHeader_ext exthdr;
+        uint64_t l1_bytes = s->l1_size * sizeof(uint64_t);
+
+        if (bdrv_pread(s->hd, sizeof(header), &exthdr, sizeof(exthdr))
+                != sizeof(exthdr))
+            goto end_xenhdr;
+
+        be32_to_cpus(&exthdr.xmagic);
+        if (exthdr.xmagic != XEN_MAGIC)
+            goto end_xenhdr;
+
+        be32_to_cpus(&exthdr.flags);
+        if (exthdr.flags & EXTHDR_L1_BIG_ENDIAN)
+            goto end_xenhdr;
+
+        /* The image is broken. Fix it. */
+        fprintf(stderr, "qcow: Converting image to big endian L1 table\n");
+
+        for(i = 0;i < s->l1_size; i++) {
+            cpu_to_be64s(&s->l1_table[i]);
+        }
+
+        if (bdrv_pwrite(s->hd, s->l1_table_offset, s->l1_table,
+                l1_bytes) != l1_bytes) {
+            fprintf(stderr, "qcow: Failed to write new L1 table\n");
+            goto fail;
+        }
+
+        exthdr.flags |= EXTHDR_L1_BIG_ENDIAN;
+        cpu_to_be32s(&exthdr.flags);
+
+        if (bdrv_pwrite(s->hd, sizeof(header), &exthdr, sizeof(exthdr))
+                != sizeof(exthdr)) {
+            fprintf(stderr, "qcow: Failed to write extended header\n");
+            goto fail;
+        }
+    }
+end_xenhdr:
+
+    /* L1 table is big endian now */
     for(i = 0;i < s->l1_size; i++) {
         be64_to_cpus(&s->l1_table[i]);
     }
@@ -725,6 +783,13 @@ static void qcow_aio_cancel(BlockDriverA
     qemu_aio_release(acb);
 }
 
+static BlockDriverAIOCB *qcow_aio_flush(BlockDriverState *bs,
+        BlockDriverCompletionFunc *cb, void *opaque)
+{
+    BDRVQcowState *s = bs->opaque;
+    return bdrv_aio_flush(s->hd, cb, opaque);
+}
+
 static void qcow_close(BlockDriverState *bs)
 {
     BDRVQcowState *s = bs->opaque;
@@ -869,10 +934,10 @@ static int qcow_write_compressed(BlockDr
     return 0;
 }
 
-static void qcow_flush(BlockDriverState *bs)
+static int qcow_flush(BlockDriverState *bs)
 {
     BDRVQcowState *s = bs->opaque;
-    bdrv_flush(s->hd);
+    return bdrv_flush(s->hd);
 }
 
 static int qcow_get_info(BlockDriverState *bs, BlockDriverInfo *bdi)
@@ -899,6 +964,7 @@ BlockDriver bdrv_qcow = {
     .bdrv_aio_read = qcow_aio_read,
     .bdrv_aio_write = qcow_aio_write,
     .bdrv_aio_cancel = qcow_aio_cancel,
+    .bdrv_aio_flush = qcow_aio_flush,
     .aiocb_size = sizeof(QCowAIOCB),
     .bdrv_write_compressed = qcow_write_compressed,
     .bdrv_get_info = qcow_get_info,
```
```diff
--- a/tools/ioemu/block-qcow2.c	Tue Apr 01 10:30:57 2008 -0600
+++ b/tools/ioemu/block-qcow2.c	Tue Apr 01 11:29:03 2008 -0600
@@ -1007,6 +1007,13 @@ static void qcow_aio_cancel(BlockDriverA
     qemu_aio_release(acb);
 }
 
+static BlockDriverAIOCB *qcow_aio_flush(BlockDriverState *bs,
+        BlockDriverCompletionFunc *cb, void *opaque)
+{
+    BDRVQcowState *s = bs->opaque;
+    return bdrv_aio_flush(s->hd, cb, opaque);
+}
+
 static void qcow_close(BlockDriverState *bs)
 {
     BDRVQcowState *s = bs->opaque;
@@ -1228,10 +1235,10 @@ static int qcow_write_compressed(BlockDr
     return 0;
 }
 
-static void qcow_flush(BlockDriverState *bs)
+static int qcow_flush(BlockDriverState *bs)
 {
     BDRVQcowState *s = bs->opaque;
-    bdrv_flush(s->hd);
+    return bdrv_flush(s->hd);
 }
 
 static int qcow_get_info(BlockDriverState *bs, BlockDriverInfo *bdi)
@@ -1886,6 +1893,8 @@ static int grow_refcount_table(BlockDriv
     int64_t table_offset;
     uint64_t data64;
     uint32_t data32;
+    int old_table_size;
+    int64_t old_table_offset;
 
     if (min_size <= s->refcount_table_size)
         return 0;
@@ -1931,10 +1940,14 @@ static int grow_refcount_table(BlockDriv
                     &data32, sizeof(data32)) != sizeof(data32))
         goto fail;
     qemu_free(s->refcount_table);
+    old_table_offset = s->refcount_table_offset;
+    old_table_size = s->refcount_table_size;
     s->refcount_table = new_table;
     s->refcount_table_size = new_table_size;
+    s->refcount_table_offset = table_offset;
 
     update_refcount(bs, table_offset, new_table_size2, 1);
+    free_clusters(bs, old_table_offset, old_table_size * sizeof(uint64_t));
     return 0;
 fail:
     free_clusters(bs, table_offset, new_table_size2);
@@ -2235,6 +2248,7 @@ BlockDriver bdrv_qcow2 = {
     .bdrv_aio_read = qcow_aio_read,
     .bdrv_aio_write = qcow_aio_write,
     .bdrv_aio_cancel = qcow_aio_cancel,
+    .bdrv_aio_flush = qcow_aio_flush,
     .aiocb_size = sizeof(QCowAIOCB),
     .bdrv_write_compressed = qcow_write_compressed,
```
```diff
--- a/tools/ioemu/block-raw.c	Tue Apr 01 10:30:57 2008 -0600
+++ b/tools/ioemu/block-raw.c	Tue Apr 01 11:29:03 2008 -0600
@@ -496,6 +496,21 @@ static void raw_aio_cancel(BlockDriverAI
         pacb = &acb->next;
     }
 }
+
+static BlockDriverAIOCB *raw_aio_flush(BlockDriverState *bs,
+        BlockDriverCompletionFunc *cb, void *opaque)
+{
+    RawAIOCB *acb;
+
+    acb = raw_aio_setup(bs, 0, NULL, 0, cb, opaque);
+    if (!acb)
+        return NULL;
+    if (aio_fsync(O_SYNC, &acb->aiocb) < 0) {
+        qemu_aio_release(acb);
+        return NULL;
+    }
+    return &acb->common;
+}
 #endif
 
 static void raw_close(BlockDriverState *bs)
@@ -600,10 +615,12 @@ static int raw_create(const char *filena
     return 0;
 }
 
-static void raw_flush(BlockDriverState *bs)
+static int raw_flush(BlockDriverState *bs)
 {
     BDRVRawState *s = bs->opaque;
-    fsync(s->fd);
+    if (fsync(s->fd))
+        return errno;
+    return 0;
 }
 
 BlockDriver bdrv_raw = {
@@ -621,6 +638,7 @@ BlockDriver bdrv_raw = {
     .bdrv_aio_read = raw_aio_read,
     .bdrv_aio_write = raw_aio_write,
    .bdrv_aio_cancel = raw_aio_cancel,
+    .bdrv_aio_flush = raw_aio_flush,
     .aiocb_size = sizeof(RawAIOCB),
 #endif
     .protocol_name = "file",
@@ -959,6 +977,7 @@ BlockDriver bdrv_host_device = {
     .bdrv_aio_read = raw_aio_read,
     .bdrv_aio_write = raw_aio_write,
     .bdrv_aio_cancel = raw_aio_cancel,
+    .bdrv_aio_flush = raw_aio_flush,
     .aiocb_size = sizeof(RawAIOCB),
 #endif
     .bdrv_pread = raw_pread,
```
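raw_aio_flush() above leans on POSIX aio_fsync(3), which queues a sync of the operations already submitted on the descriptor and completes asynchronously. A minimal standalone usage sketch, polling for completion instead of using signal or callback delivery (link with -lrt on Linux); this is generic POSIX AIO, not the qemu wrapper:

```c
#include <aio.h>
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
    struct aiocb cb;
    int fd = open("testfile", O_RDWR | O_CREAT, 0644);
    if (fd < 0) { perror("open"); return 1; }

    memset(&cb, 0, sizeof(cb));
    cb.aio_fildes = fd;

    /* Queue a flush of everything already written through fd. */
    if (aio_fsync(O_SYNC, &cb) < 0) { perror("aio_fsync"); return 1; }

    while (aio_error(&cb) == EINPROGRESS)
        usleep(1000);             /* poll; real code would use a callback */

    printf("flush completed: %d\n", (int)aio_return(&cb));
    close(fd);
    return 0;
}
```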
```diff
--- a/tools/ioemu/block-vmdk.c	Tue Apr 01 10:30:57 2008 -0600
+++ b/tools/ioemu/block-vmdk.c	Tue Apr 01 11:29:03 2008 -0600
@@ -734,10 +734,10 @@ static void vmdk_close(BlockDriverState
     vmdk_parent_close(s->hd);
 }
 
-static void vmdk_flush(BlockDriverState *bs)
+static int vmdk_flush(BlockDriverState *bs)
 {
     BDRVVmdkState *s = bs->opaque;
-    bdrv_flush(s->hd);
+    return bdrv_flush(s->hd);
 }
 
 BlockDriver bdrv_vmdk = {
```
```diff
--- a/tools/ioemu/block.c	Tue Apr 01 10:30:57 2008 -0600
+++ b/tools/ioemu/block.c	Tue Apr 01 11:29:03 2008 -0600
@@ -48,6 +48,8 @@ static BlockDriverAIOCB *bdrv_aio_write_
     int64_t sector_num, const uint8_t *buf, int nb_sectors,
     BlockDriverCompletionFunc *cb, void *opaque);
 static void bdrv_aio_cancel_em(BlockDriverAIOCB *acb);
+static BlockDriverAIOCB *bdrv_aio_flush_em(BlockDriverState *bs,
+    BlockDriverCompletionFunc *cb, void *opaque);
 static int bdrv_read_em(BlockDriverState *bs, int64_t sector_num,
                         uint8_t *buf, int nb_sectors);
 static int bdrv_write_em(BlockDriverState *bs, int64_t sector_num,
@@ -155,6 +157,8 @@ void bdrv_register(BlockDriver *bdrv)
         bdrv->bdrv_read = bdrv_read_em;
         bdrv->bdrv_write = bdrv_write_em;
     }
+    if (!bdrv->bdrv_aio_flush)
+        bdrv->bdrv_aio_flush = bdrv_aio_flush_em;
     bdrv->next = first_drv;
     first_drv = bdrv;
 }
@@ -885,12 +889,14 @@ const char *bdrv_get_device_name(BlockDr
     return bs->device_name;
 }
 
-void bdrv_flush(BlockDriverState *bs)
+int bdrv_flush(BlockDriverState *bs)
 {
-    if (bs->drv->bdrv_flush)
-        bs->drv->bdrv_flush(bs);
-    if (bs->backing_hd)
-        bdrv_flush(bs->backing_hd);
+    int ret = 0;
+    if (bs->drv->bdrv_flush)
+        ret = bs->drv->bdrv_flush(bs);
+    if (!ret && bs->backing_hd)
+        ret = bdrv_flush(bs->backing_hd);
+    return ret;
 }
 
 void bdrv_info(void)
@@ -1138,6 +1144,17 @@ void bdrv_aio_cancel(BlockDriverAIOCB *a
     drv->bdrv_aio_cancel(acb);
 }
 
+BlockDriverAIOCB *bdrv_aio_flush(BlockDriverState *bs,
+    BlockDriverCompletionFunc *cb, void *opaque)
+{
+    BlockDriver *drv = bs->drv;
+
+    if (!drv)
+        return NULL;
+
+    return drv->bdrv_aio_flush(bs, cb, opaque);
+}
+
 
 /**************************************************************/
 /* async block device emulation */
@@ -1214,6 +1231,15 @@ static void bdrv_aio_cancel_em(BlockDriv
 }
 #endif /* !QEMU_TOOL */
 
+static BlockDriverAIOCB *bdrv_aio_flush_em(BlockDriverState *bs,
+    BlockDriverCompletionFunc *cb, void *opaque)
+{
+    int ret;
+    ret = bdrv_flush(bs);
+    cb(opaque, ret);
+    return NULL;
+}
+
 /**************************************************************/
 /* sync block device emulation */
```
25.1 --- a/tools/ioemu/block_int.h Tue Apr 01 10:30:57 2008 -0600 25.2 +++ b/tools/ioemu/block_int.h Tue Apr 01 11:29:03 2008 -0600 25.3 @@ -36,7 +36,7 @@ struct BlockDriver { 25.4 void (*bdrv_close)(BlockDriverState *bs); 25.5 int (*bdrv_create)(const char *filename, int64_t total_sectors, 25.6 const char *backing_file, int flags); 25.7 - void (*bdrv_flush)(BlockDriverState *bs); 25.8 + int (*bdrv_flush)(BlockDriverState *bs); 25.9 int (*bdrv_is_allocated)(BlockDriverState *bs, int64_t sector_num, 25.10 int nb_sectors, int *pnum); 25.11 int (*bdrv_set_key)(BlockDriverState *bs, const char *key); 25.12 @@ -49,6 +49,8 @@ struct BlockDriver { 25.13 int64_t sector_num, const uint8_t *buf, int nb_sectors, 25.14 BlockDriverCompletionFunc *cb, void *opaque); 25.15 void (*bdrv_aio_cancel)(BlockDriverAIOCB *acb); 25.16 + BlockDriverAIOCB *(*bdrv_aio_flush)(BlockDriverState *bs, 25.17 + BlockDriverCompletionFunc *cb, void *opaque); 25.18 int aiocb_size; 25.19 25.20 const char *protocol_name;
26.1 --- a/tools/ioemu/hw/ide.c Tue Apr 01 10:30:57 2008 -0600 26.2 +++ b/tools/ioemu/hw/ide.c Tue Apr 01 11:29:03 2008 -0600 26.3 @@ -751,6 +751,7 @@ static inline void ide_abort_command(IDE 26.4 static inline void ide_set_irq(IDEState *s) 26.5 { 26.6 BMDMAState *bm = s->bmdma; 26.7 + if (!s->bs) return; /* yikes */ 26.8 if (!(s->cmd & IDE_CMD_DISABLE_IRQ)) { 26.9 if (bm) { 26.10 bm->status |= BM_STATUS_INT; 26.11 @@ -916,6 +917,8 @@ static void ide_read_dma_cb(void *opaque 26.12 int n; 26.13 int64_t sector_num; 26.14 26.15 + if (!s->bs) return; /* yikes */ 26.16 + 26.17 n = s->io_buffer_size >> 9; 26.18 sector_num = ide_get_sector(s); 26.19 if (n > 0) { 26.20 @@ -1024,6 +1027,8 @@ static void ide_write_dma_cb(void *opaqu 26.21 int n; 26.22 int64_t sector_num; 26.23 26.24 + if (!s->bs) return; /* yikes */ 26.25 + 26.26 n = s->io_buffer_size >> 9; 26.27 sector_num = ide_get_sector(s); 26.28 if (n > 0) { 26.29 @@ -1072,6 +1077,39 @@ static void ide_sector_write_dma(IDEStat 26.30 ide_dma_start(s, ide_write_dma_cb); 26.31 } 26.32 26.33 +static void ide_device_utterly_broken(IDEState *s) { 26.34 + s->status |= BUSY_STAT; 26.35 + s->bs = NULL; 26.36 + /* This prevents all future commands from working. All of the 26.37 + * asynchronous callbacks (and ide_set_irq, as a safety measure) 26.38 + * check to see whether this has happened and bail if so. 26.39 + */ 26.40 +} 26.41 + 26.42 +static void ide_flush_cb(void *opaque, int ret) 26.43 +{ 26.44 + IDEState *s = opaque; 26.45 + 26.46 + if (!s->bs) return; /* yikes */ 26.47 + 26.48 + if (ret) { 26.49 + /* We are completely doomed. The IDE spec does not permit us 26.50 + * to return an error from a flush except via a protocol which 26.51 + * requires us to say where the error is and which 26.52 + * contemplates the guest repeating the flush attempt to 26.53 + * attempt flush the remaining data. We can't support that 26.54 + * because f(data)sync (which is what the block drivers use 26.55 + * eventually) doesn't report the necessary information or 26.56 + * give us the necessary control. So we make the disk vanish. 
26.57 + */ 26.58 + ide_device_utterly_broken(s); 26.59 + return; 26.60 + } 26.61 + else 26.62 + s->status = READY_STAT; 26.63 + ide_set_irq(s); 26.64 +} 26.65 + 26.66 static void ide_atapi_cmd_ok(IDEState *s) 26.67 { 26.68 s->error = 0; 26.69 @@ -1298,6 +1336,8 @@ static void ide_atapi_cmd_read_dma_cb(vo 26.70 IDEState *s = bm->ide_if; 26.71 int data_offset, n; 26.72 26.73 + if (!s->bs) return; /* yikes */ 26.74 + 26.75 if (ret < 0) { 26.76 ide_atapi_io_error(s, ret); 26.77 goto eot; 26.78 @@ -1703,6 +1743,8 @@ static void cdrom_change_cb(void *opaque 26.79 IDEState *s = opaque; 26.80 int64_t nb_sectors; 26.81 26.82 + if (!s->bs) return; /* yikes */ 26.83 + 26.84 /* XXX: send interrupt too */ 26.85 bdrv_get_geometry(s->bs, &nb_sectors); 26.86 s->nb_sectors = nb_sectors; 26.87 @@ -1744,6 +1786,7 @@ static void ide_ioport_write(void *opaqu 26.88 IDEState *s; 26.89 int unit, n; 26.90 int lba48 = 0; 26.91 + int ret; 26.92 26.93 #ifdef DEBUG_IDE 26.94 printf("IDE: write addr=0x%x val=0x%02x\n", addr, val); 26.95 @@ -1806,8 +1849,8 @@ static void ide_ioport_write(void *opaqu 26.96 printf("ide: CMD=%02x\n", val); 26.97 #endif 26.98 s = ide_if->cur_drive; 26.99 - /* ignore commands to non existant slave */ 26.100 - if (s != ide_if && !s->bs) 26.101 + /* ignore commands to non existant device */ 26.102 + if (!s->bs) 26.103 break; 26.104 26.105 switch(val) { 26.106 @@ -1976,10 +2019,8 @@ static void ide_ioport_write(void *opaqu 26.107 break; 26.108 case WIN_FLUSH_CACHE: 26.109 case WIN_FLUSH_CACHE_EXT: 26.110 - if (s->bs) 26.111 - bdrv_flush(s->bs); 26.112 - s->status = READY_STAT; 26.113 - ide_set_irq(s); 26.114 + s->status = BUSY_STAT; 26.115 + bdrv_aio_flush(s->bs, ide_flush_cb, s); 26.116 break; 26.117 case WIN_IDLEIMMEDIATE: 26.118 case WIN_STANDBY: 26.119 @@ -2723,6 +2764,7 @@ static void pci_ide_save(QEMUFile* f, vo 26.120 if (s->identify_set) { 26.121 qemu_put_buffer(f, (const uint8_t *)s->identify_data, 512); 26.122 } 26.123 + qemu_put_8s(f, &s->write_cache); 26.124 qemu_put_8s(f, &s->feature); 26.125 qemu_put_8s(f, &s->error); 26.126 qemu_put_be32s(f, &s->nsector); 26.127 @@ -2749,7 +2791,7 @@ static int pci_ide_load(QEMUFile* f, voi 26.128 PCIIDEState *d = opaque; 26.129 int ret, i; 26.130 26.131 - if (version_id != 1) 26.132 + if (version_id != 1 && version_id != 2) 26.133 return -EINVAL; 26.134 ret = pci_device_load(&d->dev, f); 26.135 if (ret < 0) 26.136 @@ -2780,6 +2822,8 @@ static int pci_ide_load(QEMUFile* f, voi 26.137 if (s->identify_set) { 26.138 qemu_get_buffer(f, (uint8_t *)s->identify_data, 512); 26.139 } 26.140 + if (version_id >= 2) 26.141 + qemu_get_8s(f, &s->write_cache); 26.142 qemu_get_8s(f, &s->feature); 26.143 qemu_get_8s(f, &s->error); 26.144 qemu_get_be32s(f, &s->nsector); 26.145 @@ -2854,7 +2898,7 @@ void pci_piix_ide_init(PCIBus *bus, Bloc 26.146 26.147 buffered_pio_init(); 26.148 26.149 - register_savevm("ide", 0, 1, pci_ide_save, pci_ide_load, d); 26.150 + register_savevm("ide", 0, 2, pci_ide_save, pci_ide_load, d); 26.151 } 26.152 26.153 /* hd_table must contain 4 block drivers */ 26.154 @@ -2895,7 +2939,7 @@ void pci_piix3_ide_init(PCIBus *bus, Blo 26.155 26.156 buffered_pio_init(); 26.157 26.158 - register_savevm("ide", 0, 1, pci_ide_save, pci_ide_load, d); 26.159 + register_savevm("ide", 0, 2, pci_ide_save, pci_ide_load, d); 26.160 } 26.161 26.162 /***********************************************************/
27.1 --- a/tools/ioemu/hw/ne2000.c Tue Apr 01 10:30:57 2008 -0600 27.2 +++ b/tools/ioemu/hw/ne2000.c Tue Apr 01 11:29:03 2008 -0600 27.3 @@ -207,7 +207,7 @@ static int ne2000_buffer_full(NE2000Stat 27.4 27.5 index = s->curpag << 8; 27.6 boundary = s->boundary << 8; 27.7 - if (index <= boundary) 27.8 + if (index < boundary) 27.9 avail = boundary - index; 27.10 else 27.11 avail = (s->stop - s->start) - (index - boundary);
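The one-character ne2000 fix above changes how the receive ring's free space is computed: with the old "<=", a write index equal to the boundary yielded zero available bytes and the ring looked permanently full, whereas the patch treats equal pointers as an empty ring with the whole (stop - start) span free. A standalone version of the corrected computation, with a check of the equal-pointer case (values are illustrative):

    #include <assert.h>

    /* Free space in a ring [start, stop) with write index `index` and
     * read boundary `boundary`; equal pointers mean an empty ring. */
    static int ring_avail(int start, int stop, int index, int boundary)
    {
        if (index < boundary)
            return boundary - index;
        return (stop - start) - (index - boundary);
    }

    int main(void)
    {
        /* Under the old "<=" test this case reported 0 free bytes and
         * every incoming packet was dropped as "buffer full". */
        assert(ring_avail(0x4000, 0x8000, 0x4c00, 0x4c00) == 0x4000);
        return 0;
    }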
28.1 --- a/tools/ioemu/hw/scsi-disk.c Tue Apr 01 10:30:57 2008 -0600 28.2 +++ b/tools/ioemu/hw/scsi-disk.c Tue Apr 01 11:29:03 2008 -0600 28.3 @@ -291,6 +291,7 @@ int32_t scsi_send_command(SCSIDevice *s, 28.4 uint8_t command; 28.5 uint8_t *outbuf; 28.6 SCSIRequest *r; 28.7 + int ret; 28.8 28.9 command = buf[0]; 28.10 r = scsi_find_request(s, tag); 28.11 @@ -496,7 +497,12 @@ int32_t scsi_send_command(SCSIDevice *s, 28.12 break; 28.13 case 0x35: 28.14 DPRINTF("Syncronise cache (sector %d, count %d)\n", lba, len); 28.15 - bdrv_flush(s->bdrv); 28.16 + ret = bdrv_flush(s->bdrv); 28.17 + if (ret) { 28.18 + DPRINTF("IO error on bdrv_flush\n"); 28.19 + scsi_command_complete(r, SENSE_HARDWARE_ERROR); 28.20 + return 0; 28.21 + } 28.22 break; 28.23 case 0x43: 28.24 {
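Unlike ATA in the IDE hunk earlier, SCSI gives SYNCHRONIZE CACHE (opcode 0x35) a clean failure path: the change above completes the command with a hardware-error sense condition when bdrv_flush() reports a problem, so the guest learns the flush did not reach stable storage. A sketch of an errno-to-sense mapping in the same spirit (the hunk always reports SENSE_HARDWARE_ERROR; the finer EIO case here is an illustrative refinement, not what the code does):

    #include <errno.h>

    /* Sense keys per SPC: MEDIUM ERROR = 3, HARDWARE ERROR = 4. */
    enum { SENSE_NO_SENSE = 0, SENSE_MEDIUM_ERROR = 3,
           SENSE_HARDWARE_ERROR = 4 };

    static int flush_errno_to_sense(int err)
    {
        switch (err) {
        case 0:   return SENSE_NO_SENSE;        /* flush succeeded   */
        case EIO: return SENSE_MEDIUM_ERROR;    /* data write failed */
        default:  return SENSE_HARDWARE_ERROR;  /* anything else     */
        }
    }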
29.1 --- a/tools/ioemu/hw/vga.c Tue Apr 01 10:30:57 2008 -0600 29.2 +++ b/tools/ioemu/hw/vga.c Tue Apr 01 11:29:03 2008 -0600 29.3 @@ -1486,7 +1486,7 @@ void check_sse2(void) 29.4 static void vga_draw_graphic(VGAState *s, int full_update) 29.5 { 29.6 int y1, y, update, linesize, y_start, double_scan, mask, depth; 29.7 - int width, height, shift_control, line_offset, bwidth, changed_flag; 29.8 + int width, height, shift_control, line_offset, bwidth, ds_depth; 29.9 ram_addr_t page0, page1; 29.10 int disp_width, multi_scan, multi_run; 29.11 uint8_t *d; 29.12 @@ -1499,13 +1499,13 @@ static void vga_draw_graphic(VGAState *s 29.13 s->get_resolution(s, &width, &height); 29.14 disp_width = width; 29.15 29.16 - changed_flag = 0; 29.17 + ds_depth = s->ds->depth; 29.18 depth = s->get_bpp(s); 29.19 if (s->ds->dpy_colourdepth != NULL && 29.20 - (s->ds->depth != depth || !s->ds->shared_buf)) { 29.21 + (ds_depth != depth || !s->ds->shared_buf)) 29.22 s->ds->dpy_colourdepth(s->ds, depth); 29.23 - changed_flag = 1; 29.24 - } 29.25 + if (ds_depth != s->ds->depth) full_update = 1; 29.26 + 29.27 s->rgb_to_pixel = 29.28 rgb_to_pixel_dup_table[get_depth_index(s->ds)]; 29.29 29.30 @@ -1569,17 +1569,18 @@ static void vga_draw_graphic(VGAState *s 29.31 } 29.32 29.33 vga_draw_line = vga_draw_line_table[v * NB_DEPTHS + get_depth_index(s->ds)]; 29.34 - if (disp_width != s->last_width || 29.35 + if (s->line_offset != s->last_line_offset || 29.36 + disp_width != s->last_width || 29.37 height != s->last_height) { 29.38 dpy_resize(s->ds, disp_width, height, s->line_offset); 29.39 s->last_scr_width = disp_width; 29.40 s->last_scr_height = height; 29.41 s->last_width = disp_width; 29.42 s->last_height = height; 29.43 + s->last_line_offset = s->line_offset; 29.44 full_update = 1; 29.45 - changed_flag = 1; 29.46 } 29.47 - if (s->ds->shared_buf && (changed_flag || s->ds->data != s->vram_ptr + (s->start_addr * 4))) 29.48 + if (s->ds->shared_buf && (full_update || s->ds->data != s->vram_ptr + (s->start_addr * 4))) 29.49 s->ds->dpy_setdata(s->ds, s->vram_ptr + (s->start_addr * 4)); 29.50 if (!s->ds->shared_buf && s->cursor_invalidate) 29.51 s->cursor_invalidate(s); 29.52 @@ -2072,6 +2073,7 @@ void vga_common_init(VGAState *s, Displa 29.53 s->vram_offset = vga_ram_offset; 29.54 s->vram_size = vga_ram_size; 29.55 s->ds = ds; 29.56 + ds->palette = s->last_palette; 29.57 s->get_bpp = vga_get_bpp; 29.58 s->get_offsets = vga_get_offsets; 29.59 s->get_resolution = vga_get_resolution;
30.1 --- a/tools/ioemu/hw/vga_int.h Tue Apr 01 10:30:57 2008 -0600 30.2 +++ b/tools/ioemu/hw/vga_int.h Tue Apr 01 11:29:03 2008 -0600 30.3 @@ -129,6 +129,7 @@ 30.4 uint32_t line_compare; \ 30.5 uint32_t start_addr; \ 30.6 uint32_t plane_updated; \ 30.7 + uint32_t last_line_offset; \ 30.8 uint8_t last_cw, last_ch; \ 30.9 uint32_t last_width, last_height; /* in chars or pixels */ \ 30.10 uint32_t last_scr_width, last_scr_height; /* in pixels */ \
31.1 --- a/tools/ioemu/hw/xenfb.c Tue Apr 01 10:30:57 2008 -0600 31.2 +++ b/tools/ioemu/hw/xenfb.c Tue Apr 01 11:29:03 2008 -0600 31.3 @@ -56,6 +56,7 @@ struct xenfb { 31.4 int depth; /* colour depth of guest framebuffer */ 31.5 int width; /* pixel width of guest framebuffer */ 31.6 int height; /* pixel height of guest framebuffer */ 31.7 + int offset; /* offset of the framebuffer */ 31.8 int abs_pointer_wanted; /* Whether guest supports absolute pointer */ 31.9 int button_state; /* Last seen pointer button state */ 31.10 char protocol[64]; /* frontend protocol */ 31.11 @@ -516,6 +517,18 @@ static void xenfb_on_fb_event(struct xen 31.12 } 31.13 xenfb_guest_copy(xenfb, x, y, w, h); 31.14 break; 31.15 + case XENFB_TYPE_RESIZE: 31.16 + xenfb->width = event->resize.width; 31.17 + xenfb->height = event->resize.height; 31.18 + xenfb->depth = event->resize.depth; 31.19 + xenfb->row_stride = event->resize.stride; 31.20 + xenfb->offset = event->resize.offset; 31.21 + dpy_colourdepth(xenfb->ds, xenfb->depth); 31.22 + dpy_resize(xenfb->ds, xenfb->width, xenfb->height, xenfb->row_stride); 31.23 + if (xenfb->ds->shared_buf) 31.24 + dpy_setdata(xenfb->ds, xenfb->pixels + xenfb->offset); 31.25 + xenfb_invalidate(xenfb); 31.26 + break; 31.27 } 31.28 } 31.29 xen_mb(); /* ensure we're done with ring contents */ 31.30 @@ -680,6 +693,7 @@ static void xenfb_dispatch_store(void *o 31.31 static int xenfb_read_frontend_fb_config(struct xenfb *xenfb) { 31.32 struct xenfb_page *fb_page; 31.33 int val; 31.34 + int videoram; 31.35 31.36 if (xenfb_xs_scanf1(xenfb->xsh, xenfb->fb.otherend, "feature-update", 31.37 "%d", &val) < 0) 31.38 @@ -702,11 +716,31 @@ static int xenfb_read_frontend_fb_config 31.39 /* TODO check for consistency with the above */ 31.40 xenfb->fb_len = fb_page->mem_length; 31.41 xenfb->row_stride = fb_page->line_length; 31.42 + 31.43 + /* Protect against hostile frontend, limit fb_len to max allowed */ 31.44 + if (xenfb_xs_scanf1(xenfb->xsh, xenfb->fb.nodename, "videoram", "%d", 31.45 + &videoram) < 0) 31.46 + videoram = 0; 31.47 + videoram = videoram * 1024 * 1024; 31.48 + if (videoram && xenfb->fb_len > videoram) { 31.49 + fprintf(stderr, "Framebuffer requested length of %zd exceeded allowed %d\n", 31.50 + xenfb->fb_len, videoram); 31.51 + xenfb->fb_len = videoram; 31.52 + if (xenfb->row_stride * xenfb->height > xenfb->fb_len) 31.53 + xenfb->height = xenfb->fb_len / xenfb->row_stride; 31.54 + } 31.55 fprintf(stderr, "Framebuffer depth %d width %d height %d line %d\n", 31.56 fb_page->depth, fb_page->width, fb_page->height, fb_page->line_length); 31.57 if (xenfb_map_fb(xenfb, xenfb->fb.otherend_id) < 0) 31.58 return -1; 31.59 31.60 + /* Indicate we have the frame buffer resize feature */ 31.61 + xenfb_xs_printf(xenfb->xsh, xenfb->fb.nodename, "feature-resize", "1"); 31.62 + 31.63 + /* Tell kbd pointer the screen geometry */ 31.64 + xenfb_xs_printf(xenfb->xsh, xenfb->kbd.nodename, "width", "%d", xenfb->width); 31.65 + xenfb_xs_printf(xenfb->xsh, xenfb->kbd.nodename, "height", "%d", xenfb->height); 31.66 + 31.67 if (xenfb_switch_state(&xenfb->fb, XenbusStateConnected)) 31.68 return -1; 31.69 if (xenfb_switch_state(&xenfb->kbd, XenbusStateConnected)) 31.70 @@ -1074,6 +1108,7 @@ static void xenfb_mouse_event(void *opaq 31.71 #define BLT(SRC_T,DST_T,RSB,GSB,BSB,RDB,GDB,BDB) \ 31.72 for (line = y ; line < (y+h) ; line++) { \ 31.73 SRC_T *src = (SRC_T *)(xenfb->pixels \ 31.74 + + xenfb->offset \ 31.75 + (line * xenfb->row_stride) \ 31.76 + (x * xenfb->depth / 8)); \ 31.77 DST_T *dst = (DST_T 
*)(xenfb->ds->data \ 31.78 @@ -1116,7 +1151,7 @@ static void xenfb_guest_copy(struct xenf 31.79 if (xenfb->depth == xenfb->ds->depth) { /* Perfect match can use fast path */ 31.80 for (line = y ; line < (y+h) ; line++) { 31.81 memcpy(xenfb->ds->data + (line * xenfb->ds->linesize) + (x * xenfb->ds->depth / 8), 31.82 - xenfb->pixels + (line * xenfb->row_stride) + (x * xenfb->depth / 8), 31.83 + xenfb->pixels + xenfb->offset + (line * xenfb->row_stride) + (x * xenfb->depth / 8), 31.84 w * xenfb->depth / 8); 31.85 } 31.86 } else { /* Mismatch requires slow pixel munging */
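Two things happen in the xenfb change above. The backend now honours XENFB_TYPE_RESIZE events, adopting the width, height, depth, stride and framebuffer offset the frontend advertises and re-pointing the shared display buffer at pixels + offset (which is why the BLT macro and the fast copy path gain the offset term); writing feature-resize=1 into xenstore is what advertises this capability to the frontend. And since the frontend chooses its own mem_length, the backend clamps it against the "videoram" limit from xenstore, shrinking the visible height to match, so a hostile guest cannot claim an oversized framebuffer. The clamp, reduced to a self-contained sketch (names are illustrative):

    #include <stddef.h>
    #include <stdio.h>

    static void clamp_fb(size_t *fb_len, int *height, int row_stride,
                         size_t videoram_bytes)
    {
        if (videoram_bytes && *fb_len > videoram_bytes) {
            fprintf(stderr, "fb length %zu exceeds allowed %zu\n",
                    *fb_len, videoram_bytes);
            *fb_len = videoram_bytes;               /* hard upper bound  */
            if ((size_t)*height * row_stride > *fb_len)
                *height = *fb_len / row_stride;     /* keep rows in range */
        }
    }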
32.1 --- a/tools/ioemu/sdl.c Tue Apr 01 10:30:57 2008 -0600 32.2 +++ b/tools/ioemu/sdl.c Tue Apr 01 11:29:03 2008 -0600 32.3 @@ -85,19 +85,33 @@ static void opengl_setdata(DisplayState 32.4 glPixelStorei(GL_UNPACK_LSB_FIRST, 1); 32.5 switch (ds->depth) { 32.6 case 8: 32.7 - tex_format = GL_RGB; 32.8 - tex_type = GL_UNSIGNED_BYTE_3_3_2; 32.9 - glPixelStorei (GL_UNPACK_ALIGNMENT, 1); 32.10 + if (ds->palette == NULL) { 32.11 + tex_format = GL_RGB; 32.12 + tex_type = GL_UNSIGNED_BYTE_3_3_2; 32.13 + } else { 32.14 + int i; 32.15 + GLushort paletter[256], paletteg[256], paletteb[256]; 32.16 + for (i = 0; i < 256; i++) { 32.17 + uint8_t rgb = ds->palette[i] >> 16; 32.18 + paletter[i] = ((rgb & 0xe0) >> 5) * 65535 / 7; 32.19 + paletteg[i] = ((rgb & 0x1c) >> 2) * 65535 / 7; 32.20 + paletteb[i] = (rgb & 0x3) * 65535 / 3; 32.21 + } 32.22 + glPixelMapusv(GL_PIXEL_MAP_I_TO_R, 256, paletter); 32.23 + glPixelMapusv(GL_PIXEL_MAP_I_TO_G, 256, paletteg); 32.24 + glPixelMapusv(GL_PIXEL_MAP_I_TO_B, 256, paletteb); 32.25 + 32.26 + tex_format = GL_COLOR_INDEX; 32.27 + tex_type = GL_UNSIGNED_BYTE; 32.28 + } 32.29 break; 32.30 case 16: 32.31 tex_format = GL_RGB; 32.32 tex_type = GL_UNSIGNED_SHORT_5_6_5; 32.33 - glPixelStorei (GL_UNPACK_ALIGNMENT, 2); 32.34 break; 32.35 case 24: 32.36 tex_format = GL_BGR; 32.37 tex_type = GL_UNSIGNED_BYTE; 32.38 - glPixelStorei (GL_UNPACK_ALIGNMENT, 1); 32.39 break; 32.40 case 32: 32.41 if (!ds->bgr) { 32.42 @@ -107,7 +121,6 @@ static void opengl_setdata(DisplayState 32.43 tex_format = GL_RGBA; 32.44 tex_type = GL_UNSIGNED_BYTE; 32.45 } 32.46 - glPixelStorei (GL_UNPACK_ALIGNMENT, 4); 32.47 break; 32.48 } 32.49 glPixelStorei(GL_UNPACK_ROW_LENGTH, (ds->linesize * 8) / ds->depth); 32.50 @@ -184,6 +197,17 @@ static void sdl_setdata(DisplayState *ds 32.51 return; 32.52 } 32.53 shared = SDL_CreateRGBSurfaceFrom(pixels, width, height, ds->depth, ds->linesize, rmask , gmask, bmask, amask); 32.54 + if (ds->depth == 8 && ds->palette != NULL) { 32.55 + SDL_Color palette[256]; 32.56 + int i; 32.57 + for (i = 0; i < 256; i++) { 32.58 + uint8_t rgb = ds->palette[i] >> 16; 32.59 + palette[i].r = ((rgb & 0xe0) >> 5) * 255 / 7; 32.60 + palette[i].g = ((rgb & 0x1c) >> 2) * 255 / 7; 32.61 + palette[i].b = (rgb & 0x3) * 255 / 3; 32.62 + } 32.63 + SDL_SetColors(shared, palette, 0, 256); 32.64 + } 32.65 ds->data = pixels; 32.66 } 32.67 32.68 @@ -210,21 +234,32 @@ static void sdl_resize(DisplayState *ds, 32.69 32.70 again: 32.71 screen = SDL_SetVideoMode(w, h, 0, flags); 32.72 -#ifndef CONFIG_OPENGL 32.73 + 32.74 if (!screen) { 32.75 fprintf(stderr, "Could not open SDL display: %s\n", SDL_GetError()); 32.76 + if (opengl_enabled) { 32.77 + /* Fallback to SDL */ 32.78 + opengl_enabled = 0; 32.79 + ds->dpy_update = sdl_update; 32.80 + ds->dpy_setdata = sdl_setdata; 32.81 + sdl_resize(ds, w, h, linesize); 32.82 + return; 32.83 + } 32.84 exit(1); 32.85 } 32.86 - if (!screen->pixels && (flags & SDL_HWSURFACE) && (flags & SDL_FULLSCREEN)) { 32.87 - flags &= ~SDL_HWSURFACE; 32.88 - goto again; 32.89 + 32.90 + if (!opengl_enabled) { 32.91 + if (!screen->pixels && (flags & SDL_HWSURFACE) && (flags & SDL_FULLSCREEN)) { 32.92 + flags &= ~SDL_HWSURFACE; 32.93 + goto again; 32.94 + } 32.95 + 32.96 + if (!screen->pixels) { 32.97 + fprintf(stderr, "Could not open SDL display: %s\n", SDL_GetError()); 32.98 + exit(1); 32.99 + } 32.100 } 32.101 32.102 - if (!screen->pixels) { 32.103 - fprintf(stderr, "Could not open SDL display: %s\n", SDL_GetError()); 32.104 - exit(1); 32.105 - } 32.106 -#endif 32.107 ds->width = w; 
32.108 ds->height = h; 32.109 if (!ds->shared_buf) { 32.110 @@ -262,7 +297,10 @@ static void sdl_resize(DisplayState *ds, 32.111 32.112 static void sdl_colourdepth(DisplayState *ds, int depth) 32.113 { 32.114 - if (!depth || !ds->depth) return; 32.115 + if (!depth || !ds->depth) { 32.116 + ds->shared_buf = 0; 32.117 + return; 32.118 + } 32.119 ds->shared_buf = 1; 32.120 ds->depth = depth; 32.121 ds->linesize = width * depth / 8;
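The sdl.c change above adds 8-bit palette support to both output paths and makes the OpenGL path degrade gracefully. For OpenGL, each palette entry carries a 3:3:2 RGB byte in bits 16-23; the code expands the three fields to full-range 16-bit values for glPixelMapusv() and uploads the surface as GL_COLOR_INDEX. And if SDL_SetVideoMode() fails while OpenGL is enabled, the code now swaps the update/setdata hooks back to the plain SDL versions and retries instead of exiting. The palette expansion on its own (plain C, with illustrative types in place of GLushort):

    #include <stdint.h>

    static void expand_332(const uint32_t *palette,
                           uint16_t r[256], uint16_t g[256], uint16_t b[256])
    {
        int i;
        for (i = 0; i < 256; i++) {
            uint8_t rgb = palette[i] >> 16;          /* RRRGGGBB byte   */
            r[i] = ((rgb & 0xe0) >> 5) * 65535 / 7;  /* 3 bits of red   */
            g[i] = ((rgb & 0x1c) >> 2) * 65535 / 7;  /* 3 bits of green */
            b[i] =  (rgb & 0x03)       * 65535 / 3;  /* 2 bits of blue  */
        }
    }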
33.1 --- a/tools/ioemu/vl.h Tue Apr 01 10:30:57 2008 -0600 33.2 +++ b/tools/ioemu/vl.h Tue Apr 01 11:29:03 2008 -0600 33.3 @@ -653,6 +653,8 @@ BlockDriverAIOCB *bdrv_aio_write(BlockDr 33.4 const uint8_t *buf, int nb_sectors, 33.5 BlockDriverCompletionFunc *cb, void *opaque); 33.6 void bdrv_aio_cancel(BlockDriverAIOCB *acb); 33.7 +BlockDriverAIOCB *bdrv_aio_flush(BlockDriverState *bs, 33.8 + BlockDriverCompletionFunc *cb, void *opaque); 33.9 33.10 void qemu_aio_init(void); 33.11 void qemu_aio_poll(void); 33.12 @@ -662,7 +664,7 @@ void qemu_aio_wait(void); 33.13 void qemu_aio_wait_end(void); 33.14 33.15 /* Ensure contents are flushed to disk. */ 33.16 -void bdrv_flush(BlockDriverState *bs); 33.17 +int bdrv_flush(BlockDriverState *bs); 33.18 33.19 #define BDRV_TYPE_HD 0 33.20 #define BDRV_TYPE_CDROM 1 33.21 @@ -935,6 +937,7 @@ struct DisplayState { 33.22 int width; 33.23 int height; 33.24 void *opaque; 33.25 + uint32_t *palette; 33.26 uint64_t gui_timer_interval; 33.27 33.28 int switchbpp;
34.1 --- a/tools/ioemu/vnc.c Tue Apr 01 10:30:57 2008 -0600 34.2 +++ b/tools/ioemu/vnc.c Tue Apr 01 11:29:03 2008 -0600 34.3 @@ -1640,6 +1640,7 @@ static void vnc_dpy_colourdepth(DisplayS 34.4 if (ds->depth == 32) return; 34.5 depth = 32; 34.6 break; 34.7 + case 8: 34.8 case 0: 34.9 ds->shared_buf = 0; 34.10 return;
36.1 --- a/tools/libfsimage/Rules.mk Tue Apr 01 10:30:57 2008 -0600 36.2 +++ b/tools/libfsimage/Rules.mk Tue Apr 01 11:29:03 2008 -0600 36.3 @@ -11,6 +11,7 @@ FSDIR-$(CONFIG_Linux) = $(LIBDIR)/fs/$(F 36.4 FSDIR-$(CONFIG_SunOS)-x86_64 = $(PREFIX)/lib/fs/$(FS)/64 36.5 FSDIR-$(CONFIG_SunOS)-x86_32 = $(PREFIX)/lib/fs/$(FS)/ 36.6 FSDIR-$(CONFIG_SunOS) = $(FSDIR-$(CONFIG_SunOS)-$(XEN_TARGET_ARCH)) 36.7 +FSDIR-$(CONFIG_NetBSD) = $(LIBDIR)/fs/$(FS) 36.8 FSDIR = $(FSDIR-y) 36.9 36.10 FSLIB = fsimage.so
37.1 --- a/tools/libfsimage/check-libext2fs Tue Apr 01 10:30:57 2008 -0600 37.2 +++ b/tools/libfsimage/check-libext2fs Tue Apr 01 11:29:03 2008 -0600 37.3 @@ -1,4 +1,4 @@ 37.4 -#!/bin/bash 37.5 +#!/bin/sh 37.6 37.7 cat >ext2-test.c <<EOF 37.8 #include <ext2fs/ext2fs.h> 37.9 @@ -9,7 +9,7 @@ int main() 37.10 } 37.11 EOF 37.12 37.13 -${CC:-gcc} -o ext2-test ext2-test.c -lext2fs >/dev/null 2>&1 37.14 +${CC-gcc} -o ext2-test ext2-test.c -lext2fs >/dev/null 2>&1 37.15 if [ $? = 0 ]; then 37.16 echo ext2fs-lib 37.17 else
38.1 --- a/tools/libfsimage/common/fsimage_grub.c Tue Apr 01 10:30:57 2008 -0600 38.2 +++ b/tools/libfsimage/common/fsimage_grub.c Tue Apr 01 11:29:03 2008 -0600 38.3 @@ -204,19 +204,47 @@ int 38.4 fsig_devread(fsi_file_t *ffi, unsigned int sector, unsigned int offset, 38.5 unsigned int bufsize, char *buf) 38.6 { 38.7 - uint64_t off = ffi->ff_fsi->f_off + ((uint64_t)sector * 512) + offset; 38.8 - ssize_t bytes_read = 0; 38.9 + off_t off; 38.10 + ssize_t ret; 38.11 + int n, r; 38.12 + char tmp[SECTOR_SIZE]; 38.13 + 38.14 + off = ffi->ff_fsi->f_off + ((off_t)sector * SECTOR_SIZE) + offset; 38.15 + 38.16 + /* 38.17 + * Make reads from a raw disk sector-aligned. This is a requirement 38.18 + * for NetBSD. Split the read up into to three parts to meet this 38.19 + * requirement. 38.20 + */ 38.21 38.22 - while (bufsize) { 38.23 - ssize_t ret = pread(ffi->ff_fsi->f_fd, buf + bytes_read, 38.24 - bufsize, (off_t)off); 38.25 - if (ret == -1) 38.26 + n = (off & (SECTOR_SIZE - 1)); 38.27 + if (n > 0) { 38.28 + r = SECTOR_SIZE - n; 38.29 + if (r > bufsize) 38.30 + r = bufsize; 38.31 + ret = pread(ffi->ff_fsi->f_fd, tmp, SECTOR_SIZE, off - n); 38.32 + if (ret < n + r) 38.33 return (0); 38.34 - if (ret == 0) 38.35 - return (0); 38.36 + memcpy(buf, tmp + n, r); 38.37 + buf += r; 38.38 + bufsize -= r; 38.39 + off += r; 38.40 + } 38.41 38.42 - bytes_read += ret; 38.43 - bufsize -= ret; 38.44 + n = (bufsize & ~(SECTOR_SIZE - 1)); 38.45 + if (n > 0) { 38.46 + ret = pread(ffi->ff_fsi->f_fd, buf, n, off); 38.47 + if (ret < n) 38.48 + return (0); 38.49 + buf += n; 38.50 + bufsize -= n; 38.51 + off += n; 38.52 + } 38.53 + if (bufsize > 0) { 38.54 + ret = pread(ffi->ff_fsi->f_fd, tmp, SECTOR_SIZE, off); 38.55 + if (ret < bufsize) 38.56 + return (0); 38.57 + memcpy(buf, tmp, bufsize); 38.58 } 38.59 38.60 return (1);
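The fsig_devread() rewrite above exists because raw devices on NetBSD reject reads that are not sector-aligned, as the added comment notes. The old loop issued pread() at arbitrary offsets; the new code decomposes each request into an unaligned head read bounced through a sector-sized scratch buffer, a large aligned middle read directly into the caller's buffer, and a bounced tail. The same decomposition as a self-contained helper (512-byte sectors assumed, matching the original's SECTOR_SIZE):

    #include <string.h>
    #include <unistd.h>

    #define SEC 512

    /* Returns 1 on success, 0 on failure, as fsig_devread() does. */
    static int aligned_pread(int fd, char *buf, unsigned int len, off_t off)
    {
        char tmp[SEC];
        int head = off & (SEC - 1);

        if (head) {                     /* unaligned head, via bounce */
            int r = SEC - head;
            if ((unsigned int)r > len)
                r = len;
            if (pread(fd, tmp, SEC, off - head) < head + r)
                return 0;
            memcpy(buf, tmp + head, r);
            buf += r; len -= r; off += r;
        }
        if (len & ~(SEC - 1u)) {        /* aligned bulk of the request */
            int n = len & ~(SEC - 1u);
            if (pread(fd, buf, n, off) < n)
                return 0;
            buf += n; len -= n; off += n;
        }
        if (len) {                      /* unaligned tail, via bounce */
            if (pread(fd, tmp, SEC, off) < (ssize_t)len)
                return 0;
            memcpy(buf, tmp, len);
        }
        return 1;
    }

The FSYS_BUFLEN bump in the following header hunk is independent: it enlarges the grub filesystem scratch buffer from 32KB to 256KB.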
39.1 --- a/tools/libfsimage/common/fsimage_grub.h Tue Apr 01 10:30:57 2008 -0600 39.2 +++ b/tools/libfsimage/common/fsimage_grub.h Tue Apr 01 11:29:03 2008 -0600 39.3 @@ -44,7 +44,7 @@ typedef struct fsig_plugin_ops { 39.4 } fsig_plugin_ops_t; 39.5 39.6 #define STAGE1_5 39.7 -#define FSYS_BUFLEN 0x8000 39.8 +#define FSYS_BUFLEN 0x40000 39.9 #define SECTOR_BITS 9 39.10 #define SECTOR_SIZE 0x200 39.11
40.1 --- a/tools/libfsimage/common/fsimage_plugin.c Tue Apr 01 10:30:57 2008 -0600 40.2 +++ b/tools/libfsimage/common/fsimage_plugin.c Tue Apr 01 11:29:03 2008 -0600 40.3 @@ -131,7 +131,10 @@ static int load_plugins(void) 40.4 int err; 40.5 int ret = -1; 40.6 40.7 -#ifdef __sun__ 40.8 +#if defined(FSIMAGE_FSDIR) 40.9 + if (fsdir == NULL) 40.10 + fsdir = FSIMAGE_FSDIR; 40.11 +#elif defined(__sun__) 40.12 if (fsdir == NULL) 40.13 fsdir = "/usr/lib/fs"; 40.14
41.1 --- a/tools/libxc/Makefile Tue Apr 01 10:30:57 2008 -0600 41.2 +++ b/tools/libxc/Makefile Tue Apr 01 11:29:03 2008 -0600 41.3 @@ -46,10 +46,11 @@ GUEST_SRCS-y += libelf-tools.c libelf-lo 41.4 GUEST_SRCS-y += libelf-dominfo.c libelf-relocate.c 41.5 41.6 # new domain builder 41.7 -GUEST_SRCS-y += xc_dom_core.c xc_dom_boot.c 41.8 -GUEST_SRCS-y += xc_dom_elfloader.c 41.9 -GUEST_SRCS-y += xc_dom_binloader.c 41.10 -GUEST_SRCS-y += xc_dom_compat_linux.c 41.11 +GUEST_SRCS-y += xc_dom_core.c xc_dom_boot.c 41.12 +GUEST_SRCS-y += xc_dom_elfloader.c 41.13 +GUEST_SRCS-$(CONFIG_X86) += xc_dom_bzimageloader.c 41.14 +GUEST_SRCS-y += xc_dom_binloader.c 41.15 +GUEST_SRCS-y += xc_dom_compat_linux.c 41.16 41.17 GUEST_SRCS-$(CONFIG_X86) += xc_dom_x86.c 41.18 GUEST_SRCS-$(CONFIG_IA64) += xc_dom_ia64.c
42.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 42.2 +++ b/tools/libxc/xc_dom_bzimageloader.c Tue Apr 01 11:29:03 2008 -0600 42.3 @@ -0,0 +1,159 @@ 42.4 +/* 42.5 + * Xen domain builder -- bzImage bits 42.6 + * 42.7 + * Parse and load bzImage kernel images. 42.8 + * 42.9 + * This relies on version 2.08 of the boot protocol, which contains an 42.10 + * ELF file embedded in the bzImage. The loader extracts this ELF 42.11 + * image and passes it off to the standard ELF loader. 42.12 + * 42.13 + * This code is licenced under the GPL. 42.14 + * written 2006 by Gerd Hoffmann <kraxel@suse.de>. 42.15 + * written 2007 by Jeremy Fitzhardinge <jeremy@xensource.com> 42.16 + * written 2008 by Ian Campbell <ijc@hellion.org.uk> 42.17 + * 42.18 + */ 42.19 +#include <stdio.h> 42.20 +#include <stdlib.h> 42.21 +#include <inttypes.h> 42.22 + 42.23 +#include "xg_private.h" 42.24 +#include "xc_dom.h" 42.25 + 42.26 +struct setup_header { 42.27 + uint8_t _pad0[0x1f1]; /* skip uninteresting stuff */ 42.28 + uint8_t setup_sects; 42.29 + uint16_t root_flags; 42.30 + uint32_t syssize; 42.31 + uint16_t ram_size; 42.32 + uint16_t vid_mode; 42.33 + uint16_t root_dev; 42.34 + uint16_t boot_flag; 42.35 + uint16_t jump; 42.36 + uint32_t header; 42.37 +#define HDR_MAGIC "HdrS" 42.38 +#define HDR_MAGIC_SZ 4 42.39 + uint16_t version; 42.40 +#define VERSION(h,l) (((h)<<8) | (l)) 42.41 + uint32_t realmode_swtch; 42.42 + uint16_t start_sys; 42.43 + uint16_t kernel_version; 42.44 + uint8_t type_of_loader; 42.45 + uint8_t loadflags; 42.46 + uint16_t setup_move_size; 42.47 + uint32_t code32_start; 42.48 + uint32_t ramdisk_image; 42.49 + uint32_t ramdisk_size; 42.50 + uint32_t bootsect_kludge; 42.51 + uint16_t heap_end_ptr; 42.52 + uint16_t _pad1; 42.53 + uint32_t cmd_line_ptr; 42.54 + uint32_t initrd_addr_max; 42.55 + uint32_t kernel_alignment; 42.56 + uint8_t relocatable_kernel; 42.57 + uint8_t _pad2[3]; 42.58 + uint32_t cmdline_size; 42.59 + uint32_t hardware_subarch; 42.60 + uint64_t hardware_subarch_data; 42.61 + uint32_t payload_offset; 42.62 + uint32_t payload_length; 42.63 +} __attribute__((packed)); 42.64 + 42.65 +extern struct xc_dom_loader elf_loader; 42.66 + 42.67 +static unsigned int payload_offset(struct setup_header *hdr) 42.68 +{ 42.69 + unsigned int off; 42.70 + 42.71 + off = (hdr->setup_sects + 1) * 512; 42.72 + off += hdr->payload_offset; 42.73 + return off; 42.74 +} 42.75 + 42.76 +static int check_bzimage_kernel(struct xc_dom_image *dom, int verbose) 42.77 +{ 42.78 + struct setup_header *hdr; 42.79 + 42.80 + if ( dom->kernel_blob == NULL ) 42.81 + { 42.82 + if ( verbose ) 42.83 + xc_dom_panic(XC_INTERNAL_ERROR, "%s: no kernel image loaded\n", 42.84 + __FUNCTION__); 42.85 + return -EINVAL; 42.86 + } 42.87 + if ( dom->kernel_size < sizeof(struct setup_header) ) 42.88 + { 42.89 + if ( verbose ) 42.90 + xc_dom_panic(XC_INTERNAL_ERROR, "%s: kernel image too small\n", 42.91 + __FUNCTION__); 42.92 + return -EINVAL; 42.93 + } 42.94 + 42.95 + hdr = dom->kernel_blob; 42.96 + 42.97 + if ( memcmp(&hdr->header, HDR_MAGIC, HDR_MAGIC_SZ) != 0 ) 42.98 + { 42.99 + if ( verbose ) 42.100 + xc_dom_panic(XC_INVALID_KERNEL, "%s: kernel is not a bzImage\n", 42.101 + __FUNCTION__); 42.102 + return -EINVAL; 42.103 + } 42.104 + 42.105 + if ( hdr->version < VERSION(2,8) ) 42.106 + { 42.107 + if ( verbose ) 42.108 + xc_dom_panic(XC_INVALID_KERNEL, "%s: boot protocol too old (%04x)\n", 42.109 + __FUNCTION__, hdr->version); 42.110 + return -EINVAL; 42.111 + } 42.112 + 42.113 + dom->kernel_blob = dom->kernel_blob + payload_offset(hdr); 42.114 
+ dom->kernel_size = hdr->payload_length; 42.115 + 42.116 + if ( xc_dom_try_gunzip(dom, &dom->kernel_blob, &dom->kernel_size) == -1 ) 42.117 + { 42.118 + if ( verbose ) 42.119 + xc_dom_panic(XC_INVALID_KERNEL, "%s: unable to decompress kernel\n", 42.120 + __FUNCTION__); 42.121 + return -EINVAL; 42.122 + } 42.123 + 42.124 + return elf_loader.probe(dom); 42.125 +} 42.126 + 42.127 +static int xc_dom_probe_bzimage_kernel(struct xc_dom_image *dom) 42.128 +{ 42.129 + return check_bzimage_kernel(dom, 0); 42.130 +} 42.131 + 42.132 +static int xc_dom_parse_bzimage_kernel(struct xc_dom_image *dom) 42.133 +{ 42.134 + return elf_loader.parser(dom); 42.135 +} 42.136 + 42.137 +static int xc_dom_load_bzimage_kernel(struct xc_dom_image *dom) 42.138 +{ 42.139 + return elf_loader.loader(dom); 42.140 +} 42.141 + 42.142 +static struct xc_dom_loader bzimage_loader = { 42.143 + .name = "Linux bzImage", 42.144 + .probe = xc_dom_probe_bzimage_kernel, 42.145 + .parser = xc_dom_parse_bzimage_kernel, 42.146 + .loader = xc_dom_load_bzimage_kernel, 42.147 +}; 42.148 + 42.149 +static void __init register_loader(void) 42.150 +{ 42.151 + xc_dom_register_loader(&bzimage_loader); 42.152 +} 42.153 + 42.154 +/* 42.155 + * Local variables: 42.156 + * mode: C 42.157 + * c-set-style: "BSD" 42.158 + * c-basic-offset: 4 42.159 + * tab-width: 4 42.160 + * indent-tabs-mode: nil 42.161 + * End: 42.162 + */
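The new bzImage loader works because boot protocol 2.08 embeds a complete ELF image in the kernel payload: the setup header sits at a fixed offset in the image (hence the 0x1f1 pad), the real-mode setup occupies (setup_sects + 1) * 512 bytes counting the boot sector, and payload_offset/payload_length locate the (possibly gzipped) ELF within the protected-mode portion. After the optional gunzip, everything is delegated to the generic ELF loader, which is why the next hunk un-statics elf_loader. The offset arithmetic checked in isolation (made-up header values):

    #include <assert.h>
    #include <stdint.h>

    struct hdr { uint8_t setup_sects; uint32_t payload_offset; };

    /* Mirrors payload_offset() above: skip the real-mode setup, then
     * add the payload offset relative to the protected-mode code. */
    static unsigned int payload_off(const struct hdr *h)
    {
        return (h->setup_sects + 1) * 512 + h->payload_offset;
    }

    int main(void)
    {
        struct hdr h = { .setup_sects = 15, .payload_offset = 0x100 };
        assert(payload_off(&h) == 16 * 512 + 0x100);
        return 0;
    }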
43.1 --- a/tools/libxc/xc_dom_elfloader.c Tue Apr 01 10:30:57 2008 -0600 43.2 +++ b/tools/libxc/xc_dom_elfloader.c Tue Apr 01 11:29:03 2008 -0600 43.3 @@ -281,7 +281,7 @@ static int xc_dom_load_elf_kernel(struct 43.4 43.5 /* ------------------------------------------------------------------------ */ 43.6 43.7 -static struct xc_dom_loader elf_loader = { 43.8 +struct xc_dom_loader elf_loader = { 43.9 .name = "ELF-generic", 43.10 .probe = xc_dom_probe_elf_kernel, 43.11 .parser = xc_dom_parse_elf_kernel,
44.1 --- a/tools/libxen/include/xen/api/xen_acmpolicy.h Tue Apr 01 10:30:57 2008 -0600 44.2 +++ b/tools/libxen/include/xen/api/xen_acmpolicy.h Tue Apr 01 11:29:03 2008 -0600 44.3 @@ -74,7 +74,7 @@ xen_acm_header_free(xen_acm_header *hdr) 44.4 /** 44.5 * Get the referenced policy's record. 44.6 */ 44.7 -bool 44.8 +extern bool 44.9 xen_acmpolicy_get_record(xen_session *session, xen_acmpolicy_record **result, 44.10 xen_xspolicy xspolicy); 44.11 44.12 @@ -118,14 +118,14 @@ xen_acmpolicy_get_enforced_binary(xen_se 44.13 /** 44.14 * Get the ACM ssidref of the given VM. 44.15 */ 44.16 -bool 44.17 +extern bool 44.18 xen_acmpolicy_get_VM_ssidref(xen_session *session, int64_t *result, 44.19 xen_vm vm); 44.20 44.21 /** 44.22 * Get the UUID field of the given policy. 44.23 */ 44.24 -bool 44.25 +extern bool 44.26 xen_acmpolicy_get_uuid(xen_session *session, char **result, 44.27 xen_xspolicy xspolicy); 44.28
45.1 --- a/tools/libxen/include/xen/api/xen_xspolicy.h Tue Apr 01 10:30:57 2008 -0600 45.2 +++ b/tools/libxen/include/xen/api/xen_xspolicy.h Tue Apr 01 11:29:03 2008 -0600 45.3 @@ -68,6 +68,8 @@ enum xs_instantiationflags { 45.4 #define XSERR_RESOURCE_ACCESS 23 + XSERR_BASE 45.5 #define XSERR_HV_OP_FAILED 24 + XSERR_BASE 45.6 #define XSERR_BOOTPOLICY_INSTALL_ERROR 25 + XSERR_BASE 45.7 +#define XSERR_VM_NOT_AUTHORIZED 26 + XSERR_BASE 45.8 +#define XSERR_VM_IN_CONFLICT 27 + XSERR_BASE 45.9 45.10 45.11 /** 45.12 @@ -179,28 +181,28 @@ typedef struct xen_xs_policystate 45.13 char *errors; 45.14 } xen_xs_policystate; 45.15 45.16 -void 45.17 +extern void 45.18 xen_xs_policystate_free(xen_xs_policystate *state); 45.19 45.20 45.21 /** 45.22 * Get the referenced policy's record. 45.23 */ 45.24 -bool 45.25 +extern bool 45.26 xen_xspolicy_get_record(xen_session *session, xen_xspolicy_record **result, 45.27 xen_xspolicy xspolicy); 45.28 45.29 /** 45.30 * Get the UUID field of the given policy. 45.31 */ 45.32 -bool 45.33 +extern bool 45.34 xen_xspolicy_get_uuid(xen_session *session, char **result, 45.35 xen_xspolicy xspolicy); 45.36 45.37 /** 45.38 * Get a policy given it's UUID 45.39 */ 45.40 -bool 45.41 +extern bool 45.42 xen_xspolicy_get_by_uuid(xen_session *session, xen_xspolicy *result, 45.43 char *uuid); 45.44 45.45 @@ -208,7 +210,7 @@ xen_xspolicy_get_by_uuid(xen_session *se 45.46 /** 45.47 * Get the types of policies supported by the system. 45.48 */ 45.49 -bool 45.50 +extern bool 45.51 xen_xspolicy_get_xstype(xen_session *session, xs_type *result); 45.52 45.53 45.54 @@ -216,13 +218,13 @@ xen_xspolicy_get_xstype(xen_session *ses 45.55 * Get information about the currently managed policy. 45.56 * (The API allows only one policy to be on the system.) 45.57 */ 45.58 -bool 45.59 +extern bool 45.60 xen_xspolicy_get_xspolicy(xen_session *session, xen_xs_policystate **result); 45.61 45.62 /** 45.63 * Activate the referenced policy by loading it into the hypervisor. 45.64 */ 45.65 -bool 45.66 +extern bool 45.67 xen_xspolicy_activate_xspolicy(xen_session *session, int64_t *result, 45.68 xen_xspolicy xspolicy, 45.69 xs_instantiationflags flags); 45.70 @@ -234,7 +236,7 @@ xen_xspolicy_activate_xspolicy(xen_sessi 45.71 * on whether to load the policy immediately and whether to overwrite 45.72 * an existing policy on the system. 45.73 */ 45.74 -bool 45.75 +extern bool 45.76 xen_xspolicy_set_xspolicy(xen_session *session, xen_xs_policystate **result, 45.77 xs_type type, char *repr, int64_t flags, 45.78 bool overwrite); 45.79 @@ -248,7 +250,7 @@ xen_xspolicy_set_xspolicy(xen_session *s 45.80 * for example fail if other domains than Domain-0 are running and have 45.81 * different labels than Domain-0. 45.82 */ 45.83 -bool 45.84 +extern bool 45.85 xen_xspolicy_reset_xspolicy(xen_session *session, xen_xs_policystate **result, 45.86 xs_type type); 45.87 45.88 @@ -281,4 +283,11 @@ extern bool 45.89 xen_xspolicy_get_resource_label(xen_session *session, char **label, 45.90 char *resource); 45.91 45.92 +/** 45.93 + * Check whether a VM with the given VM-label could run. 45.94 + */ 45.95 +extern bool 45.96 +xen_xspolicy_can_run(xen_session *session, int64_t *result, 45.97 + char *security_label); 45.98 + 45.99 #endif
46.1 --- a/tools/libxen/src/xen_xspolicy.c Tue Apr 01 10:30:57 2008 -0600 46.2 +++ b/tools/libxen/src/xen_xspolicy.c Tue Apr 01 11:29:03 2008 -0600 46.3 @@ -343,3 +343,21 @@ xen_xspolicy_activate_xspolicy(xen_sessi 46.4 XEN_CALL_("XSPolicy.activate_xspolicy"); 46.5 return session->ok; 46.6 } 46.7 + 46.8 + 46.9 +bool 46.10 +xen_xspolicy_can_run(xen_session *session, int64_t *result, 46.11 + char *security_label) 46.12 +{ 46.13 + abstract_value param_values[] = 46.14 + { 46.15 + { .type = &abstract_type_string, 46.16 + .u.string_val = security_label } 46.17 + }; 46.18 + 46.19 + abstract_type result_type = abstract_type_int; 46.20 + 46.21 + *result = 0; 46.22 + XEN_CALL_("XSPolicy.can_run"); 46.23 + return session->ok; 46.24 +}
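The new XSPolicy.can_run call lets a client ask, before starting a VM, whether a security label is both authorized under the active policy and free of conflict-set clashes with running domains; failures surface through the XSERR_VM_NOT_AUTHORIZED and XSERR_VM_IN_CONFLICT codes added to the headers above. A hypothetical caller, assuming a session already obtained through the usual libxen login path:

    #include <stdio.h>
    #include <stdint.h>
    #include <xen/api/xen_xspolicy.h>

    static void check_label(xen_session *session, char *label)
    {
        int64_t rc = 0;

        if (!xen_xspolicy_can_run(session, &rc, label)) {
            fprintf(stderr, "XSPolicy.can_run transport failure\n");
            return;
        }
        /* rc carries an XSERR_* status; zero is taken as success here. */
        printf("label %s: %s\n", label,
               rc == 0 ? "may run" : "rejected by policy");
    }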
48.1 --- a/tools/pygrub/src/pygrub Tue Apr 01 10:30:57 2008 -0600 48.2 +++ b/tools/pygrub/src/pygrub Tue Apr 01 11:29:03 2008 -0600 48.3 @@ -240,10 +240,10 @@ class Grub: 48.4 if y > self.start_image + maxy: 48.5 break 48.6 if y == self.selected_image: 48.7 - attr = curses.A_REVERSE 48.8 - else: 48.9 - attr = 0 48.10 - self.entry_win.addstr(y + 1 - self.start_image, 2, i.title.ljust(70), attr) 48.11 + self.entry_win.attron(curses.A_REVERSE) 48.12 + self.entry_win.addstr(y + 1 - self.start_image, 2, i.title.ljust(70)) 48.13 + if y == self.selected_image: 48.14 + self.entry_win.attroff(curses.A_REVERSE) 48.15 self.entry_win.refresh() 48.16 48.17 def edit_entry(self, origimg): 48.18 @@ -269,16 +269,17 @@ class Grub: 48.19 self.entry_win.box() 48.20 for idx in range(1, len(img.lines)): 48.21 # current line should be highlighted 48.22 - attr = 0 48.23 if idx == curline: 48.24 - attr = curses.A_REVERSE 48.25 + self.entry_win.attron(curses.A_REVERSE) 48.26 48.27 # trim the line 48.28 l = img.lines[idx].ljust(70) 48.29 if len(l) > 70: 48.30 l = l[:69] + ">" 48.31 48.32 - self.entry_win.addstr(idx, 2, l, attr) 48.33 + self.entry_win.addstr(idx, 2, l) 48.34 + if idx == curline: 48.35 + self.entry_win.attroff(curses.A_REVERSE) 48.36 self.entry_win.refresh() 48.37 48.38 c = self.screen.getch()
49.1 --- a/tools/python/xen/lowlevel/xc/xc.c Tue Apr 01 10:30:57 2008 -0600 49.2 +++ b/tools/python/xen/lowlevel/xc/xc.c Tue Apr 01 11:29:03 2008 -0600 49.3 @@ -762,11 +762,12 @@ static PyObject *pyxc_physinfo(XcObject 49.4 { 49.5 #define MAX_CPU_ID 255 49.6 xc_physinfo_t info; 49.7 - char cpu_cap[128], *p=cpu_cap, *q=cpu_cap; 49.8 + char cpu_cap[128], virt_caps[128], *p; 49.9 int i, j, max_cpu_id; 49.10 uint64_t free_heap; 49.11 PyObject *ret_obj, *node_to_cpu_obj, *node_to_memory_obj; 49.12 xc_cpu_to_node_t map[MAX_CPU_ID + 1]; 49.13 + const char *virtcap_names[] = { "hvm", "hvm_directio" }; 49.14 49.15 set_xen_guest_handle(info.cpu_to_node, map); 49.16 info.max_cpu_id = MAX_CPU_ID; 49.17 @@ -774,17 +775,21 @@ static PyObject *pyxc_physinfo(XcObject 49.18 if ( xc_physinfo(self->xc_handle, &info) != 0 ) 49.19 return pyxc_error_to_exception(); 49.20 49.21 - *q = 0; 49.22 + p = cpu_cap; 49.23 + *p = '\0'; 49.24 for ( i = 0; i < sizeof(info.hw_cap)/4; i++ ) 49.25 - { 49.26 p += sprintf(p, "%08x:", info.hw_cap[i]); 49.27 - if ( info.hw_cap[i] ) 49.28 - q = p; 49.29 - } 49.30 - if ( q > cpu_cap ) 49.31 - *(q-1) = 0; 49.32 + *(p-1) = 0; 49.33 49.34 - ret_obj = Py_BuildValue("{s:i,s:i,s:i,s:i,s:i,s:l,s:l,s:l,s:i,s:s}", 49.35 + p = virt_caps; 49.36 + *p = '\0'; 49.37 + for ( i = 0; i < 2; i++ ) 49.38 + if ( (info.capabilities >> i) & 1 ) 49.39 + p += sprintf(p, "%s ", virtcap_names[i]); 49.40 + if ( p != virt_caps ) 49.41 + *(p-1) = '\0'; 49.42 + 49.43 + ret_obj = Py_BuildValue("{s:i,s:i,s:i,s:i,s:i,s:l,s:l,s:l,s:i,s:s:s:s}", 49.44 "nr_nodes", info.nr_nodes, 49.45 "max_cpu_id", info.max_cpu_id, 49.46 "threads_per_core", info.threads_per_core, 49.47 @@ -794,7 +799,8 @@ static PyObject *pyxc_physinfo(XcObject 49.48 "free_memory", pages_to_kib(info.free_pages), 49.49 "scrub_memory", pages_to_kib(info.scrub_pages), 49.50 "cpu_khz", info.cpu_khz, 49.51 - "hw_caps", cpu_cap); 49.52 + "hw_caps", cpu_cap, 49.53 + "virt_caps", virt_caps); 49.54 49.55 max_cpu_id = info.max_cpu_id; 49.56 if ( max_cpu_id > MAX_CPU_ID )
50.1 --- a/tools/python/xen/util/xsconstants.py Tue Apr 01 10:30:57 2008 -0600 50.2 +++ b/tools/python/xen/util/xsconstants.py Tue Apr 01 11:29:03 2008 -0600 50.3 @@ -57,7 +57,9 @@ XSERR_POLICY_NOT_LOADED = 22 + XS 50.4 XSERR_RESOURCE_ACCESS = 23 + XSERR_BASE 50.5 XSERR_HV_OP_FAILED = 24 + XSERR_BASE 50.6 XSERR_BOOTPOLICY_INSTALL_ERROR = 25 + XSERR_BASE 50.7 -XSERR_LAST = 25 + XSERR_BASE ## KEEP LAST 50.8 +XSERR_VM_NOT_AUTHORIZED = 26 + XSERR_BASE 50.9 +XSERR_VM_IN_CONFLICT = 27 + XSERR_BASE 50.10 +XSERR_LAST = 27 + XSERR_BASE ## KEEP LAST 50.11 50.12 XSERR_MESSAGES = [ 50.13 '', 50.14 @@ -85,7 +87,9 @@ XSERR_MESSAGES = [ 50.15 'The policy is not loaded', 50.16 'Error accessing resource', 50.17 'Operation failed in hypervisor', 50.18 - 'Boot policy installation error' 50.19 + 'Boot policy installation error', 50.20 + 'VM is not authorized to run', 50.21 + 'VM label conflicts with another VM' 50.22 ] 50.23 50.24 def xserr2string(err):
51.1 --- a/tools/python/xen/util/xsm/acm/acm.py Tue Apr 01 10:30:57 2008 -0600 51.2 +++ b/tools/python/xen/util/xsm/acm/acm.py Tue Apr 01 11:29:03 2008 -0600 51.3 @@ -68,6 +68,7 @@ policy_name_re = re.compile(".*[chwall|s 51.4 #decision hooks known to the hypervisor 51.5 ACMHOOK_sharing = 1 51.6 ACMHOOK_authorization = 2 51.7 +ACMHOOK_conflictset = 3 51.8 51.9 #other global variables 51.10 NULL_SSIDREF = 0 51.11 @@ -373,7 +374,7 @@ def label2ssidref(labelname, policyname, 51.12 else: 51.13 return (sec_ssid[0] << 16) | pri_ssid[0] 51.14 finally: 51.15 - mapfile_unlock() 51.16 + mapfile_unlock() 51.17 51.18 51.19 def refresh_ssidref(config): 51.20 @@ -552,6 +553,18 @@ def hv_get_policy(): 51.21 return rc, bin_pol 51.22 51.23 51.24 +def is_in_conflict(ssidref): 51.25 + """ Check whether the given ssidref is in conflict with any running 51.26 + domain. 51.27 + """ 51.28 + decision = acm.getdecision('ssidref', str(ssidref), 51.29 + 'ssidref', str(ssidref), 51.30 + ACMHOOK_conflictset) 51.31 + if decision == "DENIED": 51.32 + return True 51.33 + return False 51.34 + 51.35 + 51.36 def set_policy(xs_type, xml, flags, overwrite): 51.37 """ 51.38 Xend exports this function via XML-RPC 51.39 @@ -1550,6 +1563,33 @@ def get_security_label(self, xspol=None) 51.40 return label 51.41 51.42 51.43 +def check_can_run(sec_label): 51.44 + """ Check whether a VM could run, given its vm label. A VM can run if 51.45 + - it is authorized 51.46 + - is not in conflict with any running domain 51.47 + """ 51.48 + try: 51.49 + mapfile_lock() 51.50 + 51.51 + if sec_label == None or sec_label == "": 51.52 + vm_label = ACM_LABEL_UNLABELED 51.53 + else: 51.54 + poltype, policy, vm_label = sec_label.split(':') 51.55 + if policy != get_active_policy_name(): 51.56 + return -xsconstants.XSERR_BAD_POLICY_NAME 51.57 + ssidref = label2ssidref(vm_label, policy, 'dom') 51.58 + if ssidref != xsconstants.INVALID_SSIDREF: 51.59 + if not has_authorization(ssidref): 51.60 + return -xsconstants.XSERR_VM_NOT_AUTHORIZED 51.61 + if is_in_conflict(ssidref): 51.62 + return -xsconstants.XSERR_VM_IN_CONFLICT 51.63 + return -xsconstants.XSERR_SUCCESS 51.64 + else: 51.65 + return -xsconstants.XSERR_BAD_LABEL 51.66 + finally: 51.67 + mapfile_unlock() 51.68 + 51.69 + 51.70 __cond = threading.Condition() 51.71 __script_runner = None 51.72 __orders = []
52.1 --- a/tools/python/xen/xend/XendBootloader.py Tue Apr 01 10:30:57 2008 -0600 52.2 +++ b/tools/python/xen/xend/XendBootloader.py Tue Apr 01 11:29:03 2008 -0600 52.3 @@ -12,7 +12,7 @@ 52.4 # Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 52.5 # 52.6 52.7 -import os, select, errno, stat, signal 52.8 +import os, select, errno, stat, signal, tty 52.9 import random 52.10 import shlex 52.11 from xen.xend import sxp 52.12 @@ -43,6 +43,9 @@ def bootloader(blexec, disk, dom, quiet 52.13 log.error(msg) 52.14 raise VmError(msg) 52.15 52.16 + if os.uname()[0] == "NetBSD" and disk.startswith('/dev/'): 52.17 + disk = disk.replace("/dev/", "/dev/r") 52.18 + 52.19 mkdir.parents("/var/run/xend/boot/", stat.S_IRWXU) 52.20 52.21 while True: 52.22 @@ -63,12 +66,8 @@ def bootloader(blexec, disk, dom, quiet 52.23 # where we copy characters between the two master fds, as well as 52.24 # listening on the bootloader's fifo for the results. 52.25 52.26 - # Termios runes for very raw access to the pty master fds. 52.27 - attr = [ 0, 0, termios.CS8 | termios.CREAD | termios.CLOCAL, 52.28 - 0, 0, 0, [0] * 32 ] 52.29 - 52.30 (m1, s1) = pty.openpty() 52.31 - termios.tcsetattr(m1, termios.TCSANOW, attr) 52.32 + tty.setraw(m1); 52.33 fcntl.fcntl(m1, fcntl.F_SETFL, os.O_NDELAY); 52.34 os.close(s1) 52.35 slavename = ptsname.ptsname(m1) 52.36 @@ -109,7 +108,7 @@ def bootloader(blexec, disk, dom, quiet 52.37 # record that this domain is bootloading 52.38 dom.bootloader_pid = child 52.39 52.40 - termios.tcsetattr(m2, termios.TCSANOW, attr) 52.41 + tty.setraw(m2); 52.42 fcntl.fcntl(m2, fcntl.F_SETFL, os.O_NDELAY); 52.43 while True: 52.44 try:
53.1 --- a/tools/python/xen/xend/XendDomainInfo.py Tue Apr 01 10:30:57 2008 -0600 53.2 +++ b/tools/python/xen/xend/XendDomainInfo.py Tue Apr 01 11:29:03 2008 -0600 53.3 @@ -2047,11 +2047,10 @@ class XendDomainInfo: 53.4 53.5 try: 53.6 if self.info['platform'].get('localtime', 0): 53.7 - t = time.time() 53.8 - loc = time.localtime(t) 53.9 - utc = time.gmtime(t) 53.10 - timeoffset = int(time.mktime(loc) - time.mktime(utc)) 53.11 - self.info['platform']['rtc_timeoffset'] = timeoffset 53.12 + if time.localtime(time.time())[8]: 53.13 + self.info['platform']['rtc_timeoffset'] = -time.altzone 53.14 + else: 53.15 + self.info['platform']['rtc_timeoffset'] = -time.timezone 53.16 53.17 self.image = image.create(self, self.info) 53.18
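The rtc_timeoffset change above fixes a subtle bug: the old code fed both the localtime() and gmtime() results through time.mktime(), which interprets its argument as local time, so the computed UTC offset could be wrong around DST transitions. The new code instead reads the library's own zone data, picking time.altzone or time.timezone on the tm_isdst flag. A C analogue of the same idea, using the tm_gmtoff field available on glibc and the BSDs (an assumption; plain POSIX would combine tzset() with the `timezone` and `daylight` globals):

    #define _GNU_SOURCE     /* for tm_gmtoff on glibc */
    #include <time.h>

    static long rtc_timeoffset(void)
    {
        time_t now = time(NULL);
        struct tm tm;

        tzset();
        localtime_r(&now, &tm);
        return tm.tm_gmtoff;    /* seconds east of UTC, DST included */
    }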
54.1 --- a/tools/python/xen/xend/XendNode.py Tue Apr 01 10:30:57 2008 -0600 54.2 +++ b/tools/python/xen/xend/XendNode.py Tue Apr 01 11:29:03 2008 -0600 54.3 @@ -92,6 +92,7 @@ class XendNode: 54.4 physinfo = self.physinfo_dict() 54.5 cpu_count = physinfo['nr_cpus'] 54.6 cpu_features = physinfo['hw_caps'] 54.7 + virt_caps = physinfo['virt_caps'] 54.8 54.9 # If the number of CPUs don't match, we should just reinitialise 54.10 # the CPU UUIDs. 54.11 @@ -112,6 +113,7 @@ class XendNode: 54.12 self.cpus[u].update( 54.13 { 'host' : self.uuid, 54.14 'features' : cpu_features, 54.15 + 'virt_caps': virt_caps, 54.16 'speed' : int(float(cpuinfo[number]['cpu MHz'])), 54.17 'vendor' : cpuinfo[number]['vendor_id'], 54.18 'modelname': cpuinfo[number]['model name'], 54.19 @@ -605,6 +607,7 @@ class XendNode: 54.20 'threads_per_core', 54.21 'cpu_mhz', 54.22 'hw_caps', 54.23 + 'virt_caps', 54.24 'total_memory', 54.25 'free_memory', 54.26 'node_to_cpu',
55.1 --- a/tools/python/xen/xend/XendXSPolicy.py Tue Apr 01 10:30:57 2008 -0600 55.2 +++ b/tools/python/xen/xend/XendXSPolicy.py Tue Apr 01 11:29:03 2008 -0600 55.3 @@ -48,7 +48,8 @@ class XendXSPolicy(XendBase): 55.4 'rm_xsbootpolicy', 55.5 'get_resource_label', 55.6 'set_resource_label', 55.7 - 'get_labeled_resources' ] 55.8 + 'get_labeled_resources', 55.9 + 'can_run' ] 55.10 return XendBase.getFuncs() + funcs 55.11 55.12 getClass = classmethod(getClass) 55.13 @@ -190,6 +191,12 @@ class XendXSPolicy(XendBase): 55.14 res = security.get_resource_label_xapi(resource) 55.15 return res 55.16 55.17 + def can_run(self, sec_label): 55.18 + irc = security.validate_label_xapi(sec_label, 'dom') 55.19 + if irc != xsconstants.XSERR_SUCCESS: 55.20 + raise SecurityError(irc) 55.21 + return security.check_can_run(sec_label) 55.22 + 55.23 get_xstype = classmethod(get_xstype) 55.24 get_xspolicy = classmethod(get_xspolicy) 55.25 set_xspolicy = classmethod(set_xspolicy) 55.26 @@ -198,6 +205,7 @@ class XendXSPolicy(XendBase): 55.27 set_resource_label = classmethod(set_resource_label) 55.28 get_resource_label = classmethod(get_resource_label) 55.29 get_labeled_resources = classmethod(get_labeled_resources) 55.30 + can_run = classmethod(can_run) 55.31 55.32 55.33 class XendACMPolicy(XendXSPolicy):
56.1 --- a/tools/python/xen/xend/server/vfbif.py Tue Apr 01 10:30:57 2008 -0600 56.2 +++ b/tools/python/xen/xend/server/vfbif.py Tue Apr 01 11:29:03 2008 -0600 56.3 @@ -6,7 +6,7 @@ import xen.xend 56.4 import os 56.5 56.6 CONFIG_ENTRIES = ['type', 'vncdisplay', 'vnclisten', 'vncpasswd', 'vncunused', 56.7 - 'display', 'xauthority', 'keymap', 56.8 + 'videoram', 'display', 'xauthority', 'keymap', 56.9 'uuid', 'location', 'protocol', 'opengl'] 56.10 56.11 class VfbifController(DevController):
57.1 --- a/tools/python/xen/xm/XenAPI.py Tue Apr 01 10:30:57 2008 -0600 57.2 +++ b/tools/python/xen/xm/XenAPI.py Tue Apr 01 11:29:03 2008 -0600 57.3 @@ -64,6 +64,7 @@ errormap = { 57.4 "HANDLE_INVALID": N_("The %(1)s handle %(2)s is invalid."), 57.5 "OPERATION_NOT_ALLOWED": N_("You attempted an operation that was not allowed."), 57.6 "NETWORK_ALREADY_CONNECTED": N_("The network you specified already has a PIF attached to it, and so another one may not be attached."), 57.7 + "SECURITY_ERROR": N_("%(2)s"), 57.8 } 57.9 57.10 translation = gettext.translation('xen-xm', fallback = True)
58.1 --- a/tools/python/xen/xm/create.py Tue Apr 01 10:30:57 2008 -0600 58.2 +++ b/tools/python/xen/xm/create.py Tue Apr 01 11:29:03 2008 -0600 58.3 @@ -500,6 +500,11 @@ gopts.var('vncunused', val='', 58.4 use="""Try to find an unused port for the VNC server. 58.5 Only valid when vnc=1.""") 58.6 58.7 +gopts.var('videoram', val='', 58.8 + fn=set_value, default=None, 58.9 + use="""Maximum amount of videoram PV guest can allocate 58.10 + for frame buffer.""") 58.11 + 58.12 gopts.var('sdl', val='', 58.13 fn=set_value, default=None, 58.14 use="""Should the device model use SDL?""") 58.15 @@ -645,7 +650,8 @@ def configure_vfbs(config_devs, vals): 58.16 d['type'] = 'sdl' 58.17 for (k,v) in d.iteritems(): 58.18 if not k in [ 'vnclisten', 'vncunused', 'vncdisplay', 'display', 58.19 - 'xauthority', 'type', 'vncpasswd', 'opengl' ]: 58.20 + 'videoram', 'xauthority', 'type', 'vncpasswd', 58.21 + 'opengl' ]: 58.22 err("configuration option %s unknown to vfbs" % k) 58.23 config.append([k,v]) 58.24 if not d.has_key("keymap"):
59.1 --- a/tools/python/xen/xm/messages/xen-xm.pot Tue Apr 01 10:30:57 2008 -0600 59.2 +++ b/tools/python/xen/xm/messages/xen-xm.pot Tue Apr 01 11:29:03 2008 -0600 59.3 @@ -8,7 +8,7 @@ msgid "" 59.4 msgstr "" 59.5 "Project-Id-Version: PACKAGE VERSION\n" 59.6 "Report-Msgid-Bugs-To: \n" 59.7 -"POT-Creation-Date: 2007-04-05 14:17-0400\n" 59.8 +"POT-Creation-Date: 2008-03-31 17:40+0100\n" 59.9 "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" 59.10 "Last-Translator: FULL NAME <EMAIL@ADDRESS>\n" 59.11 "Language-Team: LANGUAGE <LL@li.org>\n" 59.12 @@ -61,3 +61,8 @@ msgid "" 59.13 "The network you specified already has a PIF attached to it, and so another " 59.14 "one may not be attached." 59.15 msgstr "" 59.16 + 59.17 +#: xen/xm/XenAPI.py:67 59.18 +#, python-format 59.19 +msgid "%(2)s" 59.20 +msgstr ""
60.1 --- a/tools/tests/Makefile Tue Apr 01 10:30:57 2008 -0600 60.2 +++ b/tools/tests/Makefile Tue Apr 01 11:29:03 2008 -0600 60.3 @@ -21,13 +21,17 @@ blowfish.h: blowfish.bin 60.4 60.5 .PHONY: clean 60.6 clean: 60.7 - rm -rf $(TARGET) *.o *~ core blowfish.h blowfish.bin 60.8 + rm -rf $(TARGET) *.o *~ core blowfish.h blowfish.bin x86_emulate 60.9 60.10 .PHONY: install 60.11 install: 60.12 60.13 -x86_emulate.o: $(XEN_ROOT)/xen/arch/x86/x86_emulate.c 60.14 +.PHONY: x86_emulate 60.15 +x86_emulate: 60.16 + [ -L x86_emulate ] || ln -sf $(XEN_ROOT)/xen/arch/x86/x86_emulate . 60.17 + 60.18 +x86_emulate.o: x86_emulate.c x86_emulate 60.19 $(HOSTCC) $(HOSTCFLAGS) -I$(XEN_ROOT)/xen/include -c -o $@ $< 60.20 60.21 -test_x86_emulator.o: test_x86_emulator.c blowfish.h 60.22 +test_x86_emulator.o: test_x86_emulator.c blowfish.h x86_emulate 60.23 $(HOSTCC) $(HOSTCFLAGS) -I$(XEN_ROOT)/xen/include -c -o $@ $<
61.1 --- a/tools/tests/test_x86_emulator.c Tue Apr 01 10:30:57 2008 -0600 61.2 +++ b/tools/tests/test_x86_emulator.c Tue Apr 01 11:29:03 2008 -0600 61.3 @@ -1,20 +1,11 @@ 61.4 - 61.5 #include <stdio.h> 61.6 #include <stdlib.h> 61.7 #include <string.h> 61.8 #include <stdint.h> 61.9 -typedef uint8_t u8; 61.10 -typedef uint16_t u16; 61.11 -typedef uint32_t u32; 61.12 -typedef uint64_t u64; 61.13 -typedef int8_t s8; 61.14 -typedef int16_t s16; 61.15 -typedef int32_t s32; 61.16 -typedef int64_t s64; 61.17 #include <public/xen.h> 61.18 -#include <asm-x86/x86_emulate.h> 61.19 #include <sys/mman.h> 61.20 61.21 +#include "x86_emulate/x86_emulate.h" 61.22 #include "blowfish.h" 61.23 61.24 #define MMAP_SZ 16384 61.25 @@ -38,9 +29,9 @@ static int read( 61.26 unsigned long addr = offset; 61.27 switch ( bytes ) 61.28 { 61.29 - case 1: *val = *(u8 *)addr; break; 61.30 - case 2: *val = *(u16 *)addr; break; 61.31 - case 4: *val = *(u32 *)addr; break; 61.32 + case 1: *val = *(uint8_t *)addr; break; 61.33 + case 2: *val = *(uint16_t *)addr; break; 61.34 + case 4: *val = *(uint32_t *)addr; break; 61.35 case 8: *val = *(unsigned long *)addr; break; 61.36 } 61.37 return X86EMUL_OKAY; 61.38 @@ -56,9 +47,9 @@ static int write( 61.39 unsigned long addr = offset; 61.40 switch ( bytes ) 61.41 { 61.42 - case 1: *(u8 *)addr = (u8)val; break; 61.43 - case 2: *(u16 *)addr = (u16)val; break; 61.44 - case 4: *(u32 *)addr = (u32)val; break; 61.45 + case 1: *(uint8_t *)addr = (uint8_t)val; break; 61.46 + case 2: *(uint16_t *)addr = (uint16_t)val; break; 61.47 + case 4: *(uint32_t *)addr = (uint32_t)val; break; 61.48 case 8: *(unsigned long *)addr = val; break; 61.49 } 61.50 return X86EMUL_OKAY; 61.51 @@ -75,9 +66,9 @@ static int cmpxchg( 61.52 unsigned long addr = offset; 61.53 switch ( bytes ) 61.54 { 61.55 - case 1: *(u8 *)addr = (u8)new; break; 61.56 - case 2: *(u16 *)addr = (u16)new; break; 61.57 - case 4: *(u32 *)addr = (u32)new; break; 61.58 + case 1: *(uint8_t *)addr = (uint8_t)new; break; 61.59 + case 2: *(uint16_t *)addr = (uint16_t)new; break; 61.60 + case 4: *(uint32_t *)addr = (uint32_t)new; break; 61.61 case 8: *(unsigned long *)addr = new; break; 61.62 } 61.63 return X86EMUL_OKAY;
62.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 62.2 +++ b/tools/tests/x86_emulate.c Tue Apr 01 11:29:03 2008 -0600 62.3 @@ -0,0 +1,13 @@ 62.4 +#include <stddef.h> 62.5 +#include <stdint.h> 62.6 +#include <string.h> 62.7 +#include <public/xen.h> 62.8 + 62.9 +#include "x86_emulate/x86_emulate.h" 62.10 + 62.11 +#define __emulate_fpu_insn(_op) \ 62.12 +do{ rc = X86EMUL_UNHANDLEABLE; \ 62.13 + goto done; \ 62.14 +} while (0) 62.15 + 62.16 +#include "x86_emulate/x86_emulate.c"
65.1 --- a/xen/arch/ia64/xen/dom0_ops.c Tue Apr 01 10:30:57 2008 -0600 65.2 +++ b/xen/arch/ia64/xen/dom0_ops.c Tue Apr 01 11:29:03 2008 -0600 65.3 @@ -410,6 +410,7 @@ long arch_do_sysctl(xen_sysctl_t *op, XE 65.4 65.5 xen_sysctl_physinfo_t *pi = &op->u.physinfo; 65.6 65.7 + memset(pi, 0, sizeof(*pi)); 65.8 pi->threads_per_core = cpus_weight(cpu_sibling_map[0]); 65.9 pi->cores_per_socket = 65.10 cpus_weight(cpu_core_map[0]) / pi->threads_per_core; 65.11 @@ -419,7 +420,6 @@ long arch_do_sysctl(xen_sysctl_t *op, XE 65.12 pi->free_pages = avail_domheap_pages(); 65.13 pi->scrub_pages = avail_scrub_pages(); 65.14 pi->cpu_khz = local_cpu_data->proc_freq / 1000; 65.15 - memset(pi->hw_cap, 0, sizeof(pi->hw_cap)); 65.16 65.17 max_array_ent = pi->max_cpu_id; 65.18 pi->max_cpu_id = last_cpu(cpu_online_map);
66.1 --- a/xen/arch/ia64/xen/dom_fw_common.c Tue Apr 01 10:30:57 2008 -0600 66.2 +++ b/xen/arch/ia64/xen/dom_fw_common.c Tue Apr 01 11:29:03 2008 -0600 66.3 @@ -20,7 +20,7 @@ 66.4 #include <assert.h> 66.5 #include <inttypes.h> 66.6 66.7 -#include <xen/arch-ia64.h> 66.8 +#include <xen/xen.h> 66.9 #include <asm/bundle.h> 66.10 66.11 #include "xg_private.h"
67.1 --- a/xen/arch/ia64/xen/dom_fw_domu.c Tue Apr 01 10:30:57 2008 -0600 67.2 +++ b/xen/arch/ia64/xen/dom_fw_domu.c Tue Apr 01 11:29:03 2008 -0600 67.3 @@ -37,7 +37,7 @@ 67.4 #include <errno.h> 67.5 #include <inttypes.h> 67.6 67.7 -#include <xen/arch-ia64.h> 67.8 +#include <xen/xen.h> 67.9 67.10 #include "xg_private.h" 67.11 #include "xc_dom.h"
68.1 --- a/xen/arch/powerpc/sysctl.c Tue Apr 01 10:30:57 2008 -0600 68.2 +++ b/xen/arch/powerpc/sysctl.c Tue Apr 01 11:29:03 2008 -0600 68.3 @@ -41,6 +41,7 @@ long arch_do_sysctl(struct xen_sysctl *s 68.4 { 68.5 xen_sysctl_physinfo_t *pi = &sysctl->u.physinfo; 68.6 68.7 + memset(pi, 0, sizeof(*pi)); 68.8 pi->threads_per_core = 68.9 cpus_weight(cpu_sibling_map[0]); 68.10 pi->cores_per_socket = 68.11 @@ -50,10 +51,7 @@ long arch_do_sysctl(struct xen_sysctl *s 68.12 pi->total_pages = total_pages; 68.13 pi->free_pages = avail_domheap_pages(); 68.14 pi->cpu_khz = cpu_khz; 68.15 - memset(pi->hw_cap, 0, sizeof(pi->hw_cap)); 68.16 - ret = 0; 68.17 - if ( copy_to_guest(u_sysctl, sysctl, 1) ) 68.18 - ret = -EFAULT; 68.19 + ret = copy_to_guest(u_sysctl, sysctl, 1) ? -EFAULT : 0; 68.20 } 68.21 break; 68.22
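Both physinfo hunks (the ia64 one above and the powerpc one here) replace the targeted memset of hw_cap with a memset of the entire reply structure before any field is filled in. The point is twofold: fields added to the public interface later, such as the capabilities word the python bindings now surface as virt_caps, read as zero on architectures that do not set them, and no uninitialized hypervisor stack bytes are copied back to the toolstack. The idiom in miniature (illustrative struct, not the real xen_sysctl_physinfo_t):

    #include <stdint.h>
    #include <string.h>

    struct physinfo {
        uint32_t threads_per_core, cores_per_socket, nr_nodes;
        uint32_t capabilities;      /* newer field: must not be garbage */
        uint32_t hw_cap[8];
    };

    static void fill_physinfo(struct physinfo *pi)
    {
        memset(pi, 0, sizeof(*pi)); /* every field defined from here on */
        pi->threads_per_core = 1;   /* then set what this arch knows    */
        pi->cores_per_socket = 1;
        pi->nr_nodes = 1;
    }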
69.1 --- a/xen/arch/x86/boot/trampoline.S Tue Apr 01 10:30:57 2008 -0600 69.2 +++ b/xen/arch/x86/boot/trampoline.S Tue Apr 01 11:29:03 2008 -0600 69.3 @@ -156,9 +156,12 @@ 1: mov $(BOOT_TRAMPOLINE>>4),%a 69.4 sti 69.5 69.6 #if defined(__x86_64__) 69.7 - /* Declare that our target operating mode is long mode. */ 69.8 - movw $0xec00,%ax # declare target operating mode 69.9 - movw $0x0002,%bx # long mode 69.10 + /* 69.11 + * Declare that our target operating mode is long mode. 69.12 + * Initialise 32-bit registers since some buggy BIOSes depend on it. 69.13 + */ 69.14 + movl $0xec00,%eax # declare target operating mode 69.15 + movl $0x0002,%ebx # long mode 69.16 int $0x15 69.17 #endif 69.18
70.1 --- a/xen/arch/x86/hvm/emulate.c Tue Apr 01 10:30:57 2008 -0600 70.2 +++ b/xen/arch/x86/hvm/emulate.c Tue Apr 01 11:29:03 2008 -0600 70.3 @@ -19,23 +19,93 @@ 70.4 #include <asm/hvm/hvm.h> 70.5 #include <asm/hvm/support.h> 70.6 70.7 +static int hvmemul_do_io( 70.8 + int is_mmio, paddr_t addr, unsigned long count, int size, 70.9 + paddr_t value, int dir, int df, int value_is_ptr, unsigned long *val) 70.10 +{ 70.11 + struct vcpu *curr = current; 70.12 + vcpu_iodata_t *vio = get_ioreq(curr); 70.13 + ioreq_t *p = &vio->vp_ioreq; 70.14 + 70.15 + switch ( curr->arch.hvm_vcpu.io_state ) 70.16 + { 70.17 + case HVMIO_none: 70.18 + break; 70.19 + case HVMIO_completed: 70.20 + curr->arch.hvm_vcpu.io_state = HVMIO_none; 70.21 + if ( val == NULL ) 70.22 + return X86EMUL_UNHANDLEABLE; 70.23 + *val = curr->arch.hvm_vcpu.io_data; 70.24 + return X86EMUL_OKAY; 70.25 + default: 70.26 + return X86EMUL_UNHANDLEABLE; 70.27 + } 70.28 + 70.29 + curr->arch.hvm_vcpu.io_state = 70.30 + (val == NULL) ? HVMIO_dispatched : HVMIO_awaiting_completion; 70.31 + 70.32 + if ( p->state != STATE_IOREQ_NONE ) 70.33 + gdprintk(XENLOG_WARNING, "WARNING: io already pending (%d)?\n", 70.34 + p->state); 70.35 + 70.36 + p->dir = dir; 70.37 + p->data_is_ptr = value_is_ptr; 70.38 + p->type = is_mmio ? IOREQ_TYPE_COPY : IOREQ_TYPE_PIO; 70.39 + p->size = size; 70.40 + p->addr = addr; 70.41 + p->count = count; 70.42 + p->df = df; 70.43 + p->data = value; 70.44 + p->io_count++; 70.45 + 70.46 + if ( is_mmio 70.47 + ? (hvm_mmio_intercept(p) || hvm_buffered_io_intercept(p)) 70.48 + : hvm_portio_intercept(p) ) 70.49 + { 70.50 + p->state = STATE_IORESP_READY; 70.51 + hvm_io_assist(); 70.52 + if ( val != NULL ) 70.53 + *val = curr->arch.hvm_vcpu.io_data; 70.54 + curr->arch.hvm_vcpu.io_state = HVMIO_none; 70.55 + return X86EMUL_OKAY; 70.56 + } 70.57 + 70.58 + hvm_send_assist_req(curr); 70.59 + return (val != NULL) ? X86EMUL_RETRY : X86EMUL_OKAY; 70.60 +} 70.61 + 70.62 +static int hvmemul_do_pio( 70.63 + unsigned long port, unsigned long count, int size, 70.64 + paddr_t value, int dir, int df, int value_is_ptr, unsigned long *val) 70.65 +{ 70.66 + return hvmemul_do_io(0, port, count, size, value, 70.67 + dir, df, value_is_ptr, val); 70.68 +} 70.69 + 70.70 +static int hvmemul_do_mmio( 70.71 + paddr_t gpa, unsigned long count, int size, 70.72 + paddr_t value, int dir, int df, int value_is_ptr, unsigned long *val) 70.73 +{ 70.74 + return hvmemul_do_io(1, gpa, count, size, value, 70.75 + dir, df, value_is_ptr, val); 70.76 +} 70.77 + 70.78 /* 70.79 * Convert addr from linear to physical form, valid over the range 70.80 * [addr, addr + *reps * bytes_per_rep]. *reps is adjusted according to 70.81 * the valid computed range. It is always >0 when X86EMUL_OKAY is returned. 70.82 + * @pfec indicates the access checks to be performed during page-table walks. 70.83 */ 70.84 static int hvmemul_linear_to_phys( 70.85 unsigned long addr, 70.86 paddr_t *paddr, 70.87 unsigned int bytes_per_rep, 70.88 unsigned long *reps, 70.89 - enum hvm_access_type access_type, 70.90 + uint32_t pfec, 70.91 struct hvm_emulate_ctxt *hvmemul_ctxt) 70.92 { 70.93 struct vcpu *curr = current; 70.94 unsigned long pfn, npfn, done, todo, i; 70.95 - struct segment_register *sreg; 70.96 - uint32_t pfec; 70.97 70.98 /* Clip repetitions to a sensible maximum. */ 70.99 *reps = min_t(unsigned long, *reps, 4096); 70.100 @@ -49,14 +119,6 @@ static int hvmemul_linear_to_phys( 70.101 70.102 *paddr = addr & ~PAGE_MASK; 70.103 70.104 - /* Gather access-type information for the page walks. 
*/ 70.105 - sreg = hvmemul_get_seg_reg(x86_seg_ss, hvmemul_ctxt); 70.106 - pfec = PFEC_page_present; 70.107 - if ( sreg->attr.fields.dpl == 3 ) 70.108 - pfec |= PFEC_user_mode; 70.109 - if ( access_type == hvm_access_write ) 70.110 - pfec |= PFEC_write_access; 70.111 - 70.112 /* Get the first PFN in the range. */ 70.113 if ( (pfn = paging_gva_to_gfn(curr, addr, &pfec)) == INVALID_GFN ) 70.114 { 70.115 @@ -74,18 +136,19 @@ static int hvmemul_linear_to_phys( 70.116 for ( i = 1; done < todo; i++ ) 70.117 { 70.118 /* Get the next PFN in the range. */ 70.119 - if ( (npfn = paging_gva_to_gfn(curr, addr, &pfec)) == INVALID_GFN ) 70.120 - { 70.121 - hvm_inject_exception(TRAP_page_fault, pfec, addr); 70.122 - return X86EMUL_EXCEPTION; 70.123 - } 70.124 + npfn = paging_gva_to_gfn(curr, addr, &pfec); 70.125 70.126 /* Is it contiguous with the preceding PFNs? If not then we're done. */ 70.127 - if ( npfn != (pfn + i) ) 70.128 + if ( (npfn == INVALID_GFN) || (npfn != (pfn + i)) ) 70.129 { 70.130 done /= bytes_per_rep; 70.131 if ( done == 0 ) 70.132 - return X86EMUL_UNHANDLEABLE; 70.133 + { 70.134 + if ( npfn != INVALID_GFN ) 70.135 + return X86EMUL_UNHANDLEABLE; 70.136 + hvm_inject_exception(TRAP_page_fault, pfec, addr); 70.137 + return X86EMUL_EXCEPTION; 70.138 + } 70.139 *reps = done; 70.140 break; 70.141 } 70.142 @@ -142,7 +205,10 @@ static int __hvmemul_read( 70.143 enum hvm_access_type access_type, 70.144 struct hvm_emulate_ctxt *hvmemul_ctxt) 70.145 { 70.146 + struct vcpu *curr = current; 70.147 unsigned long addr; 70.148 + uint32_t pfec = PFEC_page_present; 70.149 + paddr_t gpa; 70.150 int rc; 70.151 70.152 rc = hvmemul_virtual_to_linear( 70.153 @@ -152,41 +218,40 @@ static int __hvmemul_read( 70.154 70.155 *val = 0; 70.156 70.157 + if ( unlikely(curr->arch.hvm_vcpu.mmio_gva == (addr & PAGE_MASK)) && 70.158 + curr->arch.hvm_vcpu.mmio_gva ) 70.159 + { 70.160 + unsigned int off = addr & (PAGE_SIZE - 1); 70.161 + if ( access_type == hvm_access_insn_fetch ) 70.162 + return X86EMUL_UNHANDLEABLE; 70.163 + gpa = (((paddr_t)curr->arch.hvm_vcpu.mmio_gpfn << PAGE_SHIFT) | off); 70.164 + if ( (off + bytes) <= PAGE_SIZE ) 70.165 + return hvmemul_do_mmio(gpa, 1, bytes, 0, IOREQ_READ, 0, 0, val); 70.166 + } 70.167 + 70.168 + if ( (seg != x86_seg_none) && 70.169 + (hvmemul_ctxt->seg_reg[x86_seg_ss].attr.fields.dpl == 3) ) 70.170 + pfec |= PFEC_user_mode; 70.171 + 70.172 rc = ((access_type == hvm_access_insn_fetch) ? 
70.173 - hvm_fetch_from_guest_virt(val, addr, bytes) : 70.174 - hvm_copy_from_guest_virt(val, addr, bytes)); 70.175 + hvm_fetch_from_guest_virt(val, addr, bytes, pfec) : 70.176 + hvm_copy_from_guest_virt(val, addr, bytes, pfec)); 70.177 if ( rc == HVMCOPY_bad_gva_to_gfn ) 70.178 return X86EMUL_EXCEPTION; 70.179 70.180 if ( rc == HVMCOPY_bad_gfn_to_mfn ) 70.181 { 70.182 - struct vcpu *curr = current; 70.183 unsigned long reps = 1; 70.184 - paddr_t gpa; 70.185 70.186 if ( access_type == hvm_access_insn_fetch ) 70.187 return X86EMUL_UNHANDLEABLE; 70.188 70.189 rc = hvmemul_linear_to_phys( 70.190 - addr, &gpa, bytes, &reps, access_type, hvmemul_ctxt); 70.191 + addr, &gpa, bytes, &reps, pfec, hvmemul_ctxt); 70.192 if ( rc != X86EMUL_OKAY ) 70.193 return rc; 70.194 70.195 - if ( curr->arch.hvm_vcpu.io_in_progress ) 70.196 - return X86EMUL_UNHANDLEABLE; 70.197 - 70.198 - if ( !curr->arch.hvm_vcpu.io_completed ) 70.199 - { 70.200 - curr->arch.hvm_vcpu.io_in_progress = 1; 70.201 - send_mmio_req(IOREQ_TYPE_COPY, gpa, 1, bytes, 70.202 - 0, IOREQ_READ, 0, 0); 70.203 - } 70.204 - 70.205 - if ( !curr->arch.hvm_vcpu.io_completed ) 70.206 - return X86EMUL_RETRY; 70.207 - 70.208 - *val = curr->arch.hvm_vcpu.io_data; 70.209 - curr->arch.hvm_vcpu.io_completed = 0; 70.210 + return hvmemul_do_mmio(gpa, 1, bytes, 0, IOREQ_READ, 0, 0, val); 70.211 } 70.212 70.213 return X86EMUL_OKAY; 70.214 @@ -236,7 +301,10 @@ static int hvmemul_write( 70.215 { 70.216 struct hvm_emulate_ctxt *hvmemul_ctxt = 70.217 container_of(ctxt, struct hvm_emulate_ctxt, ctxt); 70.218 + struct vcpu *curr = current; 70.219 unsigned long addr; 70.220 + uint32_t pfec = PFEC_page_present | PFEC_write_access; 70.221 + paddr_t gpa; 70.222 int rc; 70.223 70.224 rc = hvmemul_virtual_to_linear( 70.225 @@ -244,27 +312,34 @@ static int hvmemul_write( 70.226 if ( rc != X86EMUL_OKAY ) 70.227 return rc; 70.228 70.229 - rc = hvm_copy_to_guest_virt(addr, &val, bytes); 70.230 + if ( unlikely(curr->arch.hvm_vcpu.mmio_gva == (addr & PAGE_MASK)) && 70.231 + curr->arch.hvm_vcpu.mmio_gva ) 70.232 + { 70.233 + unsigned int off = addr & (PAGE_SIZE - 1); 70.234 + gpa = (((paddr_t)curr->arch.hvm_vcpu.mmio_gpfn << PAGE_SHIFT) | off); 70.235 + if ( (off + bytes) <= PAGE_SIZE ) 70.236 + return hvmemul_do_mmio(gpa, 1, bytes, val, IOREQ_WRITE, 70.237 + 0, 0, NULL); 70.238 + } 70.239 + 70.240 + if ( (seg != x86_seg_none) && 70.241 + (hvmemul_ctxt->seg_reg[x86_seg_ss].attr.fields.dpl == 3) ) 70.242 + pfec |= PFEC_user_mode; 70.243 + 70.244 + rc = hvm_copy_to_guest_virt(addr, &val, bytes, pfec); 70.245 if ( rc == HVMCOPY_bad_gva_to_gfn ) 70.246 return X86EMUL_EXCEPTION; 70.247 70.248 if ( rc == HVMCOPY_bad_gfn_to_mfn ) 70.249 { 70.250 - struct vcpu *curr = current; 70.251 unsigned long reps = 1; 70.252 - paddr_t gpa; 70.253 70.254 rc = hvmemul_linear_to_phys( 70.255 - addr, &gpa, bytes, &reps, hvm_access_write, hvmemul_ctxt); 70.256 + addr, &gpa, bytes, &reps, pfec, hvmemul_ctxt); 70.257 if ( rc != X86EMUL_OKAY ) 70.258 return rc; 70.259 70.260 - if ( curr->arch.hvm_vcpu.io_in_progress ) 70.261 - return X86EMUL_UNHANDLEABLE; 70.262 - 70.263 - curr->arch.hvm_vcpu.io_in_progress = 1; 70.264 - send_mmio_req(IOREQ_TYPE_COPY, gpa, 1, bytes, 70.265 - val, IOREQ_WRITE, 0, 0); 70.266 + return hvmemul_do_mmio(gpa, 1, bytes, val, IOREQ_WRITE, 0, 0, NULL); 70.267 } 70.268 70.269 return X86EMUL_OKAY; 70.270 @@ -292,8 +367,8 @@ static int hvmemul_rep_ins( 70.271 { 70.272 struct hvm_emulate_ctxt *hvmemul_ctxt = 70.273 container_of(ctxt, struct hvm_emulate_ctxt, ctxt); 70.274 - struct vcpu 
*curr = current; 70.275 unsigned long addr; 70.276 + uint32_t pfec = PFEC_page_present | PFEC_write_access; 70.277 paddr_t gpa; 70.278 int rc; 70.279 70.280 @@ -303,19 +378,16 @@ static int hvmemul_rep_ins( 70.281 if ( rc != X86EMUL_OKAY ) 70.282 return rc; 70.283 70.284 + if ( hvmemul_ctxt->seg_reg[x86_seg_ss].attr.fields.dpl == 3 ) 70.285 + pfec |= PFEC_user_mode; 70.286 + 70.287 rc = hvmemul_linear_to_phys( 70.288 - addr, &gpa, bytes_per_rep, reps, hvm_access_write, hvmemul_ctxt); 70.289 + addr, &gpa, bytes_per_rep, reps, pfec, hvmemul_ctxt); 70.290 if ( rc != X86EMUL_OKAY ) 70.291 return rc; 70.292 70.293 - if ( curr->arch.hvm_vcpu.io_in_progress ) 70.294 - return X86EMUL_UNHANDLEABLE; 70.295 - 70.296 - curr->arch.hvm_vcpu.io_in_progress = 1; 70.297 - send_pio_req(src_port, *reps, bytes_per_rep, gpa, IOREQ_READ, 70.298 - !!(ctxt->regs->eflags & X86_EFLAGS_DF), 1); 70.299 - 70.300 - return X86EMUL_OKAY; 70.301 + return hvmemul_do_pio(src_port, *reps, bytes_per_rep, gpa, IOREQ_READ, 70.302 + !!(ctxt->regs->eflags & X86_EFLAGS_DF), 1, NULL); 70.303 } 70.304 70.305 static int hvmemul_rep_outs( 70.306 @@ -328,8 +400,8 @@ static int hvmemul_rep_outs( 70.307 { 70.308 struct hvm_emulate_ctxt *hvmemul_ctxt = 70.309 container_of(ctxt, struct hvm_emulate_ctxt, ctxt); 70.310 - struct vcpu *curr = current; 70.311 unsigned long addr; 70.312 + uint32_t pfec = PFEC_page_present; 70.313 paddr_t gpa; 70.314 int rc; 70.315 70.316 @@ -339,20 +411,16 @@ static int hvmemul_rep_outs( 70.317 if ( rc != X86EMUL_OKAY ) 70.318 return rc; 70.319 70.320 + if ( hvmemul_ctxt->seg_reg[x86_seg_ss].attr.fields.dpl == 3 ) 70.321 + pfec |= PFEC_user_mode; 70.322 + 70.323 rc = hvmemul_linear_to_phys( 70.324 - addr, &gpa, bytes_per_rep, reps, hvm_access_read, hvmemul_ctxt); 70.325 + addr, &gpa, bytes_per_rep, reps, pfec, hvmemul_ctxt); 70.326 if ( rc != X86EMUL_OKAY ) 70.327 return rc; 70.328 70.329 - if ( curr->arch.hvm_vcpu.io_in_progress ) 70.330 - return X86EMUL_UNHANDLEABLE; 70.331 - 70.332 - curr->arch.hvm_vcpu.io_in_progress = 1; 70.333 - send_pio_req(dst_port, *reps, bytes_per_rep, 70.334 - gpa, IOREQ_WRITE, 70.335 - !!(ctxt->regs->eflags & X86_EFLAGS_DF), 1); 70.336 - 70.337 - return X86EMUL_OKAY; 70.338 + return hvmemul_do_pio(dst_port, *reps, bytes_per_rep, gpa, IOREQ_WRITE, 70.339 + !!(ctxt->regs->eflags & X86_EFLAGS_DF), 1, NULL); 70.340 } 70.341 70.342 static int hvmemul_rep_movs( 70.343 @@ -366,9 +434,9 @@ static int hvmemul_rep_movs( 70.344 { 70.345 struct hvm_emulate_ctxt *hvmemul_ctxt = 70.346 container_of(ctxt, struct hvm_emulate_ctxt, ctxt); 70.347 - struct vcpu *curr = current; 70.348 unsigned long saddr, daddr; 70.349 paddr_t sgpa, dgpa; 70.350 + uint32_t pfec = PFEC_page_present; 70.351 p2m_type_t p2mt; 70.352 int rc; 70.353 70.354 @@ -384,39 +452,32 @@ static int hvmemul_rep_movs( 70.355 if ( rc != X86EMUL_OKAY ) 70.356 return rc; 70.357 70.358 + if ( hvmemul_ctxt->seg_reg[x86_seg_ss].attr.fields.dpl == 3 ) 70.359 + pfec |= PFEC_user_mode; 70.360 + 70.361 rc = hvmemul_linear_to_phys( 70.362 - saddr, &sgpa, bytes_per_rep, reps, hvm_access_read, hvmemul_ctxt); 70.363 + saddr, &sgpa, bytes_per_rep, reps, pfec, hvmemul_ctxt); 70.364 if ( rc != X86EMUL_OKAY ) 70.365 return rc; 70.366 70.367 rc = hvmemul_linear_to_phys( 70.368 - daddr, &dgpa, bytes_per_rep, reps, hvm_access_write, hvmemul_ctxt); 70.369 + daddr, &dgpa, bytes_per_rep, reps, 70.370 + pfec | PFEC_write_access, hvmemul_ctxt); 70.371 if ( rc != X86EMUL_OKAY ) 70.372 return rc; 70.373 70.374 - if ( curr->arch.hvm_vcpu.io_in_progress ) 70.375 - 
return X86EMUL_UNHANDLEABLE; 70.376 - 70.377 (void)gfn_to_mfn_current(sgpa >> PAGE_SHIFT, &p2mt); 70.378 if ( !p2m_is_ram(p2mt) ) 70.379 - { 70.380 - curr->arch.hvm_vcpu.io_in_progress = 1; 70.381 - send_mmio_req(IOREQ_TYPE_COPY, sgpa, *reps, bytes_per_rep, 70.382 - dgpa, IOREQ_READ, 70.383 - !!(ctxt->regs->eflags & X86_EFLAGS_DF), 1); 70.384 - } 70.385 - else 70.386 - { 70.387 - (void)gfn_to_mfn_current(dgpa >> PAGE_SHIFT, &p2mt); 70.388 - if ( p2m_is_ram(p2mt) ) 70.389 - return X86EMUL_UNHANDLEABLE; 70.390 - curr->arch.hvm_vcpu.io_in_progress = 1; 70.391 - send_mmio_req(IOREQ_TYPE_COPY, dgpa, *reps, bytes_per_rep, 70.392 - sgpa, IOREQ_WRITE, 70.393 - !!(ctxt->regs->eflags & X86_EFLAGS_DF), 1); 70.394 - } 70.395 + return hvmemul_do_mmio( 70.396 + sgpa, *reps, bytes_per_rep, dgpa, IOREQ_READ, 70.397 + !!(ctxt->regs->eflags & X86_EFLAGS_DF), 1, NULL); 70.398 70.399 - return X86EMUL_OKAY; 70.400 + (void)gfn_to_mfn_current(dgpa >> PAGE_SHIFT, &p2mt); 70.401 + if ( p2m_is_ram(p2mt) ) 70.402 + return X86EMUL_UNHANDLEABLE; 70.403 + return hvmemul_do_mmio( 70.404 + dgpa, *reps, bytes_per_rep, sgpa, IOREQ_WRITE, 70.405 + !!(ctxt->regs->eflags & X86_EFLAGS_DF), 1, NULL); 70.406 } 70.407 70.408 static int hvmemul_read_segment( 70.409 @@ -452,24 +513,7 @@ static int hvmemul_read_io( 70.410 unsigned long *val, 70.411 struct x86_emulate_ctxt *ctxt) 70.412 { 70.413 - struct vcpu *curr = current; 70.414 - 70.415 - if ( curr->arch.hvm_vcpu.io_in_progress ) 70.416 - return X86EMUL_UNHANDLEABLE; 70.417 - 70.418 - if ( !curr->arch.hvm_vcpu.io_completed ) 70.419 - { 70.420 - curr->arch.hvm_vcpu.io_in_progress = 1; 70.421 - send_pio_req(port, 1, bytes, 0, IOREQ_READ, 0, 0); 70.422 - } 70.423 - 70.424 - if ( !curr->arch.hvm_vcpu.io_completed ) 70.425 - return X86EMUL_RETRY; 70.426 - 70.427 - *val = curr->arch.hvm_vcpu.io_data; 70.428 - curr->arch.hvm_vcpu.io_completed = 0; 70.429 - 70.430 - return X86EMUL_OKAY; 70.431 + return hvmemul_do_pio(port, 1, bytes, 0, IOREQ_READ, 0, 0, val); 70.432 } 70.433 70.434 static int hvmemul_write_io( 70.435 @@ -478,21 +522,7 @@ static int hvmemul_write_io( 70.436 unsigned long val, 70.437 struct x86_emulate_ctxt *ctxt) 70.438 { 70.439 - struct vcpu *curr = current; 70.440 - 70.441 - if ( port == 0xe9 ) 70.442 - { 70.443 - hvm_print_line(curr, val); 70.444 - return X86EMUL_OKAY; 70.445 - } 70.446 - 70.447 - if ( curr->arch.hvm_vcpu.io_in_progress ) 70.448 - return X86EMUL_UNHANDLEABLE; 70.449 - 70.450 - curr->arch.hvm_vcpu.io_in_progress = 1; 70.451 - send_pio_req(port, 1, bytes, val, IOREQ_WRITE, 0, 0); 70.452 - 70.453 - return X86EMUL_OKAY; 70.454 + return hvmemul_do_pio(port, 1, bytes, val, IOREQ_WRITE, 0, 0, NULL); 70.455 } 70.456 70.457 static int hvmemul_read_cr( 70.458 @@ -674,7 +704,7 @@ int hvm_emulate_one( 70.459 { 70.460 struct cpu_user_regs *regs = hvmemul_ctxt->ctxt.regs; 70.461 struct vcpu *curr = current; 70.462 - uint32_t new_intr_shadow; 70.463 + uint32_t new_intr_shadow, pfec = PFEC_page_present; 70.464 unsigned long addr; 70.465 int rc; 70.466 70.467 @@ -691,6 +721,9 @@ int hvm_emulate_one( 70.468 hvmemul_ctxt->seg_reg[x86_seg_ss].attr.fields.db ? 
32 : 16; 70.469 } 70.470 70.471 + if ( hvmemul_ctxt->seg_reg[x86_seg_ss].attr.fields.dpl == 3 ) 70.472 + pfec |= PFEC_user_mode; 70.473 + 70.474 hvmemul_ctxt->insn_buf_eip = regs->eip; 70.475 hvmemul_ctxt->insn_buf_bytes = 70.476 (hvm_virtual_to_linear_addr( 70.477 @@ -698,7 +731,8 @@ int hvm_emulate_one( 70.478 regs->eip, sizeof(hvmemul_ctxt->insn_buf), 70.479 hvm_access_insn_fetch, hvmemul_ctxt->ctxt.addr_size, &addr) && 70.480 !hvm_fetch_from_guest_virt_nofault( 70.481 - hvmemul_ctxt->insn_buf, addr, sizeof(hvmemul_ctxt->insn_buf))) 70.482 + hvmemul_ctxt->insn_buf, addr, 70.483 + sizeof(hvmemul_ctxt->insn_buf), pfec)) 70.484 ? sizeof(hvmemul_ctxt->insn_buf) : 0; 70.485 70.486 hvmemul_ctxt->exn_pending = 0;
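The emulate.c rewrite above replaces the old io_in_progress/io_completed flag pair with a single per-vcpu io_state driven through hvmemul_do_io(). A minimal sketch of the resulting retry protocol for reads that go out to the device model (the HVMIO_* and X86EMUL_* names are from the hunk; everything else is illustrative):

    /* First pass: io_state is HVMIO_none, so the request is posted to the
     * device model and the emulator is asked to retry the instruction. */
    enum { HVMIO_none, HVMIO_dispatched, HVMIO_awaiting_completion,
           HVMIO_completed } io_state = HVMIO_none;
    unsigned long io_data;

    int emul_read_port(unsigned long port, unsigned long *val)
    {
        if ( io_state == HVMIO_completed )
        {
            /* Second pass: hvm_io_assist() has latched the answer. */
            io_state = HVMIO_none;
            *val = io_data;
            return 0;                     /* X86EMUL_OKAY */
        }
        io_state = HVMIO_awaiting_completion;
        /* ... post the ioreq to the device model here ... */
        return 1;                         /* X86EMUL_RETRY */
    }

Writes pass val == NULL, are marked HVMIO_dispatched, and return X86EMUL_OKAY at once, since nothing in the instruction depends on their completion; requests satisfied by an internal intercept complete synchronously and skip the retry entirely.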
71.1 --- a/xen/arch/x86/hvm/hvm.c Tue Apr 01 10:30:57 2008 -0600 71.2 +++ b/xen/arch/x86/hvm/hvm.c Tue Apr 01 11:29:03 2008 -0600 71.3 @@ -81,6 +81,58 @@ void hvm_enable(struct hvm_function_tabl 71.4 printk("HVM: Hardware Assisted Paging detected.\n"); 71.5 } 71.6 71.7 +/* 71.8 + * Need to re-inject a given event? We avoid re-injecting software exceptions 71.9 + * and interrupts because the faulting/trapping instruction can simply be 71.10 + * re-executed (neither VMX nor SVM update RIP when they VMEXIT during 71.11 + * INT3/INTO/INTn). 71.12 + */ 71.13 +int hvm_event_needs_reinjection(uint8_t type, uint8_t vector) 71.14 +{ 71.15 + switch ( type ) 71.16 + { 71.17 + case X86_EVENTTYPE_EXT_INTR: 71.18 + case X86_EVENTTYPE_NMI: 71.19 + return 1; 71.20 + case X86_EVENTTYPE_HW_EXCEPTION: 71.21 + /* 71.22 + * SVM uses type 3 ("HW Exception") for #OF and #BP. We explicitly 71.23 + * check for these vectors, as they are really SW Exceptions. SVM has 71.24 + * not updated RIP to point after the trapping instruction (INT3/INTO). 71.25 + */ 71.26 + return (vector != 3) && (vector != 4); 71.27 + default: 71.28 + /* Software exceptions/interrupts can be re-executed (e.g., INT n). */ 71.29 + break; 71.30 + } 71.31 + return 0; 71.32 +} 71.33 + 71.34 +/* 71.35 + * Combine two hardware exceptions: @vec2 was raised during delivery of @vec1. 71.36 + * This means we can assume that @vec2 is contributory or a page fault. 71.37 + */ 71.38 +uint8_t hvm_combine_hw_exceptions(uint8_t vec1, uint8_t vec2) 71.39 +{ 71.40 + /* Exception during double-fault delivery always causes a triple fault. */ 71.41 + if ( vec1 == TRAP_double_fault ) 71.42 + { 71.43 + hvm_triple_fault(); 71.44 + return TRAP_double_fault; /* dummy return */ 71.45 + } 71.46 + 71.47 + /* Exception during page-fault delivery always causes a double fault. */ 71.48 + if ( vec1 == TRAP_page_fault ) 71.49 + return TRAP_double_fault; 71.50 + 71.51 + /* Discard the first exception if it's benign or if we now have a #PF. */ 71.52 + if ( !((1u << vec1) & 0x7c01u) || (vec2 == TRAP_page_fault) ) 71.53 + return vec2; 71.54 + 71.55 + /* Cannot combine the exceptions: double fault. 
*/ 71.56 + return TRAP_double_fault; 71.57 +} 71.58 + 71.59 void hvm_set_guest_tsc(struct vcpu *v, u64 guest_tsc) 71.60 { 71.61 u64 host_tsc; 71.62 @@ -203,6 +255,30 @@ static int hvm_set_ioreq_page( 71.63 return 0; 71.64 } 71.65 71.66 +static int hvm_print_line( 71.67 + int dir, uint32_t port, uint32_t bytes, uint32_t *val) 71.68 +{ 71.69 + struct vcpu *curr = current; 71.70 + struct hvm_domain *hd = &curr->domain->arch.hvm_domain; 71.71 + char c = *val; 71.72 + 71.73 + BUG_ON(bytes != 1); 71.74 + 71.75 + spin_lock(&hd->pbuf_lock); 71.76 + hd->pbuf[hd->pbuf_idx++] = c; 71.77 + if ( (hd->pbuf_idx == (sizeof(hd->pbuf) - 2)) || (c == '\n') ) 71.78 + { 71.79 + if ( c != '\n' ) 71.80 + hd->pbuf[hd->pbuf_idx++] = '\n'; 71.81 + hd->pbuf[hd->pbuf_idx] = '\0'; 71.82 + printk(XENLOG_G_DEBUG "HVM%u: %s", curr->domain->domain_id, hd->pbuf); 71.83 + hd->pbuf_idx = 0; 71.84 + } 71.85 + spin_unlock(&hd->pbuf_lock); 71.86 + 71.87 + return 1; 71.88 +} 71.89 + 71.90 int hvm_domain_initialise(struct domain *d) 71.91 { 71.92 int rc; 71.93 @@ -237,6 +313,8 @@ int hvm_domain_initialise(struct domain 71.94 hvm_init_ioreq_page(d, &d->arch.hvm_domain.ioreq); 71.95 hvm_init_ioreq_page(d, &d->arch.hvm_domain.buf_ioreq); 71.96 71.97 + register_portio_handler(d, 0xe9, 1, hvm_print_line); 71.98 + 71.99 rc = hvm_funcs.domain_initialise(d); 71.100 if ( rc != 0 ) 71.101 goto fail2; 71.102 @@ -1250,7 +1328,7 @@ void hvm_task_switch( 71.103 goto out; 71.104 } 71.105 71.106 - if ( !tr.attr.fields.g && (tr.limit < (sizeof(tss)-1)) ) 71.107 + if ( tr.limit < (sizeof(tss)-1) ) 71.108 { 71.109 hvm_inject_exception(TRAP_invalid_tss, tss_sel & 0xfff8, 0); 71.110 goto out; 71.111 @@ -1358,7 +1436,7 @@ void hvm_task_switch( 71.112 if ( hvm_virtual_to_linear_addr(x86_seg_ss, ®, regs->esp, 71.113 4, hvm_access_write, 32, 71.114 &linear_addr) ) 71.115 - hvm_copy_to_guest_virt_nofault(linear_addr, &errcode, 4); 71.116 + hvm_copy_to_guest_virt_nofault(linear_addr, &errcode, 4, 0); 71.117 } 71.118 71.119 out: 71.120 @@ -1366,60 +1444,31 @@ void hvm_task_switch( 71.121 hvm_unmap(nptss_desc); 71.122 } 71.123 71.124 -/* 71.125 - * __hvm_copy(): 71.126 - * @buf = hypervisor buffer 71.127 - * @addr = guest address to copy to/from 71.128 - * @size = number of bytes to copy 71.129 - * @dir = copy *to* guest (TRUE) or *from* guest (FALSE)? 71.130 - * @virt = addr is *virtual* (TRUE) or *guest physical* (FALSE)? 71.131 - * @fetch = copy is an instruction fetch? 71.132 - * Returns number of bytes failed to copy (0 == complete success). 71.133 - */ 71.134 +#define HVMCOPY_from_guest (0u<<0) 71.135 +#define HVMCOPY_to_guest (1u<<0) 71.136 +#define HVMCOPY_no_fault (0u<<1) 71.137 +#define HVMCOPY_fault (1u<<1) 71.138 +#define HVMCOPY_phys (0u<<2) 71.139 +#define HVMCOPY_virt (1u<<2) 71.140 static enum hvm_copy_result __hvm_copy( 71.141 - void *buf, paddr_t addr, int size, int dir, int virt, int fetch) 71.142 + void *buf, paddr_t addr, int size, unsigned int flags, uint32_t pfec) 71.143 { 71.144 struct vcpu *curr = current; 71.145 unsigned long gfn, mfn; 71.146 p2m_type_t p2mt; 71.147 char *p; 71.148 - int count, todo; 71.149 - uint32_t pfec = PFEC_page_present; 71.150 - 71.151 - /* 71.152 - * We cannot use hvm_get_segment_register() while executing in 71.153 - * vmx_realmode() as segment register state is cached. Furthermore, 71.154 - * VMREADs on every data access hurts emulation performance. 71.155 - * Hence we do not gather extra PFEC flags if CR0.PG == 0. 
71.156 - */ 71.157 - if ( !(curr->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PG) ) 71.158 - virt = 0; 71.159 + int count, todo = size; 71.160 71.161 - if ( virt ) 71.162 - { 71.163 - struct segment_register sreg; 71.164 - hvm_get_segment_register(curr, x86_seg_ss, &sreg); 71.165 - if ( sreg.attr.fields.dpl == 3 ) 71.166 - pfec |= PFEC_user_mode; 71.167 - 71.168 - if ( dir ) 71.169 - pfec |= PFEC_write_access; 71.170 - 71.171 - if ( fetch ) 71.172 - pfec |= PFEC_insn_fetch; 71.173 - } 71.174 - 71.175 - todo = size; 71.176 while ( todo > 0 ) 71.177 { 71.178 count = min_t(int, PAGE_SIZE - (addr & ~PAGE_MASK), todo); 71.179 71.180 - if ( virt ) 71.181 + if ( flags & HVMCOPY_virt ) 71.182 { 71.183 gfn = paging_gva_to_gfn(curr, addr, &pfec); 71.184 if ( gfn == INVALID_GFN ) 71.185 { 71.186 - if ( virt == 2 ) /* 2 means generate a fault */ 71.187 + if ( flags & HVMCOPY_fault ) 71.188 hvm_inject_exception(TRAP_page_fault, pfec, addr); 71.189 return HVMCOPY_bad_gva_to_gfn; 71.190 } 71.191 @@ -1437,16 +1486,18 @@ static enum hvm_copy_result __hvm_copy( 71.192 71.193 p = (char *)map_domain_page(mfn) + (addr & ~PAGE_MASK); 71.194 71.195 - if ( dir ) 71.196 + if ( flags & HVMCOPY_to_guest ) 71.197 { 71.198 - memcpy(p, buf, count); /* dir == TRUE: *to* guest */ 71.199 + memcpy(p, buf, count); 71.200 paging_mark_dirty(curr->domain, mfn); 71.201 } 71.202 else 71.203 - memcpy(buf, p, count); /* dir == FALSE: *from guest */ 71.204 + { 71.205 + memcpy(buf, p, count); 71.206 + } 71.207 71.208 unmap_domain_page(p); 71.209 - 71.210 + 71.211 addr += count; 71.212 buf += count; 71.213 todo -= count; 71.214 @@ -1458,56 +1509,73 @@ static enum hvm_copy_result __hvm_copy( 71.215 enum hvm_copy_result hvm_copy_to_guest_phys( 71.216 paddr_t paddr, void *buf, int size) 71.217 { 71.218 - return __hvm_copy(buf, paddr, size, 1, 0, 0); 71.219 + return __hvm_copy(buf, paddr, size, 71.220 + HVMCOPY_to_guest | HVMCOPY_fault | HVMCOPY_phys, 71.221 + 0); 71.222 } 71.223 71.224 enum hvm_copy_result hvm_copy_from_guest_phys( 71.225 void *buf, paddr_t paddr, int size) 71.226 { 71.227 - return __hvm_copy(buf, paddr, size, 0, 0, 0); 71.228 + return __hvm_copy(buf, paddr, size, 71.229 + HVMCOPY_from_guest | HVMCOPY_fault | HVMCOPY_phys, 71.230 + 0); 71.231 } 71.232 71.233 enum hvm_copy_result hvm_copy_to_guest_virt( 71.234 - unsigned long vaddr, void *buf, int size) 71.235 + unsigned long vaddr, void *buf, int size, uint32_t pfec) 71.236 { 71.237 - return __hvm_copy(buf, vaddr, size, 1, 2, 0); 71.238 + return __hvm_copy(buf, vaddr, size, 71.239 + HVMCOPY_to_guest | HVMCOPY_fault | HVMCOPY_virt, 71.240 + PFEC_page_present | PFEC_write_access | pfec); 71.241 } 71.242 71.243 enum hvm_copy_result hvm_copy_from_guest_virt( 71.244 - void *buf, unsigned long vaddr, int size) 71.245 + void *buf, unsigned long vaddr, int size, uint32_t pfec) 71.246 { 71.247 - return __hvm_copy(buf, vaddr, size, 0, 2, 0); 71.248 + return __hvm_copy(buf, vaddr, size, 71.249 + HVMCOPY_from_guest | HVMCOPY_fault | HVMCOPY_virt, 71.250 + PFEC_page_present | pfec); 71.251 } 71.252 71.253 enum hvm_copy_result hvm_fetch_from_guest_virt( 71.254 - void *buf, unsigned long vaddr, int size) 71.255 + void *buf, unsigned long vaddr, int size, uint32_t pfec) 71.256 { 71.257 - return __hvm_copy(buf, vaddr, size, 0, 2, hvm_nx_enabled(current)); 71.258 + if ( hvm_nx_enabled(current) ) 71.259 + pfec |= PFEC_insn_fetch; 71.260 + return __hvm_copy(buf, vaddr, size, 71.261 + HVMCOPY_from_guest | HVMCOPY_fault | HVMCOPY_virt, 71.262 + PFEC_page_present | pfec); 71.263 } 71.264 71.265 enum 
hvm_copy_result hvm_copy_to_guest_virt_nofault( 71.266 - unsigned long vaddr, void *buf, int size) 71.267 + unsigned long vaddr, void *buf, int size, uint32_t pfec) 71.268 { 71.269 - return __hvm_copy(buf, vaddr, size, 1, 1, 0); 71.270 + return __hvm_copy(buf, vaddr, size, 71.271 + HVMCOPY_to_guest | HVMCOPY_no_fault | HVMCOPY_virt, 71.272 + PFEC_page_present | PFEC_write_access | pfec); 71.273 } 71.274 71.275 enum hvm_copy_result hvm_copy_from_guest_virt_nofault( 71.276 - void *buf, unsigned long vaddr, int size) 71.277 + void *buf, unsigned long vaddr, int size, uint32_t pfec) 71.278 { 71.279 - return __hvm_copy(buf, vaddr, size, 0, 1, 0); 71.280 + return __hvm_copy(buf, vaddr, size, 71.281 + HVMCOPY_from_guest | HVMCOPY_no_fault | HVMCOPY_virt, 71.282 + PFEC_page_present | pfec); 71.283 } 71.284 71.285 enum hvm_copy_result hvm_fetch_from_guest_virt_nofault( 71.286 - void *buf, unsigned long vaddr, int size) 71.287 + void *buf, unsigned long vaddr, int size, uint32_t pfec) 71.288 { 71.289 - return __hvm_copy(buf, vaddr, size, 0, 1, hvm_nx_enabled(current)); 71.290 + if ( hvm_nx_enabled(current) ) 71.291 + pfec |= PFEC_insn_fetch; 71.292 + return __hvm_copy(buf, vaddr, size, 71.293 + HVMCOPY_from_guest | HVMCOPY_no_fault | HVMCOPY_virt, 71.294 + PFEC_page_present | pfec); 71.295 } 71.296 71.297 DEFINE_PER_CPU(int, guest_handles_in_xen_space); 71.298 71.299 -/* Note that copy_{to,from}_user_hvm require the PTE to be writable even 71.300 - when they're only trying to read from it. The guest is expected to 71.301 - deal with this. */ 71.302 unsigned long copy_to_user_hvm(void *to, const void *from, unsigned len) 71.303 { 71.304 int rc; 71.305 @@ -1518,7 +1586,8 @@ unsigned long copy_to_user_hvm(void *to, 71.306 return 0; 71.307 } 71.308 71.309 - rc = hvm_copy_to_guest_virt_nofault((unsigned long)to, (void *)from, len); 71.310 + rc = hvm_copy_to_guest_virt_nofault((unsigned long)to, (void *)from, 71.311 + len, 0); 71.312 return rc ? len : 0; /* fake a copy_to_user() return code */ 71.313 } 71.314 71.315 @@ -1532,28 +1601,10 @@ unsigned long copy_from_user_hvm(void *t 71.316 return 0; 71.317 } 71.318 71.319 - rc = hvm_copy_from_guest_virt_nofault(to, (unsigned long)from, len); 71.320 + rc = hvm_copy_from_guest_virt_nofault(to, (unsigned long)from, len, 0); 71.321 return rc ? len : 0; /* fake a copy_from_user() return code */ 71.322 } 71.323 71.324 -/* HVM specific printbuf. Mostly used for hvmloader chit-chat. 
*/ 71.325 -void hvm_print_line(struct vcpu *v, const char c) 71.326 -{ 71.327 - struct hvm_domain *hd = &v->domain->arch.hvm_domain; 71.328 - 71.329 - spin_lock(&hd->pbuf_lock); 71.330 - hd->pbuf[hd->pbuf_idx++] = c; 71.331 - if ( (hd->pbuf_idx == (sizeof(hd->pbuf) - 2)) || (c == '\n') ) 71.332 - { 71.333 - if ( c != '\n' ) 71.334 - hd->pbuf[hd->pbuf_idx++] = '\n'; 71.335 - hd->pbuf[hd->pbuf_idx] = '\0'; 71.336 - printk(XENLOG_G_DEBUG "HVM%u: %s", v->domain->domain_id, hd->pbuf); 71.337 - hd->pbuf_idx = 0; 71.338 - } 71.339 - spin_unlock(&hd->pbuf_lock); 71.340 -} 71.341 - 71.342 #define bitmaskof(idx) (1U << ((idx) & 31)) 71.343 void hvm_cpuid(unsigned int input, unsigned int *eax, unsigned int *ebx, 71.344 unsigned int *ecx, unsigned int *edx) 71.345 @@ -1655,7 +1706,7 @@ enum hvm_intblk hvm_interrupt_blocked(st 71.346 static long hvm_grant_table_op( 71.347 unsigned int cmd, XEN_GUEST_HANDLE(void) uop, unsigned int count) 71.348 { 71.349 - if ( cmd != GNTTABOP_query_size ) 71.350 + if ( (cmd != GNTTABOP_query_size) && (cmd != GNTTABOP_setup_table) ) 71.351 return -ENOSYS; /* all other commands need auditing */ 71.352 return do_grant_table_op(cmd, uop, count); 71.353 } 71.354 @@ -2109,12 +2160,15 @@ long do_hvm_op(unsigned long op, XEN_GUE 71.355 return -EINVAL; 71.356 71.357 if ( a.domid == DOMID_SELF ) 71.358 + { 71.359 d = rcu_lock_current_domain(); 71.360 - else { 71.361 - d = rcu_lock_domain_by_id(a.domid); 71.362 - if ( d == NULL ) 71.363 + } 71.364 + else 71.365 + { 71.366 + if ( (d = rcu_lock_domain_by_id(a.domid)) == NULL ) 71.367 return -ESRCH; 71.368 - if ( !IS_PRIV_FOR(current->domain, d) ) { 71.369 + if ( !IS_PRIV_FOR(current->domain, d) ) 71.370 + { 71.371 rc = -EPERM; 71.372 goto param_fail; 71.373 }
72.1 --- a/xen/arch/x86/hvm/io.c Tue Apr 01 10:30:57 2008 -0600 72.2 +++ b/xen/arch/x86/hvm/io.c Tue Apr 01 11:29:03 2008 -0600 72.3 @@ -123,73 +123,6 @@ int hvm_buffered_io_send(ioreq_t *p) 72.4 return 1; 72.5 } 72.6 72.7 -void send_pio_req(unsigned long port, unsigned long count, int size, 72.8 - paddr_t value, int dir, int df, int value_is_ptr) 72.9 -{ 72.10 - struct vcpu *v = current; 72.11 - vcpu_iodata_t *vio = get_ioreq(v); 72.12 - ioreq_t *p = &vio->vp_ioreq; 72.13 - 72.14 - if ( p->state != STATE_IOREQ_NONE ) 72.15 - gdprintk(XENLOG_WARNING, 72.16 - "WARNING: send pio with something already pending (%d)?\n", 72.17 - p->state); 72.18 - 72.19 - p->dir = dir; 72.20 - p->data_is_ptr = value_is_ptr; 72.21 - p->type = IOREQ_TYPE_PIO; 72.22 - p->size = size; 72.23 - p->addr = port; 72.24 - p->count = count; 72.25 - p->df = df; 72.26 - p->data = value; 72.27 - p->io_count++; 72.28 - 72.29 - if ( hvm_portio_intercept(p) ) 72.30 - { 72.31 - p->state = STATE_IORESP_READY; 72.32 - hvm_io_assist(); 72.33 - } 72.34 - else 72.35 - { 72.36 - hvm_send_assist_req(v); 72.37 - } 72.38 -} 72.39 - 72.40 -void send_mmio_req(unsigned char type, paddr_t gpa, 72.41 - unsigned long count, int size, paddr_t value, 72.42 - int dir, int df, int value_is_ptr) 72.43 -{ 72.44 - struct vcpu *v = current; 72.45 - vcpu_iodata_t *vio = get_ioreq(v); 72.46 - ioreq_t *p = &vio->vp_ioreq; 72.47 - 72.48 - if ( p->state != STATE_IOREQ_NONE ) 72.49 - gdprintk(XENLOG_WARNING, 72.50 - "WARNING: send mmio with something already pending (%d)?\n", 72.51 - p->state); 72.52 - 72.53 - p->dir = dir; 72.54 - p->data_is_ptr = value_is_ptr; 72.55 - p->type = type; 72.56 - p->size = size; 72.57 - p->addr = gpa; 72.58 - p->count = count; 72.59 - p->df = df; 72.60 - p->data = value; 72.61 - p->io_count++; 72.62 - 72.63 - if ( hvm_mmio_intercept(p) || hvm_buffered_io_intercept(p) ) 72.64 - { 72.65 - p->state = STATE_IORESP_READY; 72.66 - hvm_io_assist(); 72.67 - } 72.68 - else 72.69 - { 72.70 - hvm_send_assist_req(v); 72.71 - } 72.72 -} 72.73 - 72.74 void send_timeoffset_req(unsigned long timeoff) 72.75 { 72.76 ioreq_t p[1]; 72.77 @@ -249,6 +182,11 @@ int handle_mmio(void) 72.78 72.79 rc = hvm_emulate_one(&ctxt); 72.80 72.81 + if ( curr->arch.hvm_vcpu.io_state == HVMIO_awaiting_completion ) 72.82 + curr->arch.hvm_vcpu.io_state = HVMIO_handle_mmio_awaiting_completion; 72.83 + else 72.84 + curr->arch.hvm_vcpu.mmio_gva = 0; 72.85 + 72.86 switch ( rc ) 72.87 { 72.88 case X86EMUL_UNHANDLEABLE: 72.89 @@ -271,41 +209,46 @@ int handle_mmio(void) 72.90 72.91 hvm_emulate_writeback(&ctxt); 72.92 72.93 - curr->arch.hvm_vcpu.mmio_in_progress = curr->arch.hvm_vcpu.io_in_progress; 72.94 + return 1; 72.95 +} 72.96 72.97 - return 1; 72.98 +int handle_mmio_with_translation(unsigned long gva, unsigned long gpfn) 72.99 +{ 72.100 + current->arch.hvm_vcpu.mmio_gva = gva & PAGE_MASK; 72.101 + current->arch.hvm_vcpu.mmio_gpfn = gpfn; 72.102 + return handle_mmio(); 72.103 } 72.104 72.105 void hvm_io_assist(void) 72.106 { 72.107 - struct vcpu *v = current; 72.108 - ioreq_t *p = &get_ioreq(v)->vp_ioreq; 72.109 + struct vcpu *curr = current; 72.110 + ioreq_t *p = &get_ioreq(curr)->vp_ioreq; 72.111 + enum hvm_io_state io_state; 72.112 72.113 if ( p->state != STATE_IORESP_READY ) 72.114 { 72.115 gdprintk(XENLOG_ERR, "Unexpected HVM iorequest state %d.\n", p->state); 72.116 - domain_crash(v->domain); 72.117 - goto out; 72.118 + domain_crash_synchronous(); 72.119 } 72.120 72.121 rmb(); /* see IORESP_READY /then/ read contents of ioreq */ 72.122 72.123 p->state = 
STATE_IOREQ_NONE; 72.124 72.125 - if ( v->arch.hvm_vcpu.io_in_progress ) 72.126 + io_state = curr->arch.hvm_vcpu.io_state; 72.127 + curr->arch.hvm_vcpu.io_state = HVMIO_none; 72.128 + 72.129 + if ( (io_state == HVMIO_awaiting_completion) || 72.130 + (io_state == HVMIO_handle_mmio_awaiting_completion) ) 72.131 { 72.132 - v->arch.hvm_vcpu.io_in_progress = 0; 72.133 - if ( (p->dir == IOREQ_READ) && !p->data_is_ptr ) 72.134 - { 72.135 - v->arch.hvm_vcpu.io_completed = 1; 72.136 - v->arch.hvm_vcpu.io_data = p->data; 72.137 - if ( v->arch.hvm_vcpu.mmio_in_progress ) 72.138 - (void)handle_mmio(); 72.139 - } 72.140 + curr->arch.hvm_vcpu.io_state = HVMIO_completed; 72.141 + curr->arch.hvm_vcpu.io_data = p->data; 72.142 + if ( io_state == HVMIO_handle_mmio_awaiting_completion ) 72.143 + (void)handle_mmio(); 72.144 } 72.145 72.146 - out: 72.147 - vcpu_end_shutdown_deferral(v); 72.148 + if ( p->state == STATE_IOREQ_NONE ) 72.149 + vcpu_end_shutdown_deferral(curr); 72.150 } 72.151 72.152 void dpci_ioport_read(uint32_t mport, ioreq_t *p)
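With send_pio_req()/send_mmio_req() folded into hvmemul_do_io(), hvm_io_assist() above reduces to a small state machine. A sketch of the transitions on the response path (state names come from the hunks; the enum ordering is an assumption):

    enum hvm_io_state {
        HVMIO_none,
        HVMIO_dispatched,                      /* write: no data expected */
        HVMIO_awaiting_completion,             /* read posted by emulator */
        HVMIO_handle_mmio_awaiting_completion, /* read posted via handle_mmio() */
        HVMIO_completed,                       /* io_data holds the answer */
    };

    /* On STATE_IORESP_READY (cf. hvm_io_assist() above): */
    static enum hvm_io_state on_ioresp(enum hvm_io_state s)
    {
        switch ( s )
        {
        case HVMIO_awaiting_completion:
        case HVMIO_handle_mmio_awaiting_completion:
            return HVMIO_completed;   /* a retry will consume io_data */
        default:
            return HVMIO_none;        /* nobody is waiting for data */
        }
    }

The handle_mmio variant additionally re-enters handle_mmio() straight away, so the interrupted MMIO instruction is finished without another fault.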
73.1 --- a/xen/arch/x86/hvm/svm/emulate.c Tue Apr 01 10:30:57 2008 -0600 73.2 +++ b/xen/arch/x86/hvm/svm/emulate.c Tue Apr 01 11:29:03 2008 -0600 73.3 @@ -32,9 +32,11 @@ 73.4 static int inst_copy_from_guest( 73.5 unsigned char *buf, unsigned long guest_eip, int inst_len) 73.6 { 73.7 + struct vmcb_struct *vmcb = current->arch.hvm_svm.vmcb; 73.8 + uint32_t pfec = (vmcb->cpl == 3) ? PFEC_user_mode : 0; 73.9 if ( (inst_len > MAX_INST_LEN) || (inst_len <= 0) ) 73.10 return 0; 73.11 - if ( hvm_fetch_from_guest_virt_nofault(buf, guest_eip, inst_len) ) 73.12 + if ( hvm_fetch_from_guest_virt_nofault(buf, guest_eip, inst_len, pfec) ) 73.13 return 0; 73.14 return inst_len; 73.15 }
74.1 --- a/xen/arch/x86/hvm/svm/svm.c Tue Apr 01 10:30:57 2008 -0600 74.2 +++ b/xen/arch/x86/hvm/svm/svm.c Tue Apr 01 11:29:03 2008 -0600 74.3 @@ -725,7 +725,15 @@ static void svm_inject_exception( 74.4 { 74.5 struct vcpu *curr = current; 74.6 struct vmcb_struct *vmcb = curr->arch.hvm_svm.vmcb; 74.7 - eventinj_t event; 74.8 + eventinj_t event = vmcb->eventinj; 74.9 + 74.10 + if ( unlikely(event.fields.v) && 74.11 + (event.fields.type == X86_EVENTTYPE_HW_EXCEPTION) ) 74.12 + { 74.13 + trapnr = hvm_combine_hw_exceptions(event.fields.vector, trapnr); 74.14 + if ( trapnr == TRAP_double_fault ) 74.15 + errcode = 0; 74.16 + } 74.17 74.18 event.bytes = 0; 74.19 event.fields.v = 1;
75.1 --- a/xen/arch/x86/hvm/vmx/realmode.c Tue Apr 01 10:30:57 2008 -0600 75.2 +++ b/xen/arch/x86/hvm/vmx/realmode.c Tue Apr 01 11:29:03 2008 -0600 75.3 @@ -190,7 +190,7 @@ void vmx_realmode(struct cpu_user_regs * 75.4 75.5 hvm_emulate_prepare(&hvmemul_ctxt, regs); 75.6 75.7 - if ( curr->arch.hvm_vcpu.io_completed ) 75.8 + if ( curr->arch.hvm_vcpu.io_state == HVMIO_completed ) 75.9 realmode_emulate_one(&hvmemul_ctxt); 75.10 75.11 /* Only deliver interrupts into emulated real mode. */ 75.12 @@ -203,7 +203,7 @@ void vmx_realmode(struct cpu_user_regs * 75.13 75.14 while ( curr->arch.hvm_vmx.vmxemul && 75.15 !softirq_pending(smp_processor_id()) && 75.16 - !curr->arch.hvm_vcpu.io_in_progress ) 75.17 + (curr->arch.hvm_vcpu.io_state == HVMIO_none) ) 75.18 { 75.19 /* 75.20 * Check for pending interrupts only every 16 instructions, because
76.1 --- a/xen/arch/x86/hvm/vmx/vmx.c Tue Apr 01 10:30:57 2008 -0600 76.2 +++ b/xen/arch/x86/hvm/vmx/vmx.c Tue Apr 01 11:29:03 2008 -0600 76.3 @@ -983,6 +983,62 @@ static void vmx_flush_guest_tlbs(void) 76.4 * because VMRESUME will flush it for us. */ 76.5 } 76.6 76.7 + 76.8 + 76.9 +static void __vmx_inject_exception( 76.10 + struct vcpu *v, int trap, int type, int error_code) 76.11 +{ 76.12 + unsigned long intr_fields; 76.13 + 76.14 + /* 76.15 + * NB. Callers do not need to worry about clearing STI/MOV-SS blocking: 76.16 + * "If the VM entry is injecting, there is no blocking by STI or by 76.17 + * MOV SS following the VM entry, regardless of the contents of the 76.18 + * interruptibility-state field [in the guest-state area before the 76.19 + * VM entry]", PRM Vol. 3, 22.6.1 (Interruptibility State). 76.20 + */ 76.21 + 76.22 + intr_fields = (INTR_INFO_VALID_MASK | (type<<8) | trap); 76.23 + if ( error_code != HVM_DELIVER_NO_ERROR_CODE ) { 76.24 + __vmwrite(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code); 76.25 + intr_fields |= INTR_INFO_DELIVER_CODE_MASK; 76.26 + } 76.27 + 76.28 + __vmwrite(VM_ENTRY_INTR_INFO, intr_fields); 76.29 + 76.30 + if ( trap == TRAP_page_fault ) 76.31 + HVMTRACE_2D(PF_INJECT, v, v->arch.hvm_vcpu.guest_cr[2], error_code); 76.32 + else 76.33 + HVMTRACE_2D(INJ_EXC, v, trap, error_code); 76.34 +} 76.35 + 76.36 +void vmx_inject_hw_exception(struct vcpu *v, int trap, int error_code) 76.37 +{ 76.38 + unsigned long intr_info = __vmread(VM_ENTRY_INTR_INFO); 76.39 + 76.40 + if ( unlikely(intr_info & INTR_INFO_VALID_MASK) && 76.41 + (((intr_info >> 8) & 7) == X86_EVENTTYPE_HW_EXCEPTION) ) 76.42 + { 76.43 + trap = hvm_combine_hw_exceptions((uint8_t)intr_info, trap); 76.44 + if ( trap == TRAP_double_fault ) 76.45 + error_code = 0; 76.46 + } 76.47 + 76.48 + __vmx_inject_exception(v, trap, X86_EVENTTYPE_HW_EXCEPTION, error_code); 76.49 +} 76.50 + 76.51 +void vmx_inject_extint(struct vcpu *v, int trap) 76.52 +{ 76.53 + __vmx_inject_exception(v, trap, X86_EVENTTYPE_EXT_INTR, 76.54 + HVM_DELIVER_NO_ERROR_CODE); 76.55 +} 76.56 + 76.57 +void vmx_inject_nmi(struct vcpu *v) 76.58 +{ 76.59 + __vmx_inject_exception(v, 2, X86_EVENTTYPE_NMI, 76.60 + HVM_DELIVER_NO_ERROR_CODE); 76.61 +} 76.62 + 76.63 static void vmx_inject_exception( 76.64 unsigned int trapnr, int errcode, unsigned long cr2) 76.65 { 76.66 @@ -1184,23 +1240,6 @@ static void vmx_do_cpuid(struct cpu_user 76.67 regs->edx = edx; 76.68 } 76.69 76.70 -#define CASE_GET_REG_P(REG, reg) \ 76.71 - case REG_ ## REG: reg_p = (unsigned long *)&(regs->reg); break 76.72 - 76.73 -#ifdef __i386__ 76.74 -#define CASE_EXTEND_GET_REG_P 76.75 -#else 76.76 -#define CASE_EXTEND_GET_REG_P \ 76.77 - CASE_GET_REG_P(R8, r8); \ 76.78 - CASE_GET_REG_P(R9, r9); \ 76.79 - CASE_GET_REG_P(R10, r10); \ 76.80 - CASE_GET_REG_P(R11, r11); \ 76.81 - CASE_GET_REG_P(R12, r12); \ 76.82 - CASE_GET_REG_P(R13, r13); \ 76.83 - CASE_GET_REG_P(R14, r14); \ 76.84 - CASE_GET_REG_P(R15, r15) 76.85 -#endif 76.86 - 76.87 static void vmx_dr_access(unsigned long exit_qualification, 76.88 struct cpu_user_regs *regs) 76.89 { 76.90 @@ -1224,9 +1263,9 @@ static void vmx_invlpg_intercept(unsigne 76.91 } 76.92 76.93 #define CASE_SET_REG(REG, reg) \ 76.94 - case REG_ ## REG: regs->reg = value; break 76.95 + case VMX_CONTROL_REG_ACCESS_GPR_ ## REG: regs->reg = value; break 76.96 #define CASE_GET_REG(REG, reg) \ 76.97 - case REG_ ## REG: value = regs->reg; break 76.98 + case VMX_CONTROL_REG_ACCESS_GPR_ ## REG: value = regs->reg; break 76.99 76.100 #define CASE_EXTEND_SET_REG \ 76.101 
CASE_EXTEND_REG(S) 76.102 @@ -1352,26 +1391,25 @@ static int vmx_cr_access(unsigned long e 76.103 unsigned long value; 76.104 struct vcpu *v = current; 76.105 76.106 - switch ( exit_qualification & CONTROL_REG_ACCESS_TYPE ) 76.107 + switch ( exit_qualification & VMX_CONTROL_REG_ACCESS_TYPE ) 76.108 { 76.109 - case TYPE_MOV_TO_CR: 76.110 - gp = exit_qualification & CONTROL_REG_ACCESS_REG; 76.111 - cr = exit_qualification & CONTROL_REG_ACCESS_NUM; 76.112 + case VMX_CONTROL_REG_ACCESS_TYPE_MOV_TO_CR: 76.113 + gp = exit_qualification & VMX_CONTROL_REG_ACCESS_GPR; 76.114 + cr = exit_qualification & VMX_CONTROL_REG_ACCESS_NUM; 76.115 return mov_to_cr(gp, cr, regs); 76.116 - case TYPE_MOV_FROM_CR: 76.117 - gp = exit_qualification & CONTROL_REG_ACCESS_REG; 76.118 - cr = exit_qualification & CONTROL_REG_ACCESS_NUM; 76.119 + case VMX_CONTROL_REG_ACCESS_TYPE_MOV_FROM_CR: 76.120 + gp = exit_qualification & VMX_CONTROL_REG_ACCESS_GPR; 76.121 + cr = exit_qualification & VMX_CONTROL_REG_ACCESS_NUM; 76.122 mov_from_cr(cr, gp, regs); 76.123 break; 76.124 - case TYPE_CLTS: 76.125 + case VMX_CONTROL_REG_ACCESS_TYPE_CLTS: 76.126 v->arch.hvm_vcpu.guest_cr[0] &= ~X86_CR0_TS; 76.127 vmx_update_guest_cr(v, 0); 76.128 HVMTRACE_0D(CLTS, current); 76.129 break; 76.130 - case TYPE_LMSW: 76.131 + case VMX_CONTROL_REG_ACCESS_TYPE_LMSW: 76.132 value = v->arch.hvm_vcpu.guest_cr[0]; 76.133 - value = (value & ~0xF) | 76.134 - (((exit_qualification & LMSW_SOURCE_DATA) >> 16) & 0xF); 76.135 + value = (value & ~0xFFFF) | ((exit_qualification >> 16) & 0xFFFF); 76.136 HVMTRACE_1D(LMSW, current, value); 76.137 return !hvm_set_cr0(value); 76.138 default:
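The renamed VMX_CONTROL_REG_ACCESS_* macros decode the CR-access exit qualification. Their values live in a header outside this hunk; a sketch of the layout they imply, per the Intel SDM (stated as background, not taken from this changeset):

    /* Exit qualification for control-register accesses:
     *   bits  3:0  - control register number              (..._NUM)
     *   bits  5:4  - access type: 0 MOV to CR, 1 MOV from CR,
     *                2 CLTS, 3 LMSW                       (..._TYPE)
     *   bits 11:8  - GPR operand for the MOV forms        (..._GPR)
     *   bits 31:16 - LMSW source data (hence the '>> 16' above)
     */
    static inline unsigned int cr_num(unsigned long q)   { return q & 0xf; }
    static inline unsigned int cr_type(unsigned long q)  { return (q >> 4) & 3; }
    static inline unsigned int cr_gpr(unsigned long q)   { return (q >> 8) & 0xf; }
    static inline unsigned int lmsw_src(unsigned long q) { return (q >> 16) & 0xffff; }

Note the LMSW case also stops truncating the source operand to its low four bits: all 16 bits now reach hvm_set_cr0(), which performs the validation.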
77.1 --- a/xen/arch/x86/hvm/vmx/x86_32/exits.S Tue Apr 01 10:30:57 2008 -0600 77.2 +++ b/xen/arch/x86/hvm/vmx/x86_32/exits.S Tue Apr 01 11:29:03 2008 -0600 77.3 @@ -60,6 +60,7 @@ 77.4 ALIGN 77.5 ENTRY(vmx_asm_vmexit_handler) 77.6 HVM_SAVE_ALL_NOSEGREGS 77.7 + GET_CURRENT(%ebx) 77.8 77.9 movl $GUEST_RIP,%eax 77.10 VMREAD(UREGS_eip) 77.11 @@ -68,6 +69,9 @@ ENTRY(vmx_asm_vmexit_handler) 77.12 movl $GUEST_RFLAGS,%eax 77.13 VMREAD(UREGS_eflags) 77.14 77.15 + movl %cr2,%eax 77.16 + movl %eax,VCPU_hvm_guest_cr2(%ebx) 77.17 + 77.18 #ifndef NDEBUG 77.19 movw $0xbeef,%ax 77.20 movw %ax,UREGS_error_code(%esp)
78.1 --- a/xen/arch/x86/hvm/vmx/x86_64/exits.S Tue Apr 01 10:30:57 2008 -0600 78.2 +++ b/xen/arch/x86/hvm/vmx/x86_64/exits.S Tue Apr 01 11:29:03 2008 -0600 78.3 @@ -76,6 +76,7 @@ 78.4 ALIGN 78.5 ENTRY(vmx_asm_vmexit_handler) 78.6 HVM_SAVE_ALL_NOSEGREGS 78.7 + GET_CURRENT(%rbx) 78.8 78.9 leaq UREGS_rip(%rsp),%rdi 78.10 movl $GUEST_RIP,%eax 78.11 @@ -86,6 +87,9 @@ ENTRY(vmx_asm_vmexit_handler) 78.12 movl $GUEST_RFLAGS,%eax 78.13 VMREAD(UREGS_eflags) 78.14 78.15 + movq %cr2,%rax 78.16 + movq %rax,VCPU_hvm_guest_cr2(%rbx) 78.17 + 78.18 #ifndef NDEBUG 78.19 movw $0xbeef,%ax 78.20 movw %ax,UREGS_error_code(%rsp)
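Both exit stubs now latch the guest's %cr2 into the vcpu structure immediately after saving registers: any page fault Xen itself takes later (for instance while copying guest memory) would overwrite %cr2. A toy C equivalent, assuming the VCPU_hvm_guest_cr2 offset names arch.hvm_vcpu.guest_cr[2] (consistent with the vmx.c hunk above, which reads guest_cr[2] when injecting a #PF):

    struct toy_vcpu { unsigned long guest_cr[5]; };

    static inline void save_guest_cr2(struct toy_vcpu *v)
    {
        unsigned long cr2;
        asm volatile ( "mov %%cr2,%0" : "=r" (cr2) );
        v->guest_cr[2] = cr2;  /* must precede any faulting hypervisor code */
    }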
79.1 --- a/xen/arch/x86/mm.c Tue Apr 01 10:30:57 2008 -0600 79.2 +++ b/xen/arch/x86/mm.c Tue Apr 01 11:29:03 2008 -0600 79.3 @@ -2114,14 +2114,14 @@ static int set_foreigndom(domid_t domid) 79.4 info->foreign = rcu_lock_domain(dom_xen); 79.5 break; 79.6 default: 79.7 - e = rcu_lock_domain_by_id(domid); 79.8 - if ( e == NULL ) 79.9 + if ( (e = rcu_lock_domain_by_id(domid)) == NULL ) 79.10 { 79.11 MEM_LOG("Unknown domain '%u'", domid); 79.12 okay = 0; 79.13 break; 79.14 } 79.15 - if (!IS_PRIV_FOR(d, e)) { 79.16 + if ( !IS_PRIV_FOR(d, e) ) 79.17 + { 79.18 MEM_LOG("Cannot set foreign dom"); 79.19 okay = 0; 79.20 rcu_unlock_domain(e); 79.21 @@ -3259,12 +3259,15 @@ long arch_memory_op(int op, XEN_GUEST_HA 79.22 return -EFAULT; 79.23 79.24 if ( xatp.domid == DOMID_SELF ) 79.25 + { 79.26 d = rcu_lock_current_domain(); 79.27 - else { 79.28 - d = rcu_lock_domain_by_id(xatp.domid); 79.29 - if ( d == NULL ) 79.30 + } 79.31 + else 79.32 + { 79.33 + if ( (d = rcu_lock_domain_by_id(xatp.domid)) == NULL ) 79.34 return -ESRCH; 79.35 - if ( !IS_PRIV_FOR(current->domain, d) ) { 79.36 + if ( !IS_PRIV_FOR(current->domain, d) ) 79.37 + { 79.38 rcu_unlock_domain(d); 79.39 return -EPERM; 79.40 } 79.41 @@ -3355,12 +3358,15 @@ long arch_memory_op(int op, XEN_GUEST_HA 79.42 return -EINVAL; 79.43 79.44 if ( fmap.domid == DOMID_SELF ) 79.45 + { 79.46 d = rcu_lock_current_domain(); 79.47 - else { 79.48 - d = rcu_lock_domain_by_id(fmap.domid); 79.49 - if ( d == NULL ) 79.50 + } 79.51 + else 79.52 + { 79.53 + if ( (d = rcu_lock_domain_by_id(fmap.domid)) == NULL ) 79.54 return -ESRCH; 79.55 - if ( !IS_PRIV_FOR(current->domain, d) ) { 79.56 + if ( !IS_PRIV_FOR(current->domain, d) ) 79.57 + { 79.58 rcu_unlock_domain(d); 79.59 return -EPERM; 79.60 }
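The mm.c hunks restyle a pattern that now appears in set_foreigndom() and twice in arch_memory_op(): lock DOMID_SELF directly, otherwise look the domain up and check privilege before proceeding. A sketch of that shared idiom as a helper (rcu_lock_target_domain_by_id() is hypothetical here, not part of this changeset; the calls it wraps are the ones used above):

    static int rcu_lock_target_domain_by_id(domid_t domid, struct domain **d)
    {
        if ( domid == DOMID_SELF )
        {
            *d = rcu_lock_current_domain();
            return 0;
        }
        if ( (*d = rcu_lock_domain_by_id(domid)) == NULL )
            return -ESRCH;
        if ( !IS_PRIV_FOR(current->domain, *d) )
        {
            rcu_unlock_domain(*d);
            return -EPERM;
        }
        return 0;
    }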
80.1 --- a/xen/arch/x86/mm/shadow/common.c Tue Apr 01 10:30:57 2008 -0600 80.2 +++ b/xen/arch/x86/mm/shadow/common.c Tue Apr 01 11:29:03 2008 -0600 80.3 @@ -152,9 +152,9 @@ hvm_read(enum x86_segment seg, 80.4 *val = 0; 80.5 80.6 if ( access_type == hvm_access_insn_fetch ) 80.7 - rc = hvm_fetch_from_guest_virt(val, addr, bytes); 80.8 + rc = hvm_fetch_from_guest_virt(val, addr, bytes, 0); 80.9 else 80.10 - rc = hvm_copy_from_guest_virt(val, addr, bytes); 80.11 + rc = hvm_copy_from_guest_virt(val, addr, bytes, 0); 80.12 80.13 switch ( rc ) 80.14 { 80.15 @@ -416,7 +416,7 @@ struct x86_emulate_ops *shadow_init_emul 80.16 x86_seg_cs, regs->eip, sizeof(sh_ctxt->insn_buf), 80.17 hvm_access_insn_fetch, sh_ctxt, &addr) && 80.18 !hvm_fetch_from_guest_virt_nofault( 80.19 - sh_ctxt->insn_buf, addr, sizeof(sh_ctxt->insn_buf))) 80.20 + sh_ctxt->insn_buf, addr, sizeof(sh_ctxt->insn_buf), 0)) 80.21 ? sizeof(sh_ctxt->insn_buf) : 0; 80.22 80.23 return &hvm_shadow_emulator_ops; 80.24 @@ -444,7 +444,7 @@ void shadow_continue_emulation(struct sh 80.25 x86_seg_cs, regs->eip, sizeof(sh_ctxt->insn_buf), 80.26 hvm_access_insn_fetch, sh_ctxt, &addr) && 80.27 !hvm_fetch_from_guest_virt_nofault( 80.28 - sh_ctxt->insn_buf, addr, sizeof(sh_ctxt->insn_buf))) 80.29 + sh_ctxt->insn_buf, addr, sizeof(sh_ctxt->insn_buf), 0)) 80.30 ? sizeof(sh_ctxt->insn_buf) : 0; 80.31 sh_ctxt->insn_buf_eip = regs->eip; 80.32 }
81.1 --- a/xen/arch/x86/mm/shadow/multi.c Tue Apr 01 10:30:57 2008 -0600 81.2 +++ b/xen/arch/x86/mm/shadow/multi.c Tue Apr 01 11:29:03 2008 -0600 81.3 @@ -2881,7 +2881,8 @@ static int sh_page_fault(struct vcpu *v, 81.4 perfc_incr(shadow_fault_fast_mmio); 81.5 SHADOW_PRINTK("fast path mmio %#"PRIpaddr"\n", gpa); 81.6 reset_early_unshadow(v); 81.7 - return handle_mmio() ? EXCRET_fault_fixed : 0; 81.8 + return (handle_mmio_with_translation(va, gpa >> PAGE_SHIFT) 81.9 + ? EXCRET_fault_fixed : 0); 81.10 } 81.11 else 81.12 { 81.13 @@ -3199,7 +3200,8 @@ static int sh_page_fault(struct vcpu *v, 81.14 shadow_audit_tables(v); 81.15 reset_early_unshadow(v); 81.16 shadow_unlock(d); 81.17 - return handle_mmio() ? EXCRET_fault_fixed : 0; 81.18 + return (handle_mmio_with_translation(va, gpa >> PAGE_SHIFT) 81.19 + ? EXCRET_fault_fixed : 0); 81.20 81.21 not_a_shadow_fault: 81.22 sh_audit_gw(v, &gw);
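sh_page_fault() has already walked the guest pagetables by the time it knows the access is MMIO, so it now hands the faulting va and its translated gpfn to the emulator through handle_mmio_with_translation(); the emulate.c read/write paths earlier in this changeset consume the cached pair. A self-contained sketch of the two halves:

    #include <stdint.h>
    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)
    #define PAGE_MASK  (~(PAGE_SIZE - 1))
    typedef uint64_t paddr_t;

    static unsigned long mmio_gva;   /* page-aligned gva of current MMIO op */
    static unsigned long mmio_gpfn;  /* its already-translated frame number */

    /* Shadow fault handler side: the walk has just been done. */
    static void cache_translation(unsigned long gva, unsigned long gpfn)
    {
        mmio_gva  = gva & PAGE_MASK;
        mmio_gpfn = gpfn;
    }

    /* Emulator side: reuse the translation when the access hits it. */
    static int lookup(unsigned long addr, unsigned int bytes, paddr_t *gpa)
    {
        unsigned int off = addr & (PAGE_SIZE - 1);
        if ( !mmio_gva || (mmio_gva != (addr & PAGE_MASK)) )
            return 0;                    /* miss: do a full walk */
        if ( (off + bytes) > PAGE_SIZE )
            return 0;                    /* access crosses the cached page */
        *gpa = ((paddr_t)mmio_gpfn << PAGE_SHIFT) | off;
        return 1;                        /* hit: skip paging_gva_to_gfn() */
    }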
82.1 --- a/xen/arch/x86/sysctl.c Tue Apr 01 10:30:57 2008 -0600 82.2 +++ b/xen/arch/x86/sysctl.c Tue Apr 01 11:29:03 2008 -0600 82.3 @@ -47,18 +47,22 @@ long arch_do_sysctl( 82.4 if ( ret ) 82.5 break; 82.6 82.7 + memset(pi, 0, sizeof(*pi)); 82.8 pi->threads_per_core = 82.9 cpus_weight(cpu_sibling_map[0]); 82.10 pi->cores_per_socket = 82.11 cpus_weight(cpu_core_map[0]) / pi->threads_per_core; 82.12 pi->nr_cpus = (u32)num_online_cpus(); 82.13 pi->nr_nodes = num_online_nodes(); 82.14 - pi->total_pages = total_pages; 82.15 - pi->free_pages = avail_domheap_pages(); 82.16 - pi->scrub_pages = avail_scrub_pages(); 82.17 - pi->cpu_khz = cpu_khz; 82.18 - memset(pi->hw_cap, 0, sizeof(pi->hw_cap)); 82.19 + pi->total_pages = total_pages; 82.20 + pi->free_pages = avail_domheap_pages(); 82.21 + pi->scrub_pages = avail_scrub_pages(); 82.22 + pi->cpu_khz = cpu_khz; 82.23 memcpy(pi->hw_cap, boot_cpu_data.x86_capability, NCAPINTS*4); 82.24 + if ( hvm_enabled ) 82.25 + pi->capabilities |= XEN_SYSCTL_PHYSCAP_hvm; 82.26 + if ( iommu_enabled ) 82.27 + pi->capabilities |= XEN_SYSCTL_PHYSCAP_hvm_directio; 82.28 82.29 max_array_ent = pi->max_cpu_id; 82.30 pi->max_cpu_id = last_cpu(cpu_online_map);
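This is the hypervisor half of ChangeLog entry 17336: the physinfo buffer is zeroed before being filled, and capability bits now report HVM and IOMMU ("directio") support. A tool-side consumer might look like the following sketch (the libxc call shapes are assumptions based on contemporary usage; only the XEN_SYSCTL_PHYSCAP_* names come from this hunk):

    #include <stdio.h>
    #include <xenctrl.h>   /* assumed to provide xc_physinfo() */

    int main(void)
    {
        int xc = xc_interface_open();   /* assumed: returns a handle/fd */
        xc_physinfo_t pi = { 0 };

        if ( (xc >= 0) && (xc_physinfo(xc, &pi) == 0) )
            printf("hvm: %d, hvm+directio: %d\n",
                   !!(pi.capabilities & XEN_SYSCTL_PHYSCAP_hvm),
                   !!(pi.capabilities & XEN_SYSCTL_PHYSCAP_hvm_directio));
        return 0;
    }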
83.1 --- a/xen/arch/x86/x86_emulate.c Tue Apr 01 10:30:57 2008 -0600 83.2 +++ b/xen/arch/x86/x86_emulate.c Tue Apr 01 11:29:03 2008 -0600 83.3 @@ -1,484 +1,18 @@ 83.4 /****************************************************************************** 83.5 * x86_emulate.c 83.6 * 83.7 - * Generic x86 (32-bit and 64-bit) instruction decoder and emulator. 83.8 - * 83.9 - * Copyright (c) 2005-2007 Keir Fraser 83.10 - * Copyright (c) 2005-2007 XenSource Inc. 83.11 + * Wrapper for generic x86 instruction decoder and emulator. 83.12 * 83.13 - * This program is free software; you can redistribute it and/or modify 83.14 - * it under the terms of the GNU General Public License as published by 83.15 - * the Free Software Foundation; either version 2 of the License, or 83.16 - * (at your option) any later version. 83.17 + * Copyright (c) 2008, Citrix Systems, Inc. 83.18 * 83.19 - * This program is distributed in the hope that it will be useful, 83.20 - * but WITHOUT ANY WARRANTY; without even the implied warranty of 83.21 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 83.22 - * GNU General Public License for more details. 83.23 - * 83.24 - * You should have received a copy of the GNU General Public License 83.25 - * along with this program; if not, write to the Free Software 83.26 - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 83.27 + * Authors: 83.28 + * Keir Fraser <keir.fraser@citrix.com> 83.29 */ 83.30 83.31 -#ifndef __XEN__ 83.32 -#include <stddef.h> 83.33 -#include <stdint.h> 83.34 -#include <string.h> 83.35 -#include <public/xen.h> 83.36 -#else 83.37 -#include <xen/config.h> 83.38 -#include <xen/types.h> 83.39 -#include <xen/lib.h> 83.40 -#include <asm/regs.h> 83.41 -#undef cmpxchg 83.42 -#endif 83.43 -#include <asm-x86/x86_emulate.h> 83.44 - 83.45 -/* Operand sizes: 8-bit operands or specified/overridden size. */ 83.46 -#define ByteOp (1<<0) /* 8-bit operands. */ 83.47 -/* Destination operand type. */ 83.48 -#define DstBitBase (0<<1) /* Memory operand, bit string. */ 83.49 -#define ImplicitOps (1<<1) /* Implicit in opcode. No generic decode. */ 83.50 -#define DstReg (2<<1) /* Register operand. */ 83.51 -#define DstMem (3<<1) /* Memory operand. */ 83.52 -#define DstMask (3<<1) 83.53 -/* Source operand type. */ 83.54 -#define SrcNone (0<<3) /* No source operand. */ 83.55 -#define SrcImplicit (0<<3) /* Source operand is implicit in the opcode. */ 83.56 -#define SrcReg (1<<3) /* Register operand. */ 83.57 -#define SrcMem (2<<3) /* Memory operand. */ 83.58 -#define SrcMem16 (3<<3) /* Memory operand (16-bit). */ 83.59 -#define SrcImm (4<<3) /* Immediate operand. */ 83.60 -#define SrcImmByte (5<<3) /* 8-bit sign-extended immediate operand. */ 83.61 -#define SrcMask (7<<3) 83.62 -/* Generic ModRM decode. */ 83.63 -#define ModRM (1<<6) 83.64 -/* Destination is only written; never read. 
*/ 83.65 -#define Mov (1<<7) 83.66 - 83.67 -static uint8_t opcode_table[256] = { 83.68 - /* 0x00 - 0x07 */ 83.69 - ByteOp|DstMem|SrcReg|ModRM, DstMem|SrcReg|ModRM, 83.70 - ByteOp|DstReg|SrcMem|ModRM, DstReg|SrcMem|ModRM, 83.71 - ByteOp|DstReg|SrcImm, DstReg|SrcImm, ImplicitOps, ImplicitOps, 83.72 - /* 0x08 - 0x0F */ 83.73 - ByteOp|DstMem|SrcReg|ModRM, DstMem|SrcReg|ModRM, 83.74 - ByteOp|DstReg|SrcMem|ModRM, DstReg|SrcMem|ModRM, 83.75 - ByteOp|DstReg|SrcImm, DstReg|SrcImm, ImplicitOps, 0, 83.76 - /* 0x10 - 0x17 */ 83.77 - ByteOp|DstMem|SrcReg|ModRM, DstMem|SrcReg|ModRM, 83.78 - ByteOp|DstReg|SrcMem|ModRM, DstReg|SrcMem|ModRM, 83.79 - ByteOp|DstReg|SrcImm, DstReg|SrcImm, ImplicitOps, ImplicitOps, 83.80 - /* 0x18 - 0x1F */ 83.81 - ByteOp|DstMem|SrcReg|ModRM, DstMem|SrcReg|ModRM, 83.82 - ByteOp|DstReg|SrcMem|ModRM, DstReg|SrcMem|ModRM, 83.83 - ByteOp|DstReg|SrcImm, DstReg|SrcImm, ImplicitOps, ImplicitOps, 83.84 - /* 0x20 - 0x27 */ 83.85 - ByteOp|DstMem|SrcReg|ModRM, DstMem|SrcReg|ModRM, 83.86 - ByteOp|DstReg|SrcMem|ModRM, DstReg|SrcMem|ModRM, 83.87 - ByteOp|DstReg|SrcImm, DstReg|SrcImm, 0, ImplicitOps, 83.88 - /* 0x28 - 0x2F */ 83.89 - ByteOp|DstMem|SrcReg|ModRM, DstMem|SrcReg|ModRM, 83.90 - ByteOp|DstReg|SrcMem|ModRM, DstReg|SrcMem|ModRM, 83.91 - ByteOp|DstReg|SrcImm, DstReg|SrcImm, 0, ImplicitOps, 83.92 - /* 0x30 - 0x37 */ 83.93 - ByteOp|DstMem|SrcReg|ModRM, DstMem|SrcReg|ModRM, 83.94 - ByteOp|DstReg|SrcMem|ModRM, DstReg|SrcMem|ModRM, 83.95 - ByteOp|DstReg|SrcImm, DstReg|SrcImm, 0, ImplicitOps, 83.96 - /* 0x38 - 0x3F */ 83.97 - ByteOp|DstMem|SrcReg|ModRM, DstMem|SrcReg|ModRM, 83.98 - ByteOp|DstReg|SrcMem|ModRM, DstReg|SrcMem|ModRM, 83.99 - ByteOp|DstReg|SrcImm, DstReg|SrcImm, 0, ImplicitOps, 83.100 - /* 0x40 - 0x4F */ 83.101 - ImplicitOps, ImplicitOps, ImplicitOps, ImplicitOps, 83.102 - ImplicitOps, ImplicitOps, ImplicitOps, ImplicitOps, 83.103 - ImplicitOps, ImplicitOps, ImplicitOps, ImplicitOps, 83.104 - ImplicitOps, ImplicitOps, ImplicitOps, ImplicitOps, 83.105 - /* 0x50 - 0x5F */ 83.106 - ImplicitOps|Mov, ImplicitOps|Mov, ImplicitOps|Mov, ImplicitOps|Mov, 83.107 - ImplicitOps|Mov, ImplicitOps|Mov, ImplicitOps|Mov, ImplicitOps|Mov, 83.108 - ImplicitOps|Mov, ImplicitOps|Mov, ImplicitOps|Mov, ImplicitOps|Mov, 83.109 - ImplicitOps|Mov, ImplicitOps|Mov, ImplicitOps|Mov, ImplicitOps|Mov, 83.110 - /* 0x60 - 0x67 */ 83.111 - ImplicitOps, ImplicitOps, DstReg|SrcMem|ModRM, DstReg|SrcMem16|ModRM|Mov, 83.112 - 0, 0, 0, 0, 83.113 - /* 0x68 - 0x6F */ 83.114 - ImplicitOps|Mov, DstReg|SrcImm|ModRM|Mov, 83.115 - ImplicitOps|Mov, DstReg|SrcImmByte|ModRM|Mov, 83.116 - ImplicitOps|Mov, ImplicitOps|Mov, ImplicitOps|Mov, ImplicitOps|Mov, 83.117 - /* 0x70 - 0x77 */ 83.118 - ImplicitOps, ImplicitOps, ImplicitOps, ImplicitOps, 83.119 - ImplicitOps, ImplicitOps, ImplicitOps, ImplicitOps, 83.120 - /* 0x78 - 0x7F */ 83.121 - ImplicitOps, ImplicitOps, ImplicitOps, ImplicitOps, 83.122 - ImplicitOps, ImplicitOps, ImplicitOps, ImplicitOps, 83.123 - /* 0x80 - 0x87 */ 83.124 - ByteOp|DstMem|SrcImm|ModRM, DstMem|SrcImm|ModRM, 83.125 - ByteOp|DstMem|SrcImm|ModRM, DstMem|SrcImmByte|ModRM, 83.126 - ByteOp|DstMem|SrcReg|ModRM, DstMem|SrcReg|ModRM, 83.127 - ByteOp|DstMem|SrcReg|ModRM, DstMem|SrcReg|ModRM, 83.128 - /* 0x88 - 0x8F */ 83.129 - ByteOp|DstMem|SrcReg|ModRM|Mov, DstMem|SrcReg|ModRM|Mov, 83.130 - ByteOp|DstReg|SrcMem|ModRM|Mov, DstReg|SrcMem|ModRM|Mov, 83.131 - DstMem|SrcReg|ModRM|Mov, DstReg|SrcNone|ModRM, 83.132 - DstReg|SrcMem|ModRM|Mov, DstMem|SrcNone|ModRM|Mov, 83.133 - /* 0x90 - 0x97 */ 83.134 - ImplicitOps, 
ImplicitOps, ImplicitOps, ImplicitOps, 83.135 - ImplicitOps, ImplicitOps, ImplicitOps, ImplicitOps, 83.136 - /* 0x98 - 0x9F */ 83.137 - ImplicitOps, ImplicitOps, ImplicitOps, ImplicitOps, 83.138 - ImplicitOps, ImplicitOps, ImplicitOps, ImplicitOps, 83.139 - /* 0xA0 - 0xA7 */ 83.140 - ByteOp|ImplicitOps|Mov, ImplicitOps|Mov, 83.141 - ByteOp|ImplicitOps|Mov, ImplicitOps|Mov, 83.142 - ByteOp|ImplicitOps|Mov, ImplicitOps|Mov, 83.143 - ByteOp|ImplicitOps, ImplicitOps, 83.144 - /* 0xA8 - 0xAF */ 83.145 - ByteOp|DstReg|SrcImm, DstReg|SrcImm, 83.146 - ByteOp|ImplicitOps|Mov, ImplicitOps|Mov, 83.147 - ByteOp|ImplicitOps|Mov, ImplicitOps|Mov, 83.148 - ByteOp|ImplicitOps, ImplicitOps, 83.149 - /* 0xB0 - 0xB7 */ 83.150 - ByteOp|DstReg|SrcImm|Mov, ByteOp|DstReg|SrcImm|Mov, 83.151 - ByteOp|DstReg|SrcImm|Mov, ByteOp|DstReg|SrcImm|Mov, 83.152 - ByteOp|DstReg|SrcImm|Mov, ByteOp|DstReg|SrcImm|Mov, 83.153 - ByteOp|DstReg|SrcImm|Mov, ByteOp|DstReg|SrcImm|Mov, 83.154 - /* 0xB8 - 0xBF */ 83.155 - DstReg|SrcImm|Mov, DstReg|SrcImm|Mov, DstReg|SrcImm|Mov, DstReg|SrcImm|Mov, 83.156 - DstReg|SrcImm|Mov, DstReg|SrcImm|Mov, DstReg|SrcImm|Mov, DstReg|SrcImm|Mov, 83.157 - /* 0xC0 - 0xC7 */ 83.158 - ByteOp|DstMem|SrcImm|ModRM, DstMem|SrcImmByte|ModRM, 83.159 - ImplicitOps, ImplicitOps, 83.160 - DstReg|SrcMem|ModRM|Mov, DstReg|SrcMem|ModRM|Mov, 83.161 - ByteOp|DstMem|SrcImm|ModRM|Mov, DstMem|SrcImm|ModRM|Mov, 83.162 - /* 0xC8 - 0xCF */ 83.163 - ImplicitOps, ImplicitOps, ImplicitOps, ImplicitOps, 83.164 - ImplicitOps, ImplicitOps, ImplicitOps, ImplicitOps, 83.165 - /* 0xD0 - 0xD7 */ 83.166 - ByteOp|DstMem|SrcImplicit|ModRM, DstMem|SrcImplicit|ModRM, 83.167 - ByteOp|DstMem|SrcImplicit|ModRM, DstMem|SrcImplicit|ModRM, 83.168 - ImplicitOps, ImplicitOps, ImplicitOps, ImplicitOps, 83.169 - /* 0xD8 - 0xDF */ 83.170 - 0, ImplicitOps|ModRM|Mov, 0, ImplicitOps|ModRM|Mov, 83.171 - 0, ImplicitOps|ModRM|Mov, ImplicitOps|ModRM|Mov, ImplicitOps|ModRM|Mov, 83.172 - /* 0xE0 - 0xE7 */ 83.173 - ImplicitOps, ImplicitOps, ImplicitOps, ImplicitOps, 83.174 - ImplicitOps, ImplicitOps, ImplicitOps, ImplicitOps, 83.175 - /* 0xE8 - 0xEF */ 83.176 - ImplicitOps, ImplicitOps, ImplicitOps, ImplicitOps, 83.177 - ImplicitOps, ImplicitOps, ImplicitOps, ImplicitOps, 83.178 - /* 0xF0 - 0xF7 */ 83.179 - 0, ImplicitOps, 0, 0, 83.180 - ImplicitOps, ImplicitOps, 83.181 - ByteOp|DstMem|SrcNone|ModRM, DstMem|SrcNone|ModRM, 83.182 - /* 0xF8 - 0xFF */ 83.183 - ImplicitOps, ImplicitOps, ImplicitOps, ImplicitOps, 83.184 - ImplicitOps, ImplicitOps, ByteOp|DstMem|SrcNone|ModRM, DstMem|SrcNone|ModRM 83.185 -}; 83.186 +#include <asm/x86_emulate.h> 83.187 83.188 -static uint8_t twobyte_table[256] = { 83.189 - /* 0x00 - 0x07 */ 83.190 - 0, ImplicitOps|ModRM, 0, 0, 0, 0, ImplicitOps, 0, 83.191 - /* 0x08 - 0x0F */ 83.192 - ImplicitOps, ImplicitOps, 0, 0, 0, ImplicitOps|ModRM, 0, 0, 83.193 - /* 0x10 - 0x17 */ 83.194 - 0, 0, 0, 0, 0, 0, 0, 0, 83.195 - /* 0x18 - 0x1F */ 83.196 - ImplicitOps|ModRM, ImplicitOps|ModRM, ImplicitOps|ModRM, ImplicitOps|ModRM, 83.197 - ImplicitOps|ModRM, ImplicitOps|ModRM, ImplicitOps|ModRM, ImplicitOps|ModRM, 83.198 - /* 0x20 - 0x27 */ 83.199 - ImplicitOps|ModRM, ImplicitOps|ModRM, ImplicitOps|ModRM, ImplicitOps|ModRM, 83.200 - 0, 0, 0, 0, 83.201 - /* 0x28 - 0x2F */ 83.202 - 0, 0, 0, 0, 0, 0, 0, 0, 83.203 - /* 0x30 - 0x37 */ 83.204 - ImplicitOps, ImplicitOps, ImplicitOps, 0, 0, 0, 0, 0, 83.205 - /* 0x38 - 0x3F */ 83.206 - 0, 0, 0, 0, 0, 0, 0, 0, 83.207 - /* 0x40 - 0x47 */ 83.208 - DstReg|SrcMem|ModRM|Mov, DstReg|SrcMem|ModRM|Mov, 83.209 - 
DstReg|SrcMem|ModRM|Mov, DstReg|SrcMem|ModRM|Mov, 83.210 - DstReg|SrcMem|ModRM|Mov, DstReg|SrcMem|ModRM|Mov, 83.211 - DstReg|SrcMem|ModRM|Mov, DstReg|SrcMem|ModRM|Mov, 83.212 - /* 0x48 - 0x4F */ 83.213 - DstReg|SrcMem|ModRM|Mov, DstReg|SrcMem|ModRM|Mov, 83.214 - DstReg|SrcMem|ModRM|Mov, DstReg|SrcMem|ModRM|Mov, 83.215 - DstReg|SrcMem|ModRM|Mov, DstReg|SrcMem|ModRM|Mov, 83.216 - DstReg|SrcMem|ModRM|Mov, DstReg|SrcMem|ModRM|Mov, 83.217 - /* 0x50 - 0x5F */ 83.218 - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 83.219 - /* 0x60 - 0x6F */ 83.220 - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 83.221 - /* 0x70 - 0x7F */ 83.222 - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 83.223 - /* 0x80 - 0x87 */ 83.224 - ImplicitOps, ImplicitOps, ImplicitOps, ImplicitOps, 83.225 - ImplicitOps, ImplicitOps, ImplicitOps, ImplicitOps, 83.226 - /* 0x88 - 0x8F */ 83.227 - ImplicitOps, ImplicitOps, ImplicitOps, ImplicitOps, 83.228 - ImplicitOps, ImplicitOps, ImplicitOps, ImplicitOps, 83.229 - /* 0x90 - 0x97 */ 83.230 - ByteOp|DstMem|SrcNone|ModRM|Mov, ByteOp|DstMem|SrcNone|ModRM|Mov, 83.231 - ByteOp|DstMem|SrcNone|ModRM|Mov, ByteOp|DstMem|SrcNone|ModRM|Mov, 83.232 - ByteOp|DstMem|SrcNone|ModRM|Mov, ByteOp|DstMem|SrcNone|ModRM|Mov, 83.233 - ByteOp|DstMem|SrcNone|ModRM|Mov, ByteOp|DstMem|SrcNone|ModRM|Mov, 83.234 - /* 0x98 - 0x9F */ 83.235 - ByteOp|DstMem|SrcNone|ModRM|Mov, ByteOp|DstMem|SrcNone|ModRM|Mov, 83.236 - ByteOp|DstMem|SrcNone|ModRM|Mov, ByteOp|DstMem|SrcNone|ModRM|Mov, 83.237 - ByteOp|DstMem|SrcNone|ModRM|Mov, ByteOp|DstMem|SrcNone|ModRM|Mov, 83.238 - ByteOp|DstMem|SrcNone|ModRM|Mov, ByteOp|DstMem|SrcNone|ModRM|Mov, 83.239 - /* 0xA0 - 0xA7 */ 83.240 - ImplicitOps, ImplicitOps, ImplicitOps, DstBitBase|SrcReg|ModRM, 83.241 - DstMem|SrcReg|ModRM, DstMem|SrcReg|ModRM, 0, 0, 83.242 - /* 0xA8 - 0xAF */ 83.243 - ImplicitOps, ImplicitOps, 0, DstBitBase|SrcReg|ModRM, 83.244 - DstMem|SrcReg|ModRM, DstMem|SrcReg|ModRM, 0, DstReg|SrcMem|ModRM, 83.245 - /* 0xB0 - 0xB7 */ 83.246 - ByteOp|DstMem|SrcReg|ModRM, DstMem|SrcReg|ModRM, 83.247 - DstReg|SrcMem|ModRM|Mov, DstBitBase|SrcReg|ModRM, 83.248 - DstReg|SrcMem|ModRM|Mov, DstReg|SrcMem|ModRM|Mov, 83.249 - ByteOp|DstReg|SrcMem|ModRM|Mov, DstReg|SrcMem16|ModRM|Mov, 83.250 - /* 0xB8 - 0xBF */ 83.251 - 0, 0, DstBitBase|SrcImmByte|ModRM, DstBitBase|SrcReg|ModRM, 83.252 - DstReg|SrcMem|ModRM, DstReg|SrcMem|ModRM, 83.253 - ByteOp|DstReg|SrcMem|ModRM|Mov, DstReg|SrcMem16|ModRM|Mov, 83.254 - /* 0xC0 - 0xC7 */ 83.255 - ByteOp|DstMem|SrcReg|ModRM, DstMem|SrcReg|ModRM, 0, 0, 83.256 - 0, 0, 0, ImplicitOps|ModRM, 83.257 - /* 0xC8 - 0xCF */ 83.258 - ImplicitOps, ImplicitOps, ImplicitOps, ImplicitOps, 83.259 - ImplicitOps, ImplicitOps, ImplicitOps, ImplicitOps, 83.260 - /* 0xD0 - 0xDF */ 83.261 - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 83.262 - /* 0xE0 - 0xEF */ 83.263 - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 83.264 - /* 0xF0 - 0xFF */ 83.265 - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 83.266 -}; 83.267 - 83.268 -/* Type, address-of, and value of an instruction's operand. */ 83.269 -struct operand { 83.270 - enum { OP_REG, OP_MEM, OP_IMM, OP_NONE } type; 83.271 - unsigned int bytes; 83.272 - unsigned long val, orig_val; 83.273 - union { 83.274 - /* OP_REG: Pointer to register field. */ 83.275 - unsigned long *reg; 83.276 - /* OP_MEM: Segment and offset. */ 83.277 - struct { 83.278 - enum x86_segment seg; 83.279 - unsigned long off; 83.280 - } mem; 83.281 - }; 83.282 -}; 83.283 - 83.284 -/* MSRs. 
*/ 83.285 -#define MSR_TSC 0x10 83.286 - 83.287 -/* Control register flags. */ 83.288 -#define CR0_PE (1<<0) 83.289 -#define CR4_TSD (1<<2) 83.290 - 83.291 -/* EFLAGS bit definitions. */ 83.292 -#define EFLG_VIP (1<<20) 83.293 -#define EFLG_VIF (1<<19) 83.294 -#define EFLG_AC (1<<18) 83.295 -#define EFLG_VM (1<<17) 83.296 -#define EFLG_RF (1<<16) 83.297 -#define EFLG_NT (1<<14) 83.298 -#define EFLG_IOPL (3<<12) 83.299 -#define EFLG_OF (1<<11) 83.300 -#define EFLG_DF (1<<10) 83.301 -#define EFLG_IF (1<<9) 83.302 -#define EFLG_TF (1<<8) 83.303 -#define EFLG_SF (1<<7) 83.304 -#define EFLG_ZF (1<<6) 83.305 -#define EFLG_AF (1<<4) 83.306 -#define EFLG_PF (1<<2) 83.307 -#define EFLG_CF (1<<0) 83.308 - 83.309 -/* Exception definitions. */ 83.310 -#define EXC_DE 0 83.311 -#define EXC_DB 1 83.312 -#define EXC_BP 3 83.313 -#define EXC_OF 4 83.314 -#define EXC_BR 5 83.315 -#define EXC_UD 6 83.316 -#define EXC_TS 10 83.317 -#define EXC_NP 11 83.318 -#define EXC_SS 12 83.319 -#define EXC_GP 13 83.320 -#define EXC_PF 14 83.321 -#define EXC_MF 16 83.322 - 83.323 -/* 83.324 - * Instruction emulation: 83.325 - * Most instructions are emulated directly via a fragment of inline assembly 83.326 - * code. This allows us to save/restore EFLAGS and thus very easily pick up 83.327 - * any modified flags. 83.328 - */ 83.329 +#undef cmpxchg 83.330 83.331 -#if defined(__x86_64__) 83.332 -#define _LO32 "k" /* force 32-bit operand */ 83.333 -#define _STK "%%rsp" /* stack pointer */ 83.334 -#define _BYTES_PER_LONG "8" 83.335 -#elif defined(__i386__) 83.336 -#define _LO32 "" /* force 32-bit operand */ 83.337 -#define _STK "%%esp" /* stack pointer */ 83.338 -#define _BYTES_PER_LONG "4" 83.339 -#endif 83.340 - 83.341 -/* 83.342 - * These EFLAGS bits are restored from saved value during emulation, and 83.343 - * any changes are written back to the saved value after emulation. 83.344 - */ 83.345 -#define EFLAGS_MASK (EFLG_OF|EFLG_SF|EFLG_ZF|EFLG_AF|EFLG_PF|EFLG_CF) 83.346 - 83.347 -/* Before executing instruction: restore necessary bits in EFLAGS. */ 83.348 -#define _PRE_EFLAGS(_sav, _msk, _tmp) \ 83.349 -/* EFLAGS = (_sav & _msk) | (EFLAGS & ~_msk); _sav &= ~_msk; */ \ 83.350 -"movl %"_sav",%"_LO32 _tmp"; " \ 83.351 -"push %"_tmp"; " \ 83.352 -"push %"_tmp"; " \ 83.353 -"movl %"_msk",%"_LO32 _tmp"; " \ 83.354 -"andl %"_LO32 _tmp",("_STK"); " \ 83.355 -"pushf; " \ 83.356 -"notl %"_LO32 _tmp"; " \ 83.357 -"andl %"_LO32 _tmp",("_STK"); " \ 83.358 -"andl %"_LO32 _tmp",2*"_BYTES_PER_LONG"("_STK"); " \ 83.359 -"pop %"_tmp"; " \ 83.360 -"orl %"_LO32 _tmp",("_STK"); " \ 83.361 -"popf; " \ 83.362 -"pop %"_sav"; " 83.363 - 83.364 -/* After executing instruction: write-back necessary bits in EFLAGS. */ 83.365 -#define _POST_EFLAGS(_sav, _msk, _tmp) \ 83.366 -/* _sav |= EFLAGS & _msk; */ \ 83.367 -"pushf; " \ 83.368 -"pop %"_tmp"; " \ 83.369 -"andl %"_msk",%"_LO32 _tmp"; " \ 83.370 -"orl %"_LO32 _tmp",%"_sav"; " 83.371 - 83.372 -/* Raw emulation: instruction has two explicit operands. 
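The _PRE_EFLAGS/_POST_EFLAGS pair above splices selected guest flag bits into the host EFLAGS, runs one real instruction, and harvests the flags it produced. Stripped of the masking, the core trick looks like this (a standalone illustration, GCC inline assembly and x86-64 assumed):

```c
/* Execute a real ADD and capture the arithmetic flags it sets.  The
 * macros above additionally restore the guest's flags beforehand and
 * mask the harvested value with EFLAGS_MASK before writing it back. */
static unsigned long add_with_flags(unsigned long *dst, unsigned long src)
{
    unsigned long flags;

    asm volatile ( "add %2, %0\n\t"
                   "pushf\n\t"
                   "pop %1"
                   : "+r" (*dst), "=r" (flags)
                   : "r" (src)
                   : "cc" );
    return flags;
}
```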
*/ 83.373 -#define __emulate_2op_nobyte(_op,_src,_dst,_eflags,_wx,_wy,_lx,_ly,_qx,_qy)\ 83.374 -do{ unsigned long _tmp; \ 83.375 - switch ( (_dst).bytes ) \ 83.376 - { \ 83.377 - case 2: \ 83.378 - asm volatile ( \ 83.379 - _PRE_EFLAGS("0","4","2") \ 83.380 - _op"w %"_wx"3,%1; " \ 83.381 - _POST_EFLAGS("0","4","2") \ 83.382 - : "=m" (_eflags), "=m" ((_dst).val), "=&r" (_tmp) \ 83.383 - : _wy ((_src).val), "i" (EFLAGS_MASK), \ 83.384 - "m" (_eflags), "m" ((_dst).val) ); \ 83.385 - break; \ 83.386 - case 4: \ 83.387 - asm volatile ( \ 83.388 - _PRE_EFLAGS("0","4","2") \ 83.389 - _op"l %"_lx"3,%1; " \ 83.390 - _POST_EFLAGS("0","4","2") \ 83.391 - : "=m" (_eflags), "=m" ((_dst).val), "=&r" (_tmp) \ 83.392 - : _ly ((_src).val), "i" (EFLAGS_MASK), \ 83.393 - "m" (_eflags), "m" ((_dst).val) ); \ 83.394 - break; \ 83.395 - case 8: \ 83.396 - __emulate_2op_8byte(_op, _src, _dst, _eflags, _qx, _qy); \ 83.397 - break; \ 83.398 - } \ 83.399 -} while (0) 83.400 -#define __emulate_2op(_op,_src,_dst,_eflags,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy)\ 83.401 -do{ unsigned long _tmp; \ 83.402 - switch ( (_dst).bytes ) \ 83.403 - { \ 83.404 - case 1: \ 83.405 - asm volatile ( \ 83.406 - _PRE_EFLAGS("0","4","2") \ 83.407 - _op"b %"_bx"3,%1; " \ 83.408 - _POST_EFLAGS("0","4","2") \ 83.409 - : "=m" (_eflags), "=m" ((_dst).val), "=&r" (_tmp) \ 83.410 - : _by ((_src).val), "i" (EFLAGS_MASK), \ 83.411 - "m" (_eflags), "m" ((_dst).val) ); \ 83.412 - break; \ 83.413 - default: \ 83.414 - __emulate_2op_nobyte(_op,_src,_dst,_eflags,_wx,_wy,_lx,_ly,_qx,_qy);\ 83.415 - break; \ 83.416 - } \ 83.417 -} while (0) 83.418 -/* Source operand is byte-sized and may be restricted to just %cl. */ 83.419 -#define emulate_2op_SrcB(_op, _src, _dst, _eflags) \ 83.420 - __emulate_2op(_op, _src, _dst, _eflags, \ 83.421 - "b", "c", "b", "c", "b", "c", "b", "c") 83.422 -/* Source operand is byte, word, long or quad sized. */ 83.423 -#define emulate_2op_SrcV(_op, _src, _dst, _eflags) \ 83.424 - __emulate_2op(_op, _src, _dst, _eflags, \ 83.425 - "b", "q", "w", "r", _LO32, "r", "", "r") 83.426 -/* Source operand is word, long or quad sized. */ 83.427 -#define emulate_2op_SrcV_nobyte(_op, _src, _dst, _eflags) \ 83.428 - __emulate_2op_nobyte(_op, _src, _dst, _eflags, \ 83.429 - "w", "r", _LO32, "r", "", "r") 83.430 - 83.431 -/* Instruction has only one explicit operand (no source operand). 
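Each __emulate_2op expansion above is the same operation instantiated once per operand width (the "b"/"w"/"l"/"q" suffixes), because the guest's operand size is only known at run time. The dispatch itself, in plain C and with flag computation left to the asm helpers:

```c
static void do_add(struct operand *dst, const struct operand *src)
{
    switch ( dst->bytes )            /* guest operand size */
    {
    case 1: dst->val = (uint8_t )(dst->val + src->val); break;
    case 2: dst->val = (uint16_t)(dst->val + src->val); break;
    case 4: dst->val = (uint32_t)(dst->val + src->val); break;
    case 8: dst->val =            dst->val + src->val;  break;
    }
}
```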
*/ 83.432 -#define emulate_1op(_op,_dst,_eflags) \ 83.433 -do{ unsigned long _tmp; \ 83.434 - switch ( (_dst).bytes ) \ 83.435 - { \ 83.436 - case 1: \ 83.437 - asm volatile ( \ 83.438 - _PRE_EFLAGS("0","3","2") \ 83.439 - _op"b %1; " \ 83.440 - _POST_EFLAGS("0","3","2") \ 83.441 - : "=m" (_eflags), "=m" ((_dst).val), "=&r" (_tmp) \ 83.442 - : "i" (EFLAGS_MASK), "m" (_eflags), "m" ((_dst).val) ); \ 83.443 - break; \ 83.444 - case 2: \ 83.445 - asm volatile ( \ 83.446 - _PRE_EFLAGS("0","3","2") \ 83.447 - _op"w %1; " \ 83.448 - _POST_EFLAGS("0","3","2") \ 83.449 - : "=m" (_eflags), "=m" ((_dst).val), "=&r" (_tmp) \ 83.450 - : "i" (EFLAGS_MASK), "m" (_eflags), "m" ((_dst).val) ); \ 83.451 - break; \ 83.452 - case 4: \ 83.453 - asm volatile ( \ 83.454 - _PRE_EFLAGS("0","3","2") \ 83.455 - _op"l %1; " \ 83.456 - _POST_EFLAGS("0","3","2") \ 83.457 - : "=m" (_eflags), "=m" ((_dst).val), "=&r" (_tmp) \ 83.458 - : "i" (EFLAGS_MASK), "m" (_eflags), "m" ((_dst).val) ); \ 83.459 - break; \ 83.460 - case 8: \ 83.461 - __emulate_1op_8byte(_op, _dst, _eflags); \ 83.462 - break; \ 83.463 - } \ 83.464 -} while (0) 83.465 - 83.466 -/* Emulate an instruction with quadword operands (x86/64 only). */ 83.467 -#if defined(__x86_64__) 83.468 -#define __emulate_2op_8byte(_op, _src, _dst, _eflags, _qx, _qy) \ 83.469 -do{ asm volatile ( \ 83.470 - _PRE_EFLAGS("0","4","2") \ 83.471 - _op"q %"_qx"3,%1; " \ 83.472 - _POST_EFLAGS("0","4","2") \ 83.473 - : "=m" (_eflags), "=m" ((_dst).val), "=&r" (_tmp) \ 83.474 - : _qy ((_src).val), "i" (EFLAGS_MASK), \ 83.475 - "m" (_eflags), "m" ((_dst).val) ); \ 83.476 -} while (0) 83.477 -#define __emulate_1op_8byte(_op, _dst, _eflags) \ 83.478 -do{ asm volatile ( \ 83.479 - _PRE_EFLAGS("0","3","2") \ 83.480 - _op"q %1; " \ 83.481 - _POST_EFLAGS("0","3","2") \ 83.482 - : "=m" (_eflags), "=m" ((_dst).val), "=&r" (_tmp) \ 83.483 - : "i" (EFLAGS_MASK), "m" (_eflags), "m" ((_dst).val) ); \ 83.484 -} while (0) 83.485 -#elif defined(__i386__) 83.486 -#define __emulate_2op_8byte(_op, _src, _dst, _eflags, _qx, _qy) 83.487 -#define __emulate_1op_8byte(_op, _dst, _eflags) 83.488 -#endif /* __i386__ */ 83.489 - 83.490 -#ifdef __XEN__ 83.491 #define __emulate_fpu_insn(_op) \ 83.492 do{ int _exn; \ 83.493 asm volatile ( \ 83.494 @@ -495,2935 +29,5 @@ do{ int _exn; 83.495 : "=r" (_exn) : "0" (0) ); \ 83.496 generate_exception_if(_exn, EXC_MF, -1); \ 83.497 } while (0) 83.498 -#else 83.499 -#define __emulate_fpu_insn(_op) \ 83.500 -do{ rc = X86EMUL_UNHANDLEABLE; \ 83.501 - goto done; \ 83.502 -} while (0) 83.503 -#endif 83.504 83.505 - 83.506 -/* Fetch next part of the instruction being emulated. */ 83.507 -#define insn_fetch_bytes(_size) \ 83.508 -({ unsigned long _x, _eip = _regs.eip; \ 83.509 - if ( !mode_64bit() ) _eip = (uint32_t)_eip; /* ignore upper dword */ \ 83.510 - _regs.eip += (_size); /* real hardware doesn't truncate */ \ 83.511 - generate_exception_if((uint8_t)(_regs.eip - ctxt->regs->eip) > 15, \ 83.512 - EXC_GP, 0); \ 83.513 - rc = ops->insn_fetch(x86_seg_cs, _eip, &_x, (_size), ctxt); \ 83.514 - if ( rc ) goto done; \ 83.515 - _x; \ 83.516 -}) 83.517 -#define insn_fetch_type(_type) ((_type)insn_fetch_bytes(sizeof(_type))) 83.518 - 83.519 -#define truncate_word(ea, byte_width) \ 83.520 -({ unsigned long __ea = (ea); \ 83.521 - unsigned int _width = (byte_width); \ 83.522 - ((_width == sizeof(unsigned long)) ? 
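insn_fetch_bytes (above) advances a private copy of EIP and raises #GP(0) once more than 15 bytes have been consumed, the architectural maximum instruction length, so a malicious guest cannot keep the emulator fetching indefinitely. A self-contained sketch of just that bound:

```c
#include <stdint.h>
#include <string.h>

#define MAX_INSN_LEN 15              /* architectural maximum */

struct fetcher {
    const uint8_t *start, *ip;       /* first byte / next byte to fetch */
};

/* Returns 0 on success, -1 where the emulator would raise #GP(0). */
static int fetch(struct fetcher *f, void *buf, size_t size)
{
    if ( (size_t)(f->ip - f->start) + size > MAX_INSN_LEN )
        return -1;
    memcpy(buf, f->ip, size);
    f->ip += size;
    return 0;
}
```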
__ea : \ 83.523 - (__ea & ((1UL << (_width << 3)) - 1))); \ 83.524 -}) 83.525 -#define truncate_ea(ea) truncate_word((ea), ad_bytes) 83.526 - 83.527 -#define mode_64bit() (def_ad_bytes == 8) 83.528 - 83.529 -#define fail_if(p) \ 83.530 -do { \ 83.531 - rc = (p) ? X86EMUL_UNHANDLEABLE : X86EMUL_OKAY; \ 83.532 - if ( rc ) goto done; \ 83.533 -} while (0) 83.534 - 83.535 -#define generate_exception_if(p, e, ec) \ 83.536 -({ if ( (p) ) { \ 83.537 - fail_if(ops->inject_hw_exception == NULL); \ 83.538 - rc = ops->inject_hw_exception(e, ec, ctxt) ? : X86EMUL_EXCEPTION; \ 83.539 - goto done; \ 83.540 - } \ 83.541 -}) 83.542 - 83.543 -/* 83.544 - * Given byte has even parity (even number of 1s)? SDM Vol. 1 Sec. 3.4.3.1, 83.545 - * "Status Flags": EFLAGS.PF reflects parity of least-sig. byte of result only. 83.546 - */ 83.547 -static int even_parity(uint8_t v) 83.548 -{ 83.549 - asm ( "test %b0,%b0; setp %b0" : "=a" (v) : "0" (v) ); 83.550 - return v; 83.551 -} 83.552 - 83.553 -/* Update address held in a register, based on addressing mode. */ 83.554 -#define _register_address_increment(reg, inc, byte_width) \ 83.555 -do { \ 83.556 - int _inc = (inc); /* signed type ensures sign extension to long */ \ 83.557 - unsigned int _width = (byte_width); \ 83.558 - if ( _width == sizeof(unsigned long) ) \ 83.559 - (reg) += _inc; \ 83.560 - else if ( mode_64bit() ) \ 83.561 - (reg) = ((reg) + _inc) & ((1UL << (_width << 3)) - 1); \ 83.562 - else \ 83.563 - (reg) = ((reg) & ~((1UL << (_width << 3)) - 1)) | \ 83.564 - (((reg) + _inc) & ((1UL << (_width << 3)) - 1)); \ 83.565 -} while (0) 83.566 -#define register_address_increment(reg, inc) \ 83.567 - _register_address_increment((reg), (inc), ad_bytes) 83.568 - 83.569 -#define sp_pre_dec(dec) ({ \ 83.570 - _register_address_increment(_regs.esp, -(dec), ctxt->sp_size/8); \ 83.571 - truncate_word(_regs.esp, ctxt->sp_size/8); \ 83.572 -}) 83.573 -#define sp_post_inc(inc) ({ \ 83.574 - unsigned long __esp = truncate_word(_regs.esp, ctxt->sp_size/8); \ 83.575 - _register_address_increment(_regs.esp, (inc), ctxt->sp_size/8); \ 83.576 - __esp; \ 83.577 -}) 83.578 - 83.579 -#define jmp_rel(rel) \ 83.580 -do { \ 83.581 - int _rel = (int)(rel); \ 83.582 - _regs.eip += _rel; \ 83.583 - if ( !mode_64bit() ) \ 83.584 - _regs.eip = ((op_bytes == 2) \ 83.585 - ? (uint16_t)_regs.eip : (uint32_t)_regs.eip); \ 83.586 -} while (0) 83.587 - 83.588 -static unsigned long __get_rep_prefix( 83.589 - struct cpu_user_regs *int_regs, 83.590 - struct cpu_user_regs *ext_regs, 83.591 - int ad_bytes) 83.592 -{ 83.593 - unsigned long ecx = ((ad_bytes == 2) ? (uint16_t)int_regs->ecx : 83.594 - (ad_bytes == 4) ? (uint32_t)int_regs->ecx : 83.595 - int_regs->ecx); 83.596 - 83.597 - /* Skip the instruction if no repetitions are required. */ 83.598 - if ( ecx == 0 ) 83.599 - ext_regs->eip = int_regs->eip; 83.600 - 83.601 - return ecx; 83.602 -} 83.603 - 83.604 -#define get_rep_prefix() ({ \ 83.605 - unsigned long max_reps = 1; \ 83.606 - if ( rep_prefix ) \ 83.607 - max_reps = __get_rep_prefix(&_regs, ctxt->regs, ad_bytes); \ 83.608 - if ( max_reps == 0 ) \ 83.609 - goto done; \ 83.610 - max_reps; \ 83.611 -}) 83.612 - 83.613 -static void __put_rep_prefix( 83.614 - struct cpu_user_regs *int_regs, 83.615 - struct cpu_user_regs *ext_regs, 83.616 - int ad_bytes, 83.617 - unsigned long reps_completed) 83.618 -{ 83.619 - unsigned long ecx = ((ad_bytes == 2) ? (uint16_t)int_regs->ecx : 83.620 - (ad_bytes == 4) ? 
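_register_address_increment (above) has to respect the live address size: a 16-bit `inc %si` must wrap within 16 bits and leave the upper register bits untouched, while a 32-bit update in 64-bit mode zero-extends instead. The same three cases as a standalone function:

```c
/* width: address size in bytes (2, 4 or 8); long_mode: CPU in 64-bit mode. */
static unsigned long reg_adjust(unsigned long reg, int inc,
                                unsigned int width, int long_mode)
{
    unsigned long mask;

    if ( width == sizeof(unsigned long) )
        return reg + inc;                        /* full-width update    */
    mask = (1UL << (width * 8)) - 1;
    if ( long_mode )
        return (reg + inc) & mask;               /* zero-extend (64-bit) */
    return (reg & ~mask) | ((reg + inc) & mask); /* preserve upper bits  */
}
```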
(uint32_t)int_regs->ecx : 83.621 - int_regs->ecx); 83.622 - 83.623 - /* Reduce counter appropriately, and repeat instruction if non-zero. */ 83.624 - ecx -= reps_completed; 83.625 - if ( ecx != 0 ) 83.626 - int_regs->eip = ext_regs->eip; 83.627 - 83.628 - if ( ad_bytes == 2 ) 83.629 - *(uint16_t *)&int_regs->ecx = ecx; 83.630 - else if ( ad_bytes == 4 ) 83.631 - int_regs->ecx = (uint32_t)ecx; 83.632 - else 83.633 - int_regs->ecx = ecx; 83.634 -} 83.635 - 83.636 -#define put_rep_prefix(reps_completed) ({ \ 83.637 - if ( rep_prefix ) \ 83.638 - __put_rep_prefix(&_regs, ctxt->regs, ad_bytes, reps_completed); \ 83.639 -}) 83.640 - 83.641 -/* 83.642 - * Unsigned multiplication with double-word result. 83.643 - * IN: Multiplicand=m[0], Multiplier=m[1] 83.644 - * OUT: Return CF/OF (overflow status); Result=m[1]:m[0] 83.645 - */ 83.646 -static int mul_dbl(unsigned long m[2]) 83.647 -{ 83.648 - int rc; 83.649 - asm ( "mul %4; seto %b2" 83.650 - : "=a" (m[0]), "=d" (m[1]), "=q" (rc) 83.651 - : "0" (m[0]), "1" (m[1]), "2" (0) ); 83.652 - return rc; 83.653 -} 83.654 - 83.655 -/* 83.656 - * Signed multiplication with double-word result. 83.657 - * IN: Multiplicand=m[0], Multiplier=m[1] 83.658 - * OUT: Return CF/OF (overflow status); Result=m[1]:m[0] 83.659 - */ 83.660 -static int imul_dbl(unsigned long m[2]) 83.661 -{ 83.662 - int rc; 83.663 - asm ( "imul %4; seto %b2" 83.664 - : "=a" (m[0]), "=d" (m[1]), "=q" (rc) 83.665 - : "0" (m[0]), "1" (m[1]), "2" (0) ); 83.666 - return rc; 83.667 -} 83.668 - 83.669 -/* 83.670 - * Unsigned division of double-word dividend. 83.671 - * IN: Dividend=u[1]:u[0], Divisor=v 83.672 - * OUT: Return 1: #DE 83.673 - * Return 0: Quotient=u[0], Remainder=u[1] 83.674 - */ 83.675 -static int div_dbl(unsigned long u[2], unsigned long v) 83.676 -{ 83.677 - if ( (v == 0) || (u[1] >= v) ) 83.678 - return 1; 83.679 - asm ( "div %4" 83.680 - : "=a" (u[0]), "=d" (u[1]) 83.681 - : "0" (u[0]), "1" (u[1]), "r" (v) ); 83.682 - return 0; 83.683 -} 83.684 - 83.685 -/* 83.686 - * Signed division of double-word dividend. 83.687 - * IN: Dividend=u[1]:u[0], Divisor=v 83.688 - * OUT: Return 1: #DE 83.689 - * Return 0: Quotient=u[0], Remainder=u[1] 83.690 - * NB. We don't use idiv directly as it's moderately hard to work out 83.691 - * ahead of time whether it will #DE, which we cannot allow to happen. 83.692 - */ 83.693 -static int idiv_dbl(unsigned long u[2], unsigned long v) 83.694 -{ 83.695 - int negu = (long)u[1] < 0, negv = (long)v < 0; 83.696 - 83.697 - /* u = abs(u) */ 83.698 - if ( negu ) 83.699 - { 83.700 - u[1] = ~u[1]; 83.701 - if ( (u[0] = -u[0]) == 0 ) 83.702 - u[1]++; 83.703 - } 83.704 - 83.705 - /* abs(u) / abs(v) */ 83.706 - if ( div_dbl(u, negv ? -v : v) ) 83.707 - return 1; 83.708 - 83.709 - /* Remainder has same sign as dividend. It cannot overflow. */ 83.710 - if ( negu ) 83.711 - u[1] = -u[1]; 83.712 - 83.713 - /* Quotient is overflowed if sign bit is set. 
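div_dbl (above) deliberately pre-checks `v == 0 || u[1] >= v` instead of just issuing `div`, because the host instruction would raise a real #DE in exactly those cases (the quotient cannot fit in one word). The same contract written portably, using GCC's `unsigned __int128` (64-bit build assumed):

```c
/* Returns 1 where hardware DIV would fault (#DE); otherwise leaves the
 * quotient in u[0] and the remainder in u[1], as div_dbl() does. */
static int div_dbl_c(unsigned long u[2], unsigned long v)
{
    unsigned __int128 n = ((unsigned __int128)u[1] << 64) | u[0];

    if ( (v == 0) || (u[1] >= v) )
        return 1;
    u[0] = (unsigned long)(n / v);   /* quotient  */
    u[1] = (unsigned long)(n % v);   /* remainder */
    return 0;
}
```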
*/ 83.714 - if ( negu ^ negv ) 83.715 - { 83.716 - if ( (long)u[0] >= 0 ) 83.717 - u[0] = -u[0]; 83.718 - else if ( (u[0] << 1) != 0 ) /* == 0x80...0 is okay */ 83.719 - return 1; 83.720 - } 83.721 - else if ( (long)u[0] < 0 ) 83.722 - return 1; 83.723 - 83.724 - return 0; 83.725 -} 83.726 - 83.727 -static int 83.728 -test_cc( 83.729 - unsigned int condition, unsigned int flags) 83.730 -{ 83.731 - int rc = 0; 83.732 - 83.733 - switch ( (condition & 15) >> 1 ) 83.734 - { 83.735 - case 0: /* o */ 83.736 - rc |= (flags & EFLG_OF); 83.737 - break; 83.738 - case 1: /* b/c/nae */ 83.739 - rc |= (flags & EFLG_CF); 83.740 - break; 83.741 - case 2: /* z/e */ 83.742 - rc |= (flags & EFLG_ZF); 83.743 - break; 83.744 - case 3: /* be/na */ 83.745 - rc |= (flags & (EFLG_CF|EFLG_ZF)); 83.746 - break; 83.747 - case 4: /* s */ 83.748 - rc |= (flags & EFLG_SF); 83.749 - break; 83.750 - case 5: /* p/pe */ 83.751 - rc |= (flags & EFLG_PF); 83.752 - break; 83.753 - case 7: /* le/ng */ 83.754 - rc |= (flags & EFLG_ZF); 83.755 - /* fall through */ 83.756 - case 6: /* l/nge */ 83.757 - rc |= (!(flags & EFLG_SF) != !(flags & EFLG_OF)); 83.758 - break; 83.759 - } 83.760 - 83.761 - /* Odd condition identifiers (lsb == 1) have inverted sense. */ 83.762 - return (!!rc ^ (condition & 1)); 83.763 -} 83.764 - 83.765 -static int 83.766 -get_cpl( 83.767 - struct x86_emulate_ctxt *ctxt, 83.768 - struct x86_emulate_ops *ops) 83.769 -{ 83.770 - struct segment_register reg; 83.771 - 83.772 - if ( ctxt->regs->eflags & EFLG_VM ) 83.773 - return 3; 83.774 - 83.775 - if ( (ops->read_segment == NULL) || 83.776 - ops->read_segment(x86_seg_ss, ®, ctxt) ) 83.777 - return -1; 83.778 - 83.779 - return reg.attr.fields.dpl; 83.780 -} 83.781 - 83.782 -static int 83.783 -_mode_iopl( 83.784 - struct x86_emulate_ctxt *ctxt, 83.785 - struct x86_emulate_ops *ops) 83.786 -{ 83.787 - int cpl = get_cpl(ctxt, ops); 83.788 - if ( cpl == -1 ) 83.789 - return -1; 83.790 - return ((cpl >= 0) && (cpl <= ((ctxt->regs->eflags >> 12) & 3))); 83.791 -} 83.792 - 83.793 -#define mode_ring0() ({ \ 83.794 - int _cpl = get_cpl(ctxt, ops); \ 83.795 - fail_if(_cpl < 0); \ 83.796 - (_cpl == 0); \ 83.797 -}) 83.798 -#define mode_iopl() ({ \ 83.799 - int _iopl = _mode_iopl(ctxt, ops); \ 83.800 - fail_if(_iopl < 0); \ 83.801 - _iopl; \ 83.802 -}) 83.803 - 83.804 -static int 83.805 -in_realmode( 83.806 - struct x86_emulate_ctxt *ctxt, 83.807 - struct x86_emulate_ops *ops) 83.808 -{ 83.809 - unsigned long cr0; 83.810 - int rc; 83.811 - 83.812 - if ( ops->read_cr == NULL ) 83.813 - return 0; 83.814 - 83.815 - rc = ops->read_cr(0, &cr0, ctxt); 83.816 - return (!rc && !(cr0 & CR0_PE)); 83.817 -} 83.818 - 83.819 -static int 83.820 -realmode_load_seg( 83.821 - enum x86_segment seg, 83.822 - uint16_t sel, 83.823 - struct x86_emulate_ctxt *ctxt, 83.824 - struct x86_emulate_ops *ops) 83.825 -{ 83.826 - struct segment_register reg; 83.827 - int rc; 83.828 - 83.829 - if ( (rc = ops->read_segment(seg, ®, ctxt)) != 0 ) 83.830 - return rc; 83.831 - 83.832 - reg.sel = sel; 83.833 - reg.base = (uint32_t)sel << 4; 83.834 - 83.835 - return ops->write_segment(seg, ®, ctxt); 83.836 -} 83.837 - 83.838 -static int 83.839 -protmode_load_seg( 83.840 - enum x86_segment seg, 83.841 - uint16_t sel, 83.842 - struct x86_emulate_ctxt *ctxt, 83.843 - struct x86_emulate_ops *ops) 83.844 -{ 83.845 - struct segment_register desctab, cs, segr; 83.846 - struct { uint32_t a, b; } desc; 83.847 - unsigned long val; 83.848 - uint8_t dpl, rpl, cpl; 83.849 - int rc, fault_type = EXC_TS; 83.850 - 83.851 - /* 
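test_cc (above) leans on the regularity of x86 condition encodings: bits 3..1 pick one of eight predicates and bit 0 inverts it, so only the "even" half needs evaluating. For example, with the EFLG_* values defined earlier:

```c
/* 0x4 = Z/E (taken if ZF), 0x5 = NZ/NE (its inverse). */
int je_taken  = test_cc(0x4, EFLG_ZF);   /* -> 1 */
int jne_taken = test_cc(0x5, EFLG_ZF);   /* -> 0 */

/* 0xC = L/NGE: taken when SF != OF. */
int jl_taken  = test_cc(0xc, EFLG_SF);   /* SF set, OF clear -> 1 */
```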
NULL selector? */ 83.852 - if ( (sel & 0xfffc) == 0 ) 83.853 - { 83.854 - if ( (seg == x86_seg_cs) || (seg == x86_seg_ss) ) 83.855 - goto raise_exn; 83.856 - memset(&segr, 0, sizeof(segr)); 83.857 - return ops->write_segment(seg, &segr, ctxt); 83.858 - } 83.859 - 83.860 - /* LDT descriptor must be in the GDT. */ 83.861 - if ( (seg == x86_seg_ldtr) && (sel & 4) ) 83.862 - goto raise_exn; 83.863 - 83.864 - if ( (rc = ops->read_segment(x86_seg_cs, &cs, ctxt)) || 83.865 - (rc = ops->read_segment((sel & 4) ? x86_seg_ldtr : x86_seg_gdtr, 83.866 - &desctab, ctxt)) ) 83.867 - return rc; 83.868 - 83.869 - /* Check against descriptor table limit. */ 83.870 - if ( ((sel & 0xfff8) + 7) > desctab.limit ) 83.871 - goto raise_exn; 83.872 - 83.873 - do { 83.874 - if ( (rc = ops->read(x86_seg_none, desctab.base + (sel & 0xfff8), 83.875 - &val, 4, ctxt)) ) 83.876 - return rc; 83.877 - desc.a = val; 83.878 - if ( (rc = ops->read(x86_seg_none, desctab.base + (sel & 0xfff8) + 4, 83.879 - &val, 4, ctxt)) ) 83.880 - return rc; 83.881 - desc.b = val; 83.882 - 83.883 - /* Segment present in memory? */ 83.884 - if ( !(desc.b & (1u<<15)) ) 83.885 - { 83.886 - fault_type = EXC_NP; 83.887 - goto raise_exn; 83.888 - } 83.889 - 83.890 - /* LDT descriptor is a system segment. All others are code/data. */ 83.891 - if ( (desc.b & (1u<<12)) == ((seg == x86_seg_ldtr) << 12) ) 83.892 - goto raise_exn; 83.893 - 83.894 - dpl = (desc.b >> 13) & 3; 83.895 - rpl = sel & 3; 83.896 - cpl = cs.sel & 3; 83.897 - 83.898 - switch ( seg ) 83.899 - { 83.900 - case x86_seg_cs: 83.901 - /* Code segment? */ 83.902 - if ( !(desc.b & (1u<<11)) ) 83.903 - goto raise_exn; 83.904 - /* Non-conforming segment: check DPL against RPL. */ 83.905 - if ( ((desc.b & (6u<<9)) != 6) && (dpl != rpl) ) 83.906 - goto raise_exn; 83.907 - break; 83.908 - case x86_seg_ss: 83.909 - /* Writable data segment? */ 83.910 - if ( (desc.b & (5u<<9)) != (1u<<9) ) 83.911 - goto raise_exn; 83.912 - if ( (dpl != cpl) || (dpl != rpl) ) 83.913 - goto raise_exn; 83.914 - break; 83.915 - case x86_seg_ldtr: 83.916 - /* LDT system segment? */ 83.917 - if ( (desc.b & (15u<<8)) != (2u<<8) ) 83.918 - goto raise_exn; 83.919 - goto skip_accessed_flag; 83.920 - default: 83.921 - /* Readable code or data segment? */ 83.922 - if ( (desc.b & (5u<<9)) == (4u<<9) ) 83.923 - goto raise_exn; 83.924 - /* Non-conforming segment: check DPL against RPL and CPL. */ 83.925 - if ( ((desc.b & (6u<<9)) != 6) && ((dpl < cpl) || (dpl < rpl)) ) 83.926 - goto raise_exn; 83.927 - break; 83.928 - } 83.929 - 83.930 - /* Ensure Accessed flag is set. */ 83.931 - rc = ((desc.b & 0x100) ? X86EMUL_OKAY : 83.932 - ops->cmpxchg( 83.933 - x86_seg_none, desctab.base + (sel & 0xfff8) + 4, desc.b, 83.934 - desc.b | 0x100, 4, ctxt)); 83.935 - } while ( rc == X86EMUL_CMPXCHG_FAILED ); 83.936 - 83.937 - if ( rc ) 83.938 - return rc; 83.939 - 83.940 - /* Force the Accessed flag in our local copy. 
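Setting the descriptor's Accessed bit (above) must be atomic with respect to the guest, since another vCPU could legitimately rewrite the descriptor between our read and our write; hence the cmpxchg retry loop. Its shape in isolation (`desc_base` here is a stand-in for the descriptor's address):

```c
do {
    /* (Re)read the descriptor words and revalidate them first. */
    if ( desc.b & 0x100 )                  /* already Accessed */
        break;
    rc = ops->cmpxchg(x86_seg_none, desc_base + 4,   /* high word */
                      desc.b, desc.b | 0x100, 4, ctxt);
    /* X86EMUL_CMPXCHG_FAILED => the descriptor changed under us:
     * loop and validate the new contents rather than blindly retry. */
} while ( rc == X86EMUL_CMPXCHG_FAILED );
```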
*/ 83.941 - desc.b |= 0x100; 83.942 - 83.943 - skip_accessed_flag: 83.944 - segr.base = (((desc.b << 0) & 0xff000000u) | 83.945 - ((desc.b << 16) & 0x00ff0000u) | 83.946 - ((desc.a >> 16) & 0x0000ffffu)); 83.947 - segr.attr.bytes = (((desc.b >> 8) & 0x00ffu) | 83.948 - ((desc.b >> 12) & 0x0f00u)); 83.949 - segr.limit = (desc.b & 0x000f0000u) | (desc.a & 0x0000ffffu); 83.950 - if ( segr.attr.fields.g ) 83.951 - segr.limit = (segr.limit << 12) | 0xfffu; 83.952 - segr.sel = sel; 83.953 - return ops->write_segment(seg, &segr, ctxt); 83.954 - 83.955 - raise_exn: 83.956 - if ( ops->inject_hw_exception == NULL ) 83.957 - return X86EMUL_UNHANDLEABLE; 83.958 - if ( (rc = ops->inject_hw_exception(fault_type, sel & 0xfffc, ctxt)) ) 83.959 - return rc; 83.960 - return X86EMUL_EXCEPTION; 83.961 -} 83.962 - 83.963 -static int 83.964 -load_seg( 83.965 - enum x86_segment seg, 83.966 - uint16_t sel, 83.967 - struct x86_emulate_ctxt *ctxt, 83.968 - struct x86_emulate_ops *ops) 83.969 -{ 83.970 - if ( (ops->read_segment == NULL) || 83.971 - (ops->write_segment == NULL) ) 83.972 - return X86EMUL_UNHANDLEABLE; 83.973 - 83.974 - if ( in_realmode(ctxt, ops) ) 83.975 - return realmode_load_seg(seg, sel, ctxt, ops); 83.976 - 83.977 - return protmode_load_seg(seg, sel, ctxt, ops); 83.978 -} 83.979 - 83.980 -void * 83.981 -decode_register( 83.982 - uint8_t modrm_reg, struct cpu_user_regs *regs, int highbyte_regs) 83.983 -{ 83.984 - void *p; 83.985 - 83.986 - switch ( modrm_reg ) 83.987 - { 83.988 - case 0: p = ®s->eax; break; 83.989 - case 1: p = ®s->ecx; break; 83.990 - case 2: p = ®s->edx; break; 83.991 - case 3: p = ®s->ebx; break; 83.992 - case 4: p = (highbyte_regs ? 83.993 - ((unsigned char *)®s->eax + 1) : 83.994 - (unsigned char *)®s->esp); break; 83.995 - case 5: p = (highbyte_regs ? 83.996 - ((unsigned char *)®s->ecx + 1) : 83.997 - (unsigned char *)®s->ebp); break; 83.998 - case 6: p = (highbyte_regs ? 83.999 - ((unsigned char *)®s->edx + 1) : 83.1000 - (unsigned char *)®s->esi); break; 83.1001 - case 7: p = (highbyte_regs ? 83.1002 - ((unsigned char *)®s->ebx + 1) : 83.1003 - (unsigned char *)®s->edi); break; 83.1004 -#if defined(__x86_64__) 83.1005 - case 8: p = ®s->r8; break; 83.1006 - case 9: p = ®s->r9; break; 83.1007 - case 10: p = ®s->r10; break; 83.1008 - case 11: p = ®s->r11; break; 83.1009 - case 12: p = ®s->r12; break; 83.1010 - case 13: p = ®s->r13; break; 83.1011 - case 14: p = ®s->r14; break; 83.1012 - case 15: p = ®s->r15; break; 83.1013 -#endif 83.1014 - default: p = NULL; break; 83.1015 - } 83.1016 - 83.1017 - return p; 83.1018 -} 83.1019 - 83.1020 -#define decode_segment_failed x86_seg_tr 83.1021 -enum x86_segment 83.1022 -decode_segment( 83.1023 - uint8_t modrm_reg) 83.1024 -{ 83.1025 - switch ( modrm_reg ) 83.1026 - { 83.1027 - case 0: return x86_seg_es; 83.1028 - case 1: return x86_seg_cs; 83.1029 - case 2: return x86_seg_ss; 83.1030 - case 3: return x86_seg_ds; 83.1031 - case 4: return x86_seg_fs; 83.1032 - case 5: return x86_seg_gs; 83.1033 - default: break; 83.1034 - } 83.1035 - return decode_segment_failed; 83.1036 -} 83.1037 - 83.1038 -int 83.1039 -x86_emulate( 83.1040 - struct x86_emulate_ctxt *ctxt, 83.1041 - struct x86_emulate_ops *ops) 83.1042 -{ 83.1043 - /* Shadow copy of register state. Committed on successful emulation. 
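decode_register (above) hides a legacy wrinkle: without a REX prefix, byte-register encodings 4..7 name AH/CH/DH/BH, i.e. byte 1 of EAX/ECX/EDX/EBX, not SPL/BPL/SIL/DIL. A little-endian worked example:

```c
struct cpu_user_regs regs = { .eax = 0x1234 };

/* Encoding 4 with highbyte_regs = 1 resolves to AH (byte 1 of EAX). */
uint8_t *ah = decode_register(4, &regs, 1);
*ah = 0x56;                      /* regs.eax is now 0x5634 */

/* The same encoding under a REX prefix (highbyte_regs = 0) is %spl. */
uint8_t *spl = decode_register(4, &regs, 0);
```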
*/ 83.1044 - struct cpu_user_regs _regs = *ctxt->regs; 83.1045 - 83.1046 - uint8_t b, d, sib, sib_index, sib_base, twobyte = 0, rex_prefix = 0; 83.1047 - uint8_t modrm = 0, modrm_mod = 0, modrm_reg = 0, modrm_rm = 0; 83.1048 - unsigned int op_bytes, def_op_bytes, ad_bytes, def_ad_bytes; 83.1049 -#define REPE_PREFIX 1 83.1050 -#define REPNE_PREFIX 2 83.1051 - unsigned int lock_prefix = 0, rep_prefix = 0; 83.1052 - int override_seg = -1, rc = X86EMUL_OKAY; 83.1053 - struct operand src, dst; 83.1054 - 83.1055 - /* Data operand effective address (usually computed from ModRM). */ 83.1056 - struct operand ea; 83.1057 - 83.1058 - /* Default is a memory operand relative to segment DS. */ 83.1059 - ea.type = OP_MEM; 83.1060 - ea.mem.seg = x86_seg_ds; 83.1061 - ea.mem.off = 0; 83.1062 - 83.1063 - ctxt->retire.byte = 0; 83.1064 - 83.1065 - op_bytes = def_op_bytes = ad_bytes = def_ad_bytes = ctxt->addr_size/8; 83.1066 - if ( op_bytes == 8 ) 83.1067 - { 83.1068 - op_bytes = def_op_bytes = 4; 83.1069 -#ifndef __x86_64__ 83.1070 - return X86EMUL_UNHANDLEABLE; 83.1071 -#endif 83.1072 - } 83.1073 - 83.1074 - /* Prefix bytes. */ 83.1075 - for ( ; ; ) 83.1076 - { 83.1077 - switch ( b = insn_fetch_type(uint8_t) ) 83.1078 - { 83.1079 - case 0x66: /* operand-size override */ 83.1080 - op_bytes = def_op_bytes ^ 6; 83.1081 - break; 83.1082 - case 0x67: /* address-size override */ 83.1083 - ad_bytes = def_ad_bytes ^ (mode_64bit() ? 12 : 6); 83.1084 - break; 83.1085 - case 0x2e: /* CS override */ 83.1086 - override_seg = x86_seg_cs; 83.1087 - break; 83.1088 - case 0x3e: /* DS override */ 83.1089 - override_seg = x86_seg_ds; 83.1090 - break; 83.1091 - case 0x26: /* ES override */ 83.1092 - override_seg = x86_seg_es; 83.1093 - break; 83.1094 - case 0x64: /* FS override */ 83.1095 - override_seg = x86_seg_fs; 83.1096 - break; 83.1097 - case 0x65: /* GS override */ 83.1098 - override_seg = x86_seg_gs; 83.1099 - break; 83.1100 - case 0x36: /* SS override */ 83.1101 - override_seg = x86_seg_ss; 83.1102 - break; 83.1103 - case 0xf0: /* LOCK */ 83.1104 - lock_prefix = 1; 83.1105 - break; 83.1106 - case 0xf2: /* REPNE/REPNZ */ 83.1107 - rep_prefix = REPNE_PREFIX; 83.1108 - break; 83.1109 - case 0xf3: /* REP/REPE/REPZ */ 83.1110 - rep_prefix = REPE_PREFIX; 83.1111 - break; 83.1112 - case 0x40 ... 0x4f: /* REX */ 83.1113 - if ( !mode_64bit() ) 83.1114 - goto done_prefixes; 83.1115 - rex_prefix = b; 83.1116 - continue; 83.1117 - default: 83.1118 - goto done_prefixes; 83.1119 - } 83.1120 - 83.1121 - /* Any legacy prefix after a REX prefix nullifies its effect. */ 83.1122 - rex_prefix = 0; 83.1123 - } 83.1124 - done_prefixes: 83.1125 - 83.1126 - if ( rex_prefix & 8 ) /* REX.W */ 83.1127 - op_bytes = 8; 83.1128 - 83.1129 - /* Opcode byte(s). */ 83.1130 - d = opcode_table[b]; 83.1131 - if ( d == 0 ) 83.1132 - { 83.1133 - /* Two-byte opcode? */ 83.1134 - if ( b == 0x0f ) 83.1135 - { 83.1136 - twobyte = 1; 83.1137 - b = insn_fetch_type(uint8_t); 83.1138 - d = twobyte_table[b]; 83.1139 - } 83.1140 - 83.1141 - /* Unrecognised? */ 83.1142 - if ( d == 0 ) 83.1143 - goto cannot_emulate; 83.1144 - } 83.1145 - 83.1146 - /* Lock prefix is allowed only on RMW instructions. */ 83.1147 - generate_exception_if((d & Mov) && lock_prefix, EXC_GP, 0); 83.1148 - 83.1149 - /* ModRM and SIB bytes. 
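The prefix loop above collects legacy prefixes and, in 64-bit mode, REX, implementing the rule that a legacy prefix appearing after REX cancels it; note how only the REX case `continue`s past the `rex_prefix = 0` reset. Condensed, with `next_byte()`, `is_legacy_prefix()` and `record_prefix()` as hypothetical stand-ins:

```c
uint8_t b, rex = 0;

for ( ; ; )
{
    b = next_byte();                      /* hypothetical fetch        */
    if ( in_long_mode && ((b & 0xf0) == 0x40) )
    {
        rex = b;                          /* remember REX...           */
        continue;                         /* ...and skip the reset     */
    }
    if ( !is_legacy_prefix(b) )           /* 66/67, segment, F0/F2/F3  */
        break;                            /* b is now the opcode byte  */
    record_prefix(b);                     /* hypothetical              */
    rex = 0;                              /* legacy prefix kills REX   */
}
```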
*/ 83.1150 - if ( d & ModRM ) 83.1151 - { 83.1152 - modrm = insn_fetch_type(uint8_t); 83.1153 - modrm_mod = (modrm & 0xc0) >> 6; 83.1154 - modrm_reg = ((rex_prefix & 4) << 1) | ((modrm & 0x38) >> 3); 83.1155 - modrm_rm = modrm & 0x07; 83.1156 - 83.1157 - if ( modrm_mod == 3 ) 83.1158 - { 83.1159 - modrm_rm |= (rex_prefix & 1) << 3; 83.1160 - ea.type = OP_REG; 83.1161 - ea.reg = decode_register( 83.1162 - modrm_rm, &_regs, (d & ByteOp) && (rex_prefix == 0)); 83.1163 - } 83.1164 - else if ( ad_bytes == 2 ) 83.1165 - { 83.1166 - /* 16-bit ModR/M decode. */ 83.1167 - switch ( modrm_rm ) 83.1168 - { 83.1169 - case 0: 83.1170 - ea.mem.off = _regs.ebx + _regs.esi; 83.1171 - break; 83.1172 - case 1: 83.1173 - ea.mem.off = _regs.ebx + _regs.edi; 83.1174 - break; 83.1175 - case 2: 83.1176 - ea.mem.seg = x86_seg_ss; 83.1177 - ea.mem.off = _regs.ebp + _regs.esi; 83.1178 - break; 83.1179 - case 3: 83.1180 - ea.mem.seg = x86_seg_ss; 83.1181 - ea.mem.off = _regs.ebp + _regs.edi; 83.1182 - break; 83.1183 - case 4: 83.1184 - ea.mem.off = _regs.esi; 83.1185 - break; 83.1186 - case 5: 83.1187 - ea.mem.off = _regs.edi; 83.1188 - break; 83.1189 - case 6: 83.1190 - if ( modrm_mod == 0 ) 83.1191 - break; 83.1192 - ea.mem.seg = x86_seg_ss; 83.1193 - ea.mem.off = _regs.ebp; 83.1194 - break; 83.1195 - case 7: 83.1196 - ea.mem.off = _regs.ebx; 83.1197 - break; 83.1198 - } 83.1199 - switch ( modrm_mod ) 83.1200 - { 83.1201 - case 0: 83.1202 - if ( modrm_rm == 6 ) 83.1203 - ea.mem.off = insn_fetch_type(int16_t); 83.1204 - break; 83.1205 - case 1: 83.1206 - ea.mem.off += insn_fetch_type(int8_t); 83.1207 - break; 83.1208 - case 2: 83.1209 - ea.mem.off += insn_fetch_type(int16_t); 83.1210 - break; 83.1211 - } 83.1212 - ea.mem.off = truncate_ea(ea.mem.off); 83.1213 - } 83.1214 - else 83.1215 - { 83.1216 - /* 32/64-bit ModR/M decode. */ 83.1217 - if ( modrm_rm == 4 ) 83.1218 - { 83.1219 - sib = insn_fetch_type(uint8_t); 83.1220 - sib_index = ((sib >> 3) & 7) | ((rex_prefix << 2) & 8); 83.1221 - sib_base = (sib & 7) | ((rex_prefix << 3) & 8); 83.1222 - if ( sib_index != 4 ) 83.1223 - ea.mem.off = *(long*)decode_register(sib_index, &_regs, 0); 83.1224 - ea.mem.off <<= (sib >> 6) & 3; 83.1225 - if ( (modrm_mod == 0) && ((sib_base & 7) == 5) ) 83.1226 - ea.mem.off += insn_fetch_type(int32_t); 83.1227 - else if ( sib_base == 4 ) 83.1228 - { 83.1229 - ea.mem.seg = x86_seg_ss; 83.1230 - ea.mem.off += _regs.esp; 83.1231 - if ( !twobyte && (b == 0x8f) ) 83.1232 - /* POP <rm> computes its EA post increment. */ 83.1233 - ea.mem.off += ((mode_64bit() && (op_bytes == 4)) 83.1234 - ? 8 : op_bytes); 83.1235 - } 83.1236 - else if ( sib_base == 5 ) 83.1237 - { 83.1238 - ea.mem.seg = x86_seg_ss; 83.1239 - ea.mem.off += _regs.ebp; 83.1240 - } 83.1241 - else 83.1242 - ea.mem.off += *(long*)decode_register(sib_base, &_regs, 0); 83.1243 - } 83.1244 - else 83.1245 - { 83.1246 - modrm_rm |= (rex_prefix & 1) << 3; 83.1247 - ea.mem.off = *(long *)decode_register(modrm_rm, &_regs, 0); 83.1248 - if ( (modrm_rm == 5) && (modrm_mod != 0) ) 83.1249 - ea.mem.seg = x86_seg_ss; 83.1250 - } 83.1251 - switch ( modrm_mod ) 83.1252 - { 83.1253 - case 0: 83.1254 - if ( (modrm_rm & 7) != 5 ) 83.1255 - break; 83.1256 - ea.mem.off = insn_fetch_type(int32_t); 83.1257 - if ( !mode_64bit() ) 83.1258 - break; 83.1259 - /* Relative to RIP of next instruction. Argh! */ 83.1260 - ea.mem.off += _regs.eip; 83.1261 - if ( (d & SrcMask) == SrcImm ) 83.1262 - ea.mem.off += (d & ByteOp) ? 1 : 83.1263 - ((op_bytes == 8) ? 
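The 16-bit ModR/M arm above is the classic BX/BP/SI/DI table; encodings that involve BP default to the stack segment, and rm=6 with mod=0 is special-cased as a bare disp16. The table in compact form:

```c
/* rm:     0      1      2      3      4     5     6      7
 * base:   BX     BX     BP     BP     --    --    BP(*)  BX
 * index:  SI     DI     SI     DI     SI    DI    --     --
 * seg:    DS     DS     SS     SS     DS    DS    SS     DS
 *
 * (*) mod = 0, rm = 6 is instead a plain 16-bit displacement (DS). */
```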
4 : op_bytes); 83.1264 - else if ( (d & SrcMask) == SrcImmByte ) 83.1265 - ea.mem.off += 1; 83.1266 - else if ( !twobyte && ((b & 0xfe) == 0xf6) && 83.1267 - ((modrm_reg & 7) <= 1) ) 83.1268 - /* Special case in Grp3: test has immediate operand. */ 83.1269 - ea.mem.off += (d & ByteOp) ? 1 83.1270 - : ((op_bytes == 8) ? 4 : op_bytes); 83.1271 - else if ( twobyte && ((b & 0xf7) == 0xa4) ) 83.1272 - /* SHLD/SHRD with immediate byte third operand. */ 83.1273 - ea.mem.off++; 83.1274 - break; 83.1275 - case 1: 83.1276 - ea.mem.off += insn_fetch_type(int8_t); 83.1277 - break; 83.1278 - case 2: 83.1279 - ea.mem.off += insn_fetch_type(int32_t); 83.1280 - break; 83.1281 - } 83.1282 - ea.mem.off = truncate_ea(ea.mem.off); 83.1283 - } 83.1284 - } 83.1285 - 83.1286 - if ( override_seg != -1 ) 83.1287 - ea.mem.seg = override_seg; 83.1288 - 83.1289 - /* Special instructions do their own operand decoding. */ 83.1290 - if ( (d & DstMask) == ImplicitOps ) 83.1291 - goto special_insn; 83.1292 - 83.1293 - /* Decode and fetch the source operand: register, memory or immediate. */ 83.1294 - switch ( d & SrcMask ) 83.1295 - { 83.1296 - case SrcNone: 83.1297 - break; 83.1298 - case SrcReg: 83.1299 - src.type = OP_REG; 83.1300 - if ( d & ByteOp ) 83.1301 - { 83.1302 - src.reg = decode_register(modrm_reg, &_regs, (rex_prefix == 0)); 83.1303 - src.val = *(uint8_t *)src.reg; 83.1304 - src.bytes = 1; 83.1305 - } 83.1306 - else 83.1307 - { 83.1308 - src.reg = decode_register(modrm_reg, &_regs, 0); 83.1309 - switch ( (src.bytes = op_bytes) ) 83.1310 - { 83.1311 - case 2: src.val = *(uint16_t *)src.reg; break; 83.1312 - case 4: src.val = *(uint32_t *)src.reg; break; 83.1313 - case 8: src.val = *(uint64_t *)src.reg; break; 83.1314 - } 83.1315 - } 83.1316 - break; 83.1317 - case SrcMem16: 83.1318 - ea.bytes = 2; 83.1319 - goto srcmem_common; 83.1320 - case SrcMem: 83.1321 - ea.bytes = (d & ByteOp) ? 1 : op_bytes; 83.1322 - srcmem_common: 83.1323 - src = ea; 83.1324 - if ( src.type == OP_REG ) 83.1325 - { 83.1326 - switch ( src.bytes ) 83.1327 - { 83.1328 - case 1: src.val = *(uint8_t *)src.reg; break; 83.1329 - case 2: src.val = *(uint16_t *)src.reg; break; 83.1330 - case 4: src.val = *(uint32_t *)src.reg; break; 83.1331 - case 8: src.val = *(uint64_t *)src.reg; break; 83.1332 - } 83.1333 - } 83.1334 - else if ( (rc = ops->read(src.mem.seg, src.mem.off, 83.1335 - &src.val, src.bytes, ctxt)) ) 83.1336 - goto done; 83.1337 - break; 83.1338 - case SrcImm: 83.1339 - src.type = OP_IMM; 83.1340 - src.bytes = (d & ByteOp) ? 1 : op_bytes; 83.1341 - if ( src.bytes == 8 ) src.bytes = 4; 83.1342 - /* NB. Immediates are sign-extended as necessary. */ 83.1343 - switch ( src.bytes ) 83.1344 - { 83.1345 - case 1: src.val = insn_fetch_type(int8_t); break; 83.1346 - case 2: src.val = insn_fetch_type(int16_t); break; 83.1347 - case 4: src.val = insn_fetch_type(int32_t); break; 83.1348 - } 83.1349 - break; 83.1350 - case SrcImmByte: 83.1351 - src.type = OP_IMM; 83.1352 - src.bytes = 1; 83.1353 - src.val = insn_fetch_type(int8_t); 83.1354 - break; 83.1355 - } 83.1356 - 83.1357 - /* Decode and fetch the destination operand: register or memory. 
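The fix-ups above exist because a mod=0/rm=5 operand in 64-bit mode is relative to the *next* instruction's RIP, yet the immediate bytes have not been fetched when the displacement is decoded. A worked example, assuming the instruction sits at 0x1000:

```c
/* addl $0x7f, 0x10(%rip)  --  bytes: 83 05 10 00 00 00 7f
 *
 * After opcode + ModRM + disp32, _regs.eip = 0x1006, but the EA must
 * be relative to 0x1007 (the instruction's end).  Opcode 0x83 takes
 * an imm8 (SrcImmByte), so the decoder adds 1:
 *
 *     ea = 0x1006 + 1 + 0x10 = 0x1017
 */
```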
*/ 83.1358 - switch ( d & DstMask ) 83.1359 - { 83.1360 - case DstReg: 83.1361 - dst.type = OP_REG; 83.1362 - if ( d & ByteOp ) 83.1363 - { 83.1364 - dst.reg = decode_register(modrm_reg, &_regs, (rex_prefix == 0)); 83.1365 - dst.val = *(uint8_t *)dst.reg; 83.1366 - dst.bytes = 1; 83.1367 - } 83.1368 - else 83.1369 - { 83.1370 - dst.reg = decode_register(modrm_reg, &_regs, 0); 83.1371 - switch ( (dst.bytes = op_bytes) ) 83.1372 - { 83.1373 - case 2: dst.val = *(uint16_t *)dst.reg; break; 83.1374 - case 4: dst.val = *(uint32_t *)dst.reg; break; 83.1375 - case 8: dst.val = *(uint64_t *)dst.reg; break; 83.1376 - } 83.1377 - } 83.1378 - break; 83.1379 - case DstBitBase: 83.1380 - if ( ((d & SrcMask) == SrcImmByte) || (ea.type == OP_REG) ) 83.1381 - { 83.1382 - src.val &= (op_bytes << 3) - 1; 83.1383 - } 83.1384 - else 83.1385 - { 83.1386 - /* 83.1387 - * EA += BitOffset DIV op_bytes*8 83.1388 - * BitOffset = BitOffset MOD op_bytes*8 83.1389 - * DIV truncates towards negative infinity. 83.1390 - * MOD always produces a positive result. 83.1391 - */ 83.1392 - if ( op_bytes == 2 ) 83.1393 - src.val = (int16_t)src.val; 83.1394 - else if ( op_bytes == 4 ) 83.1395 - src.val = (int32_t)src.val; 83.1396 - if ( (long)src.val < 0 ) 83.1397 - { 83.1398 - unsigned long byte_offset; 83.1399 - byte_offset = op_bytes + (((-src.val-1) >> 3) & ~(op_bytes-1)); 83.1400 - ea.mem.off -= byte_offset; 83.1401 - src.val = (byte_offset << 3) + src.val; 83.1402 - } 83.1403 - else 83.1404 - { 83.1405 - ea.mem.off += (src.val >> 3) & ~(op_bytes - 1); 83.1406 - src.val &= (op_bytes << 3) - 1; 83.1407 - } 83.1408 - } 83.1409 - /* Becomes a normal DstMem operation from here on. */ 83.1410 - d = (d & ~DstMask) | DstMem; 83.1411 - case DstMem: 83.1412 - ea.bytes = (d & ByteOp) ? 1 : op_bytes; 83.1413 - dst = ea; 83.1414 - if ( dst.type == OP_REG ) 83.1415 - { 83.1416 - switch ( dst.bytes ) 83.1417 - { 83.1418 - case 1: dst.val = *(uint8_t *)dst.reg; break; 83.1419 - case 2: dst.val = *(uint16_t *)dst.reg; break; 83.1420 - case 4: dst.val = *(uint32_t *)dst.reg; break; 83.1421 - case 8: dst.val = *(uint64_t *)dst.reg; break; 83.1422 - } 83.1423 - } 83.1424 - else if ( !(d & Mov) ) /* optimisation - avoid slow emulated read */ 83.1425 - { 83.1426 - if ( (rc = ops->read(dst.mem.seg, dst.mem.off, 83.1427 - &dst.val, dst.bytes, ctxt)) ) 83.1428 - goto done; 83.1429 - dst.orig_val = dst.val; 83.1430 - } 83.1431 - break; 83.1432 - } 83.1433 - 83.1434 - /* LOCK prefix allowed only on instructions with memory destination. */ 83.1435 - generate_exception_if(lock_prefix && (dst.type != OP_MEM), EXC_GP, 0); 83.1436 - 83.1437 - if ( twobyte ) 83.1438 - goto twobyte_insn; 83.1439 - 83.1440 - switch ( b ) 83.1441 - { 83.1442 - case 0x04 ... 0x05: /* add imm,%%eax */ 83.1443 - dst.reg = (unsigned long *)&_regs.eax; 83.1444 - dst.val = _regs.eax; 83.1445 - case 0x00 ... 0x03: add: /* add */ 83.1446 - emulate_2op_SrcV("add", src, dst, _regs.eflags); 83.1447 - break; 83.1448 - 83.1449 - case 0x0c ... 0x0d: /* or imm,%%eax */ 83.1450 - dst.reg = (unsigned long *)&_regs.eax; 83.1451 - dst.val = _regs.eax; 83.1452 - case 0x08 ... 0x0b: or: /* or */ 83.1453 - emulate_2op_SrcV("or", src, dst, _regs.eflags); 83.1454 - break; 83.1455 - 83.1456 - case 0x14 ... 0x15: /* adc imm,%%eax */ 83.1457 - dst.reg = (unsigned long *)&_regs.eax; 83.1458 - dst.val = _regs.eax; 83.1459 - case 0x10 ... 0x13: adc: /* adc */ 83.1460 - emulate_2op_SrcV("adc", src, dst, _regs.eflags); 83.1461 - break; 83.1462 - 83.1463 - case 0x1c ... 
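The DstBitBase arm above normalises bt/bts/btr/btc's register-sourced bit offset, which is signed and unbounded, into an EA displacement plus an in-word bit number. Two worked cases with op_bytes = 4:

```c
/* src.val = 100:  EA += (100 >> 3) & ~3  = 12 bytes
 *                 bit  = 100 & 31        = 4
 *
 * src.val = -1:   byte_offset = 4 + (((-(-1) - 1) >> 3) & ~3) = 4
 *                 EA -= 4;  bit = 4*8 + (-1) = 31
 *                 i.e. bit -1 is the top bit of the dword below EA. */
```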
0x1d: /* sbb imm,%%eax */ 83.1464 - dst.reg = (unsigned long *)&_regs.eax; 83.1465 - dst.val = _regs.eax; 83.1466 - case 0x18 ... 0x1b: sbb: /* sbb */ 83.1467 - emulate_2op_SrcV("sbb", src, dst, _regs.eflags); 83.1468 - break; 83.1469 - 83.1470 - case 0x24 ... 0x25: /* and imm,%%eax */ 83.1471 - dst.reg = (unsigned long *)&_regs.eax; 83.1472 - dst.val = _regs.eax; 83.1473 - case 0x20 ... 0x23: and: /* and */ 83.1474 - emulate_2op_SrcV("and", src, dst, _regs.eflags); 83.1475 - break; 83.1476 - 83.1477 - case 0x2c ... 0x2d: /* sub imm,%%eax */ 83.1478 - dst.reg = (unsigned long *)&_regs.eax; 83.1479 - dst.val = _regs.eax; 83.1480 - case 0x28 ... 0x2b: sub: /* sub */ 83.1481 - emulate_2op_SrcV("sub", src, dst, _regs.eflags); 83.1482 - break; 83.1483 - 83.1484 - case 0x34 ... 0x35: /* xor imm,%%eax */ 83.1485 - dst.reg = (unsigned long *)&_regs.eax; 83.1486 - dst.val = _regs.eax; 83.1487 - case 0x30 ... 0x33: xor: /* xor */ 83.1488 - emulate_2op_SrcV("xor", src, dst, _regs.eflags); 83.1489 - break; 83.1490 - 83.1491 - case 0x3c ... 0x3d: /* cmp imm,%%eax */ 83.1492 - dst.reg = (unsigned long *)&_regs.eax; 83.1493 - dst.val = _regs.eax; 83.1494 - case 0x38 ... 0x3b: cmp: /* cmp */ 83.1495 - emulate_2op_SrcV("cmp", src, dst, _regs.eflags); 83.1496 - break; 83.1497 - 83.1498 - case 0x62: /* bound */ { 83.1499 - unsigned long src_val2; 83.1500 - int lb, ub, idx; 83.1501 - generate_exception_if(mode_64bit() || (src.type != OP_MEM), 83.1502 - EXC_UD, -1); 83.1503 - if ( (rc = ops->read(src.mem.seg, src.mem.off + op_bytes, 83.1504 - &src_val2, op_bytes, ctxt)) ) 83.1505 - goto done; 83.1506 - ub = (op_bytes == 2) ? (int16_t)src_val2 : (int32_t)src_val2; 83.1507 - lb = (op_bytes == 2) ? (int16_t)src.val : (int32_t)src.val; 83.1508 - idx = (op_bytes == 2) ? (int16_t)dst.val : (int32_t)dst.val; 83.1509 - generate_exception_if((idx < lb) || (idx > ub), EXC_BR, -1); 83.1510 - dst.type = OP_NONE; 83.1511 - break; 83.1512 - } 83.1513 - 83.1514 - case 0x63: /* movsxd (x86/64) / arpl (x86/32) */ 83.1515 - if ( mode_64bit() ) 83.1516 - { 83.1517 - /* movsxd */ 83.1518 - if ( src.type == OP_REG ) 83.1519 - src.val = *(int32_t *)src.reg; 83.1520 - else if ( (rc = ops->read(src.mem.seg, src.mem.off, 83.1521 - &src.val, 4, ctxt)) ) 83.1522 - goto done; 83.1523 - dst.val = (int32_t)src.val; 83.1524 - } 83.1525 - else 83.1526 - { 83.1527 - /* arpl */ 83.1528 - uint16_t src_val = dst.val; 83.1529 - dst = src; 83.1530 - _regs.eflags &= ~EFLG_ZF; 83.1531 - _regs.eflags |= ((src_val & 3) > (dst.val & 3)) ? 
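BOUND (case 0x62 above) reads a signed [lower, upper] pair from memory and raises #BR when the register index falls outside it; both bounds are inclusive. For op_bytes = 4:

```c
/* bound %eax, (mem):  mem[0..3] = lb = -16,  mem[4..7] = ub = 15
 *
 *   eax = -16 or 15  ->  no fault (bounds are inclusive)
 *   eax = 16         ->  #BR
 */
```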
EFLG_ZF : 0; 83.1532 - if ( _regs.eflags & EFLG_ZF ) 83.1533 - dst.val = (dst.val & ~3) | (src_val & 3); 83.1534 - else 83.1535 - dst.type = OP_NONE; 83.1536 - generate_exception_if(in_realmode(ctxt, ops), EXC_UD, -1); 83.1537 - } 83.1538 - break; 83.1539 - 83.1540 - case 0x69: /* imul imm16/32 */ 83.1541 - case 0x6b: /* imul imm8 */ { 83.1542 - unsigned long src1; /* ModR/M source operand */ 83.1543 - if ( ea.type == OP_REG ) 83.1544 - src1 = *ea.reg; 83.1545 - else if ( (rc = ops->read(ea.mem.seg, ea.mem.off, 83.1546 - &src1, op_bytes, ctxt)) ) 83.1547 - goto done; 83.1548 - _regs.eflags &= ~(EFLG_OF|EFLG_CF); 83.1549 - switch ( dst.bytes ) 83.1550 - { 83.1551 - case 2: 83.1552 - dst.val = ((uint32_t)(int16_t)src.val * 83.1553 - (uint32_t)(int16_t)src1); 83.1554 - if ( (int16_t)dst.val != (uint32_t)dst.val ) 83.1555 - _regs.eflags |= EFLG_OF|EFLG_CF; 83.1556 - break; 83.1557 -#ifdef __x86_64__ 83.1558 - case 4: 83.1559 - dst.val = ((uint64_t)(int32_t)src.val * 83.1560 - (uint64_t)(int32_t)src1); 83.1561 - if ( (int32_t)dst.val != dst.val ) 83.1562 - _regs.eflags |= EFLG_OF|EFLG_CF; 83.1563 - break; 83.1564 -#endif 83.1565 - default: { 83.1566 - unsigned long m[2] = { src.val, src1 }; 83.1567 - if ( imul_dbl(m) ) 83.1568 - _regs.eflags |= EFLG_OF|EFLG_CF; 83.1569 - dst.val = m[0]; 83.1570 - break; 83.1571 - } 83.1572 - } 83.1573 - break; 83.1574 - } 83.1575 - 83.1576 - case 0x82: /* Grp1 (x86/32 only) */ 83.1577 - generate_exception_if(mode_64bit(), EXC_UD, -1); 83.1578 - case 0x80: case 0x81: case 0x83: /* Grp1 */ 83.1579 - switch ( modrm_reg & 7 ) 83.1580 - { 83.1581 - case 0: goto add; 83.1582 - case 1: goto or; 83.1583 - case 2: goto adc; 83.1584 - case 3: goto sbb; 83.1585 - case 4: goto and; 83.1586 - case 5: goto sub; 83.1587 - case 6: goto xor; 83.1588 - case 7: goto cmp; 83.1589 - } 83.1590 - break; 83.1591 - 83.1592 - case 0xa8 ... 0xa9: /* test imm,%%eax */ 83.1593 - dst.reg = (unsigned long *)&_regs.eax; 83.1594 - dst.val = _regs.eax; 83.1595 - case 0x84 ... 0x85: test: /* test */ 83.1596 - emulate_2op_SrcV("test", src, dst, _regs.eflags); 83.1597 - break; 83.1598 - 83.1599 - case 0x86 ... 0x87: xchg: /* xchg */ 83.1600 - /* Write back the register source. */ 83.1601 - switch ( dst.bytes ) 83.1602 - { 83.1603 - case 1: *(uint8_t *)src.reg = (uint8_t)dst.val; break; 83.1604 - case 2: *(uint16_t *)src.reg = (uint16_t)dst.val; break; 83.1605 - case 4: *src.reg = (uint32_t)dst.val; break; /* 64b reg: zero-extend */ 83.1606 - case 8: *src.reg = dst.val; break; 83.1607 - } 83.1608 - /* Write back the memory destination with implicit LOCK prefix. */ 83.1609 - dst.val = src.val; 83.1610 - lock_prefix = 1; 83.1611 - break; 83.1612 - 83.1613 - case 0xc6 ... 0xc7: /* mov (sole member of Grp11) */ 83.1614 - generate_exception_if((modrm_reg & 7) != 0, EXC_UD, -1); 83.1615 - case 0x88 ... 
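The arpl arm above looks odd because the decode swaps src and dst first (the table entry is laid out for movsxd), but the architectural operation is simple: raise the destination selector's RPL to the source's if it is lower, reporting the adjustment in ZF. Standalone:

```c
/* Returns the adjusted selector; *zf reports whether RPL was raised. */
static uint16_t arpl(uint16_t dst_sel, uint16_t src_sel, int *zf)
{
    *zf = (dst_sel & 3) < (src_sel & 3);
    return *zf ? ((dst_sel & ~3) | (src_sel & 3)) : dst_sel;
}
```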
0x8b: /* mov */ 83.1616 - dst.val = src.val; 83.1617 - break; 83.1618 - 83.1619 - case 0x8c: /* mov Sreg,r/m */ { 83.1620 - struct segment_register reg; 83.1621 - enum x86_segment seg = decode_segment(modrm_reg); 83.1622 - generate_exception_if(seg == decode_segment_failed, EXC_UD, -1); 83.1623 - fail_if(ops->read_segment == NULL); 83.1624 - if ( (rc = ops->read_segment(seg, &reg, ctxt)) != 0 ) 83.1625 - goto done; 83.1626 - dst.val = reg.sel; 83.1627 - if ( dst.type == OP_MEM ) 83.1628 - dst.bytes = 2; 83.1629 - break; 83.1630 - } 83.1631 - 83.1632 - case 0x8e: /* mov r/m,Sreg */ { 83.1633 - enum x86_segment seg = decode_segment(modrm_reg); 83.1634 - generate_exception_if(seg == decode_segment_failed, EXC_UD, -1); 83.1635 - if ( (rc = load_seg(seg, (uint16_t)src.val, ctxt, ops)) != 0 ) 83.1636 - goto done; 83.1637 - if ( seg == x86_seg_ss ) 83.1638 - ctxt->retire.flags.mov_ss = 1; 83.1639 - dst.type = OP_NONE; 83.1640 - break; 83.1641 - } 83.1642 - 83.1643 - case 0x8d: /* lea */ 83.1644 - dst.val = ea.mem.off; 83.1645 - break; 83.1646 - 83.1647 - case 0x8f: /* pop (sole member of Grp1a) */ 83.1648 - generate_exception_if((modrm_reg & 7) != 0, EXC_UD, -1); 83.1649 - /* 64-bit mode: POP defaults to a 64-bit operand. */ 83.1650 - if ( mode_64bit() && (dst.bytes == 4) ) 83.1651 - dst.bytes = 8; 83.1652 - if ( (rc = ops->read(x86_seg_ss, sp_post_inc(dst.bytes), 83.1653 - &dst.val, dst.bytes, ctxt)) != 0 ) 83.1654 - goto done; 83.1655 - break; 83.1656 - 83.1657 - case 0xb0 ... 0xb7: /* mov imm8,r8 */ 83.1658 - dst.reg = decode_register( 83.1659 - (b & 7) | ((rex_prefix & 1) << 3), &_regs, (rex_prefix == 0)); 83.1660 - dst.val = src.val; 83.1661 - break; 83.1662 - 83.1663 - case 0xb8 ... 0xbf: /* mov imm{16,32,64},r{16,32,64} */ 83.1664 - if ( dst.bytes == 8 ) /* Fetch more bytes to obtain imm64 */ 83.1665 - src.val = ((uint32_t)src.val | 83.1666 - ((uint64_t)insn_fetch_type(uint32_t) << 32)); 83.1667 - dst.reg = decode_register( 83.1668 - (b & 7) | ((rex_prefix & 1) << 3), &_regs, 0); 83.1669 - dst.val = src.val; 83.1670 - break; 83.1671 - 83.1672 - case 0xc0 ...
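The mov-to-%ss case above sets `retire.flags.mov_ss` because hardware blocks interrupt delivery for exactly one instruction after a stack-segment load; the emulator cannot reproduce that itself, so it reports the fact to its caller. A hedged sketch of the caller side (the deferral helper is hypothetical):

```c
rc = x86_emulate(&ctxt, &ops);
if ( (rc == X86EMUL_OKAY) && ctxt.retire.flags.mov_ss )
    block_interrupt_delivery_for_one_insn();   /* hypothetical */
```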
0xc1: grp2: /* Grp2 */ 83.1673 - switch ( modrm_reg & 7 ) 83.1674 - { 83.1675 - case 0: /* rol */ 83.1676 - emulate_2op_SrcB("rol", src, dst, _regs.eflags); 83.1677 - break; 83.1678 - case 1: /* ror */ 83.1679 - emulate_2op_SrcB("ror", src, dst, _regs.eflags); 83.1680 - break; 83.1681 - case 2: /* rcl */ 83.1682 - emulate_2op_SrcB("rcl", src, dst, _regs.eflags); 83.1683 - break; 83.1684 - case 3: /* rcr */ 83.1685 - emulate_2op_SrcB("rcr", src, dst, _regs.eflags); 83.1686 - break; 83.1687 - case 4: /* sal/shl */ 83.1688 - case 6: /* sal/shl */ 83.1689 - emulate_2op_SrcB("sal", src, dst, _regs.eflags); 83.1690 - break; 83.1691 - case 5: /* shr */ 83.1692 - emulate_2op_SrcB("shr", src, dst, _regs.eflags); 83.1693 - break; 83.1694 - case 7: /* sar */ 83.1695 - emulate_2op_SrcB("sar", src, dst, _regs.eflags); 83.1696 - break; 83.1697 - } 83.1698 - break; 83.1699 - 83.1700 - case 0xc4: /* les */ { 83.1701 - unsigned long sel; 83.1702 - dst.val = x86_seg_es; 83.1703 - les: /* dst.val identifies the segment */ 83.1704 - generate_exception_if(src.type != OP_MEM, EXC_UD, -1); 83.1705 - if ( (rc = ops->read(src.mem.seg, src.mem.off + src.bytes, 83.1706 - &sel, 2, ctxt)) != 0 ) 83.1707 - goto done; 83.1708 - if ( (rc = load_seg(dst.val, (uint16_t)sel, ctxt, ops)) != 0 ) 83.1709 - goto done; 83.1710 - dst.val = src.val; 83.1711 - break; 83.1712 - } 83.1713 - 83.1714 - case 0xc5: /* lds */ 83.1715 - dst.val = x86_seg_ds; 83.1716 - goto les; 83.1717 - 83.1718 - case 0xd0 ... 0xd1: /* Grp2 */ 83.1719 - src.val = 1; 83.1720 - goto grp2; 83.1721 - 83.1722 - case 0xd2 ... 0xd3: /* Grp2 */ 83.1723 - src.val = _regs.ecx; 83.1724 - goto grp2; 83.1725 - 83.1726 - case 0xf6 ... 0xf7: /* Grp3 */ 83.1727 - switch ( modrm_reg & 7 ) 83.1728 - { 83.1729 - case 0 ... 1: /* test */ 83.1730 - /* Special case in Grp3: test has an immediate source operand. */ 83.1731 - src.type = OP_IMM; 83.1732 - src.bytes = (d & ByteOp) ? 
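les/lds above (and the lss/lfs/lgs forms of the two-byte map, which share the `les:` label) load a full far pointer: an operand-sized offset at the effective address immediately followed by a 16-bit selector, which is why the selector read happens at `src.mem.off + src.bytes`. Memory layout for op_bytes = 4:

```c
/* les %eax, m:
 *   m + 0 .. m + 3 : 32-bit offset   -> %eax
 *   m + 4 .. m + 5 : 16-bit selector -> %es (via load_seg)
 */
```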
1 : op_bytes; 83.1733 - if ( src.bytes == 8 ) src.bytes = 4; 83.1734 - switch ( src.bytes ) 83.1735 - { 83.1736 - case 1: src.val = insn_fetch_type(int8_t); break; 83.1737 - case 2: src.val = insn_fetch_type(int16_t); break; 83.1738 - case 4: src.val = insn_fetch_type(int32_t); break; 83.1739 - } 83.1740 - goto test; 83.1741 - case 2: /* not */ 83.1742 - dst.val = ~dst.val; 83.1743 - break; 83.1744 - case 3: /* neg */ 83.1745 - emulate_1op("neg", dst, _regs.eflags); 83.1746 - break; 83.1747 - case 4: /* mul */ 83.1748 - src = dst; 83.1749 - dst.type = OP_REG; 83.1750 - dst.reg = (unsigned long *)&_regs.eax; 83.1751 - dst.val = *dst.reg; 83.1752 - _regs.eflags &= ~(EFLG_OF|EFLG_CF); 83.1753 - switch ( src.bytes ) 83.1754 - { 83.1755 - case 1: 83.1756 - dst.val = (uint8_t)dst.val; 83.1757 - dst.val *= src.val; 83.1758 - if ( (uint8_t)dst.val != (uint16_t)dst.val ) 83.1759 - _regs.eflags |= EFLG_OF|EFLG_CF; 83.1760 - dst.bytes = 2; 83.1761 - break; 83.1762 - case 2: 83.1763 - dst.val = (uint16_t)dst.val; 83.1764 - dst.val *= src.val; 83.1765 - if ( (uint16_t)dst.val != (uint32_t)dst.val ) 83.1766 - _regs.eflags |= EFLG_OF|EFLG_CF; 83.1767 - *(uint16_t *)&_regs.edx = dst.val >> 16; 83.1768 - break; 83.1769 -#ifdef __x86_64__ 83.1770 - case 4: 83.1771 - dst.val = (uint32_t)dst.val; 83.1772 - dst.val *= src.val; 83.1773 - if ( (uint32_t)dst.val != dst.val ) 83.1774 - _regs.eflags |= EFLG_OF|EFLG_CF; 83.1775 - _regs.edx = (uint32_t)(dst.val >> 32); 83.1776 - break; 83.1777 -#endif 83.1778 - default: { 83.1779 - unsigned long m[2] = { src.val, dst.val }; 83.1780 - if ( mul_dbl(m) ) 83.1781 - _regs.eflags |= EFLG_OF|EFLG_CF; 83.1782 - _regs.edx = m[1]; 83.1783 - dst.val = m[0]; 83.1784 - break; 83.1785 - } 83.1786 - } 83.1787 - break; 83.1788 - case 5: /* imul */ 83.1789 - src = dst; 83.1790 - dst.type = OP_REG; 83.1791 - dst.reg = (unsigned long *)&_regs.eax; 83.1792 - dst.val = *dst.reg; 83.1793 - _regs.eflags &= ~(EFLG_OF|EFLG_CF); 83.1794 - switch ( src.bytes ) 83.1795 - { 83.1796 - case 1: 83.1797 - dst.val = ((uint16_t)(int8_t)src.val * 83.1798 - (uint16_t)(int8_t)dst.val); 83.1799 - if ( (int8_t)dst.val != (uint16_t)dst.val ) 83.1800 - _regs.eflags |= EFLG_OF|EFLG_CF; 83.1801 - dst.bytes = 2; 83.1802 - break; 83.1803 - case 2: 83.1804 - dst.val = ((uint32_t)(int16_t)src.val * 83.1805 - (uint32_t)(int16_t)dst.val); 83.1806 - if ( (int16_t)dst.val != (uint32_t)dst.val ) 83.1807 - _regs.eflags |= EFLG_OF|EFLG_CF; 83.1808 - *(uint16_t *)&_regs.edx = dst.val >> 16; 83.1809 - break; 83.1810 -#ifdef __x86_64__ 83.1811 - case 4: 83.1812 - dst.val = ((uint64_t)(int32_t)src.val * 83.1813 - (uint64_t)(int32_t)dst.val); 83.1814 - if ( (int32_t)dst.val != dst.val ) 83.1815 - _regs.eflags |= EFLG_OF|EFLG_CF; 83.1816 - _regs.edx = (uint32_t)(dst.val >> 32); 83.1817 - break; 83.1818 -#endif 83.1819 - default: { 83.1820 - unsigned long m[2] = { src.val, dst.val }; 83.1821 - if ( imul_dbl(m) ) 83.1822 - _regs.eflags |= EFLG_OF|EFLG_CF; 83.1823 - _regs.edx = m[1]; 83.1824 - dst.val = m[0]; 83.1825 - break; 83.1826 - } 83.1827 - } 83.1828 - break; 83.1829 - case 6: /* div */ { 83.1830 - unsigned long u[2], v; 83.1831 - src = dst; 83.1832 - dst.type = OP_REG; 83.1833 - dst.reg = (unsigned long *)&_regs.eax; 83.1834 - switch ( src.bytes ) 83.1835 - { 83.1836 - case 1: 83.1837 - u[0] = (uint16_t)_regs.eax; 83.1838 - u[1] = 0; 83.1839 - v = (uint8_t)src.val; 83.1840 - generate_exception_if( 83.1841 - div_dbl(u, v) || ((uint8_t)u[0] != (uint16_t)u[0]), 83.1842 - EXC_DE, -1); 83.1843 - dst.val = (uint8_t)u[0]; 
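The Grp3 multiply cases share one flag rule: CF and OF are set when the upper half of the double-width product is significant, meaning nonzero for MUL, and not merely the sign-extension of the low half for IMUL; the code checks this by comparing the truncated result against the full one. The 16-bit MUL case standalone:

```c
/* MUL r/m16: DX:AX = AX * src;  CF = OF = (DX != 0). */
uint32_t prod = (uint32_t)(uint16_t)ax * (uint16_t)src;

ax    = (uint16_t)prod;
dx    = (uint16_t)(prod >> 16);
cf_of = (dx != 0);
```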
83.1844 - ((uint8_t *)&_regs.eax)[1] = u[1]; 83.1845 - break; 83.1846 - case 2: 83.1847 - u[0] = ((uint32_t)_regs.edx << 16) | (uint16_t)_regs.eax; 83.1848 - u[1] = 0; 83.1849 - v = (uint16_t)src.val; 83.1850 - generate_exception_if( 83.1851 - div_dbl(u, v) || ((uint16_t)u[0] != (uint32_t)u[0]), 83.1852 - EXC_DE, -1); 83.1853 - dst.val = (uint16_t)u[0]; 83.1854 - *(uint16_t *)&_regs.edx = u[1]; 83.1855 - break; 83.1856 -#ifdef __x86_64__ 83.1857 - case 4: 83.1858 - u[0] = (_regs.edx << 32) | (uint32_t)_regs.eax; 83.1859 - u[1] = 0; 83.1860 - v = (uint32_t)src.val; 83.1861 - generate_exception_if( 83.1862 - div_dbl(u, v) || ((uint32_t)u[0] != u[0]), 83.1863 - EXC_DE, -1); 83.1864 - dst.val = (uint32_t)u[0]; 83.1865 - _regs.edx = (uint32_t)u[1]; 83.1866 - break; 83.1867 -#endif 83.1868 - default: 83.1869 - u[0] = _regs.eax; 83.1870 - u[1] = _regs.edx; 83.1871 - v = src.val; 83.1872 - generate_exception_if(div_dbl(u, v), EXC_DE, -1); 83.1873 - dst.val = u[0]; 83.1874 - _regs.edx = u[1]; 83.1875 - break; 83.1876 - } 83.1877 - break; 83.1878 - } 83.1879 - case 7: /* idiv */ { 83.1880 - unsigned long u[2], v; 83.1881 - src = dst; 83.1882 - dst.type = OP_REG; 83.1883 - dst.reg = (unsigned long *)&_regs.eax; 83.1884 - switch ( src.bytes ) 83.1885 - { 83.1886 - case 1: 83.1887 - u[0] = (int16_t)_regs.eax; 83.1888 - u[1] = ((long)u[0] < 0) ? ~0UL : 0UL; 83.1889 - v = (int8_t)src.val; 83.1890 - generate_exception_if( 83.1891 - idiv_dbl(u, v) || ((int8_t)u[0] != (int16_t)u[0]), 83.1892 - EXC_DE, -1); 83.1893 - dst.val = (int8_t)u[0]; 83.1894 - ((int8_t *)&_regs.eax)[1] = u[1]; 83.1895 - break; 83.1896 - case 2: 83.1897 - u[0] = (int32_t)((_regs.edx << 16) | (uint16_t)_regs.eax); 83.1898 - u[1] = ((long)u[0] < 0) ? ~0UL : 0UL; 83.1899 - v = (int16_t)src.val; 83.1900 - generate_exception_if( 83.1901 - idiv_dbl(u, v) || ((int16_t)u[0] != (int32_t)u[0]), 83.1902 - EXC_DE, -1); 83.1903 - dst.val = (int16_t)u[0]; 83.1904 - *(int16_t *)&_regs.edx = u[1]; 83.1905 - break; 83.1906 -#ifdef __x86_64__ 83.1907 - case 4: 83.1908 - u[0] = (_regs.edx << 32) | (uint32_t)_regs.eax; 83.1909 - u[1] = ((long)u[0] < 0) ? 
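Even when div_dbl succeeds, the quotient must additionally fit the destination register, which is what the extra comparison in each case above enforces. For the 8-bit divide:

```c
/* div r/m8 with AX = 512:
 *   512 / 3 = 170 rem 2  ->  AL = 170, AH = 2      (fits: OK)
 *   512 / 2 = 256        ->  quotient > 0xff: #DE
 */
```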
~0UL : 0UL; 83.1910 - v = (int32_t)src.val; 83.1911 - generate_exception_if( 83.1912 - idiv_dbl(u, v) || ((int32_t)u[0] != u[0]), 83.1913 - EXC_DE, -1); 83.1914 - dst.val = (int32_t)u[0]; 83.1915 - _regs.edx = (uint32_t)u[1]; 83.1916 - break; 83.1917 -#endif 83.1918 - default: 83.1919 - u[0] = _regs.eax; 83.1920 - u[1] = _regs.edx; 83.1921 - v = src.val; 83.1922 - generate_exception_if(idiv_dbl(u, v), EXC_DE, -1); 83.1923 - dst.val = u[0]; 83.1924 - _regs.edx = u[1]; 83.1925 - break; 83.1926 - } 83.1927 - break; 83.1928 - } 83.1929 - default: 83.1930 - goto cannot_emulate; 83.1931 - } 83.1932 - break; 83.1933 - 83.1934 - case 0xfe: /* Grp4 */ 83.1935 - generate_exception_if((modrm_reg & 7) >= 2, EXC_UD, -1); 83.1936 - case 0xff: /* Grp5 */ 83.1937 - switch ( modrm_reg & 7 ) 83.1938 - { 83.1939 - case 0: /* inc */ 83.1940 - emulate_1op("inc", dst, _regs.eflags); 83.1941 - break; 83.1942 - case 1: /* dec */ 83.1943 - emulate_1op("dec", dst, _regs.eflags); 83.1944 - break; 83.1945 - case 2: /* call (near) */ 83.1946 - case 4: /* jmp (near) */ 83.1947 - if ( (dst.bytes != 8) && mode_64bit() ) 83.1948 - { 83.1949 - dst.bytes = op_bytes = 8; 83.1950 - if ( dst.type == OP_REG ) 83.1951 - dst.val = *dst.reg; 83.1952 - else if ( (rc = ops->read(dst.mem.seg, dst.mem.off, 83.1953 - &dst.val, 8, ctxt)) != 0 ) 83.1954 - goto done; 83.1955 - } 83.1956 - src.val = _regs.eip; 83.1957 - _regs.eip = dst.val; 83.1958 - if ( (modrm_reg & 7) == 2 ) 83.1959 - goto push; /* call */ 83.1960 - dst.type = OP_NONE; 83.1961 - break; 83.1962 - case 3: /* call (far, absolute indirect) */ 83.1963 - case 5: /* jmp (far, absolute indirect) */ { 83.1964 - unsigned long sel; 83.1965 - 83.1966 - generate_exception_if(dst.type != OP_MEM, EXC_UD, -1); 83.1967 - 83.1968 - if ( (rc = ops->read(dst.mem.seg, dst.mem.off+dst.bytes, 83.1969 - &sel, 2, ctxt)) ) 83.1970 - goto done; 83.1971 - 83.1972 - if ( (modrm_reg & 7) == 3 ) /* call */ 83.1973 - { 83.1974 - struct segment_register reg; 83.1975 - fail_if(ops->read_segment == NULL); 83.1976 - if ( (rc = ops->read_segment(x86_seg_cs, ®, ctxt)) || 83.1977 - (rc = ops->write(x86_seg_ss, sp_pre_dec(op_bytes), 83.1978 - reg.sel, op_bytes, ctxt)) || 83.1979 - (rc = ops->write(x86_seg_ss, sp_pre_dec(op_bytes), 83.1980 - _regs.eip, op_bytes, ctxt)) ) 83.1981 - goto done; 83.1982 - } 83.1983 - 83.1984 - if ( (rc = load_seg(x86_seg_cs, sel, ctxt, ops)) != 0 ) 83.1985 - goto done; 83.1986 - _regs.eip = dst.val; 83.1987 - 83.1988 - dst.type = OP_NONE; 83.1989 - break; 83.1990 - } 83.1991 - case 6: /* push */ 83.1992 - /* 64-bit mode: PUSH defaults to a 64-bit operand. */ 83.1993 - if ( mode_64bit() && (dst.bytes == 4) ) 83.1994 - { 83.1995 - dst.bytes = 8; 83.1996 - if ( dst.type == OP_REG ) 83.1997 - dst.val = *dst.reg; 83.1998 - else if ( (rc = ops->read(dst.mem.seg, dst.mem.off, 83.1999 - &dst.val, 8, ctxt)) != 0 ) 83.2000 - goto done; 83.2001 - } 83.2002 - if ( (rc = ops->write(x86_seg_ss, sp_pre_dec(dst.bytes), 83.2003 - dst.val, dst.bytes, ctxt)) != 0 ) 83.2004 - goto done; 83.2005 - dst.type = OP_NONE; 83.2006 - break; 83.2007 - case 7: 83.2008 - generate_exception_if(1, EXC_UD, -1); 83.2009 - default: 83.2010 - goto cannot_emulate; 83.2011 - } 83.2012 - break; 83.2013 - } 83.2014 - 83.2015 - writeback: 83.2016 - switch ( dst.type ) 83.2017 - { 83.2018 - case OP_REG: 83.2019 - /* The 4-byte case *is* correct: in 64-bit mode we zero-extend. 
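Grp5's near call (above) is "stack the return address, then branch", routed through the shared `push:` writeback; the refetch just before it handles the rule that near branches default to 64-bit operands in 64-bit mode even when decoded at 32. The ordering, sketched with `push()` standing in for that shared path:

```c
ret_addr = regs.eip;      /* next instruction, i.e. the return point */
regs.eip = target;        /* branch                                  */
push(ret_addr);           /* write the return address via `push:`    */
```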
*/ 83.2020 - switch ( dst.bytes ) 83.2021 - { 83.2022 - case 1: *(uint8_t *)dst.reg = (uint8_t)dst.val; break; 83.2023 - case 2: *(uint16_t *)dst.reg = (uint16_t)dst.val; break; 83.2024 - case 4: *dst.reg = (uint32_t)dst.val; break; /* 64b: zero-ext */ 83.2025 - case 8: *dst.reg = dst.val; break; 83.2026 - } 83.2027 - break; 83.2028 - case OP_MEM: 83.2029 - if ( !(d & Mov) && (dst.orig_val == dst.val) && 83.2030 - !ctxt->force_writeback ) 83.2031 - /* nothing to do */; 83.2032 - else if ( lock_prefix ) 83.2033 - rc = ops->cmpxchg( 83.2034 - dst.mem.seg, dst.mem.off, dst.orig_val, 83.2035 - dst.val, dst.bytes, ctxt); 83.2036 - else 83.2037 - rc = ops->write( 83.2038 - dst.mem.seg, dst.mem.off, dst.val, dst.bytes, ctxt); 83.2039 - if ( rc != 0 ) 83.2040 - goto done; 83.2041 - default: 83.2042 - break; 83.2043 - } 83.2044 - 83.2045 - /* Commit shadow register state. */ 83.2046 - _regs.eflags &= ~EFLG_RF; 83.2047 - *ctxt->regs = _regs; 83.2048 - if ( (_regs.eflags & EFLG_TF) && (rc == X86EMUL_OKAY) && 83.2049 - (ops->inject_hw_exception != NULL) ) 83.2050 - rc = ops->inject_hw_exception(EXC_DB, -1, ctxt) ? : X86EMUL_EXCEPTION; 83.2051 - 83.2052 - done: 83.2053 - return rc; 83.2054 - 83.2055 - special_insn: 83.2056 - dst.type = OP_NONE; 83.2057 - 83.2058 - /* 83.2059 - * The only implicit-operands instructions allowed a LOCK prefix are 83.2060 - * CMPXCHG{8,16}B, MOV CRn, MOV DRn. 83.2061 - */ 83.2062 - generate_exception_if(lock_prefix && 83.2063 - ((b < 0x20) || (b > 0x23)) && /* MOV CRn/DRn */ 83.2064 - (b != 0xc7), /* CMPXCHG{8,16}B */ 83.2065 - EXC_GP, 0); 83.2066 - 83.2067 - if ( twobyte ) 83.2068 - goto twobyte_special_insn; 83.2069 - 83.2070 - switch ( b ) 83.2071 - { 83.2072 - case 0x06: /* push %%es */ { 83.2073 - struct segment_register reg; 83.2074 - src.val = x86_seg_es; 83.2075 - push_seg: 83.2076 - fail_if(ops->read_segment == NULL); 83.2077 - if ( (rc = ops->read_segment(src.val, ®, ctxt)) != 0 ) 83.2078 - return rc; 83.2079 - /* 64-bit mode: PUSH defaults to a 64-bit operand. */ 83.2080 - if ( mode_64bit() && (op_bytes == 4) ) 83.2081 - op_bytes = 8; 83.2082 - if ( (rc = ops->write(x86_seg_ss, sp_pre_dec(op_bytes), 83.2083 - reg.sel, op_bytes, ctxt)) != 0 ) 83.2084 - goto done; 83.2085 - break; 83.2086 - } 83.2087 - 83.2088 - case 0x07: /* pop %%es */ 83.2089 - src.val = x86_seg_es; 83.2090 - pop_seg: 83.2091 - fail_if(ops->write_segment == NULL); 83.2092 - /* 64-bit mode: POP defaults to a 64-bit operand. 
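The memory writeback above folds LOCKed read-modify-writes into the cmpxchg hook: the operand's original value is offered as the expected value, so a concurrent store from another vCPU makes the update fail detectably instead of being silently overwritten. The two paths side by side:

```c
if ( lock_prefix )
    rc = ops->cmpxchg(dst.mem.seg, dst.mem.off,
                      dst.orig_val,          /* expected old value */
                      dst.val,               /* new value          */
                      dst.bytes, ctxt);
else
    rc = ops->write(dst.mem.seg, dst.mem.off,
                    dst.val, dst.bytes, ctxt);
```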
*/ 83.2093 - if ( mode_64bit() && (op_bytes == 4) ) 83.2094 - op_bytes = 8; 83.2095 - if ( (rc = ops->read(x86_seg_ss, sp_post_inc(op_bytes), 83.2096 - &dst.val, op_bytes, ctxt)) != 0 ) 83.2097 - goto done; 83.2098 - if ( (rc = load_seg(src.val, (uint16_t)dst.val, ctxt, ops)) != 0 ) 83.2099 - return rc; 83.2100 - break; 83.2101 - 83.2102 - case 0x0e: /* push %%cs */ 83.2103 - src.val = x86_seg_cs; 83.2104 - goto push_seg; 83.2105 - 83.2106 - case 0x16: /* push %%ss */ 83.2107 - src.val = x86_seg_ss; 83.2108 - goto push_seg; 83.2109 - 83.2110 - case 0x17: /* pop %%ss */ 83.2111 - src.val = x86_seg_ss; 83.2112 - ctxt->retire.flags.mov_ss = 1; 83.2113 - goto pop_seg; 83.2114 - 83.2115 - case 0x1e: /* push %%ds */ 83.2116 - src.val = x86_seg_ds; 83.2117 - goto push_seg; 83.2118 - 83.2119 - case 0x1f: /* pop %%ds */ 83.2120 - src.val = x86_seg_ds; 83.2121 - goto pop_seg; 83.2122 - 83.2123 - case 0x27: /* daa */ { 83.2124 - uint8_t al = _regs.eax; 83.2125 - unsigned long eflags = _regs.eflags; 83.2126 - generate_exception_if(mode_64bit(), EXC_UD, -1); 83.2127 - _regs.eflags &= ~(EFLG_CF|EFLG_AF); 83.2128 - if ( ((al & 0x0f) > 9) || (eflags & EFLG_AF) ) 83.2129 - { 83.2130 - *(uint8_t *)&_regs.eax += 6; 83.2131 - _regs.eflags |= EFLG_AF; 83.2132 - } 83.2133 - if ( (al > 0x99) || (eflags & EFLG_CF) ) 83.2134 - { 83.2135 - *(uint8_t *)&_regs.eax += 0x60; 83.2136 - _regs.eflags |= EFLG_CF; 83.2137 - } 83.2138 - _regs.eflags &= ~(EFLG_SF|EFLG_ZF|EFLG_PF); 83.2139 - _regs.eflags |= ((uint8_t)_regs.eax == 0) ? EFLG_ZF : 0; 83.2140 - _regs.eflags |= (( int8_t)_regs.eax < 0) ? EFLG_SF : 0; 83.2141 - _regs.eflags |= even_parity(_regs.eax) ? EFLG_PF : 0; 83.2142 - break; 83.2143 - } 83.2144 - 83.2145 - case 0x2f: /* das */ { 83.2146 - uint8_t al = _regs.eax; 83.2147 - unsigned long eflags = _regs.eflags; 83.2148 - generate_exception_if(mode_64bit(), EXC_UD, -1); 83.2149 - _regs.eflags &= ~(EFLG_CF|EFLG_AF); 83.2150 - if ( ((al & 0x0f) > 9) || (eflags & EFLG_AF) ) 83.2151 - { 83.2152 - _regs.eflags |= EFLG_AF; 83.2153 - if ( (al < 6) || (eflags & EFLG_CF) ) 83.2154 - _regs.eflags |= EFLG_CF; 83.2155 - *(uint8_t *)&_regs.eax -= 6; 83.2156 - } 83.2157 - if ( (al > 0x99) || (eflags & EFLG_CF) ) 83.2158 - { 83.2159 - *(uint8_t *)&_regs.eax -= 0x60; 83.2160 - _regs.eflags |= EFLG_CF; 83.2161 - } 83.2162 - _regs.eflags &= ~(EFLG_SF|EFLG_ZF|EFLG_PF); 83.2163 - _regs.eflags |= ((uint8_t)_regs.eax == 0) ? EFLG_ZF : 0; 83.2164 - _regs.eflags |= (( int8_t)_regs.eax < 0) ? EFLG_SF : 0; 83.2165 - _regs.eflags |= even_parity(_regs.eax) ? EFLG_PF : 0; 83.2166 - break; 83.2167 - } 83.2168 - 83.2169 - case 0x37: /* aaa */ 83.2170 - case 0x3f: /* aas */ 83.2171 - generate_exception_if(mode_64bit(), EXC_UD, -1); 83.2172 - _regs.eflags &= ~EFLG_CF; 83.2173 - if ( ((uint8_t)_regs.eax > 9) || (_regs.eflags & EFLG_AF) ) 83.2174 - { 83.2175 - ((uint8_t *)&_regs.eax)[0] += (b == 0x37) ? 6 : -6; 83.2176 - ((uint8_t *)&_regs.eax)[1] += (b == 0x37) ? 1 : -1; 83.2177 - _regs.eflags |= EFLG_CF | EFLG_AF; 83.2178 - } 83.2179 - ((uint8_t *)&_regs.eax)[0] &= 0x0f; 83.2180 - break; 83.2181 - 83.2182 - case 0x40 ... 0x4f: /* inc/dec reg */ 83.2183 - dst.type = OP_REG; 83.2184 - dst.reg = decode_register(b & 7, &_regs, 0); 83.2185 - dst.bytes = op_bytes; 83.2186 - dst.val = *dst.reg; 83.2187 - if ( b & 8 ) 83.2188 - emulate_1op("dec", dst, _regs.eflags); 83.2189 - else 83.2190 - emulate_1op("inc", dst, _regs.eflags); 83.2191 - break; 83.2192 - 83.2193 - case 0x50 ... 
0x57: /* push reg */ 83.2194 - src.val = *(unsigned long *)decode_register( 83.2195 - (b & 7) | ((rex_prefix & 1) << 3), &_regs, 0); 83.2196 - goto push; 83.2197 - 83.2198 - case 0x58 ... 0x5f: /* pop reg */ 83.2199 - dst.type = OP_REG; 83.2200 - dst.reg = decode_register( 83.2201 - (b & 7) | ((rex_prefix & 1) << 3), &_regs, 0); 83.2202 - dst.bytes = op_bytes; 83.2203 - if ( mode_64bit() && (dst.bytes == 4) ) 83.2204 - dst.bytes = 8; 83.2205 - if ( (rc = ops->read(x86_seg_ss, sp_post_inc(dst.bytes), 83.2206 - &dst.val, dst.bytes, ctxt)) != 0 ) 83.2207 - goto done; 83.2208 - break; 83.2209 - 83.2210 - case 0x60: /* pusha */ { 83.2211 - int i; 83.2212 - unsigned long regs[] = { 83.2213 - _regs.eax, _regs.ecx, _regs.edx, _regs.ebx, 83.2214 - _regs.esp, _regs.ebp, _regs.esi, _regs.edi }; 83.2215 - generate_exception_if(mode_64bit(), EXC_UD, -1); 83.2216 - for ( i = 0; i < 8; i++ ) 83.2217 - if ( (rc = ops->write(x86_seg_ss, sp_pre_dec(op_bytes), 83.2218 - regs[i], op_bytes, ctxt)) != 0 ) 83.2219 - goto done; 83.2220 - break; 83.2221 - } 83.2222 - 83.2223 - case 0x61: /* popa */ { 83.2224 - int i; 83.2225 - unsigned long dummy_esp, *regs[] = { 83.2226 - (unsigned long *)&_regs.edi, (unsigned long *)&_regs.esi, 83.2227 - (unsigned long *)&_regs.ebp, (unsigned long *)&dummy_esp, 83.2228 - (unsigned long *)&_regs.ebx, (unsigned long *)&_regs.edx, 83.2229 - (unsigned long *)&_regs.ecx, (unsigned long *)&_regs.eax }; 83.2230 - generate_exception_if(mode_64bit(), EXC_UD, -1); 83.2231 - for ( i = 0; i < 8; i++ ) 83.2232 - { 83.2233 - if ( (rc = ops->read(x86_seg_ss, sp_post_inc(op_bytes), 83.2234 - &dst.val, op_bytes, ctxt)) != 0 ) 83.2235 - goto done; 83.2236 - switch ( op_bytes ) 83.2237 - { 83.2238 - case 1: *(uint8_t *)regs[i] = (uint8_t)dst.val; break; 83.2239 - case 2: *(uint16_t *)regs[i] = (uint16_t)dst.val; break; 83.2240 - case 4: *regs[i] = (uint32_t)dst.val; break; /* 64b: zero-ext */ 83.2241 - case 8: *regs[i] = dst.val; break; 83.2242 - } 83.2243 - } 83.2244 - break; 83.2245 - } 83.2246 - 83.2247 - case 0x68: /* push imm{16,32,64} */ 83.2248 - src.val = ((op_bytes == 2) 83.2249 - ? (int32_t)insn_fetch_type(int16_t) 83.2250 - : insn_fetch_type(int32_t)); 83.2251 - goto push; 83.2252 - 83.2253 - case 0x6a: /* push imm8 */ 83.2254 - src.val = insn_fetch_type(int8_t); 83.2255 - push: 83.2256 - d |= Mov; /* force writeback */ 83.2257 - dst.type = OP_MEM; 83.2258 - dst.bytes = op_bytes; 83.2259 - if ( mode_64bit() && (dst.bytes == 4) ) 83.2260 - dst.bytes = 8; 83.2261 - dst.val = src.val; 83.2262 - dst.mem.seg = x86_seg_ss; 83.2263 - dst.mem.off = sp_pre_dec(dst.bytes); 83.2264 - break; 83.2265 - 83.2266 - case 0x6c ... 0x6d: /* ins %dx,%es:%edi */ { 83.2267 - unsigned long nr_reps = get_rep_prefix(); 83.2268 - dst.bytes = !(b & 1) ? 1 : (op_bytes == 8) ? 4 : op_bytes; 83.2269 - dst.mem.seg = x86_seg_es; 83.2270 - dst.mem.off = truncate_ea(_regs.edi); 83.2271 - if ( (nr_reps > 1) && (ops->rep_ins != NULL) && 83.2272 - ((rc = ops->rep_ins((uint16_t)_regs.edx, dst.mem.seg, 83.2273 - dst.mem.off, dst.bytes, 83.2274 - &nr_reps, ctxt)) != X86EMUL_UNHANDLEABLE) ) 83.2275 - { 83.2276 - if ( rc != 0 ) 83.2277 - goto done; 83.2278 - } 83.2279 - else 83.2280 - { 83.2281 - fail_if(ops->read_io == NULL); 83.2282 - if ( (rc = ops->read_io((uint16_t)_regs.edx, dst.bytes, 83.2283 - &dst.val, ctxt)) != 0 ) 83.2284 - goto done; 83.2285 - dst.type = OP_MEM; 83.2286 - nr_reps = 1; 83.2287 - } 83.2288 - register_address_increment( 83.2289 - _regs.edi, 83.2290 - nr_reps * ((_regs.eflags & EFLG_DF) ? 
-dst.bytes : dst.bytes)); 83.2291 - put_rep_prefix(nr_reps); 83.2292 - break; 83.2293 - } 83.2294 - 83.2295 - case 0x6e ... 0x6f: /* outs %esi,%dx */ { 83.2296 - unsigned long nr_reps = get_rep_prefix(); 83.2297 - dst.bytes = !(b & 1) ? 1 : (op_bytes == 8) ? 4 : op_bytes; 83.2298 - if ( (nr_reps > 1) && (ops->rep_outs != NULL) && 83.2299 - ((rc = ops->rep_outs(ea.mem.seg, truncate_ea(_regs.esi), 83.2300 - (uint16_t)_regs.edx, dst.bytes, 83.2301 - &nr_reps, ctxt)) != X86EMUL_UNHANDLEABLE) ) 83.2302 - { 83.2303 - if ( rc != 0 ) 83.2304 - goto done; 83.2305 - } 83.2306 - else 83.2307 - { 83.2308 - if ( (rc = ops->read(ea.mem.seg, truncate_ea(_regs.esi), 83.2309 - &dst.val, dst.bytes, ctxt)) != 0 ) 83.2310 - goto done; 83.2311 - fail_if(ops->write_io == NULL); 83.2312 - if ( (rc = ops->write_io((uint16_t)_regs.edx, dst.bytes, 83.2313 - dst.val, ctxt)) != 0 ) 83.2314 - goto done; 83.2315 - nr_reps = 1; 83.2316 - } 83.2317 - register_address_increment( 83.2318 - _regs.esi, 83.2319 - nr_reps * ((_regs.eflags & EFLG_DF) ? -dst.bytes : dst.bytes)); 83.2320 - put_rep_prefix(nr_reps); 83.2321 - break; 83.2322 - } 83.2323 - 83.2324 - case 0x70 ... 0x7f: /* jcc (short) */ { 83.2325 - int rel = insn_fetch_type(int8_t); 83.2326 - if ( test_cc(b, _regs.eflags) ) 83.2327 - jmp_rel(rel); 83.2328 - break; 83.2329 - } 83.2330 - 83.2331 - case 0x90: /* nop / xchg %%r8,%%rax */ 83.2332 - if ( !(rex_prefix & 1) ) 83.2333 - break; /* nop */ 83.2334 - 83.2335 - case 0x91 ... 0x97: /* xchg reg,%%rax */ 83.2336 - src.type = dst.type = OP_REG; 83.2337 - src.bytes = dst.bytes = op_bytes; 83.2338 - src.reg = (unsigned long *)&_regs.eax; 83.2339 - src.val = *src.reg; 83.2340 - dst.reg = decode_register( 83.2341 - (b & 7) | ((rex_prefix & 1) << 3), &_regs, 0); 83.2342 - dst.val = *dst.reg; 83.2343 - goto xchg; 83.2344 - 83.2345 - case 0x98: /* cbw/cwde/cdqe */ 83.2346 - switch ( op_bytes ) 83.2347 - { 83.2348 - case 2: *(int16_t *)&_regs.eax = (int8_t)_regs.eax; break; /* cbw */ 83.2349 - case 4: _regs.eax = (uint32_t)(int16_t)_regs.eax; break; /* cwde */ 83.2350 - case 8: _regs.eax = (int32_t)_regs.eax; break; /* cdqe */ 83.2351 - } 83.2352 - break; 83.2353 - 83.2354 - case 0x99: /* cwd/cdq/cqo */ 83.2355 - switch ( op_bytes ) 83.2356 - { 83.2357 - case 2: 83.2358 - *(int16_t *)&_regs.edx = ((int16_t)_regs.eax < 0) ? -1 : 0; 83.2359 - break; 83.2360 - case 4: 83.2361 - _regs.edx = (uint32_t)(((int32_t)_regs.eax < 0) ? -1 : 0); 83.2362 - break; 83.2363 - case 8: 83.2364 - _regs.edx = (_regs.eax < 0) ? 
-1 : 0; 83.2365 - break; 83.2366 - } 83.2367 - break; 83.2368 - 83.2369 - case 0x9a: /* call (far, absolute) */ { 83.2370 - struct segment_register reg; 83.2371 - uint16_t sel; 83.2372 - uint32_t eip; 83.2373 - 83.2374 - fail_if(ops->read_segment == NULL); 83.2375 - generate_exception_if(mode_64bit(), EXC_UD, -1); 83.2376 - 83.2377 - eip = insn_fetch_bytes(op_bytes); 83.2378 - sel = insn_fetch_type(uint16_t); 83.2379 - 83.2380 - if ( (rc = ops->read_segment(x86_seg_cs, &reg, ctxt)) || 83.2381 - (rc = ops->write(x86_seg_ss, sp_pre_dec(op_bytes), 83.2382 - reg.sel, op_bytes, ctxt)) || 83.2383 - (rc = ops->write(x86_seg_ss, sp_pre_dec(op_bytes), 83.2384 - _regs.eip, op_bytes, ctxt)) ) 83.2385 - goto done; 83.2386 - 83.2387 - if ( (rc = load_seg(x86_seg_cs, sel, ctxt, ops)) != 0 ) 83.2388 - goto done; 83.2389 - _regs.eip = eip; 83.2390 - break; 83.2391 - } 83.2392 - 83.2393 - case 0x9b: /* wait/fwait */ 83.2394 - fail_if(ops->load_fpu_ctxt == NULL); 83.2395 - ops->load_fpu_ctxt(ctxt); 83.2396 - __emulate_fpu_insn("fwait"); 83.2397 - break; 83.2398 - 83.2399 - case 0x9c: /* pushf */ 83.2400 - src.val = _regs.eflags; 83.2401 - goto push; 83.2402 - 83.2403 - case 0x9d: /* popf */ { 83.2404 - uint32_t mask = EFLG_VIP | EFLG_VIF | EFLG_VM; 83.2405 - if ( !mode_ring0() ) 83.2406 - mask |= EFLG_IOPL; 83.2407 - if ( !mode_iopl() ) 83.2408 - mask |= EFLG_IF; 83.2409 - /* 64-bit mode: POP defaults to a 64-bit operand. */ 83.2410 - if ( mode_64bit() && (op_bytes == 4) ) 83.2411 - op_bytes = 8; 83.2412 - if ( (rc = ops->read(x86_seg_ss, sp_post_inc(op_bytes), 83.2413 - &dst.val, op_bytes, ctxt)) != 0 ) 83.2414 - goto done; 83.2415 - if ( op_bytes == 2 ) 83.2416 - dst.val = (uint16_t)dst.val | (_regs.eflags & 0xffff0000u); 83.2417 - dst.val &= 0x257fd5; 83.2418 - _regs.eflags &= mask; 83.2419 - _regs.eflags |= (uint32_t)(dst.val & ~mask) | 0x02; 83.2420 - break; 83.2421 - } 83.2422 - 83.2423 - case 0x9e: /* sahf */ 83.2424 - *(uint8_t *)&_regs.eflags = (((uint8_t *)&_regs.eax)[1] & 0xd7) | 0x02; 83.2425 - break; 83.2426 - 83.2427 - case 0x9f: /* lahf */ 83.2428 - ((uint8_t *)&_regs.eax)[1] = (_regs.eflags & 0xd7) | 0x02; 83.2429 - break; 83.2430 - 83.2431 - case 0xa0 ... 0xa1: /* mov mem.offs,{%al,%ax,%eax,%rax} */ 83.2432 - /* Source EA is not encoded via ModRM. */ 83.2433 - dst.type = OP_REG; 83.2434 - dst.reg = (unsigned long *)&_regs.eax; 83.2435 - dst.bytes = (d & ByteOp) ? 1 : op_bytes; 83.2436 - if ( (rc = ops->read(ea.mem.seg, insn_fetch_bytes(ad_bytes), 83.2437 - &dst.val, dst.bytes, ctxt)) != 0 ) 83.2438 - goto done; 83.2439 - break; 83.2440 - 83.2441 - case 0xa2 ... 0xa3: /* mov {%al,%ax,%eax,%rax},mem.offs */ 83.2442 - /* Destination EA is not encoded via ModRM. */ 83.2443 - dst.type = OP_MEM; 83.2444 - dst.mem.seg = ea.mem.seg; 83.2445 - dst.mem.off = insn_fetch_bytes(ad_bytes); 83.2446 - dst.bytes = (d & ByteOp) ? 1 : op_bytes; 83.2447 - dst.val = (unsigned long)_regs.eax; 83.2448 - break; 83.2449 - 83.2450 - case 0xa4 ... 0xa5: /* movs */ { 83.2451 - unsigned long nr_reps = get_rep_prefix(); 83.2452 - dst.bytes = (d & ByteOp) ?
1 : op_bytes; 83.2453 - dst.mem.seg = x86_seg_es; 83.2454 - dst.mem.off = truncate_ea(_regs.edi); 83.2455 - if ( (nr_reps > 1) && (ops->rep_movs != NULL) && 83.2456 - ((rc = ops->rep_movs(ea.mem.seg, truncate_ea(_regs.esi), 83.2457 - dst.mem.seg, dst.mem.off, dst.bytes, 83.2458 - &nr_reps, ctxt)) != X86EMUL_UNHANDLEABLE) ) 83.2459 - { 83.2460 - if ( rc != 0 ) 83.2461 - goto done; 83.2462 - } 83.2463 - else 83.2464 - { 83.2465 - if ( (rc = ops->read(ea.mem.seg, truncate_ea(_regs.esi), 83.2466 - &dst.val, dst.bytes, ctxt)) != 0 ) 83.2467 - goto done; 83.2468 - dst.type = OP_MEM; 83.2469 - nr_reps = 1; 83.2470 - } 83.2471 - register_address_increment( 83.2472 - _regs.esi, 83.2473 - nr_reps * ((_regs.eflags & EFLG_DF) ? -dst.bytes : dst.bytes)); 83.2474 - register_address_increment( 83.2475 - _regs.edi, 83.2476 - nr_reps * ((_regs.eflags & EFLG_DF) ? -dst.bytes : dst.bytes)); 83.2477 - put_rep_prefix(nr_reps); 83.2478 - break; 83.2479 - } 83.2480 - 83.2481 - case 0xa6 ... 0xa7: /* cmps */ { 83.2482 - unsigned long next_eip = _regs.eip; 83.2483 - get_rep_prefix(); 83.2484 - src.bytes = dst.bytes = (d & ByteOp) ? 1 : op_bytes; 83.2485 - if ( (rc = ops->read(ea.mem.seg, truncate_ea(_regs.esi), 83.2486 - &dst.val, dst.bytes, ctxt)) || 83.2487 - (rc = ops->read(x86_seg_es, truncate_ea(_regs.edi), 83.2488 - &src.val, src.bytes, ctxt)) ) 83.2489 - goto done; 83.2490 - register_address_increment( 83.2491 - _regs.esi, (_regs.eflags & EFLG_DF) ? -dst.bytes : dst.bytes); 83.2492 - register_address_increment( 83.2493 - _regs.edi, (_regs.eflags & EFLG_DF) ? -src.bytes : src.bytes); 83.2494 - put_rep_prefix(1); 83.2495 - /* cmp: dst - src ==> src=*%%edi,dst=*%%esi ==> *%%esi - *%%edi */ 83.2496 - emulate_2op_SrcV("cmp", src, dst, _regs.eflags); 83.2497 - if ( ((rep_prefix == REPE_PREFIX) && !(_regs.eflags & EFLG_ZF)) || 83.2498 - ((rep_prefix == REPNE_PREFIX) && (_regs.eflags & EFLG_ZF)) ) 83.2499 - _regs.eip = next_eip; 83.2500 - break; 83.2501 - } 83.2502 - 83.2503 - case 0xaa ... 0xab: /* stos */ { 83.2504 - /* unsigned long max_reps = */get_rep_prefix(); 83.2505 - dst.type = OP_MEM; 83.2506 - dst.bytes = (d & ByteOp) ? 1 : op_bytes; 83.2507 - dst.mem.seg = x86_seg_es; 83.2508 - dst.mem.off = truncate_ea(_regs.edi); 83.2509 - dst.val = _regs.eax; 83.2510 - register_address_increment( 83.2511 - _regs.edi, (_regs.eflags & EFLG_DF) ? -dst.bytes : dst.bytes); 83.2512 - put_rep_prefix(1); 83.2513 - break; 83.2514 - } 83.2515 - 83.2516 - case 0xac ... 0xad: /* lods */ { 83.2517 - /* unsigned long max_reps = */get_rep_prefix(); 83.2518 - dst.type = OP_REG; 83.2519 - dst.bytes = (d & ByteOp) ? 1 : op_bytes; 83.2520 - dst.reg = (unsigned long *)&_regs.eax; 83.2521 - if ( (rc = ops->read(ea.mem.seg, truncate_ea(_regs.esi), 83.2522 - &dst.val, dst.bytes, ctxt)) != 0 ) 83.2523 - goto done; 83.2524 - register_address_increment( 83.2525 - _regs.esi, (_regs.eflags & EFLG_DF) ? -dst.bytes : dst.bytes); 83.2526 - put_rep_prefix(1); 83.2527 - break; 83.2528 - } 83.2529 - 83.2530 - case 0xae ... 0xaf: /* scas */ { 83.2531 - unsigned long next_eip = _regs.eip; 83.2532 - get_rep_prefix(); 83.2533 - src.bytes = dst.bytes = (d & ByteOp) ? 1 : op_bytes; 83.2534 - dst.val = _regs.eax; 83.2535 - if ( (rc = ops->read(x86_seg_es, truncate_ea(_regs.edi), 83.2536 - &src.val, src.bytes, ctxt)) != 0 ) 83.2537 - goto done; 83.2538 - register_address_increment( 83.2539 - _regs.edi, (_regs.eflags & EFLG_DF) ? 
-src.bytes : src.bytes); 83.2540 - put_rep_prefix(1); 83.2541 - /* cmp: dst - src ==> src=*%%edi,dst=%%eax ==> %%eax - *%%edi */ 83.2542 - emulate_2op_SrcV("cmp", src, dst, _regs.eflags); 83.2543 - if ( ((rep_prefix == REPE_PREFIX) && !(_regs.eflags & EFLG_ZF)) || 83.2544 - ((rep_prefix == REPNE_PREFIX) && (_regs.eflags & EFLG_ZF)) ) 83.2545 - _regs.eip = next_eip; 83.2546 - break; 83.2547 - } 83.2548 - 83.2549 - case 0xc2: /* ret imm16 (near) */ 83.2550 - case 0xc3: /* ret (near) */ { 83.2551 - int offset = (b == 0xc2) ? insn_fetch_type(uint16_t) : 0; 83.2552 - op_bytes = mode_64bit() ? 8 : op_bytes; 83.2553 - if ( (rc = ops->read(x86_seg_ss, sp_post_inc(op_bytes + offset), 83.2554 - &dst.val, op_bytes, ctxt)) != 0 ) 83.2555 - goto done; 83.2556 - _regs.eip = dst.val; 83.2557 - break; 83.2558 - } 83.2559 - 83.2560 - case 0xc8: /* enter imm16,imm8 */ { 83.2561 - uint16_t size = insn_fetch_type(uint16_t); 83.2562 - uint8_t depth = insn_fetch_type(uint8_t) & 31; 83.2563 - int i; 83.2564 - 83.2565 - dst.type = OP_REG; 83.2566 - dst.bytes = (mode_64bit() && (op_bytes == 4)) ? 8 : op_bytes; 83.2567 - dst.reg = (unsigned long *)&_regs.ebp; 83.2568 - if ( (rc = ops->write(x86_seg_ss, sp_pre_dec(dst.bytes), 83.2569 - _regs.ebp, dst.bytes, ctxt)) ) 83.2570 - goto done; 83.2571 - dst.val = _regs.esp; 83.2572 - 83.2573 - if ( depth > 0 ) 83.2574 - { 83.2575 - for ( i = 1; i < depth; i++ ) 83.2576 - { 83.2577 - unsigned long ebp, temp_data; 83.2578 - ebp = truncate_word(_regs.ebp - i*dst.bytes, ctxt->sp_size/8); 83.2579 - if ( (rc = ops->read(x86_seg_ss, ebp, 83.2580 - &temp_data, dst.bytes, ctxt)) || 83.2581 - (rc = ops->write(x86_seg_ss, sp_pre_dec(dst.bytes), 83.2582 - temp_data, dst.bytes, ctxt)) ) 83.2583 - goto done; 83.2584 - } 83.2585 - if ( (rc = ops->write(x86_seg_ss, sp_pre_dec(dst.bytes), 83.2586 - dst.val, dst.bytes, ctxt)) ) 83.2587 - goto done; 83.2588 - } 83.2589 - 83.2590 - sp_pre_dec(size); 83.2591 - break; 83.2592 - } 83.2593 - 83.2594 - case 0xc9: /* leave */ 83.2595 - /* First writeback, to %%esp. */ 83.2596 - dst.type = OP_REG; 83.2597 - dst.bytes = (mode_64bit() && (op_bytes == 4)) ? 8 : op_bytes; 83.2598 - dst.reg = (unsigned long *)&_regs.esp; 83.2599 - dst.val = _regs.ebp; 83.2600 - 83.2601 - /* Flush first writeback, since there is a second. */ 83.2602 - switch ( dst.bytes ) 83.2603 - { 83.2604 - case 1: *(uint8_t *)dst.reg = (uint8_t)dst.val; break; 83.2605 - case 2: *(uint16_t *)dst.reg = (uint16_t)dst.val; break; 83.2606 - case 4: *dst.reg = (uint32_t)dst.val; break; /* 64b: zero-ext */ 83.2607 - case 8: *dst.reg = dst.val; break; 83.2608 - } 83.2609 - 83.2610 - /* Second writeback, to %%ebp. */ 83.2611 - dst.reg = (unsigned long *)&_regs.ebp; 83.2612 - if ( (rc = ops->read(x86_seg_ss, sp_post_inc(dst.bytes), 83.2613 - &dst.val, dst.bytes, ctxt)) ) 83.2614 - goto done; 83.2615 - break; 83.2616 - 83.2617 - case 0xca: /* ret imm16 (far) */ 83.2618 - case 0xcb: /* ret (far) */ { 83.2619 - int offset = (b == 0xca) ? insn_fetch_type(uint16_t) : 0; 83.2620 - op_bytes = mode_64bit() ? 
8 : op_bytes; 83.2621 - if ( (rc = ops->read(x86_seg_ss, sp_post_inc(op_bytes), 83.2622 - &dst.val, op_bytes, ctxt)) || 83.2623 - (rc = ops->read(x86_seg_ss, sp_post_inc(op_bytes + offset), 83.2624 - &src.val, op_bytes, ctxt)) || 83.2625 - (rc = load_seg(x86_seg_cs, (uint16_t)src.val, ctxt, ops)) ) 83.2626 - goto done; 83.2627 - _regs.eip = dst.val; 83.2628 - break; 83.2629 - } 83.2630 - 83.2631 - case 0xcc: /* int3 */ 83.2632 - src.val = EXC_BP; 83.2633 - goto swint; 83.2634 - 83.2635 - case 0xcd: /* int imm8 */ 83.2636 - src.val = insn_fetch_type(uint8_t); 83.2637 - swint: 83.2638 - fail_if(ops->inject_sw_interrupt == NULL); 83.2639 - rc = ops->inject_sw_interrupt(src.val, _regs.eip - ctxt->regs->eip, 83.2640 - ctxt) ? : X86EMUL_EXCEPTION; 83.2641 - goto done; 83.2642 - 83.2643 - case 0xce: /* into */ 83.2644 - generate_exception_if(mode_64bit(), EXC_UD, -1); 83.2645 - if ( !(_regs.eflags & EFLG_OF) ) 83.2646 - break; 83.2647 - src.val = EXC_OF; 83.2648 - goto swint; 83.2649 - 83.2650 - case 0xcf: /* iret */ { 83.2651 - unsigned long cs, eip, eflags; 83.2652 - uint32_t mask = EFLG_VIP | EFLG_VIF | EFLG_VM; 83.2653 - if ( !mode_ring0() ) 83.2654 - mask |= EFLG_IOPL; 83.2655 - if ( !mode_iopl() ) 83.2656 - mask |= EFLG_IF; 83.2657 - fail_if(!in_realmode(ctxt, ops)); 83.2658 - if ( (rc = ops->read(x86_seg_ss, sp_post_inc(op_bytes), 83.2659 - &eip, op_bytes, ctxt)) || 83.2660 - (rc = ops->read(x86_seg_ss, sp_post_inc(op_bytes), 83.2661 - &cs, op_bytes, ctxt)) || 83.2662 - (rc = ops->read(x86_seg_ss, sp_post_inc(op_bytes), 83.2663 - &eflags, op_bytes, ctxt)) ) 83.2664 - goto done; 83.2665 - if ( op_bytes == 2 ) 83.2666 - eflags = (uint16_t)eflags | (_regs.eflags & 0xffff0000u); 83.2667 - eflags &= 0x257fd5; 83.2668 - _regs.eflags &= mask; 83.2669 - _regs.eflags |= (uint32_t)(eflags & ~mask) | 0x02; 83.2670 - _regs.eip = eip; 83.2671 - if ( (rc = load_seg(x86_seg_cs, (uint16_t)cs, ctxt, ops)) != 0 ) 83.2672 - goto done; 83.2673 - break; 83.2674 - } 83.2675 - 83.2676 - case 0xd4: /* aam */ { 83.2677 - unsigned int base = insn_fetch_type(uint8_t); 83.2678 - uint8_t al = _regs.eax; 83.2679 - generate_exception_if(mode_64bit(), EXC_UD, -1); 83.2680 - generate_exception_if(base == 0, EXC_DE, -1); 83.2681 - *(uint16_t *)&_regs.eax = ((al / base) << 8) | (al % base); 83.2682 - _regs.eflags &= ~(EFLG_SF|EFLG_ZF|EFLG_PF); 83.2683 - _regs.eflags |= ((uint8_t)_regs.eax == 0) ? EFLG_ZF : 0; 83.2684 - _regs.eflags |= (( int8_t)_regs.eax < 0) ? EFLG_SF : 0; 83.2685 - _regs.eflags |= even_parity(_regs.eax) ? EFLG_PF : 0; 83.2686 - break; 83.2687 - } 83.2688 - 83.2689 - case 0xd5: /* aad */ { 83.2690 - unsigned int base = insn_fetch_type(uint8_t); 83.2691 - uint16_t ax = _regs.eax; 83.2692 - generate_exception_if(mode_64bit(), EXC_UD, -1); 83.2693 - *(uint16_t *)&_regs.eax = (uint8_t)(ax + ((ax >> 8) * base)); 83.2694 - _regs.eflags &= ~(EFLG_SF|EFLG_ZF|EFLG_PF); 83.2695 - _regs.eflags |= ((uint8_t)_regs.eax == 0) ? EFLG_ZF : 0; 83.2696 - _regs.eflags |= (( int8_t)_regs.eax < 0) ? EFLG_SF : 0; 83.2697 - _regs.eflags |= even_parity(_regs.eax) ? EFLG_PF : 0; 83.2698 - break; 83.2699 - } 83.2700 - 83.2701 - case 0xd6: /* salc */ 83.2702 - generate_exception_if(mode_64bit(), EXC_UD, -1); 83.2703 - *(uint8_t *)&_regs.eax = (_regs.eflags & EFLG_CF) ? 
0xff : 0x00; 83.2704 - break; 83.2705 - 83.2706 - case 0xd7: /* xlat */ { 83.2707 - unsigned long al = (uint8_t)_regs.eax; 83.2708 - if ( (rc = ops->read(ea.mem.seg, truncate_ea(_regs.ebx + al), 83.2709 - &al, 1, ctxt)) != 0 ) 83.2710 - goto done; 83.2711 - *(uint8_t *)&_regs.eax = al; 83.2712 - break; 83.2713 - } 83.2714 - 83.2715 - case 0xd9: /* FPU 0xd9 */ 83.2716 - fail_if(ops->load_fpu_ctxt == NULL); 83.2717 - ops->load_fpu_ctxt(ctxt); 83.2718 - switch ( modrm ) 83.2719 - { 83.2720 - case 0xc0: __emulate_fpu_insn(".byte 0xd9,0xc0"); break; 83.2721 - case 0xc1: __emulate_fpu_insn(".byte 0xd9,0xc1"); break; 83.2722 - case 0xc2: __emulate_fpu_insn(".byte 0xd9,0xc2"); break; 83.2723 - case 0xc3: __emulate_fpu_insn(".byte 0xd9,0xc3"); break; 83.2724 - case 0xc4: __emulate_fpu_insn(".byte 0xd9,0xc4"); break; 83.2725 - case 0xc5: __emulate_fpu_insn(".byte 0xd9,0xc5"); break; 83.2726 - case 0xc6: __emulate_fpu_insn(".byte 0xd9,0xc6"); break; 83.2727 - case 0xc7: __emulate_fpu_insn(".byte 0xd9,0xc7"); break; 83.2728 - case 0xe0: __emulate_fpu_insn(".byte 0xd9,0xe0"); break; 83.2729 - case 0xe8: __emulate_fpu_insn(".byte 0xd9,0xe8"); break; 83.2730 - case 0xee: __emulate_fpu_insn(".byte 0xd9,0xee"); break; 83.2731 - default: 83.2732 - fail_if((modrm_reg & 7) != 7); 83.2733 - fail_if(modrm >= 0xc0); 83.2734 - /* fnstcw m2byte */ 83.2735 - ea.bytes = 2; 83.2736 - dst = ea; 83.2737 - asm volatile ( "fnstcw %0" : "=m" (dst.val) ); 83.2738 - } 83.2739 - break; 83.2740 - 83.2741 - case 0xdb: /* FPU 0xdb */ 83.2742 - fail_if(ops->load_fpu_ctxt == NULL); 83.2743 - ops->load_fpu_ctxt(ctxt); 83.2744 - fail_if(modrm != 0xe3); 83.2745 - /* fninit */ 83.2746 - asm volatile ( "fninit" ); 83.2747 - break; 83.2748 - 83.2749 - case 0xdd: /* FPU 0xdd */ 83.2750 - fail_if(ops->load_fpu_ctxt == NULL); 83.2751 - ops->load_fpu_ctxt(ctxt); 83.2752 - fail_if((modrm_reg & 7) != 7); 83.2753 - fail_if(modrm >= 0xc0); 83.2754 - /* fnstsw m2byte */ 83.2755 - ea.bytes = 2; 83.2756 - dst = ea; 83.2757 - asm volatile ( "fnstsw %0" : "=m" (dst.val) ); 83.2758 - break; 83.2759 - 83.2760 - case 0xde: /* FPU 0xde */ 83.2761 - fail_if(ops->load_fpu_ctxt == NULL); 83.2762 - ops->load_fpu_ctxt(ctxt); 83.2763 - switch ( modrm ) 83.2764 - { 83.2765 - case 0xd9: __emulate_fpu_insn(".byte 0xde,0xd9"); break; 83.2766 - case 0xf8: __emulate_fpu_insn(".byte 0xde,0xf8"); break; 83.2767 - case 0xf9: __emulate_fpu_insn(".byte 0xde,0xf9"); break; 83.2768 - case 0xfa: __emulate_fpu_insn(".byte 0xde,0xfa"); break; 83.2769 - case 0xfb: __emulate_fpu_insn(".byte 0xde,0xfb"); break; 83.2770 - case 0xfc: __emulate_fpu_insn(".byte 0xde,0xfc"); break; 83.2771 - case 0xfd: __emulate_fpu_insn(".byte 0xde,0xfd"); break; 83.2772 - case 0xfe: __emulate_fpu_insn(".byte 0xde,0xfe"); break; 83.2773 - case 0xff: __emulate_fpu_insn(".byte 0xde,0xff"); break; 83.2774 - default: goto cannot_emulate; 83.2775 - } 83.2776 - break; 83.2777 - 83.2778 - case 0xdf: /* FPU 0xdf */ 83.2779 - fail_if(ops->load_fpu_ctxt == NULL); 83.2780 - ops->load_fpu_ctxt(ctxt); 83.2781 - fail_if(modrm != 0xe0); 83.2782 - /* fnstsw %ax */ 83.2783 - dst.bytes = 2; 83.2784 - dst.type = OP_REG; 83.2785 - dst.reg = (unsigned long *)&_regs.eax; 83.2786 - asm volatile ( "fnstsw %0" : "=m" (dst.val) ); 83.2787 - break; 83.2788 - 83.2789 - case 0xe0 ... 
0xe2: /* loop{,z,nz} */ { 83.2790 - int rel = insn_fetch_type(int8_t); 83.2791 - int do_jmp = !(_regs.eflags & EFLG_ZF); /* loopnz */ 83.2792 - if ( b == 0xe1 ) 83.2793 - do_jmp = !do_jmp; /* loopz */ 83.2794 - else if ( b == 0xe2 ) 83.2795 - do_jmp = 1; /* loop */ 83.2796 - switch ( ad_bytes ) 83.2797 - { 83.2798 - case 2: 83.2799 - do_jmp &= --(*(uint16_t *)&_regs.ecx) != 0; 83.2800 - break; 83.2801 - case 4: 83.2802 - do_jmp &= --(*(uint32_t *)&_regs.ecx) != 0; 83.2803 - _regs.ecx = (uint32_t)_regs.ecx; /* zero extend in x86/64 mode */ 83.2804 - break; 83.2805 - default: /* case 8: */ 83.2806 - do_jmp &= --_regs.ecx != 0; 83.2807 - break; 83.2808 - } 83.2809 - if ( do_jmp ) 83.2810 - jmp_rel(rel); 83.2811 - break; 83.2812 - } 83.2813 - 83.2814 - case 0xe3: /* jcxz/jecxz (short) */ { 83.2815 - int rel = insn_fetch_type(int8_t); 83.2816 - if ( (ad_bytes == 2) ? !(uint16_t)_regs.ecx : 83.2817 - (ad_bytes == 4) ? !(uint32_t)_regs.ecx : !_regs.ecx ) 83.2818 - jmp_rel(rel); 83.2819 - break; 83.2820 - } 83.2821 - 83.2822 - case 0xe4: /* in imm8,%al */ 83.2823 - case 0xe5: /* in imm8,%eax */ 83.2824 - case 0xe6: /* out %al,imm8 */ 83.2825 - case 0xe7: /* out %eax,imm8 */ 83.2826 - case 0xec: /* in %dx,%al */ 83.2827 - case 0xed: /* in %dx,%eax */ 83.2828 - case 0xee: /* out %al,%dx */ 83.2829 - case 0xef: /* out %eax,%dx */ { 83.2830 - unsigned int port = ((b < 0xe8) 83.2831 - ? insn_fetch_type(uint8_t) 83.2832 - : (uint16_t)_regs.edx); 83.2833 - op_bytes = !(b & 1) ? 1 : (op_bytes == 8) ? 4 : op_bytes; 83.2834 - if ( b & 2 ) 83.2835 - { 83.2836 - /* out */ 83.2837 - fail_if(ops->write_io == NULL); 83.2838 - rc = ops->write_io(port, op_bytes, _regs.eax, ctxt); 83.2839 - 83.2840 - } 83.2841 - else 83.2842 - { 83.2843 - /* in */ 83.2844 - dst.type = OP_REG; 83.2845 - dst.bytes = op_bytes; 83.2846 - dst.reg = (unsigned long *)&_regs.eax; 83.2847 - fail_if(ops->read_io == NULL); 83.2848 - rc = ops->read_io(port, dst.bytes, &dst.val, ctxt); 83.2849 - } 83.2850 - if ( rc != 0 ) 83.2851 - goto done; 83.2852 - break; 83.2853 - } 83.2854 - 83.2855 - case 0xe8: /* call (near) */ { 83.2856 - int rel = (((op_bytes == 2) && !mode_64bit()) 83.2857 - ? (int32_t)insn_fetch_type(int16_t) 83.2858 - : insn_fetch_type(int32_t)); 83.2859 - op_bytes = mode_64bit() ? 8 : op_bytes; 83.2860 - src.val = _regs.eip; 83.2861 - jmp_rel(rel); 83.2862 - goto push; 83.2863 - } 83.2864 - 83.2865 - case 0xe9: /* jmp (near) */ { 83.2866 - int rel = (((op_bytes == 2) && !mode_64bit()) 83.2867 - ? 
(int32_t)insn_fetch_type(int16_t) 83.2868 - : insn_fetch_type(int32_t)); 83.2869 - jmp_rel(rel); 83.2870 - break; 83.2871 - } 83.2872 - 83.2873 - case 0xea: /* jmp (far, absolute) */ { 83.2874 - uint16_t sel; 83.2875 - uint32_t eip; 83.2876 - generate_exception_if(mode_64bit(), EXC_UD, -1); 83.2877 - eip = insn_fetch_bytes(op_bytes); 83.2878 - sel = insn_fetch_type(uint16_t); 83.2879 - if ( (rc = load_seg(x86_seg_cs, sel, ctxt, ops)) != 0 ) 83.2880 - goto done; 83.2881 - _regs.eip = eip; 83.2882 - break; 83.2883 - } 83.2884 - 83.2885 - case 0xeb: /* jmp (short) */ { 83.2886 - int rel = insn_fetch_type(int8_t); 83.2887 - jmp_rel(rel); 83.2888 - break; 83.2889 - } 83.2890 - 83.2891 - case 0xf1: /* int1 (icebp) */ 83.2892 - src.val = EXC_DB; 83.2893 - goto swint; 83.2894 - 83.2895 - case 0xf4: /* hlt */ 83.2896 - ctxt->retire.flags.hlt = 1; 83.2897 - break; 83.2898 - 83.2899 - case 0xf5: /* cmc */ 83.2900 - _regs.eflags ^= EFLG_CF; 83.2901 - break; 83.2902 - 83.2903 - case 0xf8: /* clc */ 83.2904 - _regs.eflags &= ~EFLG_CF; 83.2905 - break; 83.2906 - 83.2907 - case 0xf9: /* stc */ 83.2908 - _regs.eflags |= EFLG_CF; 83.2909 - break; 83.2910 - 83.2911 - case 0xfa: /* cli */ 83.2912 - generate_exception_if(!mode_iopl(), EXC_GP, 0); 83.2913 - _regs.eflags &= ~EFLG_IF; 83.2914 - break; 83.2915 - 83.2916 - case 0xfb: /* sti */ 83.2917 - generate_exception_if(!mode_iopl(), EXC_GP, 0); 83.2918 - if ( !(_regs.eflags & EFLG_IF) ) 83.2919 - { 83.2920 - _regs.eflags |= EFLG_IF; 83.2921 - ctxt->retire.flags.sti = 1; 83.2922 - } 83.2923 - break; 83.2924 - 83.2925 - case 0xfc: /* cld */ 83.2926 - _regs.eflags &= ~EFLG_DF; 83.2927 - break; 83.2928 - 83.2929 - case 0xfd: /* std */ 83.2930 - _regs.eflags |= EFLG_DF; 83.2931 - break; 83.2932 - } 83.2933 - goto writeback; 83.2934 - 83.2935 - twobyte_insn: 83.2936 - switch ( b ) 83.2937 - { 83.2938 - case 0x40 ... 0x4f: /* cmovcc */ 83.2939 - dst.val = src.val; 83.2940 - if ( !test_cc(b, _regs.eflags) ) 83.2941 - dst.type = OP_NONE; 83.2942 - break; 83.2943 - 83.2944 - case 0x90 ... 0x9f: /* setcc */ 83.2945 - dst.val = test_cc(b, _regs.eflags); 83.2946 - break; 83.2947 - 83.2948 - case 0xb0 ... 0xb1: /* cmpxchg */ 83.2949 - /* Save real source value, then compare EAX against destination. */ 83.2950 - src.orig_val = src.val; 83.2951 - src.val = _regs.eax; 83.2952 - emulate_2op_SrcV("cmp", src, dst, _regs.eflags); 83.2953 - if ( _regs.eflags & EFLG_ZF ) 83.2954 - { 83.2955 - /* Success: write back to memory. */ 83.2956 - dst.val = src.orig_val; 83.2957 - } 83.2958 - else 83.2959 - { 83.2960 - /* Failure: write the value we saw to EAX. */ 83.2961 - dst.type = OP_REG; 83.2962 - dst.reg = (unsigned long *)&_regs.eax; 83.2963 - } 83.2964 - break; 83.2965 - 83.2966 - case 0xa3: bt: /* bt */ 83.2967 - emulate_2op_SrcV_nobyte("bt", src, dst, _regs.eflags); 83.2968 - break; 83.2969 - 83.2970 - case 0xa4: /* shld imm8,r,r/m */ 83.2971 - case 0xa5: /* shld %%cl,r,r/m */ 83.2972 - case 0xac: /* shrd imm8,r,r/m */ 83.2973 - case 0xad: /* shrd %%cl,r,r/m */ { 83.2974 - uint8_t shift, width = dst.bytes << 3; 83.2975 - shift = (b & 1) ? (uint8_t)_regs.ecx : insn_fetch_type(uint8_t); 83.2976 - if ( (shift &= width - 1) == 0 ) 83.2977 - break; 83.2978 - dst.orig_val = truncate_word(dst.val, dst.bytes); 83.2979 - dst.val = ((shift == width) ? src.val : 83.2980 - (b & 8) ? 
83.2981 - /* shrd */ 83.2982 - ((dst.orig_val >> shift) | 83.2983 - truncate_word(src.val << (width - shift), dst.bytes)) : 83.2984 - /* shld */ 83.2985 - ((dst.orig_val << shift) | 83.2986 - ((src.val >> (width - shift)) & ((1ull << shift) - 1)))); 83.2987 - dst.val = truncate_word(dst.val, dst.bytes); 83.2988 - _regs.eflags &= ~(EFLG_OF|EFLG_SF|EFLG_ZF|EFLG_PF|EFLG_CF); 83.2989 - if ( (dst.val >> ((b & 8) ? (shift - 1) : (width - shift))) & 1 ) 83.2990 - _regs.eflags |= EFLG_CF; 83.2991 - if ( ((dst.val ^ dst.orig_val) >> (width - 1)) & 1 ) 83.2992 - _regs.eflags |= EFLG_OF; 83.2993 - _regs.eflags |= ((dst.val >> (width - 1)) & 1) ? EFLG_SF : 0; 83.2994 - _regs.eflags |= (dst.val == 0) ? EFLG_ZF : 0; 83.2995 - _regs.eflags |= even_parity(dst.val) ? EFLG_PF : 0; 83.2996 - break; 83.2997 - } 83.2998 - 83.2999 - case 0xb3: btr: /* btr */ 83.3000 - emulate_2op_SrcV_nobyte("btr", src, dst, _regs.eflags); 83.3001 - break; 83.3002 - 83.3003 - case 0xab: bts: /* bts */ 83.3004 - emulate_2op_SrcV_nobyte("bts", src, dst, _regs.eflags); 83.3005 - break; 83.3006 - 83.3007 - case 0xaf: /* imul */ 83.3008 - _regs.eflags &= ~(EFLG_OF|EFLG_CF); 83.3009 - switch ( dst.bytes ) 83.3010 - { 83.3011 - case 2: 83.3012 - dst.val = ((uint32_t)(int16_t)src.val * 83.3013 - (uint32_t)(int16_t)dst.val); 83.3014 - if ( (int16_t)dst.val != (uint32_t)dst.val ) 83.3015 - _regs.eflags |= EFLG_OF|EFLG_CF; 83.3016 - break; 83.3017 -#ifdef __x86_64__ 83.3018 - case 4: 83.3019 - dst.val = ((uint64_t)(int32_t)src.val * 83.3020 - (uint64_t)(int32_t)dst.val); 83.3021 - if ( (int32_t)dst.val != dst.val ) 83.3022 - _regs.eflags |= EFLG_OF|EFLG_CF; 83.3023 - break; 83.3024 -#endif 83.3025 - default: { 83.3026 - unsigned long m[2] = { src.val, dst.val }; 83.3027 - if ( imul_dbl(m) ) 83.3028 - _regs.eflags |= EFLG_OF|EFLG_CF; 83.3029 - dst.val = m[0]; 83.3030 - break; 83.3031 - } 83.3032 - } 83.3033 - break; 83.3034 - 83.3035 - case 0xb2: /* lss */ 83.3036 - dst.val = x86_seg_ss; 83.3037 - goto les; 83.3038 - 83.3039 - case 0xb4: /* lfs */ 83.3040 - dst.val = x86_seg_fs; 83.3041 - goto les; 83.3042 - 83.3043 - case 0xb5: /* lgs */ 83.3044 - dst.val = x86_seg_gs; 83.3045 - goto les; 83.3046 - 83.3047 - case 0xb6: /* movzx rm8,r{16,32,64} */ 83.3048 - /* Recompute DstReg as we may have decoded AH/BH/CH/DH. */ 83.3049 - dst.reg = decode_register(modrm_reg, &_regs, 0); 83.3050 - dst.bytes = op_bytes; 83.3051 - dst.val = (uint8_t)src.val; 83.3052 - break; 83.3053 - 83.3054 - case 0xbc: /* bsf */ { 83.3055 - int zf; 83.3056 - asm ( "bsf %2,%0; setz %b1" 83.3057 - : "=r" (dst.val), "=q" (zf) 83.3058 - : "r" (src.val), "1" (0) ); 83.3059 - _regs.eflags &= ~EFLG_ZF; 83.3060 - _regs.eflags |= zf ? EFLG_ZF : 0; 83.3061 - break; 83.3062 - } 83.3063 - 83.3064 - case 0xbd: /* bsr */ { 83.3065 - int zf; 83.3066 - asm ( "bsr %2,%0; setz %b1" 83.3067 - : "=r" (dst.val), "=q" (zf) 83.3068 - : "r" (src.val), "1" (0) ); 83.3069 - _regs.eflags &= ~EFLG_ZF; 83.3070 - _regs.eflags |= zf ? 
EFLG_ZF : 0; 83.3071 - break; 83.3072 - } 83.3073 - 83.3074 - case 0xb7: /* movzx rm16,r{16,32,64} */ 83.3075 - dst.val = (uint16_t)src.val; 83.3076 - break; 83.3077 - 83.3078 - case 0xbb: btc: /* btc */ 83.3079 - emulate_2op_SrcV_nobyte("btc", src, dst, _regs.eflags); 83.3080 - break; 83.3081 - 83.3082 - case 0xba: /* Grp8 */ 83.3083 - switch ( modrm_reg & 7 ) 83.3084 - { 83.3085 - case 4: goto bt; 83.3086 - case 5: goto bts; 83.3087 - case 6: goto btr; 83.3088 - case 7: goto btc; 83.3089 - default: generate_exception_if(1, EXC_UD, -1); 83.3090 - } 83.3091 - break; 83.3092 - 83.3093 - case 0xbe: /* movsx rm8,r{16,32,64} */ 83.3094 - /* Recompute DstReg as we may have decoded AH/BH/CH/DH. */ 83.3095 - dst.reg = decode_register(modrm_reg, &_regs, 0); 83.3096 - dst.bytes = op_bytes; 83.3097 - dst.val = (int8_t)src.val; 83.3098 - break; 83.3099 - 83.3100 - case 0xbf: /* movsx rm16,r{16,32,64} */ 83.3101 - dst.val = (int16_t)src.val; 83.3102 - break; 83.3103 - 83.3104 - case 0xc0 ... 0xc1: /* xadd */ 83.3105 - /* Write back the register source. */ 83.3106 - switch ( dst.bytes ) 83.3107 - { 83.3108 - case 1: *(uint8_t *)src.reg = (uint8_t)dst.val; break; 83.3109 - case 2: *(uint16_t *)src.reg = (uint16_t)dst.val; break; 83.3110 - case 4: *src.reg = (uint32_t)dst.val; break; /* 64b reg: zero-extend */ 83.3111 - case 8: *src.reg = dst.val; break; 83.3112 - } 83.3113 - goto add; 83.3114 - } 83.3115 - goto writeback; 83.3116 - 83.3117 - twobyte_special_insn: 83.3118 - switch ( b ) 83.3119 - { 83.3120 - case 0x01: /* Grp7 */ { 83.3121 - struct segment_register reg; 83.3122 - unsigned long base, limit, cr0, cr0w; 83.3123 - 83.3124 - if ( modrm == 0xdf ) /* invlpga */ 83.3125 - { 83.3126 - generate_exception_if(in_realmode(ctxt, ops), EXC_UD, -1); 83.3127 - generate_exception_if(!mode_ring0(), EXC_GP, 0); 83.3128 - fail_if(ops->invlpg == NULL); 83.3129 - if ( (rc = ops->invlpg(x86_seg_none, truncate_ea(_regs.eax), 83.3130 - ctxt)) ) 83.3131 - goto done; 83.3132 - break; 83.3133 - } 83.3134 - 83.3135 - switch ( modrm_reg & 7 ) 83.3136 - { 83.3137 - case 0: /* sgdt */ 83.3138 - case 1: /* sidt */ 83.3139 - generate_exception_if(ea.type != OP_MEM, EXC_UD, -1); 83.3140 - fail_if(ops->read_segment == NULL); 83.3141 - if ( (rc = ops->read_segment((modrm_reg & 1) ? 83.3142 - x86_seg_idtr : x86_seg_gdtr, 83.3143 - &reg, ctxt)) ) 83.3144 - goto done; 83.3145 - if ( op_bytes == 2 ) 83.3146 - reg.base &= 0xffffff; 83.3147 - if ( (rc = ops->write(ea.mem.seg, ea.mem.off+0, 83.3148 - reg.limit, 2, ctxt)) || 83.3149 - (rc = ops->write(ea.mem.seg, ea.mem.off+2, 83.3150 - reg.base, mode_64bit() ? 8 : 4, ctxt)) ) 83.3151 - goto done; 83.3152 - break; 83.3153 - case 2: /* lgdt */ 83.3154 - case 3: /* lidt */ 83.3155 - generate_exception_if(ea.type != OP_MEM, EXC_UD, -1); 83.3156 - fail_if(ops->write_segment == NULL); 83.3157 - memset(&reg, 0, sizeof(reg)); 83.3158 - if ( (rc = ops->read(ea.mem.seg, ea.mem.off+0, 83.3159 - &limit, 2, ctxt)) || 83.3160 - (rc = ops->read(ea.mem.seg, ea.mem.off+2, 83.3161 - &base, mode_64bit() ? 8 : 4, ctxt)) ) 83.3162 - goto done; 83.3163 - reg.base = base; 83.3164 - reg.limit = limit; 83.3165 - if ( op_bytes == 2 ) 83.3166 - reg.base &= 0xffffff; 83.3167 - if ( (rc = ops->write_segment((modrm_reg & 1) ?
83.3168 - x86_seg_idtr : x86_seg_gdtr, 83.3169 - &reg, ctxt)) ) 83.3170 - goto done; 83.3171 - break; 83.3172 - case 4: /* smsw */ 83.3173 - ea.bytes = 2; 83.3174 - dst = ea; 83.3175 - fail_if(ops->read_cr == NULL); 83.3176 - if ( (rc = ops->read_cr(0, &dst.val, ctxt)) ) 83.3177 - goto done; 83.3178 - d |= Mov; /* force writeback */ 83.3179 - break; 83.3180 - case 6: /* lmsw */ 83.3181 - fail_if(ops->read_cr == NULL); 83.3182 - fail_if(ops->write_cr == NULL); 83.3183 - if ( (rc = ops->read_cr(0, &cr0, ctxt)) ) 83.3184 - goto done; 83.3185 - if ( ea.type == OP_REG ) 83.3186 - cr0w = *ea.reg; 83.3187 - else if ( (rc = ops->read(ea.mem.seg, ea.mem.off, 83.3188 - &cr0w, 2, ctxt)) ) 83.3189 - goto done; 83.3190 - cr0 &= 0xffff0000; 83.3191 - cr0 |= (uint16_t)cr0w; 83.3192 - if ( (rc = ops->write_cr(0, cr0, ctxt)) ) 83.3193 - goto done; 83.3194 - break; 83.3195 - case 7: /* invlpg */ 83.3196 - generate_exception_if(!mode_ring0(), EXC_GP, 0); 83.3197 - generate_exception_if(ea.type != OP_MEM, EXC_UD, -1); 83.3198 - fail_if(ops->invlpg == NULL); 83.3199 - if ( (rc = ops->invlpg(ea.mem.seg, ea.mem.off, ctxt)) ) 83.3200 - goto done; 83.3201 - break; 83.3202 - default: 83.3203 - goto cannot_emulate; 83.3204 - } 83.3205 - break; 83.3206 - } 83.3207 - 83.3208 - case 0x06: /* clts */ 83.3209 - generate_exception_if(!mode_ring0(), EXC_GP, 0); 83.3210 - fail_if((ops->read_cr == NULL) || (ops->write_cr == NULL)); 83.3211 - if ( (rc = ops->read_cr(0, &dst.val, ctxt)) || 83.3212 - (rc = ops->write_cr(0, dst.val&~8, ctxt)) ) 83.3213 - goto done; 83.3214 - break; 83.3215 - 83.3216 - case 0x08: /* invd */ 83.3217 - case 0x09: /* wbinvd */ 83.3218 - generate_exception_if(!mode_ring0(), EXC_GP, 0); 83.3219 - fail_if(ops->wbinvd == NULL); 83.3220 - if ( (rc = ops->wbinvd(ctxt)) != 0 ) 83.3221 - goto done; 83.3222 - break; 83.3223 - 83.3224 - case 0x0d: /* GrpP (prefetch) */ 83.3225 - case 0x18: /* Grp16 (prefetch/nop) */ 83.3226 - case 0x19 ... 0x1f: /* nop (amd-defined) */ 83.3227 - break; 83.3228 - 83.3229 - case 0x20: /* mov cr,reg */ 83.3230 - case 0x21: /* mov dr,reg */ 83.3231 - case 0x22: /* mov reg,cr */ 83.3232 - case 0x23: /* mov reg,dr */ 83.3233 - generate_exception_if(ea.type != OP_REG, EXC_UD, -1); 83.3234 - generate_exception_if(!mode_ring0(), EXC_GP, 0); 83.3235 - modrm_reg |= lock_prefix << 3; 83.3236 - if ( b & 2 ) 83.3237 - { 83.3238 - /* Write to CR/DR. */ 83.3239 - src.val = *(unsigned long *)decode_register(modrm_rm, &_regs, 0); 83.3240 - if ( !mode_64bit() ) 83.3241 - src.val = (uint32_t)src.val; 83.3242 - rc = ((b & 1) 83.3243 - ? (ops->write_dr 83.3244 - ? ops->write_dr(modrm_reg, src.val, ctxt) 83.3245 - : X86EMUL_UNHANDLEABLE) 83.3246 - : (ops->write_cr 83.3247 - ? ops->write_cr(modrm_reg, src.val, ctxt) 83.3248 - : X86EMUL_UNHANDLEABLE)); 83.3249 - } 83.3250 - else 83.3251 - { 83.3252 - /* Read from CR/DR. */ 83.3253 - dst.type = OP_REG; 83.3254 - dst.bytes = mode_64bit() ? 8 : 4; 83.3255 - dst.reg = decode_register(modrm_rm, &_regs, 0); 83.3256 - rc = ((b & 1) 83.3257 - ? (ops->read_dr 83.3258 - ? ops->read_dr(modrm_reg, &dst.val, ctxt) 83.3259 - : X86EMUL_UNHANDLEABLE) 83.3260 - : (ops->read_cr 83.3261 - ?
ops->read_cr(modrm_reg, &dst.val, ctxt) 83.3262 - : X86EMUL_UNHANDLEABLE)); 83.3263 - } 83.3264 - if ( rc != 0 ) 83.3265 - goto done; 83.3266 - break; 83.3267 - 83.3268 - case 0x30: /* wrmsr */ { 83.3269 - uint64_t val = ((uint64_t)_regs.edx << 32) | (uint32_t)_regs.eax; 83.3270 - generate_exception_if(!mode_ring0(), EXC_GP, 0); 83.3271 - fail_if(ops->write_msr == NULL); 83.3272 - if ( (rc = ops->write_msr((uint32_t)_regs.ecx, val, ctxt)) != 0 ) 83.3273 - goto done; 83.3274 - break; 83.3275 - } 83.3276 - 83.3277 - case 0x31: /* rdtsc */ { 83.3278 - unsigned long cr4; 83.3279 - uint64_t val; 83.3280 - fail_if(ops->read_cr == NULL); 83.3281 - if ( (rc = ops->read_cr(4, &cr4, ctxt)) ) 83.3282 - goto done; 83.3283 - generate_exception_if((cr4 & CR4_TSD) && !mode_ring0(), EXC_GP, 0); 83.3284 - fail_if(ops->read_msr == NULL); 83.3285 - if ( (rc = ops->read_msr(MSR_TSC, &val, ctxt)) != 0 ) 83.3286 - goto done; 83.3287 - _regs.edx = (uint32_t)(val >> 32); 83.3288 - _regs.eax = (uint32_t)(val >> 0); 83.3289 - break; 83.3290 - } 83.3291 - 83.3292 - case 0x32: /* rdmsr */ { 83.3293 - uint64_t val; 83.3294 - generate_exception_if(!mode_ring0(), EXC_GP, 0); 83.3295 - fail_if(ops->read_msr == NULL); 83.3296 - if ( (rc = ops->read_msr((uint32_t)_regs.ecx, &val, ctxt)) != 0 ) 83.3297 - goto done; 83.3298 - _regs.edx = (uint32_t)(val >> 32); 83.3299 - _regs.eax = (uint32_t)(val >> 0); 83.3300 - break; 83.3301 - } 83.3302 - 83.3303 - case 0x80 ... 0x8f: /* jcc (near) */ { 83.3304 - int rel = (((op_bytes == 2) && !mode_64bit()) 83.3305 - ? (int32_t)insn_fetch_type(int16_t) 83.3306 - : insn_fetch_type(int32_t)); 83.3307 - if ( test_cc(b, _regs.eflags) ) 83.3308 - jmp_rel(rel); 83.3309 - break; 83.3310 - } 83.3311 - 83.3312 - case 0xa0: /* push %%fs */ 83.3313 - src.val = x86_seg_fs; 83.3314 - goto push_seg; 83.3315 - 83.3316 - case 0xa1: /* pop %%fs */ 83.3317 - src.val = x86_seg_fs; 83.3318 - goto pop_seg; 83.3319 - 83.3320 - case 0xa2: /* cpuid */ { 83.3321 - unsigned int eax = _regs.eax, ebx = _regs.ebx; 83.3322 - unsigned int ecx = _regs.ecx, edx = _regs.edx; 83.3323 - fail_if(ops->cpuid == NULL); 83.3324 - if ( (rc = ops->cpuid(&eax, &ebx, &ecx, &edx, ctxt)) != 0 ) 83.3325 - goto done; 83.3326 - _regs.eax = eax; _regs.ebx = ebx; 83.3327 - _regs.ecx = ecx; _regs.edx = edx; 83.3328 - break; 83.3329 - } 83.3330 - 83.3331 - case 0xa8: /* push %%gs */ 83.3332 - src.val = x86_seg_gs; 83.3333 - goto push_seg; 83.3334 - 83.3335 - case 0xa9: /* pop %%gs */ 83.3336 - src.val = x86_seg_gs; 83.3337 - goto pop_seg; 83.3338 - 83.3339 - case 0xc7: /* Grp9 (cmpxchg8b) */ 83.3340 -#if defined(__i386__) 83.3341 - { 83.3342 - unsigned long old_lo, old_hi; 83.3343 - generate_exception_if((modrm_reg & 7) != 1, EXC_UD, -1); 83.3344 - generate_exception_if(ea.type != OP_MEM, EXC_UD, -1); 83.3345 - if ( (rc = ops->read(ea.mem.seg, ea.mem.off+0, &old_lo, 4, ctxt)) || 83.3346 - (rc = ops->read(ea.mem.seg, ea.mem.off+4, &old_hi, 4, ctxt)) ) 83.3347 - goto done; 83.3348 - if ( (old_lo != _regs.eax) || (old_hi != _regs.edx) ) 83.3349 - { 83.3350 - _regs.eax = old_lo; 83.3351 - _regs.edx = old_hi; 83.3352 - _regs.eflags &= ~EFLG_ZF; 83.3353 - } 83.3354 - else if ( ops->cmpxchg8b == NULL ) 83.3355 - { 83.3356 - rc = X86EMUL_UNHANDLEABLE; 83.3357 - goto done; 83.3358 - } 83.3359 - else 83.3360 - { 83.3361 - if ( (rc = ops->cmpxchg8b(ea.mem.seg, ea.mem.off, old_lo, old_hi, 83.3362 - _regs.ebx, _regs.ecx, ctxt)) != 0 ) 83.3363 - goto done; 83.3364 - _regs.eflags |= EFLG_ZF; 83.3365 - } 83.3366 - break; 83.3367 - } 83.3368 -#elif 
defined(__x86_64__) 83.3369 - { 83.3370 - unsigned long old, new; 83.3371 - generate_exception_if((modrm_reg & 7) != 1, EXC_UD, -1); 83.3372 - generate_exception_if(ea.type != OP_MEM, EXC_UD, -1); 83.3373 - if ( (rc = ops->read(ea.mem.seg, ea.mem.off, &old, 8, ctxt)) != 0 ) 83.3374 - goto done; 83.3375 - if ( ((uint32_t)(old>>0) != (uint32_t)_regs.eax) || 83.3376 - ((uint32_t)(old>>32) != (uint32_t)_regs.edx) ) 83.3377 - { 83.3378 - _regs.eax = (uint32_t)(old>>0); 83.3379 - _regs.edx = (uint32_t)(old>>32); 83.3380 - _regs.eflags &= ~EFLG_ZF; 83.3381 - } 83.3382 - else 83.3383 - { 83.3384 - new = (_regs.ecx<<32)|(uint32_t)_regs.ebx; 83.3385 - if ( (rc = ops->cmpxchg(ea.mem.seg, ea.mem.off, old, 83.3386 - new, 8, ctxt)) != 0 ) 83.3387 - goto done; 83.3388 - _regs.eflags |= EFLG_ZF; 83.3389 - } 83.3390 - break; 83.3391 - } 83.3392 -#endif 83.3393 - 83.3394 - case 0xc8 ... 0xcf: /* bswap */ 83.3395 - dst.type = OP_REG; 83.3396 - dst.reg = decode_register( 83.3397 - (b & 7) | ((rex_prefix & 1) << 3), &_regs, 0); 83.3398 - switch ( dst.bytes = op_bytes ) 83.3399 - { 83.3400 - default: /* case 2: */ 83.3401 - /* Undefined behaviour. Writes zero on all tested CPUs. */ 83.3402 - dst.val = 0; 83.3403 - break; 83.3404 - case 4: 83.3405 -#ifdef __x86_64__ 83.3406 - asm ( "bswap %k0" : "=r" (dst.val) : "0" (*dst.reg) ); 83.3407 - break; 83.3408 - case 8: 83.3409 -#endif 83.3410 - asm ( "bswap %0" : "=r" (dst.val) : "0" (*dst.reg) ); 83.3411 - break; 83.3412 - } 83.3413 - break; 83.3414 - } 83.3415 - goto writeback; 83.3416 - 83.3417 - cannot_emulate: 83.3418 -#if 0 83.3419 - gdprintk(XENLOG_DEBUG, "Instr:"); 83.3420 - for ( ea.mem.off = ctxt->regs->eip; ea.mem.off < _regs.eip; ea.mem.off++ ) 83.3421 - { 83.3422 - unsigned long x; 83.3423 - ops->insn_fetch(x86_seg_cs, ea.mem.off, &x, 1, ctxt); 83.3424 - printk(" %02x", (uint8_t)x); 83.3425 - } 83.3426 - printk("\n"); 83.3427 -#endif 83.3428 - return X86EMUL_UNHANDLEABLE; 83.3429 -} 83.3430 +#include "x86_emulate/x86_emulate.c"
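For reference, the Grp9 hunk above implements CMPXCHG8B two ways: on i386 through the dedicated cmpxchg8b hook, and on x86_64 by folding the two 32-bit halves into a single 8-byte cmpxchg. The architectural semantics both paths emulate can be modelled by a standalone sketch like the following (a hypothetical helper for illustration, not code from this tree):

#include <stdint.h>

/* CMPXCHG8B model: compare EDX:EAX against the 64-bit memory operand;
 * on a match, store ECX:EBX and report ZF=1; otherwise load the old
 * value into EDX:EAX and report ZF=0. */
static int cmpxchg8b_model(uint64_t *mem, uint32_t *eax, uint32_t *edx,
                           uint32_t ebx, uint32_t ecx)
{
    uint64_t old = *mem;

    if ( old == (((uint64_t)*edx << 32) | *eax) )
    {
        *mem = ((uint64_t)ecx << 32) | ebx;   /* success: ZF=1 */
        return 1;
    }
    *eax = (uint32_t)old;                     /* failure: ZF=0 */
    *edx = (uint32_t)(old >> 32);
    return 0;
}

Note that the emulator routes the success path through ops->cmpxchg (or ops->cmpxchg8b) rather than a plain ops->write, so the backend can make the store atomic, as the locked instruction demands.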
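The emulator reaches its environment only through the hook table used throughout the diff (ops->read, ops->write, ops->insn_fetch, ops->read_segment, ...), each returning X86EMUL_OKAY or an error code; hooks left NULL make the affected opcodes bail out via fail_if(). A minimal sketch of a caller wiring that interface over a flat buffer follows; the header path, function names, one-page "guest", and context-field choices are assumptions for illustration, not code from this tree:

#include <string.h>
#include "x86_emulate.h"   /* assumed include path for the public interface */

static unsigned char guest[4096];            /* toy flat address space */

/* Matches the signature implied by the ops->read()/ops->insn_fetch()
 * call sites above: read 'bytes' bytes at (seg, off) into *val. */
static int flat_read(enum x86_segment seg, unsigned long off,
                     unsigned long *val, unsigned int bytes,
                     struct x86_emulate_ctxt *ctxt)
{
    if ( (off + bytes) > sizeof(guest) )
        return X86EMUL_UNHANDLEABLE;
    *val = 0;
    memcpy(val, &guest[off], bytes);         /* little-endian host assumed */
    return X86EMUL_OKAY;
}

static int flat_write(enum x86_segment seg, unsigned long off,
                      unsigned long val, unsigned int bytes,
                      struct x86_emulate_ctxt *ctxt)
{
    if ( (off + bytes) > sizeof(guest) )
        return X86EMUL_UNHANDLEABLE;
    memcpy(&guest[off], &val, bytes);
    return X86EMUL_OKAY;
}

static struct x86_emulate_ops flat_ops = {
    .read       = flat_read,
    .insn_fetch = flat_read,   /* fetches go through the same flat view */
    .write      = flat_write,
    /* Hooks left NULL cause the emulator to bail via fail_if(). */
};

/* Decode and emulate one instruction at regs->eip; X86EMUL_OKAY on success. */
int emulate_one(struct cpu_user_regs *regs)
{
    struct x86_emulate_ctxt ctxt = {
        .regs      = regs,
        .addr_size = 32,   /* flat 32-bit guest for this sketch */
        .sp_size   = 32,
    };
    return x86_emulate(&ctxt, &flat_ops);
}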
84.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 84.2 +++ b/xen/arch/x86/x86_emulate/x86_emulate.c Tue Apr 01 11:29:03 2008 -0600 84.3 @@ -0,0 +1,3429 @@ 84.4 +/****************************************************************************** 84.5 + * x86_emulate.c 84.6 + * 84.7 + * Generic x86 (32-bit and 64-bit) instruction decoder and emulator. 84.8 + * 84.9 + * Copyright (c) 2005-2007 Keir Fraser 84.10 + * Copyright (c) 2005-2007 XenSource Inc. 84.11 + * 84.12 + * This program is free software; you can redistribute it and/or modify 84.13 + * it under the terms of the GNU General Public License as published by 84.14 + * the Free Software Foundation; either version 2 of the License, or 84.15 + * (at your option) any later version. 84.16 + * 84.17 + * This program is distributed in the hope that it will be useful, 84.18 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 84.19 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 84.20 + * GNU General Public License for more details. 84.21 + * 84.22 + * You should have received a copy of the GNU General Public License 84.23 + * along with this program; if not, write to the Free Software 84.24 + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 84.25 + */ 84.26 + 84.27 +/* Operand sizes: 8-bit operands or specified/overridden size. */ 84.28 +#define ByteOp (1<<0) /* 8-bit operands. */ 84.29 +/* Destination operand type. */ 84.30 +#define DstBitBase (0<<1) /* Memory operand, bit string. */ 84.31 +#define ImplicitOps (1<<1) /* Implicit in opcode. No generic decode. */ 84.32 +#define DstReg (2<<1) /* Register operand. */ 84.33 +#define DstMem (3<<1) /* Memory operand. */ 84.34 +#define DstMask (3<<1) 84.35 +/* Source operand type. */ 84.36 +#define SrcNone (0<<3) /* No source operand. */ 84.37 +#define SrcImplicit (0<<3) /* Source operand is implicit in the opcode. */ 84.38 +#define SrcReg (1<<3) /* Register operand. */ 84.39 +#define SrcMem (2<<3) /* Memory operand. */ 84.40 +#define SrcMem16 (3<<3) /* Memory operand (16-bit). */ 84.41 +#define SrcImm (4<<3) /* Immediate operand. */ 84.42 +#define SrcImmByte (5<<3) /* 8-bit sign-extended immediate operand. */ 84.43 +#define SrcMask (7<<3) 84.44 +/* Generic ModRM decode. */ 84.45 +#define ModRM (1<<6) 84.46 +/* Destination is only written; never read. 
*/ 84.47 +#define Mov (1<<7) 84.48 + 84.49 +static uint8_t opcode_table[256] = { 84.50 + /* 0x00 - 0x07 */ 84.51 + ByteOp|DstMem|SrcReg|ModRM, DstMem|SrcReg|ModRM, 84.52 + ByteOp|DstReg|SrcMem|ModRM, DstReg|SrcMem|ModRM, 84.53 + ByteOp|DstReg|SrcImm, DstReg|SrcImm, ImplicitOps, ImplicitOps, 84.54 + /* 0x08 - 0x0F */ 84.55 + ByteOp|DstMem|SrcReg|ModRM, DstMem|SrcReg|ModRM, 84.56 + ByteOp|DstReg|SrcMem|ModRM, DstReg|SrcMem|ModRM, 84.57 + ByteOp|DstReg|SrcImm, DstReg|SrcImm, ImplicitOps, 0, 84.58 + /* 0x10 - 0x17 */ 84.59 + ByteOp|DstMem|SrcReg|ModRM, DstMem|SrcReg|ModRM, 84.60 + ByteOp|DstReg|SrcMem|ModRM, DstReg|SrcMem|ModRM, 84.61 + ByteOp|DstReg|SrcImm, DstReg|SrcImm, ImplicitOps, ImplicitOps, 84.62 + /* 0x18 - 0x1F */ 84.63 + ByteOp|DstMem|SrcReg|ModRM, DstMem|SrcReg|ModRM, 84.64 + ByteOp|DstReg|SrcMem|ModRM, DstReg|SrcMem|ModRM, 84.65 + ByteOp|DstReg|SrcImm, DstReg|SrcImm, ImplicitOps, ImplicitOps, 84.66 + /* 0x20 - 0x27 */ 84.67 + ByteOp|DstMem|SrcReg|ModRM, DstMem|SrcReg|ModRM, 84.68 + ByteOp|DstReg|SrcMem|ModRM, DstReg|SrcMem|ModRM, 84.69 + ByteOp|DstReg|SrcImm, DstReg|SrcImm, 0, ImplicitOps, 84.70 + /* 0x28 - 0x2F */ 84.71 + ByteOp|DstMem|SrcReg|ModRM, DstMem|SrcReg|ModRM, 84.72 + ByteOp|DstReg|SrcMem|ModRM, DstReg|SrcMem|ModRM, 84.73 + ByteOp|DstReg|SrcImm, DstReg|SrcImm, 0, ImplicitOps, 84.74 + /* 0x30 - 0x37 */ 84.75 + ByteOp|DstMem|SrcReg|ModRM, DstMem|SrcReg|ModRM, 84.76 + ByteOp|DstReg|SrcMem|ModRM, DstReg|SrcMem|ModRM, 84.77 + ByteOp|DstReg|SrcImm, DstReg|SrcImm, 0, ImplicitOps, 84.78 + /* 0x38 - 0x3F */ 84.79 + ByteOp|DstMem|SrcReg|ModRM, DstMem|SrcReg|ModRM, 84.80 + ByteOp|DstReg|SrcMem|ModRM, DstReg|SrcMem|ModRM, 84.81 + ByteOp|DstReg|SrcImm, DstReg|SrcImm, 0, ImplicitOps, 84.82 + /* 0x40 - 0x4F */ 84.83 + ImplicitOps, ImplicitOps, ImplicitOps, ImplicitOps, 84.84 + ImplicitOps, ImplicitOps, ImplicitOps, ImplicitOps, 84.85 + ImplicitOps, ImplicitOps, ImplicitOps, ImplicitOps, 84.86 + ImplicitOps, ImplicitOps, ImplicitOps, ImplicitOps, 84.87 + /* 0x50 - 0x5F */ 84.88 + ImplicitOps|Mov, ImplicitOps|Mov, ImplicitOps|Mov, ImplicitOps|Mov, 84.89 + ImplicitOps|Mov, ImplicitOps|Mov, ImplicitOps|Mov, ImplicitOps|Mov, 84.90 + ImplicitOps|Mov, ImplicitOps|Mov, ImplicitOps|Mov, ImplicitOps|Mov, 84.91 + ImplicitOps|Mov, ImplicitOps|Mov, ImplicitOps|Mov, ImplicitOps|Mov, 84.92 + /* 0x60 - 0x67 */ 84.93 + ImplicitOps, ImplicitOps, DstReg|SrcMem|ModRM, DstReg|SrcMem16|ModRM|Mov, 84.94 + 0, 0, 0, 0, 84.95 + /* 0x68 - 0x6F */ 84.96 + ImplicitOps|Mov, DstReg|SrcImm|ModRM|Mov, 84.97 + ImplicitOps|Mov, DstReg|SrcImmByte|ModRM|Mov, 84.98 + ImplicitOps|Mov, ImplicitOps|Mov, ImplicitOps|Mov, ImplicitOps|Mov, 84.99 + /* 0x70 - 0x77 */ 84.100 + ImplicitOps, ImplicitOps, ImplicitOps, ImplicitOps, 84.101 + ImplicitOps, ImplicitOps, ImplicitOps, ImplicitOps, 84.102 + /* 0x78 - 0x7F */ 84.103 + ImplicitOps, ImplicitOps, ImplicitOps, ImplicitOps, 84.104 + ImplicitOps, ImplicitOps, ImplicitOps, ImplicitOps, 84.105 + /* 0x80 - 0x87 */ 84.106 + ByteOp|DstMem|SrcImm|ModRM, DstMem|SrcImm|ModRM, 84.107 + ByteOp|DstMem|SrcImm|ModRM, DstMem|SrcImmByte|ModRM, 84.108 + ByteOp|DstMem|SrcReg|ModRM, DstMem|SrcReg|ModRM, 84.109 + ByteOp|DstMem|SrcReg|ModRM, DstMem|SrcReg|ModRM, 84.110 + /* 0x88 - 0x8F */ 84.111 + ByteOp|DstMem|SrcReg|ModRM|Mov, DstMem|SrcReg|ModRM|Mov, 84.112 + ByteOp|DstReg|SrcMem|ModRM|Mov, DstReg|SrcMem|ModRM|Mov, 84.113 + DstMem|SrcReg|ModRM|Mov, DstReg|SrcNone|ModRM, 84.114 + DstReg|SrcMem|ModRM|Mov, DstMem|SrcNone|ModRM|Mov, 84.115 + /* 0x90 - 0x97 */ 84.116 + ImplicitOps, ImplicitOps, 
ImplicitOps, ImplicitOps, 84.117 + ImplicitOps, ImplicitOps, ImplicitOps, ImplicitOps, 84.118 + /* 0x98 - 0x9F */ 84.119 + ImplicitOps, ImplicitOps, ImplicitOps, ImplicitOps, 84.120 + ImplicitOps, ImplicitOps, ImplicitOps, ImplicitOps, 84.121 + /* 0xA0 - 0xA7 */ 84.122 + ByteOp|ImplicitOps|Mov, ImplicitOps|Mov, 84.123 + ByteOp|ImplicitOps|Mov, ImplicitOps|Mov, 84.124 + ByteOp|ImplicitOps|Mov, ImplicitOps|Mov, 84.125 + ByteOp|ImplicitOps, ImplicitOps, 84.126 + /* 0xA8 - 0xAF */ 84.127 + ByteOp|DstReg|SrcImm, DstReg|SrcImm, 84.128 + ByteOp|ImplicitOps|Mov, ImplicitOps|Mov, 84.129 + ByteOp|ImplicitOps|Mov, ImplicitOps|Mov, 84.130 + ByteOp|ImplicitOps, ImplicitOps, 84.131 + /* 0xB0 - 0xB7 */ 84.132 + ByteOp|DstReg|SrcImm|Mov, ByteOp|DstReg|SrcImm|Mov, 84.133 + ByteOp|DstReg|SrcImm|Mov, ByteOp|DstReg|SrcImm|Mov, 84.134 + ByteOp|DstReg|SrcImm|Mov, ByteOp|DstReg|SrcImm|Mov, 84.135 + ByteOp|DstReg|SrcImm|Mov, ByteOp|DstReg|SrcImm|Mov, 84.136 + /* 0xB8 - 0xBF */ 84.137 + DstReg|SrcImm|Mov, DstReg|SrcImm|Mov, DstReg|SrcImm|Mov, DstReg|SrcImm|Mov, 84.138 + DstReg|SrcImm|Mov, DstReg|SrcImm|Mov, DstReg|SrcImm|Mov, DstReg|SrcImm|Mov, 84.139 + /* 0xC0 - 0xC7 */ 84.140 + ByteOp|DstMem|SrcImm|ModRM, DstMem|SrcImmByte|ModRM, 84.141 + ImplicitOps, ImplicitOps, 84.142 + DstReg|SrcMem|ModRM|Mov, DstReg|SrcMem|ModRM|Mov, 84.143 + ByteOp|DstMem|SrcImm|ModRM|Mov, DstMem|SrcImm|ModRM|Mov, 84.144 + /* 0xC8 - 0xCF */ 84.145 + ImplicitOps, ImplicitOps, ImplicitOps, ImplicitOps, 84.146 + ImplicitOps, ImplicitOps, ImplicitOps, ImplicitOps, 84.147 + /* 0xD0 - 0xD7 */ 84.148 + ByteOp|DstMem|SrcImplicit|ModRM, DstMem|SrcImplicit|ModRM, 84.149 + ByteOp|DstMem|SrcImplicit|ModRM, DstMem|SrcImplicit|ModRM, 84.150 + ImplicitOps, ImplicitOps, ImplicitOps, ImplicitOps, 84.151 + /* 0xD8 - 0xDF */ 84.152 + 0, ImplicitOps|ModRM|Mov, 0, ImplicitOps|ModRM|Mov, 84.153 + 0, ImplicitOps|ModRM|Mov, ImplicitOps|ModRM|Mov, ImplicitOps|ModRM|Mov, 84.154 + /* 0xE0 - 0xE7 */ 84.155 + ImplicitOps, ImplicitOps, ImplicitOps, ImplicitOps, 84.156 + ImplicitOps, ImplicitOps, ImplicitOps, ImplicitOps, 84.157 + /* 0xE8 - 0xEF */ 84.158 + ImplicitOps, ImplicitOps, ImplicitOps, ImplicitOps, 84.159 + ImplicitOps, ImplicitOps, ImplicitOps, ImplicitOps, 84.160 + /* 0xF0 - 0xF7 */ 84.161 + 0, ImplicitOps, 0, 0, 84.162 + ImplicitOps, ImplicitOps, 84.163 + ByteOp|DstMem|SrcNone|ModRM, DstMem|SrcNone|ModRM, 84.164 + /* 0xF8 - 0xFF */ 84.165 + ImplicitOps, ImplicitOps, ImplicitOps, ImplicitOps, 84.166 + ImplicitOps, ImplicitOps, ByteOp|DstMem|SrcNone|ModRM, DstMem|SrcNone|ModRM 84.167 +}; 84.168 + 84.169 +static uint8_t twobyte_table[256] = { 84.170 + /* 0x00 - 0x07 */ 84.171 + 0, ImplicitOps|ModRM, 0, 0, 0, 0, ImplicitOps, 0, 84.172 + /* 0x08 - 0x0F */ 84.173 + ImplicitOps, ImplicitOps, 0, 0, 0, ImplicitOps|ModRM, 0, 0, 84.174 + /* 0x10 - 0x17 */ 84.175 + 0, 0, 0, 0, 0, 0, 0, 0, 84.176 + /* 0x18 - 0x1F */ 84.177 + ImplicitOps|ModRM, ImplicitOps|ModRM, ImplicitOps|ModRM, ImplicitOps|ModRM, 84.178 + ImplicitOps|ModRM, ImplicitOps|ModRM, ImplicitOps|ModRM, ImplicitOps|ModRM, 84.179 + /* 0x20 - 0x27 */ 84.180 + ImplicitOps|ModRM, ImplicitOps|ModRM, ImplicitOps|ModRM, ImplicitOps|ModRM, 84.181 + 0, 0, 0, 0, 84.182 + /* 0x28 - 0x2F */ 84.183 + 0, 0, 0, 0, 0, 0, 0, 0, 84.184 + /* 0x30 - 0x37 */ 84.185 + ImplicitOps, ImplicitOps, ImplicitOps, 0, 0, 0, 0, 0, 84.186 + /* 0x38 - 0x3F */ 84.187 + 0, 0, 0, 0, 0, 0, 0, 0, 84.188 + /* 0x40 - 0x47 */ 84.189 + DstReg|SrcMem|ModRM|Mov, DstReg|SrcMem|ModRM|Mov, 84.190 + DstReg|SrcMem|ModRM|Mov, DstReg|SrcMem|ModRM|Mov, 84.191 + 
DstReg|SrcMem|ModRM|Mov, DstReg|SrcMem|ModRM|Mov, 84.192 + DstReg|SrcMem|ModRM|Mov, DstReg|SrcMem|ModRM|Mov, 84.193 + /* 0x48 - 0x4F */ 84.194 + DstReg|SrcMem|ModRM|Mov, DstReg|SrcMem|ModRM|Mov, 84.195 + DstReg|SrcMem|ModRM|Mov, DstReg|SrcMem|ModRM|Mov, 84.196 + DstReg|SrcMem|ModRM|Mov, DstReg|SrcMem|ModRM|Mov, 84.197 + DstReg|SrcMem|ModRM|Mov, DstReg|SrcMem|ModRM|Mov, 84.198 + /* 0x50 - 0x5F */ 84.199 + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 84.200 + /* 0x60 - 0x6F */ 84.201 + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 84.202 + /* 0x70 - 0x7F */ 84.203 + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 84.204 + /* 0x80 - 0x87 */ 84.205 + ImplicitOps, ImplicitOps, ImplicitOps, ImplicitOps, 84.206 + ImplicitOps, ImplicitOps, ImplicitOps, ImplicitOps, 84.207 + /* 0x88 - 0x8F */ 84.208 + ImplicitOps, ImplicitOps, ImplicitOps, ImplicitOps, 84.209 + ImplicitOps, ImplicitOps, ImplicitOps, ImplicitOps, 84.210 + /* 0x90 - 0x97 */ 84.211 + ByteOp|DstMem|SrcNone|ModRM|Mov, ByteOp|DstMem|SrcNone|ModRM|Mov, 84.212 + ByteOp|DstMem|SrcNone|ModRM|Mov, ByteOp|DstMem|SrcNone|ModRM|Mov, 84.213 + ByteOp|DstMem|SrcNone|ModRM|Mov, ByteOp|DstMem|SrcNone|ModRM|Mov, 84.214 + ByteOp|DstMem|SrcNone|ModRM|Mov, ByteOp|DstMem|SrcNone|ModRM|Mov, 84.215 + /* 0x98 - 0x9F */ 84.216 + ByteOp|DstMem|SrcNone|ModRM|Mov, ByteOp|DstMem|SrcNone|ModRM|Mov, 84.217 + ByteOp|DstMem|SrcNone|ModRM|Mov, ByteOp|DstMem|SrcNone|ModRM|Mov, 84.218 + ByteOp|DstMem|SrcNone|ModRM|Mov, ByteOp|DstMem|SrcNone|ModRM|Mov, 84.219 + ByteOp|DstMem|SrcNone|ModRM|Mov, ByteOp|DstMem|SrcNone|ModRM|Mov, 84.220 + /* 0xA0 - 0xA7 */ 84.221 + ImplicitOps, ImplicitOps, ImplicitOps, DstBitBase|SrcReg|ModRM, 84.222 + DstMem|SrcReg|ModRM, DstMem|SrcReg|ModRM, 0, 0, 84.223 + /* 0xA8 - 0xAF */ 84.224 + ImplicitOps, ImplicitOps, 0, DstBitBase|SrcReg|ModRM, 84.225 + DstMem|SrcReg|ModRM, DstMem|SrcReg|ModRM, 0, DstReg|SrcMem|ModRM, 84.226 + /* 0xB0 - 0xB7 */ 84.227 + ByteOp|DstMem|SrcReg|ModRM, DstMem|SrcReg|ModRM, 84.228 + DstReg|SrcMem|ModRM|Mov, DstBitBase|SrcReg|ModRM, 84.229 + DstReg|SrcMem|ModRM|Mov, DstReg|SrcMem|ModRM|Mov, 84.230 + ByteOp|DstReg|SrcMem|ModRM|Mov, DstReg|SrcMem16|ModRM|Mov, 84.231 + /* 0xB8 - 0xBF */ 84.232 + 0, 0, DstBitBase|SrcImmByte|ModRM, DstBitBase|SrcReg|ModRM, 84.233 + DstReg|SrcMem|ModRM, DstReg|SrcMem|ModRM, 84.234 + ByteOp|DstReg|SrcMem|ModRM|Mov, DstReg|SrcMem16|ModRM|Mov, 84.235 + /* 0xC0 - 0xC7 */ 84.236 + ByteOp|DstMem|SrcReg|ModRM, DstMem|SrcReg|ModRM, 0, 0, 84.237 + 0, 0, 0, ImplicitOps|ModRM, 84.238 + /* 0xC8 - 0xCF */ 84.239 + ImplicitOps, ImplicitOps, ImplicitOps, ImplicitOps, 84.240 + ImplicitOps, ImplicitOps, ImplicitOps, ImplicitOps, 84.241 + /* 0xD0 - 0xDF */ 84.242 + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 84.243 + /* 0xE0 - 0xEF */ 84.244 + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 84.245 + /* 0xF0 - 0xFF */ 84.246 + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 84.247 +}; 84.248 + 84.249 +/* Type, address-of, and value of an instruction's operand. */ 84.250 +struct operand { 84.251 + enum { OP_REG, OP_MEM, OP_IMM, OP_NONE } type; 84.252 + unsigned int bytes; 84.253 + unsigned long val, orig_val; 84.254 + union { 84.255 + /* OP_REG: Pointer to register field. */ 84.256 + unsigned long *reg; 84.257 + /* OP_MEM: Segment and offset. */ 84.258 + struct { 84.259 + enum x86_segment seg; 84.260 + unsigned long off; 84.261 + } mem; 84.262 + }; 84.263 +}; 84.264 + 84.265 +/* MSRs. */ 84.266 +#define MSR_TSC 0x10 84.267 + 84.268 +/* Control register flags. 
*/ 84.269 +#define CR0_PE (1<<0) 84.270 +#define CR4_TSD (1<<2) 84.271 + 84.272 +/* EFLAGS bit definitions. */ 84.273 +#define EFLG_VIP (1<<20) 84.274 +#define EFLG_VIF (1<<19) 84.275 +#define EFLG_AC (1<<18) 84.276 +#define EFLG_VM (1<<17) 84.277 +#define EFLG_RF (1<<16) 84.278 +#define EFLG_NT (1<<14) 84.279 +#define EFLG_IOPL (3<<12) 84.280 +#define EFLG_OF (1<<11) 84.281 +#define EFLG_DF (1<<10) 84.282 +#define EFLG_IF (1<<9) 84.283 +#define EFLG_TF (1<<8) 84.284 +#define EFLG_SF (1<<7) 84.285 +#define EFLG_ZF (1<<6) 84.286 +#define EFLG_AF (1<<4) 84.287 +#define EFLG_PF (1<<2) 84.288 +#define EFLG_CF (1<<0) 84.289 + 84.290 +/* Exception definitions. */ 84.291 +#define EXC_DE 0 84.292 +#define EXC_DB 1 84.293 +#define EXC_BP 3 84.294 +#define EXC_OF 4 84.295 +#define EXC_BR 5 84.296 +#define EXC_UD 6 84.297 +#define EXC_TS 10 84.298 +#define EXC_NP 11 84.299 +#define EXC_SS 12 84.300 +#define EXC_GP 13 84.301 +#define EXC_PF 14 84.302 +#define EXC_MF 16 84.303 + 84.304 +/* 84.305 + * Instruction emulation: 84.306 + * Most instructions are emulated directly via a fragment of inline assembly 84.307 + * code. This allows us to save/restore EFLAGS and thus very easily pick up 84.308 + * any modified flags. 84.309 + */ 84.310 + 84.311 +#if defined(__x86_64__) 84.312 +#define _LO32 "k" /* force 32-bit operand */ 84.313 +#define _STK "%%rsp" /* stack pointer */ 84.314 +#define _BYTES_PER_LONG "8" 84.315 +#elif defined(__i386__) 84.316 +#define _LO32 "" /* force 32-bit operand */ 84.317 +#define _STK "%%esp" /* stack pointer */ 84.318 +#define _BYTES_PER_LONG "4" 84.319 +#endif 84.320 + 84.321 +/* 84.322 + * These EFLAGS bits are restored from saved value during emulation, and 84.323 + * any changes are written back to the saved value after emulation. 84.324 + */ 84.325 +#define EFLAGS_MASK (EFLG_OF|EFLG_SF|EFLG_ZF|EFLG_AF|EFLG_PF|EFLG_CF) 84.326 + 84.327 +/* Before executing instruction: restore necessary bits in EFLAGS. */ 84.328 +#define _PRE_EFLAGS(_sav, _msk, _tmp) \ 84.329 +/* EFLAGS = (_sav & _msk) | (EFLAGS & ~_msk); _sav &= ~_msk; */ \ 84.330 +"movl %"_sav",%"_LO32 _tmp"; " \ 84.331 +"push %"_tmp"; " \ 84.332 +"push %"_tmp"; " \ 84.333 +"movl %"_msk",%"_LO32 _tmp"; " \ 84.334 +"andl %"_LO32 _tmp",("_STK"); " \ 84.335 +"pushf; " \ 84.336 +"notl %"_LO32 _tmp"; " \ 84.337 +"andl %"_LO32 _tmp",("_STK"); " \ 84.338 +"andl %"_LO32 _tmp",2*"_BYTES_PER_LONG"("_STK"); " \ 84.339 +"pop %"_tmp"; " \ 84.340 +"orl %"_LO32 _tmp",("_STK"); " \ 84.341 +"popf; " \ 84.342 +"pop %"_sav"; " 84.343 + 84.344 +/* After executing instruction: write-back necessary bits in EFLAGS. */ 84.345 +#define _POST_EFLAGS(_sav, _msk, _tmp) \ 84.346 +/* _sav |= EFLAGS & _msk; */ \ 84.347 +"pushf; " \ 84.348 +"pop %"_tmp"; " \ 84.349 +"andl %"_msk",%"_LO32 _tmp"; " \ 84.350 +"orl %"_LO32 _tmp",%"_sav"; " 84.351 + 84.352 +/* Raw emulation: instruction has two explicit operands. 
*/ 84.353 +#define __emulate_2op_nobyte(_op,_src,_dst,_eflags,_wx,_wy,_lx,_ly,_qx,_qy)\ 84.354 +do{ unsigned long _tmp; \ 84.355 + switch ( (_dst).bytes ) \ 84.356 + { \ 84.357 + case 2: \ 84.358 + asm volatile ( \ 84.359 + _PRE_EFLAGS("0","4","2") \ 84.360 + _op"w %"_wx"3,%1; " \ 84.361 + _POST_EFLAGS("0","4","2") \ 84.362 + : "=m" (_eflags), "=m" ((_dst).val), "=&r" (_tmp) \ 84.363 + : _wy ((_src).val), "i" (EFLAGS_MASK), \ 84.364 + "m" (_eflags), "m" ((_dst).val) ); \ 84.365 + break; \ 84.366 + case 4: \ 84.367 + asm volatile ( \ 84.368 + _PRE_EFLAGS("0","4","2") \ 84.369 + _op"l %"_lx"3,%1; " \ 84.370 + _POST_EFLAGS("0","4","2") \ 84.371 + : "=m" (_eflags), "=m" ((_dst).val), "=&r" (_tmp) \ 84.372 + : _ly ((_src).val), "i" (EFLAGS_MASK), \ 84.373 + "m" (_eflags), "m" ((_dst).val) ); \ 84.374 + break; \ 84.375 + case 8: \ 84.376 + __emulate_2op_8byte(_op, _src, _dst, _eflags, _qx, _qy); \ 84.377 + break; \ 84.378 + } \ 84.379 +} while (0) 84.380 +#define __emulate_2op(_op,_src,_dst,_eflags,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy)\ 84.381 +do{ unsigned long _tmp; \ 84.382 + switch ( (_dst).bytes ) \ 84.383 + { \ 84.384 + case 1: \ 84.385 + asm volatile ( \ 84.386 + _PRE_EFLAGS("0","4","2") \ 84.387 + _op"b %"_bx"3,%1; " \ 84.388 + _POST_EFLAGS("0","4","2") \ 84.389 + : "=m" (_eflags), "=m" ((_dst).val), "=&r" (_tmp) \ 84.390 + : _by ((_src).val), "i" (EFLAGS_MASK), \ 84.391 + "m" (_eflags), "m" ((_dst).val) ); \ 84.392 + break; \ 84.393 + default: \ 84.394 + __emulate_2op_nobyte(_op,_src,_dst,_eflags,_wx,_wy,_lx,_ly,_qx,_qy);\ 84.395 + break; \ 84.396 + } \ 84.397 +} while (0) 84.398 +/* Source operand is byte-sized and may be restricted to just %cl. */ 84.399 +#define emulate_2op_SrcB(_op, _src, _dst, _eflags) \ 84.400 + __emulate_2op(_op, _src, _dst, _eflags, \ 84.401 + "b", "c", "b", "c", "b", "c", "b", "c") 84.402 +/* Source operand is byte, word, long or quad sized. */ 84.403 +#define emulate_2op_SrcV(_op, _src, _dst, _eflags) \ 84.404 + __emulate_2op(_op, _src, _dst, _eflags, \ 84.405 + "b", "q", "w", "r", _LO32, "r", "", "r") 84.406 +/* Source operand is word, long or quad sized. */ 84.407 +#define emulate_2op_SrcV_nobyte(_op, _src, _dst, _eflags) \ 84.408 + __emulate_2op_nobyte(_op, _src, _dst, _eflags, \ 84.409 + "w", "r", _LO32, "r", "", "r") 84.410 + 84.411 +/* Instruction has only one explicit operand (no source operand). 
*/ 84.412 +#define emulate_1op(_op,_dst,_eflags) \ 84.413 +do{ unsigned long _tmp; \ 84.414 + switch ( (_dst).bytes ) \ 84.415 + { \ 84.416 + case 1: \ 84.417 + asm volatile ( \ 84.418 + _PRE_EFLAGS("0","3","2") \ 84.419 + _op"b %1; " \ 84.420 + _POST_EFLAGS("0","3","2") \ 84.421 + : "=m" (_eflags), "=m" ((_dst).val), "=&r" (_tmp) \ 84.422 + : "i" (EFLAGS_MASK), "m" (_eflags), "m" ((_dst).val) ); \ 84.423 + break; \ 84.424 + case 2: \ 84.425 + asm volatile ( \ 84.426 + _PRE_EFLAGS("0","3","2") \ 84.427 + _op"w %1; " \ 84.428 + _POST_EFLAGS("0","3","2") \ 84.429 + : "=m" (_eflags), "=m" ((_dst).val), "=&r" (_tmp) \ 84.430 + : "i" (EFLAGS_MASK), "m" (_eflags), "m" ((_dst).val) ); \ 84.431 + break; \ 84.432 + case 4: \ 84.433 + asm volatile ( \ 84.434 + _PRE_EFLAGS("0","3","2") \ 84.435 + _op"l %1; " \ 84.436 + _POST_EFLAGS("0","3","2") \ 84.437 + : "=m" (_eflags), "=m" ((_dst).val), "=&r" (_tmp) \ 84.438 + : "i" (EFLAGS_MASK), "m" (_eflags), "m" ((_dst).val) ); \ 84.439 + break; \ 84.440 + case 8: \ 84.441 + __emulate_1op_8byte(_op, _dst, _eflags); \ 84.442 + break; \ 84.443 + } \ 84.444 +} while (0) 84.445 + 84.446 +/* Emulate an instruction with quadword operands (x86/64 only). */ 84.447 +#if defined(__x86_64__) 84.448 +#define __emulate_2op_8byte(_op, _src, _dst, _eflags, _qx, _qy) \ 84.449 +do{ asm volatile ( \ 84.450 + _PRE_EFLAGS("0","4","2") \ 84.451 + _op"q %"_qx"3,%1; " \ 84.452 + _POST_EFLAGS("0","4","2") \ 84.453 + : "=m" (_eflags), "=m" ((_dst).val), "=&r" (_tmp) \ 84.454 + : _qy ((_src).val), "i" (EFLAGS_MASK), \ 84.455 + "m" (_eflags), "m" ((_dst).val) ); \ 84.456 +} while (0) 84.457 +#define __emulate_1op_8byte(_op, _dst, _eflags) \ 84.458 +do{ asm volatile ( \ 84.459 + _PRE_EFLAGS("0","3","2") \ 84.460 + _op"q %1; " \ 84.461 + _POST_EFLAGS("0","3","2") \ 84.462 + : "=m" (_eflags), "=m" ((_dst).val), "=&r" (_tmp) \ 84.463 + : "i" (EFLAGS_MASK), "m" (_eflags), "m" ((_dst).val) ); \ 84.464 +} while (0) 84.465 +#elif defined(__i386__) 84.466 +#define __emulate_2op_8byte(_op, _src, _dst, _eflags, _qx, _qy) 84.467 +#define __emulate_1op_8byte(_op, _dst, _eflags) 84.468 +#endif /* __i386__ */ 84.469 + 84.470 +/* Fetch next part of the instruction being emulated. */ 84.471 +#define insn_fetch_bytes(_size) \ 84.472 +({ unsigned long _x, _eip = _regs.eip; \ 84.473 + if ( !mode_64bit() ) _eip = (uint32_t)_eip; /* ignore upper dword */ \ 84.474 + _regs.eip += (_size); /* real hardware doesn't truncate */ \ 84.475 + generate_exception_if((uint8_t)(_regs.eip - ctxt->regs->eip) > 15, \ 84.476 + EXC_GP, 0); \ 84.477 + rc = ops->insn_fetch(x86_seg_cs, _eip, &_x, (_size), ctxt); \ 84.478 + if ( rc ) goto done; \ 84.479 + _x; \ 84.480 +}) 84.481 +#define insn_fetch_type(_type) ((_type)insn_fetch_bytes(sizeof(_type))) 84.482 + 84.483 +#define truncate_word(ea, byte_width) \ 84.484 +({ unsigned long __ea = (ea); \ 84.485 + unsigned int _width = (byte_width); \ 84.486 + ((_width == sizeof(unsigned long)) ? __ea : \ 84.487 + (__ea & ((1UL << (_width << 3)) - 1))); \ 84.488 +}) 84.489 +#define truncate_ea(ea) truncate_word((ea), ad_bytes) 84.490 + 84.491 +#define mode_64bit() (def_ad_bytes == 8) 84.492 + 84.493 +#define fail_if(p) \ 84.494 +do { \ 84.495 + rc = (p) ? X86EMUL_UNHANDLEABLE : X86EMUL_OKAY; \ 84.496 + if ( rc ) goto done; \ 84.497 +} while (0) 84.498 + 84.499 +#define generate_exception_if(p, e, ec) \ 84.500 +({ if ( (p) ) { \ 84.501 + fail_if(ops->inject_hw_exception == NULL); \ 84.502 + rc = ops->inject_hw_exception(e, ec, ctxt) ? 
: X86EMUL_EXCEPTION; \ 84.503 + goto done; \ 84.504 + } \ 84.505 +}) 84.506 + 84.507 +/* 84.508 + * Given byte has even parity (even number of 1s)? SDM Vol. 1 Sec. 3.4.3.1, 84.509 + * "Status Flags": EFLAGS.PF reflects parity of least-sig. byte of result only. 84.510 + */ 84.511 +static int even_parity(uint8_t v) 84.512 +{ 84.513 + asm ( "test %b0,%b0; setp %b0" : "=a" (v) : "0" (v) ); 84.514 + return v; 84.515 +} 84.516 + 84.517 +/* Update address held in a register, based on addressing mode. */ 84.518 +#define _register_address_increment(reg, inc, byte_width) \ 84.519 +do { \ 84.520 + int _inc = (inc); /* signed type ensures sign extension to long */ \ 84.521 + unsigned int _width = (byte_width); \ 84.522 + if ( _width == sizeof(unsigned long) ) \ 84.523 + (reg) += _inc; \ 84.524 + else if ( mode_64bit() ) \ 84.525 + (reg) = ((reg) + _inc) & ((1UL << (_width << 3)) - 1); \ 84.526 + else \ 84.527 + (reg) = ((reg) & ~((1UL << (_width << 3)) - 1)) | \ 84.528 + (((reg) + _inc) & ((1UL << (_width << 3)) - 1)); \ 84.529 +} while (0) 84.530 +#define register_address_increment(reg, inc) \ 84.531 + _register_address_increment((reg), (inc), ad_bytes) 84.532 + 84.533 +#define sp_pre_dec(dec) ({ \ 84.534 + _register_address_increment(_regs.esp, -(dec), ctxt->sp_size/8); \ 84.535 + truncate_word(_regs.esp, ctxt->sp_size/8); \ 84.536 +}) 84.537 +#define sp_post_inc(inc) ({ \ 84.538 + unsigned long __esp = truncate_word(_regs.esp, ctxt->sp_size/8); \ 84.539 + _register_address_increment(_regs.esp, (inc), ctxt->sp_size/8); \ 84.540 + __esp; \ 84.541 +}) 84.542 + 84.543 +#define jmp_rel(rel) \ 84.544 +do { \ 84.545 + int _rel = (int)(rel); \ 84.546 + _regs.eip += _rel; \ 84.547 + if ( !mode_64bit() ) \ 84.548 + _regs.eip = ((op_bytes == 2) \ 84.549 + ? (uint16_t)_regs.eip : (uint32_t)_regs.eip); \ 84.550 +} while (0) 84.551 + 84.552 +static unsigned long __get_rep_prefix( 84.553 + struct cpu_user_regs *int_regs, 84.554 + struct cpu_user_regs *ext_regs, 84.555 + int ad_bytes) 84.556 +{ 84.557 + unsigned long ecx = ((ad_bytes == 2) ? (uint16_t)int_regs->ecx : 84.558 + (ad_bytes == 4) ? (uint32_t)int_regs->ecx : 84.559 + int_regs->ecx); 84.560 + 84.561 + /* Skip the instruction if no repetitions are required. */ 84.562 + if ( ecx == 0 ) 84.563 + ext_regs->eip = int_regs->eip; 84.564 + 84.565 + return ecx; 84.566 +} 84.567 + 84.568 +#define get_rep_prefix() ({ \ 84.569 + unsigned long max_reps = 1; \ 84.570 + if ( rep_prefix ) \ 84.571 + max_reps = __get_rep_prefix(&_regs, ctxt->regs, ad_bytes); \ 84.572 + if ( max_reps == 0 ) \ 84.573 + goto done; \ 84.574 + max_reps; \ 84.575 +}) 84.576 + 84.577 +static void __put_rep_prefix( 84.578 + struct cpu_user_regs *int_regs, 84.579 + struct cpu_user_regs *ext_regs, 84.580 + int ad_bytes, 84.581 + unsigned long reps_completed) 84.582 +{ 84.583 + unsigned long ecx = ((ad_bytes == 2) ? (uint16_t)int_regs->ecx : 84.584 + (ad_bytes == 4) ? (uint32_t)int_regs->ecx : 84.585 + int_regs->ecx); 84.586 + 84.587 + /* Reduce counter appropriately, and repeat instruction if non-zero. 
*/
84.588 +    ecx -= reps_completed;
84.589 +    if ( ecx != 0 )
84.590 +        int_regs->eip = ext_regs->eip;
84.591 +
84.592 +    if ( ad_bytes == 2 )
84.593 +        *(uint16_t *)&int_regs->ecx = ecx;
84.594 +    else if ( ad_bytes == 4 )
84.595 +        int_regs->ecx = (uint32_t)ecx;
84.596 +    else
84.597 +        int_regs->ecx = ecx;
84.598 +}
84.599 +
84.600 +#define put_rep_prefix(reps_completed) ({                                \
84.601 +    if ( rep_prefix )                                                     \
84.602 +        __put_rep_prefix(&_regs, ctxt->regs, ad_bytes, reps_completed);   \
84.603 +})
84.604 +
84.605 +/*
84.606 + * Unsigned multiplication with double-word result.
84.607 + * IN:  Multiplicand=m[0], Multiplier=m[1]
84.608 + * OUT: Return CF/OF (overflow status); Result=m[1]:m[0]
84.609 + */
84.610 +static int mul_dbl(unsigned long m[2])
84.611 +{
84.612 +    int rc;
84.613 +    asm ( "mul %4; seto %b2"
84.614 +          : "=a" (m[0]), "=d" (m[1]), "=q" (rc)
84.615 +          : "0" (m[0]), "1" (m[1]), "2" (0) );
84.616 +    return rc;
84.617 +}
84.618 +
84.619 +/*
84.620 + * Signed multiplication with double-word result.
84.621 + * IN:  Multiplicand=m[0], Multiplier=m[1]
84.622 + * OUT: Return CF/OF (overflow status); Result=m[1]:m[0]
84.623 + */
84.624 +static int imul_dbl(unsigned long m[2])
84.625 +{
84.626 +    int rc;
84.627 +    asm ( "imul %4; seto %b2"
84.628 +          : "=a" (m[0]), "=d" (m[1]), "=q" (rc)
84.629 +          : "0" (m[0]), "1" (m[1]), "2" (0) );
84.630 +    return rc;
84.631 +}
84.632 +
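The two multiply helpers leave the double-width product in m[1]:m[0] and report CF/OF. A quick host-side sanity check of that contract (illustrative sketch only, not part of this changeset; assumes a 64-bit GCC build with __int128 and the mul_dbl() above in scope):

    /* Compare mul_dbl()'s 128-bit result against GCC's __int128. */
    static int check_mul_dbl(void)
    {
        unsigned long m[2] = { 0xdeadbeefUL, 0x100000000UL };
        unsigned __int128 ref = (unsigned __int128)m[0] * m[1];
        int cf_of = mul_dbl(m);               /* result now in m[1]:m[0] */
        /* MUL sets CF/OF iff the high half of the product is non-zero. */
        return (m[0] == (unsigned long)ref) &&
               (m[1] == (unsigned long)(ref >> 64)) &&
               (cf_of == (m[1] != 0));
    }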
84.633 +/*
84.634 + * Unsigned division of double-word dividend.
84.635 + * IN:  Dividend=u[1]:u[0], Divisor=v
84.636 + * OUT: Return 1: #DE
84.637 + *      Return 0: Quotient=u[0], Remainder=u[1]
84.638 + */
84.639 +static int div_dbl(unsigned long u[2], unsigned long v)
84.640 +{
84.641 +    if ( (v == 0) || (u[1] >= v) )
84.642 +        return 1;
84.643 +    asm ( "div %4"
84.644 +          : "=a" (u[0]), "=d" (u[1])
84.645 +          : "0" (u[0]), "1" (u[1]), "r" (v) );
84.646 +    return 0;
84.647 +}
84.648 +
84.649 +/*
84.650 + * Signed division of double-word dividend.
84.651 + * IN:  Dividend=u[1]:u[0], Divisor=v
84.652 + * OUT: Return 1: #DE
84.653 + *      Return 0: Quotient=u[0], Remainder=u[1]
84.654 + * NB. We don't use idiv directly as it's moderately hard to work out
84.655 + *     ahead of time whether it will #DE, which we cannot allow to happen.
84.656 + */
84.657 +static int idiv_dbl(unsigned long u[2], unsigned long v)
84.658 +{
84.659 +    int negu = (long)u[1] < 0, negv = (long)v < 0;
84.660 +
84.661 +    /* u = abs(u) */
84.662 +    if ( negu )
84.663 +    {
84.664 +        u[1] = ~u[1];
84.665 +        if ( (u[0] = -u[0]) == 0 )
84.666 +            u[1]++;
84.667 +    }
84.668 +
84.669 +    /* abs(u) / abs(v) */
84.670 +    if ( div_dbl(u, negv ? -v : v) )
84.671 +        return 1;
84.672 +
84.673 +    /* Remainder has same sign as dividend. It cannot overflow. */
84.674 +    if ( negu )
84.675 +        u[1] = -u[1];
84.676 +
84.677 +    /* Quotient is overflowed if sign bit is set. */
84.678 +    if ( negu ^ negv )
84.679 +    {
84.680 +        if ( (long)u[0] >= 0 )
84.681 +            u[0] = -u[0];
84.682 +        else if ( (u[0] << 1) != 0 ) /* == 0x80...0 is okay */
84.683 +            return 1;
84.684 +    }
84.685 +    else if ( (long)u[0] < 0 )
84.686 +        return 1;
84.687 +
84.688 +    return 0;
84.689 +}
84.690 +
84.691 +static int
84.692 +test_cc(
84.693 +    unsigned int condition, unsigned int flags)
84.694 +{
84.695 +    int rc = 0;
84.696 +
84.697 +    switch ( (condition & 15) >> 1 )
84.698 +    {
84.699 +    case 0: /* o */
84.700 +        rc |= (flags & EFLG_OF);
84.701 +        break;
84.702 +    case 1: /* b/c/nae */
84.703 +        rc |= (flags & EFLG_CF);
84.704 +        break;
84.705 +    case 2: /* z/e */
84.706 +        rc |= (flags & EFLG_ZF);
84.707 +        break;
84.708 +    case 3: /* be/na */
84.709 +        rc |= (flags & (EFLG_CF|EFLG_ZF));
84.710 +        break;
84.711 +    case 4: /* s */
84.712 +        rc |= (flags & EFLG_SF);
84.713 +        break;
84.714 +    case 5: /* p/pe */
84.715 +        rc |= (flags & EFLG_PF);
84.716 +        break;
84.717 +    case 7: /* le/ng */
84.718 +        rc |= (flags & EFLG_ZF);
84.719 +        /* fall through */
84.720 +    case 6: /* l/nge */
84.721 +        rc |= (!(flags & EFLG_SF) != !(flags & EFLG_OF));
84.722 +        break;
84.723 +    }
84.724 +
84.725 +    /* Odd condition identifiers (lsb == 1) have inverted sense. */
84.726 +    return (!!rc ^ (condition & 1));
84.727 +}
84.728 +
84.729 +static int
84.730 +get_cpl(
84.731 +    struct x86_emulate_ctxt *ctxt,
84.732 +    struct x86_emulate_ops *ops)
84.733 +{
84.734 +    struct segment_register reg;
84.735 +
84.736 +    if ( ctxt->regs->eflags & EFLG_VM )
84.737 +        return 3;
84.738 +
84.739 +    if ( (ops->read_segment == NULL) ||
84.740 +         ops->read_segment(x86_seg_ss, &reg, ctxt) )
84.741 +        return -1;
84.742 +
84.743 +    return reg.attr.fields.dpl;
84.744 +}
84.745 +
84.746 +static int
84.747 +_mode_iopl(
84.748 +    struct x86_emulate_ctxt *ctxt,
84.749 +    struct x86_emulate_ops *ops)
84.750 +{
84.751 +    int cpl = get_cpl(ctxt, ops);
84.752 +    if ( cpl == -1 )
84.753 +        return -1;
84.754 +    return (cpl <= ((ctxt->regs->eflags >> 12) & 3));
84.755 +}
84.756 +
84.757 +#define mode_ring0() ({                         \
84.758 +    int _cpl = get_cpl(ctxt, ops);              \
84.759 +    fail_if(_cpl < 0);                          \
84.760 +    (_cpl == 0);                                \
84.761 +})
84.762 +#define mode_iopl() ({                          \
84.763 +    int _iopl = _mode_iopl(ctxt, ops);          \
84.764 +    fail_if(_iopl < 0);                         \
84.765 +    _iopl;                                      \
84.766 +})
84.767 +
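For reference, test_cc() above decodes the low nibble of a Jcc/SETcc/CMOVcc opcode: bits 3:1 select the predicate and bit 0 inverts it. A small illustration (not part of this changeset; values hypothetical, using the EFLG_* definitions above):

    static void test_cc_example(void)
    {
        unsigned int flags = EFLG_SF;      /* SF=1, ZF=0 */
        int je  = test_cc(0x4, flags);     /* "e/z":  0, ZF is clear */
        int jne = test_cc(0x5, flags);     /* odd nibble inverts 0x4: 1 */
        int js  = test_cc(0x8, flags);     /* "s":    1, SF is set */
        (void)je; (void)jne; (void)js;
    }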
84.768 +static int ioport_access_check(
84.769 +    unsigned int first_port,
84.770 +    unsigned int bytes,
84.771 +    struct x86_emulate_ctxt *ctxt,
84.772 +    struct x86_emulate_ops *ops)
84.773 +{
84.774 +    unsigned long iobmp;
84.775 +    struct segment_register tr;
84.776 +    int rc = X86EMUL_OKAY;
84.777 +
84.778 +    if ( !(ctxt->regs->eflags & EFLG_VM) && mode_iopl() )
84.779 +        return X86EMUL_OKAY;
84.780 +
84.781 +    fail_if(ops->read_segment == NULL);
84.782 +    if ( (rc = ops->read_segment(x86_seg_tr, &tr, ctxt)) != 0 )
84.783 +        return rc;
84.784 +
84.785 +    /* Ensure that the TSS is valid and has an io-bitmap-offset field. */
84.786 +    if ( !tr.attr.fields.p ||
84.787 +         ((tr.attr.fields.type & 0xd) != 0x9) ||
84.788 +         (tr.limit < 0x67) )
84.789 +        goto raise_exception;
84.790 +
84.791 +    if ( (rc = ops->read(x86_seg_none, tr.base + 0x66, &iobmp, 2, ctxt)) )
84.792 +        return rc;
84.793 +
84.794 +    /* Ensure TSS includes two bytes including byte containing first port. */
84.795 +    iobmp += first_port / 8;
84.796 +    if ( tr.limit <= iobmp )
84.797 +        goto raise_exception;
84.798 +
84.799 +    if ( (rc = ops->read(x86_seg_none, tr.base + iobmp, &iobmp, 2, ctxt)) )
84.800 +        return rc;
84.801 +    if ( (iobmp & (((1<<bytes)-1) << (first_port&7))) != 0 )
84.802 +        goto raise_exception;
84.803 +
84.804 + done:
84.805 +    return rc;
84.806 +
84.807 + raise_exception:
84.808 +    fail_if(ops->inject_hw_exception == NULL);
84.809 +    return ops->inject_hw_exception(EXC_GP, 0, ctxt) ? : X86EMUL_EXCEPTION;
84.810 +}
84.811 +
84.812 +static int
84.813 +in_realmode(
84.814 +    struct x86_emulate_ctxt *ctxt,
84.815 +    struct x86_emulate_ops *ops)
84.816 +{
84.817 +    unsigned long cr0;
84.818 +    int rc;
84.819 +
84.820 +    if ( ops->read_cr == NULL )
84.821 +        return 0;
84.822 +
84.823 +    rc = ops->read_cr(0, &cr0, ctxt);
84.824 +    return (!rc && !(cr0 & CR0_PE));
84.825 +}
84.826 +
84.827 +static int
84.828 +realmode_load_seg(
84.829 +    enum x86_segment seg,
84.830 +    uint16_t sel,
84.831 +    struct x86_emulate_ctxt *ctxt,
84.832 +    struct x86_emulate_ops *ops)
84.833 +{
84.834 +    struct segment_register reg;
84.835 +    int rc;
84.836 +
84.837 +    if ( (rc = ops->read_segment(seg, &reg, ctxt)) != 0 )
84.838 +        return rc;
84.839 +
84.840 +    reg.sel = sel;
84.841 +    reg.base = (uint32_t)sel << 4;
84.842 +
84.843 +    return ops->write_segment(seg, &reg, ctxt);
84.844 +}
84.845 +
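realmode_load_seg() above is just the 8086 rule that a segment's base is its selector shifted left by four; a worked example (illustrative only, not part of this changeset):

    static void realmode_seg_example(void)
    {
        uint16_t sel  = 0x1234;
        uint32_t base = (uint32_t)sel << 4;   /* 0x12340 */
        uint32_t lin  = base + 0x0010;        /* DS:0x0010 -> linear 0x12350 */
        (void)lin;                 /* limit and attributes are left untouched */
    }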
84.846 +static int
84.847 +protmode_load_seg(
84.848 +    enum x86_segment seg,
84.849 +    uint16_t sel,
84.850 +    struct x86_emulate_ctxt *ctxt,
84.851 +    struct x86_emulate_ops *ops)
84.852 +{
84.853 +    struct segment_register desctab, cs, segr;
84.854 +    struct { uint32_t a, b; } desc;
84.855 +    unsigned long val;
84.856 +    uint8_t dpl, rpl, cpl;
84.857 +    int rc, fault_type = EXC_TS;
84.858 +
84.859 +    /* NULL selector? */
84.860 +    if ( (sel & 0xfffc) == 0 )
84.861 +    {
84.862 +        if ( (seg == x86_seg_cs) || (seg == x86_seg_ss) )
84.863 +            goto raise_exn;
84.864 +        memset(&segr, 0, sizeof(segr));
84.865 +        return ops->write_segment(seg, &segr, ctxt);
84.866 +    }
84.867 +
84.868 +    /* LDT descriptor must be in the GDT. */
84.869 +    if ( (seg == x86_seg_ldtr) && (sel & 4) )
84.870 +        goto raise_exn;
84.871 +
84.872 +    if ( (rc = ops->read_segment(x86_seg_cs, &cs, ctxt)) ||
84.873 +         (rc = ops->read_segment((sel & 4) ? x86_seg_ldtr : x86_seg_gdtr,
84.874 +                                 &desctab, ctxt)) )
84.875 +        return rc;
84.876 +
84.877 +    /* Check against descriptor table limit. */
84.878 +    if ( ((sel & 0xfff8) + 7) > desctab.limit )
84.879 +        goto raise_exn;
84.880 +
84.881 +    do {
84.882 +        if ( (rc = ops->read(x86_seg_none, desctab.base + (sel & 0xfff8),
84.883 +                             &val, 4, ctxt)) )
84.884 +            return rc;
84.885 +        desc.a = val;
84.886 +        if ( (rc = ops->read(x86_seg_none, desctab.base + (sel & 0xfff8) + 4,
84.887 +                             &val, 4, ctxt)) )
84.888 +            return rc;
84.889 +        desc.b = val;
84.890 +
84.891 +        /* Segment present in memory? */
84.892 +        if ( !(desc.b & (1u<<15)) )
84.893 +        {
84.894 +            fault_type = EXC_NP;
84.895 +            goto raise_exn;
84.896 +        }
84.897 +
84.898 +        /* LDT descriptor is a system segment. All others are code/data. */
84.899 +        if ( (desc.b & (1u<<12)) == ((seg == x86_seg_ldtr) << 12) )
84.900 +            goto raise_exn;
84.901 +
84.902 +        dpl = (desc.b >> 13) & 3;
84.903 +        rpl = sel & 3;
84.904 +        cpl = cs.sel & 3;
84.905 +
84.906 +        switch ( seg )
84.907 +        {
84.908 +        case x86_seg_cs:
84.909 +            /* Code segment? */
84.910 +            if ( !(desc.b & (1u<<11)) )
84.911 +                goto raise_exn;
84.912 +            /* Non-conforming segment: check DPL against RPL. */
84.913 +            if ( ((desc.b & (6u<<9)) != 6) && (dpl != rpl) )
84.914 +                goto raise_exn;
84.915 +            break;
84.916 +        case x86_seg_ss:
84.917 +            /* Writable data segment? */
84.918 +            if ( (desc.b & (5u<<9)) != (1u<<9) )
84.919 +                goto raise_exn;
84.920 +            if ( (dpl != cpl) || (dpl != rpl) )
84.921 +                goto raise_exn;
84.922 +            break;
84.923 +        case x86_seg_ldtr:
84.924 +            /* LDT system segment? */
84.925 +            if ( (desc.b & (15u<<8)) != (2u<<8) )
84.926 +                goto raise_exn;
84.927 +            goto skip_accessed_flag;
84.928 +        default:
84.929 +            /* Readable code or data segment? */
84.930 +            if ( (desc.b & (5u<<9)) == (4u<<9) )
84.931 +                goto raise_exn;
84.932 +            /* Non-conforming segment: check DPL against RPL and CPL. */
84.933 +            if ( ((desc.b & (6u<<9)) != 6) && ((dpl < cpl) || (dpl < rpl)) )
84.934 +                goto raise_exn;
84.935 +            break;
84.936 +        }
84.937 +
84.938 +        /* Ensure Accessed flag is set. */
84.939 +        rc = ((desc.b & 0x100) ? X86EMUL_OKAY :
84.940 +              ops->cmpxchg(
84.941 +                  x86_seg_none, desctab.base + (sel & 0xfff8) + 4, desc.b,
84.942 +                  desc.b | 0x100, 4, ctxt));
84.943 +    } while ( rc == X86EMUL_CMPXCHG_FAILED );
84.944 +
84.945 +    if ( rc )
84.946 +        return rc;
84.947 +
84.948 +    /* Force the Accessed flag in our local copy. */
84.949 +    desc.b |= 0x100;
84.950 +
84.951 + skip_accessed_flag:
84.952 +    segr.base = (((desc.b << 0) & 0xff000000u) |
84.953 +                 ((desc.b << 16) & 0x00ff0000u) |
84.954 +                 ((desc.a >> 16) & 0x0000ffffu));
84.955 +    segr.attr.bytes = (((desc.b >> 8) & 0x00ffu) |
84.956 +                       ((desc.b >> 12) & 0x0f00u));
84.957 +    segr.limit = (desc.b & 0x000f0000u) | (desc.a & 0x0000ffffu);
84.958 +    if ( segr.attr.fields.g )
84.959 +        segr.limit = (segr.limit << 12) | 0xfffu;
84.960 +    segr.sel = sel;
84.961 +    return ops->write_segment(seg, &segr, ctxt);
84.962 +
84.963 + raise_exn:
84.964 +    if ( ops->inject_hw_exception == NULL )
84.965 +        return X86EMUL_UNHANDLEABLE;
84.966 +    if ( (rc = ops->inject_hw_exception(fault_type, sel & 0xfffc, ctxt)) )
84.967 +        return rc;
84.968 +    return X86EMUL_EXCEPTION;
84.969 +}
84.970 +
84.971 +static int
84.972 +load_seg(
84.973 +    enum x86_segment seg,
84.974 +    uint16_t sel,
84.975 +    struct x86_emulate_ctxt *ctxt,
84.976 +    struct x86_emulate_ops *ops)
84.977 +{
84.978 +    if ( (ops->read_segment == NULL) ||
84.979 +         (ops->write_segment == NULL) )
84.980 +        return X86EMUL_UNHANDLEABLE;
84.981 +
84.982 +    if ( in_realmode(ctxt, ops) )
84.983 +        return realmode_load_seg(seg, sel, ctxt, ops);
84.984 +
84.985 +    return protmode_load_seg(seg, sel, ctxt, ops);
84.986 +}
84.987 +
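The descriptor unpacking at skip_accessed_flag in protmode_load_seg() above is easiest to follow on a concrete value; for the classic flat 4GB code segment (illustrative sketch only, not part of this changeset):

    static void descriptor_unpack_example(void)
    {
        uint32_t a = 0x0000ffff, b = 0x00cf9a00;     /* flat code segment */
        uint32_t base  = ((b << 0) & 0xff000000u) |
                         ((b << 16) & 0x00ff0000u) |
                         ((a >> 16) & 0x0000ffffu);  /* 0x00000000 */
        uint16_t attr  = ((b >> 8) & 0x00ffu) |
                         ((b >> 12) & 0x0f00u);      /* 0xc9a: P,DPL0,code,G,D */
        uint32_t limit = (b & 0x000f0000u) |
                         (a & 0x0000ffffu);          /* 0xfffff */
        limit = (limit << 12) | 0xfffu;              /* G=1: scaled to 0xffffffff */
        (void)base; (void)attr; (void)limit;
    }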
84.988 +void *
84.989 +decode_register(
84.990 +    uint8_t modrm_reg, struct cpu_user_regs *regs, int highbyte_regs)
84.991 +{
84.992 +    void *p;
84.993 +
84.994 +    switch ( modrm_reg )
84.995 +    {
84.996 +    case 0: p = &regs->eax; break;
84.997 +    case 1: p = &regs->ecx; break;
84.998 +    case 2: p = &regs->edx; break;
84.999 +    case 3: p = &regs->ebx; break;
84.1000 +    case 4: p = (highbyte_regs ?
84.1001 +                 ((unsigned char *)&regs->eax + 1) :
84.1002 +                 (unsigned char *)&regs->esp); break;
84.1003 +    case 5: p = (highbyte_regs ?
84.1004 +                 ((unsigned char *)&regs->ecx + 1) :
84.1005 +                 (unsigned char *)&regs->ebp); break;
84.1006 +    case 6: p = (highbyte_regs ?
84.1007 +                 ((unsigned char *)&regs->edx + 1) :
84.1008 +                 (unsigned char *)&regs->esi); break;
84.1009 +    case 7: p = (highbyte_regs ?
84.1010 +                 ((unsigned char *)&regs->ebx + 1) :
84.1011 +                 (unsigned char *)&regs->edi); break;
84.1012 +#if defined(__x86_64__)
84.1013 +    case 8: p = &regs->r8; break;
84.1014 +    case 9: p = &regs->r9; break;
84.1015 +    case 10: p = &regs->r10; break;
84.1016 +    case 11: p = &regs->r11; break;
84.1017 +    case 12: p = &regs->r12; break;
84.1018 +    case 13: p = &regs->r13; break;
84.1019 +    case 14: p = &regs->r14; break;
84.1020 +    case 15: p = &regs->r15; break;
84.1021 +#endif
84.1022 +    default: p = NULL; break;
84.1023 +    }
84.1024 +
84.1025 +    return p;
84.1026 +}
84.1027 +
84.1028 +#define decode_segment_failed x86_seg_tr
84.1029 +enum x86_segment
84.1030 +decode_segment(
84.1031 +    uint8_t modrm_reg)
84.1032 +{
84.1033 +    switch ( modrm_reg )
84.1034 +    {
84.1035 +    case 0: return x86_seg_es;
84.1036 +    case 1: return x86_seg_cs;
84.1037 +    case 2: return x86_seg_ss;
84.1038 +    case 3: return x86_seg_ds;
84.1039 +    case 4: return x86_seg_fs;
84.1040 +    case 5: return x86_seg_gs;
84.1041 +    default: break;
84.1042 +    }
84.1043 +    return decode_segment_failed;
84.1044 +}
84.1045 +
84.1046 +int
84.1047 +x86_emulate(
84.1048 +    struct x86_emulate_ctxt *ctxt,
84.1049 +    struct x86_emulate_ops *ops)
84.1050 +{
84.1051 +    /* Shadow copy of register state. Committed on successful emulation. */
84.1052 +    struct cpu_user_regs _regs = *ctxt->regs;
84.1053 +
84.1054 +    uint8_t b, d, sib, sib_index, sib_base, twobyte = 0, rex_prefix = 0;
84.1055 +    uint8_t modrm = 0, modrm_mod = 0, modrm_reg = 0, modrm_rm = 0;
84.1056 +    unsigned int op_bytes, def_op_bytes, ad_bytes, def_ad_bytes;
84.1057 +#define REPE_PREFIX 1
84.1058 +#define REPNE_PREFIX 2
84.1059 +    unsigned int lock_prefix = 0, rep_prefix = 0;
84.1060 +    int override_seg = -1, rc = X86EMUL_OKAY;
84.1061 +    struct operand src, dst;
84.1062 +
84.1063 +    /* Data operand effective address (usually computed from ModRM). */
84.1064 +    struct operand ea;
84.1065 +
84.1066 +    /* Default is a memory operand relative to segment DS. */
84.1067 +    ea.type = OP_MEM;
84.1068 +    ea.mem.seg = x86_seg_ds;
84.1069 +    ea.mem.off = 0;
84.1070 +
84.1071 +    ctxt->retire.byte = 0;
84.1072 +
84.1073 +    op_bytes = def_op_bytes = ad_bytes = def_ad_bytes = ctxt->addr_size/8;
84.1074 +    if ( op_bytes == 8 )
84.1075 +    {
84.1076 +        op_bytes = def_op_bytes = 4;
84.1077 +#ifndef __x86_64__
84.1078 +        return X86EMUL_UNHANDLEABLE;
84.1079 +#endif
84.1080 +    }
84.1081 +
84.1082 +    /* Prefix bytes. */
84.1083 +    for ( ; ; )
84.1084 +    {
84.1085 +        switch ( b = insn_fetch_type(uint8_t) )
84.1086 +        {
84.1087 +        case 0x66: /* operand-size override */
84.1088 +            op_bytes = def_op_bytes ^ 6;
84.1089 +            break;
84.1090 +        case 0x67: /* address-size override */
84.1091 +            ad_bytes = def_ad_bytes ^ (mode_64bit() ? 12 : 6);
84.1092 +            break;
84.1093 +        case 0x2e: /* CS override */
84.1094 +            override_seg = x86_seg_cs;
84.1095 +            break;
84.1096 +        case 0x3e: /* DS override */
84.1097 +            override_seg = x86_seg_ds;
84.1098 +            break;
84.1099 +        case 0x26: /* ES override */
84.1100 +            override_seg = x86_seg_es;
84.1101 +            break;
84.1102 +        case 0x64: /* FS override */
84.1103 +            override_seg = x86_seg_fs;
84.1104 +            break;
84.1105 +        case 0x65: /* GS override */
84.1106 +            override_seg = x86_seg_gs;
84.1107 +            break;
84.1108 +        case 0x36: /* SS override */
84.1109 +            override_seg = x86_seg_ss;
84.1110 +            break;
84.1111 +        case 0xf0: /* LOCK */
84.1112 +            lock_prefix = 1;
84.1113 +            break;
84.1114 +        case 0xf2: /* REPNE/REPNZ */
84.1115 +            rep_prefix = REPNE_PREFIX;
84.1116 +            break;
84.1117 +        case 0xf3: /* REP/REPE/REPZ */
84.1118 +            rep_prefix = REPE_PREFIX;
84.1119 +            break;
84.1120 +        case 0x40 ...
0x4f: /* REX */ 84.1121 + if ( !mode_64bit() ) 84.1122 + goto done_prefixes; 84.1123 + rex_prefix = b; 84.1124 + continue; 84.1125 + default: 84.1126 + goto done_prefixes; 84.1127 + } 84.1128 + 84.1129 + /* Any legacy prefix after a REX prefix nullifies its effect. */ 84.1130 + rex_prefix = 0; 84.1131 + } 84.1132 + done_prefixes: 84.1133 + 84.1134 + if ( rex_prefix & 8 ) /* REX.W */ 84.1135 + op_bytes = 8; 84.1136 + 84.1137 + /* Opcode byte(s). */ 84.1138 + d = opcode_table[b]; 84.1139 + if ( d == 0 ) 84.1140 + { 84.1141 + /* Two-byte opcode? */ 84.1142 + if ( b == 0x0f ) 84.1143 + { 84.1144 + twobyte = 1; 84.1145 + b = insn_fetch_type(uint8_t); 84.1146 + d = twobyte_table[b]; 84.1147 + } 84.1148 + 84.1149 + /* Unrecognised? */ 84.1150 + if ( d == 0 ) 84.1151 + goto cannot_emulate; 84.1152 + } 84.1153 + 84.1154 + /* Lock prefix is allowed only on RMW instructions. */ 84.1155 + generate_exception_if((d & Mov) && lock_prefix, EXC_GP, 0); 84.1156 + 84.1157 + /* ModRM and SIB bytes. */ 84.1158 + if ( d & ModRM ) 84.1159 + { 84.1160 + modrm = insn_fetch_type(uint8_t); 84.1161 + modrm_mod = (modrm & 0xc0) >> 6; 84.1162 + modrm_reg = ((rex_prefix & 4) << 1) | ((modrm & 0x38) >> 3); 84.1163 + modrm_rm = modrm & 0x07; 84.1164 + 84.1165 + if ( modrm_mod == 3 ) 84.1166 + { 84.1167 + modrm_rm |= (rex_prefix & 1) << 3; 84.1168 + ea.type = OP_REG; 84.1169 + ea.reg = decode_register( 84.1170 + modrm_rm, &_regs, (d & ByteOp) && (rex_prefix == 0)); 84.1171 + } 84.1172 + else if ( ad_bytes == 2 ) 84.1173 + { 84.1174 + /* 16-bit ModR/M decode. */ 84.1175 + switch ( modrm_rm ) 84.1176 + { 84.1177 + case 0: 84.1178 + ea.mem.off = _regs.ebx + _regs.esi; 84.1179 + break; 84.1180 + case 1: 84.1181 + ea.mem.off = _regs.ebx + _regs.edi; 84.1182 + break; 84.1183 + case 2: 84.1184 + ea.mem.seg = x86_seg_ss; 84.1185 + ea.mem.off = _regs.ebp + _regs.esi; 84.1186 + break; 84.1187 + case 3: 84.1188 + ea.mem.seg = x86_seg_ss; 84.1189 + ea.mem.off = _regs.ebp + _regs.edi; 84.1190 + break; 84.1191 + case 4: 84.1192 + ea.mem.off = _regs.esi; 84.1193 + break; 84.1194 + case 5: 84.1195 + ea.mem.off = _regs.edi; 84.1196 + break; 84.1197 + case 6: 84.1198 + if ( modrm_mod == 0 ) 84.1199 + break; 84.1200 + ea.mem.seg = x86_seg_ss; 84.1201 + ea.mem.off = _regs.ebp; 84.1202 + break; 84.1203 + case 7: 84.1204 + ea.mem.off = _regs.ebx; 84.1205 + break; 84.1206 + } 84.1207 + switch ( modrm_mod ) 84.1208 + { 84.1209 + case 0: 84.1210 + if ( modrm_rm == 6 ) 84.1211 + ea.mem.off = insn_fetch_type(int16_t); 84.1212 + break; 84.1213 + case 1: 84.1214 + ea.mem.off += insn_fetch_type(int8_t); 84.1215 + break; 84.1216 + case 2: 84.1217 + ea.mem.off += insn_fetch_type(int16_t); 84.1218 + break; 84.1219 + } 84.1220 + ea.mem.off = truncate_ea(ea.mem.off); 84.1221 + } 84.1222 + else 84.1223 + { 84.1224 + /* 32/64-bit ModR/M decode. */ 84.1225 + if ( modrm_rm == 4 ) 84.1226 + { 84.1227 + sib = insn_fetch_type(uint8_t); 84.1228 + sib_index = ((sib >> 3) & 7) | ((rex_prefix << 2) & 8); 84.1229 + sib_base = (sib & 7) | ((rex_prefix << 3) & 8); 84.1230 + if ( sib_index != 4 ) 84.1231 + ea.mem.off = *(long*)decode_register(sib_index, &_regs, 0); 84.1232 + ea.mem.off <<= (sib >> 6) & 3; 84.1233 + if ( (modrm_mod == 0) && ((sib_base & 7) == 5) ) 84.1234 + ea.mem.off += insn_fetch_type(int32_t); 84.1235 + else if ( sib_base == 4 ) 84.1236 + { 84.1237 + ea.mem.seg = x86_seg_ss; 84.1238 + ea.mem.off += _regs.esp; 84.1239 + if ( !twobyte && (b == 0x8f) ) 84.1240 + /* POP <rm> computes its EA post increment. 
*/ 84.1241 + ea.mem.off += ((mode_64bit() && (op_bytes == 4)) 84.1242 + ? 8 : op_bytes); 84.1243 + } 84.1244 + else if ( sib_base == 5 ) 84.1245 + { 84.1246 + ea.mem.seg = x86_seg_ss; 84.1247 + ea.mem.off += _regs.ebp; 84.1248 + } 84.1249 + else 84.1250 + ea.mem.off += *(long*)decode_register(sib_base, &_regs, 0); 84.1251 + } 84.1252 + else 84.1253 + { 84.1254 + modrm_rm |= (rex_prefix & 1) << 3; 84.1255 + ea.mem.off = *(long *)decode_register(modrm_rm, &_regs, 0); 84.1256 + if ( (modrm_rm == 5) && (modrm_mod != 0) ) 84.1257 + ea.mem.seg = x86_seg_ss; 84.1258 + } 84.1259 + switch ( modrm_mod ) 84.1260 + { 84.1261 + case 0: 84.1262 + if ( (modrm_rm & 7) != 5 ) 84.1263 + break; 84.1264 + ea.mem.off = insn_fetch_type(int32_t); 84.1265 + if ( !mode_64bit() ) 84.1266 + break; 84.1267 + /* Relative to RIP of next instruction. Argh! */ 84.1268 + ea.mem.off += _regs.eip; 84.1269 + if ( (d & SrcMask) == SrcImm ) 84.1270 + ea.mem.off += (d & ByteOp) ? 1 : 84.1271 + ((op_bytes == 8) ? 4 : op_bytes); 84.1272 + else if ( (d & SrcMask) == SrcImmByte ) 84.1273 + ea.mem.off += 1; 84.1274 + else if ( !twobyte && ((b & 0xfe) == 0xf6) && 84.1275 + ((modrm_reg & 7) <= 1) ) 84.1276 + /* Special case in Grp3: test has immediate operand. */ 84.1277 + ea.mem.off += (d & ByteOp) ? 1 84.1278 + : ((op_bytes == 8) ? 4 : op_bytes); 84.1279 + else if ( twobyte && ((b & 0xf7) == 0xa4) ) 84.1280 + /* SHLD/SHRD with immediate byte third operand. */ 84.1281 + ea.mem.off++; 84.1282 + break; 84.1283 + case 1: 84.1284 + ea.mem.off += insn_fetch_type(int8_t); 84.1285 + break; 84.1286 + case 2: 84.1287 + ea.mem.off += insn_fetch_type(int32_t); 84.1288 + break; 84.1289 + } 84.1290 + ea.mem.off = truncate_ea(ea.mem.off); 84.1291 + } 84.1292 + } 84.1293 + 84.1294 + if ( override_seg != -1 ) 84.1295 + ea.mem.seg = override_seg; 84.1296 + 84.1297 + /* Special instructions do their own operand decoding. */ 84.1298 + if ( (d & DstMask) == ImplicitOps ) 84.1299 + goto special_insn; 84.1300 + 84.1301 + /* Decode and fetch the source operand: register, memory or immediate. */ 84.1302 + switch ( d & SrcMask ) 84.1303 + { 84.1304 + case SrcNone: 84.1305 + break; 84.1306 + case SrcReg: 84.1307 + src.type = OP_REG; 84.1308 + if ( d & ByteOp ) 84.1309 + { 84.1310 + src.reg = decode_register(modrm_reg, &_regs, (rex_prefix == 0)); 84.1311 + src.val = *(uint8_t *)src.reg; 84.1312 + src.bytes = 1; 84.1313 + } 84.1314 + else 84.1315 + { 84.1316 + src.reg = decode_register(modrm_reg, &_regs, 0); 84.1317 + switch ( (src.bytes = op_bytes) ) 84.1318 + { 84.1319 + case 2: src.val = *(uint16_t *)src.reg; break; 84.1320 + case 4: src.val = *(uint32_t *)src.reg; break; 84.1321 + case 8: src.val = *(uint64_t *)src.reg; break; 84.1322 + } 84.1323 + } 84.1324 + break; 84.1325 + case SrcMem16: 84.1326 + ea.bytes = 2; 84.1327 + goto srcmem_common; 84.1328 + case SrcMem: 84.1329 + ea.bytes = (d & ByteOp) ? 1 : op_bytes; 84.1330 + srcmem_common: 84.1331 + src = ea; 84.1332 + if ( src.type == OP_REG ) 84.1333 + { 84.1334 + switch ( src.bytes ) 84.1335 + { 84.1336 + case 1: src.val = *(uint8_t *)src.reg; break; 84.1337 + case 2: src.val = *(uint16_t *)src.reg; break; 84.1338 + case 4: src.val = *(uint32_t *)src.reg; break; 84.1339 + case 8: src.val = *(uint64_t *)src.reg; break; 84.1340 + } 84.1341 + } 84.1342 + else if ( (rc = ops->read(src.mem.seg, src.mem.off, 84.1343 + &src.val, src.bytes, ctxt)) ) 84.1344 + goto done; 84.1345 + break; 84.1346 + case SrcImm: 84.1347 + src.type = OP_IMM; 84.1348 + src.bytes = (d & ByteOp) ? 
1 : op_bytes; 84.1349 + if ( src.bytes == 8 ) src.bytes = 4; 84.1350 + /* NB. Immediates are sign-extended as necessary. */ 84.1351 + switch ( src.bytes ) 84.1352 + { 84.1353 + case 1: src.val = insn_fetch_type(int8_t); break; 84.1354 + case 2: src.val = insn_fetch_type(int16_t); break; 84.1355 + case 4: src.val = insn_fetch_type(int32_t); break; 84.1356 + } 84.1357 + break; 84.1358 + case SrcImmByte: 84.1359 + src.type = OP_IMM; 84.1360 + src.bytes = 1; 84.1361 + src.val = insn_fetch_type(int8_t); 84.1362 + break; 84.1363 + } 84.1364 + 84.1365 + /* Decode and fetch the destination operand: register or memory. */ 84.1366 + switch ( d & DstMask ) 84.1367 + { 84.1368 + case DstReg: 84.1369 + dst.type = OP_REG; 84.1370 + if ( d & ByteOp ) 84.1371 + { 84.1372 + dst.reg = decode_register(modrm_reg, &_regs, (rex_prefix == 0)); 84.1373 + dst.val = *(uint8_t *)dst.reg; 84.1374 + dst.bytes = 1; 84.1375 + } 84.1376 + else 84.1377 + { 84.1378 + dst.reg = decode_register(modrm_reg, &_regs, 0); 84.1379 + switch ( (dst.bytes = op_bytes) ) 84.1380 + { 84.1381 + case 2: dst.val = *(uint16_t *)dst.reg; break; 84.1382 + case 4: dst.val = *(uint32_t *)dst.reg; break; 84.1383 + case 8: dst.val = *(uint64_t *)dst.reg; break; 84.1384 + } 84.1385 + } 84.1386 + break; 84.1387 + case DstBitBase: 84.1388 + if ( ((d & SrcMask) == SrcImmByte) || (ea.type == OP_REG) ) 84.1389 + { 84.1390 + src.val &= (op_bytes << 3) - 1; 84.1391 + } 84.1392 + else 84.1393 + { 84.1394 + /* 84.1395 + * EA += BitOffset DIV op_bytes*8 84.1396 + * BitOffset = BitOffset MOD op_bytes*8 84.1397 + * DIV truncates towards negative infinity. 84.1398 + * MOD always produces a positive result. 84.1399 + */ 84.1400 + if ( op_bytes == 2 ) 84.1401 + src.val = (int16_t)src.val; 84.1402 + else if ( op_bytes == 4 ) 84.1403 + src.val = (int32_t)src.val; 84.1404 + if ( (long)src.val < 0 ) 84.1405 + { 84.1406 + unsigned long byte_offset; 84.1407 + byte_offset = op_bytes + (((-src.val-1) >> 3) & ~(op_bytes-1)); 84.1408 + ea.mem.off -= byte_offset; 84.1409 + src.val = (byte_offset << 3) + src.val; 84.1410 + } 84.1411 + else 84.1412 + { 84.1413 + ea.mem.off += (src.val >> 3) & ~(op_bytes - 1); 84.1414 + src.val &= (op_bytes << 3) - 1; 84.1415 + } 84.1416 + } 84.1417 + /* Becomes a normal DstMem operation from here on. */ 84.1418 + d = (d & ~DstMask) | DstMem; 84.1419 + case DstMem: 84.1420 + ea.bytes = (d & ByteOp) ? 1 : op_bytes; 84.1421 + dst = ea; 84.1422 + if ( dst.type == OP_REG ) 84.1423 + { 84.1424 + switch ( dst.bytes ) 84.1425 + { 84.1426 + case 1: dst.val = *(uint8_t *)dst.reg; break; 84.1427 + case 2: dst.val = *(uint16_t *)dst.reg; break; 84.1428 + case 4: dst.val = *(uint32_t *)dst.reg; break; 84.1429 + case 8: dst.val = *(uint64_t *)dst.reg; break; 84.1430 + } 84.1431 + } 84.1432 + else if ( !(d & Mov) ) /* optimisation - avoid slow emulated read */ 84.1433 + { 84.1434 + if ( (rc = ops->read(dst.mem.seg, dst.mem.off, 84.1435 + &dst.val, dst.bytes, ctxt)) ) 84.1436 + goto done; 84.1437 + dst.orig_val = dst.val; 84.1438 + } 84.1439 + break; 84.1440 + } 84.1441 + 84.1442 + /* LOCK prefix allowed only on instructions with memory destination. */ 84.1443 + generate_exception_if(lock_prefix && (dst.type != OP_MEM), EXC_GP, 0); 84.1444 + 84.1445 + if ( twobyte ) 84.1446 + goto twobyte_insn; 84.1447 + 84.1448 + switch ( b ) 84.1449 + { 84.1450 + case 0x04 ... 0x05: /* add imm,%%eax */ 84.1451 + dst.reg = (unsigned long *)&_regs.eax; 84.1452 + dst.val = _regs.eax; 84.1453 + case 0x00 ... 
0x03: add: /* add */ 84.1454 + emulate_2op_SrcV("add", src, dst, _regs.eflags); 84.1455 + break; 84.1456 + 84.1457 + case 0x0c ... 0x0d: /* or imm,%%eax */ 84.1458 + dst.reg = (unsigned long *)&_regs.eax; 84.1459 + dst.val = _regs.eax; 84.1460 + case 0x08 ... 0x0b: or: /* or */ 84.1461 + emulate_2op_SrcV("or", src, dst, _regs.eflags); 84.1462 + break; 84.1463 + 84.1464 + case 0x14 ... 0x15: /* adc imm,%%eax */ 84.1465 + dst.reg = (unsigned long *)&_regs.eax; 84.1466 + dst.val = _regs.eax; 84.1467 + case 0x10 ... 0x13: adc: /* adc */ 84.1468 + emulate_2op_SrcV("adc", src, dst, _regs.eflags); 84.1469 + break; 84.1470 + 84.1471 + case 0x1c ... 0x1d: /* sbb imm,%%eax */ 84.1472 + dst.reg = (unsigned long *)&_regs.eax; 84.1473 + dst.val = _regs.eax; 84.1474 + case 0x18 ... 0x1b: sbb: /* sbb */ 84.1475 + emulate_2op_SrcV("sbb", src, dst, _regs.eflags); 84.1476 + break; 84.1477 + 84.1478 + case 0x24 ... 0x25: /* and imm,%%eax */ 84.1479 + dst.reg = (unsigned long *)&_regs.eax; 84.1480 + dst.val = _regs.eax; 84.1481 + case 0x20 ... 0x23: and: /* and */ 84.1482 + emulate_2op_SrcV("and", src, dst, _regs.eflags); 84.1483 + break; 84.1484 + 84.1485 + case 0x2c ... 0x2d: /* sub imm,%%eax */ 84.1486 + dst.reg = (unsigned long *)&_regs.eax; 84.1487 + dst.val = _regs.eax; 84.1488 + case 0x28 ... 0x2b: sub: /* sub */ 84.1489 + emulate_2op_SrcV("sub", src, dst, _regs.eflags); 84.1490 + break; 84.1491 + 84.1492 + case 0x34 ... 0x35: /* xor imm,%%eax */ 84.1493 + dst.reg = (unsigned long *)&_regs.eax; 84.1494 + dst.val = _regs.eax; 84.1495 + case 0x30 ... 0x33: xor: /* xor */ 84.1496 + emulate_2op_SrcV("xor", src, dst, _regs.eflags); 84.1497 + break; 84.1498 + 84.1499 + case 0x3c ... 0x3d: /* cmp imm,%%eax */ 84.1500 + dst.reg = (unsigned long *)&_regs.eax; 84.1501 + dst.val = _regs.eax; 84.1502 + case 0x38 ... 0x3b: cmp: /* cmp */ 84.1503 + emulate_2op_SrcV("cmp", src, dst, _regs.eflags); 84.1504 + break; 84.1505 + 84.1506 + case 0x62: /* bound */ { 84.1507 + unsigned long src_val2; 84.1508 + int lb, ub, idx; 84.1509 + generate_exception_if(mode_64bit() || (src.type != OP_MEM), 84.1510 + EXC_UD, -1); 84.1511 + if ( (rc = ops->read(src.mem.seg, src.mem.off + op_bytes, 84.1512 + &src_val2, op_bytes, ctxt)) ) 84.1513 + goto done; 84.1514 + ub = (op_bytes == 2) ? (int16_t)src_val2 : (int32_t)src_val2; 84.1515 + lb = (op_bytes == 2) ? (int16_t)src.val : (int32_t)src.val; 84.1516 + idx = (op_bytes == 2) ? (int16_t)dst.val : (int32_t)dst.val; 84.1517 + generate_exception_if((idx < lb) || (idx > ub), EXC_BR, -1); 84.1518 + dst.type = OP_NONE; 84.1519 + break; 84.1520 + } 84.1521 + 84.1522 + case 0x63: /* movsxd (x86/64) / arpl (x86/32) */ 84.1523 + if ( mode_64bit() ) 84.1524 + { 84.1525 + /* movsxd */ 84.1526 + if ( src.type == OP_REG ) 84.1527 + src.val = *(int32_t *)src.reg; 84.1528 + else if ( (rc = ops->read(src.mem.seg, src.mem.off, 84.1529 + &src.val, 4, ctxt)) ) 84.1530 + goto done; 84.1531 + dst.val = (int32_t)src.val; 84.1532 + } 84.1533 + else 84.1534 + { 84.1535 + /* arpl */ 84.1536 + uint16_t src_val = dst.val; 84.1537 + dst = src; 84.1538 + _regs.eflags &= ~EFLG_ZF; 84.1539 + _regs.eflags |= ((src_val & 3) > (dst.val & 3)) ? 
EFLG_ZF : 0; 84.1540 + if ( _regs.eflags & EFLG_ZF ) 84.1541 + dst.val = (dst.val & ~3) | (src_val & 3); 84.1542 + else 84.1543 + dst.type = OP_NONE; 84.1544 + generate_exception_if(in_realmode(ctxt, ops), EXC_UD, -1); 84.1545 + } 84.1546 + break; 84.1547 + 84.1548 + case 0x69: /* imul imm16/32 */ 84.1549 + case 0x6b: /* imul imm8 */ { 84.1550 + unsigned long src1; /* ModR/M source operand */ 84.1551 + if ( ea.type == OP_REG ) 84.1552 + src1 = *ea.reg; 84.1553 + else if ( (rc = ops->read(ea.mem.seg, ea.mem.off, 84.1554 + &src1, op_bytes, ctxt)) ) 84.1555 + goto done; 84.1556 + _regs.eflags &= ~(EFLG_OF|EFLG_CF); 84.1557 + switch ( dst.bytes ) 84.1558 + { 84.1559 + case 2: 84.1560 + dst.val = ((uint32_t)(int16_t)src.val * 84.1561 + (uint32_t)(int16_t)src1); 84.1562 + if ( (int16_t)dst.val != (uint32_t)dst.val ) 84.1563 + _regs.eflags |= EFLG_OF|EFLG_CF; 84.1564 + break; 84.1565 +#ifdef __x86_64__ 84.1566 + case 4: 84.1567 + dst.val = ((uint64_t)(int32_t)src.val * 84.1568 + (uint64_t)(int32_t)src1); 84.1569 + if ( (int32_t)dst.val != dst.val ) 84.1570 + _regs.eflags |= EFLG_OF|EFLG_CF; 84.1571 + break; 84.1572 +#endif 84.1573 + default: { 84.1574 + unsigned long m[2] = { src.val, src1 }; 84.1575 + if ( imul_dbl(m) ) 84.1576 + _regs.eflags |= EFLG_OF|EFLG_CF; 84.1577 + dst.val = m[0]; 84.1578 + break; 84.1579 + } 84.1580 + } 84.1581 + break; 84.1582 + } 84.1583 + 84.1584 + case 0x82: /* Grp1 (x86/32 only) */ 84.1585 + generate_exception_if(mode_64bit(), EXC_UD, -1); 84.1586 + case 0x80: case 0x81: case 0x83: /* Grp1 */ 84.1587 + switch ( modrm_reg & 7 ) 84.1588 + { 84.1589 + case 0: goto add; 84.1590 + case 1: goto or; 84.1591 + case 2: goto adc; 84.1592 + case 3: goto sbb; 84.1593 + case 4: goto and; 84.1594 + case 5: goto sub; 84.1595 + case 6: goto xor; 84.1596 + case 7: goto cmp; 84.1597 + } 84.1598 + break; 84.1599 + 84.1600 + case 0xa8 ... 0xa9: /* test imm,%%eax */ 84.1601 + dst.reg = (unsigned long *)&_regs.eax; 84.1602 + dst.val = _regs.eax; 84.1603 + case 0x84 ... 0x85: test: /* test */ 84.1604 + emulate_2op_SrcV("test", src, dst, _regs.eflags); 84.1605 + break; 84.1606 + 84.1607 + case 0x86 ... 0x87: xchg: /* xchg */ 84.1608 + /* Write back the register source. */ 84.1609 + switch ( dst.bytes ) 84.1610 + { 84.1611 + case 1: *(uint8_t *)src.reg = (uint8_t)dst.val; break; 84.1612 + case 2: *(uint16_t *)src.reg = (uint16_t)dst.val; break; 84.1613 + case 4: *src.reg = (uint32_t)dst.val; break; /* 64b reg: zero-extend */ 84.1614 + case 8: *src.reg = dst.val; break; 84.1615 + } 84.1616 + /* Write back the memory destination with implicit LOCK prefix. */ 84.1617 + dst.val = src.val; 84.1618 + lock_prefix = 1; 84.1619 + break; 84.1620 + 84.1621 + case 0xc6 ... 0xc7: /* mov (sole member of Grp11) */ 84.1622 + generate_exception_if((modrm_reg & 7) != 0, EXC_UD, -1); 84.1623 + case 0x88 ... 
0x8b: /* mov */ 84.1624 + dst.val = src.val; 84.1625 + break; 84.1626 + 84.1627 + case 0x8c: /* mov Sreg,r/m */ { 84.1628 + struct segment_register reg; 84.1629 + enum x86_segment seg = decode_segment(modrm_reg); 84.1630 + generate_exception_if(seg == decode_segment_failed, EXC_UD, -1); 84.1631 + fail_if(ops->read_segment == NULL); 84.1632 + if ( (rc = ops->read_segment(seg, ®, ctxt)) != 0 ) 84.1633 + goto done; 84.1634 + dst.val = reg.sel; 84.1635 + if ( dst.type == OP_MEM ) 84.1636 + dst.bytes = 2; 84.1637 + break; 84.1638 + } 84.1639 + 84.1640 + case 0x8e: /* mov r/m,Sreg */ { 84.1641 + enum x86_segment seg = decode_segment(modrm_reg); 84.1642 + generate_exception_if(seg == decode_segment_failed, EXC_UD, -1); 84.1643 + if ( (rc = load_seg(seg, (uint16_t)src.val, ctxt, ops)) != 0 ) 84.1644 + goto done; 84.1645 + if ( seg == x86_seg_ss ) 84.1646 + ctxt->retire.flags.mov_ss = 1; 84.1647 + dst.type = OP_NONE; 84.1648 + break; 84.1649 + } 84.1650 + 84.1651 + case 0x8d: /* lea */ 84.1652 + dst.val = ea.mem.off; 84.1653 + break; 84.1654 + 84.1655 + case 0x8f: /* pop (sole member of Grp1a) */ 84.1656 + generate_exception_if((modrm_reg & 7) != 0, EXC_UD, -1); 84.1657 + /* 64-bit mode: POP defaults to a 64-bit operand. */ 84.1658 + if ( mode_64bit() && (dst.bytes == 4) ) 84.1659 + dst.bytes = 8; 84.1660 + if ( (rc = ops->read(x86_seg_ss, sp_post_inc(dst.bytes), 84.1661 + &dst.val, dst.bytes, ctxt)) != 0 ) 84.1662 + goto done; 84.1663 + break; 84.1664 + 84.1665 + case 0xb0 ... 0xb7: /* mov imm8,r8 */ 84.1666 + dst.reg = decode_register( 84.1667 + (b & 7) | ((rex_prefix & 1) << 3), &_regs, (rex_prefix == 0)); 84.1668 + dst.val = src.val; 84.1669 + break; 84.1670 + 84.1671 + case 0xb8 ... 0xbf: /* mov imm{16,32,64},r{16,32,64} */ 84.1672 + if ( dst.bytes == 8 ) /* Fetch more bytes to obtain imm64 */ 84.1673 + src.val = ((uint32_t)src.val | 84.1674 + ((uint64_t)insn_fetch_type(uint32_t) << 32)); 84.1675 + dst.reg = decode_register( 84.1676 + (b & 7) | ((rex_prefix & 1) << 3), &_regs, 0); 84.1677 + dst.val = src.val; 84.1678 + break; 84.1679 + 84.1680 + case 0xc0 ... 
0xc1: grp2: /* Grp2 */ 84.1681 + switch ( modrm_reg & 7 ) 84.1682 + { 84.1683 + case 0: /* rol */ 84.1684 + emulate_2op_SrcB("rol", src, dst, _regs.eflags); 84.1685 + break; 84.1686 + case 1: /* ror */ 84.1687 + emulate_2op_SrcB("ror", src, dst, _regs.eflags); 84.1688 + break; 84.1689 + case 2: /* rcl */ 84.1690 + emulate_2op_SrcB("rcl", src, dst, _regs.eflags); 84.1691 + break; 84.1692 + case 3: /* rcr */ 84.1693 + emulate_2op_SrcB("rcr", src, dst, _regs.eflags); 84.1694 + break; 84.1695 + case 4: /* sal/shl */ 84.1696 + case 6: /* sal/shl */ 84.1697 + emulate_2op_SrcB("sal", src, dst, _regs.eflags); 84.1698 + break; 84.1699 + case 5: /* shr */ 84.1700 + emulate_2op_SrcB("shr", src, dst, _regs.eflags); 84.1701 + break; 84.1702 + case 7: /* sar */ 84.1703 + emulate_2op_SrcB("sar", src, dst, _regs.eflags); 84.1704 + break; 84.1705 + } 84.1706 + break; 84.1707 + 84.1708 + case 0xc4: /* les */ { 84.1709 + unsigned long sel; 84.1710 + dst.val = x86_seg_es; 84.1711 + les: /* dst.val identifies the segment */ 84.1712 + generate_exception_if(src.type != OP_MEM, EXC_UD, -1); 84.1713 + if ( (rc = ops->read(src.mem.seg, src.mem.off + src.bytes, 84.1714 + &sel, 2, ctxt)) != 0 ) 84.1715 + goto done; 84.1716 + if ( (rc = load_seg(dst.val, (uint16_t)sel, ctxt, ops)) != 0 ) 84.1717 + goto done; 84.1718 + dst.val = src.val; 84.1719 + break; 84.1720 + } 84.1721 + 84.1722 + case 0xc5: /* lds */ 84.1723 + dst.val = x86_seg_ds; 84.1724 + goto les; 84.1725 + 84.1726 + case 0xd0 ... 0xd1: /* Grp2 */ 84.1727 + src.val = 1; 84.1728 + goto grp2; 84.1729 + 84.1730 + case 0xd2 ... 0xd3: /* Grp2 */ 84.1731 + src.val = _regs.ecx; 84.1732 + goto grp2; 84.1733 + 84.1734 + case 0xf6 ... 0xf7: /* Grp3 */ 84.1735 + switch ( modrm_reg & 7 ) 84.1736 + { 84.1737 + case 0 ... 1: /* test */ 84.1738 + /* Special case in Grp3: test has an immediate source operand. */ 84.1739 + src.type = OP_IMM; 84.1740 + src.bytes = (d & ByteOp) ? 
1 : op_bytes; 84.1741 + if ( src.bytes == 8 ) src.bytes = 4; 84.1742 + switch ( src.bytes ) 84.1743 + { 84.1744 + case 1: src.val = insn_fetch_type(int8_t); break; 84.1745 + case 2: src.val = insn_fetch_type(int16_t); break; 84.1746 + case 4: src.val = insn_fetch_type(int32_t); break; 84.1747 + } 84.1748 + goto test; 84.1749 + case 2: /* not */ 84.1750 + dst.val = ~dst.val; 84.1751 + break; 84.1752 + case 3: /* neg */ 84.1753 + emulate_1op("neg", dst, _regs.eflags); 84.1754 + break; 84.1755 + case 4: /* mul */ 84.1756 + src = dst; 84.1757 + dst.type = OP_REG; 84.1758 + dst.reg = (unsigned long *)&_regs.eax; 84.1759 + dst.val = *dst.reg; 84.1760 + _regs.eflags &= ~(EFLG_OF|EFLG_CF); 84.1761 + switch ( src.bytes ) 84.1762 + { 84.1763 + case 1: 84.1764 + dst.val = (uint8_t)dst.val; 84.1765 + dst.val *= src.val; 84.1766 + if ( (uint8_t)dst.val != (uint16_t)dst.val ) 84.1767 + _regs.eflags |= EFLG_OF|EFLG_CF; 84.1768 + dst.bytes = 2; 84.1769 + break; 84.1770 + case 2: 84.1771 + dst.val = (uint16_t)dst.val; 84.1772 + dst.val *= src.val; 84.1773 + if ( (uint16_t)dst.val != (uint32_t)dst.val ) 84.1774 + _regs.eflags |= EFLG_OF|EFLG_CF; 84.1775 + *(uint16_t *)&_regs.edx = dst.val >> 16; 84.1776 + break; 84.1777 +#ifdef __x86_64__ 84.1778 + case 4: 84.1779 + dst.val = (uint32_t)dst.val; 84.1780 + dst.val *= src.val; 84.1781 + if ( (uint32_t)dst.val != dst.val ) 84.1782 + _regs.eflags |= EFLG_OF|EFLG_CF; 84.1783 + _regs.edx = (uint32_t)(dst.val >> 32); 84.1784 + break; 84.1785 +#endif 84.1786 + default: { 84.1787 + unsigned long m[2] = { src.val, dst.val }; 84.1788 + if ( mul_dbl(m) ) 84.1789 + _regs.eflags |= EFLG_OF|EFLG_CF; 84.1790 + _regs.edx = m[1]; 84.1791 + dst.val = m[0]; 84.1792 + break; 84.1793 + } 84.1794 + } 84.1795 + break; 84.1796 + case 5: /* imul */ 84.1797 + src = dst; 84.1798 + dst.type = OP_REG; 84.1799 + dst.reg = (unsigned long *)&_regs.eax; 84.1800 + dst.val = *dst.reg; 84.1801 + _regs.eflags &= ~(EFLG_OF|EFLG_CF); 84.1802 + switch ( src.bytes ) 84.1803 + { 84.1804 + case 1: 84.1805 + dst.val = ((uint16_t)(int8_t)src.val * 84.1806 + (uint16_t)(int8_t)dst.val); 84.1807 + if ( (int8_t)dst.val != (uint16_t)dst.val ) 84.1808 + _regs.eflags |= EFLG_OF|EFLG_CF; 84.1809 + dst.bytes = 2; 84.1810 + break; 84.1811 + case 2: 84.1812 + dst.val = ((uint32_t)(int16_t)src.val * 84.1813 + (uint32_t)(int16_t)dst.val); 84.1814 + if ( (int16_t)dst.val != (uint32_t)dst.val ) 84.1815 + _regs.eflags |= EFLG_OF|EFLG_CF; 84.1816 + *(uint16_t *)&_regs.edx = dst.val >> 16; 84.1817 + break; 84.1818 +#ifdef __x86_64__ 84.1819 + case 4: 84.1820 + dst.val = ((uint64_t)(int32_t)src.val * 84.1821 + (uint64_t)(int32_t)dst.val); 84.1822 + if ( (int32_t)dst.val != dst.val ) 84.1823 + _regs.eflags |= EFLG_OF|EFLG_CF; 84.1824 + _regs.edx = (uint32_t)(dst.val >> 32); 84.1825 + break; 84.1826 +#endif 84.1827 + default: { 84.1828 + unsigned long m[2] = { src.val, dst.val }; 84.1829 + if ( imul_dbl(m) ) 84.1830 + _regs.eflags |= EFLG_OF|EFLG_CF; 84.1831 + _regs.edx = m[1]; 84.1832 + dst.val = m[0]; 84.1833 + break; 84.1834 + } 84.1835 + } 84.1836 + break; 84.1837 + case 6: /* div */ { 84.1838 + unsigned long u[2], v; 84.1839 + src = dst; 84.1840 + dst.type = OP_REG; 84.1841 + dst.reg = (unsigned long *)&_regs.eax; 84.1842 + switch ( src.bytes ) 84.1843 + { 84.1844 + case 1: 84.1845 + u[0] = (uint16_t)_regs.eax; 84.1846 + u[1] = 0; 84.1847 + v = (uint8_t)src.val; 84.1848 + generate_exception_if( 84.1849 + div_dbl(u, v) || ((uint8_t)u[0] != (uint16_t)u[0]), 84.1850 + EXC_DE, -1); 84.1851 + dst.val = (uint8_t)u[0]; 
84.1852 + ((uint8_t *)&_regs.eax)[1] = u[1]; 84.1853 + break; 84.1854 + case 2: 84.1855 + u[0] = ((uint32_t)_regs.edx << 16) | (uint16_t)_regs.eax; 84.1856 + u[1] = 0; 84.1857 + v = (uint16_t)src.val; 84.1858 + generate_exception_if( 84.1859 + div_dbl(u, v) || ((uint16_t)u[0] != (uint32_t)u[0]), 84.1860 + EXC_DE, -1); 84.1861 + dst.val = (uint16_t)u[0]; 84.1862 + *(uint16_t *)&_regs.edx = u[1]; 84.1863 + break; 84.1864 +#ifdef __x86_64__ 84.1865 + case 4: 84.1866 + u[0] = (_regs.edx << 32) | (uint32_t)_regs.eax; 84.1867 + u[1] = 0; 84.1868 + v = (uint32_t)src.val; 84.1869 + generate_exception_if( 84.1870 + div_dbl(u, v) || ((uint32_t)u[0] != u[0]), 84.1871 + EXC_DE, -1); 84.1872 + dst.val = (uint32_t)u[0]; 84.1873 + _regs.edx = (uint32_t)u[1]; 84.1874 + break; 84.1875 +#endif 84.1876 + default: 84.1877 + u[0] = _regs.eax; 84.1878 + u[1] = _regs.edx; 84.1879 + v = src.val; 84.1880 + generate_exception_if(div_dbl(u, v), EXC_DE, -1); 84.1881 + dst.val = u[0]; 84.1882 + _regs.edx = u[1]; 84.1883 + break; 84.1884 + } 84.1885 + break; 84.1886 + } 84.1887 + case 7: /* idiv */ { 84.1888 + unsigned long u[2], v; 84.1889 + src = dst; 84.1890 + dst.type = OP_REG; 84.1891 + dst.reg = (unsigned long *)&_regs.eax; 84.1892 + switch ( src.bytes ) 84.1893 + { 84.1894 + case 1: 84.1895 + u[0] = (int16_t)_regs.eax; 84.1896 + u[1] = ((long)u[0] < 0) ? ~0UL : 0UL; 84.1897 + v = (int8_t)src.val; 84.1898 + generate_exception_if( 84.1899 + idiv_dbl(u, v) || ((int8_t)u[0] != (int16_t)u[0]), 84.1900 + EXC_DE, -1); 84.1901 + dst.val = (int8_t)u[0]; 84.1902 + ((int8_t *)&_regs.eax)[1] = u[1]; 84.1903 + break; 84.1904 + case 2: 84.1905 + u[0] = (int32_t)((_regs.edx << 16) | (uint16_t)_regs.eax); 84.1906 + u[1] = ((long)u[0] < 0) ? ~0UL : 0UL; 84.1907 + v = (int16_t)src.val; 84.1908 + generate_exception_if( 84.1909 + idiv_dbl(u, v) || ((int16_t)u[0] != (int32_t)u[0]), 84.1910 + EXC_DE, -1); 84.1911 + dst.val = (int16_t)u[0]; 84.1912 + *(int16_t *)&_regs.edx = u[1]; 84.1913 + break; 84.1914 +#ifdef __x86_64__ 84.1915 + case 4: 84.1916 + u[0] = (_regs.edx << 32) | (uint32_t)_regs.eax; 84.1917 + u[1] = ((long)u[0] < 0) ? ~0UL : 0UL; 84.1918 + v = (int32_t)src.val; 84.1919 + generate_exception_if( 84.1920 + idiv_dbl(u, v) || ((int32_t)u[0] != u[0]), 84.1921 + EXC_DE, -1); 84.1922 + dst.val = (int32_t)u[0]; 84.1923 + _regs