ia64/xen-unstable

changeset 18685:46d7e12c4c91

merge with xen-unstable.hg
author Isaku Yamahata <yamahata@valinux.co.jp>
date Wed Oct 22 11:46:55 2008 +0900 (2008-10-22)
parents 6583186e5989 d2f7243fc571
children 6db3c096c244
files xen/include/asm-ia64/xenspinlock.h
line diff
     1.1 --- a/docs/xen-api/coversheet.tex	Wed Oct 22 11:38:22 2008 +0900
     1.2 +++ b/docs/xen-api/coversheet.tex	Wed Oct 22 11:46:55 2008 +0900
     1.3 @@ -51,6 +51,7 @@ Hollis Blanchard, IBM & Alastair Tse, Xe
     1.4  Mike Day, IBM & Daniel Veillard, Red Hat \\
     1.5  Jim Fehlig, Novell & Tom Wilkie, University of Cambridge \\
     1.6  Jon Harrop, XenSource & Yosuke Iwamatsu, NEC \\
     1.7 +Masaki Kanno, FUJITSU \\
     1.8  \end{tabular}
     1.9  \end{large}
    1.10  
     2.1 --- a/docs/xen-api/revision-history.tex	Wed Oct 22 11:38:22 2008 +0900
     2.2 +++ b/docs/xen-api/revision-history.tex	Wed Oct 22 11:46:55 2008 +0900
     2.3 @@ -56,5 +56,14 @@
     2.4      \end{flushleft}
     2.5     \end{minipage}\\
     2.6    \hline
     2.7 +  1.0.7 & 20th Oct. 08 & M. Kanno &
     2.8 +   \begin{minipage}[t]{7cm}
     2.9 +    \begin{flushleft}
    2.10 +     Added definitions of new classes DSCSI and PSCSI. Updated the table
    2.11 +     and the diagram representing relationships between classes.
    2.12 +     Added host.PSCSIs and VM.DSCSIs fields.
    2.13 +    \end{flushleft}
    2.14 +   \end{minipage}\\
    2.15 +  \hline
    2.16   \end{tabular}
    2.17  \end{center}
     3.1 --- a/docs/xen-api/xenapi-coversheet.tex	Wed Oct 22 11:38:22 2008 +0900
     3.2 +++ b/docs/xen-api/xenapi-coversheet.tex	Wed Oct 22 11:46:55 2008 +0900
     3.3 @@ -17,12 +17,12 @@
     3.4  \newcommand{\coversheetlogo}{xen.eps}
     3.5  
     3.6  %% Document date
     3.7 -\newcommand{\datestring}{24th July 2008}
     3.8 +\newcommand{\datestring}{20th October 2008}
     3.9  
    3.10  \newcommand{\releasestatement}{Stable Release}
    3.11  
    3.12  %% Document revision
    3.13 -\newcommand{\revstring}{API Revision 1.0.6}
    3.14 +\newcommand{\revstring}{API Revision 1.0.7}
    3.15  
    3.16  %% Document authors
    3.17  \newcommand{\docauthors}{
     4.1 --- a/docs/xen-api/xenapi-datamodel-graph.dot	Wed Oct 22 11:38:22 2008 +0900
     4.2 +++ b/docs/xen-api/xenapi-datamodel-graph.dot	Wed Oct 22 11:46:55 2008 +0900
     4.3 @@ -12,9 +12,11 @@
     4.4  digraph "Xen-API Class Diagram" {
     4.5  fontname="Verdana";
     4.6  
     4.7 -node [ shape=box ]; session VM host network VIF PIF SR VDI VBD PBD user XSPolicy ACMPolicy;
     4.8 -node [shape=ellipse]; PIF_metrics VIF_metrics VM_metrics VBD_metrics PBD_metrics VM_guest_metrics host_metrics;
     4.9 -node [shape=box]; DPCI PPCI host_cpu console VTPM
    4.10 +node [ shape=box ]; session VM host network VIF PIF SR VDI VBD PBD user;
    4.11 +node [ shape=box ]; XSPolicy ACMPolicy DPCI PPCI host_cpu console VTPM;
    4.12 +node [ shape=box ]; DSCSI PSCSI;
    4.13 +node [ shape=ellipse ]; VM_metrics VM_guest_metrics host_metrics;
    4.14 +node [ shape=ellipse ]; PIF_metrics VIF_metrics VBD_metrics PBD_metrics;
    4.15  session -> host [ arrowhead="none" ]
    4.16  session -> user [ arrowhead="none" ]
    4.17  VM -> VM_metrics [ arrowhead="none" ]
    4.18 @@ -41,4 +43,7 @@ XSPolicy -> ACMPolicy [ arrowhead="none"
    4.19  DPCI -> VM [ arrowhead="none", arrowtail="crow" ]
    4.20  DPCI -> PPCI [ arrowhead="none" ]
    4.21  PPCI -> host [ arrowhead="none", arrowtail="crow" ]
    4.22 +DSCSI -> VM [ arrowhead="none", arrowtail="crow" ]
    4.23 +DSCSI -> PSCSI [ arrowhead="none" ]
    4.24 +PSCSI -> host [ arrowhead="none", arrowtail="crow" ]
    4.25  }
     5.1 --- a/docs/xen-api/xenapi-datamodel.tex	Wed Oct 22 11:38:22 2008 +0900
     5.2 +++ b/docs/xen-api/xenapi-datamodel.tex	Wed Oct 22 11:46:55 2008 +0900
     5.3 @@ -46,6 +46,8 @@ Name & Description \\
     5.4  {\tt console} & A console \\
     5.5  {\tt DPCI} & A pass-through PCI device \\
     5.6  {\tt PPCI} & A physical PCI device \\
     5.7 +{\tt DSCSI} & A half-virtualized SCSI device \\
     5.8 +{\tt PSCSI} & A physical SCSI device \\
     5.9  {\tt user} & A user of the system \\
    5.10  {\tt debug} & A basic class for testing \\
    5.11  {\tt XSPolicy} & A class for handling Xen Security Policies \\
    5.12 @@ -74,6 +76,8 @@ VTPM.VM & VM.VTPMs & one-to-many\\
    5.13  console.VM & VM.consoles & one-to-many\\
    5.14  DPCI.VM & VM.DPCIs & one-to-many\\
    5.15  PPCI.host & host.PPCIs & one-to-many\\
    5.16 +DSCSI.VM & VM.DSCSIs & one-to-many\\
    5.17 +PSCSI.host & host.PSCSIs & one-to-many\\
    5.18  host.resident\_VMs & VM.resident\_on & many-to-one\\
    5.19  host.host\_CPUs & host\_cpu.host & many-to-one\\
    5.20  \hline
    5.21 @@ -1407,6 +1411,7 @@ Quals & Field & Type & Description \\
    5.22  $\mathit{RO}_\mathit{run}$ &  {\tt crash\_dumps} & (crashdump ref) Set & crash dumps associated with this VM \\
    5.23  $\mathit{RO}_\mathit{run}$ &  {\tt VTPMs} & (VTPM ref) Set & virtual TPMs \\
    5.24  $\mathit{RO}_\mathit{run}$ &  {\tt DPCIs} & (DPCI ref) Set & pass-through PCI devices \\
    5.25 +$\mathit{RO}_\mathit{run}$ &  {\tt DSCSIs} & (DSCSI ref) Set & half-virtualized SCSI devices \\
    5.26  $\mathit{RW}$ &  {\tt PV/bootloader} & string & name of or path to bootloader \\
    5.27  $\mathit{RW}$ &  {\tt PV/kernel} & string & path to the kernel \\
    5.28  $\mathit{RW}$ &  {\tt PV/ramdisk} & string & path to the initrd \\
    5.29 @@ -3450,6 +3455,38 @@ value of the field
    5.30  \vspace{0.3cm}
    5.31  \vspace{0.3cm}
    5.32  \vspace{0.3cm}
    5.33 +\subsubsection{RPC name:~get\_DSCSIs}
    5.34 +
    5.35 +{\bf Overview:} 
    5.36 +Get the DSCSIs field of the given VM.
    5.37 +
    5.38 + \noindent {\bf Signature:} 
    5.39 +\begin{verbatim} ((DSCSI ref) Set) get_DSCSIs (session_id s, VM ref self)\end{verbatim}
    5.40 +
    5.41 +
    5.42 +\noindent{\bf Arguments:}
    5.43 +
    5.44 +
    5.45 +\vspace{0.3cm}
    5.46 +\begin{tabular}{|c|c|p{7cm}|}
    5.47 + \hline
    5.48 +{\bf type} & {\bf name} & {\bf description} \\ \hline
    5.49 +{\tt VM ref } & self & reference to the object \\ \hline 
    5.50 +
    5.51 +\end{tabular}
    5.52 +
    5.53 +\vspace{0.3cm}
    5.54 +
    5.55 + \noindent {\bf Return Type:} 
    5.56 +{\tt 
    5.57 +(DSCSI ref) Set
    5.58 +}
    5.59 +
    5.60 +
    5.61 +value of the field
    5.62 +\vspace{0.3cm}
    5.63 +\vspace{0.3cm}
    5.64 +\vspace{0.3cm}
    5.65  \subsubsection{RPC name:~get\_PV\_bootloader}
    5.66  
    5.67  {\bf Overview:} 
    5.68 @@ -5518,6 +5555,7 @@ Quals & Field & Type & Description \\
    5.69  $\mathit{RW}$ &  {\tt crash\_dump\_sr} & SR ref & The SR in which VDIs for crash dumps are created \\
    5.70  $\mathit{RO}_\mathit{run}$ &  {\tt PBDs} & (PBD ref) Set & physical blockdevices \\
    5.71  $\mathit{RO}_\mathit{run}$ &  {\tt PPCIs} & (PPCI ref) Set & physical PCI devices \\
    5.72 +$\mathit{RO}_\mathit{run}$ &  {\tt PSCSIs} & (PSCSI ref) Set & physical SCSI devices \\
    5.73  $\mathit{RO}_\mathit{run}$ &  {\tt host\_CPUs} & (host\_cpu ref) Set & The physical CPUs on this host \\
    5.74  $\mathit{RO}_\mathit{run}$ &  {\tt metrics} & host\_metrics ref & metrics associated with this host \\
    5.75  \hline
    5.76 @@ -6844,6 +6882,38 @@ value of the field
    5.77  \vspace{0.3cm}
    5.78  \vspace{0.3cm}
    5.79  \vspace{0.3cm}
    5.80 +\subsubsection{RPC name:~get\_PSCSIs}
    5.81 +
    5.82 +{\bf Overview:} 
    5.83 +Get the PSCSIs field of the given host.
    5.84 +
    5.85 + \noindent {\bf Signature:} 
    5.86 +\begin{verbatim} ((PSCSI ref) Set) get_PSCSIs (session_id s, host ref self)\end{verbatim}
    5.87 +
    5.88 +
    5.89 +\noindent{\bf Arguments:}
    5.90 +
    5.91 +
    5.92 +\vspace{0.3cm}
    5.93 +\begin{tabular}{|c|c|p{7cm}|}
    5.94 + \hline
    5.95 +{\bf type} & {\bf name} & {\bf description} \\ \hline
    5.96 +{\tt host ref } & self & reference to the object \\ \hline 
    5.97 +
    5.98 +\end{tabular}
    5.99 +
   5.100 +\vspace{0.3cm}
   5.101 +
   5.102 + \noindent {\bf Return Type:} 
   5.103 +{\tt 
   5.104 +(PSCSI ref) Set
   5.105 +}
   5.106 +
   5.107 +
   5.108 +value of the field
   5.109 +\vspace{0.3cm}
   5.110 +\vspace{0.3cm}
   5.111 +\vspace{0.3cm}
   5.112  \subsubsection{RPC name:~get\_host\_CPUs}
   5.113  
   5.114  {\bf Overview:} 
   5.115 @@ -15723,6 +15793,1096 @@ all fields from the object
   5.116  
   5.117  \vspace{1cm}
   5.118  \newpage
   5.119 +\section{Class: DSCSI}
   5.120 +\subsection{Fields for class: DSCSI}
   5.121 +\begin{longtable}{|lllp{0.38\textwidth}|}
   5.122 +\hline
   5.123 +\multicolumn{1}{|l}{Name} & \multicolumn{3}{l|}{\bf DSCSI} \\
   5.124 +\multicolumn{1}{|l}{Description} & \multicolumn{3}{l|}{\parbox{11cm}{\em A
   5.125 +half-virtualized SCSI device.}} \\
   5.126 +\hline
   5.127 +Quals & Field & Type & Description \\
   5.128 +\hline
   5.129 +$\mathit{RO}_\mathit{run}$ &  {\tt uuid} & string & unique identifier/object reference \\
   5.130 +$\mathit{RO}_\mathit{inst}$ &  {\tt VM} & VM ref & the virtual machine \\
   5.131 +$\mathit{RO}_\mathit{inst}$ &  {\tt PSCSI} & PSCSI ref & the physical SCSI device \\
   5.132 +$\mathit{RO}_\mathit{run}$ &  {\tt virtual\_host} & int & the virtual host number \\
   5.133 +$\mathit{RO}_\mathit{run}$ &  {\tt virtual\_channel} & int & the virtual channel number \\
   5.134 +$\mathit{RO}_\mathit{run}$ &  {\tt virtual\_target} & int & the virtual target number \\
   5.135 +$\mathit{RO}_\mathit{run}$ &  {\tt virtual\_lun} & int & the virtual logical unit number \\
   5.136 +$\mathit{RO}_\mathit{inst}$ &  {\tt virtual\_HCTL} & string & the virtual HCTL \\
   5.137 +$\mathit{RO}_\mathit{run}$ &  {\tt runtime\_properties} & (string $\rightarrow$ string) Map & Device runtime properties \\
   5.138 +\hline
   5.139 +\end{longtable}
   5.140 +\subsection{RPCs associated with class: DSCSI}
   5.141 +\subsubsection{RPC name:~get\_all}
   5.142 +
   5.143 +{\bf Overview:} 
   5.144 +Return a list of all the DSCSIs known to the system.
   5.145 +
   5.146 + \noindent {\bf Signature:} 
   5.147 +\begin{verbatim} ((DSCSI ref) Set) get_all (session_id s)\end{verbatim}
   5.148 +
   5.149 +
   5.150 +\vspace{0.3cm}
   5.151 +
   5.152 + \noindent {\bf Return Type:} 
   5.153 +{\tt 
   5.154 +(DSCSI ref) Set
   5.155 +}
   5.156 +
   5.157 +
   5.158 +references to all objects
   5.159 +\vspace{0.3cm}
   5.160 +\vspace{0.3cm}
   5.161 +\vspace{0.3cm}
   5.162 +\subsubsection{RPC name:~get\_uuid}
   5.163 +
   5.164 +{\bf Overview:} 
   5.165 +Get the uuid field of the given DSCSI.
   5.166 +
   5.167 + \noindent {\bf Signature:} 
   5.168 +\begin{verbatim} string get_uuid (session_id s, DSCSI ref self)\end{verbatim}
   5.169 +
   5.170 +
   5.171 +\noindent{\bf Arguments:}
   5.172 +
   5.173 +
   5.174 +\vspace{0.3cm}
   5.175 +\begin{tabular}{|c|c|p{7cm}|}
   5.176 + \hline
   5.177 +{\bf type} & {\bf name} & {\bf description} \\ \hline
   5.178 +{\tt DSCSI ref } & self & reference to the object \\ \hline 
   5.179 +
   5.180 +\end{tabular}
   5.181 +
   5.182 +\vspace{0.3cm}
   5.183 +
   5.184 + \noindent {\bf Return Type:} 
   5.185 +{\tt
   5.186 +string
   5.187 +}
   5.188 +
   5.189 +
   5.190 +value of the field
   5.191 +\vspace{0.3cm}
   5.192 +\vspace{0.3cm}
   5.193 +\vspace{0.3cm}
   5.194 +\subsubsection{RPC name:~get\_VM}
   5.195 +
   5.196 +{\bf Overview:} 
   5.197 +Get the VM field of the given DSCSI.
   5.198 +
   5.199 + \noindent {\bf Signature:} 
   5.200 +\begin{verbatim} (VM ref) get_VM (session_id s, DSCSI ref self)\end{verbatim}
   5.201 +
   5.202 +
   5.203 +\noindent{\bf Arguments:}
   5.204 +
   5.205 +
   5.206 +\vspace{0.3cm}
   5.207 +\begin{tabular}{|c|c|p{7cm}|}
   5.208 + \hline
   5.209 +{\bf type} & {\bf name} & {\bf description} \\ \hline
   5.210 +{\tt DSCSI ref } & self & reference to the object \\ \hline 
   5.211 +
   5.212 +\end{tabular}
   5.213 +
   5.214 +\vspace{0.3cm}
   5.215 +
   5.216 + \noindent {\bf Return Type:} 
   5.217 +{\tt 
   5.218 +VM ref
   5.219 +}
   5.220 +
   5.221 +
   5.222 +value of the field
   5.223 +\vspace{0.3cm}
   5.224 +\vspace{0.3cm}
   5.225 +\vspace{0.3cm}
   5.226 +\subsubsection{RPC name:~get\_PSCSI}
   5.227 +
   5.228 +{\bf Overview:} 
   5.229 +Get the PSCSI field of the given DSCSI.
   5.230 +
   5.231 + \noindent {\bf Signature:} 
   5.232 +\begin{verbatim} (PSCSI ref) get_PSCSI (session_id s, DSCSI ref self)\end{verbatim}
   5.233 +
   5.234 +
   5.235 +\noindent{\bf Arguments:}
   5.236 +
   5.237 +
   5.238 +\vspace{0.3cm}
   5.239 +\begin{tabular}{|c|c|p{7cm}|}
   5.240 + \hline
   5.241 +{\bf type} & {\bf name} & {\bf description} \\ \hline
   5.242 +{\tt DSCSI ref } & self & reference to the object \\ \hline 
   5.243 +
   5.244 +\end{tabular}
   5.245 +
   5.246 +\vspace{0.3cm}
   5.247 +
   5.248 + \noindent {\bf Return Type:} 
   5.249 +{\tt 
   5.250 +PSCSI ref
   5.251 +}
   5.252 +
   5.253 +
   5.254 +value of the field
   5.255 +\vspace{0.3cm}
   5.256 +\vspace{0.3cm}
   5.257 +\vspace{0.3cm}
   5.258 +\subsubsection{RPC name:~get\_virtual\_host}
   5.259 +
   5.260 +{\bf Overview:} 
   5.261 +Get the virtual\_host field of the given DSCSI.
   5.262 +
   5.263 + \noindent {\bf Signature:} 
   5.264 +\begin{verbatim} int get_virtual_host (session_id s, DSCSI ref self)\end{verbatim}
   5.265 +
   5.266 +
   5.267 +\noindent{\bf Arguments:}
   5.268 +
   5.269 +
   5.270 +\vspace{0.3cm}
   5.271 +\begin{tabular}{|c|c|p{7cm}|}
   5.272 + \hline
   5.273 +{\bf type} & {\bf name} & {\bf description} \\ \hline
   5.274 +{\tt DSCSI ref } & self & reference to the object \\ \hline 
   5.275 +
   5.276 +\end{tabular}
   5.277 +
   5.278 +\vspace{0.3cm}
   5.279 +
   5.280 + \noindent {\bf Return Type:} 
   5.281 +{\tt 
   5.282 +int
   5.283 +}
   5.284 +
   5.285 +
   5.286 +value of the field
   5.287 +\vspace{0.3cm}
   5.288 +\vspace{0.3cm}
   5.289 +\vspace{0.3cm}
   5.290 +\subsubsection{RPC name:~get\_virtual\_channel}
   5.291 +
   5.292 +{\bf Overview:} 
   5.293 +Get the virtual\_channel field of the given DSCSI.
   5.294 +
   5.295 + \noindent {\bf Signature:} 
   5.296 +\begin{verbatim} int get_virtual_channel (session_id s, DSCSI ref self)\end{verbatim}
   5.297 +
   5.298 +
   5.299 +\noindent{\bf Arguments:}
   5.300 +
   5.301 +
   5.302 +\vspace{0.3cm}
   5.303 +\begin{tabular}{|c|c|p{7cm}|}
   5.304 + \hline
   5.305 +{\bf type} & {\bf name} & {\bf description} \\ \hline
   5.306 +{\tt DSCSI ref } & self & reference to the object \\ \hline 
   5.307 +
   5.308 +\end{tabular}
   5.309 +
   5.310 +\vspace{0.3cm}
   5.311 +
   5.312 + \noindent {\bf Return Type:} 
   5.313 +{\tt 
   5.314 +int
   5.315 +}
   5.316 +
   5.317 +
   5.318 +value of the field
   5.319 +\vspace{0.3cm}
   5.320 +\vspace{0.3cm}
   5.321 +\vspace{0.3cm}
   5.322 +\subsubsection{RPC name:~get\_virtual\_target}
   5.323 +
   5.324 +{\bf Overview:} 
   5.325 +Get the virtual\_target field of the given DSCSI.
   5.326 +
   5.327 + \noindent {\bf Signature:} 
   5.328 +\begin{verbatim} int get_virtual_target (session_id s, DSCSI ref self)\end{verbatim}
   5.329 +
   5.330 +
   5.331 +\noindent{\bf Arguments:}
   5.332 +
   5.333 +
   5.334 +\vspace{0.3cm}
   5.335 +\begin{tabular}{|c|c|p{7cm}|}
   5.336 + \hline
   5.337 +{\bf type} & {\bf name} & {\bf description} \\ \hline
   5.338 +{\tt DSCSI ref } & self & reference to the object \\ \hline 
   5.339 +
   5.340 +\end{tabular}
   5.341 +
   5.342 +\vspace{0.3cm}
   5.343 +
   5.344 + \noindent {\bf Return Type:} 
   5.345 +{\tt 
   5.346 +int
   5.347 +}
   5.348 +
   5.349 +
   5.350 +value of the field
   5.351 +\vspace{0.3cm}
   5.352 +\vspace{0.3cm}
   5.353 +\vspace{0.3cm}
   5.354 +\subsubsection{RPC name:~get\_virtual\_lun}
   5.355 +
   5.356 +{\bf Overview:} 
   5.357 +Get the virtual\_lun field of the given DSCSI.
   5.358 +
   5.359 + \noindent {\bf Signature:} 
   5.360 +\begin{verbatim} int get_virtual_lun (session_id s, DSCSI ref self)\end{verbatim}
   5.361 +
   5.362 +
   5.363 +\noindent{\bf Arguments:}
   5.364 +
   5.365 +
   5.366 +\vspace{0.3cm}
   5.367 +\begin{tabular}{|c|c|p{7cm}|}
   5.368 + \hline
   5.369 +{\bf type} & {\bf name} & {\bf description} \\ \hline
   5.370 +{\tt DSCSI ref } & self & reference to the object \\ \hline 
   5.371 +
   5.372 +\end{tabular}
   5.373 +
   5.374 +\vspace{0.3cm}
   5.375 +
   5.376 + \noindent {\bf Return Type:} 
   5.377 +{\tt 
   5.378 +int
   5.379 +}
   5.380 +
   5.381 +
   5.382 +value of the field
   5.383 +\vspace{0.3cm}
   5.384 +\vspace{0.3cm}
   5.385 +\vspace{0.3cm}
   5.386 +\subsubsection{RPC name:~get\_virtual\_HCTL}
   5.387 +
   5.388 +{\bf Overview:} 
   5.389 +Get the virtual\_HCTL field of the given DSCSI.
   5.390 +
   5.391 + \noindent {\bf Signature:} 
   5.392 +\begin{verbatim} string get_virtual_HCTL (session_id s, DSCSI ref self)\end{verbatim}
   5.393 +
   5.394 +
   5.395 +\noindent{\bf Arguments:}
   5.396 +
   5.397 +
   5.398 +\vspace{0.3cm}
   5.399 +\begin{tabular}{|c|c|p{7cm}|}
   5.400 + \hline
   5.401 +{\bf type} & {\bf name} & {\bf description} \\ \hline
   5.402 +{\tt DSCSI ref } & self & reference to the object \\ \hline 
   5.403 +
   5.404 +\end{tabular}
   5.405 +
   5.406 +\vspace{0.3cm}
   5.407 +
   5.408 + \noindent {\bf Return Type:} 
   5.409 +{\tt 
   5.410 +string
   5.411 +}
   5.412 +
   5.413 +
   5.414 +value of the field
   5.415 +\vspace{0.3cm}
   5.416 +\vspace{0.3cm}
   5.417 +\vspace{0.3cm}
   5.418 +\subsubsection{RPC name:~get\_runtime\_properties}
   5.419 +
   5.420 +{\bf Overview:} 
   5.421 +Get the runtime\_properties field of the given DSCSI.
   5.422 +
   5.423 + \noindent {\bf Signature:} 
   5.424 +\begin{verbatim} ((string -> string) Map) get_runtime_properties (session_id s, DSCSI ref self)\end{verbatim}
   5.425 +
   5.426 +
   5.427 +\noindent{\bf Arguments:}
   5.428 +
   5.429 + 
   5.430 +\vspace{0.3cm}
   5.431 +\begin{tabular}{|c|c|p{7cm}|}
   5.432 + \hline
   5.433 +{\bf type} & {\bf name} & {\bf description} \\ \hline
   5.434 +{\tt DSCSI ref } & self & reference to the object \\ \hline 
   5.435 +
   5.436 +\end{tabular}
   5.437 +
   5.438 +\vspace{0.3cm}
   5.439 +
   5.440 + \noindent {\bf Return Type:} 
   5.441 +{\tt 
   5.442 +(string $\rightarrow$ string) Map
   5.443 +}
   5.444 +
   5.445 +
   5.446 +value of the field
   5.447 +\vspace{0.3cm}
   5.448 +\vspace{0.3cm}
   5.449 +\vspace{0.3cm}
   5.450 +\subsubsection{RPC name:~create}
   5.451 +
   5.452 +{\bf Overview:} 
   5.453 +Create a new DSCSI instance, and return its handle.
   5.454 +
   5.455 + \noindent {\bf Signature:} 
   5.456 +\begin{verbatim} (DSCSI ref) create (session_id s, DSCSI record args)\end{verbatim}
   5.457 +
   5.458 +
   5.459 +\noindent{\bf Arguments:}
   5.460 +
   5.461 +
   5.462 +\vspace{0.3cm}
   5.463 +\begin{tabular}{|c|c|p{7cm}|}
   5.464 + \hline
   5.465 +{\bf type} & {\bf name} & {\bf description} \\ \hline
   5.466 +{\tt DSCSI record } & args & All constructor arguments \\ \hline 
   5.467 +
   5.468 +\end{tabular}
   5.469 +
   5.470 +\vspace{0.3cm}
   5.471 +
   5.472 + \noindent {\bf Return Type:} 
   5.473 +{\tt 
   5.474 +DSCSI ref
   5.475 +}
   5.476 +
   5.477 +
   5.478 +reference to the newly created object
   5.479 +\vspace{0.3cm}
   5.480 +\vspace{0.3cm}
   5.481 +\vspace{0.3cm}
   5.482 +\subsubsection{RPC name:~destroy}
   5.483 +
   5.484 +{\bf Overview:} 
   5.485 +Destroy the specified DSCSI instance.
   5.486 +
   5.487 + \noindent {\bf Signature:} 
   5.488 +\begin{verbatim} void destroy (session_id s, DSCSI ref self)\end{verbatim}
   5.489 +
   5.490 +
   5.491 +\noindent{\bf Arguments:}
   5.492 +
   5.493 +
   5.494 +\vspace{0.3cm}
   5.495 +\begin{tabular}{|c|c|p{7cm}|}
   5.496 + \hline
   5.497 +{\bf type} & {\bf name} & {\bf description} \\ \hline
   5.498 +{\tt DSCSI ref } & self & reference to the object \\ \hline 
   5.499 +
   5.500 +\end{tabular}
   5.501 +
   5.502 +\vspace{0.3cm}
   5.503 +
   5.504 + \noindent {\bf Return Type:} 
   5.505 +{\tt 
   5.506 +void
   5.507 +}
   5.508 +
   5.509 +
   5.510 +\vspace{0.3cm}
   5.511 +\vspace{0.3cm}
   5.512 +\vspace{0.3cm}
   5.513 +\subsubsection{RPC name:~get\_by\_uuid}
   5.514 +
   5.515 +{\bf Overview:} 
   5.516 +Get a reference to the DSCSI instance with the specified UUID.
   5.517 +
   5.518 + \noindent {\bf Signature:} 
   5.519 +\begin{verbatim} (DSCSI ref) get_by_uuid (session_id s, string uuid)\end{verbatim}
   5.520 +
   5.521 +
   5.522 +\noindent{\bf Arguments:}
   5.523 +
   5.524 +
   5.525 +\vspace{0.3cm}
   5.526 +\begin{tabular}{|c|c|p{7cm}|}
   5.527 + \hline
   5.528 +{\bf type} & {\bf name} & {\bf description} \\ \hline
   5.529 +{\tt string } & uuid & UUID of object to return \\ \hline 
   5.530 +
   5.531 +\end{tabular}
   5.532 +
   5.533 +\vspace{0.3cm}
   5.534 +
   5.535 + \noindent {\bf Return Type:} 
   5.536 +{\tt 
   5.537 +DSCSI ref
   5.538 +}
   5.539 +
   5.540 +
   5.541 +reference to the object
   5.542 +\vspace{0.3cm}
   5.543 +\vspace{0.3cm}
   5.544 +\vspace{0.3cm}
   5.545 +\subsubsection{RPC name:~get\_record}
   5.546 +
   5.547 +{\bf Overview:} 
   5.548 +Get a record containing the current state of the given DSCSI.
   5.549 +
   5.550 + \noindent {\bf Signature:} 
   5.551 +\begin{verbatim} (DSCSI record) get_record (session_id s, DSCSI ref self)\end{verbatim}
   5.552 +
   5.553 +
   5.554 +\noindent{\bf Arguments:}
   5.555 +
   5.556 +
   5.557 +\vspace{0.3cm}
   5.558 +\begin{tabular}{|c|c|p{7cm}|}
   5.559 + \hline
   5.560 +{\bf type} & {\bf name} & {\bf description} \\ \hline
   5.561 +{\tt DSCSI ref } & self & reference to the object \\ \hline 
   5.562 +
   5.563 +\end{tabular}
   5.564 +
   5.565 +\vspace{0.3cm}
   5.566 +
   5.567 + \noindent {\bf Return Type:} 
   5.568 +{\tt 
   5.569 +DSCSI record
   5.570 +}
   5.571 +
   5.572 +
   5.573 +all fields from the object
   5.574 +\vspace{0.3cm}
   5.575 +\vspace{0.3cm}
   5.576 +\vspace{0.3cm}
   5.577 +
   5.578 +\vspace{1cm}
   5.579 +\newpage
   5.580 +\section{Class: PSCSI}
   5.581 +\subsection{Fields for class: PSCSI}
   5.582 +\begin{longtable}{|lllp{0.38\textwidth}|}
   5.583 +\hline
   5.584 +\multicolumn{1}{|l}{Name} & \multicolumn{3}{l|}{\bf PSCSI} \\
   5.585 +\multicolumn{1}{|l}{Description} & \multicolumn{3}{l|}{\parbox{11cm}{\em A
   5.586 +physical SCSI device.}} \\
   5.587 +\hline
   5.588 +Quals & Field & Type & Description \\
   5.589 +\hline
   5.590 +$\mathit{RO}_\mathit{run}$ &  {\tt uuid} & string & unique identifier/object reference \\
   5.591 +$\mathit{RO}_\mathit{run}$ &  {\tt host} & host ref &  the physical machine to which this PSCSI is connected \\
   5.592 +$\mathit{RO}_\mathit{run}$ &  {\tt physical\_host} & int & the physical host number \\
   5.593 +$\mathit{RO}_\mathit{run}$ &  {\tt physical\_channel} & int & the physical channel number \\
   5.594 +$\mathit{RO}_\mathit{run}$ &  {\tt physical\_target} & int & the physical target number \\
   5.595 +$\mathit{RO}_\mathit{run}$ &  {\tt physical\_lun} & int & the physical logical unit number \\
   5.596 +$\mathit{RO}_\mathit{run}$ &  {\tt physical\_HCTL} & string & the physical HCTL \\
   5.597 +$\mathit{RO}_\mathit{run}$ &  {\tt vendor\_name} & string & the vendor name \\
   5.598 +$\mathit{RO}_\mathit{run}$ &  {\tt model} & string & the model \\
   5.599 +$\mathit{RO}_\mathit{run}$ &  {\tt type\_id} & int & the SCSI type ID \\
   5.600 +$\mathit{RO}_\mathit{run}$ &  {\tt type} & string &  the SCSI type \\
   5.601 +$\mathit{RO}_\mathit{run}$ &  {\tt dev\_name} & string & the SCSI device name (e.g. sda or st0) \\
   5.602 +$\mathit{RO}_\mathit{run}$ &  {\tt sg\_name} & string & the SCSI generic device name (e.g. sg0) \\
   5.603 +$\mathit{RO}_\mathit{run}$ &  {\tt revision} & string & the revision \\
   5.604 +$\mathit{RO}_\mathit{run}$ &  {\tt scsi\_id} & string & the SCSI ID \\
   5.605 +$\mathit{RO}_\mathit{run}$ &  {\tt scsi\_level} & int & the SCSI level \\
   5.606 +\hline
   5.607 +\end{longtable}
   5.608 +\subsection{RPCs associated with class: PSCSI}
   5.609 +\subsubsection{RPC name:~get\_all}
   5.610 +
   5.611 +{\bf Overview:} 
   5.612 +Return a list of all the PSCSIs known to the system.
   5.613 +
   5.614 + \noindent {\bf Signature:} 
   5.615 +\begin{verbatim} ((PSCSI ref) Set) get_all (session_id s)\end{verbatim}
   5.616 +
   5.617 +
   5.618 +\vspace{0.3cm}
   5.619 +
   5.620 + \noindent {\bf Return Type:} 
   5.621 +{\tt 
   5.622 +(PSCSI ref) Set
   5.623 +}
   5.624 +
   5.625 +
   5.626 +references to all objects
   5.627 +\vspace{0.3cm}
   5.628 +\vspace{0.3cm}
   5.629 +\vspace{0.3cm}
   5.630 +\subsubsection{RPC name:~get\_uuid}
   5.631 +
   5.632 +{\bf Overview:} 
   5.633 +Get the uuid field of the given PSCSI.
   5.634 +
   5.635 + \noindent {\bf Signature:} 
   5.636 +\begin{verbatim} string get_uuid (session_id s, PSCSI ref self)\end{verbatim}
   5.637 +
   5.638 +
   5.639 +\noindent{\bf Arguments:}
   5.640 +
   5.641 +
   5.642 +\vspace{0.3cm}
   5.643 +\begin{tabular}{|c|c|p{7cm}|}
   5.644 + \hline
   5.645 +{\bf type} & {\bf name} & {\bf description} \\ \hline
   5.646 +{\tt PSCSI ref } & self & reference to the object \\ \hline 
   5.647 +
   5.648 +\end{tabular}
   5.649 +
   5.650 +\vspace{0.3cm}
   5.651 +
   5.652 + \noindent {\bf Return Type:} 
   5.653 +{\tt 
   5.654 +string
   5.655 +}
   5.656 +
   5.657 +
   5.658 +value of the field
   5.659 +\vspace{0.3cm}
   5.660 +\vspace{0.3cm}
   5.661 +\vspace{0.3cm}
   5.662 +\subsubsection{RPC name:~get\_host}
   5.663 +
   5.664 +{\bf Overview:} 
   5.665 +Get the host field of the given PSCSI.
   5.666 +
   5.667 + \noindent {\bf Signature:} 
   5.668 +\begin{verbatim} (host ref) get_host (session_id s, PSCSI ref self)\end{verbatim}
   5.669 +
   5.670 +
   5.671 +\noindent{\bf Arguments:}
   5.672 +
   5.673 +
   5.674 +\vspace{0.3cm}
   5.675 +\begin{tabular}{|c|c|p{7cm}|}
   5.676 + \hline
   5.677 +{\bf type} & {\bf name} & {\bf description} \\ \hline
   5.678 +{\tt PSCSI ref } & self & reference to the object \\ \hline 
   5.679 +
   5.680 +\end{tabular}
   5.681 +
   5.682 +\vspace{0.3cm}
   5.683 +
   5.684 + \noindent {\bf Return Type:} 
   5.685 +{\tt 
   5.686 +host ref
   5.687 +}
   5.688 +
   5.689 +
   5.690 +value of the field
   5.691 +\vspace{0.3cm}
   5.692 +\vspace{0.3cm}
   5.693 +\vspace{0.3cm}
   5.694 +\subsubsection{RPC name:~get\_physical\_host}
   5.695 +
   5.696 +{\bf Overview:} 
   5.697 +Get the physical\_host field of the given PSCSI.
   5.698 +
   5.699 + \noindent {\bf Signature:} 
   5.700 +\begin{verbatim} int get_physical_host (session_id s, PSCSI ref self)\end{verbatim}
   5.701 +
   5.702 +
   5.703 +\noindent{\bf Arguments:}
   5.704 +
   5.705 +
   5.706 +\vspace{0.3cm}
   5.707 +\begin{tabular}{|c|c|p{7cm}|}
   5.708 + \hline
   5.709 +{\bf type} & {\bf name} & {\bf description} \\ \hline
   5.710 +{\tt PSCSI ref } & self & reference to the object \\ \hline 
   5.711 +
   5.712 +\end{tabular}
   5.713 +
   5.714 +\vspace{0.3cm}
   5.715 +
   5.716 + \noindent {\bf Return Type:} 
   5.717 +{\tt 
   5.718 +int
   5.719 +}
   5.720 +
   5.721 +
   5.722 +value of the field
   5.723 +\vspace{0.3cm}
   5.724 +\vspace{0.3cm}
   5.725 +\vspace{0.3cm}
   5.726 +\subsubsection{RPC name:~get\_physical\_channel}
   5.727 +
   5.728 +{\bf Overview:} 
   5.729 +Get the physical\_channel field of the given PSCSI.
   5.730 +
   5.731 + \noindent {\bf Signature:} 
   5.732 +\begin{verbatim} int get_physical_channel (session_id s, PSCSI ref self)\end{verbatim}
   5.733 +
   5.734 +
   5.735 +\noindent{\bf Arguments:}
   5.736 +
   5.737 +
   5.738 +\vspace{0.3cm}
   5.739 +\begin{tabular}{|c|c|p{7cm}|}
   5.740 + \hline
   5.741 +{\bf type} & {\bf name} & {\bf description} \\ \hline
   5.742 +{\tt PSCSI ref } & self & reference to the object \\ \hline 
   5.743 +
   5.744 +\end{tabular}
   5.745 +
   5.746 +\vspace{0.3cm}
   5.747 +
   5.748 + \noindent {\bf Return Type:} 
   5.749 +{\tt 
   5.750 +int
   5.751 +}
   5.752 +
   5.753 +
   5.754 +value of the field
   5.755 +\vspace{0.3cm}
   5.756 +\vspace{0.3cm}
   5.757 +\vspace{0.3cm}
   5.758 +\subsubsection{RPC name:~get\_physical\_target}
   5.759 +
   5.760 +{\bf Overview:} 
   5.761 +Get the physical\_target field of the given PSCSI.
   5.762 +
   5.763 + \noindent {\bf Signature:} 
   5.764 +\begin{verbatim} int get_physical_target (session_id s, PSCSI ref self)\end{verbatim}
   5.765 +
   5.766 +
   5.767 +\noindent{\bf Arguments:}
   5.768 +
   5.769 +
   5.770 +\vspace{0.3cm}
   5.771 +\begin{tabular}{|c|c|p{7cm}|}
   5.772 + \hline
   5.773 +{\bf type} & {\bf name} & {\bf description} \\ \hline
   5.774 +{\tt PSCSI ref } & self & reference to the object \\ \hline 
   5.775 +
   5.776 +\end{tabular}
   5.777 +
   5.778 +\vspace{0.3cm}
   5.779 +
   5.780 + \noindent {\bf Return Type:} 
   5.781 +{\tt 
   5.782 +int
   5.783 +}
   5.784 +
   5.785 +
   5.786 +value of the field
   5.787 +\vspace{0.3cm}
   5.788 +\vspace{0.3cm}
   5.789 +\vspace{0.3cm}
   5.790 +\subsubsection{RPC name:~get\_physical\_lun}
   5.791 +
   5.792 +{\bf Overview:} 
   5.793 +Get the physical\_lun field of the given PSCSI.
   5.794 +
   5.795 + \noindent {\bf Signature:} 
   5.796 +\begin{verbatim} int get_physical_lun (session_id s, PSCSI ref self)\end{verbatim}
   5.797 +
   5.798 +
   5.799 +\noindent{\bf Arguments:}
   5.800 +
   5.801 +
   5.802 +\vspace{0.3cm}
   5.803 +\begin{tabular}{|c|c|p{7cm}|}
   5.804 + \hline
   5.805 +{\bf type} & {\bf name} & {\bf description} \\ \hline
   5.806 +{\tt PSCSI ref } & self & reference to the object \\ \hline 
   5.807 +
   5.808 +\end{tabular}
   5.809 +
   5.810 +\vspace{0.3cm}
   5.811 +
   5.812 + \noindent {\bf Return Type:} 
   5.813 +{\tt 
   5.814 +int
   5.815 +}
   5.816 +
   5.817 +
   5.818 +value of the field
   5.819 +\vspace{0.3cm}
   5.820 +\vspace{0.3cm}
   5.821 +\vspace{0.3cm}
   5.822 +\subsubsection{RPC name:~get\_physical\_HCTL}
   5.823 +
   5.824 +{\bf Overview:} 
   5.825 +Get the physical\_HCTL field of the given PSCSI.
   5.826 +
   5.827 + \noindent {\bf Signature:} 
   5.828 +\begin{verbatim} string get_physical_HCTL (session_id s, PSCSI ref self)\end{verbatim}
   5.829 +
   5.830 +
   5.831 +\noindent{\bf Arguments:}
   5.832 +
   5.833 +
   5.834 +\vspace{0.3cm}
   5.835 +\begin{tabular}{|c|c|p{7cm}|}
   5.836 + \hline
   5.837 +{\bf type} & {\bf name} & {\bf description} \\ \hline
   5.838 +{\tt PSCSI ref } & self & reference to the object \\ \hline 
   5.839 +
   5.840 +\end{tabular}
   5.841 +
   5.842 +\vspace{0.3cm}
   5.843 +
   5.844 + \noindent {\bf Return Type:} 
   5.845 +{\tt 
   5.846 +string
   5.847 +}
   5.848 +
   5.849 +
   5.850 +value of the field
   5.851 +\vspace{0.3cm}
   5.852 +\vspace{0.3cm}
   5.853 +\vspace{0.3cm}
   5.854 +\subsubsection{RPC name:~get\_vendor\_name}
   5.855 +
   5.856 +{\bf Overview:} 
   5.857 +Get the vendor\_name field of the given PSCSI.
   5.858 +
   5.859 + \noindent {\bf Signature:} 
   5.860 +\begin{verbatim} string get_vendor_name (session_id s, PSCSI ref self)\end{verbatim}
   5.861 +
   5.862 +
   5.863 +\noindent{\bf Arguments:}
   5.864 +
   5.865 +
   5.866 +\vspace{0.3cm}
   5.867 +\begin{tabular}{|c|c|p{7cm}|}
   5.868 + \hline
   5.869 +{\bf type} & {\bf name} & {\bf description} \\ \hline
   5.870 +{\tt PSCSI ref } & self & reference to the object \\ \hline 
   5.871 +
   5.872 +\end{tabular}
   5.873 +
   5.874 +\vspace{0.3cm}
   5.875 +
   5.876 + \noindent {\bf Return Type:} 
   5.877 +{\tt 
   5.878 +string
   5.879 +}
   5.880 +
   5.881 +
   5.882 +value of the field
   5.883 +\vspace{0.3cm}
   5.884 +\vspace{0.3cm}
   5.885 +\vspace{0.3cm}
   5.886 +\subsubsection{RPC name:~get\_model}
   5.887 +
   5.888 +{\bf Overview:} 
   5.889 +Get the model field of the given PSCSI.
   5.890 +
   5.891 + \noindent {\bf Signature:} 
   5.892 +\begin{verbatim} string get_model (session_id s, PSCSI ref self)\end{verbatim}
   5.893 +
   5.894 +
   5.895 +\noindent{\bf Arguments:}
   5.896 +
   5.897 +
   5.898 +\vspace{0.3cm}
   5.899 +\begin{tabular}{|c|c|p{7cm}|}
   5.900 + \hline
   5.901 +{\bf type} & {\bf name} & {\bf description} \\ \hline
   5.902 +{\tt PSCSI ref } & self & reference to the object \\ \hline 
   5.903 +
   5.904 +\end{tabular}
   5.905 +
   5.906 +\vspace{0.3cm}
   5.907 +
   5.908 + \noindent {\bf Return Type:} 
   5.909 +{\tt 
   5.910 +string
   5.911 +}
   5.912 +
   5.913 +
   5.914 +value of the field
   5.915 +\vspace{0.3cm}
   5.916 +\vspace{0.3cm}
   5.917 +\vspace{0.3cm}
   5.918 +\subsubsection{RPC name:~get\_type\_id}
   5.919 +
   5.920 +{\bf Overview:} 
   5.921 +Get the type\_id field of the given PSCSI.
   5.922 +
   5.923 + \noindent {\bf Signature:} 
   5.924 +\begin{verbatim} int get_type_id (session_id s, PSCSI ref self)\end{verbatim}
   5.925 +
   5.926 +
   5.927 +\noindent{\bf Arguments:}
   5.928 +
   5.929 +
   5.930 +\vspace{0.3cm}
   5.931 +\begin{tabular}{|c|c|p{7cm}|}
   5.932 + \hline
   5.933 +{\bf type} & {\bf name} & {\bf description} \\ \hline
   5.934 +{\tt PSCSI ref } & self & reference to the object \\ \hline 
   5.935 +
   5.936 +\end{tabular}
   5.937 +
   5.938 +\vspace{0.3cm}
   5.939 +
   5.940 + \noindent {\bf Return Type:} 
   5.941 +{\tt 
   5.942 +int
   5.943 +}
   5.944 +
   5.945 +
   5.946 +value of the field
   5.947 +\vspace{0.3cm}
   5.948 +\vspace{0.3cm}
   5.949 +\vspace{0.3cm}
   5.950 +\subsubsection{RPC name:~get\_type}
   5.951 +
   5.952 +{\bf Overview:} 
   5.953 +Get the type field of the given PSCSI.
   5.954 +
   5.955 + \noindent {\bf Signature:} 
   5.956 +\begin{verbatim} string get_type (session_id s, PSCSI ref self)\end{verbatim}
   5.957 +
   5.958 +
   5.959 +\noindent{\bf Arguments:}
   5.960 +
   5.961 +
   5.962 +\vspace{0.3cm}
   5.963 +\begin{tabular}{|c|c|p{7cm}|}
   5.964 + \hline
   5.965 +{\bf type} & {\bf name} & {\bf description} \\ \hline
   5.966 +{\tt PSCSI ref } & self & reference to the object \\ \hline 
   5.967 +
   5.968 +\end{tabular}
   5.969 +
   5.970 +\vspace{0.3cm}
   5.971 +
   5.972 + \noindent {\bf Return Type:} 
   5.973 +{\tt 
   5.974 +string
   5.975 +}
   5.976 +
   5.977 +
   5.978 +value of the field
   5.979 +\vspace{0.3cm}
   5.980 +\vspace{0.3cm}
   5.981 +\vspace{0.3cm}
   5.982 +\subsubsection{RPC name:~get\_dev\_name}
   5.983 +
   5.984 +{\bf Overview:} 
   5.985 +Get the dev\_name field of the given PSCSI.
   5.986 +
   5.987 + \noindent {\bf Signature:} 
   5.988 +\begin{verbatim} string get_dev_name (session_id s, PSCSI ref self)\end{verbatim}
   5.989 +
   5.990 +
   5.991 +\noindent{\bf Arguments:}
   5.992 +
   5.993 +
   5.994 +\vspace{0.3cm}
   5.995 +\begin{tabular}{|c|c|p{7cm}|}
   5.996 + \hline
   5.997 +{\bf type} & {\bf name} & {\bf description} \\ \hline
   5.998 +{\tt PSCSI ref } & self & reference to the object \\ \hline 
   5.999 +
  5.1000 +\end{tabular}
  5.1001 +
  5.1002 +\vspace{0.3cm}
  5.1003 +
  5.1004 + \noindent {\bf Return Type:} 
  5.1005 +{\tt 
  5.1006 +string
  5.1007 +}
  5.1008 +
  5.1009 +
  5.1010 +value of the field
  5.1011 +\vspace{0.3cm}
  5.1012 +\vspace{0.3cm}
  5.1013 +\vspace{0.3cm}
  5.1014 +\subsubsection{RPC name:~get\_sg\_name}
  5.1015 +
  5.1016 +{\bf Overview:} 
  5.1017 +Get the sg\_name field of the given PSCSI.
  5.1018 +
  5.1019 + \noindent {\bf Signature:} 
  5.1020 +\begin{verbatim} string get_sg_name (session_id s, PSCSI ref self)\end{verbatim}
  5.1021 +
  5.1022 +
  5.1023 +\noindent{\bf Arguments:}
  5.1024 +
  5.1025 +
  5.1026 +\vspace{0.3cm}
  5.1027 +\begin{tabular}{|c|c|p{7cm}|}
  5.1028 + \hline
  5.1029 +{\bf type} & {\bf name} & {\bf description} \\ \hline
  5.1030 +{\tt PSCSI ref } & self & reference to the object \\ \hline 
  5.1031 +
  5.1032 +\end{tabular}
  5.1033 +
  5.1034 +\vspace{0.3cm}
  5.1035 +
  5.1036 + \noindent {\bf Return Type:} 
  5.1037 +{\tt 
  5.1038 +string
  5.1039 +}
  5.1040 +
  5.1041 +
  5.1042 +value of the field
  5.1043 +\vspace{0.3cm}
  5.1044 +\vspace{0.3cm}
  5.1045 +\vspace{0.3cm}
  5.1046 +\subsubsection{RPC name:~get\_revision}
  5.1047 +
  5.1048 +{\bf Overview:} 
  5.1049 +Get the revision field of the given PSCSI.
  5.1050 +
  5.1051 + \noindent {\bf Signature:} 
  5.1052 +\begin{verbatim} string get_revision (session_id s, PSCSI ref self)\end{verbatim}
  5.1053 +
  5.1054 +
  5.1055 +\noindent{\bf Arguments:}
  5.1056 +
  5.1057 +
  5.1058 +\vspace{0.3cm}
  5.1059 +\begin{tabular}{|c|c|p{7cm}|}
  5.1060 + \hline
  5.1061 +{\bf type} & {\bf name} & {\bf description} \\ \hline
  5.1062 +{\tt PSCSI ref } & self & reference to the object \\ \hline 
  5.1063 +
  5.1064 +\end{tabular}
  5.1065 +
  5.1066 +\vspace{0.3cm}
  5.1067 +
  5.1068 + \noindent {\bf Return Type:} 
  5.1069 +{\tt 
  5.1070 +string
  5.1071 +}
  5.1072 +
  5.1073 +
  5.1074 +value of the field
  5.1075 +\vspace{0.3cm}
  5.1076 +\vspace{0.3cm}
  5.1077 +\vspace{0.3cm}
  5.1078 +\subsubsection{RPC name:~get\_scsi\_id}
  5.1079 +
  5.1080 +{\bf Overview:} 
  5.1081 +Get the scsi\_id field of the given PSCSI.
  5.1082 +
  5.1083 + \noindent {\bf Signature:} 
  5.1084 +\begin{verbatim} string get_scsi_id (session_id s, PSCSI ref self)\end{verbatim}
  5.1085 +
  5.1086 +
  5.1087 +\noindent{\bf Arguments:}
  5.1088 +
  5.1089 +
  5.1090 +\vspace{0.3cm}
  5.1091 +\begin{tabular}{|c|c|p{7cm}|}
  5.1092 + \hline
  5.1093 +{\bf type} & {\bf name} & {\bf description} \\ \hline
  5.1094 +{\tt PSCSI ref } & self & reference to the object \\ \hline 
  5.1095 +
  5.1096 +\end{tabular}
  5.1097 +
  5.1098 +\vspace{0.3cm}
  5.1099 +
  5.1100 + \noindent {\bf Return Type:} 
  5.1101 +{\tt 
  5.1102 +string
  5.1103 +}
  5.1104 +
  5.1105 +
  5.1106 +value of the field
  5.1107 +\vspace{0.3cm}
  5.1108 +\vspace{0.3cm}
  5.1109 +\vspace{0.3cm}
  5.1110 +\subsubsection{RPC name:~get\_scsi\_level}
  5.1111 +
  5.1112 +{\bf Overview:} 
  5.1113 +Get the scsi\_level field of the given PSCSI.
  5.1114 +
  5.1115 + \noindent {\bf Signature:} 
  5.1116 +\begin{verbatim} int get_scsi_level (session_id s, PSCSI ref self)\end{verbatim}
  5.1117 +
  5.1118 +
  5.1119 +\noindent{\bf Arguments:}
  5.1120 +
  5.1121 +
  5.1122 +\vspace{0.3cm}
  5.1123 +\begin{tabular}{|c|c|p{7cm}|}
  5.1124 + \hline
  5.1125 +{\bf type} & {\bf name} & {\bf description} \\ \hline
  5.1126 +{\tt PSCSI ref } & self & reference to the object \\ \hline 
  5.1127 +
  5.1128 +\end{tabular}
  5.1129 +
  5.1130 +\vspace{0.3cm}
  5.1131 +
  5.1132 + \noindent {\bf Return Type:} 
  5.1133 +{\tt 
  5.1134 +int
  5.1135 +}
  5.1136 +
  5.1137 +
  5.1138 +value of the field
  5.1139 +\vspace{0.3cm}
  5.1140 +\vspace{0.3cm}
  5.1141 +\vspace{0.3cm}
  5.1142 +\subsubsection{RPC name:~get\_by\_uuid}
  5.1143 +
  5.1144 +{\bf Overview:} 
  5.1145 +Get a reference to the PSCSI instance with the specified UUID.
  5.1146 +
  5.1147 + \noindent {\bf Signature:} 
  5.1148 +\begin{verbatim} (PSCSI ref) get_by_uuid (session_id s, string uuid)\end{verbatim}
  5.1149 +
  5.1150 +
  5.1151 +\noindent{\bf Arguments:}
  5.1152 +
  5.1153 +
  5.1154 +\vspace{0.3cm}
  5.1155 +\begin{tabular}{|c|c|p{7cm}|}
  5.1156 + \hline
  5.1157 +{\bf type} & {\bf name} & {\bf description} \\ \hline
  5.1158 +{\tt string } & uuid & UUID of object to return \\ \hline 
  5.1159 +
  5.1160 +\end{tabular}
  5.1161 +
  5.1162 +\vspace{0.3cm}
  5.1163 +
  5.1164 + \noindent {\bf Return Type:} 
  5.1165 +{\tt 
  5.1166 +PSCSI ref
  5.1167 +}
  5.1168 +
  5.1169 +
  5.1170 +reference to the object
  5.1171 +\vspace{0.3cm}
  5.1172 +\vspace{0.3cm}
  5.1173 +\vspace{0.3cm}
  5.1174 +\subsubsection{RPC name:~get\_record}
  5.1175 +
  5.1176 +{\bf Overview:} 
  5.1177 +Get a record containing the current state of the given PSCSI.
  5.1178 +
  5.1179 + \noindent {\bf Signature:} 
  5.1180 +\begin{verbatim} (PSCSI record) get_record (session_id s, PSCSI ref self)\end{verbatim}
  5.1181 +
  5.1182 +
  5.1183 +\noindent{\bf Arguments:}
  5.1184 +
  5.1185 +
  5.1186 +\vspace{0.3cm}
  5.1187 +\begin{tabular}{|c|c|p{7cm}|}
  5.1188 + \hline
  5.1189 +{\bf type} & {\bf name} & {\bf description} \\ \hline
  5.1190 +{\tt PSCSI ref } & self & reference to the object \\ \hline 
  5.1191 +
  5.1192 +\end{tabular}
  5.1193 +
  5.1194 +\vspace{0.3cm}
  5.1195 +
  5.1196 + \noindent {\bf Return Type:} 
  5.1197 +{\tt 
  5.1198 +PSCSI record
  5.1199 +}
  5.1200 +
  5.1201 +
  5.1202 +all fields from the object
  5.1203 +\vspace{0.3cm}
  5.1204 +\vspace{0.3cm}
  5.1205 +\vspace{0.3cm}
  5.1206 +
  5.1207 +\vspace{1cm}
  5.1208 +\newpage
  5.1209  \section{Class: user}
  5.1210  \subsection{Fields for class: user}
  5.1211  \begin{longtable}{|lllp{0.38\textwidth}|}
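
The datamodel changes above introduce the DSCSI ("half-virtualized" SCSI device) and PSCSI (physical SCSI device) classes together with their accessors (VM.get_DSCSIs, host.get_PSCSIs, DSCSI.get_record, PSCSI.get_record, and so on). A minimal Python sketch of exercising these RPCs over xend's Xen-API XML-RPC interface follows; the endpoint URL, port and credentials are illustrative assumptions and are not part of this changeset.

    import xmlrpclib

    # Assumed Xen-API endpoint and credentials -- adjust for the local xend setup.
    server = xmlrpclib.Server('http://localhost:9363/')
    session = server.session.login_with_password('user', 'password')['Value']

    # List physical SCSI devices on the first host, via the new host.get_PSCSIs RPC.
    host = server.host.get_all(session)['Value'][0]
    for pscsi in server.host.get_PSCSIs(session, host)['Value']:
        print server.PSCSI.get_record(session, pscsi)['Value']

    # List the virtual SCSI devices attached to the first VM, via VM.get_DSCSIs.
    vm = server.VM.get_all(session)['Value'][0]
    for dscsi in server.VM.get_DSCSIs(session, vm)['Value']:
        print server.DSCSI.get_record(session, dscsi)['Value']
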
     6.1 --- a/tools/blktap/drivers/block-qcow.c	Wed Oct 22 11:38:22 2008 +0900
     6.2 +++ b/tools/blktap/drivers/block-qcow.c	Wed Oct 22 11:46:55 2008 +0900
     6.3 @@ -734,8 +734,8 @@ static int tdqcow_open (struct disk_driv
     6.4  
     6.5   	DPRINTF("QCOW: Opening %s\n",name);
     6.6  
     6.7 -	o_flags = O_DIRECT | O_LARGEFILE | 
     6.8 -		((flags == TD_RDONLY) ? O_RDONLY : O_RDWR);
     6.9 +	/* Since we don't handle O_DIRECT correctly, don't use it */
    6.10 +	o_flags = O_LARGEFILE | ((flags == TD_RDONLY) ? O_RDONLY : O_RDWR);
    6.11  	fd = open(name, o_flags);
    6.12  	if (fd < 0) {
    6.13  		DPRINTF("Unable to open %s (%d)\n",name,0 - errno);
    6.14 @@ -1385,7 +1385,7 @@ static int tdqcow_get_parent_id(struct d
    6.15  	filename[len]  = '\0';
    6.16  
    6.17  	id->name       = strdup(filename);
    6.18 -	id->drivertype = DISK_TYPE_QCOW;
    6.19 +	id->drivertype = DISK_TYPE_AIO;
    6.20  	err            = 0;
    6.21   out:
    6.22  	free(buf);
    6.23 @@ -1397,17 +1397,15 @@ static int tdqcow_validate_parent(struct
    6.24  {
    6.25  	struct stat stats;
    6.26  	uint64_t psize, csize;
    6.27 -	struct tdqcow_state *c = (struct tdqcow_state *)child->private;
    6.28 -	struct tdqcow_state *p = (struct tdqcow_state *)parent->private;
    6.29  	
    6.30 -	if (stat(p->name, &stats))
    6.31 +	if (stat(parent->name, &stats))
    6.32  		return -EINVAL;
    6.33 -	if (get_filesize(p->name, &psize, &stats))
    6.34 +	if (get_filesize(parent->name, &psize, &stats))
    6.35  		return -EINVAL;
    6.36  
    6.37 -	if (stat(c->name, &stats))
    6.38 +	if (stat(child->name, &stats))
    6.39  		return -EINVAL;
    6.40 -	if (get_filesize(c->name, &csize, &stats))
    6.41 +	if (get_filesize(child->name, &csize, &stats))
    6.42  		return -EINVAL;
    6.43  
    6.44  	if (csize != psize)
     7.1 --- a/tools/blktap/drivers/block-qcow2.c	Wed Oct 22 11:38:22 2008 +0900
     7.2 +++ b/tools/blktap/drivers/block-qcow2.c	Wed Oct 22 11:46:55 2008 +0900
     7.3 @@ -34,6 +34,7 @@
     7.4  #include "tapdisk.h"
     7.5  #include "tapaio.h"
     7.6  #include "bswap.h"
     7.7 +#include "blk.h"
     7.8  
     7.9  #define USE_AIO
    7.10  
    7.11 @@ -1902,6 +1903,42 @@ repeat:
    7.12  
    7.13  #endif	
    7.14  
    7.15 +static int get_filesize(char *filename, uint64_t *size, struct stat *st)
    7.16 +{
    7.17 +	int fd;
    7.18 +	QCowHeader header;
    7.19 +
    7.20 +	/*Set to the backing file size*/
    7.21 +	fd = open(filename, O_RDONLY);
    7.22 +	if (fd < 0)
    7.23 +		return -1;
    7.24 +	if (read(fd, &header, sizeof(header)) < sizeof(header)) {
    7.25 +		close(fd);
    7.26 +		return -1;
    7.27 +	}
    7.28 +	close(fd);
    7.29 +	
    7.30 +	be32_to_cpus(&header.magic);
    7.31 +	be32_to_cpus(&header.version);
    7.32 +	be64_to_cpus(&header.size);
    7.33 +	if (header.magic == QCOW_MAGIC && header.version == QCOW_VERSION) {
    7.34 +		*size = header.size >> SECTOR_SHIFT;
    7.35 +		return 0;
    7.36 +	}
    7.37 +
    7.38 +	if(S_ISBLK(st->st_mode)) {
    7.39 +		fd = open(filename, O_RDONLY);
    7.40 +		if (fd < 0)
    7.41 +			return -1;
    7.42 +		if (blk_getimagesize(fd, size) != 0) {
    7.43 +			close(fd);
    7.44 +			return -1;
    7.45 +		}
    7.46 +		close(fd);
    7.47 +	} else *size = (st->st_size >> SECTOR_SHIFT);	
    7.48 +	return 0;
    7.49 +}
    7.50 +
    7.51  /**
    7.52   * @return 
    7.53   *	   0 if parent id successfully retrieved;
    7.54 @@ -1916,7 +1953,7 @@ static int qcow_get_parent_id(struct dis
    7.55  		return TD_NO_PARENT;
    7.56  
    7.57  	id->name = strdup(s->backing_file);
    7.58 -	id->drivertype = DISK_TYPE_QCOW2;
    7.59 +	id->drivertype = DISK_TYPE_AIO;
    7.60  
    7.61  	return 0;
    7.62  }
    7.63 @@ -1924,15 +1961,22 @@ static int qcow_get_parent_id(struct dis
    7.64  static int qcow_validate_parent(struct disk_driver *child, 
    7.65  		struct disk_driver *parent, td_flag_t flags)
    7.66  {
    7.67 -	struct BDRVQcowState *cs = (struct BDRVQcowState*) child->private;
    7.68 -	struct BDRVQcowState *ps = (struct BDRVQcowState*) parent->private;
    7.69 +	struct stat stats;
    7.70 +	uint64_t psize, csize;
    7.71 +	
    7.72 +	if (stat(parent->name, &stats))
    7.73 +		return -EINVAL;
    7.74 +	if (get_filesize(parent->name, &psize, &stats))
    7.75 +		return -EINVAL;
    7.76  
    7.77 -	if (ps->total_sectors != cs->total_sectors) {
    7.78 -		DPRINTF("qcow_validate_parent(): %#"PRIx64" != %#"PRIx64"\n",
    7.79 -			ps->total_sectors, cs->total_sectors);
    7.80 +	if (stat(child->name, &stats))
    7.81  		return -EINVAL;
    7.82 -	}
    7.83 -	
    7.84 +	if (get_filesize(child->name, &csize, &stats))
    7.85 +		return -EINVAL;
    7.86 +
    7.87 +	if (csize != psize)
    7.88 +		return -EINVAL;
    7.89 +
    7.90  	return 0;
    7.91  }
    7.92  
     8.1 --- a/tools/libxc/xc_domain.c	Wed Oct 22 11:38:22 2008 +0900
     8.2 +++ b/tools/libxc/xc_domain.c	Wed Oct 22 11:46:55 2008 +0900
     8.3 @@ -1049,6 +1049,18 @@ int xc_domain_get_machine_address_size(i
     8.4      return rc == 0 ? domctl.u.address_size.size : rc;
     8.5  }
     8.6  
     8.7 +int xc_domain_suppress_spurious_page_faults(int xc, uint32_t domid)
     8.8 +{
     8.9 +    DECLARE_DOMCTL;
    8.10 +
    8.11 +    memset(&domctl, 0, sizeof(domctl));
    8.12 +    domctl.domain = domid;
    8.13 +    domctl.cmd    = XEN_DOMCTL_suppress_spurious_page_faults;
    8.14 +
    8.15 +    return do_domctl(xc, &domctl);
    8.16 +
    8.17 +}
    8.18 +
    8.19  /*
    8.20   * Local variables:
    8.21   * mode: C
     9.1 --- a/tools/libxc/xenctrl.h	Wed Oct 22 11:38:22 2008 +0900
     9.2 +++ b/tools/libxc/xenctrl.h	Wed Oct 22 11:46:55 2008 +0900
     9.3 @@ -1103,6 +1103,9 @@ int xc_domain_set_machine_address_size(i
     9.4  int xc_domain_get_machine_address_size(int handle,
     9.5  				       uint32_t domid);
     9.6  
     9.7 +int xc_domain_suppress_spurious_page_faults(int handle,
     9.8 +					  uint32_t domid);
     9.9 +
    9.10  /* Set the target domain */
    9.11  int xc_domain_set_target(int xc_handle,
    9.12                           uint32_t domid,
    10.1 --- a/tools/python/xen/lowlevel/xc/xc.c	Wed Oct 22 11:38:22 2008 +0900
    10.2 +++ b/tools/python/xen/lowlevel/xc/xc.c	Wed Oct 22 11:46:55 2008 +0900
    10.3 @@ -859,6 +859,21 @@ static PyObject *pyxc_dom_set_machine_ad
    10.4      return zero;
    10.5  }
    10.6  
    10.7 +static PyObject *pyxc_dom_suppress_spurious_page_faults(XcObject *self,
    10.8 +						      PyObject *args,
    10.9 +						      PyObject *kwds)
   10.10 +{
   10.11 +    uint32_t dom;
   10.12 +
   10.13 +    if (!PyArg_ParseTuple(args, "i", &dom))
   10.14 +	return NULL;
   10.15 +
   10.16 +    if (xc_domain_suppress_spurious_page_faults(self->xc_handle, dom) != 0)
   10.17 +	return pyxc_error_to_exception();
   10.18 +
   10.19 +    Py_INCREF(zero);
   10.20 +    return zero;
   10.21 +}
   10.22  #endif /* __i386__ || __x86_64__ */
   10.23  
   10.24  static PyObject *pyxc_hvm_build(XcObject *self,
   10.25 @@ -1911,6 +1926,12 @@ static PyMethodDef pyxc_methods[] = {
   10.26        "Set maximum machine address size for this domain.\n"
   10.27        " dom [int]: Identifier of domain.\n"
   10.28        " width [int]: Maximum machine address width.\n" },
   10.29 +
   10.30 +    { "domain_suppress_spurious_page_faults",
   10.31 +      (PyCFunction)pyxc_dom_suppress_spurious_page_faults,
   10.32 +      METH_VARARGS, "\n"
   10.33 +      "Do not propagate spurious page faults to this guest.\n"
   10.34 +      " dom [int]: Identifier of domain.\n" },
   10.35  #endif
   10.36  
   10.37      { NULL, NULL, 0, NULL }
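
The libxc call xc_domain_suppress_spurious_page_faults() added above is exposed to Python as domain_suppress_spurious_page_faults() on the xc object, so the toolstack can request that spurious page faults not be propagated to a guest. A minimal usage sketch, assuming an existing domain with id 1 (example value only):

    from xen.lowlevel import xc

    xc_handle = xc.xc()
    # Ask Xen not to propagate spurious page faults to domain 1.
    xc_handle.domain_suppress_spurious_page_faults(1)
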
    11.1 --- a/tools/python/xen/util/pci.py	Wed Oct 22 11:38:22 2008 +0900
    11.2 +++ b/tools/python/xen/util/pci.py	Wed Oct 22 11:46:55 2008 +0900
    11.3 @@ -12,8 +12,8 @@ import re
    11.4  import types
    11.5  import struct
    11.6  import time
    11.7 +from xen.util import utils
    11.8  
    11.9 -PROC_MNT_PATH = '/proc/mounts'
   11.10  PROC_PCI_PATH = '/proc/bus/pci/devices'
   11.11  PROC_PCI_NUM_RESOURCES = 7
   11.12  
   11.13 @@ -97,9 +97,6 @@ MSIX_SIZE_MASK = 0x7ff
   11.14  # Global variable to store information from lspci
   11.15  lspci_info = None
   11.16  
   11.17 -# Global variable to store the sysfs mount point
   11.18 -sysfs_mnt_point = None
   11.19 -
   11.20  #Calculate PAGE_SHIFT: number of bits to shift an address to get the page number
   11.21  PAGE_SIZE = resource.getpagesize()
   11.22  PAGE_SHIFT = 0
   11.23 @@ -141,20 +138,8 @@ def parse_pci_name(pci_name_string):
   11.24   
   11.25  
   11.26  def find_sysfs_mnt():
   11.27 -    global sysfs_mnt_point
   11.28 -    if not sysfs_mnt_point is None:
   11.29 -        return sysfs_mnt_point
   11.30 -
   11.31      try:
   11.32 -        mounts_file = open(PROC_MNT_PATH,'r')
   11.33 -
   11.34 -        for line in mounts_file:
   11.35 -            sline = line.split()
   11.36 -            if len(sline)<3:
   11.37 -                continue
   11.38 -            if sline[2]=='sysfs':
   11.39 -                sysfs_mnt_point= sline[1]
   11.40 -                return sysfs_mnt_point
   11.41 +        return utils.find_sysfs_mount()
   11.42      except IOError, (errno, strerr):
   11.43          raise PciDeviceParseError(('Failed to locate sysfs mount: %s: %s (%d)'%
   11.44              (PROC_PCI_PATH, strerr, errno)))
    12.1 --- a/tools/python/xen/util/utils.py	Wed Oct 22 11:38:22 2008 +0900
    12.2 +++ b/tools/python/xen/util/utils.py	Wed Oct 22 11:46:55 2008 +0900
    12.3 @@ -48,3 +48,29 @@ def daemonize(prog, args, stdin_tmpfile=
    12.4      os.waitpid(pid, 0)
    12.5      return daemon_pid
    12.6  
    12.7 +# Global variable to store the sysfs mount point
    12.8 +sysfs_mount_point = None
    12.9 +
   12.10 +PROC_MOUNTS_PATH = '/proc/mounts'
   12.11 +
   12.12 +def find_sysfs_mount():
   12.13 +    global sysfs_mount_point
   12.14 +
   12.15 +    if not sysfs_mount_point is None:
   12.16 +        return sysfs_mount_point
   12.17 +
   12.18 +    try:
   12.19 +        mounts_file = open(PROC_MOUNTS_PATH, 'r')
   12.20 +
   12.21 +        for line in mounts_file:
   12.22 +            sline = line.split()
   12.23 +            if len(sline) < 3:
   12.24 +                continue
   12.25 +            if sline[2] == 'sysfs':
   12.26 +                sysfs_mount_point= sline[1]
   12.27 +                return sysfs_mount_point
   12.28 +    except IOError, (errno, strerr):
   12.29 +        raise
   12.30 +
   12.31 +    return None
   12.32 +
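
With find_sysfs_mount() now living in xen.util.utils, both xen/util/pci.py and xen/util/vscsi_util.py share a single sysfs lookup. A quick sketch of the helper, assuming an ordinary Linux host with sysfs mounted:

    from xen.util import utils

    # Prints the sysfs mount point (usually '/sys'), or None if /proc/mounts
    # contains no sysfs entry.
    print utils.find_sysfs_mount()
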
    13.1 --- a/tools/python/xen/util/vscsi_util.py	Wed Oct 22 11:38:22 2008 +0900
    13.2 +++ b/tools/python/xen/util/vscsi_util.py	Wed Oct 22 11:46:55 2008 +0900
    13.3 @@ -23,32 +23,40 @@
    13.4  """Support for VSCSI Devices.
    13.5  """
    13.6  import os
    13.7 +import os.path
    13.8  import sys
    13.9  import re
   13.10  import string
   13.11 +from xen.util import utils
   13.12  
   13.13 -def _vscsi_hctl_block(name, scsi_devices):
   13.14 -    """ block-device name is convert into hctl. (e.g., '/dev/sda',
   13.15 -    '0:0:0:0')"""
   13.16 +SYSFS_SCSI_PATH = "/bus/scsi/devices"
   13.17 +SYSFS_SCSI_DEV_VENDOR_PATH = '/vendor'
   13.18 +SYSFS_SCSI_DEV_MODEL_PATH = '/model'
   13.19 +SYSFS_SCSI_DEV_TYPEID_PATH = '/type'
   13.20 +SYSFS_SCSI_DEV_REVISION_PATH = '/rev'
   13.21 +SYSFS_SCSI_DEV_SCSILEVEL_PATH = '/scsi_level'
   13.22 +
   13.23 +def _vscsi_get_devname_by(name, scsi_devices):
   13.24 +    """A device name is gotten by the HCTL.
   13.25 +    (e.g., '0:0:0:0' to '/dev/sda')
   13.26 +    """
   13.27 +
   13.28      try:
   13.29          search = re.compile(r'' + name + '$', re.DOTALL)
   13.30      except Exception, e:
   13.31          raise VmError("vscsi: invalid expression. " + str(e))
   13.32 -    chk = 0
   13.33 -    for hctl, block, sg, scsi_id in scsi_devices:
   13.34 +
   13.35 +    for hctl, devname, sg, scsi_id in scsi_devices:
   13.36          if search.match(hctl):
   13.37 -            chk = 1
   13.38 -            break
   13.39 +            return (hctl, devname)
   13.40  
   13.41 -    if chk:
   13.42 -        return (hctl, block)
   13.43 -    else:
   13.44 -        return (None, None)
   13.45 +    return (None, None)
   13.46  
   13.47  
   13.48 -def _vscsi_block_scsiid_to_hctl(phyname, scsi_devices):
   13.49 -    """ block-device name is convert into hctl. (e.g., '/dev/sda',
   13.50 -    '0:0:0:0')"""
   13.51 +def _vscsi_get_hctl_by(phyname, scsi_devices):
   13.52 +    """An HCTL is gotten by the device name or the scsi_id.
   13.53 +    (e.g., '/dev/sda' to '0:0:0:0')
   13.54 +    """
   13.55      
   13.56      if re.match('/dev/sd[a-z]+([1-9]|1[0-5])?$', phyname):
   13.57          # sd driver
   13.58 @@ -63,71 +71,148 @@ def _vscsi_block_scsiid_to_hctl(phyname,
   13.59          # scsi_id -gu
   13.60          name = phyname
   13.61  
   13.62 -    chk = 0
   13.63 -    for hctl, block, sg, scsi_id in scsi_devices:
   13.64 -        if block == name:
   13.65 -            chk = 1
   13.66 -            break
   13.67 -        elif sg == name:
   13.68 -            chk = 1
   13.69 -            break
   13.70 -        elif scsi_id == name:
   13.71 -            chk = 1
   13.72 -            break
   13.73 +    for hctl, devname, sg, scsi_id in scsi_devices:
   13.74 +        if name in [devname, sg, scsi_id]:
   13.75 +            return (hctl, devname)
   13.76  
   13.77 -    if chk:
   13.78 -        return (hctl, block)
   13.79 -    else:
   13.80 -        return (None, None)
   13.81 +    return (None, None)
   13.82  
   13.83  
   13.84  def vscsi_get_scsidevices():
   13.85      """ get all scsi devices"""
   13.86  
   13.87 -    SERCH_SCSI_PATH = "/sys/bus/scsi/devices"
   13.88      devices = []
   13.89 +    sysfs_mnt = utils.find_sysfs_mount() 
   13.90  
   13.91 -    for dirpath, dirnames, files in os.walk(SERCH_SCSI_PATH):
   13.92 +    for dirpath, dirnames, files in os.walk(sysfs_mnt + SYSFS_SCSI_PATH):
   13.93          for hctl in dirnames:
   13.94              paths = os.path.join(dirpath, hctl)
   13.95 -            block = "-"
   13.96 +            devname = None
   13.97 +            sg = None
   13.98 +            scsi_id = None
   13.99              for f in os.listdir(paths):
  13.100 -                if re.match('^block', f):
  13.101 -                    os.chdir(os.path.join(paths, f))
  13.102 -                    block = os.path.basename(os.getcwd())
  13.103 -                elif re.match('^tape', f):
  13.104 -                    os.chdir(os.path.join(paths, f))
  13.105 -                    block = os.path.basename(os.getcwd())
  13.106 -                elif re.match('^scsi_changer', f):
  13.107 -                    os.chdir(os.path.join(paths, f))
  13.108 -                    block = os.path.basename(os.getcwd())
  13.109 -                elif re.match('^onstream_tape', f):
  13.110 -                    os.chdir(os.path.join(paths, f))
  13.111 -                    block = os.path.basename(os.getcwd())
  13.112 +                realpath = os.path.realpath(os.path.join(paths, f))
  13.113 +                if  re.match('^block', f) or \
  13.114 +                    re.match('^tape', f) or \
  13.115 +                    re.match('^scsi_changer', f) or \
  13.116 +                    re.match('^onstream_tape', f):
  13.117 +                    devname = os.path.basename(realpath)
  13.118  
  13.119                  if re.match('^scsi_generic', f):
  13.120 -                    os.chdir(os.path.join(paths, f))
  13.121 -                    sg = os.path.basename(os.getcwd())
  13.122 +                    sg = os.path.basename(realpath)
  13.123                      lines = os.popen('/sbin/scsi_id -gu -s /class/scsi_generic/' + sg).read().split()
  13.124 -                    if len(lines) == 0:
  13.125 -                        scsi_id = '-'
  13.126 -                    else:
  13.127 +                    if len(lines):
  13.128                          scsi_id = lines[0]
  13.129  
  13.130 -            devices.append([hctl, block, sg, scsi_id])
  13.131 +            devices.append([hctl, devname, sg, scsi_id])
  13.132  
  13.133      return devices
  13.134  
  13.135  
  13.136 -def vscsi_search_hctl_and_block(device):
  13.137 -
  13.138 -    scsi_devices = vscsi_get_scsidevices()
  13.139 +def vscsi_get_hctl_and_devname_by(target, scsi_devices = None):
  13.140 +    if scsi_devices is None:
  13.141 +        scsi_devices = vscsi_get_scsidevices()
  13.142  
  13.143 -    tmp = device.split(':')
  13.144 -    if len(tmp) == 4:
  13.145 -        (hctl, block) = _vscsi_hctl_block(device, scsi_devices)
  13.146 +    if len(target.split(':')) == 4:
  13.147 +        return _vscsi_get_devname_by(target, scsi_devices)
  13.148      else:
  13.149 -        (hctl, block) = _vscsi_block_scsiid_to_hctl(device, scsi_devices)
  13.150 +        return _vscsi_get_hctl_by(target, scsi_devices)
  13.151  
  13.152 -    return (hctl, block)
  13.153  
  13.154 +def get_scsi_vendor(pHCTL):
  13.155 +    try:
  13.156 +        sysfs_mnt = utils.find_sysfs_mount() 
  13.157 +        sysfs_scsi_dev_path = \
  13.158 +            os.path.join(sysfs_mnt + SYSFS_SCSI_PATH, pHCTL)
  13.159 +        scsi_vendor = \
  13.160 +            os.popen('cat ' + sysfs_scsi_dev_path + \
  13.161 +                              SYSFS_SCSI_DEV_VENDOR_PATH).read()
  13.162 +        return scsi_vendor.splitlines()[0]
  13.163 +    except:
  13.164 +        return None
  13.165 +
  13.166 +def get_scsi_model(pHCTL):
  13.167 +    try:
  13.168 +        sysfs_mnt = utils.find_sysfs_mount() 
  13.169 +        sysfs_scsi_dev_path = \
  13.170 +            os.path.join(sysfs_mnt + SYSFS_SCSI_PATH, pHCTL)
  13.171 +        scsi_model = \
  13.172 +            os.popen('cat ' + sysfs_scsi_dev_path + \
  13.173 +                              SYSFS_SCSI_DEV_MODEL_PATH).read()
  13.174 +        return scsi_model.splitlines()[0]
  13.175 +    except:
  13.176 +        return None
  13.177 +
  13.178 +def get_scsi_typeid(pHCTL):
  13.179 +    try:
  13.180 +        sysfs_mnt = utils.find_sysfs_mount() 
  13.181 +        sysfs_scsi_dev_path = \
  13.182 +            os.path.join(sysfs_mnt + SYSFS_SCSI_PATH, pHCTL)
  13.183 +        scsi_typeid = \
  13.184 +            os.popen('cat ' + sysfs_scsi_dev_path + \
  13.185 +                              SYSFS_SCSI_DEV_TYPEID_PATH).read()
  13.186 +        return int(scsi_typeid.splitlines()[0])
  13.187 +    except:
  13.188 +        return None
  13.189 +
  13.190 +def get_scsi_revision(pHCTL):
  13.191 +    try:
  13.192 +        sysfs_mnt = utils.find_sysfs_mount() 
  13.193 +        sysfs_scsi_dev_path = \
  13.194 +            os.path.join(sysfs_mnt + SYSFS_SCSI_PATH, pHCTL)
  13.195 +        scsi_revision = \
  13.196 +            os.popen('cat ' + sysfs_scsi_dev_path + \
  13.197 +                              SYSFS_SCSI_DEV_REVISION_PATH).read()
  13.198 +        return scsi_revision.splitlines()[0]
  13.199 +    except:
  13.200 +        return None
  13.201 +
  13.202 +def get_scsi_scsilevel(pHCTL):
  13.203 +    try:
  13.204 +        sysfs_mnt = utils.find_sysfs_mount() 
  13.205 +        sysfs_scsi_dev_path = \
  13.206 +            os.path.join(sysfs_mnt + SYSFS_SCSI_PATH, pHCTL)
  13.207 +        scsi_scsilevel = \
  13.208 +            os.popen('cat ' + sysfs_scsi_dev_path + \
  13.209 +                              SYSFS_SCSI_DEV_SCSILEVEL_PATH).read()
  13.210 +        return int(scsi_scsilevel.splitlines()[0])
  13.211 +    except:
  13.212 +        return None
  13.213 +
  13.214 +def get_all_scsi_devices():
  13.215 +
  13.216 +    scsi_devs = []
  13.217 +
  13.218 +    for scsi_info in vscsi_get_scsidevices():
  13.219 +        scsi_dev = {
  13.220 +            'physical_HCTL': scsi_info[0],
  13.221 +            'dev_name': None,
  13.222 +            'sg_name': scsi_info[2],
  13.223 +            'scsi_id': None
  13.224 +        }
  13.225 +        if scsi_info[1] is not None:
  13.226 +            scsi_dev['dev_name'] = scsi_info[1] 
  13.227 +        if scsi_info[3] is not None:
  13.228 +            scsi_dev['scsi_id'] = scsi_info[3] 
  13.229 +
  13.230 +        scsi_dev['vendor_name'] = \
  13.231 +            get_scsi_vendor(scsi_dev['physical_HCTL'])
  13.232 +        scsi_dev['model'] = \
  13.233 +            get_scsi_model(scsi_dev['physical_HCTL'])
  13.234 +        scsi_dev['type_id'] = \
  13.235 +            get_scsi_typeid(scsi_dev['physical_HCTL'])
  13.236 +        scsi_dev['revision'] = \
  13.237 +            get_scsi_revision(scsi_dev['physical_HCTL'])
  13.238 +        scsi_dev['scsi_level'] = \
  13.239 +            get_scsi_scsilevel(scsi_dev['physical_HCTL'])
  13.240 +
  13.241 +        try:
  13.242 +            lsscsi_info = os.popen('lsscsi ' + scsi_dev['physical_HCTL']).read().split()
  13.243 +            scsi_dev['type'] = lsscsi_info[1]
  13.244 +        except:
  13.245 +            scsi_dev['type'] = None
  13.246 +
  13.247 +        scsi_devs.append(scsi_dev)
  13.248 +
  13.249 +    return scsi_devs
  13.250 +
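The reworked helpers above resolve a user-supplied identifier (an HCTL such as '1:0:0:1', a block device such as '/dev/sdb', an sg node, or a scsi_id) against the [hctl, devname, sg, scsi_id] records returned by vscsi_get_scsidevices(). A minimal sketch of that lookup, using an invented device list in place of a live sysfs walk (the entries and the helper name below are illustrative only, not part of this changeset):

    # Illustrative sketch of the lookup done by vscsi_get_hctl_and_devname_by();
    # the device list is invented and would normally come from
    # vscsi_get_scsidevices().
    scsi_devices = [
        # [hctl, devname, sg, scsi_id]
        ['1:0:0:1', 'sdb', 'sg1', '36000c29a1example'],
        ['1:0:0:2', 'sdc', 'sg2', '36000c29a2example'],
    ]

    def get_hctl_and_devname_by(target, scsi_devices):
        # A target with four ':'-separated fields is already an HCTL;
        # anything else is matched against devname, sg or scsi_id.
        if len(target.split(':')) == 4:
            for hctl, devname, sg, scsi_id in scsi_devices:
                if hctl == target:
                    return (hctl, devname)
        else:
            name = target.rsplit('/', 1)[-1]   # '/dev/sdb' -> 'sdb'
            for hctl, devname, sg, scsi_id in scsi_devices:
                if name in [devname, sg, scsi_id]:
                    return (hctl, devname)
        return (None, None)

    print get_hctl_and_devname_by('/dev/sdb', scsi_devices)   # ('1:0:0:1', 'sdb')
    print get_hctl_and_devname_by('1:0:0:2', scsi_devices)    # ('1:0:0:2', 'sdc')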
    14.1 --- a/tools/python/xen/xend/XendAPI.py	Wed Oct 22 11:38:22 2008 +0900
    14.2 +++ b/tools/python/xen/xend/XendAPI.py	Wed Oct 22 11:46:55 2008 +0900
    14.3 @@ -42,6 +42,8 @@ from XendPIF import XendPIF
    14.4  from XendPBD import XendPBD
    14.5  from XendPPCI import XendPPCI
    14.6  from XendDPCI import XendDPCI
    14.7 +from XendPSCSI import XendPSCSI
    14.8 +from XendDSCSI import XendDSCSI
    14.9  from XendXSPolicy import XendXSPolicy, XendACMPolicy
   14.10  
   14.11  from XendAPIConstants import *
   14.12 @@ -480,7 +482,9 @@ classes = {
   14.13      'PBD'          : valid_object("PBD"),
   14.14      'PIF_metrics'  : valid_object("PIF_metrics"),
   14.15      'PPCI'         : valid_object("PPCI"),
   14.16 -    'DPCI'         : valid_object("DPCI")
   14.17 +    'DPCI'         : valid_object("DPCI"),
   14.18 +    'PSCSI'        : valid_object("PSCSI"),
   14.19 +    'DSCSI'        : valid_object("DSCSI")
   14.20  }
   14.21  
   14.22  autoplug_classes = {
   14.23 @@ -491,6 +495,8 @@ autoplug_classes = {
   14.24      'PIF_metrics' : XendPIFMetrics,
   14.25      'PPCI'        : XendPPCI,
   14.26      'DPCI'        : XendDPCI,
   14.27 +    'PSCSI'       : XendPSCSI,
   14.28 +    'DSCSI'       : XendDSCSI,
   14.29      'XSPolicy'    : XendXSPolicy,
   14.30      'ACMPolicy'   : XendACMPolicy,
   14.31  }
   14.32 @@ -881,6 +887,7 @@ class XendAPI(object):
   14.33                      'PBDs',
   14.34                      'PIFs',
   14.35                      'PPCIs',
   14.36 +                    'PSCSIs',
   14.37                      'host_CPUs',
   14.38                      'cpu_configuration',
   14.39                      'metrics',
   14.40 @@ -961,6 +968,8 @@ class XendAPI(object):
   14.41          return xen_api_success(XendNode.instance().get_PIF_refs())
   14.42      def host_get_PPCIs(self, session, ref):
   14.43          return xen_api_success(XendNode.instance().get_PPCI_refs())
   14.44 +    def host_get_PSCSIs(self, session, ref):
   14.45 +        return xen_api_success(XendNode.instance().get_PSCSI_refs())
   14.46      def host_get_host_CPUs(self, session, host_ref):
   14.47          return xen_api_success(XendNode.instance().get_host_cpu_refs())
   14.48      def host_get_metrics(self, _, ref):
   14.49 @@ -1037,7 +1046,8 @@ class XendAPI(object):
   14.50                    'logging': {},
   14.51                    'PIFs': XendPIF.get_all(),
   14.52                    'PBDs': XendPBD.get_all(),
   14.53 -                  'PPCIs': XendPPCI.get_all()}
   14.54 +                  'PPCIs': XendPPCI.get_all(),
   14.55 +                  'PSCSIs': XendPSCSI.get_all()}
   14.56          return xen_api_success(record)
   14.57  
   14.58      # class methods
   14.59 @@ -1158,6 +1168,7 @@ class XendAPI(object):
   14.60                    'VBDs',
   14.61                    'VTPMs',
   14.62                    'DPCIs',
   14.63 +                  'DSCSIs',
   14.64                    'tools_version',
   14.65                    'domid',
   14.66                    'is_control_domain',
   14.67 @@ -1304,6 +1315,10 @@ class XendAPI(object):
   14.68          dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
   14.69          return xen_api_success(dom.get_dpcis())
   14.70      
   14.71 +    def VM_get_DSCSIs(self, session, vm_ref):
   14.72 +        dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
   14.73 +        return xen_api_success(dom.get_dscsis())
   14.74 +
   14.75      def VM_get_tools_version(self, session, vm_ref):
   14.76          dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
   14.77          return dom.get_tools_version()
   14.78 @@ -1684,6 +1699,7 @@ class XendAPI(object):
   14.79              'VBDs': xeninfo.get_vbds(),
   14.80              'VTPMs': xeninfo.get_vtpms(),
   14.81              'DPCIs': xeninfo.get_dpcis(),
   14.82 +            'DSCSIs': xeninfo.get_dscsis(),
   14.83              'PV_bootloader': xeninfo.info.get('PV_bootloader'),
   14.84              'PV_kernel': xeninfo.info.get('PV_kernel'),
   14.85              'PV_ramdisk': xeninfo.info.get('PV_ramdisk'),
    15.1 --- a/tools/python/xen/xend/XendConfig.py	Wed Oct 22 11:38:22 2008 +0900
    15.2 +++ b/tools/python/xen/xend/XendConfig.py	Wed Oct 22 11:46:55 2008 +0900
    15.3 @@ -26,6 +26,8 @@ from xen.xend import XendOptions
    15.4  from xen.xend import XendAPIStore
    15.5  from xen.xend.XendPPCI import XendPPCI
    15.6  from xen.xend.XendDPCI import XendDPCI
    15.7 +from xen.xend.XendPSCSI import XendPSCSI
    15.8 +from xen.xend.XendDSCSI import XendDSCSI
    15.9  from xen.xend.XendError import VmError
   15.10  from xen.xend.XendDevices import XendDevices
   15.11  from xen.xend.PrettyPrint import prettyprintstring
   15.12 @@ -210,6 +212,7 @@ XENAPI_CFG_TYPES = {
   15.13      'cpuid' : dict,
   15.14      'cpuid_check' : dict,
   15.15      'machine_address_size': int,
   15.16 +    'suppress_spurious_page_faults': bool0,
   15.17  }
   15.18  
   15.19  # List of legacy configuration keys that have no equivalent in the
   15.20 @@ -781,8 +784,8 @@ class XendConfig(dict):
   15.21          log.debug('_sxp_to_xapi(%s)' % scrub_password(sxp_cfg))
   15.22  
   15.23          # _parse_sxp() below will call device_add() and construct devices.
   15.24 -        # Some devices (currently only pci) may require VM's uuid, so
   15.25 -        # setup self['uuid'] beforehand.
   15.26 +        # Some devices may require VM's uuid, so setup self['uuid']
   15.27 +        # beforehand.
   15.28          self['uuid'] = sxp.child_value(sxp_cfg, 'uuid', uuid.createString())
   15.29  
   15.30          cfg = self._parse_sxp(sxp_cfg)
   15.31 @@ -1221,29 +1224,28 @@ class XendConfig(dict):
   15.32              dev_type = sxp.name(config)
   15.33              dev_info = {}
   15.34  
   15.35 -            if dev_type == 'pci' or dev_type == 'vscsi':
   15.36 +            if dev_type == 'pci':
   15.37                  pci_devs_uuid = sxp.child_value(config, 'uuid',
   15.38                                                  uuid.createString())
   15.39  
   15.40                  pci_dict = self.pci_convert_sxp_to_dict(config)
   15.41                  pci_devs = pci_dict['devs']
   15.42  
   15.43 -                if dev_type != 'vscsi':
   15.44 -                    # create XenAPI DPCI objects.
   15.45 -                    for pci_dev in pci_devs:
   15.46 -                        dpci_uuid = pci_dev.get('uuid')
   15.47 -                        ppci_uuid = XendPPCI.get_by_sbdf(pci_dev['domain'],
   15.48 -                                                        pci_dev['bus'],
   15.49 -                                                        pci_dev['slot'],
   15.50 -                                                        pci_dev['func'])
   15.51 -                        if ppci_uuid is None:
   15.52 -                            continue
   15.53 -                        dpci_record = {
   15.54 -                            'VM': self['uuid'],
   15.55 -                            'PPCI': ppci_uuid,
   15.56 -                            'hotplug_slot': pci_dev.get('vslot', 0)
   15.57 -                        }
   15.58 -                        XendDPCI(dpci_uuid, dpci_record)
   15.59 +                # create XenAPI DPCI objects.
   15.60 +                for pci_dev in pci_devs:
   15.61 +                    dpci_uuid = pci_dev.get('uuid')
   15.62 +                    ppci_uuid = XendPPCI.get_by_sbdf(pci_dev['domain'],
   15.63 +                                                     pci_dev['bus'],
   15.64 +                                                     pci_dev['slot'],
   15.65 +                                                     pci_dev['func'])
   15.66 +                    if ppci_uuid is None:
   15.67 +                        continue
   15.68 +                    dpci_record = {
   15.69 +                        'VM': self['uuid'],
   15.70 +                        'PPCI': ppci_uuid,
   15.71 +                        'hotplug_slot': pci_dev.get('vslot', 0)
   15.72 +                    }
   15.73 +                    XendDPCI(dpci_uuid, dpci_record)
   15.74  
   15.75                  target['devices'][pci_devs_uuid] = (dev_type,
   15.76                                                      {'devs': pci_devs,
   15.77 @@ -1253,6 +1255,30 @@ class XendConfig(dict):
   15.78  
   15.79                  return pci_devs_uuid
   15.80  
   15.81 +            if dev_type == 'vscsi':
   15.82 +                vscsi_devs_uuid = sxp.child_value(config, 'uuid',
   15.83 +                                                  uuid.createString())
   15.84 +                vscsi_dict = self.vscsi_convert_sxp_to_dict(config)
   15.85 +                vscsi_devs = vscsi_dict['devs']
   15.86 +
   15.87 +                # create XenAPI DSCSI objects.
   15.88 +                for vscsi_dev in vscsi_devs:
   15.89 +                    dscsi_uuid = vscsi_dev.get('uuid')
   15.90 +                    pscsi_uuid = XendPSCSI.get_by_HCTL(vscsi_dev['p-dev'])
   15.91 +                    if pscsi_uuid is None:
   15.92 +                        continue
   15.93 +                    dscsi_record = {
   15.94 +                        'VM': self['uuid'],
   15.95 +                        'PSCSI': pscsi_uuid,
   15.96 +                        'virtual_HCTL': vscsi_dev.get('v-dev')
   15.97 +                    }
   15.98 +                    XendDSCSI(dscsi_uuid, dscsi_record)
   15.99 +
  15.100 +                target['devices'][vscsi_devs_uuid] = \
  15.101 +                    (dev_type, {'devs': vscsi_devs, 'uuid': vscsi_devs_uuid} )
  15.102 +                log.debug("XendConfig: reading device: %s" % vscsi_devs)
  15.103 +                return vscsi_devs_uuid
  15.104 +
  15.105              for opt_val in config[1:]:
  15.106                  try:
  15.107                      opt, val = opt_val
  15.108 @@ -1558,6 +1584,86 @@ class XendConfig(dict):
  15.109  
  15.110          return dev_config
  15.111  
  15.112 +    def vscsi_convert_sxp_to_dict(self, dev_sxp):
  15.113 +        """Convert vscsi device sxp to dict
  15.114 +        @param dev_sxp: device configuration
  15.115 +        @type  dev_sxp: SXP object (parsed config)
  15.116 +        @return: dev_config
  15.117 +        @rtype: dictionary
  15.118 +        """
  15.119 +        # Parse the device SXPs. In most cases, the SXP looks
  15.120 +        # like this:
  15.121 +        #
  15.122 +        # [device, [vif, [mac, xx:xx:xx:xx:xx:xx], [ip 1.3.4.5]]]
  15.123 +        #
  15.124 +        # However, for SCSI devices it looks like this:
  15.125 +        #
  15.126 +        # [device,
  15.127 +        #   [vscsi,
  15.128 +        #     [dev,
  15.129 +        #       [devid, 0], [p-devname, sdb], [p-dev, 1:0:0:1],
  15.130 +        #       [v-dev, 0:0:0:0], [state, Initialising]
  15.131 +        #     ],
  15.132 +        #     [dev,
  15.133 +        #       [devid, 0], [p-devname, sdc], [p-dev, 1:0:0:2],
  15.134 +        #       [v-dev, 0:0:0:1], [state, Initialising]
  15.135 +        #     ]
  15.136 +        #   ],
  15.137 +        #   [vscsi,
  15.138 +        #     [dev,
  15.139 +        #       [devid, 1], [p-devname, sdg], [p-dev, 2:0:0:0],
  15.140 +        #       [v-dev, 1:0:0:0], [state, Initialising]
  15.141 +        #     ],
  15.142 +        #     [dev,
  15.143 +        #       [devid, 1], [p-devname, sdh], [p-dev, 2:0:0:1],
  15.144 +        #       [v-dev, 1:0:0:1], [state, Initialising]
  15.145 +        #     ]
  15.146 +        #   ]
  15.147 +        # ]
  15.148 +        #
  15.149 +        # The reason for this difference is that vscsiif.py needs
  15.150 +        # all the SCSI device configurations with the same host
  15.151 +        # number at the same time when creating the devices.
  15.152 +
  15.153 +        # For SCSI device hotplug support, the SXP of SCSI devices is
  15.154 +        # extended like this:
  15.155 +        #
  15.156 +        # [device,
  15.157 +        #   [vscsi,
  15.158 +        #     [dev,
  15.159 +        #       [devid, 0], [p-devname, sdd], [p-dev, 1:0:0:3],
  15.160 +        #       [v-dev, 0:0:0:2], [state, Initialising]
  15.161 +        #     ]
  15.162 +        #   ]
  15.163 +        # ]
  15.164 +        #
  15.165 +        # state 'Initialising' indicates that the device is being attached,
  15.166 +        # while state 'Closing' indicates that the device is being detached.
  15.167 +        #
  15.168 +        # The Dict looks like this:
  15.169 +        #
  15.170 +        # { devs: [ {devid: 0, p-devname: sdd, p-dev: 1:0:0:3,
  15.171 +        #            v-dev: 0:0:0:2, state: Initialising} ] }
  15.172 +
  15.173 +        dev_config = {}
  15.174 +
  15.175 +        vscsi_devs = []
  15.176 +        for vscsi_dev in sxp.children(dev_sxp, 'dev'):
  15.177 +            vscsi_dev_info = {}
  15.178 +            for opt_val in vscsi_dev[1:]:
  15.179 +                try:
  15.180 +                    opt, val = opt_val
  15.181 +                    vscsi_dev_info[opt] = val
  15.182 +                except TypeError:
  15.183 +                    pass
  15.184 +            # append uuid for each vscsi device.
  15.185 +            vscsi_uuid = vscsi_dev_info.get('uuid', uuid.createString())
  15.186 +            vscsi_dev_info['uuid'] = vscsi_uuid
  15.187 +            vscsi_devs.append(vscsi_dev_info)
  15.188 +        dev_config['devs'] = vscsi_devs 
  15.189 +
  15.190 +        return dev_config
  15.191 +
  15.192      def console_add(self, protocol, location, other_config = {}):
  15.193          dev_uuid = uuid.createString()
  15.194          if protocol == 'vt100':
  15.195 @@ -1631,7 +1737,7 @@ class XendConfig(dict):
  15.196  
  15.197              dev_type, dev_info = self['devices'][dev_uuid]
  15.198  
  15.199 -            if dev_type == 'pci' or dev_type == 'vscsi': # Special case for pci
  15.200 +            if dev_type == 'pci': # Special case for pci
  15.201                  pci_dict = self.pci_convert_sxp_to_dict(config)
  15.202                  pci_devs = pci_dict['devs']
  15.203  
  15.204 @@ -1639,28 +1745,52 @@ class XendConfig(dict):
  15.205                  for dpci_uuid in XendDPCI.get_by_VM(self['uuid']):
  15.206                      XendAPIStore.deregister(dpci_uuid, "DPCI")
  15.207  
  15.208 -                if dev_type != 'vscsi':
  15.209 -                    # create XenAPI DPCI objects.
  15.210 -                    for pci_dev in pci_devs:
  15.211 -                        dpci_uuid = pci_dev.get('uuid')
  15.212 -                        ppci_uuid = XendPPCI.get_by_sbdf(pci_dev['domain'],
  15.213 -                                                         pci_dev['bus'],
  15.214 -                                                         pci_dev['slot'],
  15.215 -                                                         pci_dev['func'])
  15.216 -                        if ppci_uuid is None:
  15.217 -                            continue
  15.218 -                        dpci_record = {
  15.219 -                            'VM': self['uuid'],
  15.220 -                            'PPCI': ppci_uuid,
  15.221 -                            'hotplug_slot': pci_dev.get('vslot', 0)
  15.222 -                        }
  15.223 -                        XendDPCI(dpci_uuid, dpci_record)
  15.224 +                # create XenAPI DPCI objects.
  15.225 +                for pci_dev in pci_devs:
  15.226 +                    dpci_uuid = pci_dev.get('uuid')
  15.227 +                    ppci_uuid = XendPPCI.get_by_sbdf(pci_dev['domain'],
  15.228 +                                                     pci_dev['bus'],
  15.229 +                                                     pci_dev['slot'],
  15.230 +                                                     pci_dev['func'])
  15.231 +                    if ppci_uuid is None:
  15.232 +                        continue
  15.233 +                    dpci_record = {
  15.234 +                        'VM': self['uuid'],
  15.235 +                        'PPCI': ppci_uuid,
  15.236 +                        'hotplug_slot': pci_dev.get('vslot', 0)
  15.237 +                    }
  15.238 +                    XendDPCI(dpci_uuid, dpci_record)
  15.239  
  15.240                  self['devices'][dev_uuid] = (dev_type,
  15.241                                               {'devs': pci_devs,
  15.242                                                'uuid': dev_uuid})
  15.243                  return True
  15.244                  
  15.245 +            if dev_type == 'vscsi': # Special case for vscsi
  15.246 +                vscsi_dict = self.vscsi_convert_sxp_to_dict(config)
  15.247 +                vscsi_devs = vscsi_dict['devs']
  15.248 +
  15.249 +                # destroy existing XenAPI DSCSI objects
  15.250 +                for dscsi_uuid in XendDSCSI.get_by_VM(self['uuid']):
  15.251 +                    XendAPIStore.deregister(dscsi_uuid, "DSCSI")
  15.252 +
  15.253 +                # create XenAPI DSCSI objects.
  15.254 +                for vscsi_dev in vscsi_devs:
  15.255 +                    dscsi_uuid = vscsi_dev.get('uuid')
  15.256 +                    pscsi_uuid = XendPSCSI.get_by_HCTL(vscsi_dev['p-dev'])
  15.257 +                    if pscsi_uuid is None:
  15.258 +                        continue
  15.259 +                    dscsi_record = {
  15.260 +                        'VM': self['uuid'],
  15.261 +                        'PSCSI': pscsi_uuid,
  15.262 +                        'virtual_HCTL': vscsi_dev.get('v-dev')
  15.263 +                    }
  15.264 +                    XendDSCSI(dscsi_uuid, dscsi_record)
  15.265 +
  15.266 +                self['devices'][dev_uuid] = \
  15.267 +                    (dev_type, {'devs': vscsi_devs, 'uuid': dev_uuid} )
  15.268 +                return True
  15.269 +                
  15.270              for opt_val in config[1:]:
  15.271                  try:
  15.272                      opt, val = opt_val
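The comment block in vscsi_convert_sxp_to_dict() above documents both the nested SXP layout and the resulting dictionary. A standalone sketch of that conversion, using plain nested lists in place of a parsed SXP and a fixed stand-in for uuid.createString() (the sample values are illustrative):

    # Sketch of the SXP -> dict conversion documented above.
    def vscsi_sxp_to_dict(dev_sxp, make_uuid=lambda: 'example-uuid'):
        devs = []
        for child in dev_sxp[1:]:
            if not (isinstance(child, list) and child and child[0] == 'dev'):
                continue
            dev_info = {}
            for opt_val in child[1:]:
                try:
                    opt, val = opt_val
                    dev_info[opt] = val
                except (TypeError, ValueError):
                    pass
            # Append a uuid for each vscsi device, as the real code does.
            dev_info.setdefault('uuid', make_uuid())
            devs.append(dev_info)
        return {'devs': devs}

    sample = ['vscsi',
              ['dev',
               ['devid', 0], ['p-devname', 'sdd'], ['p-dev', '1:0:0:3'],
               ['v-dev', '0:0:0:2'], ['state', 'Initialising']]]

    print vscsi_sxp_to_dict(sample)
    # -> {'devs': [{'devid': 0, 'p-devname': 'sdd', 'p-dev': '1:0:0:3',
    #               'v-dev': '0:0:0:2', 'state': 'Initialising',
    #               'uuid': 'example-uuid'}]}   (key order may vary)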
    16.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    16.2 +++ b/tools/python/xen/xend/XendDSCSI.py	Wed Oct 22 11:46:55 2008 +0900
    16.3 @@ -0,0 +1,174 @@
    16.4 +#============================================================================
    16.5 +# This library is free software; you can redistribute it and/or
    16.6 +# modify it under the terms of version 2.1 of the GNU Lesser General Public
    16.7 +# License as published by the Free Software Foundation.
    16.8 +#
    16.9 +# This library is distributed in the hope that it will be useful,
   16.10 +# but WITHOUT ANY WARRANTY; without even the implied warranty of
   16.11 +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   16.12 +# Lesser General Public License for more details.
   16.13 +#
   16.14 +# You should have received a copy of the GNU Lesser General Public
   16.15 +# License along with this library; if not, write to the Free Software
   16.16 +# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
   16.17 +#============================================================================
   16.18 +# Copyright FUJITSU LIMITED 2008
   16.19 +#       Masaki Kanno <kanno.masaki@jp.fujitsu.com>
   16.20 +#============================================================================
   16.21 +
   16.22 +from xen.xend.XendBase import XendBase
   16.23 +from xen.xend.XendPSCSI import XendPSCSI
   16.24 +from xen.xend import XendAPIStore
   16.25 +from xen.xend import sxp
   16.26 +from xen.xend import uuid as genuuid
   16.27 +
   16.28 +import XendDomain, XendNode
   16.29 +
   16.30 +from XendError import *
   16.31 +from XendTask import XendTask
   16.32 +from XendLogging import log
   16.33 +
   16.34 +class XendDSCSI(XendBase):
   16.35 +    """Representation of a half-virtualized SCSI device."""
   16.36 +
   16.37 +    def getClass(self):
   16.38 +        return "DSCSI"
   16.39 +
   16.40 +    def getAttrRO(self):
   16.41 +        attrRO = ['VM',
   16.42 +                  'PSCSI',
   16.43 +                  'virtual_host',
   16.44 +                  'virtual_channel',
   16.45 +                  'virtual_target',
   16.46 +                  'virtual_lun',
   16.47 +                  'virtual_HCTL',
   16.48 +                  'runtime_properties']
   16.49 +        return XendBase.getAttrRO() + attrRO
   16.50 +
   16.51 +    def getAttrRW(self):
   16.52 +        attrRW = []
   16.53 +        return XendBase.getAttrRW() + attrRW
   16.54 +
   16.55 +    def getAttrInst(self):
   16.56 +        attrInst = ['VM',
   16.57 +                    'PSCSI',
   16.58 +                    'virtual_HCTL']
   16.59 +        return XendBase.getAttrInst() + attrInst
   16.60 +
   16.61 +    def getMethods(self):
   16.62 +        methods = ['destroy']
   16.63 +        return XendBase.getMethods() + methods
   16.64 +
   16.65 +    def getFuncs(self):
   16.66 +        funcs = ['create']
   16.67 +        return XendBase.getFuncs() + funcs
   16.68 +
   16.69 +    getClass    = classmethod(getClass)
   16.70 +    getAttrRO   = classmethod(getAttrRO)
   16.71 +    getAttrRW   = classmethod(getAttrRW)
   16.72 +    getAttrInst = classmethod(getAttrInst)
   16.73 +    getMethods  = classmethod(getMethods)
   16.74 +    getFuncs    = classmethod(getFuncs)
   16.75 + 
   16.76 +    def create(self, dscsi_struct):
   16.77 +
   16.78 +        # Check if VM is valid
   16.79 +        xendom = XendDomain.instance()
   16.80 +        if not xendom.is_valid_vm(dscsi_struct['VM']):
   16.81 +            raise InvalidHandleError('VM', dscsi_struct['VM'])
   16.82 +        dom = xendom.get_vm_by_uuid(dscsi_struct['VM'])
   16.83 +
   16.84 +        # Check if PSCSI is valid
   16.85 +        xennode = XendNode.instance()
   16.86 +        pscsi_uuid = xennode.get_pscsi_by_uuid(dscsi_struct['PSCSI'])
   16.87 +        if not pscsi_uuid:
   16.88 +            raise InvalidHandleError('PSCSI', dscsi_struct['PSCSI'])
   16.89 +
   16.90 +        # Assign PSCSI to VM
   16.91 +        try:
   16.92 +            dscsi_ref = XendTask.log_progress(0, 100, \
   16.93 +                                              dom.create_dscsi, \
   16.94 +                                              dscsi_struct)
   16.95 +        except XendError, e:
   16.96 +            log.exception("Error in create_dscsi")
   16.97 +            raise
   16.98 +
   16.99 +        return dscsi_ref
  16.100 +
  16.101 +    create = classmethod(create)
  16.102 +
  16.103 +    def get_by_VM(cls, VM_ref):
  16.104 +        result = []
  16.105 +        for dscsi in XendAPIStore.get_all("DSCSI"):
  16.106 +            if dscsi.get_VM() == VM_ref:
  16.107 +                result.append(dscsi.get_uuid())
  16.108 +        return result
  16.109 +
  16.110 +    get_by_VM = classmethod(get_by_VM)
  16.111 +
  16.112 +    def __init__(self, uuid, record):
  16.113 +        XendBase.__init__(self, uuid, record)
  16.114 +        v_hctl = self.virtual_HCTL.split(':')
  16.115 +        self.virtual_host = int(v_hctl[0])
  16.116 +        self.virtual_channel = int(v_hctl[1])
  16.117 +        self.virtual_target = int(v_hctl[2])
  16.118 +        self.virtual_lun = int(v_hctl[3])
  16.119 +
  16.120 +    def get_VM(self):
  16.121 +        return self.VM
  16.122 +
  16.123 +    def get_PSCSI(self):
  16.124 +        return self.PSCSI
  16.125 +
  16.126 +    def get_virtual_host(self):
  16.127 +        return self.virtual_host
  16.128 +
  16.129 +    def get_virtual_channel(self):
  16.130 +        return self.virtual_channel
  16.131 +
  16.132 +    def get_virtual_target(self):
  16.133 +        return self.virtual_target
  16.134 +
  16.135 +    def get_virtual_lun(self):
  16.136 +        return self.virtual_lun
  16.137 +
  16.138 +    def get_virtual_HCTL(self):
  16.139 +        return self.virtual_HCTL
  16.140 +
  16.141 +    def get_runtime_properties(self):
  16.142 +        xendom = XendDomain.instance()
  16.143 +        dominfo = xendom.get_vm_by_uuid(self.VM)
  16.144 +
  16.145 +        try:
  16.146 +            device_dict = {}
  16.147 +            for device_sxp in dominfo.getDeviceSxprs('vscsi'):
  16.148 +                target_dev = None
  16.149 +                for dev in device_sxp[1][0][1]:
  16.150 +                    vdev = sxp.child_value(dev, 'v-dev')
  16.151 +                    if vdev == self.virtual_HCTL:
  16.152 +                        target_dev = dev
  16.153 +                        break
  16.154 +                if target_dev is None:
  16.155 +                    continue
  16.156 +
  16.157 +                dev_dict = {}
  16.158 +                for info in target_dev[1:]:
  16.159 +                    dev_dict[info[0]] = info[1]
  16.160 +                device_dict['dev'] = dev_dict
  16.161 +                for info in device_sxp[1][1:]:
  16.162 +                    device_dict[info[0]] = info[1]
  16.163 +
  16.164 +            return device_dict
  16.165 +        except Exception, exn:
  16.166 +            log.exception(exn)
  16.167 +            return {}
  16.168 +
  16.169 +    def destroy(self):
  16.170 +        xendom = XendDomain.instance()
  16.171 +        dom = xendom.get_vm_by_uuid(self.get_VM())
  16.172 +        if not dom:
  16.173 +            raise InvalidHandleError("VM", self.get_VM())
  16.174 +        XendTask.log_progress(0, 100, \
  16.175 +                              dom.destroy_dscsi, \
  16.176 +                              self.get_uuid())
  16.177 +
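XendDSCSI derives its virtual_host/virtual_channel/virtual_target/virtual_lun attributes by splitting virtual_HCTL, just as XendPSCSI later does for the physical side. The decomposition, shown on an illustrative value:

    # '0:0:0:2' is an example virtual HCTL, not taken from a real guest.
    virtual_HCTL = '0:0:0:2'
    host, channel, target, lun = map(int, virtual_HCTL.split(':'))
    print host, channel, target, lun    # -> 0 0 0 2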
    17.1 --- a/tools/python/xen/xend/XendDomainInfo.py	Wed Oct 22 11:38:22 2008 +0900
    17.2 +++ b/tools/python/xen/xend/XendDomainInfo.py	Wed Oct 22 11:46:55 2008 +0900
    17.3 @@ -55,9 +55,11 @@ from xen.xend.XendAPIConstants import *
    17.4  
    17.5  from xen.xend.XendVMMetrics import XendVMMetrics
    17.6  
    17.7 +from xen.xend import XendAPIStore
    17.8  from xen.xend.XendPPCI import XendPPCI
    17.9  from xen.xend.XendDPCI import XendDPCI
   17.10 -from xen.xend import XendAPIStore
   17.11 +from xen.xend.XendPSCSI import XendPSCSI
   17.12 +from xen.xend.XendDSCSI import XendDSCSI
   17.13  
   17.14  MIGRATE_TIMEOUT = 30.0
   17.15  BOOTLOADER_LOOPBACK_DEVICE = '/dev/xvdp'
   17.16 @@ -663,6 +665,9 @@ class XendDomainInfo:
   17.17                  if dev_type == 'pci':
   17.18                      for dev in dev_config_dict['devs']:
   17.19                          XendAPIStore.deregister(dev['uuid'], 'DPCI')
   17.20 +                if dev_type == 'vscsi':
   17.21 +                    for dev in dev_config_dict['devs']:
   17.22 +                        XendAPIStore.deregister(dev['uuid'], 'DSCSI')
   17.23                  elif dev_type == 'tap':
   17.24                      self.info['vbd_refs'].remove(dev_uuid)
   17.25                  else:
   17.26 @@ -786,12 +791,11 @@ class XendDomainInfo:
   17.27          if dev_class != 'vscsi':
   17.28              return False
   17.29  
   17.30 -        dev_config = self.info.pci_convert_sxp_to_dict(dev_sxp)
   17.31 +        dev_config = self.info.vscsi_convert_sxp_to_dict(dev_sxp)
   17.32          dev = dev_config['devs'][0]
   17.33 -        req_devid = sxp.child_value(dev_sxp, 'devid')
   17.34 -        req_devid = int(req_devid)
   17.35 +        req_devid = int(dev['devid'])
   17.36          existing_dev_info = self._getDeviceInfo_vscsi(req_devid, dev['v-dev'])
   17.37 -        state = sxp.child_value(dev_sxp, 'state')
   17.38 +        state = dev['state']
   17.39  
   17.40          if state == 'Initialising':
   17.41              # new create
   17.42 @@ -1502,23 +1506,18 @@ class XendDomainInfo:
   17.43          return self.info['VCPUs_max']
   17.44  
   17.45      def setVCpuCount(self, vcpus):
   17.46 -        if vcpus <= 0:
   17.47 -            raise XendError('Invalid VCPUs')
   17.48 +        def vcpus_valid(n):
   17.49 +            if n <= 0:
   17.50 +                raise XendError('VCPU count must be greater than zero')
   17.51 +            if self.domid >= 0 and n > self.info['VCPUs_max']:
   17.52 +                raise XendError('Cannot set vcpus greater than max vcpus on running domain')
   17.53 +        vcpus_valid(vcpus)
   17.54          
   17.55          self.info['vcpu_avail'] = (1 << vcpus) - 1
   17.56          if self.domid >= 0:
   17.57              self.storeVm('vcpu_avail', self.info['vcpu_avail'])
   17.58 -            # update dom differently depending on whether we are adjusting
   17.59 -            # vcpu number up or down, otherwise _vcpuDomDetails does not
   17.60 -            # disable the vcpus
   17.61 -            if self.info['VCPUs_max'] > vcpus:
   17.62 -                # decreasing
   17.63 -                self._writeDom(self._vcpuDomDetails())
   17.64 -                self.info['VCPUs_live'] = vcpus
   17.65 -            else:
   17.66 -                # same or increasing
   17.67 -                self.info['VCPUs_live'] = vcpus
   17.68 -                self._writeDom(self._vcpuDomDetails())
   17.69 +            self._writeDom(self._vcpuDomDetails())
   17.70 +            self.info['VCPUs_live'] = vcpus
   17.71          else:
   17.72              if self.info['VCPUs_max'] > vcpus:
   17.73                  # decreasing
   17.74 @@ -1528,7 +1527,7 @@ class XendDomainInfo:
   17.75                  for c in range(self.info['VCPUs_max'], vcpus):
   17.76                      self.info['cpus'].append(list())
   17.77              self.info['VCPUs_max'] = vcpus
   17.78 -            xen.xend.XendDomain.instance().managed_config_save(self)
   17.79 +        xen.xend.XendDomain.instance().managed_config_save(self)
   17.80          log.info("Set VCPU count on domain %s to %d", self.info['name_label'],
   17.81                   vcpus)
   17.82  
   17.83 @@ -2241,6 +2240,10 @@ class XendDomainInfo:
   17.84              if self.info.has_key('machine_address_size'):
   17.85                  log.debug("_initDomain: setting maximum machine address size %d" % self.info['machine_address_size'])
   17.86                  xc.domain_set_machine_address_size(self.domid, self.info['machine_address_size'])
   17.87 +
   17.88 +            if self.info.has_key('suppress_spurious_page_faults') and self.info['suppress_spurious_page_faults']:
   17.89 +                log.debug("_initDomain: suppressing spurious page faults")
   17.90 +                xc.domain_suppress_spurious_page_faults(self.domid)
   17.91                  
   17.92              self._createChannels()
   17.93  
   17.94 @@ -3233,6 +3236,9 @@ class XendDomainInfo:
   17.95      def get_dpcis(self):
   17.96          return XendDPCI.get_by_VM(self.info.get('uuid'))
   17.97  
   17.98 +    def get_dscsis(self):
   17.99 +        return XendDSCSI.get_by_VM(self.info.get('uuid'))
  17.100 +
  17.101      def create_vbd(self, xenapi_vbd, vdi_image_path):
  17.102          """Create a VBD using a VDI from XendStorageRepository.
  17.103  
  17.104 @@ -3413,6 +3419,60 @@ class XendDomainInfo:
  17.105  
  17.106          return dpci_uuid
  17.107  
  17.108 +    def create_dscsi(self, xenapi_dscsi):
  17.109 +        """Create scsi device from the passed struct in Xen API format.
  17.110 +
  17.111 +        @param xenapi_dscsi: DSCSI struct from Xen API
  17.112 +        @rtype: string
  17.113 +        @return: UUID
  17.114 +        """
  17.115 +
  17.116 +        dscsi_uuid = uuid.createString()
  17.117 +
  17.118 +        # Convert xenapi to sxp
  17.119 +        pscsi = XendAPIStore.get(xenapi_dscsi.get('PSCSI'), 'PSCSI')
  17.120 +        devid = int(xenapi_dscsi.get('virtual_HCTL').split(':')[0])
  17.121 +        target_vscsi_sxp = \
  17.122 +            ['vscsi', 
  17.123 +                ['dev',
  17.124 +                    ['devid', devid],
  17.125 +                    ['p-devname', pscsi.get_dev_name()],
  17.126 +                    ['p-dev', pscsi.get_physical_HCTL()],
  17.127 +                    ['v-dev', xenapi_dscsi.get('virtual_HCTL')],
  17.128 +                    ['state', 'Initialising'],
  17.129 +                    ['uuid', dscsi_uuid]
  17.130 +                ]
  17.131 +            ]
  17.132 +
  17.133 +        if self._stateGet() != XEN_API_VM_POWER_STATE_RUNNING:
  17.134 +
  17.135 +            cur_vscsi_sxp = self._getDeviceInfo_vscsi(devid, None)
  17.136 +
  17.137 +            if cur_vscsi_sxp is None:
  17.138 +                dev_uuid = self.info.device_add('vscsi', cfg_sxp = target_vscsi_sxp)
  17.139 +                if not dev_uuid:
  17.140 +                    raise XendError('Failed to create device')
  17.141 +
  17.142 +            else:
  17.143 +                new_vscsi_sxp = ['vscsi']
  17.144 +                for existing_dev in sxp.children(cur_vscsi_sxp, 'dev'):
  17.145 +                    new_vscsi_sxp.append(existing_dev)
  17.146 +                new_vscsi_sxp.append(sxp.child0(target_vscsi_sxp, 'dev'))
  17.147 +
  17.148 +                dev_uuid = sxp.child_value(cur_vscsi_sxp, 'uuid')
  17.149 +                self.info.device_update(dev_uuid, new_vscsi_sxp)
  17.150 +
  17.151 +            xen.xend.XendDomain.instance().managed_config_save(self)
  17.152 +
  17.153 +        else:
  17.154 +            try:
  17.155 +                self.device_configure(target_vscsi_sxp)
  17.156 +
  17.157 +            except Exception, exn:
  17.158 +                raise XendError('Failed to create device')
  17.159 +
  17.160 +        return dscsi_uuid
  17.161 +
  17.162  
  17.163      def destroy_device_by_uuid(self, dev_type, dev_uuid):
  17.164          if dev_uuid not in self.info['devices']:
  17.165 @@ -3480,6 +3540,41 @@ class XendDomainInfo:
  17.166              except Exception, exn:
  17.167                  raise XendError('Failed to destroy device')
  17.168  
  17.169 +    def destroy_dscsi(self, dev_uuid):
  17.170 +        dscsi = XendAPIStore.get(dev_uuid, 'DSCSI')
  17.171 +        devid = dscsi.get_virtual_host()
  17.172 +        vHCTL = dscsi.get_virtual_HCTL()
  17.173 +        cur_vscsi_sxp = self._getDeviceInfo_vscsi(devid, None)
  17.174 +        dev_uuid = sxp.child_value(cur_vscsi_sxp, 'uuid')
  17.175 +
  17.176 +        target_dev = None
  17.177 +        new_vscsi_sxp = ['vscsi']
  17.178 +        for dev in sxp.children(cur_vscsi_sxp, 'dev'):
  17.179 +            if vHCTL == sxp.child_value(dev, 'v-dev'):
  17.180 +                target_dev = dev
  17.181 +            else:
  17.182 +                new_vscsi_sxp.append(dev)
  17.183 +
  17.184 +        if target_dev is None:
  17.185 +            raise XendError('Failed to destroy device')
  17.186 +
  17.187 +        target_dev.append(['state', 'Closing'])
  17.188 +        target_vscsi_sxp = ['vscsi', target_dev]
  17.189 +
  17.190 +        if self._stateGet() != XEN_API_VM_POWER_STATE_RUNNING:
  17.191 +
  17.192 +            self.info.device_update(dev_uuid, new_vscsi_sxp)
  17.193 +            if len(sxp.children(new_vscsi_sxp, 'dev')) == 0:
  17.194 +                del self.info['devices'][dev_uuid]
  17.195 +            xen.xend.XendDomain.instance().managed_config_save(self)
  17.196 +
  17.197 +        else:
  17.198 +            try:
  17.199 +                self.device_configure(target_vscsi_sxp)
  17.200 +
  17.201 +            except Exception, exn:
  17.202 +                raise XendError('Failed to destroy device')
  17.203 +
  17.204      def destroy_xapi_instances(self):
  17.205          """Destroy Xen-API instances stored in XendAPIStore.
  17.206          """
  17.207 @@ -3504,6 +3599,10 @@ class XendDomainInfo:
  17.208          for dpci_uuid in XendDPCI.get_by_VM(self.info.get('uuid')):
  17.209              XendAPIStore.deregister(dpci_uuid, "DPCI")
  17.210              
  17.211 +        # Destroy DSCSI instances.
  17.212 +        for dscsi_uuid in XendDSCSI.get_by_VM(self.info.get('uuid')):
  17.213 +            XendAPIStore.deregister(dscsi_uuid, "DSCSI")
  17.214 +            
  17.215      def has_device(self, dev_class, dev_uuid):
  17.216          return (dev_uuid in self.info['%s_refs' % dev_class.lower()])
  17.217  
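create_dscsi() turns a Xen-API DSCSI struct into the vscsi SXP consumed by device_add()/device_update(), taking the devid from the host part of virtual_HCTL and the physical name and HCTL from the referenced PSCSI. A sketch of the SXP built for a hypothetical struct (the PSCSI values and UUID below are invented for illustration):

    # Hypothetical inputs; in the real code these come from XendAPIStore
    # and uuid.createString().
    xenapi_dscsi = {'VM': 'vm-uuid', 'PSCSI': 'pscsi-uuid',
                    'virtual_HCTL': '0:0:0:2'}
    pscsi_dev_name = 'sdd'        # PSCSI.get_dev_name() (assumed value)
    pscsi_hctl = '1:0:0:3'        # PSCSI.get_physical_HCTL() (assumed value)
    dscsi_uuid = 'dscsi-uuid'     # uuid.createString() in the real code

    devid = int(xenapi_dscsi['virtual_HCTL'].split(':')[0])
    target_vscsi_sxp = ['vscsi',
                        ['dev',
                         ['devid', devid],
                         ['p-devname', pscsi_dev_name],
                         ['p-dev', pscsi_hctl],
                         ['v-dev', xenapi_dscsi['virtual_HCTL']],
                         ['state', 'Initialising'],
                         ['uuid', dscsi_uuid]]]
    print target_vscsi_sxp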
    18.1 --- a/tools/python/xen/xend/XendNode.py	Wed Oct 22 11:38:22 2008 +0900
    18.2 +++ b/tools/python/xen/xend/XendNode.py	Wed Oct 22 11:46:55 2008 +0900
    18.3 @@ -22,6 +22,7 @@ import xen.lowlevel.xc
    18.4  
    18.5  from xen.util import Brctl
    18.6  from xen.util import pci as PciUtil
    18.7 +from xen.util import vscsi_util
    18.8  from xen.xend import XendAPIStore
    18.9  from xen.xend import osdep
   18.10  
   18.11 @@ -38,7 +39,8 @@ from XendNetwork import *
   18.12  from XendStateStore import XendStateStore
   18.13  from XendMonitor import XendMonitor
   18.14  from XendPPCI import XendPPCI
   18.15 -     
   18.16 +from XendPSCSI import XendPSCSI
   18.17 +
   18.18  class XendNode:
   18.19      """XendNode - Represents a Domain 0 Host."""
   18.20      
   18.21 @@ -53,6 +55,7 @@ class XendNode:
   18.22          * network
   18.23          * Storage Repository
   18.24          * PPCI
   18.25 +        * PSCSI
   18.26          """
   18.27          
   18.28          self.xc = xen.lowlevel.xc.xc()
   18.29 @@ -269,6 +272,24 @@ class XendNode:
   18.30              XendPPCI(ppci_uuid, ppci_record)
   18.31  
   18.32  
   18.33 +        # Initialise PSCSIs
   18.34 +        saved_pscsis = self.state_store.load_state('pscsi')
   18.35 +        saved_pscsi_table = {}
   18.36 +        if saved_pscsis:
   18.37 +            for pscsi_uuid, pscsi_record in saved_pscsis.items():
   18.38 +                try:
   18.39 +                    saved_pscsi_table[pscsi_record['scsi_id']] = pscsi_uuid
   18.40 +                except KeyError:
   18.41 +                    pass
   18.42 +
   18.43 +        for pscsi_record in vscsi_util.get_all_scsi_devices():
   18.44 +            if pscsi_record['scsi_id']:
   18.45 +                # If saved uuid exists, use it. Otherwise create one.
   18.46 +                pscsi_uuid = saved_pscsi_table.get(pscsi_record['scsi_id'],
   18.47 +                                                   uuid.createString())
   18.48 +                XendPSCSI(pscsi_uuid, pscsi_record)
   18.49 +
   18.50 +
   18.51  ##    def network_destroy(self, net_uuid):
   18.52   ##       del self.networks[net_uuid]
   18.53    ##      self.save_networks()
   18.54 @@ -320,6 +341,15 @@ class XendNode:
   18.55          return None
   18.56  
   18.57  
   18.58 +    def get_PSCSI_refs(self):
   18.59 +        return XendPSCSI.get_all()
   18.60 +
   18.61 +    def get_pscsi_by_uuid(self, pscsi_uuid):
   18.62 +        if pscsi_uuid in self.get_PSCSI_refs():
   18.63 +            return pscsi_uuid
   18.64 +        return None
   18.65 +
   18.66 +
   18.67      def save(self):
   18.68          # save state
   18.69          host_record = {self.uuid: {'name_label':self.name,
   18.70 @@ -333,6 +363,7 @@ class XendNode:
   18.71          self.save_PBDs()
   18.72          self.save_SRs()
   18.73          self.save_PPCIs()
   18.74 +        self.save_PSCSIs()
   18.75  
   18.76      def save_PIFs(self):
   18.77          pif_records = dict([(pif_uuid, XendAPIStore.get(
   18.78 @@ -363,6 +394,12 @@ class XendNode:
   18.79                              for ppci_uuid in XendPPCI.get_all()])
   18.80          self.state_store.save_state('ppci', ppci_records)
   18.81  
   18.82 +    def save_PSCSIs(self):
   18.83 +        pscsi_records = dict([(pscsi_uuid, XendAPIStore.get(
   18.84 +                                  pscsi_uuid, "PSCSI").get_record())
   18.85 +                            for pscsi_uuid in XendPSCSI.get_all()])
   18.86 +        self.state_store.save_state('pscsi', pscsi_records)
   18.87 +
   18.88      def shutdown(self):
   18.89          return 0
   18.90  
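PSCSI initialisation keys the saved records on scsi_id, so a device that reappears after a xend restart keeps the UUID recorded by the state store and only genuinely new devices get a fresh one. A sketch of that get-or-create pattern (the table contents are invented, and the standard-library uuid module stands in for xen.xend.uuid):

    import uuid as std_uuid   # stand-in for xen.xend.uuid

    # Saved records from the state store, keyed by scsi_id (invented data).
    saved_pscsi_table = {'36000c29a1example': 'uuid-kept-from-last-run'}

    def pscsi_uuid_for(scsi_id):
        # Reuse the saved UUID if one exists, otherwise mint a new one.
        return saved_pscsi_table.get(scsi_id, str(std_uuid.uuid4()))

    print pscsi_uuid_for('36000c29a1example')   # 'uuid-kept-from-last-run'
    print pscsi_uuid_for('36000c29a2example')   # freshly generated UUID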
    19.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    19.2 +++ b/tools/python/xen/xend/XendPSCSI.py	Wed Oct 22 11:46:55 2008 +0900
    19.3 @@ -0,0 +1,143 @@
    19.4 +#============================================================================
    19.5 +# This library is free software; you can redistribute it and/or
    19.6 +# modify it under the terms of version 2.1 of the GNU Lesser General Public
    19.7 +# License as published by the Free Software Foundation.
    19.8 +#
    19.9 +# This library is distributed in the hope that it will be useful,
   19.10 +# but WITHOUT ANY WARRANTY; without even the implied warranty of
   19.11 +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   19.12 +# Lesser General Public License for more details.
   19.13 +#
   19.14 +# You should have received a copy of the GNU Lesser General Public
   19.15 +# License along with this library; if not, write to the Free Software
   19.16 +# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
   19.17 +#============================================================================
   19.18 +# Copyright FUJITSU LIMITED 2008
   19.19 +#       Masaki Kanno <kanno.masaki@jp.fujitsu.com>
   19.20 +#============================================================================
   19.21 +
   19.22 +from xen.xend.XendBase import XendBase
   19.23 +from xen.xend.XendBase import XendAPIStore
   19.24 +from xen.xend import uuid as genuuid
   19.25 +
   19.26 +class XendPSCSI(XendBase):
   19.27 +    """Representation of a physical SCSI device."""
   19.28 +
   19.29 +    def getClass(self):
   19.30 +        return "PSCSI"
   19.31 +
   19.32 +    def getAttrRO(self):
   19.33 +        attrRO = ['host',
   19.34 +                  'physical_host',
   19.35 +                  'physical_channel',
   19.36 +                  'physical_target',
   19.37 +                  'physical_lun',
   19.38 +                  'physical_HCTL',
   19.39 +                  'vendor_name',
   19.40 +                  'model',
   19.41 +                  'type_id',
   19.42 +                  'type',
   19.43 +                  'dev_name',
   19.44 +                  'sg_name',
   19.45 +                  'revision',
   19.46 +                  'scsi_id',
   19.47 +                  'scsi_level']
   19.48 +        return XendBase.getAttrRO() + attrRO
   19.49 +
   19.50 +    def getAttrRW(self):
   19.51 +        attrRW = []
   19.52 +        return XendBase.getAttrRW() + attrRW
   19.53 +
   19.54 +    def getAttrInst(self):
   19.55 +        attrInst = []
   19.56 +        return XendBase.getAttrInst() + attrInst
   19.57 +
   19.58 +    def getMethods(self):
   19.59 +        methods = []
   19.60 +        return XendBase.getMethods() + methods
   19.61 +
   19.62 +    def getFuncs(self):
   19.63 +        funcs = []
   19.64 +        return XendBase.getFuncs() + funcs
   19.65 +
   19.66 +    getClass    = classmethod(getClass)
   19.67 +    getAttrRO   = classmethod(getAttrRO)
   19.68 +    getAttrRW   = classmethod(getAttrRW)
   19.69 +    getAttrInst = classmethod(getAttrInst)
   19.70 +    getMethods  = classmethod(getMethods)
   19.71 +    getFuncs    = classmethod(getFuncs)
   19.72 + 
   19.73 +    def get_by_HCTL(self, physical_HCTL):
   19.74 +        for pscsi in XendAPIStore.get_all("PSCSI"):
   19.75 +            if pscsi.get_physical_HCTL() == physical_HCTL:
   19.76 +                return pscsi.get_uuid()
   19.77 +        return None
   19.78 +
   19.79 +    get_by_HCTL = classmethod(get_by_HCTL)
   19.80 +
   19.81 +    def __init__(self, uuid, record):
   19.82 +        self.physical_HCTL = record['physical_HCTL']
   19.83 +        self.vendor_name = record['vendor_name']
   19.84 +        self.model = record['model']
   19.85 +        self.type_id = record['type_id']
   19.86 +        self.type = record['type']
   19.87 +        self.dev_name = record['dev_name']
   19.88 +        self.sg_name = record['sg_name']
   19.89 +        self.revision = record['revision']
   19.90 +        self.scsi_id = record['scsi_id']
   19.91 +        self.scsi_level = record['scsi_level']
   19.92 +
   19.93 +        p_hctl = self.physical_HCTL.split(':')
   19.94 +        self.physical_host = int(p_hctl[0])
   19.95 +        self.physical_channel = int(p_hctl[1])
   19.96 +        self.physical_target = int(p_hctl[2])
   19.97 +        self.physical_lun = int(p_hctl[3])
   19.98 +
   19.99 +        XendBase.__init__(self, uuid, record)
  19.100 +
  19.101 +    def get_host(self):
  19.102 +        from xen.xend import XendNode
  19.103 +        return XendNode.instance().get_uuid()
  19.104 +
  19.105 +    def get_physical_host(self):
  19.106 +        return self.physical_host
  19.107 +
  19.108 +    def get_physical_channel(self):
  19.109 +        return self.physical_channel
  19.110 +
  19.111 +    def get_physical_target(self):
  19.112 +        return self.physical_target
  19.113 +
  19.114 +    def get_physical_lun(self):
  19.115 +        return self.physical_lun
  19.116 +
  19.117 +    def get_physical_HCTL(self):
  19.118 +        return self.physical_HCTL
  19.119 +
  19.120 +    def get_vendor_name(self):
  19.121 +        return self.vendor_name
  19.122 +
  19.123 +    def get_model(self):
  19.124 +        return self.model
  19.125 +
  19.126 +    def get_type_id(self):
  19.127 +        return self.type_id
  19.128 +
  19.129 +    def get_type(self):
  19.130 +        return self.type
  19.131 +
  19.132 +    def get_dev_name(self):
  19.133 +        return self.dev_name
  19.134 +
  19.135 +    def get_sg_name(self):
  19.136 +        return self.sg_name
  19.137 +
  19.138 +    def get_revision(self):
  19.139 +        return self.revision
  19.140 +
  19.141 +    def get_scsi_id(self):
  19.142 +        return self.scsi_id
  19.143 +
  19.144 +    def get_scsi_level(self):
  19.145 +        return self.scsi_level
  19.146 +
    20.1 --- a/tools/python/xen/xend/server/vscsiif.py	Wed Oct 22 11:38:22 2008 +0900
    20.2 +++ b/tools/python/xen/xend/server/vscsiif.py	Wed Oct 22 11:46:55 2008 +0900
    20.3 @@ -125,10 +125,10 @@ class VSCSIController(DevController):
    20.4              state = self.readBackend(devid, devpath + '/state')
    20.5              localdevid = self.readBackend(devid, devpath + '/devid')
    20.6              dev_dict = {'p-dev': pdev,
    20.7 -                            'p-devname': pdevname,
    20.8 -                            'v-dev': pdevname,
    20.9 -                            'state': state,
   20.10 -                            'devid': localdevid }
   20.11 +                        'p-devname': pdevname,
   20.12 +                        'v-dev': vdev,
   20.13 +                        'state': state,
   20.14 +                        'devid': localdevid }
   20.15              vscsi_devs.append(dev_dict)
   20.16  
   20.17          config['devs'] = vscsi_devs
   20.18 @@ -168,17 +168,17 @@ class VSCSIController(DevController):
   20.19          (devid, back, front) = self.getDeviceDetails(config)
   20.20          devid = int(devid)
   20.21          vscsi_config = config['devs'][0]
   20.22 -        states = config.get('states', [])
   20.23 +        state = vscsi_config.get('state', '')
   20.24          driver_state = self.readBackend(devid, 'state')
   20.25          if str(xenbusState['Connected']) != driver_state:
   20.26              raise VmError("Driver status is not connected")
   20.27  
   20.28          uuid = self.readBackend(devid, 'uuid')
   20.29 -        if states[0] == 'Initialising':
   20.30 +        if state == 'Initialising':
   20.31              back['uuid'] = uuid
   20.32              self.writeBackend(devid, back)
   20.33  
   20.34 -        elif states[0] == 'Closing':
   20.35 +        elif state == 'Closing':
   20.36              found = False
   20.37              devs = self.readBackendList(devid, "vscsi-devs")
   20.38              vscsipath = "vscsi-devs/"
   20.39 @@ -197,8 +197,8 @@ class VSCSIController(DevController):
   20.40                  raise VmError("Device %s not connected" % vdev)
   20.41  
   20.42          else:
   20.43 -            raise XendError('Error configuring device invalid state %s'
   20.44 -                                % state)
   20.45 +            raise XendError("Error configuring device invalid "
   20.46 +                            "state '%s'" % state)
   20.47  
   20.48          self.writeBackend(devid, 'state', str(xenbusState['Reconfiguring']))
   20.49          return self.readBackend(devid, 'uuid')
    21.1 --- a/tools/python/xen/xm/create.dtd	Wed Oct 22 11:38:22 2008 +0900
    21.2 +++ b/tools/python/xen/xm/create.dtd	Wed Oct 22 11:46:55 2008 +0900
    21.3 @@ -40,6 +40,7 @@
    21.4                   vif*,
    21.5                   vtpm*,
    21.6                   pci*,
    21.7 +                 vscsi*,
    21.8                   console*,
    21.9                   platform*,
   21.10                   vcpu_param*,
   21.11 @@ -88,6 +89,10 @@
   21.12                   func            CDATA #REQUIRED
   21.13                   vslt            CDATA #IMPLIED>
   21.14  
   21.15 +<!ELEMENT vscsi  EMPTY>
   21.16 +<!ATTLIST vscsi  p-dev           CDATA #REQUIRED
   21.17 +                 v-dev           CDATA #REQUIRED>
   21.18 +
   21.19  <!ELEMENT console (other_config*)>
   21.20  <!ATTLIST console protocol       (vt100|rfb|rdp) #REQUIRED>
   21.21  
    22.1 --- a/tools/python/xen/xm/create.py	Wed Oct 22 11:38:22 2008 +0900
    22.2 +++ b/tools/python/xen/xm/create.py	Wed Oct 22 11:46:55 2008 +0900
    22.3 @@ -583,6 +583,10 @@ gopts.var('machine_address_size', val='B
    22.4            fn=set_int, default=None,
    22.5            use="""Maximum machine address size""")
    22.6  
    22.7 +gopts.var('suppress_spurious_page_faults', val='yes|no',
    22.8 +          fn=set_bool, default=None,
    22.9 +          use="""Do not inject spurious page faults into this guest""")
   22.10 +
   22.11  def err(msg):
   22.12      """Print an error to stderr and exit.
   22.13      """
   22.14 @@ -634,6 +638,9 @@ def configure_image(vals):
   22.15      if vals.machine_address_size:
   22.16          config_image.append(['machine_address_size', vals.machine_address_size])
   22.17  
   22.18 +    if vals.suppress_spurious_page_faults:
   22.19 +        config_image.append(['suppress_spurious_page_faults', vals.suppress_spurious_page_faults])
   22.20 +
   22.21      return config_image
   22.22      
   22.23  def configure_disks(config_devs, vals):
   22.24 @@ -696,11 +703,8 @@ def configure_vscsis(config_devs, vals):
   22.25  
   22.26      scsi_devices = vscsi_util.vscsi_get_scsidevices()
   22.27      for (p_dev, v_dev, backend) in vals.vscsi:
   22.28 -        tmp = p_dev.split(':')
   22.29 -        if len(tmp) == 4:
   22.30 -            (p_hctl, block) = vscsi_util._vscsi_hctl_block(p_dev, scsi_devices)
   22.31 -        else:
   22.32 -            (p_hctl, block) = vscsi_util._vscsi_block_scsiid_to_hctl(p_dev, scsi_devices)
   22.33 +        (p_hctl, devname) = \
   22.34 +            vscsi_util.vscsi_get_hctl_and_devname_by(p_dev, scsi_devices)
   22.35  
   22.36          if p_hctl == None:
   22.37              raise ValueError("Cannot find device \"%s\"" % p_dev)
   22.38 @@ -716,7 +720,7 @@ def configure_vscsis(config_devs, vals):
   22.39                          ['state', 'Initialising'], \
   22.40                          ['devid', devid], \
   22.41                          ['p-dev', p_hctl], \
   22.42 -                        ['p-devname', block], \
   22.43 +                        ['p-devname', devname], \
   22.44                          ['v-dev', v_dev] ])
   22.45  
   22.46          if vscsi_lookup_devid(devidlist, devid) == 0:
   22.47 @@ -887,7 +891,7 @@ def make_config(vals):
   22.48                     'restart', 'on_poweroff',
   22.49                     'on_reboot', 'on_crash', 'vcpus', 'vcpu_avail', 'features',
   22.50                     'on_xend_start', 'on_xend_stop', 'target', 'cpuid',
   22.51 -                   'cpuid_check', 'machine_address_size'])
   22.52 +                   'cpuid_check', 'machine_address_size', 'suppress_spurious_page_faults'])
   22.53  
   22.54      if vals.uuid is not None:
   22.55          config.append(['uuid', vals.uuid])
    23.1 --- a/tools/python/xen/xm/main.py	Wed Oct 22 11:38:22 2008 +0900
    23.2 +++ b/tools/python/xen/xm/main.py	Wed Oct 22 11:46:55 2008 +0900
    23.3 @@ -2235,12 +2235,34 @@ def vscsi_convert_sxp_to_dict(dev_sxp):
    23.4      return dev_dict
    23.5  
    23.6  def xm_scsi_list(args):
    23.7 -    xenapi_unsupported()
    23.8      (use_long, params) = arg_check_for_resource_list(args, "scsi-list")
    23.9  
   23.10      dom = params[0]
   23.11  
   23.12 -    devs = server.xend.domain.getDeviceSxprs(dom, 'vscsi')
   23.13 +    devs = []
   23.14 +    if serverType == SERVER_XEN_API:
   23.15 +
   23.16 +        dscsi_refs = server.xenapi.VM.get_DSCSIs(get_single_vm(dom))
   23.17 +        dscsi_properties = \
   23.18 +            map(server.xenapi.DSCSI.get_runtime_properties, dscsi_refs)
   23.19 +        dscsi_dict = {}
   23.20 +        for dscsi_property in dscsi_properties:
   23.21 +            devid = int(dscsi_property['dev']['devid'])
   23.22 +            try:
   23.23 +                dscsi_sxp = dscsi_dict[devid]
   23.24 +            except:
   23.25 +                dscsi_sxp = [['devs', []]]
   23.26 +                for key, value in dscsi_property.items():
   23.27 +                    if key != 'dev':
   23.28 +                        dscsi_sxp.append([key, value])
   23.29 +            dev_sxp = ['dev']
   23.30 +            dev_sxp.extend(map2sxp(dscsi_property['dev']))
   23.31 +            dscsi_sxp[0][1].append(dev_sxp)
   23.32 +            dscsi_dict[devid] = dscsi_sxp
   23.33 +        devs = map2sxp(dscsi_dict)
   23.34 +
   23.35 +    else:
   23.36 +        devs = server.xend.domain.getDeviceSxprs(dom, 'vscsi')
   23.37  
   23.38      if use_long:
   23.39          map(PrettyPrint.prettyprint, devs)
   23.40 @@ -2464,37 +2486,60 @@ def xm_pci_attach(args):
   23.41      else:
   23.42          server.xend.domain.device_configure(dom, pci)
   23.43  
   23.44 +def parse_scsi_configuration(p_scsi, v_hctl, state):
   23.45 +    v = v_hctl.split(':')
   23.46 +    if len(v) != 4:
   23.47 +        raise OptionError("Invalid argument: %s" % v_hctl)
   23.48 +
   23.49 +    p_hctl = None
   23.50 +    devname = None
   23.51 +    if p_scsi is not None:
   23.52 +        (p_hctl, devname) = \
   23.53 +            vscsi_util.vscsi_get_hctl_and_devname_by(p_scsi)
   23.54 +        if p_hctl is None:
   23.55 +            raise OptionError("Cannot find device '%s'" % p_scsi)
   23.56 +
   23.57 +    scsi = ['vscsi']
   23.58 +    scsi.append(['dev', \
   23.59 +                 ['state', state], \
   23.60 +                 ['devid', int(v[0])], \
   23.61 +                 ['p-dev', p_hctl], \
   23.62 +                 ['p-devname', devname], \
   23.63 +                 ['v-dev', v_hctl] \
   23.64 +               ])
   23.65 +
   23.66 +    return scsi
   23.67 +
   23.68  def xm_scsi_attach(args):
   23.69 -    xenapi_unsupported()
   23.70 -
   23.71      arg_check(args, 'scsi-attach', 3, 4)
   23.72 -    p_devname = args[1]
   23.73 -    v_dev = args[2]
   23.74 -
   23.75 -    v_hctl = v_dev.split(':')
   23.76 -    if len(v_hctl) != 4:
   23.77 -        raise OptionError("Invalid argument: %s" % v_dev)
   23.78 -
   23.79 -    (p_hctl, block) = vscsi_util.vscsi_search_hctl_and_block(p_devname)
   23.80 -
   23.81 -    if p_hctl == None:
   23.82 -        raise OptionError("Cannot find device \"%s\"" % p_devname)
   23.83 -
   23.84      dom = args[0]
   23.85 -    vscsi = ['vscsi']
   23.86 -    vscsi.append(['dev', \
   23.87 -                ['state', 'Initialising'], \
   23.88 -                ['devid', v_hctl[0]], \
   23.89 -                ['p-dev', p_hctl], \
   23.90 -                ['p-devname', block], \
   23.91 -                ['v-dev', v_dev] ])
   23.92 -
   23.93 -    if len(args) == 4:
   23.94 -        vscsi.append(['backend', args[3]])
   23.95 -
   23.96 -    vscsi.append(['state', 'Initialising'])
   23.97 -    vscsi.append(['devid', v_hctl[0]])
   23.98 -    server.xend.domain.device_configure(dom, vscsi)
   23.99 +    p_scsi = args[1]
  23.100 +    v_hctl = args[2]
  23.101 +    scsi = parse_scsi_configuration(p_scsi, v_hctl, 'Initialising')
  23.102 +
  23.103 +    if serverType == SERVER_XEN_API:
  23.104 +
  23.105 +        scsi_dev = sxp.children(scsi, 'dev')[0]
  23.106 +        p_hctl = sxp.child_value(scsi_dev, 'p-dev')
  23.107 +        target_ref = None
  23.108 +        for pscsi_ref in server.xenapi.PSCSI.get_all():
  23.109 +            if p_hctl == server.xenapi.PSCSI.get_physical_HCTL(pscsi_ref):
  23.110 +                target_ref = pscsi_ref
  23.111 +                break
  23.112 +        if target_ref is None:
  23.113 +            raise OptionError("Cannot find device '%s'" % p_scsi)
  23.114 +
  23.115 +        dscsi_record = {
  23.116 +            "VM":           get_single_vm(dom),
  23.117 +            "PSCSI":        target_ref,
  23.118 +            "virtual_HCTL": v_hctl
  23.119 +        }
  23.120 +        server.xenapi.DSCSI.create(dscsi_record)
  23.121 +
  23.122 +    else:
  23.123 +        if len(args) == 4:
  23.124 +            scsi.append(['backend', args[3]])
  23.125 +        server.xend.domain.device_configure(dom, scsi)
  23.126  
  23.127  def detach(args, deviceClass):
  23.128      rm_cfg = True
  23.129 @@ -2587,26 +2632,25 @@ def xm_pci_detach(args):
  23.130          server.xend.domain.device_configure(dom, pci)
  23.131  
  23.132  def xm_scsi_detach(args):
  23.133 -    xenapi_unsupported()
  23.134      arg_check(args, 'scsi-detach', 2)
  23.135 -
  23.136 -    v_dev = args[1]
  23.137 -    v_hctl = v_dev.split(':')
  23.138 -    if len(v_hctl) != 4:
  23.139 -        raise OptionError("Invalid argument: %s" % v_dev)
  23.140 -
  23.141      dom = args[0]
  23.142 -    vscsi = ['vscsi']
  23.143 -    vscsi.append(['dev', \
  23.144 -                ['state', 'Closing'], \
  23.145 -                ['devid', v_hctl[0]], \
  23.146 -                ['p-dev', ''], \
  23.147 -                ['p-devname', ''], \
  23.148 -                ['v-dev', v_dev] ])
  23.149 -
  23.150 -    vscsi.append(['state', 'Closing'])
  23.151 -    vscsi.append(['devid', v_hctl[0]])
  23.152 -    server.xend.domain.device_configure(dom, vscsi)
  23.153 +    v_hctl = args[1]
  23.154 +    scsi = parse_scsi_configuration(None, v_hctl, 'Closing')
  23.155 +
  23.156 +    if serverType == SERVER_XEN_API:
  23.157 +
  23.158 +        target_ref = None
  23.159 +        for dscsi_ref in server.xenapi.VM.get_DSCSIs(get_single_vm(dom)):
  23.160 +            if v_hctl == server.xenapi.DSCSI.get_virtual_HCTL(dscsi_ref):
  23.161 +                target_ref = dscsi_ref
  23.162 +                break
  23.163 +        if target_ref is None:
  23.164 +            raise OptionError("Device %s not assigned" % v_hctl)
  23.165 +
  23.166 +        server.xenapi.DSCSI.destroy(target_ref)
  23.167 +
  23.168 +    else:
  23.169 +        server.xend.domain.device_configure(dom, scsi)
  23.170  
  23.171  def xm_vnet_list(args):
  23.172      xenapi_unsupported()
    24.1 --- a/tools/python/xen/xm/xenapi_create.py	Wed Oct 22 11:38:22 2008 +0900
    24.2 +++ b/tools/python/xen/xm/xenapi_create.py	Wed Oct 22 11:46:55 2008 +0900
    24.3 @@ -375,6 +375,12 @@ class xenapi_create:
    24.4  
    24.5              self.create_pcis(vm_ref, pcis)
    24.6  
    24.7 +            # Now create scsis
    24.8 +
    24.9 +            scsis = vm.getElementsByTagName("vscsi")
   24.10 +
   24.11 +            self.create_scsis(vm_ref, scsis)
   24.12 +
   24.13              return vm_ref
   24.14          except:
   24.15              server.xenapi.VM.destroy(vm_ref)
   24.16 @@ -532,6 +538,33 @@ class xenapi_create:
   24.17  
   24.18          return server.xenapi.DPCI.create(dpci_record)
   24.19  
   24.20 +    def create_scsis(self, vm_ref, scsis):
   24.21 +        log(DEBUG, "create_scsis")
   24.22 +        return map(lambda scsi: self.create_scsi(vm_ref, scsi), scsis)
   24.23 +
   24.24 +    def create_scsi(self, vm_ref, scsi):
   24.25 +        log(DEBUG, "create_scsi")
   24.26 +
   24.27 +        target_ref = None
   24.28 +        for pscsi_ref in server.xenapi.PSCSI.get_all():
   24.29 +            if scsi.attributes["p-dev"].value == server.xenapi.PSCSI.get_physical_HCTL(pscsi_ref):
   24.30 +                target_ref = pscsi_ref
   24.31 +                break
   24.32 +        if target_ref is None:
   24.33 +            log(DEBUG, "create_scsi: scsi device not found")
   24.34 +            return None
   24.35 +
   24.36 +        dscsi_record = {
   24.37 +            "VM":
   24.38 +                vm_ref,
   24.39 +            "PSCSI":
   24.40 +                target_ref,
   24.41 +            "virtual_HCTL":
   24.42 +                scsi.attributes["v-dev"].value
   24.43 +        }
   24.44 +
   24.45 +        return server.xenapi.DSCSI.create(dscsi_record)
   24.46 +
   24.47  def get_child_by_name(exp, childname, default = None):
   24.48      try:
   24.49          return [child for child in sxp.children(exp)
   24.50 @@ -563,6 +596,9 @@ class sxp2xml:
   24.51          pcis_sxp = map(lambda x: x[1], [device for device in devices
   24.52                                          if device[1][0] == "pci"])
   24.53  
   24.54 +        scsis_sxp = map(lambda x: x[1], [device for device in devices
   24.55 +                                         if device[1][0] == "vscsi"])
   24.56 +
   24.57          # Create XML Document
   24.58          
   24.59          impl = getDOMImplementation()
   24.60 @@ -704,6 +740,12 @@ class sxp2xml:
   24.61  
   24.62          map(vm.appendChild, pcis)
   24.63  
   24.64 +        # And now the scsis
   24.65 +
   24.66 +        scsis = self.extract_scsis(scsis_sxp, document)
   24.67 +
   24.68 +        map(vm.appendChild, scsis)
   24.69 +
   24.70          # Last but not least the consoles...
   24.71  
   24.72          consoles = self.extract_consoles(image, document)
   24.73 @@ -894,6 +936,23 @@ class sxp2xml:
   24.74  
   24.75          return pcis
   24.76  
   24.77 +    def extract_scsis(self, scsis_sxp, document):
   24.78 +
   24.79 +        scsis = []
   24.80 +
   24.81 +        for scsi_sxp in scsis_sxp:
   24.82 +            for dev_sxp in sxp.children(scsi_sxp, "dev"):
   24.83 +                scsi = document.createElement("vscsi")
   24.84 +
   24.85 +                scsi.attributes["p-dev"] \
   24.86 +                    = get_child_by_name(dev_sxp, "p-dev")
   24.87 +                scsi.attributes["v-dev"] \
   24.88 +                    = get_child_by_name(dev_sxp, "v-dev")
   24.89 +
   24.90 +                scsis.append(scsi)
   24.91 +
   24.92 +        return scsis
   24.93 +
   24.94      def mk_other_config(self, key, value, document):
   24.95          other_config = document.createElement("other_config")
   24.96          other_config.attributes["key"] = key
    25.1 --- a/tools/xentrace/formats	Wed Oct 22 11:38:22 2008 +0900
    25.2 +++ b/tools/xentrace/formats	Wed Oct 22 11:46:55 2008 +0900
    25.3 @@ -116,3 +116,7 @@ 0x0040f00e  CPU%(cpu)d  %(tsc)d (+%(relt
    25.4  0x0040f10e  CPU%(cpu)d  %(tsc)d (+%(reltsc)8d)  shadow_emulate_resync_full        [ gfn = 0x%(1)16x ]
    25.5  0x0040f00f  CPU%(cpu)d  %(tsc)d (+%(reltsc)8d)  shadow_emulate_resync_only        [ gfn = 0x%(1)08x ]
    25.6  0x0040f10f  CPU%(cpu)d  %(tsc)d (+%(reltsc)8d)  shadow_emulate_resync_only        [ gfn = 0x%(1)16x ]
    25.7 +
    25.8 +0x00801001  CPU%(cpu)d  %(tsc)d (+%(reltsc)8d)  cpu_freq_change [ %(1)dMHz -> %(2)dMHz ]
    25.9 +0x00802001  CPU%(cpu)d  %(tsc)d (+%(reltsc)8d)  cpu_idle_entry  [ C0 -> C%(1)d ]
   25.10 +0x00802002  CPU%(cpu)d  %(tsc)d (+%(reltsc)8d)  cpu_idle_exit   [ C%(1)d -> C0 ]
    26.1 --- a/unmodified_drivers/linux-2.6/platform-pci/machine_reboot.c	Wed Oct 22 11:38:22 2008 +0900
    26.2 +++ b/unmodified_drivers/linux-2.6/platform-pci/machine_reboot.c	Wed Oct 22 11:46:55 2008 +0900
    26.3 @@ -11,12 +11,6 @@ struct ap_suspend_info {
    26.4  	atomic_t nr_spinning;
    26.5  };
    26.6  
    26.7 -/*
    26.8 - * Use a rwlock to protect the hypercall page from being executed in AP context
    26.9 - * while the BSP is re-initializing it after restore.
   26.10 - */
   26.11 -static DEFINE_RWLOCK(suspend_lock);
   26.12 -
   26.13  #ifdef CONFIG_SMP
   26.14  
   26.15  /*
   26.16 @@ -33,12 +27,8 @@ static void ap_suspend(void *_info)
   26.17  	atomic_inc(&info->nr_spinning);
   26.18  	mb();
   26.19  
   26.20 -	while (info->do_spin) {
   26.21 +	while (info->do_spin)
   26.22  		cpu_relax();
   26.23 -		read_lock(&suspend_lock);
   26.24 -		HYPERVISOR_yield();
   26.25 -		read_unlock(&suspend_lock);
   26.26 -	}
   26.27  
   26.28  	mb();
   26.29  	atomic_dec(&info->nr_spinning);
   26.30 @@ -61,9 +51,7 @@ static int bp_suspend(void)
   26.31  	suspend_cancelled = HYPERVISOR_suspend(0);
   26.32  
   26.33  	if (!suspend_cancelled) {
   26.34 -		write_lock(&suspend_lock);
   26.35  		platform_pci_resume();
   26.36 -		write_unlock(&suspend_lock);
   26.37  		gnttab_resume();
   26.38  		irq_resume();
   26.39  	}
    27.1 --- a/xen/arch/ia64/vmx/vmmu.c	Wed Oct 22 11:38:22 2008 +0900
    27.2 +++ b/xen/arch/ia64/vmx/vmmu.c	Wed Oct 22 11:46:55 2008 +0900
    27.3 @@ -446,7 +446,7 @@ IA64FAULT vmx_vcpu_ptc_ga(VCPU *vcpu, u6
    27.4          do {
    27.5              cpu = v->processor;
    27.6              if (cpu != current->processor) {
    27.7 -                spin_unlock_wait(&per_cpu(schedule_data, cpu).schedule_lock);
    27.8 +                spin_barrier(&per_cpu(schedule_data, cpu).schedule_lock);
    27.9                  /* Flush VHPT on remote processors. */
   27.10                  smp_call_function_single(cpu, &ptc_ga_remote_func,
   27.11                                           &args, 0, 1);
    28.1 --- a/xen/arch/x86/acpi/cpu_idle.c	Wed Oct 22 11:38:22 2008 +0900
    28.2 +++ b/xen/arch/x86/acpi/cpu_idle.c	Wed Oct 22 11:46:55 2008 +0900
    28.3 @@ -40,6 +40,7 @@
    28.4  #include <xen/guest_access.h>
    28.5  #include <xen/keyhandler.h>
    28.6  #include <xen/cpuidle.h>
    28.7 +#include <xen/trace.h>
    28.8  #include <asm/cache.h>
    28.9  #include <asm/io.h>
   28.10  #include <asm/hpet.h>
   28.11 @@ -251,6 +252,9 @@ static void acpi_processor_idle(void)
   28.12      switch ( cx->type )
   28.13      {
   28.14      case ACPI_STATE_C1:
   28.15 +        /* Trace cpu idle entry */
   28.16 +        TRACE_1D(TRC_PM_IDLE_ENTRY, 1);
   28.17 +
   28.18          /*
   28.19           * Invoke C1.
   28.20           * Use the appropriate idle routine, the one that would
   28.21 @@ -261,6 +265,9 @@ static void acpi_processor_idle(void)
   28.22          else 
   28.23              acpi_safe_halt();
   28.24  
   28.25 +        /* Trace cpu idle exit */
   28.26 +        TRACE_1D(TRC_PM_IDLE_EXIT, 1);
   28.27 +
   28.28          /*
   28.29           * TBD: Can't get time duration while in C1, as resumes
   28.30           *      go to an ISR rather than here.  Need to instrument
   28.31 @@ -272,12 +279,16 @@ static void acpi_processor_idle(void)
   28.32      case ACPI_STATE_C2:
   28.33          if ( local_apic_timer_c2_ok )
   28.34          {
   28.35 +            /* Trace cpu idle entry */
   28.36 +            TRACE_1D(TRC_PM_IDLE_ENTRY, 2);
   28.37              /* Get start time (ticks) */
   28.38              t1 = inl(pmtmr_ioport);
   28.39              /* Invoke C2 */
   28.40              acpi_idle_do_entry(cx);
   28.41              /* Get end time (ticks) */
   28.42              t2 = inl(pmtmr_ioport);
   28.43 +            /* Trace cpu idle exit */
   28.44 +            TRACE_1D(TRC_PM_IDLE_EXIT, 2);
   28.45  
   28.46              /* Re-enable interrupts */
   28.47              local_irq_enable();
   28.48 @@ -316,6 +327,8 @@ static void acpi_processor_idle(void)
   28.49              ACPI_FLUSH_CPU_CACHE();
   28.50          }
   28.51  
   28.52 +        /* Trace cpu idle entry */
   28.53 +        TRACE_1D(TRC_PM_IDLE_ENTRY, cx - &power->states[0]);
   28.54          /*
   28.55           * Before invoking C3, be aware that TSC/APIC timer may be 
   28.56           * stopped by H/W. Without carefully handling of TSC/APIC stop issues,
   28.57 @@ -335,6 +348,8 @@ static void acpi_processor_idle(void)
   28.58  
   28.59          /* recovering TSC */
   28.60          cstate_restore_tsc();
   28.61 +        /* Trace cpu idle exit */
   28.62 +        TRACE_1D(TRC_PM_IDLE_EXIT, cx - &power->states[0]);
   28.63  
   28.64          if ( power->flags.bm_check && power->flags.bm_control )
   28.65          {
    29.1 --- a/xen/arch/x86/cpu/amd.c	Wed Oct 22 11:38:22 2008 +0900
    29.2 +++ b/xen/arch/x86/cpu/amd.c	Wed Oct 22 11:46:55 2008 +0900
    29.3 @@ -37,8 +37,8 @@ static unsigned int opt_cpuid_mask_ecx, 
    29.4  integer_param("cpuid_mask_ecx", opt_cpuid_mask_ecx);
    29.5  integer_param("cpuid_mask_edx", opt_cpuid_mask_edx);
    29.6  static unsigned int opt_cpuid_mask_ext_ecx, opt_cpuid_mask_ext_edx;
    29.7 -integer_param("cpuid_mask_ecx", opt_cpuid_mask_ext_ecx);
    29.8 -integer_param("cpuid_mask_edx", opt_cpuid_mask_ext_edx);
    29.9 +integer_param("cpuid_mask_ext_ecx", opt_cpuid_mask_ext_ecx);
   29.10 +integer_param("cpuid_mask_ext_edx", opt_cpuid_mask_ext_edx);
   29.11  
   29.12  static inline void wrmsr_amd(unsigned int index, unsigned int lo, 
   29.13  		unsigned int hi)
    30.1 --- a/xen/arch/x86/domain.c	Wed Oct 22 11:38:22 2008 +0900
    30.2 +++ b/xen/arch/x86/domain.c	Wed Oct 22 11:46:55 2008 +0900
    30.3 @@ -575,7 +575,10 @@ int arch_set_info_guest(
    30.4      v->arch.guest_context.user_regs.eflags |= 2;
    30.5  
    30.6      if ( is_hvm_vcpu(v) )
    30.7 +    {
    30.8 +        hvm_set_info_guest(v);
    30.9          goto out;
   30.10 +    }
   30.11  
   30.12      /* Only CR0.TS is modifiable by guest or admin. */
   30.13      v->arch.guest_context.ctrlreg[0] &= X86_CR0_TS;
   30.14 @@ -1252,11 +1255,11 @@ void context_switch(struct vcpu *prev, s
   30.15              flush_tlb_mask(next->vcpu_dirty_cpumask);
   30.16      }
   30.17  
   30.18 -    local_irq_disable();
   30.19 -
   30.20      if ( is_hvm_vcpu(prev) && !list_empty(&prev->arch.hvm_vcpu.tm_list) )
   30.21          pt_save_timer(prev);
   30.22  
   30.23 +    local_irq_disable();
   30.24 +
   30.25      set_current(next);
   30.26  
   30.27      if ( (per_cpu(curr_vcpu, cpu) == next) || is_idle_vcpu(next) )
    31.1 --- a/xen/arch/x86/domctl.c	Wed Oct 22 11:38:22 2008 +0900
    31.2 +++ b/xen/arch/x86/domctl.c	Wed Oct 22 11:46:55 2008 +0900
    31.3 @@ -1028,6 +1028,21 @@ long arch_do_domctl(
    31.4      }
    31.5      break;
    31.6  
    31.7 +    case XEN_DOMCTL_suppress_spurious_page_faults:
    31.8 +    {
    31.9 +        struct domain *d;
   31.10 +
   31.11 +        ret = -ESRCH;
   31.12 +        d = rcu_lock_domain_by_id(domctl->domain);
   31.13 +        if ( d != NULL )
   31.14 +        {
   31.15 +            d->arch.suppress_spurious_page_faults = 1;
   31.16 +            rcu_unlock_domain(d);
   31.17 +            ret = 0;
   31.18 +        }
   31.19 +    }
   31.20 +    break;
   31.21 +
   31.22      default:
   31.23          ret = -ENOSYS;
   31.24          break;
    32.1 --- a/xen/arch/x86/hvm/svm/emulate.c	Wed Oct 22 11:38:22 2008 +0900
    32.2 +++ b/xen/arch/x86/hvm/svm/emulate.c	Wed Oct 22 11:46:55 2008 +0900
    32.3 @@ -61,6 +61,34 @@ static unsigned long svm_rip2pointer(str
    32.4      return p;
    32.5  }
    32.6  
    32.7 +static unsigned long svm_nextrip_insn_length(struct vcpu *v)
    32.8 +{
    32.9 +    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
   32.10 +
   32.11 +    if ( !cpu_has_svm_nrips || (vmcb->nextrip <= vmcb->rip) )
   32.12 +        return 0;
   32.13 +
   32.14 +#ifndef NDEBUG
   32.15 +    switch ( vmcb->exitcode )
   32.16 +    {
   32.17 +    case VMEXIT_CR0_READ ... VMEXIT_DR15_WRITE:
   32.18 +        /* faults due to instruction intercepts */
   32.19 +        /* (exitcodes 84-95) are reserved */
   32.20 +    case VMEXIT_IDTR_READ ... VMEXIT_TR_WRITE:
   32.21 +    case VMEXIT_RDTSC ... VMEXIT_MSR:
   32.22 +    case VMEXIT_VMRUN ...  VMEXIT_MWAIT_CONDITIONAL:
   32.23 +        /* ...and the rest of the #VMEXITs */
   32.24 +    case VMEXIT_CR0_SEL_WRITE:
   32.25 +    case VMEXIT_EXCEPTION_BP:
   32.26 +        break;
   32.27 +    default:
   32.28 +        BUG();
   32.29 +    }
   32.30 +#endif
   32.31 +
   32.32 +    return vmcb->nextrip - vmcb->rip;
   32.33 +}
   32.34 +
   32.35  /* First byte: Length. Following bytes: Opcode bytes. */
   32.36  #define MAKE_INSTR(nm, ...) static const u8 OPCODE_##nm[] = { __VA_ARGS__ }
   32.37  MAKE_INSTR(INVD,   2, 0x0f, 0x08);
   32.38 @@ -118,6 +146,9 @@ int __get_instruction_length_from_list(s
   32.39      unsigned long fetch_addr;
   32.40      unsigned int fetch_len;
   32.41  
   32.42 +    if ( (inst_len = svm_nextrip_insn_length(v)) != 0 )
   32.43 +        return inst_len;
   32.44 +
   32.45      /* Fetch up to the next page break; we'll fetch from the next page
   32.46       * later if we have to. */
   32.47      fetch_addr = svm_rip2pointer(v);
    33.1 --- a/xen/arch/x86/hvm/svm/intr.c	Wed Oct 22 11:38:22 2008 +0900
    33.2 +++ b/xen/arch/x86/hvm/svm/intr.c	Wed Oct 22 11:46:55 2008 +0900
    33.3 @@ -100,61 +100,6 @@ static void enable_intr_window(struct vc
    33.4      vmcb->general1_intercepts |= GENERAL1_INTERCEPT_VINTR;
    33.5  }
    33.6  
    33.7 -extern int vmsi_deliver(struct domain *d, int pirq);
    33.8 -static int hvm_pci_msi_assert(struct domain *d, int pirq)
    33.9 -{
   33.10 -    return vmsi_deliver(d, pirq);
   33.11 -}
   33.12 -
   33.13 -static void svm_dirq_assist(struct vcpu *v)
   33.14 -{
   33.15 -    unsigned int irq;
   33.16 -    uint32_t device, intx;
   33.17 -    struct domain *d = v->domain;
   33.18 -    struct hvm_irq_dpci *hvm_irq_dpci = d->arch.hvm_domain.irq.dpci;
   33.19 -    struct dev_intx_gsi_link *digl;
   33.20 -
   33.21 -    if ( !iommu_enabled || (v->vcpu_id != 0) || (hvm_irq_dpci == NULL) )
   33.22 -        return;
   33.23 -
   33.24 -    for ( irq = find_first_bit(hvm_irq_dpci->dirq_mask, NR_IRQS);
   33.25 -          irq < NR_IRQS;
   33.26 -          irq = find_next_bit(hvm_irq_dpci->dirq_mask, NR_IRQS, irq + 1) )
   33.27 -    {
   33.28 -        if ( !test_and_clear_bit(irq, &hvm_irq_dpci->dirq_mask) )
   33.29 -            continue;
   33.30 -
   33.31 -        spin_lock(&d->event_lock);
   33.32 -        if ( test_bit(_HVM_IRQ_DPCI_MSI, &hvm_irq_dpci->mirq[irq].flags) )
   33.33 -        {
   33.34 -            hvm_pci_msi_assert(d, irq);
   33.35 -            spin_unlock(&d->event_lock);
   33.36 -            continue;
   33.37 -        }
   33.38 -
   33.39 -        stop_timer(&hvm_irq_dpci->hvm_timer[domain_irq_to_vector(d, irq)]);
   33.40 -
   33.41 -        list_for_each_entry ( digl, &hvm_irq_dpci->mirq[irq].digl_list, list )
   33.42 -        {
   33.43 -            device = digl->device;
   33.44 -            intx = digl->intx;
   33.45 -            hvm_pci_intx_assert(d, device, intx);
   33.46 -            hvm_irq_dpci->mirq[irq].pending++;
   33.47 -        }
   33.48 -
   33.49 -        /*
   33.50 -         * Set a timer to see if the guest can finish the interrupt or not. For
   33.51 -         * example, the guest OS may unmask the PIC during boot, before the
   33.52 -         * guest driver is loaded. hvm_pci_intx_assert() may succeed, but the
   33.53 -         * guest will never deal with the irq, then the physical interrupt line
   33.54 -         * will never be deasserted.
   33.55 -         */
   33.56 -        set_timer(&hvm_irq_dpci->hvm_timer[domain_irq_to_vector(d, irq)],
   33.57 -                  NOW() + PT_IRQ_TIME_OUT);
   33.58 -        spin_unlock(&d->event_lock);
   33.59 -    }
   33.60 -}
   33.61 -
   33.62  asmlinkage void svm_intr_assist(void) 
   33.63  {
   33.64      struct vcpu *v = current;
   33.65 @@ -163,7 +108,7 @@ asmlinkage void svm_intr_assist(void)
   33.66  
   33.67      /* Crank the handle on interrupt state. */
   33.68      pt_update_irq(v);
   33.69 -    svm_dirq_assist(v);
   33.70 +    hvm_dirq_assist(v);
   33.71  
   33.72      do {
   33.73          intack = hvm_vcpu_has_pending_irq(v);
    34.1 --- a/xen/arch/x86/hvm/viridian.c	Wed Oct 22 11:38:22 2008 +0900
    34.2 +++ b/xen/arch/x86/hvm/viridian.c	Wed Oct 22 11:46:55 2008 +0900
    34.3 @@ -244,7 +244,6 @@ int rdmsr_viridian_regs(uint32_t idx, ui
    34.4  
    34.5  int viridian_hypercall(struct cpu_user_regs *regs)
    34.6  {
    34.7 -    struct domain *d = current->domain;
    34.8      int mode = hvm_guest_x86_mode(current);
    34.9      unsigned long input_params_gpa, output_params_gpa;
   34.10      uint16_t status = HV_STATUS_SUCCESS;
   34.11 @@ -271,7 +270,7 @@ int viridian_hypercall(struct cpu_user_r
   34.12          };
   34.13      } output = { 0 };
   34.14  
   34.15 -    ASSERT(is_viridian_domain(d));
   34.16 +    ASSERT(is_viridian_domain(current->domain));
   34.17  
   34.18      switch ( mode )
   34.19      {
    35.1 --- a/xen/arch/x86/hvm/vmx/intr.c	Wed Oct 22 11:38:22 2008 +0900
    35.2 +++ b/xen/arch/x86/hvm/vmx/intr.c	Wed Oct 22 11:46:55 2008 +0900
    35.3 @@ -103,61 +103,6 @@ static void enable_intr_window(struct vc
    35.4      }
    35.5  }
    35.6  
    35.7 -extern int vmsi_deliver(struct domain *d, int pirq);
    35.8 -static int hvm_pci_msi_assert(struct domain *d, int pirq)
    35.9 -{
   35.10 -    return vmsi_deliver(d, pirq);
   35.11 -}
   35.12 -
   35.13 -static void vmx_dirq_assist(struct vcpu *v)
   35.14 -{
   35.15 -    unsigned int irq;
   35.16 -    uint32_t device, intx;
   35.17 -    struct domain *d = v->domain;
   35.18 -    struct hvm_irq_dpci *hvm_irq_dpci = d->arch.hvm_domain.irq.dpci;
   35.19 -    struct dev_intx_gsi_link *digl;
   35.20 -
   35.21 -    if ( !iommu_enabled || (v->vcpu_id != 0) || (hvm_irq_dpci == NULL) )
   35.22 -        return;
   35.23 -
   35.24 -    for ( irq = find_first_bit(hvm_irq_dpci->dirq_mask, NR_IRQS);
   35.25 -          irq < NR_IRQS;
   35.26 -          irq = find_next_bit(hvm_irq_dpci->dirq_mask, NR_IRQS, irq + 1) )
   35.27 -    {
   35.28 -        if ( !test_and_clear_bit(irq, &hvm_irq_dpci->dirq_mask) )
   35.29 -            continue;
   35.30 -
   35.31 -        spin_lock(&d->event_lock);
   35.32 -        if ( test_bit(_HVM_IRQ_DPCI_MSI, &hvm_irq_dpci->mirq[irq].flags) )
   35.33 -        {
   35.34 -            hvm_pci_msi_assert(d, irq);
   35.35 -            spin_unlock(&d->event_lock);
   35.36 -            continue;
   35.37 -        }
   35.38 -
   35.39 -        stop_timer(&hvm_irq_dpci->hvm_timer[domain_irq_to_vector(d, irq)]);
   35.40 -
   35.41 -        list_for_each_entry ( digl, &hvm_irq_dpci->mirq[irq].digl_list, list )
   35.42 -        {
   35.43 -            device = digl->device;
   35.44 -            intx = digl->intx;
   35.45 -            hvm_pci_intx_assert(d, device, intx);
   35.46 -            hvm_irq_dpci->mirq[irq].pending++;
   35.47 -        }
   35.48 -
   35.49 -        /*
   35.50 -         * Set a timer to see if the guest can finish the interrupt or not. For
   35.51 -         * example, the guest OS may unmask the PIC during boot, before the
   35.52 -         * guest driver is loaded. hvm_pci_intx_assert() may succeed, but the
   35.53 -         * guest will never deal with the irq, then the physical interrupt line
   35.54 -         * will never be deasserted.
   35.55 -         */
   35.56 -        set_timer(&hvm_irq_dpci->hvm_timer[domain_irq_to_vector(d, irq)],
   35.57 -                  NOW() + PT_IRQ_TIME_OUT);
   35.58 -        spin_unlock(&d->event_lock);
   35.59 -    }
   35.60 -}
   35.61 -
   35.62  asmlinkage void vmx_intr_assist(void)
   35.63  {
   35.64      struct hvm_intack intack;
   35.65 @@ -167,7 +112,7 @@ asmlinkage void vmx_intr_assist(void)
   35.66  
   35.67      /* Crank the handle on interrupt state. */
   35.68      pt_update_irq(v);
   35.69 -    vmx_dirq_assist(v);
   35.70 +    hvm_dirq_assist(v);
   35.71  
   35.72      do {
   35.73          intack = hvm_vcpu_has_pending_irq(v);
    36.1 --- a/xen/arch/x86/hvm/vmx/vmx.c	Wed Oct 22 11:38:22 2008 +0900
    36.2 +++ b/xen/arch/x86/hvm/vmx/vmx.c	Wed Oct 22 11:46:55 2008 +0900
    36.3 @@ -1184,6 +1184,13 @@ static void vmx_set_uc_mode(struct vcpu 
    36.4      vpid_sync_all();
    36.5  }
    36.6  
    36.7 +static void vmx_set_info_guest(struct vcpu *v)
    36.8 +{
    36.9 +    vmx_vmcs_enter(v);
   36.10 +    __vmwrite(GUEST_DR7, v->arch.guest_context.debugreg[7]);
   36.11 +    vmx_vmcs_exit(v);
   36.12 +}
   36.13 +
   36.14  static struct hvm_function_table vmx_function_table = {
   36.15      .name                 = "VMX",
   36.16      .domain_initialise    = vmx_domain_initialise,
   36.17 @@ -1214,7 +1221,8 @@ static struct hvm_function_table vmx_fun
   36.18      .msr_read_intercept   = vmx_msr_read_intercept,
   36.19      .msr_write_intercept  = vmx_msr_write_intercept,
   36.20      .invlpg_intercept     = vmx_invlpg_intercept,
   36.21 -    .set_uc_mode          = vmx_set_uc_mode
   36.22 +    .set_uc_mode          = vmx_set_uc_mode,
   36.23 +    .set_info_guest       = vmx_set_info_guest
   36.24  };
   36.25  
   36.26  static unsigned long *vpid_bitmap;
   36.27 @@ -2048,8 +2056,12 @@ asmlinkage void vmx_vmexit_handler(struc
   36.28  
   36.29      perfc_incra(vmexits, exit_reason);
   36.30  
   36.31 -    if ( exit_reason != EXIT_REASON_EXTERNAL_INTERRUPT )
   36.32 -        local_irq_enable();
   36.33 +    /* Handle the interrupt we missed before allowing any more in. */
   36.34 +    if ( exit_reason == EXIT_REASON_EXTERNAL_INTERRUPT )
   36.35 +        vmx_do_extint(regs);
   36.36 +
   36.37 +    /* Now enable interrupts so it's safe to take locks. */
   36.38 +    local_irq_enable();
   36.39  
   36.40      if ( unlikely(exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY) )
   36.41          return vmx_failed_vmentry(exit_reason, regs);
   36.42 @@ -2177,7 +2189,7 @@ asmlinkage void vmx_vmexit_handler(struc
   36.43          break;
   36.44      }
   36.45      case EXIT_REASON_EXTERNAL_INTERRUPT:
   36.46 -        vmx_do_extint(regs);
   36.47 +        /* Already handled above. */
   36.48          break;
   36.49      case EXIT_REASON_TRIPLE_FAULT:
   36.50          hvm_triple_fault();
    37.1 --- a/xen/arch/x86/irq.c	Wed Oct 22 11:38:22 2008 +0900
    37.2 +++ b/xen/arch/x86/irq.c	Wed Oct 22 11:46:55 2008 +0900
    37.3 @@ -510,7 +510,7 @@ int pirq_guest_bind(struct vcpu *v, int 
    37.4  {
    37.5      unsigned int        vector;
    37.6      irq_desc_t         *desc;
    37.7 -    irq_guest_action_t *action;
    37.8 +    irq_guest_action_t *action, *newaction = NULL;
    37.9      int                 rc = 0;
   37.10      cpumask_t           cpumask = CPU_MASK_NONE;
   37.11  
   37.12 @@ -520,7 +520,10 @@ int pirq_guest_bind(struct vcpu *v, int 
   37.13   retry:
   37.14      desc = domain_spin_lock_irq_desc(v->domain, irq, NULL);
   37.15      if ( desc == NULL )
   37.16 -        return -EINVAL;
   37.17 +    {
   37.18 +        rc = -EINVAL;
   37.19 +        goto out;
   37.20 +    }
   37.21  
   37.22      action = (irq_guest_action_t *)desc->action;
   37.23      vector = desc - irq_desc;
   37.24 @@ -533,18 +536,24 @@ int pirq_guest_bind(struct vcpu *v, int 
   37.25                      "Cannot bind IRQ %d to guest. In use by '%s'.\n",
   37.26                      irq, desc->action->name);
   37.27              rc = -EBUSY;
   37.28 +            goto unlock_out;
   37.29 +        }
   37.30 +
   37.31 +        if ( newaction == NULL )
   37.32 +        {
   37.33 +            spin_unlock_irq(&desc->lock);
   37.34 +            if ( (newaction = xmalloc(irq_guest_action_t)) != NULL )
   37.35 +                goto retry;
   37.36 +            gdprintk(XENLOG_INFO,
   37.37 +                     "Cannot bind IRQ %d to guest. Out of memory.\n",
   37.38 +                     irq);
   37.39 +            rc = -ENOMEM;
   37.40              goto out;
   37.41          }
   37.42  
   37.43 -        action = xmalloc(irq_guest_action_t);
   37.44 -        if ( (desc->action = (struct irqaction *)action) == NULL )
   37.45 -        {
   37.46 -            gdprintk(XENLOG_INFO,
   37.47 -                    "Cannot bind IRQ %d to guest. Out of memory.\n",
   37.48 -                    irq);
   37.49 -            rc = -ENOMEM;
   37.50 -            goto out;
   37.51 -        }
   37.52 +        action = newaction;
   37.53 +        desc->action = (struct irqaction *)action;
   37.54 +        newaction = NULL;
   37.55  
   37.56          action->nr_guests   = 0;
   37.57          action->in_flight   = 0;
   37.58 @@ -568,7 +577,7 @@ int pirq_guest_bind(struct vcpu *v, int 
   37.59                 "Will not share with others.\n",
   37.60                  irq);
   37.61          rc = -EBUSY;
   37.62 -        goto out;
   37.63 +        goto unlock_out;
   37.64      }
   37.65      else if ( action->nr_guests == 0 )
   37.66      {
   37.67 @@ -588,17 +597,21 @@ int pirq_guest_bind(struct vcpu *v, int 
   37.68          gdprintk(XENLOG_INFO, "Cannot bind IRQ %d to guest. "
   37.69                 "Already at max share.\n", irq);
   37.70          rc = -EBUSY;
   37.71 -        goto out;
   37.72 +        goto unlock_out;
   37.73      }
   37.74  
   37.75      action->guest[action->nr_guests++] = v->domain;
   37.76  
   37.77 + unlock_out:
   37.78 +    spin_unlock_irq(&desc->lock);
   37.79   out:
   37.80 -    spin_unlock_irq(&desc->lock);
   37.81 +    if ( newaction != NULL )
   37.82 +        xfree(newaction);
   37.83      return rc;
   37.84  }
   37.85  
   37.86 -static void __pirq_guest_unbind(struct domain *d, int irq, irq_desc_t *desc)
   37.87 +static irq_guest_action_t *__pirq_guest_unbind(
   37.88 +    struct domain *d, int irq, irq_desc_t *desc)
   37.89  {
   37.90      unsigned int        vector;
   37.91      irq_guest_action_t *action;
   37.92 @@ -644,7 +657,7 @@ static void __pirq_guest_unbind(struct d
   37.93      BUG_ON(test_bit(irq, d->pirq_mask));
   37.94  
   37.95      if ( action->nr_guests != 0 )
   37.96 -        return;
   37.97 +        return NULL;
   37.98  
   37.99      BUG_ON(action->in_flight != 0);
  37.100  
  37.101 @@ -672,15 +685,18 @@ static void __pirq_guest_unbind(struct d
  37.102      BUG_ON(!cpus_empty(action->cpu_eoi_map));
  37.103  
  37.104      desc->action = NULL;
  37.105 -    xfree(action);
  37.106      desc->status &= ~IRQ_GUEST;
  37.107      desc->status &= ~IRQ_INPROGRESS;
  37.108      kill_timer(&irq_guest_eoi_timer[vector]);
  37.109      desc->handler->shutdown(vector);
  37.110 +
  37.111 +    /* Caller frees the old guest descriptor block. */
  37.112 +    return action;
  37.113  }
  37.114  
  37.115  void pirq_guest_unbind(struct domain *d, int irq)
  37.116  {
  37.117 +    irq_guest_action_t *oldaction = NULL;
  37.118      irq_desc_t *desc;
  37.119      int vector;
  37.120  
  37.121 @@ -699,16 +715,19 @@ void pirq_guest_unbind(struct domain *d,
  37.122      }
  37.123      else
  37.124      {
  37.125 -        __pirq_guest_unbind(d, irq, desc);
  37.126 +        oldaction = __pirq_guest_unbind(d, irq, desc);
  37.127      }
  37.128  
  37.129      spin_unlock_irq(&desc->lock);
  37.130 +
  37.131 +    if ( oldaction != NULL )
  37.132 +        xfree(oldaction);
  37.133  }
  37.134  
  37.135  int pirq_guest_force_unbind(struct domain *d, int irq)
  37.136  {
  37.137      irq_desc_t *desc;
  37.138 -    irq_guest_action_t *action;
  37.139 +    irq_guest_action_t *action, *oldaction = NULL;
  37.140      int i, bound = 0;
  37.141  
  37.142      WARN_ON(!spin_is_locked(&d->event_lock));
  37.143 @@ -727,10 +746,14 @@ int pirq_guest_force_unbind(struct domai
  37.144          goto out;
  37.145  
  37.146      bound = 1;
  37.147 -    __pirq_guest_unbind(d, irq, desc);
  37.148 +    oldaction = __pirq_guest_unbind(d, irq, desc);
  37.149  
  37.150   out:
  37.151      spin_unlock_irq(&desc->lock);
  37.152 +
  37.153 +    if ( oldaction != NULL )
  37.154 +        xfree(oldaction);
  37.155 +
  37.156      return bound;
  37.157  }
  37.158  
    38.1 --- a/xen/arch/x86/mm/hap/hap.c	Wed Oct 22 11:38:22 2008 +0900
    38.2 +++ b/xen/arch/x86/mm/hap/hap.c	Wed Oct 22 11:46:55 2008 +0900
    38.3 @@ -639,9 +639,16 @@ static void
    38.4  hap_write_p2m_entry(struct vcpu *v, unsigned long gfn, l1_pgentry_t *p,
    38.5                      mfn_t table_mfn, l1_pgentry_t new, unsigned int level)
    38.6  {
    38.7 +    uint32_t old_flags;
    38.8 +
    38.9      hap_lock(v->domain);
   38.10  
   38.11 +    old_flags = l1e_get_flags(*p);
   38.12      safe_write_pte(p, new);
   38.13 +    if ( (old_flags & _PAGE_PRESENT)
   38.14 +         && (level == 1 || (level == 2 && (old_flags & _PAGE_PSE))) )
   38.15 +             flush_tlb_mask(v->domain->domain_dirty_cpumask);
   38.16 +
   38.17  #if CONFIG_PAGING_LEVELS == 3
   38.18      /* install P2M in monitor table for PAE Xen */
   38.19      if ( level == 3 )
    39.1 --- a/xen/arch/x86/mm/shadow/private.h	Wed Oct 22 11:38:22 2008 +0900
    39.2 +++ b/xen/arch/x86/mm/shadow/private.h	Wed Oct 22 11:46:55 2008 +0900
    39.3 @@ -227,32 +227,40 @@ extern void shadow_audit_tables(struct v
    39.4  struct shadow_page_info
    39.5  {
    39.6      union {
    39.7 -        /* When in use, guest page we're a shadow of */
    39.8 -        unsigned long backpointer;
    39.9 -        /* When free, order of the freelist we're on */
   39.10 -        unsigned int order;
   39.11 -    };
   39.12 -    union {
   39.13 -        /* When in use, next shadow in this hash chain */
   39.14 -        struct shadow_page_info *next_shadow;
   39.15 -        /* When free, TLB flush time when freed */
   39.16 -        u32 tlbflush_timestamp;
   39.17 -    };
   39.18 -    struct {
   39.19 -        unsigned int type:5;      /* What kind of shadow is this? */
   39.20 -        unsigned int pinned:1;    /* Is the shadow pinned? */
   39.21 -        unsigned int count:26;    /* Reference count */
   39.22 -        u32 mbz;                  /* Must be zero: this is where the owner 
   39.23 -                                   * field lives in a non-shadow page */
   39.24 -    } __attribute__((packed));
   39.25 -    union {
   39.26 -        /* For unused shadow pages, a list of pages of this order; 
   39.27 -         * for pinnable shadows, if pinned, a list of other pinned shadows
   39.28 -         * (see sh_type_is_pinnable() below for the definition of 
   39.29 -         * "pinnable" shadow types). */
   39.30 -        struct list_head list;
   39.31 -        /* For non-pinnable shadows, a higher entry that points at us */
   39.32 -        paddr_t up;
   39.33 +        /* Ensures that shadow_page_info is same size as page_info. */
   39.34 +        struct page_info page_info;
   39.35 +
   39.36 +        struct {
   39.37 +            union {
   39.38 +                /* When in use, guest page we're a shadow of */
   39.39 +                unsigned long backpointer;
   39.40 +                /* When free, order of the freelist we're on */
   39.41 +                unsigned int order;
   39.42 +            };
   39.43 +            union {
   39.44 +                /* When in use, next shadow in this hash chain */
   39.45 +                struct shadow_page_info *next_shadow;
   39.46 +                /* When free, TLB flush time when freed */
   39.47 +                u32 tlbflush_timestamp;
   39.48 +            };
   39.49 +            struct {
   39.50 +                unsigned int type:5;   /* What kind of shadow is this? */
   39.51 +                unsigned int pinned:1; /* Is the shadow pinned? */
   39.52 +                unsigned int count:26; /* Reference count */
   39.53 +                u32 mbz;               /* Must be zero: this is where the
   39.54 +                                        * owner field lives in page_info */
   39.55 +            } __attribute__((packed));
   39.56 +            union {
   39.57 +                /* For unused shadow pages, a list of pages of this order; for 
   39.58 +                 * pinnable shadows, if pinned, a list of other pinned shadows
   39.59 +                 * (see sh_type_is_pinnable() below for the definition of 
   39.60 +                 * "pinnable" shadow types). */
   39.61 +                struct list_head list;
   39.62 +                /* For non-pinnable shadows, a higher entry that points
   39.63 +                 * at us. */
   39.64 +                paddr_t up;
   39.65 +            };
   39.66 +        };
   39.67      };
   39.68  };
   39.69  
   39.70 @@ -261,7 +269,8 @@ struct shadow_page_info
   39.71   * Also, the mbz field must line up with the owner field of normal 
   39.72   * pages, so they look properly like anonymous/xen pages. */
   39.73  static inline void shadow_check_page_struct_offsets(void) {
   39.74 -    BUILD_BUG_ON(sizeof (struct shadow_page_info) > sizeof (struct page_info));
   39.75 +    BUILD_BUG_ON(sizeof (struct shadow_page_info) !=
   39.76 +                 sizeof (struct page_info));
   39.77      BUILD_BUG_ON(offsetof(struct shadow_page_info, mbz) !=
   39.78                   offsetof(struct page_info, u.inuse._domain));
   39.79  };
    40.1 --- a/xen/arch/x86/msi.c	Wed Oct 22 11:38:22 2008 +0900
    40.2 +++ b/xen/arch/x86/msi.c	Wed Oct 22 11:46:55 2008 +0900
    40.3 @@ -364,6 +364,7 @@ static struct msi_desc* alloc_msi_entry(
    40.4  
    40.5      INIT_LIST_HEAD(&entry->list);
    40.6      entry->dev = NULL;
    40.7 +    entry->remap_index = -1;
    40.8  
    40.9      return entry;
   40.10  }
    41.1 --- a/xen/arch/x86/nmi.c	Wed Oct 22 11:38:22 2008 +0900
    41.2 +++ b/xen/arch/x86/nmi.c	Wed Oct 22 11:46:55 2008 +0900
    41.3 @@ -72,8 +72,8 @@ int nmi_active;
    41.4  #define P6_EVNTSEL_INT		(1 << 20)
    41.5  #define P6_EVNTSEL_OS		(1 << 17)
    41.6  #define P6_EVNTSEL_USR		(1 << 16)
    41.7 -#define P6_EVENT_CPU_CLOCKS_NOT_HALTED	0x79
    41.8 -#define P6_NMI_EVENT		P6_EVENT_CPU_CLOCKS_NOT_HALTED
    41.9 +#define P6_EVENT_CPU_CLOCKS_NOT_HALTED	 0x79
   41.10 +#define CORE_EVENT_CPU_CLOCKS_NOT_HALTED 0x3c
   41.11  
   41.12  #define P4_ESCR_EVENT_SELECT(N)	((N)<<25)
   41.13  #define P4_CCCR_OVF_PMI0	(1<<26)
   41.14 @@ -122,10 +122,17 @@ int __init check_nmi_watchdog (void)
   41.15  
   41.16      printk("\n");
   41.17  
   41.18 -    /* now that we know it works we can reduce NMI frequency to
   41.19 -       something more reasonable; makes a difference in some configs */
   41.20 +    /*
   41.21 +     * Now that we know it works we can reduce NMI frequency to
   41.22 +     * something more reasonable; makes a difference in some configs.
   41.23 +     * There's a limit to how slow we can go because writing the perfctr
   41.24 +     * MSRs only sets the low 32 bits, with the top 8 bits sign-extended
   41.25 +     * from those, so it's not possible to set up a delay larger than
   41.26 +     * 2^31 cycles and smaller than (2^40 - 2^31) cycles. 
   41.27 +     * (Intel SDM, section 18.22.2)
   41.28 +     */
   41.29      if ( nmi_watchdog == NMI_LOCAL_APIC )
   41.30 -        nmi_hz = 1;
   41.31 +        nmi_hz = max(1ul, cpu_khz >> 20);
   41.32  
   41.33      return 0;
   41.34  }
   41.35 @@ -248,7 +255,7 @@ static void __pminit setup_k7_watchdog(v
   41.36      wrmsr(MSR_K7_EVNTSEL0, evntsel, 0);
   41.37  }
   41.38  
   41.39 -static void __pminit setup_p6_watchdog(void)
   41.40 +static void __pminit setup_p6_watchdog(unsigned counter)
   41.41  {
   41.42      unsigned int evntsel;
   41.43  
   41.44 @@ -260,7 +267,7 @@ static void __pminit setup_p6_watchdog(v
   41.45      evntsel = P6_EVNTSEL_INT
   41.46          | P6_EVNTSEL_OS
   41.47          | P6_EVNTSEL_USR
   41.48 -        | P6_NMI_EVENT;
   41.49 +        | counter;
   41.50  
   41.51      wrmsr(MSR_P6_EVNTSEL0, evntsel, 0);
   41.52      write_watchdog_counter("P6_PERFCTR0");
   41.53 @@ -326,7 +333,9 @@ void __pminit setup_apic_nmi_watchdog(vo
   41.54      case X86_VENDOR_INTEL:
   41.55          switch (boot_cpu_data.x86) {
   41.56          case 6:
   41.57 -            setup_p6_watchdog();
   41.58 +            setup_p6_watchdog((boot_cpu_data.x86_model < 14) 
   41.59 +                              ? P6_EVENT_CPU_CLOCKS_NOT_HALTED
   41.60 +                              : CORE_EVENT_CPU_CLOCKS_NOT_HALTED);
   41.61              break;
   41.62          case 15:
   41.63              if (!setup_p4_watchdog())
    42.1 --- a/xen/arch/x86/platform_hypercall.c	Wed Oct 22 11:38:22 2008 +0900
    42.2 +++ b/xen/arch/x86/platform_hypercall.c	Wed Oct 22 11:46:55 2008 +0900
    42.3 @@ -53,15 +53,6 @@ static long cpu_frequency_change_helper(
    42.4      return cpu_frequency_change(this_cpu(freq));
    42.5  }
    42.6  
    42.7 -int xenpf_copy_px_states(struct processor_performance *pxpt,
    42.8 -        struct xen_processor_performance *dom0_px_info)
    42.9 -{
   42.10 -    if (!pxpt || !dom0_px_info)
   42.11 -        return -EINVAL;
   42.12 -    return  copy_from_compat(pxpt->states, dom0_px_info->states, 
   42.13 -                    dom0_px_info->state_count);
   42.14 -}
   42.15 -
   42.16  ret_t do_platform_op(XEN_GUEST_HANDLE(xen_platform_op_t) u_xenpf_op)
   42.17  {
   42.18      ret_t ret = 0;
   42.19 @@ -372,12 +363,13 @@ ret_t do_platform_op(XEN_GUEST_HANDLE(xe
   42.20          switch ( op->u.set_pminfo.type )
   42.21          {
   42.22          case XEN_PM_PX:
   42.23 -        {
   42.24 -
   42.25 -            ret = set_px_pminfo(op->u.set_pminfo.id,
   42.26 -                                &op->u.set_pminfo.perf);
   42.27 +            if ( !(xen_processor_pmbits & XEN_PROCESSOR_PM_PX) )
   42.28 +            {
   42.29 +                ret = -ENOSYS;
   42.30 +                break;
   42.31 +            }
   42.32 +            ret = set_px_pminfo(op->u.set_pminfo.id, &op->u.set_pminfo.perf);
   42.33              break;
   42.34 -        }
   42.35   
   42.36          case XEN_PM_CX:
   42.37              if ( !(xen_processor_pmbits & XEN_PROCESSOR_PM_CX) )
    43.1 --- a/xen/arch/x86/smpboot.c	Wed Oct 22 11:38:22 2008 +0900
    43.2 +++ b/xen/arch/x86/smpboot.c	Wed Oct 22 11:46:55 2008 +0900
    43.3 @@ -473,13 +473,6 @@ static void construct_percpu_idt(unsigne
    43.4  {
    43.5  	unsigned char idt_load[10];
    43.6  
    43.7 -	/* If IDT table exists since last hotplug, reuse it */
    43.8 -	if (!idt_tables[cpu]) {
    43.9 -		idt_tables[cpu] = xmalloc_array(idt_entry_t, IDT_ENTRIES);
   43.10 -		memcpy(idt_tables[cpu], idt_table,
   43.11 -				IDT_ENTRIES*sizeof(idt_entry_t));
   43.12 -	}
   43.13 -
   43.14  	*(unsigned short *)(&idt_load[0]) = (IDT_ENTRIES*sizeof(idt_entry_t))-1;
   43.15  	*(unsigned long  *)(&idt_load[2]) = (unsigned long)idt_tables[cpu];
   43.16  	__asm__ __volatile__ ( "lidt %0" : "=m" (idt_load) );
   43.17 @@ -908,6 +901,12 @@ static int __devinit do_boot_cpu(int api
   43.18  	}
   43.19  #endif
   43.20  
   43.21 +	if (!idt_tables[cpu]) {
   43.22 +		idt_tables[cpu] = xmalloc_array(idt_entry_t, IDT_ENTRIES);
   43.23 +		memcpy(idt_tables[cpu], idt_table,
   43.24 +		       IDT_ENTRIES*sizeof(idt_entry_t));
   43.25 +	}
   43.26 +
   43.27  	/*
   43.28  	 * This grunge runs the startup process for
   43.29  	 * the targeted processor.
    44.1 --- a/xen/arch/x86/traps.c	Wed Oct 22 11:38:22 2008 +0900
    44.2 +++ b/xen/arch/x86/traps.c	Wed Oct 22 11:46:55 2008 +0900
    44.3 @@ -710,7 +710,7 @@ static void pv_cpuid(struct cpu_user_reg
    44.4      if ( current->domain->domain_id != 0 )
    44.5      {
    44.6          if ( !cpuid_hypervisor_leaves(a, &a, &b, &c, &d) )
    44.7 -            domain_cpuid(current->domain, a, b, &a, &b, &c, &d);
    44.8 +            domain_cpuid(current->domain, a, c, &a, &b, &c, &d);
    44.9          goto out;
   44.10      }
   44.11  
   44.12 @@ -1242,6 +1242,10 @@ asmlinkage void do_page_fault(struct cpu
   44.13                regs->error_code, _p(addr));
   44.14      }
   44.15  
   44.16 +    if ( unlikely(current->domain->arch.suppress_spurious_page_faults
   44.17 +                  && spurious_page_fault(addr, regs)) )
   44.18 +        return;
   44.19 +
   44.20      propagate_page_fault(addr, regs->error_code);
   44.21  }
   44.22  
    45.1 --- a/xen/arch/x86/x86_32/xen.lds.S	Wed Oct 22 11:38:22 2008 +0900
    45.2 +++ b/xen/arch/x86/x86_32/xen.lds.S	Wed Oct 22 11:46:55 2008 +0900
    45.3 @@ -26,7 +26,6 @@ SECTIONS
    45.4  	*(.fixup)
    45.5  	*(.gnu.warning)
    45.6  	} :text =0x9090
    45.7 -  .text.lock : { *(.text.lock) } :text	/* out-of-line lock text */
    45.8  
    45.9    _etext = .;			/* End of text section */
   45.10  
    46.1 --- a/xen/arch/x86/x86_64/Makefile	Wed Oct 22 11:38:22 2008 +0900
    46.2 +++ b/xen/arch/x86/x86_64/Makefile	Wed Oct 22 11:46:55 2008 +0900
    46.3 @@ -13,6 +13,7 @@ obj-$(CONFIG_COMPAT) += domain.o
    46.4  obj-$(CONFIG_COMPAT) += physdev.o
    46.5  obj-$(CONFIG_COMPAT) += platform_hypercall.o
    46.6  obj-$(CONFIG_COMPAT) += cpu_idle.o
    46.7 +obj-$(CONFIG_COMPAT) += cpufreq.o
    46.8  
    46.9  ifeq ($(CONFIG_COMPAT),y)
   46.10  # extra dependencies
   46.11 @@ -24,4 +25,5 @@ platform_hypercall.o: ../platform_hyperc
   46.12  sysctl.o:	../sysctl.c
   46.13  traps.o:	compat/traps.c
   46.14  cpu_idle.o:	../acpi/cpu_idle.c
   46.15 +cpufreq.o:	../../../drivers/cpufreq/cpufreq.c
   46.16  endif
    47.1 --- a/xen/arch/x86/x86_64/cpu_idle.c	Wed Oct 22 11:38:22 2008 +0900
    47.2 +++ b/xen/arch/x86/x86_64/cpu_idle.c	Wed Oct 22 11:46:55 2008 +0900
    47.3 @@ -44,7 +44,7 @@ DEFINE_XEN_GUEST_HANDLE(compat_processor
    47.4      xlat_page_current = xlat_page_start; \
    47.5  } while (0)
    47.6  
    47.7 -static void *xlat_malloc(unsigned long *xlat_page_current, size_t size)
    47.8 +void *xlat_malloc(unsigned long *xlat_page_current, size_t size)
    47.9  {
   47.10      void *ret;
   47.11  
    48.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    48.2 +++ b/xen/arch/x86/x86_64/cpufreq.c	Wed Oct 22 11:46:55 2008 +0900
    48.3 @@ -0,0 +1,91 @@
    48.4 +/******************************************************************************
    48.5 + * cpufreq.c -- adapt 32b compat guest to 64b hypervisor.
    48.6 + *
    48.7 + *  Copyright (C) 2008, Liu Jinsong <jinsong.liu@intel.com>
    48.8 + *
    48.9 + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
   48.10 + *
   48.11 + *  This program is free software; you can redistribute it and/or modify
   48.12 + *  it under the terms of the GNU General Public License as published by
   48.13 + *  the Free Software Foundation; either version 2 of the License, or (at
   48.14 + *  your option) any later version.
   48.15 + *
   48.16 + *  This program is distributed in the hope that it will be useful, but
   48.17 + *  WITHOUT ANY WARRANTY; without even the implied warranty of
   48.18 + *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   48.19 + *  General Public License for more details.
   48.20 + *
   48.21 + *  You should have received a copy of the GNU General Public License along
   48.22 + *  with this program; if not, write to the Free Software Foundation, Inc.,
   48.23 + *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
   48.24 + *
   48.25 + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
   48.26 + */
   48.27 +#include <xen/config.h>
   48.28 +#include <xen/types.h>
   48.29 +#include <xen/xmalloc.h>
   48.30 +#include <xen/guest_access.h>
   48.31 +#include <compat/platform.h>
   48.32 +
   48.33 +DEFINE_XEN_GUEST_HANDLE(compat_processor_px_t);
   48.34 +
   48.35 +#define xlat_page_start ((unsigned long)COMPAT_ARG_XLAT_VIRT_BASE)
   48.36 +
   48.37 +#define xlat_malloc_init(xlat_page_current)    do { \
   48.38 +    xlat_page_current = xlat_page_start; \
   48.39 +} while (0)
   48.40 +
   48.41 +extern void *xlat_malloc(unsigned long *xlat_page_current, size_t size);
   48.42 +
   48.43 +#define xlat_malloc_array(_p, _t, _c) ((_t *) xlat_malloc(&_p, sizeof(_t) * _c))
   48.44 +
   48.45 +extern int 
   48.46 +set_px_pminfo(uint32_t cpu, struct xen_processor_performance *perf);
   48.47 +
   48.48 +int 
   48.49 +compat_set_px_pminfo(uint32_t cpu, struct compat_processor_performance *perf)
   48.50 +{
   48.51 +    struct xen_processor_performance *xen_perf;
   48.52 +    unsigned long xlat_page_current;
   48.53 +
   48.54 +    xlat_malloc_init(xlat_page_current);
   48.55 +
   48.56 +    xen_perf = xlat_malloc_array(xlat_page_current,
   48.57 +                                  struct xen_processor_performance, 1);
   48.58 +    if ( unlikely(xen_perf == NULL) )
   48.59 +	return -EFAULT;
   48.60 +
   48.61 +#define XLAT_processor_performance_HNDL_states(_d_, _s_) do { \
   48.62 +    xen_processor_px_t *xen_states = NULL; \
   48.63 +\
   48.64 +    if ( likely((_s_)->state_count > 0) ) \
   48.65 +    { \
   48.66 +        XEN_GUEST_HANDLE(compat_processor_px_t) states; \
   48.67 +        compat_processor_px_t state; \
   48.68 +        int i; \
   48.69 +\
   48.70 +        xen_states = xlat_malloc_array(xlat_page_current, \
   48.71 +                               xen_processor_px_t, (_s_)->state_count); \
   48.72 +        if ( unlikely(xen_states == NULL) ) \
   48.73 +            return -EFAULT; \
   48.74 +\
   48.75 +        if ( unlikely(!compat_handle_okay((_s_)->states, \
   48.76 +                                (_s_)->state_count)) ) \
   48.77 +            return -EFAULT; \
   48.78 +        guest_from_compat_handle(states, (_s_)->states); \
   48.79 +\
   48.80 +        for ( i = 0; i < _s_->state_count; i++ ) \
   48.81 +        { \
   48.82 +           if ( unlikely(copy_from_guest_offset(&state, states, i, 1)) ) \
   48.83 +               return -EFAULT; \
   48.84 +           XLAT_processor_px(&xen_states[i], &state); \
   48.85 +        } \
   48.86 +    } \
   48.87 +\
   48.88 +    set_xen_guest_handle((_d_)->states, xen_states); \
   48.89 +} while (0)
   48.90 +    XLAT_processor_performance(xen_perf, perf);
   48.91 +#undef XLAT_processor_performance_HNDL_states
   48.92 +
   48.93 +    return set_px_pminfo(cpu, xen_perf);
   48.94 +}
    49.1 --- a/xen/arch/x86/x86_64/mm.c	Wed Oct 22 11:38:22 2008 +0900
    49.2 +++ b/xen/arch/x86/x86_64/mm.c	Wed Oct 22 11:46:55 2008 +0900
    49.3 @@ -252,8 +252,6 @@ void __init subarch_init_memory(void)
    49.4      BUILD_BUG_ON(offsetof(struct page_info, u.inuse._domain) != 
    49.5                   (offsetof(struct page_info, count_info) + sizeof(u32)));
    49.6      BUILD_BUG_ON((offsetof(struct page_info, count_info) & 7) != 0);
    49.7 -    BUILD_BUG_ON(sizeof(struct page_info) !=
    49.8 -                 (32 + BITS_TO_LONGS(NR_CPUS)*sizeof(long)));
    49.9  
   49.10      /* M2P table is mappable read-only by privileged domains. */
   49.11      for ( v  = RDWR_MPT_VIRT_START;
    50.1 --- a/xen/arch/x86/x86_64/platform_hypercall.c	Wed Oct 22 11:38:22 2008 +0900
    50.2 +++ b/xen/arch/x86/x86_64/platform_hypercall.c	Wed Oct 22 11:46:55 2008 +0900
    50.3 @@ -11,14 +11,14 @@ DEFINE_XEN_GUEST_HANDLE(compat_platform_
    50.4  #define xen_platform_op_t   compat_platform_op_t
    50.5  #define do_platform_op(x)   compat_platform_op(_##x)
    50.6  
    50.7 -#define xenpf_copy_px_states compat_xenpf_copy_px_states
    50.8 -
    50.9  #define xen_processor_px    compat_processor_px
   50.10  #define xen_processor_px_t  compat_processor_px_t
   50.11  #define xen_processor_performance    compat_processor_performance
   50.12  #define xen_processor_performance_t  compat_processor_performance_t
   50.13  #define xenpf_set_processor_pminfo   compat_pf_set_processor_pminfo
   50.14  
   50.15 +#define set_px_pminfo		compat_set_px_pminfo
   50.16 +
   50.17  #define xen_processor_power     compat_processor_power
   50.18  #define xen_processor_power_t   compat_processor_power_t
   50.19  #define set_cx_pminfo           compat_set_cx_pminfo
    51.1 --- a/xen/arch/x86/x86_64/xen.lds.S	Wed Oct 22 11:38:22 2008 +0900
    51.2 +++ b/xen/arch/x86/x86_64/xen.lds.S	Wed Oct 22 11:46:55 2008 +0900
    51.3 @@ -24,7 +24,6 @@ SECTIONS
    51.4  	*(.fixup)
    51.5  	*(.gnu.warning)
    51.6  	} :text = 0x9090
    51.7 -  .text.lock : { *(.text.lock) } :text	/* out-of-line lock text */
    51.8  
    51.9    _etext = .;			/* End of text section */
   51.10  
    52.1 --- a/xen/arch/x86/x86_emulate/x86_emulate.c	Wed Oct 22 11:38:22 2008 +0900
    52.2 +++ b/xen/arch/x86/x86_emulate/x86_emulate.c	Wed Oct 22 11:46:55 2008 +0900
    52.3 @@ -236,7 +236,8 @@ static uint8_t twobyte_table[256] = {
    52.4      DstReg|SrcMem|ModRM, DstReg|SrcMem|ModRM,
    52.5      ByteOp|DstReg|SrcMem|ModRM|Mov, DstReg|SrcMem16|ModRM|Mov,
    52.6      /* 0xC0 - 0xC7 */
    52.7 -    ByteOp|DstMem|SrcReg|ModRM, DstMem|SrcReg|ModRM, 0, 0,
    52.8 +    ByteOp|DstMem|SrcReg|ModRM, DstMem|SrcReg|ModRM,
    52.9 +    0, DstMem|SrcReg|ModRM|Mov,
   52.10      0, 0, 0, ImplicitOps|ModRM,
   52.11      /* 0xC8 - 0xCF */
   52.12      ImplicitOps, ImplicitOps, ImplicitOps, ImplicitOps,
   52.13 @@ -3910,6 +3911,12 @@ x86_emulate(
   52.14          }
   52.15          goto add;
   52.16  
   52.17 +    case 0xc3: /* movnti */
   52.18 +        /* Ignore the non-temporal hint for now. */
   52.19 +        generate_exception_if(dst.bytes <= 2, EXC_UD, -1);
   52.20 +        dst.val = src.val;
   52.21 +        break;
   52.22 +
   52.23      case 0xc7: /* Grp9 (cmpxchg8b/cmpxchg16b) */ {
   52.24          unsigned long old[2], exp[2], new[2];
   52.25          unsigned int i;
    53.1 --- a/xen/common/Makefile	Wed Oct 22 11:38:22 2008 +0900
    53.2 +++ b/xen/common/Makefile	Wed Oct 22 11:46:55 2008 +0900
    53.3 @@ -16,6 +16,7 @@ obj-y += sched_sedf.o
    53.4  obj-y += schedule.o
    53.5  obj-y += shutdown.o
    53.6  obj-y += softirq.o
    53.7 +obj-y += spinlock.o
    53.8  obj-y += stop_machine.o
    53.9  obj-y += string.o
   53.10  obj-y += symbols.o
   53.11 @@ -25,7 +26,7 @@ obj-y += timer.o
   53.12  obj-y += trace.o
   53.13  obj-y += version.o
   53.14  obj-y += vsprintf.o
   53.15 -obj-y += xmalloc.o
   53.16 +obj-y += xmalloc_tlsf.o
   53.17  obj-y += rcupdate.o
   53.18  
   53.19  obj-$(perfc)       += perfc.o
    54.1 --- a/xen/common/kernel.c	Wed Oct 22 11:38:22 2008 +0900
    54.2 +++ b/xen/common/kernel.c	Wed Oct 22 11:46:55 2008 +0900
    54.3 @@ -75,8 +75,7 @@ void cmdline_parse(char *cmdline)
    54.4                  strlcpy(param->var, optval, param->len);
    54.5                  break;
    54.6              case OPT_UINT:
    54.7 -                *(unsigned int *)param->var =
    54.8 -                    simple_strtol(optval, (const char **)&optval, 0);
    54.9 +                *(unsigned int *)param->var = simple_strtol(optval, NULL, 0);
   54.10                  break;
   54.11              case OPT_BOOL:
   54.12              case OPT_INVBOOL:
    55.1 --- a/xen/common/schedule.c	Wed Oct 22 11:38:22 2008 +0900
    55.2 +++ b/xen/common/schedule.c	Wed Oct 22 11:46:55 2008 +0900
    55.3 @@ -455,6 +455,10 @@ static long do_poll(struct sched_poll *s
    55.4          goto out;
    55.5  #endif
    55.6  
    55.7 +    rc = 0;
    55.8 +    if ( local_events_need_delivery() )
    55.9 +        goto out;
   55.10 +
   55.11      for ( i = 0; i < sched_poll->nr_ports; i++ )
   55.12      {
   55.13          rc = -EFAULT;
    56.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    56.2 +++ b/xen/common/spinlock.c	Wed Oct 22 11:46:55 2008 +0900
    56.3 @@ -0,0 +1,154 @@
    56.4 +#include <xen/config.h>
    56.5 +#include <xen/smp.h>
    56.6 +#include <xen/spinlock.h>
    56.7 +
    56.8 +void _spin_lock(spinlock_t *lock)
    56.9 +{
   56.10 +    _raw_spin_lock(&lock->raw);
   56.11 +}
   56.12 +
   56.13 +void _spin_lock_irq(spinlock_t *lock)
   56.14 +{
   56.15 +    local_irq_disable();
   56.16 +    _raw_spin_lock(&lock->raw);
   56.17 +}
   56.18 +
   56.19 +unsigned long _spin_lock_irqsave(spinlock_t *lock)
   56.20 +{
   56.21 +    unsigned long flags;
   56.22 +    local_irq_save(flags);
   56.23 +    _raw_spin_lock(&lock->raw);
   56.24 +    return flags;
   56.25 +}
   56.26 +
   56.27 +void _spin_unlock(spinlock_t *lock)
   56.28 +{
   56.29 +    _raw_spin_unlock(&lock->raw);
   56.30 +}
   56.31 +
   56.32 +void _spin_unlock_irq(spinlock_t *lock)
   56.33 +{
   56.34 +    _raw_spin_unlock(&lock->raw);
   56.35 +    local_irq_enable();
   56.36 +}
   56.37 +
   56.38 +void _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
   56.39 +{
   56.40 +    _raw_spin_unlock(&lock->raw);
   56.41 +    local_irq_restore(flags);
   56.42 +}
   56.43 +
   56.44 +int _spin_is_locked(spinlock_t *lock)
   56.45 +{
   56.46 +    return _raw_spin_is_locked(&lock->raw);
   56.47 +}
   56.48 +
   56.49 +int _spin_trylock(spinlock_t *lock)
   56.50 +{
   56.51 +    return _raw_spin_trylock(&lock->raw);
   56.52 +}
   56.53 +
   56.54 +void _spin_barrier(spinlock_t *lock)
   56.55 +{
   56.56 +    do { mb(); } while ( _raw_spin_is_locked(&lock->raw) );
   56.57 +    mb();
   56.58 +}
   56.59 +
   56.60 +void _spin_lock_recursive(spinlock_t *lock)
   56.61 +{
   56.62 +    int cpu = smp_processor_id();
   56.63 +
   56.64 +    /* Don't allow overflow of recurse_cpu field. */
   56.65 +    BUILD_BUG_ON(NR_CPUS > 0xfffu);
   56.66 +
   56.67 +    if ( likely(lock->recurse_cpu != cpu) )
   56.68 +    {
   56.69 +        spin_lock(lock);
   56.70 +        lock->recurse_cpu = cpu;
   56.71 +    }
   56.72 +
   56.73 +    /* We support only fairly shallow recursion, else the counter overflows. */
   56.74 +    ASSERT(lock->recurse_cnt < 0xfu);
   56.75 +    lock->recurse_cnt++;
   56.76 +}
   56.77 +
   56.78 +void _spin_unlock_recursive(spinlock_t *lock)
   56.79 +{
   56.80 +    if ( likely(--lock->recurse_cnt == 0) )
   56.81 +    {
   56.82 +        lock->recurse_cpu = 0xfffu;
   56.83 +        spin_unlock(lock);
   56.84 +    }
   56.85 +}
   56.86 +
   56.87 +void _read_lock(rwlock_t *lock)
   56.88 +{
   56.89 +    _raw_read_lock(&lock->raw);
   56.90 +}
   56.91 +
   56.92 +void _read_lock_irq(rwlock_t *lock)
   56.93 +{
   56.94 +    local_irq_disable();
   56.95 +    _raw_read_lock(&lock->raw);
   56.96 +}
   56.97 +
   56.98 +unsigned long _read_lock_irqsave(rwlock_t *lock)
   56.99 +{
  56.100 +    unsigned long flags;
  56.101 +    local_irq_save(flags);
  56.102 +    _raw_read_lock(&lock->raw);
  56.103 +    return flags;
  56.104 +}
  56.105 +
  56.106 +void _read_unlock(rwlock_t *lock)
  56.107 +{
  56.108 +    _raw_read_unlock(&lock->raw);
  56.109 +}
  56.110 +
  56.111 +void _read_unlock_irq(rwlock_t *lock)
  56.112 +{
  56.113 +    _raw_read_unlock(&lock->raw);
  56.114 +    local_irq_enable();
  56.115 +}
  56.116 +
  56.117 +void _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
  56.118 +{
  56.119 +    _raw_read_unlock(&lock->raw);
  56.120 +    local_irq_restore(flags);
  56.121 +}
  56.122 +
  56.123 +void _write_lock(rwlock_t *lock)
  56.124 +{
  56.125 +    _raw_write_lock(&lock->raw);
  56.126 +}
  56.127 +
  56.128 +void _write_lock_irq(rwlock_t *lock)
  56.129 +{
  56.130 +    local_irq_disable();
  56.131 +    _raw_write_lock(&lock->raw);
  56.132 +}
  56.133 +
  56.134 +unsigned long _write_lock_irqsave(rwlock_t *lock)
  56.135 +{
  56.136 +    unsigned long flags;
  56.137 +    local_irq_save(flags);
  56.138 +    _raw_write_lock(&lock->raw);
  56.139 +    return flags;
  56.140 +}
  56.141 +
  56.142 +void _write_unlock(rwlock_t *lock)
  56.143 +{
  56.144 +    _raw_write_unlock(&lock->raw);
  56.145 +}
  56.146 +
  56.147 +void _write_unlock_irq(rwlock_t *lock)
  56.148 +{
  56.149 +    _raw_write_unlock(&lock->raw);
  56.150 +    local_irq_enable();
  56.151 +}
  56.152 +
  56.153 +void _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
  56.154 +{
  56.155 +    _raw_write_unlock(&lock->raw);
  56.156 +    local_irq_restore(flags);
  56.157 +}
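
The recursive lock added above records the owning CPU in recurse_cpu and a nesting depth in recurse_cnt, so a CPU can safely re-take a lock it already holds. A minimal caller sketch (illustrative only; it assumes the spin_lock_recursive()/spin_unlock_recursive() wrappers declared in xen/spinlock.h and a hypothetical example_lock):

#include <xen/spinlock.h>

static spinlock_t example_lock = SPIN_LOCK_UNLOCKED;   /* hypothetical lock */

static void inner(void)
{
    spin_lock_recursive(&example_lock);   /* same CPU: recurse_cnt 1 -> 2 */
    /* ... touch data guarded by example_lock ... */
    spin_unlock_recursive(&example_lock); /* recurse_cnt 2 -> 1, lock still held */
}

static void outer(void)
{
    spin_lock_recursive(&example_lock);   /* takes the raw lock, recurse_cnt = 1 */
    inner();                              /* a plain spin_lock() here would deadlock */
    spin_unlock_recursive(&example_lock); /* recurse_cnt reaches 0, raw lock released */
}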
    57.1 --- a/xen/common/timer.c	Wed Oct 22 11:38:22 2008 +0900
    57.2 +++ b/xen/common/timer.c	Wed Oct 22 11:46:55 2008 +0900
    57.3 @@ -114,34 +114,19 @@ static int remove_from_heap(struct timer
    57.4  
    57.5  
    57.6  /* Add new entry @t to @heap. Return TRUE if new top of heap. */
    57.7 -static int add_to_heap(struct timer ***pheap, struct timer *t)
    57.8 +static int add_to_heap(struct timer **heap, struct timer *t)
    57.9  {
   57.10 -    struct timer **heap = *pheap;
   57.11      int sz = GET_HEAP_SIZE(heap);
   57.12  
   57.13 -    /* Copy the heap if it is full. */
   57.14 +    /* Fail if the heap is full. */
   57.15      if ( unlikely(sz == GET_HEAP_LIMIT(heap)) )
   57.16 -    {
   57.17 -        /* old_limit == (2^n)-1; new_limit == (2^(n+4))-1 */
   57.18 -        int old_limit = GET_HEAP_LIMIT(heap);
   57.19 -        int new_limit = ((old_limit + 1) << 4) - 1;
   57.20 -        if ( in_irq() )
   57.21 -            goto out;
   57.22 -        heap = xmalloc_array(struct timer *, new_limit + 1);
   57.23 -        if ( heap == NULL )
   57.24 -            goto out;
   57.25 -        memcpy(heap, *pheap, (old_limit + 1) * sizeof(*heap));
   57.26 -        SET_HEAP_LIMIT(heap, new_limit);
   57.27 -        if ( old_limit != 0 )
   57.28 -            xfree(*pheap);
   57.29 -        *pheap = heap;
   57.30 -    }
   57.31 +        return 0;
   57.32  
   57.33      SET_HEAP_SIZE(heap, ++sz);
   57.34      heap[sz] = t;
   57.35      t->heap_offset = sz;
   57.36      up_heap(heap, sz);
   57.37 - out:
   57.38 +
   57.39      return (t->heap_offset == 1);
   57.40  }
   57.41  
   57.42 @@ -210,7 +195,7 @@ static int add_entry(struct timers *time
   57.43      /* Try to add to heap. t->heap_offset indicates whether we succeed. */
   57.44      t->heap_offset = 0;
   57.45      t->status = TIMER_STATUS_in_heap;
   57.46 -    rc = add_to_heap(&timers->heap, t);
   57.47 +    rc = add_to_heap(timers->heap, t);
   57.48      if ( t->heap_offset != 0 )
   57.49          return rc;
   57.50  
   57.51 @@ -368,6 +353,27 @@ static void timer_softirq_action(void)
   57.52      void          *data;
   57.53  
   57.54      ts = &this_cpu(timers);
   57.55 +    heap = ts->heap;
   57.56 +
    57.57 +    /* If we are using the overflow linked list, try to allocate a larger heap. */
   57.58 +    if ( unlikely(ts->list != NULL) )
   57.59 +    {
   57.60 +        /* old_limit == (2^n)-1; new_limit == (2^(n+4))-1 */
   57.61 +        int old_limit = GET_HEAP_LIMIT(heap);
   57.62 +        int new_limit = ((old_limit + 1) << 4) - 1;
   57.63 +        struct timer **newheap = xmalloc_array(struct timer *, new_limit + 1);
   57.64 +        if ( newheap != NULL )
   57.65 +        {
   57.66 +            spin_lock_irq(&ts->lock);
   57.67 +            memcpy(newheap, heap, (old_limit + 1) * sizeof(*heap));
   57.68 +            SET_HEAP_LIMIT(newheap, new_limit);
   57.69 +            ts->heap = newheap;
   57.70 +            spin_unlock_irq(&ts->lock);
   57.71 +            if ( old_limit != 0 )
   57.72 +                xfree(heap);
   57.73 +            heap = newheap;
   57.74 +        }
   57.75 +    }
   57.76  
   57.77      spin_lock_irq(&ts->lock);
   57.78  
   57.79 @@ -380,9 +386,8 @@ static void timer_softirq_action(void)
   57.80          t->status = TIMER_STATUS_inactive;
   57.81          add_entry(ts, t);
   57.82      }
   57.83 -    
   57.84 -    heap = ts->heap;
   57.85 -    now  = NOW();
   57.86 +
   57.87 +    now = NOW();
   57.88  
   57.89      while ( (GET_HEAP_SIZE(heap) != 0) &&
   57.90              ((t = heap[1])->expires < (now + TIMER_SLOP)) )
   57.91 @@ -397,9 +402,6 @@ static void timer_softirq_action(void)
   57.92          spin_unlock_irq(&ts->lock);
   57.93          (*fn)(data);
   57.94          spin_lock_irq(&ts->lock);
   57.95 -
   57.96 -        /* Heap may have grown while the lock was released. */
   57.97 -        heap = ts->heap;
   57.98      }
   57.99  
  57.100      deadline = GET_HEAP_SIZE(heap) ? heap[1]->expires : 0;
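
With the reworked growth path above, add_to_heap() now simply fails when the heap is full, the timer falls back to the per-CPU overflow list, and timer_softirq_action() reallocates the heap outside IRQ context before draining that list. Per the comment old_limit == (2^n)-1, new_limit == (2^(n+4))-1, each growth step multiplies the capacity sixteen-fold: a limit of 15 entries becomes ((15 + 1) << 4) - 1 = 255, then 4095, and so on.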
    58.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    58.2 +++ b/xen/common/xmalloc_tlsf.c	Wed Oct 22 11:46:55 2008 +0900
    58.3 @@ -0,0 +1,599 @@
    58.4 +/*
    58.5 + * Two Levels Segregate Fit memory allocator (TLSF)
    58.6 + * Version 2.3.2
    58.7 + *
    58.8 + * Written by Miguel Masmano Tello <mimastel@doctor.upv.es>
    58.9 + *
   58.10 + * Thanks to Ismael Ripoll for his suggestions and reviews
   58.11 + *
   58.12 + * Copyright (C) 2007, 2006, 2005, 2004
   58.13 + *
   58.14 + * This code is released using a dual license strategy: GPL/LGPL
   58.15 + * You can choose the licence that better fits your requirements.
   58.16 + *
   58.17 + * Released under the terms of the GNU General Public License Version 2.0
   58.18 + * Released under the terms of the GNU Lesser General Public License 
   58.19 + * Version 2.1
   58.20 + *
    58.21 + * This is a kernel port of the TLSF allocator.
   58.22 + * Original code can be found at: http://rtportal.upv.es/rtmalloc/
   58.23 + * Adapted for Linux by Nitin Gupta (nitingupta910@gmail.com)
   58.24 + * (http://code.google.com/p/compcache/source/browse/trunk/sub-projects
    58.25 + *  /allocators/tlsf-kmod r229 dated Aug 27, 2008)
   58.26 + * Adapted for Xen by Dan Magenheimer (dan.magenheimer@oracle.com)
   58.27 + */
   58.28 +
   58.29 +#include <xen/config.h>
   58.30 +#include <xen/irq.h>
   58.31 +#include <xen/mm.h>
   58.32 +#include <asm/time.h>
   58.33 +
   58.34 +#define MAX_POOL_NAME_LEN       16
   58.35 +
   58.36 +/* Some IMPORTANT TLSF parameters */
   58.37 +#define MEM_ALIGN       (sizeof(void *) * 2)
   58.38 +#define MEM_ALIGN_MASK  (~(MEM_ALIGN - 1))
   58.39 +
   58.40 +#define MAX_FLI         (30)
   58.41 +#define MAX_LOG2_SLI    (5)
   58.42 +#define MAX_SLI         (1 << MAX_LOG2_SLI)
   58.43 +
   58.44 +#define FLI_OFFSET      (6)
    58.45 +/* The TLSF structure only manages blocks bigger than 128 bytes */
   58.46 +#define SMALL_BLOCK     (128)
   58.47 +#define REAL_FLI        (MAX_FLI - FLI_OFFSET)
   58.48 +#define MIN_BLOCK_SIZE  (sizeof(struct free_ptr))
   58.49 +#define BHDR_OVERHEAD   (sizeof(struct bhdr) - MIN_BLOCK_SIZE)
   58.50 +
   58.51 +#define PTR_MASK        (sizeof(void *) - 1)
   58.52 +#define BLOCK_SIZE_MASK (0xFFFFFFFF - PTR_MASK)
   58.53 +
   58.54 +#define GET_NEXT_BLOCK(addr, r) ((struct bhdr *) \
   58.55 +                                ((char *)(addr) + (r)))
   58.56 +#define ROUNDUP_SIZE(r)         (((r) + MEM_ALIGN - 1) & MEM_ALIGN_MASK)
   58.57 +#define ROUNDDOWN_SIZE(r)       ((r) & MEM_ALIGN_MASK)
   58.58 +#define ROUNDUP_PAGE(r)         (((r) + PAGE_SIZE - 1) & PAGE_MASK)
   58.59 +
   58.60 +#define BLOCK_STATE     (0x1)
   58.61 +#define PREV_STATE      (0x2)
   58.62 +
   58.63 +/* bit 0 of the block size */
   58.64 +#define FREE_BLOCK      (0x1)
   58.65 +#define USED_BLOCK      (0x0)
   58.66 +
   58.67 +/* bit 1 of the block size */
   58.68 +#define PREV_FREE       (0x2)
   58.69 +#define PREV_USED       (0x0)
   58.70 +
   58.71 +static spinlock_t pool_list_lock;
   58.72 +static struct list_head pool_list_head;
   58.73 +
   58.74 +struct free_ptr {
   58.75 +    struct bhdr *prev;
   58.76 +    struct bhdr *next;
   58.77 +};
   58.78 +
   58.79 +struct bhdr {
   58.80 +    /* All blocks in a region are linked in order of physical address */
   58.81 +    struct bhdr *prev_hdr;
   58.82 +    /*
   58.83 +     * The size is stored in bytes
   58.84 +     *  bit 0: block is free, if set
   58.85 +     *  bit 1: previous block is free, if set
   58.86 +     */
   58.87 +    u32 size;
   58.88 +    /* Free blocks in individual freelists are linked */
   58.89 +    union {
   58.90 +        struct free_ptr free_ptr;
   58.91 +        u8 buffer[sizeof(struct free_ptr)];
   58.92 +    } ptr;
   58.93 +};
   58.94 +
   58.95 +struct xmem_pool {
   58.96 +    /* First level bitmap (REAL_FLI bits) */
   58.97 +    u32 fl_bitmap;
   58.98 +
   58.99 +    /* Second level bitmap */
  58.100 +    u32 sl_bitmap[REAL_FLI];
  58.101 +
  58.102 +    /* Free lists */
  58.103 +    struct bhdr *matrix[REAL_FLI][MAX_SLI];
  58.104 +
  58.105 +    spinlock_t lock;
  58.106 +
  58.107 +    unsigned long init_size;
  58.108 +    unsigned long max_size;
  58.109 +    unsigned long grow_size;
  58.110 +
  58.111 +    /* Basic stats */
  58.112 +    unsigned long used_size;
  58.113 +    unsigned long num_regions;
  58.114 +
  58.115 +    /* User provided functions for expanding/shrinking pool */
  58.116 +    xmem_pool_get_memory *get_mem;
  58.117 +    xmem_pool_put_memory *put_mem;
  58.118 +
  58.119 +    struct list_head list;
  58.120 +
  58.121 +    void *init_region;
  58.122 +    char name[MAX_POOL_NAME_LEN];
  58.123 +};
  58.124 +
  58.125 +/*
   58.126 + * Helper functions
  58.127 + */
  58.128 +
  58.129 +/**
   58.130 + * Returns the indexes (fl, sl) of the list used to serve a request of size r
  58.131 + */
  58.132 +static inline void MAPPING_SEARCH(unsigned long *r, int *fl, int *sl)
  58.133 +{
  58.134 +    int t;
  58.135 +
  58.136 +    if ( *r < SMALL_BLOCK )
  58.137 +    {
  58.138 +        *fl = 0;
  58.139 +        *sl = *r / (SMALL_BLOCK / MAX_SLI);
  58.140 +    }
  58.141 +    else
  58.142 +    {
  58.143 +        t = (1 << (fls(*r) - 1 - MAX_LOG2_SLI)) - 1;
  58.144 +        *r = *r + t;
  58.145 +        *fl = fls(*r) - 1;
  58.146 +        *sl = (*r >> (*fl - MAX_LOG2_SLI)) - MAX_SLI;
  58.147 +        *fl -= FLI_OFFSET;
  58.148 +        /*if ((*fl -= FLI_OFFSET) < 0) // FL will be always >0!
  58.149 +         *fl = *sl = 0;
  58.150 +         */
  58.151 +        *r &= ~t;
  58.152 +    }
  58.153 +}
  58.154 +
  58.155 +/**
   58.156 + * Returns the indexes (fl, sl) used as the starting point to search
   58.157 + * for a block of size r. It also rounds the requested size (r) up to
   58.158 + * the next list.
  58.159 + */
  58.160 +static inline void MAPPING_INSERT(unsigned long r, int *fl, int *sl)
  58.161 +{
  58.162 +    if ( r < SMALL_BLOCK )
  58.163 +    {
  58.164 +        *fl = 0;
  58.165 +        *sl = r / (SMALL_BLOCK / MAX_SLI);
  58.166 +    }
  58.167 +    else
  58.168 +    {
  58.169 +        *fl = fls(r) - 1;
  58.170 +        *sl = (r >> (*fl - MAX_LOG2_SLI)) - MAX_SLI;
  58.171 +        *fl -= FLI_OFFSET;
  58.172 +    }
  58.173 +}
  58.174 +
  58.175 +/**
   58.176 + * Returns the first block from a list that holds blocks larger than or
   58.177 + * equal to the one pointed to by the indexes (fl, sl)
  58.178 + */
  58.179 +static inline struct bhdr *FIND_SUITABLE_BLOCK(struct xmem_pool *p, int *fl,
  58.180 +                                               int *sl)
  58.181 +{
  58.182 +    u32 tmp = p->sl_bitmap[*fl] & (~0 << *sl);
  58.183 +    struct bhdr *b = NULL;
  58.184 +
  58.185 +    if ( tmp )
  58.186 +    {
  58.187 +        *sl = ffs(tmp) - 1;
  58.188 +        b = p->matrix[*fl][*sl];
  58.189 +    }
  58.190 +    else
  58.191 +    {
  58.192 +        *fl = ffs(p->fl_bitmap & (~0 << (*fl + 1))) - 1;
  58.193 +        if ( likely(*fl > 0) )
  58.194 +        {
  58.195 +            *sl = ffs(p->sl_bitmap[*fl]) - 1;
  58.196 +            b = p->matrix[*fl][*sl];
  58.197 +        }
  58.198 +    }
  58.199 +
  58.200 +    return b;
  58.201 +}
  58.202 +
  58.203 +/**
   58.204 + * Removes the first free block (b) from the free list with indexes (fl, sl).
  58.205 + */
  58.206 +static inline void EXTRACT_BLOCK_HDR(struct bhdr *b, struct xmem_pool *p, int fl,
  58.207 +                                     int sl)
  58.208 +{
  58.209 +    p->matrix[fl][sl] = b->ptr.free_ptr.next;
  58.210 +    if ( p->matrix[fl][sl] )
  58.211 +    {
  58.212 +        p->matrix[fl][sl]->ptr.free_ptr.prev = NULL;
  58.213 +    }
  58.214 +    else
  58.215 +    {
  58.216 +        clear_bit(sl, &p->sl_bitmap[fl]);
  58.217 +        if ( !p->sl_bitmap[fl] )
  58.218 +            clear_bit(fl, &p->fl_bitmap);
  58.219 +    }
  58.220 +    b->ptr.free_ptr = (struct free_ptr) {NULL, NULL};
  58.221 +}
  58.222 +
  58.223 +/**
   58.224 + * Removes block (b) from the free list with indexes (fl, sl)
  58.225 + */
  58.226 +static inline void EXTRACT_BLOCK(struct bhdr *b, struct xmem_pool *p, int fl,
  58.227 +                                 int sl)
  58.228 +{
  58.229 +    if ( b->ptr.free_ptr.next )
  58.230 +        b->ptr.free_ptr.next->ptr.free_ptr.prev =
  58.231 +            b->ptr.free_ptr.prev;
  58.232 +    if ( b->ptr.free_ptr.prev )
  58.233 +        b->ptr.free_ptr.prev->ptr.free_ptr.next =
  58.234 +            b->ptr.free_ptr.next;
  58.235 +    if ( p->matrix[fl][sl] == b )
  58.236 +    {
  58.237 +        p->matrix[fl][sl] = b->ptr.free_ptr.next;
  58.238 +        if ( !p->matrix[fl][sl] )
  58.239 +        {
  58.240 +            clear_bit(sl, &p->sl_bitmap[fl]);
  58.241 +            if ( !p->sl_bitmap[fl] )
  58.242 +                clear_bit (fl, &p->fl_bitmap);
  58.243 +        }
  58.244 +    }
  58.245 +    b->ptr.free_ptr = (struct free_ptr) {NULL, NULL};
  58.246 +}
  58.247 +
  58.248 +/**
   58.249 + * Inserts block (b) into the free list with indexes (fl, sl)
  58.250 + */
  58.251 +static inline void INSERT_BLOCK(struct bhdr *b, struct xmem_pool *p, int fl, int sl)
  58.252 +{
  58.253 +    b->ptr.free_ptr = (struct free_ptr) {NULL, p->matrix[fl][sl]};
  58.254 +    if ( p->matrix[fl][sl] )
  58.255 +        p->matrix[fl][sl]->ptr.free_ptr.prev = b;
  58.256 +    p->matrix[fl][sl] = b;
  58.257 +    set_bit(sl, &p->sl_bitmap[fl]);
  58.258 +    set_bit(fl, &p->fl_bitmap);
  58.259 +}
  58.260 +
  58.261 +/**
   58.262 + * A region is a virtually contiguous memory region, and a pool is a
   58.263 + * collection of such regions
  58.264 + */
  58.265 +static inline void ADD_REGION(void *region, unsigned long region_size,
  58.266 +                              struct xmem_pool *pool)
  58.267 +{
  58.268 +    int fl, sl;
  58.269 +    struct bhdr *b, *lb;
  58.270 +
  58.271 +    b = (struct bhdr *)(region);
  58.272 +    b->prev_hdr = NULL;
  58.273 +    b->size = ROUNDDOWN_SIZE(region_size - 2 * BHDR_OVERHEAD)
  58.274 +        | FREE_BLOCK | PREV_USED;
  58.275 +    MAPPING_INSERT(b->size & BLOCK_SIZE_MASK, &fl, &sl);
  58.276 +    INSERT_BLOCK(b, pool, fl, sl);
  58.277 +    /* The sentinel block: allows us to know when we're in the last block */
  58.278 +    lb = GET_NEXT_BLOCK(b->ptr.buffer, b->size & BLOCK_SIZE_MASK);
  58.279 +    lb->prev_hdr = b;
  58.280 +    lb->size = 0 | USED_BLOCK | PREV_FREE;
  58.281 +    pool->used_size += BHDR_OVERHEAD; /* only sentinel block is "used" */
  58.282 +    pool->num_regions++;
  58.283 +}
  58.284 +
  58.285 +/*
  58.286 + * TLSF pool-based allocator start.
  58.287 + */
  58.288 +
  58.289 +struct xmem_pool *xmem_pool_create(
  58.290 +    const char *name,
  58.291 +    xmem_pool_get_memory get_mem,
  58.292 +    xmem_pool_put_memory put_mem,
  58.293 +    unsigned long init_size,
  58.294 +    unsigned long max_size,
  58.295 +    unsigned long grow_size)
  58.296 +{
  58.297 +    struct xmem_pool *pool;
  58.298 +    void *region;
  58.299 +    int pool_bytes, pool_order;
  58.300 +
  58.301 +    BUG_ON(max_size && (max_size < init_size));
  58.302 +
  58.303 +    pool_bytes = ROUNDUP_SIZE(sizeof(*pool));
  58.304 +    pool_order = get_order_from_bytes(pool_bytes);
  58.305 +
  58.306 +    pool = (void *)alloc_xenheap_pages(pool_order);
  58.307 +    if ( pool == NULL )
  58.308 +        return NULL;
  58.309 +    memset(pool, 0, pool_bytes);
  58.310 +
  58.311 +    /* Round to next page boundary */
  58.312 +    init_size = ROUNDUP_PAGE(init_size);
  58.313 +    max_size = ROUNDUP_PAGE(max_size);
  58.314 +    grow_size = ROUNDUP_PAGE(grow_size);
  58.315 +
  58.316 +    /* pool global overhead not included in used size */
  58.317 +    pool->used_size = 0;
  58.318 +
  58.319 +    pool->init_size = init_size;
  58.320 +    pool->max_size = max_size;
  58.321 +    pool->grow_size = grow_size;
  58.322 +    pool->get_mem = get_mem;
  58.323 +    pool->put_mem = put_mem;
  58.324 +    strlcpy(pool->name, name, sizeof(pool->name));
  58.325 +    region = get_mem(init_size);
  58.326 +    if ( region == NULL )
  58.327 +        goto out_region;
  58.328 +    ADD_REGION(region, init_size, pool);
  58.329 +    pool->init_region = region;
  58.330 +
  58.331 +    spin_lock_init(&pool->lock);
  58.332 +
  58.333 +    spin_lock(&pool_list_lock);
  58.334 +    list_add_tail(&pool->list, &pool_list_head);
  58.335 +    spin_unlock(&pool_list_lock);
  58.336 +
  58.337 +    return pool;
  58.338 +
  58.339 + out_region:
  58.340 +    free_xenheap_pages(pool, pool_order);
  58.341 +    return NULL;
  58.342 +}
  58.343 +
  58.344 +unsigned long xmem_pool_get_used_size(struct xmem_pool *pool)
  58.345 +{
  58.346 +    return pool->used_size;
  58.347 +}
  58.348 +
  58.349 +unsigned long xmem_pool_get_total_size(struct xmem_pool *pool)
  58.350 +{
  58.351 +    unsigned long total;
  58.352 +    total = ROUNDUP_SIZE(sizeof(*pool))
  58.353 +        + pool->init_size
  58.354 +        + (pool->num_regions - 1) * pool->grow_size;
  58.355 +    return total;
  58.356 +}
  58.357 +
  58.358 +void xmem_pool_destroy(struct xmem_pool *pool) 
  58.359 +{
  58.360 +    if ( pool == NULL )
  58.361 +        return;
  58.362 +
  58.363 +    /* User is destroying without ever allocating from this pool */
  58.364 +    if ( xmem_pool_get_used_size(pool) == BHDR_OVERHEAD )
  58.365 +    {
  58.366 +        pool->put_mem(pool->init_region);
  58.367 +        pool->used_size -= BHDR_OVERHEAD;
  58.368 +    }
  58.369 +
  58.370 +    /* Check for memory leaks in this pool */
  58.371 +    if ( xmem_pool_get_used_size(pool) )
  58.372 +        printk("memory leak in pool: %s (%p). "
  58.373 +               "%lu bytes still in use.\n",
  58.374 +               pool->name, pool, xmem_pool_get_used_size(pool));
  58.375 +
  58.376 +    spin_lock(&pool_list_lock);
  58.377 +    list_del_init(&pool->list);
  58.378 +    spin_unlock(&pool_list_lock);
  58.379 +    pool->put_mem(pool);
  58.380 +}
  58.381 +
  58.382 +void *xmem_pool_alloc(unsigned long size, struct xmem_pool *pool)
  58.383 +{
  58.384 +    struct bhdr *b, *b2, *next_b, *region;
  58.385 +    int fl, sl;
  58.386 +    unsigned long tmp_size;
  58.387 +
  58.388 +    size = (size < MIN_BLOCK_SIZE) ? MIN_BLOCK_SIZE : ROUNDUP_SIZE(size);
  58.389 +    /* Rounding up the requested size and calculating fl and sl */
  58.390 +
  58.391 +    spin_lock(&pool->lock);
  58.392 + retry_find:
  58.393 +    MAPPING_SEARCH(&size, &fl, &sl);
  58.394 +
  58.395 +    /* Searching a free block */
  58.396 +    if ( !(b = FIND_SUITABLE_BLOCK(pool, &fl, &sl)) )
  58.397 +    {
  58.398 +        /* Not found */
  58.399 +        if ( size > (pool->grow_size - 2 * BHDR_OVERHEAD) )
  58.400 +            goto out_locked;
  58.401 +        if ( pool->max_size && (pool->init_size +
  58.402 +                                pool->num_regions * pool->grow_size
  58.403 +                                > pool->max_size) )
  58.404 +            goto out_locked;
  58.405 +        spin_unlock(&pool->lock);
  58.406 +        if ( (region = pool->get_mem(pool->grow_size)) == NULL )
  58.407 +            goto out;
  58.408 +        spin_lock(&pool->lock);
  58.409 +        ADD_REGION(region, pool->grow_size, pool);
  58.410 +        goto retry_find;
  58.411 +    }
  58.412 +    EXTRACT_BLOCK_HDR(b, pool, fl, sl);
  58.413 +
  58.414 +    /*-- found: */
  58.415 +    next_b = GET_NEXT_BLOCK(b->ptr.buffer, b->size & BLOCK_SIZE_MASK);
  58.416 +    /* Should the block be split? */
  58.417 +    tmp_size = (b->size & BLOCK_SIZE_MASK) - size;
  58.418 +    if ( tmp_size >= sizeof(struct bhdr) )
  58.419 +    {
  58.420 +        tmp_size -= BHDR_OVERHEAD;
  58.421 +        b2 = GET_NEXT_BLOCK(b->ptr.buffer, size);
  58.422 +
  58.423 +        b2->size = tmp_size | FREE_BLOCK | PREV_USED;
  58.424 +        b2->prev_hdr = b;
  58.425 +
  58.426 +        next_b->prev_hdr = b2;
  58.427 +
  58.428 +        MAPPING_INSERT(tmp_size, &fl, &sl);
  58.429 +        INSERT_BLOCK(b2, pool, fl, sl);
  58.430 +
  58.431 +        b->size = size | (b->size & PREV_STATE);
  58.432 +    }
  58.433 +    else
  58.434 +    {
  58.435 +        next_b->size &= (~PREV_FREE);
  58.436 +        b->size &= (~FREE_BLOCK); /* Now it's used */
  58.437 +    }
  58.438 +
  58.439 +    pool->used_size += (b->size & BLOCK_SIZE_MASK) + BHDR_OVERHEAD;
  58.440 +
  58.441 +    spin_unlock(&pool->lock);
  58.442 +    return (void *)b->ptr.buffer;
  58.443 +
  58.444 +    /* Failed alloc */
  58.445 + out_locked:
  58.446 +    spin_unlock(&pool->lock);
  58.447 +
  58.448 + out:
  58.449 +    return NULL;
  58.450 +}
  58.451 +
  58.452 +void xmem_pool_free(void *ptr, struct xmem_pool *pool)
  58.453 +{
  58.454 +    struct bhdr *b, *tmp_b;
  58.455 +    int fl = 0, sl = 0;
  58.456 +
  58.457 +    if ( unlikely(ptr == NULL) )
  58.458 +        return;
  58.459 +
  58.460 +    b = (struct bhdr *)((char *) ptr - BHDR_OVERHEAD);
  58.461 +
  58.462 +    spin_lock(&pool->lock);
  58.463 +    b->size |= FREE_BLOCK;
  58.464 +    pool->used_size -= (b->size & BLOCK_SIZE_MASK) + BHDR_OVERHEAD;
  58.465 +    b->ptr.free_ptr = (struct free_ptr) { NULL, NULL};
  58.466 +    tmp_b = GET_NEXT_BLOCK(b->ptr.buffer, b->size & BLOCK_SIZE_MASK);
  58.467 +    if ( tmp_b->size & FREE_BLOCK )
  58.468 +    {
  58.469 +        MAPPING_INSERT(tmp_b->size & BLOCK_SIZE_MASK, &fl, &sl);
  58.470 +        EXTRACT_BLOCK(tmp_b, pool, fl, sl);
  58.471 +        b->size += (tmp_b->size & BLOCK_SIZE_MASK) + BHDR_OVERHEAD;
  58.472 +    }
  58.473 +    if ( b->size & PREV_FREE )
  58.474 +    {
  58.475 +        tmp_b = b->prev_hdr;
  58.476 +        MAPPING_INSERT(tmp_b->size & BLOCK_SIZE_MASK, &fl, &sl);
  58.477 +        EXTRACT_BLOCK(tmp_b, pool, fl, sl);
  58.478 +        tmp_b->size += (b->size & BLOCK_SIZE_MASK) + BHDR_OVERHEAD;
  58.479 +        b = tmp_b;
  58.480 +    }
  58.481 +    tmp_b = GET_NEXT_BLOCK(b->ptr.buffer, b->size & BLOCK_SIZE_MASK);
  58.482 +    tmp_b->prev_hdr = b;
  58.483 +
  58.484 +    MAPPING_INSERT(b->size & BLOCK_SIZE_MASK, &fl, &sl);
  58.485 +
  58.486 +    if ( (b->prev_hdr == NULL) && ((tmp_b->size & BLOCK_SIZE_MASK) == 0) )
  58.487 +    {
  58.488 +        pool->put_mem(b);
  58.489 +        pool->num_regions--;
  58.490 +        pool->used_size -= BHDR_OVERHEAD; /* sentinel block header */
  58.491 +        goto out;
  58.492 +    }
  58.493 +
  58.494 +    INSERT_BLOCK(b, pool, fl, sl);
  58.495 +
  58.496 +    tmp_b->size |= PREV_FREE;
  58.497 +    tmp_b->prev_hdr = b;
  58.498 + out:
  58.499 +    spin_unlock(&pool->lock);
  58.500 +}
  58.501 +
  58.502 +/*
  58.503 + * Glue for xmalloc().
  58.504 + */
  58.505 +
  58.506 +static struct xmem_pool *xenpool;
  58.507 +
  58.508 +static void *xmalloc_pool_get(unsigned long size)
  58.509 +{
  58.510 +    ASSERT(size == PAGE_SIZE);
  58.511 +    return alloc_xenheap_pages(0);
  58.512 +}
  58.513 +
  58.514 +static void xmalloc_pool_put(void *p)
  58.515 +{
  58.516 +    free_xenheap_pages(p,0);
  58.517 +}
  58.518 +
  58.519 +static void *xmalloc_whole_pages(unsigned long size)
  58.520 +{
  58.521 +    struct bhdr *b;
  58.522 +    unsigned int pageorder = get_order_from_bytes(size + BHDR_OVERHEAD);
  58.523 +
  58.524 +    b = alloc_xenheap_pages(pageorder);
  58.525 +    if ( b == NULL )
  58.526 +        return NULL;
  58.527 +
  58.528 +    b->size = (1 << (pageorder + PAGE_SHIFT));
  58.529 +    return (void *)b->ptr.buffer;
  58.530 +}
  58.531 +
  58.532 +static void tlsf_init(void)
  58.533 +{
  58.534 +    INIT_LIST_HEAD(&pool_list_head);
  58.535 +    spin_lock_init(&pool_list_lock);
  58.536 +    xenpool = xmem_pool_create(
  58.537 +        "xmalloc", xmalloc_pool_get, xmalloc_pool_put,
  58.538 +        PAGE_SIZE, 0, PAGE_SIZE);
  58.539 +    BUG_ON(!xenpool);
  58.540 +}
  58.541 +
  58.542 +/*
  58.543 + * xmalloc()
  58.544 + */
  58.545 +
  58.546 +void *_xmalloc(unsigned long size, unsigned long align)
  58.547 +{
  58.548 +    void *p;
  58.549 +    u32 pad;
  58.550 +
  58.551 +    ASSERT(!in_irq());
  58.552 +
  58.553 +    ASSERT((align & (align - 1)) == 0);
  58.554 +    if ( align < MEM_ALIGN )
  58.555 +        align = MEM_ALIGN;
  58.556 +    size += align - MEM_ALIGN;
  58.557 +
  58.558 +    if ( !xenpool )
  58.559 +        tlsf_init();
  58.560 +
  58.561 +    if ( size >= (PAGE_SIZE - (2*BHDR_OVERHEAD)) )
  58.562 +        p = xmalloc_whole_pages(size);
  58.563 +    else
  58.564 +        p = xmem_pool_alloc(size, xenpool);
  58.565 +
  58.566 +    /* Add alignment padding. */
  58.567 +    if ( (pad = -(long)p & (align - 1)) != 0 )
  58.568 +    {
  58.569 +        char *q = (char *)p + pad;
  58.570 +        struct bhdr *b = (struct bhdr *)(q - BHDR_OVERHEAD);
  58.571 +        ASSERT(q > (char *)p);
  58.572 +        b->size = pad | 1;
  58.573 +        p = q;
  58.574 +    }
  58.575 +
  58.576 +    ASSERT(((unsigned long)p & (align - 1)) == 0);
  58.577 +    return p;
  58.578 +}
  58.579 +
  58.580 +void xfree(void *p)
  58.581 +{
  58.582 +    struct bhdr *b;
  58.583 +
  58.584 +    ASSERT(!in_irq());
  58.585 +
  58.586 +    if ( p == NULL )
  58.587 +        return;
  58.588 +
  58.589 +    /* Strip alignment padding. */
  58.590 +    b = (struct bhdr *)((char *) p - BHDR_OVERHEAD);
  58.591 +    if ( b->size & 1 )
  58.592 +    {
  58.593 +        p = (char *)p - (b->size & ~1u);
  58.594 +        b = (struct bhdr *)((char *)p - BHDR_OVERHEAD);
  58.595 +        ASSERT(!(b->size & 1));
  58.596 +    }
  58.597 +
  58.598 +    if ( b->size >= (PAGE_SIZE - (2*BHDR_OVERHEAD)) )
  58.599 +        free_xenheap_pages((void *)b, get_order_from_bytes(b->size));
  58.600 +    else
  58.601 +        xmem_pool_free(p, xenpool);
  58.602 +}
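
As a worked example of the two-level index computed by MAPPING_INSERT() above (assuming fls() returns the 1-based position of the most significant set bit): a request of 200 bytes gives fl = fls(200) - 1 = 7 and sl = (200 >> (7 - MAX_LOG2_SLI)) - MAX_SLI = 50 - 32 = 18, after which fl is rebased by FLI_OFFSET to 1. Any request below SMALL_BLOCK (128 bytes) maps to fl = 0 with sl = size / (SMALL_BLOCK / MAX_SLI) = size / 4.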
    59.1 --- a/xen/drivers/Makefile	Wed Oct 22 11:38:22 2008 +0900
    59.2 +++ b/xen/drivers/Makefile	Wed Oct 22 11:46:55 2008 +0900
    59.3 @@ -1,6 +1,6 @@
    59.4  subdir-y += char
    59.5  subdir-y += cpufreq
    59.6  subdir-y += pci
    59.7 -subdir-$(x86) += passthrough
    59.8 +subdir-y += passthrough
    59.9  subdir-$(HAS_ACPI) += acpi
   59.10  subdir-$(HAS_VGA) += video
    60.1 --- a/xen/drivers/acpi/pmstat.c	Wed Oct 22 11:38:22 2008 +0900
    60.2 +++ b/xen/drivers/acpi/pmstat.c	Wed Oct 22 11:46:55 2008 +0900
    60.3 @@ -52,7 +52,7 @@ int do_get_pm_info(struct xen_sysctl_get
    60.4      int ret = 0;
    60.5      const struct processor_pminfo *pmpt;
    60.6  
    60.7 -    if ( (op->cpuid >= NR_CPUS) || !cpu_online(op->cpuid) )
    60.8 +    if ( !op || (op->cpuid >= NR_CPUS) || !cpu_online(op->cpuid) )
    60.9          return -EINVAL;
   60.10      pmpt = processor_pminfo[op->cpuid];
   60.11  
   60.12 @@ -87,7 +87,7 @@ int do_get_pm_info(struct xen_sysctl_get
   60.13          uint64_t tmp_idle_ns;
   60.14          struct pm_px *pxpt = cpufreq_statistic_data[op->cpuid];
   60.15  
   60.16 -        if ( !pxpt )
   60.17 +        if ( !pxpt || !pxpt->u.pt || !pxpt->u.trans_pt )
   60.18              return -ENODATA;
   60.19  
   60.20          total_idle_ns = get_cpu_idle_time(op->cpuid);
    61.1 --- a/xen/drivers/char/ns16550.c	Wed Oct 22 11:38:22 2008 +0900
    61.2 +++ b/xen/drivers/char/ns16550.c	Wed Oct 22 11:46:55 2008 +0900
    61.3 @@ -18,17 +18,19 @@
    61.4  #include <asm/io.h>
    61.5  
    61.6  /*
    61.7 - * Configure serial port with a string <baud>,DPS,<io-base>,<irq>.
    61.8 + * Configure serial port with a string:
    61.9 + *   <baud>[/<clock_hz>][,DPS[,<io-base>[,<irq>]]].
   61.10   * The tail of the string can be omitted if platform defaults are sufficient.
   61.11   * If the baud rate is pre-configured, perhaps by a bootloader, then 'auto'
   61.12 - * can be specified in place of a numeric baud rate.
   61.13 + * can be specified in place of a numeric baud rate. Polled mode is specified
   61.14 + * by requesting irq 0.
   61.15   */
   61.16  static char opt_com1[30] = "", opt_com2[30] = "";
   61.17  string_param("com1", opt_com1);
   61.18  string_param("com2", opt_com2);
   61.19  
   61.20  static struct ns16550 {
   61.21 -    int baud, data_bits, parity, stop_bits, irq;
   61.22 +    int baud, clock_hz, data_bits, parity, stop_bits, irq;
   61.23      unsigned long io_base;   /* I/O port or memory-mapped I/O address. */
   61.24      char *remapped_io_base;  /* Remapped virtual address of mmap I/O.  */ 
   61.25      /* UART with IRQ line: interrupt-driven I/O. */
   61.26 @@ -192,7 +194,7 @@ static void __devinit ns16550_init_preir
   61.27      if ( uart->baud != BAUD_AUTO )
   61.28      {
   61.29          /* Baud rate specified: program it into the divisor latch. */
   61.30 -        divisor = UART_CLOCK_HZ / (uart->baud * 16);
   61.31 +        divisor = uart->clock_hz / (uart->baud << 4);
   61.32          ns_write_reg(uart, DLL, (char)divisor);
   61.33          ns_write_reg(uart, DLM, (char)(divisor >> 8));
   61.34      }
   61.35 @@ -201,7 +203,7 @@ static void __devinit ns16550_init_preir
   61.36          /* Baud rate already set: read it out from the divisor latch. */
   61.37          divisor  = ns_read_reg(uart, DLL);
   61.38          divisor |= ns_read_reg(uart, DLM) << 8;
   61.39 -        uart->baud = UART_CLOCK_HZ / (divisor * 16);
   61.40 +        uart->baud = uart->clock_hz / (divisor << 4);
   61.41      }
   61.42      ns_write_reg(uart, LCR, lcr);
   61.43  
   61.44 @@ -355,6 +357,12 @@ static void __init ns16550_parse_port_co
   61.45      else if ( (baud = simple_strtoul(conf, &conf, 10)) != 0 )
   61.46          uart->baud = baud;
   61.47  
   61.48 +    if ( *conf == '/')
   61.49 +    {
   61.50 +        conf++;
   61.51 +        uart->clock_hz = simple_strtoul(conf, &conf, 0) << 4;
   61.52 +    }
   61.53 +
   61.54      if ( *conf != ',' )
   61.55          goto config_parsed;
   61.56      conf++;
   61.57 @@ -408,6 +416,7 @@ void __init ns16550_init(int index, stru
   61.58      uart->baud      = (defaults->baud ? :
   61.59                         console_has((index == 0) ? "com1" : "com2")
   61.60                         ? BAUD_AUTO : 0);
   61.61 +    uart->clock_hz  = UART_CLOCK_HZ;
   61.62      uart->data_bits = defaults->data_bits;
   61.63      uart->parity    = parse_parity_char(defaults->parity);
   61.64      uart->stop_bits = defaults->stop_bits;
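
With the extended syntax above, typical boot parameters look like the following (illustrative values only):

    com1=115200,8n1,0x3f8,4     (explicit baud rate, format, I/O port and IRQ)
    com1=115200,8n1,0x3f8,0     (IRQ 0 selects polled mode)
    com1=auto,8n1               (keep the baud rate programmed by the bootloader)

The optional /<clock_hz> suffix after the baud rate overrides the default UART_CLOCK_HZ assumed when programming the divisor latch.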
    62.1 --- a/xen/drivers/cpufreq/cpufreq.c	Wed Oct 22 11:38:22 2008 +0900
    62.2 +++ b/xen/drivers/cpufreq/cpufreq.c	Wed Oct 22 11:46:55 2008 +0900
    62.3 @@ -34,6 +34,7 @@
    62.4  #include <xen/sched.h>
    62.5  #include <xen/timer.h>
    62.6  #include <xen/xmalloc.h>
    62.7 +#include <xen/guest_access.h>
    62.8  #include <xen/domain.h>
    62.9  #include <asm/bug.h>
   62.10  #include <asm/io.h>
   62.11 @@ -185,10 +186,18 @@ int cpufreq_del_cpu(unsigned int cpu)
   62.12      return 0;
   62.13  }
   62.14  
   62.15 +static void print_PCT(struct xen_pct_register *ptr)
   62.16 +{
   62.17 +    printk(KERN_INFO "\t_PCT: descriptor=%d, length=%d, space_id=%d, "
   62.18 +            "bit_width=%d, bit_offset=%d, reserved=%d, address=%"PRId64"\n",
   62.19 +            ptr->descriptor, ptr->length, ptr->space_id, ptr->bit_width, 
   62.20 +            ptr->bit_offset, ptr->reserved, ptr->address);
   62.21 +}
   62.22 +
   62.23  static void print_PSS(struct xen_processor_px *ptr, int count)
   62.24  {
   62.25      int i;
   62.26 -    printk(KERN_INFO "\t_PSS:\n");
   62.27 +    printk(KERN_INFO "\t_PSS: state_count=%d\n", count);
   62.28      for (i=0; i<count; i++){
   62.29          printk(KERN_INFO "\tState%d: %"PRId64"MHz %"PRId64"mW %"PRId64"us "
   62.30                 "%"PRId64"us 0x%"PRIx64" 0x%"PRIx64"\n",
   62.31 @@ -211,20 +220,19 @@ static void print_PSD( struct xen_psd_pa
   62.32              ptr->num_processors);
   62.33  }
   62.34  
   62.35 +static void print_PPC(unsigned int platform_limit)
   62.36 +{
   62.37 +    printk(KERN_INFO "\t_PPC: %d\n", platform_limit);
   62.38 +}
   62.39 +
   62.40  int set_px_pminfo(uint32_t acpi_id, struct xen_processor_performance *dom0_px_info)
   62.41  {
   62.42      int ret=0, cpuid;
   62.43      struct processor_pminfo *pmpt;
   62.44      struct processor_performance *pxpt;
   62.45  
   62.46 -    if ( !(xen_processor_pmbits & XEN_PROCESSOR_PM_PX) )
   62.47 -    {
   62.48 -        ret = -ENOSYS;
   62.49 -        goto out;
   62.50 -    }
   62.51 -
   62.52      cpuid = get_cpu_id(acpi_id);
   62.53 -    if ( cpuid < 0 )
   62.54 +    if ( cpuid < 0 || !dom0_px_info)
   62.55      {
   62.56          ret = -EINVAL;
   62.57          goto out;
   62.58 @@ -256,6 +264,8 @@ int set_px_pminfo(uint32_t acpi_id, stru
   62.59          memcpy ((void *)&pxpt->status_register,
   62.60                  (void *)&dom0_px_info->status_register,
   62.61                  sizeof(struct xen_pct_register));
   62.62 +        print_PCT(&pxpt->control_register);
   62.63 +        print_PCT(&pxpt->status_register);
   62.64      }
   62.65      if ( dom0_px_info->flags & XEN_PX_PSS ) 
   62.66      {
   62.67 @@ -265,12 +275,8 @@ int set_px_pminfo(uint32_t acpi_id, stru
   62.68              ret = -ENOMEM;
   62.69              goto out;
   62.70          }
   62.71 -        if ( xenpf_copy_px_states(pxpt, dom0_px_info) )
   62.72 -        {
   62.73 -            xfree(pxpt->states);
   62.74 -            ret = -EFAULT;
   62.75 -            goto out;
   62.76 -        }
   62.77 +        copy_from_guest(pxpt->states, dom0_px_info->states, 
   62.78 +                                      dom0_px_info->state_count);
   62.79          pxpt->state_count = dom0_px_info->state_count;
   62.80          print_PSS(pxpt->states,pxpt->state_count);
   62.81      }
   62.82 @@ -285,6 +291,7 @@ int set_px_pminfo(uint32_t acpi_id, stru
   62.83      if ( dom0_px_info->flags & XEN_PX_PPC )
   62.84      {
   62.85          pxpt->platform_limit = dom0_px_info->platform_limit;
   62.86 +        print_PPC(pxpt->platform_limit);
   62.87  
   62.88          if ( pxpt->init == XEN_PX_INIT )
   62.89          {
    63.1 --- a/xen/drivers/cpufreq/utility.c	Wed Oct 22 11:38:22 2008 +0900
    63.2 +++ b/xen/drivers/cpufreq/utility.c	Wed Oct 22 11:46:55 2008 +0900
    63.3 @@ -27,6 +27,7 @@
    63.4  #include <xen/types.h>
    63.5  #include <xen/sched.h>
    63.6  #include <xen/timer.h>
    63.7 +#include <xen/trace.h>
    63.8  #include <asm/config.h>
    63.9  #include <acpi/cpufreq/cpufreq.h>
   63.10  #include <public/sysctl.h>
   63.11 @@ -72,27 +73,30 @@ int cpufreq_statistic_init(unsigned int 
   63.12      struct pm_px *pxpt = cpufreq_statistic_data[cpuid];
   63.13      const struct processor_pminfo *pmpt = processor_pminfo[cpuid];
   63.14  
   63.15 -    count = pmpt->perf.state_count;
   63.16 -
   63.17      if ( !pmpt )
   63.18          return -EINVAL;
   63.19  
   63.20 +    if ( pxpt )
   63.21 +        return 0;
   63.22 +
   63.23 +    count = pmpt->perf.state_count;
   63.24 +
   63.25 +    pxpt = xmalloc(struct pm_px);
   63.26      if ( !pxpt )
   63.27 -    {
   63.28 -        pxpt = xmalloc(struct pm_px);
   63.29 -        if ( !pxpt )
   63.30 -            return -ENOMEM;
   63.31 -        memset(pxpt, 0, sizeof(*pxpt));
   63.32 -        cpufreq_statistic_data[cpuid] = pxpt;
   63.33 -    }
   63.34 +        return -ENOMEM;
   63.35 +    memset(pxpt, 0, sizeof(*pxpt));
   63.36 +    cpufreq_statistic_data[cpuid] = pxpt;
   63.37  
   63.38      pxpt->u.trans_pt = xmalloc_array(uint64_t, count * count);
   63.39 -    if (!pxpt->u.trans_pt)
   63.40 +    if (!pxpt->u.trans_pt) {
   63.41 +        xfree(pxpt);
   63.42          return -ENOMEM;
   63.43 +    }
   63.44  
   63.45      pxpt->u.pt = xmalloc_array(struct pm_px_val, count);
   63.46      if (!pxpt->u.pt) {
   63.47          xfree(pxpt->u.trans_pt);
   63.48 +        xfree(pxpt);
   63.49          return -ENOMEM;
   63.50      }
   63.51  
   63.52 @@ -119,7 +123,8 @@ void cpufreq_statistic_exit(unsigned int
   63.53          return;
   63.54      xfree(pxpt->u.trans_pt);
   63.55      xfree(pxpt->u.pt);
   63.56 -    memset(pxpt, 0, sizeof(struct pm_px));
   63.57 +    xfree(pxpt);
   63.58 +    cpufreq_statistic_data[cpuid] = NULL;
   63.59  }
   63.60  
   63.61  void cpufreq_statistic_reset(unsigned int cpuid)
   63.62 @@ -128,7 +133,7 @@ void cpufreq_statistic_reset(unsigned in
   63.63      struct pm_px *pxpt = cpufreq_statistic_data[cpuid];
   63.64      const struct processor_pminfo *pmpt = processor_pminfo[cpuid];
   63.65  
   63.66 -    if ( !pxpt || !pmpt )
   63.67 +    if ( !pmpt || !pxpt || !pxpt->u.pt || !pxpt->u.trans_pt )
   63.68          return;
   63.69  
   63.70      count = pmpt->perf.state_count;
   63.71 @@ -293,7 +298,13 @@ int __cpufreq_driver_target(struct cpufr
   63.72      int retval = -EINVAL;
   63.73  
   63.74      if (cpu_online(policy->cpu) && cpufreq_driver->target)
   63.75 +    {
   63.76 +        unsigned int prev_freq = policy->cur;
   63.77 +
   63.78          retval = cpufreq_driver->target(policy, target_freq, relation);
   63.79 +        if ( retval == 0 )
   63.80 +            TRACE_2D(TRC_PM_FREQ_CHANGE, prev_freq/1000, policy->cur/1000);
   63.81 +    }
   63.82  
   63.83      return retval;
   63.84  }
    64.1 --- a/xen/drivers/passthrough/Makefile	Wed Oct 22 11:38:22 2008 +0900
    64.2 +++ b/xen/drivers/passthrough/Makefile	Wed Oct 22 11:46:55 2008 +0900
    64.3 @@ -1,4 +1,5 @@
    64.4  subdir-$(x86) += vtd
    64.5 +subdir-$(ia64) += vtd
    64.6  subdir-$(x86) += amd
    64.7  
    64.8  obj-y += iommu.o
    65.1 --- a/xen/drivers/passthrough/io.c	Wed Oct 22 11:38:22 2008 +0900
    65.2 +++ b/xen/drivers/passthrough/io.c	Wed Oct 22 11:46:55 2008 +0900
    65.3 @@ -20,6 +20,9 @@
    65.4  
    65.5  #include <xen/event.h>
    65.6  #include <xen/iommu.h>
    65.7 +#include <asm/hvm/irq.h>
    65.8 +#include <asm/hvm/iommu.h>
    65.9 +#include <xen/hvm/irq.h>
   65.10  
   65.11  static void pt_irq_time_out(void *data)
   65.12  {
   65.13 @@ -245,6 +248,7 @@ int hvm_do_IRQ_dpci(struct domain *d, un
   65.14      return 1;
   65.15  }
   65.16  
   65.17 +#ifdef SUPPORT_MSI_REMAPPING
   65.18  void hvm_dpci_msi_eoi(struct domain *d, int vector)
   65.19  {
   65.20      struct hvm_irq_dpci *hvm_irq_dpci = d->arch.hvm_domain.irq.dpci;
   65.21 @@ -278,6 +282,63 @@ void hvm_dpci_msi_eoi(struct domain *d, 
   65.22      spin_unlock(&d->event_lock);
   65.23  }
   65.24  
   65.25 +extern int vmsi_deliver(struct domain *d, int pirq);
   65.26 +static int hvm_pci_msi_assert(struct domain *d, int pirq)
   65.27 +{
   65.28 +    return vmsi_deliver(d, pirq);
   65.29 +}
   65.30 +#endif
   65.31 +
   65.32 +void hvm_dirq_assist(struct vcpu *v)
   65.33 +{
   65.34 +    unsigned int irq;
   65.35 +    uint32_t device, intx;
   65.36 +    struct domain *d = v->domain;
   65.37 +    struct hvm_irq_dpci *hvm_irq_dpci = d->arch.hvm_domain.irq.dpci;
   65.38 +    struct dev_intx_gsi_link *digl;
   65.39 +
   65.40 +    if ( !iommu_enabled || (v->vcpu_id != 0) || (hvm_irq_dpci == NULL) )
   65.41 +        return;
   65.42 +
   65.43 +    for ( irq = find_first_bit(hvm_irq_dpci->dirq_mask, NR_IRQS);
   65.44 +          irq < NR_IRQS;
   65.45 +          irq = find_next_bit(hvm_irq_dpci->dirq_mask, NR_IRQS, irq + 1) )
   65.46 +    {
   65.47 +        if ( !test_and_clear_bit(irq, &hvm_irq_dpci->dirq_mask) )
   65.48 +            continue;
   65.49 +
   65.50 +        spin_lock(&d->event_lock);
   65.51 +#ifdef SUPPORT_MSI_REMAPPING
   65.52 +        if ( test_bit(_HVM_IRQ_DPCI_MSI, &hvm_irq_dpci->mirq[irq].flags) )
   65.53 +        {
   65.54 +            hvm_pci_msi_assert(d, irq);
   65.55 +            spin_unlock(&d->event_lock);
   65.56 +            continue;
   65.57 +        }
   65.58 +#endif
   65.59 +        stop_timer(&hvm_irq_dpci->hvm_timer[domain_irq_to_vector(d, irq)]);
   65.60 +
   65.61 +        list_for_each_entry ( digl, &hvm_irq_dpci->mirq[irq].digl_list, list )
   65.62 +        {
   65.63 +            device = digl->device;
   65.64 +            intx = digl->intx;
   65.65 +            hvm_pci_intx_assert(d, device, intx);
   65.66 +            hvm_irq_dpci->mirq[irq].pending++;
   65.67 +        }
   65.68 +
   65.69 +        /*
    65.70 +         * Set a timer to check whether the guest finishes handling the
    65.71 +         * interrupt. For example, the guest OS may unmask the PIC during boot,
    65.72 +         * before the guest driver is loaded. hvm_pci_intx_assert() may succeed,
    65.73 +         * but the guest will never handle the irq, so the physical interrupt
    65.74 +         * line will never be deasserted.
   65.75 +         */
   65.76 +        set_timer(&hvm_irq_dpci->hvm_timer[domain_irq_to_vector(d, irq)],
   65.77 +                  NOW() + PT_IRQ_TIME_OUT);
   65.78 +        spin_unlock(&d->event_lock);
   65.79 +    }
   65.80 +}
   65.81 +
   65.82  void hvm_dpci_eoi(struct domain *d, unsigned int guest_gsi,
   65.83                    union vioapic_redir_entry *ent)
   65.84  {
    66.1 --- a/xen/drivers/passthrough/iommu.c	Wed Oct 22 11:38:22 2008 +0900
    66.2 +++ b/xen/drivers/passthrough/iommu.c	Wed Oct 22 11:46:55 2008 +0900
    66.3 @@ -19,8 +19,6 @@
    66.4  #include <xen/paging.h>
    66.5  #include <xen/guest_access.h>
    66.6  
    66.7 -extern struct iommu_ops intel_iommu_ops;
    66.8 -extern struct iommu_ops amd_iommu_ops;
    66.9  static void parse_iommu_param(char *s);
   66.10  static int iommu_populate_page_table(struct domain *d);
   66.11  int intel_vtd_setup(void);
    67.1 --- a/xen/drivers/passthrough/pci.c	Wed Oct 22 11:38:22 2008 +0900
    67.2 +++ b/xen/drivers/passthrough/pci.c	Wed Oct 22 11:46:55 2008 +0900
    67.3 @@ -21,6 +21,8 @@
    67.4  #include <xen/list.h>
    67.5  #include <xen/prefetch.h>
    67.6  #include <xen/iommu.h>
    67.7 +#include <asm/hvm/iommu.h>
    67.8 +#include <asm/hvm/irq.h>
    67.9  #include <xen/delay.h>
   67.10  #include <xen/keyhandler.h>
   67.11  
   67.12 @@ -207,6 +209,7 @@ void pci_release_devices(struct domain *
   67.13      }
   67.14  }
   67.15  
   67.16 +#ifdef SUPPORT_MSI_REMAPPING
   67.17  static void dump_pci_devices(unsigned char ch)
   67.18  {
   67.19      struct pci_dev *pdev;
   67.20 @@ -236,7 +239,7 @@ static int __init setup_dump_pcidevs(voi
   67.21      return 0;
   67.22  }
   67.23  __initcall(setup_dump_pcidevs);
   67.24 -
   67.25 +#endif
   67.26  
   67.27  
   67.28  /*
    68.1 --- a/xen/drivers/passthrough/vtd/Makefile	Wed Oct 22 11:38:22 2008 +0900
    68.2 +++ b/xen/drivers/passthrough/vtd/Makefile	Wed Oct 22 11:46:55 2008 +0900
    68.3 @@ -1,4 +1,5 @@
    68.4  subdir-$(x86) += x86
    68.5 +subdir-$(ia64) += ia64
    68.6  
    68.7  obj-y += iommu.o
    68.8  obj-y += dmar.o
    69.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    69.2 +++ b/xen/drivers/passthrough/vtd/ia64/Makefile	Wed Oct 22 11:46:55 2008 +0900
    69.3 @@ -0,0 +1,1 @@
    69.4 +obj-y += vtd.o
    70.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    70.2 +++ b/xen/drivers/passthrough/vtd/ia64/vtd.c	Wed Oct 22 11:46:55 2008 +0900
    70.3 @@ -0,0 +1,112 @@
    70.4 +/*
    70.5 + * Copyright (c) 2008, Intel Corporation.
    70.6 + *
    70.7 + * This program is free software; you can redistribute it and/or modify it
    70.8 + * under the terms and conditions of the GNU General Public License,
    70.9 + * version 2, as published by the Free Software Foundation.
   70.10 + *
   70.11 + * This program is distributed in the hope it will be useful, but WITHOUT
   70.12 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   70.13 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   70.14 + * more details.
   70.15 + *
   70.16 + * You should have received a copy of the GNU General Public License along with
   70.17 + * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
   70.18 + * Place - Suite 330, Boston, MA 02111-1307 USA.
   70.19 + *
   70.20 + * Copyright (C) Allen Kay <allen.m.kay@intel.com>
   70.21 + * Copyright (C) Weidong Han <weidong.han@intel.com>
   70.22 + */
   70.23 +
   70.24 +#include <xen/sched.h>
   70.25 +#include <xen/domain_page.h>
   70.26 +#include <xen/iommu.h>
   70.27 +#include <asm/xensystem.h>
   70.28 +#include <asm/sal.h>
   70.29 +#include "../iommu.h"
   70.30 +#include "../dmar.h"
   70.31 +#include "../vtd.h"
   70.32 +
   70.33 +
   70.34 +int vector_irq[NR_VECTORS] __read_mostly = { [0 ... NR_VECTORS - 1] = -1};
   70.35 +/* irq_vectors is indexed by the sum of all RTEs in all I/O APICs. */
   70.36 +u8 irq_vector[NR_IRQ_VECTORS] __read_mostly;
   70.37 +
   70.38 +void *map_vtd_domain_page(u64 maddr)
   70.39 +{
   70.40 +    return (void *)((u64)map_domain_page(maddr >> PAGE_SHIFT) |
   70.41 +            (maddr & (PAGE_SIZE - PAGE_SIZE_4K)));
   70.42 +}
   70.43 +
   70.44 +void unmap_vtd_domain_page(void *va)
   70.45 +{
   70.46 +    unmap_domain_page(va);
   70.47 +}
   70.48 +
   70.49 +/* Allocate page table, return its machine address */
   70.50 +u64 alloc_pgtable_maddr(void)
   70.51 +{
   70.52 +    struct page_info *pg;
   70.53 +    u64 *vaddr;
   70.54 +
   70.55 +    pg = alloc_domheap_page(NULL, 0);
   70.56 +    vaddr = map_domain_page(page_to_mfn(pg));
   70.57 +    if ( !vaddr )
   70.58 +        return 0;
   70.59 +    memset(vaddr, 0, PAGE_SIZE);
   70.60 +
   70.61 +    iommu_flush_cache_page(vaddr);
   70.62 +    unmap_domain_page(vaddr);
   70.63 +
   70.64 +    return page_to_maddr(pg);
   70.65 +}
   70.66 +
   70.67 +void free_pgtable_maddr(u64 maddr)
   70.68 +{
   70.69 +    if ( maddr != 0 )
   70.70 +        free_domheap_page(maddr_to_page(maddr));
   70.71 +}
   70.72 +
   70.73 +unsigned int get_cache_line_size(void)
   70.74 +{
   70.75 +    return L1_CACHE_BYTES;
   70.76 +}
   70.77 +
   70.78 +void cacheline_flush(char * addr)
   70.79 +{
   70.80 +    ia64_fc(addr);
   70.81 +    ia64_sync_i();
   70.82 +    ia64_srlz_i();
   70.83 +}
   70.84 +
   70.85 +void flush_all_cache()
   70.86 +{
   70.87 +    ia64_sal_cache_flush(3);
   70.88 +}
   70.89 +
   70.90 +void * map_to_nocache_virt(int nr_iommus, u64 maddr)
   70.91 +{
   70.92 +  return (void *) ( maddr + __IA64_UNCACHED_OFFSET);
   70.93 +}
   70.94 +
   70.95 +struct hvm_irq_dpci *domain_get_irq_dpci(struct domain *domain)
   70.96 +{
   70.97 +    if ( !domain )
   70.98 +        return NULL;
   70.99 +
  70.100 +    return domain->arch.hvm_domain.irq.dpci;
  70.101 +}
  70.102 +
  70.103 +int domain_set_irq_dpci(struct domain *domain, struct hvm_irq_dpci *dpci)
  70.104 +{
  70.105 +    if ( !domain || !dpci )
  70.106 +        return 0;
  70.107 +
  70.108 +    domain->arch.hvm_domain.irq.dpci = dpci;
  70.109 +    return 1;
  70.110 +}
  70.111 +
  70.112 +void hvm_dpci_isairq_eoi(struct domain *d, unsigned int isairq)
  70.113 +{
  70.114 +    /* dummy */
  70.115 +}
    71.1 --- a/xen/drivers/passthrough/vtd/intremap.c	Wed Oct 22 11:38:22 2008 +0900
    71.2 +++ b/xen/drivers/passthrough/vtd/intremap.c	Wed Oct 22 11:46:55 2008 +0900
    71.3 @@ -21,6 +21,7 @@
    71.4  #include <xen/irq.h>
    71.5  #include <xen/sched.h>
    71.6  #include <xen/iommu.h>
    71.7 +#include <asm/hvm/iommu.h>
    71.8  #include <xen/time.h>
    71.9  #include <xen/pci.h>
   71.10  #include <xen/pci_regs.h>
   71.11 @@ -128,7 +129,13 @@ static int ioapic_rte_to_remap_entry(str
   71.12      memcpy(&new_ire, iremap_entry, sizeof(struct iremap_entry));
   71.13  
   71.14      if ( rte_upper )
   71.15 +    {
   71.16 +#if defined(__i386__) || defined(__x86_64__)
   71.17          new_ire.lo.dst = (value >> 24) << 8;
   71.18 +#else /* __ia64__ */
   71.19 +        new_ire.lo.dst = value >> 16;
   71.20 +#endif
   71.21 +    }
   71.22      else
   71.23      {
   71.24          *(((u32 *)&new_rte) + 0) = value;
   71.25 @@ -179,7 +186,7 @@ unsigned int io_apic_read_remap_rte(
   71.26      struct IO_xAPIC_route_entry old_rte = { 0 };
   71.27      struct IO_APIC_route_remap_entry *remap_rte;
   71.28      int rte_upper = (reg & 1) ? 1 : 0;
   71.29 -    struct iommu *iommu = ioapic_to_iommu(mp_ioapics[apic].mpc_apicid);
   71.30 +    struct iommu *iommu = ioapic_to_iommu(IO_APIC_ID(apic));
   71.31      struct ir_ctrl *ir_ctrl = iommu_ir_ctrl(iommu);
   71.32  
   71.33      if ( !iommu || !ir_ctrl || ir_ctrl->iremap_maddr == 0 ||
   71.34 @@ -224,7 +231,7 @@ void io_apic_write_remap_rte(
   71.35      struct IO_xAPIC_route_entry old_rte = { 0 };
   71.36      struct IO_APIC_route_remap_entry *remap_rte;
   71.37      unsigned int rte_upper = (reg & 1) ? 1 : 0;
   71.38 -    struct iommu *iommu = ioapic_to_iommu(mp_ioapics[apic].mpc_apicid);
   71.39 +    struct iommu *iommu = ioapic_to_iommu(IO_APIC_ID(apic));
   71.40      struct ir_ctrl *ir_ctrl = iommu_ir_ctrl(iommu);
   71.41      int saved_mask;
   71.42  
   71.43 @@ -253,7 +260,7 @@ void io_apic_write_remap_rte(
   71.44      *(IO_APIC_BASE(apic)+4) = *(((int *)&old_rte)+0);
   71.45      remap_rte->mask = saved_mask;
   71.46  
   71.47 -    if ( ioapic_rte_to_remap_entry(iommu, mp_ioapics[apic].mpc_apicid,
   71.48 +    if ( ioapic_rte_to_remap_entry(iommu, IO_APIC_ID(apic),
   71.49                                     &old_rte, rte_upper, value) )
   71.50      {
   71.51          *IO_APIC_BASE(apic) = rte_upper ? (reg + 1) : reg;
   71.52 @@ -328,7 +335,8 @@ static int remap_entry_to_msi_msg(
   71.53  }
   71.54  
   71.55  static int msi_msg_to_remap_entry(
   71.56 -    struct iommu *iommu, struct pci_dev *pdev, struct msi_msg *msg)
   71.57 +    struct iommu *iommu, struct pci_dev *pdev,
   71.58 +    struct msi_desc *msi_desc, struct msi_msg *msg)
   71.59  {
   71.60      struct iremap_entry *iremap_entry = NULL, *iremap_entries;
   71.61      struct iremap_entry new_ire;
   71.62 @@ -336,32 +344,18 @@ static int msi_msg_to_remap_entry(
   71.63      unsigned int index;
   71.64      unsigned long flags;
   71.65      struct ir_ctrl *ir_ctrl = iommu_ir_ctrl(iommu);
   71.66 -    int i = 0;
   71.67  
   71.68      remap_rte = (struct msi_msg_remap_entry *) msg;
   71.69      spin_lock_irqsave(&ir_ctrl->iremap_lock, flags);
   71.70  
   71.71 -    iremap_entries =
   71.72 -        (struct iremap_entry *)map_vtd_domain_page(ir_ctrl->iremap_maddr);
   71.73 -
   71.74 -    /* If the entry for a PCI device has been there, use the old entry,
   71.75 -     * Or, assign a new entry for it.
   71.76 -     */
   71.77 -    for ( i = 0; i <= ir_ctrl->iremap_index; i++ )
   71.78 +    if ( msi_desc->remap_index < 0 )
   71.79      {
   71.80 -        iremap_entry = &iremap_entries[i];
   71.81 -        if ( iremap_entry->hi.sid ==
   71.82 -             ((pdev->bus << 8) | pdev->devfn) )
   71.83 -           break;
   71.84 -    }
   71.85 -
   71.86 -    if ( i > ir_ctrl->iremap_index )
   71.87 -    {
   71.88 -    	ir_ctrl->iremap_index++;
   71.89 +        ir_ctrl->iremap_index++;
   71.90          index = ir_ctrl->iremap_index;
   71.91 +        msi_desc->remap_index = index;
   71.92      }
   71.93      else
   71.94 -        index = i;
   71.95 +        index = msi_desc->remap_index;
   71.96  
   71.97      if ( index > IREMAP_ENTRY_NR - 1 )
   71.98      {
   71.99 @@ -369,11 +363,13 @@ static int msi_msg_to_remap_entry(
  71.100                  "%s: intremap index (%d) is larger than"
  71.101                  " the maximum index (%ld)!\n",
  71.102                  __func__, index, IREMAP_ENTRY_NR - 1);
  71.103 -        unmap_vtd_domain_page(iremap_entries);
  71.104 +        msi_desc->remap_index = -1;
  71.105          spin_unlock_irqrestore(&ir_ctrl->iremap_lock, flags);
  71.106          return -EFAULT;
  71.107      }
  71.108  
  71.109 +    iremap_entries =
  71.110 +        (struct iremap_entry *)map_vtd_domain_page(ir_ctrl->iremap_maddr);
  71.111      iremap_entry = &iremap_entries[index];
  71.112      memcpy(&new_ire, iremap_entry, sizeof(struct iremap_entry));
  71.113  
  71.114 @@ -450,7 +446,7 @@ void msi_msg_write_remap_rte(
  71.115      if ( !iommu || !ir_ctrl || ir_ctrl->iremap_maddr == 0 )
  71.116          return;
  71.117  
  71.118 -    msi_msg_to_remap_entry(iommu, pdev, msg);
  71.119 +    msi_msg_to_remap_entry(iommu, pdev, msi_desc, msg);
  71.120  }
  71.121  #elif defined(__ia64__)
  71.122  void msi_msg_read_remap_rte(
  71.123 @@ -482,7 +478,7 @@ int intremap_setup(struct iommu *iommu)
  71.124          {
  71.125              dprintk(XENLOG_WARNING VTDPREFIX,
  71.126                      "Cannot allocate memory for ir_ctrl->iremap_maddr\n");
  71.127 -            return -ENODEV;
  71.128 +            return -ENOMEM;
  71.129          }
  71.130          ir_ctrl->iremap_index = -1;
  71.131      }
  71.132 @@ -490,10 +486,10 @@ int intremap_setup(struct iommu *iommu)
  71.133  #if defined(ENABLED_EXTENDED_INTERRUPT_SUPPORT)
  71.134      /* set extended interrupt mode bit */
  71.135      ir_ctrl->iremap_maddr |=
  71.136 -            ecap_ext_intr(iommu->ecap) ? (1 << IRTA_REG_EIMI_SHIFT) : 0;
  71.137 +            ecap_ext_intr(iommu->ecap) ? (1 << IRTA_REG_EIME_SHIFT) : 0;
  71.138  #endif
  71.139 -    /* size field = 256 entries per 4K page = 8 - 1 */
  71.140 -    ir_ctrl->iremap_maddr |= 7;
  71.141 +    /* set size of the interrupt remapping table */ 
  71.142 +    ir_ctrl->iremap_maddr |= IRTA_REG_TABLE_SIZE;
  71.143      dmar_writeq(iommu->reg, DMAR_IRTA_REG, ir_ctrl->iremap_maddr);
  71.144  
  71.145      /* set SIRTP */
    72.1 --- a/xen/drivers/passthrough/vtd/iommu.c	Wed Oct 22 11:38:22 2008 +0900
    72.2 +++ b/xen/drivers/passthrough/vtd/iommu.c	Wed Oct 22 11:46:55 2008 +0900
    72.3 @@ -24,6 +24,7 @@
    72.4  #include <xen/xmalloc.h>
    72.5  #include <xen/domain_page.h>
    72.6  #include <xen/iommu.h>
    72.7 +#include <asm/hvm/iommu.h>
    72.8  #include <xen/numa.h>
    72.9  #include <xen/time.h>
   72.10  #include <xen/pci.h>
   72.11 @@ -218,10 +219,10 @@ static u64 addr_to_dma_page_maddr(struct
   72.12              if ( !alloc )
   72.13                  break;
   72.14              maddr = alloc_pgtable_maddr();
   72.15 +            if ( !maddr )
   72.16 +                break;
   72.17              dma_set_pte_addr(*pte, maddr);
   72.18              vaddr = map_vtd_domain_page(maddr);
   72.19 -            if ( !vaddr )
   72.20 -                break;
   72.21  
   72.22              /*
   72.23               * high level table always sets r/w, last level
   72.24 @@ -234,8 +235,6 @@ static u64 addr_to_dma_page_maddr(struct
   72.25          else
   72.26          {
   72.27              vaddr = map_vtd_domain_page(pte->val);
   72.28 -            if ( !vaddr )
   72.29 -                break;
   72.30          }
   72.31  
   72.32          if ( level == 2 )
   72.33 @@ -569,26 +568,6 @@ static void dma_pte_clear_one(struct dom
   72.34      unmap_vtd_domain_page(page);
   72.35  }
   72.36  
   72.37 -/* clear last level pte, a tlb flush should be followed */
   72.38 -static void dma_pte_clear_range(struct domain *domain, u64 start, u64 end)
   72.39 -{
   72.40 -    struct hvm_iommu *hd = domain_hvm_iommu(domain);
   72.41 -    int addr_width = agaw_to_width(hd->agaw);
   72.42 -
   72.43 -    start &= (((u64)1) << addr_width) - 1;
   72.44 -    end &= (((u64)1) << addr_width) - 1;
   72.45 -    /* in case it's partial page */
   72.46 -    start = PAGE_ALIGN_4K(start);
   72.47 -    end &= PAGE_MASK_4K;
   72.48 -
   72.49 -    /* we don't need lock here, nobody else touches the iova range */
   72.50 -    while ( start < end )
   72.51 -    {
   72.52 -        dma_pte_clear_one(domain, start);
   72.53 -        start += PAGE_SIZE_4K;
   72.54 -    }
   72.55 -}
   72.56 -
   72.57  static void iommu_free_pagetable(u64 pt_maddr, int level)
   72.58  {
   72.59      int i;
   72.60 @@ -877,6 +856,7 @@ static void dma_msi_data_init(struct iom
   72.61      spin_unlock_irqrestore(&iommu->register_lock, flags);
   72.62  }
   72.63  
   72.64 +#ifdef SUPPORT_MSI_REMAPPING
   72.65  static void dma_msi_addr_init(struct iommu *iommu, int phy_cpu)
   72.66  {
   72.67      u64 msi_address;
   72.68 @@ -893,6 +873,12 @@ static void dma_msi_addr_init(struct iom
   72.69      dmar_writel(iommu->reg, DMAR_FEUADDR_REG, (u32)(msi_address >> 32));
   72.70      spin_unlock_irqrestore(&iommu->register_lock, flags);
   72.71  }
   72.72 +#else
   72.73 +static void dma_msi_addr_init(struct iommu *iommu, int phy_cpu)
   72.74 +{
   72.75 +    /* ia64: TODO */
   72.76 +}
   72.77 +#endif
   72.78  
   72.79  static void dma_msi_set_affinity(unsigned int vector, cpumask_t dest)
   72.80  {
   72.81 @@ -1024,7 +1010,7 @@ static int intel_iommu_domain_init(struc
   72.82  {
   72.83      struct hvm_iommu *hd = domain_hvm_iommu(d);
   72.84      struct iommu *iommu = NULL;
   72.85 -    u64 i;
   72.86 +    u64 i, j, tmp;
   72.87      struct acpi_drhd_unit *drhd;
   72.88  
   72.89      drhd = list_entry(acpi_drhd_units.next, typeof(*drhd), list);
   72.90 @@ -1043,11 +1029,13 @@ static int intel_iommu_domain_init(struc
   72.91           */
   72.92          for ( i = 0; i < max_page; i++ )
   72.93          {
   72.94 -            if ( xen_in_range(i << PAGE_SHIFT_4K, (i + 1) << PAGE_SHIFT_4K) ||
   72.95 -                 tboot_in_range(i << PAGE_SHIFT_4K, (i + 1) << PAGE_SHIFT_4K) )
   72.96 +            if ( xen_in_range(i << PAGE_SHIFT, (i + 1) << PAGE_SHIFT) ||
   72.97 +                 tboot_in_range(i << PAGE_SHIFT, (i + 1) << PAGE_SHIFT) )
   72.98                  continue;
   72.99  
  72.100 -            iommu_map_page(d, i, i);
  72.101 +            tmp = 1 << (PAGE_SHIFT - PAGE_SHIFT_4K);
  72.102 +            for ( j = 0; j < tmp; j++ )
  72.103 +                iommu_map_page(d, (i*tmp+j), (i*tmp+j));
  72.104          }
  72.105  
  72.106          setup_dom0_devices(d);
  72.107 @@ -1511,75 +1499,26 @@ int intel_iommu_unmap_page(struct domain
  72.108      return 0;
  72.109  }
  72.110  
  72.111 -int iommu_page_mapping(struct domain *domain, paddr_t iova,
  72.112 -                       paddr_t hpa, size_t size, int prot)
  72.113 -{
  72.114 -    struct hvm_iommu *hd = domain_hvm_iommu(domain);
  72.115 -    struct acpi_drhd_unit *drhd;
  72.116 -    struct iommu *iommu;
  72.117 -    u64 start_pfn, end_pfn;
  72.118 -    struct dma_pte *page = NULL, *pte = NULL;
  72.119 -    int index;
  72.120 -    u64 pg_maddr;
  72.121 -
  72.122 -    if ( (prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0 )
  72.123 -        return -EINVAL;
  72.124 -
  72.125 -    iova = (iova >> PAGE_SHIFT_4K) << PAGE_SHIFT_4K;
  72.126 -    start_pfn = hpa >> PAGE_SHIFT_4K;
  72.127 -    end_pfn = (PAGE_ALIGN_4K(hpa + size)) >> PAGE_SHIFT_4K;
  72.128 -    index = 0;
  72.129 -    while ( start_pfn < end_pfn )
  72.130 -    {
  72.131 -        pg_maddr = addr_to_dma_page_maddr(domain, iova + PAGE_SIZE_4K*index, 1);
  72.132 -        if ( pg_maddr == 0 )
  72.133 -            return -ENOMEM;
  72.134 -        page = (struct dma_pte *)map_vtd_domain_page(pg_maddr);
  72.135 -        pte = page + (start_pfn & LEVEL_MASK);
  72.136 -        dma_set_pte_addr(*pte, (paddr_t)start_pfn << PAGE_SHIFT_4K);
  72.137 -        dma_set_pte_prot(*pte, prot);
  72.138 -        iommu_flush_cache_entry(pte);
  72.139 -        unmap_vtd_domain_page(page);
  72.140 -        start_pfn++;
  72.141 -        index++;
  72.142 -    }
  72.143 -
  72.144 -    if ( index > 0 )
  72.145 -    {
  72.146 -        for_each_drhd_unit ( drhd )
  72.147 -        {
  72.148 -            iommu = drhd->iommu;
  72.149 -            if ( test_bit(iommu->index, &hd->iommu_bitmap) )
  72.150 -                if ( iommu_flush_iotlb_psi(iommu, domain_iommu_domid(domain),
  72.151 -                                           iova, index, 1))
  72.152 -                    iommu_flush_write_buffer(iommu);
  72.153 -        }
  72.154 -    }
  72.155 -
  72.156 -    return 0;
  72.157 -}
  72.158 -
  72.159 -int iommu_page_unmapping(struct domain *domain, paddr_t addr, size_t size)
  72.160 -{
  72.161 -    dma_pte_clear_range(domain, addr, addr + size);
  72.162 -
  72.163 -    return 0;
  72.164 -}
  72.165 -
  72.166  static int iommu_prepare_rmrr_dev(struct domain *d,
  72.167                                    struct acpi_rmrr_unit *rmrr,
  72.168                                    u8 bus, u8 devfn)
  72.169  {
  72.170 -    u64 size;
  72.171 -    int ret;
  72.172 +    int ret = 0;
  72.173 +    u64 base, end;
  72.174 +    unsigned long base_pfn, end_pfn;
  72.175  
  72.176 -    /* page table init */
  72.177 -    size = rmrr->end_address - rmrr->base_address + 1;
  72.178 -    ret = iommu_page_mapping(d, rmrr->base_address,
  72.179 -                             rmrr->base_address, size,
  72.180 -                             DMA_PTE_READ|DMA_PTE_WRITE);
  72.181 -    if ( ret )
  72.182 -        return ret;
  72.183 +    ASSERT(rmrr->base_address < rmrr->end_address);
  72.184 +    
  72.185 +    base = rmrr->base_address & PAGE_MASK_4K;
  72.186 +    base_pfn = base >> PAGE_SHIFT_4K;
  72.187 +    end = PAGE_ALIGN_4K(rmrr->end_address);
  72.188 +    end_pfn = end >> PAGE_SHIFT_4K;
  72.189 +
  72.190 +    while ( base_pfn < end_pfn )
  72.191 +    {
  72.192 +        intel_iommu_map_page(d, base_pfn, base_pfn);
  72.193 +        base_pfn++;
  72.194 +    }
  72.195  
  72.196      if ( domain_context_mapped(bus, devfn) == 0 )
  72.197          ret = domain_context_mapping(d, bus, devfn);
    73.1 --- a/xen/drivers/passthrough/vtd/qinval.c	Wed Oct 22 11:38:22 2008 +0900
    73.2 +++ b/xen/drivers/passthrough/vtd/qinval.c	Wed Oct 22 11:46:55 2008 +0900
    73.3 @@ -428,7 +428,11 @@ int qinval_setup(struct iommu *iommu)
    73.4      {
    73.5          qi_ctrl->qinval_maddr = alloc_pgtable_maddr();
    73.6          if ( qi_ctrl->qinval_maddr == 0 )
    73.7 -            panic("Cannot allocate memory for qi_ctrl->qinval_maddr\n");
    73.8 +        {
    73.9 +            dprintk(XENLOG_WARNING VTDPREFIX,
   73.10 +                    "Cannot allocate memory for qi_ctrl->qinval_maddr\n");
   73.11 +            return -ENOMEM;
   73.12 +        }
   73.13          flush->context = flush_context_qi;
   73.14          flush->iotlb = flush_iotlb_qi;
   73.15      }
    74.1 --- a/xen/drivers/passthrough/vtd/utils.c	Wed Oct 22 11:38:22 2008 +0900
    74.2 +++ b/xen/drivers/passthrough/vtd/utils.c	Wed Oct 22 11:46:55 2008 +0900
    74.3 @@ -204,6 +204,7 @@ void print_vtd_entries(struct iommu *iom
    74.4  
    74.5  void dump_iommu_info(unsigned char key)
    74.6  {
    74.7 +#if defined(__i386__) || defined(__x86_64__)
    74.8      struct acpi_drhd_unit *drhd;
    74.9      struct iommu *iommu;
   74.10      int i;
   74.11 @@ -305,6 +306,10 @@ void dump_iommu_info(unsigned char key)
   74.12              }
   74.13          }
   74.14      }
   74.15 +#else
   74.16 +    printk("%s: not implemnted on IA64 for now.\n", __func__);
   74.17 +    /* ia64: TODO */
   74.18 +#endif
   74.19  }
   74.20  
   74.21  /*
    75.1 --- a/xen/drivers/passthrough/vtd/x86/vtd.c	Wed Oct 22 11:38:22 2008 +0900
    75.2 +++ b/xen/drivers/passthrough/vtd/x86/vtd.c	Wed Oct 22 11:46:55 2008 +0900
    75.3 @@ -41,17 +41,19 @@ u64 alloc_pgtable_maddr(void)
    75.4  {
    75.5      struct page_info *pg;
    75.6      u64 *vaddr;
    75.7 +    unsigned long mfn;
    75.8  
    75.9      pg = alloc_domheap_page(NULL, 0);
   75.10 -    vaddr = map_domain_page(page_to_mfn(pg));
   75.11 -    if ( !vaddr )
   75.12 +    if ( !pg )
   75.13          return 0;
   75.14 +    mfn = page_to_mfn(pg);
   75.15 +    vaddr = map_domain_page(mfn);
   75.16      memset(vaddr, 0, PAGE_SIZE);
   75.17  
   75.18      iommu_flush_cache_page(vaddr);
   75.19      unmap_domain_page(vaddr);
   75.20  
   75.21 -    return page_to_maddr(pg);
   75.22 +    return (u64)mfn << PAGE_SHIFT_4K;
   75.23  }
   75.24  
   75.25  void free_pgtable_maddr(u64 maddr)
    76.1 --- a/xen/include/acpi/cpufreq/processor_perf.h	Wed Oct 22 11:38:22 2008 +0900
    76.2 +++ b/xen/include/acpi/cpufreq/processor_perf.h	Wed Oct 22 11:46:55 2008 +0900
    76.3 @@ -60,8 +60,5 @@ struct pm_px {
    76.4  
    76.5  extern struct pm_px *cpufreq_statistic_data[NR_CPUS];
    76.6  
    76.7 -int xenpf_copy_px_states(struct processor_performance *pxpt,
    76.8 -        struct xen_processor_performance *dom0_px_info);
    76.9 -
   76.10  int cpufreq_cpu_init(unsigned int cpuid);
   76.11  #endif /* __XEN_PROCESSOR_PM_H__ */
    77.1 --- a/xen/include/asm-ia64/linux-xen/asm/spinlock.h	Wed Oct 22 11:38:22 2008 +0900
    77.2 +++ b/xen/include/asm-ia64/linux-xen/asm/spinlock.h	Wed Oct 22 11:46:55 2008 +0900
    77.3 @@ -27,25 +27,16 @@ typedef struct {
    77.4  #ifdef DEBUG_SPINLOCK
    77.5  	void *locker;
    77.6  #endif
    77.7 -#ifdef XEN
    77.8 -	unsigned char recurse_cpu;
    77.9 -	unsigned char recurse_cnt;
   77.10 -#endif
   77.11 -} spinlock_t;
   77.12 +} raw_spinlock_t;
   77.13  
   77.14  #ifdef XEN
   77.15  #ifdef DEBUG_SPINLOCK
   77.16 -#define SPIN_LOCK_UNLOCKED	/*(spinlock_t)*/ { 0, NULL, -1, 0 }
   77.17 +#define _RAW_SPIN_LOCK_UNLOCKED	/*(raw_spinlock_t)*/ { 0, NULL }
   77.18  #else
   77.19 -#define SPIN_LOCK_UNLOCKED	/*(spinlock_t)*/ { 0, -1, 0 }
   77.20 +#define _RAW_SPIN_LOCK_UNLOCKED	/*(raw_spinlock_t)*/ { 0 }
   77.21  #endif
   77.22 -static inline void spin_lock_init(spinlock_t *lock)
   77.23 -{
   77.24 -	*lock = ((spinlock_t)SPIN_LOCK_UNLOCKED);
   77.25 -}
   77.26  #else
   77.27 -#define SPIN_LOCK_UNLOCKED			/*(spinlock_t)*/ { 0 }
   77.28 -#define spin_lock_init(x)			((x)->lock = 0)
   77.29 +#define _RAW_SPIN_LOCK_UNLOCKED	/*(raw_spinlock_t)*/ { 0 }
   77.30  #endif
   77.31  
   77.32  #ifdef ASM_SUPPORTED
   77.33 @@ -59,7 +50,7 @@ static inline void spin_lock_init(spinlo
   77.34  #define IA64_SPINLOCK_CLOBBERS "ar.ccv", "ar.pfs", "p14", "p15", "r27", "r28", "r29", "r30", "b6", "memory"
   77.35  
   77.36  static inline void
   77.37 -_raw_spin_lock_flags (spinlock_t *lock, unsigned long flags)
   77.38 +_raw_spin_lock_flags (raw_spinlock_t *lock, unsigned long flags)
   77.39  {
   77.40  	register volatile unsigned int *ptr asm ("r31") = &lock->lock;
   77.41  
   77.42 @@ -136,10 +127,9 @@ do {											\
   77.43  } while (0)
   77.44  #endif /* !ASM_SUPPORTED */
   77.45  
   77.46 -#define spin_is_locked(x)	((x)->lock != 0)
   77.47 -#define _raw_spin_unlock(x)	do { barrier(); ((spinlock_t *) x)->lock = 0; } while (0)
   77.48 +#define _raw_spin_is_locked(x)	((x)->lock != 0)
   77.49 +#define _raw_spin_unlock(x)	do { barrier(); (x)->lock = 0; } while (0)
   77.50  #define _raw_spin_trylock(x)	(cmpxchg_acq(&(x)->lock, 0, 1) == 0)
   77.51 -#define spin_unlock_wait(x)	do { barrier(); } while ((x)->lock)
   77.52  
   77.53  typedef struct {
   77.54  	volatile unsigned int read_counter	: 31;
   77.55 @@ -147,16 +137,12 @@ typedef struct {
   77.56  #ifdef CONFIG_PREEMPT
   77.57  	unsigned int break_lock;
   77.58  #endif
   77.59 -} rwlock_t;
   77.60 -#define RW_LOCK_UNLOCKED /*(rwlock_t)*/ { 0, 0 }
   77.61 -
   77.62 -#define rwlock_init(x)		do { *(x) = (rwlock_t) RW_LOCK_UNLOCKED; } while(0)
   77.63 -#define read_can_lock(rw)	(*(volatile int *)(rw) >= 0)
   77.64 -#define write_can_lock(rw)	(*(volatile int *)(rw) == 0)
   77.65 +} raw_rwlock_t;
   77.66 +#define _RAW_RW_LOCK_UNLOCKED /*(raw_rwlock_t)*/ { 0, 0 }
   77.67  
   77.68  #define _raw_read_lock(rw)								\
   77.69  do {											\
   77.70 -	rwlock_t *__read_lock_ptr = (rw);						\
   77.71 +	raw_rwlock_t *__read_lock_ptr = (rw);						\
   77.72  											\
   77.73  	while (unlikely(ia64_fetchadd(1, (int *) __read_lock_ptr, acq) < 0)) {		\
   77.74  		ia64_fetchadd(-1, (int *) __read_lock_ptr, rel);			\
   77.75 @@ -167,7 +153,7 @@ do {											\
   77.76  
   77.77  #define _raw_read_unlock(rw)					\
   77.78  do {								\
   77.79 -	rwlock_t *__read_lock_ptr = (rw);			\
   77.80 +	raw_rwlock_t *__read_lock_ptr = (rw);			\
   77.81  	ia64_fetchadd(-1, (int *) __read_lock_ptr, rel);	\
   77.82  } while (0)
   77.83  
   77.84 @@ -230,7 +216,4 @@ do {										\
   77.85  	clear_bit(31, (x));								\
   77.86  })
   77.87  
   77.88 -#ifdef XEN
   77.89 -#include <asm/xenspinlock.h>
   77.90 -#endif
   77.91  #endif /*  _ASM_IA64_SPINLOCK_H */
    78.1 --- a/xen/include/asm-ia64/xenspinlock.h	Wed Oct 22 11:38:22 2008 +0900
    78.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    78.3 @@ -1,30 +0,0 @@
    78.4 -#ifndef _ASM_IA64_XENSPINLOCK_H
    78.5 -#define _ASM_IA64_XENSPINLOCK_H
    78.6 -
    78.7 -/*
    78.8 - * spin_[un]lock_recursive(): Use these forms when the lock can (safely!) be
    78.9 - * reentered recursively on the same CPU. All critical regions that may form
   78.10 - * part of a recursively-nested set must be protected by these forms. If there
   78.11 - * are any critical regions that cannot form part of such a set, they can use
   78.12 - * standard spin_[un]lock().
   78.13 - */
   78.14 -#define _raw_spin_lock_recursive(_lock)            \
   78.15 -    do {                                           \
   78.16 -        int cpu = smp_processor_id();              \
   78.17 -        if ( likely((_lock)->recurse_cpu != cpu) ) \
   78.18 -        {                                          \
   78.19 -            spin_lock(_lock);                      \
   78.20 -            (_lock)->recurse_cpu = cpu;            \
   78.21 -        }                                          \
   78.22 -        (_lock)->recurse_cnt++;                    \
   78.23 -    } while ( 0 )
   78.24 -
   78.25 -#define _raw_spin_unlock_recursive(_lock)          \
   78.26 -    do {                                           \
   78.27 -        if ( likely(--(_lock)->recurse_cnt == 0) ) \
   78.28 -        {                                          \
   78.29 -            (_lock)->recurse_cpu = -1;             \
   78.30 -            spin_unlock(_lock);                    \
   78.31 -        }                                          \
   78.32 -    } while ( 0 )
   78.33 -#endif /*  _ASM_IA64_XENSPINLOCK_H */
    79.1 --- a/xen/include/asm-x86/domain.h	Wed Oct 22 11:38:22 2008 +0900
    79.2 +++ b/xen/include/asm-x86/domain.h	Wed Oct 22 11:46:55 2008 +0900
    79.3 @@ -250,6 +250,8 @@ struct arch_domain
    79.4      bool_t is_32bit_pv;
    79.5      /* Is shared-info page in 32-bit format? */
    79.6      bool_t has_32bit_shinfo;
    79.7 +    /* Domain cannot handle spurious page faults? */
    79.8 +    bool_t suppress_spurious_page_faults;
    79.9  
   79.10      /* Continuable domain_relinquish_resources(). */
   79.11      enum {
    80.1 --- a/xen/include/asm-x86/hvm/hvm.h	Wed Oct 22 11:38:22 2008 +0900
    80.2 +++ b/xen/include/asm-x86/hvm/hvm.h	Wed Oct 22 11:46:55 2008 +0900
    80.3 @@ -128,6 +128,7 @@ struct hvm_function_table {
    80.4      int (*msr_write_intercept)(struct cpu_user_regs *regs);
    80.5      void (*invlpg_intercept)(unsigned long vaddr);
    80.6      void (*set_uc_mode)(struct vcpu *v);
    80.7 +    void (*set_info_guest)(struct vcpu *v);
    80.8  };
    80.9  
   80.10  extern struct hvm_function_table hvm_funcs;
   80.11 @@ -314,4 +315,10 @@ int hvm_virtual_to_linear_addr(
   80.12      unsigned int addr_size,
   80.13      unsigned long *linear_addr);
   80.14  
   80.15 +static inline void hvm_set_info_guest(struct vcpu *v)
   80.16 +{
   80.17 +    if ( hvm_funcs.set_info_guest )
   80.18 +        return hvm_funcs.set_info_guest(v);
   80.19 +}
   80.20 +
   80.21  #endif /* __ASM_X86_HVM_HVM_H__ */
    81.1 --- a/xen/include/asm-x86/hvm/irq.h	Wed Oct 22 11:38:22 2008 +0900
    81.2 +++ b/xen/include/asm-x86/hvm/irq.h	Wed Oct 22 11:46:55 2008 +0900
    81.3 @@ -22,62 +22,11 @@
    81.4  #ifndef __ASM_X86_HVM_IRQ_H__
    81.5  #define __ASM_X86_HVM_IRQ_H__
    81.6  
    81.7 -#include <xen/types.h>
    81.8 -#include <xen/spinlock.h>
    81.9 -#include <asm/irq.h>
   81.10  #include <asm/pirq.h>
   81.11 +#include <xen/hvm/irq.h>
   81.12  #include <asm/hvm/hvm.h>
   81.13  #include <asm/hvm/vpic.h>
   81.14  #include <asm/hvm/vioapic.h>
   81.15 -#include <public/hvm/save.h>
   81.16 -
   81.17 -struct dev_intx_gsi_link {
   81.18 -    struct list_head list;
   81.19 -    uint8_t device;
   81.20 -    uint8_t intx;
   81.21 -    uint8_t gsi;
   81.22 -    uint8_t link;
   81.23 -};
   81.24 -
   81.25 -#define _HVM_IRQ_DPCI_MSI  0x1
   81.26 -
   81.27 -struct hvm_gmsi_info {
   81.28 -    uint32_t gvec;
   81.29 -    uint32_t gflags;
   81.30 -};
   81.31 -
   81.32 -struct hvm_mirq_dpci_mapping {
   81.33 -    uint32_t flags;
   81.34 -    int pending;
   81.35 -    struct list_head digl_list;
   81.36 -    struct domain *dom;
   81.37 -    struct hvm_gmsi_info gmsi;
   81.38 -};
   81.39 -
   81.40 -struct hvm_girq_dpci_mapping {
   81.41 -    uint8_t valid;
   81.42 -    uint8_t device;
   81.43 -    uint8_t intx;
   81.44 -    uint8_t machine_gsi;
   81.45 -};
   81.46 -
   81.47 -#define NR_ISAIRQS  16
   81.48 -#define NR_LINK     4
   81.49 -/* Protected by domain's event_lock */
   81.50 -struct hvm_irq_dpci {
   81.51 -    /* Machine IRQ to guest device/intx mapping. */
   81.52 -    DECLARE_BITMAP(mapping, NR_PIRQS);
   81.53 -    struct hvm_mirq_dpci_mapping mirq[NR_IRQS];
   81.54 -    /* Guest IRQ to guest device/intx mapping. */
   81.55 -    struct hvm_girq_dpci_mapping girq[NR_IRQS];
   81.56 -    uint8_t msi_gvec_pirq[NR_VECTORS];
   81.57 -    DECLARE_BITMAP(dirq_mask, NR_IRQS);
   81.58 -    /* Record of mapped ISA IRQs */
   81.59 -    DECLARE_BITMAP(isairq_map, NR_ISAIRQS);
   81.60 -    /* Record of mapped Links */
   81.61 -    uint8_t link_cnt[NR_LINK];
   81.62 -    struct timer hvm_timer[NR_IRQS];
   81.63 -};
   81.64  
   81.65  struct hvm_irq {
   81.66      /*
   81.67 @@ -149,27 +98,16 @@ struct hvm_irq {
   81.68  
   81.69  #define hvm_isa_irq_to_gsi(isa_irq) ((isa_irq) ? : 2)
   81.70  
   81.71 -/* Modify state of a PCI INTx wire. */
   81.72 -void hvm_pci_intx_assert(
   81.73 -    struct domain *d, unsigned int device, unsigned int intx);
   81.74 -void hvm_pci_intx_deassert(
   81.75 -    struct domain *d, unsigned int device, unsigned int intx);
   81.76 -
   81.77 -/* Modify state of an ISA device's IRQ wire. */
   81.78 -void hvm_isa_irq_assert(
   81.79 -    struct domain *d, unsigned int isa_irq);
   81.80 -void hvm_isa_irq_deassert(
   81.81 -    struct domain *d, unsigned int isa_irq);
   81.82 -
   81.83 -void hvm_set_pci_link_route(struct domain *d, u8 link, u8 isa_irq);
   81.84 -
   81.85 -void hvm_maybe_deassert_evtchn_irq(void);
   81.86 -void hvm_assert_evtchn_irq(struct vcpu *v);
   81.87 -void hvm_set_callback_via(struct domain *d, uint64_t via);
   81.88 -
   81.89  /* Check/Acknowledge next pending interrupt. */
   81.90  struct hvm_intack hvm_vcpu_has_pending_irq(struct vcpu *v);
   81.91  struct hvm_intack hvm_vcpu_ack_pending_irq(struct vcpu *v,
   81.92                                             struct hvm_intack intack);
   81.93  
   81.94 +/*
   81.95 + * Currently IA64 Xen doesn't support MSI. So for x86, we define this macro
   81.96 + * to control the conditional compilation of some MSI-related functions.
   81.97 + * This macro will be removed once IA64 has MSI support.
   81.98 + */
   81.99 +#define SUPPORT_MSI_REMAPPING 1
  81.100 +
  81.101  #endif /* __ASM_X86_HVM_IRQ_H__ */
    82.1 --- a/xen/include/asm-x86/hvm/svm/vmcb.h	Wed Oct 22 11:38:22 2008 +0900
    82.2 +++ b/xen/include/asm-x86/hvm/svm/vmcb.h	Wed Oct 22 11:46:55 2008 +0900
    82.3 @@ -393,7 +393,9 @@ struct vmcb_struct {
    82.4      eventinj_t  eventinj;       /* offset 0xA8 */
    82.5      u64 h_cr3;                  /* offset 0xB0 */
    82.6      lbrctrl_t lbr_control;      /* offset 0xB8 */
    82.7 -    u64 res09[104];             /* offset 0xC0 pad to save area */
    82.8 +    u64 res09;                  /* offset 0xC0 */
    82.9 +    u64 nextrip;                /* offset 0xC8 */
   82.10 +    u64 res10a[102];            /* offset 0xD0 pad to save area */
   82.11  
   82.12      svm_segment_register_t es;      /* offset 1024 */
   82.13      svm_segment_register_t cs;
    83.1 --- a/xen/include/asm-x86/io_apic.h	Wed Oct 22 11:38:22 2008 +0900
    83.2 +++ b/xen/include/asm-x86/io_apic.h	Wed Oct 22 11:46:55 2008 +0900
    83.3 @@ -20,6 +20,8 @@
    83.4  		((volatile int *)(__fix_to_virt(FIX_IO_APIC_BASE_0 + idx) \
    83.5  		+ (mp_ioapics[idx].mpc_apicaddr & ~PAGE_MASK)))
    83.6  
    83.7 +#define IO_APIC_ID(idx) (mp_ioapics[idx].mpc_apicid)
    83.8 +
    83.9  /*
   83.10   * The structure of the IO-APIC:
   83.11   */
    84.1 --- a/xen/include/asm-x86/msi.h	Wed Oct 22 11:38:22 2008 +0900
    84.2 +++ b/xen/include/asm-x86/msi.h	Wed Oct 22 11:46:55 2008 +0900
    84.3 @@ -90,10 +90,11 @@ struct msi_desc {
    84.4  
    84.5  	void __iomem *mask_base;
    84.6  	struct pci_dev *dev;
    84.7 -    int vector;
    84.8 +	int vector;
    84.9  
   84.10 -	/* Last set MSI message */
   84.11 -	struct msi_msg msg;
   84.12 +	struct msi_msg msg;		/* Last set MSI message */
   84.13 +
   84.14 +	int remap_index;		/* index in interrupt remapping table */
   84.15  };
   84.16  
   84.17  /*
    85.1 --- a/xen/include/asm-x86/rwlock.h	Wed Oct 22 11:38:22 2008 +0900
    85.2 +++ b/xen/include/asm-x86/rwlock.h	Wed Oct 22 11:46:55 2008 +0900
    85.3 @@ -22,25 +22,19 @@
    85.4  
    85.5  #define __build_read_lock_ptr(rw, helper)   \
    85.6  	asm volatile(LOCK "subl $1,(%0)\n\t" \
    85.7 -		     "js 2f\n" \
    85.8 +		     "jns 1f\n\t" \
    85.9 +		     "call " helper "\n\t" \
   85.10  		     "1:\n" \
   85.11 -		     ".section .text.lock,\"ax\"\n" \
   85.12 -		     "2:\tcall " helper "\n\t" \
   85.13 -		     "jmp 1b\n" \
   85.14 -		     ".previous" \
   85.15  		     ::"a" (rw) : "memory")
   85.16  
   85.17  #define __build_read_lock_const(rw, helper)   \
   85.18  	asm volatile(LOCK "subl $1,%0\n\t" \
   85.19 -		     "js 2f\n" \
   85.20 -		     "1:\n" \
   85.21 -		     ".section .text.lock,\"ax\"\n" \
   85.22 -		     "2:\tpush %%"__OP"ax\n\t" \
   85.23 +		     "jns 1f\n\t" \
   85.24 +		     "push %%"__OP"ax\n\t" \
   85.25  		     "lea %0,%%"__OP"ax\n\t" \
   85.26  		     "call " helper "\n\t" \
   85.27  		     "pop %%"__OP"ax\n\t" \
   85.28 -		     "jmp 1b\n" \
   85.29 -		     ".previous" \
   85.30 +		     "1:\n" \
   85.31  		     :"=m" (*(volatile int *)rw) : : "memory")
   85.32  
   85.33  #define __build_read_lock(rw, helper)	do { \
   85.34 @@ -52,25 +46,19 @@
   85.35  
   85.36  #define __build_write_lock_ptr(rw, helper) \
   85.37  	asm volatile(LOCK "subl $" RW_LOCK_BIAS_STR ",(%0)\n\t" \
   85.38 -		     "jnz 2f\n" \
   85.39 +		     "jz 1f\n\t" \
   85.40 +		     "call " helper "\n\t" \
   85.41  		     "1:\n" \
   85.42 -		     ".section .text.lock,\"ax\"\n" \
   85.43 -		     "2:\tcall " helper "\n\t" \
   85.44 -		     "jmp 1b\n" \
   85.45 -		     ".previous" \
   85.46  		     ::"a" (rw) : "memory")
   85.47  
   85.48  #define __build_write_lock_const(rw, helper) \
   85.49  	asm volatile(LOCK "subl $" RW_LOCK_BIAS_STR ",(%0)\n\t" \
   85.50 -		     "jnz 2f\n" \
   85.51 -		     "1:\n" \
   85.52 -		     ".section .text.lock,\"ax\"\n" \
   85.53 -		     "2:\tpush %%"__OP"ax\n\t" \
   85.54 +		     "jz 1f\n\t" \
   85.55 +		     "push %%"__OP"ax\n\t" \
   85.56  		     "lea %0,%%"__OP"ax\n\t" \
   85.57  		     "call " helper "\n\t" \
   85.58  		     "pop %%"__OP"ax\n\t" \
   85.59 -		     "jmp 1b\n" \
   85.60 -		     ".previous" \
   85.61 +		     "1:\n" \
   85.62  		     :"=m" (*(volatile int *)rw) : : "memory")
   85.63  
   85.64  #define __build_write_lock(rw, helper)	do { \
    86.1 --- a/xen/include/asm-x86/spinlock.h	Wed Oct 22 11:38:22 2008 +0900
    86.2 +++ b/xen/include/asm-x86/spinlock.h	Wed Oct 22 11:46:55 2008 +0900
    86.3 @@ -8,104 +8,71 @@
    86.4  
    86.5  typedef struct {
    86.6      volatile s16 lock;
    86.7 -    s8 recurse_cpu;
    86.8 -    u8 recurse_cnt;
    86.9 -} spinlock_t;
   86.10 -
   86.11 -#define SPIN_LOCK_UNLOCKED /*(spinlock_t)*/ { 1, -1, 0 }
   86.12 +} raw_spinlock_t;
   86.13  
   86.14 -#define spin_lock_init(x)	do { *(x) = (spinlock_t) SPIN_LOCK_UNLOCKED; } while(0)
   86.15 -#define spin_is_locked(x)	(*(volatile char *)(&(x)->lock) <= 0)
   86.16 +#define _RAW_SPIN_LOCK_UNLOCKED /*(raw_spinlock_t)*/ { 1 }
   86.17  
   86.18 -static inline void _raw_spin_lock(spinlock_t *lock)
   86.19 +#define _raw_spin_is_locked(x)                  \
   86.20 +    (*(volatile char *)(&(x)->lock) <= 0)
   86.21 +
   86.22 +static always_inline void _raw_spin_lock(raw_spinlock_t *lock)
   86.23  {
   86.24 -    __asm__ __volatile__ (
   86.25 -        "1:  lock; decb %0         \n"
   86.26 -        "    js 2f                 \n"
   86.27 -        ".section .text.lock,\"ax\"\n"
   86.28 +    asm volatile (
   86.29 +        "1:  lock; decw %0         \n"
   86.30 +        "    jns 3f                \n"
   86.31          "2:  rep; nop              \n"
   86.32 -        "    cmpb $0,%0            \n"
   86.33 +        "    cmpw $0,%0            \n"
   86.34          "    jle 2b                \n"
   86.35          "    jmp 1b                \n"
   86.36 -        ".previous"
   86.37 -        : "=m" (lock->lock) : : "memory" );
   86.38 -}
   86.39 -
   86.40 -static inline void _raw_spin_unlock(spinlock_t *lock)
   86.41 -{
   86.42 -    ASSERT(spin_is_locked(lock));
   86.43 -    __asm__ __volatile__ (
   86.44 -	"movb $1,%0" 
   86.45 +        "3:"
   86.46          : "=m" (lock->lock) : : "memory" );
   86.47  }
   86.48  
   86.49 -static inline int _raw_spin_trylock(spinlock_t *lock)
   86.50 +static always_inline void _raw_spin_unlock(raw_spinlock_t *lock)
   86.51  {
   86.52 -    char oldval;
   86.53 -    __asm__ __volatile__(
   86.54 -        "xchgb %b0,%1"
   86.55 -        :"=q" (oldval), "=m" (lock->lock)
   86.56 -        :"0" (0) : "memory");
   86.57 -    return oldval > 0;
   86.58 +    ASSERT(_raw_spin_is_locked(lock));
   86.59 +    asm volatile (
   86.60 +        "movw $1,%0" 
   86.61 +        : "=m" (lock->lock) : : "memory" );
   86.62  }
   86.63  
   86.64 -/*
   86.65 - * spin_[un]lock_recursive(): Use these forms when the lock can (safely!) be
   86.66 - * reentered recursively on the same CPU. All critical regions that may form
   86.67 - * part of a recursively-nested set must be protected by these forms. If there
   86.68 - * are any critical regions that cannot form part of such a set, they can use
   86.69 - * standard spin_[un]lock().
   86.70 - */
   86.71 -#define _raw_spin_lock_recursive(_lock)            \
   86.72 -    do {                                           \
   86.73 -        int cpu = smp_processor_id();              \
   86.74 -        if ( likely((_lock)->recurse_cpu != cpu) ) \
   86.75 -        {                                          \
   86.76 -            spin_lock(_lock);                      \
   86.77 -            (_lock)->recurse_cpu = cpu;            \
   86.78 -        }                                          \
   86.79 -        (_lock)->recurse_cnt++;                    \
   86.80 -    } while ( 0 )
   86.81 -
   86.82 -#define _raw_spin_unlock_recursive(_lock)          \
   86.83 -    do {                                           \
   86.84 -        if ( likely(--(_lock)->recurse_cnt == 0) ) \
   86.85 -        {                                          \
   86.86 -            (_lock)->recurse_cpu = -1;             \
   86.87 -            spin_unlock(_lock);                    \
   86.88 -        }                                          \
   86.89 -    } while ( 0 )
   86.90 -
   86.91 +static always_inline int _raw_spin_trylock(raw_spinlock_t *lock)
   86.92 +{
   86.93 +    s16 oldval;
   86.94 +    asm volatile (
   86.95 +        "xchgw %w0,%1"
   86.96 +        :"=r" (oldval), "=m" (lock->lock)
   86.97 +        :"0" (0) : "memory" );
   86.98 +    return (oldval > 0);
   86.99 +}
  86.100  
  86.101  typedef struct {
  86.102      volatile unsigned int lock;
  86.103 -} rwlock_t;
  86.104 +} raw_rwlock_t;
  86.105  
  86.106 -#define RW_LOCK_UNLOCKED /*(rwlock_t)*/ { RW_LOCK_BIAS }
  86.107 -
  86.108 -#define rwlock_init(x)	do { *(x) = (rwlock_t) RW_LOCK_UNLOCKED; } while(0)
  86.109 +#define _RAW_RW_LOCK_UNLOCKED /*(raw_rwlock_t)*/ { RW_LOCK_BIAS }
  86.110  
  86.111  /*
  86.112   * On x86, we implement read-write locks as a 32-bit counter
  86.113   * with the high bit (sign) being the "contended" bit.
  86.114   */
  86.115 -static inline void _raw_read_lock(rwlock_t *rw)
  86.116 +static always_inline void _raw_read_lock(raw_rwlock_t *rw)
  86.117  {
  86.118      __build_read_lock(rw, "__read_lock_failed");
  86.119  }
  86.120  
  86.121 -static inline void _raw_write_lock(rwlock_t *rw)
  86.122 +static always_inline void _raw_write_lock(raw_rwlock_t *rw)
  86.123  {
  86.124      __build_write_lock(rw, "__write_lock_failed");
  86.125  }
  86.126  
  86.127 -#define _raw_read_unlock(rw)                       \
  86.128 -    __asm__ __volatile__ (                         \
  86.129 -        "lock ; incl %0" :                         \
  86.130 +#define _raw_read_unlock(rw)                    \
  86.131 +    asm volatile (                              \
  86.132 +        "lock ; incl %0" :                      \
  86.133          "=m" ((rw)->lock) : : "memory" )
  86.134 -#define _raw_write_unlock(rw)                      \
  86.135 -    __asm__ __volatile__ (                         \
  86.136 -        "lock ; addl $" RW_LOCK_BIAS_STR ",%0" :   \
  86.137 +#define _raw_write_unlock(rw)                           \
  86.138 +    asm volatile (                                      \
  86.139 +        "lock ; addl $" RW_LOCK_BIAS_STR ",%0" :        \
  86.140          "=m" ((rw)->lock) : : "memory" )
  86.141  
  86.142  #endif /* __ASM_SPINLOCK_H */
    87.1 --- a/xen/include/public/domctl.h	Wed Oct 22 11:38:22 2008 +0900
    87.2 +++ b/xen/include/public/domctl.h	Wed Oct 22 11:46:55 2008 +0900
    87.3 @@ -614,6 +614,10 @@ DEFINE_XEN_GUEST_HANDLE(xen_domctl_subsc
    87.4  #define XEN_DOMCTL_set_machine_address_size  51
    87.5  #define XEN_DOMCTL_get_machine_address_size  52
    87.6  
    87.7 +/*
    87.8 + * Do not inject spurious page faults into this domain.
    87.9 + */
   87.10 +#define XEN_DOMCTL_suppress_spurious_page_faults 53
   87.11  
   87.12  struct xen_domctl {
   87.13      uint32_t cmd;
    88.1 --- a/xen/include/public/trace.h	Wed Oct 22 11:38:22 2008 +0900
    88.2 +++ b/xen/include/public/trace.h	Wed Oct 22 11:46:55 2008 +0900
    88.3 @@ -38,6 +38,7 @@
    88.4  #define TRC_MEM      0x0010f000    /* Xen memory trace         */
    88.5  #define TRC_PV       0x0020f000    /* Xen PV traces            */
    88.6  #define TRC_SHADOW   0x0040f000    /* Xen shadow tracing       */
    88.7 +#define TRC_PM       0x0080f000    /* Xen power management trace */
    88.8  #define TRC_ALL      0x0ffff000
    88.9  #define TRC_HD_TO_EVENT(x) ((x)&0x0fffffff)
   88.10  #define TRC_HD_CYCLE_FLAG (1UL<<31)
   88.11 @@ -146,6 +147,15 @@
   88.12  #define TRC_HVM_LMSW            (TRC_HVM_HANDLER + 0x19)
   88.13  #define TRC_HVM_LMSW64          (TRC_HVM_HANDLER + TRC_64_FLAG + 0x19)
   88.14  
   88.15 +/* trace subclasses for power management */
   88.16 +#define TRC_PM_FREQ     0x00801000      /* xen cpu freq events */
   88.17 +#define TRC_PM_IDLE     0x00802000      /* xen cpu idle events */
   88.18 +
   88.19 +/* trace events for per class */
   88.20 +#define TRC_PM_FREQ_CHANGE      (TRC_PM_FREQ + 0x01)
   88.21 +#define TRC_PM_IDLE_ENTRY       (TRC_PM_IDLE + 0x01)
   88.22 +#define TRC_PM_IDLE_EXIT        (TRC_PM_IDLE + 0x02)
   88.23 +
   88.24  /* This structure represents a single trace buffer record. */
   88.25  struct t_rec {
   88.26      uint32_t event:28;
    89.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    89.2 +++ b/xen/include/xen/hvm/irq.h	Wed Oct 22 11:46:55 2008 +0900
    89.3 @@ -0,0 +1,99 @@
    89.4 +/******************************************************************************
    89.5 + * irq.h
    89.6 + * 
    89.7 + * Interrupt distribution and delivery logic.
    89.8 + * 
    89.9 + * Copyright (c) 2006, K A Fraser, XenSource Inc.
   89.10 + *
   89.11 + * This program is free software; you can redistribute it and/or modify it
   89.12 + * under the terms and conditions of the GNU General Public License,
   89.13 + * version 2, as published by the Free Software Foundation.
   89.14 + *
   89.15 + * This program is distributed in the hope it will be useful, but WITHOUT
   89.16 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   89.17 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   89.18 + * more details.
   89.19 + *
   89.20 + * You should have received a copy of the GNU General Public License along with
   89.21 + * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
   89.22 + * Place - Suite 330, Boston, MA 02111-1307 USA.
   89.23 + */
   89.24 +
   89.25 +#ifndef __XEN_HVM_IRQ_H__
   89.26 +#define __XEN_HVM_IRQ_H__
   89.27 +
   89.28 +#include <xen/types.h>
   89.29 +#include <xen/spinlock.h>
   89.30 +#include <asm/irq.h>
   89.31 +#include <public/hvm/save.h>
   89.32 +
   89.33 +struct dev_intx_gsi_link {
   89.34 +    struct list_head list;
   89.35 +    uint8_t device;
   89.36 +    uint8_t intx;
   89.37 +    uint8_t gsi;
   89.38 +    uint8_t link;
   89.39 +};
   89.40 +
   89.41 +#define _HVM_IRQ_DPCI_MSI  0x1
   89.42 +
   89.43 +struct hvm_gmsi_info {
   89.44 +    uint32_t gvec;
   89.45 +    uint32_t gflags;
   89.46 +};
   89.47 +
   89.48 +struct hvm_mirq_dpci_mapping {
   89.49 +    uint32_t flags;
   89.50 +    int pending;
   89.51 +    struct list_head digl_list;
   89.52 +    struct domain *dom;
   89.53 +    struct hvm_gmsi_info gmsi;
   89.54 +};
   89.55 +
   89.56 +struct hvm_girq_dpci_mapping {
   89.57 +    uint8_t valid;
   89.58 +    uint8_t device;
   89.59 +    uint8_t intx;
   89.60 +    uint8_t machine_gsi;
   89.61 +};
   89.62 +
   89.63 +#define NR_ISAIRQS  16
   89.64 +#define NR_LINK     4
   89.65 +
   89.66 +/* Protected by domain's event_lock */
   89.67 +struct hvm_irq_dpci {
   89.68 +    /* Machine IRQ to guest device/intx mapping. */
   89.69 +    DECLARE_BITMAP(mapping, NR_PIRQS);
   89.70 +    struct hvm_mirq_dpci_mapping mirq[NR_IRQS];
   89.71 +    /* Guest IRQ to guest device/intx mapping. */
   89.72 +    struct hvm_girq_dpci_mapping girq[NR_IRQS];
   89.73 +    uint8_t msi_gvec_pirq[NR_VECTORS];
   89.74 +    DECLARE_BITMAP(dirq_mask, NR_IRQS);
   89.75 +    /* Record of mapped ISA IRQs */
   89.76 +    DECLARE_BITMAP(isairq_map, NR_ISAIRQS);
   89.77 +    /* Record of mapped Links */
   89.78 +    uint8_t link_cnt[NR_LINK];
   89.79 +    struct timer hvm_timer[NR_IRQS];
   89.80 +};
   89.81 +
   89.82 +/* Modify state of a PCI INTx wire. */
   89.83 +void hvm_pci_intx_assert(
   89.84 +    struct domain *d, unsigned int device, unsigned int intx);
   89.85 +void hvm_pci_intx_deassert(
   89.86 +    struct domain *d, unsigned int device, unsigned int intx);
   89.87 +
   89.88 +/* Modify state of an ISA device's IRQ wire. */
   89.89 +void hvm_isa_irq_assert(
   89.90 +    struct domain *d, unsigned int isa_irq);
   89.91 +void hvm_isa_irq_deassert(
   89.92 +    struct domain *d, unsigned int isa_irq);
   89.93 +
   89.94 +void hvm_set_pci_link_route(struct domain *d, u8 link, u8 isa_irq);
   89.95 +
   89.96 +void hvm_maybe_deassert_evtchn_irq(void);
   89.97 +void hvm_assert_evtchn_irq(struct vcpu *v);
   89.98 +void hvm_set_callback_via(struct domain *d, uint64_t via);
   89.99 +
  89.100 +void hvm_dirq_assist(struct vcpu *v);
  89.101 +
  89.102 +#endif /* __XEN_HVM_IRQ_H__ */
    90.1 --- a/xen/include/xen/spinlock.h	Wed Oct 22 11:38:22 2008 +0900
    90.2 +++ b/xen/include/xen/spinlock.h	Wed Oct 22 11:46:55 2008 +0900
    90.3 @@ -3,93 +3,95 @@
    90.4  
    90.5  #include <xen/config.h>
    90.6  #include <asm/system.h>
    90.7 -
    90.8 -#define spin_lock_irqsave(lock, flags) \
    90.9 -    do { local_irq_save(flags); spin_lock(lock); } while ( 0 )
   90.10 -#define spin_lock_irq(lock) \
   90.11 -    do { local_irq_disable(); spin_lock(lock); } while ( 0 )
   90.12 -
   90.13 -#define read_lock_irqsave(lock, flags) \
   90.14 -    do { local_irq_save(flags); read_lock(lock); } while ( 0 )
   90.15 -#define read_lock_irq(lock) \
   90.16 -    do { local_irq_disable(); read_lock(lock); } while ( 0 )
   90.17 -
   90.18 -#define write_lock_irqsave(lock, flags) \
   90.19 -    do { local_irq_save(flags); write_lock(lock); } while ( 0 )
   90.20 -#define write_lock_irq(lock) \
   90.21 -    do { local_irq_disable(); write_lock(lock); } while ( 0 )
   90.22 -
   90.23 -#define spin_unlock_irqrestore(lock, flags) \
   90.24 -    do { spin_unlock(lock); local_irq_restore(flags); } while ( 0 )
   90.25 -#define spin_unlock_irq(lock) \
   90.26 -    do { spin_unlock(lock); local_irq_enable(); } while ( 0 )
   90.27 -
   90.28 -#define read_unlock_irqrestore(lock, flags) \
   90.29 -    do { read_unlock(lock); local_irq_restore(flags); } while ( 0 )
   90.30 -#define read_unlock_irq(lock) \
   90.31 -    do { read_unlock(lock); local_irq_enable(); } while ( 0 )
   90.32 -
   90.33 -#define write_unlock_irqrestore(lock, flags) \
   90.34 -    do { write_unlock(lock); local_irq_restore(flags); } while ( 0 )
   90.35 -#define write_unlock_irq(lock) \
   90.36 -    do { write_unlock(lock); local_irq_enable(); } while ( 0 )
   90.37 -
   90.38 -#ifdef CONFIG_SMP
   90.39 -
   90.40  #include <asm/spinlock.h>
   90.41  
   90.42 -#else
   90.43 -
   90.44 -#if (__GNUC__ > 2)
   90.45 -typedef struct { } spinlock_t;
   90.46 -#define SPIN_LOCK_UNLOCKED /*(spinlock_t)*/ { }
   90.47 -#else
   90.48 -typedef struct { int gcc_is_buggy; } spinlock_t;
   90.49 -#define SPIN_LOCK_UNLOCKED /*(spinlock_t)*/ { 0 }
   90.50 -#endif
   90.51 -
   90.52 -#define spin_lock_init(lock)             do { } while(0)
   90.53 -#define spin_is_locked(lock)             (0)
   90.54 -#define _raw_spin_lock(lock)             (void)(lock)
   90.55 -#define _raw_spin_trylock(lock)          ({1; })
   90.56 -#define _raw_spin_unlock(lock)           do { } while(0)
   90.57 -#define _raw_spin_lock_recursive(lock)   do { } while(0)
   90.58 -#define _raw_spin_unlock_recursive(lock) do { } while(0)
   90.59 +typedef struct {
   90.60 +    raw_spinlock_t raw;
   90.61 +    u16 recurse_cpu:12;
   90.62 +    u16 recurse_cnt:4;
   90.63 +} spinlock_t;
   90.64  
   90.65 -#if (__GNUC__ > 2)
   90.66 -typedef struct { } rwlock_t;
   90.67 -#define RW_LOCK_UNLOCKED /*(rwlock_t)*/ { }
   90.68 -#else
   90.69 -typedef struct { int gcc_is_buggy; } rwlock_t;
   90.70 -#define RW_LOCK_UNLOCKED /*(rwlock_t)*/ { 0 }
   90.71 -#endif
   90.72 +#define SPIN_LOCK_UNLOCKED { _RAW_SPIN_LOCK_UNLOCKED, 0xfffu, 0 }
   90.73 +#define DEFINE_SPINLOCK(l) spinlock_t l = SPIN_LOCK_UNLOCKED
   90.74 +#define spin_lock_init(l) (*(l) = (spinlock_t)SPIN_LOCK_UNLOCKED)
   90.75  
   90.76 -#define rwlock_init(lock)            do { } while(0)
   90.77 -#define _raw_read_lock(lock)         (void)(lock) /* Not "unused variable". */
   90.78 -#define _raw_read_unlock(lock)       do { } while(0)
   90.79 -#define _raw_write_lock(lock)        (void)(lock) /* Not "unused variable". */
   90.80 -#define _raw_write_unlock(lock)      do { } while(0)
   90.81 -
   90.82 -#endif
   90.83 +typedef struct {
   90.84 +    raw_rwlock_t raw;
   90.85 +} rwlock_t;
   90.86  
   90.87 -#define spin_lock(_lock)             _raw_spin_lock(_lock)
   90.88 -#define spin_trylock(_lock)          _raw_spin_trylock(_lock)
   90.89 -#define spin_unlock(_lock)           _raw_spin_unlock(_lock)
   90.90 -#define spin_lock_recursive(_lock)   _raw_spin_lock_recursive(_lock)
   90.91 -#define spin_unlock_recursive(_lock) _raw_spin_unlock_recursive(_lock)
   90.92 -#define read_lock(_lock)             _raw_read_lock(_lock)
   90.93 -#define read_unlock(_lock)           _raw_read_unlock(_lock)
   90.94 -#define write_lock(_lock)            _raw_write_lock(_lock)
   90.95 -#define write_unlock(_lock)          _raw_write_unlock(_lock)
   90.96 +#define RW_LOCK_UNLOCKED { _RAW_RW_LOCK_UNLOCKED }
   90.97 +#define DEFINE_RWLOCK(l) rwlock_t l = RW_LOCK_UNLOCKED
   90.98 +#define rwlock_init(l) (*(l) = (rwlock_t)RW_LOCK_UNLOCKED)
   90.99 +
  90.100 +void _spin_lock(spinlock_t *lock);
  90.101 +void _spin_lock_irq(spinlock_t *lock);
  90.102 +unsigned long _spin_lock_irqsave(spinlock_t *lock);
  90.103 +
  90.104 +void _spin_unlock(spinlock_t *lock);
  90.105 +void _spin_unlock_irq(spinlock_t *lock);
  90.106 +void _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags);
  90.107 +
  90.108 +int _spin_is_locked(spinlock_t *lock);
  90.109 +int _spin_trylock(spinlock_t *lock);
  90.110 +void _spin_barrier(spinlock_t *lock);
  90.111 +
  90.112 +void _spin_lock_recursive(spinlock_t *lock);
  90.113 +void _spin_unlock_recursive(spinlock_t *lock);
  90.114 +
  90.115 +void _read_lock(rwlock_t *lock);
  90.116 +void _read_lock_irq(rwlock_t *lock);
  90.117 +unsigned long _read_lock_irqsave(rwlock_t *lock);
  90.118 +
  90.119 +void _read_unlock(rwlock_t *lock);
  90.120 +void _read_unlock_irq(rwlock_t *lock);
  90.121 +void _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags);
  90.122 +
  90.123 +void _write_lock(rwlock_t *lock);
  90.124 +void _write_lock_irq(rwlock_t *lock);
  90.125 +unsigned long _write_lock_irqsave(rwlock_t *lock);
  90.126 +
  90.127 +void _write_unlock(rwlock_t *lock);
  90.128 +void _write_unlock_irq(rwlock_t *lock);
  90.129 +void _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags);
  90.130 +
  90.131 +#define spin_lock(l)                  _spin_lock(l)
  90.132 +#define spin_lock_irq(l)              _spin_lock_irq(l)
  90.133 +#define spin_lock_irqsave(l, f)       ((f) = _spin_lock_irqsave(l))
  90.134 +
  90.135 +#define spin_unlock(l)                _spin_unlock(l)
  90.136 +#define spin_unlock_irq(l)            _spin_unlock_irq(l)
  90.137 +#define spin_unlock_irqrestore(l, f)  _spin_unlock_irqrestore(l, f)
  90.138 +
  90.139 +#define spin_is_locked(l)             _raw_spin_is_locked(&(l)->raw)
  90.140 +#define spin_trylock(l)               _spin_trylock(l)
  90.141  
  90.142  /* Ensure a lock is quiescent between two critical operations. */
  90.143 -static inline void spin_barrier(spinlock_t *lock)
  90.144 -{
  90.145 -    do { mb(); } while ( spin_is_locked(lock) );
  90.146 -    mb();
  90.147 -}
  90.148 +#define spin_barrier(l)               _spin_barrier(l)
  90.149  
  90.150 -#define DEFINE_SPINLOCK(x) spinlock_t x = SPIN_LOCK_UNLOCKED
  90.151 -#define DEFINE_RWLOCK(x) rwlock_t x = RW_LOCK_UNLOCKED
  90.152 +/*
  90.153 + * spin_[un]lock_recursive(): Use these forms when the lock can (safely!) be
  90.154 + * reentered recursively on the same CPU. All critical regions that may form
  90.155 + * part of a recursively-nested set must be protected by these forms. If there
  90.156 + * are any critical regions that cannot form part of such a set, they can use
  90.157 + * standard spin_[un]lock().
  90.158 + */
  90.159 +#define spin_lock_recursive(l)        _spin_lock_recursive(l)
  90.160 +#define spin_unlock_recursive(l)      _spin_unlock_recursive(l)
  90.161 +
  90.162 +#define read_lock(l)                  _read_lock(l)
  90.163 +#define read_lock_irq(l)              _read_lock_irq(l)
  90.164 +#define read_lock_irqsave(l, f)       ((f) = _read_lock_irqsave(l))
  90.165 +
  90.166 +#define read_unlock(l)                _read_unlock(l)
  90.167 +#define read_unlock_irq(l)            _read_unlock_irq(l)
  90.168 +#define read_unlock_irqrestore(l, f)  _read_unlock_irqrestore(l, f)
  90.169 +
  90.170 +#define write_lock(l)                 _write_lock(l)
  90.171 +#define write_lock_irq(l)             _write_lock_irq(l)
  90.172 +#define write_lock_irqsave(l, f)      ((f) = _write_lock_irqsave(l))
  90.173 +
  90.174 +#define write_unlock(l)               _write_unlock(l)
  90.175 +#define write_unlock_irq(l)           _write_unlock_irq(l)
  90.176 +#define write_unlock_irqrestore(l, f) _write_unlock_irqrestore(l, f)
  90.177  
  90.178  #endif /* __SPINLOCK_H__ */
    91.1 --- a/xen/include/xen/xmalloc.h	Wed Oct 22 11:38:22 2008 +0900
    91.2 +++ b/xen/include/xen/xmalloc.h	Wed Oct 22 11:46:55 2008 +0900
    91.3 @@ -2,11 +2,16 @@
    91.4  #ifndef __XMALLOC_H__
    91.5  #define __XMALLOC_H__
    91.6  
    91.7 +/*
    91.8 + * Xen malloc/free-style interface.
    91.9 + */
   91.10 +
   91.11  /* Allocate space for typed object. */
   91.12  #define xmalloc(_type) ((_type *)_xmalloc(sizeof(_type), __alignof__(_type)))
   91.13  
   91.14  /* Allocate space for array of typed objects. */
   91.15 -#define xmalloc_array(_type, _num) ((_type *)_xmalloc_array(sizeof(_type), __alignof__(_type), _num))
   91.16 +#define xmalloc_array(_type, _num) \
   91.17 +    ((_type *)_xmalloc_array(sizeof(_type), __alignof__(_type), _num))
   91.18  
   91.19  /* Allocate untyped storage. */
   91.20  #define xmalloc_bytes(_bytes) (_xmalloc(_bytes, SMP_CACHE_BYTES))
   91.21 @@ -15,8 +20,9 @@
   91.22  extern void xfree(void *);
   91.23  
   91.24  /* Underlying functions */
   91.25 -extern void *_xmalloc(size_t size, size_t align);
   91.26 -static inline void *_xmalloc_array(size_t size, size_t align, size_t num)
   91.27 +extern void *_xmalloc(unsigned long size, unsigned long align);
   91.28 +static inline void *_xmalloc_array(
   91.29 +    unsigned long size, unsigned long align, unsigned long num)
   91.30  {
   91.31  	/* Check for overflow. */
   91.32  	if (size && num > UINT_MAX / size)
   91.33 @@ -24,4 +30,73 @@ static inline void *_xmalloc_array(size_
   91.34   	return _xmalloc(size * num, align);
   91.35  }
   91.36  
   91.37 +/*
   91.38 + * Pooled allocator interface.
   91.39 + */
   91.40 +
   91.41 +struct xmem_pool;
   91.42 +
   91.43 +typedef void *(xmem_pool_get_memory)(unsigned long bytes);
   91.44 +typedef void (xmem_pool_put_memory)(void *ptr);
   91.45 +
   91.46 +/**
   91.47 + * xmem_pool_create - create dynamic memory pool
   91.48 + * @name: name of the pool
   91.49 + * @get_mem: callback function used to expand pool
   91.50 + * @put_mem: callback function used to shrink pool
    91.51 + * @init_size: initial pool size (in bytes)
    91.52 + * @max_size: maximum pool size (in bytes) - set to 0 for no limit
   91.53 + * @grow_size: amount of memory (in bytes) added to pool whenever required
   91.54 + *
   91.55 + * All size values are rounded up to next page boundary.
   91.56 + */
   91.57 +struct xmem_pool *xmem_pool_create(
   91.58 +    const char *name,
   91.59 +    xmem_pool_get_memory get_mem,
   91.60 +    xmem_pool_put_memory put_mem,
   91.61 +    unsigned long init_size,
   91.62 +    unsigned long max_size,
   91.63 +    unsigned long grow_size);
   91.64 +
   91.65 +/**
   91.66 + * xmem_pool_destroy - cleanup given pool
   91.67 + * @mem_pool: Pool to be destroyed
   91.68 + *
   91.69 + * Data structures associated with pool are freed.
    91.70 + * All memory allocated from the pool must be freed before
    91.71 + * destroying it.
   91.72 + */
   91.73 +void xmem_pool_destroy(struct xmem_pool *pool);
   91.74 +
   91.75 +/**
   91.76 + * xmem_pool_alloc - allocate memory from given pool
   91.77 + * @size: no. of bytes
   91.78 + * @mem_pool: pool to allocate from
   91.79 + */
   91.80 +void *xmem_pool_alloc(unsigned long size, struct xmem_pool *pool);
   91.81 +
   91.82 +/**
   91.83 + * xmem_pool_free - free memory from given pool
   91.84 + * @ptr: address of memory to be freed
   91.85 + * @mem_pool: pool to free from
   91.86 + */
   91.87 +void xmem_pool_free(void *ptr, struct xmem_pool *pool);
   91.88 +
   91.89 +/**
   91.90 + * xmem_pool_get_used_size - get memory currently used by given pool
   91.91 + *
   91.92 + * Used memory includes stored data + metadata + internal fragmentation
   91.93 + */
   91.94 +unsigned long xmem_pool_get_used_size(struct xmem_pool *pool);
   91.95 +
   91.96 +/**
   91.97 + * xmem_pool_get_total_size - get total memory currently allocated for pool
   91.98 + *
   91.99 + * This is the total memory currently allocated for this pool which includes
  91.100 + * used size + free size.
  91.101 + *
   91.102 + * (Total - Used) is a good indicator of the allocator's memory efficiency.
  91.103 + */
  91.104 +unsigned long xmem_pool_get_total_size(struct xmem_pool *pool);
  91.105 +
  91.106  #endif /* __XMALLOC_H__ */
    92.1 --- a/xen/include/xlat.lst	Wed Oct 22 11:38:22 2008 +0900
    92.2 +++ b/xen/include/xlat.lst	Wed Oct 22 11:46:55 2008 +0900
    92.3 @@ -55,3 +55,7 @@
    92.4  !	processor_cx			platform.h
    92.5  !	processor_flags			platform.h
    92.6  !	processor_power			platform.h
    92.7 +!	pct_register			platform.h
    92.8 +!	processor_px			platform.h
    92.9 +!	psd_package			platform.h
   92.10 +!	processor_performance		platform.h