ia64/xen-unstable

changeset 14618:0b2794d3320f

Merge with xen-ia64-unstable.hg
author kfraser@localhost.localdomain
date Wed Mar 28 10:38:41 2007 +0100 (2007-03-28)
parents 14aeb7981e4e ff6a1607c17b
children 4c5ea17290fe
files xen/arch/ia64/xen/mm.c
line diff
     1.1 --- a/Config.mk	Tue Mar 27 12:21:48 2007 -0600
     1.2 +++ b/Config.mk	Wed Mar 28 10:38:41 2007 +0100
     1.3 @@ -31,17 +31,27 @@ EXTRA_INCLUDES += $(EXTRA_PREFIX)/includ
     1.4  EXTRA_LIB += $(EXTRA_PREFIX)/$(LIBDIR)
     1.5  endif
     1.6  
     1.7 -# cc-option
     1.8 +# cc-option: Check if compiler supports first option, else fall back to second.
     1.9  # Usage: cflags-y += $(call cc-option,$(CC),-march=winchip-c6,-march=i586)
    1.10  cc-option = $(shell if test -z "`$(1) $(2) -S -o /dev/null -xc \
    1.11                /dev/null 2>&1`"; then echo "$(2)"; else echo "$(3)"; fi ;)
    1.12  
    1.13 -# cc-ver
    1.14 +# cc-ver: Check compiler is at least specified version. Return boolean 'y'/'n'.
    1.15  # Usage: ifeq ($(call cc-ver,$(CC),0x030400),y)
    1.16  cc-ver = $(shell if [ $$((`$(1) -dumpversion | awk -F. \
    1.17             '{ printf "0x%02x%02x%02x", $$1, $$2, $$3}'`)) -ge $$(($(2))) ]; \
    1.18             then echo y; else echo n; fi ;)
    1.19  
    1.20 +# cc-ver-check: Check compiler is at least specified version, else fail.
    1.21 +# Usage: $(call cc-ver-check,CC,0x030400,"Require at least gcc-3.4")
    1.22 +cc-ver-check = $(eval $(call cc-ver-check-closure,$(1),$(2),$(3)))
    1.23 +define cc-ver-check-closure
    1.24 +    ifeq ($$(call cc-ver,$$($(1)),$(2)),n)
    1.25 +        override $(1) = echo "*** FATAL BUILD ERROR: "$(3) >&2; exit 1;
    1.26 +        cc-option := n
    1.27 +    endif
    1.28 +endef
    1.29 +
    1.30  ifneq ($(debug),y)
    1.31  CFLAGS += -DNDEBUG
    1.32  else
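
The cc-ver test packs the output of gcc -dumpversion into one integer, two hex digits per component, so a plain numeric comparison against a threshold such as 0x030400 works; cc-ver-check then uses $(eval) to replace the compiler variable with a command that prints a fatal error and exits when the check fails. A minimal Python sketch of the version encoding, for illustration only (the Makefile does this with awk and shell arithmetic):

    # Mirror of awk's '{ printf "0x%02x%02x%02x", $1, $2, $3 }' packing.
    def pack_version(dotted):
        parts = (dotted.split('.') + ['0', '0', '0'])[:3]
        major, minor, patch = [int(p) for p in parts]
        return (major << 16) | (minor << 8) | patch

    assert pack_version('3.4.6') >= 0x030400   # gcc 3.4.6 satisfies cc-ver,0x030400
    assert pack_version('3.3.5') <  0x030400   # gcc 3.3.5 fails, so cc-ver-check aborts the build
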
     2.1 --- a/docs/xen-api/xenapi-datamodel.tex	Tue Mar 27 12:21:48 2007 -0600
     2.2 +++ b/docs/xen-api/xenapi-datamodel.tex	Wed Mar 28 10:38:41 2007 +0100
     2.3 @@ -6549,6 +6549,7 @@ Quals & Field & Type & Description \\
      2.4  $\mathit{RW}$ &  {\tt name/description} & string & a notes field containing a human-readable description \\
     2.5  $\mathit{RO}_\mathit{run}$ &  {\tt VIFs} & (VIF ref) Set & list of connected vifs \\
     2.6  $\mathit{RO}_\mathit{run}$ &  {\tt PIFs} & (PIF ref) Set & list of connected pifs \\
     2.7 +$\mathit{RW}$ &  {\tt other\_config} & (string $\rightarrow$ string) Map & additional configuration \\
     2.8  \hline
     2.9  \end{longtable}
    2.10  \subsection{RPCs associated with class: network}
    2.11 @@ -6801,6 +6802,145 @@ value of the field
    2.12  \vspace{0.3cm}
    2.13  \vspace{0.3cm}
    2.14  \vspace{0.3cm}
    2.15 +\subsubsection{RPC name:~get\_other\_config}
    2.16 +
    2.17 +{\bf Overview:} 
    2.18 +Get the other\_config field of the given network.
    2.19 +
    2.20 + \noindent {\bf Signature:} 
    2.21 +\begin{verbatim} ((string -> string) Map) get_other_config (session_id s, network ref self)\end{verbatim}
    2.22 +
    2.23 +
    2.24 +\noindent{\bf Arguments:}
    2.25 +
    2.26 + 
    2.27 +\vspace{0.3cm}
    2.28 +\begin{tabular}{|c|c|p{7cm}|}
    2.29 + \hline
    2.30 +{\bf type} & {\bf name} & {\bf description} \\ \hline
    2.31 +{\tt network ref } & self & reference to the object \\ \hline 
    2.32 +
    2.33 +\end{tabular}
    2.34 +
    2.35 +\vspace{0.3cm}
    2.36 +
    2.37 + \noindent {\bf Return Type:} 
    2.38 +{\tt 
    2.39 +(string $\rightarrow$ string) Map
    2.40 +}
    2.41 +
    2.42 +
    2.43 +value of the field
    2.44 +\vspace{0.3cm}
    2.45 +\vspace{0.3cm}
    2.46 +\vspace{0.3cm}
    2.47 +\subsubsection{RPC name:~set\_other\_config}
    2.48 +
    2.49 +{\bf Overview:} 
    2.50 +Set the other\_config field of the given network.
    2.51 +
    2.52 + \noindent {\bf Signature:} 
    2.53 +\begin{verbatim} void set_other_config (session_id s, network ref self, (string -> string) Map value)\end{verbatim}
    2.54 +
    2.55 +
    2.56 +\noindent{\bf Arguments:}
    2.57 +
    2.58 + 
    2.59 +\vspace{0.3cm}
    2.60 +\begin{tabular}{|c|c|p{7cm}|}
    2.61 + \hline
    2.62 +{\bf type} & {\bf name} & {\bf description} \\ \hline
    2.63 +{\tt network ref } & self & reference to the object \\ \hline 
    2.64 +
    2.65 +{\tt (string $\rightarrow$ string) Map } & value & New value to set \\ \hline 
    2.66 +
    2.67 +\end{tabular}
    2.68 +
    2.69 +\vspace{0.3cm}
    2.70 +
    2.71 + \noindent {\bf Return Type:} 
    2.72 +{\tt 
    2.73 +void
    2.74 +}
    2.75 +
    2.76 +
    2.77 +
    2.78 +\vspace{0.3cm}
    2.79 +\vspace{0.3cm}
    2.80 +\vspace{0.3cm}
    2.81 +\subsubsection{RPC name:~add\_to\_other\_config}
    2.82 +
    2.83 +{\bf Overview:} 
    2.84 +Add the given key-value pair to the other\_config field of the given
    2.85 +network.
    2.86 +
    2.87 + \noindent {\bf Signature:} 
    2.88 +\begin{verbatim} void add_to_other_config (session_id s, network ref self, string key, string value)\end{verbatim}
    2.89 +
    2.90 +
    2.91 +\noindent{\bf Arguments:}
    2.92 +
    2.93 + 
    2.94 +\vspace{0.3cm}
    2.95 +\begin{tabular}{|c|c|p{7cm}|}
    2.96 + \hline
    2.97 +{\bf type} & {\bf name} & {\bf description} \\ \hline
    2.98 +{\tt network ref } & self & reference to the object \\ \hline 
    2.99 +
   2.100 +{\tt string } & key & Key to add \\ \hline 
   2.101 +
   2.102 +{\tt string } & value & Value to add \\ \hline 
   2.103 +
   2.104 +\end{tabular}
   2.105 +
   2.106 +\vspace{0.3cm}
   2.107 +
   2.108 + \noindent {\bf Return Type:} 
   2.109 +{\tt 
   2.110 +void
   2.111 +}
   2.112 +
   2.113 +
   2.114 +
   2.115 +\vspace{0.3cm}
   2.116 +\vspace{0.3cm}
   2.117 +\vspace{0.3cm}
   2.118 +\subsubsection{RPC name:~remove\_from\_other\_config}
   2.119 +
   2.120 +{\bf Overview:} 
   2.121 +Remove the given key and its corresponding value from the other\_config
   2.122 +field of the given network.  If the key is not in that Map, then do
   2.123 +nothing.
   2.124 +
   2.125 + \noindent {\bf Signature:} 
   2.126 +\begin{verbatim} void remove_from_other_config (session_id s, network ref self, string key)\end{verbatim}
   2.127 +
   2.128 +
   2.129 +\noindent{\bf Arguments:}
   2.130 +
   2.131 + 
   2.132 +\vspace{0.3cm}
   2.133 +\begin{tabular}{|c|c|p{7cm}|}
   2.134 + \hline
   2.135 +{\bf type} & {\bf name} & {\bf description} \\ \hline
   2.136 +{\tt network ref } & self & reference to the object \\ \hline 
   2.137 +
   2.138 +{\tt string } & key & Key to remove \\ \hline 
   2.139 +
   2.140 +\end{tabular}
   2.141 +
   2.142 +\vspace{0.3cm}
   2.143 +
   2.144 + \noindent {\bf Return Type:} 
   2.145 +{\tt 
   2.146 +void
   2.147 +}
   2.148 +
   2.149 +
   2.150 +
   2.151 +\vspace{0.3cm}
   2.152 +\vspace{0.3cm}
   2.153 +\vspace{0.3cm}
   2.154  \subsubsection{RPC name:~create}
   2.155  
   2.156  {\bf Overview:} 
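
The other\_config RPCs documented above follow the usual Xen-API calling convention: each call takes the session reference first and returns a struct with Status and Value fields. A rough client-side sketch over plain XML-RPC; the port comes from the xen-api-server example elsewhere in this changeset, while the credentials and the unwrap helper are illustrative assumptions:

    import xmlrpclib

    server = xmlrpclib.ServerProxy('http://localhost:9363/')

    def value_of(response):
        # Xen-API responses carry 'Status' and, on success, 'Value'.
        if response['Status'] != 'Success':
            raise RuntimeError(response.get('ErrorDescription'))
        return response['Value']

    session = value_of(server.session.login_with_password('user', 'password'))
    net = value_of(server.network.get_all(session))[0]    # assumes one network exists

    value_of(server.network.add_to_other_config(session, net, 'vlan', '42'))
    print value_of(server.network.get_other_config(session, net))    # {'vlan': '42'}
    value_of(server.network.remove_from_other_config(session, net, 'vlan'))
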
     3.1 --- a/linux-2.6-xen-sparse/arch/x86_64/kernel/entry-xen.S	Tue Mar 27 12:21:48 2007 -0600
     3.2 +++ b/linux-2.6-xen-sparse/arch/x86_64/kernel/entry-xen.S	Wed Mar 28 10:38:41 2007 +0100
     3.3 @@ -148,11 +148,11 @@ NMI_MASK = 0x80000000
     3.4  	.endm
     3.5  
     3.6          /*
     3.7 -         * Must be consistent with the definition in arch-x86_64.h:    
     3.8 +         * Must be consistent with the definition in arch-x86/xen-x86_64.h:
     3.9           *     struct iret_context {
    3.10           *        u64 rax, r11, rcx, flags, rip, cs, rflags, rsp, ss;
    3.11           *     };
    3.12 -         * #define VGCF_IN_SYSCALL (1<<8) 
    3.13 +         * with rax, r11, and rcx being taken care of in the hypercall stub.
    3.14           */
    3.15  	.macro HYPERVISOR_IRET flag
    3.16  	testb $3,1*8(%rsp)
    3.17 @@ -164,22 +164,16 @@ NMI_MASK = 0x80000000
    3.18  	jnz   1f
    3.19  
    3.20  	/* Direct iret to kernel space. Correct CS and SS. */
    3.21 -	orb   $3,1*8(%rsp)
    3.22 -	orb   $3,4*8(%rsp)
    3.23 +	orl   $3,1*8(%rsp)
    3.24 +	orl   $3,4*8(%rsp)
    3.25  1:	iretq
    3.26  
    3.27  2:	/* Slow iret via hypervisor. */
    3.28 -	andl  $~NMI_MASK, 16(%rsp)
    3.29 +	andl  $~NMI_MASK, 2*8(%rsp)
    3.30  	pushq $\flag
    3.31  	jmp  hypercall_page + (__HYPERVISOR_iret * 32)
    3.32  	.endm
    3.33  
    3.34 -        .macro SWITCH_TO_KERNEL ssoff,adjust=0
    3.35 -	jc  1f
    3.36 -	orb  $1,\ssoff-\adjust+4(%rsp)
    3.37 -1:
    3.38 -        .endm
    3.39 -
    3.40  /*
    3.41   * A newly forked process directly context switches into this.
    3.42   */ 	
     4.1 --- a/linux-2.6-xen-sparse/drivers/xen/core/machine_reboot.c	Tue Mar 27 12:21:48 2007 -0600
     4.2 +++ b/linux-2.6-xen-sparse/drivers/xen/core/machine_reboot.c	Wed Mar 28 10:38:41 2007 +0100
     4.3 @@ -123,7 +123,7 @@ static void post_suspend(int suspend_can
     4.4  static int take_machine_down(void *p_fast_suspend)
     4.5  {
     4.6  	int fast_suspend = *(int *)p_fast_suspend;
     4.7 -	int suspend_cancelled, err, cpu;
     4.8 +	int suspend_cancelled, err;
     4.9  	extern void time_resume(void);
    4.10  
    4.11  	if (fast_suspend) {
     5.1 --- a/linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_dev.c	Tue Mar 27 12:21:48 2007 -0600
     5.2 +++ b/linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_dev.c	Wed Mar 28 10:38:41 2007 +0100
     5.3 @@ -236,7 +236,10 @@ static ssize_t xenbus_dev_write(struct f
     5.4  		break;
     5.5  
     5.6  	case XS_WATCH:
     5.7 -	case XS_UNWATCH:
     5.8 +	case XS_UNWATCH: {
     5.9 +		static const char *XS_RESP = "OK";
    5.10 +		struct xsd_sockmsg hdr;
    5.11 +
    5.12  		path = u->u.buffer + sizeof(u->u.msg);
    5.13  		token = memchr(path, 0, u->u.msg.len);
    5.14  		if (token == NULL) {
    5.15 @@ -246,9 +249,6 @@ static ssize_t xenbus_dev_write(struct f
    5.16  		token++;
    5.17  
    5.18  		if (msg_type == XS_WATCH) {
    5.19 -			static const char * XS_WATCH_RESP = "OK";
    5.20 -			struct xsd_sockmsg hdr;
    5.21 -
    5.22  			watch = kmalloc(sizeof(*watch), GFP_KERNEL);
    5.23  			watch->watch.node = kmalloc(strlen(path)+1,
    5.24                                                      GFP_KERNEL);
    5.25 @@ -266,11 +266,6 @@ static ssize_t xenbus_dev_write(struct f
    5.26  			}
    5.27  			
    5.28  			list_add(&watch->list, &u->watches);
    5.29 -
    5.30 -			hdr.type = XS_WATCH;
    5.31 -			hdr.len = strlen(XS_WATCH_RESP) + 1;
    5.32 -			queue_reply(u, (char *)&hdr, sizeof(hdr));
    5.33 -			queue_reply(u, (char *)XS_WATCH_RESP, hdr.len);
    5.34  		} else {
    5.35  			list_for_each_entry_safe(watch, tmp_watch,
    5.36                                                   &u->watches, list) {
    5.37 @@ -285,7 +280,12 @@ static ssize_t xenbus_dev_write(struct f
    5.38  			}
    5.39  		}
    5.40  
    5.41 +		hdr.type = msg_type;
    5.42 +		hdr.len = strlen(XS_RESP) + 1;
    5.43 +		queue_reply(u, (char *)&hdr, sizeof(hdr));
    5.44 +		queue_reply(u, (char *)XS_RESP, hdr.len);
    5.45  		break;
    5.46 +	}
    5.47  
    5.48  	default:
    5.49  		rc = -EINVAL;
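
With this change an acknowledgement is queued for XS_UNWATCH as well as XS_WATCH, and the reply header echoes the request's message type. On the wire the reply is a struct xsd_sockmsg (four native uint32s: type, req_id, tx_id, len) followed by len payload bytes, here the string "OK" plus its NUL terminator. A small sketch of how a userspace reader of the xenbus device might parse such a reply (the file object and where it comes from are assumptions):

    import struct

    XSD_SOCKMSG_FMT = 'IIII'                      # type, req_id, tx_id, len

    def read_xenbus_reply(dev):
        hdr = dev.read(struct.calcsize(XSD_SOCKMSG_FMT))
        msg_type, req_id, tx_id, length = struct.unpack(XSD_SOCKMSG_FMT, hdr)
        payload = dev.read(length)                # "OK\0" for watch/unwatch acks
        return msg_type, payload
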
     6.1 --- a/tools/Rules.mk	Tue Mar 27 12:21:48 2007 -0600
     6.2 +++ b/tools/Rules.mk	Wed Mar 28 10:38:41 2007 +0100
     6.3 @@ -24,9 +24,9 @@ CFLAGS-$(CONFIG_X86_32) += $(call cc-opt
     6.4  CFLAGS += $(CFLAGS-y)
     6.5  
     6.6  # Require GCC v3.4+ (to avoid issues with alignment constraints in Xen headers)
     6.7 -ifeq ($(CONFIG_X86)$(call cc-ver,$(CC),0x030400),yn)
     6.8 -$(error Xen tools require at least gcc-3.4)
     6.9 -endif
    6.10 +check-$(CONFIG_X86) = $(call cc-ver-check,CC,0x030400,\
    6.11 +                        "Xen requires at least gcc-3.4")
    6.12 +$(eval $(check-y))
    6.13  
    6.14  %.opic: %.c
    6.15  	$(CC) $(CPPFLAGS) -DPIC $(CFLAGS) -fPIC -c -o $@ $<
     7.1 --- a/tools/examples/xend-config.sxp	Tue Mar 27 12:21:48 2007 -0600
     7.2 +++ b/tools/examples/xend-config.sxp	Wed Mar 28 10:38:41 2007 +0100
     7.3 @@ -46,6 +46,11 @@
     7.4  #   (xen-api-server ((9363 pam '^localhost$ example\\.com$')
     7.5  #                    (unix none)))
     7.6  #
     7.7 +# Optionally, the TCP Xen-API server can use SSL by specifying the private
     7.8 +# key and certificate location:
     7.9 +#
    7.10 +#                    (9367 pam '' /etc/xen/xen-api.key /etc/xen/xen-api.crt)
    7.11 +#
    7.12  # Default:
    7.13  #   (xen-api-server ((unix)))
    7.14  
    7.15 @@ -59,11 +64,18 @@
    7.16  
    7.17  #(xend-unix-path /var/lib/xend/xend-socket)
    7.18  
    7.19 -# Address and port xend should use for the TCP XMLRPC interface, 
    7.20 +
    7.21 +# Address and port xend should use for the legacy TCP XMLRPC interface, 
    7.22  # if xen-tcp-xmlrpc-server is set.
    7.23  #(xen-tcp-xmlrpc-server-address 'localhost')
    7.24  #(xen-tcp-xmlrpc-server-port 8006)
    7.25  
    7.26 +# SSL key and certificate to use for the legacy TCP XMLRPC interface.
    7.27 +# Setting these will mean that this port serves only SSL connections as
    7.28 +# opposed to plaintext ones.
    7.29 +#(xend-tcp-xmlrpc-server-ssl-key-file  /etc/xen/xmlrpc.key)
    7.30 +#(xend-tcp-xmlrpc-server-ssl-cert-file /etc/xen/xmlrpc.crt)
    7.31 +
    7.32  
    7.33  # Port xend should use for the HTTP interface, if xend-http-server is set.
    7.34  #(xend-port            8000)
     8.1 --- a/tools/ioemu/Makefile.target	Tue Mar 27 12:21:48 2007 -0600
     8.2 +++ b/tools/ioemu/Makefile.target	Wed Mar 28 10:38:41 2007 +0100
     8.3 @@ -193,6 +193,10 @@ ifdef CONFIG_SOLARIS
     8.4  LIBS+=-lsocket -lnsl -lresolv
     8.5  endif
     8.6  
     8.7 +ifeq ($(debug),y)
     8.8 +CFLAGS += -DQEMU_VNC_MONITOR_EXPORT
     8.9 +endif
    8.10 +
    8.11  # profiling code
    8.12  ifdef TARGET_GPROF
    8.13  LDFLAGS+=-p
     9.1 --- a/tools/ioemu/vnc.c	Tue Mar 27 12:21:48 2007 -0600
     9.2 +++ b/tools/ioemu/vnc.c	Wed Mar 28 10:38:41 2007 +0100
     9.3 @@ -113,8 +113,10 @@ struct VncState
     9.4      int visible_w;
     9.5      int visible_h;
     9.6  
     9.7 +#ifdef QEMU_VNC_MONITOR_EXPORT
     9.8      int ctl_keys;               /* Ctrl+Alt starts calibration */
     9.9      int shift_keys;             /* Shift / CapsLock keys */
    9.10 +#endif
    9.11      int numlock;
    9.12  };
    9.13  
    9.14 @@ -895,6 +897,7 @@ static void do_key_event(VncState *vs, i
    9.15  	    kbd_put_keycode(keycode & 0x7f);
    9.16  	else
    9.17  	    kbd_put_keycode(keycode | 0x80);
    9.18 +#ifdef QEMU_VNC_MONITOR_EXPORT
    9.19      } else if (down) {
    9.20  	int qemu_keysym = 0;
    9.21  
    9.22 @@ -922,8 +925,10 @@ static void do_key_event(VncState *vs, i
    9.23  	}
    9.24  	if (qemu_keysym != 0)
    9.25  	    kbd_put_keysym(qemu_keysym);
    9.26 +#endif
    9.27      }
    9.28  
    9.29 +#ifdef QEMU_VNC_MONITOR_EXPORT
    9.30      if (down) {
    9.31  	switch (sym) {
    9.32  	case XK_Control_L:
    9.33 @@ -976,6 +981,10 @@ static void do_key_event(VncState *vs, i
    9.34  	    break;
    9.35  	}
    9.36      }
    9.37 +#else
    9.38 +    if (!down && sym == XK_Num_Lock)
    9.39 +        vs->numlock = !vs->numlock;
    9.40 +#endif
    9.41  }
    9.42  
    9.43  static void key_event(VncState *vs, int down, uint32_t sym)
    10.1 --- a/tools/libxen/include/xen_network.h	Tue Mar 27 12:21:48 2007 -0600
    10.2 +++ b/tools/libxen/include/xen_network.h	Wed Mar 28 10:38:41 2007 +0100
    10.3 @@ -22,6 +22,7 @@
    10.4  #include "xen_common.h"
    10.5  #include "xen_network_decl.h"
    10.6  #include "xen_pif_decl.h"
    10.7 +#include "xen_string_string_map.h"
    10.8  #include "xen_vif_decl.h"
    10.9  
   10.10  
   10.11 @@ -68,6 +69,7 @@ typedef struct xen_network_record
   10.12      char *name_description;
   10.13      struct xen_vif_record_opt_set *vifs;
   10.14      struct xen_pif_record_opt_set *pifs;
   10.15 +    xen_string_string_map *other_config;
   10.16  } xen_network_record;
   10.17  
   10.18  /**
   10.19 @@ -220,6 +222,13 @@ xen_network_get_pifs(xen_session *sessio
   10.20  
   10.21  
   10.22  /**
   10.23 + * Get the other_config field of the given network.
   10.24 + */
   10.25 +extern bool
   10.26 +xen_network_get_other_config(xen_session *session, xen_string_string_map **result, xen_network network);
   10.27 +
   10.28 +
   10.29 +/**
   10.30   * Set the name/label field of the given network.
   10.31   */
   10.32  extern bool
   10.33 @@ -234,6 +243,30 @@ xen_network_set_name_description(xen_ses
   10.34  
   10.35  
   10.36  /**
   10.37 + * Set the other_config field of the given network.
   10.38 + */
   10.39 +extern bool
   10.40 +xen_network_set_other_config(xen_session *session, xen_network network, xen_string_string_map *other_config);
   10.41 +
   10.42 +
   10.43 +/**
   10.44 + * Add the given key-value pair to the other_config field of the given
   10.45 + * network.
   10.46 + */
   10.47 +extern bool
   10.48 +xen_network_add_to_other_config(xen_session *session, xen_network network, char *key, char *value);
   10.49 +
   10.50 +
   10.51 +/**
   10.52 + * Remove the given key and its corresponding value from the
   10.53 + * other_config field of the given network.  If the key is not in that Map,
   10.54 + * then do nothing.
   10.55 + */
   10.56 +extern bool
   10.57 +xen_network_remove_from_other_config(xen_session *session, xen_network network, char *key);
   10.58 +
   10.59 +
   10.60 +/**
   10.61   * Return a list of all the networks known to the system.
   10.62   */
   10.63  extern bool
    11.1 --- a/tools/libxen/src/xen_network.c	Tue Mar 27 12:21:48 2007 -0600
    11.2 +++ b/tools/libxen/src/xen_network.c	Wed Mar 28 10:38:41 2007 +0100
    11.3 @@ -24,6 +24,7 @@
    11.4  #include "xen_internal.h"
    11.5  #include "xen_network.h"
    11.6  #include "xen_pif.h"
    11.7 +#include "xen_string_string_map.h"
    11.8  #include "xen_vif.h"
    11.9  
   11.10  
   11.11 @@ -52,7 +53,10 @@ static const struct_member xen_network_r
   11.12            .offset = offsetof(xen_network_record, vifs) },
   11.13          { .key = "PIFs",
   11.14            .type = &abstract_type_ref_set,
   11.15 -          .offset = offsetof(xen_network_record, pifs) }
   11.16 +          .offset = offsetof(xen_network_record, pifs) },
   11.17 +        { .key = "other_config",
   11.18 +          .type = &abstract_type_string_string_map,
   11.19 +          .offset = offsetof(xen_network_record, other_config) }
   11.20      };
   11.21  
   11.22  const abstract_type xen_network_record_abstract_type_ =
   11.23 @@ -78,6 +82,7 @@ xen_network_record_free(xen_network_reco
   11.24      free(record->name_description);
   11.25      xen_vif_record_opt_set_free(record->vifs);
   11.26      xen_pif_record_opt_set_free(record->pifs);
   11.27 +    xen_string_string_map_free(record->other_config);
   11.28      free(record);
   11.29  }
   11.30  
   11.31 @@ -239,6 +244,23 @@ xen_network_get_pifs(xen_session *sessio
   11.32  
   11.33  
   11.34  bool
   11.35 +xen_network_get_other_config(xen_session *session, xen_string_string_map **result, xen_network network)
   11.36 +{
   11.37 +    abstract_value param_values[] =
   11.38 +        {
   11.39 +            { .type = &abstract_type_string,
   11.40 +              .u.string_val = network }
   11.41 +        };
   11.42 +
   11.43 +    abstract_type result_type = abstract_type_string_string_map;
   11.44 +
   11.45 +    *result = NULL;
   11.46 +    XEN_CALL_("network.get_other_config");
   11.47 +    return session->ok;
   11.48 +}
   11.49 +
   11.50 +
   11.51 +bool
   11.52  xen_network_set_name_label(xen_session *session, xen_network network, char *label)
   11.53  {
   11.54      abstract_value param_values[] =
   11.55 @@ -271,6 +293,56 @@ xen_network_set_name_description(xen_ses
   11.56  
   11.57  
   11.58  bool
   11.59 +xen_network_set_other_config(xen_session *session, xen_network network, xen_string_string_map *other_config)
   11.60 +{
   11.61 +    abstract_value param_values[] =
   11.62 +        {
   11.63 +            { .type = &abstract_type_string,
   11.64 +              .u.string_val = network },
   11.65 +            { .type = &abstract_type_string_string_map,
   11.66 +              .u.set_val = (arbitrary_set *)other_config }
   11.67 +        };
   11.68 +
   11.69 +    xen_call_(session, "network.set_other_config", param_values, 2, NULL, NULL);
   11.70 +    return session->ok;
   11.71 +}
   11.72 +
   11.73 +
   11.74 +bool
   11.75 +xen_network_add_to_other_config(xen_session *session, xen_network network, char *key, char *value)
   11.76 +{
   11.77 +    abstract_value param_values[] =
   11.78 +        {
   11.79 +            { .type = &abstract_type_string,
   11.80 +              .u.string_val = network },
   11.81 +            { .type = &abstract_type_string,
   11.82 +              .u.string_val = key },
   11.83 +            { .type = &abstract_type_string,
   11.84 +              .u.string_val = value }
   11.85 +        };
   11.86 +
   11.87 +    xen_call_(session, "network.add_to_other_config", param_values, 3, NULL, NULL);
   11.88 +    return session->ok;
   11.89 +}
   11.90 +
   11.91 +
   11.92 +bool
   11.93 +xen_network_remove_from_other_config(xen_session *session, xen_network network, char *key)
   11.94 +{
   11.95 +    abstract_value param_values[] =
   11.96 +        {
   11.97 +            { .type = &abstract_type_string,
   11.98 +              .u.string_val = network },
   11.99 +            { .type = &abstract_type_string,
  11.100 +              .u.string_val = key }
  11.101 +        };
  11.102 +
  11.103 +    xen_call_(session, "network.remove_from_other_config", param_values, 2, NULL, NULL);
  11.104 +    return session->ok;
  11.105 +}
  11.106 +
  11.107 +
  11.108 +bool
  11.109  xen_network_get_all(xen_session *session, struct xen_network_set **result)
  11.110  {
  11.111  
    12.1 --- a/tools/pygrub/src/pygrub	Tue Mar 27 12:21:48 2007 -0600
    12.2 +++ b/tools/pygrub/src/pygrub	Wed Mar 28 10:38:41 2007 +0100
    12.3 @@ -125,16 +125,13 @@ class GrubLineEditor(curses.textpad.Text
    12.4          is that we can handle lines longer than the window."""
    12.5  
    12.6          self.win.clear()
    12.7 -        if self.pos > 70:
    12.8 -            if self.pos > 130:
    12.9 -                off = 120
   12.10 -            else:
   12.11 -                off = 55
   12.12 -            l = [ "<" ] + self.line[off:]
   12.13 -            p = self.pos - off
   12.14 -        else:
   12.15 -            l = self.line[:70]
   12.16 -            p = self.pos
   12.17 +        p = self.pos
   12.18 +        off = 0
   12.19 +        while p > 70:
   12.20 +            p -= 55
   12.21 +            off += 55
   12.22 +
   12.23 +        l = self.line[off:off+70]
   12.24          self.win.addstr(0, 0, string.join(l, ("")))
   12.25          if self.pos > 70:
   12.26              self.win.addch(0, 0, curses.ACS_LARROW)
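
The rewritten line-drawing logic replaces the two hard-coded offsets with a loop: the edit window shows 70 characters and scrolls left in 55-character steps until the cursor falls inside the visible slice. The same arithmetic in isolation (function name and test values are illustrative):

    def visible_slice(line, pos, width=70, step=55):
        p, off = pos, 0
        while p > width:
            p -= step
            off += step
        return line[off:off + width], p            # characters to draw, cursor column

    text, col = visible_slice(list('x' * 200), 130)
    assert col == 20 and len(text) == 70           # cursor 130 -> offset 110, column 20
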
    13.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    13.2 +++ b/tools/python/xen/util/xmlrpcclient.py	Wed Mar 28 10:38:41 2007 +0100
    13.3 @@ -0,0 +1,123 @@
    13.4 +#============================================================================
    13.5 +# This library is free software; you can redistribute it and/or
    13.6 +# modify it under the terms of version 2.1 of the GNU Lesser General Public
    13.7 +# License as published by the Free Software Foundation.
    13.8 +#
    13.9 +# This library is distributed in the hope that it will be useful,
   13.10 +# but WITHOUT ANY WARRANTY; without even the implied warranty of
   13.11 +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   13.12 +# Lesser General Public License for more details.
   13.13 +#
   13.14 +# You should have received a copy of the GNU Lesser General Public
   13.15 +# License along with this library; if not, write to the Free Software
   13.16 +# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
   13.17 +#============================================================================
   13.18 +# Copyright (C) 2006 Anthony Liguori <aliguori@us.ibm.com>
   13.19 +# Copyright (C) 2007 XenSource Inc.
   13.20 +#============================================================================
   13.21 +
   13.22 +
   13.23 +from httplib import FakeSocket, HTTPConnection, HTTP
   13.24 +import socket
   13.25 +import string
   13.26 +import xmlrpclib
   13.27 +from types import StringTypes
   13.28 +
   13.29 +
   13.30 +try:
   13.31 +    import SSHTransport
   13.32 +    ssh_enabled = True
   13.33 +except ImportError:
   13.34 +    # SSHTransport is disabled on Python <2.4, because it uses the subprocess
   13.35 +    # package.
   13.36 +    ssh_enabled = False
   13.37 +
   13.38 +
   13.39 +# A new ServerProxy that also supports httpu urls.  An http URL comes in the
   13.40 +# form:
   13.41 +#
   13.42 +# httpu:///absolute/path/to/socket.sock
   13.43 +#
   13.44 +# It assumes that the RPC handler is /RPC2.  This probably needs to be improved
   13.45 +
   13.46 +class HTTPUnixConnection(HTTPConnection):
   13.47 +    def connect(self):
   13.48 +        self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
   13.49 +        self.sock.connect(self.host)
   13.50 +
   13.51 +class HTTPUnix(HTTP):
   13.52 +    _connection_class = HTTPUnixConnection
   13.53 +
   13.54 +class UnixTransport(xmlrpclib.Transport):
   13.55 +    def request(self, host, handler, request_body, verbose=0):
   13.56 +        self.__handler = handler
   13.57 +        return xmlrpclib.Transport.request(self, host, '/RPC2',
   13.58 +                                           request_body, verbose)
   13.59 +    def make_connection(self, host):
   13.60 +        return HTTPUnix(self.__handler)
   13.61 +
   13.62 +
   13.63 +# We need our own transport for HTTPS, because xmlrpclib.SafeTransport is
   13.64 +# broken -- it does not handle ERROR_ZERO_RETURN properly.
   13.65 +class HTTPSTransport(xmlrpclib.SafeTransport):
   13.66 +    def _parse_response(self, file, sock):
   13.67 +        p, u = self.getparser()
   13.68 +        while 1:
   13.69 +            try:
   13.70 +                if sock:
   13.71 +                    response = sock.recv(1024)
   13.72 +                else:
   13.73 +                    response = file.read(1024)
   13.74 +            except socket.sslerror, exn:
   13.75 +                if exn[0] == socket.SSL_ERROR_ZERO_RETURN:
   13.76 +                    break
   13.77 +                raise
   13.78 +                
   13.79 +            if not response:
   13.80 +                break
   13.81 +            if self.verbose:
   13.82 +                print 'body:', repr(response)
   13.83 +            p.feed(response)
   13.84 +            
   13.85 +        file.close()
   13.86 +        p.close()
   13.87 +        return u.close()
   13.88 +
   13.89 +
   13.90 +# See xmlrpclib2.TCPXMLRPCServer._marshalled_dispatch.
   13.91 +def conv_string(x):
   13.92 +    if isinstance(x, StringTypes):
   13.93 +        s = string.replace(x, "'", r"\047")
   13.94 +        exec "s = '" + s + "'"
   13.95 +        return s
   13.96 +    else:
   13.97 +        return x
   13.98 +
   13.99 +
  13.100 +class ServerProxy(xmlrpclib.ServerProxy):
  13.101 +    def __init__(self, uri, transport=None, encoding=None, verbose=0,
  13.102 +                 allow_none=1):
  13.103 +        if transport == None:
  13.104 +            (protocol, rest) = uri.split(':', 1)
  13.105 +            if protocol == 'httpu':
  13.106 +                uri = 'http:' + rest
  13.107 +                transport = UnixTransport()
  13.108 +            elif protocol == 'https':
  13.109 +                transport = HTTPSTransport()
  13.110 +            elif protocol == 'ssh':
  13.111 +                global ssh_enabled
  13.112 +                if ssh_enabled:
  13.113 +                    (transport, uri) = SSHTransport.getHTTPURI(uri)
  13.114 +                else:
  13.115 +                    raise ValueError(
  13.116 +                        "SSH transport not supported on Python <2.4.")
  13.117 +        xmlrpclib.ServerProxy.__init__(self, uri, transport, encoding,
  13.118 +                                       verbose, allow_none)
  13.119 +
  13.120 +    def __request(self, methodname, params):
  13.121 +        response = xmlrpclib.ServerProxy.__request(self, methodname, params)
  13.122 +
  13.123 +        if isinstance(response, tuple):
  13.124 +            return tuple([conv_string(x) for x in response])
  13.125 +        else:
  13.126 +            return conv_string(response)
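
Use of the new xen.util.xmlrpcclient.ServerProxy is driven entirely by the URL scheme: httpu selects the Unix-socket transport, https the SSL-aware transport defined above, and ssh the tunnelled transport when available. A sketch of typical construction; the socket path matches the xend-unix-path default shown in xend-config.sxp and the SSL port matches the sample Xen-API SSL entry, but both are illustrative:

    from xen.util.xmlrpcclient import ServerProxy

    # Legacy XML-RPC interface over the xend Unix-domain socket.
    unix_server = ServerProxy('httpu:///var/lib/xend/xend-socket')

    # Xen-API over SSL, as served by a (9367 pam '' key crt) entry.
    ssl_server = ServerProxy('https://localhost:9367/')

    # Attribute access then maps onto XML-RPC methods in the usual
    # xmlrpclib fashion, e.g. ssl_server.session.login_with_password(...).
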
    14.1 --- a/tools/python/xen/util/xmlrpclib2.py	Tue Mar 27 12:21:48 2007 -0600
    14.2 +++ b/tools/python/xen/util/xmlrpclib2.py	Wed Mar 28 10:38:41 2007 +0100
    14.3 @@ -21,12 +21,10 @@ An enhanced XML-RPC client/server interf
    14.4  """
    14.5  
    14.6  import re
    14.7 -import string
    14.8  import fcntl
    14.9  from types import *
   14.10      
   14.11  
   14.12 -from httplib import HTTPConnection, HTTP
   14.13  from SimpleXMLRPCServer import SimpleXMLRPCServer, SimpleXMLRPCRequestHandler
   14.14  import SocketServer
   14.15  import xmlrpclib, socket, os, stat
   14.16 @@ -36,14 +34,6 @@ import mkdir
   14.17  from xen.web import connection
   14.18  from xen.xend.XendLogging import log
   14.19  
   14.20 -try:
   14.21 -    import SSHTransport
   14.22 -    ssh_enabled = True
   14.23 -except ImportError:
   14.24 -    # SSHTransport is disabled on Python <2.4, because it uses the subprocess
   14.25 -    # package.
   14.26 -    ssh_enabled = False
   14.27 -
   14.28  #
   14.29  # Convert all integers to strings as described in the Xen API
   14.30  #
   14.31 @@ -64,13 +54,6 @@ def stringify(value):
   14.32          return value
   14.33  
   14.34  
   14.35 -# A new ServerProxy that also supports httpu urls.  An http URL comes in the
   14.36 -# form:
   14.37 -#
   14.38 -# httpu:///absolute/path/to/socket.sock
   14.39 -#
   14.40 -# It assumes that the RPC handler is /RPC2.  This probably needs to be improved
   14.41 -
   14.42  # We're forced to subclass the RequestHandler class so that we can work around
   14.43  # some bugs in Keep-Alive handling and also enabled it by default
   14.44  class XMLRPCRequestHandler(SimpleXMLRPCRequestHandler):
   14.45 @@ -106,60 +89,6 @@ class XMLRPCRequestHandler(SimpleXMLRPCR
   14.46          if self.close_connection == 1:
   14.47              self.connection.shutdown(1)
   14.48  
   14.49 -class HTTPUnixConnection(HTTPConnection):
   14.50 -    def connect(self):
   14.51 -        self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
   14.52 -        self.sock.connect(self.host)
   14.53 -
   14.54 -class HTTPUnix(HTTP):
   14.55 -    _connection_class = HTTPUnixConnection
   14.56 -
   14.57 -class UnixTransport(xmlrpclib.Transport):
   14.58 -    def request(self, host, handler, request_body, verbose=0):
   14.59 -        self.__handler = handler
   14.60 -        return xmlrpclib.Transport.request(self, host, '/RPC2',
   14.61 -                                           request_body, verbose)
   14.62 -    def make_connection(self, host):
   14.63 -        return HTTPUnix(self.__handler)
   14.64 -
   14.65 -
   14.66 -# See _marshalled_dispatch below.
   14.67 -def conv_string(x):
   14.68 -    if isinstance(x, StringTypes):
   14.69 -        s = string.replace(x, "'", r"\047")
   14.70 -        exec "s = '" + s + "'"
   14.71 -        return s
   14.72 -    else:
   14.73 -        return x
   14.74 -
   14.75 -
   14.76 -class ServerProxy(xmlrpclib.ServerProxy):
   14.77 -    def __init__(self, uri, transport=None, encoding=None, verbose=0,
   14.78 -                 allow_none=1):
   14.79 -        if transport == None:
   14.80 -            (protocol, rest) = uri.split(':', 1)
   14.81 -            if protocol == 'httpu':
   14.82 -                uri = 'http:' + rest
   14.83 -                transport = UnixTransport()
   14.84 -            elif protocol == 'ssh':
   14.85 -                global ssh_enabled
   14.86 -                if ssh_enabled:
   14.87 -                    (transport, uri) = SSHTransport.getHTTPURI(uri)
   14.88 -                else:
   14.89 -                    raise ValueError(
   14.90 -                        "SSH transport not supported on Python <2.4.")
   14.91 -        xmlrpclib.ServerProxy.__init__(self, uri, transport, encoding,
   14.92 -                                       verbose, allow_none)
   14.93 -
   14.94 -    def __request(self, methodname, params):
   14.95 -        response = xmlrpclib.ServerProxy.__request(self, methodname, params)
   14.96 -
   14.97 -        if isinstance(response, tuple):
   14.98 -            return tuple([conv_string(x) for x in response])
   14.99 -        else:
  14.100 -            return conv_string(response)
  14.101 -
  14.102 -
  14.103  # This is a base XML-RPC server for TCP.  It sets allow_reuse_address to
  14.104  # true, and has an improved marshaller that logs and serializes exceptions.
  14.105  
    15.1 --- a/tools/python/xen/xend/XendAPI.py	Tue Mar 27 12:21:48 2007 -0600
    15.2 +++ b/tools/python/xen/xend/XendAPI.py	Wed Mar 28 10:38:41 2007 +0100
    15.3 @@ -32,7 +32,9 @@ from xen.xend.XendAuthSessions import in
    15.4  from xen.xend.XendError import *
    15.5  from xen.xend.XendClient import ERROR_INVALID_DOMAIN
    15.6  from xen.xend.XendLogging import log
    15.7 +from xen.xend.XendNetwork import XendNetwork
    15.8  from xen.xend.XendTask import XendTask
    15.9 +from xen.xend.XendPIFMetrics import XendPIFMetrics
   15.10  from xen.xend.XendVMMetrics import XendVMMetrics
   15.11  
   15.12  from xen.xend.XendAPIConstants import *
   15.13 @@ -436,6 +438,12 @@ class XendAPI(object):
   15.14              'debug'        : valid_debug,
   15.15          }
   15.16  
   15.17 +        autoplug_classes = {
   15.18 +            'network'     : XendNetwork,
   15.19 +            'VM_metrics'  : XendVMMetrics,
   15.20 +            'PIF_metrics' : XendPIFMetrics,
   15.21 +        }
   15.22 +
   15.23          # Cheat methods
   15.24          # -------------
   15.25          # Methods that have a trivial implementation for all classes.
   15.26 @@ -457,6 +465,40 @@ class XendAPI(object):
   15.27              setattr(cls, get_by_uuid, _get_by_uuid)
   15.28              setattr(cls, get_uuid,    _get_uuid)
   15.29  
   15.30 +
   15.31 +        # Autoplugging classes
   15.32 +        # --------------------
   15.33 +        # These have all of their methods grabbed out from the implementation
   15.34 +        # class, and wrapped up to be compatible with the Xen-API.
   15.35 +        
   15.36 +        for api_cls, impl_cls in autoplug_classes.items():
   15.37 +            def doit(n):
   15.38 +                getter = getattr(cls, '_%s_get' % api_cls)
   15.39 +                dot_n = '%s.%s' % (api_cls, n)
   15.40 +                full_n = '%s_%s' % (api_cls, n)
   15.41 +                if not hasattr(cls, full_n):
   15.42 +                    f = getattr(impl_cls, n)
   15.43 +                    argcounts[dot_n] = f.func_code.co_argcount + 1
   15.44 +                    setattr(cls, full_n,
   15.45 +                            lambda s, session, ref, *args: \
   15.46 +                               xen_api_success( \
   15.47 +                                   f(getter(s, session, ref), *args)))
   15.48 +
   15.49 +            ro_attrs = getattr(cls, '%s_attr_ro' % api_cls, [])
   15.50 +            rw_attrs = getattr(cls, '%s_attr_rw' % api_cls, [])
   15.51 +            methods  = getattr(cls, '%s_methods' % api_cls, [])
   15.52 +            funcs    = getattr(cls, '%s_funcs'   % api_cls, [])
   15.53 +            
   15.54 +            for attr_name in ro_attrs + rw_attrs:
   15.55 +                doit('get_%s' % attr_name)
   15.56 +            for attr_name in rw_attrs + cls.Base_attr_rw:
   15.57 +                doit('set_%s' % attr_name)
   15.58 +            for method_name, return_type in methods + cls.Base_methods:
   15.59 +                doit('%s' % method_name)
   15.60 +            for func_name, return_type in funcs + cls.Base_funcs:
   15.61 +                doit('%s' % func_name)
   15.62 +
   15.63 +
   15.64          # Wrapping validators around XMLRPC calls
   15.65          # ---------------------------------------
   15.66  
   15.67 @@ -466,7 +508,8 @@ class XendAPI(object):
   15.68                  n_ = n.replace('.', '_')
   15.69                  try:
   15.70                      f = getattr(cls, n_)
   15.71 -                    argcounts[n] = f.func_code.co_argcount - 1
   15.72 +                    if n not in argcounts:
   15.73 +                        argcounts[n] = f.func_code.co_argcount - 1
   15.74                      
   15.75                      validators = takes_instance and validator and \
   15.76                                   [validator] or []
   15.77 @@ -516,7 +559,7 @@ class XendAPI(object):
   15.78  
   15.79      Base_attr_ro = ['uuid']
   15.80      Base_attr_rw = []
   15.81 -    Base_methods = [('destroy', None), ('get_record', 'Struct')]
   15.82 +    Base_methods = [('get_record', 'Struct')]
   15.83      Base_funcs   = [('get_all', 'Set'), ('get_by_uuid', None)]
   15.84  
   15.85      # Xen API: Class Session
   15.86 @@ -916,59 +959,40 @@ class XendAPI(object):
   15.87  
   15.88      network_attr_ro = ['VIFs', 'PIFs']
   15.89      network_attr_rw = ['name_label',
   15.90 -                       'name_description']
   15.91 -    
   15.92 -    network_funcs = [('create', 'network')]
   15.93 +                       'name_description',
   15.94 +                       'other_config']
   15.95 +    network_methods = [('add_to_other_config', None),
   15.96 +                       ('remove_from_other_config', None),
   15.97 +                       ('destroy', None)]
   15.98 +    network_funcs = [('create', None)]
   15.99      
  15.100 -    def network_create(self, _, name_label, name_description):
  15.101 -        return xen_api_success(
  15.102 -            XendNode.instance().network_create(name_label, name_description))
  15.103 -
  15.104 -    def network_destroy(self, _, ref):
  15.105 -        return xen_api_success(XendNode.instance().network_destroy(ref))
  15.106 -
  15.107 -    def _get_network(self, ref):
  15.108 +    def _network_get(self, _, ref):
  15.109          return XendNode.instance().get_network(ref)
  15.110  
  15.111      def network_get_all(self, _):
  15.112          return xen_api_success(XendNode.instance().get_network_refs())
  15.113  
  15.114 -    def network_get_record(self, _, ref):
  15.115 -        return xen_api_success(
  15.116 -            XendNode.instance().get_network(ref).get_record())
  15.117 -
  15.118 -    def network_get_name_label(self, _, ref):
  15.119 -        return xen_api_success(self._get_network(ref).name_label)
  15.120 -
  15.121 -    def network_get_name_description(self, _, ref):
  15.122 -        return xen_api_success(self._get_network(ref).name_description)
  15.123 +    def network_create(self, _, record):
  15.124 +        return xen_api_success(XendNode.instance().network_create(record))
  15.125  
  15.126 -    def network_get_VIFs(self, _, ref):
  15.127 -        return xen_api_success(self._get_network(ref).get_VIF_UUIDs())
  15.128 -
  15.129 -    def network_get_PIFs(self, session, ref):
  15.130 -        return xen_api_success(self._get_network(ref).get_PIF_UUIDs())
  15.131 +    def network_destroy(self, _, ref):
  15.132 +        return xen_api_success(XendNode.instance().network_destroy(ref))
  15.133  
  15.134 -    def network_set_name_label(self, _, ref, val):
  15.135 -        return xen_api_success(self._get_network(ref).set_name_label(val))
  15.136 -
  15.137 -    def network_set_name_description(self, _, ref, val):
  15.138 -        return xen_api_success(self._get_network(ref).set_name_description(val))
  15.139  
  15.140      # Xen API: Class PIF
  15.141      # ----------------------------------------------------------------
  15.142  
  15.143 -    PIF_attr_ro = ['metrics']
  15.144 +    PIF_attr_ro = ['network',
  15.145 +                   'host',
  15.146 +                   'metrics']
  15.147      PIF_attr_rw = ['device',
  15.148 -                   'network',
  15.149 -                   'host',
  15.150                     'MAC',
  15.151                     'MTU',
  15.152                     'VLAN']
  15.153  
  15.154      PIF_attr_inst = PIF_attr_rw
  15.155  
  15.156 -    PIF_methods = [('create_VLAN', 'int')]
  15.157 +    PIF_methods = [('create_VLAN', 'int'), ('destroy', None)]
  15.158  
  15.159      def _get_PIF(self, ref):
  15.160          return XendNode.instance().pifs[ref]
  15.161 @@ -1049,21 +1073,9 @@ class XendAPI(object):
  15.162      def PIF_metrics_get_all(self, _):
  15.163          return xen_api_success(XendNode.instance().pif_metrics.keys())
  15.164  
  15.165 -    def _PIF_metrics_get(self, ref):
  15.166 +    def _PIF_metrics_get(self, _, ref):
  15.167          return XendNode.instance().pif_metrics[ref]
  15.168  
  15.169 -    def PIF_metrics_get_record(self, _, ref):
  15.170 -        return xen_api_success(self._PIF_metrics_get(ref).get_record())
  15.171 -
  15.172 -    def PIF_metrics_get_io_read_kbs(self, _, ref):
  15.173 -        return xen_api_success(self._PIF_metrics_get(ref).get_io_read_kbs())
  15.174 -
  15.175 -    def PIF_metrics_get_io_write_kbs(self, _, ref):
  15.176 -        return xen_api_success(self._PIF_metrics_get(ref).get_io_write_kbs())
  15.177 -
  15.178 -    def PIF_metrics_get_last_updated(self, _1, _2):
  15.179 -        return xen_api_success(now())
  15.180 -
  15.181  
  15.182      # Xen API: Class VM
  15.183      # ----------------------------------------------------------------        
  15.184 @@ -1131,7 +1143,8 @@ class XendAPI(object):
  15.185                    ('save', None),
  15.186                    ('set_memory_dynamic_max_live', None),
  15.187                    ('set_memory_dynamic_min_live', None),
  15.188 -                  ('send_trigger', None)]
  15.189 +                  ('send_trigger', None),
  15.190 +                  ('destroy', None)]
  15.191      
  15.192      VM_funcs  = [('create', 'VM'),
  15.193                   ('restore', None),
  15.194 @@ -1390,7 +1403,8 @@ class XendAPI(object):
  15.195              if key.startswith("cpumap"):
  15.196                  vcpu = int(key[6:])
  15.197                  try:
  15.198 -                    xendom.domain_pincpu(xeninfo.getDomid(), vcpu, value)
  15.199 +                    cpus = map(int, value.split(","))
  15.200 +                    xendom.domain_pincpu(xeninfo.getDomid(), vcpu, cpus)
  15.201                  except Exception, ex:
  15.202                      log.exception(ex)
  15.203  
  15.204 @@ -1633,14 +1647,15 @@ class XendAPI(object):
  15.205  
  15.206      def VM_send_sysrq(self, _, vm_ref, req):
  15.207          xeninfo = XendDomain.instance().get_vm_by_uuid(vm_ref)
  15.208 -        if xeninfo.state != XEN_API_VM_POWER_STATE_RUNNING:
  15.209 +        if xeninfo.state == XEN_API_VM_POWER_STATE_RUNNING \
  15.210 +               or xeninfo.state == XEN_API_VM_POWER_STATE_PAUSED:
  15.211 +            xeninfo.send_sysrq(req)
  15.212 +            return xen_api_success_void()
  15.213 +        else:
  15.214              return xen_api_error(
  15.215                  ['VM_BAD_POWER_STATE', vm_ref,
  15.216                   XendDomain.POWER_STATE_NAMES[XEN_API_VM_POWER_STATE_RUNNING],
  15.217                   XendDomain.POWER_STATE_NAMES[xeninfo.state]])
  15.218 -        xeninfo.send_sysrq(req)
  15.219 -        return xen_api_success_void()
  15.220 -
  15.221  
  15.222      def VM_send_trigger(self, _, vm_ref, trigger, vcpu):
  15.223          xendom = XendDomain.instance()
  15.224 @@ -1675,58 +1690,31 @@ class XendAPI(object):
  15.225      VM_metrics_attr_rw = []
  15.226      VM_metrics_methods = []
  15.227  
  15.228 -    def _VM_metrics_get(self, ref):
  15.229 +    def VIF_metrics_get_all(self, session):
  15.230 +        return self.VIF_get_all(session)
  15.231 +
  15.232 +    def _VM_metrics_get(self, _, ref):
  15.233          return XendVMMetrics.get_by_uuid(ref)
  15.234  
  15.235      def VM_metrics_get_all(self, _):
  15.236          return xen_api_success(XendVMMetrics.get_all())
  15.237  
  15.238 -    def VM_metrics_get_record(self, _, ref):
  15.239 -        return xen_api_success(self._VM_metrics_get(ref).get_record())
  15.240 -
  15.241 -    def VM_metrics_get_memory_actual(self, _, ref):
  15.242 -        return xen_api_success(self._VM_metrics_get(ref).get_memory_actual())
  15.243 -
  15.244 -    def VM_metrics_get_VCPUs_number(self, _, ref):
  15.245 -        return xen_api_success(self._VM_metrics_get(ref).get_VCPUs_number())
  15.246 -
  15.247 -    def VM_metrics_get_VCPUs_utilisation(self, _, ref):
  15.248 -        return xen_api_success(self._VM_metrics_get(ref).get_VCPUs_utilisation())
  15.249 -
  15.250 -    def VM_metrics_get_VCPUs_CPU(self, _, ref):
  15.251 -        return xen_api_success(self._VM_metrics_get(ref).get_VCPUs_CPU())
  15.252 -    
  15.253 -    def VM_metrics_get_VCPUs_flags(self, _, ref):
  15.254 -        return xen_api_success(self._VM_metrics_get(ref).get_VCPUs_flags())
  15.255 -
  15.256 -    def VM_metrics_get_VCPUs_params(self, _, ref):
  15.257 -        return xen_api_success(self._VM_metrics_get(ref).get_VCPUs_params())
  15.258 -
  15.259 -    def VM_metrics_get_start_time(self, _, ref):
  15.260 -        return xen_api_success(self._VM_metrics_get(ref).get_start_time())
  15.261 -
  15.262 -    def VM_metrics_get_state(self, _, ref):
  15.263 -        return xen_api_success(self._VM_metrics_get(ref).get_state())
  15.264 -
  15.265 -    def VM_metrics_get_last_updated(self, _1, _2):
  15.266 -        return xen_api_success(now())
  15.267 -
  15.268  
  15.269      # Xen API: Class VBD
  15.270      # ----------------------------------------------------------------
  15.271  
  15.272 -    VBD_attr_ro = ['metrics',
  15.273 +    VBD_attr_ro = ['VM',
  15.274 +                   'VDI',
  15.275 +                   'metrics',
  15.276                     'runtime_properties']
  15.277 -    VBD_attr_rw = ['VM',
  15.278 -                   'VDI',
  15.279 -                   'device',
  15.280 +    VBD_attr_rw = ['device',
  15.281                     'bootable',
  15.282                     'mode',
  15.283                     'type']
  15.284  
  15.285      VBD_attr_inst = VBD_attr_rw
  15.286  
  15.287 -    VBD_methods = [('media_change', None)]
  15.288 +    VBD_methods = [('media_change', None), ('destroy', None)]
  15.289      VBD_funcs = [('create', 'VBD')]
  15.290      
  15.291      # object methods
  15.292 @@ -1868,7 +1856,10 @@ class XendAPI(object):
  15.293                             'io_write_kbs',
  15.294                             'last_updated']
  15.295      VBD_metrics_attr_rw = []
  15.296 -    VBD_methods = []
  15.297 +    VBD_metrics_methods = []
  15.298 +
  15.299 +    def VBD_metrics_get_all(self, session):
  15.300 +        return self.VBD_get_all(session)
  15.301  
  15.302      def VBD_metrics_get_record(self, _, ref):
  15.303          vm = XendDomain.instance().get_vm_with_dev_uuid('vbd', ref)
  15.304 @@ -1893,16 +1884,17 @@ class XendAPI(object):
  15.305      # Xen API: Class VIF
  15.306      # ----------------------------------------------------------------
  15.307  
  15.308 -    VIF_attr_ro = ['metrics',
  15.309 +    VIF_attr_ro = ['network',
  15.310 +                   'VM',
  15.311 +                   'metrics',
  15.312                     'runtime_properties']
  15.313      VIF_attr_rw = ['device',
  15.314 -                   'network',
  15.315 -                   'VM',
  15.316                     'MAC',
  15.317                     'MTU']
  15.318  
  15.319      VIF_attr_inst = VIF_attr_rw
  15.320  
  15.321 +    VIF_methods = [('destroy', None)]
  15.322      VIF_funcs = [('create', 'VIF')]
  15.323  
  15.324                   
  15.325 @@ -1960,10 +1952,10 @@ class XendAPI(object):
  15.326          return xen_api_success(vif_ref)
  15.327  
  15.328      def VIF_get_VM(self, session, vif_ref):
  15.329 -        xendom = XendDomain.instance()        
  15.330 -        vm = xendom.get_vm_with_dev_uuid('vif', vif_ref)        
  15.331 +        xendom = XendDomain.instance()
  15.332 +        vm = xendom.get_vm_with_dev_uuid('vif', vif_ref)
  15.333          return xen_api_success(vm.get_uuid())
  15.334 -    
  15.335 +
  15.336      def VIF_get_MTU(self, session, vif_ref):
  15.337          return self._VIF_get(vif_ref, 'MTU')
  15.338      
  15.339 @@ -2008,7 +2000,7 @@ class XendAPI(object):
  15.340                             'io_write_kbs',
  15.341                             'last_updated']
  15.342      VIF_metrics_attr_rw = []
  15.343 -    VIF_methods = []
  15.344 +    VIF_metrics_methods = []
  15.345  
  15.346      def VIF_metrics_get_record(self, _, ref):
  15.347          vm = XendDomain.instance().get_vm_with_dev_uuid('vif', ref)
  15.348 @@ -2044,7 +2036,7 @@ class XendAPI(object):
  15.349                     'other_config']
  15.350      VDI_attr_inst = VDI_attr_ro + VDI_attr_rw
  15.351  
  15.352 -    VDI_methods = [('snapshot', 'VDI')]
  15.353 +    VDI_methods = [('snapshot', 'VDI'), ('destroy', None)]
  15.354      VDI_funcs = [('create', 'VDI'),
  15.355                    ('get_by_name_label', 'Set(VDI)')]
  15.356  
  15.357 @@ -2161,6 +2153,7 @@ class XendAPI(object):
  15.358  
  15.359      VTPM_attr_inst = VTPM_attr_rw
  15.360  
  15.361 +    VTPM_methods = [('destroy', None)]
  15.362      VTPM_funcs = [('create', 'VTPM')]
  15.363      
  15.364      # object methods
  15.365 @@ -2319,7 +2312,7 @@ class XendAPI(object):
  15.366                      'name_label',
  15.367                      'name_description']
  15.368      
  15.369 -    SR_methods = [('clone', 'SR')]
  15.370 +    SR_methods = [('clone', 'SR'), ('destroy', None)]
  15.371      SR_funcs = [('get_by_name_label', 'Set(SR)'),
  15.372                  ('get_by_uuid', 'SR')]
  15.373  
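
The autoplug machinery above generates the Xen-API wrappers for network, VM_metrics and PIF_metrics instead of hand-writing one method per attribute: for every name in the *_attr_ro/_attr_rw/_methods/_funcs lists, doit() pulls the matching method off the implementation class, records its argument count, and installs a wrapper that resolves the reference via _<class>_get() and wraps the result in xen_api_success(). A stripped-down sketch of the same wiring (class and reference names are invented; default arguments pin the loop variables here, where the changeset instead defines doit() once per name):

    def xen_api_success(value):
        return {'Status': 'Success', 'Value': value}

    class Impl:
        def __init__(self):
            self.other_config = {}
        def get_other_config(self):
            return self.other_config

    class API(object):
        _objects = {'net-uuid': Impl()}
        def _network_get(self, session, ref):
            return self._objects[ref]

    def autoplug(api_cls, impl_cls, api_name, attrs):
        getter = getattr(api_cls, '_%s_get' % api_name)
        for attr in attrs:
            impl_method = getattr(impl_cls, 'get_%s' % attr)
            def wrapper(self, session, ref, _get=getter, _f=impl_method):
                return xen_api_success(_f(_get(self, session, ref)))
            setattr(api_cls, '%s_get_%s' % (api_name, attr), wrapper)

    autoplug(API, Impl, 'network', ['other_config'])
    print API().network_get_other_config('session', 'net-uuid')   # {'Status': 'Success', 'Value': {}}
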
    16.1 --- a/tools/python/xen/xend/XendClient.py	Tue Mar 27 12:21:48 2007 -0600
    16.2 +++ b/tools/python/xen/xend/XendClient.py	Wed Mar 28 10:38:41 2007 +0100
    16.3 @@ -17,7 +17,7 @@
    16.4  # Copyright (C) 2006 Anthony Liguori <aliguori@us.ibm.com>
    16.5  #============================================================================
    16.6  
    16.7 -from xen.util.xmlrpclib2 import ServerProxy
    16.8 +from xen.util.xmlrpcclient import ServerProxy
    16.9  import os
   16.10  import sys
   16.11  
    17.1 --- a/tools/python/xen/xend/XendConfig.py	Tue Mar 27 12:21:48 2007 -0600
    17.2 +++ b/tools/python/xen/xend/XendConfig.py	Wed Mar 28 10:38:41 2007 +0100
    17.3 @@ -298,7 +298,7 @@ class XendConfig(dict):
    17.4              'actions_after_reboot': 'restart',
    17.5              'actions_after_crash': 'restart',
    17.6              'actions_after_suspend': '',
    17.7 -            'is_template': False,
    17.8 +            'is_a_template': False,
    17.9              'is_control_domain': False,
   17.10              'features': '',
   17.11              'PV_bootloader': '',
   17.12 @@ -452,7 +452,10 @@ class XendConfig(dict):
   17.13          for key, typ in XENAPI_CFG_TYPES.items():
   17.14              val = sxp.child_value(sxp_cfg, key)
   17.15              if val is not None:
   17.16 -                cfg[key] = typ(val)
   17.17 +                try:
   17.18 +                    cfg[key] = typ(val)
   17.19 +                except (ValueError, TypeError), e:
   17.20 +                    log.warn('Unable to convert type value for key: %s' % key)
   17.21  
   17.22          # Convert deprecated options to current equivalents.
   17.23          
   17.24 @@ -845,6 +848,8 @@ class XendConfig(dict):
   17.25                      sxpr.append([name, s])
   17.26  
   17.27          for xenapi, legacy in XENAPI_CFG_TO_LEGACY_CFG.items():
    17.28 +            if legacy in ('cpus',): # skip this
   17.29 +                continue
   17.30              if self.has_key(xenapi) and self[xenapi] not in (None, []):
   17.31                  if type(self[xenapi]) == bool:
   17.32                      # convert booleans to ints before making an sxp item
   17.33 @@ -858,7 +863,7 @@ class XendConfig(dict):
   17.34          sxpr.append(["memory", int(self["memory_dynamic_max"])/MiB])
   17.35  
   17.36          for legacy in LEGACY_UNSUPPORTED_BY_XENAPI_CFG:
   17.37 -            if legacy in ('domid', 'uuid'): # skip these
   17.38 +            if legacy in ('domid', 'uuid', 'cpus'): # skip these
   17.39                  continue
   17.40              if self.has_key(legacy) and self[legacy] not in (None, []):
   17.41                  sxpr.append([legacy, self[legacy]])
    18.1 --- a/tools/python/xen/xend/XendDomain.py	Tue Mar 27 12:21:48 2007 -0600
    18.2 +++ b/tools/python/xen/xend/XendDomain.py	Wed Mar 28 10:38:41 2007 +0100
    18.3 @@ -569,6 +569,26 @@ class XendDomain:
    18.4          finally:
    18.5              self.domains_lock.release()
    18.6  
    18.7 +    def autostart_domains(self):
    18.8 +        """ Autostart managed domains that are marked as such. """
    18.9 +
   18.10 +        need_starting = []
   18.11 +        
   18.12 +        self.domains_lock.acquire()
   18.13 +        try:
   18.14 +            for dom_uuid, dom in self.managed_domains.items():
   18.15 +                if dom and dom.state == DOM_STATE_HALTED:
   18.16 +                    on_xend_start = dom.info.get('on_xend_start', 'ignore')
   18.17 +                    auto_power_on = dom.info.get('auto_power_on', False)
   18.18 +                    should_start = (on_xend_start == 'start') or auto_power_on
   18.19 +                    if should_start:
   18.20 +                        need_starting.append(dom_uuid)
   18.21 +        finally:
   18.22 +            self.domains_lock.release()
   18.23 +
   18.24 +        for dom_uuid in need_starting:
   18.25 +            self.domain_start(dom_uuid, False)
   18.26 +
   18.27      def cleanup_domains(self):
   18.28          """Clean up domains that are marked as autostop.
   18.29          Should be called when Xend goes down. This is currently
    19.1 --- a/tools/python/xen/xend/XendDomainInfo.py	Tue Mar 27 12:21:48 2007 -0600
    19.2 +++ b/tools/python/xen/xend/XendDomainInfo.py	Wed Mar 28 10:38:41 2007 +0100
    19.3 @@ -152,8 +152,9 @@ def recreate(info, priv):
    19.4      try:
    19.5          vmpath = xstransact.Read(dompath, "vm")
    19.6          if not vmpath:
    19.7 -            log.warn('/local/domain/%d/vm is missing. recreate is '
    19.8 -                     'confused, trying our best to recover' % domid)
    19.9 +            if not priv:
   19.10 +                log.warn('/local/domain/%d/vm is missing. recreate is '
   19.11 +                         'confused, trying our best to recover' % domid)
   19.12              needs_reinitialising = True
   19.13              raise XendError('reinit')
   19.14          
   19.15 @@ -2354,7 +2355,8 @@ class XendDomainInfo:
   19.16          if not dev_uuid:
   19.17              raise XendError('Failed to create device')
   19.18          
   19.19 -        if self.state == XEN_API_VM_POWER_STATE_RUNNING:
   19.20 +        if self.state == XEN_API_VM_POWER_STATE_RUNNING \
   19.21 +               or self.state == XEN_API_VM_POWER_STATE_PAUSED:
   19.22  
   19.23              _, config = self.info['devices'][dev_uuid]
   19.24              dev_control = self.getDeviceController('vif')
    20.1 --- a/tools/python/xen/xend/XendLogging.py	Tue Mar 27 12:21:48 2007 -0600
    20.2 +++ b/tools/python/xen/xend/XendLogging.py	Wed Mar 28 10:38:41 2007 +0100
    20.3 @@ -62,6 +62,7 @@ if 'TRACE' not in logging.__dict__:
    20.4      # Work around a bug in Python's inspect module: findsource is supposed to
    20.5      # raise IOError if it fails, with other functions in that module coping
    20.6      # with that, but some people are seeing IndexError raised from there.
    20.7 +    # This is Python bug 1628987.  http://python.org/sf/1628987.
    20.8      if hasattr(inspect, 'findsource'):
    20.9          real_findsource = getattr(inspect, 'findsource')
   20.10          def findsource(*args, **kwargs):
    21.1 --- a/tools/python/xen/xend/XendMonitor.py	Tue Mar 27 12:21:48 2007 -0600
    21.2 +++ b/tools/python/xen/xend/XendMonitor.py	Wed Mar 28 10:38:41 2007 +0100
    21.3 @@ -24,8 +24,8 @@ import re
    21.4  """Monitoring thread to keep track of Xend statistics. """
    21.5  
    21.6  VBD_SYSFS_PATH = '/sys/devices/xen-backend/'
    21.7 -VBD_WR_PATH = VBD_SYSFS_PATH + '%s/statistics/wr_req'
    21.8 -VBD_RD_PATH = VBD_SYSFS_PATH + '%s/statistics/rd_req'
    21.9 +VBD_WR_PATH = VBD_SYSFS_PATH + '%s/statistics/wr_sect'
   21.10 +VBD_RD_PATH = VBD_SYSFS_PATH + '%s/statistics/rd_sect'
   21.11  VBD_DOMAIN_RE = r'vbd-(?P<domid>\d+)-(?P<devid>\d+)$'
   21.12  
   21.13  NET_PROCFS_PATH = '/proc/net/dev'
   21.14 @@ -51,14 +51,9 @@ VIF_DOMAIN_RE = re.compile(r'vif(?P<domi
   21.15                             PROC_NET_DEV_RE)
   21.16  PIF_RE = re.compile(r'^\s*(?P<iface>peth\d+):\s*' + PROC_NET_DEV_RE)
   21.17  
   21.18 -# The VBD transfer figures are in "requests" where we don't
   21.19 -# really know how many bytes per requests. For now we make
   21.20 -# up a number roughly could be.
   21.21 -VBD_ROUGH_BYTES_PER_REQUEST = 1024 * 8 * 4
   21.22 -
   21.23  # Interval to poll xc, sysfs and proc
   21.24  POLL_INTERVAL = 2.0
   21.25 -
   21.26 +SECTOR_SIZE = 512
   21.27  class XendMonitor(threading.Thread):
   21.28      """Monitors VCPU, VBD, VIF and PIF statistics for Xen API.
   21.29  
   21.30 @@ -186,9 +181,8 @@ class XendMonitor(threading.Thread):
   21.31                  usage_at = time.time()
   21.32                  rd_stat = int(open(rd_stat_path).readline().strip())
   21.33                  wr_stat = int(open(wr_stat_path).readline().strip())
   21.34 -                rd_stat *= VBD_ROUGH_BYTES_PER_REQUEST
   21.35 -                wr_stat *= VBD_ROUGH_BYTES_PER_REQUEST
   21.36 -                
   21.37 +                rd_stat *= SECTOR_SIZE
   21.38 +                wr_stat *= SECTOR_SIZE
   21.39                  if domid not in stats:
   21.40                      stats[domid] = {}
   21.41  
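
Note: since sysfs now exposes sector counters (rd_sect/wr_sect), the rough bytes-per-request estimate is replaced by an exact conversion. A minimal standalone sketch of that arithmetic, with SECTOR_SIZE as defined in the patch:

    SECTOR_SIZE = 512

    def sectors_to_bytes(sectors):
        # sysfs reports transferred sectors; multiply out to bytes.
        return int(sectors) * SECTOR_SIZE

    print sectors_to_bytes(2048)   # 1048576 bytes, i.e. 1 MiB
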
    22.1 --- a/tools/python/xen/xend/XendNetwork.py	Tue Mar 27 12:21:48 2007 -0600
    22.2 +++ b/tools/python/xen/xend/XendNetwork.py	Wed Mar 28 10:38:41 2007 +0100
    22.3 @@ -28,10 +28,17 @@ from XendLogging import log
    22.4  IP_ROUTE_RE = r'^default via ([\d\.]+) dev (\w+)'
    22.5  
    22.6  class XendNetwork:
    22.7 -    def __init__(self, uuid, name, description):
    22.8 +    def __init__(self, uuid, record):
    22.9          self.uuid = uuid
   22.10 -        self.name_label = name 
   22.11 -        self.name_description = description
   22.12 +        self.name_label = record.get('name_label', '')
   22.13 +        self.name_description = record.get('name_description', '')
   22.14 +        self.other_config = record.get('other_config', {})
   22.15 +
   22.16 +    def get_name_label(self):
   22.17 +        return self.name_label
   22.18 +
   22.19 +    def get_name_description(self):
   22.20 +        return self.name_description
   22.21  
   22.22      def set_name_label(self, new_name):
   22.23          self.name_label = new_name
   22.24 @@ -41,7 +48,7 @@ class XendNetwork:
   22.25          self.name_description = new_desc
   22.26          XendNode.instance().save_networks()
   22.27  
   22.28 -    def get_VIF_UUIDs(self):
   22.29 +    def get_VIFs(self):
   22.30          result = []
   22.31          vms = XendDomain.instance().get_all_vms()
   22.32          for vm in vms:
   22.33 @@ -52,17 +59,37 @@ class XendNetwork:
   22.34                      result.append(vif)
   22.35          return result
   22.36  
   22.37 -    def get_PIF_UUIDs(self):
   22.38 +    def get_PIFs(self):
   22.39          return [x.uuid for x in XendNode.instance().pifs.values()
   22.40                  if x.network == self]
   22.41  
   22.42 -    def get_record(self, transient = True):
   22.43 +    def get_other_config(self):
   22.44 +        return self.other_config
   22.45 +
   22.46 +    def set_other_config(self, value):
   22.47 +        self.other_config = value
   22.48 +        XendNode.instance().save_networks()
   22.49 +
   22.50 +    def add_to_other_config(self, key, value):
   22.51 +        self.other_config[key] = value
   22.52 +        XendNode.instance().save_networks()
   22.53 +
   22.54 +    def remove_from_other_config(self, key):
   22.55 +        if key in self.other_config:
   22.56 +            del self.other_config[key]
   22.57 +        XendNode.instance().save_networks()
   22.58 +
   22.59 +    def get_record(self):
   22.60 +        return self.get_record_internal(True)
   22.61 +
   22.62 +    def get_record_internal(self, transient):
   22.63          result = {
   22.64              'uuid': self.uuid,
   22.65              'name_label': self.name_label,
   22.66              'name_description': self.name_description,
   22.67 +            'other_config' : self.other_config,
   22.68          }
   22.69          if transient:
   22.70 -            result['VIFs'] = self.get_VIF_UUIDs()
   22.71 -            result['PIFs'] = self.get_PIF_UUIDs()
   22.72 +            result['VIFs'] = self.get_VIFs()
   22.73 +            result['PIFs'] = self.get_PIFs()
   22.74          return result
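
Note: XendNetwork is now built from a record dictionary and carries an other_config map, matching the (string -> string) Map field added to the Xen-API network class. A standalone illustration (plain Python, not the Xend class) of how the record-based constructor fills in defaults:

    record = {'name_label': 'net0', 'other_config': {'vlan': '42'}}

    name_label       = record.get('name_label', '')
    name_description = record.get('name_description', '')
    other_config     = record.get('other_config', {})
    print name_label, repr(name_description), other_config
    # net0 '' {'vlan': '42'}
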
    23.1 --- a/tools/python/xen/xend/XendNode.py	Tue Mar 27 12:21:48 2007 -0600
    23.2 +++ b/tools/python/xen/xend/XendNode.py	Wed Mar 28 10:38:41 2007 +0100
    23.3 @@ -141,11 +141,9 @@ class XendNode:
    23.4          saved_networks = self.state_store.load_state('network')
    23.5          if saved_networks:
    23.6              for net_uuid, network in saved_networks.items():
    23.7 -                self.network_create(network.get('name_label'),
    23.8 -                                    network.get('name_description', ''),
    23.9 -                                    False, net_uuid)
   23.10 +                self.network_create(network, False, net_uuid)
   23.11          else:
   23.12 -            self.network_create('net0', '', False)
   23.13 +            self.network_create({'name_label' : 'net0' }, False)
   23.14  
   23.15          # initialise PIFs
   23.16          saved_pifs = self.state_store.load_state('pif')
   23.17 @@ -199,12 +197,10 @@ class XendNode:
   23.18  
   23.19  
   23.20  
   23.21 -    def network_create(self, name_label, name_description, persist = True,
   23.22 -                       net_uuid = None):
   23.23 +    def network_create(self, record, persist = True, net_uuid = None):
   23.24          if net_uuid is None:
   23.25              net_uuid = uuid.createString()
   23.26 -        self.networks[net_uuid] = XendNetwork(net_uuid, name_label,
   23.27 -                                              name_description)
   23.28 +        self.networks[net_uuid] = XendNetwork(net_uuid, record)
   23.29          if persist:
   23.30              self.save_networks()
   23.31          return net_uuid
   23.32 @@ -280,7 +276,7 @@ class XendNode:
   23.33          self.state_store.save_state('pif', pif_records)
   23.34  
   23.35      def save_networks(self):
   23.36 -        net_records = dict([(k, v.get_record(transient = False))
   23.37 +        net_records = dict([(k, v.get_record_internal(False))
   23.38                              for k, v in self.networks.items()])
   23.39          self.state_store.save_state('network', net_records)
   23.40  
    24.1 --- a/tools/python/xen/xend/XendOptions.py	Tue Mar 27 12:21:48 2007 -0600
    24.2 +++ b/tools/python/xen/xend/XendOptions.py	Wed Mar 28 10:38:41 2007 +0100
    24.3 @@ -165,7 +165,13 @@ class XendOptions:
    24.4  
    24.5      def get_xend_tcp_xmlrpc_server_address(self):
    24.6          return self.get_config_string("xend-tcp-xmlrpc-server-address",
    24.7 -                                    self.xend_tcp_xmlrpc_server_address_default)    
    24.8 +                                      self.xend_tcp_xmlrpc_server_address_default)
    24.9 +
   24.10 +    def get_xend_tcp_xmlrpc_server_ssl_key_file(self):
   24.11 +        return self.get_config_string("xend-tcp-xmlrpc-server-ssl-key-file")
   24.12 +
   24.13 +    def get_xend_tcp_xmlrpc_server_ssl_cert_file(self):
   24.14 +        return self.get_config_string("xend-tcp-xmlrpc-server-ssl-cert-file")
   24.15  
   24.16      def get_xend_unix_xmlrpc_server(self):
   24.17          return self.get_config_bool("xend-unix-xmlrpc-server",
    25.1 --- a/tools/python/xen/xend/XendPIFMetrics.py	Tue Mar 27 12:21:48 2007 -0600
    25.2 +++ b/tools/python/xen/xend/XendPIFMetrics.py	Wed Mar 28 10:38:41 2007 +0100
    25.3 @@ -39,11 +39,13 @@ class XendPIFMetrics:
    25.4              return pifs_util[pifname][n]
    25.5          return 0.0
    25.6  
    25.7 -    def get_record(self):
    25.8 +    def get_last_updated(self):
    25.9          import xen.xend.XendAPI as XendAPI
   25.10 +        return XendAPI.now()
   25.11 +
   25.12 +    def get_record(self):
   25.13          return {'uuid'         : self.uuid,
   25.14 -                'PIF'          : self.pif.uuid,
   25.15                  'io_read_kbs'  : self.get_io_read_kbs(),
   25.16                  'io_write_kbs' : self.get_io_write_kbs(),
   25.17 -                'last_updated' : XendAPI.now(),
   25.18 +                'last_updated' : self.get_last_updated(),
   25.19                  }
    26.1 --- a/tools/python/xen/xend/XendStateStore.py	Tue Mar 27 12:21:48 2007 -0600
    26.2 +++ b/tools/python/xen/xend/XendStateStore.py	Wed Mar 28 10:38:41 2007 +0100
    26.3 @@ -126,6 +126,13 @@ class XendStateStore:
    26.4                      if val_name not in cls_dict:
    26.5                          cls_dict[val_name] = {}
    26.6                      cls_dict[val_name][val_uuid] = None
    26.7 +                elif val_type == '':
    26.8 +                    # dictionary
    26.9 +                    k = val_elem.getAttribute('key').encode('utf8')
   26.10 +                    v = val_elem.getAttribute('value').encode('utf8')
   26.11 +                    if val_name not in cls_dict:
   26.12 +                        cls_dict[val_name] = {}
   26.13 +                    cls_dict[val_name][k] = v
   26.14                  elif val_type == 'string':
   26.15                      cls_dict[val_name] = val_text.encode('utf8')
   26.16                  elif val_type == 'float':
   26.17 @@ -197,7 +204,11 @@ class XendStateStore:
   26.18                  if type(val) == dict:
   26.19                      for val_uuid in val.keys():
   26.20                          val_node = doc.createElement(key)
   26.21 -                        val_node.setAttribute('uuid', val_uuid)
   26.22 +                        if key == 'other_config':
   26.23 +                            val_node.setAttribute('key', str(val_uuid))
   26.24 +                            val_node.setAttribute('value', str(val[val_uuid]))
   26.25 +                        else:
   26.26 +                            val_node.setAttribute('uuid', val_uuid)
   26.27                          node.appendChild(val_node)
   26.28                  elif type(val) in (list, tuple):
   26.29                      for val_uuid in val:
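
Note: XendStateStore can now round-trip string-to-string maps: on save, each other_config entry is written as an element with key/value attributes rather than a uuid attribute, and on load an element whose type attribute is empty is read back into a dictionary. A minimal sketch of the element form produced, using xml.dom.minidom directly:

    from xml.dom.minidom import Document

    doc = Document()
    entry = doc.createElement('other_config')
    entry.setAttribute('key', 'vlan')
    entry.setAttribute('value', '42')
    doc.appendChild(entry)
    print entry.toxml()   # e.g. <other_config key="vlan" value="42"/>
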
    27.1 --- a/tools/python/xen/xend/XendVMMetrics.py	Tue Mar 27 12:21:48 2007 -0600
    27.2 +++ b/tools/python/xen/xend/XendVMMetrics.py	Wed Mar 28 10:38:41 2007 +0100
    27.3 @@ -92,7 +92,7 @@ class XendVMMetrics:
    27.4                  set_flag('blocked')
    27.5                  set_flag('online')
    27.6                  set_flag('running')
    27.7 -                vcpus_flags[i] = ",".join(flags)
    27.8 +                vcpus_flags[i] = flags
    27.9              return vcpus_flags
   27.10          else:
   27.11              return {}
   27.12 @@ -115,7 +115,7 @@ class XendVMMetrics:
   27.13                  addState("dying")
   27.14                  addState("crashed")
   27.15                  addState("shutdown")
   27.16 -                return ",".join(states)
   27.17 +                return states
   27.18          except Exception, err:
   27.19              # ignore missing domain
   27.20              log.trace("domain_getinfo(%d) failed, ignoring: %s", domid, str(err))
   27.21 @@ -140,8 +140,11 @@ class XendVMMetrics:
   27.22      def get_start_time(self):
   27.23          return self.xend_domain_instance.info.get("start_time", -1)
   27.24      
   27.25 +    def get_last_updated(self):
   27.26 +        import xen.xend.XendAPI as XendAPI
   27.27 +        return XendAPI.now()
   27.28 +    
   27.29      def get_record(self):
   27.30 -        import xen.xend.XendAPI as XendAPI
   27.31          return { 'uuid'              : self.uuid,
   27.32                   'memory_actual'     : self.get_memory_actual(),
   27.33                   'VCPUs_number'      : self.get_VCPUs_number(),
   27.34 @@ -151,5 +154,5 @@ class XendVMMetrics:
   27.35                   'VCPUs_params'      : self.get_VCPUs_params(),
   27.36                   'start_time'        : self.get_start_time(),
   27.37                   'state'             : self.get_state(),
   27.38 -                 'last_updated'      : XendAPI.now(),
   27.39 +                 'last_updated'      : self.get_last_updated(),
   27.40                 }
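
Note: VCPUs_flags and the VM state are now returned as lists instead of comma-joined strings, so Xen-API clients test membership directly (see the xm/main.py hunks below). A short illustration of the difference:

    flags = ['blocked', 'online', 'running']   # new list form
    print 'running' in flags                   # True
    print ','.join(flags)                      # old string form: blocked,online,running
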
    28.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    28.2 +++ b/tools/python/xen/xend/server/SSLXMLRPCServer.py	Wed Mar 28 10:38:41 2007 +0100
    28.3 @@ -0,0 +1,103 @@
    28.4 +#============================================================================
    28.5 +# This library is free software; you can redistribute it and/or
    28.6 +# modify it under the terms of version 2.1 of the GNU Lesser General Public
    28.7 +# License as published by the Free Software Foundation.
    28.8 +#
    28.9 +# This library is distributed in the hope that it will be useful,
   28.10 +# but WITHOUT ANY WARRANTY; without even the implied warranty of
   28.11 +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   28.12 +# Lesser General Public License for more details.
   28.13 +#
   28.14 +# You should have received a copy of the GNU Lesser General Public
   28.15 +# License along with this library; if not, write to the Free Software
   28.16 +# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
   28.17 +#============================================================================
   28.18 +# Copyright (C) 2007 XenSource Inc.
   28.19 +#============================================================================
   28.20 +
   28.21 +
   28.22 +"""
   28.23 +HTTPS wrapper for an XML-RPC server interface.  Requires PyOpenSSL (Debian
   28.24 +package python-pyopenssl).
   28.25 +"""
   28.26 +
   28.27 +import socket
   28.28 +
   28.29 +from OpenSSL import SSL
   28.30 +
   28.31 +from xen.util.xmlrpclib2 import XMLRPCRequestHandler, TCPXMLRPCServer
   28.32 +
   28.33 +
   28.34 +class SSLXMLRPCRequestHandler(XMLRPCRequestHandler):
   28.35 +    def setup(self):
   28.36 +        self.connection = self.request
   28.37 +        self.rfile = socket._fileobject(self.request, "rb", self.rbufsize)
   28.38 +        self.wfile = socket._fileobject(self.request, "wb", self.wbufsize)
   28.39 +
   28.40 +#
   28.41 +# Taken from pyOpenSSL-0.6 examples (public-domain)
   28.42 +#
   28.43 +
   28.44 +class SSLWrapper:
    28.45 +    """Proxy around an OpenSSL SSL.Connection, adapting close(),
    28.46 +    shutdown() and accept() for use as a server socket."""
   28.47 +    def __init__(self, conn):
   28.48 +        """
   28.49 +        Connection is not yet a new-style class,
   28.50 +        so I'm making a proxy instead of subclassing.
   28.51 +        """
   28.52 +        self.__dict__["conn"] = conn
   28.53 +    def __getattr__(self, name):
   28.54 +        return getattr(self.__dict__["conn"], name)
   28.55 +    def __setattr__(self, name, value):
   28.56 +        setattr(self.__dict__["conn"], name, value)
   28.57 +
   28.58 +    def close(self):
   28.59 +        self.shutdown()
   28.60 +        return self.__dict__["conn"].close()
   28.61 +
   28.62 +    def shutdown(self, how=1):
   28.63 +        """
   28.64 +        SimpleXMLRpcServer.doPOST calls shutdown(1),
   28.65 +        and Connection.shutdown() doesn't take
   28.66 +        an argument. So we just discard the argument.
   28.67 +        """
   28.68 +        # Block until the shutdown is complete
   28.69 +        self.__dict__["conn"].shutdown()
   28.70 +        self.__dict__["conn"].shutdown()
   28.71 +
   28.72 +    def accept(self):
   28.73 +        """
   28.74 +        This is the other part of the shutdown() workaround.
   28.75 +        Since servers create new sockets, we have to infect
   28.76 +        them with our magic. :)
   28.77 +        """
   28.78 +        c, a = self.__dict__["conn"].accept()
   28.79 +        return (SSLWrapper(c), a)
   28.80 +
   28.81 +#
   28.82 +# End of pyOpenSSL-0.6 example code.
   28.83 +#
   28.84 +
   28.85 +class SSLXMLRPCServer(TCPXMLRPCServer):
   28.86 +    def __init__(self, addr, allowed, xenapi, logRequests = 1,
   28.87 +                 ssl_key_file = None, ssl_cert_file = None):
   28.88 +
   28.89 +        TCPXMLRPCServer.__init__(self, addr, allowed, xenapi,
   28.90 +                                 SSLXMLRPCRequestHandler, logRequests)
   28.91 +
   28.92 +        if not ssl_key_file or not ssl_cert_file:
   28.93 +            raise ValueError("SSLXMLRPCServer requires ssl_key_file "
   28.94 +                             "and ssl_cert_file to be set.")
   28.95 +
   28.96 +        # make a SSL socket
   28.97 +        ctx = SSL.Context(SSL.SSLv23_METHOD)
   28.98 +        ctx.set_options(SSL.OP_NO_SSLv2)
   28.99 +        ctx.use_privatekey_file (ssl_key_file)
  28.100 +        ctx.use_certificate_file(ssl_cert_file)
  28.101 +        self.socket = SSLWrapper(SSL.Connection(ctx,
  28.102 +                                 socket.socket(self.address_family,
  28.103 +                                               self.socket_type)))
  28.104 +        self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
  28.105 +        self.server_bind()
  28.106 +        self.server_activate()
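
Note: SSLXMLRPCServer wraps the listening socket of a TCPXMLRPCServer in a pyOpenSSL connection. A hedged construction sketch; the port and key/certificate paths are illustrative, not shipped defaults:

    from xen.xend.server.SSLXMLRPCServer import SSLXMLRPCServer

    server = SSLXMLRPCServer(('localhost', 9363),    # addr
                             None,                   # hosts_allowed (no filter)
                             True,                   # serve the Xen-API
                             logRequests = False,
                             ssl_key_file = '/etc/xen/xen-api.key',
                             ssl_cert_file = '/etc/xen/xen-api.crt')
    # Requests are then dispatched by XMLRPCServer.run(), as in the hunks below.
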
    29.1 --- a/tools/python/xen/xend/server/SrvServer.py	Tue Mar 27 12:21:48 2007 -0600
    29.2 +++ b/tools/python/xen/xend/server/SrvServer.py	Wed Mar 28 10:38:41 2007 +0100
    29.3 @@ -52,6 +52,7 @@ from xen.xend import XendNode, XendOptio
    29.4  from xen.xend import Vifctl
    29.5  from xen.xend.XendLogging import log
    29.6  from xen.xend.XendClient import XEN_API_SOCKET
    29.7 +from xen.xend.XendDomain import instance as xenddomain
    29.8  from xen.web.SrvDir import SrvDir
    29.9  
   29.10  from SrvRoot import SrvRoot
   29.11 @@ -72,7 +73,7 @@ class XendServers:
   29.12      def add(self, server):
   29.13          self.servers.append(server)
   29.14  
   29.15 -    def cleanup(self, signum = 0, frame = None):
   29.16 +    def cleanup(self, signum = 0, frame = None, reloading = False):
   29.17          log.debug("SrvServer.cleanup()")
   29.18          self.cleaningUp = True
   29.19          for server in self.servers:
   29.20 @@ -80,12 +81,18 @@ class XendServers:
   29.21                  server.shutdown()
   29.22              except:
   29.23                  pass
   29.24 +
   29.25 +        # clean up domains for those that have on_xend_stop
   29.26 +        if not reloading:
   29.27 +            xenddomain().cleanup_domains()
   29.28 +        
   29.29          self.running = False
   29.30 +        
   29.31  
   29.32      def reloadConfig(self, signum = 0, frame = None):
   29.33          log.debug("SrvServer.reloadConfig()")
   29.34          self.reloadingConfig = True
   29.35 -        self.cleanup(signum, frame)
   29.36 +        self.cleanup(signum, frame, reloading = True)
   29.37  
   29.38      def start(self, status):
   29.39          # Running the network script will spawn another process, which takes
   29.40 @@ -144,6 +151,12 @@ class XendServers:
   29.41                  status.close()
   29.42                  status = None
   29.43  
   29.44 +            # Reaching this point means we can auto start domains
   29.45 +            try:
   29.46 +                xenddomain().autostart_domains()
   29.47 +            except Exception, e:
   29.48 +                log.exception("Failed while autostarting domains")
   29.49 +
   29.50              # loop to keep main thread alive until it receives a SIGTERM
   29.51              self.running = True
   29.52              while self.running:
   29.53 @@ -172,33 +185,49 @@ def _loadConfig(servers, root, reload):
   29.54      api_cfg = xoptions.get_xen_api_server()
   29.55      if api_cfg:
   29.56          try:
   29.57 -            addrs = [(str(x[0]).split(':'),
   29.58 -                      len(x) > 1 and x[1] or XendAPI.AUTH_PAM,
   29.59 -                      len(x) > 2 and x[2] and map(re.compile, x[2].split(" "))
   29.60 -                      or None)
   29.61 -                     for x in api_cfg]
   29.62 -            for addrport, auth, allowed in addrs:
   29.63 -                if auth not in [XendAPI.AUTH_PAM, XendAPI.AUTH_NONE]:
   29.64 -                    log.error('Xen-API server configuration %s is invalid, ' +
   29.65 -                              'as %s is not a valid authentication type.',
   29.66 -                              api_cfg, auth)
   29.67 -                    break
   29.68 +            for server_cfg in api_cfg:
   29.69 +                # Parse the xen-api-server config
   29.70 +                
   29.71 +                host = 'localhost'
   29.72 +                port = 0
   29.73 +                use_tcp = False
   29.74 +                ssl_key_file = None
   29.75 +                ssl_cert_file = None
   29.76 +                auth_method = XendAPI.AUTH_NONE
   29.77 +                hosts_allowed = None
   29.78 +                
   29.79 +                host_addr = server_cfg[0].split(':', 1)
   29.80 +                if len(host_addr) == 1 and host_addr[0].lower() == 'unix':
   29.81 +                    use_tcp = False
   29.82 +                elif len(host_addr) == 1:
   29.83 +                    use_tcp = True
   29.84 +                    port = int(host_addr[0])
   29.85 +                elif len(host_addr) == 2:
   29.86 +                    use_tcp = True
   29.87 +                    host = str(host_addr[0])
   29.88 +                    port = int(host_addr[1])
   29.89  
   29.90 -                if len(addrport) == 1:
   29.91 -                    if addrport[0] == 'unix':
   29.92 -                        servers.add(XMLRPCServer(auth, True,
   29.93 -                                                 path = XEN_API_SOCKET,
   29.94 -                                                 hosts_allowed = allowed))
   29.95 -                    else:
   29.96 -                        servers.add(
   29.97 -                            XMLRPCServer(auth, True, True, '',
   29.98 -                                         int(addrport[0]),
   29.99 -                                         hosts_allowed = allowed))
  29.100 -                else:
  29.101 -                    addr, port = addrport
  29.102 -                    servers.add(XMLRPCServer(auth, True, True, addr,
  29.103 -                                             int(port),
  29.104 -                                             hosts_allowed = allowed))
  29.105 +                if len(server_cfg) > 1:
  29.106 +                    if server_cfg[1] in [XendAPI.AUTH_PAM, XendAPI.AUTH_NONE]:
  29.107 +                        auth_method = server_cfg[1]
  29.108 +
  29.109 +                if len(server_cfg) > 2:
  29.110 +                    hosts_allowed = server_cfg[2] or None
  29.111 +                
  29.112 +
  29.113 +                if len(server_cfg) > 4:
  29.114 +                    # SSL key and cert file
  29.115 +                    ssl_key_file = server_cfg[3]
  29.116 +                    ssl_cert_file = server_cfg[4]
  29.117 +
  29.118 +
  29.119 +                servers.add(XMLRPCServer(auth_method, True, use_tcp = use_tcp,
  29.120 +                                         ssl_key_file = ssl_key_file,
  29.121 +                                         ssl_cert_file = ssl_cert_file,
  29.122 +                                         host = host, port = port,
  29.123 +                                         path = XEN_API_SOCKET,
  29.124 +                                         hosts_allowed = hosts_allowed))
  29.125 +
  29.126          except (ValueError, TypeError), exn:
  29.127              log.exception('Xen API Server init failed')
  29.128              log.error('Xen-API server configuration %s is invalid.', api_cfg)
  29.129 @@ -206,8 +235,17 @@ def _loadConfig(servers, root, reload):
  29.130      if xoptions.get_xend_tcp_xmlrpc_server():
  29.131          addr = xoptions.get_xend_tcp_xmlrpc_server_address()
  29.132          port = xoptions.get_xend_tcp_xmlrpc_server_port()
  29.133 -        servers.add(XMLRPCServer(XendAPI.AUTH_PAM, False, use_tcp = True,
  29.134 -                                 host = addr, port = port))
  29.135 +        ssl_key_file = xoptions.get_xend_tcp_xmlrpc_server_ssl_key_file()
  29.136 +        ssl_cert_file = xoptions.get_xend_tcp_xmlrpc_server_ssl_cert_file()
  29.137 +
  29.138 +        if ssl_key_file and ssl_cert_file:
  29.139 +            servers.add(XMLRPCServer(XendAPI.AUTH_PAM, False, use_tcp = True,
  29.140 +                                     ssl_key_file = ssl_key_file,
  29.141 +                                     ssl_cert_file = ssl_cert_file,
  29.142 +                                     host = addr, port = port))
  29.143 +        else:
  29.144 +            servers.add(XMLRPCServer(XendAPI.AUTH_PAM, False, use_tcp = True,
  29.145 +                                     host = addr, port = port))
  29.146  
  29.147      if xoptions.get_xend_unix_xmlrpc_server():
  29.148          servers.add(XMLRPCServer(XendAPI.AUTH_PAM, False))
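
Note: _loadConfig now parses each xen-api-server entry itself: element 0 is the address ('unix', a bare port, or host:port), element 1 the authentication method, element 2 the allowed-hosts expression, and elements 3 and 4, when present, the SSL key and certificate files that turn the listener into HTTPS. A standalone walk-through of that parsing with an illustrative five-element entry:

    server_cfg = ['9363', 'pam', '',
                  '/etc/xen/xen-api.key', '/etc/xen/xen-api.crt']

    host, port, use_tcp = 'localhost', 0, False
    host_addr = server_cfg[0].split(':', 1)
    if len(host_addr) == 1 and host_addr[0].lower() == 'unix':
        use_tcp = False                                   # unix domain socket
    elif len(host_addr) == 1:
        use_tcp, port = True, int(host_addr[0])           # port only
    else:
        use_tcp, host, port = True, host_addr[0], int(host_addr[1])
    ssl_files = len(server_cfg) > 4 and tuple(server_cfg[3:5]) or None
    print host, port, use_tcp, ssl_files
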
    30.1 --- a/tools/python/xen/xend/server/XMLRPCServer.py	Tue Mar 27 12:21:48 2007 -0600
    30.2 +++ b/tools/python/xen/xend/server/XMLRPCServer.py	Wed Mar 28 10:38:41 2007 +0100
    30.3 @@ -21,6 +21,11 @@ import socket
    30.4  import types
    30.5  import xmlrpclib
    30.6  from xen.util.xmlrpclib2 import UnixXMLRPCServer, TCPXMLRPCServer
    30.7 +try:
    30.8 +    from SSLXMLRPCServer import SSLXMLRPCServer
    30.9 +    ssl_enabled = True
   30.10 +except ImportError:
   30.11 +    ssl_enabled = False
   30.12  
   30.13  from xen.xend import XendAPI, XendDomain, XendDomainInfo, XendNode
   30.14  from xen.xend import XendLogging, XendDmesg
   30.15 @@ -87,14 +92,20 @@ methods = ['device_create', 'device_conf
   30.16  exclude = ['domain_create', 'domain_restore']
   30.17  
   30.18  class XMLRPCServer:
   30.19 -    def __init__(self, auth, use_xenapi, use_tcp=False, host = "localhost",
   30.20 -                 port = 8006, path = XML_RPC_SOCKET, hosts_allowed = None):
   30.21 +    def __init__(self, auth, use_xenapi, use_tcp = False,
   30.22 +                 ssl_key_file = None, ssl_cert_file = None,
   30.23 +                 host = "localhost", port = 8006, path = XML_RPC_SOCKET,
   30.24 +                 hosts_allowed = None):
   30.25 +        
   30.26          self.use_tcp = use_tcp
   30.27          self.port = port
   30.28          self.host = host
   30.29          self.path = path
   30.30          self.hosts_allowed = hosts_allowed
   30.31          
   30.32 +        self.ssl_key_file = ssl_key_file
   30.33 +        self.ssl_cert_file = ssl_cert_file
   30.34 +        
   30.35          self.ready = False        
   30.36          self.running = True
   30.37          self.auth = auth
   30.38 @@ -107,14 +118,33 @@ class XMLRPCServer:
   30.39  
   30.40          try:
   30.41              if self.use_tcp:
   30.42 -                log.info("Opening TCP XML-RPC server on %s%d%s",
   30.43 +                using_ssl = self.ssl_key_file and self.ssl_cert_file
   30.44 +
   30.45 +                log.info("Opening %s XML-RPC server on %s%d%s",
   30.46 +                         using_ssl and 'HTTPS' or 'TCP',
   30.47                           self.host and '%s:' % self.host or
   30.48                           'all interfaces, port ',
   30.49                           self.port, authmsg)
   30.50 -                self.server = TCPXMLRPCServer((self.host, self.port),
   30.51 -                                              self.hosts_allowed,
   30.52 -                                              self.xenapi is not None,
   30.53 -                                              logRequests = False)
   30.54 +
    30.55 +                if using_ssl and not ssl_enabled:
   30.56 +                    raise ValueError("pyOpenSSL not installed. "
   30.57 +                                     "Unable to start HTTPS XML-RPC server")
   30.58 +
   30.59 +                if using_ssl:
   30.60 +                    self.server = SSLXMLRPCServer(
   30.61 +                        (self.host, self.port),
   30.62 +                        self.hosts_allowed,
   30.63 +                        self.xenapi is not None,
   30.64 +                        logRequests = False,
   30.65 +                        ssl_key_file = self.ssl_key_file,
   30.66 +                        ssl_cert_file = self.ssl_cert_file)
   30.67 +                else:
   30.68 +                    self.server = TCPXMLRPCServer(
   30.69 +                        (self.host, self.port),
   30.70 +                        self.hosts_allowed,
   30.71 +                        self.xenapi is not None,
   30.72 +                        logRequests = False)
   30.73 +
   30.74              else:
   30.75                  log.info("Opening Unix domain socket XML-RPC server on %s%s",
   30.76                           self.path, authmsg)
   30.77 @@ -126,7 +156,12 @@ class XMLRPCServer:
   30.78              ready = True
   30.79              running = False
   30.80              return
   30.81 -
   30.82 +        except Exception, e:
   30.83 +            log.exception('Cannot start server: %s!', e)
   30.84 +            ready = True
   30.85 +            running = False
   30.86 +            return
   30.87 +        
   30.88          # Register Xen API Functions
   30.89          # -------------------------------------------------------------------
   30.90          # exportable functions are ones that do not begin with '_'
    31.1 --- a/tools/python/xen/xm/XenAPI.py	Tue Mar 27 12:21:48 2007 -0600
    31.2 +++ b/tools/python/xen/xm/XenAPI.py	Wed Mar 28 10:38:41 2007 +0100
    31.3 @@ -12,7 +12,7 @@
    31.4  # License along with this library; if not, write to the Free Software
    31.5  # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
    31.6  #============================================================================
    31.7 -# Copyright (C) 2006 XenSource Inc.
    31.8 +# Copyright (C) 2006-2007 XenSource Inc.
    31.9  #============================================================================
   31.10  #
   31.11  # Parts of this file are based upon xmlrpclib.py, the XML-RPC client
   31.12 @@ -47,7 +47,7 @@
   31.13  import gettext
   31.14  import xmlrpclib
   31.15  
   31.16 -import xen.util.xmlrpclib2
   31.17 +import xen.util.xmlrpcclient as xmlrpcclient
   31.18  
   31.19  
   31.20  translation = gettext.translation('xen-xm', fallback = True)
   31.21 @@ -85,7 +85,7 @@ class Failure(Exception):
   31.22  _RECONNECT_AND_RETRY = (lambda _ : ())
   31.23  
   31.24  
   31.25 -class Session(xen.util.xmlrpclib2.ServerProxy):
   31.26 +class Session(xmlrpcclient.ServerProxy):
   31.27      """A server proxy and session manager for communicating with Xend using
   31.28      the Xen-API.
   31.29  
   31.30 @@ -104,14 +104,16 @@ class Session(xen.util.xmlrpclib2.Server
   31.31  
   31.32      def __init__(self, uri, transport=None, encoding=None, verbose=0,
   31.33                   allow_none=1):
   31.34 -        xen.util.xmlrpclib2.ServerProxy.__init__(self, uri, transport,
   31.35 -                                                 encoding, verbose,
   31.36 -                                                 allow_none)
   31.37 +        xmlrpcclient.ServerProxy.__init__(self, uri, transport, encoding,
   31.38 +                                          verbose, allow_none)
   31.39          self._session = None
   31.40          self.last_login_method = None
   31.41          self.last_login_params = None
   31.42  
   31.43  
   31.44 +    def getSession(self):
   31.45 +        return self._session
   31.46 +
   31.47      def xenapi_request(self, methodname, params):
   31.48          if methodname.startswith('login'):
   31.49              self._login(methodname, params)
   31.50 @@ -150,7 +152,7 @@ class Session(xen.util.xmlrpclib2.Server
   31.51          elif name.startswith('login'):
   31.52              return lambda *params: self._login(name, params)
   31.53          else:
   31.54 -            return xen.util.xmlrpclib2.ServerProxy.__getattr__(self, name)
   31.55 +            return xmlrpcclient.ServerProxy.__getattr__(self, name)
   31.56  
   31.57  
   31.58  def _parse_result(result):
    32.1 --- a/tools/python/xen/xm/create.dtd	Tue Mar 27 12:21:48 2007 -0600
    32.2 +++ b/tools/python/xen/xm/create.dtd	Wed Mar 28 10:38:41 2007 +0100
    32.3 @@ -111,7 +111,7 @@
    32.4  
    32.5  <!ELEMENT other_config EMPTY>
    32.6  <!ATTLIST other_config key   CDATA #REQUIRED
    32.7 -                      value CDATA #REQUIRED>
    32.8 +                       value CDATA #REQUIRED>
    32.9  
   32.10  <!ELEMENT qos_algorithm_param EMPTY>
   32.11  <!ATTLIST qos_algorithm_param key   CDATA #REQUIRED
    33.1 --- a/tools/python/xen/xm/create.py	Tue Mar 27 12:21:48 2007 -0600
    33.2 +++ b/tools/python/xen/xm/create.py	Wed Mar 28 10:38:41 2007 +0100
    33.3 @@ -104,6 +104,11 @@ gopts.opt('xmldryrun', short='x',
    33.4            use="XML dry run - prints the resulting configuration in XML but "
    33.5            "does not create the domain.")
    33.6  
    33.7 +gopts.opt('skipdtd', short='s',
    33.8 +          fn=set_true, default=0,
    33.9 +          use="Skip DTD checking - skips checks on XML before creating. "
   33.10 +          "Experimental. Can decrease create time.")
   33.11 +
   33.12  gopts.opt('paused', short='p',
   33.13            fn=set_true, default=0,
   33.14            use='Leave the domain paused after it is created.')
   33.15 @@ -1098,6 +1103,8 @@ def parseCommandLine(argv):
   33.16      if not gopts.vals.xauthority:
   33.17          gopts.vals.xauthority = get_xauthority()
   33.18  
   33.19 +    gopts.is_xml = False
   33.20 +
   33.21      # Process remaining args as config variables.
   33.22      for arg in args:
   33.23          if '=' in arg:
   33.24 @@ -1106,11 +1113,16 @@ def parseCommandLine(argv):
   33.25      if gopts.vals.config:
   33.26          config = gopts.vals.config
   33.27      else:
   33.28 -        gopts.load_defconfig()
   33.29 -        preprocess(gopts.vals)
   33.30 -        if not gopts.getopt('name') and gopts.getopt('defconfig'):
   33.31 -            gopts.setopt('name', os.path.basename(gopts.getopt('defconfig')))
   33.32 -        config = make_config(gopts.vals)
   33.33 +        try:
   33.34 +            gopts.load_defconfig()
   33.35 +            preprocess(gopts.vals)
   33.36 +            if not gopts.getopt('name') and gopts.getopt('defconfig'):
   33.37 +                gopts.setopt('name', os.path.basename(gopts.getopt('defconfig')))
   33.38 +            config = make_config(gopts.vals)
   33.39 +        except XMLFileError, ex:
   33.40 +            XMLFile = ex.getFile()
   33.41 +            gopts.is_xml = True
   33.42 +            config = ex.getFile()
   33.43  
   33.44      return (gopts, config)
   33.45  
   33.46 @@ -1233,6 +1245,8 @@ def help():
   33.47      return str(gopts)
   33.48  
   33.49  def main(argv):
   33.50 +    is_xml = False
   33.51 +    
   33.52      try:
   33.53          (opts, config) = parseCommandLine(argv)
   33.54      except StandardError, ex:
   33.55 @@ -1241,23 +1255,24 @@ def main(argv):
   33.56      if not opts:
   33.57          return
   33.58  
   33.59 -    if type(config) == str:
   33.60 -        try:
   33.61 -            config = sxp.parse(file(config))[0]
   33.62 -        except IOError, exn:
   33.63 -            raise OptionError("Cannot read file %s: %s" % (config, exn[1]))
   33.64 +    if not opts.is_xml:
   33.65 +        if type(config) == str:
   33.66 +            try:
   33.67 +                config = sxp.parse(file(config))[0]
   33.68 +            except IOError, exn:
   33.69 +                raise OptionError("Cannot read file %s: %s" % (config, exn[1]))
   33.70 +        
   33.71 +        if serverType == SERVER_XEN_API:
   33.72 +            from xen.xm.xenapi_create import sxp2xml
   33.73 +            sxp2xml_inst = sxp2xml()
   33.74 +            doc = sxp2xml_inst.convert_sxp_to_xml(config, transient=True)
   33.75  
   33.76 -    if serverType == SERVER_XEN_API:
   33.77 -        from xen.xm.xenapi_create import sxp2xml
   33.78 -        sxp2xml_inst = sxp2xml()
   33.79 -        doc = sxp2xml_inst.convert_sxp_to_xml(config, transient=True)
   33.80 +        if opts.vals.dryrun and not opts.is_xml:
   33.81 +            SXPPrettyPrint.prettyprint(config)
   33.82  
   33.83 -    if opts.vals.dryrun:
   33.84 -        SXPPrettyPrint.prettyprint(config)
   33.85 -
   33.86 -    if opts.vals.xmldryrun and serverType == SERVER_XEN_API:
   33.87 -        from xml.dom.ext import PrettyPrint as XMLPrettyPrint
   33.88 -        XMLPrettyPrint(doc)
   33.89 +        if opts.vals.xmldryrun and serverType == SERVER_XEN_API:
   33.90 +            from xml.dom.ext import PrettyPrint as XMLPrettyPrint
   33.91 +            XMLPrettyPrint(doc)
   33.92  
   33.93      if opts.vals.dryrun or opts.vals.xmldryrun:
   33.94          return                                               
   33.95 @@ -1268,10 +1283,15 @@ def main(argv):
   33.96      if serverType == SERVER_XEN_API:        
   33.97          from xen.xm.xenapi_create import xenapi_create
   33.98          xenapi_create_inst = xenapi_create()
   33.99 -        vm_refs = xenapi_create_inst.create(document = doc)
  33.100 +        if opts.is_xml:
  33.101 +            vm_refs = xenapi_create_inst.create(filename = config,
  33.102 +                                                skipdtd = opts.vals.skipdtd)
  33.103 +        else:
  33.104 +            vm_refs = xenapi_create_inst.create(document = doc,
  33.105 +                                                skipdtd = opts.vals.skipdtd)
  33.106  
  33.107          map(lambda vm_ref: server.xenapi.VM.start(vm_ref, 0), vm_refs)
  33.108 -    else:
  33.109 +    elif not opts.is_xml:
  33.110          if not create_security_check(config):
  33.111              raise security.ACMError(
  33.112                  'Security Configuration prevents domain from starting')
    34.1 --- a/tools/python/xen/xm/main.py	Tue Mar 27 12:21:48 2007 -0600
    34.2 +++ b/tools/python/xen/xm/main.py	Wed Mar 28 10:38:41 2007 +0100
    34.3 @@ -49,7 +49,7 @@ from xen.xend.XendConstants import *
    34.4  
    34.5  from xen.xm.opts import OptionError, Opts, wrap, set_true
    34.6  from xen.xm import console
    34.7 -from xen.util.xmlrpclib2 import ServerProxy
    34.8 +from xen.util.xmlrpcclient import ServerProxy
    34.9  
   34.10  import XenAPI
   34.11  
   34.12 @@ -722,7 +722,7 @@ def getDomains(domain_names, state, full
   34.13              states = ('running', 'blocked', 'paused', 'shutdown',
   34.14                        'crashed', 'dying')
   34.15              def state_on_off(state):
   34.16 -                if dom_metrics['state'].find(state) > -1:
   34.17 +                if state in dom_metrics['state']:
   34.18                      return state[0]
   34.19                  else:
   34.20                      return "-"
   34.21 @@ -850,7 +850,8 @@ def parse_doms_info(info):
   34.22  
   34.23  def check_sched_type(sched):
   34.24      if serverType == SERVER_XEN_API:
   34.25 -        current = server.xenapi.host.get_sched_policy(server.xenapi.session.get_this_host())
   34.26 +        current = server.xenapi.host.get_sched_policy(
   34.27 +            server.xenapi.session.get_this_host(server.getSession()))
   34.28      else:
   34.29          current = 'unknown'
   34.30          for x in server.xend.node.info()[1:]:
   34.31 @@ -952,12 +953,10 @@ def xm_vcpu_list(args):
   34.32                      ['name',       vm_records[vm_ref]['name_label']],
   34.33                      ['vcpu_count', vm_records[vm_ref]['VCPUs_max']]]
   34.34  
   34.35 -            
   34.36 -
   34.37              for i in range(int(vm_records[vm_ref]['VCPUs_max'])):
   34.38                  def chk_flag(flag):
   34.39 -                    return vm_metrics[vm_ref]['VCPUs_flags'][str(i)] \
   34.40 -                           .find(flag) > -1 and 1 or 0
   34.41 +                    return flag in vm_metrics[vm_ref]['VCPUs_flags'][str(i)] \
   34.42 +                           and 1 or 0
   34.43                  
   34.44                  vcpu_info = ['vcpu',
   34.45                               ['number',
   34.46 @@ -1044,7 +1043,7 @@ def xm_vcpu_list(args):
   34.47  
   34.48              if serverType == SERVER_XEN_API:
   34.49                  nr_cpus = len(server.xenapi.host.get_host_CPUs(
   34.50 -                    server.xenapi.session.get_this_host()))
   34.51 +                    server.xenapi.session.get_this_host(server.getSession())))
   34.52              else:
   34.53                  for x in server.xend.node.info()[1:]:
   34.54                      if len(x) > 1 and x[0] == 'nr_cpus':
   34.55 @@ -1260,8 +1259,9 @@ def xm_vcpu_pin(args):
   34.56          cpumap = cpu_make_map(args[2])
   34.57  
   34.58      if serverType == SERVER_XEN_API:
   34.59 +        cpumap = map(str, cpumap)        
   34.60          server.xenapi.VM.add_to_VCPUs_params_live(
   34.61 -            get_single_vm(dom), "cpumap%i" % vcpu, ",".join(cpumap))
   34.62 +            get_single_vm(dom), "cpumap%i" % int(vcpu), ",".join(cpumap))
   34.63      else:
   34.64          server.xend.domain.pincpu(dom, vcpu, cpumap)
   34.65  
   34.66 @@ -1509,7 +1509,7 @@ def xm_info(args):
   34.67          # Need to fake out old style xm info as people rely on parsing it
   34.68          
   34.69          host_record = server.xenapi.host.get_record(
   34.70 -            server.xenapi.session.get_this_host())        
   34.71 +            server.xenapi.session.get_this_host(server.getSession()))
   34.72  
   34.73          host_cpu_records = map(server.xenapi.host_cpu.get_record, host_record["host_CPUs"])
   34.74  
   34.75 @@ -1686,7 +1686,7 @@ def xm_debug_keys(args):
   34.76      
   34.77      if serverType == SERVER_XEN_API:
   34.78          server.xenapi.host.send_debug_keys(
   34.79 -            server.xenapi.session.get_this_host(),
   34.80 +            server.xenapi.session.get_this_host(server.getSession()),
   34.81              keys)
   34.82      else:
   34.83          server.xend.node.send_debug_keys(keys)
   34.84 @@ -1715,7 +1715,7 @@ def xm_dmesg(args):
   34.85          usage('dmesg')
   34.86  
   34.87      if serverType == SERVER_XEN_API:
   34.88 -        host = server.xenapi.session.get_this_host()
   34.89 +        host = server.xenapi.session.get_this_host(server.getSession())
   34.90          if use_clear:
   34.91              print server.xenapi.host.dmesg_clear(host),
   34.92          else:
   34.93 @@ -1731,7 +1731,7 @@ def xm_log(args):
   34.94  
   34.95      if serverType == SERVER_XEN_API:
   34.96          print server.xenapi.host.get_log(
   34.97 -            server.xenapi.session.get_this_host())
   34.98 +            server.xenapi.session.get_this_host(server.getSession()))
   34.99      else:
  34.100          print server.xend.node.log()
  34.101  
  34.102 @@ -2371,11 +2371,10 @@ def _run_cmd(cmd, cmd_name, args):
  34.103             if isinstance(e, security.ACMError):
  34.104                 err(str(e))
  34.105                 return False, 1
  34.106 -        else:
  34.107 -            print "Unexpected error:", sys.exc_info()[0]
  34.108 -            print
  34.109 -            print "Please report to xen-devel@lists.xensource.com"
  34.110 -            raise
  34.111 +        print "Unexpected error:", sys.exc_info()[0]
  34.112 +        print
  34.113 +        print "Please report to xen-devel@lists.xensource.com"
  34.114 +        raise
  34.115  
  34.116      return False, 1
  34.117  
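
Note: session.get_this_host now takes the session reference as an argument, which xm obtains through the new Session.getSession() accessor. A hedged sketch, assuming server is an already-authenticated XenAPI.Session as established elsewhere in xm/main.py:

    this_host = server.xenapi.session.get_this_host(server.getSession())
    print server.xenapi.host.get_sched_policy(this_host)
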
    35.1 --- a/tools/python/xen/xm/opts.py	Tue Mar 27 12:21:48 2007 -0600
    35.2 +++ b/tools/python/xen/xm/opts.py	Wed Mar 28 10:38:41 2007 +0100
    35.3 @@ -24,6 +24,8 @@ import os.path
    35.4  import sys
    35.5  import types
    35.6  
    35.7 +
    35.8 +
    35.9  def _line_wrap(text, width = 70):
   35.10      lines = []
   35.11      current_line = ''
   35.12 @@ -60,6 +62,15 @@ class OptionError(Exception):
   35.13      def __str__(self):
   35.14          return self.message
   35.15  
   35.16 +class XMLFileError(Exception):
    35.17 +    """Thrown if the input is an XML file."""
   35.18 +    def __init__(self, XMLFile):
   35.19 +        self.XMLFile = XMLFile
   35.20 +    def __str__(self):
   35.21 +        return "XMLFileError: %s" % self.XMLFile
   35.22 +    def getFile(self):
   35.23 +        return self.XMLFile
   35.24 +
   35.25  class Opt:
   35.26      """An individual option.
   35.27      """
   35.28 @@ -492,6 +503,14 @@ class Opts:
   35.29                  p = os.path.join(os.path.curdir, p)
   35.30              if os.path.exists(p):
   35.31                  self.info('Using config file "%s".' % p)
   35.32 +
   35.33 +                f = open(p)
   35.34 +                is_xml = (f.read(1) == '<')
   35.35 +                f.close()
   35.36 +
   35.37 +                if is_xml:
   35.38 +                    raise XMLFileError(p)
   35.39 +
   35.40                  self.load(p, help)
   35.41                  break
   35.42          else:
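
Note: load_defconfig now sniffs the chosen config file: if its first byte is '<', the file is assumed to be a Xen-API XML definition and an XMLFileError carrying the path is raised, which xm create catches and passes to xenapi_create instead of evaluating the file as a Python defconfig. A standalone sketch of the sniff:

    def looks_like_xml(path):
        # Mirrors the one-byte check above: anything starting with '<'
        # is treated as XML rather than a Python defconfig.
        f = open(path)
        try:
            return f.read(1) == '<'
        finally:
            f.close()
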
    36.1 --- a/tools/python/xen/xm/xenapi_create.py	Tue Mar 27 12:21:48 2007 -0600
    36.2 +++ b/tools/python/xen/xm/xenapi_create.py	Wed Mar 28 10:38:41 2007 +0100
    36.3 @@ -75,15 +75,20 @@ class xenapi_create:
    36.4  
    36.5          self.dtd = "/usr/lib/python/xen/xm/create.dtd"
    36.6  
    36.7 -    def create(self, filename=None, document=None):
    36.8 +    def create(self, filename=None, document=None, skipdtd=False):
    36.9          """
   36.10          Create a domain from an XML file or DOM tree
   36.11          """
   36.12 +        if skipdtd:
   36.13 +            print "Skipping DTD checks.  Dangerous!"
   36.14 +        
   36.15          if filename is not None:
   36.16 -            self.check_dtd(file)
   36.17 -            document = parse(file)
   36.18 +            if not skipdtd:
   36.19 +                self.check_dtd(filename)
   36.20 +            document = parse(filename)
   36.21          elif document is not None:
   36.22 -            self.check_dom_against_dtd(document)
   36.23 +            if not skipdtd:
   36.24 +                self.check_dom_against_dtd(document)
   36.25  
   36.26          self.check_doc(document)
   36.27  
    37.1 --- a/tools/xm-test/tests/destroy/06_destroy_dom0_neg.py	Tue Mar 27 12:21:48 2007 -0600
    37.2 +++ b/tools/xm-test/tests/destroy/06_destroy_dom0_neg.py	Wed Mar 28 10:38:41 2007 +0100
    37.3 @@ -10,5 +10,5 @@ from XmTestLib import *
    37.4  status, output = traceCommand("xm destroy 0")
    37.5  if status == 0:
    37.6      FAIL("xm destroy returned bad status, expected non 0, status is: %i" % status)
    37.7 -elif not re.search("Error", output):
    37.8 +elif not re.search("Error", output, re.I):
    37.9      FAIL("xm destroy returned bad output, expected Error:, output is: %s" % output)
    38.1 --- a/xen/acm/acm_policy.c	Tue Mar 27 12:21:48 2007 -0600
    38.2 +++ b/xen/acm/acm_policy.c	Wed Mar 28 10:38:41 2007 +0100
    38.3 @@ -62,6 +62,7 @@ int
    38.4  do_acm_set_policy(void *buf, u32 buf_size)
    38.5  {
    38.6      struct acm_policy_buffer *pol = (struct acm_policy_buffer *)buf;
    38.7 +    uint32_t offset, length;
    38.8      /* some sanity checking */
    38.9      if ((be32_to_cpu(pol->magic) != ACM_MAGIC) ||
   38.10          (buf_size != be32_to_cpu(pol->len)) ||
   38.11 @@ -92,22 +93,27 @@ do_acm_set_policy(void *buf, u32 buf_siz
   38.12      /* get bin_policy lock and rewrite policy (release old one) */
   38.13      write_lock(&acm_bin_pol_rwlock);
   38.14  
   38.15 +    offset = be32_to_cpu(pol->policy_reference_offset);
   38.16 +    length = be32_to_cpu(pol->primary_buffer_offset) - offset;
   38.17 +
   38.18      /* set label reference name */
   38.19 -    if (acm_set_policy_reference(buf + be32_to_cpu(pol->policy_reference_offset),
   38.20 -                                 be32_to_cpu(pol->primary_buffer_offset) -
   38.21 -                                 be32_to_cpu(pol->policy_reference_offset)))
   38.22 +    if ( (offset + length) > buf_size ||
   38.23 +         acm_set_policy_reference(buf + offset, length))
   38.24          goto error_lock_free;
   38.25  
   38.26      /* set primary policy data */
   38.27 -    if (acm_primary_ops->set_binary_policy(buf + be32_to_cpu(pol->primary_buffer_offset),
   38.28 -                                           be32_to_cpu(pol->secondary_buffer_offset) -
   38.29 -                                           be32_to_cpu(pol->primary_buffer_offset)))
   38.30 +    offset = be32_to_cpu(pol->primary_buffer_offset);
   38.31 +    length = be32_to_cpu(pol->secondary_buffer_offset) - offset;
   38.32 +
   38.33 +    if ( (offset + length) > buf_size ||
   38.34 +         acm_primary_ops->set_binary_policy(buf + offset, length))
   38.35          goto error_lock_free;
   38.36  
   38.37      /* set secondary policy data */
   38.38 -    if (acm_secondary_ops->set_binary_policy(buf + be32_to_cpu(pol->secondary_buffer_offset),
   38.39 -                                             be32_to_cpu(pol->len) - 
   38.40 -                                             be32_to_cpu(pol->secondary_buffer_offset)))
   38.41 +    offset = be32_to_cpu(pol->secondary_buffer_offset);
   38.42 +    length = be32_to_cpu(pol->len) - offset;
   38.43 +    if ( (offset + length) > buf_size ||
   38.44 +         acm_secondary_ops->set_binary_policy(buf + offset, length))
   38.45          goto error_lock_free;
   38.46  
   38.47      write_unlock(&acm_bin_pol_rwlock);
    39.1 --- a/xen/arch/ia64/asm-offsets.c	Tue Mar 27 12:21:48 2007 -0600
    39.2 +++ b/xen/arch/ia64/asm-offsets.c	Wed Mar 28 10:38:41 2007 +0100
    39.3 @@ -223,10 +223,11 @@ void foo(void)
    39.4  
    39.5  #ifdef PERF_COUNTERS
    39.6  	BLANK();
    39.7 -	DEFINE(RECOVER_TO_PAGE_FAULT_PERFC_OFS, offsetof (struct perfcounter, recover_to_page_fault));
    39.8 -	DEFINE(RECOVER_TO_BREAK_FAULT_PERFC_OFS, offsetof (struct perfcounter, recover_to_break_fault));
    39.9 -	DEFINE(FAST_HYPERPRIVOP_PERFC_OFS, offsetof (struct perfcounter, fast_hyperprivop));
   39.10 -	DEFINE(FAST_REFLECT_PERFC_OFS, offsetof (struct perfcounter, fast_reflect));
   39.11 +	DEFINE(IA64_PERFC_recover_to_page_fault, PERFC_recover_to_page_fault);
   39.12 +	DEFINE(IA64_PERFC_recover_to_break_fault, PERFC_recover_to_break_fault);
   39.13 +	DEFINE(IA64_PERFC_fast_vhpt_translate, PERFC_fast_vhpt_translate);
   39.14 +	DEFINE(IA64_PERFC_fast_hyperprivop, PERFC_fast_hyperprivop);
   39.15 +	DEFINE(IA64_PERFC_fast_reflect, PERFC_fast_reflect);
   39.16  #endif
   39.17  
   39.18  	BLANK();
    40.1 --- a/xen/arch/ia64/linux-xen/irq_ia64.c	Tue Mar 27 12:21:48 2007 -0600
    40.2 +++ b/xen/arch/ia64/linux-xen/irq_ia64.c	Wed Mar 28 10:38:41 2007 +0100
    40.3 @@ -113,7 +113,7 @@ ia64_handle_irq (ia64_vector vector, str
    40.4  	unsigned long saved_tpr;
    40.5  
    40.6  #ifdef XEN
    40.7 -	perfc_incrc(irqs);
    40.8 +	perfc_incr(irqs);
    40.9  #endif
   40.10  #if IRQ_DEBUG
   40.11  #ifdef XEN
    41.1 --- a/xen/arch/ia64/linux-xen/mca.c	Tue Mar 27 12:21:48 2007 -0600
    41.2 +++ b/xen/arch/ia64/linux-xen/mca.c	Wed Mar 28 10:38:41 2007 +0100
    41.3 @@ -397,16 +397,6 @@ ia64_log_queue(int sal_info_type, int vi
    41.4  
    41.5  #ifdef XEN
    41.6  /**
    41.7 - *	Copy from linux/include/asm-generic/bug.h
    41.8 - */
    41.9 -#define WARN_ON(condition) do { \
   41.10 -	if (unlikely((condition)!=0)) { \
   41.11 -		printk("Badness in %s at %s:%d\n", __FUNCTION__, __FILE__, __LINE__); \
   41.12 -		dump_stack(); \
   41.13 -	} \
   41.14 -} while (0)
   41.15 -
   41.16 -/**
   41.17   *	Copy from linux/kernel/irq/manage.c
   41.18   *
   41.19   *	disable_irq_nosync - disable an irq without waiting
    42.1 --- a/xen/arch/ia64/linux-xen/smp.c	Tue Mar 27 12:21:48 2007 -0600
    42.2 +++ b/xen/arch/ia64/linux-xen/smp.c	Wed Mar 28 10:38:41 2007 +0100
    42.3 @@ -148,7 +148,7 @@ handle_IPI (int irq, void *dev_id, struc
    42.4  	unsigned long ops;
    42.5  
    42.6  #ifdef XEN
    42.7 -	perfc_incrc(ipis);
    42.8 +	perfc_incr(ipis);
    42.9  #endif
   42.10  	mb();	/* Order interrupt and bit testing. */
   42.11  	while ((ops = xchg(pending_ipis, 0)) != 0) {
    43.1 --- a/xen/arch/ia64/vmx/pal_emul.c	Tue Mar 27 12:21:48 2007 -0600
    43.2 +++ b/xen/arch/ia64/vmx/pal_emul.c	Wed Mar 28 10:38:41 2007 +0100
    43.3 @@ -37,7 +37,7 @@ pal_emul(struct vcpu *vcpu)
    43.4  	vcpu_get_gr_nat(vcpu, 30, &gr30); 
    43.5  	vcpu_get_gr_nat(vcpu, 31, &gr31);
    43.6  
    43.7 -	perfc_incrc(vmx_pal_emul);
    43.8 +	perfc_incr(vmx_pal_emul);
    43.9  	result = xen_pal_emulator(gr28, gr29, gr30, gr31);
   43.10  
   43.11  	vcpu_set_gr(vcpu, 8, result.status, 0);
    44.1 --- a/xen/arch/ia64/vmx/vmx_process.c	Tue Mar 27 12:21:48 2007 -0600
    44.2 +++ b/xen/arch/ia64/vmx/vmx_process.c	Wed Mar 28 10:38:41 2007 +0100
    44.3 @@ -151,7 +151,7 @@ vmx_ia64_handle_break (unsigned long ifa
    44.4      struct domain *d = current->domain;
    44.5      struct vcpu *v = current;
    44.6  
    44.7 -    perfc_incrc(vmx_ia64_handle_break);
    44.8 +    perfc_incr(vmx_ia64_handle_break);
    44.9  #ifdef CRASH_DEBUG
   44.10      if ((iim == 0 || iim == CDB_BREAK_NUM) && !user_mode(regs) &&
   44.11          IS_VMM_ADDRESS(regs->cr_iip)) {
    45.1 --- a/xen/arch/ia64/vmx/vmx_virt.c	Tue Mar 27 12:21:48 2007 -0600
    45.2 +++ b/xen/arch/ia64/vmx/vmx_virt.c	Wed Mar 28 10:38:41 2007 +0100
    45.3 @@ -1401,159 +1401,159 @@ if ( (cause == 0xff && opcode == 0x1e000
    45.4  
    45.5      switch(cause) {
    45.6      case EVENT_RSM:
    45.7 -        perfc_incrc(vmx_rsm);
    45.8 +        perfc_incr(vmx_rsm);
    45.9          status=vmx_emul_rsm(vcpu, inst);
   45.10          break;
   45.11      case EVENT_SSM:
   45.12 -        perfc_incrc(vmx_ssm);
   45.13 +        perfc_incr(vmx_ssm);
   45.14          status=vmx_emul_ssm(vcpu, inst);
   45.15          break;
   45.16      case EVENT_MOV_TO_PSR:
   45.17 -        perfc_incrc(vmx_mov_to_psr);
   45.18 +        perfc_incr(vmx_mov_to_psr);
   45.19          status=vmx_emul_mov_to_psr(vcpu, inst);
   45.20          break;
   45.21      case EVENT_MOV_FROM_PSR:
   45.22 -        perfc_incrc(vmx_mov_from_psr);
   45.23 +        perfc_incr(vmx_mov_from_psr);
   45.24          status=vmx_emul_mov_from_psr(vcpu, inst);
   45.25          break;
   45.26      case EVENT_MOV_FROM_CR:
   45.27 -        perfc_incrc(vmx_mov_from_cr);
   45.28 +        perfc_incr(vmx_mov_from_cr);
   45.29          status=vmx_emul_mov_from_cr(vcpu, inst);
   45.30          break;
   45.31      case EVENT_MOV_TO_CR:
   45.32 -        perfc_incrc(vmx_mov_to_cr);
   45.33 +        perfc_incr(vmx_mov_to_cr);
   45.34          status=vmx_emul_mov_to_cr(vcpu, inst);
   45.35          break;
   45.36      case EVENT_BSW_0:
   45.37 -        perfc_incrc(vmx_bsw0);
   45.38 +        perfc_incr(vmx_bsw0);
   45.39          status=vmx_emul_bsw0(vcpu, inst);
   45.40          break;
   45.41      case EVENT_BSW_1:
   45.42 -        perfc_incrc(vmx_bsw1);
   45.43 +        perfc_incr(vmx_bsw1);
   45.44          status=vmx_emul_bsw1(vcpu, inst);
   45.45          break;
   45.46      case EVENT_COVER:
   45.47 -        perfc_incrc(vmx_cover);
   45.48 +        perfc_incr(vmx_cover);
   45.49          status=vmx_emul_cover(vcpu, inst);
   45.50          break;
   45.51      case EVENT_RFI:
   45.52 -        perfc_incrc(vmx_rfi);
   45.53 +        perfc_incr(vmx_rfi);
   45.54          status=vmx_emul_rfi(vcpu, inst);
   45.55          break;
   45.56      case EVENT_ITR_D:
   45.57 -        perfc_incrc(vmx_itr_d);
   45.58 +        perfc_incr(vmx_itr_d);
   45.59          status=vmx_emul_itr_d(vcpu, inst);
   45.60          break;
   45.61      case EVENT_ITR_I:
   45.62 -        perfc_incrc(vmx_itr_i);
   45.63 +        perfc_incr(vmx_itr_i);
   45.64          status=vmx_emul_itr_i(vcpu, inst);
   45.65          break;
   45.66      case EVENT_PTR_D:
   45.67 -        perfc_incrc(vmx_ptr_d);
   45.68 +        perfc_incr(vmx_ptr_d);
   45.69          status=vmx_emul_ptr_d(vcpu, inst);
   45.70          break;
   45.71      case EVENT_PTR_I:
   45.72 -        perfc_incrc(vmx_ptr_i);
   45.73 +        perfc_incr(vmx_ptr_i);
   45.74          status=vmx_emul_ptr_i(vcpu, inst);
   45.75          break;
   45.76      case EVENT_ITC_D:
   45.77 -        perfc_incrc(vmx_itc_d);
   45.78 +        perfc_incr(vmx_itc_d);
   45.79          status=vmx_emul_itc_d(vcpu, inst);
   45.80          break;
   45.81      case EVENT_ITC_I:
   45.82 -        perfc_incrc(vmx_itc_i);
   45.83 +        perfc_incr(vmx_itc_i);
   45.84          status=vmx_emul_itc_i(vcpu, inst);
   45.85          break;
   45.86      case EVENT_PTC_L:
   45.87 -        perfc_incrc(vmx_ptc_l);
   45.88 +        perfc_incr(vmx_ptc_l);
   45.89          status=vmx_emul_ptc_l(vcpu, inst);
   45.90          break;
   45.91      case EVENT_PTC_G:
   45.92 -        perfc_incrc(vmx_ptc_g);
   45.93 +        perfc_incr(vmx_ptc_g);
   45.94          status=vmx_emul_ptc_g(vcpu, inst);
   45.95          break;
   45.96      case EVENT_PTC_GA:
   45.97 -        perfc_incrc(vmx_ptc_ga);
   45.98 +        perfc_incr(vmx_ptc_ga);
   45.99          status=vmx_emul_ptc_ga(vcpu, inst);
  45.100          break;
  45.101      case EVENT_PTC_E:
  45.102 -        perfc_incrc(vmx_ptc_e);
  45.103 +        perfc_incr(vmx_ptc_e);
  45.104          status=vmx_emul_ptc_e(vcpu, inst);
  45.105          break;
  45.106      case EVENT_MOV_TO_RR:
  45.107 -        perfc_incrc(vmx_mov_to_rr);
  45.108 +        perfc_incr(vmx_mov_to_rr);
  45.109          status=vmx_emul_mov_to_rr(vcpu, inst);
  45.110          break;
  45.111      case EVENT_MOV_FROM_RR:
  45.112 -        perfc_incrc(vmx_mov_from_rr);
  45.113 +        perfc_incr(vmx_mov_from_rr);
  45.114          status=vmx_emul_mov_from_rr(vcpu, inst);
  45.115          break;
  45.116      case EVENT_THASH:
  45.117 -        perfc_incrc(vmx_thash);
  45.118 +        perfc_incr(vmx_thash);
  45.119          status=vmx_emul_thash(vcpu, inst);
  45.120          break;
  45.121      case EVENT_TTAG:
  45.122 -        perfc_incrc(vmx_ttag);
  45.123 +        perfc_incr(vmx_ttag);
  45.124          status=vmx_emul_ttag(vcpu, inst);
  45.125          break;
  45.126      case EVENT_TPA:
  45.127 -        perfc_incrc(vmx_tpa);
  45.128 +        perfc_incr(vmx_tpa);
  45.129          status=vmx_emul_tpa(vcpu, inst);
  45.130          break;
  45.131      case EVENT_TAK:
  45.132 -        perfc_incrc(vmx_tak);
  45.133 +        perfc_incr(vmx_tak);
  45.134          status=vmx_emul_tak(vcpu, inst);
  45.135          break;
  45.136      case EVENT_MOV_TO_AR_IMM:
  45.137 -        perfc_incrc(vmx_mov_to_ar_imm);
  45.138 +        perfc_incr(vmx_mov_to_ar_imm);
  45.139          status=vmx_emul_mov_to_ar_imm(vcpu, inst);
  45.140          break;
  45.141      case EVENT_MOV_TO_AR:
  45.142 -        perfc_incrc(vmx_mov_to_ar_reg);
  45.143 +        perfc_incr(vmx_mov_to_ar_reg);
  45.144          status=vmx_emul_mov_to_ar_reg(vcpu, inst);
  45.145          break;
  45.146      case EVENT_MOV_FROM_AR:
  45.147 -        perfc_incrc(vmx_mov_from_ar_reg);
  45.148 +        perfc_incr(vmx_mov_from_ar_reg);
  45.149          status=vmx_emul_mov_from_ar_reg(vcpu, inst);
  45.150          break;
  45.151      case EVENT_MOV_TO_DBR:
  45.152 -        perfc_incrc(vmx_mov_to_dbr);
  45.153 +        perfc_incr(vmx_mov_to_dbr);
  45.154          status=vmx_emul_mov_to_dbr(vcpu, inst);
  45.155          break;
  45.156      case EVENT_MOV_TO_IBR:
  45.157 -        perfc_incrc(vmx_mov_to_ibr);
  45.158 +        perfc_incr(vmx_mov_to_ibr);
  45.159          status=vmx_emul_mov_to_ibr(vcpu, inst);
  45.160          break;
  45.161      case EVENT_MOV_TO_PMC:
  45.162 -        perfc_incrc(vmx_mov_to_pmc);
  45.163 +        perfc_incr(vmx_mov_to_pmc);
  45.164          status=vmx_emul_mov_to_pmc(vcpu, inst);
  45.165          break;
  45.166      case EVENT_MOV_TO_PMD:
  45.167 -        perfc_incrc(vmx_mov_to_pmd);
  45.168 +        perfc_incr(vmx_mov_to_pmd);
  45.169          status=vmx_emul_mov_to_pmd(vcpu, inst);
  45.170          break;
  45.171      case EVENT_MOV_TO_PKR:
  45.172 -        perfc_incrc(vmx_mov_to_pkr);
  45.173 +        perfc_incr(vmx_mov_to_pkr);
  45.174          status=vmx_emul_mov_to_pkr(vcpu, inst);
  45.175          break;
  45.176      case EVENT_MOV_FROM_DBR:
  45.177 -        perfc_incrc(vmx_mov_from_dbr);
  45.178 +        perfc_incr(vmx_mov_from_dbr);
  45.179          status=vmx_emul_mov_from_dbr(vcpu, inst);
  45.180          break;
  45.181      case EVENT_MOV_FROM_IBR:
  45.182 -        perfc_incrc(vmx_mov_from_ibr);
  45.183 +        perfc_incr(vmx_mov_from_ibr);
  45.184          status=vmx_emul_mov_from_ibr(vcpu, inst);
  45.185          break;
  45.186      case EVENT_MOV_FROM_PMC:
  45.187 -        perfc_incrc(vmx_mov_from_pmc);
  45.188 +        perfc_incr(vmx_mov_from_pmc);
  45.189          status=vmx_emul_mov_from_pmc(vcpu, inst);
  45.190          break;
  45.191      case EVENT_MOV_FROM_PKR:
  45.192 -        perfc_incrc(vmx_mov_from_pkr);
  45.193 +        perfc_incr(vmx_mov_from_pkr);
  45.194          status=vmx_emul_mov_from_pkr(vcpu, inst);
  45.195          break;
  45.196      case EVENT_MOV_FROM_CPUID:
  45.197 -        perfc_incrc(vmx_mov_from_cpuid);
  45.198 +        perfc_incr(vmx_mov_from_cpuid);
  45.199          status=vmx_emul_mov_from_cpuid(vcpu, inst);
  45.200          break;
  45.201      case EVENT_VMSW:
    46.1 --- a/xen/arch/ia64/xen/dom0_ops.c	Tue Mar 27 12:21:48 2007 -0600
    46.2 +++ b/xen/arch/ia64/xen/dom0_ops.c	Wed Mar 28 10:38:41 2007 +0100
    46.3 @@ -372,7 +372,7 @@ do_dom0vp_op(unsigned long cmd,
    46.4          } else {
    46.5              ret = (ret & _PFN_MASK) >> PAGE_SHIFT;//XXX pte_pfn()
    46.6          }
    46.7 -        perfc_incrc(dom0vp_phystomach);
    46.8 +        perfc_incr(dom0vp_phystomach);
    46.9          break;
   46.10      case IA64_DOM0VP_machtophys:
   46.11          if (!mfn_valid(arg0)) {
   46.12 @@ -380,7 +380,7 @@ do_dom0vp_op(unsigned long cmd,
   46.13              break;
   46.14          }
   46.15          ret = get_gpfn_from_mfn(arg0);
   46.16 -        perfc_incrc(dom0vp_machtophys);
   46.17 +        perfc_incr(dom0vp_machtophys);
   46.18          break;
   46.19      case IA64_DOM0VP_zap_physmap:
   46.20          ret = dom0vp_zap_physmap(d, arg0, (unsigned int)arg1);
    47.1 --- a/xen/arch/ia64/xen/domain.c	Tue Mar 27 12:21:48 2007 -0600
    47.2 +++ b/xen/arch/ia64/xen/domain.c	Wed Mar 28 10:38:41 2007 +0100
    47.3 @@ -131,11 +131,11 @@ static void flush_vtlb_for_context_switc
    47.4  		if (vhpt_is_flushed || NEED_FLUSH(__get_cpu_var(tlbflush_time),
    47.5  		                                  last_tlbflush_timestamp)) {
    47.6  			local_flush_tlb_all();
    47.7 -			perfc_incrc(tlbflush_clock_cswitch_purge);
    47.8 +			perfc_incr(tlbflush_clock_cswitch_purge);
    47.9  		} else {
   47.10 -			perfc_incrc(tlbflush_clock_cswitch_skip);
   47.11 +			perfc_incr(tlbflush_clock_cswitch_skip);
   47.12  		}
   47.13 -		perfc_incrc(flush_vtlb_for_context_switch);
   47.14 +		perfc_incr(flush_vtlb_for_context_switch);
   47.15  	}
   47.16  }
   47.17  
    48.1 --- a/xen/arch/ia64/xen/faults.c	Tue Mar 27 12:21:48 2007 -0600
    48.2 +++ b/xen/arch/ia64/xen/faults.c	Wed Mar 28 10:38:41 2007 +0100
    48.3 @@ -187,7 +187,7 @@ static int handle_lazy_cover(struct vcpu
    48.4  	if (!PSCB(v, interrupt_collection_enabled)) {
    48.5  		PSCB(v, ifs) = regs->cr_ifs;
    48.6  		regs->cr_ifs = 0;
    48.7 -		perfc_incrc(lazy_cover);
    48.8 +		perfc_incr(lazy_cover);
    48.9  		return 1;	// retry same instruction with cr.ifs off
   48.10  	}
   48.11  	return 0;
    49.1 --- a/xen/arch/ia64/xen/hypercall.c	Tue Mar 27 12:21:48 2007 -0600
    49.2 +++ b/xen/arch/ia64/xen/hypercall.c	Wed Mar 28 10:38:41 2007 +0100
    49.3 @@ -161,7 +161,7 @@ ia64_hypercall(struct pt_regs *regs)
    49.4  		if (regs->r28 == PAL_HALT_LIGHT) {
    49.5  			if (vcpu_deliverable_interrupts(v) ||
    49.6  				event_pending(v)) {
    49.7 -				perfc_incrc(idle_when_pending);
    49.8 +				perfc_incr(idle_when_pending);
    49.9  				vcpu_pend_unspecified_interrupt(v);
   49.10  //printk("idle w/int#%d pending!\n",pi);
   49.11  //this shouldn't happen, but it apparently does quite a bit!  so don't
   49.12 @@ -170,7 +170,7 @@ ia64_hypercall(struct pt_regs *regs)
   49.13  //as deliver_pending_interrupt is called on the way out and will deliver it
   49.14  			}
   49.15  			else {
   49.16 -				perfc_incrc(pal_halt_light);
   49.17 +				perfc_incr(pal_halt_light);
   49.18  				migrate_timer(&v->arch.hlt_timer,
   49.19  				              v->processor);
   49.20  				set_timer(&v->arch.hlt_timer,
    50.1 --- a/xen/arch/ia64/xen/hyperprivop.S	Tue Mar 27 12:21:48 2007 -0600
    50.2 +++ b/xen/arch/ia64/xen/hyperprivop.S	Wed Mar 28 10:38:41 2007 +0100
    50.3 @@ -26,8 +26,7 @@
    50.4  # define FAST_HYPERPRIVOPS
    50.5  # ifdef PERF_COUNTERS
    50.6  #  define FAST_HYPERPRIVOP_CNT
    50.7 -#  define FAST_HYPERPRIVOP_PERFC(N) \
    50.8 -	(perfcounters + FAST_HYPERPRIVOP_PERFC_OFS + (4 * N))
    50.9 +#  define FAST_HYPERPRIVOP_PERFC(N) PERFC(fast_hyperprivop + N)
   50.10  #  define FAST_REFLECT_CNT
   50.11  # endif
   50.12  	
   50.13 @@ -364,7 +363,7 @@ GLOBAL_ENTRY(fast_tick_reflect)
   50.14  	mov rp=r29;;
   50.15  	mov cr.itm=r26;;	// ensure next tick
   50.16  #ifdef FAST_REFLECT_CNT
   50.17 -	movl r20=perfcounters+FAST_REFLECT_PERFC_OFS+((0x3000>>8)*4);;
   50.18 +	movl r20=PERFC(fast_reflect + (0x3000>>8));;
   50.19  	ld4 r21=[r20];;
   50.20  	adds r21=1,r21;;
   50.21  	st4 [r20]=r21;;
   50.22 @@ -597,7 +596,7 @@ END(fast_break_reflect)
   50.23  //	r31 == pr
   50.24  ENTRY(fast_reflect)
   50.25  #ifdef FAST_REFLECT_CNT
   50.26 -	movl r22=perfcounters+FAST_REFLECT_PERFC_OFS;
   50.27 +	movl r22=PERFC(fast_reflect);
   50.28  	shr r23=r20,8-2;;
   50.29  	add r22=r22,r23;;
   50.30  	ld4 r21=[r22];;
   50.31 @@ -938,7 +937,7 @@ 1:	// check the guest VHPT
   50.32  (p7)	br.cond.spnt.few page_not_present;;
   50.33  
   50.34  #ifdef FAST_REFLECT_CNT
   50.35 -	movl r21=perfcounter+FAST_VHPT_TRANSLATE_PERFC_OFS;;
   50.36 +	movl r21=PERFC(fast_vhpt_translate);;
   50.37  	ld4 r22=[r21];;
   50.38  	adds r22=1,r22;;
   50.39  	st4 [r21]=r22;;
   50.40 @@ -968,7 +967,7 @@ END(fast_tlb_miss_reflect)
   50.41  // we get here if fast_insert fails (e.g. due to metaphysical lookup)
   50.42  ENTRY(recover_and_page_fault)
   50.43  #ifdef PERF_COUNTERS
   50.44 -	movl r21=perfcounters + RECOVER_TO_PAGE_FAULT_PERFC_OFS;;
   50.45 +	movl r21=PERFC(recover_to_page_fault);;
   50.46  	ld4 r22=[r21];;
   50.47  	adds r22=1,r22;;
   50.48  	st4 [r21]=r22;;
   50.49 @@ -1832,7 +1831,7 @@ END(hyper_ptc_ga)
   50.50  // recovery block for hyper_itc metaphysical memory lookup
   50.51  ENTRY(recover_and_dispatch_break_fault)
   50.52  #ifdef PERF_COUNTERS
   50.53 -	movl r21=perfcounters + RECOVER_TO_BREAK_FAULT_PERFC_OFS;;
   50.54 +	movl r21=PERFC(recover_to_break_fault);;
   50.55  	ld4 r22=[r21];;
   50.56  	adds r22=1,r22;;
   50.57  	st4 [r21]=r22;;
    51.1 --- a/xen/arch/ia64/xen/mm.c	Tue Mar 27 12:21:48 2007 -0600
    51.2 +++ b/xen/arch/ia64/xen/mm.c	Wed Mar 28 10:38:41 2007 +0100
    51.3 @@ -1131,7 +1131,7 @@ assign_domain_page_replace(struct domain
    51.4              domain_put_page(d, mpaddr, pte, old_pte, 1);
    51.5          }
    51.6      }
    51.7 -    perfc_incrc(assign_domain_page_replace);
    51.8 +    perfc_incr(assign_domain_page_replace);
    51.9  }
   51.10  
   51.11  // caller must get_page(new_page) before
   51.12 @@ -1207,7 +1207,7 @@ assign_domain_page_cmpxchg_rel(struct do
   51.13      }
   51.14  
   51.15      domain_page_flush_and_put(d, mpaddr, pte, old_pte, old_page);
   51.16 -    perfc_incrc(assign_domain_pge_cmpxchg_rel);
   51.17 +    perfc_incr(assign_domain_pge_cmpxchg_rel);
   51.18      return 0;
   51.19  }
   51.20  
   51.21 @@ -1266,7 +1266,7 @@ zap_domain_page_one(struct domain *d, un
   51.22  
   51.23      BUG_ON(clear_PGC_allocate && (page_get_owner(page) == NULL));
   51.24      domain_put_page(d, mpaddr, pte, old_pte, clear_PGC_allocate);
   51.25 -    perfc_incrc(zap_dcomain_page_one);
   51.26 +    perfc_incr(zap_dcomain_page_one);
   51.27  }
   51.28  
   51.29  unsigned long
   51.30 @@ -1279,7 +1279,7 @@ dom0vp_zap_physmap(struct domain *d, uns
   51.31      }
   51.32  
   51.33      zap_domain_page_one(d, gpfn << PAGE_SHIFT, 1, INVALID_MFN);
   51.34 -    perfc_incrc(dom0vp_zap_physmap);
   51.35 +    perfc_incr(dom0vp_zap_physmap);
   51.36      return 0;
   51.37  }
   51.38  
   51.39 @@ -1333,7 +1333,7 @@ static unsigned long
   51.40             get_gpfn_from_mfn(mfn) != INVALID_M2P_ENTRY);
   51.41      assign_domain_page_replace(d, gpfn << PAGE_SHIFT, mfn, flags);
   51.42      //don't update p2m table because this page belongs to rd, not d.
   51.43 -    perfc_incrc(dom0vp_add_physmap);
   51.44 +    perfc_incr(dom0vp_add_physmap);
   51.45  out1:
   51.46      put_domain(rd);
   51.47      return error;
   51.48 @@ -1503,7 +1503,7 @@ create_grant_host_mapping(unsigned long 
   51.49  #endif
   51.50                                 ((flags & GNTMAP_readonly) ?
   51.51                                  ASSIGN_readonly : ASSIGN_writable));
   51.52 -    perfc_incrc(create_grant_host_mapping);
   51.53 +    perfc_incr(create_grant_host_mapping);
   51.54      return GNTST_okay;
   51.55  }
   51.56  
   51.57 @@ -1568,7 +1568,7 @@ destroy_grant_host_mapping(unsigned long
   51.58      BUG_ON(pte_pgc_allocated(old_pte));
   51.59      domain_page_flush_and_put(d, gpaddr, pte, old_pte, page);
   51.60  
   51.61 -    perfc_incrc(destroy_grant_host_mapping);
   51.62 +    perfc_incr(destroy_grant_host_mapping);
   51.63      return GNTST_okay;
   51.64  }
   51.65  
   51.66 @@ -1629,7 +1629,7 @@ steal_page(struct domain *d, struct page
   51.67              free_domheap_page(new);
   51.68              return -1;
   51.69          }
   51.70 -        perfc_incrc(steal_page_refcount);
   51.71 +        perfc_incr(steal_page_refcount);
   51.72      }
   51.73  
   51.74      spin_lock(&d->page_alloc_lock);
   51.75 @@ -1693,7 +1693,7 @@ steal_page(struct domain *d, struct page
   51.76      list_del(&page->list);
   51.77  
   51.78      spin_unlock(&d->page_alloc_lock);
   51.79 -    perfc_incrc(steal_page);
   51.80 +    perfc_incr(steal_page);
   51.81      return 0;
   51.82  }
   51.83  
   51.84 @@ -1710,7 +1710,7 @@ guest_physmap_add_page(struct domain *d,
   51.85  
   51.86      //BUG_ON(mfn != ((lookup_domain_mpa(d, gpfn << PAGE_SHIFT) & _PFN_MASK) >> PAGE_SHIFT));
   51.87  
   51.88 -    perfc_incrc(guest_physmap_add_page);
   51.89 +    perfc_incr(guest_physmap_add_page);
   51.90  }
   51.91  
   51.92  void
   51.93 @@ -1719,7 +1719,7 @@ guest_physmap_remove_page(struct domain 
   51.94  {
   51.95      BUG_ON(mfn == 0);//XXX
   51.96      zap_domain_page_one(d, gpfn << PAGE_SHIFT, 0, mfn);
   51.97 -    perfc_incrc(guest_physmap_remove_page);
   51.98 +    perfc_incr(guest_physmap_remove_page);
   51.99  }
  51.100  
  51.101  static void
  51.102 @@ -1799,7 +1799,7 @@ domain_page_flush_and_put(struct domain*
  51.103          break;
  51.104      }
  51.105  #endif
  51.106 -    perfc_incrc(domain_page_flush_and_put);
  51.107 +    perfc_incr(domain_page_flush_and_put);
  51.108  }
  51.109  
  51.110  int
  51.111 @@ -1996,7 +1996,7 @@ int get_page_type(struct page_info *page
  51.112  
  51.113                  if ( unlikely(!cpus_empty(mask)) )
  51.114                  {
  51.115 -                    perfc_incrc(need_flush_tlb_flush);
  51.116 +                    perfc_incr(need_flush_tlb_flush);
  51.117                      flush_tlb_mask(mask);
  51.118                  }
  51.119  
    52.1 --- a/xen/arch/ia64/xen/privop.c	Tue Mar 27 12:21:48 2007 -0600
    52.2 +++ b/xen/arch/ia64/xen/privop.c	Wed Mar 28 10:38:41 2007 +0100
    52.3 @@ -641,15 +641,15 @@ static IA64FAULT priv_handle_op(VCPU * v
    52.4  			if (inst.M29.x3 != 0)
    52.5  				break;
    52.6  			if (inst.M30.x4 == 8 && inst.M30.x2 == 2) {
    52.7 -				perfc_incrc(mov_to_ar_imm);
    52.8 +				perfc_incr(mov_to_ar_imm);
    52.9  				return priv_mov_to_ar_imm(vcpu, inst);
   52.10  			}
   52.11  			if (inst.M44.x4 == 6) {
   52.12 -				perfc_incrc(ssm);
   52.13 +				perfc_incr(ssm);
   52.14  				return priv_ssm(vcpu, inst);
   52.15  			}
   52.16  			if (inst.M44.x4 == 7) {
   52.17 -				perfc_incrc(rsm);
   52.18 +				perfc_incr(rsm);
   52.19  				return priv_rsm(vcpu, inst);
   52.20  			}
   52.21  			break;
   52.22 @@ -658,9 +658,9 @@ static IA64FAULT priv_handle_op(VCPU * v
   52.23  		x6 = inst.M29.x6;
   52.24  		if (x6 == 0x2a) {
   52.25  			if (privify_en && inst.M29.r2 > 63 && inst.M29.ar3 < 8)
   52.26 -				perfc_incrc(mov_from_ar); // privified mov from kr
   52.27 +				perfc_incr(mov_from_ar); // privified mov from kr
   52.28  			else
   52.29 -				perfc_incrc(mov_to_ar_reg);
   52.30 +				perfc_incr(mov_to_ar_reg);
   52.31  			return priv_mov_to_ar_reg(vcpu, inst);
   52.32  		}
   52.33  		if (inst.M29.x3 != 0)
   52.34 @@ -676,9 +676,9 @@ static IA64FAULT priv_handle_op(VCPU * v
   52.35  			}
   52.36  		}
   52.37  		if (privify_en && x6 == 52 && inst.M28.r3 > 63)
   52.38 -			perfc_incrc(fc);
   52.39 +			perfc_incr(fc);
   52.40  		else if (privify_en && x6 == 16 && inst.M43.r3 > 63)
   52.41 -			perfc_incrc(cpuid);
   52.42 +			perfc_incr(cpuid);
   52.43  		else
   52.44  			perfc_incra(misc_privop, x6);
   52.45  		return (*pfunc) (vcpu, inst);
   52.46 @@ -688,23 +688,23 @@ static IA64FAULT priv_handle_op(VCPU * v
   52.47  			break;
   52.48  		if (inst.B8.x6 == 0x08) {
   52.49  			IA64FAULT fault;
   52.50 -			perfc_incrc(rfi);
   52.51 +			perfc_incr(rfi);
   52.52  			fault = priv_rfi(vcpu, inst);
   52.53  			if (fault == IA64_NO_FAULT)
   52.54  				fault = IA64_RFI_IN_PROGRESS;
   52.55  			return fault;
   52.56  		}
   52.57  		if (inst.B8.x6 == 0x0c) {
   52.58 -			perfc_incrc(bsw0);
   52.59 +			perfc_incr(bsw0);
   52.60  			return priv_bsw0(vcpu, inst);
   52.61  		}
   52.62  		if (inst.B8.x6 == 0x0d) {
   52.63 -			perfc_incrc(bsw1);
   52.64 +			perfc_incr(bsw1);
   52.65  			return priv_bsw1(vcpu, inst);
   52.66  		}
   52.67  		if (inst.B8.x6 == 0x0) {
   52.68  			// break instr for privified cover
   52.69 -			perfc_incrc(cover);
   52.70 +			perfc_incr(cover);
   52.71  			return priv_cover(vcpu, inst);
   52.72  		}
   52.73  		break;
   52.74 @@ -713,7 +713,7 @@ static IA64FAULT priv_handle_op(VCPU * v
   52.75  			break;
   52.76  #if 0
   52.77  		if (inst.I26.x6 == 0 && inst.I26.x3 == 0) {
   52.78 -			perfc_incrc(cover);
   52.79 +			perfc_incr(cover);
   52.80  			return priv_cover(vcpu, inst);
   52.81  		}
   52.82  #endif
   52.83 @@ -721,13 +721,13 @@ static IA64FAULT priv_handle_op(VCPU * v
   52.84  			break;	// I26.x3 == I27.x3
   52.85  		if (inst.I26.x6 == 0x2a) {
   52.86  			if (privify_en && inst.I26.r2 > 63 && inst.I26.ar3 < 8)
   52.87 -				perfc_incrc(mov_from_ar);	// privified mov from kr
   52.88 +				perfc_incr(mov_from_ar);	// privified mov from kr
   52.89  			else
   52.90 -				perfc_incrc(mov_to_ar_reg);
   52.91 +				perfc_incr(mov_to_ar_reg);
   52.92  			return priv_mov_to_ar_reg(vcpu, inst);
   52.93  		}
   52.94  		if (inst.I27.x6 == 0x0a) {
   52.95 -			perfc_incrc(mov_to_ar_imm);
   52.96 +			perfc_incr(mov_to_ar_imm);
   52.97  			return priv_mov_to_ar_imm(vcpu, inst);
   52.98  		}
   52.99  		break;
    53.1 --- a/xen/arch/ia64/xen/privop_stat.c	Tue Mar 27 12:21:48 2007 -0600
    53.2 +++ b/xen/arch/ia64/xen/privop_stat.c	Wed Mar 28 10:38:41 2007 +0100
    53.3 @@ -10,48 +10,39 @@ struct privop_addr_count {
    53.4  	unsigned long addr[PRIVOP_COUNT_NADDRS];
    53.5  	unsigned int count[PRIVOP_COUNT_NADDRS];
    53.6  	unsigned int overflow;
    53.7 -	atomic_t *perfc_addr;
    53.8 -	atomic_t *perfc_count;
    53.9 -	atomic_t *perfc_overflow;
   53.10  };
   53.11  
   53.12 -#undef  PERFCOUNTER
   53.13 -#define PERFCOUNTER(var, name)
   53.14 +struct privop_addr_info {
   53.15 +	enum perfcounter perfc_addr;
   53.16 +	enum perfcounter perfc_count;
   53.17 +	enum perfcounter perfc_overflow;
   53.18 +};
   53.19  
   53.20 -#undef  PERFCOUNTER_CPU
   53.21 -#define PERFCOUNTER_CPU(var, name)
   53.22 -
   53.23 -#undef  PERFCOUNTER_ARRAY
   53.24 +#define PERFCOUNTER(var, name)
   53.25  #define PERFCOUNTER_ARRAY(var, name, size)
   53.26  
   53.27 -#undef  PERFSTATUS
   53.28  #define PERFSTATUS(var, name)
   53.29 -
   53.30 -#undef  PERFSTATUS_CPU
   53.31 -#define PERFSTATUS_CPU(var, name)
   53.32 -
   53.33 -#undef  PERFSTATUS_ARRAY
   53.34  #define PERFSTATUS_ARRAY(var, name, size)
   53.35  
   53.36 -#undef PERFPRIVOPADDR
   53.37  #define PERFPRIVOPADDR(name)                        \
   53.38      {                                               \
   53.39 -        { 0 }, { 0 }, 0,                            \
   53.40 -        perfcounters.privop_addr_##name##_addr,     \
   53.41 -        perfcounters.privop_addr_##name##_count,    \
   53.42 -        perfcounters.privop_addr_##name##_overflow  \
   53.43 +        PERFC_privop_addr_##name##_addr,            \
   53.44 +        PERFC_privop_addr_##name##_count,           \
   53.45 +        PERFC_privop_addr_##name##_overflow         \
   53.46      },
   53.47  
   53.48 -static struct privop_addr_count privop_addr_counter[] = {
   53.49 +static const struct privop_addr_info privop_addr_info[] = {
   53.50  #include <asm/perfc_defn.h>
   53.51  };
   53.52  
   53.53  #define PRIVOP_COUNT_NINSTS \
   53.54 -        (sizeof(privop_addr_counter) / sizeof(privop_addr_counter[0]))
   53.55 +        (sizeof(privop_addr_info) / sizeof(privop_addr_info[0]))
   53.56 +
   53.57 +static DEFINE_PER_CPU(struct privop_addr_count[PRIVOP_COUNT_NINSTS], privop_addr_counter);
   53.58  
   53.59  void privop_count_addr(unsigned long iip, enum privop_inst inst)
   53.60  {
   53.61 -	struct privop_addr_count *v = &privop_addr_counter[inst];
   53.62 +	struct privop_addr_count *v = this_cpu(privop_addr_counter) + inst;
   53.63  	int i;
   53.64  
   53.65  	if (inst >= PRIVOP_COUNT_NINSTS)
   53.66 @@ -72,31 +63,44 @@ void privop_count_addr(unsigned long iip
   53.67  
   53.68  void gather_privop_addrs(void)
   53.69  {
   53.70 -	int i, j;
   53.71 -	atomic_t *v;
   53.72 -	for (i = 0; i < PRIVOP_COUNT_NINSTS; i++) {
   53.73 -		/* Note: addresses are truncated!  */
   53.74 -		v = privop_addr_counter[i].perfc_addr;
   53.75 -		for (j = 0; j < PRIVOP_COUNT_NADDRS; j++)
   53.76 -			atomic_set(&v[j], privop_addr_counter[i].addr[j]);
   53.77 +	unsigned int cpu;
   53.78 +
   53.79 +	for_each_cpu ( cpu ) {
   53.80 +		perfc_t *perfcounters = per_cpu(perfcounters, cpu);
   53.81 +		struct privop_addr_count *s = per_cpu(privop_addr_counter, cpu);
   53.82 +		int i, j;
   53.83 +
   53.84 +		for (i = 0; i < PRIVOP_COUNT_NINSTS; i++, s++) {
   53.85 +			perfc_t *d;
   53.86  
   53.87 -		v = privop_addr_counter[i].perfc_count;
   53.88 -		for (j = 0; j < PRIVOP_COUNT_NADDRS; j++)
   53.89 -			atomic_set(&v[j], privop_addr_counter[i].count[j]);
   53.90 +			/* Note: addresses are truncated!  */
   53.91 +			d = perfcounters + privop_addr_info[i].perfc_addr;
   53.92 +			for (j = 0; j < PRIVOP_COUNT_NADDRS; j++)
   53.93 +				d[j] = s->addr[j];
   53.94 +
   53.95 +			d = perfcounters + privop_addr_info[i].perfc_count;
   53.96 +			for (j = 0; j < PRIVOP_COUNT_NADDRS; j++)
   53.97 +				d[j] = s->count[j];
   53.98  		
   53.99 -		atomic_set(privop_addr_counter[i].perfc_overflow,
  53.100 -		           privop_addr_counter[i].overflow);
  53.101 +			perfcounters[privop_addr_info[i].perfc_overflow] =
  53.102 +				s->overflow;
  53.103 +		}
  53.104  	}
  53.105  }
  53.106  
  53.107  void reset_privop_addrs(void)
  53.108  {
  53.109 -	int i, j;
  53.110 -	for (i = 0; i < PRIVOP_COUNT_NINSTS; i++) {
  53.111 -		struct privop_addr_count *v = &privop_addr_counter[i];
  53.112 -		for (j = 0; j < PRIVOP_COUNT_NADDRS; j++)
  53.113 -			v->addr[j] = v->count[j] = 0;
  53.114 -		v->overflow = 0;
  53.115 +	unsigned int cpu;
  53.116 +
  53.117 +	for_each_cpu ( cpu ) {
  53.118 +		struct privop_addr_count *v = per_cpu(privop_addr_counter, cpu);
  53.119 +		int i, j;
  53.120 +
  53.121 +		for (i = 0; i < PRIVOP_COUNT_NINSTS; i++, v++) {
  53.122 +			for (j = 0; j < PRIVOP_COUNT_NADDRS; j++)
  53.123 +				v->addr[j] = v->count[j] = 0;
  53.124 +			v->overflow = 0;
  53.125 +		}
  53.126  	}
  53.127  }
  53.128  #endif
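The privop_stat.c hunk above drops the atomic perfcounter pointers from struct privop_addr_count and moves the sampling state to per-CPU storage: the fast path touches only this_cpu(privop_addr_counter), while gather_privop_addrs() and reset_privop_addrs() walk for_each_cpu to publish or clear each CPU's counters. The standalone sketch below is illustrative only (not Xen code): plain arrays indexed by a cpu id stand in for the per-CPU machinery, and NR_CPUS/NINSTS and the explicit cpu ids are assumptions made purely for the example.

/* Standalone sketch of the per-CPU counter split used above: each CPU
 * updates only its own scratch counters on the fast path, and a gather
 * step later copies them into that CPU's published slots (standing in
 * for per_cpu(perfcounters, cpu)). */
#include <stdio.h>

#define NR_CPUS 4
#define NINSTS  2

static unsigned int scratch[NR_CPUS][NINSTS];    /* cf. privop_addr_counter  */
static unsigned int published[NR_CPUS][NINSTS];  /* cf. per_cpu(perfcounters) */

static void count_on_cpu(int cpu, int inst)
{
    scratch[cpu][inst]++;            /* cf. this_cpu(privop_addr_counter) */
}

static void gather(void)
{
    for (int cpu = 0; cpu < NR_CPUS; cpu++)       /* cf. for_each_cpu(cpu) */
        for (int i = 0; i < NINSTS; i++)
            published[cpu][i] = scratch[cpu][i];
}

int main(void)
{
    count_on_cpu(0, 1);
    count_on_cpu(3, 1);
    count_on_cpu(2, 0);
    gather();
    for (int cpu = 0; cpu < NR_CPUS; cpu++)
        printf("cpu %d: inst0=%u inst1=%u\n",
               cpu, published[cpu][0], published[cpu][1]);
    return 0;
}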
    54.1 --- a/xen/arch/ia64/xen/tlb_track.c	Tue Mar 27 12:21:48 2007 -0600
    54.2 +++ b/xen/arch/ia64/xen/tlb_track.c	Wed Mar 28 10:38:41 2007 +0100
    54.3 @@ -216,14 +216,14 @@ tlb_track_insert_or_dirty(struct tlb_tra
    54.4      TLB_TRACK_RET_T ret = TLB_TRACK_NOT_FOUND;
    54.5  
    54.6  #if 0 /* this is done at vcpu_tlb_track_insert_or_dirty() */
    54.7 -    perfc_incrc(tlb_track_iod);
    54.8 +    perfc_incr(tlb_track_iod);
    54.9      if (!pte_tlb_tracking(old_pte)) {
   54.10 -        perfc_incrc(tlb_track_iod_not_tracked);
   54.11 +        perfc_incr(tlb_track_iod_not_tracked);
   54.12          return TLB_TRACK_NOT_TRACKED;
   54.13      }
   54.14  #endif
   54.15      if (pte_tlb_inserted_many(old_pte)) {
   54.16 -        perfc_incrc(tlb_track_iod_tracked_many);
   54.17 +        perfc_incr(tlb_track_iod_tracked_many);
   54.18          return TLB_TRACK_MANY;
   54.19      }
   54.20  
   54.21 @@ -260,7 +260,7 @@ tlb_track_insert_or_dirty(struct tlb_tra
   54.22              if (entry->vaddr == vaddr && entry->rid == rid) {
   54.23                  // tlb_track_printd("TLB_TRACK_FOUND\n");
   54.24                  ret = TLB_TRACK_FOUND;
   54.25 -                perfc_incrc(tlb_track_iod_found);
   54.26 +                perfc_incr(tlb_track_iod_found);
   54.27  #ifdef CONFIG_TLB_TRACK_CNT
   54.28                  entry->cnt++;
   54.29                  if (entry->cnt > TLB_TRACK_CNT_FORCE_MANY) {
   54.30 @@ -276,7 +276,7 @@ tlb_track_insert_or_dirty(struct tlb_tra
   54.31                       */
   54.32                       // tlb_track_entry_printf(entry);
   54.33                       // tlb_track_printd("cnt = %ld\n", entry->cnt);
   54.34 -                    perfc_incrc(tlb_track_iod_force_many);
   54.35 +                    perfc_incr(tlb_track_iod_force_many);
   54.36                      goto force_many;
   54.37                  }
   54.38  #endif
   54.39 @@ -294,14 +294,14 @@ tlb_track_insert_or_dirty(struct tlb_tra
   54.40                  if (pte_val(ret_pte) != pte_val(old_pte)) {
   54.41                      // tlb_track_printd("TLB_TRACK_AGAIN\n");
   54.42                      ret = TLB_TRACK_AGAIN;
   54.43 -                    perfc_incrc(tlb_track_iod_again);
   54.44 +                    perfc_incr(tlb_track_iod_again);
   54.45                  } else {
   54.46                      // tlb_track_printd("TLB_TRACK_MANY del entry 0x%p\n",
   54.47                      //                  entry);
   54.48                      ret = TLB_TRACK_MANY;
   54.49                      list_del(&entry->list);
   54.50                      // tlb_track_entry_printf(entry);
   54.51 -                    perfc_incrc(tlb_track_iod_tracked_many_del);
   54.52 +                    perfc_incr(tlb_track_iod_tracked_many_del);
   54.53                  }
   54.54                  goto out;
   54.55              }
   54.56 @@ -314,7 +314,7 @@ tlb_track_insert_or_dirty(struct tlb_tra
   54.57           */
   54.58          // tlb_track_printd("TLB_TRACK_AGAIN\n");
   54.59          ret = TLB_TRACK_AGAIN;
   54.60 -        perfc_incrc(tlb_track_iod_again);
   54.61 +        perfc_incr(tlb_track_iod_again);
   54.62          goto out;
   54.63      }
   54.64  
   54.65 @@ -323,7 +323,7 @@ tlb_track_insert_or_dirty(struct tlb_tra
   54.66          /* Other thread else removed the tlb_track_entry after we got old_pte
   54.67             before we got spin lock. */
   54.68          ret = TLB_TRACK_AGAIN;
   54.69 -        perfc_incrc(tlb_track_iod_again);
   54.70 +        perfc_incr(tlb_track_iod_again);
   54.71          goto out;
   54.72      }
   54.73      if (new_entry == NULL && bit_to_be_set == _PAGE_TLB_INSERTED) {
   54.74 @@ -334,10 +334,10 @@ tlb_track_insert_or_dirty(struct tlb_tra
   54.75              /* entry can't be allocated.
   54.76                 fall down into full flush mode. */
   54.77              bit_to_be_set |= _PAGE_TLB_INSERTED_MANY;
   54.78 -            perfc_incrc(tlb_track_iod_new_failed);
   54.79 +            perfc_incr(tlb_track_iod_new_failed);
   54.80          }
   54.81          // tlb_track_printd("new_entry 0x%p\n", new_entry);
   54.82 -        perfc_incrc(tlb_track_iod_new_entry);
   54.83 +        perfc_incr(tlb_track_iod_new_entry);
   54.84          goto again;
   54.85      }
   54.86  
   54.87 @@ -348,7 +348,7 @@ tlb_track_insert_or_dirty(struct tlb_tra
   54.88          if (tlb_track_pte_zapped(old_pte, ret_pte)) {
   54.89              // tlb_track_printd("zapped TLB_TRACK_AGAIN\n");
   54.90              ret = TLB_TRACK_AGAIN;
   54.91 -            perfc_incrc(tlb_track_iod_again);
   54.92 +            perfc_incr(tlb_track_iod_again);
   54.93              goto out;
   54.94          }
   54.95  
   54.96 @@ -359,7 +359,7 @@ tlb_track_insert_or_dirty(struct tlb_tra
   54.97              // tlb_track_printd("iserted TLB_TRACK_MANY\n");
   54.98              BUG_ON(!pte_tlb_inserted(ret_pte));
   54.99              ret = TLB_TRACK_MANY;
  54.100 -            perfc_incrc(tlb_track_iod_new_many);
  54.101 +            perfc_incr(tlb_track_iod_new_many);
  54.102              goto out;
  54.103          }
  54.104          BUG_ON(pte_tlb_inserted(ret_pte));
  54.105 @@ -381,7 +381,7 @@ tlb_track_insert_or_dirty(struct tlb_tra
  54.106  #ifdef CONFIG_TLB_TRACK_CNT
  54.107          entry->cnt = 0;
  54.108  #endif
  54.109 -        perfc_incrc(tlb_track_iod_insert);
  54.110 +        perfc_incr(tlb_track_iod_insert);
  54.111          // tlb_track_entry_printf(entry);
  54.112      } else {
  54.113          goto out;
  54.114 @@ -392,7 +392,7 @@ tlb_track_insert_or_dirty(struct tlb_tra
  54.115      cpu_set(v->processor, entry->pcpu_dirty_mask);
  54.116      BUG_ON(v->vcpu_id >= NR_CPUS);
  54.117      vcpu_set(v->vcpu_id, entry->vcpu_dirty_mask);
  54.118 -    perfc_incrc(tlb_track_iod_dirtied);
  54.119 +    perfc_incr(tlb_track_iod_dirtied);
  54.120  
  54.121   out:
  54.122      spin_unlock(&tlb_track->hash_lock);
  54.123 @@ -432,19 +432,19 @@ tlb_track_search_and_remove(struct tlb_t
  54.124      struct list_head* head = tlb_track_hash_head(tlb_track, ptep);
  54.125      struct tlb_track_entry* entry;
  54.126  
  54.127 -    perfc_incrc(tlb_track_sar);
  54.128 +    perfc_incr(tlb_track_sar);
  54.129      if (!pte_tlb_tracking(old_pte)) {
  54.130 -        perfc_incrc(tlb_track_sar_not_tracked);
  54.131 +        perfc_incr(tlb_track_sar_not_tracked);
  54.132          return TLB_TRACK_NOT_TRACKED;
  54.133      }
  54.134      if (!pte_tlb_inserted(old_pte)) {
  54.135          BUG_ON(pte_tlb_inserted_many(old_pte));
  54.136 -        perfc_incrc(tlb_track_sar_not_found);
  54.137 +        perfc_incr(tlb_track_sar_not_found);
  54.138          return TLB_TRACK_NOT_FOUND;
  54.139      }
  54.140      if (pte_tlb_inserted_many(old_pte)) {
  54.141          BUG_ON(!pte_tlb_inserted(old_pte));
  54.142 -        perfc_incrc(tlb_track_sar_many);
  54.143 +        perfc_incr(tlb_track_sar_many);
  54.144          return TLB_TRACK_MANY;
  54.145      }
  54.146  
  54.147 @@ -475,14 +475,14 @@ tlb_track_search_and_remove(struct tlb_t
  54.148                           pte_tlb_inserted(current_pte))) {
  54.149                  BUG_ON(pte_tlb_inserted_many(current_pte));
  54.150                  spin_unlock(&tlb_track->hash_lock);
  54.151 -                perfc_incrc(tlb_track_sar_many);
  54.152 +                perfc_incr(tlb_track_sar_many);
  54.153                  return TLB_TRACK_MANY;
  54.154              }
  54.155  
  54.156              list_del(&entry->list);
  54.157              spin_unlock(&tlb_track->hash_lock);
  54.158              *entryp = entry;
  54.159 -            perfc_incrc(tlb_track_sar_found);
  54.160 +            perfc_incr(tlb_track_sar_found);
  54.161              // tlb_track_entry_printf(entry);
  54.162  #ifdef CONFIG_TLB_TRACK_CNT
  54.163              // tlb_track_printd("cnt = %ld\n", entry->cnt);
    55.1 --- a/xen/arch/ia64/xen/vcpu.c	Tue Mar 27 12:21:48 2007 -0600
    55.2 +++ b/xen/arch/ia64/xen/vcpu.c	Wed Mar 28 10:38:41 2007 +0100
    55.3 @@ -1616,7 +1616,7 @@ IA64FAULT vcpu_translate(VCPU * vcpu, u6
    55.4  			*pteval = (address & _PAGE_PPN_MASK) |
    55.5  				__DIRTY_BITS | _PAGE_PL_2 | _PAGE_AR_RWX;
    55.6  			*itir = PAGE_SHIFT << 2;
    55.7 -			perfc_incrc(phys_translate);
    55.8 +			perfc_incr(phys_translate);
    55.9  			return IA64_NO_FAULT;
   55.10  		}
   55.11  	} else if (!region && warn_region0_address) {
   55.12 @@ -1637,7 +1637,7 @@ IA64FAULT vcpu_translate(VCPU * vcpu, u6
   55.13  		if (trp != NULL) {
   55.14  			*pteval = trp->pte.val;
   55.15  			*itir = trp->itir;
   55.16 -			perfc_incrc(tr_translate);
   55.17 +			perfc_incr(tr_translate);
   55.18  			return IA64_NO_FAULT;
   55.19  		}
   55.20  	}
   55.21 @@ -1647,7 +1647,7 @@ IA64FAULT vcpu_translate(VCPU * vcpu, u6
   55.22  		if (trp != NULL) {
   55.23  			*pteval = trp->pte.val;
   55.24  			*itir = trp->itir;
   55.25 -			perfc_incrc(tr_translate);
   55.26 +			perfc_incr(tr_translate);
   55.27  			return IA64_NO_FAULT;
   55.28  		}
   55.29  	}
   55.30 @@ -1660,7 +1660,7 @@ IA64FAULT vcpu_translate(VCPU * vcpu, u6
   55.31  	    && vcpu_match_tr_entry_no_p(trp, address, rid)) {
   55.32  		*pteval = pte.val;
   55.33  		*itir = trp->itir;
   55.34 -		perfc_incrc(dtlb_translate);
   55.35 +		perfc_incr(dtlb_translate);
   55.36  		return IA64_USE_TLB;
   55.37  	}
   55.38  
   55.39 @@ -1709,7 +1709,7 @@ IA64FAULT vcpu_translate(VCPU * vcpu, u6
   55.40  out:
   55.41  	*itir = rr & RR_PS_MASK;
   55.42  	*pteval = pte.val;
   55.43 -	perfc_incrc(vhpt_translate);
   55.44 +	perfc_incr(vhpt_translate);
   55.45  	return IA64_NO_FAULT;
   55.46  }
   55.47  
    56.1 --- a/xen/arch/ia64/xen/vhpt.c	Tue Mar 27 12:21:48 2007 -0600
    56.2 +++ b/xen/arch/ia64/xen/vhpt.c	Wed Mar 28 10:38:41 2007 +0100
    56.3 @@ -48,14 +48,14 @@ local_vhpt_flush(void)
    56.4  	/* this must be after flush */
    56.5  	tlbflush_update_time(&__get_cpu_var(vhpt_tlbflush_timestamp),
    56.6  	                     flush_time);
    56.7 -	perfc_incrc(local_vhpt_flush);
    56.8 +	perfc_incr(local_vhpt_flush);
    56.9  }
   56.10  
   56.11  void
   56.12  vcpu_vhpt_flush(struct vcpu* v)
   56.13  {
   56.14  	__vhpt_flush(vcpu_vhpt_maddr(v));
   56.15 -	perfc_incrc(vcpu_vhpt_flush);
   56.16 +	perfc_incr(vcpu_vhpt_flush);
   56.17  }
   56.18  
   56.19  static void
   56.20 @@ -248,7 +248,7 @@ void vcpu_flush_vtlb_all(struct vcpu *v)
   56.21  	   not running on this processor.  There is currently no easy way to
   56.22  	   check this.  */
   56.23  
   56.24 -	perfc_incrc(vcpu_flush_vtlb_all);
   56.25 +	perfc_incr(vcpu_flush_vtlb_all);
   56.26  }
   56.27  
   56.28  static void __vcpu_flush_vtlb_all(void *vcpu)
   56.29 @@ -280,7 +280,7 @@ void domain_flush_vtlb_all(struct domain
   56.30  						 __vcpu_flush_vtlb_all,
   56.31  						 v, 1, 1);
   56.32  	}
   56.33 -	perfc_incrc(domain_flush_vtlb_all);
   56.34 +	perfc_incr(domain_flush_vtlb_all);
   56.35  }
   56.36  
   56.37  // Callers may need to call smp_mb() before/after calling this.
   56.38 @@ -322,7 +322,7 @@ void vcpu_flush_tlb_vhpt_range (u64 vadr
   56.39  		                     vadr, 1UL << log_range);
   56.40  	ia64_ptcl(vadr, log_range << 2);
   56.41  	ia64_srlz_i();
   56.42 -	perfc_incrc(vcpu_flush_tlb_vhpt_range);
   56.43 +	perfc_incr(vcpu_flush_tlb_vhpt_range);
   56.44  }
   56.45  
   56.46  void domain_flush_vtlb_range (struct domain *d, u64 vadr, u64 addr_range)
   56.47 @@ -361,7 +361,7 @@ void domain_flush_vtlb_range (struct dom
   56.48  
   56.49  	/* ptc.ga  */
   56.50  	platform_global_tlb_purge(vadr, vadr + addr_range, PAGE_SHIFT);
   56.51 -	perfc_incrc(domain_flush_vtlb_range);
   56.52 +	perfc_incr(domain_flush_vtlb_range);
   56.53  }
   56.54  
   56.55  #ifdef CONFIG_XEN_IA64_TLB_TRACK
   56.56 @@ -391,11 +391,11 @@ void
   56.57  	 */
   56.58  	vcpu_get_rr(current, VRN7 << VRN_SHIFT, &rr7_rid);
   56.59  	if (likely(rr7_rid == entry->rid)) {
   56.60 -		perfc_incrc(tlb_track_use_rr7);
   56.61 +		perfc_incr(tlb_track_use_rr7);
   56.62  	} else {
   56.63  		swap_rr0 = 1;
   56.64  		vaddr = (vaddr << 3) >> 3;// force vrn0
   56.65 -		perfc_incrc(tlb_track_swap_rr0);
   56.66 +		perfc_incr(tlb_track_swap_rr0);
   56.67  	}
   56.68  
   56.69  	// tlb_track_entry_printf(entry);
   56.70 @@ -435,18 +435,18 @@ void
   56.71  	/* ptc.ga  */
   56.72  	if (local_purge) {
   56.73  		ia64_ptcl(vaddr, PAGE_SHIFT << 2);
   56.74 -		perfc_incrc(domain_flush_vtlb_local);
   56.75 +		perfc_incr(domain_flush_vtlb_local);
   56.76  	} else {
   56.77  		/* ptc.ga has release semantics. */
   56.78  		platform_global_tlb_purge(vaddr, vaddr + PAGE_SIZE,
   56.79  		                          PAGE_SHIFT);
   56.80 -		perfc_incrc(domain_flush_vtlb_global);
   56.81 +		perfc_incr(domain_flush_vtlb_global);
   56.82  	}
   56.83  
   56.84  	if (swap_rr0) {
   56.85  		vcpu_set_rr(current, 0, old_rid);
   56.86  	}
   56.87 -	perfc_incrc(domain_flush_vtlb_track_entry);
   56.88 +	perfc_incr(domain_flush_vtlb_track_entry);
   56.89  }
   56.90  
   56.91  void
   56.92 @@ -512,7 +512,7 @@ void gather_vhpt_stats(void)
   56.93  		for (i = 0; i < VHPT_NUM_ENTRIES; i++, v++)
   56.94  			if (!(v->ti_tag & INVALID_TI_TAG))
   56.95  				vhpt_valid++;
   56.96 -		perfc_seta(vhpt_valid_entries, cpu, vhpt_valid);
   56.97 +		per_cpu(perfcounters, cpu)[PERFC_vhpt_valid_entries] = vhpt_valid;
   56.98  	}
   56.99  }
  56.100  #endif
    57.1 --- a/xen/arch/powerpc/backtrace.c	Tue Mar 27 12:21:48 2007 -0600
    57.2 +++ b/xen/arch/powerpc/backtrace.c	Wed Mar 28 10:38:41 2007 +0100
    57.3 @@ -205,21 +205,6 @@ void show_backtrace_regs(struct cpu_user
    57.4      console_end_sync();
    57.5  }
    57.6  
    57.7 -void __warn(char *file, int line)
    57.8 -{
    57.9 -    ulong sp;
   57.10 -    ulong lr;
   57.11 -
   57.12 -    console_start_sync();
   57.13 -    printk("WARN at %s:%d\n", file, line);
   57.14 -
   57.15 -    sp = (ulong)__builtin_frame_address(0);
   57.16 -    lr = (ulong)__builtin_return_address(0);
   57.17 -    backtrace(sp, lr, lr);
   57.18 -
   57.19 -    console_end_sync();
   57.20 -}
   57.21 -
   57.22  void dump_execution_state(void)
   57.23  {
   57.24      struct cpu_user_regs *regs = guest_cpu_user_regs();
    58.1 --- a/xen/arch/powerpc/mm.c	Tue Mar 27 12:21:48 2007 -0600
    58.2 +++ b/xen/arch/powerpc/mm.c	Wed Mar 28 10:38:41 2007 +0100
    58.3 @@ -261,7 +261,7 @@ int get_page_type(struct page_info *page
    58.4  
    58.5                  if ( unlikely(!cpus_empty(mask)) )
    58.6                  {
    58.7 -                    perfc_incrc(need_flush_tlb_flush);
    58.8 +                    perfc_incr(need_flush_tlb_flush);
    58.9                      flush_tlb_mask(mask);
   58.10                  }
   58.11  
    59.1 --- a/xen/arch/x86/Rules.mk	Tue Mar 27 12:21:48 2007 -0600
    59.2 +++ b/xen/arch/x86/Rules.mk	Wed Mar 28 10:38:41 2007 +0100
    59.3 @@ -59,6 +59,4 @@ HDRS += $(wildcard $(BASEDIR)/include/as
    59.4  HDRS += $(wildcard $(BASEDIR)/include/asm-x86/hvm/vmx/*.h)
    59.5  
    59.6  # Require GCC v3.4+ (to avoid issues with alignment constraints in Xen headers)
    59.7 -ifneq ($(call cc-ver,$(CC),0x030400),y)
    59.8 -$(error Xen requires at least gcc-3.4)
    59.9 -endif
   59.10 +$(call cc-ver-check,CC,0x030400,"Xen requires at least gcc-3.4")
    60.1 --- a/xen/arch/x86/apic.c	Tue Mar 27 12:21:48 2007 -0600
    60.2 +++ b/xen/arch/x86/apic.c	Wed Mar 28 10:38:41 2007 +0100
    60.3 @@ -1076,7 +1076,7 @@ int reprogram_timer(s_time_t timeout)
    60.4  fastcall void smp_apic_timer_interrupt(struct cpu_user_regs * regs)
    60.5  {
    60.6      ack_APIC_irq();
    60.7 -    perfc_incrc(apic_timer);
    60.8 +    perfc_incr(apic_timer);
    60.9      raise_softirq(TIMER_SOFTIRQ);
   60.10  }
   60.11  
    61.1 --- a/xen/arch/x86/extable.c	Tue Mar 27 12:21:48 2007 -0600
    61.2 +++ b/xen/arch/x86/extable.c	Wed Mar 28 10:38:41 2007 +0100
    61.3 @@ -72,7 +72,7 @@ search_pre_exception_table(struct cpu_us
    61.4      if ( fixup )
    61.5      {
    61.6          dprintk(XENLOG_INFO, "Pre-exception: %p -> %p\n", _p(addr), _p(fixup));
    61.7 -        perfc_incrc(exception_fixed);
    61.8 +        perfc_incr(exception_fixed);
    61.9      }
   61.10      return fixup;
   61.11  }
    62.1 --- a/xen/arch/x86/hvm/io.c	Tue Mar 27 12:21:48 2007 -0600
    62.2 +++ b/xen/arch/x86/hvm/io.c	Wed Mar 28 10:38:41 2007 +0100
    62.3 @@ -292,7 +292,11 @@ extern long get_reg_value(int size, int 
    62.4  static inline void set_eflags_CF(int size, unsigned long v1,
    62.5                                   unsigned long v2, struct cpu_user_regs *regs)
    62.6  {
    62.7 -    unsigned long mask = (1 << (8 * size)) - 1;
    62.8 +    unsigned long mask;
    62.9 +    
   62.10 +    ASSERT((size <= sizeof(mask)) && (size > 0));
   62.11 +
   62.12 +    mask = ~0UL >> (8 * (sizeof(mask) - size));
   62.13  
   62.14      if ((v1 & mask) > (v2 & mask))
   62.15          regs->eflags |= X86_EFLAGS_CF;
   62.16 @@ -303,7 +307,13 @@ static inline void set_eflags_CF(int siz
   62.17  static inline void set_eflags_OF(int size, unsigned long v1,
   62.18                                   unsigned long v2, unsigned long v3, struct cpu_user_regs *regs)
   62.19  {
   62.20 -    if ((v3 ^ v2) & (v3 ^ v1) & (1 << ((8 * size) - 1)))
   62.21 +    unsigned long mask;
   62.22 +
   62.23 +    ASSERT((size <= sizeof(mask)) && (size > 0));
   62.24 +
   62.25 +    mask = ~0UL >> (8 * (sizeof(mask) - size));
   62.26 +    
   62.27 +    if ((v3 ^ v2) & (v3 ^ v1) & mask)
   62.28          regs->eflags |= X86_EFLAGS_OF;
   62.29  }
   62.30  
   62.31 @@ -317,7 +327,11 @@ static inline void set_eflags_AF(int siz
   62.32  static inline void set_eflags_ZF(int size, unsigned long v1,
   62.33                                   struct cpu_user_regs *regs)
   62.34  {
   62.35 -    unsigned long mask = (1 << (8 * size)) - 1;
   62.36 +    unsigned long mask;
   62.37 +    
   62.38 +    ASSERT((size <= sizeof(mask)) && (size > 0));
   62.39 +
   62.40 +    mask = ~0UL >> (8 * (sizeof(mask) - size));
   62.41  
   62.42      if ((v1 & mask) == 0)
   62.43          regs->eflags |= X86_EFLAGS_ZF;
   62.44 @@ -326,7 +340,13 @@ static inline void set_eflags_ZF(int siz
   62.45  static inline void set_eflags_SF(int size, unsigned long v1,
   62.46                                   struct cpu_user_regs *regs)
   62.47  {
   62.48 -    if (v1 & (1 << ((8 * size) - 1)))
   62.49 +    unsigned long mask;
   62.50 +    
   62.51 +    ASSERT((size <= sizeof(mask)) && (size > 0));
   62.52 +
   62.53 +    mask = ~0UL >> (8 * (sizeof(mask) - size));
   62.54 +
   62.55 +    if (v1 & mask)
   62.56          regs->eflags |= X86_EFLAGS_SF;
   62.57  }
   62.58  
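The set_eflags_*() hunks above replace the mask expression (1 << (8 * size)) - 1, which shifts a 32-bit int and becomes undefined once size reaches 4, with ~0UL >> (8 * (sizeof(mask) - size)), which stays well defined for every operand width up to sizeof(unsigned long). The snippet below is a standalone illustration only, not part of the patch; it prints the new mask for each width and the old form only where the old expression is still defined behaviour.

/* Standalone sketch comparing the old and new mask expressions from the
 * set_eflags_*() hunks above. The "old" form is printed only for sizes
 * where 1 << (8 * size) does not overflow a 32-bit int. */
#include <stdio.h>

int main(void)
{
    for (int size = 1; size <= (int)sizeof(unsigned long); size *= 2) {
        unsigned long new_mask = ~0UL >> (8 * (sizeof(unsigned long) - size));
        printf("size %d: new mask %#lx", size, new_mask);
        if (8 * size < (int)(8 * sizeof(int)))   /* old form still defined? */
            printf("  old mask %#lx",
                   (unsigned long)((1 << (8 * size)) - 1));
        printf("\n");
    }
    return 0;
}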
    63.1 --- a/xen/arch/x86/hvm/svm/intr.c	Tue Mar 27 12:21:48 2007 -0600
    63.2 +++ b/xen/arch/x86/hvm/svm/intr.c	Wed Mar 28 10:38:41 2007 +0100
    63.3 @@ -64,87 +64,75 @@ asmlinkage void svm_intr_assist(void)
    63.4  {
    63.5      struct vcpu *v = current;
    63.6      struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
    63.7 -    struct periodic_time *pt;
    63.8      int intr_type = APIC_DM_EXTINT;
    63.9      int intr_vector = -1;
   63.10 -    int re_injecting = 0;
   63.11  
   63.12 -    /* Check if an Injection is active */
   63.13 -    /* Previous Interrupt delivery caused this Intercept? */
   63.14 +    /*
   63.15 +     * Do not deliver a virtual interrupt (vintr) if an exception is pending.
   63.16 +     * This is because the delivery of the exception can arbitrarily delay
   63.17 +     * the injection of the vintr (for example, if the exception is handled
   63.18 +     * via an interrupt gate, hence zeroing RFLAGS.IF). In the meantime the
   63.19 +     * vTPR can be modified upwards and we can end up delivering the vintr
   63.20 +     * when it is not in fact valid to do so (because we do not re-check the
   63.21 +     * vTPR value). Moreover, the guest will be able to see the updated
   63.22 +     * APIC/PIC state (as if the interrupt had been acknowledged) yet will not
   63.23 +     * have actually received the interrupt. This could confuse the guest!
   63.24 +     */
   63.25 +    if ( vmcb->eventinj.fields.v )
   63.26 +        return;
   63.27 +
   63.28 +    /*
   63.29 +     * Previous Interrupt delivery caused this intercept?
   63.30 +     * This will happen if the injection is latched by the processor (hence
   63.31 +     * clearing vintr.fields.irq) but then subsequently a fault occurs (e.g.,
   63.32 +     * due to lack of shadow mapping of guest IDT or guest-kernel stack).
   63.33 +     * 
   63.34 +     * NB. Exceptions that fault during delivery are lost. This needs to be
   63.35 +     * fixed but we'll usually get away with it since faults are usually
   63.36 +     * idempotent. But this isn't the case for e.g. software interrupts!
   63.37 +     */
   63.38      if ( vmcb->exitintinfo.fields.v && (vmcb->exitintinfo.fields.type == 0) )
   63.39      {
   63.40 -        v->arch.hvm_svm.saved_irq_vector = vmcb->exitintinfo.fields.vector;
   63.41 +        intr_vector = vmcb->exitintinfo.fields.vector;
   63.42          vmcb->exitintinfo.bytes = 0;
   63.43 -        re_injecting = 1;
   63.44 +        HVMTRACE_1D(REINJ_VIRQ, v, intr_vector);
   63.45 +        svm_inject_extint(v, intr_vector);
   63.46 +        return;
   63.47      }
   63.48  
   63.49 -    /* Previous interrupt still pending? */
   63.50 +    /*
   63.51 +     * Previous interrupt still pending? This occurs if we return from VMRUN
   63.52 +     * very early in the entry-to-guest process. Usually this is because an
   63.53 +     * external physical interrupt was pending when we executed VMRUN.
   63.54 +     */
   63.55      if ( vmcb->vintr.fields.irq )
   63.56 -    {
   63.57 -        intr_vector = vmcb->vintr.fields.vector;
   63.58 -        vmcb->vintr.bytes = 0;
   63.59 -        re_injecting = 1;
   63.60 -    }
   63.61 -    /* Pending IRQ saved at last VMExit? */
   63.62 -    else if ( v->arch.hvm_svm.saved_irq_vector >= 0 )
   63.63 -    {
   63.64 -        intr_vector = v->arch.hvm_svm.saved_irq_vector;
   63.65 -        v->arch.hvm_svm.saved_irq_vector = -1;
   63.66 -        re_injecting = 1;
   63.67 -    }
   63.68 -    /* Now let's check for newer interrrupts  */
   63.69 -    else
   63.70 -    {
   63.71 -        pt_update_irq(v);
   63.72 +        return;
   63.73  
   63.74 -        hvm_set_callback_irq_level();
    63.75 +    /* Crank the handle on interrupt state and check for new interrupts. */
   63.76 +    pt_update_irq(v);
   63.77 +    hvm_set_callback_irq_level();
   63.78 +    if ( !cpu_has_pending_irq(v) )
   63.79 +        return;
   63.80  
   63.81 -        if ( cpu_has_pending_irq(v) )
   63.82 -        {
   63.83 -            /*
   63.84 -             * Create a 'fake' virtual interrupt on to intercept as soon
   63.85 -             * as the guest _can_ take interrupts.  Do not obtain the next
   63.86 -             * interrupt from the vlapic/pic if unable to inject.
   63.87 -             */
   63.88 -            if ( irq_masked(vmcb->rflags) || vmcb->interrupt_shadow )  
   63.89 -            {
   63.90 -                vmcb->general1_intercepts |= GENERAL1_INTERCEPT_VINTR;
   63.91 -                HVMTRACE_2D(INJ_VIRQ, v, 0x0, /*fake=*/ 1);
   63.92 -                svm_inject_extint(v, 0x0); /* actual vector doesn't really matter */
   63.93 -                return;
   63.94 -            }
   63.95 -            intr_vector = cpu_get_interrupt(v, &intr_type);
   63.96 -        }
   63.97 +    /*
    63.98 +     * Create a 'fake' virtual interrupt on, to intercept as soon as the
   63.99 +     * guest _can_ take interrupts.  Do not obtain the next interrupt from
  63.100 +     * the vlapic/pic if unable to inject.
  63.101 +     */
  63.102 +    if ( irq_masked(vmcb->rflags) || vmcb->interrupt_shadow )  
  63.103 +    {
  63.104 +        vmcb->general1_intercepts |= GENERAL1_INTERCEPT_VINTR;
  63.105 +        HVMTRACE_2D(INJ_VIRQ, v, 0x0, /*fake=*/ 1);
  63.106 +        svm_inject_extint(v, 0x0); /* actual vector doesn't matter */
  63.107 +        return;
  63.108      }
  63.109  
  63.110 -    /* have we got an interrupt to inject? */
  63.111 -    if ( intr_vector < 0 )
  63.112 -        return;
  63.113 +    /* Okay, we can deliver the interrupt: grab it and update PIC state. */
  63.114 +    intr_vector = cpu_get_interrupt(v, &intr_type);
  63.115 +    BUG_ON(intr_vector < 0);
  63.116  
  63.117 -    switch ( intr_type )
  63.118 -    {
  63.119 -    case APIC_DM_EXTINT:
  63.120 -    case APIC_DM_FIXED:
  63.121 -    case APIC_DM_LOWEST:
  63.122 -        /* Re-injecting a PIT interruptt? */
  63.123 -        if ( re_injecting && (pt = is_pt_irq(v, intr_vector, intr_type)) )
  63.124 -            ++pt->pending_intr_nr;
  63.125 -        /* let's inject this interrupt */
  63.126 -        if (re_injecting)
  63.127 -            HVMTRACE_1D(REINJ_VIRQ, v, intr_vector);
  63.128 -        else
  63.129 -            HVMTRACE_2D(INJ_VIRQ, v, intr_vector, /*fake=*/ 0);
  63.130 -        svm_inject_extint(v, intr_vector);
  63.131 -        break;
  63.132 -    case APIC_DM_SMI:
  63.133 -    case APIC_DM_NMI:
  63.134 -    case APIC_DM_INIT:
  63.135 -    case APIC_DM_STARTUP:
  63.136 -    default:
  63.137 -        printk("Unsupported interrupt type: %d\n", intr_type);
  63.138 -        BUG();
  63.139 -        break;
  63.140 -    }
  63.141 +    HVMTRACE_2D(INJ_VIRQ, v, intr_vector, /*fake=*/ 0);
  63.142 +    svm_inject_extint(v, intr_vector);
  63.143  
  63.144      pt_intr_post(v, intr_vector, intr_type);
  63.145  }
    64.1 --- a/xen/arch/x86/hvm/svm/svm.c	Tue Mar 27 12:21:48 2007 -0600
    64.2 +++ b/xen/arch/x86/hvm/svm/svm.c	Wed Mar 28 10:38:41 2007 +0100
    64.3 @@ -64,8 +64,8 @@ extern void svm_dump_inst(unsigned long 
    64.4  extern int svm_dbg_on;
    64.5  void svm_dump_regs(const char *from, struct cpu_user_regs *regs);
    64.6  
    64.7 -static int svm_do_vmmcall_reset_to_realmode(struct vcpu *v,
    64.8 -                                            struct cpu_user_regs *regs);
    64.9 +static int svm_reset_to_realmode(struct vcpu *v,
   64.10 +                                 struct cpu_user_regs *regs);
   64.11  
   64.12  /* va of hardware host save area     */
   64.13  static void *hsa[NR_CPUS] __read_mostly;
   64.14 @@ -749,19 +749,21 @@ static void svm_init_ap_context(
   64.15      struct vcpu_guest_context *ctxt, int vcpuid, int trampoline_vector)
   64.16  {
   64.17      struct vcpu *v;
   64.18 +    struct vmcb_struct *vmcb;
   64.19      cpu_user_regs_t *regs;
   64.20      u16 cs_sel;
   64.21  
   64.22      /* We know this is safe because hvm_bringup_ap() does it */
   64.23      v = current->domain->vcpu[vcpuid];
   64.24 +    vmcb = v->arch.hvm_svm.vmcb;
   64.25      regs = &v->arch.guest_context.user_regs;
   64.26  
   64.27      memset(ctxt, 0, sizeof(*ctxt));
   64.28  
   64.29      /*
   64.30       * We execute the trampoline code in real mode. The trampoline vector
   64.31 -     * passed to us is page alligned and is the physicall frame number for
   64.32 -     * the code. We will execute this code in real mode. 
    64.33 +     * passed to us is page aligned and is the physical frame number for
   64.34 +     * the code. We will execute this code in real mode.
   64.35       */
   64.36      cs_sel = trampoline_vector << 8;
   64.37      ctxt->user_regs.eip = 0x0;
   64.38 @@ -771,11 +773,11 @@ static void svm_init_ap_context(
   64.39       * This is the launch of an AP; set state so that we begin executing
   64.40       * the trampoline code in real-mode.
   64.41       */
   64.42 -    svm_do_vmmcall_reset_to_realmode(v, regs);  
   64.43 +    svm_reset_to_realmode(v, regs);  
   64.44      /* Adjust the vmcb's hidden register state. */
   64.45 -    v->arch.hvm_svm.vmcb->rip = 0;
   64.46 -    v->arch.hvm_svm.vmcb->cs.sel = cs_sel;
   64.47 -    v->arch.hvm_svm.vmcb->cs.base = (cs_sel << 4);
   64.48 +    vmcb->rip = 0;
   64.49 +    vmcb->cs.sel = cs_sel;
   64.50 +    vmcb->cs.base = (cs_sel << 4);
   64.51  }
   64.52  
   64.53  static void svm_init_hypercall_page(struct domain *d, void *hypercall_page)
   64.54 @@ -961,8 +963,6 @@ static int svm_vcpu_initialise(struct vc
   64.55      v->arch.ctxt_switch_from = svm_ctxt_switch_from;
   64.56      v->arch.ctxt_switch_to   = svm_ctxt_switch_to;
   64.57  
   64.58 -    v->arch.hvm_svm.saved_irq_vector = -1;
   64.59 -
   64.60      v->arch.hvm_svm.launch_core = -1;
   64.61  
   64.62      if ( (rc = svm_create_vmcb(v)) != 0 )
   64.63 @@ -2494,8 +2494,8 @@ void svm_handle_invlpg(const short invlp
   64.64   *
   64.65   * returns 0 on success, non-zero otherwise
   64.66   */
   64.67 -static int svm_do_vmmcall_reset_to_realmode(struct vcpu *v, 
   64.68 -                                            struct cpu_user_regs *regs)
   64.69 +static int svm_reset_to_realmode(struct vcpu *v, 
   64.70 +                                 struct cpu_user_regs *regs)
   64.71  {
   64.72      struct vmcb_struct *vmcb;
   64.73  
    65.1 --- a/xen/arch/x86/hvm/svm/vmcb.c	Tue Mar 27 12:21:48 2007 -0600
    65.2 +++ b/xen/arch/x86/hvm/svm/vmcb.c	Wed Mar 28 10:38:41 2007 +0100
    65.3 @@ -203,6 +203,7 @@ static int construct_vmcb(struct vcpu *v
    65.4          vmcb->g_pat = 0x0007040600070406ULL; /* guest PAT */
    65.5          vmcb->exception_intercepts &= ~EXCEPTION_BITMAP_PG;
    65.6          vmcb->h_cr3 = pagetable_get_paddr(v->domain->arch.phys_table);
    65.7 +        vmcb->cr4 = arch_svm->cpu_shadow_cr4 = 0;
    65.8      }
    65.9  
   65.10      return 0;
    66.1 --- a/xen/arch/x86/hvm/vmx/intr.c	Tue Mar 27 12:21:48 2007 -0600
    66.2 +++ b/xen/arch/x86/hvm/vmx/intr.c	Wed Mar 28 10:38:41 2007 +0100
    66.3 @@ -89,7 +89,7 @@ static void update_tpr_threshold(struct 
    66.4  asmlinkage void vmx_intr_assist(void)
    66.5  {
    66.6      int intr_type = 0;
    66.7 -    int highest_vector;
    66.8 +    int intr_vector;
    66.9      unsigned long eflags;
   66.10      struct vcpu *v = current;
   66.11      unsigned int idtv_info_field;
   66.12 @@ -106,8 +106,9 @@ asmlinkage void vmx_intr_assist(void)
   66.13  
   66.14      if ( unlikely(v->arch.hvm_vmx.vector_injected) )
   66.15      {
   66.16 -        v->arch.hvm_vmx.vector_injected=0;
   66.17 -        if (unlikely(has_ext_irq)) enable_irq_window(v);
   66.18 +        v->arch.hvm_vmx.vector_injected = 0;
   66.19 +        if ( unlikely(has_ext_irq) )
   66.20 +            enable_irq_window(v);
   66.21          return;
   66.22      }
   66.23  
   66.24 @@ -132,7 +133,6 @@ asmlinkage void vmx_intr_assist(void)
   66.25              enable_irq_window(v);
   66.26  
   66.27          HVM_DBG_LOG(DBG_LEVEL_1, "idtv_info_field=%x", idtv_info_field);
   66.28 -
   66.29          return;
   66.30      }
   66.31  
   66.32 @@ -154,30 +154,13 @@ asmlinkage void vmx_intr_assist(void)
   66.33          return;
   66.34      }
   66.35  
   66.36 -    highest_vector = cpu_get_interrupt(v, &intr_type);
   66.37 -    if ( highest_vector < 0 )
   66.38 -        return;
   66.39 +    intr_vector = cpu_get_interrupt(v, &intr_type);
   66.40 +    BUG_ON(intr_vector < 0);
   66.41  
   66.42 -    switch ( intr_type )
   66.43 -    {
   66.44 -    case APIC_DM_EXTINT:
   66.45 -    case APIC_DM_FIXED:
   66.46 -    case APIC_DM_LOWEST:
   66.47 -        HVMTRACE_2D(INJ_VIRQ, v, highest_vector, /*fake=*/ 0);
   66.48 -        vmx_inject_extint(v, highest_vector, VMX_DELIVER_NO_ERROR_CODE);
   66.49 -        break;
   66.50 +    HVMTRACE_2D(INJ_VIRQ, v, intr_vector, /*fake=*/ 0);
   66.51 +    vmx_inject_extint(v, intr_vector, VMX_DELIVER_NO_ERROR_CODE);
   66.52  
   66.53 -    case APIC_DM_SMI:
   66.54 -    case APIC_DM_NMI:
   66.55 -    case APIC_DM_INIT:
   66.56 -    case APIC_DM_STARTUP:
   66.57 -    default:
   66.58 -        printk("Unsupported interrupt type\n");
   66.59 -        BUG();
   66.60 -        break;
   66.61 -    }
   66.62 -
   66.63 -    pt_intr_post(v, highest_vector, intr_type);
   66.64 +    pt_intr_post(v, intr_vector, intr_type);
   66.65  }
   66.66  
   66.67  /*
    67.1 --- a/xen/arch/x86/irq.c	Tue Mar 27 12:21:48 2007 -0600
    67.2 +++ b/xen/arch/x86/irq.c	Wed Mar 28 10:38:41 2007 +0100
    67.3 @@ -56,7 +56,7 @@ asmlinkage void do_IRQ(struct cpu_user_r
    67.4      irq_desc_t       *desc = &irq_desc[vector];
    67.5      struct irqaction *action;
    67.6  
    67.7 -    perfc_incrc(irqs);
    67.8 +    perfc_incr(irqs);
    67.9  
   67.10      spin_lock(&desc->lock);
   67.11      desc->handler->ack(vector);
    68.1 --- a/xen/arch/x86/mm.c	Tue Mar 27 12:21:48 2007 -0600
    68.2 +++ b/xen/arch/x86/mm.c	Wed Mar 28 10:38:41 2007 +0100
    68.3 @@ -1726,7 +1726,7 @@ int get_page_type(struct page_info *page
    68.4                       (!shadow_mode_enabled(page_get_owner(page)) ||
    68.5                        ((nx & PGT_type_mask) == PGT_writable_page)) )
    68.6                  {
    68.7 -                    perfc_incrc(need_flush_tlb_flush);
    68.8 +                    perfc_incr(need_flush_tlb_flush);
    68.9                      flush_tlb_mask(mask);
   68.10                  }
   68.11  
   68.12 @@ -1969,6 +1969,8 @@ int do_mmuext_op(
   68.13          if ( unlikely(!guest_handle_is_null(pdone)) )
   68.14              (void)copy_from_guest(&done, pdone, 1);
   68.15      }
   68.16 +    else
   68.17 +        perfc_incr(calls_to_mmuext_op);
   68.18  
   68.19      if ( unlikely(!guest_handle_okay(uops, count)) )
   68.20      {
   68.21 @@ -2223,6 +2225,8 @@ int do_mmuext_op(
   68.22  
   68.23      UNLOCK_BIGLOCK(d);
   68.24  
   68.25 +    perfc_add(num_mmuext_ops, i);
   68.26 +
   68.27   out:
   68.28      /* Add incremental work we have done to the @done output parameter. */
   68.29      if ( unlikely(!guest_handle_is_null(pdone)) )
   68.30 @@ -2257,6 +2261,8 @@ int do_mmu_update(
   68.31          if ( unlikely(!guest_handle_is_null(pdone)) )
   68.32              (void)copy_from_guest(&done, pdone, 1);
   68.33      }
   68.34 +    else
   68.35 +        perfc_incr(calls_to_mmu_update);
   68.36  
   68.37      if ( unlikely(!guest_handle_okay(ureqs, count)) )
   68.38      {
   68.39 @@ -2273,9 +2279,6 @@ int do_mmu_update(
   68.40      domain_mmap_cache_init(&mapcache);
   68.41      domain_mmap_cache_init(&sh_mapcache);
   68.42  
   68.43 -    perfc_incrc(calls_to_mmu_update);
   68.44 -    perfc_addc(num_page_updates, count);
   68.45 -
   68.46      LOCK_BIGLOCK(d);
   68.47  
   68.48      for ( i = 0; i < count; i++ )
   68.49 @@ -2431,13 +2434,15 @@ int do_mmu_update(
   68.50          guest_handle_add_offset(ureqs, 1);
   68.51      }
   68.52  
   68.53 -    domain_mmap_cache_destroy(&mapcache);
   68.54 -    domain_mmap_cache_destroy(&sh_mapcache);
   68.55 -
   68.56      process_deferred_ops();
   68.57  
   68.58      UNLOCK_BIGLOCK(d);
   68.59  
   68.60 +    domain_mmap_cache_destroy(&mapcache);
   68.61 +    domain_mmap_cache_destroy(&sh_mapcache);
   68.62 +
   68.63 +    perfc_add(num_page_updates, i);
   68.64 +
   68.65   out:
   68.66      /* Add incremental work we have done to the @done output parameter. */
   68.67      if ( unlikely(!guest_handle_is_null(pdone)) )
   68.68 @@ -2724,7 +2729,7 @@ int do_update_va_mapping(unsigned long v
   68.69      cpumask_t      pmask;
   68.70      int            rc  = 0;
   68.71  
   68.72 -    perfc_incrc(calls_to_update_va);
   68.73 +    perfc_incr(calls_to_update_va);
   68.74  
   68.75      if ( unlikely(!__addr_ok(va) && !paging_mode_external(d)) )
   68.76          return -EINVAL;
   68.77 @@ -2740,6 +2745,10 @@ int do_update_va_mapping(unsigned long v
   68.78          guest_unmap_l1e(v, pl1e);
   68.79      pl1e = NULL;
   68.80  
   68.81 +    process_deferred_ops();
   68.82 +
   68.83 +    UNLOCK_BIGLOCK(d);
   68.84 +
   68.85      switch ( flags & UVMF_FLUSHTYPE_MASK )
   68.86      {
   68.87      case UVMF_TLB_FLUSH:
   68.88 @@ -2785,10 +2794,6 @@ int do_update_va_mapping(unsigned long v
   68.89          break;
   68.90      }
   68.91  
   68.92 -    process_deferred_ops();
   68.93 -    
   68.94 -    UNLOCK_BIGLOCK(d);
   68.95 -
   68.96      return rc;
   68.97  }
   68.98  
   68.99 @@ -2806,6 +2811,9 @@ int do_update_va_mapping_otherdomain(uns
  68.100  
  68.101      rc = do_update_va_mapping(va, val64, flags);
  68.102  
  68.103 +    BUG_ON(this_cpu(percpu_mm_info).deferred_ops);
  68.104 +    process_deferred_ops(); /* only to clear foreigndom */
  68.105 +
  68.106      return rc;
  68.107  }
  68.108  
  68.109 @@ -3378,7 +3386,7 @@ int ptwr_do_page_fault(struct vcpu *v, u
  68.110          goto bail;
  68.111  
  68.112      UNLOCK_BIGLOCK(d);
  68.113 -    perfc_incrc(ptwr_emulations);
  68.114 +    perfc_incr(ptwr_emulations);
  68.115      return EXCRET_fault_fixed;
  68.116  
  68.117   bail:
    69.1 --- a/xen/arch/x86/mm/hap/hap.c	Tue Mar 27 12:21:48 2007 -0600
    69.2 +++ b/xen/arch/x86/mm/hap/hap.c	Wed Mar 28 10:38:41 2007 +0100
    69.3 @@ -135,6 +135,7 @@ void hap_free_p2m_page(struct domain *d,
    69.4          HAP_ERROR("Odd p2m page count c=%#x t=%"PRtype_info"\n",
    69.5                    pg->count_info, pg->u.inuse.type_info);
    69.6      }
    69.7 +    pg->count_info = 0;
    69.8      /* Free should not decrement domain's total allocation, since 
    69.9       * these pages were allocated without an owner. */
   69.10      page_set_owner(pg, NULL); 
   69.11 @@ -182,6 +183,7 @@ hap_set_allocation(struct domain *d, uns
   69.12              list_del(&sp->list);
   69.13              d->arch.paging.hap.free_pages -= 1;
   69.14              d->arch.paging.hap.total_pages -= 1;
   69.15 +            sp->count_info = 0;
   69.16              free_domheap_pages(sp, 0);
   69.17          }
   69.18          
   69.19 @@ -367,17 +369,7 @@ void hap_destroy_monitor_table(struct vc
   69.20  {
   69.21      struct domain *d = v->domain;
   69.22  
   69.23 -#if CONFIG_PAGING_LEVELS == 4
   69.24 -    /* Need to destroy the l3 monitor page in slot 0 too */
   69.25 -    {
   69.26 -        mfn_t m3mfn;
   69.27 -        l4_pgentry_t *l4e = hap_map_domain_page(mmfn);
   69.28 -        ASSERT(l4e_get_flags(l4e[0]) & _PAGE_PRESENT);
   69.29 -        m3mfn = _mfn(l4e_get_pfn(l4e[0]));
   69.30 -        hap_free(d, m3mfn);
   69.31 -        hap_unmap_domain_page(l4e);
   69.32 -    }
   69.33 -#elif CONFIG_PAGING_LEVELS == 3
   69.34 +#if CONFIG_PAGING_LEVELS == 3
   69.35      /* Need to destroy the l2 monitor page in slot 4 too */
   69.36      {
   69.37          l3_pgentry_t *l3e = hap_map_domain_page(mmfn);
   69.38 @@ -632,10 +624,6 @@ void hap_update_paging_modes(struct vcpu
   69.39  
   69.40      v->arch.paging.translate_enabled = !!hvm_paging_enabled(v);    
   69.41  
   69.42 -    /* use p2m map */
   69.43 -    v->arch.guest_table =
   69.44 -        pagetable_from_pfn(pagetable_get_pfn(d->arch.phys_table));
   69.45 -
   69.46      if ( pagetable_is_null(v->arch.monitor_table) ) {
   69.47          mfn_t mmfn = hap_make_monitor_table(v);
   69.48          v->arch.monitor_table = pagetable_from_mfn(mmfn);
    70.1 --- a/xen/arch/x86/mm/shadow/common.c	Tue Mar 27 12:21:48 2007 -0600
    70.2 +++ b/xen/arch/x86/mm/shadow/common.c	Wed Mar 28 10:38:41 2007 +0100
    70.3 @@ -276,7 +276,7 @@ hvm_emulate_write(enum x86_segment seg,
    70.4  
    70.5      /* How many emulations could we save if we unshadowed on stack writes? */
    70.6      if ( seg == x86_seg_ss )
    70.7 -        perfc_incrc(shadow_fault_emulate_stack);
    70.8 +        perfc_incr(shadow_fault_emulate_stack);
    70.9  
   70.10      rc = hvm_translate_linear_addr(
   70.11          seg, offset, bytes, hvm_access_write, sh_ctxt, &addr);
   70.12 @@ -804,7 +804,7 @@ void shadow_prealloc(struct domain *d, u
   70.13      ASSERT(v != NULL); /* Shouldn't have enabled shadows if we've no vcpus  */
   70.14  
   70.15      /* Stage one: walk the list of pinned pages, unpinning them */
   70.16 -    perfc_incrc(shadow_prealloc_1);
   70.17 +    perfc_incr(shadow_prealloc_1);
   70.18      list_for_each_backwards_safe(l, t, &d->arch.paging.shadow.pinned_shadows)
   70.19      {
   70.20          sp = list_entry(l, struct shadow_page_info, list);
   70.21 @@ -820,7 +820,7 @@ void shadow_prealloc(struct domain *d, u
   70.22      /* Stage two: all shadow pages are in use in hierarchies that are
   70.23       * loaded in cr3 on some vcpu.  Walk them, unhooking the non-Xen
   70.24       * mappings. */
   70.25 -    perfc_incrc(shadow_prealloc_2);
   70.26 +    perfc_incr(shadow_prealloc_2);
   70.27  
   70.28      for_each_vcpu(d, v2) 
   70.29          for ( i = 0 ; i < 4 ; i++ )
   70.30 @@ -929,7 +929,7 @@ mfn_t shadow_alloc(struct domain *d,
   70.31      ASSERT(shadow_locked_by_me(d));
   70.32      ASSERT(order <= SHADOW_MAX_ORDER);
   70.33      ASSERT(shadow_type != SH_type_none);
   70.34 -    perfc_incrc(shadow_alloc);
   70.35 +    perfc_incr(shadow_alloc);
   70.36  
   70.37      /* Find smallest order which can satisfy the request. */
   70.38      for ( i = order; i <= SHADOW_MAX_ORDER; i++ )
   70.39 @@ -967,7 +967,7 @@ mfn_t shadow_alloc(struct domain *d,
   70.40          tlbflush_filter(mask, sp[i].tlbflush_timestamp);
   70.41          if ( unlikely(!cpus_empty(mask)) )
   70.42          {
   70.43 -            perfc_incrc(shadow_alloc_tlbflush);
   70.44 +            perfc_incr(shadow_alloc_tlbflush);
   70.45              flush_tlb_mask(mask);
   70.46          }
   70.47          /* Now safe to clear the page for reuse */
   70.48 @@ -997,7 +997,7 @@ void shadow_free(struct domain *d, mfn_t
   70.49      int i;
   70.50  
   70.51      ASSERT(shadow_locked_by_me(d));
   70.52 -    perfc_incrc(shadow_free);
   70.53 +    perfc_incr(shadow_free);
   70.54  
   70.55      shadow_type = sp->type;
   70.56      ASSERT(shadow_type != SH_type_none);
   70.57 @@ -1406,7 +1406,7 @@ mfn_t shadow_hash_lookup(struct vcpu *v,
   70.58  
   70.59      sh_hash_audit(d);
   70.60  
   70.61 -    perfc_incrc(shadow_hash_lookups);
   70.62 +    perfc_incr(shadow_hash_lookups);
   70.63      key = sh_hash(n, t);
   70.64      sh_hash_audit_bucket(d, key);
   70.65  
   70.66 @@ -1434,7 +1434,7 @@ mfn_t shadow_hash_lookup(struct vcpu *v,
   70.67              }
   70.68              else
   70.69              {
   70.70 -                perfc_incrc(shadow_hash_lookup_head);
   70.71 +                perfc_incr(shadow_hash_lookup_head);
   70.72              }
   70.73              return shadow_page_to_mfn(sp);
   70.74          }
   70.75 @@ -1442,7 +1442,7 @@ mfn_t shadow_hash_lookup(struct vcpu *v,
   70.76          sp = sp->next_shadow;
   70.77      }
   70.78  
   70.79 -    perfc_incrc(shadow_hash_lookup_miss);
   70.80 +    perfc_incr(shadow_hash_lookup_miss);
   70.81      return _mfn(INVALID_MFN);
   70.82  }
   70.83  
   70.84 @@ -1460,7 +1460,7 @@ void shadow_hash_insert(struct vcpu *v, 
   70.85  
   70.86      sh_hash_audit(d);
   70.87  
   70.88 -    perfc_incrc(shadow_hash_inserts);
   70.89 +    perfc_incr(shadow_hash_inserts);
   70.90      key = sh_hash(n, t);
   70.91      sh_hash_audit_bucket(d, key);
   70.92      
   70.93 @@ -1486,7 +1486,7 @@ void shadow_hash_delete(struct vcpu *v, 
   70.94  
   70.95      sh_hash_audit(d);
   70.96  
   70.97 -    perfc_incrc(shadow_hash_deletes);
   70.98 +    perfc_incr(shadow_hash_deletes);
   70.99      key = sh_hash(n, t);
  70.100      sh_hash_audit_bucket(d, key);
  70.101      
  70.102 @@ -1713,7 +1713,7 @@ int sh_remove_write_access(struct vcpu *
  70.103           || (pg->u.inuse.type_info & PGT_count_mask) == 0 )
  70.104          return 0;
  70.105  
  70.106 -    perfc_incrc(shadow_writeable);
  70.107 +    perfc_incr(shadow_writeable);
  70.108  
  70.109      /* If this isn't a "normal" writeable page, the domain is trying to 
  70.110       * put pagetables in special memory of some kind.  We can't allow that. */
  70.111 @@ -1735,7 +1735,7 @@ int sh_remove_write_access(struct vcpu *
  70.112  
  70.113  #define GUESS(_a, _h) do {                                                \
  70.114              if ( v->arch.paging.mode->shadow.guess_wrmap(v, (_a), gmfn) ) \
  70.115 -                perfc_incrc(shadow_writeable_h_ ## _h);                   \
  70.116 +                perfc_incr(shadow_writeable_h_ ## _h);                   \
  70.117              if ( (pg->u.inuse.type_info & PGT_count_mask) == 0 )          \
  70.118                  return 1;                                                 \
  70.119          } while (0)
  70.120 @@ -1808,7 +1808,7 @@ int sh_remove_write_access(struct vcpu *
  70.121              callbacks[shtype](v, last_smfn, gmfn);
  70.122  
  70.123          if ( (pg->u.inuse.type_info & PGT_count_mask) != old_count )
  70.124 -            perfc_incrc(shadow_writeable_h_5);
  70.125 +            perfc_incr(shadow_writeable_h_5);
  70.126      }
  70.127  
  70.128      if ( (pg->u.inuse.type_info & PGT_count_mask) == 0 )
  70.129 @@ -1817,7 +1817,7 @@ int sh_remove_write_access(struct vcpu *
  70.130  #endif /* SHADOW_OPTIMIZATIONS & SHOPT_WRITABLE_HEURISTIC */
  70.131      
  70.132      /* Brute-force search of all the shadows, by walking the hash */
  70.133 -    perfc_incrc(shadow_writeable_bf);
  70.134 +    perfc_incr(shadow_writeable_bf);
  70.135      hash_foreach(v, callback_mask, callbacks, gmfn);
  70.136  
  70.137      /* If that didn't catch the mapping, something is very wrong */
  70.138 @@ -1888,7 +1888,7 @@ int sh_remove_all_mappings(struct vcpu *
  70.139          | 1 << SH_type_fl1_64_shadow
  70.140          ;
  70.141  
  70.142 -    perfc_incrc(shadow_mappings);
  70.143 +    perfc_incr(shadow_mappings);
  70.144      if ( (page->count_info & PGC_count_mask) == 0 )
  70.145          return 0;
  70.146  
  70.147 @@ -1903,7 +1903,7 @@ int sh_remove_all_mappings(struct vcpu *
  70.148       * Heuristics for finding the (probably) single mapping of this gmfn */
  70.149      
  70.150      /* Brute-force search of all the shadows, by walking the hash */
  70.151 -    perfc_incrc(shadow_mappings_bf);
  70.152 +    perfc_incr(shadow_mappings_bf);
  70.153      hash_foreach(v, callback_mask, callbacks, gmfn);
  70.154  
  70.155      /* If that didn't catch the mapping, something is very wrong */
  70.156 @@ -1992,9 +1992,9 @@ static int sh_remove_shadow_via_pointer(
  70.157      
  70.158      sh_unmap_domain_page(vaddr);
  70.159      if ( rc )
  70.160 -        perfc_incrc(shadow_up_pointer);
  70.161 +        perfc_incr(shadow_up_pointer);
  70.162      else
  70.163 -        perfc_incrc(shadow_unshadow_bf);
  70.164 +        perfc_incr(shadow_unshadow_bf);
  70.165  
  70.166      return rc;
  70.167  }
  70.168 @@ -2093,7 +2093,7 @@ void sh_remove_shadows(struct vcpu *v, m
  70.169      }
  70.170  
  70.171      /* Search for this shadow in all appropriate shadows */
  70.172 -    perfc_incrc(shadow_unshadow);
  70.173 +    perfc_incr(shadow_unshadow);
  70.174      sh_flags = pg->shadow_flags;
  70.175  
  70.176      /* Lower-level shadows need to be excised from upper-level shadows.
    71.1 --- a/xen/arch/x86/mm/shadow/multi.c	Tue Mar 27 12:21:48 2007 -0600
    71.2 +++ b/xen/arch/x86/mm/shadow/multi.c	Wed Mar 28 10:38:41 2007 +0100
    71.3 @@ -109,7 +109,7 @@ get_shadow_status(struct vcpu *v, mfn_t 
    71.4  /* Look for shadows in the hash table */
    71.5  {
    71.6      mfn_t smfn = shadow_hash_lookup(v, mfn_x(gmfn), shadow_type);
    71.7 -    perfc_incrc(shadow_get_shadow_status);
    71.8 +    perfc_incr(shadow_get_shadow_status);
    71.9      return smfn;
   71.10  }
   71.11  
   71.12 @@ -209,7 +209,7 @@ guest_walk_tables(struct vcpu *v, unsign
   71.13  {
   71.14      ASSERT(!guest_op || shadow_locked_by_me(v->domain));
   71.15  
   71.16 -    perfc_incrc(shadow_guest_walk);
   71.17 +    perfc_incr(shadow_guest_walk);
   71.18      memset(gw, 0, sizeof(*gw));
   71.19      gw->va = va;
   71.20  
   71.21 @@ -448,14 +448,14 @@ static u32 guest_set_ad_bits(struct vcpu
   71.22               == (_PAGE_DIRTY | _PAGE_ACCESSED) )
   71.23              return flags;  /* Guest already has A and D bits set */
   71.24          flags |= _PAGE_DIRTY | _PAGE_ACCESSED;
   71.25 -        perfc_incrc(shadow_ad_update);
   71.26 +        perfc_incr(shadow_ad_update);
   71.27      }
   71.28      else 
   71.29      {
   71.30          if ( flags & _PAGE_ACCESSED )
   71.31              return flags;  /* Guest already has A bit set */
   71.32          flags |= _PAGE_ACCESSED;
   71.33 -        perfc_incrc(shadow_a_update);
   71.34 +        perfc_incr(shadow_a_update);
   71.35      }
   71.36  
   71.37      /* Set the bit(s) */
   71.38 @@ -863,7 +863,7 @@ shadow_write_entries(void *d, void *s, i
   71.39       * using map_domain_page() to get a writeable mapping if we need to. */
   71.40      if ( __copy_to_user(d, d, sizeof (unsigned long)) != 0 ) 
   71.41      {
   71.42 -        perfc_incrc(shadow_linear_map_failed);
   71.43 +        perfc_incr(shadow_linear_map_failed);
   71.44          map = sh_map_domain_page(mfn);
   71.45          ASSERT(map != NULL);
   71.46          dst = map + ((unsigned long)dst & (PAGE_SIZE - 1));
   71.47 @@ -925,7 +925,7 @@ shadow_get_page_from_l1e(shadow_l1e_t sl
   71.48  
   71.49      if ( unlikely(!res) )
   71.50      {
   71.51 -        perfc_incrc(shadow_get_page_fail);
   71.52 +        perfc_incr(shadow_get_page_fail);
   71.53          SHADOW_PRINTK("failed: l1e=" SH_PRI_pte "\n");
   71.54      }
   71.55  
   71.56 @@ -2198,7 +2198,7 @@ static int validate_gl4e(struct vcpu *v,
   71.57      mfn_t sl3mfn = _mfn(INVALID_MFN);
   71.58      int result = 0;
   71.59  
   71.60 -    perfc_incrc(shadow_validate_gl4e_calls);
   71.61 +    perfc_incr(shadow_validate_gl4e_calls);
   71.62  
   71.63      if ( guest_l4e_get_flags(*new_gl4e) & _PAGE_PRESENT )
   71.64      {
   71.65 @@ -2250,7 +2250,7 @@ static int validate_gl3e(struct vcpu *v,
   71.66      mfn_t sl2mfn = _mfn(INVALID_MFN);
   71.67      int result = 0;
   71.68  
   71.69 -    perfc_incrc(shadow_validate_gl3e_calls);
   71.70 +    perfc_incr(shadow_validate_gl3e_calls);
   71.71  
   71.72      if ( guest_l3e_get_flags(*new_gl3e) & _PAGE_PRESENT )
   71.73      {
   71.74 @@ -2277,7 +2277,7 @@ static int validate_gl2e(struct vcpu *v,
   71.75      mfn_t sl1mfn = _mfn(INVALID_MFN);
   71.76      int result = 0;
   71.77  
   71.78 -    perfc_incrc(shadow_validate_gl2e_calls);
   71.79 +    perfc_incr(shadow_validate_gl2e_calls);
   71.80  
   71.81      if ( guest_l2e_get_flags(*new_gl2e) & _PAGE_PRESENT )
   71.82      {
   71.83 @@ -2363,7 +2363,7 @@ static int validate_gl1e(struct vcpu *v,
   71.84      mfn_t gmfn;
   71.85      int result = 0, mmio;
   71.86  
   71.87 -    perfc_incrc(shadow_validate_gl1e_calls);
   71.88 +    perfc_incr(shadow_validate_gl1e_calls);
   71.89  
   71.90      gfn = guest_l1e_get_gfn(*new_gl1e);
   71.91      gmfn = vcpu_gfn_to_mfn(v, gfn);
   71.92 @@ -2523,7 +2523,7 @@ static inline void check_for_early_unsha
   71.93          u32 flags = mfn_to_page(gmfn)->shadow_flags;
   71.94          if ( !(flags & (SHF_L2_32|SHF_L2_PAE|SHF_L2H_PAE|SHF_L4_64)) )
   71.95          {
   71.96 -            perfc_incrc(shadow_early_unshadow);
   71.97 +            perfc_incr(shadow_early_unshadow);
   71.98              sh_remove_shadows(v, gmfn, 0, 0 /* Slow, can fail to unshadow */ );
   71.99          } 
  71.100      }
  71.101 @@ -2642,7 +2642,7 @@ static int sh_page_fault(struct vcpu *v,
  71.102      SHADOW_PRINTK("d:v=%u:%u va=%#lx err=%u\n",
  71.103                     v->domain->domain_id, v->vcpu_id, va, regs->error_code);
  71.104  
  71.105 -    perfc_incrc(shadow_fault);
  71.106 +    perfc_incr(shadow_fault);
  71.107      //
  71.108      // XXX: Need to think about eventually mapping superpages directly in the
  71.109      //      shadow (when possible), as opposed to splintering them into a
  71.110 @@ -2670,7 +2670,7 @@ static int sh_page_fault(struct vcpu *v,
  71.111                      ASSERT(regs->error_code & PFEC_page_present);
  71.112                      regs->error_code ^= (PFEC_reserved_bit|PFEC_page_present);
  71.113                      reset_early_unshadow(v);
  71.114 -                    perfc_incrc(shadow_fault_fast_gnp);
  71.115 +                    perfc_incr(shadow_fault_fast_gnp);
  71.116                      SHADOW_PRINTK("fast path not-present\n");
  71.117                      return 0;
  71.118                  }
  71.119 @@ -2688,7 +2688,7 @@ static int sh_page_fault(struct vcpu *v,
  71.120                         << PAGE_SHIFT) 
  71.121                      | (va & ~PAGE_MASK);
  71.122              }
  71.123 -            perfc_incrc(shadow_fault_fast_mmio);
  71.124 +            perfc_incr(shadow_fault_fast_mmio);
  71.125              SHADOW_PRINTK("fast path mmio %#"PRIpaddr"\n", gpa);
  71.126              reset_early_unshadow(v);
  71.127              handle_mmio(gpa);
  71.128 @@ -2699,7 +2699,7 @@ static int sh_page_fault(struct vcpu *v,
  71.129              /* This should be exceptionally rare: another vcpu has fixed
  71.130               * the tables between the fault and our reading the l1e. 
  71.131               * Retry and let the hardware give us the right fault next time. */
  71.132 -            perfc_incrc(shadow_fault_fast_fail);
  71.133 +            perfc_incr(shadow_fault_fast_fail);
  71.134              SHADOW_PRINTK("fast path false alarm!\n");            
  71.135              return EXCRET_fault_fixed;
  71.136          }
  71.137 @@ -2746,7 +2746,7 @@ static int sh_page_fault(struct vcpu *v,
  71.138              goto mmio;
  71.139          }
  71.140  
  71.141 -        perfc_incrc(shadow_fault_bail_not_present);
  71.142 +        perfc_incr(shadow_fault_bail_not_present);
  71.143          goto not_a_shadow_fault;
  71.144      }
  71.145  
  71.146 @@ -2761,7 +2761,7 @@ static int sh_page_fault(struct vcpu *v,
  71.147           !(accumulated_gflags & _PAGE_USER) )
  71.148      {
  71.149          /* illegal user-mode access to supervisor-only page */
  71.150 -        perfc_incrc(shadow_fault_bail_user_supervisor);
  71.151 +        perfc_incr(shadow_fault_bail_user_supervisor);
  71.152          goto not_a_shadow_fault;
  71.153      }
  71.154  
  71.155 @@ -2772,7 +2772,7 @@ static int sh_page_fault(struct vcpu *v,
  71.156      {
  71.157          if ( unlikely(!(accumulated_gflags & _PAGE_RW)) )
  71.158          {
  71.159 -            perfc_incrc(shadow_fault_bail_ro_mapping);
  71.160 +            perfc_incr(shadow_fault_bail_ro_mapping);
  71.161              goto not_a_shadow_fault;
  71.162          }
  71.163      }
  71.164 @@ -2787,7 +2787,7 @@ static int sh_page_fault(struct vcpu *v,
  71.165              if ( accumulated_gflags & _PAGE_NX_BIT )
  71.166              {
  71.167                  /* NX prevented this code fetch */
  71.168 -                perfc_incrc(shadow_fault_bail_nx);
  71.169 +                perfc_incr(shadow_fault_bail_nx);
  71.170                  goto not_a_shadow_fault;
  71.171              }
  71.172          }
  71.173 @@ -2802,7 +2802,7 @@ static int sh_page_fault(struct vcpu *v,
  71.174  
  71.175      if ( !mmio && !mfn_valid(gmfn) )
  71.176      {
  71.177 -        perfc_incrc(shadow_fault_bail_bad_gfn);
  71.178 +        perfc_incr(shadow_fault_bail_bad_gfn);
  71.179          SHADOW_PRINTK("BAD gfn=%"SH_PRI_gfn" gmfn=%"PRI_mfn"\n", 
  71.180                        gfn_x(gfn), mfn_x(gmfn));
  71.181          goto not_a_shadow_fault;
  71.182 @@ -2844,12 +2844,12 @@ static int sh_page_fault(struct vcpu *v,
  71.183      {
  71.184          if ( ft == ft_demand_write )
  71.185          {
  71.186 -            perfc_incrc(shadow_fault_emulate_write);
  71.187 +            perfc_incr(shadow_fault_emulate_write);
  71.188              goto emulate;
  71.189          }
  71.190          else if ( shadow_mode_trap_reads(d) && ft == ft_demand_read )
  71.191          {
  71.192 -            perfc_incrc(shadow_fault_emulate_read);
  71.193 +            perfc_incr(shadow_fault_emulate_read);
  71.194              goto emulate;
  71.195          }
  71.196      }
  71.197 @@ -2860,7 +2860,7 @@ static int sh_page_fault(struct vcpu *v,
  71.198          goto mmio;
  71.199      }
  71.200  
  71.201 -    perfc_incrc(shadow_fault_fixed);
  71.202 +    perfc_incr(shadow_fault_fixed);
  71.203      d->arch.paging.shadow.fault_count++;
  71.204      reset_early_unshadow(v);
  71.205  
  71.206 @@ -2920,7 +2920,7 @@ static int sh_page_fault(struct vcpu *v,
  71.207      {
  71.208          SHADOW_PRINTK("emulator failure, unshadowing mfn %#lx\n", 
  71.209                         mfn_x(gmfn));
  71.210 -        perfc_incrc(shadow_fault_emulate_failed);
  71.211 +        perfc_incr(shadow_fault_emulate_failed);
  71.212          /* If this is actually a page table, then we have a bug, and need 
  71.213           * to support more operations in the emulator.  More likely, 
  71.214           * though, this is a hint that this page should not be shadowed. */
  71.215 @@ -2935,7 +2935,7 @@ static int sh_page_fault(struct vcpu *v,
  71.216   mmio:
  71.217      if ( !guest_mode(regs) )
  71.218          goto not_a_shadow_fault;
  71.219 -    perfc_incrc(shadow_fault_mmio);
  71.220 +    perfc_incr(shadow_fault_mmio);
  71.221      sh_audit_gw(v, &gw);
  71.222      unmap_walk(v, &gw);
  71.223      SHADOW_PRINTK("mmio %#"PRIpaddr"\n", gpa);
  71.224 @@ -2964,7 +2964,7 @@ sh_invlpg(struct vcpu *v, unsigned long 
  71.225  {
  71.226      shadow_l2e_t sl2e;
  71.227      
  71.228 -    perfc_incrc(shadow_invlpg);
  71.229 +    perfc_incr(shadow_invlpg);
  71.230  
  71.231      /* First check that we can safely read the shadow l2e.  SMP/PAE linux can
  71.232       * run as high as 6% of invlpg calls where we haven't shadowed the l2 
  71.233 @@ -2983,7 +2983,7 @@ sh_invlpg(struct vcpu *v, unsigned long 
  71.234                                        + shadow_l3_linear_offset(va)),
  71.235                                sizeof (sl3e)) != 0 )
  71.236          {
  71.237 -            perfc_incrc(shadow_invlpg_fault);
  71.238 +            perfc_incr(shadow_invlpg_fault);
  71.239              return 0;
  71.240          }
  71.241          if ( (!shadow_l3e_get_flags(sl3e) & _PAGE_PRESENT) )
  71.242 @@ -3002,7 +3002,7 @@ sh_invlpg(struct vcpu *v, unsigned long 
  71.243                            sh_linear_l2_table(v) + shadow_l2_linear_offset(va),
  71.244                            sizeof (sl2e)) != 0 )
  71.245      {
  71.246 -        perfc_incrc(shadow_invlpg_fault);
  71.247 +        perfc_incr(shadow_invlpg_fault);
  71.248          return 0;
  71.249      }
  71.250  
    72.1 --- a/xen/arch/x86/smp.c	Tue Mar 27 12:21:48 2007 -0600
    72.2 +++ b/xen/arch/x86/smp.c	Wed Mar 28 10:38:41 2007 +0100
    72.3 @@ -169,7 +169,7 @@ static unsigned long flush_va;
    72.4  fastcall void smp_invalidate_interrupt(void)
    72.5  {
    72.6      ack_APIC_irq();
    72.7 -    perfc_incrc(ipis);
    72.8 +    perfc_incr(ipis);
    72.9      irq_enter();
   72.10      if ( !__sync_lazy_execstate() )
   72.11      {
   72.12 @@ -329,7 +329,7 @@ void smp_send_stop(void)
   72.13  fastcall void smp_event_check_interrupt(struct cpu_user_regs *regs)
   72.14  {
   72.15      ack_APIC_irq();
   72.16 -    perfc_incrc(ipis);
   72.17 +    perfc_incr(ipis);
   72.18  }
   72.19  
   72.20  fastcall void smp_call_function_interrupt(struct cpu_user_regs *regs)
   72.21 @@ -338,7 +338,7 @@ fastcall void smp_call_function_interrup
   72.22      void *info = call_data->info;
   72.23  
   72.24      ack_APIC_irq();
   72.25 -    perfc_incrc(ipis);
   72.26 +    perfc_incr(ipis);
   72.27  
   72.28      if ( !cpu_isset(smp_processor_id(), call_data->selected) )
   72.29          return;
    73.1 --- a/xen/arch/x86/time.c	Tue Mar 27 12:21:48 2007 -0600
    73.2 +++ b/xen/arch/x86/time.c	Wed Mar 28 10:38:41 2007 +0100
    73.3 @@ -670,14 +670,20 @@ static inline void version_update_end(u3
    73.4      (*version)++;
    73.5  }
    73.6  
    73.7 -static inline void __update_vcpu_system_time(struct vcpu *v)
    73.8 +void update_vcpu_system_time(struct vcpu *v)
    73.9  {
   73.10      struct cpu_time       *t;
   73.11      struct vcpu_time_info *u;
   73.12  
   73.13 +    if ( v->vcpu_info == NULL )
   73.14 +        return;
   73.15 +
   73.16      t = &this_cpu(cpu_time);
   73.17      u = &vcpu_info(v, time);
   73.18  
   73.19 +    if ( u->tsc_timestamp == t->local_tsc_stamp )
   73.20 +        return;
   73.21 +
   73.22      version_update_begin(&u->version);
   73.23  
   73.24      u->tsc_timestamp     = t->local_tsc_stamp;
   73.25 @@ -688,13 +694,6 @@ static inline void __update_vcpu_system_
   73.26      version_update_end(&u->version);
   73.27  }
   73.28  
   73.29 -void update_vcpu_system_time(struct vcpu *v)
   73.30 -{
   73.31 -    if ( vcpu_info(v, time.tsc_timestamp) !=
   73.32 -         this_cpu(cpu_time).local_tsc_stamp )
   73.33 -        __update_vcpu_system_time(v);
   73.34 -}
   73.35 -
   73.36  void update_domain_wallclock_time(struct domain *d)
   73.37  {
   73.38      spin_lock(&wc_lock);
   73.39 @@ -771,9 +770,10 @@ static void local_time_calibration(void 
   73.40      local_irq_enable();
   73.41  
   73.42  #if 0
   73.43 -    printk("PRE%d: tsc=%lld stime=%lld master=%lld\n",
   73.44 +    printk("PRE%d: tsc=%"PRIu64" stime=%"PRIu64" master=%"PRIu64"\n",
   73.45             smp_processor_id(), prev_tsc, prev_local_stime, prev_master_stime);
   73.46 -    printk("CUR%d: tsc=%lld stime=%lld master=%lld -> %lld\n",
   73.47 +    printk("CUR%d: tsc=%"PRIu64" stime=%"PRIu64" master=%"PRIu64
   73.48 +           " -> %"PRId64"\n",
   73.49             smp_processor_id(), curr_tsc, curr_local_stime, curr_master_stime,
   73.50             curr_master_stime - curr_local_stime);
   73.51  #endif
   73.52 @@ -855,6 +855,8 @@ static void local_time_calibration(void 
   73.53      t->stime_local_stamp  = curr_local_stime;
   73.54      t->stime_master_stamp = curr_master_stime;
   73.55  
   73.56 +    update_vcpu_system_time(current);
   73.57 +
   73.58   out:
   73.59      set_timer(&t->calibration_timer, NOW() + EPOCH);
   73.60  
    74.1 --- a/xen/arch/x86/traps.c	Tue Mar 27 12:21:48 2007 -0600
    74.2 +++ b/xen/arch/x86/traps.c	Wed Mar 28 10:38:41 2007 +0100
    74.3 @@ -637,29 +637,35 @@ asmlinkage int do_invalid_op(struct cpu_
    74.4           memcmp(bug.ud2, "\xf\xb", sizeof(bug.ud2)) ||
    74.5           (bug.ret != 0xc2) )
    74.6          goto die;
    74.7 +    eip += sizeof(bug);
    74.8  
    74.9      id = bug.id & 3;
   74.10 -    if ( id == BUGFRAME_rsvd )
   74.11 -        goto die;
   74.12  
   74.13      if ( id == BUGFRAME_dump )
   74.14      {
   74.15          show_execution_state(regs);
   74.16 -        regs->eip += sizeof(bug);
   74.17 +        regs->eip = (unsigned long)eip;
   74.18          return EXCRET_fault_fixed;
   74.19      }
   74.20  
   74.21 -    /* BUG() or ASSERT(): decode the filename pointer and line number. */
   74.22 -    ASSERT((id == BUGFRAME_bug) || (id == BUGFRAME_assert));
   74.23 -    eip += sizeof(bug);
   74.24 +    /* WARN, BUG or ASSERT: decode the filename pointer and line number. */
   74.25      if ( !is_kernel(eip) ||
   74.26           __copy_from_user(&bug_str, eip, sizeof(bug_str)) ||
   74.27           memcmp(bug_str.mov, BUG_MOV_STR, sizeof(bug_str.mov)) )
   74.28          goto die;
   74.29 +    eip += sizeof(bug_str);
   74.30  
   74.31      filename = is_kernel(bug_str.str) ? (char *)bug_str.str : "<unknown>";
   74.32      lineno   = bug.id >> 2;
   74.33  
   74.34 +    if ( id == BUGFRAME_warn )
   74.35 +    {
   74.36 +        printk("Xen WARN at %.50s:%d\n", filename, lineno);
   74.37 +        show_execution_state(regs);
   74.38 +        regs->eip = (unsigned long)eip;
   74.39 +        return EXCRET_fault_fixed;
   74.40 +    }
   74.41 +
   74.42      if ( id == BUGFRAME_bug )
   74.43      {
   74.44          printk("Xen BUG at %.50s:%d\n", filename, lineno);
   74.45 @@ -668,13 +674,13 @@ asmlinkage int do_invalid_op(struct cpu_
   74.46          panic("Xen BUG at %.50s:%d\n", filename, lineno);
   74.47      }
   74.48  
   74.49 -    /* ASSERT(): decode the predicate string pointer. */
   74.50 +    /* ASSERT: decode the predicate string pointer. */
   74.51      ASSERT(id == BUGFRAME_assert);
   74.52 -    eip += sizeof(bug_str);
   74.53      if ( !is_kernel(eip) ||
   74.54           __copy_from_user(&bug_str, eip, sizeof(bug_str)) ||
   74.55           memcmp(bug_str.mov, BUG_MOV_STR, sizeof(bug_str.mov)) )
   74.56          goto die;
   74.57 +    eip += sizeof(bug_str);
   74.58  
   74.59      predicate = is_kernel(bug_str.str) ? (char *)bug_str.str : "<unknown>";
   74.60      printk("Assertion '%s' failed at %.50s:%d\n",
   74.61 @@ -950,7 +956,7 @@ asmlinkage int do_page_fault(struct cpu_
   74.62  
   74.63      DEBUGGER_trap_entry(TRAP_page_fault, regs);
   74.64  
   74.65 -    perfc_incrc(page_faults);
   74.66 +    perfc_incr(page_faults);
   74.67  
   74.68      if ( unlikely((rc = fixup_page_fault(addr, regs)) != 0) )
   74.69          return rc;
   74.70 @@ -962,7 +968,7 @@ asmlinkage int do_page_fault(struct cpu_
   74.71  
   74.72          if ( likely((fixup = search_exception_table(regs->eip)) != 0) )
   74.73          {
   74.74 -            perfc_incrc(copy_user_faults);
   74.75 +            perfc_incr(copy_user_faults);
   74.76              regs->eip = fixup;
   74.77              return 0;
   74.78          }
    75.1 --- a/xen/arch/x86/x86_32/asm-offsets.c	Tue Mar 27 12:21:48 2007 -0600
    75.2 +++ b/xen/arch/x86/x86_32/asm-offsets.c	Wed Mar 28 10:38:41 2007 +0100
    75.3 @@ -107,21 +107,11 @@ void __dummy__(void)
    75.4      BLANK();
    75.5  
    75.6  #if PERF_COUNTERS
    75.7 -    OFFSET(PERFC_hypercalls, struct perfcounter, hypercalls);
    75.8 -    OFFSET(PERFC_exceptions, struct perfcounter, exceptions);
    75.9 +    DEFINE(PERFC_hypercalls, PERFC_hypercalls);
   75.10 +    DEFINE(PERFC_exceptions, PERFC_exceptions);
   75.11      BLANK();
   75.12  #endif
   75.13  
   75.14 -    OFFSET(MULTICALL_op, struct multicall_entry, op);
   75.15 -    OFFSET(MULTICALL_arg0, struct multicall_entry, args[0]);
   75.16 -    OFFSET(MULTICALL_arg1, struct multicall_entry, args[1]);
   75.17 -    OFFSET(MULTICALL_arg2, struct multicall_entry, args[2]);
   75.18 -    OFFSET(MULTICALL_arg3, struct multicall_entry, args[3]);
   75.19 -    OFFSET(MULTICALL_arg4, struct multicall_entry, args[4]);
   75.20 -    OFFSET(MULTICALL_arg5, struct multicall_entry, args[5]);
   75.21 -    OFFSET(MULTICALL_result, struct multicall_entry, result);
   75.22 -    BLANK();
   75.23 -
   75.24      DEFINE(FIXMAP_apic_base, fix_to_virt(FIX_APIC_BASE));
   75.25      BLANK();
   75.26  
    76.1 --- a/xen/arch/x86/x86_32/domain_page.c	Tue Mar 27 12:21:48 2007 -0600
    76.2 +++ b/xen/arch/x86/x86_32/domain_page.c	Wed Mar 28 10:38:41 2007 +0100
    76.3 @@ -50,7 +50,7 @@ void *map_domain_page(unsigned long mfn)
    76.4  
    76.5      ASSERT(!in_irq());
    76.6  
    76.7 -    perfc_incrc(map_domain_page_count);
    76.8 +    perfc_incr(map_domain_page_count);
    76.9  
   76.10      v = mapcache_current_vcpu();
   76.11  
   76.12 @@ -76,7 +76,7 @@ void *map_domain_page(unsigned long mfn)
   76.13          cache->shadow_epoch[vcpu] = cache->epoch;
   76.14          if ( NEED_FLUSH(this_cpu(tlbflush_time), cache->tlbflush_timestamp) )
   76.15          {
   76.16 -            perfc_incrc(domain_page_tlb_flush);
   76.17 +            perfc_incr(domain_page_tlb_flush);
   76.18              local_flush_tlb();
   76.19          }
   76.20      }
   76.21 @@ -92,7 +92,7 @@ void *map_domain_page(unsigned long mfn)
   76.22          }
   76.23  
   76.24          /* /Second/, flush TLBs. */
   76.25 -        perfc_incrc(domain_page_tlb_flush);
   76.26 +        perfc_incr(domain_page_tlb_flush);
   76.27          local_flush_tlb();
   76.28          cache->shadow_epoch[vcpu] = ++cache->epoch;
   76.29          cache->tlbflush_timestamp = tlbflush_current_time();
    77.1 --- a/xen/arch/x86/x86_32/entry.S	Tue Mar 27 12:21:48 2007 -0600
    77.2 +++ b/xen/arch/x86/x86_32/entry.S	Wed Mar 28 10:38:41 2007 +0100
    77.3 @@ -173,7 +173,7 @@ ENTRY(hypercall)
    77.4          GET_CURRENT(%ebx)
    77.5          cmpl  $NR_hypercalls,%eax
    77.6          jae   bad_hypercall
    77.7 -        PERFC_INCR(PERFC_hypercalls, %eax)
    77.8 +        PERFC_INCR(PERFC_hypercalls, %eax, %ebx)
    77.9  #ifndef NDEBUG
   77.10          /* Create shadow parameters and corrupt those not used by this call. */
   77.11          pushl %eax
   77.12 @@ -429,7 +429,7 @@ 1:      xorl  %eax,%eax
   77.13          movl  %esp,%edx
   77.14          pushl %edx                      # push the cpu_user_regs pointer
   77.15          GET_CURRENT(%ebx)
   77.16 -        PERFC_INCR(PERFC_exceptions, %eax)
   77.17 +        PERFC_INCR(PERFC_exceptions, %eax, %ebx)
   77.18          call  *exception_table(,%eax,4)
   77.19          addl  $4,%esp
   77.20          movl  UREGS_eflags(%esp),%eax
    78.1 --- a/xen/arch/x86/x86_32/seg_fixup.c	Tue Mar 27 12:21:48 2007 -0600
    78.2 +++ b/xen/arch/x86/x86_32/seg_fixup.c	Wed Mar 28 10:38:41 2007 +0100
    78.3 @@ -434,7 +434,7 @@ int gpf_emulate_4gb(struct cpu_user_regs
    78.4          goto fail;
    78.5  
    78.6      /* Success! */
    78.7 -    perfc_incrc(seg_fixups);
    78.8 +    perfc_incr(seg_fixups);
    78.9  
   78.10      /* If requested, give a callback on otherwise unused vector 15. */
   78.11      if ( VM_ASSIST(d->domain, VMASST_TYPE_4gb_segments_notify) )
    79.1 --- a/xen/arch/x86/x86_64/asm-offsets.c	Tue Mar 27 12:21:48 2007 -0600
    79.2 +++ b/xen/arch/x86/x86_64/asm-offsets.c	Wed Mar 28 10:38:41 2007 +0100
    79.3 @@ -121,30 +121,8 @@ void __dummy__(void)
    79.4      BLANK();
    79.5  
    79.6  #if PERF_COUNTERS
    79.7 -    OFFSET(PERFC_hypercalls, struct perfcounter, hypercalls);
    79.8 -    OFFSET(PERFC_exceptions, struct perfcounter, exceptions);
    79.9 -    BLANK();
   79.10 -#endif
   79.11 -
   79.12 -    OFFSET(MULTICALL_op, struct multicall_entry, op);
   79.13 -    OFFSET(MULTICALL_arg0, struct multicall_entry, args[0]);
   79.14 -    OFFSET(MULTICALL_arg1, struct multicall_entry, args[1]);
   79.15 -    OFFSET(MULTICALL_arg2, struct multicall_entry, args[2]);
   79.16 -    OFFSET(MULTICALL_arg3, struct multicall_entry, args[3]);
   79.17 -    OFFSET(MULTICALL_arg4, struct multicall_entry, args[4]);
   79.18 -    OFFSET(MULTICALL_arg5, struct multicall_entry, args[5]);
   79.19 -    OFFSET(MULTICALL_result, struct multicall_entry, result);
   79.20 -    BLANK();
   79.21 -
   79.22 -#ifdef CONFIG_COMPAT
   79.23 -    OFFSET(COMPAT_MULTICALL_op, struct compat_multicall_entry, op);
   79.24 -    OFFSET(COMPAT_MULTICALL_arg0, struct compat_multicall_entry, args[0]);
   79.25 -    OFFSET(COMPAT_MULTICALL_arg1, struct compat_multicall_entry, args[1]);
   79.26 -    OFFSET(COMPAT_MULTICALL_arg2, struct compat_multicall_entry, args[2]);
   79.27 -    OFFSET(COMPAT_MULTICALL_arg3, struct compat_multicall_entry, args[3]);
   79.28 -    OFFSET(COMPAT_MULTICALL_arg4, struct compat_multicall_entry, args[4]);
   79.29 -    OFFSET(COMPAT_MULTICALL_arg5, struct compat_multicall_entry, args[5]);
   79.30 -    OFFSET(COMPAT_MULTICALL_result, struct compat_multicall_entry, result);
   79.31 +    DEFINE(PERFC_hypercalls, PERFC_hypercalls);
   79.32 +    DEFINE(PERFC_exceptions, PERFC_exceptions);
   79.33      BLANK();
   79.34  #endif
   79.35  
    80.1 --- a/xen/arch/x86/x86_64/compat/entry.S	Tue Mar 27 12:21:48 2007 -0600
    80.2 +++ b/xen/arch/x86/x86_64/compat/entry.S	Wed Mar 28 10:38:41 2007 +0100
    80.3 @@ -57,7 +57,7 @@ ENTRY(compat_hypercall)
    80.4          movl  UREGS_rbx(%rsp),%edi   /* Arg 1        */
    80.5  #endif
    80.6          leaq  compat_hypercall_table(%rip),%r10
    80.7 -        PERFC_INCR(PERFC_hypercalls, %rax)
    80.8 +        PERFC_INCR(PERFC_hypercalls, %rax, %rbx)
    80.9          callq *(%r10,%rax,8)
   80.10  #ifndef NDEBUG
   80.11          /* Deliberately corrupt parameter regs used by this hypercall. */
    81.1 --- a/xen/arch/x86/x86_64/entry.S	Tue Mar 27 12:21:48 2007 -0600
    81.2 +++ b/xen/arch/x86/x86_64/entry.S	Wed Mar 28 10:38:41 2007 +0100
    81.3 @@ -147,7 +147,7 @@ ENTRY(syscall_enter)
    81.4          pushq UREGS_rip+8(%rsp)
    81.5  #endif
    81.6          leaq  hypercall_table(%rip),%r10
    81.7 -        PERFC_INCR(PERFC_hypercalls, %rax)
    81.8 +        PERFC_INCR(PERFC_hypercalls, %rax, %rbx)
    81.9          callq *(%r10,%rax,8)
   81.10  #ifndef NDEBUG
   81.11          /* Deliberately corrupt parameter regs used by this hypercall. */
   81.12 @@ -396,7 +396,7 @@ 1:      movq  %rsp,%rdi
   81.13          movl  UREGS_entry_vector(%rsp),%eax
   81.14          leaq  exception_table(%rip),%rdx
   81.15          GET_CURRENT(%rbx)
   81.16 -        PERFC_INCR(PERFC_exceptions, %rax)
   81.17 +        PERFC_INCR(PERFC_exceptions, %rax, %rbx)
   81.18          callq *(%rdx,%rax,8)
   81.19          testb $3,UREGS_cs(%rsp)
   81.20          jz    restore_all_xen
    82.1 --- a/xen/arch/x86/x86_emulate.c	Tue Mar 27 12:21:48 2007 -0600
    82.2 +++ b/xen/arch/x86/x86_emulate.c	Wed Mar 28 10:38:41 2007 +0100
    82.3 @@ -1565,8 +1565,10 @@ x86_emulate(
    82.4              if ( ((op_bytes = dst.bytes) != 8) && mode_64bit() )
    82.5              {
    82.6                  dst.bytes = op_bytes = 8;
    82.7 -                if ( (rc = ops->read(dst.mem.seg, dst.mem.off,
    82.8 -                                     &dst.val, 8, ctxt)) != 0 )
    82.9 +                if ( dst.type == OP_REG )
   82.10 +                    dst.val = *dst.reg;
   82.11 +                else if ( (rc = ops->read(dst.mem.seg, dst.mem.off,
   82.12 +                                          &dst.val, 8, ctxt)) != 0 )
   82.13                      goto done;
   82.14              }
   82.15              src.val = _regs.eip;
   82.16 @@ -1579,8 +1581,10 @@ x86_emulate(
   82.17              if ( mode_64bit() && (dst.bytes == 4) )
   82.18              {
   82.19                  dst.bytes = 8;
   82.20 -                if ( (rc = ops->read(dst.mem.seg, dst.mem.off,
   82.21 -                                     &dst.val, 8, ctxt)) != 0 )
   82.22 +                if ( dst.type == OP_REG )
   82.23 +                    dst.val = *dst.reg;
   82.24 +                else if ( (rc = ops->read(dst.mem.seg, dst.mem.off,
   82.25 +                                          &dst.val, 8, ctxt)) != 0 )
   82.26                      goto done;
   82.27              }
   82.28              if ( (rc = ops->write(x86_seg_ss, sp_pre_dec(dst.bytes),
    83.1 --- a/xen/common/domain.c	Tue Mar 27 12:21:48 2007 -0600
    83.2 +++ b/xen/common/domain.c	Wed Mar 28 10:38:41 2007 +0100
    83.3 @@ -96,14 +96,16 @@ struct vcpu *alloc_vcpu(
    83.4  
    83.5      v->domain = d;
    83.6      v->vcpu_id = vcpu_id;
    83.7 -    v->vcpu_info = shared_info_addr(d, vcpu_info[vcpu_id]);
    83.8      spin_lock_init(&v->pause_lock);
    83.9  
   83.10      v->runstate.state = is_idle_vcpu(v) ? RUNSTATE_running : RUNSTATE_offline;
   83.11      v->runstate.state_entry_time = NOW();
   83.12  
   83.13      if ( !is_idle_domain(d) )
   83.14 +    {
   83.15          set_bit(_VCPUF_down, &v->vcpu_flags);
   83.16 +        v->vcpu_info = shared_info_addr(d, vcpu_info[vcpu_id]);
   83.17 +    }
   83.18  
   83.19      if ( sched_init_vcpu(v, cpu_id) != 0 )
   83.20      {
    84.1 --- a/xen/common/multicall.c	Tue Mar 27 12:21:48 2007 -0600
    84.2 +++ b/xen/common/multicall.c	Wed Mar 28 10:38:41 2007 +0100
    84.3 @@ -10,6 +10,7 @@
    84.4  #include <xen/event.h>
    84.5  #include <xen/multicall.h>
    84.6  #include <xen/guest_access.h>
    84.7 +#include <xen/perfc.h>
    84.8  #include <asm/current.h>
    84.9  #include <asm/hardirq.h>
   84.10  
   84.11 @@ -69,14 +70,18 @@ do_multicall(
   84.12          guest_handle_add_offset(call_list, 1);
   84.13      }
   84.14  
   84.15 +    perfc_incr(calls_to_multicall);
   84.16 +    perfc_add(calls_from_multicall, nr_calls);
   84.17      mcs->flags = 0;
   84.18      return 0;
   84.19  
   84.20   fault:
   84.21 +    perfc_incr(calls_to_multicall);
   84.22      mcs->flags = 0;
   84.23      return -EFAULT;
   84.24  
   84.25   preempted:
   84.26 +    perfc_add(calls_from_multicall, i);
   84.27      mcs->flags = 0;
   84.28      return hypercall_create_continuation(
   84.29          __HYPERVISOR_multicall, "hi", call_list, nr_calls-i);
    85.1 --- a/xen/common/page_alloc.c	Tue Mar 27 12:21:48 2007 -0600
    85.2 +++ b/xen/common/page_alloc.c	Wed Mar 28 10:38:41 2007 +0100
    85.3 @@ -423,7 +423,7 @@ static struct page_info *alloc_heap_page
    85.4  
    85.5      if ( unlikely(!cpus_empty(mask)) )
    85.6      {
    85.7 -        perfc_incrc(need_flush_tlb_flush);
    85.8 +        perfc_incr(need_flush_tlb_flush);
    85.9          flush_tlb_mask(mask);
   85.10      }
   85.11  
    86.1 --- a/xen/common/perfc.c	Tue Mar 27 12:21:48 2007 -0600
    86.2 +++ b/xen/common/perfc.c	Wed Mar 28 10:38:41 2007 +0100
    86.3 @@ -10,81 +10,98 @@
    86.4  #include <public/sysctl.h>
    86.5  #include <asm/perfc.h>
    86.6  
    86.7 -#undef  PERFCOUNTER
    86.8 -#undef  PERFCOUNTER_CPU
    86.9 -#undef  PERFCOUNTER_ARRAY
   86.10 -#undef  PERFSTATUS
   86.11 -#undef  PERFSTATUS_CPU
   86.12 -#undef  PERFSTATUS_ARRAY
   86.13  #define PERFCOUNTER( var, name )              { name, TYPE_SINGLE, 0 },
   86.14 -#define PERFCOUNTER_CPU( var, name )          { name, TYPE_CPU,    0 },
   86.15  #define PERFCOUNTER_ARRAY( var, name, size )  { name, TYPE_ARRAY,  size },
   86.16  #define PERFSTATUS( var, name )               { name, TYPE_S_SINGLE, 0 },
   86.17 -#define PERFSTATUS_CPU( var, name )           { name, TYPE_S_CPU,    0 },
   86.18  #define PERFSTATUS_ARRAY( var, name, size )   { name, TYPE_S_ARRAY,  size },
   86.19 -static struct {
   86.20 -    char *name;
   86.21 -    enum { TYPE_SINGLE, TYPE_CPU, TYPE_ARRAY,
   86.22 -           TYPE_S_SINGLE, TYPE_S_CPU, TYPE_S_ARRAY
   86.23 +static const struct {
   86.24 +    const char *name;
   86.25 +    enum { TYPE_SINGLE, TYPE_ARRAY,
   86.26 +           TYPE_S_SINGLE, TYPE_S_ARRAY
   86.27      } type;
   86.28 -    int nr_elements;
   86.29 +    unsigned int nr_elements;
   86.30  } perfc_info[] = {
   86.31  #include <xen/perfc_defn.h>
   86.32  };
   86.33  
   86.34  #define NR_PERFCTRS (sizeof(perfc_info) / sizeof(perfc_info[0]))
   86.35  
   86.36 -struct perfcounter perfcounters;
   86.37 +DEFINE_PER_CPU(perfc_t[NUM_PERFCOUNTERS], perfcounters);
   86.38  
   86.39  void perfc_printall(unsigned char key)
   86.40  {
   86.41 -    unsigned int i, j, sum;
   86.42 +    unsigned int i, j;
   86.43      s_time_t now = NOW();
   86.44 -    atomic_t *counters = (atomic_t *)&perfcounters;
   86.45  
   86.46      printk("Xen performance counters SHOW  (now = 0x%08X:%08X)\n",
   86.47             (u32)(now>>32), (u32)now);
   86.48  
   86.49 -    for ( i = 0; i < NR_PERFCTRS; i++ ) 
   86.50 +    for ( i = j = 0; i < NR_PERFCTRS; i++ )
   86.51      {
   86.52 +        unsigned int k, cpu;
   86.53 +        unsigned long long sum = 0;
   86.54 +
   86.55          printk("%-32s  ",  perfc_info[i].name);
   86.56          switch ( perfc_info[i].type )
   86.57          {
   86.58          case TYPE_SINGLE:
   86.59          case TYPE_S_SINGLE:
   86.60 -            printk("TOTAL[%10d]", atomic_read(&counters[0]));
   86.61 -            counters += 1;
   86.62 -            break;
   86.63 -        case TYPE_CPU:
   86.64 -        case TYPE_S_CPU:
   86.65 -            sum = 0;
   86.66 -            for_each_online_cpu ( j )
   86.67 -                sum += atomic_read(&counters[j]);
   86.68 -            printk("TOTAL[%10u]", sum);
   86.69 -            if (sum)
   86.70 +            for_each_online_cpu ( cpu )
   86.71 +                sum += per_cpu(perfcounters, cpu)[j];
   86.72 +            printk("TOTAL[%12Lu]", sum);
   86.73 +            if ( sum )
   86.74              {
   86.75 -                for_each_online_cpu ( j )
   86.76 -                    printk("  CPU%02d[%10d]", j, atomic_read(&counters[j]));
   86.77 +                k = 0;
   86.78 +                for_each_online_cpu ( cpu )
   86.79 +                {
   86.80 +                    if ( k > 0 && (k % 4) == 0 )
   86.81 +                        printk("\n%46s", "");
   86.82 +                    printk("  CPU%02u[%10"PRIperfc"u]", cpu, per_cpu(perfcounters, cpu)[j]);
   86.83 +                    ++k;
   86.84 +                }
   86.85              }
   86.86 -            counters += NR_CPUS;
   86.87 +            ++j;
   86.88              break;
   86.89          case TYPE_ARRAY:
   86.90          case TYPE_S_ARRAY:
   86.91 -            for ( j = sum = 0; j < perfc_info[i].nr_elements; j++ )
   86.92 -                sum += atomic_read(&counters[j]);
   86.93 -            printk("TOTAL[%10u]", sum);
   86.94 -#ifdef PERF_ARRAYS
   86.95 +            for_each_online_cpu ( cpu )
   86.96 +            {
   86.97 +                perfc_t *counters = per_cpu(perfcounters, cpu) + j;
   86.98 +
   86.99 +                for ( k = 0; k < perfc_info[i].nr_elements; k++ )
  86.100 +                    sum += counters[k];
  86.101 +            }
  86.102 +            printk("TOTAL[%12Lu]", sum);
  86.103              if (sum)
  86.104              {
  86.105 -                for ( j = 0; j < perfc_info[i].nr_elements; j++ )
  86.106 +#ifdef PERF_ARRAYS
  86.107 +                for ( k = 0; k < perfc_info[i].nr_elements; k++ )
  86.108 +                {
  86.109 +                    sum = 0;
  86.110 +                    for_each_online_cpu ( cpu )
  86.111 +                        sum += per_cpu(perfcounters, cpu)[j + k];
  86.112 +                    if ( (k % 4) == 0 )
  86.113 +                        printk("\n%16s", "");
  86.114 +                    printk("  ARR%02u[%10Lu]", k, sum);
  86.115 +                }
  86.116 +#else
  86.117 +                k = 0;
  86.118 +                for_each_online_cpu ( cpu )
  86.119                  {
  86.120 -                    if ( (j % 4) == 0 )
  86.121 -                        printk("\n                 ");
  86.122 -                    printk("  ARR%02d[%10d]", j, atomic_read(&counters[j]));
  86.123 +                    perfc_t *counters = per_cpu(perfcounters, cpu) + j;
  86.124 +                    unsigned int n;
  86.125 +
  86.126 +                    sum = 0;
  86.127 +                    for ( n = 0; n < perfc_info[i].nr_elements; n++ )
  86.128 +                        sum += counters[n];
  86.129 +                    if ( k > 0 && (k % 4) == 0 )
  86.130 +                        printk("\n%46s", "");
  86.131 +                    printk("  CPU%02u[%10Lu]", cpu, sum);
  86.132 +                    ++k;
  86.133                  }
  86.134 +#endif
  86.135              }
  86.136 -#endif
  86.137 -            counters += j;
  86.138 +            j += perfc_info[i].nr_elements;
  86.139              break;
  86.140          }
  86.141          printk("\n");
  86.142 @@ -97,7 +114,6 @@ void perfc_reset(unsigned char key)
  86.143  {
  86.144      unsigned int i, j;
  86.145      s_time_t now = NOW();
  86.146 -    atomic_t *counters = (atomic_t *)&perfcounters;
  86.147  
  86.148      if ( key != '\0' )
  86.149          printk("Xen performance counters RESET (now = 0x%08X:%08X)\n",
  86.150 @@ -105,43 +121,39 @@ void perfc_reset(unsigned char key)
  86.151  
  86.152      /* leave STATUS counters alone -- don't reset */
  86.153  
  86.154 -    for ( i = 0; i < NR_PERFCTRS; i++ ) 
  86.155 +    for ( i = j = 0; i < NR_PERFCTRS; i++ )
  86.156      {
  86.157 +        unsigned int cpu;
  86.158 +
  86.159          switch ( perfc_info[i].type )
  86.160          {
  86.161          case TYPE_SINGLE:
  86.162 -            atomic_set(&counters[0],0);
  86.163 +            for_each_cpu ( cpu )
  86.164 +                per_cpu(perfcounters, cpu)[j] = 0;
  86.165          case TYPE_S_SINGLE:
  86.166 -            counters += 1;
  86.167 -            break;
  86.168 -        case TYPE_CPU:
  86.169 -            for ( j = 0; j < NR_CPUS; j++ )
  86.170 -                atomic_set(&counters[j],0);
  86.171 -        case TYPE_S_CPU:
  86.172 -            counters += NR_CPUS;
  86.173 +            ++j;
  86.174              break;
  86.175          case TYPE_ARRAY:
  86.176 -            for ( j = 0; j < perfc_info[i].nr_elements; j++ )
  86.177 -                atomic_set(&counters[j],0);
  86.178 +            for_each_cpu ( cpu )
  86.179 +                memset(per_cpu(perfcounters, cpu) + j, 0,
  86.180 +                       perfc_info[i].nr_elements * sizeof(perfc_t));
  86.181          case TYPE_S_ARRAY:
  86.182 -            counters += perfc_info[i].nr_elements;
  86.183 +            j += perfc_info[i].nr_elements;
  86.184              break;
  86.185          }
  86.186      }
  86.187  
  86.188 -    arch_perfc_reset ();
  86.189 +    arch_perfc_reset();
  86.190  }
  86.191  
  86.192  static xen_sysctl_perfc_desc_t perfc_d[NR_PERFCTRS];
  86.193  static xen_sysctl_perfc_val_t *perfc_vals;
  86.194 -static int               perfc_nbr_vals;
  86.195 +static unsigned int      perfc_nbr_vals;
  86.196  static int               perfc_init = 0;
  86.197  static int perfc_copy_info(XEN_GUEST_HANDLE_64(xen_sysctl_perfc_desc_t) desc,
  86.198                             XEN_GUEST_HANDLE_64(xen_sysctl_perfc_val_t) val)
  86.199  {
  86.200 -    unsigned int i, j;
  86.201 -    unsigned int v = 0;
  86.202 -    atomic_t *counters = (atomic_t *)&perfcounters;
  86.203 +    unsigned int i, j, v;
  86.204  
  86.205      /* We only copy the name and array-size information once. */
  86.206      if ( !perfc_init ) 
  86.207 @@ -154,11 +166,7 @@ static int perfc_copy_info(XEN_GUEST_HAN
  86.208              {
  86.209              case TYPE_SINGLE:
  86.210              case TYPE_S_SINGLE:
  86.211 -                perfc_d[i].nr_vals = 1;
  86.212 -                break;
  86.213 -            case TYPE_CPU:
  86.214 -            case TYPE_S_CPU:
  86.215 -                perfc_d[i].nr_vals = num_online_cpus();
  86.216 +                perfc_d[i].nr_vals = num_possible_cpus();
  86.217                  break;
  86.218              case TYPE_ARRAY:
  86.219              case TYPE_S_ARRAY:
  86.220 @@ -181,26 +189,31 @@ static int perfc_copy_info(XEN_GUEST_HAN
  86.221      arch_perfc_gather();
  86.222  
  86.223      /* We gather the counts together every time. */
  86.224 -    for ( i = 0; i < NR_PERFCTRS; i++ )
  86.225 +    for ( i = j = v = 0; i < NR_PERFCTRS; i++ )
  86.226      {
  86.227 +        unsigned int cpu;
  86.228 +
  86.229          switch ( perfc_info[i].type )
  86.230          {
  86.231          case TYPE_SINGLE:
  86.232          case TYPE_S_SINGLE:
  86.233 -            perfc_vals[v++] = atomic_read(&counters[0]);
  86.234 -            counters += 1;
  86.235 -            break;
  86.236 -        case TYPE_CPU:
  86.237 -        case TYPE_S_CPU:
  86.238 -            for ( j = 0; j < perfc_d[i].nr_vals; j++ )
  86.239 -                perfc_vals[v++] = atomic_read(&counters[j]);
  86.240 -            counters += NR_CPUS;
  86.241 +            for_each_cpu ( cpu )
  86.242 +                perfc_vals[v++] = per_cpu(perfcounters, cpu)[j];
  86.243 +            ++j;
  86.244              break;
  86.245          case TYPE_ARRAY:
  86.246          case TYPE_S_ARRAY:
  86.247 -            for ( j = 0; j < perfc_d[i].nr_vals; j++ )
  86.248 -                perfc_vals[v++] = atomic_read(&counters[j]);
  86.249 -            counters += perfc_info[i].nr_elements;
  86.250 +            memset(perfc_vals + v, 0, perfc_d[i].nr_vals * sizeof(*perfc_vals));
  86.251 +            for_each_cpu ( cpu )
  86.252 +            {
  86.253 +                perfc_t *counters = per_cpu(perfcounters, cpu) + j;
  86.254 +                unsigned int k;
  86.255 +
  86.256 +                for ( k = 0; k < perfc_d[i].nr_vals; k++ )
  86.257 +                    perfc_vals[v + k] += counters[k];
  86.258 +            }
  86.259 +            v += perfc_d[i].nr_vals;
  86.260 +            j += perfc_info[i].nr_elements;
  86.261              break;
  86.262          }
  86.263      }
  86.264 @@ -224,14 +237,12 @@ int perfc_control(xen_sysctl_perfc_op_t 
  86.265      switch ( pc->cmd )
  86.266      {
  86.267      case XEN_SYSCTL_PERFCOP_reset:
  86.268 -        perfc_copy_info(pc->desc, pc->val);
  86.269 +        rc = perfc_copy_info(pc->desc, pc->val);
  86.270          perfc_reset(0);
  86.271 -        rc = 0;
  86.272          break;
  86.273  
  86.274      case XEN_SYSCTL_PERFCOP_query:
  86.275 -        perfc_copy_info(pc->desc, pc->val);
  86.276 -        rc = 0;
  86.277 +        rc = perfc_copy_info(pc->desc, pc->val);
  86.278          break;
  86.279  
  86.280      default:
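
The reworked perfc_copy_info() above now sums each CPU's private copy of a counter into one flat output buffer instead of reading a shared atomic array. A minimal user-space sketch of that gathering step, with NR_CPUS/NR_COUNTERS and a plain 2-D array standing in for Xen's per_cpu() machinery (all names below are illustrative, not Xen's):

    #include <stdio.h>

    /* Illustrative stand-ins for Xen's per-CPU counter storage; the real
     * code walks per_cpu(perfcounters, cpu) under for_each_cpu(). */
    #define NR_CPUS     4
    #define NR_COUNTERS 3

    typedef unsigned int perfc_t;

    static perfc_t percpu_counters[NR_CPUS][NR_COUNTERS] = {
        { 1, 2, 3 }, { 4, 5, 6 }, { 7, 8, 9 }, { 10, 11, 12 },
    };

    /* Sum every CPU's copy of each counter into one flat array, mirroring
     * the TYPE_ARRAY branch of perfc_copy_info(). */
    static void gather(perfc_t *out)
    {
        unsigned int cpu, i;

        for ( i = 0; i < NR_COUNTERS; i++ )
            out[i] = 0;
        for ( cpu = 0; cpu < NR_CPUS; cpu++ )
            for ( i = 0; i < NR_COUNTERS; i++ )
                out[i] += percpu_counters[cpu][i];
    }

    int main(void)
    {
        perfc_t vals[NR_COUNTERS];
        unsigned int i;

        gather(vals);
        for ( i = 0; i < NR_COUNTERS; i++ )
            printf("counter %u = %u\n", i, vals[i]);   /* 22, 26, 30 */
        return 0;
    }

Because each CPU only ever increments its own block, plain non-atomic additions suffice on the update path, which is why this changeset can drop the atomic_t counters; the read side simply tolerates a little skew while summing.
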
    87.1 --- a/xen/common/schedule.c	Tue Mar 27 12:21:48 2007 -0600
    87.2 +++ b/xen/common/schedule.c	Wed Mar 28 10:38:41 2007 +0100
    87.3 @@ -606,7 +606,7 @@ static void schedule(void)
    87.4      ASSERT(!in_irq());
    87.5      ASSERT(this_cpu(mc_state).flags == 0);
    87.6  
    87.7 -    perfc_incrc(sched_run);
    87.8 +    perfc_incr(sched_run);
    87.9  
   87.10      sd = &this_cpu(schedule_data);
   87.11  
   87.12 @@ -654,16 +654,13 @@ static void schedule(void)
   87.13  
   87.14      spin_unlock_irq(&sd->schedule_lock);
   87.15  
   87.16 -    perfc_incrc(sched_ctx);
   87.17 +    perfc_incr(sched_ctx);
   87.18  
   87.19      stop_timer(&prev->periodic_timer);
   87.20  
   87.21      /* Ensure that the domain has an up-to-date time base. */
   87.22 -    if ( !is_idle_vcpu(next) )
   87.23 -    {
   87.24 -        update_vcpu_system_time(next);
   87.25 -        vcpu_periodic_timer_work(next);
   87.26 -    }
   87.27 +    update_vcpu_system_time(next);
   87.28 +    vcpu_periodic_timer_work(next);
   87.29  
   87.30      TRACE_4D(TRC_SCHED_SWITCH,
   87.31               prev->domain->domain_id, prev->vcpu_id,
   87.32 @@ -684,7 +681,7 @@ void context_saved(struct vcpu *prev)
   87.33  static void s_timer_fn(void *unused)
   87.34  {
   87.35      raise_softirq(SCHEDULE_SOFTIRQ);
   87.36 -    perfc_incrc(sched_irq);
   87.37 +    perfc_incr(sched_irq);
   87.38  }
   87.39  
   87.40  /* Per-VCPU periodic timer function: sends a virtual timer interrupt. */
    88.1 --- a/xen/drivers/char/console.c	Tue Mar 27 12:21:48 2007 -0600
    88.2 +++ b/xen/drivers/char/console.c	Wed Mar 28 10:38:41 2007 +0100
    88.3 @@ -900,12 +900,18 @@ void panic(const char *fmt, ...)
    88.4  void __bug(char *file, int line)
    88.5  {
    88.6      console_start_sync();
    88.7 -    printk("BUG at %s:%d\n", file, line);
    88.8 +    printk("Xen BUG at %s:%d\n", file, line);
    88.9      dump_execution_state();
   88.10 -    panic("BUG at %s:%d\n", file, line);
   88.11 +    panic("Xen BUG at %s:%d\n", file, line);
   88.12      for ( ; ; ) ;
   88.13  }
   88.14  
   88.15 +void __warn(char *file, int line)
   88.16 +{
   88.17 +    printk("Xen WARN at %s:%d\n", file, line);
   88.18 +    dump_execution_state();
   88.19 +}
   88.20 +
   88.21  /*
   88.22   * Local variables:
   88.23   * mode: C
    89.1 --- a/xen/include/asm-ia64/bug.h	Tue Mar 27 12:21:48 2007 -0600
    89.2 +++ b/xen/include/asm-ia64/bug.h	Wed Mar 28 10:38:41 2007 +0100
    89.3 @@ -2,5 +2,6 @@
    89.4  #define __IA64_BUG_H__
    89.5  
    89.6  #define BUG() __bug(__FILE__, __LINE__)
    89.7 +#define WARN() __warn(__FILE__, __LINE__)
    89.8  
    89.9  #endif /* __IA64_BUG_H__ */
    90.1 --- a/xen/include/asm-ia64/linux-xen/asm/asmmacro.h	Tue Mar 27 12:21:48 2007 -0600
    90.2 +++ b/xen/include/asm-ia64/linux-xen/asm/asmmacro.h	Wed Mar 28 10:38:41 2007 +0100
    90.3 @@ -116,4 +116,8 @@ 2:{ .mib;						\
    90.4  # define dv_serialize_instruction
    90.5  #endif
    90.6  
    90.7 +#ifdef PERF_COUNTERS
    90.8 +#define PERFC(n) (THIS_CPU(perfcounters) + (IA64_PERFC_ ## n) * 4)
    90.9 +#endif
   90.10 +
   90.11  #endif /* _ASM_IA64_ASMMACRO_H */
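
The new PERFC(n) helper above yields the byte offset of counter n inside the current CPU's perfcounters[] block (perfc_t is 4 bytes wide). A C-level equivalent of that address computation, using made-up stand-ins for THIS_CPU() and the IA64_PERFC_* enumerators:

    #include <stdio.h>

    typedef unsigned int perfc_t;   /* matches the reworked xen/perfc.h */

    /* Stand-ins for THIS_CPU(perfcounters) and an IA64_PERFC_* index; the
     * real values come from the generated enum and asm offsets. */
    static perfc_t this_cpu_perfcounters[16];
    #define IA64_PERFC_example 5

    int main(void)
    {
        /* PERFC(example): base of the per-CPU block plus index * 4. */
        perfc_t *p = (perfc_t *)((char *)this_cpu_perfcounters +
                                 IA64_PERFC_example * sizeof(perfc_t));

        (*p)++;
        printf("slot 5 = %u\n", this_cpu_perfcounters[5]);   /* prints 1 */
        return 0;
    }
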
    91.1 --- a/xen/include/asm-ia64/linux-xen/asm/iosapic.h	Tue Mar 27 12:21:48 2007 -0600
    91.2 +++ b/xen/include/asm-ia64/linux-xen/asm/iosapic.h	Wed Mar 28 10:38:41 2007 +0100
    91.3 @@ -123,13 +123,6 @@ static inline void list_move(struct list
    91.4  
    91.5  #define move_irq(x)
    91.6  
    91.7 -#define WARN_ON(condition) do { \
    91.8 -	if (unlikely((condition)!=0)) { \
    91.9 -		printk("Badness in %s at %s:%d\n", __FUNCTION__, __FILE__, __LINE__); \
   91.10 -		dump_stack(); \
   91.11 -	} \
   91.12 -} while (0)
   91.13 -
   91.14  #ifdef nop
   91.15  #undef nop
   91.16  #endif
    92.1 --- a/xen/include/asm-ia64/perfc_defn.h	Tue Mar 27 12:21:48 2007 -0600
    92.2 +++ b/xen/include/asm-ia64/perfc_defn.h	Wed Mar 28 10:38:41 2007 +0100
    92.3 @@ -1,34 +1,34 @@
    92.4  /* This file is legitimately included multiple times. */
    92.5  
    92.6 -PERFCOUNTER_CPU(dtlb_translate,       "dtlb hit")
    92.7 +PERFCOUNTER(dtlb_translate,       "dtlb hit")
    92.8  
    92.9 -PERFCOUNTER_CPU(tr_translate,         "TR hit")
   92.10 +PERFCOUNTER(tr_translate,         "TR hit")
   92.11  
   92.12 -PERFCOUNTER_CPU(vhpt_translate,       "virtual vhpt translation")
   92.13 -PERFCOUNTER_CPU(fast_vhpt_translate,  "virtual vhpt fast translation")
   92.14 +PERFCOUNTER(vhpt_translate,       "virtual vhpt translation")
   92.15 +PERFCOUNTER(fast_vhpt_translate,  "virtual vhpt fast translation")
   92.16  
   92.17  PERFCOUNTER(recover_to_page_fault,    "recoveries to page fault")
   92.18  PERFCOUNTER(recover_to_break_fault,   "recoveries to break fault")
   92.19  
   92.20 -PERFCOUNTER_CPU(phys_translate,       "metaphysical translation")
   92.21 +PERFCOUNTER(phys_translate,       "metaphysical translation")
   92.22  
   92.23 -PERFCOUNTER_CPU(idle_when_pending,    "vcpu idle at event")
   92.24 +PERFCOUNTER(idle_when_pending,    "vcpu idle at event")
   92.25  
   92.26 -PERFCOUNTER_CPU(pal_halt_light,       "calls to pal_halt_light")
   92.27 +PERFCOUNTER(pal_halt_light,       "calls to pal_halt_light")
   92.28  
   92.29 -PERFCOUNTER_CPU(lazy_cover,           "lazy cover")
   92.30 +PERFCOUNTER(lazy_cover,           "lazy cover")
   92.31  
   92.32 -PERFCOUNTER_CPU(mov_to_ar_imm,        "privop mov_to_ar_imm")
   92.33 -PERFCOUNTER_CPU(mov_to_ar_reg,        "privop mov_to_ar_reg")
   92.34 -PERFCOUNTER_CPU(mov_from_ar,          "privop privified-mov_from_ar")
   92.35 -PERFCOUNTER_CPU(ssm,                  "privop ssm")
   92.36 -PERFCOUNTER_CPU(rsm,                  "privop rsm")
   92.37 -PERFCOUNTER_CPU(rfi,                  "privop rfi")
   92.38 -PERFCOUNTER_CPU(bsw0,                 "privop bsw0")
   92.39 -PERFCOUNTER_CPU(bsw1,                 "privop bsw1")
   92.40 -PERFCOUNTER_CPU(cover,                "privop cover")
   92.41 -PERFCOUNTER_CPU(fc,                   "privop privified-fc")
   92.42 -PERFCOUNTER_CPU(cpuid,                "privop privified-cpuid")
   92.43 +PERFCOUNTER(mov_to_ar_imm,        "privop mov_to_ar_imm")
   92.44 +PERFCOUNTER(mov_to_ar_reg,        "privop mov_to_ar_reg")
   92.45 +PERFCOUNTER(mov_from_ar,          "privop privified-mov_from_ar")
   92.46 +PERFCOUNTER(ssm,                  "privop ssm")
   92.47 +PERFCOUNTER(rsm,                  "privop rsm")
   92.48 +PERFCOUNTER(rfi,                  "privop rfi")
   92.49 +PERFCOUNTER(bsw0,                 "privop bsw0")
   92.50 +PERFCOUNTER(bsw1,                 "privop bsw1")
   92.51 +PERFCOUNTER(cover,                "privop cover")
   92.52 +PERFCOUNTER(fc,                   "privop privified-fc")
   92.53 +PERFCOUNTER(cpuid,                "privop privified-cpuid")
   92.54  
   92.55  PERFCOUNTER_ARRAY(mov_to_cr,          "privop mov to cr", 128)
   92.56  PERFCOUNTER_ARRAY(mov_from_cr,        "privop mov from cr", 128)
   92.57 @@ -36,45 +36,45 @@ PERFCOUNTER_ARRAY(mov_from_cr,        "p
   92.58  PERFCOUNTER_ARRAY(misc_privop,        "privop misc", 64)
   92.59  
   92.60  // privileged instructions to fall into vmx_entry
   92.61 -PERFCOUNTER_CPU(vmx_rsm,              "vmx privop rsm")
   92.62 -PERFCOUNTER_CPU(vmx_ssm,              "vmx privop ssm")
   92.63 -PERFCOUNTER_CPU(vmx_mov_to_psr,       "vmx privop mov_to_psr")
   92.64 -PERFCOUNTER_CPU(vmx_mov_from_psr,     "vmx privop mov_from_psr")
   92.65 -PERFCOUNTER_CPU(vmx_mov_from_cr,      "vmx privop mov_from_cr")
   92.66 -PERFCOUNTER_CPU(vmx_mov_to_cr,        "vmx privop mov_to_cr")
   92.67 -PERFCOUNTER_CPU(vmx_bsw0,             "vmx privop bsw0")
   92.68 -PERFCOUNTER_CPU(vmx_bsw1,             "vmx privop bsw1")
   92.69 -PERFCOUNTER_CPU(vmx_cover,            "vmx privop cover")
   92.70 -PERFCOUNTER_CPU(vmx_rfi,              "vmx privop rfi")
   92.71 -PERFCOUNTER_CPU(vmx_itr_d,            "vmx privop itr_d")
   92.72 -PERFCOUNTER_CPU(vmx_itr_i,            "vmx privop itr_i")
   92.73 -PERFCOUNTER_CPU(vmx_ptr_d,            "vmx privop ptr_d")
   92.74 -PERFCOUNTER_CPU(vmx_ptr_i,            "vmx privop ptr_i")
   92.75 -PERFCOUNTER_CPU(vmx_itc_d,            "vmx privop itc_d")
   92.76 -PERFCOUNTER_CPU(vmx_itc_i,            "vmx privop itc_i")
   92.77 -PERFCOUNTER_CPU(vmx_ptc_l,            "vmx privop ptc_l")
   92.78 -PERFCOUNTER_CPU(vmx_ptc_g,            "vmx privop ptc_g")
   92.79 -PERFCOUNTER_CPU(vmx_ptc_ga,           "vmx privop ptc_ga")
   92.80 -PERFCOUNTER_CPU(vmx_ptc_e,            "vmx privop ptc_e")
   92.81 -PERFCOUNTER_CPU(vmx_mov_to_rr,        "vmx privop mov_to_rr")
   92.82 -PERFCOUNTER_CPU(vmx_mov_from_rr,      "vmx privop mov_from_rr")
   92.83 -PERFCOUNTER_CPU(vmx_thash,            "vmx privop thash")
   92.84 -PERFCOUNTER_CPU(vmx_ttag,             "vmx privop ttag")
   92.85 -PERFCOUNTER_CPU(vmx_tpa,              "vmx privop tpa")
   92.86 -PERFCOUNTER_CPU(vmx_tak,              "vmx privop tak")
   92.87 -PERFCOUNTER_CPU(vmx_mov_to_ar_imm,    "vmx privop mov_to_ar_imm")
   92.88 -PERFCOUNTER_CPU(vmx_mov_to_ar_reg,    "vmx privop mov_to_ar_reg")
   92.89 -PERFCOUNTER_CPU(vmx_mov_from_ar_reg,  "vmx privop mov_from_ar_reg")
   92.90 -PERFCOUNTER_CPU(vmx_mov_to_dbr,       "vmx privop mov_to_dbr")
   92.91 -PERFCOUNTER_CPU(vmx_mov_to_ibr,       "vmx privop mov_to_ibr")
   92.92 -PERFCOUNTER_CPU(vmx_mov_to_pmc,       "vmx privop mov_to_pmc")
   92.93 -PERFCOUNTER_CPU(vmx_mov_to_pmd,       "vmx privop mov_to_pmd")
   92.94 -PERFCOUNTER_CPU(vmx_mov_to_pkr,       "vmx privop mov_to_pkr")
   92.95 -PERFCOUNTER_CPU(vmx_mov_from_dbr,     "vmx privop mov_from_dbr")
   92.96 -PERFCOUNTER_CPU(vmx_mov_from_ibr,     "vmx privop mov_from_ibr")
   92.97 -PERFCOUNTER_CPU(vmx_mov_from_pmc,     "vmx privop mov_from_pmc")
   92.98 -PERFCOUNTER_CPU(vmx_mov_from_pkr,     "vmx privop mov_from_pkr")
   92.99 -PERFCOUNTER_CPU(vmx_mov_from_cpuid,   "vmx privop mov_from_cpuid")
  92.100 +PERFCOUNTER(vmx_rsm,              "vmx privop rsm")
  92.101 +PERFCOUNTER(vmx_ssm,              "vmx privop ssm")
  92.102 +PERFCOUNTER(vmx_mov_to_psr,       "vmx privop mov_to_psr")
  92.103 +PERFCOUNTER(vmx_mov_from_psr,     "vmx privop mov_from_psr")
  92.104 +PERFCOUNTER(vmx_mov_from_cr,      "vmx privop mov_from_cr")
  92.105 +PERFCOUNTER(vmx_mov_to_cr,        "vmx privop mov_to_cr")
  92.106 +PERFCOUNTER(vmx_bsw0,             "vmx privop bsw0")
  92.107 +PERFCOUNTER(vmx_bsw1,             "vmx privop bsw1")
  92.108 +PERFCOUNTER(vmx_cover,            "vmx privop cover")
  92.109 +PERFCOUNTER(vmx_rfi,              "vmx privop rfi")
  92.110 +PERFCOUNTER(vmx_itr_d,            "vmx privop itr_d")
  92.111 +PERFCOUNTER(vmx_itr_i,            "vmx privop itr_i")
  92.112 +PERFCOUNTER(vmx_ptr_d,            "vmx privop ptr_d")
  92.113 +PERFCOUNTER(vmx_ptr_i,            "vmx privop ptr_i")
  92.114 +PERFCOUNTER(vmx_itc_d,            "vmx privop itc_d")
  92.115 +PERFCOUNTER(vmx_itc_i,            "vmx privop itc_i")
  92.116 +PERFCOUNTER(vmx_ptc_l,            "vmx privop ptc_l")
  92.117 +PERFCOUNTER(vmx_ptc_g,            "vmx privop ptc_g")
  92.118 +PERFCOUNTER(vmx_ptc_ga,           "vmx privop ptc_ga")
  92.119 +PERFCOUNTER(vmx_ptc_e,            "vmx privop ptc_e")
  92.120 +PERFCOUNTER(vmx_mov_to_rr,        "vmx privop mov_to_rr")
  92.121 +PERFCOUNTER(vmx_mov_from_rr,      "vmx privop mov_from_rr")
  92.122 +PERFCOUNTER(vmx_thash,            "vmx privop thash")
  92.123 +PERFCOUNTER(vmx_ttag,             "vmx privop ttag")
  92.124 +PERFCOUNTER(vmx_tpa,              "vmx privop tpa")
  92.125 +PERFCOUNTER(vmx_tak,              "vmx privop tak")
  92.126 +PERFCOUNTER(vmx_mov_to_ar_imm,    "vmx privop mov_to_ar_imm")
  92.127 +PERFCOUNTER(vmx_mov_to_ar_reg,    "vmx privop mov_to_ar_reg")
  92.128 +PERFCOUNTER(vmx_mov_from_ar_reg,  "vmx privop mov_from_ar_reg")
  92.129 +PERFCOUNTER(vmx_mov_to_dbr,       "vmx privop mov_to_dbr")
  92.130 +PERFCOUNTER(vmx_mov_to_ibr,       "vmx privop mov_to_ibr")
  92.131 +PERFCOUNTER(vmx_mov_to_pmc,       "vmx privop mov_to_pmc")
  92.132 +PERFCOUNTER(vmx_mov_to_pmd,       "vmx privop mov_to_pmd")
  92.133 +PERFCOUNTER(vmx_mov_to_pkr,       "vmx privop mov_to_pkr")
  92.134 +PERFCOUNTER(vmx_mov_from_dbr,     "vmx privop mov_from_dbr")
  92.135 +PERFCOUNTER(vmx_mov_from_ibr,     "vmx privop mov_from_ibr")
  92.136 +PERFCOUNTER(vmx_mov_from_pmc,     "vmx privop mov_from_pmc")
  92.137 +PERFCOUNTER(vmx_mov_from_pkr,     "vmx privop mov_from_pkr")
  92.138 +PERFCOUNTER(vmx_mov_from_cpuid,   "vmx privop mov_from_cpuid")
  92.139  
  92.140  
  92.141  PERFCOUNTER_ARRAY(slow_hyperprivop,   "slow hyperprivops", HYPERPRIVOP_MAX + 1)
  92.142 @@ -84,12 +84,12 @@ PERFCOUNTER_ARRAY(slow_reflect,       "s
  92.143  PERFCOUNTER_ARRAY(fast_reflect,       "fast reflection", 0x80)
  92.144  
  92.145  PERFSTATUS(vhpt_nbr_entries,          "nbr of entries per VHPT")
  92.146 -PERFSTATUS_CPU(vhpt_valid_entries,    "nbr of valid entries in VHPT")
  92.147 +PERFSTATUS(vhpt_valid_entries,        "nbr of valid entries in VHPT")
  92.148  
  92.149  PERFCOUNTER_ARRAY(vmx_mmio_access,    "vmx_mmio_access", 8)
  92.150 -PERFCOUNTER_CPU(vmx_pal_emul,         "vmx_pal_emul")
  92.151 +PERFCOUNTER(vmx_pal_emul,         "vmx_pal_emul")
  92.152  PERFCOUNTER_ARRAY(vmx_switch_mm_mode, "vmx_switch_mm_mode", 8)
  92.153 -PERFCOUNTER_CPU(vmx_ia64_handle_break,"vmx_ia64_handle_break")
  92.154 +PERFCOUNTER(vmx_ia64_handle_break,"vmx_ia64_handle_break")
  92.155  PERFCOUNTER_ARRAY(vmx_inject_guest_interruption,
  92.156                                        "vmx_inject_guest_interruption", 0x80)
  92.157  PERFCOUNTER_ARRAY(fw_hypercall,       "fw_hypercall", 0x20)
  92.158 @@ -106,69 +106,71 @@ PERFSTATUS(privop_addr_##name##_overflow
  92.159  
  92.160  PERFPRIVOPADDR(get_ifa)
  92.161  PERFPRIVOPADDR(thash)
  92.162 +
  92.163 +#undef PERFPRIVOPADDR
  92.164  #endif
  92.165  
  92.166  // vhpt.c
  92.167 -PERFCOUNTER_CPU(local_vhpt_flush,               "local_vhpt_flush")
  92.168 -PERFCOUNTER_CPU(vcpu_vhpt_flush,                "vcpu_vhpt_flush")
  92.169 -PERFCOUNTER_CPU(vcpu_flush_vtlb_all,            "vcpu_flush_vtlb_all")
  92.170 -PERFCOUNTER_CPU(domain_flush_vtlb_all,          "domain_flush_vtlb_all")
  92.171 -PERFCOUNTER_CPU(vcpu_flush_tlb_vhpt_range,      "vcpu_flush_tlb_vhpt_range")
  92.172 -PERFCOUNTER_CPU(domain_flush_vtlb_track_entry,  "domain_flush_vtlb_track_entry")
  92.173 -PERFCOUNTER_CPU(domain_flush_vtlb_local,        "domain_flush_vtlb_local")
  92.174 -PERFCOUNTER_CPU(domain_flush_vtlb_global,       "domain_flush_vtlb_global")
  92.175 -PERFCOUNTER_CPU(domain_flush_vtlb_range,        "domain_flush_vtlb_range")
  92.176 +PERFCOUNTER(local_vhpt_flush,               "local_vhpt_flush")
  92.177 +PERFCOUNTER(vcpu_vhpt_flush,                "vcpu_vhpt_flush")
  92.178 +PERFCOUNTER(vcpu_flush_vtlb_all,            "vcpu_flush_vtlb_all")
  92.179 +PERFCOUNTER(domain_flush_vtlb_all,          "domain_flush_vtlb_all")
  92.180 +PERFCOUNTER(vcpu_flush_tlb_vhpt_range,      "vcpu_flush_tlb_vhpt_range")
  92.181 +PERFCOUNTER(domain_flush_vtlb_track_entry,  "domain_flush_vtlb_track_entry")
  92.182 +PERFCOUNTER(domain_flush_vtlb_local,        "domain_flush_vtlb_local")
  92.183 +PERFCOUNTER(domain_flush_vtlb_global,       "domain_flush_vtlb_global")
  92.184 +PERFCOUNTER(domain_flush_vtlb_range,        "domain_flush_vtlb_range")
  92.185  
  92.186  // domain.c
  92.187 -PERFCOUNTER_CPU(flush_vtlb_for_context_switch,  "flush_vtlb_for_context_switch")
  92.188 +PERFCOUNTER(flush_vtlb_for_context_switch,  "flush_vtlb_for_context_switch")
  92.189  
  92.190  // mm.c
  92.191 -PERFCOUNTER_CPU(assign_domain_page_replace,     "assign_domain_page_replace")
  92.192 -PERFCOUNTER_CPU(assign_domain_pge_cmpxchg_rel,  "assign_domain_pge_cmpxchg_rel")
  92.193 -PERFCOUNTER_CPU(zap_dcomain_page_one,           "zap_dcomain_page_one")
  92.194 -PERFCOUNTER_CPU(dom0vp_zap_physmap,             "dom0vp_zap_physmap")
  92.195 -PERFCOUNTER_CPU(dom0vp_add_physmap,             "dom0vp_add_physmap")
  92.196 -PERFCOUNTER_CPU(create_grant_host_mapping,      "create_grant_host_mapping")
  92.197 -PERFCOUNTER_CPU(destroy_grant_host_mapping,     "destroy_grant_host_mapping")
  92.198 -PERFCOUNTER_CPU(steal_page_refcount,            "steal_page_refcount")
  92.199 -PERFCOUNTER_CPU(steal_page,                     "steal_page")
  92.200 -PERFCOUNTER_CPU(guest_physmap_add_page,         "guest_physmap_add_page")
  92.201 -PERFCOUNTER_CPU(guest_physmap_remove_page,      "guest_physmap_remove_page")
  92.202 -PERFCOUNTER_CPU(domain_page_flush_and_put,      "domain_page_flush_and_put")
  92.203 +PERFCOUNTER(assign_domain_page_replace,     "assign_domain_page_replace")
  92.204 +PERFCOUNTER(assign_domain_pge_cmpxchg_rel,  "assign_domain_pge_cmpxchg_rel")
  92.205 +PERFCOUNTER(zap_dcomain_page_one,           "zap_dcomain_page_one")
  92.206 +PERFCOUNTER(dom0vp_zap_physmap,             "dom0vp_zap_physmap")
  92.207 +PERFCOUNTER(dom0vp_add_physmap,             "dom0vp_add_physmap")
  92.208 +PERFCOUNTER(create_grant_host_mapping,      "create_grant_host_mapping")
  92.209 +PERFCOUNTER(destroy_grant_host_mapping,     "destroy_grant_host_mapping")
  92.210 +PERFCOUNTER(steal_page_refcount,            "steal_page_refcount")
  92.211 +PERFCOUNTER(steal_page,                     "steal_page")
  92.212 +PERFCOUNTER(guest_physmap_add_page,         "guest_physmap_add_page")
  92.213 +PERFCOUNTER(guest_physmap_remove_page,      "guest_physmap_remove_page")
  92.214 +PERFCOUNTER(domain_page_flush_and_put,      "domain_page_flush_and_put")
  92.215  
  92.216  // dom0vp
  92.217 -PERFCOUNTER_CPU(dom0vp_phystomach,              "dom0vp_phystomach")
  92.218 -PERFCOUNTER_CPU(dom0vp_machtophys,              "dom0vp_machtophys")
  92.219 +PERFCOUNTER(dom0vp_phystomach,              "dom0vp_phystomach")
  92.220 +PERFCOUNTER(dom0vp_machtophys,              "dom0vp_machtophys")
  92.221  
  92.222  #ifdef CONFIG_XEN_IA64_TLB_TRACK
  92.223  // insert or dirty
  92.224 -PERFCOUNTER_CPU(tlb_track_iod,                  "tlb_track_iod")
  92.225 -PERFCOUNTER_CPU(tlb_track_iod_again,            "tlb_track_iod_again")
  92.226 -PERFCOUNTER_CPU(tlb_track_iod_not_tracked,      "tlb_track_iod_not_tracked")
  92.227 -PERFCOUNTER_CPU(tlb_track_iod_force_many,       "tlb_track_iod_force_many")
  92.228 -PERFCOUNTER_CPU(tlb_track_iod_tracked_many,     "tlb_track_iod_tracked_many")
  92.229 -PERFCOUNTER_CPU(tlb_track_iod_tracked_many_del, "tlb_track_iod_tracked_many_del")
  92.230 -PERFCOUNTER_CPU(tlb_track_iod_found,            "tlb_track_iod_found")
  92.231 -PERFCOUNTER_CPU(tlb_track_iod_new_entry,        "tlb_track_iod_new_entry")
  92.232 -PERFCOUNTER_CPU(tlb_track_iod_new_failed,       "tlb_track_iod_new_failed")
  92.233 -PERFCOUNTER_CPU(tlb_track_iod_new_many,         "tlb_track_iod_new_many")
  92.234 -PERFCOUNTER_CPU(tlb_track_iod_insert,           "tlb_track_iod_insert")
  92.235 -PERFCOUNTER_CPU(tlb_track_iod_dirtied,          "tlb_track_iod_dirtied")
  92.236 +PERFCOUNTER(tlb_track_iod,                  "tlb_track_iod")
  92.237 +PERFCOUNTER(tlb_track_iod_again,            "tlb_track_iod_again")
  92.238 +PERFCOUNTER(tlb_track_iod_not_tracked,      "tlb_track_iod_not_tracked")
  92.239 +PERFCOUNTER(tlb_track_iod_force_many,       "tlb_track_iod_force_many")
  92.240 +PERFCOUNTER(tlb_track_iod_tracked_many,     "tlb_track_iod_tracked_many")
  92.241 +PERFCOUNTER(tlb_track_iod_tracked_many_del, "tlb_track_iod_tracked_many_del")
  92.242 +PERFCOUNTER(tlb_track_iod_found,            "tlb_track_iod_found")
  92.243 +PERFCOUNTER(tlb_track_iod_new_entry,        "tlb_track_iod_new_entry")
  92.244 +PERFCOUNTER(tlb_track_iod_new_failed,       "tlb_track_iod_new_failed")
  92.245 +PERFCOUNTER(tlb_track_iod_new_many,         "tlb_track_iod_new_many")
  92.246 +PERFCOUNTER(tlb_track_iod_insert,           "tlb_track_iod_insert")
  92.247 +PERFCOUNTER(tlb_track_iod_dirtied,          "tlb_track_iod_dirtied")
  92.248  
  92.249  // search and remove
  92.250 -PERFCOUNTER_CPU(tlb_track_sar,                  "tlb_track_sar")
  92.251 -PERFCOUNTER_CPU(tlb_track_sar_not_tracked,      "tlb_track_sar_not_tracked")
  92.252 -PERFCOUNTER_CPU(tlb_track_sar_not_found,        "tlb_track_sar_not_found")
  92.253 -PERFCOUNTER_CPU(tlb_track_sar_found,            "tlb_track_sar_found")
  92.254 -PERFCOUNTER_CPU(tlb_track_sar_many,             "tlb_track_sar_many")
  92.255 +PERFCOUNTER(tlb_track_sar,                  "tlb_track_sar")
  92.256 +PERFCOUNTER(tlb_track_sar_not_tracked,      "tlb_track_sar_not_tracked")
  92.257 +PERFCOUNTER(tlb_track_sar_not_found,        "tlb_track_sar_not_found")
  92.258 +PERFCOUNTER(tlb_track_sar_found,            "tlb_track_sar_found")
  92.259 +PERFCOUNTER(tlb_track_sar_many,             "tlb_track_sar_many")
  92.260  
  92.261  // flush
  92.262 -PERFCOUNTER_CPU(tlb_track_use_rr7,              "tlb_track_use_rr7")
  92.263 -PERFCOUNTER_CPU(tlb_track_swap_rr0,             "tlb_track_swap_rr0")
  92.264 +PERFCOUNTER(tlb_track_use_rr7,              "tlb_track_use_rr7")
  92.265 +PERFCOUNTER(tlb_track_swap_rr0,             "tlb_track_swap_rr0")
  92.266  #endif
  92.267  
  92.268  // tlb flush clock
  92.269  #ifdef CONFIG_XEN_IA64_TLBFLUSH_CLOCK
  92.270 -PERFCOUNTER_CPU(tlbflush_clock_cswitch_purge,  "tlbflush_clock_cswitch_purge")
  92.271 -PERFCOUNTER_CPU(tlbflush_clock_cswitch_skip,   "tlbflush_clock_cswitch_skip")
  92.272 +PERFCOUNTER(tlbflush_clock_cswitch_purge,  "tlbflush_clock_cswitch_purge")
  92.273 +PERFCOUNTER(tlbflush_clock_cswitch_skip,   "tlbflush_clock_cswitch_skip")
  92.274  #endif
    93.1 --- a/xen/include/asm-ia64/privop_stat.h	Tue Mar 27 12:21:48 2007 -0600
    93.2 +++ b/xen/include/asm-ia64/privop_stat.h	Wed Mar 28 10:38:41 2007 +0100
    93.3 @@ -1,5 +1,5 @@
    93.4 -#ifndef _XEN_UA64_PRIVOP_STAT_H
    93.5 -#define _XEN_UA64_PRIVOP_STAT_H
    93.6 +#ifndef _XEN_IA64_PRIVOP_STAT_H
    93.7 +#define _XEN_IA64_PRIVOP_STAT_H
    93.8  #include <asm/config.h>
    93.9  #include <xen/types.h>
   93.10  #include <public/xen.h>
   93.11 @@ -9,31 +9,24 @@
   93.12  extern void gather_privop_addrs(void);
   93.13  extern void reset_privop_addrs(void);
   93.14  
   93.15 -#undef  PERFCOUNTER
   93.16  #define PERFCOUNTER(var, name)
   93.17 -
   93.18 -#undef  PERFCOUNTER_CPU
   93.19 -#define PERFCOUNTER_CPU(var, name)
   93.20 -
   93.21 -#undef  PERFCOUNTER_ARRAY
   93.22  #define PERFCOUNTER_ARRAY(var, name, size)
   93.23  
   93.24 -#undef  PERFSTATUS
   93.25  #define PERFSTATUS(var, name)
   93.26 -
   93.27 -#undef  PERFSTATUS_CPU
   93.28 -#define PERFSTATUS_CPU(var, name)
   93.29 -
   93.30 -#undef  PERFSTATUS_ARRAY
   93.31  #define PERFSTATUS_ARRAY(var, name, size)
   93.32  
   93.33 -#undef  PERFPRIVOPADDR
   93.34  #define PERFPRIVOPADDR(name) privop_inst_##name,
   93.35  
   93.36  enum privop_inst {
   93.37  #include <asm/perfc_defn.h>
   93.38  };
   93.39  
   93.40 +#undef PERFCOUNTER
   93.41 +#undef PERFCOUNTER_ARRAY
   93.42 +
   93.43 +#undef PERFSTATUS
   93.44 +#undef PERFSTATUS_ARRAY
   93.45 +
   93.46  #undef PERFPRIVOPADDR
   93.47  
   93.48  #define	PRIVOP_COUNT_ADDR(regs,inst) privop_count_addr(regs->cr_iip,inst)
   93.49 @@ -45,4 +38,4 @@ extern void privop_count_addr(unsigned l
   93.50  #define reset_privop_addrs() do {} while (0)
   93.51  #endif
   93.52  
   93.53 -#endif /* _XEN_UA64_PRIVOP_STAT_H */
   93.54 +#endif /* _XEN_IA64_PRIVOP_STAT_H */
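
privop_stat.h now builds enum privop_inst by redefining PERFPRIVOPADDR() and then including perfc_defn.h, the same multiple-include ("X-macro") trick the new xen/perfc.h uses to turn the counter list into enum indices. A self-contained sketch of the pattern; COUNTER_LIST and the counter names below are invented for the example:

    #include <stdio.h>

    /* Stand-in for a perfc_defn.h-style list; in Xen the entries live in a
     * separate header that is deliberately included more than once. */
    #define COUNTER_LIST     \
        COUNTER(page_faults) \
        COUNTER(tlb_flushes) \
        COUNTER(hypercalls)

    /* First expansion: generate enum indices. */
    #define COUNTER(name) IDX_##name,
    enum counter_idx { COUNTER_LIST NUM_COUNTERS };
    #undef COUNTER

    /* Second expansion: generate matching name strings. */
    #define COUNTER(name) #name,
    static const char *counter_names[] = { COUNTER_LIST };
    #undef COUNTER

    int main(void)
    {
        unsigned int i;

        for ( i = 0; i < NUM_COUNTERS; i++ )
            printf("%u: %s\n", i, counter_names[i]);
        return 0;
    }
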
    94.1 --- a/xen/include/asm-ia64/tlb_track.h	Tue Mar 27 12:21:48 2007 -0600
    94.2 +++ b/xen/include/asm-ia64/tlb_track.h	Wed Mar 28 10:38:41 2007 +0100
    94.3 @@ -97,9 +97,9 @@ vcpu_tlb_track_insert_or_dirty(struct vc
    94.4  {
    94.5      /* optimization.
    94.6         non-tracking pte is most common. */
    94.7 -    perfc_incrc(tlb_track_iod);
    94.8 +    perfc_incr(tlb_track_iod);
    94.9      if (!pte_tlb_tracking(entry->used)) {
   94.10 -        perfc_incrc(tlb_track_iod_not_tracked);
   94.11 +        perfc_incr(tlb_track_iod_not_tracked);
   94.12          return;
   94.13      }
   94.14  
    95.1 --- a/xen/include/asm-powerpc/bug.h	Tue Mar 27 12:21:48 2007 -0600
    95.2 +++ b/xen/include/asm-powerpc/bug.h	Wed Mar 28 10:38:41 2007 +0100
    95.3 @@ -2,5 +2,6 @@
    95.4  #define __POWERPC_BUG_H__
    95.5  
    95.6  #define BUG() __bug(__FILE__, __LINE__)
    95.7 +#define WARN() __warn(__FILE__, __LINE__)
    95.8  
    95.9  #endif /* __POWERPC_BUG_H__ */
    96.1 --- a/xen/include/asm-powerpc/debugger.h	Tue Mar 27 12:21:48 2007 -0600
    96.2 +++ b/xen/include/asm-powerpc/debugger.h	Wed Mar 28 10:38:41 2007 +0100
    96.3 @@ -67,10 +67,6 @@ static inline void unimplemented(void)
    96.4  #endif
    96.5  }
    96.6  
    96.7 -extern void __warn(char *file, int line);
    96.8 -#define WARN() __warn(__FILE__, __LINE__)
    96.9 -#define WARN_ON(_p) do { if (_p) WARN(); } while ( 0 )
   96.10 -
   96.11  extern void __attn(void);
   96.12  #define ATTN() __attn();
   96.13  
    97.1 --- a/xen/include/asm-x86/bug.h	Tue Mar 27 12:21:48 2007 -0600
    97.2 +++ b/xen/include/asm-x86/bug.h	Wed Mar 28 10:38:41 2007 +0100
    97.3 @@ -14,8 +14,8 @@ struct bug_frame {
    97.4  } __attribute__((packed));
    97.5  
    97.6  #define BUGFRAME_dump   0
    97.7 -#define BUGFRAME_bug    1
    97.8 -#define BUGFRAME_assert 2
    97.9 -#define BUGFRAME_rsvd   3
   97.10 +#define BUGFRAME_warn   1
   97.11 +#define BUGFRAME_bug    2
   97.12 +#define BUGFRAME_assert 3
   97.13  
   97.14  #endif /* __X86_BUG_H__ */
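
The renumbering above keeps all four BUGFRAME_* values within two bits, which the x86 WARN()/BUG() macros later in this changeset rely on when they encode a frame as type | (__LINE__ << 2) in the ud2/ret immediate. A small model of that packing, assuming the same layout:

    #include <assert.h>
    #include <stdio.h>

    #define BUGFRAME_dump   0
    #define BUGFRAME_warn   1
    #define BUGFRAME_bug    2
    #define BUGFRAME_assert 3

    /* Pack a frame type and a source line the way the x86 BUG()/WARN()
     * macros do: type in the low two bits, line number above it. */
    static unsigned int bug_encode(unsigned int type, unsigned int line)
    {
        return type | (line << 2);
    }

    int main(void)
    {
        unsigned int id = bug_encode(BUGFRAME_warn, 123);

        assert((id & 3) == BUGFRAME_warn);   /* recover the frame type */
        assert((id >> 2) == 123);            /* recover the line number */
        printf("type=%u line=%u\n", id & 3, id >> 2);
        return 0;
    }
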
    98.1 --- a/xen/include/asm-x86/hvm/svm/vmcb.h	Tue Mar 27 12:21:48 2007 -0600
    98.2 +++ b/xen/include/asm-x86/hvm/svm/vmcb.h	Wed Mar 28 10:38:41 2007 +0100
    98.3 @@ -446,7 +446,6 @@ struct arch_svm_struct {
    98.4      u64                 vmcb_pa;
    98.5      u32                 *msrpm;
    98.6      u64                 vmexit_tsc; /* tsc read at #VMEXIT. for TSC_OFFSET */
    98.7 -    int                 saved_irq_vector;
    98.8      int                 launch_core;
    98.9      
   98.10      unsigned long       flags;            /* VMCB flags */
    99.1 --- a/xen/include/asm-x86/multicall.h	Tue Mar 27 12:21:48 2007 -0600
    99.2 +++ b/xen/include/asm-x86/multicall.h	Wed Mar 28 10:38:41 2007 +0100
    99.3 @@ -6,84 +6,94 @@
    99.4  #define __ASM_X86_MULTICALL_H__
    99.5  
    99.6  #include <xen/errno.h>
    99.7 -#include <asm/asm_defns.h>
    99.8  
    99.9  #ifdef __x86_64__
   99.10  
   99.11  #define do_multicall_call(_call)                             \
   99.12      do {                                                     \
   99.13          __asm__ __volatile__ (                               \
   99.14 -            "    movq  "STR(MULTICALL_op)"(%0),%%rax; "      \
   99.15 +            "    movq  %c1(%0),%%rax; "                      \
   99.16 +            "    leaq  hypercall_table(%%rip),%%rdi; "       \
   99.17              "    cmpq  $("STR(NR_hypercalls)"),%%rax; "      \
   99.18              "    jae   2f; "                                 \
   99.19 -            "    leaq  hypercall_table(%%rip),%%rdi; "       \
   99.20 -            "    leaq  (%%rdi,%%rax,8),%%rax; "              \
   99.21 -            "    movq  "STR(MULTICALL_arg0)"(%0),%%rdi; "    \
   99.22 -            "    movq  "STR(MULTICALL_arg1)"(%0),%%rsi; "    \
   99.23 -            "    movq  "STR(MULTICALL_arg2)"(%0),%%rdx; "    \
   99.24 -            "    movq  "STR(MULTICALL_arg3)"(%0),%%rcx; "    \
   99.25 -            "    movq  "STR(MULTICALL_arg4)"(%0),%%r8; "     \
   99.26 -            "    callq *(%%rax); "                           \
   99.27 -            "1:  movq  %%rax,"STR(MULTICALL_result)"(%0)\n"  \
   99.28 +            "    movq  (%%rdi,%%rax,8),%%rax; "              \
   99.29 +            "    movq  %c2+0*%c3(%0),%%rdi; "                \
   99.30 +            "    movq  %c2+1*%c3(%0),%%rsi; "                \
   99.31 +            "    movq  %c2+2*%c3(%0),%%rdx; "                \
   99.32 +            "    movq  %c2+3*%c3(%0),%%rcx; "                \
   99.33 +            "    movq  %c2+4*%c3(%0),%%r8; "                 \
   99.34 +            "    callq *%%rax; "                             \
   99.35 +            "1:  movq  %%rax,%c4(%0)\n"                      \
   99.36              ".section .fixup,\"ax\"\n"                       \
   99.37              "2:  movq  $-"STR(ENOSYS)",%%rax\n"              \
   99.38              "    jmp   1b\n"                                 \
   99.39              ".previous\n"                                    \
   99.40 -            : : "b" (_call)                                  \
   99.41 +            :                                                \
   99.42 +            : "b" (_call),                                   \
   99.43 +              "i" (offsetof(__typeof__(*_call), op)),        \
   99.44 +              "i" (offsetof(__typeof__(*_call), args)),      \
   99.45 +              "i" (sizeof(*(_call)->args)),                  \
   99.46 +              "i" (offsetof(__typeof__(*_call), result))     \
   99.47                /* all the caller-saves registers */           \
   99.48              : "rax", "rcx", "rdx", "rsi", "rdi",             \
   99.49                "r8",  "r9",  "r10", "r11" );                  \
   99.50      } while ( 0 )
   99.51  
   99.52 -#define compat_multicall_call(_call)                              \
   99.53 -    do {                                                          \
   99.54 -        __asm__ __volatile__ (                                    \
   99.55 -            "    movl  "STR(COMPAT_MULTICALL_op)"(%0),%%eax; "    \
   99.56 -            "    leaq  compat_hypercall_table(%%rip),%%rdi; "     \
   99.57 -            "    cmpl  $("STR(NR_hypercalls)"),%%eax; "           \
   99.58 -            "    jae   2f; "                                      \
   99.59 -            "    movq  (%%rdi,%%rax,8),%%rax; "                   \
   99.60 -            "    movl  "STR(COMPAT_MULTICALL_arg0)"(%0),%%edi; "  \
   99.61 -            "    movl  "STR(COMPAT_MULTICALL_arg1)"(%0),%%esi; "  \
   99.62 -            "    movl  "STR(COMPAT_MULTICALL_arg2)"(%0),%%edx; "  \
   99.63 -            "    movl  "STR(COMPAT_MULTICALL_arg3)"(%0),%%ecx; "  \
   99.64 -            "    movl  "STR(COMPAT_MULTICALL_arg4)"(%0),%%r8d; "  \
   99.65 -            "    callq *%%rax; "                                  \
   99.66 -            "1:  movl  %%eax,"STR(COMPAT_MULTICALL_result)"(%0)\n"\
   99.67 -            ".section .fixup,\"ax\"\n"                            \
   99.68 -            "2:  movl  $-"STR(ENOSYS)",%%eax\n"                   \
   99.69 -            "    jmp   1b\n"                                      \
   99.70 -            ".previous\n"                                         \
   99.71 -            : : "b" (_call)                                       \
   99.72 -              /* all the caller-saves registers */                \
   99.73 -            : "rax", "rcx", "rdx", "rsi", "rdi",                  \
   99.74 -              "r8",  "r9",  "r10", "r11" );                       \
   99.75 -    } while ( 0 )
   99.76 +#define compat_multicall_call(_call)                         \
   99.77 +        __asm__ __volatile__ (                               \
   99.78 +            "    movl  %c1(%0),%%eax; "                      \
   99.79 +            "    leaq  compat_hypercall_table(%%rip),%%rdi; "\
   99.80 +            "    cmpl  $("STR(NR_hypercalls)"),%%eax; "      \
   99.81 +            "    jae   2f; "                                 \
   99.82 +            "    movq  (%%rdi,%%rax,8),%%rax; "              \
   99.83 +            "    movl  %c2+0*%c3(%0),%%edi; "                \
   99.84 +            "    movl  %c2+1*%c3(%0),%%esi; "                \
   99.85 +            "    movl  %c2+2*%c3(%0),%%edx; "                \
   99.86 +            "    movl  %c2+3*%c3(%0),%%ecx; "                \
   99.87 +            "    movl  %c2+4*%c3(%0),%%r8d; "                \
   99.88 +            "    callq *%%rax; "                             \
   99.89 +            "1:  movl  %%eax,%c4(%0)\n"                      \
   99.90 +            ".section .fixup,\"ax\"\n"                       \
   99.91 +            "2:  movl  $-"STR(ENOSYS)",%%eax\n"              \
   99.92 +            "    jmp   1b\n"                                 \
   99.93 +            ".previous\n"                                    \
   99.94 +            :                                                \
   99.95 +            : "b" (_call),                                   \
   99.96 +              "i" (offsetof(__typeof__(*_call), op)),        \
   99.97 +              "i" (offsetof(__typeof__(*_call), args)),      \
   99.98 +              "i" (sizeof(*(_call)->args)),                  \
   99.99 +              "i" (offsetof(__typeof__(*_call), result))     \
  99.100 +              /* all the caller-saves registers */           \
  99.101 +            : "rax", "rcx", "rdx", "rsi", "rdi",             \
  99.102 +              "r8",  "r9",  "r10", "r11" )                   \
  99.103  
  99.104  #else
  99.105  
  99.106  #define do_multicall_call(_call)                             \
  99.107 -    do {                                                     \
  99.108          __asm__ __volatile__ (                               \
  99.109 -            "    pushl "STR(MULTICALL_arg4)"(%0); "          \
  99.110 -            "    pushl "STR(MULTICALL_arg3)"(%0); "          \
  99.111 -            "    pushl "STR(MULTICALL_arg2)"(%0); "          \
  99.112 -            "    pushl "STR(MULTICALL_arg1)"(%0); "          \
  99.113 -            "    pushl "STR(MULTICALL_arg0)"(%0); "          \
  99.114 -            "    movl  "STR(MULTICALL_op)"(%0),%%eax; "      \
  99.115 +            "    movl  %c1(%0),%%eax; "                      \
  99.116 +            "    pushl %c2+4*%c3(%0); "                      \
  99.117 +            "    pushl %c2+3*%c3(%0); "                      \
  99.118 +            "    pushl %c2+2*%c3(%0); "                      \
  99.119 +            "    pushl %c2+1*%c3(%0); "                      \
  99.120 +            "    pushl %c2+0*%c3(%0); "                      \
  99.121              "    cmpl  $("STR(NR_hypercalls)"),%%eax; "      \
  99.122              "    jae   2f; "                                 \
  99.123              "    call  *hypercall_table(,%%eax,4); "         \
  99.124 -            "1:  movl  %%eax,"STR(MULTICALL_result)"(%0); "  \
  99.125 +            "1:  movl  %%eax,%c4(%0); "                      \
  99.126              "    addl  $20,%%esp\n"                          \
  99.127              ".section .fixup,\"ax\"\n"                       \
  99.128              "2:  movl  $-"STR(ENOSYS)",%%eax\n"              \
  99.129              "    jmp   1b\n"                                 \
  99.130              ".previous\n"                                    \
  99.131 -            : : "b" (_call)                                  \
  99.132 +            :                                                \
  99.133 +            : "bSD" (_call),                                 \
  99.134 +              "i" (offsetof(__typeof__(*_call), op)),        \
  99.135 +              "i" (offsetof(__typeof__(*_call), args)),      \
  99.136 +              "i" (sizeof(*(_call)->args)),                  \
  99.137 +              "i" (offsetof(__typeof__(*_call), result))     \
  99.138                /* all the caller-saves registers */           \
  99.139 -            : "eax", "ecx", "edx" );                         \
  99.140 -    } while ( 0 )
  99.141 +            : "eax", "ecx", "edx" )                          \
  99.142  
  99.143  #endif
  99.144  
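
The rewritten multicall stubs drop the generated MULTICALL_*/COMPAT_MULTICALL_* offset strings and instead pass offsetof()/sizeof() into the asm as immediate ("i") operands printed with GCC's %c modifier. A stand-alone illustration of that technique on a toy structure (struct call and copy_arg1_to_result() are invented for the example; x86-64, GCC/Clang only):

    #include <stddef.h>
    #include <stdio.h>

    /* Toy structure; the field layout loosely mirrors a multicall entry. */
    struct call {
        unsigned long op;
        unsigned long args[5];
        unsigned long result;
    };

    /* Copy call->args[1] into call->result using only immediate offsets,
     * i.e. the same "i"-constraint + %c-modifier trick as above. */
    static void copy_arg1_to_result(struct call *c)
    {
        asm volatile (
            "movq %c1+1*%c2(%0), %%rax\n\t"
            "movq %%rax, %c3(%0)"
            :
            : "r" (c),
              "i" (offsetof(struct call, args)),
              "i" (sizeof(c->args[0])),
              "i" (offsetof(struct call, result))
            : "rax", "memory" );
    }

    int main(void)
    {
        struct call c = { .op = 1, .args = { 10, 42, 0, 0, 0 }, .result = 0 };

        copy_arg1_to_result(&c);
        printf("result = %lu\n", c.result);   /* prints 42 */
        return 0;
    }

Keeping the offsets inside the C source removes the dependency on asm_defns.h/asm-offsets for these stubs, which is why the #include of asm/asm_defns.h could be dropped above.
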
   100.1 --- a/xen/include/asm-x86/perfc_defn.h	Tue Mar 27 12:21:48 2007 -0600
   100.2 +++ b/xen/include/asm-x86/perfc_defn.h	Wed Mar 28 10:38:41 2007 +0100
   100.3 @@ -12,81 +12,83 @@ PERFCOUNTER_ARRAY(cause_vector,         
   100.4  #define SVM_PERF_EXIT_REASON_SIZE (1+136)
   100.5  PERFCOUNTER_ARRAY(svmexits,             "SVMexits", SVM_PERF_EXIT_REASON_SIZE)
   100.6  
   100.7 -PERFCOUNTER_CPU(seg_fixups,             "segmentation fixups")
   100.8 +PERFCOUNTER(seg_fixups,             "segmentation fixups")
   100.9  
  100.10 -PERFCOUNTER_CPU(apic_timer,             "apic timer interrupts")
  100.11 +PERFCOUNTER(apic_timer,             "apic timer interrupts")
  100.12  
  100.13 -PERFCOUNTER_CPU(domain_page_tlb_flush,  "domain page tlb flushes")
  100.14 +PERFCOUNTER(domain_page_tlb_flush,  "domain page tlb flushes")
  100.15  
  100.16 -PERFCOUNTER_CPU(calls_to_mmu_update,    "calls_to_mmu_update")
  100.17 -PERFCOUNTER_CPU(num_page_updates,       "num_page_updates")
  100.18 -PERFCOUNTER_CPU(calls_to_update_va,     "calls_to_update_va_map")
  100.19 -PERFCOUNTER_CPU(page_faults,            "page faults")
  100.20 -PERFCOUNTER_CPU(copy_user_faults,       "copy_user faults")
  100.21 +PERFCOUNTER(calls_to_mmuext_op,         "calls to mmuext_op")
  100.22 +PERFCOUNTER(num_mmuext_ops,             "mmuext ops")
  100.23 +PERFCOUNTER(calls_to_mmu_update,        "calls to mmu_update")
  100.24 +PERFCOUNTER(num_page_updates,           "page updates")
  100.25 +PERFCOUNTER(calls_to_update_va,         "calls to update_va_map")
  100.26 +PERFCOUNTER(page_faults,            "page faults")
  100.27 +PERFCOUNTER(copy_user_faults,       "copy_user faults")
  100.28  
  100.29 -PERFCOUNTER_CPU(map_domain_page_count,  "map_domain_page count")
  100.30 -PERFCOUNTER_CPU(ptwr_emulations,        "writable pt emulations")
  100.31 +PERFCOUNTER(map_domain_page_count,  "map_domain_page count")
  100.32 +PERFCOUNTER(ptwr_emulations,        "writable pt emulations")
  100.33  
  100.34 -PERFCOUNTER_CPU(exception_fixed,        "pre-exception fixed")
  100.35 +PERFCOUNTER(exception_fixed,        "pre-exception fixed")
  100.36  
  100.37  
  100.38  /* Shadow counters */
  100.39 -PERFCOUNTER_CPU(shadow_alloc,          "calls to shadow_alloc")
  100.40 -PERFCOUNTER_CPU(shadow_alloc_tlbflush, "shadow_alloc flushed TLBs")
  100.41 +PERFCOUNTER(shadow_alloc,          "calls to shadow_alloc")
  100.42 +PERFCOUNTER(shadow_alloc_tlbflush, "shadow_alloc flushed TLBs")
  100.43  
  100.44  /* STATUS counters do not reset when 'P' is hit */
  100.45  PERFSTATUS(shadow_alloc_count,         "number of shadow pages in use")
  100.46 -PERFCOUNTER_CPU(shadow_free,           "calls to shadow_free")
  100.47 -PERFCOUNTER_CPU(shadow_prealloc_1,     "shadow recycles old shadows")
  100.48 -PERFCOUNTER_CPU(shadow_prealloc_2,     "shadow recycles in-use shadows")
  100.49 -PERFCOUNTER_CPU(shadow_linear_map_failed, "shadow hit read-only linear map")
  100.50 -PERFCOUNTER_CPU(shadow_a_update,       "shadow A bit update")
  100.51 -PERFCOUNTER_CPU(shadow_ad_update,      "shadow A&D bit update")
  100.52 -PERFCOUNTER_CPU(shadow_fault,          "calls to shadow_fault")
  100.53 -PERFCOUNTER_CPU(shadow_fault_fast_gnp, "shadow_fault fast path n/p")
  100.54 -PERFCOUNTER_CPU(shadow_fault_fast_mmio, "shadow_fault fast path mmio")
  100.55 -PERFCOUNTER_CPU(shadow_fault_fast_fail, "shadow_fault fast path error")
  100.56 -PERFCOUNTER_CPU(shadow_fault_bail_bad_gfn, "shadow_fault guest bad gfn")
  100.57 -PERFCOUNTER_CPU(shadow_fault_bail_not_present, 
  100.58 +PERFCOUNTER(shadow_free,           "calls to shadow_free")
  100.59 +PERFCOUNTER(shadow_prealloc_1,     "shadow recycles old shadows")
  100.60 +PERFCOUNTER(shadow_prealloc_2,     "shadow recycles in-use shadows")
  100.61 +PERFCOUNTER(shadow_linear_map_failed, "shadow hit read-only linear map")
  100.62 +PERFCOUNTER(shadow_a_update,       "shadow A bit update")
  100.63 +PERFCOUNTER(shadow_ad_update,      "shadow A&D bit update")
  100.64 +PERFCOUNTER(shadow_fault,          "calls to shadow_fault")
  100.65 +PERFCOUNTER(shadow_fault_fast_gnp, "shadow_fault fast path n/p")
  100.66 +PERFCOUNTER(shadow_fault_fast_mmio, "shadow_fault fast path mmio")
  100.67 +PERFCOUNTER(shadow_fault_fast_fail, "shadow_fault fast path error")
  100.68 +PERFCOUNTER(shadow_fault_bail_bad_gfn, "shadow_fault guest bad gfn")
  100.69 +PERFCOUNTER(shadow_fault_bail_not_present, 
  100.70                                          "shadow_fault guest not-present")
  100.71 -PERFCOUNTER_CPU(shadow_fault_bail_nx,  "shadow_fault guest NX fault")
  100.72 -PERFCOUNTER_CPU(shadow_fault_bail_ro_mapping, "shadow_fault guest R/W fault")
  100.73 -PERFCOUNTER_CPU(shadow_fault_bail_user_supervisor, 
  100.74 +PERFCOUNTER(shadow_fault_bail_nx,  "shadow_fault guest NX fault")
  100.75 +PERFCOUNTER(shadow_fault_bail_ro_mapping, "shadow_fault guest R/W fault")
  100.76 +PERFCOUNTER(shadow_fault_bail_user_supervisor, 
  100.77                                          "shadow_fault guest U/S fault")
  100.78 -PERFCOUNTER_CPU(shadow_fault_emulate_read, "shadow_fault emulates a read")
  100.79 -PERFCOUNTER_CPU(shadow_fault_emulate_write, "shadow_fault emulates a write")
  100.80 -PERFCOUNTER_CPU(shadow_fault_emulate_failed, "shadow_fault emulator fails")
  100.81 -PERFCOUNTER_CPU(shadow_fault_emulate_stack, "shadow_fault emulate stack write")
  100.82 -PERFCOUNTER_CPU(shadow_fault_mmio,     "shadow_fault handled as mmio")
  100.83 -PERFCOUNTER_CPU(shadow_fault_fixed,    "shadow_fault fixed fault")
  100.84 -PERFCOUNTER_CPU(shadow_ptwr_emulate,   "shadow causes ptwr to emulate")
  100.85 -PERFCOUNTER_CPU(shadow_validate_gl1e_calls, "calls to shadow_validate_gl1e")
  100.86 -PERFCOUNTER_CPU(shadow_validate_gl2e_calls, "calls to shadow_validate_gl2e")
  100.87 -PERFCOUNTER_CPU(shadow_validate_gl3e_calls, "calls to shadow_validate_gl3e")
  100.88 -PERFCOUNTER_CPU(shadow_validate_gl4e_calls, "calls to shadow_validate_gl4e")
  100.89 -PERFCOUNTER_CPU(shadow_hash_lookups,   "calls to shadow_hash_lookup")
  100.90 -PERFCOUNTER_CPU(shadow_hash_lookup_head, "shadow hash hit in bucket head")
  100.91 -PERFCOUNTER_CPU(shadow_hash_lookup_miss, "shadow hash misses")
  100.92 -PERFCOUNTER_CPU(shadow_get_shadow_status, "calls to get_shadow_status")
  100.93 -PERFCOUNTER_CPU(shadow_hash_inserts,   "calls to shadow_hash_insert")
  100.94 -PERFCOUNTER_CPU(shadow_hash_deletes,   "calls to shadow_hash_delete")
  100.95 -PERFCOUNTER_CPU(shadow_writeable,      "shadow removes write access")
  100.96 -PERFCOUNTER_CPU(shadow_writeable_h_1,  "shadow writeable: 32b w2k3")
  100.97 -PERFCOUNTER_CPU(shadow_writeable_h_2,  "shadow writeable: 32pae w2k3")
  100.98 -PERFCOUNTER_CPU(shadow_writeable_h_3,  "shadow writeable: 64b w2k3")
  100.99 -PERFCOUNTER_CPU(shadow_writeable_h_4,  "shadow writeable: 32b linux low")
 100.100 -PERFCOUNTER_CPU(shadow_writeable_h_5,  "shadow writeable: 32b linux high")
 100.101 -PERFCOUNTER_CPU(shadow_writeable_bf,   "shadow writeable brute-force")
 100.102 -PERFCOUNTER_CPU(shadow_mappings,       "shadow removes all mappings")
 100.103 -PERFCOUNTER_CPU(shadow_mappings_bf,    "shadow rm-mappings brute-force")
 100.104 -PERFCOUNTER_CPU(shadow_early_unshadow, "shadow unshadows for fork/exit")
 100.105 -PERFCOUNTER_CPU(shadow_unshadow,       "shadow unshadows a page")
 100.106 -PERFCOUNTER_CPU(shadow_up_pointer,     "shadow unshadow by up-pointer")
 100.107 -PERFCOUNTER_CPU(shadow_unshadow_bf,    "shadow unshadow brute-force")
 100.108 -PERFCOUNTER_CPU(shadow_get_page_fail,  "shadow_get_page_from_l1e failed")
 100.109 -PERFCOUNTER_CPU(shadow_guest_walk,     "shadow walks guest tables")
 100.110 -PERFCOUNTER_CPU(shadow_invlpg,         "shadow emulates invlpg")
 100.111 -PERFCOUNTER_CPU(shadow_invlpg_fault,   "shadow invlpg faults")
 100.112 +PERFCOUNTER(shadow_fault_emulate_read, "shadow_fault emulates a read")
 100.113 +PERFCOUNTER(shadow_fault_emulate_write, "shadow_fault emulates a write")
 100.114 +PERFCOUNTER(shadow_fault_emulate_failed, "shadow_fault emulator fails")
 100.115 +PERFCOUNTER(shadow_fault_emulate_stack, "shadow_fault emulate stack write")
 100.116 +PERFCOUNTER(shadow_fault_mmio,     "shadow_fault handled as mmio")
 100.117 +PERFCOUNTER(shadow_fault_fixed,    "shadow_fault fixed fault")
 100.118 +PERFCOUNTER(shadow_ptwr_emulate,   "shadow causes ptwr to emulate")
 100.119 +PERFCOUNTER(shadow_validate_gl1e_calls, "calls to shadow_validate_gl1e")
 100.120 +PERFCOUNTER(shadow_validate_gl2e_calls, "calls to shadow_validate_gl2e")
 100.121 +PERFCOUNTER(shadow_validate_gl3e_calls, "calls to shadow_validate_gl3e")
 100.122 +PERFCOUNTER(shadow_validate_gl4e_calls, "calls to shadow_validate_gl4e")
 100.123 +PERFCOUNTER(shadow_hash_lookups,   "calls to shadow_hash_lookup")
 100.124 +PERFCOUNTER(shadow_hash_lookup_head, "shadow hash hit in bucket head")
 100.125 +PERFCOUNTER(shadow_hash_lookup_miss, "shadow hash misses")
 100.126 +PERFCOUNTER(shadow_get_shadow_status, "calls to get_shadow_status")
 100.127 +PERFCOUNTER(shadow_hash_inserts,   "calls to shadow_hash_insert")
 100.128 +PERFCOUNTER(shadow_hash_deletes,   "calls to shadow_hash_delete")
 100.129 +PERFCOUNTER(shadow_writeable,      "shadow removes write access")
 100.130 +PERFCOUNTER(shadow_writeable_h_1,  "shadow writeable: 32b w2k3")
 100.131 +PERFCOUNTER(shadow_writeable_h_2,  "shadow writeable: 32pae w2k3")
 100.132 +PERFCOUNTER(shadow_writeable_h_3,  "shadow writeable: 64b w2k3")
 100.133 +PERFCOUNTER(shadow_writeable_h_4,  "shadow writeable: 32b linux low")
 100.134 +PERFCOUNTER(shadow_writeable_h_5,  "shadow writeable: 32b linux high")
 100.135 +PERFCOUNTER(shadow_writeable_bf,   "shadow writeable brute-force")
 100.136 +PERFCOUNTER(shadow_mappings,       "shadow removes all mappings")
 100.137 +PERFCOUNTER(shadow_mappings_bf,    "shadow rm-mappings brute-force")
 100.138 +PERFCOUNTER(shadow_early_unshadow, "shadow unshadows for fork/exit")
 100.139 +PERFCOUNTER(shadow_unshadow,       "shadow unshadows a page")
 100.140 +PERFCOUNTER(shadow_up_pointer,     "shadow unshadow by up-pointer")
 100.141 +PERFCOUNTER(shadow_unshadow_bf,    "shadow unshadow brute-force")
 100.142 +PERFCOUNTER(shadow_get_page_fail,  "shadow_get_page_from_l1e failed")
 100.143 +PERFCOUNTER(shadow_guest_walk,     "shadow walks guest tables")
 100.144 +PERFCOUNTER(shadow_invlpg,         "shadow emulates invlpg")
 100.145 +PERFCOUNTER(shadow_invlpg_fault,   "shadow invlpg faults")
 100.146  
 100.147  
 100.148  /*#endif*/ /* __XEN_PERFC_DEFN_H__ */
   101.1 --- a/xen/include/asm-x86/x86_32/asm_defns.h	Tue Mar 27 12:21:48 2007 -0600
   101.2 +++ b/xen/include/asm-x86/x86_32/asm_defns.h	Wed Mar 28 10:38:41 2007 +0100
   101.3 @@ -1,6 +1,8 @@
   101.4  #ifndef __X86_32_ASM_DEFNS_H__
   101.5  #define __X86_32_ASM_DEFNS_H__
   101.6  
   101.7 +#include <asm/percpu.h>
   101.8 +
   101.9  #ifndef NDEBUG
  101.10  /* Indicate special exception stack frame by inverting the frame pointer. */
  101.11  #define SETUP_EXCEPTION_FRAME_POINTER           \
  101.12 @@ -47,10 +49,14 @@
  101.13          1:
  101.14  
  101.15  #ifdef PERF_COUNTERS
  101.16 -#define PERFC_INCR(_name,_idx)                          \
  101.17 -        lock incl perfcounters+_name(,_idx,4)
  101.18 +#define PERFC_INCR(_name,_idx,_cur)                     \
  101.19 +        pushl _cur;                                     \
  101.20 +        movl VCPU_processor(_cur),_cur;                 \
  101.21 +        shll $PERCPU_SHIFT,_cur;                        \
  101.22 +        incl per_cpu__perfcounters+_name*4(_cur,_idx,4);\
  101.23 +        popl _cur
  101.24  #else
  101.25 -#define PERFC_INCR(_name,_idx)
  101.26 +#define PERFC_INCR(_name,_idx,_cur)
  101.27  #endif
  101.28  
  101.29  #ifdef CONFIG_X86_SUPERVISOR_MODE_KERNEL
   102.1 --- a/xen/include/asm-x86/x86_32/bug.h	Tue Mar 27 12:21:48 2007 -0600
   102.2 +++ b/xen/include/asm-x86/x86_32/bug.h	Wed Mar 28 10:38:41 2007 +0100
   102.3 @@ -12,6 +12,12 @@ struct bug_frame_str {
   102.4          "ud2 ; ret $%c0"                                \
   102.5          : : "i" (BUGFRAME_dump) )
   102.6  
   102.7 +#define WARN()                                          \
   102.8 +    asm volatile (                                      \
   102.9 +        "ud2 ; ret $%c0 ; .byte 0xbc ; .long %c1"       \
  102.10 +        : : "i" (BUGFRAME_warn | (__LINE__<<2)),        \
  102.11 +            "i" (__FILE__) )
  102.12 +
  102.13  #define BUG()                                           \
  102.14      asm volatile (                                      \
  102.15          "ud2 ; ret $%c0 ; .byte 0xbc ; .long %c1"       \
   103.1 --- a/xen/include/asm-x86/x86_64/asm_defns.h	Tue Mar 27 12:21:48 2007 -0600
   103.2 +++ b/xen/include/asm-x86/x86_64/asm_defns.h	Wed Mar 28 10:38:41 2007 +0100
   103.3 @@ -1,6 +1,8 @@
   103.4  #ifndef __X86_64_ASM_DEFNS_H__
   103.5  #define __X86_64_ASM_DEFNS_H__
   103.6  
   103.7 +#include <asm/percpu.h>
   103.8 +
   103.9  #ifndef NDEBUG
  103.10  /* Indicate special exception stack frame by inverting the frame pointer. */
  103.11  #define SETUP_EXCEPTION_FRAME_POINTER           \
  103.12 @@ -47,13 +49,18 @@
  103.13          popq  %rdi;
  103.14  
  103.15  #ifdef PERF_COUNTERS
  103.16 -#define PERFC_INCR(_name,_idx)                  \
  103.17 -    pushq %rdx;                                 \
  103.18 -    leaq perfcounters+_name(%rip),%rdx;         \
  103.19 -    lock incl (%rdx,_idx,4);                    \
  103.20 -    popq %rdx;
  103.21 +#define PERFC_INCR(_name,_idx,_cur)             \
  103.22 +        pushq _cur;                             \
  103.23 +        movslq VCPU_processor(_cur),_cur;       \
  103.24 +        pushq %rdx;                             \
  103.25 +        leaq per_cpu__perfcounters(%rip),%rdx;  \
  103.26 +        shlq $PERCPU_SHIFT,_cur;                \
  103.27 +        addq %rdx,_cur;                         \
  103.28 +        popq %rdx;                              \
  103.29 +        incl _name*4(_cur,_idx,4);              \
  103.30 +        popq _cur
  103.31  #else
  103.32 -#define PERFC_INCR(_name,_idx)
  103.33 +#define PERFC_INCR(_name,_idx,_cur)
  103.34  #endif
  103.35  
  103.36  /* Work around AMD erratum #88 */
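
PERFC_INCR() now indexes the current CPU's private counter block (per_cpu__perfcounters + (cpu << PERCPU_SHIFT) + index*4) rather than lock-incrementing a shared array. A rough C model of that addressing; PERCPU_SHIFT, NR_CPUS and the block size below are illustrative values, not Xen's:

    #include <stddef.h>
    #include <stdio.h>

    typedef unsigned int perfc_t;

    #define PERCPU_SHIFT 6                    /* 64-byte blocks, for the sketch */
    #define NR_CPUS      4
    #define COUNTERS_PER_CPU ((1u << PERCPU_SHIFT) / sizeof(perfc_t))

    static perfc_t percpu_area[NR_CPUS][COUNTERS_PER_CPU];

    /* C model of PERFC_INCR(): locate the CPU's block by shifting the CPU
     * number, then bump counter idx within that block. */
    static void perfc_incr_model(unsigned int cpu, unsigned int idx)
    {
        perfc_t *block =
            (perfc_t *)((char *)percpu_area + ((size_t)cpu << PERCPU_SHIFT));

        block[idx]++;
    }

    int main(void)
    {
        perfc_incr_model(2, 3);
        perfc_incr_model(2, 3);
        printf("cpu2/counter3 = %u\n", percpu_area[2][3]);   /* prints 2 */
        return 0;
    }
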
   104.1 --- a/xen/include/asm-x86/x86_64/bug.h	Tue Mar 27 12:21:48 2007 -0600
   104.2 +++ b/xen/include/asm-x86/x86_64/bug.h	Wed Mar 28 10:38:41 2007 +0100
   104.3 @@ -12,6 +12,12 @@ struct bug_frame_str {
   104.4          "ud2 ; ret $%c0"                                \
   104.5          : : "i" (BUGFRAME_dump) )
   104.6  
   104.7 +#define WARN()                                          \
   104.8 +    asm volatile (                                      \
   104.9 +        "ud2 ; ret $%c0 ; .byte 0x48,0xbc ; .quad %c1"  \
  104.10 +        : : "i" (BUGFRAME_warn | (__LINE__<<2)),        \
  104.11 +            "i" (__FILE__) )
  104.12 +
  104.13  #define BUG()                                           \
  104.14      asm volatile (                                      \
  104.15          "ud2 ; ret $%c0 ; .byte 0x48,0xbc ; .quad %c1"  \
   105.1 --- a/xen/include/public/foreign/Makefile	Tue Mar 27 12:21:48 2007 -0600
   105.2 +++ b/xen/include/public/foreign/Makefile	Wed Mar 28 10:38:41 2007 +0100
   105.3 @@ -1,5 +1,5 @@
   105.4 -XEN_ROOT := ../../../..
   105.5 -include $(XEN_ROOT)/tools/Rules.mk
   105.6 +XEN_ROOT=../../../..
   105.7 +include $(XEN_ROOT)/Config.mk
   105.8  
   105.9  architectures := x86_32 x86_64 ia64
  105.10  headers := $(patsubst %, %.h, $(architectures))
   106.1 --- a/xen/include/xen/lib.h	Tue Mar 27 12:21:48 2007 -0600
   106.2 +++ b/xen/include/xen/lib.h	Wed Mar 28 10:38:41 2007 +0100
   106.3 @@ -10,8 +10,10 @@
   106.4  #include <asm/bug.h>
   106.5  
   106.6  void __bug(char *file, int line) __attribute__((noreturn));
   106.7 +void __warn(char *file, int line);
   106.8  
   106.9 -#define BUG_ON(_p) do { if (_p) BUG(); } while ( 0 )
  106.10 +#define BUG_ON(p)  do { if (p) BUG();  } while (0)
  106.11 +#define WARN_ON(p) do { if (p) WARN(); } while (0)
  106.12  
  106.13  /* Force a compilation error if condition is true */
  106.14  #define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2 * !!(condition)]))
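
The new WARN_ON() mirrors BUG_ON() but only reports (via the __warn() added to console.c earlier in this changeset) and lets execution continue. A small user-space model of the intended behaviour, with printf()/abort() standing in for printk()/panic():

    #include <stdio.h>
    #include <stdlib.h>

    /* User-space stand-ins for Xen's __warn()/__bug(). */
    static void my_warn(const char *file, int line)
    {
        printf("WARN at %s:%d\n", file, line);
    }

    static void my_bug(const char *file, int line)
    {
        printf("BUG at %s:%d\n", file, line);
        abort();
    }

    #define WARN_ON(p) do { if (p) my_warn(__FILE__, __LINE__); } while (0)
    #define BUG_ON(p)  do { if (p) my_bug(__FILE__, __LINE__);  } while (0)

    int main(void)
    {
        int bad_but_survivable = 1;

        WARN_ON(bad_but_survivable);   /* logs and carries on */
        BUG_ON(0);                     /* condition false: no effect */
        return 0;
    }
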
   107.1 --- a/xen/include/xen/perfc.h	Tue Mar 27 12:21:48 2007 -0600
   107.2 +++ b/xen/include/xen/perfc.h	Wed Mar 28 10:38:41 2007 +0100
   107.3 @@ -1,4 +1,3 @@
   107.4 -
   107.5  #ifndef __XEN_PERFC_H__
   107.6  #define __XEN_PERFC_H__
   107.7  
   107.8 @@ -6,102 +5,92 @@
   107.9  
  107.10  #include <xen/lib.h>
  107.11  #include <xen/smp.h>
  107.12 -#include <asm/atomic.h>
  107.13 +#include <xen/percpu.h>
  107.14  
  107.15 -/* 
  107.16 +/*
  107.17   * NOTE: new counters must be defined in perfc_defn.h
  107.18   * 
  107.19 + * Counter declarations:
  107.20   * PERFCOUNTER (counter, string)              define a new performance counter
  107.21 - * PERFCOUNTER_CPU (counter, string, size)    define a counter per CPU
  107.22 - * PERFCOUNTER_ARRY (counter, string, size)   define an array of counters
  107.23 + * PERFCOUNTER_ARRAY (counter, string, size)  define an array of counters
  107.24   * 
  107.25 - * unlike "COUNTERS", "STATUS" variables DO NOT RESET
  107.26 + * Unlike counters, status variables do not reset:
  107.27   * PERFSTATUS (counter, string)               define a new performance status
  107.28 - * PERFSTATUS_CPU (counter, string, size)     define a status var per CPU
  107.29 - * PERFSTATUS_ARRY (counter, string, size)    define an array of status vars
  107.30 + * PERFSTATUS_ARRAY (counter, string, size)   define an array of status vars
  107.31   * 
  107.32   * unsigned long perfc_value  (counter)        get value of a counter  
  107.33 - * unsigned long perfc_valuec (counter)        get value of a per CPU counter
  107.34   * unsigned long perfc_valuea (counter, index) get value of an array counter
  107.35   * unsigned long perfc_set  (counter, val)     set value of a counter  
  107.36 - * unsigned long perfc_setc (counter, val)     set value of a per CPU counter
  107.37   * unsigned long perfc_seta (counter, index, val) set value of an array counter
  107.38   * void perfc_incr  (counter)                  increment a counter          
  107.39 - * void perfc_incrc (counter, index)           increment a per CPU counter   
  107.40 + * void perfc_decr  (counter)                  decrement a status
  107.41   * void perfc_incra (counter, index)           increment an array counter   
  107.42   * void perfc_add   (counter, value)           add a value to a counter     
  107.43 - * void perfc_addc  (counter, value)           add a value to a per CPU counter
  107.44   * void perfc_adda  (counter, index, value)    add a value to array counter 
  107.45   * void perfc_print (counter)                  print out the counter
  107.46   */
  107.47  
  107.48 -#define PERFCOUNTER( var, name ) \
  107.49 -  atomic_t var[1];
  107.50 -#define PERFCOUNTER_CPU( var, name ) \
  107.51 -  atomic_t var[NR_CPUS];
  107.52 -#define PERFCOUNTER_ARRAY( var, name, size ) \
  107.53 -  atomic_t var[size];
  107.54 -#define PERFSTATUS( var, name ) \
  107.55 -  atomic_t var[1];
  107.56 -#define PERFSTATUS_CPU( var, name ) \
  107.57 -  atomic_t var[NR_CPUS];
  107.58 -#define PERFSTATUS_ARRAY( var, name, size ) \
  107.59 -  atomic_t var[size];
  107.60 +#define PERFCOUNTER( name, descr ) \
  107.61 +  PERFC_##name,
  107.62 +#define PERFCOUNTER_ARRAY( name, descr, size ) \
  107.63 +  PERFC_##name,                                \
  107.64 +  PERFC_LAST_##name = PERFC_ ## name + (size) - sizeof(char[2 * !!(size) - 1]),
  107.65  
  107.66 -struct perfcounter {
  107.67 +#define PERFSTATUS       PERFCOUNTER
  107.68 +#define PERFSTATUS_ARRAY PERFCOUNTER_ARRAY
  107.69 +
  107.70 +enum perfcounter {
  107.71  #include <xen/perfc_defn.h>
  107.72 +	NUM_PERFCOUNTERS
  107.73  };
  107.74  
  107.75 -extern struct perfcounter perfcounters;
  107.76 +#undef PERFCOUNTER
  107.77 +#undef PERFCOUNTER_ARRAY
  107.78 +#undef PERFSTATUS
  107.79 +#undef PERFSTATUS_ARRAY
  107.80  
  107.81 -#define perfc_value(x)    atomic_read(&perfcounters.x[0])
  107.82 -#define perfc_valuec(x)   atomic_read(&perfcounters.x[smp_processor_id()])
  107.83 +typedef unsigned perfc_t;
  107.84 +#define PRIperfc ""
  107.85 +
  107.86 +DECLARE_PER_CPU(perfc_t[NUM_PERFCOUNTERS], perfcounters);
  107.87 +
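Counter storage is now a plain per-CPU array of unsigned values updated with ordinary, non-atomic arithmetic, so obtaining a system-wide figure means summing the per-CPU copies. A minimal sketch of such a summation, assuming the usual Xen per-CPU helpers; the helper name is made up, and the real reporting code (presumably xen/common/perfc.c) may be structured differently:

    static unsigned long perfc_total(enum perfcounter idx)
    {
        unsigned long sum = 0;
        unsigned int cpu;

        /* Add up one counter slot across every online CPU's private array. */
        for_each_online_cpu ( cpu )
            sum += per_cpu(perfcounters, cpu)[idx];

        return sum;
    }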
  107.88 +#define perfc_value(x)    this_cpu(perfcounters)[PERFC_ ## x]
  107.89  #define perfc_valuea(x,y)                                               \
  107.90 -    ( (y) < (sizeof(perfcounters.x) / sizeof(*perfcounters.x)) ?	\
  107.91 -	atomic_read(&perfcounters.x[y]) : 0 )
  107.92 -#define perfc_set(x,v)    atomic_set(&perfcounters.x[0], v)
  107.93 -#define perfc_setc(x,v)   atomic_set(&perfcounters.x[smp_processor_id()], v)
  107.94 +    ( (y) <= PERFC_LAST_ ## x - PERFC_ ## x ?                           \
  107.95 +	 this_cpu(perfcounters)[PERFC_ ## x + (y)] : 0 )
  107.96 +#define perfc_set(x,v)    (this_cpu(perfcounters)[PERFC_ ## x] = (v))
  107.97  #define perfc_seta(x,y,v)                                               \
  107.98 -    do {                                                                \
  107.99 -        if ( (y) < (sizeof(perfcounters.x) / sizeof(*perfcounters.x)) ) \
 107.100 -            atomic_set(&perfcounters.x[y], v);                          \
 107.101 -    } while ( 0 )
 107.102 -#define perfc_incr(x)     atomic_inc(&perfcounters.x[0])
 107.103 -#define perfc_decr(x)     atomic_dec(&perfcounters.x[0])
 107.104 -#define perfc_incrc(x)    atomic_inc(&perfcounters.x[smp_processor_id()])
 107.105 -#define perfc_decrc(x)    atomic_dec(&perfcounters.x[smp_processor_id()])
 107.106 +    ( (y) <= PERFC_LAST_ ## x - PERFC_ ## x ?                           \
 107.107 +	 this_cpu(perfcounters)[PERFC_ ## x + (y)] = (v) : (v) )
 107.108 +#define perfc_incr(x)     (++this_cpu(perfcounters)[PERFC_ ## x])
 107.109 +#define perfc_decr(x)     (--this_cpu(perfcounters)[PERFC_ ## x])
 107.110  #define perfc_incra(x,y)                                                \
 107.111 -    do {                                                                \
 107.112 -        if ( (y) < (sizeof(perfcounters.x) / sizeof(*perfcounters.x)) ) \
 107.113 -            atomic_inc(&perfcounters.x[y]);                             \
 107.114 -    } while ( 0 )
 107.115 -#define perfc_add(x,y)    atomic_add((y), &perfcounters.x[0])
 107.116 -#define perfc_addc(x,y)   atomic_add((y), &perfcounters.x[smp_processor_id()])
 107.117 -#define perfc_adda(x,y,z)                                               \
 107.118 -    do {                                                                \
 107.119 -        if ( (y) < (sizeof(perfcounters.x) / sizeof(*perfcounters.x)) ) \
 107.120 -            atomic_add((z), &perfcounters.x[y]);                        \
 107.121 -    } while ( 0 )
 107.122 +    ( (y) <= PERFC_LAST_ ## x - PERFC_ ## x ?                           \
 107.123 +	 ++this_cpu(perfcounters)[PERFC_ ## x + (y)] : 0 )
 107.124 +#define perfc_add(x,v)    (this_cpu(perfcounters)[PERFC_ ## x] += (v))
 107.125 +#define perfc_adda(x,y,v)                                               \
 107.126 +    ( (y) <= PERFC_LAST_ ## x - PERFC_ ## x ?                           \
 107.127 +	 this_cpu(perfcounters)[PERFC_ ## x + (y)] += (v) : (v) )
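The array accessors now bounds-check against the enumerator range rather than the sizeof() of a struct member, and they are conditional expressions rather than do { ... } while ( 0 ) statements, so they yield a value. For the hypothetical 8-entry array above, perfc_incra(demo_sizes, i) expands to roughly:

    /* Valid indices are 0..7; anything larger leaves the counters untouched
     * and the expression evaluates to 0. */
    ( (i) <= PERFC_LAST_demo_sizes - PERFC_demo_sizes ?
         ++this_cpu(perfcounters)[PERFC_demo_sizes + (i)] : 0 )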
 107.128  
 107.129  /*
 107.130   * Histogram: values 0 and 1 each get their own bucket; larger values fall
 107.131   * into equally sized buckets, with the last bucket taking the rest.
 107.132   */
 107.133  #ifdef PERF_ARRAYS
 107.134 -#define perfc_incr_histo(_x,_v,_n)                                          \
 107.135 -    do {                                                                    \
 107.136 -        if ( (_v) == 0 )                                                    \
 107.137 -            perfc_incra(_x, 0);                                             \
 107.138 -        else if ( (_v) == 1 )                                               \
 107.139 -            perfc_incra(_x, 1);                                             \
 107.140 -        else if ( (((_v)-2) / PERFC_ ## _n ## _BUCKET_SIZE) <               \
 107.141 -                  (PERFC_MAX_ ## _n - 3) )                                  \
 107.142 -            perfc_incra(_x, (((_v)-2) / PERFC_ ## _n ## _BUCKET_SIZE) + 2); \
 107.143 -        else                                                                \
 107.144 -            perfc_incra(_x, PERFC_MAX_ ## _n - 1);                          \
 107.145 +#define perfc_incr_histo(x,v)                                           \
 107.146 +    do {                                                                \
 107.147 +        if ( (v) == 0 )                                                 \
 107.148 +            perfc_incra(x, 0);                                          \
 107.149 +        else if ( (v) == 1 )                                            \
 107.150 +            perfc_incra(x, 1);                                          \
 107.151 +        else if ( (((v) - 2) / PERFC_ ## x ## _BUCKET_SIZE) <           \
 107.152 +                  (PERFC_LAST_ ## x - PERFC_ ## x - 2) )                \
 107.153 +            perfc_incra(x, (((v) - 2) / PERFC_ ## x ## _BUCKET_SIZE) + 2); \
 107.154 +        else                                                            \
 107.155 +            perfc_incra(x, PERFC_LAST_ ## x - PERFC_ ## x);             \
 107.156      } while ( 0 )
 107.157  #else
 107.158 -#define perfc_incr_histo(_x,_v,_n) ((void)0)
 107.159 +#define perfc_incr_histo(x,v) ((void)0)
 107.160  #endif
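For a concrete picture of the bucketing, assume a hypothetical 8-entry histogram array whose PERFC_demo_sizes_BUCKET_SIZE is defined as 4 (bucket-size constants of this form conventionally sit next to the array declaration in perfc_defn.h). Values then map to bucket indices as follows:

    /*
     * v == 0         -> bucket 0
     * v == 1         -> bucket 1
     * v ==  2 ..  5  -> bucket 2    ((v - 2) / 4 + 2)
     * v ==  6 ..  9  -> bucket 3
     * v == 10 .. 13  -> bucket 4
     * v == 14 .. 17  -> bucket 5
     * v == 18 .. 21  -> bucket 6
     * v >= 22        -> bucket 7    (last bucket takes the rest)
     */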
 107.161  
 107.162  struct xen_sysctl_perfc_op;
 107.163 @@ -110,19 +99,14 @@ int perfc_control(struct xen_sysctl_perf
 107.164  #else /* PERF_COUNTERS */
 107.165  
 107.166  #define perfc_value(x)    (0)
 107.167 -#define perfc_valuec(x)   (0)
 107.168  #define perfc_valuea(x,y) (0)
 107.169  #define perfc_set(x,v)    ((void)0)
 107.170 -#define perfc_setc(x,v)   ((void)0)
 107.171  #define perfc_seta(x,y,v) ((void)0)
 107.172  #define perfc_incr(x)     ((void)0)
 107.173  #define perfc_decr(x)     ((void)0)
 107.174 -#define perfc_incrc(x)    ((void)0)
 107.175 -#define perfc_decrc(x)    ((void)0)
 107.176  #define perfc_incra(x,y)  ((void)0)
 107.177  #define perfc_decra(x,y)  ((void)0)
 107.178  #define perfc_add(x,y)    ((void)0)
 107.179 -#define perfc_addc(x,y)   ((void)0)
 107.180  #define perfc_adda(x,y,z) ((void)0)
 107.181  #define perfc_incr_histo(x,v) ((void)0)
 107.182  
   108.1 --- a/xen/include/xen/perfc_defn.h	Tue Mar 27 12:21:48 2007 -0600
   108.2 +++ b/xen/include/xen/perfc_defn.h	Wed Mar 28 10:38:41 2007 +0100
   108.3 @@ -6,13 +6,16 @@
   108.4  
   108.5  PERFCOUNTER_ARRAY(hypercalls,           "hypercalls", NR_hypercalls)
   108.6  
   108.7 -PERFCOUNTER_CPU(irqs,                   "#interrupts")
   108.8 -PERFCOUNTER_CPU(ipis,                   "#IPIs")
   108.9 +PERFCOUNTER(calls_to_multicall,         "calls to multicall")
  108.10 +PERFCOUNTER(calls_from_multicall,       "calls from multicall")
  108.11 +
  108.12 +PERFCOUNTER(irqs,                   "#interrupts")
  108.13 +PERFCOUNTER(ipis,                   "#IPIs")
  108.14  
  108.15 -PERFCOUNTER_CPU(sched_irq,              "sched: timer")
  108.16 -PERFCOUNTER_CPU(sched_run,              "sched: runs through scheduler")
  108.17 -PERFCOUNTER_CPU(sched_ctx,              "sched: context switches")
  108.18 +PERFCOUNTER(sched_irq,              "sched: timer")
  108.19 +PERFCOUNTER(sched_run,              "sched: runs through scheduler")
  108.20 +PERFCOUNTER(sched_ctx,              "sched: context switches")
  108.21  
  108.22 -PERFCOUNTER_CPU(need_flush_tlb_flush,   "PG_need_flush tlb flushes")
  108.23 +PERFCOUNTER(need_flush_tlb_flush,   "PG_need_flush tlb flushes")
  108.24  
  108.25  /*#endif*/ /* __XEN_PERFC_DEFN_H__ */
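perfc_defn.h works because its body is not include-guarded: the same list of declarations can be pulled in more than once under different macro definitions (the classic X-macro pattern). perfc.h above turns it into the enum; reporting code can turn the very same lines into a table of descriptions. A minimal sketch of that second use, with names of my own choosing (the real table is presumably built in xen/common/perfc.c and records more than just the description):

    /* One description string per declaration (arrays get a single entry). */
    #define PERFCOUNTER( name, descr )              descr,
    #define PERFCOUNTER_ARRAY( name, descr, size )  descr,
    #define PERFSTATUS        PERFCOUNTER
    #define PERFSTATUS_ARRAY  PERFCOUNTER_ARRAY

    static const char *perfc_descriptions[] = {
    #include <xen/perfc_defn.h>
    };

    #undef PERFCOUNTER
    #undef PERFCOUNTER_ARRAY
    #undef PERFSTATUS
    #undef PERFSTATUS_ARRAY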