ia64/xen-unstable
changeset 9153:c4ac21dc3f16
merge
author    awilliam@xenbuild.aw
date      Mon Mar 06 10:21:35 2006 -0700 (2006-03-06)
parents   ede16886f979 e8fb817c4c15
children  294e032f14af
files     linux-2.6-xen-sparse/include/xen/public/xenstored.h xen/arch/ia64/xen/xensetup.c
line diff
1.1 --- a/.hgignore Mon Mar 06 09:09:18 2006 -0700 1.2 +++ b/.hgignore Mon Mar 06 10:21:35 2006 -0700 1.3 @@ -166,6 +166,7 @@ 1.4 ^tools/xenstore/xenstore-read$ 1.5 ^tools/xenstore/xenstore-rm$ 1.6 ^tools/xenstore/xenstore-write$ 1.7 +^tools/xenstore/xenstore-control$ 1.8 ^tools/xenstore/xenstore-ls$ 1.9 ^tools/xenstore/xenstored$ 1.10 ^tools/xenstore/xenstored_test$
2.1 --- a/docs/src/interface.tex Mon Mar 06 09:09:18 2006 -0700
2.2 +++ b/docs/src/interface.tex Mon Mar 06 10:21:35 2006 -0700
2.3 @@ -1059,6 +1059,21 @@ This path contains:
2.4 \item[domain] the name of the frontend
2.5 \end{description}
2.6 \end{description}
2.7 +
2.8 + \item[vtpm/] a directory containing vtpm backends
2.9 + \begin{description}
2.10 + \item[$<$domid$>$/] a directory containing vtpms for domid
2.11 + \begin{description}
2.12 + \item[$<$vtpm number$>$/] a directory for each vtpm
2.13 + \item[frontend-id] the domain id of the frontend
2.14 + \item[frontend] the path to the frontend
2.15 + \item[instance] the instance of the virtual TPM that is used
2.16 + \item[pref{\textunderscore}instance] the instance number as given in the VM configuration file;
2.17 + may be different from {\bf instance}
2.18 + \item[domain] the name of the domain of the frontend
2.19 + \end{description}
2.20 + \end{description}
2.21 +
2.22 \end{description}
2.23
2.24 \item[device/] a directory containing the frontend devices for the
2.25 @@ -1094,6 +1109,18 @@ This path contains:
2.26 \item[event-channel] the event channel used for the two ring queues
2.27 \end{description}
2.28 \end{description}
2.29 +
2.30 + \item[vtpm/] a directory containing the vtpm frontend device for the
2.31 + domain
2.32 + \begin{description}
2.33 + \item[$<$id$>$] a directory for the vtpm id frontend device for the domain
2.34 + \begin{description}
2.35 + \item[backend-id] the backend domain id
2.36 + \item[backend] a path to the backend's store entry
2.37 + \item[ring-ref] the grant table reference for the tx/rx ring
2.38 + \item[event-channel] the event channel used for the ring
2.39 + \end{description}
2.40 + \end{description}
2.41
2.42 \item[device-misc/] miscellanous information for devices
2.43 \begin{description}
2.44 @@ -1450,6 +1477,76 @@ The fields are as follows:
2.45 value of {\tt first\_sect}.
2.46 \end{description}
2.47
2.48 +\section{Virtual TPM}
2.49 +
2.50 +Virtual TPM (VTPM) support provides TPM functionality to each virtual
2.51 +machine that requests this functionality in its configuration file.
2.52 +The interface enables domains to access their own private TPM as if it
2.53 +were a hardware TPM built into the machine.
2.54 +
2.55 +The virtual TPM interface is implemented as a split driver,
2.56 +similar to the network and block interfaces described above.
2.57 +The user domain hosting the frontend exports a character device /dev/tpm0
2.58 +to user-level applications for communicating with the virtual TPM.
2.59 +This is the same device interface that is also offered if a hardware TPM
2.60 +is available in the system. The backend provides a single interface
2.61 +/dev/vtpm where the virtual TPM waits for commands from all domains
2.62 +that have located their backend in a given domain.
2.63 +
2.64 +\subsection{Data Transfer}
2.65 +
2.66 +A single shared memory ring is used between the frontend and backend
2.67 +drivers. TPM requests and responses are sent in pages where a pointer
2.68 +to those pages and other information is placed into the ring such that
2.69 +the backend can map the pages into its memory space using the grant
2.70 +table mechanism.
2.71 +
2.72 +The backend driver has been implemented to only accept well-formed
2.73 +TPM requests. To meet this requirement, the length indicator in the
2.74 +TPM request must correctly indicate the length of the request.
2.75 +Otherwise an error message is automatically sent back by the device driver.
2.76 +
2.77 +The virtual TPM implementation listens for TPM requests on /dev/vtpm. Since
2.78 +it must be able to apply the TPM request packet to the virtual TPM instance
2.79 +associated with the virtual machine, a 4-byte virtual TPM instance
2.80 +identifier is prepended to each packet by the backend driver (in network
2.81 +byte order) for internal routing of the request.
2.82 +
2.83 +\subsection{Virtual TPM ring interface}
2.84 +
2.85 +The TPM protocol is a strict request/response protocol and therefore
2.86 +only one ring is used to send requests from the frontend to the backend
2.87 +and responses on the reverse path.
2.88 +
2.89 +The request/response structure is defined as follows:
2.90 +
2.91 +\scriptsize
2.92 +\begin{verbatim}
2.93 +typedef struct {
2.94 + unsigned long addr; /* Machine address of packet. */
2.95 + grant_ref_t ref; /* grant table access reference. */
2.96 + uint16_t unused; /* unused */
2.97 + uint16_t size; /* Packet size in bytes. */
2.98 +} tpmif_tx_request_t;
2.99 +\end{verbatim}
2.100 +\normalsize
2.101 +
2.102 +The fields are as follows:
2.103 +
2.104 +\begin{description}
2.105 +\item[addr] The machine address of the page associated with the TPM
2.106 + request/response; a request/response may span multiple
2.107 + pages
2.108 +\item[ref] The grant table reference associated with the address.
2.109 +\item[size] The size of the remaining packet; up to
2.110 + PAGE{\textunderscore}SIZE bytes can be found in the
2.111 + page referenced by 'addr'
2.112 +\end{description}
2.113 +
2.114 +The frontend initially allocates several pages whose addresses
2.115 +are stored in the ring. Only these pages are used for exchange of
2.116 +requests and responses.
2.117 +
2.118
2.119 \chapter{Further Information}
2.120
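The section above fully specifies the ring-slot layout and the 4-byte instance prefix, which is enough to sketch both conventions in plain C. Everything outside the quoted tpmif_tx_request_t definition — the grant_ref_t typedef, and the helper names fill_tx_slot() and prepend_instance() — is an illustrative assumption, not code from this changeset:

#include <stdint.h>
#include <stddef.h>
#include <string.h>
#include <arpa/inet.h>          /* htonl */

typedef uint32_t grant_ref_t;   /* assumed width; opaque grant handle */

/* Ring-slot layout as quoted in the new interface.tex section. */
typedef struct {
    unsigned long addr;         /* Machine address of packet. */
    grant_ref_t ref;            /* grant table access reference. */
    uint16_t unused;
    uint16_t size;              /* Packet size in bytes. */
} tpmif_tx_request_t;

/* Hypothetical frontend helper: describe one page of a request in a
 * ring slot; a request spanning n pages occupies n slots. */
static void fill_tx_slot(tpmif_tx_request_t *tx, unsigned long machine_addr,
                         grant_ref_t gref, uint16_t bytes_in_page)
{
    tx->addr = machine_addr;
    tx->ref = gref;
    tx->size = bytes_in_page;   /* at most PAGE_SIZE, per the text above */
}

/* Hypothetical backend helper: prepend the 4-byte vTPM instance number
 * (network byte order) before the packet is routed to /dev/vtpm. */
static size_t prepend_instance(uint8_t *out, uint32_t instance,
                               const uint8_t *tpm_req, size_t len)
{
    uint32_t inst_be = htonl(instance);

    memcpy(out, &inst_be, sizeof(inst_be));
    memcpy(out + sizeof(inst_be), tpm_req, len);
    return len + sizeof(inst_be);
}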
3.1 --- a/install.sh Mon Mar 06 09:09:18 2006 -0700 3.2 +++ b/install.sh Mon Mar 06 10:21:35 2006 -0700 3.3 @@ -22,20 +22,30 @@ if ! [ -d $dst ]; then 3.4 exit 1 3.5 fi 3.6 3.7 +tmp="`mktemp -d`" 3.8 + 3.9 echo "Installing Xen from '$src' to '$dst'..." 3.10 -(cd $src; tar -cf - --exclude etc/init.d --exclude etc/hotplug --exclude etc/udev * ) | tar -C $dst -xf - 3.11 -cp -fdRL $src/etc/init.d/* $dst/etc/init.d/ 3.12 -echo "All done." 3.13 +(cd $src; tar -cf - * ) | tar -C "$tmp" -xf - 3.14 3.15 [ -x "$(which udevinfo)" ] && \ 3.16 UDEV_VERSION=$(udevinfo -V | sed -e 's/^[^0-9]* \([0-9]\{1,\}\)[^0-9]\{0,\}/\1/') 3.17 3.18 if [ -n "$UDEV_VERSION" ] && [ $UDEV_VERSION -ge 059 ]; then 3.19 - cp -f $src/etc/udev/rules.d/*.rules $dst/etc/udev/rules.d/ 3.20 + echo " - installing for udev-based system" 3.21 + rm -rf "$tmp/etc/hotplug" 3.22 else 3.23 - cp -f $src/etc/hotplug/*.agent $dst/etc/hotplug/ 3.24 + echo " - installing for hotplug-based system" 3.25 + rm -rf "$tmp/etc/udev" 3.26 fi 3.27 3.28 +echo " - modifying permissions" 3.29 +chmod -R a+rX "$tmp" 3.30 + 3.31 +(cd $tmp; tar -cf - *) | tar --no-same-owner -C "$dst" -xf - 3.32 +rm -rf "$tmp" 3.33 + 3.34 +echo "All done." 3.35 + 3.36 echo "Checking to see whether prerequisite tools are installed..." 3.37 cd $src/../check 3.38 ./chk install
4.1 --- a/linux-2.6-xen-sparse/arch/i386/kernel/io_apic-xen.c Mon Mar 06 09:09:18 2006 -0700 4.2 +++ b/linux-2.6-xen-sparse/arch/i386/kernel/io_apic-xen.c Mon Mar 06 10:21:35 2006 -0700 4.3 @@ -61,8 +61,8 @@ static inline unsigned int xen_io_apic_r 4.4 int ret; 4.5 4.6 op.cmd = PHYSDEVOP_APIC_READ; 4.7 - op.u.apic_op.apic = mp_ioapics[apic].mpc_apicid; 4.8 - op.u.apic_op.offset = reg; 4.9 + op.u.apic_op.apic_physbase = mp_ioapics[apic].mpc_apicaddr; 4.10 + op.u.apic_op.reg = reg; 4.11 ret = HYPERVISOR_physdev_op(&op); 4.12 if (ret) 4.13 return ret; 4.14 @@ -74,8 +74,8 @@ static inline void xen_io_apic_write(uns 4.15 physdev_op_t op; 4.16 4.17 op.cmd = PHYSDEVOP_APIC_WRITE; 4.18 - op.u.apic_op.apic = mp_ioapics[apic].mpc_apicid; 4.19 - op.u.apic_op.offset = reg; 4.20 + op.u.apic_op.apic_physbase = mp_ioapics[apic].mpc_apicaddr; 4.21 + op.u.apic_op.reg = reg; 4.22 op.u.apic_op.value = value; 4.23 HYPERVISOR_physdev_op(&op); 4.24 }
5.1 --- a/linux-2.6-xen-sparse/arch/x86_64/kernel/io_apic-xen.c Mon Mar 06 09:09:18 2006 -0700 5.2 +++ b/linux-2.6-xen-sparse/arch/x86_64/kernel/io_apic-xen.c Mon Mar 06 10:21:35 2006 -0700 5.3 @@ -108,8 +108,8 @@ static inline unsigned int xen_io_apic_r 5.4 int ret; 5.5 5.6 op.cmd = PHYSDEVOP_APIC_READ; 5.7 - op.u.apic_op.apic = mp_ioapics[apic].mpc_apicid; 5.8 - op.u.apic_op.offset = reg; 5.9 + op.u.apic_op.apic_physbase = mp_ioapics[apic].mpc_apicaddr; 5.10 + op.u.apic_op.reg = reg; 5.11 ret = HYPERVISOR_physdev_op(&op); 5.12 if (ret) 5.13 return ret; 5.14 @@ -121,8 +121,8 @@ static inline void xen_io_apic_write(uns 5.15 physdev_op_t op; 5.16 5.17 op.cmd = PHYSDEVOP_APIC_WRITE; 5.18 - op.u.apic_op.apic = mp_ioapics[apic].mpc_apicid; 5.19 - op.u.apic_op.offset = reg; 5.20 + op.u.apic_op.apic_physbase = mp_ioapics[apic].mpc_apicaddr; 5.21 + op.u.apic_op.reg = reg; 5.22 op.u.apic_op.value = value; 5.23 HYPERVISOR_physdev_op(&op); 5.24 }
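Both the i386 and x86_64 hunks make the same substitution: the guest now names an IO-APIC by its physical base address (mpc_apicaddr) rather than its APIC id, and the offset field becomes reg. A minimal sketch of the resulting read path; physdev_op_t and HYPERVISOR_physdev_op() are assumed from the 2006-era Xen headers, and the field names are taken from the hunks above:

/* Sketch mirroring the patched xen_io_apic_read(). */
static unsigned int example_io_apic_read(unsigned long apic_physbase,
					 unsigned int reg)
{
	physdev_op_t op;
	int ret;

	op.cmd = PHYSDEVOP_APIC_READ;
	op.u.apic_op.apic_physbase = apic_physbase;	/* was: APIC id */
	op.u.apic_op.reg = reg;				/* was: offset */
	ret = HYPERVISOR_physdev_op(&op);
	if (ret)
		return ret;
	return op.u.apic_op.value;
}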
6.1 --- a/linux-2.6-xen-sparse/arch/x86_64/kernel/setup-xen.c Mon Mar 06 09:09:18 2006 -0700 6.2 +++ b/linux-2.6-xen-sparse/arch/x86_64/kernel/setup-xen.c Mon Mar 06 10:21:35 2006 -0700 6.3 @@ -82,6 +82,8 @@ 6.4 extern unsigned long start_pfn; 6.5 extern struct edid_info edid_info; 6.6 6.7 +extern void machine_specific_modify_cpu_capabilities(struct cpuinfo_x86 *c); 6.8 + 6.9 shared_info_t *HYPERVISOR_shared_info = (shared_info_t *)empty_zero_page; 6.10 EXPORT_SYMBOL(HYPERVISOR_shared_info); 6.11 6.12 @@ -1405,6 +1407,8 @@ void __cpuinit identify_cpu(struct cpuin 6.13 select_idle_routine(c); 6.14 detect_ht(c); 6.15 6.16 + machine_specific_modify_cpu_capabilities(c); 6.17 + 6.18 /* 6.19 * On SMP, boot_cpu_data holds the common feature set between 6.20 * all CPUs; so make sure that we indicate which features are
7.1 --- a/linux-2.6-xen-sparse/arch/x86_64/kernel/x8664_ksyms-xen.c Mon Mar 06 09:09:18 2006 -0700 7.2 +++ b/linux-2.6-xen-sparse/arch/x86_64/kernel/x8664_ksyms-xen.c Mon Mar 06 10:21:35 2006 -0700 7.3 @@ -32,8 +32,6 @@ 7.4 #include <asm/tlbflush.h> 7.5 #include <asm/kdebug.h> 7.6 7.7 -extern spinlock_t rtc_lock; 7.8 - 7.9 #ifdef CONFIG_SMP 7.10 extern void __write_lock_failed(rwlock_t *rw); 7.11 extern void __read_lock_failed(rwlock_t *rw); 7.12 @@ -42,9 +40,6 @@ extern void __read_lock_failed(rwlock_t 7.13 /* platform dependent support */ 7.14 EXPORT_SYMBOL(boot_cpu_data); 7.15 //EXPORT_SYMBOL(dump_fpu); 7.16 -EXPORT_SYMBOL(__ioremap); 7.17 -EXPORT_SYMBOL(ioremap_nocache); 7.18 -EXPORT_SYMBOL(iounmap); 7.19 EXPORT_SYMBOL(kernel_thread); 7.20 EXPORT_SYMBOL(pm_idle); 7.21 EXPORT_SYMBOL(pm_power_off); 7.22 @@ -102,8 +97,6 @@ EXPORT_SYMBOL(screen_info); 7.23 7.24 EXPORT_SYMBOL(get_wchan); 7.25 7.26 -EXPORT_SYMBOL(rtc_lock); 7.27 - 7.28 #ifdef CONFIG_X86_LOCAL_APIC 7.29 EXPORT_SYMBOL_GPL(set_nmi_callback); 7.30 EXPORT_SYMBOL_GPL(unset_nmi_callback); 7.31 @@ -166,7 +159,5 @@ EXPORT_SYMBOL(__supported_pte_mask); 7.32 EXPORT_SYMBOL(flush_tlb_page); 7.33 #endif 7.34 7.35 -EXPORT_SYMBOL(cpu_khz); 7.36 - 7.37 EXPORT_SYMBOL(load_gs_index); 7.38
8.1 --- a/linux-2.6-xen-sparse/drivers/xen/char/mem.c Mon Mar 06 09:09:18 2006 -0700 8.2 +++ b/linux-2.6-xen-sparse/drivers/xen/char/mem.c Mon Mar 06 10:21:35 2006 -0700 8.3 @@ -43,49 +43,85 @@ static inline int uncached_access(struct 8.4 static ssize_t read_mem(struct file * file, char __user * buf, 8.5 size_t count, loff_t *ppos) 8.6 { 8.7 - unsigned long i, p = *ppos; 8.8 - ssize_t read = -EFAULT; 8.9 + unsigned long p = *ppos, ignored; 8.10 + ssize_t read = 0, sz; 8.11 void __iomem *v; 8.12 8.13 - if ((v = ioremap(p, count)) == NULL) { 8.14 + while (count > 0) { 8.15 /* 8.16 - * Some programs (e.g., dmidecode) groove off into weird RAM 8.17 - * areas where no table scan possibly exist (because Xen will 8.18 - * have stomped on them!). These programs get rather upset if 8.19 - * we let them know that Xen failed their access, so we fake 8.20 - * out a read of all zeroes. :-) 8.21 + * Handle first page in case it's not aligned 8.22 */ 8.23 - for (i = 0; i < count; i++) 8.24 - if (put_user(0, buf+i)) 8.25 + if (-p & (PAGE_SIZE - 1)) 8.26 + sz = -p & (PAGE_SIZE - 1); 8.27 + else 8.28 + sz = PAGE_SIZE; 8.29 + 8.30 + sz = min_t(unsigned long, sz, count); 8.31 + 8.32 + if ((v = ioremap(p, sz)) == NULL) { 8.33 + /* 8.34 + * Some programs (e.g., dmidecode) groove off into weird RAM 8.35 + * areas where no tables can possibly exist (because Xen will 8.36 + * have stomped on them!). These programs get rather upset if 8.37 + * we let them know that Xen failed their access, so we fake 8.38 + * out a read of all zeroes. :-) 8.39 + */ 8.40 + if (clear_user(buf, count)) 8.41 return -EFAULT; 8.42 - return count; 8.43 + read += count; 8.44 + break; 8.45 + } 8.46 + 8.47 + ignored = copy_to_user(buf, v, sz); 8.48 + iounmap(v); 8.49 + if (ignored) 8.50 + return -EFAULT; 8.51 + buf += sz; 8.52 + p += sz; 8.53 + count -= sz; 8.54 + read += sz; 8.55 } 8.56 - if (copy_to_user(buf, v, count)) 8.57 - goto out; 8.58 8.59 - read = count; 8.60 *ppos += read; 8.61 -out: 8.62 - iounmap(v); 8.63 return read; 8.64 } 8.65 8.66 static ssize_t write_mem(struct file * file, const char __user * buf, 8.67 size_t count, loff_t *ppos) 8.68 { 8.69 - unsigned long p = *ppos; 8.70 - ssize_t written = -EFAULT; 8.71 + unsigned long p = *ppos, ignored; 8.72 + ssize_t written = 0, sz; 8.73 void __iomem *v; 8.74 8.75 - if ((v = ioremap(p, count)) == NULL) 8.76 - return -EFAULT; 8.77 - if (copy_from_user(v, buf, count)) 8.78 - goto out; 8.79 + while (count > 0) { 8.80 + /* 8.81 + * Handle first page in case it's not aligned 8.82 + */ 8.83 + if (-p & (PAGE_SIZE - 1)) 8.84 + sz = -p & (PAGE_SIZE - 1); 8.85 + else 8.86 + sz = PAGE_SIZE; 8.87 + 8.88 + sz = min_t(unsigned long, sz, count); 8.89 + 8.90 + if ((v = ioremap(p, sz)) == NULL) 8.91 + break; 8.92 8.93 - written = count; 8.94 + ignored = copy_from_user(v, buf, sz); 8.95 + iounmap(v); 8.96 + if (ignored) { 8.97 + written += sz - ignored; 8.98 + if (written) 8.99 + break; 8.100 + return -EFAULT; 8.101 + } 8.102 + buf += sz; 8.103 + p += sz; 8.104 + count -= sz; 8.105 + written += sz; 8.106 + } 8.107 + 8.108 *ppos += written; 8.109 -out: 8.110 - iounmap(v); 8.111 return written; 8.112 } 8.113
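The rewritten read_mem()/write_mem() no longer ioremap() the whole request at once; they walk it page by page, so one unmappable page only truncates the transfer instead of failing it. The per-iteration chunk rule is worth spelling out on its own; a standalone sketch, with PAGE_SIZE assumed to be a power of two as in the kernel:

#include <stddef.h>

#define PAGE_SIZE 4096UL	/* assumption for the sketch */

/* Bytes to transfer this iteration, as computed in the loops above:
 * -p & (PAGE_SIZE - 1) is the distance from p to the next page
 * boundary (0 when p is already aligned, in which case a full page
 * is taken), clamped to the bytes still requested. */
static size_t chunk_size(unsigned long p, size_t count)
{
	size_t sz = -p & (PAGE_SIZE - 1);

	if (sz == 0)
		sz = PAGE_SIZE;
	return sz < count ? sz : count;
}

For p = 0x1ff0 and count = 8192 this yields 16 bytes first, aligning p, after which full pages are taken until fewer than PAGE_SIZE bytes remain.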
9.1 --- a/linux-2.6-xen-sparse/drivers/xen/core/gnttab.c Mon Mar 06 09:09:18 2006 -0700 9.2 +++ b/linux-2.6-xen-sparse/drivers/xen/core/gnttab.c Mon Mar 06 10:21:35 2006 -0700 9.3 @@ -35,7 +35,6 @@ 9.4 #include <xen/interface/xen.h> 9.5 #include <asm/fixmap.h> 9.6 #include <asm/uaccess.h> 9.7 -#include <xen/public/privcmd.h> 9.8 #include <xen/gnttab.h> 9.9 #include <asm/synch_bitops.h> 9.10
10.1 --- a/linux-2.6-xen-sparse/drivers/xen/tpmback/interface.c Mon Mar 06 09:09:18 2006 -0700 10.2 +++ b/linux-2.6-xen-sparse/drivers/xen/tpmback/interface.c Mon Mar 06 10:21:35 2006 -0700 10.3 @@ -19,8 +19,7 @@ int num_frontends = 0; 10.4 10.5 LIST_HEAD(tpmif_list); 10.6 10.7 -tpmif_t * 10.8 -alloc_tpmif(domid_t domid, long int instance) 10.9 +static tpmif_t *alloc_tpmif(domid_t domid, long int instance) 10.10 { 10.11 struct page *page; 10.12 tpmif_t *tpmif; 10.13 @@ -45,16 +44,14 @@ alloc_tpmif(domid_t domid, long int inst 10.14 return tpmif; 10.15 } 10.16 10.17 -void 10.18 -free_tpmif(tpmif_t * tpmif) 10.19 +static void free_tpmif(tpmif_t * tpmif) 10.20 { 10.21 num_frontends--; 10.22 list_del(&tpmif->tpmif_list); 10.23 kmem_cache_free(tpmif_cachep, tpmif); 10.24 } 10.25 10.26 -tpmif_t * 10.27 -tpmif_find(domid_t domid, long int instance) 10.28 +tpmif_t *tpmif_find(domid_t domid, long int instance) 10.29 { 10.30 tpmif_t *tpmif; 10.31 10.32 @@ -72,8 +69,7 @@ tpmif_find(domid_t domid, long int insta 10.33 return alloc_tpmif(domid, instance); 10.34 } 10.35 10.36 -static int 10.37 -map_frontend_page(tpmif_t *tpmif, unsigned long shared_page) 10.38 +static int map_frontend_page(tpmif_t *tpmif, unsigned long shared_page) 10.39 { 10.40 int ret; 10.41 struct gnttab_map_grant_ref op = { 10.42 @@ -99,8 +95,7 @@ map_frontend_page(tpmif_t *tpmif, unsign 10.43 return 0; 10.44 } 10.45 10.46 -static void 10.47 -unmap_frontend_page(tpmif_t *tpmif) 10.48 +static void unmap_frontend_page(tpmif_t *tpmif) 10.49 { 10.50 struct gnttab_unmap_grant_ref op; 10.51 int ret; 10.52 @@ -115,14 +110,14 @@ unmap_frontend_page(tpmif_t *tpmif) 10.53 BUG_ON(ret); 10.54 } 10.55 10.56 -int 10.57 -tpmif_map(tpmif_t *tpmif, unsigned long shared_page, unsigned int evtchn) 10.58 +int tpmif_map(tpmif_t *tpmif, unsigned long shared_page, unsigned int evtchn) 10.59 { 10.60 int err; 10.61 evtchn_op_t op = { 10.62 .cmd = EVTCHNOP_bind_interdomain, 10.63 .u.bind_interdomain.remote_dom = tpmif->domid, 10.64 - .u.bind_interdomain.remote_port = evtchn }; 10.65 + .u.bind_interdomain.remote_port = evtchn, 10.66 + }; 10.67 10.68 if (tpmif->irq) { 10.69 return 0; 10.70 @@ -156,8 +151,7 @@ tpmif_map(tpmif_t *tpmif, unsigned long 10.71 return 0; 10.72 } 10.73 10.74 -static void 10.75 -__tpmif_disconnect_complete(void *arg) 10.76 +static void __tpmif_disconnect_complete(void *arg) 10.77 { 10.78 tpmif_t *tpmif = (tpmif_t *) arg; 10.79 10.80 @@ -172,22 +166,19 @@ static void 10.81 free_tpmif(tpmif); 10.82 } 10.83 10.84 -void 10.85 -tpmif_disconnect_complete(tpmif_t * tpmif) 10.86 +void tpmif_disconnect_complete(tpmif_t * tpmif) 10.87 { 10.88 INIT_WORK(&tpmif->work, __tpmif_disconnect_complete, (void *)tpmif); 10.89 schedule_work(&tpmif->work); 10.90 } 10.91 10.92 -void __init 10.93 -tpmif_interface_init(void) 10.94 +void __init tpmif_interface_init(void) 10.95 { 10.96 tpmif_cachep = kmem_cache_create("tpmif_cache", sizeof (tpmif_t), 10.97 0, 0, NULL, NULL); 10.98 } 10.99 10.100 -void __init 10.101 -tpmif_interface_exit(void) 10.102 +void __init tpmif_interface_exit(void) 10.103 { 10.104 kmem_cache_destroy(tpmif_cachep); 10.105 }
11.1 --- a/linux-2.6-xen-sparse/drivers/xen/tpmback/tpmback.c Mon Mar 06 09:09:18 2006 -0700 11.2 +++ b/linux-2.6-xen-sparse/drivers/xen/tpmback/tpmback.c Mon Mar 06 10:21:35 2006 -0700 11.3 @@ -22,17 +22,23 @@ 11.4 #include <xen/xenbus.h> 11.5 #include <xen/interface/grant_table.h> 11.6 11.7 - 11.8 /* local data structures */ 11.9 struct data_exchange { 11.10 struct list_head pending_pak; 11.11 struct list_head current_pak; 11.12 unsigned int copied_so_far; 11.13 u8 has_opener; 11.14 - rwlock_t pak_lock; // protects all of the previous fields 11.15 + rwlock_t pak_lock; // protects all of the previous fields 11.16 wait_queue_head_t wait_queue; 11.17 }; 11.18 11.19 +struct vtpm_resp_hdr { 11.20 + uint32_t instance_no; 11.21 + uint16_t tag_no; 11.22 + uint32_t len_no; 11.23 + uint32_t ordinal_no; 11.24 +} __attribute__ ((packed)); 11.25 + 11.26 struct packet { 11.27 struct list_head next; 11.28 unsigned int data_len; 11.29 @@ -50,36 +56,43 @@ enum { 11.30 PACKET_FLAG_CHECK_RESPONSESTATUS = 2, 11.31 }; 11.32 11.33 +/* local variables */ 11.34 static struct data_exchange dataex; 11.35 11.36 /* local function prototypes */ 11.37 -static int vtpm_queue_packet(struct packet *pak); 11.38 static int _packet_write(struct packet *pak, 11.39 - const char *data, size_t size, 11.40 - int userbuffer); 11.41 + const char *data, size_t size, int userbuffer); 11.42 static void processing_timeout(unsigned long ptr); 11.43 -static int packet_read_shmem(struct packet *pak, 11.44 - tpmif_t *tpmif, 11.45 - u32 offset, 11.46 - char *buffer, 11.47 - int isuserbuffer, 11.48 - u32 left); 11.49 - 11.50 +static int packet_read_shmem(struct packet *pak, 11.51 + tpmif_t * tpmif, 11.52 + u32 offset, 11.53 + char *buffer, int isuserbuffer, u32 left); 11.54 +static int vtpm_queue_packet(struct packet *pak); 11.55 11.56 #define MIN(x,y) (x) < (y) ? (x) : (y) 11.57 11.58 - 11.59 /*************************************************************** 11.60 - Buffer copying 11.61 + Buffer copying fo user and kernel space buffes. 
11.62 ***************************************************************/ 11.63 -static inline int 11.64 -copy_from_buffer(void *to, 11.65 - const void *from, 11.66 - unsigned long size, 11.67 - int userbuffer) 11.68 +static inline int copy_from_buffer(void *to, 11.69 + const void *from, unsigned long size, 11.70 + int isuserbuffer) 11.71 { 11.72 - if (userbuffer) { 11.73 - if (copy_from_user(to, from, size)) 11.74 + if (isuserbuffer) { 11.75 + if (copy_from_user(to, (void __user *)from, size)) 11.76 + return -EFAULT; 11.77 + } else { 11.78 + memcpy(to, from, size); 11.79 + } 11.80 + return 0; 11.81 +} 11.82 + 11.83 +static inline int copy_to_buffer(void *to, 11.84 + const void *from, unsigned long size, 11.85 + int isuserbuffer) 11.86 +{ 11.87 + if (isuserbuffer) { 11.88 + if (copy_to_user((void __user *)to, from, size)) 11.89 return -EFAULT; 11.90 } else { 11.91 memcpy(to, from, size); 11.92 @@ -91,17 +104,19 @@ copy_from_buffer(void *to, 11.93 Packet-related functions 11.94 ***************************************************************/ 11.95 11.96 -static struct packet * 11.97 -packet_find_instance(struct list_head *head, u32 tpm_instance) 11.98 +static struct packet *packet_find_instance(struct list_head *head, 11.99 + u32 tpm_instance) 11.100 { 11.101 struct packet *pak; 11.102 struct list_head *p; 11.103 + 11.104 /* 11.105 * traverse the list of packets and return the first 11.106 * one with the given instance number 11.107 */ 11.108 list_for_each(p, head) { 11.109 pak = list_entry(p, struct packet, next); 11.110 + 11.111 if (pak->tpm_instance == tpm_instance) { 11.112 return pak; 11.113 } 11.114 @@ -109,17 +124,18 @@ packet_find_instance(struct list_head *h 11.115 return NULL; 11.116 } 11.117 11.118 -static struct packet * 11.119 -packet_find_packet(struct list_head *head, void *packet) 11.120 +static struct packet *packet_find_packet(struct list_head *head, void *packet) 11.121 { 11.122 struct packet *pak; 11.123 struct list_head *p; 11.124 + 11.125 /* 11.126 * traverse the list of packets and return the first 11.127 * one with the given instance number 11.128 */ 11.129 list_for_each(p, head) { 11.130 pak = list_entry(p, struct packet, next); 11.131 + 11.132 if (pak == packet) { 11.133 return pak; 11.134 } 11.135 @@ -127,22 +143,20 @@ packet_find_packet(struct list_head *hea 11.136 return NULL; 11.137 } 11.138 11.139 -static struct packet * 11.140 -packet_alloc(tpmif_t *tpmif, u32 size, u8 req_tag, u8 flags) 11.141 +static struct packet *packet_alloc(tpmif_t * tpmif, 11.142 + u32 size, u8 req_tag, u8 flags) 11.143 { 11.144 struct packet *pak = NULL; 11.145 - pak = kmalloc(sizeof(struct packet), 11.146 - GFP_KERNEL); 11.147 + pak = kzalloc(sizeof (struct packet), GFP_KERNEL); 11.148 if (NULL != pak) { 11.149 - memset(pak, 0x0, sizeof(*pak)); 11.150 if (tpmif) { 11.151 pak->tpmif = tpmif; 11.152 pak->tpm_instance = tpmif->tpm_instance; 11.153 } 11.154 - pak->data_len = size; 11.155 - pak->req_tag = req_tag; 11.156 + pak->data_len = size; 11.157 + pak->req_tag = req_tag; 11.158 pak->last_read = 0; 11.159 - pak->flags = flags; 11.160 + pak->flags = flags; 11.161 11.162 /* 11.163 * cannot do tpmif_get(tpmif); bad things happen 11.164 @@ -155,16 +169,16 @@ packet_alloc(tpmif_t *tpmif, u32 size, u 11.165 return pak; 11.166 } 11.167 11.168 -static void inline 11.169 -packet_reset(struct packet *pak) 11.170 +static void inline packet_reset(struct packet *pak) 11.171 { 11.172 pak->last_read = 0; 11.173 } 11.174 11.175 -static void inline 11.176 -packet_free(struct packet *pak) 11.177 +static 
void packet_free(struct packet *pak) 11.178 { 11.179 - del_singleshot_timer_sync(&pak->processing_timer); 11.180 + if (timer_pending(&pak->processing_timer)) { 11.181 + BUG(); 11.182 + } 11.183 kfree(pak->data_buffer); 11.184 /* 11.185 * cannot do tpmif_put(pak->tpmif); bad things happen 11.186 @@ -173,13 +187,13 @@ packet_free(struct packet *pak) 11.187 kfree(pak); 11.188 } 11.189 11.190 -static int 11.191 -packet_set(struct packet *pak, 11.192 - const unsigned char *buffer, u32 size) 11.193 +static int packet_set(struct packet *pak, 11.194 + const unsigned char *buffer, u32 size) 11.195 { 11.196 int rc = 0; 11.197 unsigned char *buf = kmalloc(size, GFP_KERNEL); 11.198 - if (NULL != buf) { 11.199 + 11.200 + if (buf) { 11.201 pak->data_buffer = buf; 11.202 memcpy(buf, buffer, size); 11.203 pak->data_len = size; 11.204 @@ -189,27 +203,21 @@ packet_set(struct packet *pak, 11.205 return rc; 11.206 } 11.207 11.208 - 11.209 /* 11.210 * Write data to the shared memory and send it to the FE. 11.211 */ 11.212 -static int 11.213 -packet_write(struct packet *pak, 11.214 - const char *data, size_t size, 11.215 - int userbuffer) 11.216 +static int packet_write(struct packet *pak, 11.217 + const char *data, size_t size, int isuserbuffer) 11.218 { 11.219 int rc = 0; 11.220 11.221 - DPRINTK("Supposed to send %d bytes to front-end!\n", 11.222 - size); 11.223 - 11.224 - if (0 != (pak->flags & PACKET_FLAG_CHECK_RESPONSESTATUS)) { 11.225 + if ((pak->flags & PACKET_FLAG_CHECK_RESPONSESTATUS)) { 11.226 #ifdef CONFIG_XEN_TPMDEV_CLOSE_IF_VTPM_FAILS 11.227 u32 res; 11.228 + 11.229 if (copy_from_buffer(&res, 11.230 - &data[2+4], 11.231 - sizeof(res), 11.232 - userbuffer)) { 11.233 + &data[2 + 4], sizeof (res), 11.234 + isuserbuffer)) { 11.235 return -EFAULT; 11.236 } 11.237 11.238 @@ -230,17 +238,14 @@ packet_write(struct packet *pak, 11.239 /* Don't send a respone to this packet. Just acknowledge it. 
*/ 11.240 rc = size; 11.241 } else { 11.242 - rc = _packet_write(pak, data, size, userbuffer); 11.243 + rc = _packet_write(pak, data, size, isuserbuffer); 11.244 } 11.245 11.246 return rc; 11.247 } 11.248 11.249 - 11.250 -static int 11.251 -_packet_write(struct packet *pak, 11.252 - const char *data, size_t size, 11.253 - int userbuffer) 11.254 +int _packet_write(struct packet *pak, 11.255 + const char *data, size_t size, int isuserbuffer) 11.256 { 11.257 /* 11.258 * Write into the shared memory pages directly 11.259 @@ -254,7 +259,7 @@ static int 11.260 11.261 if (tpmif == NULL) { 11.262 return -EFAULT; 11.263 - } 11.264 + } 11.265 11.266 if (tpmif->status == DISCONNECTED) { 11.267 return size; 11.268 @@ -273,16 +278,13 @@ static int 11.269 return 0; 11.270 } 11.271 11.272 - map_op.host_addr = MMAP_VADDR(tpmif, i); 11.273 - map_op.flags = GNTMAP_host_map; 11.274 - map_op.ref = tx->ref; 11.275 - map_op.dom = tpmif->domid; 11.276 + map_op.host_addr = MMAP_VADDR(tpmif, i); 11.277 + map_op.flags = GNTMAP_host_map; 11.278 + map_op.ref = tx->ref; 11.279 + map_op.dom = tpmif->domid; 11.280 11.281 - if(unlikely( 11.282 - HYPERVISOR_grant_table_op( 11.283 - GNTTABOP_map_grant_ref, 11.284 - &map_op, 11.285 - 1))) { 11.286 + if (unlikely(HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, 11.287 + &map_op, 1))) { 11.288 BUG(); 11.289 } 11.290 11.291 @@ -292,28 +294,27 @@ static int 11.292 DPRINTK(" Grant table operation failure !\n"); 11.293 return 0; 11.294 } 11.295 - set_phys_to_machine(__pa(MMAP_VADDR(tpmif,i)) >> PAGE_SHIFT, 11.296 - FOREIGN_FRAME(map_op.dev_bus_addr >> PAGE_SHIFT)); 11.297 + set_phys_to_machine(__pa(MMAP_VADDR(tpmif, i)) >> PAGE_SHIFT, 11.298 + FOREIGN_FRAME(map_op. 11.299 + dev_bus_addr >> PAGE_SHIFT)); 11.300 11.301 tocopy = MIN(size - offset, PAGE_SIZE); 11.302 11.303 - if (copy_from_buffer((void *)(MMAP_VADDR(tpmif,i)| 11.304 - (tx->addr & ~PAGE_MASK)), 11.305 - &data[offset], 11.306 - tocopy, 11.307 - userbuffer)) { 11.308 + if (copy_from_buffer((void *)(MMAP_VADDR(tpmif, i) | 11.309 + (tx->addr & ~PAGE_MASK)), 11.310 + &data[offset], tocopy, isuserbuffer)) { 11.311 tpmif_put(tpmif); 11.312 return -EFAULT; 11.313 } 11.314 tx->size = tocopy; 11.315 11.316 - unmap_op.host_addr = MMAP_VADDR(tpmif, i); 11.317 - unmap_op.handle = handle; 11.318 + unmap_op.host_addr = MMAP_VADDR(tpmif, i); 11.319 + unmap_op.handle = handle; 11.320 unmap_op.dev_bus_addr = 0; 11.321 11.322 - if(unlikely(HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, 11.323 - &unmap_op, 11.324 - 1))) { 11.325 + if (unlikely 11.326 + (HYPERVISOR_grant_table_op 11.327 + (GNTTABOP_unmap_grant_ref, &unmap_op, 1))) { 11.328 BUG(); 11.329 } 11.330 11.331 @@ -322,8 +323,7 @@ static int 11.332 } 11.333 11.334 rc = offset; 11.335 - DPRINTK("Notifying frontend via irq %d\n", 11.336 - tpmif->irq); 11.337 + DPRINTK("Notifying frontend via irq %d\n", tpmif->irq); 11.338 notify_remote_via_irq(tpmif->irq); 11.339 11.340 return rc; 11.341 @@ -334,26 +334,19 @@ static int 11.342 * provided buffer. Advance the read_last indicator which tells 11.343 * how many bytes have already been read. 11.344 */ 11.345 -static int 11.346 -packet_read(struct packet *pak, size_t numbytes, 11.347 - char *buffer, size_t buffersize, 11.348 - int userbuffer) 11.349 +static int packet_read(struct packet *pak, size_t numbytes, 11.350 + char *buffer, size_t buffersize, int isuserbuffer) 11.351 { 11.352 tpmif_t *tpmif = pak->tpmif; 11.353 + 11.354 /* 11.355 - * I am supposed to read 'numbytes' of data from the 11.356 - * buffer. 
11.357 - * The first 4 bytes that are read are the instance number in 11.358 - * network byte order, after that comes the data from the 11.359 - * shared memory buffer. 11.360 + * Read 'numbytes' of data from the buffer. The first 4 11.361 + * bytes are the instance number in network byte order, 11.362 + * after that come the data from the shared memory buffer. 11.363 */ 11.364 u32 to_copy; 11.365 u32 offset = 0; 11.366 u32 room_left = buffersize; 11.367 - /* 11.368 - * Ensure that we see the request when we copy it. 11.369 - */ 11.370 - mb(); 11.371 11.372 if (pak->last_read < 4) { 11.373 /* 11.374 @@ -361,18 +354,13 @@ packet_read(struct packet *pak, size_t n 11.375 */ 11.376 u32 instance_no = htonl(pak->tpm_instance); 11.377 u32 last_read = pak->last_read; 11.378 + 11.379 to_copy = MIN(4 - last_read, numbytes); 11.380 11.381 - if (userbuffer) { 11.382 - if (copy_to_user(&buffer[0], 11.383 - &(((u8 *)&instance_no)[last_read]), 11.384 - to_copy)) { 11.385 - return -EFAULT; 11.386 - } 11.387 - } else { 11.388 - memcpy(&buffer[0], 11.389 - &(((u8 *)&instance_no)[last_read]), 11.390 - to_copy); 11.391 + if (copy_to_buffer(&buffer[0], 11.392 + &(((u8 *) & instance_no)[last_read]), 11.393 + to_copy, isuserbuffer)) { 11.394 + return -EFAULT; 11.395 } 11.396 11.397 pak->last_read += to_copy; 11.398 @@ -388,39 +376,30 @@ packet_read(struct packet *pak, size_t n 11.399 if (pak->data_buffer) { 11.400 u32 to_copy = MIN(pak->data_len - offset, room_left); 11.401 u32 last_read = pak->last_read - 4; 11.402 - if (userbuffer) { 11.403 - if (copy_to_user(&buffer[offset], 11.404 - &pak->data_buffer[last_read], 11.405 - to_copy)) { 11.406 - return -EFAULT; 11.407 - } 11.408 - } else { 11.409 - memcpy(&buffer[offset], 11.410 - &pak->data_buffer[last_read], 11.411 - to_copy); 11.412 + 11.413 + if (copy_to_buffer(&buffer[offset], 11.414 + &pak->data_buffer[last_read], 11.415 + to_copy, isuserbuffer)) { 11.416 + return -EFAULT; 11.417 } 11.418 pak->last_read += to_copy; 11.419 offset += to_copy; 11.420 } else { 11.421 offset = packet_read_shmem(pak, 11.422 - tpmif, 11.423 - offset, 11.424 - buffer, 11.425 - userbuffer, 11.426 - room_left); 11.427 + tpmif, 11.428 + offset, 11.429 + buffer, 11.430 + isuserbuffer, room_left); 11.431 } 11.432 } 11.433 return offset; 11.434 } 11.435 11.436 - 11.437 -static int 11.438 -packet_read_shmem(struct packet *pak, 11.439 - tpmif_t *tpmif, 11.440 - u32 offset, 11.441 - char *buffer, 11.442 - int isuserbuffer, 11.443 - u32 room_left) { 11.444 +static int packet_read_shmem(struct packet *pak, 11.445 + tpmif_t * tpmif, 11.446 + u32 offset, char *buffer, int isuserbuffer, 11.447 + u32 room_left) 11.448 +{ 11.449 u32 last_read = pak->last_read - 4; 11.450 u32 i = (last_read / PAGE_SIZE); 11.451 u32 pg_offset = last_read & (PAGE_SIZE - 1); 11.452 @@ -428,6 +407,7 @@ packet_read_shmem(struct packet *pak, 11.453 grant_handle_t handle; 11.454 11.455 tpmif_tx_request_t *tx; 11.456 + 11.457 tx = &tpmif->tx->ring[0].req; 11.458 /* 11.459 * Start copying data at the page with index 'index' 11.460 @@ -443,13 +423,12 @@ packet_read_shmem(struct packet *pak, 11.461 tx = &tpmif->tx->ring[i].req; 11.462 11.463 map_op.host_addr = MMAP_VADDR(tpmif, i); 11.464 - map_op.flags = GNTMAP_host_map; 11.465 - map_op.ref = tx->ref; 11.466 - map_op.dom = tpmif->domid; 11.467 + map_op.flags = GNTMAP_host_map; 11.468 + map_op.ref = tx->ref; 11.469 + map_op.dom = tpmif->domid; 11.470 11.471 - if(unlikely(HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, 11.472 - &map_op, 11.473 - 1))) { 11.474 + if 
(unlikely(HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, 11.475 + &map_op, 1))) { 11.476 BUG(); 11.477 } 11.478 11.479 @@ -462,41 +441,33 @@ packet_read_shmem(struct packet *pak, 11.480 11.481 if (to_copy > tx->size) { 11.482 /* 11.483 - * This is the case when the user wants to read more 11.484 - * than what we have. So we just give him what we 11.485 - * have. 11.486 + * User requests more than what's available 11.487 */ 11.488 to_copy = MIN(tx->size, to_copy); 11.489 } 11.490 11.491 DPRINTK("Copying from mapped memory at %08lx\n", 11.492 - (unsigned long)(MMAP_VADDR(tpmif,i) | 11.493 - (tx->addr & ~PAGE_MASK))); 11.494 + (unsigned long)(MMAP_VADDR(tpmif, i) | 11.495 + (tx->addr & ~PAGE_MASK))); 11.496 11.497 - src = (void *)(MMAP_VADDR(tpmif,i) | ((tx->addr & ~PAGE_MASK) + pg_offset)); 11.498 - if (isuserbuffer) { 11.499 - if (copy_to_user(&buffer[offset], 11.500 - src, 11.501 - to_copy)) { 11.502 - return -EFAULT; 11.503 - } 11.504 - } else { 11.505 - memcpy(&buffer[offset], 11.506 - src, 11.507 - to_copy); 11.508 + src = (void *)(MMAP_VADDR(tpmif, i) | 11.509 + ((tx->addr & ~PAGE_MASK) + pg_offset)); 11.510 + if (copy_to_buffer(&buffer[offset], 11.511 + src, to_copy, isuserbuffer)) { 11.512 + return -EFAULT; 11.513 } 11.514 11.515 + DPRINTK("Data from TPM-FE of domain %d are %d %d %d %d\n", 11.516 + tpmif->domid, buffer[offset], buffer[offset + 1], 11.517 + buffer[offset + 2], buffer[offset + 3]); 11.518 11.519 - DPRINTK("Data from TPM-FE of domain %d are %d %d %d %d\n", 11.520 - tpmif->domid, buffer[offset], buffer[offset+1],buffer[offset+2],buffer[offset+3]); 11.521 - 11.522 - unmap_op.host_addr = MMAP_VADDR(tpmif, i); 11.523 - unmap_op.handle = handle; 11.524 + unmap_op.host_addr = MMAP_VADDR(tpmif, i); 11.525 + unmap_op.handle = handle; 11.526 unmap_op.dev_bus_addr = 0; 11.527 11.528 - if(unlikely(HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, 11.529 - &unmap_op, 11.530 - 1))) { 11.531 + if (unlikely 11.532 + (HYPERVISOR_grant_table_op 11.533 + (GNTTABOP_unmap_grant_ref, &unmap_op, 1))) { 11.534 BUG(); 11.535 } 11.536 11.537 @@ -507,7 +478,7 @@ packet_read_shmem(struct packet *pak, 11.538 11.539 to_copy = MIN(PAGE_SIZE, room_left); 11.540 i++; 11.541 - } /* while (to_copy > 0) */ 11.542 + } /* while (to_copy > 0) */ 11.543 /* 11.544 * Adjust the last_read pointer 11.545 */ 11.546 @@ -515,13 +486,11 @@ packet_read_shmem(struct packet *pak, 11.547 return offset; 11.548 } 11.549 11.550 - 11.551 /* ============================================================ 11.552 * The file layer for reading data from this device 11.553 * ============================================================ 11.554 */ 11.555 -static int 11.556 -vtpm_op_open(struct inode *inode, struct file *f) 11.557 +static int vtpm_op_open(struct inode *inode, struct file *f) 11.558 { 11.559 int rc = 0; 11.560 unsigned long flags; 11.561 @@ -536,9 +505,8 @@ vtpm_op_open(struct inode *inode, struct 11.562 return rc; 11.563 } 11.564 11.565 -static ssize_t 11.566 -vtpm_op_read(struct file *file, 11.567 - char __user * data, size_t size, loff_t * offset) 11.568 +static ssize_t vtpm_op_read(struct file *file, 11.569 + char __user * data, size_t size, loff_t * offset) 11.570 { 11.571 int ret_size = -ENODATA; 11.572 struct packet *pak = NULL; 11.573 @@ -549,7 +517,7 @@ vtpm_op_read(struct file *file, 11.574 if (list_empty(&dataex.pending_pak)) { 11.575 write_unlock_irqrestore(&dataex.pak_lock, flags); 11.576 wait_event_interruptible(dataex.wait_queue, 11.577 - !list_empty(&dataex.pending_pak)); 11.578 + 
!list_empty(&dataex.pending_pak)); 11.579 write_lock_irqsave(&dataex.pak_lock, flags); 11.580 } 11.581 11.582 @@ -561,7 +529,7 @@ vtpm_op_read(struct file *file, 11.583 11.584 DPRINTK("size given by app: %d, available: %d\n", size, left); 11.585 11.586 - ret_size = MIN(size,left); 11.587 + ret_size = MIN(size, left); 11.588 11.589 ret_size = packet_read(pak, ret_size, data, size, 1); 11.590 if (ret_size < 0) { 11.591 @@ -574,7 +542,8 @@ vtpm_op_read(struct file *file, 11.592 DPRINTK("All data from this packet given to app.\n"); 11.593 /* All data given to app */ 11.594 11.595 - del_singleshot_timer_sync(&pak->processing_timer); 11.596 + del_singleshot_timer_sync(&pak-> 11.597 + processing_timer); 11.598 list_del(&pak->next); 11.599 list_add_tail(&pak->next, &dataex.current_pak); 11.600 /* 11.601 @@ -582,7 +551,7 @@ vtpm_op_read(struct file *file, 11.602 * the more time we give the TPM to process the request. 11.603 */ 11.604 mod_timer(&pak->processing_timer, 11.605 - jiffies + (num_frontends * 60 * HZ)); 11.606 + jiffies + (num_frontends * 60 * HZ)); 11.607 dataex.copied_so_far = 0; 11.608 } 11.609 } 11.610 @@ -597,16 +566,15 @@ vtpm_op_read(struct file *file, 11.611 /* 11.612 * Write operation - only works after a previous read operation! 11.613 */ 11.614 -static ssize_t 11.615 -vtpm_op_write(struct file *file, const char __user * data, size_t size, 11.616 - loff_t * offset) 11.617 +static ssize_t vtpm_op_write(struct file *file, 11.618 + const char __user * data, size_t size, 11.619 + loff_t * offset) 11.620 { 11.621 struct packet *pak; 11.622 int rc = 0; 11.623 unsigned int off = 4; 11.624 unsigned long flags; 11.625 - u32 instance_no = 0; 11.626 - u32 len_no = 0; 11.627 + struct vtpm_resp_hdr vrh; 11.628 11.629 /* 11.630 * Minimum required packet size is: 11.631 @@ -616,45 +584,38 @@ vtpm_op_write(struct file *file, const c 11.632 * 4 bytes for the ordinal 11.633 * sum: 14 bytes 11.634 */ 11.635 - if ( size < off + 10 ) { 11.636 + if (size < sizeof (vrh)) 11.637 return -EFAULT; 11.638 - } 11.639 + 11.640 + if (copy_from_user(&vrh, data, sizeof (vrh))) 11.641 + return -EFAULT; 11.642 11.643 - if (copy_from_user(&instance_no, 11.644 - (void __user *)&data[0], 11.645 - 4)) { 11.646 + /* malformed packet? */ 11.647 + if ((off + ntohl(vrh.len_no)) != size) 11.648 + return -EFAULT; 11.649 + 11.650 + write_lock_irqsave(&dataex.pak_lock, flags); 11.651 + pak = packet_find_instance(&dataex.current_pak, 11.652 + ntohl(vrh.instance_no)); 11.653 + 11.654 + if (pak == NULL) { 11.655 + write_unlock_irqrestore(&dataex.pak_lock, flags); 11.656 + printk(KERN_ALERT "No associated packet! (inst=%d)\n", 11.657 + ntohl(vrh.instance_no)); 11.658 return -EFAULT; 11.659 } 11.660 11.661 - if (copy_from_user(&len_no, 11.662 - (void __user *)&data[off+2], 11.663 - 4) || 11.664 - (off + ntohl(len_no) != size)) { 11.665 - return -EFAULT; 11.666 - } 11.667 - 11.668 - write_lock_irqsave(&dataex.pak_lock, flags); 11.669 - pak = packet_find_instance(&dataex.current_pak, ntohl(instance_no)); 11.670 - 11.671 - if (pak == NULL) { 11.672 - write_unlock_irqrestore(&dataex.pak_lock, flags); 11.673 - printk(KERN_ALERT "No associated packet!\n"); 11.674 - return -EFAULT; 11.675 - } else { 11.676 - del_singleshot_timer_sync(&pak->processing_timer); 11.677 - list_del(&pak->next); 11.678 - } 11.679 + del_singleshot_timer_sync(&pak->processing_timer); 11.680 + list_del(&pak->next); 11.681 11.682 write_unlock_irqrestore(&dataex.pak_lock, flags); 11.683 11.684 /* 11.685 - * The first 'offset' bytes must be the instance number. 
11.686 - * I will just pull that from the packet. 11.687 + * The first 'offset' bytes must be the instance number - skip them. 11.688 */ 11.689 size -= off; 11.690 - data = &data[off]; 11.691 11.692 - rc = packet_write(pak, data, size, 1); 11.693 + rc = packet_write(pak, &data[off], size, 1); 11.694 11.695 if (rc > 0) { 11.696 /* I neglected the first 4 bytes */ 11.697 @@ -664,10 +625,10 @@ vtpm_op_write(struct file *file, const c 11.698 return rc; 11.699 } 11.700 11.701 -static int 11.702 -vtpm_op_release(struct inode *inode, struct file *file) 11.703 +static int vtpm_op_release(struct inode *inode, struct file *file) 11.704 { 11.705 unsigned long flags; 11.706 + 11.707 vtpm_release_packets(NULL, 1); 11.708 write_lock_irqsave(&dataex.pak_lock, flags); 11.709 dataex.has_opener = 0; 11.710 @@ -675,10 +636,11 @@ vtpm_op_release(struct inode *inode, str 11.711 return 0; 11.712 } 11.713 11.714 -static unsigned int 11.715 -vtpm_op_poll(struct file *file, struct poll_table_struct *pts) 11.716 +static unsigned int vtpm_op_poll(struct file *file, 11.717 + struct poll_table_struct *pts) 11.718 { 11.719 unsigned int flags = POLLOUT | POLLWRNORM; 11.720 + 11.721 poll_wait(file, &dataex.wait_queue, pts); 11.722 if (!list_empty(&dataex.pending_pak)) { 11.723 flags |= POLLIN | POLLRDNORM; 11.724 @@ -696,54 +658,47 @@ static struct file_operations vtpm_ops = 11.725 .poll = vtpm_op_poll, 11.726 }; 11.727 11.728 -static struct miscdevice ibmvtpms_miscdevice = { 11.729 +static struct miscdevice vtpms_miscdevice = { 11.730 .minor = 225, 11.731 .name = "vtpm", 11.732 .fops = &vtpm_ops, 11.733 }; 11.734 11.735 - 11.736 /*************************************************************** 11.737 Virtual TPM functions and data stuctures 11.738 ***************************************************************/ 11.739 11.740 static u8 create_cmd[] = { 11.741 - 1,193, /* 0: TPM_TAG_RQU_COMMAMD */ 11.742 - 0,0,0,19, /* 2: length */ 11.743 - 0,0,0,0x1, /* 6: VTPM_ORD_OPEN */ 11.744 - 0, /* 10: VTPM type */ 11.745 - 0,0,0,0, /* 11: domain id */ 11.746 - 0,0,0,0 /* 15: instance id */ 11.747 + 1, 193, /* 0: TPM_TAG_RQU_COMMAMD */ 11.748 + 0, 0, 0, 19, /* 2: length */ 11.749 + 0, 0, 0, 0x1, /* 6: VTPM_ORD_OPEN */ 11.750 + 0, /* 10: VTPM type */ 11.751 + 0, 0, 0, 0, /* 11: domain id */ 11.752 + 0, 0, 0, 0 /* 15: instance id */ 11.753 }; 11.754 11.755 -static u8 destroy_cmd[] = { 11.756 - 1,193, /* 0: TPM_TAG_RQU_COMMAMD */ 11.757 - 0,0,0,14, /* 2: length */ 11.758 - 0,0,0,0x2, /* 6: VTPM_ORD_CLOSE */ 11.759 - 0,0,0,0 /* 10: instance id */ 11.760 -}; 11.761 - 11.762 -int tpmif_vtpm_open(tpmif_t *tpmif, domid_t domid, u32 instance) 11.763 +int tpmif_vtpm_open(tpmif_t * tpmif, domid_t domid, u32 instance) 11.764 { 11.765 int rc = 0; 11.766 struct packet *pak; 11.767 11.768 pak = packet_alloc(tpmif, 11.769 - sizeof(create_cmd), 11.770 - create_cmd[0], 11.771 - PACKET_FLAG_DISCARD_RESPONSE| 11.772 - PACKET_FLAG_CHECK_RESPONSESTATUS); 11.773 + sizeof (create_cmd), 11.774 + create_cmd[1], 11.775 + PACKET_FLAG_DISCARD_RESPONSE | 11.776 + PACKET_FLAG_CHECK_RESPONSESTATUS); 11.777 if (pak) { 11.778 - u8 buf[sizeof(create_cmd)]; 11.779 - u32 domid_no = htonl((u32)domid); 11.780 + u8 buf[sizeof (create_cmd)]; 11.781 + u32 domid_no = htonl((u32) domid); 11.782 u32 instance_no = htonl(instance); 11.783 - memcpy(buf, create_cmd, sizeof(create_cmd)); 11.784 + 11.785 + memcpy(buf, create_cmd, sizeof (create_cmd)); 11.786 11.787 - memcpy(&buf[11], &domid_no, sizeof(u32)); 11.788 - memcpy(&buf[15], &instance_no, sizeof(u32)); 11.789 + 
memcpy(&buf[11], &domid_no, sizeof (u32)); 11.790 + memcpy(&buf[15], &instance_no, sizeof (u32)); 11.791 11.792 /* copy the buffer into the packet */ 11.793 - rc = packet_set(pak, buf, sizeof(buf)); 11.794 + rc = packet_set(pak, buf, sizeof (buf)); 11.795 11.796 if (rc == 0) { 11.797 pak->tpm_instance = 0; 11.798 @@ -759,23 +714,30 @@ int tpmif_vtpm_open(tpmif_t *tpmif, domi 11.799 return rc; 11.800 } 11.801 11.802 +static u8 destroy_cmd[] = { 11.803 + 1, 193, /* 0: TPM_TAG_RQU_COMMAMD */ 11.804 + 0, 0, 0, 14, /* 2: length */ 11.805 + 0, 0, 0, 0x2, /* 6: VTPM_ORD_CLOSE */ 11.806 + 0, 0, 0, 0 /* 10: instance id */ 11.807 +}; 11.808 + 11.809 int tpmif_vtpm_close(u32 instid) 11.810 { 11.811 int rc = 0; 11.812 struct packet *pak; 11.813 11.814 pak = packet_alloc(NULL, 11.815 - sizeof(create_cmd), 11.816 - create_cmd[0], 11.817 - PACKET_FLAG_DISCARD_RESPONSE); 11.818 + sizeof (destroy_cmd), 11.819 + destroy_cmd[1], PACKET_FLAG_DISCARD_RESPONSE); 11.820 if (pak) { 11.821 - u8 buf[sizeof(destroy_cmd)]; 11.822 + u8 buf[sizeof (destroy_cmd)]; 11.823 u32 instid_no = htonl(instid); 11.824 - memcpy(buf, destroy_cmd, sizeof(destroy_cmd)); 11.825 - memcpy(&buf[10], &instid_no, sizeof(u32)); 11.826 + 11.827 + memcpy(buf, destroy_cmd, sizeof (destroy_cmd)); 11.828 + memcpy(&buf[10], &instid_no, sizeof (u32)); 11.829 11.830 /* copy the buffer into the packet */ 11.831 - rc = packet_set(pak, buf, sizeof(buf)); 11.832 + rc = packet_set(pak, buf, sizeof (buf)); 11.833 11.834 if (rc == 0) { 11.835 pak->tpm_instance = 0; 11.836 @@ -791,23 +753,22 @@ int tpmif_vtpm_close(u32 instid) 11.837 return rc; 11.838 } 11.839 11.840 - 11.841 /*************************************************************** 11.842 Utility functions 11.843 ***************************************************************/ 11.844 11.845 -static int 11.846 -tpm_send_fail_message(struct packet *pak, u8 req_tag) 11.847 +static int tpm_send_fail_message(struct packet *pak, u8 req_tag) 11.848 { 11.849 int rc; 11.850 static const unsigned char tpm_error_message_fail[] = { 11.851 0x00, 0x00, 11.852 0x00, 0x00, 0x00, 0x0a, 11.853 - 0x00, 0x00, 0x00, 0x09 /* TPM_FAIL */ 11.854 + 0x00, 0x00, 0x00, 0x09 /* TPM_FAIL */ 11.855 }; 11.856 - unsigned char buffer[sizeof(tpm_error_message_fail)]; 11.857 + unsigned char buffer[sizeof (tpm_error_message_fail)]; 11.858 11.859 - memcpy(buffer, tpm_error_message_fail, sizeof(tpm_error_message_fail)); 11.860 + memcpy(buffer, tpm_error_message_fail, 11.861 + sizeof (tpm_error_message_fail)); 11.862 /* 11.863 * Insert the right response tag depending on the given tag 11.864 * All response tags are '+3' to the request tag. 
11.865 @@ -817,23 +778,24 @@ tpm_send_fail_message(struct packet *pak 11.866 /* 11.867 * Write the data to shared memory and notify the front-end 11.868 */ 11.869 - rc = packet_write(pak, buffer, sizeof(buffer), 0); 11.870 + rc = packet_write(pak, buffer, sizeof (buffer), 0); 11.871 11.872 return rc; 11.873 } 11.874 11.875 - 11.876 -static void 11.877 -_vtpm_release_packets(struct list_head *head, tpmif_t *tpmif, 11.878 - int send_msgs) 11.879 +static void _vtpm_release_packets(struct list_head *head, 11.880 + tpmif_t * tpmif, int send_msgs) 11.881 { 11.882 struct packet *pak; 11.883 - struct list_head *pos, *tmp; 11.884 + struct list_head *pos, 11.885 + *tmp; 11.886 11.887 list_for_each_safe(pos, tmp, head) { 11.888 pak = list_entry(pos, struct packet, next); 11.889 + 11.890 if (tpmif == NULL || pak->tpmif == tpmif) { 11.891 int can_send = 0; 11.892 + 11.893 del_singleshot_timer_sync(&pak->processing_timer); 11.894 list_del(&pak->next); 11.895 11.896 @@ -849,9 +811,7 @@ static void 11.897 } 11.898 } 11.899 11.900 - 11.901 -int 11.902 -vtpm_release_packets(tpmif_t *tpmif, int send_msgs) 11.903 +int vtpm_release_packets(tpmif_t * tpmif, int send_msgs) 11.904 { 11.905 unsigned long flags; 11.906 11.907 @@ -860,23 +820,22 @@ vtpm_release_packets(tpmif_t *tpmif, int 11.908 _vtpm_release_packets(&dataex.pending_pak, tpmif, send_msgs); 11.909 _vtpm_release_packets(&dataex.current_pak, tpmif, send_msgs); 11.910 11.911 - write_unlock_irqrestore(&dataex.pak_lock, 11.912 - flags); 11.913 + write_unlock_irqrestore(&dataex.pak_lock, flags); 11.914 return 0; 11.915 } 11.916 11.917 - 11.918 static int vtpm_queue_packet(struct packet *pak) 11.919 { 11.920 int rc = 0; 11.921 + 11.922 if (dataex.has_opener) { 11.923 unsigned long flags; 11.924 + 11.925 write_lock_irqsave(&dataex.pak_lock, flags); 11.926 list_add_tail(&pak->next, &dataex.pending_pak); 11.927 /* give the TPM some time to pick up the request */ 11.928 mod_timer(&pak->processing_timer, jiffies + (30 * HZ)); 11.929 - write_unlock_irqrestore(&dataex.pak_lock, 11.930 - flags); 11.931 + write_unlock_irqrestore(&dataex.pak_lock, flags); 11.932 11.933 wake_up_interruptible(&dataex.wait_queue); 11.934 } else { 11.935 @@ -885,24 +844,22 @@ static int vtpm_queue_packet(struct pack 11.936 return rc; 11.937 } 11.938 11.939 - 11.940 -static int vtpm_receive(tpmif_t *tpmif, u32 size) 11.941 +static int vtpm_receive(tpmif_t * tpmif, u32 size) 11.942 { 11.943 int rc = 0; 11.944 unsigned char buffer[10]; 11.945 __be32 *native_size; 11.946 + struct packet *pak = packet_alloc(tpmif, size, 0, 0); 11.947 11.948 - struct packet *pak = packet_alloc(tpmif, size, buffer[4], 0); 11.949 - if (NULL == pak) { 11.950 + if (!pak) 11.951 return -ENOMEM; 11.952 - } 11.953 /* 11.954 * Read 10 bytes from the received buffer to test its 11.955 * content for validity. 11.956 */ 11.957 - if (sizeof(buffer) != packet_read(pak, 11.958 - sizeof(buffer), buffer, 11.959 - sizeof(buffer), 0)) { 11.960 + if (sizeof (buffer) != packet_read(pak, 11.961 + sizeof (buffer), buffer, 11.962 + sizeof (buffer), 0)) { 11.963 goto failexit; 11.964 } 11.965 /* 11.966 @@ -911,7 +868,7 @@ static int vtpm_receive(tpmif_t *tpmif, 11.967 */ 11.968 packet_reset(pak); 11.969 11.970 - native_size = (__force __be32 *)(&buffer[4+2]); 11.971 + native_size = (__force __be32 *) (&buffer[4 + 2]); 11.972 /* 11.973 * Verify that the size of the packet is correct 11.974 * as indicated and that there's actually someone reading packets. 
11.975 @@ -920,26 +877,24 @@ static int vtpm_receive(tpmif_t *tpmif, 11.976 */ 11.977 if (size < 10 || 11.978 be32_to_cpu(*native_size) != size || 11.979 - 0 == dataex.has_opener || 11.980 - tpmif->status != CONNECTED) { 11.981 - rc = -EINVAL; 11.982 - goto failexit; 11.983 + 0 == dataex.has_opener || tpmif->status != CONNECTED) { 11.984 + rc = -EINVAL; 11.985 + goto failexit; 11.986 } else { 11.987 - if ((rc = vtpm_queue_packet(pak)) < 0) { 11.988 + rc = vtpm_queue_packet(pak); 11.989 + if (rc < 0) 11.990 goto failexit; 11.991 - } 11.992 } 11.993 return 0; 11.994 11.995 -failexit: 11.996 + failexit: 11.997 if (pak) { 11.998 - tpm_send_fail_message(pak, buffer[4+1]); 11.999 + tpm_send_fail_message(pak, buffer[4 + 1]); 11.1000 packet_free(pak); 11.1001 } 11.1002 return rc; 11.1003 } 11.1004 11.1005 - 11.1006 /* 11.1007 * Timeout function that gets invoked when a packet has not been processed 11.1008 * during the timeout period. 11.1009 @@ -951,44 +906,42 @@ static void processing_timeout(unsigned 11.1010 { 11.1011 struct packet *pak = (struct packet *)ptr; 11.1012 unsigned long flags; 11.1013 + 11.1014 write_lock_irqsave(&dataex.pak_lock, flags); 11.1015 /* 11.1016 * The packet needs to be searched whether it 11.1017 * is still on the list. 11.1018 */ 11.1019 if (pak == packet_find_packet(&dataex.pending_pak, pak) || 11.1020 - pak == packet_find_packet(&dataex.current_pak, pak) ) { 11.1021 + pak == packet_find_packet(&dataex.current_pak, pak)) { 11.1022 list_del(&pak->next); 11.1023 - tpm_send_fail_message(pak, pak->req_tag); 11.1024 + if ((pak->flags & PACKET_FLAG_DISCARD_RESPONSE) == 0) { 11.1025 + tpm_send_fail_message(pak, pak->req_tag); 11.1026 + } 11.1027 packet_free(pak); 11.1028 } 11.1029 11.1030 write_unlock_irqrestore(&dataex.pak_lock, flags); 11.1031 } 11.1032 11.1033 - 11.1034 - 11.1035 static void tpm_tx_action(unsigned long unused); 11.1036 static DECLARE_TASKLET(tpm_tx_tasklet, tpm_tx_action, 0); 11.1037 11.1038 static struct list_head tpm_schedule_list; 11.1039 static spinlock_t tpm_schedule_list_lock; 11.1040 11.1041 -static inline void 11.1042 -maybe_schedule_tx_action(void) 11.1043 +static inline void maybe_schedule_tx_action(void) 11.1044 { 11.1045 smp_mb(); 11.1046 tasklet_schedule(&tpm_tx_tasklet); 11.1047 } 11.1048 11.1049 -static inline int 11.1050 -__on_tpm_schedule_list(tpmif_t * tpmif) 11.1051 +static inline int __on_tpm_schedule_list(tpmif_t * tpmif) 11.1052 { 11.1053 return tpmif->list.next != NULL; 11.1054 } 11.1055 11.1056 -static void 11.1057 -remove_from_tpm_schedule_list(tpmif_t * tpmif) 11.1058 +static void remove_from_tpm_schedule_list(tpmif_t * tpmif) 11.1059 { 11.1060 spin_lock_irq(&tpm_schedule_list_lock); 11.1061 if (likely(__on_tpm_schedule_list(tpmif))) { 11.1062 @@ -999,8 +952,7 @@ remove_from_tpm_schedule_list(tpmif_t * 11.1063 spin_unlock_irq(&tpm_schedule_list_lock); 11.1064 } 11.1065 11.1066 -static void 11.1067 -add_to_tpm_schedule_list_tail(tpmif_t * tpmif) 11.1068 +static void add_to_tpm_schedule_list_tail(tpmif_t * tpmif) 11.1069 { 11.1070 if (__on_tpm_schedule_list(tpmif)) 11.1071 return; 11.1072 @@ -1013,22 +965,18 @@ add_to_tpm_schedule_list_tail(tpmif_t * 11.1073 spin_unlock_irq(&tpm_schedule_list_lock); 11.1074 } 11.1075 11.1076 -void 11.1077 -tpmif_schedule_work(tpmif_t * tpmif) 11.1078 +void tpmif_schedule_work(tpmif_t * tpmif) 11.1079 { 11.1080 add_to_tpm_schedule_list_tail(tpmif); 11.1081 maybe_schedule_tx_action(); 11.1082 } 11.1083 11.1084 -void 11.1085 -tpmif_deschedule_work(tpmif_t * tpmif) 11.1086 +void 
tpmif_deschedule_work(tpmif_t * tpmif) 11.1087 { 11.1088 remove_from_tpm_schedule_list(tpmif); 11.1089 } 11.1090 11.1091 - 11.1092 -static void 11.1093 -tpm_tx_action(unsigned long unused) 11.1094 +static void tpm_tx_action(unsigned long unused) 11.1095 { 11.1096 struct list_head *ent; 11.1097 tpmif_t *tpmif; 11.1098 @@ -1042,10 +990,6 @@ tpm_tx_action(unsigned long unused) 11.1099 tpmif = list_entry(ent, tpmif_t, list); 11.1100 tpmif_get(tpmif); 11.1101 remove_from_tpm_schedule_list(tpmif); 11.1102 - /* 11.1103 - * Ensure that we see the request when we read from it. 11.1104 - */ 11.1105 - mb(); 11.1106 11.1107 tx = &tpmif->tx->ring[0].req; 11.1108 11.1109 @@ -1056,22 +1000,22 @@ tpm_tx_action(unsigned long unused) 11.1110 } 11.1111 } 11.1112 11.1113 -irqreturn_t 11.1114 -tpmif_be_int(int irq, void *dev_id, struct pt_regs *regs) 11.1115 +irqreturn_t tpmif_be_int(int irq, void *dev_id, struct pt_regs *regs) 11.1116 { 11.1117 - tpmif_t *tpmif = dev_id; 11.1118 + tpmif_t *tpmif = (tpmif_t *) dev_id; 11.1119 + 11.1120 add_to_tpm_schedule_list_tail(tpmif); 11.1121 maybe_schedule_tx_action(); 11.1122 return IRQ_HANDLED; 11.1123 } 11.1124 11.1125 -static int __init 11.1126 -tpmback_init(void) 11.1127 +static int __init tpmback_init(void) 11.1128 { 11.1129 int rc; 11.1130 11.1131 - if ((rc = misc_register(&ibmvtpms_miscdevice)) != 0) { 11.1132 - printk(KERN_ALERT "Could not register misc device for TPM BE.\n"); 11.1133 + if ((rc = misc_register(&vtpms_miscdevice)) != 0) { 11.1134 + printk(KERN_ALERT 11.1135 + "Could not register misc device for TPM BE.\n"); 11.1136 return rc; 11.1137 } 11.1138 11.1139 @@ -1094,13 +1038,11 @@ tpmback_init(void) 11.1140 11.1141 module_init(tpmback_init); 11.1142 11.1143 -static void __exit 11.1144 -tpmback_exit(void) 11.1145 +static void __exit tpmback_exit(void) 11.1146 { 11.1147 - 11.1148 tpmif_xenbus_exit(); 11.1149 tpmif_interface_exit(); 11.1150 - misc_deregister(&ibmvtpms_miscdevice); 11.1151 + misc_deregister(&vtpms_miscdevice); 11.1152 } 11.1153 11.1154 module_exit(tpmback_exit);
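The new struct vtpm_resp_hdr names the 14-byte minimum that vtpm_op_write() previously checked field by field: the 4-byte instance prefix, then the TPM tag, length, and ordinal. A sketch of the well-formedness test it enables; the helper name is illustrative, and ntohl() is used because the wire format is network byte order:

#include <stdint.h>
#include <stddef.h>
#include <arpa/inet.h>		/* ntohl */

struct vtpm_resp_hdr {
	uint32_t instance_no;	/* 4-byte vTPM instance prefix */
	uint16_t tag_no;	/* TPM tag */
	uint32_t len_no;	/* TPM length (covers the TPM packet proper) */
	uint32_t ordinal_no;	/* TPM ordinal / result */
} __attribute__ ((packed));

/* Illustrative check mirroring vtpm_op_write(): the TPM length field
 * must account for everything after the 4-byte instance prefix. */
static int vtpm_resp_well_formed(const struct vtpm_resp_hdr *vrh,
				 size_t total_size)
{
	const size_t off = 4;	/* instance-number prefix */

	if (total_size < sizeof(*vrh))
		return 0;
	return off + ntohl(vrh->len_no) == total_size;
}

With the packed attribute, sizeof(struct vtpm_resp_hdr) is 14, matching the "sum: 14 bytes" comment in the hunk above.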
12.1 --- a/linux-2.6-xen-sparse/drivers/xen/tpmfront/tpmfront.c Mon Mar 06 09:09:18 2006 -0700 12.2 +++ b/linux-2.6-xen-sparse/drivers/xen/tpmfront/tpmfront.c Mon Mar 06 10:21:35 2006 -0700 12.3 @@ -602,7 +602,6 @@ tpm_xmit(struct tpm_private *tp, 12.4 12.5 tx = &tp->tx->ring[i].req; 12.6 12.7 - tx->id = i; 12.8 tx->addr = virt_to_machine(txb->data); 12.9 tx->size = txb->len; 12.10
13.1 --- a/linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_client.c Mon Mar 06 09:09:18 2006 -0700 13.2 +++ b/linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_client.c Mon Mar 06 10:21:35 2006 -0700 13.3 @@ -95,18 +95,25 @@ int xenbus_switch_state(struct xenbus_de 13.4 */ 13.5 13.6 int current_state; 13.7 + int err; 13.8 13.9 - int err = xenbus_scanf(xbt, dev->nodename, "state", "%d", 13.10 + if (state == dev->state) 13.11 + return 0; 13.12 + 13.13 + err = xenbus_scanf(xbt, dev->nodename, "state", "%d", 13.14 ¤t_state); 13.15 - if ((err == 1 && (XenbusState)current_state == state) || 13.16 - err == -ENOENT) 13.17 + if (err != 1) 13.18 return 0; 13.19 13.20 err = xenbus_printf(xbt, dev->nodename, "state", "%d", state); 13.21 if (err) { 13.22 - xenbus_dev_fatal(dev, err, "writing new state"); 13.23 + if (state != XenbusStateClosing) /* Avoid looping */ 13.24 + xenbus_dev_fatal(dev, err, "writing new state"); 13.25 return err; 13.26 } 13.27 + 13.28 + dev->state = state; 13.29 + 13.30 return 0; 13.31 } 13.32 EXPORT_SYMBOL(xenbus_switch_state); 13.33 @@ -138,7 +145,6 @@ void _dev_error(struct xenbus_device *de 13.34 ret = vsnprintf(printf_buffer+len, PRINTF_BUFFER_SIZE-len, fmt, ap); 13.35 13.36 BUG_ON(len + ret > PRINTF_BUFFER_SIZE-1); 13.37 - dev->has_error = 1; 13.38 13.39 dev_err(&dev->dev, "%s\n", printf_buffer); 13.40
14.1 --- a/linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_comms.c Mon Mar 06 09:09:18 2006 -0700 14.2 +++ b/linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_comms.c Mon Mar 06 10:21:35 2006 -0700 14.3 @@ -89,14 +89,18 @@ int xb_write(const void *data, unsigned 14.4 { 14.5 struct xenstore_domain_interface *intf = xenstore_domain_interface(); 14.6 XENSTORE_RING_IDX cons, prod; 14.7 + int rc; 14.8 14.9 while (len != 0) { 14.10 void *dst; 14.11 unsigned int avail; 14.12 14.13 - wait_event_interruptible(xb_waitq, 14.14 - (intf->req_prod - intf->req_cons) != 14.15 - XENSTORE_RING_SIZE); 14.16 + rc = wait_event_interruptible( 14.17 + xb_waitq, 14.18 + (intf->req_prod - intf->req_cons) != 14.19 + XENSTORE_RING_SIZE); 14.20 + if (rc < 0) 14.21 + return rc; 14.22 14.23 /* Read indexes, then verify. */ 14.24 cons = intf->req_cons; 14.25 @@ -130,13 +134,17 @@ int xb_read(void *data, unsigned len) 14.26 { 14.27 struct xenstore_domain_interface *intf = xenstore_domain_interface(); 14.28 XENSTORE_RING_IDX cons, prod; 14.29 + int rc; 14.30 14.31 while (len != 0) { 14.32 unsigned int avail; 14.33 const char *src; 14.34 14.35 - wait_event_interruptible(xb_waitq, 14.36 - intf->rsp_cons != intf->rsp_prod); 14.37 + rc = wait_event_interruptible( 14.38 + xb_waitq, 14.39 + intf->rsp_cons != intf->rsp_prod); 14.40 + if (rc < 0) 14.41 + return rc; 14.42 14.43 /* Read indexes, then verify. */ 14.44 cons = intf->rsp_cons;
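Besides propagating the wait_event_interruptible() return value, so that a signal no longer leaves the loop spinning, the wait conditions rely on free-running ring indexes: req_prod and req_cons increase without ever being masked, so their difference is the number of unconsumed bytes even across 32-bit wraparound. A standalone sketch of that convention; the XENSTORE_RING_SIZE value here is an assumption for illustration:

#include <stdint.h>

typedef uint32_t XENSTORE_RING_IDX;
#define XENSTORE_RING_SIZE 1024u	/* assumed; must be a power of two */

/* prod - cons stays correct under unsigned wraparound as long as the
 * producer never gets more than XENSTORE_RING_SIZE ahead. */
static uint32_t ring_unconsumed(XENSTORE_RING_IDX prod, XENSTORE_RING_IDX cons)
{
	return prod - cons;			/* bytes waiting to be read */
}

static uint32_t ring_free(XENSTORE_RING_IDX prod, XENSTORE_RING_IDX cons)
{
	return XENSTORE_RING_SIZE - (prod - cons);	/* bytes writable */
}

Under this convention, xb_write()'s wait condition (req_prod - req_cons) != XENSTORE_RING_SIZE is exactly ring_free() != 0.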
15.1 --- a/linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_probe.c Mon Mar 06 09:09:18 2006 -0700 15.2 +++ b/linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_probe.c Mon Mar 06 10:21:35 2006 -0700 15.3 @@ -45,9 +45,7 @@ 15.4 #include <asm/hypervisor.h> 15.5 #include <xen/xenbus.h> 15.6 #include <xen/xen_proc.h> 15.7 -#include <xen/balloon.h> 15.8 #include <xen/evtchn.h> 15.9 -#include <xen/public/evtchn.h> 15.10 15.11 #include "xenbus_comms.h" 15.12 15.13 @@ -886,9 +884,33 @@ void unregister_xenstore_notifier(struct 15.14 EXPORT_SYMBOL(unregister_xenstore_notifier); 15.15 15.16 15.17 +static int all_devices_ready_(struct device *dev, void *data) 15.18 +{ 15.19 + struct xenbus_device *xendev = to_xenbus_device(dev); 15.20 + int *result = data; 15.21 + 15.22 + if (xendev->state != XenbusStateConnected) { 15.23 + *result = 0; 15.24 + return 1; 15.25 + } 15.26 + 15.27 + return 0; 15.28 +} 15.29 + 15.30 + 15.31 +static int all_devices_ready(void) 15.32 +{ 15.33 + int ready = 1; 15.34 + bus_for_each_dev(&xenbus_frontend.bus, NULL, &ready, 15.35 + all_devices_ready_); 15.36 + return ready; 15.37 +} 15.38 + 15.39 15.40 void xenbus_probe(void *unused) 15.41 { 15.42 + int i; 15.43 + 15.44 BUG_ON((xenstored_ready <= 0)); 15.45 15.46 /* Enumerate devices in xenstore. */ 15.47 @@ -901,12 +923,50 @@ void xenbus_probe(void *unused) 15.48 15.49 /* Notify others that xenstore is up */ 15.50 notifier_call_chain(&xenstore_chain, 0, NULL); 15.51 + 15.52 + /* Wait, with a 10 second timeout, for all devices currently 15.53 + configured. We need to do this to guarantee that the filesystems 15.54 + and / or network devices needed for boot are available, before we 15.55 + can allow the boot to proceed. 15.56 + 15.57 + A possible improvement here would be to have the tools add a 15.58 + per-device flag to the store entry, indicating whether it is needed 15.59 + at boot time. This would allow people who knew what they were 15.60 + doing to accelerate their boot slightly, but of course needs tools 15.61 + or manual intervention to set up those flags correctly. 
15.62 + */ 15.63 + for (i = 0; i < 10 * HZ; i++) { 15.64 + if (all_devices_ready()) 15.65 + return; 15.66 + 15.67 + set_current_state(TASK_INTERRUPTIBLE); 15.68 + schedule_timeout(1); 15.69 + } 15.70 + 15.71 + printk(KERN_WARNING 15.72 + "XENBUS: Timeout connecting to devices!\n"); 15.73 } 15.74 15.75 15.76 +static struct file_operations xsd_kva_fops; 15.77 static struct proc_dir_entry *xsd_kva_intf; 15.78 static struct proc_dir_entry *xsd_port_intf; 15.79 15.80 +static int xsd_kva_mmap(struct file *file, struct vm_area_struct *vma) 15.81 +{ 15.82 + size_t size = vma->vm_end - vma->vm_start; 15.83 + 15.84 + if ((size > PAGE_SIZE) || (vma->vm_pgoff != 0)) 15.85 + return -EINVAL; 15.86 + 15.87 + vma->vm_pgoff = mfn_to_pfn(xen_start_info->store_mfn); 15.88 + 15.89 + if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, 15.90 + size, vma->vm_page_prot)) 15.91 + return -EAGAIN; 15.92 + 15.93 + return 0; 15.94 +} 15.95 15.96 static int xsd_kva_read(char *page, char **start, off_t off, 15.97 int count, int *eof, void *data) 15.98 @@ -980,9 +1040,14 @@ static int __init xenbus_probe_init(void 15.99 xen_start_info->store_evtchn = op.u.alloc_unbound.port; 15.100 15.101 /* And finally publish the above info in /proc/xen */ 15.102 - if((xsd_kva_intf = create_xen_proc_entry("xsd_kva", 0400))) 15.103 + if ((xsd_kva_intf = create_xen_proc_entry("xsd_kva", 0400))) { 15.104 + memcpy(&xsd_kva_fops, xsd_kva_intf->proc_fops, 15.105 + sizeof(xsd_kva_fops)); 15.106 + xsd_kva_fops.mmap = xsd_kva_mmap; 15.107 + xsd_kva_intf->proc_fops = &xsd_kva_fops; 15.108 xsd_kva_intf->read_proc = xsd_kva_read; 15.109 - if((xsd_port_intf = create_xen_proc_entry("xsd_port", 0400))) 15.110 + } 15.111 + if ((xsd_port_intf = create_xen_proc_entry("xsd_port", 0400))) 15.112 xsd_port_intf->read_proc = xsd_port_read; 15.113 } 15.114
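Two behaviours are added to xenbus_probe.c above: the probe path now blocks boot for up to ten seconds until every enumerated frontend reports XenbusStateConnected, and /proc/xen/xsd_kva gains an mmap handler so the xenstore ring page can be mapped directly instead of being located through the kernel virtual address printed by the read handler. From userspace the new handler would be driven roughly like this (hedged sketch, error handling trimmed):

    #include <fcntl.h>
    #include <sys/mman.h>
    #include <unistd.h>

    /* Hedged sketch: map the store ring page through the new proc node.
     * Only a single page at offset 0 is accepted; anything else fails
     * with -EINVAL, per xsd_kva_mmap() above. */
    static void *map_xenstore_ring(void)
    {
        void *ring;
        int fd = open("/proc/xen/xsd_kva", O_RDONLY);

        if (fd < 0)
            return NULL;
        ring = mmap(NULL, getpagesize(), PROT_READ, MAP_SHARED, fd, 0);
        close(fd);
        return ring == MAP_FAILED ? NULL : ring;
    }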
16.1 --- a/linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_xs.c Mon Mar 06 09:09:18 2006 -0700 16.2 +++ b/linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_xs.c Mon Mar 06 10:21:35 2006 -0700 16.3 @@ -123,8 +123,9 @@ static void *read_reply(enum xsd_sockmsg 16.4 16.5 while (list_empty(&xs_state.reply_list)) { 16.6 spin_unlock(&xs_state.reply_lock); 16.7 - wait_event_interruptible(xs_state.reply_waitq, 16.8 - !list_empty(&xs_state.reply_list)); 16.9 + /* XXX FIXME: Avoid synchronous wait for response here. */ 16.10 + wait_event(xs_state.reply_waitq, 16.11 + !list_empty(&xs_state.reply_list)); 16.12 spin_lock(&xs_state.reply_lock); 16.13 } 16.14 16.15 @@ -685,6 +686,9 @@ static int xenwatch_thread(void *unused) 16.16 wait_event_interruptible(watch_events_waitq, 16.17 !list_empty(&watch_events)); 16.18 16.19 + if (kthread_should_stop()) 16.20 + break; 16.21 + 16.22 down(&xenwatch_mutex); 16.23 16.24 spin_lock(&watch_events_lock); 16.25 @@ -705,6 +709,8 @@ static int xenwatch_thread(void *unused) 16.26 16.27 up(&xenwatch_mutex); 16.28 } 16.29 + 16.30 + return 0; 16.31 } 16.32 16.33 static int process_msg(void) 16.34 @@ -778,7 +784,11 @@ static int xenbus_thread(void *unused) 16.35 if (err) 16.36 printk(KERN_WARNING "XENBUS error %d while reading " 16.37 "message\n", err); 16.38 + if (kthread_should_stop()) 16.39 + break; 16.40 } 16.41 + 16.42 + return 0; 16.43 } 16.44 16.45 int xs_init(void)
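The two daemons above become well-behaved kthreads: each rechecks kthread_should_stop() after every wake-up and returns a value for kthread_stop() to collect. The canonical shape, as a hedged sketch; have_work() and waitq stand in for the real list test and wait queue:

    #include <linux/kthread.h>
    #include <linux/wait.h>

    static DECLARE_WAIT_QUEUE_HEAD(waitq);
    static int have_work(void);             /* stand-in for the list test */

    static int xen_worker_sketch(void *unused)
    {
        for (;;) {
            wait_event_interruptible(waitq,
                                     have_work() || kthread_should_stop());
            if (kthread_should_stop())
                break;
            /* ... take the mutex and process one queued event ... */
        }
        return 0;       /* collected by kthread_stop() */
    }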
17.1 --- a/linux-2.6-xen-sparse/include/asm-i386/mach-xen/setup_arch_post.h Mon Mar 06 09:09:18 2006 -0700 17.2 +++ b/linux-2.6-xen-sparse/include/asm-i386/mach-xen/setup_arch_post.h Mon Mar 06 10:21:35 2006 -0700 17.3 @@ -16,7 +16,7 @@ static char * __init machine_specific_me 17.4 return "Xen"; 17.5 } 17.6 17.7 -void __init machine_specific_modify_cpu_capabilities(struct cpuinfo_x86 *c) 17.8 +void __devinit machine_specific_modify_cpu_capabilities(struct cpuinfo_x86 *c) 17.9 { 17.10 clear_bit(X86_FEATURE_VME, c->x86_capability); 17.11 clear_bit(X86_FEATURE_DE, c->x86_capability);
18.1 --- a/linux-2.6-xen-sparse/include/asm-x86_64/mach-xen/setup_arch_post.h Mon Mar 06 09:09:18 2006 -0700 18.2 +++ b/linux-2.6-xen-sparse/include/asm-x86_64/mach-xen/setup_arch_post.h Mon Mar 06 10:21:35 2006 -0700 18.3 @@ -1,28 +1,12 @@ 18.4 /** 18.5 - * machine_specific_memory_setup - Hook for machine specific memory setup. 18.6 + * machine_specific_* - Hooks for machine specific setup. 18.7 * 18.8 * Description: 18.9 * This is included late in kernel/setup.c so that it can make 18.10 * use of all of the static functions. 18.11 **/ 18.12 18.13 -static char * __init machine_specific_memory_setup(void) 18.14 -{ 18.15 - char *who; 18.16 - unsigned long start_pfn, max_pfn; 18.17 - 18.18 - who = "Xen"; 18.19 - 18.20 - start_pfn = 0; 18.21 - max_pfn = xen_start_info->nr_pages; 18.22 - 18.23 - e820.nr_map = 0; 18.24 - add_memory_region(PFN_PHYS(start_pfn), PFN_PHYS(max_pfn) - PFN_PHYS(start_pfn), E820_RAM); 18.25 - 18.26 - return who; 18.27 -} 18.28 - 18.29 -void __init machine_specific_modify_cpu_capabilities(struct cpuinfo_x86 *c) 18.30 +void __cpuinit machine_specific_modify_cpu_capabilities(struct cpuinfo_x86 *c) 18.31 { 18.32 clear_bit(X86_FEATURE_VME, c->x86_capability); 18.33 clear_bit(X86_FEATURE_DE, c->x86_capability);
19.1 --- a/linux-2.6-xen-sparse/include/xen/public/xenstored.h Mon Mar 06 09:09:18 2006 -0700 19.2 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 19.3 @@ -1,89 +0,0 @@ 19.4 -/* 19.5 - * Simple prototyle Xen Store Daemon providing simple tree-like database. 19.6 - * Copyright (C) 2005 Rusty Russell IBM Corporation 19.7 - * 19.8 - * This file may be distributed separately from the Linux kernel, or 19.9 - * incorporated into other software packages, subject to the following license: 19.10 - * 19.11 - * Permission is hereby granted, free of charge, to any person obtaining a copy 19.12 - * of this source file (the "Software"), to deal in the Software without 19.13 - * restriction, including without limitation the rights to use, copy, modify, 19.14 - * merge, publish, distribute, sublicense, and/or sell copies of the Software, 19.15 - * and to permit persons to whom the Software is furnished to do so, subject to 19.16 - * the following conditions: 19.17 - * 19.18 - * The above copyright notice and this permission notice shall be included in 19.19 - * all copies or substantial portions of the Software. 19.20 - * 19.21 - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 19.22 - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 19.23 - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 19.24 - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19.25 - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19.26 - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 19.27 - * IN THE SOFTWARE. 19.28 - */ 19.29 - 19.30 -#ifndef _XENSTORED_H 19.31 -#define _XENSTORED_H 19.32 - 19.33 -enum xsd_sockmsg_type 19.34 -{ 19.35 - XS_DEBUG, 19.36 - XS_SHUTDOWN, 19.37 - XS_DIRECTORY, 19.38 - XS_READ, 19.39 - XS_GET_PERMS, 19.40 - XS_WATCH, 19.41 - XS_WATCH_ACK, 19.42 - XS_UNWATCH, 19.43 - XS_TRANSACTION_START, 19.44 - XS_TRANSACTION_END, 19.45 - XS_OP_READ_ONLY = XS_TRANSACTION_END, 19.46 - XS_INTRODUCE, 19.47 - XS_RELEASE, 19.48 - XS_GET_DOMAIN_PATH, 19.49 - XS_WRITE, 19.50 - XS_MKDIR, 19.51 - XS_RM, 19.52 - XS_SET_PERMS, 19.53 - XS_WATCH_EVENT, 19.54 - XS_ERROR, 19.55 -}; 19.56 - 19.57 -#define XS_WRITE_NONE "NONE" 19.58 -#define XS_WRITE_CREATE "CREATE" 19.59 -#define XS_WRITE_CREATE_EXCL "CREATE|EXCL" 19.60 - 19.61 -/* We hand errors as strings, for portability. */ 19.62 -struct xsd_errors 19.63 -{ 19.64 - int errnum; 19.65 - const char *errstring; 19.66 -}; 19.67 -#define XSD_ERROR(x) { x, #x } 19.68 -static struct xsd_errors xsd_errors[] __attribute__((unused)) = { 19.69 - XSD_ERROR(EINVAL), 19.70 - XSD_ERROR(EACCES), 19.71 - XSD_ERROR(EEXIST), 19.72 - XSD_ERROR(EISDIR), 19.73 - XSD_ERROR(ENOENT), 19.74 - XSD_ERROR(ENOMEM), 19.75 - XSD_ERROR(ENOSPC), 19.76 - XSD_ERROR(EIO), 19.77 - XSD_ERROR(ENOTEMPTY), 19.78 - XSD_ERROR(ENOSYS), 19.79 - XSD_ERROR(EROFS), 19.80 - XSD_ERROR(EBUSY), 19.81 - XSD_ERROR(EAGAIN), 19.82 - XSD_ERROR(EISCONN), 19.83 -}; 19.84 -struct xsd_sockmsg 19.85 -{ 19.86 - u32 type; 19.87 - u32 len; /* Length of data following this. */ 19.88 - 19.89 - /* Generally followed by nul-terminated string(s). */ 19.90 -}; 19.91 - 19.92 -#endif /* _XENSTORED_H */
20.1 --- a/linux-2.6-xen-sparse/include/xen/xenbus.h Mon Mar 06 09:09:18 2006 -0700 20.2 +++ b/linux-2.6-xen-sparse/include/xen/xenbus.h Mon Mar 06 10:21:35 2006 -0700 20.3 @@ -63,7 +63,7 @@ struct xenbus_device { 20.4 int otherend_id; 20.5 struct xenbus_watch otherend_watch; 20.6 struct device dev; 20.7 - int has_error; 20.8 + XenbusState state; 20.9 void *data; 20.10 }; 20.11
21.1 --- a/tools/debugger/gdb/gdb-6.2.1-xen-sparse/gdb/gdbserver/linux-xen-low.c Mon Mar 06 09:09:18 2006 -0700 21.2 +++ b/tools/debugger/gdb/gdb-6.2.1-xen-sparse/gdb/gdbserver/linux-xen-low.c Mon Mar 06 10:21:35 2006 -0700 21.3 @@ -41,8 +41,6 @@ 21.4 21.5 #define TRACE_ENTER /* printf("enter %s\n", __FUNCTION__) */ 21.6 21.7 -long (*myptrace)(int xc_handle, enum __ptrace_request, uint32_t, long, long); 21.8 -int (*myxcwait)(int xc_handle, int domain, int *status, int options) ; 21.9 static int xc_handle; 21.10 21.11 static inline int 21.12 @@ -170,7 +168,7 @@ linux_attach (int domid) 21.13 add_thread (0, new_process); 21.14 new_process->stop_expected = 0; 21.15 21.16 - if (myptrace (xc_handle, PTRACE_ATTACH, domid, 0, 0) != 0) { 21.17 + if (xc_ptrace (xc_handle, PTRACE_ATTACH, domid, 0, isfile) != 0) { 21.18 fprintf (stderr, "Cannot attach to domain %d: %s (%d)\n", domid, 21.19 strerror (errno), errno); 21.20 fflush (stderr); 21.21 @@ -188,7 +186,7 @@ linux_kill_one_process (struct inferior_ 21.22 { 21.23 struct thread_info *thread = (struct thread_info *) entry; 21.24 struct process_info *process = get_thread_process (thread); 21.25 - myptrace (xc_handle, PTRACE_KILL, pid_of (process), 0, 0); 21.26 + xc_ptrace (xc_handle, PTRACE_KILL, pid_of (process), 0, 0); 21.27 } 21.28 21.29 21.30 @@ -202,7 +200,7 @@ static void 21.31 linux_detach_one_process (struct inferior_list_entry *entry) 21.32 { 21.33 21.34 - myptrace (xc_handle, PTRACE_DETACH, current_domid, 0, 0); 21.35 + xc_ptrace (xc_handle, PTRACE_DETACH, current_domid, 0, 0); 21.36 } 21.37 21.38 21.39 @@ -228,7 +226,7 @@ static unsigned char 21.40 linux_wait (char *status) 21.41 { 21.42 int w; 21.43 - if (myxcwait(xc_handle, current_domid, &w, 0)) 21.44 + if (xc_waitdomain(xc_handle, current_domid, &w, 0)) 21.45 return -1; 21.46 21.47 linux_set_inferior(); 21.48 @@ -250,7 +248,7 @@ linux_resume (struct thread_resume *resu 21.49 for_each_inferior(&all_threads, regcache_invalidate_one); 21.50 if (debug_threads) 21.51 fprintf(stderr, "step: %d\n", step); 21.52 - myptrace (xc_handle, step ? PTRACE_SINGLESTEP : PTRACE_CONT, 21.53 + xc_ptrace (xc_handle, step ? PTRACE_SINGLESTEP : PTRACE_CONT, 21.54 resume_info->thread, 0, 0); 21.55 21.56 } 21.57 @@ -275,7 +273,7 @@ regsets_fetch_inferior_registers () 21.58 } 21.59 21.60 buf = malloc (regset->size); 21.61 - res = myptrace (xc_handle, regset->get_request, 21.62 + res = xc_ptrace (xc_handle, regset->get_request, 21.63 curvcpuid(), 21.64 0, (PTRACE_XFER_TYPE)buf); 21.65 if (res < 0) 21.66 @@ -329,7 +327,7 @@ regsets_store_inferior_registers () 21.67 21.68 buf = malloc (regset->size); 21.69 regset->fill_function (buf); 21.70 - res = myptrace (xc_handle, regset->set_request, curvcpuid(), 0, (PTRACE_XFER_TYPE)buf); 21.71 + res = xc_ptrace (xc_handle, regset->set_request, curvcpuid(), 0, (PTRACE_XFER_TYPE)buf); 21.72 if (res < 0) 21.73 { 21.74 if (errno == EIO) 21.75 @@ -407,7 +405,7 @@ linux_read_memory (CORE_ADDR memaddr, ch 21.76 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE)) 21.77 { 21.78 errno = 0; 21.79 - buffer[i] = myptrace (xc_handle, PTRACE_PEEKTEXT, curvcpuid(), (PTRACE_ARG3_TYPE) addr, 0); 21.80 + buffer[i] = xc_ptrace (xc_handle, PTRACE_PEEKTEXT, curvcpuid(), (PTRACE_ARG3_TYPE) addr, 0); 21.81 if (errno) 21.82 return errno; 21.83 } 21.84 @@ -440,13 +438,13 @@ linux_write_memory (CORE_ADDR memaddr, c 21.85 21.86 /* Fill start and end extra bytes of buffer with existing memory data. 
*/ 21.87 21.88 - buffer[0] = myptrace (xc_handle, PTRACE_PEEKTEXT, curvcpuid(), 21.89 + buffer[0] = xc_ptrace (xc_handle, PTRACE_PEEKTEXT, curvcpuid(), 21.90 (PTRACE_ARG3_TYPE) addr, 0); 21.91 21.92 if (count > 1) 21.93 { 21.94 buffer[count - 1] 21.95 - = myptrace (xc_handle, PTRACE_PEEKTEXT, curvcpuid(), 21.96 + = xc_ptrace (xc_handle, PTRACE_PEEKTEXT, curvcpuid(), 21.97 (PTRACE_ARG3_TYPE) (addr + (count - 1) 21.98 * sizeof (PTRACE_XFER_TYPE)), 21.99 0); 21.100 @@ -460,7 +458,7 @@ linux_write_memory (CORE_ADDR memaddr, c 21.101 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE)) 21.102 { 21.103 errno = 0; 21.104 - myptrace (xc_handle, PTRACE_POKETEXT, curvcpuid(), 21.105 + xc_ptrace (xc_handle, PTRACE_POKETEXT, curvcpuid(), 21.106 (PTRACE_ARG3_TYPE) addr, buffer[i]); 21.107 if (errno) 21.108 return errno; 21.109 @@ -561,13 +559,6 @@ initialize_low (void) 21.110 the_low_target.breakpoint_len); 21.111 init_registers (); 21.112 linux_init_signals (); 21.113 - if (isfile) { 21.114 - myptrace = xc_ptrace_core; 21.115 - myxcwait = xc_waitdomain_core; 21.116 - } else { 21.117 - myptrace = xc_ptrace; 21.118 - myxcwait = xc_waitdomain; 21.119 - } 21.120 using_threads = thread_db_init (); 21.121 21.122 }
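With the myptrace/myxcwait indirection gone, the gdbserver stub always calls xc_ptrace() and xc_waitdomain(), and simply forwards isfile in the data argument of PTRACE_ATTACH; libxc remembers the flag and routes every later request itself. A hedged sketch of the resulting attach call from the stub's side, assuming xc_ptrace() is visible through xenctrl.h as used above:

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/ptrace.h>

    /* Hedged sketch: isfile == 1 means "domid" is really a core-file
     * descriptor; xc_ptrace() latches the flag at attach time. */
    static int attach_sketch(int xc_handle, int domid, int isfile)
    {
        if (xc_ptrace(xc_handle, PTRACE_ATTACH, domid, 0, isfile) != 0) {
            fprintf(stderr, "Cannot attach to domain %d: %s\n",
                    domid, strerror(errno));
            return -1;
        }
        return 0;
    }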
22.1 --- a/tools/debugger/gdb/gdbbuild Mon Mar 06 09:09:18 2006 -0700 22.2 +++ b/tools/debugger/gdb/gdbbuild Mon Mar 06 10:21:35 2006 -0700 22.3 @@ -1,7 +1,9 @@ 22.4 #!/bin/sh 22.5 22.6 +[ "$GDB_MIRROR" ] || GDB_MIRROR="ftp://ftp.gnu.org/gnu/gdb/" 22.7 + 22.8 rm -rf gdb-6.2.1 gdb-6.2.1-linux-i386-xen 22.9 -[ -a gdb-6.2.1.tar.bz2 ] || wget -c ftp://ftp.gnu.org/gnu/gdb/gdb-6.2.1.tar.bz2 22.10 +[ -a gdb-6.2.1.tar.bz2 ] || wget -c "$GDB_MIRROR/gdb-6.2.1.tar.bz2" 22.11 tar xjf gdb-6.2.1.tar.bz2 22.12 22.13 cd gdb-6.2.1-xen-sparse 22.14 @@ -12,8 +14,10 @@ mkdir gdb-6.2.1-linux-i386-xen 22.15 cd gdb-6.2.1-linux-i386-xen 22.16 ../gdb-6.2.1/configure 22.17 22.18 -# some people don't have gmake 22.19 -if which gmake ; then 22.20 +# Use $MAKE if set, else use gmake if present, otherwise use make 22.21 +if [ "$MAKE" ]; then 22.22 + $MAKE 22.23 +elif which gmake ; then 22.24 gmake -j4 22.25 else 22.26 make -j4
23.1 --- a/tools/examples/block Mon Mar 06 09:09:18 2006 -0700 23.2 +++ b/tools/examples/block Mon Mar 06 10:21:35 2006 -0700 23.3 @@ -129,7 +129,14 @@ check_sharing() 23.4 same_vm() 23.5 { 23.6 local otherdom="$1" 23.7 - local othervm=$(xenstore-read "/local/domain/$otherdom/vm") 23.8 + # Note that othervm can be MISSING here, because Xend will be racing with 23.9 + # the hotplug scripts -- the entries in /local/domain can be removed by 23.10 + # Xend before the hotplug scripts have removed the entry in 23.11 + # /local/domain/0/backend/. In this case, we want to pretend that the 23.12 + # VM is the same as FRONTEND_UUID, because that way the 'sharing' will be 23.13 + # allowed. 23.14 + local othervm=$(xenstore_read_default "/local/domain/$otherdom/vm" \ 23.15 + "$FRONTEND_UUID") 23.16 23.17 [ "$FRONTEND_UUID" == "$othervm" ] 23.18 } 23.19 @@ -314,7 +321,28 @@ mount it read-write in a guest domain." 23.20 fi 23.21 fi 23.22 23.23 - f=$(readlink -f "$f") 23.24 + # Canonicalise the filename for the comparison. 23.25 + 23.26 + # I have seen this readlink fail because the filename given by 23.27 + # losetup is only the basename. This cannot happen when the loop 23.28 + # device is set up through this script, because file is 23.29 + # canonicalised above, but it may happen when loop devices are set 23.30 + # up some other way. This readlink may also conceivably fail if 23.31 + # the file backing this loop device has been removed. 23.32 + 23.33 + # For maximum safety, in the case that $f does not resolve, we 23.34 + # assume that $file and $f are in the same directory. 23.35 + 23.36 + # If you create a loopback filesystem, remove it and continue to 23.37 + # run on it, and then create another file with the same name, then 23.38 + # this check will block that -- don't do that. 23.39 + 23.40 + # If you create loop devices through some other mechanism, use 23.41 + # relative filenames, and then use the same filename through this 23.42 + # script, then this check will block that -- don't do that either. 23.43 + 23.44 + f=$(readlink -f "$f" || echo $(dirname "$file")/$(basename "$f")) 23.45 + 23.46 23.47 if [ "$f" == "$file" ] 23.48 then
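The same canonicalise-with-fallback rule reads naturally in C for tools that must compare loop-device backing files; a hedged sketch with realpath(3), keeping the script's same-directory assumption when resolution fails:

    #include <libgen.h>
    #include <limits.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Hedged sketch: resolve f, or assume it is a sibling of file when
     * realpath() fails (e.g. losetup reported only a basename). */
    static void canonical_or_sibling(const char *file, const char *f,
                                     char out[PATH_MAX])
    {
        char dirbuf[PATH_MAX], basebuf[PATH_MAX];

        if (realpath(f, out) != NULL)
            return;
        snprintf(dirbuf, sizeof(dirbuf), "%s", file);
        snprintf(basebuf, sizeof(basebuf), "%s", f);
        snprintf(out, PATH_MAX, "%s/%s", dirname(dirbuf), basename(basebuf));
    }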
24.1 --- a/tools/examples/xen-hotplug-cleanup Mon Mar 06 09:09:18 2006 -0700 24.2 +++ b/tools/examples/xen-hotplug-cleanup Mon Mar 06 10:21:35 2006 -0700 24.3 @@ -12,10 +12,11 @@ dir=$(dirname "$0") 24.4 claim_lock "block" 24.5 24.6 # remove device frontend store entries 24.7 -xenstore-rm -t $(xenstore-read "$XENBUS_PATH/frontend") || true 24.8 +xenstore-rm -t \ 24.9 + $(xenstore-read "$XENBUS_PATH/frontend" 2>/dev/null) 2>/dev/null || true 24.10 24.11 # remove device backend store entries 24.12 -xenstore-rm -t "$XENBUS_PATH" || true 24.13 -xenstore-rm -t "error/$XENBUS_PATH" || true 24.14 +xenstore-rm -t "$XENBUS_PATH" 2>/dev/null || true 24.15 +xenstore-rm -t "error/$XENBUS_PATH" 2>/dev/null || true 24.16 24.17 release_lock "block"
25.1 --- a/tools/firmware/hvmloader/Makefile Mon Mar 06 09:09:18 2006 -0700 25.2 +++ b/tools/firmware/hvmloader/Makefile Mon Mar 06 10:21:35 2006 -0700 25.3 @@ -18,6 +18,9 @@ 25.4 # Place - Suite 330, Boston, MA 02111-1307 USA. 25.5 # 25.6 25.7 +# External CFLAGS can do more harm than good. 25.8 +CFLAGS := 25.9 + 25.10 XEN_ROOT = ../../.. 25.11 include $(XEN_ROOT)/Config.mk 25.12
26.1 --- a/tools/firmware/vmxassist/Makefile Mon Mar 06 09:09:18 2006 -0700 26.2 +++ b/tools/firmware/vmxassist/Makefile Mon Mar 06 10:21:35 2006 -0700 26.3 @@ -18,6 +18,9 @@ 26.4 # Place - Suite 330, Boston, MA 02111-1307 USA. 26.5 # 26.6 26.7 +# External CFLAGS can do more harm than good. 26.8 +CFLAGS := 26.9 + 26.10 XEN_ROOT = ../../.. 26.11 include $(XEN_ROOT)/Config.mk 26.12
27.1 --- a/tools/firmware/vmxassist/setup.c Mon Mar 06 09:09:18 2006 -0700 27.2 +++ b/tools/firmware/vmxassist/setup.c Mon Mar 06 10:21:35 2006 -0700 27.3 @@ -123,6 +123,8 @@ setup_paging(void) 27.4 void 27.5 setup_gdt(void) 27.6 { 27.7 + unsigned long long addr = (unsigned long long) &tss; 27.8 + 27.9 /* setup task state segment */ 27.10 memset(&tss, 0, sizeof(tss)); 27.11 tss.ss0 = DATA_SELECTOR; 27.12 @@ -130,8 +132,7 @@ setup_gdt(void) 27.13 tss.iomap_base = offsetof(struct tss, iomap); 27.14 27.15 /* initialize gdt's tss selector */ 27.16 - unsigned long long addr = (unsigned long long) &tss; 27.17 - gdt[TSS_SELECTOR / sizeof(gdt[0])] |= 27.18 + gdt[TSS_SELECTOR / sizeof(gdt[0])] |= 27.19 ((addr & 0xFF000000) << (56-24)) | 27.20 ((addr & 0x00FF0000) << (32-16)) | 27.21 ((addr & 0x0000FFFF) << (16)) |
28.1 --- a/tools/ioemu/audio/audio.c Mon Mar 06 09:09:18 2006 -0700 28.2 +++ b/tools/ioemu/audio/audio.c Mon Mar 06 10:21:35 2006 -0700 28.3 @@ -713,7 +713,7 @@ int AUD_calc_elapsed (SWVoice *sw) 28.4 delta = now - sw->old_ticks; 28.5 bytes = (delta * sw->bytes_per_second) / ticks_per_sec; 28.6 if (delta < 0) { 28.7 - dolog ("whoops delta(<0)=%lld\n", delta); 28.8 + dolog ("whoops delta(<0)=%"PRId64"\n", delta); 28.9 return 0; 28.10 } 28.11
29.1 --- a/tools/ioemu/hw/i8254.c Mon Mar 06 09:09:18 2006 -0700 29.2 +++ b/tools/ioemu/hw/i8254.c Mon Mar 06 10:21:35 2006 -0700 29.3 @@ -249,7 +249,7 @@ void pit_reset_hvm_vectors() 29.4 req->u.data |= (irq << 16); 29.5 req->u.data |= (hvm_channel << 24); 29.6 req->u.data |= ((s->rw_mode) << 26); 29.7 - fprintf(logfile, "HVM_PIT:pass info 0x%llx to HV!\n", req->u.data); 29.8 + fprintf(logfile, "HVM_PIT:pass info 0x%"PRIx64" to HV!\n", req->u.data); 29.9 } 29.10 29.11 static inline void pit_load_count(PITChannelState *s, int val)
30.1 --- a/tools/ioemu/monitor.c Mon Mar 06 09:09:18 2006 -0700 30.2 +++ b/tools/ioemu/monitor.c Mon Mar 06 10:21:35 2006 -0700 30.3 @@ -676,19 +676,19 @@ static void monitor_handle_command(const 30.4 break; 30.5 case '-': 30.6 { 30.7 - int has_option; 30.8 + long has_option; 30.9 /* option */ 30.10 - 30.11 + 30.12 c = *typestr++; 30.13 if (c == '\0') 30.14 goto bad_type; 30.15 - while (isspace(*p)) 30.16 + while (isspace(*p)) 30.17 p++; 30.18 has_option = 0; 30.19 if (*p == '-') { 30.20 p++; 30.21 if (*p != c) { 30.22 - term_printf("%s: unsupported option -%c\n", 30.23 + term_printf("%s: unsupported option -%c\n", 30.24 cmdname, *p); 30.25 goto fail; 30.26 }
31.1 --- a/tools/ioemu/target-i386-dm/helper2.c Mon Mar 06 09:09:18 2006 -0700 31.2 +++ b/tools/ioemu/target-i386-dm/helper2.c Mon Mar 06 10:21:35 2006 -0700 31.3 @@ -138,11 +138,11 @@ void sp_info() 31.4 req = &(shared_page->vcpu_iodata[i].vp_ioreq); 31.5 term_printf("vcpu %d: event port %d\n", 31.6 i, shared_page->vcpu_iodata[i].vp_eport); 31.7 - term_printf(" req state: %x, pvalid: %x, addr: %llx, " 31.8 - "data: %llx, count: %llx, size: %llx\n", 31.9 + term_printf(" req state: %x, pvalid: %x, addr: %"PRIx64", " 31.10 + "data: %"PRIx64", count: %"PRIx64", size: %"PRIx64"\n", 31.11 req->state, req->pdata_valid, req->addr, 31.12 req->u.data, req->count, req->size); 31.13 - term_printf(" IO totally occurred on this vcpu: %llx\n", 31.14 + term_printf(" IO totally occurred on this vcpu: %"PRIx64"\n", 31.15 req->io_count); 31.16 } 31.17 } 31.18 @@ -158,8 +158,8 @@ static ioreq_t* __cpu_get_ioreq(int vcpu 31.19 return req; 31.20 31.21 fprintf(logfile, "False I/O request ... in-service already: " 31.22 - "%x, pvalid: %x, port: %llx, " 31.23 - "data: %llx, count: %llx, size: %llx\n", 31.24 + "%x, pvalid: %x, port: %"PRIx64", " 31.25 + "data: %"PRIx64", count: %"PRIx64", size: %"PRIx64"\n", 31.26 req->state, req->pdata_valid, req->addr, 31.27 req->u.data, req->count, req->size); 31.28 return NULL; 31.29 @@ -460,12 +460,6 @@ int main_loop(void) 31.30 FD_ZERO(&wakeup_rfds); 31.31 FD_SET(evtchn_fd, &wakeup_rfds); 31.32 31.33 -#if __WORDSIZE == 32 31.34 -#define ULONGLONG_MAX 0xffffffffffffffffULL 31.35 -#else 31.36 -#define ULONGLONG_MAX ULONG_MAX 31.37 -#endif 31.38 - 31.39 tun_receive_handler(&rfds); 31.40 if ( FD_ISSET(evtchn_fd, &rfds) ) { 31.41 cpu_handle_ioreq(env);
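The ioemu changes in the last few files are all the same fix: %llx hard-codes the "long long" length modifier, but on LP64 builds these 64-bit fields are typically plain unsigned long, so the old format strings drew warnings and needed hacks like the ULONGLONG_MAX ifdef removed above. The <inttypes.h> macros expand to the right modifier on every word size. A small self-contained illustration:

    #include <inttypes.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t data = 0xdeadbeefULL;

        /* PRIx64/PRId64 expand to the correct length modifier ("llx" on
         * i386, "lx" on x86_64), so one source line is right on both. */
        printf("data: 0x%" PRIx64 " (%" PRId64 ")\n", data, (int64_t)data);
        return 0;
    }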
32.1 --- a/tools/ioemu/vl.c Mon Mar 06 09:09:18 2006 -0700 32.2 +++ b/tools/ioemu/vl.c Mon Mar 06 10:21:35 2006 -0700 32.3 @@ -2672,6 +2672,7 @@ int main(int argc, char **argv) 32.4 char qemu_dm_logfilename[64]; 32.5 const char *loadvm = NULL; 32.6 unsigned long nr_pages, extra_pages, ram_pages, *page_array; 32.7 + xc_dominfo_t info; 32.8 extern void *shared_page; 32.9 extern void *shared_vram; 32.10 32.11 @@ -3132,7 +3133,7 @@ int main(int argc, char **argv) 32.12 32.13 ram_pages = ram_size/PAGE_SIZE; 32.14 #if defined(__i386__) || defined(__x86_64__) 32.15 - vgaram_pages = (vga_ram_size -1)/PAGE_SIZE + 1; 32.16 + vgaram_pages = (vga_ram_size -1) / PAGE_SIZE + 1; 32.17 free_pages = vgaram_pages / L1_PAGETABLE_ENTRIES; 32.18 extra_pages = vgaram_pages + free_pages; 32.19 #else 32.20 @@ -3142,7 +3143,6 @@ int main(int argc, char **argv) 32.21 32.22 xc_handle = xc_interface_open(); 32.23 32.24 - xc_dominfo_t info; 32.25 xc_domain_getinfo(xc_handle, domid, 1, &info); 32.26 32.27 nr_pages = info.nr_pages + extra_pages;
33.1 --- a/tools/libxc/xc_core.c Mon Mar 06 09:09:18 2006 -0700 33.2 +++ b/tools/libxc/xc_core.c Mon Mar 06 10:21:35 2006 -0700 33.3 @@ -61,7 +61,7 @@ xc_domain_dumpcore(int xc_handle, 33.4 33.5 nr_pages = info.nr_pages; 33.6 33.7 - header.xch_magic = 0xF00FEBED; 33.8 + header.xch_magic = XC_CORE_MAGIC; 33.9 header.xch_nr_vcpus = nr_vcpus; 33.10 header.xch_nr_pages = nr_pages; 33.11 header.xch_ctxt_offset = sizeof(struct xc_core_header); 33.12 @@ -71,8 +71,12 @@ xc_domain_dumpcore(int xc_handle, 33.13 (sizeof(vcpu_guest_context_t) * nr_vcpus) + 33.14 (nr_pages * sizeof(unsigned long))); 33.15 33.16 - write(dump_fd, &header, sizeof(struct xc_core_header)); 33.17 - write(dump_fd, &ctxt, sizeof(ctxt[0]) * nr_vcpus); 33.18 + if (write(dump_fd, &header, sizeof(struct xc_core_header)) < 0 || 33.19 + write(dump_fd, &ctxt, sizeof(ctxt[0]) * nr_vcpus) < 0) 33.20 + { 33.21 + PERROR("write failed"); 33.22 + goto error_out; 33.23 + } 33.24 33.25 if ((page_array = malloc(nr_pages * sizeof(unsigned long))) == NULL) { 33.26 printf("Could not allocate memory\n"); 33.27 @@ -82,7 +86,11 @@ xc_domain_dumpcore(int xc_handle, 33.28 printf("Could not get the page frame list\n"); 33.29 goto error_out; 33.30 } 33.31 - write(dump_fd, page_array, nr_pages * sizeof(unsigned long)); 33.32 + if (write(dump_fd, page_array, nr_pages * sizeof(unsigned long)) < 0) 33.33 + { 33.34 + PERROR("write failed"); 33.35 + goto error_out; 33.36 + } 33.37 lseek(dump_fd, header.xch_pages_offset, SEEK_SET); 33.38 for (dump_mem = dump_mem_start, i = 0; i < nr_pages; i++) { 33.39 copy_from_domain_page(xc_handle, domid, page_array, i, dump_mem);
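xc_core.c above now checks each write() and stamps the header with the shared XC_CORE_MAGIC constant that the core-file reader verifies. A bare "write() < 0" test still lets short writes pass silently; a hedged sketch of a stricter helper a dump path could use (write_all() is hypothetical, not a libxc function):

    #include <errno.h>
    #include <unistd.h>

    /* Loop until all bytes are written, so a short write cannot
     * silently truncate the dump; the caller PERRORs and bails on -1. */
    static int write_all(int fd, const void *buf, size_t len)
    {
        const char *p = buf;

        while (len != 0) {
            ssize_t n = write(fd, p, len);
            if (n < 0) {
                if (errno == EINTR)
                    continue;
                return -1;
            }
            p += n;
            len -= n;
        }
        return 0;
    }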
34.1 --- a/tools/libxc/xc_ptrace.c Mon Mar 06 09:09:18 2006 -0700 34.2 +++ b/tools/libxc/xc_ptrace.c Mon Mar 06 10:21:35 2006 -0700 34.3 @@ -7,14 +7,13 @@ 34.4 34.5 #include "xc_private.h" 34.6 #include "xg_private.h" 34.7 -#include <thread_db.h> 34.8 #include "xc_ptrace.h" 34.9 34.10 - 34.11 /* XXX application state */ 34.12 static long nr_pages = 0; 34.13 static unsigned long *page_array = NULL; 34.14 static int current_domid = -1; 34.15 +static int current_isfile; 34.16 34.17 static cpumap_t online_cpumap; 34.18 static cpumap_t regs_valid; 34.19 @@ -32,7 +31,8 @@ fetch_regs(int xc_handle, int cpu, int * 34.20 34.21 if (online) 34.22 *online = 0; 34.23 - if ( !(regs_valid & (1 << cpu)) ) { 34.24 + if ( !(regs_valid & (1 << cpu)) ) 34.25 + { 34.26 retval = xc_vcpu_getcontext(xc_handle, current_domid, 34.27 cpu, &ctxt[cpu]); 34.28 if ( retval ) 34.29 @@ -50,9 +50,6 @@ fetch_regs(int xc_handle, int cpu, int * 34.30 return retval; 34.31 } 34.32 34.33 -#define FETCH_REGS(cpu) if (fetch_regs(xc_handle, cpu, NULL)) goto error_out; 34.34 - 34.35 - 34.36 static struct thr_ev_handlers { 34.37 thr_ev_handler_t td_create; 34.38 thr_ev_handler_t td_death; 34.39 @@ -95,14 +92,12 @@ get_online_cpumap(int xc_handle, dom0_ge 34.40 *cpumap = 0; 34.41 for (i = 0; i <= d->max_vcpu_id; i++) { 34.42 if ((retval = fetch_regs(xc_handle, i, &online))) 34.43 - goto error_out; 34.44 + return retval; 34.45 if (online) 34.46 *cpumap |= (1 << i); 34.47 } 34.48 34.49 return 0; 34.50 - error_out: 34.51 - return retval; 34.52 } 34.53 34.54 /* 34.55 @@ -118,7 +113,8 @@ online_vcpus_changed(cpumap_t cpumap) 34.56 int index; 34.57 34.58 while ( (index = ffsll(changed_cpumap)) ) { 34.59 - if ( cpumap & (1 << (index - 1)) ) { 34.60 + if ( cpumap & (1 << (index - 1)) ) 34.61 + { 34.62 if (handlers.td_create) handlers.td_create(index - 1); 34.63 } else { 34.64 printf("thread death: %d\n", index - 1); 34.65 @@ -143,34 +139,32 @@ map_domain_va_pae( 34.66 uint64_t *l3, *l2, *l1; 34.67 static void *v; 34.68 34.69 - FETCH_REGS(cpu); 34.70 + if (fetch_regs(xc_handle, cpu, NULL)) 34.71 + return NULL; 34.72 34.73 l3 = xc_map_foreign_range( 34.74 xc_handle, current_domid, PAGE_SIZE, PROT_READ, ctxt[cpu].ctrlreg[3] >> PAGE_SHIFT); 34.75 if ( l3 == NULL ) 34.76 - goto error_out; 34.77 + return NULL; 34.78 34.79 l2p = l3[l3_table_offset_pae(va)] >> PAGE_SHIFT; 34.80 l2 = xc_map_foreign_range(xc_handle, current_domid, PAGE_SIZE, PROT_READ, l2p); 34.81 if ( l2 == NULL ) 34.82 - goto error_out; 34.83 + return NULL; 34.84 34.85 l1p = l2[l2_table_offset_pae(va)] >> PAGE_SHIFT; 34.86 l1 = xc_map_foreign_range(xc_handle, current_domid, PAGE_SIZE, perm, l1p); 34.87 if ( l1 == NULL ) 34.88 - goto error_out; 34.89 + return NULL; 34.90 34.91 p = l1[l1_table_offset_pae(va)] >> PAGE_SHIFT; 34.92 if ( v != NULL ) 34.93 munmap(v, PAGE_SIZE); 34.94 v = xc_map_foreign_range(xc_handle, current_domid, PAGE_SIZE, perm, p); 34.95 if ( v == NULL ) 34.96 - goto error_out; 34.97 + return NULL; 34.98 34.99 return (void *)((unsigned long)v | (va & (PAGE_SIZE - 1))); 34.100 - 34.101 - error_out: 34.102 - return NULL; 34.103 } 34.104 34.105 static void * 34.106 @@ -215,17 +209,18 @@ map_domain_va( 34.107 if ( (page_array = malloc(nr_pages * sizeof(unsigned long))) == NULL ) 34.108 { 34.109 printf("Could not allocate memory\n"); 34.110 - goto error_out; 34.111 + return NULL; 34.112 } 34.113 if ( xc_get_pfn_list(xc_handle, current_domid, 34.114 page_array, nr_pages) != nr_pages ) 34.115 { 34.116 printf("Could not get the page frame list\n"); 34.117 - goto error_out; 
34.118 + return NULL; 34.119 } 34.120 } 34.121 34.122 - FETCH_REGS(cpu); 34.123 + if (fetch_regs(xc_handle, cpu, NULL)) 34.124 + return NULL; 34.125 34.126 if ( ctxt[cpu].ctrlreg[3] != cr3_phys[cpu] ) 34.127 { 34.128 @@ -236,10 +231,10 @@ map_domain_va( 34.129 xc_handle, current_domid, PAGE_SIZE, PROT_READ, 34.130 cr3_phys[cpu] >> PAGE_SHIFT); 34.131 if ( cr3_virt[cpu] == NULL ) 34.132 - goto error_out; 34.133 + return NULL; 34.134 } 34.135 if ( (pde = cr3_virt[cpu][vtopdi(va)]) == 0 ) 34.136 - goto error_out; 34.137 + return NULL; 34.138 if ( (ctxt[cpu].flags & VGCF_HVM_GUEST) && paging_enabled(&ctxt[cpu]) ) 34.139 pde = page_array[pde >> PAGE_SHIFT] << PAGE_SHIFT; 34.140 if ( pde != pde_phys[cpu] ) 34.141 @@ -251,10 +246,10 @@ map_domain_va( 34.142 xc_handle, current_domid, PAGE_SIZE, PROT_READ, 34.143 pde_phys[cpu] >> PAGE_SHIFT); 34.144 if ( pde_virt[cpu] == NULL ) 34.145 - goto error_out; 34.146 + return NULL; 34.147 } 34.148 if ( (page = pde_virt[cpu][vtopti(va)]) == 0 ) 34.149 - goto error_out; 34.150 + return NULL; 34.151 if ( (ctxt[cpu].flags & VGCF_HVM_GUEST) && paging_enabled(&ctxt[cpu]) ) 34.152 page = page_array[page >> PAGE_SHIFT] << PAGE_SHIFT; 34.153 if ( (page != page_phys[cpu]) || (perm != prev_perm[cpu]) ) 34.154 @@ -268,19 +263,16 @@ map_domain_va( 34.155 if ( page_virt[cpu] == NULL ) 34.156 { 34.157 page_phys[cpu] = 0; 34.158 - goto error_out; 34.159 + return NULL; 34.160 } 34.161 prev_perm[cpu] = perm; 34.162 } 34.163 34.164 return (void *)(((unsigned long)page_virt[cpu]) | (va & BSD_PAGE_MASK)); 34.165 - 34.166 - error_out: 34.167 - return NULL; 34.168 } 34.169 34.170 -int 34.171 -xc_waitdomain( 34.172 +static int 34.173 +__xc_waitdomain( 34.174 int xc_handle, 34.175 int domain, 34.176 int *status, 34.177 @@ -335,7 +327,6 @@ xc_ptrace( 34.178 long edata) 34.179 { 34.180 DECLARE_DOM0_OP; 34.181 - int status = 0; 34.182 struct gdb_regs pt; 34.183 long retval = 0; 34.184 unsigned long *guest_va; 34.185 @@ -350,84 +341,83 @@ xc_ptrace( 34.186 { 34.187 case PTRACE_PEEKTEXT: 34.188 case PTRACE_PEEKDATA: 34.189 - guest_va = (unsigned long *)map_domain_va( 34.190 - xc_handle, cpu, addr, PROT_READ); 34.191 + if (current_isfile) 34.192 + guest_va = (unsigned long *)map_domain_va_core(current_domid, 34.193 + cpu, addr, ctxt); 34.194 + else 34.195 + guest_va = (unsigned long *)map_domain_va(xc_handle, 34.196 + cpu, addr, PROT_READ); 34.197 if ( guest_va == NULL ) 34.198 - { 34.199 - status = EFAULT; 34.200 - goto error_out; 34.201 - } 34.202 + goto out_error; 34.203 retval = *guest_va; 34.204 break; 34.205 34.206 case PTRACE_POKETEXT: 34.207 case PTRACE_POKEDATA: 34.208 /* XXX assume that all CPUs have the same address space */ 34.209 - guest_va = (unsigned long *)map_domain_va( 34.210 - xc_handle, cpu, addr, PROT_READ|PROT_WRITE); 34.211 - if ( guest_va == NULL ) { 34.212 - status = EFAULT; 34.213 - goto error_out; 34.214 - } 34.215 + if (current_isfile) 34.216 + guest_va = (unsigned long *)map_domain_va_core(current_domid, 34.217 + cpu, addr, ctxt); 34.218 + else 34.219 + guest_va = (unsigned long *)map_domain_va(xc_handle, 34.220 + cpu, addr, PROT_READ|PROT_WRITE); 34.221 + if ( guest_va == NULL ) 34.222 + goto out_error; 34.223 *guest_va = (unsigned long)data; 34.224 break; 34.225 34.226 case PTRACE_GETREGS: 34.227 + if (!current_isfile && fetch_regs(xc_handle, cpu, NULL)) 34.228 + goto out_error; 34.229 + SET_PT_REGS(pt, ctxt[cpu].user_regs); 34.230 + memcpy(data, &pt, sizeof(struct gdb_regs)); 34.231 + break; 34.232 + 34.233 case PTRACE_GETFPREGS: 34.234 case 
PTRACE_GETFPXREGS: 34.235 - 34.236 - FETCH_REGS(cpu); 34.237 - if ( request == PTRACE_GETREGS ) 34.238 - { 34.239 - SET_PT_REGS(pt, ctxt[cpu].user_regs); 34.240 - memcpy(data, &pt, sizeof(struct gdb_regs)); 34.241 - } 34.242 - else if (request == PTRACE_GETFPREGS) 34.243 - { 34.244 - memcpy(data, &ctxt[cpu].fpu_ctxt, sizeof(ctxt[cpu].fpu_ctxt)); 34.245 - } 34.246 - else /*if (request == PTRACE_GETFPXREGS)*/ 34.247 - { 34.248 - memcpy(data, &ctxt[cpu].fpu_ctxt, sizeof(ctxt[cpu].fpu_ctxt)); 34.249 - } 34.250 + if (!current_isfile && fetch_regs(xc_handle, cpu, NULL)) 34.251 + goto out_error; 34.252 + memcpy(data, &ctxt[cpu].fpu_ctxt, sizeof(ctxt[cpu].fpu_ctxt)); 34.253 break; 34.254 34.255 case PTRACE_SETREGS: 34.256 + if (current_isfile) 34.257 + goto out_unsupported; /* XXX not yet supported */ 34.258 SET_XC_REGS(((struct gdb_regs *)data), ctxt[cpu].user_regs); 34.259 - retval = xc_vcpu_setcontext(xc_handle, current_domid, cpu, &ctxt[cpu]); 34.260 - if (retval) 34.261 - goto error_out; 34.262 + if ((retval = xc_vcpu_setcontext(xc_handle, current_domid, cpu, 34.263 + &ctxt[cpu]))) 34.264 + goto out_error_dom0; 34.265 break; 34.266 34.267 case PTRACE_SINGLESTEP: 34.268 + if (current_isfile) 34.269 + goto out_unsupported; /* XXX not yet supported */ 34.270 /* XXX we can still have problems if the user switches threads 34.271 * during single-stepping - but that just seems retarded 34.272 */ 34.273 ctxt[cpu].user_regs.eflags |= PSL_T; 34.274 - retval = xc_vcpu_setcontext(xc_handle, current_domid, cpu, &ctxt[cpu]); 34.275 - if ( retval ) 34.276 - { 34.277 - perror("dom0 op failed"); 34.278 - goto error_out; 34.279 - } 34.280 + if ((retval = xc_vcpu_setcontext(xc_handle, current_domid, cpu, 34.281 + &ctxt[cpu]))) 34.282 + goto out_error_dom0; 34.283 /* FALLTHROUGH */ 34.284 34.285 case PTRACE_CONT: 34.286 case PTRACE_DETACH: 34.287 + if (current_isfile) 34.288 + goto out_unsupported; /* XXX not yet supported */ 34.289 if ( request != PTRACE_SINGLESTEP ) 34.290 { 34.291 FOREACH_CPU(cpumap, index) { 34.292 cpu = index - 1; 34.293 - FETCH_REGS(cpu); 34.294 + if (fetch_regs(xc_handle, cpu, NULL)) 34.295 + goto out_error; 34.296 /* Clear trace flag */ 34.297 - if ( ctxt[cpu].user_regs.eflags & PSL_T ) { 34.298 + if ( ctxt[cpu].user_regs.eflags & PSL_T ) 34.299 + { 34.300 ctxt[cpu].user_regs.eflags &= ~PSL_T; 34.301 - retval = xc_vcpu_setcontext(xc_handle, current_domid, 34.302 - cpu, &ctxt[cpu]); 34.303 - if ( retval ) { 34.304 - perror("dom0 op failed"); 34.305 - goto error_out; 34.306 - } 34.307 + if ((retval = xc_vcpu_setcontext(xc_handle, current_domid, 34.308 + cpu, &ctxt[cpu]))) 34.309 + goto out_error_dom0; 34.310 } 34.311 } 34.312 } 34.313 @@ -436,31 +426,34 @@ xc_ptrace( 34.314 op.cmd = DOM0_SETDEBUGGING; 34.315 op.u.setdebugging.domain = current_domid; 34.316 op.u.setdebugging.enable = 0; 34.317 - retval = do_dom0_op(xc_handle, &op); 34.318 + if ((retval = do_dom0_op(xc_handle, &op))) 34.319 + goto out_error_dom0; 34.320 } 34.321 regs_valid = 0; 34.322 - xc_domain_unpause(xc_handle, current_domid > 0 ? current_domid : -current_domid); 34.323 + if ((retval = xc_domain_unpause(xc_handle, current_domid > 0 ? 
34.324 + current_domid : -current_domid))) 34.325 + goto out_error_dom0; 34.326 break; 34.327 34.328 case PTRACE_ATTACH: 34.329 current_domid = domid_tid; 34.330 + current_isfile = (int)edata; 34.331 + if (current_isfile) 34.332 + break; 34.333 op.cmd = DOM0_GETDOMAININFO; 34.334 op.u.getdomaininfo.domain = current_domid; 34.335 retval = do_dom0_op(xc_handle, &op); 34.336 if ( retval || (op.u.getdomaininfo.domain != current_domid) ) 34.337 - { 34.338 - perror("dom0 op failed"); 34.339 - goto error_out; 34.340 - } 34.341 + goto out_error_dom0; 34.342 if ( op.u.getdomaininfo.flags & DOMFLAGS_PAUSED ) 34.343 - { 34.344 printf("domain currently paused\n"); 34.345 - } else 34.346 - retval = xc_domain_pause(xc_handle, current_domid); 34.347 + else if ((retval = xc_domain_pause(xc_handle, current_domid))) 34.348 + goto out_error_dom0; 34.349 op.cmd = DOM0_SETDEBUGGING; 34.350 op.u.setdebugging.domain = current_domid; 34.351 op.u.setdebugging.enable = 1; 34.352 - retval = do_dom0_op(xc_handle, &op); 34.353 + if ((retval = do_dom0_op(xc_handle, &op))) 34.354 + goto out_error_dom0; 34.355 34.356 if (get_online_cpumap(xc_handle, &op.u.getdomaininfo, &cpumap)) 34.357 printf("get_online_cpumap failed\n"); 34.358 @@ -474,26 +467,40 @@ xc_ptrace( 34.359 case PTRACE_POKEUSER: 34.360 case PTRACE_SYSCALL: 34.361 case PTRACE_KILL: 34.362 -#ifdef DEBUG 34.363 - printf("unsupported xc_ptrace request %s\n", ptrace_names[request]); 34.364 -#endif 34.365 - /* XXX not yet supported */ 34.366 - status = ENOSYS; 34.367 - break; 34.368 + goto out_unsupported; /* XXX not yet supported */ 34.369 34.370 case PTRACE_TRACEME: 34.371 printf("PTRACE_TRACEME is an invalid request under Xen\n"); 34.372 - status = EINVAL; 34.373 - } 34.374 - 34.375 - if ( status ) 34.376 - { 34.377 - errno = status; 34.378 - retval = -1; 34.379 + goto out_error; 34.380 } 34.381 34.382 - error_out: 34.383 + return retval; 34.384 + 34.385 + out_error_dom0: 34.386 + perror("dom0 op failed"); 34.387 + out_error: 34.388 + errno = EINVAL; 34.389 return retval; 34.390 + 34.391 + out_unsupported: 34.392 +#ifdef DEBUG 34.393 + printf("unsupported xc_ptrace request %s\n", ptrace_names[request]); 34.394 +#endif 34.395 + errno = ENOSYS; 34.396 + return -1; 34.397 + 34.398 +} 34.399 + 34.400 +int 34.401 +xc_waitdomain( 34.402 + int xc_handle, 34.403 + int domain, 34.404 + int *status, 34.405 + int options) 34.406 +{ 34.407 + if (current_isfile) 34.408 + return xc_waitdomain_core(xc_handle, domain, status, options, ctxt); 34.409 + return __xc_waitdomain(xc_handle, domain, status, options); 34.410 } 34.411 34.412 /*
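The reworked xc_ptrace() is now the single entry point for both live domains and saved core files: PTRACE_ATTACH latches current_isfile from its data argument, and every later request branches on that flag (map_domain_va_core() versus map_domain_va(), xc_waitdomain_core() versus the private __xc_waitdomain()). In outline (hedged sketch, not the verbatim function; peek_core() and peek_live() are hypothetical stand-ins for the two mapping paths plus the dereference):

    #include <stdint.h>
    #include <sys/ptrace.h>

    extern long peek_core(int domfd, long addr);      /* hypothetical */
    extern long peek_live(int xc_handle, long addr);  /* hypothetical */

    static int current_domid, current_isfile;

    long xc_ptrace_outline(int xc_handle, enum __ptrace_request request,
                           uint32_t domid_or_fd, long addr, long data)
    {
        switch (request) {
        case PTRACE_ATTACH:
            current_domid = domid_or_fd;
            current_isfile = (int)data;   /* gdbserver's isfile flag */
            /* live domains are additionally paused and put into
             * DOM0_SETDEBUGGING mode at this point */
            return 0;
        case PTRACE_PEEKTEXT:
        case PTRACE_PEEKDATA:
            return current_isfile ? peek_core(current_domid, addr)
                                  : peek_live(xc_handle, addr);
        default:
            return 0;
        }
    }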
35.1 --- a/tools/libxc/xc_ptrace.h Mon Mar 06 09:09:18 2006 -0700 35.2 +++ b/tools/libxc/xc_ptrace.h Mon Mar 06 10:21:35 2006 -0700 35.3 @@ -1,6 +1,8 @@ 35.4 #ifndef XC_PTRACE_ 35.5 #define XC_PTRACE_ 35.6 35.7 +#include <thread_db.h> 35.8 + 35.9 #ifdef XC_PTRACE_PRIVATE 35.10 #define X86_CR0_PE 0x00000001 /* Enable Protected Mode (RW) */ 35.11 #define X86_CR0_PG 0x80000000 /* Paging (RW) */ 35.12 @@ -8,33 +10,7 @@ 35.13 #define PDRSHIFT 22 35.14 #define PSL_T 0x00000100 /* trace enable bit */ 35.15 35.16 -char * ptrace_names[] = { 35.17 - "PTRACE_TRACEME", 35.18 - "PTRACE_PEEKTEXT", 35.19 - "PTRACE_PEEKDATA", 35.20 - "PTRACE_PEEKUSER", 35.21 - "PTRACE_POKETEXT", 35.22 - "PTRACE_POKEDATA", 35.23 - "PTRACE_POKEUSER", 35.24 - "PTRACE_CONT", 35.25 - "PTRACE_KILL", 35.26 - "PTRACE_SINGLESTEP", 35.27 - "PTRACE_INVALID", 35.28 - "PTRACE_INVALID", 35.29 - "PTRACE_GETREGS", 35.30 - "PTRACE_SETREGS", 35.31 - "PTRACE_GETFPREGS", 35.32 - "PTRACE_SETFPREGS", 35.33 - "PTRACE_ATTACH", 35.34 - "PTRACE_DETACH", 35.35 - "PTRACE_GETFPXREGS", 35.36 - "PTRACE_SETFPXREGS", 35.37 - "PTRACE_INVALID", 35.38 - "PTRACE_INVALID", 35.39 - "PTRACE_INVALID", 35.40 - "PTRACE_INVALID", 35.41 - "PTRACE_SYSCALL", 35.42 -}; 35.43 +extern const char * const ptrace_names[]; 35.44 35.45 struct gdb_regs { 35.46 long ebx; /* 0 */
36.1 --- a/tools/libxc/xc_ptrace_core.c Mon Mar 06 09:09:18 2006 -0700 36.2 +++ b/tools/libxc/xc_ptrace_core.c Mon Mar 06 10:21:35 2006 -0700 36.3 @@ -1,82 +1,11 @@ 36.4 +#define XC_PTRACE_PRIVATE 36.5 + 36.6 #include <sys/ptrace.h> 36.7 #include <sys/wait.h> 36.8 #include "xc_private.h" 36.9 +#include "xc_ptrace.h" 36.10 #include <time.h> 36.11 36.12 -#define BSD_PAGE_MASK (PAGE_SIZE-1) 36.13 -#define PDRSHIFT 22 36.14 -#define VCPU 0 /* XXX */ 36.15 - 36.16 -/* 36.17 - * long 36.18 - * ptrace(enum __ptrace_request request, pid_t pid, void *addr, void *data); 36.19 - */ 36.20 - 36.21 -struct gdb_regs { 36.22 - long ebx; /* 0 */ 36.23 - long ecx; /* 4 */ 36.24 - long edx; /* 8 */ 36.25 - long esi; /* 12 */ 36.26 - long edi; /* 16 */ 36.27 - long ebp; /* 20 */ 36.28 - long eax; /* 24 */ 36.29 - int xds; /* 28 */ 36.30 - int xes; /* 32 */ 36.31 - int xfs; /* 36 */ 36.32 - int xgs; /* 40 */ 36.33 - long orig_eax; /* 44 */ 36.34 - long eip; /* 48 */ 36.35 - int xcs; /* 52 */ 36.36 - long eflags; /* 56 */ 36.37 - long esp; /* 60 */ 36.38 - int xss; /* 64 */ 36.39 -}; 36.40 - 36.41 -#define printval(x) printf("%s = %lx\n", #x, (long)x); 36.42 -#define SET_PT_REGS(pt, xc) \ 36.43 -{ \ 36.44 - pt.ebx = xc.ebx; \ 36.45 - pt.ecx = xc.ecx; \ 36.46 - pt.edx = xc.edx; \ 36.47 - pt.esi = xc.esi; \ 36.48 - pt.edi = xc.edi; \ 36.49 - pt.ebp = xc.ebp; \ 36.50 - pt.eax = xc.eax; \ 36.51 - pt.eip = xc.eip; \ 36.52 - pt.xcs = xc.cs; \ 36.53 - pt.eflags = xc.eflags; \ 36.54 - pt.esp = xc.esp; \ 36.55 - pt.xss = xc.ss; \ 36.56 - pt.xes = xc.es; \ 36.57 - pt.xds = xc.ds; \ 36.58 - pt.xfs = xc.fs; \ 36.59 - pt.xgs = xc.gs; \ 36.60 -} 36.61 - 36.62 -#define SET_XC_REGS(pt, xc) \ 36.63 -{ \ 36.64 - xc.ebx = pt->ebx; \ 36.65 - xc.ecx = pt->ecx; \ 36.66 - xc.edx = pt->edx; \ 36.67 - xc.esi = pt->esi; \ 36.68 - xc.edi = pt->edi; \ 36.69 - xc.ebp = pt->ebp; \ 36.70 - xc.eax = pt->eax; \ 36.71 - xc.eip = pt->eip; \ 36.72 - xc.cs = pt->xcs; \ 36.73 - xc.eflags = pt->eflags; \ 36.74 - xc.esp = pt->esp; \ 36.75 - xc.ss = pt->xss; \ 36.76 - xc.es = pt->xes; \ 36.77 - xc.ds = pt->xds; \ 36.78 - xc.fs = pt->xfs; \ 36.79 - xc.gs = pt->xgs; \ 36.80 -} 36.81 - 36.82 - 36.83 -#define vtopdi(va) ((va) >> PDRSHIFT) 36.84 -#define vtopti(va) (((va) >> PAGE_SHIFT) & 0x3ff) 36.85 - 36.86 /* XXX application state */ 36.87 36.88 static long nr_pages = 0; 36.89 @@ -84,7 +13,6 @@ static unsigned long *p2m_array = NULL; 36.90 static unsigned long *m2p_array = NULL; 36.91 static unsigned long pages_offset; 36.92 static unsigned long cr3[MAX_VIRT_CPUS]; 36.93 -static vcpu_guest_context_t ctxt[MAX_VIRT_CPUS]; 36.94 36.95 /* --------------------- */ 36.96 36.97 @@ -92,11 +20,13 @@ static unsigned long 36.98 map_mtop_offset(unsigned long ma) 36.99 { 36.100 return pages_offset + (m2p_array[ma >> PAGE_SHIFT] << PAGE_SHIFT); 36.102 } 36.103 36.104 36.105 -static void * 36.106 -map_domain_va(unsigned long domfd, int cpu, void * guest_va) 36.107 +void * 36.108 +map_domain_va_core(unsigned long domfd, int cpu, void * guest_va, 36.109 + vcpu_guest_context_t *ctxt) 36.110 { 36.111 unsigned long pde, page; 36.112 unsigned long va = (unsigned long)guest_va; 36.113 @@ -120,12 +50,12 @@ map_domain_va(unsigned long domfd, int c 36.114 if (v == MAP_FAILED) 36.115 { 36.116 perror("mmap failed"); 36.117 - goto error_out; 36.118 + return NULL; 36.119 } 36.120 cr3_virt[cpu] = v; 36.121 } 36.122 if ((pde = cr3_virt[cpu][vtopdi(va)]) == 0) /* logical address */ 36.123 - goto error_out; 36.124 + return NULL; 36.125 if (ctxt[cpu].flags & 
VGCF_HVM_GUEST) 36.126 pde = p2m_array[pde >> PAGE_SHIFT] << PAGE_SHIFT; 36.127 if (pde != pde_phys[cpu]) 36.128 @@ -137,11 +67,11 @@ map_domain_va(unsigned long domfd, int c 36.129 NULL, PAGE_SIZE, PROT_READ, MAP_PRIVATE, domfd, 36.130 map_mtop_offset(pde_phys[cpu])); 36.131 if (v == MAP_FAILED) 36.132 - goto error_out; 36.133 + return NULL; 36.134 pde_virt[cpu] = v; 36.135 } 36.136 if ((page = pde_virt[cpu][vtopti(va)]) == 0) /* logical address */ 36.137 - goto error_out; 36.138 + return NULL; 36.139 if (ctxt[cpu].flags & VGCF_HVM_GUEST) 36.140 page = p2m_array[page >> PAGE_SHIFT] << PAGE_SHIFT; 36.141 if (page != page_phys[cpu]) 36.142 @@ -152,17 +82,15 @@ map_domain_va(unsigned long domfd, int c 36.143 v = mmap( 36.144 NULL, PAGE_SIZE, PROT_READ, MAP_PRIVATE, domfd, 36.145 map_mtop_offset(page_phys[cpu])); 36.146 - if (v == MAP_FAILED) { 36.147 + if (v == MAP_FAILED) 36.148 + { 36.149 printf("cr3 %lx pde %lx page %lx pti %lx\n", cr3[cpu], pde, page, vtopti(va)); 36.150 page_phys[cpu] = 0; 36.151 - goto error_out; 36.152 + return NULL; 36.153 } 36.154 page_virt[cpu] = v; 36.155 } 36.156 return (void *)(((unsigned long)page_virt[cpu]) | (va & BSD_PAGE_MASK)); 36.157 - 36.158 - error_out: 36.159 - return 0; 36.160 } 36.161 36.162 int 36.163 @@ -170,18 +98,26 @@ xc_waitdomain_core( 36.164 int xc_handle, 36.165 int domfd, 36.166 int *status, 36.167 - int options) 36.168 + int options, 36.169 + vcpu_guest_context_t *ctxt) 36.170 { 36.171 - int retval = -1; 36.172 int nr_vcpus; 36.173 int i; 36.174 xc_core_header_t header; 36.175 36.176 - if (nr_pages == 0) { 36.177 + if (nr_pages == 0) 36.178 + { 36.179 36.180 if (read(domfd, &header, sizeof(header)) != sizeof(header)) 36.181 return -1; 36.182 36.183 + if (header.xch_magic != XC_CORE_MAGIC) { 36.184 + printf("Magic number mismatch: 0x%08x (file) != " 36.185 + "0x%08x (code)\n", header.xch_magic, 36.186 + XC_CORE_MAGIC); 36.187 + return -1; 36.188 + } 36.189 + 36.190 nr_pages = header.xch_nr_pages; 36.191 nr_vcpus = header.xch_nr_vcpus; 36.192 pages_offset = header.xch_pages_offset; 36.193 @@ -193,17 +129,19 @@ xc_waitdomain_core( 36.194 for (i = 0; i < nr_vcpus; i++) { 36.195 cr3[i] = ctxt[i].ctrlreg[3]; 36.196 } 36.197 - if ((p2m_array = malloc(nr_pages * sizeof(unsigned long))) == NULL) { 36.198 + if ((p2m_array = malloc(nr_pages * sizeof(unsigned long))) == NULL) 36.199 + { 36.200 printf("Could not allocate p2m_array\n"); 36.201 - goto error_out; 36.202 + return -1; 36.203 } 36.204 if (read(domfd, p2m_array, sizeof(unsigned long)*nr_pages) != 36.205 sizeof(unsigned long)*nr_pages) 36.206 return -1; 36.207 36.208 - if ((m2p_array = malloc((1<<20) * sizeof(unsigned long))) == NULL) { 36.209 + if ((m2p_array = malloc((1<<20) * sizeof(unsigned long))) == NULL) 36.210 + { 36.211 printf("Could not allocate m2p array\n") ; 36.212 - goto error_out; 36.213 + return -1; 36.214 + } 36.215 bzero(m2p_array, sizeof(unsigned long)* 1 << 20); 36.216 36.217 @@ -212,89 +150,7 @@ xc_waitdomain_core( 36.218 }
ptrace_names[request], domid, addr, data); 36.245 -#endif 36.246 - switch (request) { 36.247 - case PTRACE_PEEKTEXT: 36.248 - case PTRACE_PEEKDATA: 36.249 - if ((guest_va = (unsigned long *)map_domain_va(domfd, cpu, addr)) == NULL) { 36.250 - status = EFAULT; 36.251 - goto error_out; 36.252 - } 36.253 - 36.254 - retval = *guest_va; 36.255 - break; 36.256 - case PTRACE_POKETEXT: 36.257 - case PTRACE_POKEDATA: 36.258 - if ((guest_va = (unsigned long *)map_domain_va(domfd, cpu, addr)) == NULL) { 36.259 - status = EFAULT; 36.260 - goto error_out; 36.261 - } 36.262 - *guest_va = (unsigned long)data; 36.263 - break; 36.264 - case PTRACE_GETREGS: 36.265 - case PTRACE_GETFPREGS: 36.266 - case PTRACE_GETFPXREGS: 36.267 - if (request == PTRACE_GETREGS) { 36.268 - SET_PT_REGS(pt, ctxt[cpu].user_regs); 36.269 - memcpy(data, &pt, sizeof(struct gdb_regs)); 36.270 - } else if (request == PTRACE_GETFPREGS) 36.271 - memcpy(data, &ctxt[cpu].fpu_ctxt, sizeof(ctxt[cpu].fpu_ctxt)); 36.272 - else /*if (request == PTRACE_GETFPXREGS)*/ 36.273 - memcpy(data, &ctxt[cpu].fpu_ctxt, sizeof(ctxt[cpu].fpu_ctxt)); 36.274 - break; 36.275 - case PTRACE_ATTACH: 36.276 - retval = 0; 36.277 - break; 36.278 - case PTRACE_SETREGS: 36.279 - case PTRACE_SINGLESTEP: 36.280 - case PTRACE_CONT: 36.281 - case PTRACE_DETACH: 36.282 - case PTRACE_SETFPREGS: 36.283 - case PTRACE_SETFPXREGS: 36.284 - case PTRACE_PEEKUSER: 36.285 - case PTRACE_POKEUSER: 36.286 - case PTRACE_SYSCALL: 36.287 - case PTRACE_KILL: 36.288 -#ifdef DEBUG 36.289 - printf("unsupported xc_ptrace request %s\n", ptrace_names[request]); 36.290 -#endif 36.291 - status = ENOSYS; 36.292 - break; 36.293 - case PTRACE_TRACEME: 36.294 - printf("PTRACE_TRACEME is an invalid request under Xen\n"); 36.295 - status = EINVAL; 36.296 - } 36.297 - 36.298 - if (status) { 36.299 - errno = status; 36.300 - retval = -1; 36.301 - } 36.302 - error_out: 36.303 - return retval; 36.304 + return 0; 36.305 } 36.306 36.307 /*
37.1 --- a/tools/libxc/xenctrl.h Mon Mar 06 09:09:18 2006 -0700 37.2 +++ b/tools/libxc/xenctrl.h Mon Mar 06 10:21:35 2006 -0700 37.3 @@ -92,19 +92,26 @@ typedef struct xc_core_header { 37.4 unsigned int xch_pages_offset; 37.5 } xc_core_header_t; 37.6 37.7 +#define XC_CORE_MAGIC 0xF00FEBED 37.8 37.9 long xc_ptrace_core( 37.10 int xc_handle, 37.11 enum __ptrace_request request, 37.12 uint32_t domid, 37.13 long addr, 37.14 - long data); 37.15 - 37.16 + long data, 37.17 + vcpu_guest_context_t *ctxt); 37.18 +void * map_domain_va_core( 37.19 + unsigned long domfd, 37.20 + int cpu, 37.21 + void *guest_va, 37.22 + vcpu_guest_context_t *ctxt); 37.23 int xc_waitdomain_core( 37.24 int xc_handle, 37.25 int domain, 37.26 int *status, 37.27 - int options); 37.28 + int options, 37.29 + vcpu_guest_context_t *ctxt); 37.30 37.31 /* 37.32 * DOMAIN MANAGEMENT FUNCTIONS
38.1 --- a/tools/misc/lomount/lomount.c Mon Mar 06 09:09:18 2006 -0700 38.2 +++ b/tools/misc/lomount/lomount.c Mon Mar 06 10:21:35 2006 -0700 38.3 @@ -24,16 +24,33 @@ 38.4 * THE SOFTWARE. 38.5 */ 38.6 38.7 +/* 38.8 + * Return code: 38.9 + * 38.10 + * bit 7 set: lomount wrapper failed 38.11 + * bit 7 clear: lomount wrapper ok; mount's return code in low 7 bits 38.12 + * 0 success 38.13 + */ 38.14 + 38.15 +enum 38.16 +{ 38.17 + ERR_USAGE = 0x80, // Incorrect usage 38.18 + ERR_PART_PARSE, // Failed to parse partition table 38.19 + ERR_NO_PART, // No such partition 38.20 + ERR_NO_EPART, // No such extended partition 38.21 + ERR_MOUNT // Other failure of mount command 38.22 +}; 38.23 + 38.24 #include <unistd.h> 38.25 #include <stdio.h> 38.26 #include <stdlib.h> 38.27 #include <string.h> 38.28 #include <strings.h> 38.29 +#include <sys/wait.h> 38.30 #include <errno.h> 38.31 38.32 #define BUF 4096 38.33 38.34 -//#define SECSIZE 4096 /* arbitrarilly large (it's probably just 512) */ 38.35 #define SECSIZE 512 38.36 38.37 struct pentry 38.38 @@ -50,30 +67,32 @@ struct pentry 38.39 unsigned long no_of_sectors_abs; 38.40 }; 38.41 38.42 -char * progname; 38.43 - 38.44 -int loadptable(const char *argv, struct pentry parttbl[], struct pentry **exttbl) 38.45 +int loadptable(const char *diskimage, struct pentry parttbl[], struct pentry **exttbl, int * valid) 38.46 { 38.47 - FILE *fd; 38.48 - int i, valid, total_known_sectors = 0; 38.49 + FILE *fd; 38.50 + size_t size; 38.51 + int fail = 1; 38.52 + int i, total_known_sectors = 0; 38.53 unsigned char *pi; 38.54 unsigned char data [SECSIZE]; 38.55 unsigned long extent = 0, old_extent = 0, e_count = 1; 38.56 struct pentry exttbls[4]; 38.57 38.58 - fd = fopen(argv, "r"); 38.59 + *valid = 0; 38.60 + 38.61 + fd = fopen(diskimage, "r"); 38.62 if (fd == NULL) 38.63 { 38.64 - perror ("lomount"); 38.65 - return 1; 38.66 + perror(diskimage); 38.67 + goto done; 38.68 } 38.69 - i = fread (&data, 1, sizeof (data), fd); 38.70 - if (i < SECSIZE) 38.71 + size = fread (&data, 1, sizeof(data), fd); 38.72 + if (size < (size_t)sizeof(data)) 38.73 { 38.74 - fprintf (stderr, "%s: could not read the entire first sector\n", progname); 38.75 - return 1; 38.76 + fprintf(stderr, "Could not read the entire first sector of %s.\n", diskimage); 38.77 + goto done; 38.78 } 38.79 - for (i = 0;i < 4;i++) 38.80 + for (i = 0; i < 4; i++) 38.81 { 38.82 pi = &data [446 + 16 * i]; 38.83 parttbl [i].bootable = *pi; 38.84 @@ -95,7 +114,7 @@ int loadptable(const char *argv, struct 38.85 old_extent = extent; 38.86 } 38.87 } 38.88 - valid = (data [510] == 0x55 && data [511] == 0xaa) ? 1 : 0; 38.89 + *valid = (data [510] == 0x55 && data [511] == 0xaa) ? 
1 : 0; 38.90 for (i = 0; i < 4; i++) 38.91 { 38.92 total_known_sectors += parttbl[i].no_of_sectors_abs; 38.93 } 38.94 #ifdef DEBUG 38.95 if (extent != 0) 38.96 { 38.97 - printf("extended partition detected at offset %d\n", extent); 38.98 + printf("extended partition detected at offset %lu\n", extent); 38.99 } 38.100 #endif 38.101 while (extent != 0) 38.102 { 38.103 /* according to realloc(3) passing NULL as pointer is same as calling malloc() */ 38.104 exttbl[0] = realloc(exttbl[0], e_count * sizeof(struct pentry)); 38.105 fseek(fd, extent, SEEK_SET); 38.106 - i = fread (&data, 1, sizeof (data), fd); 38.107 - if (i < SECSIZE) 38.108 + size = fread (&data, 1, sizeof(data), fd); 38.109 + if (size < (size_t)sizeof(data)) 38.110 { 38.111 - fprintf (stderr, "%s: could not read the entire first sector\n", progname); 38.112 - return 1; 38.113 + fprintf(stderr, "Could not read extended partition of %s.\n", diskimage); 38.114 + goto done; 38.115 } 38.116 /* only first 2 entrys are used in extented partition tables */ 38.117 - for (i = 0;i < 2;i++) 38.118 + for (i = 0; i < 2; i++) 38.119 { 38.120 pi = &data [446 + 16 * i]; 38.121 exttbls [i].bootable = *pi; 38.122 @@ -152,7 +171,7 @@ int loadptable(const char *argv, struct 38.123 /* adjust for start of image instead of start of ext partition */ 38.124 exttbl[0][e_count-1].start_sector_abs += (extent/SECSIZE); 38.125 #ifdef DEBUG 38.126 - printf("extent %d start_sector_abs %d\n", extent, exttbl[0][e_count-1].start_sector_abs); 38.127 + printf("extent %lu start_sector_abs %lu\n", extent, exttbl[0][e_count-1].start_sector_abs); 38.128 #endif 38.129 //else if (parttbl[i].system == 0x5) 38.130 } 38.131 @@ -165,53 +184,71 @@ int loadptable(const char *argv, struct 38.132 } 38.133 e_count ++; 38.134 } 38.135 - //fclose (fd); 38.136 - //the above segfaults (?!!!) 38.137 #ifdef DEBUG 38.138 - printf("e_count = %d\n", e_count); 38.139 + printf("e_count = %lu\n", e_count); 38.140 #endif 38.141 - return valid; 38.142 + fail = 0; 38.143 + 38.144 +done: 38.145 + if (fd) 38.146 + fclose(fd); 38.147 + return fail; 38.148 +} 38.149 + 38.150 +void usage() 38.151 +{ 38.152 + fprintf(stderr, "You must specify at least -diskimage and -partition.\n"); 38.153 + fprintf(stderr, "All other arguments are passed through to 'mount'.\n"); 38.154 + fprintf(stderr, "ex. 
lomount -t fs-type -diskimage hda.img -partition 1 /mnt\n"); 38.155 + exit(ERR_USAGE); 38.156 } 38.157 38.158 int main(int argc, char ** argv) 38.159 { 38.160 + int status; 38.161 struct pentry perttbl [4]; 38.162 struct pentry *exttbl[1], *parttbl; 38.163 - char buf[BUF], argv2[BUF], diskimage[BUF]; 38.164 - int partition = 1, sec, num = 0, pnum = 0, len = BUF, i, f = 0, valid; 38.165 - progname = argv[0]; 38.166 + char buf[BUF], argv2[BUF]; 38.167 + const char * diskimage = 0; 38.168 + int partition = 0, sec, num = 0, pnum = 0, i, valid; 38.169 + size_t argv2_len = sizeof(argv2); 38.170 + argv2[0] = '\0'; 38.171 exttbl[0] = NULL; 38.172 + 38.173 for (i = 1; i < argc; i ++) 38.174 { 38.175 - if (strncmp(argv[i], "-diskimage", BUF)==0) 38.176 + if (strcmp(argv[i], "-diskimage")==0) 38.177 { 38.178 - strncpy(diskimage, argv[i+1], BUF); 38.179 - i++; f = 1; 38.180 + if (i == argc-1) 38.181 + usage(); 38.182 + i++; 38.183 + diskimage = argv[i]; 38.184 } 38.185 - else if (strncmp(argv[i], "-partition", BUF)==0) 38.186 + else if (strcmp(argv[i], "-partition")==0) 38.187 { 38.188 - partition = atoi(argv[i+1]); 38.189 + if (i == argc-1) 38.190 + usage(); 38.191 i++; 38.192 - if (partition < 1) partition = 1; 38.193 + partition = atoi(argv[i]); 38.194 } 38.195 else 38.196 { 38.197 - strncat(argv2, argv[i], len); 38.198 - strncat(argv2, " ", len-1); 38.199 - len -= strlen(argv[i]); 38.200 - len--; 38.201 + size_t len = strlen(argv[i]); 38.202 + if (len + 1 >= argv2_len) 38.203 + usage(); 38.204 + strcat(argv2, argv[i]); 38.205 + strcat(argv2, " "); 38.206 + argv2_len -= len + 1; 38.207 } 38.208 } 38.209 - if (!f) 38.210 - { 38.211 - printf("You must specify -diskimage and -partition\n"); 38.212 - printf("ex. lomount -t fs-type -diskimage hda.img -partition 1 /mnt\n"); 38.213 - return 0; 38.214 - } 38.215 - valid = loadptable(diskimage, perttbl, exttbl); 38.216 + if (!
diskimage || partition < 1) 38.217 + usage(); 38.218 + 38.219 + if (loadptable(diskimage, perttbl, exttbl, &valid)) 38.220 + return ERR_PART_PARSE; 38.221 if (!valid) 38.222 { 38.223 - printf("Warning: disk image does not appear to describe a valid partition table.\n"); 38.224 + fprintf(stderr, "Warning: disk image does not appear to describe a valid partition table.\n"); 38.225 } 38.226 /* NOTE: need to make sure this always rounds down */ 38.227 //sec = total_known_sectors / sizeof_diskimage; 38.228 @@ -228,14 +265,14 @@ off by (larger than) a value less than o 38.229 { 38.230 if (exttbl[0] == NULL) 38.231 { 38.232 - printf("No extended partitions were found in %s.\n", diskimage); 38.233 - return 2; 38.234 + fprintf(stderr, "No extended partitions were found in %s.\n", diskimage); 38.235 + return ERR_NO_EPART; 38.236 } 38.237 parttbl = exttbl[0]; 38.238 if (parttbl[partition-5].no_of_sectors_abs == 0) 38.239 { 38.240 - printf("Partition %d was not found in %s.\n", partition, diskimage); 38.241 - return 3; 38.242 + fprintf(stderr, "Partition %d was not found in %s.\n", partition, diskimage); 38.243 + return ERR_NO_PART; 38.244 } 38.245 partition -= 4; 38.246 } 38.247 @@ -244,8 +281,8 @@ off by (larger than) a value less than o 38.248 parttbl = perttbl; 38.249 if (parttbl[partition-1].no_of_sectors_abs == 0) 38.250 { 38.251 - printf("Partition %d was not found in %s.\n", partition, diskimage); 38.252 - return 3; 38.253 + fprintf(stderr, "Partition %d was not found in %s.\n", partition, diskimage); 38.254 + return ERR_NO_PART; 38.255 } 38.256 } 38.257 num = parttbl[partition-1].start_sector_abs; 38.258 @@ -253,10 +290,14 @@ off by (larger than) a value less than o 38.259 #ifdef DEBUG 38.260 printf("offset = %d\n", pnum); 38.261 #endif 38.262 - snprintf(buf, BUF, "mount -oloop,offset=%d %s %s", pnum, diskimage, argv2); 38.263 + snprintf(buf, sizeof(buf), "mount -oloop,offset=%d %s %s", pnum, diskimage, argv2); 38.264 #ifdef DEBUG 38.265 printf("%s\n", buf); 38.266 #endif 38.267 - system(buf); 38.268 - return 0; 38.269 + status = system(buf); 38.270 + if (WIFEXITED(status)) 38.271 + status = WEXITSTATUS(status); 38.272 + else 38.273 + status = ERR_MOUNT; 38.274 + return status; 38.275 }
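The mount command that lomount finally issues rests on one piece of arithmetic: the partition's absolute start sector times the sector size gives the byte offset passed to mount's loop option. A minimal sketch of that computation, assuming the conventional 512-byte MBR sector size behind SECSIZE and a hypothetical start sector:

    #include <stdio.h>

    #define SECSIZE 512  /* assumed: standard MBR sector size, as in lomount */

    int main(void)
    {
        unsigned long start_sector_abs = 63;   /* hypothetical partition start */
        unsigned long offset = start_sector_abs * SECSIZE;
        char cmd[256];

        /* same shape as the command built by lomount's snprintf above */
        snprintf(cmd, sizeof(cmd),
                 "mount -oloop,offset=%lu hda.img /mnt", offset);
        printf("%s\n", cmd);  /* mount -oloop,offset=32256 hda.img /mnt */
        return 0;
    }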
39.1 --- a/tools/python/xen/xend/XendClient.py Mon Mar 06 09:09:18 2006 -0700 39.2 +++ b/tools/python/xen/xend/XendClient.py Mon Mar 06 10:21:35 2006 -0700 39.3 @@ -196,8 +196,9 @@ class Xend: 39.4 def xend_domains(self): 39.5 return self.xendGet(self.domainurl()) 39.6 39.7 - def xend_list_domains(self): 39.8 - return self.xendGet(self.domainurl(), {'detail': '1'}) 39.9 + def xend_list_domains(self, detail = True): 39.10 + return self.xendGet(self.domainurl(), 39.11 + {'detail': detail and '1' or '0'}) 39.12 39.13 def xend_domain_vcpuinfo(self, dom): 39.14 return self.xendGet(self.domainurl(dom), {'op': 'vcpuinfo'})
40.1 --- a/tools/python/xen/xend/XendDomainInfo.py Mon Mar 06 09:09:18 2006 -0700 40.2 +++ b/tools/python/xen/xend/XendDomainInfo.py Mon Mar 06 10:21:35 2006 -0700 40.3 @@ -13,7 +13,7 @@ 40.4 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 40.5 #============================================================================ 40.6 # Copyright (C) 2004, 2005 Mike Wray <mike.wray@hp.com> 40.7 -# Copyright (C) 2005 XenSource Ltd 40.8 +# Copyright (C) 2005, 2006 XenSource Ltd 40.9 #============================================================================ 40.10 40.11 """Representation of a single domain. 40.12 @@ -82,7 +82,7 @@ restart_modes = [ 40.13 STATE_DOM_OK = 1 40.14 STATE_DOM_SHUTDOWN = 2 40.15 40.16 -SHUTDOWN_TIMEOUT = 30 40.17 +SHUTDOWN_TIMEOUT = 30.0 40.18 40.19 ZOMBIE_PREFIX = 'Zombie-' 40.20 40.21 @@ -182,7 +182,7 @@ def create(config): 40.22 vm.initDomain() 40.23 vm.storeVmDetails() 40.24 vm.storeDomDetails() 40.25 - vm.registerWatch() 40.26 + vm.registerWatches() 40.27 vm.refreshShutdown() 40.28 return vm 40.29 except: 40.30 @@ -238,7 +238,7 @@ def recreate(xeninfo, priv): 40.31 vm.storeVmDetails() 40.32 vm.storeDomDetails() 40.33 40.34 - vm.registerWatch() 40.35 + vm.registerWatches() 40.36 vm.refreshShutdown(xeninfo) 40.37 return vm 40.38 40.39 @@ -443,7 +443,10 @@ class XendDomainInfo: 40.40 self.console_mfn = None 40.41 40.42 self.vmWatch = None 40.43 + self.shutdownWatch = None 40.44 40.45 + self.shutdownStartTime = None 40.46 + 40.47 self.state = STATE_DOM_OK 40.48 self.state_updated = threading.Condition() 40.49 self.refresh_shutdown_lock = threading.Condition() 40.50 @@ -648,7 +651,7 @@ class XendDomainInfo: 40.51 40.52 self.introduceDomain() 40.53 self.storeDomDetails() 40.54 - self.registerWatch() 40.55 + self.registerWatches() 40.56 self.refreshShutdown() 40.57 40.58 log.debug("XendDomainInfo.completeRestore done") 40.59 @@ -711,13 +714,15 @@ class XendDomainInfo: 40.60 40.61 ## public: 40.62 40.63 - def registerWatch(self): 40.64 - """Register a watch on this VM's entries in the store, so that 40.65 - when they are changed externally, we keep up to date. This should 40.66 - only be called by {@link #create}, {@link #recreate}, or {@link 40.67 - #restore}, once the domain's details have been written, but before the 40.68 - new instance is returned.""" 40.69 + def registerWatches(self): 40.70 + """Register a watch on this VM's entries in the store, and the 40.71 + domain's control/shutdown node, so that when they are changed 40.72 + externally, we keep up to date. This should only be called by {@link 40.73 + #create}, {@link #recreate}, or {@link #restore}, once the domain's 40.74 + details have been written, but before the new instance is returned.""" 40.75 self.vmWatch = xswatch(self.vmpath, self.storeChanged) 40.76 + self.shutdownWatch = xswatch(self.dompath + '/control/shutdown', 40.77 + self.handleShutdownWatch) 40.78 40.79 40.80 def getDomid(self): 40.81 @@ -852,20 +857,14 @@ class XendDomainInfo: 40.82 # Domain is alive. If we are shutting it down, then check 40.83 # the timeout on that, and destroy it if necessary. 
40.84 40.85 - sst = self.readDom('xend/shutdown_start_time') 40.86 - if sst: 40.87 - sst = float(sst) 40.88 - timeout = SHUTDOWN_TIMEOUT - time.time() + sst 40.89 + if self.shutdownStartTime: 40.90 + timeout = (SHUTDOWN_TIMEOUT - time.time() + 40.91 + self.shutdownStartTime) 40.92 if timeout < 0: 40.93 log.info( 40.94 "Domain shutdown timeout expired: name=%s id=%s", 40.95 self.info['name'], self.domid) 40.96 self.destroy() 40.97 - else: 40.98 - log.debug( 40.99 - "Scheduling refreshShutdown on domain %d in %ds.", 40.100 - self.domid, timeout) 40.101 - threading.Timer(timeout, self.refreshShutdown).start() 40.102 finally: 40.103 self.refresh_shutdown_lock.release() 40.104 40.105 @@ -873,12 +872,34 @@ class XendDomainInfo: 40.106 self.maybeRestart(restart_reason) 40.107 40.108 40.109 + def handleShutdownWatch(self, _): 40.110 + log.debug('XendDomainInfo.handleShutdownWatch') 40.111 + 40.112 + reason = self.readDom('control/shutdown') 40.113 + 40.114 + if reason and reason != 'suspend': 40.115 + sst = self.readDom('xend/shutdown_start_time') 40.116 + now = time.time() 40.117 + if sst: 40.118 + self.shutdownStartTime = float(sst) 40.119 + timeout = float(sst) + SHUTDOWN_TIMEOUT - now 40.120 + else: 40.121 + self.shutdownStartTime = now 40.122 + self.storeDom('xend/shutdown_start_time', now) 40.123 + timeout = SHUTDOWN_TIMEOUT 40.124 + 40.125 + log.trace( 40.126 + "Scheduling refreshShutdown on domain %d in %ds.", 40.127 + self.domid, timeout) 40.128 + threading.Timer(timeout, self.refreshShutdown).start() 40.129 + 40.130 + return 1 40.131 + 40.132 + 40.133 def shutdown(self, reason): 40.134 if not reason in shutdown_reasons.values(): 40.135 raise XendError('Invalid reason: %s' % reason) 40.136 self.storeDom("control/shutdown", reason) 40.137 - if reason != 'suspend': 40.138 - self.storeDom('xend/shutdown_start_time', time.time()) 40.139 40.140 40.141 ## private: 40.142 @@ -1225,6 +1246,8 @@ class XendDomainInfo: 40.143 """Cleanup domain resources; release devices. Idempotent. Nothrow 40.144 guarantee.""" 40.145 40.146 + self.unwatchShutdown() 40.147 + 40.148 self.release_devices() 40.149 40.150 if self.image: 40.151 @@ -1276,6 +1299,20 @@ class XendDomainInfo: 40.152 log.exception("Unwatching VM path failed.") 40.153 40.154 40.155 + def unwatchShutdown(self): 40.156 + """Remove the watch on the domain's control/shutdown node, if any. 40.157 + Idempotent. Nothrow guarantee.""" 40.158 + 40.159 + try: 40.160 + try: 40.161 + if self.shutdownWatch: 40.162 + self.shutdownWatch.unwatch() 40.163 + finally: 40.164 + self.shutdownWatch = None 40.165 + except: 40.166 + log.exception("Unwatching control/shutdown failed.") 40.167 + 40.168 + 40.169 ## public: 40.170 40.171 def destroy(self):
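The new handleShutdownWatch arms a one-shot timer sized from the recorded shutdown start time: if xend/shutdown_start_time already exists, the remaining budget is start + SHUTDOWN_TIMEOUT - now; otherwise it is the full SHUTDOWN_TIMEOUT. A small sketch of just that arithmetic (in C rather than the Python above, values hypothetical):

    #include <stdio.h>

    #define SHUTDOWN_TIMEOUT 30.0  /* seconds, matching the constant above */

    /* Remaining grace period before refreshShutdown would destroy the
     * domain; sst is the recorded start time, or 0.0 when none exists. */
    static double shutdown_budget(double sst, double now)
    {
        return sst > 0.0 ? sst + SHUTDOWN_TIMEOUT - now : SHUTDOWN_TIMEOUT;
    }

    int main(void)
    {
        /* hypothetical: shutdown began at t=100, it is now t=125 */
        printf("%.1f seconds left\n", shutdown_budget(100.0, 125.0)); /* 5.0 */
        return 0;
    }

A non-positive result means the timeout has already expired, which is the case refreshShutdown handles by destroying the domain.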
41.1 --- a/tools/python/xen/xend/image.py Mon Mar 06 09:09:18 2006 -0700 41.2 +++ b/tools/python/xen/xend/image.py Mon Mar 06 10:21:35 2006 -0700 41.3 @@ -274,6 +274,10 @@ class HVMImageHandler(ImageHandler): 41.4 uname = sxp.child_value(info, 'uname') 41.5 typedev = sxp.child_value(info, 'dev') 41.6 (_, vbdparam) = string.split(uname, ':', 1) 41.7 + 41.8 + if 'file:' in uname and not os.path.isfile(vbdparam): 41.9 + raise VmError('Disk image does not exist: %s' % vbdparam) 41.10 + 41.11 if 'ioemu:' in typedev: 41.12 (emtype, vbddev) = string.split(typedev, ':', 1) 41.13 else:
42.1 --- a/tools/python/xen/xend/server/blkif.py Mon Mar 06 09:09:18 2006 -0700 42.2 +++ b/tools/python/xen/xend/server/blkif.py Mon Mar 06 10:21:35 2006 -0700 42.3 @@ -42,10 +42,6 @@ class BlkifController(DevController): 42.4 """@see DevController.getDeviceDetails""" 42.5 42.6 dev = sxp.child_value(config, 'dev') 42.7 - if 'ioemu:' in dev: 42.8 - return (None,{},{}) 42.9 - 42.10 - devid = blkif.blkdev_name_to_number(dev) 42.11 42.12 (typ, params) = string.split(sxp.child_value(config, 'uname'), ':', 1) 42.13 back = { 'dev' : dev, 42.14 @@ -54,7 +50,13 @@ class BlkifController(DevController): 42.15 'mode' : sxp.child_value(config, 'mode', 'r') 42.16 } 42.17 42.18 - front = { 'virtual-device' : "%i" % devid } 42.19 + if 'ioemu:' in dev: 42.20 + (dummy, dev1) = string.split(dev, ':', 1) 42.21 + devid = blkif.blkdev_name_to_number(dev1) 42.22 + front = {} 42.23 + else: 42.24 + devid = blkif.blkdev_name_to_number(dev) 42.25 + front = { 'virtual-device' : "%i" % devid } 42.26 42.27 return (devid, back, front) 42.28
43.1 --- a/tools/python/xen/xm/main.py Mon Mar 06 09:09:18 2006 -0700 43.2 +++ b/tools/python/xen/xm/main.py Mon Mar 06 10:21:35 2006 -0700 43.3 @@ -396,10 +396,8 @@ def xm_vcpu_list(args): 43.4 if args: 43.5 dominfo = map(server.xend_domain_vcpuinfo, args) 43.6 else: 43.7 - doms = server.xend_list_domains() 43.8 - dominfo = map( 43.9 - lambda x: server.xend_domain_vcpuinfo(sxp.child_value(x, 'name')), 43.10 - doms) 43.11 + doms = server.xend_list_domains(False) 43.12 + dominfo = map(server.xend_domain_vcpuinfo, doms) 43.13 43.14 print 'Name ID VCPU CPU State Time(s) CPU Affinity' 43.15
44.1 --- a/tools/vtpm_manager/manager/dmictl.c Mon Mar 06 09:09:18 2006 -0700 44.2 +++ b/tools/vtpm_manager/manager/dmictl.c Mon Mar 06 10:21:35 2006 -0700 44.3 @@ -74,7 +74,13 @@ TPM_RESULT close_dmi( VTPM_DMI_RESOURCE 44.4 44.5 close(dmi_res->guest_tx_fh); dmi_res->guest_tx_fh = -1; 44.6 close(dmi_res->vtpm_tx_fh); dmi_res->vtpm_tx_fh = -1; 44.7 - 44.8 + vtpm_globals->connected_dmis--; 44.9 + 44.10 + if (vtpm_globals->connected_dmis == 0) { 44.11 + // No more DMIs connected. Close fifo to prevent a broken pipe. 44.12 + close(vtpm_globals->guest_rx_fh); 44.13 + vtpm_globals->guest_rx_fh = -1; 44.14 + } 44.15 #ifndef MANUAL_DM_LAUNCH 44.16 if (dmi_res->dmi_id != VTPM_CTL_DM) { 44.17 if (dmi_res->dmi_pid != 0) { 44.18 @@ -118,6 +124,7 @@ TPM_RESULT VTPM_Handle_New_DMI( const bu 44.19 status = TPM_BAD_PARAMETER; 44.20 goto abort_egress; 44.21 } else { 44.22 + vtpm_globals->connected_dmis++; // Put this here so we don't count Dom0 44.23 BSG_UnpackList( param_buf->bytes, 3, 44.24 BSG_TYPE_BYTE, &type, 44.25 BSG_TYPE_UINT32, &domain_id,
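connected_dmis is a plain reference count on the shared guest fifo: each attaching DMI increments it, and the last one to disconnect closes the descriptor so the next open starts clean instead of hitting a broken pipe. The pattern in isolation, with hypothetical names, looks like:

    #include <unistd.h>

    static int users = 0;        /* counts attached clients, like connected_dmis */
    static int shared_fd = -1;   /* the shared fifo, like guest_rx_fh */

    static void client_attach(void) { users++; }

    static void client_detach(void)
    {
        if (--users == 0 && shared_fd != -1) {
            close(shared_fd);    /* last user gone: release the fifo */
            shared_fd = -1;      /* force a reopen on the next attach */
        }
    }

    int main(void)
    {
        client_attach();
        client_attach();
        client_detach();   /* fifo stays open: one user remains */
        client_detach();   /* last user: fifo is closed here */
        return 0;
    }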
45.1 --- a/tools/vtpm_manager/manager/securestorage.c Mon Mar 06 09:09:18 2006 -0700 45.2 +++ b/tools/vtpm_manager/manager/securestorage.c Mon Mar 06 10:21:35 2006 -0700 45.3 @@ -307,8 +307,8 @@ TPM_RESULT VTPM_SaveService(void) { 45.4 TPM_RESULT status=TPM_SUCCESS; 45.5 int fh, dmis=-1; 45.6 45.7 - BYTE *flat_boot_key, *flat_dmis, *flat_enc; 45.8 - buffer_t clear_flat_global, enc_flat_global; 45.9 + BYTE *flat_boot_key=NULL, *flat_dmis=NULL, *flat_enc=NULL; 45.10 + buffer_t clear_flat_global=NULL_BUF, enc_flat_global=NULL_BUF; 45.11 UINT32 storageKeySize = buffer_len(&vtpm_globals->storageKeyWrap); 45.12 UINT32 bootKeySize = buffer_len(&vtpm_globals->bootKeyWrap); 45.13 struct pack_buf_t storage_key_pack = {storageKeySize, vtpm_globals->storageKeyWrap.bytes}; 45.14 @@ -328,12 +328,9 @@ TPM_RESULT VTPM_SaveService(void) { 45.15 sizeof(UINT32) +// storagekeysize 45.16 storageKeySize, NULL) ); // storage key 45.17 45.18 - flat_dmis_size = (hashtable_count(vtpm_globals->dmi_map) - 1) * // num DMIS (-1 for Dom0) 45.19 - (sizeof(UINT32) + 2*sizeof(TPM_DIGEST)); // Per DMI info 45.20 45.21 flat_boot_key = (BYTE *) malloc( boot_key_size ); 45.22 flat_enc = (BYTE *) malloc( sizeof(UINT32) ); 45.23 - flat_dmis = (BYTE *) malloc( flat_dmis_size ); 45.24 45.25 boot_key_size = BSG_PackList(flat_boot_key, 1, 45.26 BSG_TPM_SIZE32_DATA, &boot_key_pack); 45.27 @@ -349,9 +346,13 @@ TPM_RESULT VTPM_SaveService(void) { 45.28 45.29 BSG_PackConst(buffer_len(&enc_flat_global), 4, flat_enc); 45.30 45.31 - // Per DMI values to be saved 45.32 + // Per DMI values to be saved (if any exist) 45.33 if (hashtable_count(vtpm_globals->dmi_map) > 0) { 45.34 45.35 + flat_dmis_size = (hashtable_count(vtpm_globals->dmi_map) - 1) * // num DMIS (-1 for Dom0) 45.36 + (sizeof(UINT32) + 2*sizeof(TPM_DIGEST)); // Per DMI info 45.37 + flat_dmis = (BYTE *) malloc( flat_dmis_size ); 45.38 + 45.39 dmi_itr = hashtable_iterator(vtpm_globals->dmi_map); 45.40 do { 45.41 dmi_res = (VTPM_DMI_RESOURCE *) hashtable_iterator_value(dmi_itr);
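The securestorage fix is the standard C cleanup idiom: initialise every pointer to NULL so a single error path can free unconditionally, and defer each malloc until its size is actually known. A minimal sketch of the idiom (not the manager's real code):

    #include <stdlib.h>

    int save_blobs(size_t n)
    {
        unsigned char *a = NULL, *b = NULL;  /* safe to free on any exit path */
        int rc = -1;

        a = malloc(64);
        if (a == NULL)
            goto out;

        if (n > 0) {             /* allocate only when there is data to pack */
            b = malloc(n * 16);
            if (b == NULL)
                goto out;
        }

        /* ... pack and write a and b ... */
        rc = 0;
    out:
        free(a);                 /* free(NULL) is a no-op */
        free(b);
        return rc;
    }

    int main(void) { return save_blobs(3); }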
46.1 --- a/tools/vtpm_manager/manager/vtpm_manager.c Mon Mar 06 09:09:18 2006 -0700 46.2 +++ b/tools/vtpm_manager/manager/vtpm_manager.c Mon Mar 06 10:21:35 2006 -0700 46.3 @@ -754,6 +754,7 @@ TPM_RESULT VTPM_Init_Service() { 46.4 #ifndef VTPM_MULTI_VM 46.5 vtpm_globals->vtpm_rx_fh = -1; 46.6 vtpm_globals->guest_rx_fh = -1; 46.7 + vtpm_globals->connected_dmis = 0; 46.8 #endif 46.9 if ((vtpm_globals->dmi_map = create_hashtable(10, hashfunc32, equals32)) == NULL){ 46.10 status = TPM_FAIL;
47.1 --- a/tools/vtpm_manager/manager/vtpmpriv.h Mon Mar 06 09:09:18 2006 -0700 47.2 +++ b/tools/vtpm_manager/manager/vtpmpriv.h Mon Mar 06 10:21:35 2006 -0700 47.3 @@ -98,6 +98,7 @@ typedef struct tdVTPM_GLOBALS { 47.4 #ifndef VTPM_MULTI_VM 47.5 int vtpm_rx_fh; 47.6 int guest_rx_fh; 47.7 + int connected_dmis; // Used to close guest_rx when no dmis are connected 47.8 47.9 pid_t master_pid; 47.10 #endif
48.1 --- a/tools/xenstore/Makefile Mon Mar 06 09:09:18 2006 -0700 48.2 +++ b/tools/xenstore/Makefile Mon Mar 06 10:21:35 2006 -0700 48.3 @@ -27,21 +27,27 @@ CLIENTS := xenstore-exists xenstore-list 48.4 CLIENTS += xenstore-write 48.5 CLIENTS_OBJS := $(patsubst xenstore-%,xenstore_%.o,$(CLIENTS)) 48.6 48.7 -all: libxenstore.so xenstored $(CLIENTS) xs_tdb_dump xenstore-ls 48.8 +all: libxenstore.so xenstored $(CLIENTS) xs_tdb_dump xenstore-control xenstore-ls 48.9 + 48.10 +test_interleaved_transactions: test_interleaved_transactions.o 48.11 + $(LINK.o) $^ $(LOADLIBES) $(LDLIBS) -L. -lxenstore -o $@ 48.12 48.13 testcode: xs_test xenstored_test xs_random 48.14 48.15 -xenstored: xenstored_core.o xenstored_watch.o xenstored_domain.o xenstored_transaction.o xs_lib.o talloc.o utils.o tdb.o 48.16 +xenstored: xenstored_core.o xenstored_watch.o xenstored_domain.o xenstored_transaction.o xs_lib.o talloc.o utils.o tdb.o hashtable.o 48.17 $(LINK.o) $^ $(LOADLIBES) $(LDLIBS) -lxenctrl -o $@ 48.18 48.19 $(CLIENTS): xenstore-%: xenstore_%.o libxenstore.so 48.20 - $(LINK.o) $< $(LOADLIBES) $(LDLIBS) -lxenctrl -L. -lxenstore -o $@ 48.21 + $(LINK.o) $< $(LOADLIBES) $(LDLIBS) -L. -lxenstore -o $@ 48.22 48.23 $(CLIENTS_OBJS): xenstore_%.o: xenstore_client.c 48.24 $(COMPILE.c) -DCLIENT_$(*F) -o $@ $< 48.25 48.26 +xenstore-control: xenstore_control.o libxenstore.so 48.27 + $(LINK.o) $< $(LOADLIBES) $(LDLIBS) -L. -lxenstore -o $@ 48.28 + 48.29 xenstore-ls: xsls.o libxenstore.so 48.30 - $(LINK.o) $< $(LOADLIBES) $(LDLIBS) -lxenctrl -L. -lxenstore -o $@ 48.31 + $(LINK.o) $< $(LOADLIBES) $(LDLIBS) -L. -lxenstore -o $@ 48.32 48.33 xenstored_test: xenstored_core_test.o xenstored_watch_test.o xenstored_domain_test.o xenstored_transaction_test.o xs_lib.o talloc_test.o fake_libxc.o utils.o tdb.o 48.34 $(LINK.o) $^ $(LOADLIBES) $(LDLIBS) -o $@ 48.35 @@ -77,7 +83,8 @@ libxenstore.so: xs.opic xs_lib.opic 48.36 clean: testsuite-clean 48.37 rm -f *.o *.opic *.so 48.38 rm -f xenstored xs_random xs_stress xs_crashme 48.39 - rm -f xs_test xenstored_test xs_tdb_dump xenstore-ls $(CLIENTS) 48.40 + rm -f xs_test xenstored_test xs_tdb_dump xenstore-control xenstore-ls 48.41 + rm -f $(CLIENTS) 48.42 $(RM) $(PROG_DEP) 48.43 48.44 print-dir: 48.45 @@ -129,7 +136,7 @@ TAGS: 48.46 tarball: clean 48.47 cd .. && tar -c -j -v -h -f xenstore.tar.bz2 xenstore/ 48.48 48.49 -install: libxenstore.so xenstored xenstore-ls $(CLIENTS) 48.50 +install: all 48.51 $(INSTALL_DIR) -p $(DESTDIR)/var/run/xenstored 48.52 $(INSTALL_DIR) -p $(DESTDIR)/var/lib/xenstored 48.53 $(INSTALL_DIR) -p $(DESTDIR)/usr/bin 48.54 @@ -137,6 +144,7 @@ install: libxenstore.so xenstored xensto 48.55 $(INSTALL_DIR) -p $(DESTDIR)/usr/include 48.56 $(INSTALL_PROG) xenstored $(DESTDIR)/usr/sbin 48.57 $(INSTALL_PROG) $(CLIENTS) $(DESTDIR)/usr/bin 48.58 + $(INSTALL_PROG) xenstore-control $(DESTDIR)/usr/bin 48.59 $(INSTALL_PROG) xenstore-ls $(DESTDIR)/usr/bin 48.60 $(INSTALL_DIR) -p $(DESTDIR)/usr/$(LIBDIR) 48.61 $(INSTALL_DATA) libxenstore.so $(DESTDIR)/usr/$(LIBDIR)
49.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 49.2 +++ b/tools/xenstore/hashtable.c Mon Mar 06 10:21:35 2006 -0700 49.3 @@ -0,0 +1,276 @@ 49.4 +/* Copyright (C) 2004 Christopher Clark <firstname.lastname@cl.cam.ac.uk> */ 49.5 + 49.6 +#include "hashtable.h" 49.7 +#include "hashtable_private.h" 49.8 +#include <stdlib.h> 49.9 +#include <stdio.h> 49.10 +#include <string.h> 49.11 +#include <math.h> 49.12 +#include <stdint.h> 49.13 + 49.14 +/* 49.15 +Credit for primes table: Aaron Krowne 49.16 + http://br.endernet.org/~akrowne/ 49.17 + http://planetmath.org/encyclopedia/GoodHashTablePrimes.html 49.18 +*/ 49.19 +static const unsigned int primes[] = { 49.20 +53, 97, 193, 389, 49.21 +769, 1543, 3079, 6151, 49.22 +12289, 24593, 49157, 98317, 49.23 +196613, 393241, 786433, 1572869, 49.24 +3145739, 6291469, 12582917, 25165843, 49.25 +50331653, 100663319, 201326611, 402653189, 49.26 +805306457, 1610612741 49.27 +}; 49.28 +const unsigned int prime_table_length = sizeof(primes)/sizeof(primes[0]); 49.29 +const unsigned int max_load_factor = 65; /* percentage */ 49.30 + 49.31 +/*****************************************************************************/ 49.32 +struct hashtable * 49.33 +create_hashtable(unsigned int minsize, 49.34 + unsigned int (*hashf) (void*), 49.35 + int (*eqf) (void*,void*)) 49.36 +{ 49.37 + struct hashtable *h; 49.38 + unsigned int pindex, size = primes[0]; 49.39 + /* Check requested hashtable isn't too large */ 49.40 + if (minsize > (1u << 30)) return NULL; 49.41 + /* Enforce size as prime */ 49.42 + for (pindex=0; pindex < prime_table_length; pindex++) { 49.43 + if (primes[pindex] > minsize) { size = primes[pindex]; break; } 49.44 + } 49.45 + h = (struct hashtable *)malloc(sizeof(struct hashtable)); 49.46 + if (NULL == h) return NULL; /*oom*/ 49.47 + h->table = (struct entry **)malloc(sizeof(struct entry*) * size); 49.48 + if (NULL == h->table) { free(h); return NULL; } /*oom*/ 49.49 + memset(h->table, 0, size * sizeof(struct entry *)); 49.50 + h->tablelength = size; 49.51 + h->primeindex = pindex; 49.52 + h->entrycount = 0; 49.53 + h->hashfn = hashf; 49.54 + h->eqfn = eqf; 49.55 + h->loadlimit = (unsigned int)(((uint64_t)size * max_load_factor) / 100); 49.56 + return h; 49.57 +} 49.58 + 49.59 +/*****************************************************************************/ 49.60 +unsigned int 49.61 +hash(struct hashtable *h, void *k) 49.62 +{ 49.63 + /* Aim to protect against poor hash functions by adding logic here 49.64 + * - logic taken from java 1.4 hashtable source */ 49.65 + unsigned int i = h->hashfn(k); 49.66 + i += ~(i << 9); 49.67 + i ^= ((i >> 14) | (i << 18)); /* >>> */ 49.68 + i += (i << 4); 49.69 + i ^= ((i >> 10) | (i << 22)); /* >>> */ 49.70 + return i; 49.71 +} 49.72 + 49.73 +/*****************************************************************************/ 49.74 +static int 49.75 +hashtable_expand(struct hashtable *h) 49.76 +{ 49.77 + /* Double the size of the table to accommodate more entries */ 49.78 + struct entry **newtable; 49.79 + struct entry *e; 49.80 + struct entry **pE; 49.81 + unsigned int newsize, i, index; 49.82 + /* Check we're not hitting max capacity */ 49.83 + if (h->primeindex == (prime_table_length - 1)) return 0; 49.84 + newsize = primes[++(h->primeindex)]; 49.85 + 49.86 + newtable = (struct entry **)malloc(sizeof(struct entry*) * newsize); 49.87 + if (NULL != newtable) 49.88 + { 49.89 + memset(newtable, 0, newsize * sizeof(struct entry *)); 49.90 + /* This algorithm is not 'stable'. i.e. 
it reverses the list 49.91 + * when it transfers entries between the tables */ 49.92 + for (i = 0; i < h->tablelength; i++) { 49.93 + while (NULL != (e = h->table[i])) { 49.94 + h->table[i] = e->next; 49.95 + index = indexFor(newsize,e->h); 49.96 + e->next = newtable[index]; 49.97 + newtable[index] = e; 49.98 + } 49.99 + } 49.100 + free(h->table); 49.101 + h->table = newtable; 49.102 + } 49.103 + /* Plan B: realloc instead */ 49.104 + else 49.105 + { 49.106 + newtable = (struct entry **) 49.107 + realloc(h->table, newsize * sizeof(struct entry *)); 49.108 + if (NULL == newtable) { (h->primeindex)--; return 0; } 49.109 + h->table = newtable; 49.110 + memset(newtable + h->tablelength, 0, (newsize - h->tablelength) * sizeof(struct entry *)); 49.111 + for (i = 0; i < h->tablelength; i++) { 49.112 + for (pE = &(newtable[i]), e = *pE; e != NULL; e = *pE) { 49.113 + index = indexFor(newsize,e->h); 49.114 + if (index == i) 49.115 + { 49.116 + pE = &(e->next); 49.117 + } 49.118 + else 49.119 + { 49.120 + *pE = e->next; 49.121 + e->next = newtable[index]; 49.122 + newtable[index] = e; 49.123 + } 49.124 + } 49.125 + } 49.126 + } 49.127 + h->tablelength = newsize; 49.128 + h->loadlimit = (unsigned int) 49.129 + (((uint64_t)newsize * max_load_factor) / 100); 49.130 + return -1; 49.131 +} 49.132 + 49.133 +/*****************************************************************************/ 49.134 +unsigned int 49.135 +hashtable_count(struct hashtable *h) 49.136 +{ 49.137 + return h->entrycount; 49.138 +} 49.139 + 49.140 +/*****************************************************************************/ 49.141 +int 49.142 +hashtable_insert(struct hashtable *h, void *k, void *v) 49.143 +{ 49.144 + /* This method allows duplicate keys - but they shouldn't be used */ 49.145 + unsigned int index; 49.146 + struct entry *e; 49.147 + if (++(h->entrycount) > h->loadlimit) 49.148 + { 49.149 + /* Ignore the return value. If expand fails, we should 49.150 + * still try cramming just this value into the existing table 49.151 + * -- we may not have memory for a larger table, but one more 49.152 + * element may be ok. Next time we insert, we'll try expanding again.*/ 49.153 + hashtable_expand(h); 49.154 + } 49.155 + e = (struct entry *)malloc(sizeof(struct entry)); 49.156 + if (NULL == e) { --(h->entrycount); return 0; } /*oom*/ 49.157 + e->h = hash(h,k); 49.158 + index = indexFor(h->tablelength,e->h); 49.159 + e->k = k; 49.160 + e->v = v; 49.161 + e->next = h->table[index]; 49.162 + h->table[index] = e; 49.163 + return -1; 49.164 +} 49.165 + 49.166 +/*****************************************************************************/ 49.167 +void * /* returns value associated with key */ 49.168 +hashtable_search(struct hashtable *h, void *k) 49.169 +{ 49.170 + struct entry *e; 49.171 + unsigned int hashvalue, index; 49.172 + hashvalue = hash(h,k); 49.173 + index = indexFor(h->tablelength,hashvalue); 49.174 + e = h->table[index]; 49.175 + while (NULL != e) 49.176 + { 49.177 + /* Check hash value to short circuit heavier comparison */ 49.178 + if ((hashvalue == e->h) && (h->eqfn(k, e->k))) return e->v; 49.179 + e = e->next; 49.180 + } 49.181 + return NULL; 49.182 +} 49.183 + 49.184 +/*****************************************************************************/ 49.185 +void * /* returns value associated with key */ 49.186 +hashtable_remove(struct hashtable *h, void *k) 49.187 +{ 49.188 + /* TODO: consider compacting the table when the load factor drops enough, 49.189 + * or provide a 'compact' method. 
*/ 49.190 + 49.191 + struct entry *e; 49.192 + struct entry **pE; 49.193 + void *v; 49.194 + unsigned int hashvalue, index; 49.195 + 49.196 + hashvalue = hash(h,k); 49.197 + index = indexFor(h->tablelength,hash(h,k)); 49.198 + pE = &(h->table[index]); 49.199 + e = *pE; 49.200 + while (NULL != e) 49.201 + { 49.202 + /* Check hash value to short circuit heavier comparison */ 49.203 + if ((hashvalue == e->h) && (h->eqfn(k, e->k))) 49.204 + { 49.205 + *pE = e->next; 49.206 + h->entrycount--; 49.207 + v = e->v; 49.208 + freekey(e->k); 49.209 + free(e); 49.210 + return v; 49.211 + } 49.212 + pE = &(e->next); 49.213 + e = e->next; 49.214 + } 49.215 + return NULL; 49.216 +} 49.217 + 49.218 +/*****************************************************************************/ 49.219 +/* destroy */ 49.220 +void 49.221 +hashtable_destroy(struct hashtable *h, int free_values) 49.222 +{ 49.223 + unsigned int i; 49.224 + struct entry *e, *f; 49.225 + struct entry **table = h->table; 49.226 + if (free_values) 49.227 + { 49.228 + for (i = 0; i < h->tablelength; i++) 49.229 + { 49.230 + e = table[i]; 49.231 + while (NULL != e) 49.232 + { f = e; e = e->next; freekey(f->k); free(f->v); free(f); } 49.233 + } 49.234 + } 49.235 + else 49.236 + { 49.237 + for (i = 0; i < h->tablelength; i++) 49.238 + { 49.239 + e = table[i]; 49.240 + while (NULL != e) 49.241 + { f = e; e = e->next; freekey(f->k); free(f); } 49.242 + } 49.243 + } 49.244 + free(h->table); 49.245 + free(h); 49.246 +} 49.247 + 49.248 +/* 49.249 + * Copyright (c) 2002, Christopher Clark 49.250 + * All rights reserved. 49.251 + * 49.252 + * Redistribution and use in source and binary forms, with or without 49.253 + * modification, are permitted provided that the following conditions 49.254 + * are met: 49.255 + * 49.256 + * * Redistributions of source code must retain the above copyright 49.257 + * notice, this list of conditions and the following disclaimer. 49.258 + * 49.259 + * * Redistributions in binary form must reproduce the above copyright 49.260 + * notice, this list of conditions and the following disclaimer in the 49.261 + * documentation and/or other materials provided with the distribution. 49.262 + * 49.263 + * * Neither the name of the original author; nor the names of any contributors 49.264 + * may be used to endorse or promote products derived from this software 49.265 + * without specific prior written permission. 49.266 + * 49.267 + * 49.268 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 49.269 + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 49.270 + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 49.271 + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER 49.272 + * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, 49.273 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 49.274 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR 49.275 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF 49.276 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 49.277 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 49.278 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 49.279 +*/
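The expansion trigger above deserves a note: loadlimit is 65% of the table size, and the uint64_t cast in its computation is load-bearing, since size * 65 overflows 32-bit arithmetic for the largest primes in the table. A quick demonstration:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned int size = 1610612741;          /* largest prime in the table */
        unsigned int bad  = (size * 65) / 100;   /* 32-bit multiply wraps */
        unsigned int good = (unsigned int)(((uint64_t)size * 65) / 100);

        printf("wrapped: %u  correct: %u\n", bad, good); /* 16106130 vs 1046898281 */
        return 0;
    }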
50.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 50.2 +++ b/tools/xenstore/hashtable.h Mon Mar 06 10:21:35 2006 -0700 50.3 @@ -0,0 +1,199 @@ 50.4 +/* Copyright (C) 2002 Christopher Clark <firstname.lastname@cl.cam.ac.uk> */ 50.5 + 50.6 +#ifndef __HASHTABLE_CWC22_H__ 50.7 +#define __HASHTABLE_CWC22_H__ 50.8 + 50.9 +struct hashtable; 50.10 + 50.11 +/* Example of use: 50.12 + * 50.13 + * struct hashtable *h; 50.14 + * struct some_key *k; 50.15 + * struct some_value *v; 50.16 + * 50.17 + * static unsigned int hash_from_key_fn( void *k ); 50.18 + * static int keys_equal_fn ( void *key1, void *key2 ); 50.19 + * 50.20 + * h = create_hashtable(16, hash_from_key_fn, keys_equal_fn); 50.21 + * k = (struct some_key *) malloc(sizeof(struct some_key)); 50.22 + * v = (struct some_value *) malloc(sizeof(struct some_value)); 50.23 + * 50.24 + * (initialise k and v to suitable values) 50.25 + * 50.26 + * if (! hashtable_insert(h,k,v) ) 50.27 + * { exit(-1); } 50.28 + * 50.29 + * if (NULL == (found = hashtable_search(h,k) )) 50.30 + * { printf("not found!"); } 50.31 + * 50.32 + * if (NULL == (found = hashtable_remove(h,k) )) 50.33 + * { printf("Not found\n"); } 50.34 + * 50.35 + */ 50.36 + 50.37 +/* Macros may be used to define type-safe(r) hashtable access functions, with 50.38 + * methods specialized to take known key and value types as parameters. 50.39 + * 50.40 + * Example: 50.41 + * 50.42 + * Insert this at the start of your file: 50.43 + * 50.44 + * DEFINE_HASHTABLE_INSERT(insert_some, struct some_key, struct some_value); 50.45 + * DEFINE_HASHTABLE_SEARCH(search_some, struct some_key, struct some_value); 50.46 + * DEFINE_HASHTABLE_REMOVE(remove_some, struct some_key, struct some_value); 50.47 + * 50.48 + * This defines the functions 'insert_some', 'search_some' and 'remove_some'. 50.49 + * These operate just like hashtable_insert etc., with the same parameters, 50.50 + * but their function signatures have 'struct some_key *' rather than 50.51 + * 'void *', and hence can generate compile time errors if your program is 50.52 + * supplying incorrect data as a key (and similarly for value). 50.53 + * 50.54 + * Note that the hash and key equality functions passed to create_hashtable 50.55 + * still take 'void *' parameters instead of 'some key *'. This shouldn't be 50.56 + * a difficult issue as they're only defined and passed once, and the other 50.57 + * functions will ensure that only valid keys are supplied to them. 50.58 + * 50.59 + * The cost for this checking is increased code size and runtime overhead 50.60 + * - if performance is important, it may be worth switching back to the 50.61 + * unsafe methods once your program has been debugged with the safe methods. 
50.62 + * This just requires switching to some simple alternative defines - eg: 50.63 + * #define insert_some hashtable_insert 50.64 + * 50.65 + */ 50.66 + 50.67 +/***************************************************************************** 50.68 + * create_hashtable 50.69 + 50.70 + * @name create_hashtable 50.71 + * @param minsize minimum initial size of hashtable 50.72 + * @param hashfunction function for hashing keys 50.73 + * @param key_eq_fn function for determining key equality 50.74 + * @return newly created hashtable or NULL on failure 50.75 + */ 50.76 + 50.77 +struct hashtable * 50.78 +create_hashtable(unsigned int minsize, 50.79 + unsigned int (*hashfunction) (void*), 50.80 + int (*key_eq_fn) (void*,void*)); 50.81 + 50.82 +/***************************************************************************** 50.83 + * hashtable_insert 50.84 + 50.85 + * @name hashtable_insert 50.86 + * @param h the hashtable to insert into 50.87 + * @param k the key - hashtable claims ownership and will free on removal 50.88 + * @param v the value - does not claim ownership 50.89 + * @return non-zero for successful insertion 50.90 + * 50.91 + * This function will cause the table to expand if the insertion would take 50.92 + * the ratio of entries to table size over the maximum load factor. 50.93 + * 50.94 + * This function does not check for repeated insertions with a duplicate key. 50.95 + * The value returned when using a duplicate key is undefined -- when 50.96 + * the hashtable changes size, the order of retrieval of duplicate key 50.97 + * entries is reversed. 50.98 + * If in doubt, remove before insert. 50.99 + */ 50.100 + 50.101 +int 50.102 +hashtable_insert(struct hashtable *h, void *k, void *v); 50.103 + 50.104 +#define DEFINE_HASHTABLE_INSERT(fnname, keytype, valuetype) \ 50.105 +int fnname (struct hashtable *h, keytype *k, valuetype *v) \ 50.106 +{ \ 50.107 + return hashtable_insert(h,k,v); \ 50.108 +} 50.109 + 50.110 +/***************************************************************************** 50.111 + * hashtable_search 50.112 + 50.113 + * @name hashtable_search 50.114 + * @param h the hashtable to search 50.115 + * @param k the key to search for - does not claim ownership 50.116 + * @return the value associated with the key, or NULL if none found 50.117 + */ 50.118 + 50.119 +void * 50.120 +hashtable_search(struct hashtable *h, void *k); 50.121 + 50.122 +#define DEFINE_HASHTABLE_SEARCH(fnname, keytype, valuetype) \ 50.123 +valuetype * fnname (struct hashtable *h, keytype *k) \ 50.124 +{ \ 50.125 + return (valuetype *) (hashtable_search(h,k)); \ 50.126 +} 50.127 + 50.128 +/***************************************************************************** 50.129 + * hashtable_remove 50.130 + 50.131 + * @name hashtable_remove 50.132 + * @param h the hashtable to remove the item from 50.133 + * @param k the key to search for - does not claim ownership 50.134 + * @return the value associated with the key, or NULL if none found 50.135 + */ 50.136 + 50.137 +void * /* returns value */ 50.138 +hashtable_remove(struct hashtable *h, void *k); 50.139 + 50.140 +#define DEFINE_HASHTABLE_REMOVE(fnname, keytype, valuetype) \ 50.141 +valuetype * fnname (struct hashtable *h, keytype *k) \ 50.142 +{ \ 50.143 + return (valuetype *) (hashtable_remove(h,k)); \ 50.144 +} 50.145 + 50.146 + 50.147 +/***************************************************************************** 50.148 + * hashtable_count 50.149 + 50.150 + * @name hashtable_count 50.151 + * @param h the hashtable 50.152 + * @return the number of items 
stored in the hashtable 50.153 + */ 50.154 +unsigned int 50.155 +hashtable_count(struct hashtable *h); 50.156 + 50.157 + 50.158 +/***************************************************************************** 50.159 + * hashtable_destroy 50.160 + 50.161 + * @name hashtable_destroy 50.162 + * @param h the hashtable 50.163 + * @param free_values whether to call 'free' on the remaining values 50.164 + */ 50.165 + 50.166 +void 50.167 +hashtable_destroy(struct hashtable *h, int free_values); 50.168 + 50.169 +#endif /* __HASHTABLE_CWC22_H__ */ 50.170 + 50.171 +/* 50.172 + * Copyright (c) 2002, Christopher Clark 50.173 + * All rights reserved. 50.174 + * 50.175 + * Redistribution and use in source and binary forms, with or without 50.176 + * modification, are permitted provided that the following conditions 50.177 + * are met: 50.178 + * 50.179 + * * Redistributions of source code must retain the above copyright 50.180 + * notice, this list of conditions and the following disclaimer. 50.181 + * 50.182 + * * Redistributions in binary form must reproduce the above copyright 50.183 + * notice, this list of conditions and the following disclaimer in the 50.184 + * documentation and/or other materials provided with the distribution. 50.185 + * 50.186 + * * Neither the name of the original author; nor the names of any contributors 50.187 + * may be used to endorse or promote products derived from this software 50.188 + * without specific prior written permission. 50.189 + * 50.190 + * 50.191 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 50.192 + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 50.193 + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 50.194 + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER 50.195 + * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, 50.196 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 50.197 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR 50.198 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF 50.199 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 50.200 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 50.201 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 50.202 +*/
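For reference, a self-contained sketch of driving this API with string keys, using a djb2-style hash like the hash_from_key_fn that xenstored_core.c registers later in this changeset; error handling is trimmed, and ownership follows the rules documented above (the table frees keys, the caller owns values):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include "hashtable.h"

    static unsigned int hash_str(void *k)
    {
        unsigned int h = 5381;
        char *s = k;
        while (*s)
            h = ((h << 5) + h) + (unsigned char)*s++;
        return h;
    }

    static int eq_str(void *k1, void *k2)
    {
        return strcmp(k1, k2) == 0;
    }

    int main(void)
    {
        struct hashtable *h = create_hashtable(16, hash_str, eq_str);
        char *k = strdup("/local/domain/0");  /* table takes ownership of the key */
        char *v = strdup("dom0");

        if (!hashtable_insert(h, k, v))       /* non-zero return means success */
            return 1;

        printf("%s\n", (char *)hashtable_search(h, "/local/domain/0"));

        free(hashtable_remove(h, "/local/domain/0")); /* remove returns the value */
        hashtable_destroy(h, 1 /* free any remaining values */);
        return 0;
    }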
51.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 51.2 +++ b/tools/xenstore/hashtable_private.h Mon Mar 06 10:21:35 2006 -0700 51.3 @@ -0,0 +1,85 @@ 51.4 +/* Copyright (C) 2002, 2004 Christopher Clark <firstname.lastname@cl.cam.ac.uk> */ 51.5 + 51.6 +#ifndef __HASHTABLE_PRIVATE_CWC22_H__ 51.7 +#define __HASHTABLE_PRIVATE_CWC22_H__ 51.8 + 51.9 +#include "hashtable.h" 51.10 + 51.11 +/*****************************************************************************/ 51.12 +struct entry 51.13 +{ 51.14 + void *k, *v; 51.15 + unsigned int h; 51.16 + struct entry *next; 51.17 +}; 51.18 + 51.19 +struct hashtable { 51.20 + unsigned int tablelength; 51.21 + struct entry **table; 51.22 + unsigned int entrycount; 51.23 + unsigned int loadlimit; 51.24 + unsigned int primeindex; 51.25 + unsigned int (*hashfn) (void *k); 51.26 + int (*eqfn) (void *k1, void *k2); 51.27 +}; 51.28 + 51.29 +/*****************************************************************************/ 51.30 +unsigned int 51.31 +hash(struct hashtable *h, void *k); 51.32 + 51.33 +/*****************************************************************************/ 51.34 +/* indexFor */ 51.35 +static inline unsigned int 51.36 +indexFor(unsigned int tablelength, unsigned int hashvalue) { 51.37 + return (hashvalue % tablelength); 51.38 +}; 51.39 + 51.40 +/* Only works if tablelength == 2^N */ 51.41 +/*static inline unsigned int 51.42 +indexFor(unsigned int tablelength, unsigned int hashvalue) 51.43 +{ 51.44 + return (hashvalue & (tablelength - 1u)); 51.45 +} 51.46 +*/ 51.47 + 51.48 +/*****************************************************************************/ 51.49 +#define freekey(X) free(X) 51.50 +/*define freekey(X) ; */ 51.51 + 51.52 + 51.53 +/*****************************************************************************/ 51.54 + 51.55 +#endif /* __HASHTABLE_PRIVATE_CWC22_H__*/ 51.56 + 51.57 +/* 51.58 + * Copyright (c) 2002, Christopher Clark 51.59 + * All rights reserved. 51.60 + * 51.61 + * Redistribution and use in source and binary forms, with or without 51.62 + * modification, are permitted provided that the following conditions 51.63 + * are met: 51.64 + * 51.65 + * * Redistributions of source code must retain the above copyright 51.66 + * notice, this list of conditions and the following disclaimer. 51.67 + * 51.68 + * * Redistributions in binary form must reproduce the above copyright 51.69 + * notice, this list of conditions and the following disclaimer in the 51.70 + * documentation and/or other materials provided with the distribution. 51.71 + * 51.72 + * * Neither the name of the original author; nor the names of any contributors 51.73 + * may be used to endorse or promote products derived from this software 51.74 + * without specific prior written permission. 51.75 + * 51.76 + * 51.77 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 51.78 + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 51.79 + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 51.80 + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER 51.81 + * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, 51.82 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 51.83 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR 51.84 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF 51.85 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 51.86 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 51.87 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 51.88 +*/
52.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 52.2 +++ b/tools/xenstore/xenstore_control.c Mon Mar 06 10:21:35 2006 -0700 52.3 @@ -0,0 +1,35 @@ 52.4 +#include <stdio.h> 52.5 +#include <stdlib.h> 52.6 +#include <string.h> 52.7 + 52.8 +#include "xs.h" 52.9 + 52.10 + 52.11 +int main(int argc, char **argv) 52.12 +{ 52.13 + struct xs_handle * xsh; 52.14 + 52.15 + if (argc < 2 || 52.16 + strcmp(argv[1], "check")) 52.17 + { 52.18 + fprintf(stderr, 52.19 + "Usage:\n" 52.20 + "\n" 52.21 + " %s check\n" 52.22 + "\n", argv[0]); 52.23 + return 2; 52.24 + } 52.25 + 52.26 + xsh = xs_daemon_open(); 52.27 + 52.28 + if (xsh == NULL) { 52.29 + fprintf(stderr, "Failed to contact Xenstored.\n"); 52.30 + return 1; 52.31 + } 52.32 + 52.33 + xs_debug_command(xsh, argv[1], NULL, 0); 52.34 + 52.35 + xs_daemon_close(xsh); 52.36 + 52.37 + return 0; 52.38 +}
53.1 --- a/tools/xenstore/xenstored_core.c Mon Mar 06 09:09:18 2006 -0700 53.2 +++ b/tools/xenstore/xenstored_core.c Mon Mar 06 10:21:35 2006 -0700 53.3 @@ -51,15 +51,32 @@ 53.4 #include "xenctrl.h" 53.5 #include "tdb.h" 53.6 53.7 +#include "hashtable.h" 53.8 + 53.9 + 53.10 extern int eventchn_fd; /* in xenstored_domain.c */ 53.11 53.12 -static bool verbose; 53.13 +static bool verbose = false; 53.14 LIST_HEAD(connections); 53.15 static int tracefd = -1; 53.16 +static bool recovery = true; 53.17 +static bool remove_local = true; 53.18 static int reopen_log_pipe[2]; 53.19 static char *tracefile = NULL; 53.20 static TDB_CONTEXT *tdb_ctx; 53.21 53.22 +static void corrupt(struct connection *conn, const char *fmt, ...); 53.23 +static void check_store(); 53.24 + 53.25 +#define log(...) \ 53.26 + do { \ 53.27 + char *s = talloc_asprintf(NULL, __VA_ARGS__); \ 53.28 + trace("%s\n", s); \ 53.29 + syslog(LOG_ERR, "%s", s); \ 53.30 + talloc_free(s); \ 53.31 + } while (0) 53.32 + 53.33 + 53.34 #ifdef TESTING 53.35 static bool failtest = false; 53.36 53.37 @@ -104,33 +121,6 @@ int test_mkdir(const char *dir, int perm 53.38 53.39 #include "xenstored_test.h" 53.40 53.41 -/* FIXME: Ideally, this should never be called. Some can be eliminated. */ 53.42 -/* Something is horribly wrong: shutdown immediately. */ 53.43 -void __attribute__((noreturn)) corrupt(struct connection *conn, 53.44 - const char *fmt, ...) 53.45 -{ 53.46 - va_list arglist; 53.47 - char *str; 53.48 - int saved_errno = errno; 53.49 - 53.50 - va_start(arglist, fmt); 53.51 - str = talloc_vasprintf(NULL, fmt, arglist); 53.52 - va_end(arglist); 53.53 - 53.54 - trace("xenstored corruption: connection id %i: err %s: %s", 53.55 - conn ? (int)conn->id : -1, strerror(saved_errno), str); 53.56 - eprintf("xenstored corruption: connection id %i: err %s: %s", 53.57 - conn ? (int)conn->id : -1, strerror(saved_errno), str); 53.58 -#ifdef TESTING 53.59 - /* Allow them to attach debugger. */ 53.60 - sleep(30); 53.61 -#endif 53.62 - syslog(LOG_DAEMON, 53.63 - "xenstored corruption: connection id %i: err %s: %s", 53.64 - conn ? (int)conn->id : -1, strerror(saved_errno), str); 53.65 - _exit(2); 53.66 -} 53.67 - 53.68 TDB_CONTEXT *tdb_context(struct connection *conn) 53.69 { 53.70 /* conn = NULL used in manual_node at setup. 
*/ 53.71 @@ -216,8 +206,9 @@ static void trace_io(const struct connec 53.72 now = time(NULL); 53.73 tm = localtime(&now); 53.74 53.75 - trace("%s %p %02d:%02d:%02d %s (", prefix, conn, 53.76 - tm->tm_hour, tm->tm_min, tm->tm_sec, 53.77 + trace("%s %p %04d%02d%02d %02d:%02d:%02d %s (", prefix, conn, 53.78 + tm->tm_year + 1900, tm->tm_mon + 1, 53.79 + tm->tm_mday, tm->tm_hour, tm->tm_min, tm->tm_sec, 53.80 sockmsg_string(data->hdr.msg.type)); 53.81 53.82 for (i = 0; i < data->hdr.msg.len; i++) 53.83 @@ -415,16 +406,19 @@ static struct node *read_node(struct con 53.84 TDB_DATA key, data; 53.85 uint32_t *p; 53.86 struct node *node; 53.87 + TDB_CONTEXT * context = tdb_context(conn); 53.88 53.89 key.dptr = (void *)name; 53.90 key.dsize = strlen(name); 53.91 - data = tdb_fetch(tdb_context(conn), key); 53.92 + data = tdb_fetch(context, key); 53.93 53.94 if (data.dptr == NULL) { 53.95 - if (tdb_error(tdb_context(conn)) == TDB_ERR_NOEXIST) 53.96 + if (tdb_error(context) == TDB_ERR_NOEXIST) 53.97 errno = ENOENT; 53.98 - else 53.99 + else { 53.100 + log("TDB error on read: %s", tdb_errorstr(context)); 53.101 errno = EIO; 53.102 + } 53.103 return NULL; 53.104 } 53.105 53.106 @@ -837,8 +831,6 @@ static int destroy_node(void *_node) 53.107 return 0; 53.108 } 53.109 53.110 -/* Be careful: create heirarchy, put entry in existing parent *last*. 53.111 - * This helps fsck if we die during this. */ 53.112 static struct node *create_node(struct connection *conn, 53.113 const char *name, 53.114 void *data, unsigned int datalen) 53.115 @@ -939,8 +931,9 @@ static void delete_node(struct connectio 53.116 { 53.117 unsigned int i; 53.118 53.119 - /* Delete self, then delete children. If something goes wrong, 53.120 - * consistency check will clean up this way. */ 53.121 + /* Delete self, then delete children. If we crash, then the worst 53.122 + that can happen is the children will continue to take up space, but 53.123 + will otherwise be unreachable. */ 53.124 delete_node_single(conn, node); 53.125 53.126 /* Delete children, too. */ 53.127 @@ -950,18 +943,35 @@ static void delete_node(struct connectio 53.128 child = read_node(conn, 53.129 talloc_asprintf(node, "%s/%s", node->name, 53.130 node->children + i)); 53.131 - if (!child) 53.132 - corrupt(conn, "No child '%s' found", child); 53.133 - delete_node(conn, child); 53.134 + if (child) { 53.135 + delete_node(conn, child); 53.136 + } 53.137 + else { 53.138 + trace("delete_node: No child '%s/%s' found!\n", 53.139 + node->name, node->children + i); 53.140 + /* Skip it, we've already deleted the parent. */ 53.141 + } 53.142 } 53.143 } 53.144 53.145 + 53.146 /* Delete memory using memmove. 
*/ 53.147 static void memdel(void *mem, unsigned off, unsigned len, unsigned total) 53.148 { 53.149 memmove(mem + off, mem + off + len, total - off - len); 53.150 } 53.151 53.152 + 53.153 +static bool remove_child_entry(struct connection *conn, struct node *node, 53.154 + size_t offset) 53.155 +{ 53.156 + size_t childlen = strlen(node->children + offset); 53.157 + memdel(node->children, offset, childlen + 1, node->childlen); 53.158 + node->childlen -= childlen + 1; 53.159 + return write_node(conn, node); 53.160 +} 53.161 + 53.162 + 53.163 static bool delete_child(struct connection *conn, 53.164 struct node *node, const char *childname) 53.165 { 53.166 @@ -969,19 +979,19 @@ static bool delete_child(struct connecti 53.167 53.168 for (i = 0; i < node->childlen; i += strlen(node->children+i) + 1) { 53.169 if (streq(node->children+i, childname)) { 53.170 - memdel(node->children, i, strlen(childname) + 1, 53.171 - node->childlen); 53.172 - node->childlen -= strlen(childname) + 1; 53.173 - return write_node(conn, node); 53.174 + return remove_child_entry(conn, node, i); 53.175 } 53.176 } 53.177 corrupt(conn, "Can't find child '%s' in %s", childname, node->name); 53.178 + return false; 53.179 } 53.180 53.181 53.182 static int _rm(struct connection *conn, struct node *node, const char *name) 53.183 { 53.184 - /* Delete from parent first, then if something explodes fsck cleans. */ 53.185 + /* Delete from parent first, then if we crash, the worst that can 53.186 + happen is the child will continue to take up space, but will 53.187 + otherwise be unreachable. */ 53.188 struct node *parent = read_node(conn, get_parent(name)); 53.189 if (!parent) { 53.190 send_error(conn, EINVAL); 53.191 @@ -1000,10 +1010,12 @@ static int _rm(struct connection *conn, 53.192 53.193 static void internal_rm(const char *name) 53.194 { 53.195 - char *tname = talloc_strdup(talloc_autofree_context(), name); 53.196 + char *tname = talloc_strdup(NULL, name); 53.197 struct node *node = read_node(NULL, tname); 53.198 if (node) 53.199 _rm(NULL, node, tname); 53.200 + talloc_free(node); 53.201 + talloc_free(tname); 53.202 } 53.203 53.204 53.205 @@ -1149,18 +1161,19 @@ static void process_message(struct conne 53.206 case XS_DEBUG: 53.207 if (streq(in->buffer, "print")) 53.208 xprintf("debug: %s", in->buffer + get_string(in, 0)); 53.209 + if (streq(in->buffer, "check")) 53.210 + check_store(); 53.211 #ifdef TESTING 53.212 /* For testing, we allow them to set id. */ 53.213 if (streq(in->buffer, "setid")) { 53.214 conn->id = atoi(in->buffer + get_string(in, 0)); 53.215 - send_ack(conn, XS_DEBUG); 53.216 } else if (streq(in->buffer, "failtest")) { 53.217 if (get_string(in, 0) < in->used) 53.218 srandom(atoi(in->buffer + get_string(in, 0))); 53.219 - send_ack(conn, XS_DEBUG); 53.220 failtest = true; 53.221 } 53.222 #endif /* TESTING */ 53.223 + send_ack(conn, XS_DEBUG); 53.224 break; 53.225 53.226 case XS_WATCH: 53.227 @@ -1258,7 +1271,7 @@ static void handle_input(struct connecti 53.228 53.229 if (in->hdr.msg.len > PATH_MAX) { 53.230 #ifndef TESTING 53.231 - syslog(LOG_DAEMON, "Client tried to feed us %i", 53.232 + syslog(LOG_ERR, "Client tried to feed us %i", 53.233 in->hdr.msg.len); 53.234 #endif 53.235 goto bad_client; 53.236 @@ -1425,10 +1438,18 @@ static void setup_structure(void) 53.237 balloon driver will pick up stale entries. In the case of 53.238 the balloon driver, this can be fatal. 
53.239 */ 53.240 - char *tlocal = talloc_strdup(talloc_autofree_context(), 53.241 - "/local"); 53.242 - internal_rm("/local"); 53.243 - create_node(NULL, tlocal, NULL, 0); 53.244 + char *tlocal = talloc_strdup(NULL, "/local"); 53.245 + 53.246 + check_store(); 53.247 + 53.248 + if (remove_local) { 53.249 + internal_rm("/local"); 53.250 + create_node(NULL, tlocal, NULL, 0); 53.251 + 53.252 + check_store(); 53.253 + } 53.254 + 53.255 + talloc_free(tlocal); 53.256 } 53.257 else { 53.258 tdb_ctx = tdb_open(tdbname, 7919, TDB_FLAGS, O_RDWR|O_CREAT, 53.259 @@ -1439,11 +1460,197 @@ static void setup_structure(void) 53.260 manual_node("/", "tool"); 53.261 manual_node("/tool", "xenstored"); 53.262 manual_node("/tool/xenstored", NULL); 53.263 + 53.264 + check_store(); 53.265 + } 53.266 +} 53.267 + 53.268 + 53.269 +static unsigned int hash_from_key_fn(void *k) 53.270 +{ 53.271 + char *str = k; 53.272 + unsigned int hash = 5381; 53.273 + char c; 53.274 + 53.275 + while ((c = *str++)) 53.276 + hash = ((hash << 5) + hash) + (unsigned int)c; 53.277 + 53.278 + return hash; 53.279 +} 53.280 + 53.281 + 53.282 +static int keys_equal_fn(void *key1, void *key2) 53.283 +{ 53.284 + return 0 == strcmp((char *)key1, (char *)key2); 53.285 +} 53.286 + 53.287 + 53.288 +static char *child_name(const char *s1, const char *s2) 53.289 +{ 53.290 + if (strcmp(s1, "/")) { 53.291 + return talloc_asprintf(NULL, "%s/%s", s1, s2); 53.292 + } 53.293 + else { 53.294 + return talloc_asprintf(NULL, "/%s", s2); 53.295 + } 53.296 +} 53.297 + 53.298 + 53.299 +static void remember_string(struct hashtable *hash, const char *str) 53.300 +{ 53.301 + char *k = malloc(strlen(str) + 1); 53.302 + strcpy(k, str); 53.303 + hashtable_insert(hash, k, (void *)1); 53.304 +} 53.305 + 53.306 + 53.307 +/** 53.308 + * A node has a children field that names the children of the node, separated 53.309 + * by NULs. We check whether there are entries in there that are duplicated 53.310 + * (and if so, delete the second one), and whether there are any that do not 53.311 + * have a corresponding child node (and if so, delete them). Each valid child 53.312 + * is then recursively checked. 53.313 + * 53.314 + * No deleting is performed if the recovery flag is cleared (i.e. -R was 53.315 + * passed on the command line). 53.316 + * 53.317 + * As we go, we record each node in the given reachable hashtable. These 53.318 + * entries will be used later in clean_store. 
53.319 + */ 53.320 +static void check_store_(const char *name, struct hashtable *reachable) 53.321 +{ 53.322 + struct node *node = read_node(NULL, name); 53.323 + 53.324 + if (node) { 53.325 + size_t i = 0; 53.326 + 53.327 + struct hashtable * children = 53.328 + create_hashtable(16, hash_from_key_fn, keys_equal_fn); 53.329 + 53.330 + remember_string(reachable, name); 53.331 + 53.332 + while (i < node->childlen) { 53.333 + size_t childlen = strlen(node->children + i); 53.334 + char * childname = child_name(node->name, 53.335 + node->children + i); 53.336 + struct node *childnode = read_node(NULL, childname); 53.337 + 53.338 + if (childnode) { 53.339 + if (hashtable_search(children, childname)) { 53.340 + log("check_store: '%s' is duplicated!", 53.341 + childname); 53.342 + 53.343 + if (recovery) { 53.344 + remove_child_entry(NULL, node, 53.345 + i); 53.346 + i -= childlen + 1; 53.347 + } 53.348 + } 53.349 + else { 53.350 + remember_string(children, childname); 53.351 + check_store_(childname, reachable); 53.352 + } 53.353 + } 53.354 + else { 53.355 + log("check_store: No child '%s' found!\n", 53.356 + childname); 53.357 + 53.358 + if (recovery) { 53.359 + remove_child_entry(NULL, node, i); 53.360 + i -= childlen + 1; 53.361 + } 53.362 + } 53.363 + 53.364 + talloc_free(childnode); 53.365 + talloc_free(childname); 53.366 + i += childlen + 1; 53.367 + } 53.368 + 53.369 + hashtable_destroy(children, 0 /* Don't free values (they are 53.370 + all (void *)1) */); 53.371 + talloc_free(node); 53.372 + } 53.373 + else { 53.374 + /* Impossible, because no database should ever be without the 53.375 + root, and otherwise, we've just checked in our caller 53.376 + (which made a recursive call to get here). */ 53.377 + 53.378 + log("check_store: No child '%s' found: impossible!", name); 53.379 + } 53.380 +} 53.381 + 53.382 + 53.383 +/** 53.384 + * Helper to clean_store below. 53.385 + */ 53.386 +static int clean_store_(TDB_CONTEXT *tdb, TDB_DATA key, TDB_DATA val, 53.387 + void *private) 53.388 +{ 53.389 + struct hashtable *reachable = private; 53.390 + char * name = talloc_strndup(NULL, key.dptr, key.dsize); 53.391 + 53.392 + if (!hashtable_search(reachable, name)) { 53.393 + log("clean_store: '%s' is orphaned!", name); 53.394 + if (recovery) { 53.395 + tdb_delete(tdb, key); 53.396 + } 53.397 } 53.398 53.399 - /* FIXME: Fsck */ 53.400 + talloc_free(name); 53.401 + 53.402 + return 0; 53.403 +} 53.404 + 53.405 + 53.406 +/** 53.407 + * Given the list of reachable nodes, iterate over the whole store, and 53.408 + * remove any that were not reached. 53.409 + */ 53.410 +static void clean_store(struct hashtable *reachable) 53.411 +{ 53.412 + tdb_traverse(tdb_ctx, &clean_store_, reachable); 53.413 } 53.414 53.415 + 53.416 +static void check_store() 53.417 +{ 53.418 + char * root = talloc_strdup(NULL, "/"); 53.419 + struct hashtable * reachable = 53.420 + create_hashtable(16, hash_from_key_fn, keys_equal_fn); 53.421 + 53.422 + log("Checking store ..."); 53.423 + check_store_(root, reachable); 53.424 + clean_store(reachable); 53.425 + log("Checking store complete."); 53.426 + 53.427 + hashtable_destroy(reachable, 0 /* Don't free values (they are all 53.428 + (void *)1) */); 53.429 + talloc_free(root); 53.430 +} 53.431 + 53.432 + 53.433 +/* Something is horribly wrong: check the store. */ 53.434 +static void corrupt(struct connection *conn, const char *fmt, ...) 
53.435 +{ 53.436 + va_list arglist; 53.437 + char *str; 53.438 + int saved_errno = errno; 53.439 + 53.440 + va_start(arglist, fmt); 53.441 + str = talloc_vasprintf(NULL, fmt, arglist); 53.442 + va_end(arglist); 53.443 + 53.444 + log("corruption detected by connection %i: err %s: %s", 53.445 + conn ? (int)conn->id : -1, strerror(saved_errno), str); 53.446 + 53.447 +#ifdef TESTING 53.448 + /* Allow them to attach debugger. */ 53.449 + sleep(30); 53.450 +#endif 53.451 + check_store(); 53.452 +} 53.453 + 53.454 + 53.455 static void write_pidfile(const char *pidfile) 53.456 { 53.457 char buf[100]; 53.458 @@ -1506,6 +1713,9 @@ static void usage(void) 53.459 " --no-fork to request that the daemon does not fork,\n" 53.460 " --output-pid to request that the pid of the daemon is output,\n" 53.461 " --trace-file <file> giving the file for logging, and\n" 53.462 +" --no-recovery to request that no recovery should be attempted when\n" 53.463 +" the store is corrupted (debug only),\n" 53.464 +" --preserve-local to request that /local is preserved on start-up,\n" 53.465 " --verbose to request verbose execution.\n"); 53.466 } 53.467 53.468 @@ -1517,6 +1727,8 @@ static struct option options[] = { 53.469 { "no-fork", 0, NULL, 'N' }, 53.470 { "output-pid", 0, NULL, 'P' }, 53.471 { "trace-file", 1, NULL, 'T' }, 53.472 + { "no-recovery", 0, NULL, 'R' }, 53.473 + { "preserve-local", 0, NULL, 'L' }, 53.474 { "verbose", 0, NULL, 'V' }, 53.475 { NULL, 0, NULL, 0 } }; 53.476 53.477 @@ -1532,7 +1744,7 @@ int main(int argc, char *argv[]) 53.478 bool no_domain_init = false; 53.479 const char *pidfile = NULL; 53.480 53.481 - while ((opt = getopt_long(argc, argv, "DF:HNPT:V", options, 53.482 + while ((opt = getopt_long(argc, argv, "DF:HNPT:RLV", options, 53.483 NULL)) != -1) { 53.484 switch (opt) { 53.485 case 'D': 53.486 @@ -1550,6 +1762,12 @@ int main(int argc, char *argv[]) 53.487 case 'P': 53.488 outputpid = true; 53.489 break; 53.490 + case 'R': 53.491 + recovery = false; 53.492 + break; 53.493 + case 'L': 53.494 + remove_local = false; 53.495 + break; 53.496 case 'T': 53.497 tracefile = optarg; 53.498 break;
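Taken together, check_store and clean_store form a mark-and-sweep pass over the database: the walk from "/" marks every reachable node in a hashtable, and a full traversal then deletes anything that was never marked. A condensed, self-contained sketch of the same shape, with a toy array standing in for the TDB-backed store:

    #include <stdio.h>
    #include <string.h>

    /* Toy store: each entry names its parent; NULL means nothing links to it. */
    struct node { const char *name; const char *parent; };

    static struct node store[] = {
        { "/",               NULL    },  /* root: marked unconditionally */
        { "/tool",           "/"     },
        { "/tool/xenstored", "/tool" },
        { "/orphan",         NULL    },  /* unreachable: the sweep finds it */
    };

    int main(void)
    {
        int n = sizeof(store) / sizeof(store[0]);
        int reach[4] = { 1, 0, 0, 0 };  /* mark phase seed: the root */
        int i, j, changed = 1;

        /* Mark: propagate reachability until a fixed point; check_store_
         * does the same as a recursive walk of each node's children. */
        while (changed) {
            changed = 0;
            for (i = 1; i < n; i++)
                for (j = 0; j < n; j++)
                    if (!reach[i] && reach[j] && store[i].parent &&
                        strcmp(store[i].parent, store[j].name) == 0)
                        reach[i] = changed = 1;
        }

        /* Sweep: clean_store_'s tdb_delete, here just a report. */
        for (i = 0; i < n; i++)
            if (!reach[i])
                printf("orphaned, would delete: %s\n", store[i].name);
        return 0;
    }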
54.1 --- a/tools/xenstore/xenstored_core.h Mon Mar 06 09:09:18 2006 -0700 54.2 +++ b/tools/xenstore/xenstored_core.h Mon Mar 06 10:21:35 2006 -0700 54.3 @@ -148,10 +148,6 @@ int destroy_tdb(void *_tdb); 54.4 /* Replace the tdb: required for transaction code */ 54.5 bool replace_tdb(const char *newname, TDB_CONTEXT *newtdb); 54.6 54.7 -/* Fail due to excessive corruption, capitalist pigdogs! */ 54.8 -void __attribute__((noreturn)) corrupt(struct connection *conn, 54.9 - const char *fmt, ...); 54.10 - 54.11 struct connection *new_connection(connwritefn_t *write, connreadfn_t *read); 54.12 54.13
55.1 --- a/tools/xenstore/xenstored_domain.c Mon Mar 06 09:09:18 2006 -0700 55.2 +++ b/tools/xenstore/xenstored_domain.c Mon Mar 06 10:21:35 2006 -0700 55.3 @@ -27,7 +27,6 @@ 55.4 #include <sys/types.h> 55.5 #include <sys/stat.h> 55.6 #include <fcntl.h> 55.7 -#include <paths.h> 55.8 55.9 //#define DEBUG 55.10 #include "utils.h" 55.11 @@ -466,22 +465,9 @@ static int dom0_init(void) 55.12 { 55.13 int rc, fd; 55.14 evtchn_port_t port; 55.15 - unsigned long kva; 55.16 char str[20]; 55.17 struct domain *dom0; 55.18 55.19 - fd = open(XENSTORED_PROC_KVA, O_RDONLY); 55.20 - if (fd == -1) 55.21 - return -1; 55.22 - 55.23 - rc = read(fd, str, sizeof(str)); 55.24 - if (rc == -1) 55.25 - goto outfd; 55.26 - str[rc] = '\0'; 55.27 - kva = strtoul(str, NULL, 0); 55.28 - 55.29 - close(fd); 55.30 - 55.31 fd = open(XENSTORED_PROC_PORT, O_RDONLY); 55.32 if (fd == -1) 55.33 return -1; 55.34 @@ -496,12 +482,12 @@ static int dom0_init(void) 55.35 55.36 dom0 = new_domain(NULL, 0, port); 55.37 55.38 - fd = open(_PATH_KMEM, O_RDWR); 55.39 + fd = open(XENSTORED_PROC_KVA, O_RDWR); 55.40 if (fd == -1) 55.41 return -1; 55.42 55.43 dom0->interface = mmap(NULL, getpagesize(), PROT_READ|PROT_WRITE, 55.44 - MAP_SHARED, fd, kva); 55.45 + MAP_SHARED, fd, 0); 55.46 if (dom0->interface == MAP_FAILED) 55.47 goto outfd; 55.48
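dom0_init() above no longer parses a kernel virtual address out of XENSTORED_PROC_KVA and maps it through /dev/kmem; the proc file itself is now opened read-write and mmap()ed at offset zero, so the kernel driver hands back the interface page directly. A minimal sketch of the new sequence, with the error paths collapsed (the real code keeps the descriptor around via its outfd label):

    #include <fcntl.h>
    #include <stddef.h>
    #include <sys/mman.h>
    #include <unistd.h>

    static void *map_dom0_interface(const char *path)  /* e.g. XENSTORED_PROC_KVA */
    {
        void *iface;
        int fd = open(path, O_RDWR);
        if (fd == -1)
            return NULL;
        iface = mmap(NULL, getpagesize(), PROT_READ | PROT_WRITE,
                     MAP_SHARED, fd, 0);   /* offset 0: driver picks the page */
        close(fd);                         /* the mapping outlives the fd */
        return (iface == MAP_FAILED) ? NULL : iface;
    }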
56.1 --- a/xen/arch/ia64/xen/dom0_ops.c Mon Mar 06 09:09:18 2006 -0700 56.2 +++ b/xen/arch/ia64/xen/dom0_ops.c Mon Mar 06 10:21:35 2006 -0700 56.3 @@ -16,10 +16,11 @@ 56.4 #include <asm/pdb.h> 56.5 #include <xen/trace.h> 56.6 #include <xen/console.h> 56.7 +#include <xen/guest_access.h> 56.8 #include <public/sched_ctl.h> 56.9 #include <asm/vmx.h> 56.10 56.11 -long arch_do_dom0_op(dom0_op_t *op, dom0_op_t *u_dom0_op) 56.12 +long arch_do_dom0_op(dom0_op_t *op, GUEST_HANDLE(dom0_op_t) u_dom0_op) 56.13 { 56.14 long ret = 0; 56.15 56.16 @@ -64,7 +65,7 @@ long arch_do_dom0_op(dom0_op_t *op, dom0 56.17 56.18 put_domain(d); 56.19 56.20 - copy_to_user(u_dom0_op, op, sizeof(*op)); 56.21 + copy_to_guest(u_dom0_op, op, 1); 56.22 } 56.23 break; 56.24 56.25 @@ -74,7 +75,6 @@ long arch_do_dom0_op(dom0_op_t *op, dom0 56.26 int n,j; 56.27 int num = op->u.getpageframeinfo2.num; 56.28 domid_t dom = op->u.getpageframeinfo2.domain; 56.29 - unsigned long *s_ptr = (unsigned long*) op->u.getpageframeinfo2.array; 56.30 struct domain *d; 56.31 unsigned long *l_arr; 56.32 ret = -ESRCH; 56.33 @@ -95,7 +95,8 @@ long arch_do_dom0_op(dom0_op_t *op, dom0 56.34 { 56.35 int k = ((num-n)>GPF2_BATCH)?GPF2_BATCH:(num-n); 56.36 56.37 - if ( copy_from_user(l_arr, &s_ptr[n], k*sizeof(unsigned long)) ) 56.38 + if ( copy_from_guest_offset(l_arr, op->u.getpageframeinfo2.array, 56.39 + n, k) ) 56.40 { 56.41 ret = -EINVAL; 56.42 break; 56.43 @@ -135,7 +136,8 @@ long arch_do_dom0_op(dom0_op_t *op, dom0 56.44 56.45 } 56.46 56.47 - if ( copy_to_user(&s_ptr[n], l_arr, k*sizeof(unsigned long)) ) 56.48 + if ( copy_to_guest_offset(op->u.getpageframeinfo2.array, 56.49 + n, l_arr, k) ) 56.50 { 56.51 ret = -EINVAL; 56.52 break; 56.53 @@ -160,7 +162,6 @@ long arch_do_dom0_op(dom0_op_t *op, dom0 56.54 unsigned long start_page = op->u.getmemlist.max_pfns >> 32; 56.55 unsigned long nr_pages = op->u.getmemlist.max_pfns & 0xffffffff; 56.56 unsigned long mfn; 56.57 - unsigned long *buffer = op->u.getmemlist.buffer; 56.58 56.59 ret = -EINVAL; 56.60 if ( d != NULL ) 56.61 @@ -180,16 +181,16 @@ long arch_do_dom0_op(dom0_op_t *op, dom0 56.62 { 56.63 mfn = gmfn_to_mfn_foreign(d, i); 56.64 56.65 - if ( put_user(mfn, buffer) ) 56.66 + if ( copy_to_guest_offset(op->u.getmemlist.buffer, 56.67 + i - start_page, &mfn, 1) ) 56.68 { 56.69 ret = -EFAULT; 56.70 break; 56.71 } 56.72 - buffer++; 56.73 } 56.74 56.75 op->u.getmemlist.num_pfns = i - start_page; 56.76 - copy_to_user(u_dom0_op, op, sizeof(*op)); 56.77 + copy_to_guest(u_dom0_op, op, 1); 56.78 56.79 put_domain(d); 56.80 } 56.81 @@ -211,7 +212,7 @@ long arch_do_dom0_op(dom0_op_t *op, dom0 56.82 memset(pi->hw_cap, 0, sizeof(pi->hw_cap)); 56.83 //memcpy(pi->hw_cap, boot_cpu_data.x86_capability, NCAPINTS*4); 56.84 ret = 0; 56.85 - if ( copy_to_user(u_dom0_op, op, sizeof(*op)) ) 56.86 + if ( copy_to_guest(u_dom0_op, op, 1) ) 56.87 ret = -EFAULT; 56.88 } 56.89 break;
57.1 --- a/xen/arch/ia64/xen/xensetup.c Mon Mar 06 09:09:18 2006 -0700 57.2 +++ b/xen/arch/ia64/xen/xensetup.c Mon Mar 06 10:21:35 2006 -0700 57.3 @@ -12,7 +12,7 @@ 57.4 #include <xen/sched.h> 57.5 #include <xen/mm.h> 57.6 #include <public/version.h> 57.7 -//#include <xen/delay.h> 57.8 +#include <xen/gdbstub.h> 57.9 #include <xen/compile.h> 57.10 #include <xen/console.h> 57.11 #include <xen/serial.h> 57.12 @@ -342,6 +342,8 @@ printk("About to call __cpu_up(%d)\n",i) 57.13 smp_cpus_done(max_cpus); 57.14 #endif 57.15 57.16 + initialise_gdb(); /* could be moved earlier */ 57.17 + 57.18 do_initcalls(); 57.19 printk("About to call sort_main_extable()\n"); 57.20 sort_main_extable();
58.1 --- a/xen/arch/x86/boot/mkelf32.c Mon Mar 06 09:09:18 2006 -0700 58.2 +++ b/xen/arch/x86/boot/mkelf32.c Mon Mar 06 10:21:35 2006 -0700 58.3 @@ -244,8 +244,8 @@ int main(int argc, char **argv) 58.4 58.5 inimage = argv[1]; 58.6 outimage = argv[2]; 58.7 - loadbase = strtoull(argv[3], NULL, 16); 58.8 - final_exec_addr = strtoul(argv[4], NULL, 16); 58.9 + loadbase = strtoul(argv[3], NULL, 16); 58.10 + final_exec_addr = strtoull(argv[4], NULL, 16); 58.11 58.12 infd = open(inimage, O_RDONLY); 58.13 if ( infd == -1 )
59.1 --- a/xen/arch/x86/dom0_ops.c Mon Mar 06 09:09:18 2006 -0700 59.2 +++ b/xen/arch/x86/dom0_ops.c Mon Mar 06 10:21:35 2006 -0700 59.3 @@ -10,6 +10,7 @@ 59.4 #include <xen/types.h> 59.5 #include <xen/lib.h> 59.6 #include <xen/mm.h> 59.7 +#include <xen/guest_access.h> 59.8 #include <public/dom0_ops.h> 59.9 #include <xen/sched.h> 59.10 #include <xen/event.h> 59.11 @@ -48,7 +49,7 @@ static void read_msr_for(void *unused) 59.12 (void)rdmsr_safe(msr_addr, msr_lo, msr_hi); 59.13 } 59.14 59.15 -long arch_do_dom0_op(struct dom0_op *op, struct dom0_op *u_dom0_op) 59.16 +long arch_do_dom0_op(struct dom0_op *op, GUEST_HANDLE(dom0_op_t) u_dom0_op) 59.17 { 59.18 long ret = 0; 59.19 59.20 @@ -75,7 +76,7 @@ long arch_do_dom0_op(struct dom0_op *op, 59.21 59.22 op->u.msr.out1 = msr_lo; 59.23 op->u.msr.out2 = msr_hi; 59.24 - copy_to_user(u_dom0_op, op, sizeof(*op)); 59.25 + copy_to_guest(u_dom0_op, op, 1); 59.26 } 59.27 ret = 0; 59.28 } 59.29 @@ -90,7 +91,7 @@ long arch_do_dom0_op(struct dom0_op *op, 59.30 { 59.31 ret = shadow_mode_control(d, &op->u.shadow_control); 59.32 put_domain(d); 59.33 - copy_to_user(u_dom0_op, op, sizeof(*op)); 59.34 + copy_to_guest(u_dom0_op, op, 1); 59.35 } 59.36 } 59.37 break; 59.38 @@ -102,10 +103,11 @@ long arch_do_dom0_op(struct dom0_op *op, 59.39 op->u.add_memtype.nr_mfns, 59.40 op->u.add_memtype.type, 59.41 1); 59.42 - if (ret > 0) 59.43 + if ( ret > 0 ) 59.44 { 59.45 - (void)__put_user(0, &u_dom0_op->u.add_memtype.handle); 59.46 - (void)__put_user(ret, &u_dom0_op->u.add_memtype.reg); 59.47 + op->u.add_memtype.handle = 0; 59.48 + op->u.add_memtype.reg = ret; 59.49 + (void)copy_to_guest(u_dom0_op, op, 1); 59.50 ret = 0; 59.51 } 59.52 } 59.53 @@ -136,9 +138,10 @@ long arch_do_dom0_op(struct dom0_op *op, 59.54 if ( op->u.read_memtype.reg < num_var_ranges ) 59.55 { 59.56 mtrr_if->get(op->u.read_memtype.reg, &mfn, &nr_mfns, &type); 59.57 - (void)__put_user(mfn, &u_dom0_op->u.read_memtype.mfn); 59.58 - (void)__put_user(nr_mfns, &u_dom0_op->u.read_memtype.nr_mfns); 59.59 - (void)__put_user(type, &u_dom0_op->u.read_memtype.type); 59.60 + op->u.read_memtype.mfn = mfn; 59.61 + op->u.read_memtype.nr_mfns = nr_mfns; 59.62 + op->u.read_memtype.type = type; 59.63 + (void)copy_to_guest(u_dom0_op, op, 1); 59.64 ret = 0; 59.65 } 59.66 } 59.67 @@ -147,7 +150,7 @@ long arch_do_dom0_op(struct dom0_op *op, 59.68 case DOM0_MICROCODE: 59.69 { 59.70 extern int microcode_update(void *buf, unsigned long len); 59.71 - ret = microcode_update(op->u.microcode.data, op->u.microcode.length); 59.72 + ret = microcode_update(op->u.microcode.data.p, op->u.microcode.length); 59.73 } 59.74 break; 59.75 59.76 @@ -195,7 +198,7 @@ long arch_do_dom0_op(struct dom0_op *op, 59.77 memset(pi->hw_cap, 0, sizeof(pi->hw_cap)); 59.78 memcpy(pi->hw_cap, boot_cpu_data.x86_capability, NCAPINTS*4); 59.79 ret = 0; 59.80 - if ( copy_to_user(u_dom0_op, op, sizeof(*op)) ) 59.81 + if ( copy_to_guest(u_dom0_op, op, 1) ) 59.82 ret = -EFAULT; 59.83 } 59.84 break; 59.85 @@ -245,7 +248,7 @@ long arch_do_dom0_op(struct dom0_op *op, 59.86 59.87 put_domain(d); 59.88 59.89 - copy_to_user(u_dom0_op, op, sizeof(*op)); 59.90 + copy_to_guest(u_dom0_op, op, 1); 59.91 } 59.92 break; 59.93 59.94 @@ -255,7 +258,6 @@ long arch_do_dom0_op(struct dom0_op *op, 59.95 int n,j; 59.96 int num = op->u.getpageframeinfo2.num; 59.97 domid_t dom = op->u.getpageframeinfo2.domain; 59.98 - unsigned long *s_ptr = (unsigned long*) op->u.getpageframeinfo2.array; 59.99 struct domain *d; 59.100 unsigned long *l_arr; 59.101 ret = -ESRCH; 59.102 @@ -277,7 +279,8 @@ long 
arch_do_dom0_op(struct dom0_op *op, 59.103 { 59.104 int k = ((num-n)>GPF2_BATCH)?GPF2_BATCH:(num-n); 59.105 59.106 - if ( copy_from_user(l_arr, &s_ptr[n], k*sizeof(unsigned long)) ) 59.107 + if ( copy_from_guest_offset(l_arr, op->u.getpageframeinfo2.array, 59.108 + n, k) ) 59.109 { 59.110 ret = -EINVAL; 59.111 break; 59.112 @@ -320,7 +323,8 @@ long arch_do_dom0_op(struct dom0_op *op, 59.113 59.114 } 59.115 59.116 - if ( copy_to_user(&s_ptr[n], l_arr, k*sizeof(unsigned long)) ) 59.117 + if ( copy_to_guest_offset(op->u.getpageframeinfo2.array, 59.118 + n, l_arr, k) ) 59.119 { 59.120 ret = -EINVAL; 59.121 break; 59.122 @@ -341,7 +345,6 @@ long arch_do_dom0_op(struct dom0_op *op, 59.123 struct domain *d = find_domain_by_id(op->u.getmemlist.domain); 59.124 unsigned long max_pfns = op->u.getmemlist.max_pfns; 59.125 unsigned long mfn; 59.126 - unsigned long *buffer = op->u.getmemlist.buffer; 59.127 struct list_head *list_ent; 59.128 59.129 ret = -EINVAL; 59.130 @@ -353,19 +356,20 @@ long arch_do_dom0_op(struct dom0_op *op, 59.131 list_ent = d->page_list.next; 59.132 for ( i = 0; (i < max_pfns) && (list_ent != &d->page_list); i++ ) 59.133 { 59.134 - mfn = page_to_mfn(list_entry(list_ent, struct page_info, list)); 59.135 - if ( put_user(mfn, buffer) ) 59.136 + mfn = page_to_mfn(list_entry( 59.137 + list_ent, struct page_info, list)); 59.138 + if ( copy_to_guest_offset(op->u.getmemlist.buffer, 59.139 + i, &mfn, 1) ) 59.140 { 59.141 ret = -EFAULT; 59.142 break; 59.143 } 59.144 - buffer++; 59.145 list_ent = mfn_to_page(mfn)->list.next; 59.146 } 59.147 spin_unlock(&d->page_alloc_lock); 59.148 59.149 op->u.getmemlist.num_pfns = i; 59.150 - copy_to_user(u_dom0_op, op, sizeof(*op)); 59.151 + copy_to_guest(u_dom0_op, op, 1); 59.152 59.153 put_domain(d); 59.154 } 59.155 @@ -401,13 +405,12 @@ long arch_do_dom0_op(struct dom0_op *op, 59.156 entry.start = e820.map[i].addr; 59.157 entry.end = e820.map[i].addr + e820.map[i].size; 59.158 entry.is_ram = (e820.map[i].type == E820_RAM); 59.159 - (void)copy_to_user( 59.160 - &op->u.physical_memory_map.memory_map[i], 59.161 - &entry, sizeof(entry)); 59.162 + (void)copy_to_guest_offset( 59.163 + op->u.physical_memory_map.memory_map, i, &entry, 1); 59.164 } 59.165 59.166 op->u.physical_memory_map.nr_map_entries = i; 59.167 - (void)copy_to_user(u_dom0_op, op, sizeof(*op)); 59.168 + (void)copy_to_guest(u_dom0_op, op, 1); 59.169 } 59.170 break; 59.171
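Every hunk in this file repeats one transformation: raw user-space copies (copy_to_user, copy_from_user, put_user) become typed guest-handle copies (copy_to_guest, copy_from_guest_offset, ...), with counts given in array elements rather than bytes. A sketch of the resulting shape — not standalone code, and the op body is invented for illustration, but the calling convention matches the hunks above:

    long example_arch_do_dom0_op(struct dom0_op *op,
                                 GUEST_HANDLE(dom0_op_t) u_dom0_op)
    {
        /* Fill in the reply locally, then copy the whole op back once. */
        op->u.sched_id.sched_id = 0;           /* illustrative field only */
        if ( copy_to_guest(u_dom0_op, op, 1) ) /* 1 element, not sizeof(*op) */
            return -EFAULT;
        return 0;
    }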
60.1 --- a/xen/arch/x86/hvm/svm/svm.c Mon Mar 06 09:09:18 2006 -0700 60.2 +++ b/xen/arch/x86/hvm/svm/svm.c Mon Mar 06 10:21:35 2006 -0700 60.3 @@ -247,6 +247,7 @@ void svm_load_msrs(void) 60.4 void svm_restore_msrs(struct vcpu *v) 60.5 { 60.6 } 60.7 +#endif 60.8 60.9 #define IS_CANO_ADDRESS(add) 1 60.10 60.11 @@ -297,7 +298,7 @@ static inline int long_mode_do_msr_read( 60.12 return 0; 60.13 } 60.14 60.15 - HVM_DBG_LOG(DBG_LEVEL_2, "mode_do_msr_read: msr_content: %lx\n", 60.16 + HVM_DBG_LOG(DBG_LEVEL_2, "mode_do_msr_read: msr_content: %"PRIx64"\n", 60.17 msr_content); 60.18 60.19 regs->eax = msr_content & 0xffffffff; 60.20 @@ -311,12 +312,14 @@ static inline int long_mode_do_msr_write 60.21 struct vcpu *vc = current; 60.22 struct vmcb_struct *vmcb = vc->arch.hvm_svm.vmcb; 60.23 60.24 - HVM_DBG_LOG(DBG_LEVEL_1, "mode_do_msr_write msr %lx msr_content %lx\n", 60.25 - regs->ecx, msr_content); 60.26 + HVM_DBG_LOG(DBG_LEVEL_1, "mode_do_msr_write msr %lx " 60.27 + "msr_content %"PRIx64"\n", 60.28 + (unsigned long)regs->ecx, msr_content); 60.29 60.30 switch (regs->ecx) 60.31 { 60.32 case MSR_EFER: 60.33 +#ifdef __x86_64__ 60.34 if ((msr_content & EFER_LME) ^ test_bit(SVM_CPU_STATE_LME_ENABLED, 60.35 &vc->arch.hvm_svm.cpu_state)) 60.36 { 60.37 @@ -337,6 +340,7 @@ static inline int long_mode_do_msr_write 60.38 if ((msr_content ^ vmcb->efer) & EFER_LME) 60.39 msr_content &= ~EFER_LME; 60.40 /* No update for LME/LMA since it have no effect */ 60.41 +#endif 60.42 vmcb->efer = msr_content | EFER_SVME; 60.43 break; 60.44 60.45 @@ -383,18 +387,6 @@ static inline int long_mode_do_msr_write 60.46 return 1; 60.47 } 60.48 60.49 -#else 60.50 -static inline int long_mode_do_msr_read(struct cpu_user_regs *regs) 60.51 -{ 60.52 - return 0; 60.53 -} 60.54 - 60.55 -static inline int long_mode_do_msr_write(struct cpu_user_regs *regs) 60.56 -{ 60.57 - return 0; 60.58 -} 60.59 -#endif 60.60 - 60.61 void svm_store_cpu_guest_ctrl_regs(struct vcpu *v, unsigned long crs[8]) 60.62 { 60.63 struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb; 60.64 @@ -752,7 +744,8 @@ void svm_relinquish_resources(struct vcp 60.65 /* unmap IO shared page */ 60.66 struct domain *d = v->domain; 60.67 if ( d->arch.hvm_domain.shared_page_va ) 60.68 - unmap_domain_page((void *)d->arch.hvm_domain.shared_page_va); 60.69 + unmap_domain_page_global( 60.70 + (void *)d->arch.hvm_domain.shared_page_va); 60.71 shadow_direct_map_clean(d); 60.72 } 60.73 60.74 @@ -937,10 +930,8 @@ static void svm_vmexit_do_cpuid(struct v 60.75 60.76 if (input == 1) 60.77 { 60.78 -#ifndef __x86_64__ 60.79 if ( hvm_apic_support(v->domain) && 60.80 !vlapic_global_enabled((VLAPIC(v))) ) 60.81 -#endif 60.82 clear_bit(X86_FEATURE_APIC, &edx); 60.83 60.84 #if CONFIG_PAGING_LEVELS < 3
61.1 --- a/xen/arch/x86/hvm/svm/x86_32/exits.S Mon Mar 06 09:09:18 2006 -0700 61.2 +++ b/xen/arch/x86/hvm/svm/x86_32/exits.S Mon Mar 06 10:21:35 2006 -0700 61.3 @@ -88,9 +88,6 @@ 61.4 #define STGI .byte 0x0F,0x01,0xDC 61.5 #define CLGI .byte 0x0F,0x01,0xDD 61.6 61.7 -#define DO_TSC_OFFSET 0 61.8 -#define DO_FPUSAVE 0 61.9 - 61.10 ENTRY(svm_asm_do_launch) 61.11 sti 61.12 CLGI 61.13 @@ -101,36 +98,6 @@ ENTRY(svm_asm_do_launch) 61.14 movl VCPU_svm_hsa_pa(%ebx), %eax 61.15 VMSAVE 61.16 61.17 -#if DO_FPUSAVE 61.18 - mov %cr0, %eax 61.19 - push %eax 61.20 - clts 61.21 - lea VCPU_arch_guest_fpu_ctxt(%ebx), %eax 61.22 - fxrstor (%eax) 61.23 - pop %eax 61.24 - mov %eax, %cr0 61.25 -#endif 61.26 - 61.27 -#if (DO_TSC_OFFSET) 61.28 - pushl %edx /* eax and edx get trashed by rdtsc */ 61.29 - pushl %eax 61.30 - rdtsc 61.31 - subl VCPU_svm_vmexit_tsc(%ebx),%eax /* tsc's from */ 61.32 - sbbl VCPU_svm_vmexit_tsc+4(%ebx),%edx /* last #VMEXIT? */ 61.33 - subl %eax,VMCB_tsc_offset(%ecx) /* subtract from running TSC_OFFSET */ 61.34 - sbbl %edx,VMCB_tsc_offset+4(%ecx) 61.35 - subl $20000,VMCB_tsc_offset(%ecx) /* fudge factor for VMXXX calls */ 61.36 - sbbl $0,VMCB_tsc_offset+4(%ecx) 61.37 - 61.38 - /* 61.39 - * TODO: may need to add a kludge factor to account for all the cycles 61.40 - * burned in VMLOAD, VMSAVE, VMRUN... 61.41 - */ 61.42 - 61.43 - popl %eax 61.44 - popl %edx 61.45 - #endif 61.46 - 61.47 movl VCPU_svm_vmcb_pa(%ebx), %eax 61.48 popl %ebx 61.49 popl %ecx 61.50 @@ -150,31 +117,7 @@ ENTRY(svm_asm_do_launch) 61.51 VMSAVE 61.52 /* eax is the only register we're allowed to touch here... */ 61.53 61.54 -#if DO_FPUSAVE 61.55 - mov %cr0, %eax 61.56 - push %eax 61.57 - clts 61.58 GET_CURRENT(%eax) 61.59 - lea VCPU_arch_guest_fpu_ctxt(%eax), %eax 61.60 - fxsave (%eax) 61.61 - fnclex 61.62 - pop %eax 61.63 - mov %eax, %cr0 61.64 -#endif 61.65 - 61.66 - GET_CURRENT(%eax) 61.67 - 61.68 -#if (DO_TSC_OFFSET) 61.69 - pushl %edx 61.70 - pushl %ebx 61.71 - movl %eax,%ebx 61.72 - rdtsc 61.73 - movl %eax,VCPU_svm_vmexit_tsc(%ebx) 61.74 - movl %edx,VCPU_svm_vmexit_tsc+4(%ebx) 61.75 - movl %ebx,%eax 61.76 - popl %ebx 61.77 - popl %edx 61.78 -#endif 61.79 61.80 movl VCPU_svm_hsa_pa(%eax), %eax 61.81 VMLOAD
62.1 --- a/xen/arch/x86/hvm/vioapic.c Mon Mar 06 09:09:18 2006 -0700 62.2 +++ b/xen/arch/x86/hvm/vioapic.c Mon Mar 06 10:21:35 2006 -0700 62.3 @@ -52,20 +52,6 @@ static void ioapic_enable(hvm_vioapic_t 62.4 s->flags &= ~IOAPIC_ENABLE_FLAG; 62.5 } 62.6 62.7 -static void ioapic_dump_redir(hvm_vioapic_t *s, uint8_t entry) 62.8 -{ 62.9 - RedirStatus redir = s->redirtbl[entry]; 62.10 - 62.11 - HVM_DBG_LOG(DBG_LEVEL_IOAPIC, "ioapic_dump_redir " 62.12 - "entry %x vector %x deliver_mod %x destmode %x delivestatus %x " 62.13 - "polarity %x remote_irr %x trigmod %x mask %x dest_id %x\n", 62.14 - entry, redir.RedirForm.vector, redir.RedirForm.deliver_mode, 62.15 - redir.RedirForm.destmode, redir.RedirForm.delivestatus, 62.16 - redir.RedirForm.polarity, redir.RedirForm.remoteirr, 62.17 - redir.RedirForm.trigmod, redir.RedirForm.mask, 62.18 - redir.RedirForm.dest_id); 62.19 -} 62.20 - 62.21 #ifdef HVM_DOMAIN_SAVE_RESTORE 62.22 void ioapic_save(QEMUFile* f, void* opaque) 62.23 { 62.24 @@ -534,7 +520,19 @@ void hvm_vioapic_set_irq(struct domain * 62.25 if (!IOAPICEnabled(s) || s->redirtbl[irq].RedirForm.mask) 62.26 return; 62.27 62.28 - ioapic_dump_redir(s, irq); 62.29 + HVM_DBG_LOG(DBG_LEVEL_IOAPIC, "hvm_vioapic_set_irq entry %x " 62.30 + "vector %x deliver_mod %x destmode %x delivestatus %x " 62.31 + "polarity %x remote_irr %x trigmod %x mask %x dest_id %x\n", 62.32 + irq, 62.33 + s->redirtbl[irq].RedirForm.vector, 62.34 + s->redirtbl[irq].RedirForm.deliver_mode, 62.35 + s->redirtbl[irq].RedirForm.destmode, 62.36 + s->redirtbl[irq].RedirForm.delivestatus, 62.37 + s->redirtbl[irq].RedirForm.polarity, 62.38 + s->redirtbl[irq].RedirForm.remoteirr, 62.39 + s->redirtbl[irq].RedirForm.trigmod, 62.40 + s->redirtbl[irq].RedirForm.mask, 62.41 + s->redirtbl[irq].RedirForm.dest_id); 62.42 62.43 if (irq >= 0 && irq < IOAPIC_NUM_PINS) { 62.44 uint32_t bit = 1 << irq;
63.1 --- a/xen/arch/x86/hvm/vmx/vmx.c Mon Mar 06 09:09:18 2006 -0700 63.2 +++ b/xen/arch/x86/hvm/vmx/vmx.c Mon Mar 06 10:21:35 2006 -0700 63.3 @@ -172,7 +172,7 @@ static inline int long_mode_do_msr_read( 63.4 switch(regs->ecx){ 63.5 case MSR_EFER: 63.6 msr_content = msr->msr_items[VMX_INDEX_MSR_EFER]; 63.7 - HVM_DBG_LOG(DBG_LEVEL_2, "EFER msr_content %llx\n", (unsigned long long)msr_content); 63.8 + HVM_DBG_LOG(DBG_LEVEL_2, "EFER msr_content %"PRIx64"\n", msr_content); 63.9 if (test_bit(VMX_CPU_STATE_LME_ENABLED, 63.10 &vc->arch.hvm_vmx.cpu_state)) 63.11 msr_content |= 1 << _EFER_LME; 63.12 @@ -202,7 +202,8 @@ static inline int long_mode_do_msr_read( 63.13 default: 63.14 return 0; 63.15 } 63.16 - HVM_DBG_LOG(DBG_LEVEL_2, "mode_do_msr_read: msr_content: %lx\n", msr_content); 63.17 + HVM_DBG_LOG(DBG_LEVEL_2, "mode_do_msr_read: msr_content: %"PRIx64"\n", 63.18 + msr_content); 63.19 regs->eax = msr_content & 0xffffffff; 63.20 regs->edx = msr_content >> 32; 63.21 return 1; 63.22 @@ -216,8 +217,9 @@ static inline int long_mode_do_msr_write 63.23 struct vmx_msr_state * host_state = 63.24 &percpu_msr[smp_processor_id()]; 63.25 63.26 - HVM_DBG_LOG(DBG_LEVEL_1, " mode_do_msr_write msr %lx msr_content %lx\n", 63.27 - regs->ecx, msr_content); 63.28 + HVM_DBG_LOG(DBG_LEVEL_1, " mode_do_msr_write msr %lx " 63.29 + "msr_content %"PRIx64"\n", 63.30 + (unsigned long)regs->ecx, msr_content); 63.31 63.32 switch (regs->ecx){ 63.33 case MSR_EFER: 63.34 @@ -882,7 +884,7 @@ static void vmx_io_instruction(struct cp 63.35 __vmread(GUEST_RFLAGS, &eflags); 63.36 vm86 = eflags & X86_EFLAGS_VM ? 1 : 0; 63.37 63.38 - HVM_DBG_LOG(DBG_LEVEL_1, 63.39 + HVM_DBG_LOG(DBG_LEVEL_IO, 63.40 "vmx_io_instruction: vm86 %d, eip=%lx:%lx, " 63.41 "exit_qualification = %lx", 63.42 vm86, cs, eip, exit_qualification);
64.1 --- a/xen/arch/x86/io_apic.c Mon Mar 06 09:09:18 2006 -0700 64.2 +++ b/xen/arch/x86/io_apic.c Mon Mar 06 10:21:35 2006 -0700 64.3 @@ -1548,8 +1548,9 @@ static inline void check_timer(void) 64.4 */ 64.5 apic_write_around(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT); 64.6 init_8259A(1); 64.7 - timer_ack = 1; 64.8 - enable_8259A_irq(0); 64.9 + /* XEN: Ripped out the legacy missed-tick logic, so below is not needed. */ 64.10 + /*timer_ack = 1;*/ 64.11 + /*enable_8259A_irq(0);*/ 64.12 64.13 pin1 = find_isa_irq_pin(0, mp_INT); 64.14 apic1 = find_isa_irq_apic(0, mp_INT); 64.15 @@ -1617,7 +1618,7 @@ static inline void check_timer(void) 64.16 64.17 printk(KERN_INFO "...trying to set up timer as ExtINT IRQ..."); 64.18 64.19 - timer_ack = 0; 64.20 + /*timer_ack = 0;*/ 64.21 init_8259A(0); 64.22 make_8259A_irq(0); 64.23 apic_write_around(APIC_LVT0, APIC_DM_EXTINT); 64.24 @@ -1633,16 +1634,6 @@ static inline void check_timer(void) 64.25 "report. Then try booting with the 'noapic' option"); 64.26 } 64.27 64.28 -#define NR_IOAPIC_BIOSIDS 256 64.29 -static u8 ioapic_biosid_to_apic_enum[NR_IOAPIC_BIOSIDS]; 64.30 -static void store_ioapic_biosid_mapping(void) 64.31 -{ 64.32 - u8 apic; 64.33 - memset(ioapic_biosid_to_apic_enum, ~0, NR_IOAPIC_BIOSIDS); 64.34 - for ( apic = 0; apic < nr_ioapics; apic++ ) 64.35 - ioapic_biosid_to_apic_enum[mp_ioapics[apic].mpc_apicid] = apic; 64.36 -} 64.37 - 64.38 /* 64.39 * 64.40 * IRQ's that are handled by the PIC in the MPS IOAPIC case. 64.41 @@ -1654,8 +1645,6 @@ static void store_ioapic_biosid_mapping( 64.42 64.43 void __init setup_IO_APIC(void) 64.44 { 64.45 - store_ioapic_biosid_mapping(); 64.46 - 64.47 enable_IO_APIC(); 64.48 64.49 if (acpi_ioapic) 64.50 @@ -1839,50 +1828,45 @@ int io_apic_set_pci_routing (int ioapic, 64.51 64.52 #endif /*CONFIG_ACPI_BOOT*/ 64.53 64.54 - 64.55 -int ioapic_guest_read(int apicid, int address, u32 *pval) 64.56 +static int ioapic_physbase_to_id(unsigned long physbase) 64.57 { 64.58 - u32 val; 64.59 - int apicenum; 64.60 - union IO_APIC_reg_00 reg_00; 64.61 + int apic; 64.62 + for ( apic = 0; apic < nr_ioapics; apic++ ) 64.63 + if ( mp_ioapics[apic].mpc_apicaddr == physbase ) 64.64 + return apic; 64.65 + return -EINVAL; 64.66 +} 64.67 + 64.68 +int ioapic_guest_read(unsigned long physbase, unsigned int reg, u32 *pval) 64.69 +{ 64.70 + int apic; 64.71 unsigned long flags; 64.72 64.73 - if ( (apicid >= NR_IOAPIC_BIOSIDS) || 64.74 - ((apicenum = ioapic_biosid_to_apic_enum[apicid]) >= nr_ioapics) ) 64.75 - return -EINVAL; 64.76 + if ( (apic = ioapic_physbase_to_id(physbase)) < 0 ) 64.77 + return apic; 64.78 64.79 spin_lock_irqsave(&ioapic_lock, flags); 64.80 - val = io_apic_read(apicenum, address); 64.81 + *pval = io_apic_read(apic, reg); 64.82 spin_unlock_irqrestore(&ioapic_lock, flags); 64.83 64.84 - /* Rewrite APIC ID to what the BIOS originally specified. 
*/ 64.85 - if ( address == 0 ) 64.86 - { 64.87 - reg_00.raw = val; 64.88 - reg_00.bits.ID = apicid; 64.89 - val = reg_00.raw; 64.90 - } 64.91 - 64.92 - *pval = val; 64.93 return 0; 64.94 } 64.95 64.96 -int ioapic_guest_write(int apicid, int address, u32 val) 64.97 +int ioapic_guest_write(unsigned long physbase, unsigned int reg, u32 val) 64.98 { 64.99 - int apicenum, pin, irq; 64.100 + int apic, pin, irq; 64.101 struct IO_APIC_route_entry rte = { 0 }; 64.102 struct irq_pin_list *entry; 64.103 unsigned long flags; 64.104 64.105 - if ( (apicid >= NR_IOAPIC_BIOSIDS) || 64.106 - ((apicenum = ioapic_biosid_to_apic_enum[apicid]) >= nr_ioapics) ) 64.107 - return -EINVAL; 64.108 + if ( (apic = ioapic_physbase_to_id(physbase)) < 0 ) 64.109 + return apic; 64.110 64.111 /* Only write to the first half of a route entry. */ 64.112 - if ( (address < 0x10) || (address & 1) ) 64.113 + if ( (reg < 0x10) || (reg & 1) ) 64.114 return 0; 64.115 64.116 - pin = (address - 0x10) >> 1; 64.117 + pin = (reg - 0x10) >> 1; 64.118 64.119 *(u32 *)&rte = val; 64.120 rte.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS); 64.121 @@ -1898,7 +1882,7 @@ int ioapic_guest_write(int apicid, int a 64.122 if ( rte.delivery_mode > dest_LowestPrio ) 64.123 { 64.124 printk("ERROR: Attempt to write weird IOAPIC destination mode!\n"); 64.125 - printk(" APIC=%d/%d, lo-reg=%x\n", apicid, pin, val); 64.126 + printk(" APIC=%d/%d, lo-reg=%x\n", apic, pin, val); 64.127 return -EINVAL; 64.128 } 64.129 64.130 @@ -1923,19 +1907,19 @@ int ioapic_guest_write(int apicid, int a 64.131 /* Record the pin<->irq mapping. */ 64.132 for ( entry = &irq_2_pin[irq]; ; entry = &irq_2_pin[entry->next] ) 64.133 { 64.134 - if ( (entry->apic == apicenum) && (entry->pin == pin) ) 64.135 + if ( (entry->apic == apic) && (entry->pin == pin) ) 64.136 break; 64.137 if ( !entry->next ) 64.138 { 64.139 - add_pin_to_irq(irq, apicenum, pin); 64.140 + add_pin_to_irq(irq, apic, pin); 64.141 break; 64.142 } 64.143 } 64.144 } 64.145 64.146 spin_lock_irqsave(&ioapic_lock, flags); 64.147 - io_apic_write(apicenum, 0x10 + 2 * pin, *(((int *)&rte) + 0)); 64.148 - io_apic_write(apicenum, 0x11 + 2 * pin, *(((int *)&rte) + 1)); 64.149 + io_apic_write(apic, 0x10 + 2 * pin, *(((int *)&rte) + 0)); 64.150 + io_apic_write(apic, 0x11 + 2 * pin, *(((int *)&rte) + 1)); 64.151 spin_unlock_irqrestore(&ioapic_lock, flags); 64.152 64.153 return 0;
65.1 --- a/xen/arch/x86/microcode.c Mon Mar 06 09:09:18 2006 -0700 65.2 +++ b/xen/arch/x86/microcode.c Mon Mar 06 10:21:35 2006 -0700 65.3 @@ -116,7 +116,7 @@ MODULE_LICENSE("GPL"); 65.4 #define exttable_size(et) ((et)->count * EXT_SIGNATURE_SIZE + EXT_HEADER_SIZE) 65.5 65.6 /* serialize access to the physical write to MSR 0x79 */ 65.7 -static spinlock_t microcode_update_lock = SPIN_LOCK_UNLOCKED; 65.8 +static DEFINE_SPINLOCK(microcode_update_lock); 65.9 65.10 /* no concurrent ->write()s are allowed on /dev/cpu/microcode */ 65.11 static DECLARE_MUTEX(microcode_sem); 65.12 @@ -166,7 +166,8 @@ static void collect_cpu_info (void *unus 65.13 } 65.14 65.15 wrmsr(MSR_IA32_UCODE_REV, 0, 0); 65.16 - __asm__ __volatile__ ("cpuid" : : : "ax", "bx", "cx", "dx"); 65.17 + /* see notes above for revision 1.07. Apparent chip bug */ 65.18 + sync_core(); 65.19 /* get the current revision from MSR 0x8B */ 65.20 rdmsr(MSR_IA32_UCODE_REV, val[0], uci->rev); 65.21 pr_debug("microcode: collect_cpu_info : sig=0x%x, pf=0x%x, rev=0x%x\n", 65.22 @@ -366,7 +367,7 @@ static void do_update_one (void * unused 65.23 struct ucode_cpu_info *uci = ucode_cpu_info + cpu_num; 65.24 65.25 if (uci->mc == NULL) { 65.26 - printk(KERN_INFO "microcode: No suitable data for CPU%d\n", cpu_num); 65.27 + printk(KERN_INFO "microcode: No new microcode data for CPU%d\n", cpu_num); 65.28 return; 65.29 } 65.30 65.31 @@ -379,7 +380,9 @@ static void do_update_one (void * unused 65.32 (unsigned long) uci->mc->bits >> 16 >> 16); 65.33 wrmsr(MSR_IA32_UCODE_REV, 0, 0); 65.34 65.35 - __asm__ __volatile__ ("cpuid" : : : "ax", "bx", "cx", "dx"); 65.36 + /* see notes above for revision 1.07. Apparent chip bug */ 65.37 + sync_core(); 65.38 + 65.39 /* get the current revision from MSR 0x8B */ 65.40 rdmsr(MSR_IA32_UCODE_REV, val[0], val[1]); 65.41
66.1 --- a/xen/arch/x86/mm.c Mon Mar 06 09:09:18 2006 -0700 66.2 +++ b/xen/arch/x86/mm.c Mon Mar 06 10:21:35 2006 -0700 66.3 @@ -506,7 +506,6 @@ get_page_from_l2e( 66.4 vaddr <<= PGT_va_shift; 66.5 rc = get_page_and_type_from_pagenr( 66.6 l2e_get_pfn(l2e), PGT_l1_page_table | vaddr, d); 66.7 - 66.8 #if CONFIG_PAGING_LEVELS == 2 66.9 if ( unlikely(!rc) ) 66.10 rc = get_linear_pagetable(l2e, pfn, d); 66.11 @@ -3187,8 +3186,8 @@ static int ptwr_emulated_update( 66.12 ptwr_flush(d, PTWR_PT_INACTIVE); 66.13 66.14 /* Read the PTE that maps the page being updated. */ 66.15 - if (__copy_from_user(&pte, &linear_pg_table[l1_linear_offset(addr)], 66.16 - sizeof(pte))) 66.17 + if ( __copy_from_user(&pte, &linear_pg_table[l1_linear_offset(addr)], 66.18 + sizeof(pte)) ) 66.19 { 66.20 MEM_LOG("ptwr_emulate: Cannot read thru linear_pg_table"); 66.21 return X86EMUL_UNHANDLEABLE; 66.22 @@ -3198,15 +3197,10 @@ static int ptwr_emulated_update( 66.23 page = mfn_to_page(pfn); 66.24 66.25 /* We are looking only for read-only mappings of p.t. pages. */ 66.26 - if ( ((l1e_get_flags(pte) & (_PAGE_RW|_PAGE_PRESENT)) != _PAGE_PRESENT) || 66.27 - ((page->u.inuse.type_info & PGT_type_mask) != PGT_l1_page_table) || 66.28 - (page_get_owner(page) != d) ) 66.29 - { 66.30 - MEM_LOG("ptwr_emulate: Page is mistyped or bad pte " 66.31 - "(%lx, %" PRtype_info ")", 66.32 - l1e_get_pfn(pte), page->u.inuse.type_info); 66.33 - return X86EMUL_UNHANDLEABLE; 66.34 - } 66.35 + ASSERT((l1e_get_flags(pte) & (_PAGE_RW|_PAGE_PRESENT)) == _PAGE_PRESENT); 66.36 + ASSERT((page->u.inuse.type_info & PGT_type_mask) == PGT_l1_page_table); 66.37 + ASSERT((page->u.inuse.type_info & PGT_count_mask) != 0); 66.38 + ASSERT(page_get_owner(page) == d); 66.39 66.40 /* Check the new PTE. */ 66.41 nl1e = l1e_from_intpte(val); 66.42 @@ -3266,8 +3260,11 @@ static int ptwr_emulated_cmpxchg8b( 66.43 unsigned long new, 66.44 unsigned long new_hi) 66.45 { 66.46 - return ptwr_emulated_update( 66.47 - addr, ((u64)old_hi << 32) | old, ((u64)new_hi << 32) | new, 8, 1); 66.48 + if ( CONFIG_PAGING_LEVELS == 2 ) 66.49 + return X86EMUL_UNHANDLEABLE; 66.50 + else 66.51 + return ptwr_emulated_update( 66.52 + addr, ((u64)old_hi << 32) | old, ((u64)new_hi << 32) | new, 8, 1); 66.53 } 66.54 66.55 static struct x86_mem_emulator ptwr_mem_emulator = {
67.1 --- a/xen/arch/x86/nmi.c Mon Mar 06 09:09:18 2006 -0700 67.2 +++ b/xen/arch/x86/nmi.c Mon Mar 06 10:21:35 2006 -0700 67.3 @@ -322,15 +322,9 @@ void __pminit setup_apic_nmi_watchdog(vo 67.4 case X86_VENDOR_INTEL: 67.5 switch (boot_cpu_data.x86) { 67.6 case 6: 67.7 - if (boot_cpu_data.x86_model > 0xd) 67.8 - return; 67.9 - 67.10 setup_p6_watchdog(); 67.11 break; 67.12 case 15: 67.13 - if (boot_cpu_data.x86_model > 0x4) 67.14 - return; 67.15 - 67.16 if (!setup_p4_watchdog()) 67.17 return; 67.18 break;
68.1 --- a/xen/arch/x86/physdev.c Mon Mar 06 09:09:18 2006 -0700 68.2 +++ b/xen/arch/x86/physdev.c Mon Mar 06 10:21:35 2006 -0700 68.3 @@ -11,8 +11,12 @@ 68.4 #include <public/xen.h> 68.5 #include <public/physdev.h> 68.6 68.7 -extern int ioapic_guest_read(int apicid, int address, u32 *pval); 68.8 -extern int ioapic_guest_write(int apicid, int address, u32 pval); 68.9 +extern int 68.10 +ioapic_guest_read( 68.11 + unsigned long physbase, unsigned int reg, u32 *pval); 68.12 +extern int 68.13 +ioapic_guest_write( 68.14 + unsigned long physbase, unsigned int reg, u32 pval); 68.15 68.16 /* 68.17 * Demuxing hypercall. 68.18 @@ -49,7 +53,9 @@ long do_physdev_op(struct physdev_op *uo 68.19 if ( !IS_PRIV(current->domain) ) 68.20 break; 68.21 ret = ioapic_guest_read( 68.22 - op.u.apic_op.apic, op.u.apic_op.offset, &op.u.apic_op.value); 68.23 + op.u.apic_op.apic_physbase, 68.24 + op.u.apic_op.reg, 68.25 + &op.u.apic_op.value); 68.26 break; 68.27 68.28 case PHYSDEVOP_APIC_WRITE: 68.29 @@ -57,7 +63,9 @@ long do_physdev_op(struct physdev_op *uo 68.30 if ( !IS_PRIV(current->domain) ) 68.31 break; 68.32 ret = ioapic_guest_write( 68.33 - op.u.apic_op.apic, op.u.apic_op.offset, op.u.apic_op.value); 68.34 + op.u.apic_op.apic_physbase, 68.35 + op.u.apic_op.reg, 68.36 + op.u.apic_op.value); 68.37 break; 68.38 68.39 case PHYSDEVOP_ASSIGN_VECTOR:
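With the io_apic.c and physdev.c hunks above, a guest names an IOAPIC by its physical base address (resolved by a linear scan of mp_ioapics) instead of a BIOS APIC id looked up in the now-deleted ioapic_biosid_to_apic_enum table. A hypothetical dom0-side caller, assuming the matching PHYSDEVOP_APIC_READ command and the usual HYPERVISOR_physdev_op guest wrapper; the field names follow the hunk above, and the base address is just an example value:

    #include <public/physdev.h>

    static u32 read_ioapic_reg(unsigned long physbase, unsigned int reg)
    {
        struct physdev_op op;
        op.cmd = PHYSDEVOP_APIC_READ;
        op.u.apic_op.apic_physbase = physbase;   /* e.g. 0xfec00000 */
        op.u.apic_op.reg = reg;
        if ( HYPERVISOR_physdev_op(&op) != 0 )
            return ~0u;                          /* hypercall failed */
        return op.u.apic_op.value;
    }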
69.1 --- a/xen/arch/x86/setup.c Mon Mar 06 09:09:18 2006 -0700 69.2 +++ b/xen/arch/x86/setup.c Mon Mar 06 10:21:35 2006 -0700 69.3 @@ -13,6 +13,7 @@ 69.4 #include <xen/multiboot.h> 69.5 #include <xen/domain_page.h> 69.6 #include <xen/compile.h> 69.7 +#include <xen/gdbstub.h> 69.8 #include <public/version.h> 69.9 #include <asm/bitops.h> 69.10 #include <asm/smp.h> 69.11 @@ -479,6 +480,8 @@ void __init __start_xen(multiboot_info_t 69.12 printk("Brought up %ld CPUs\n", (long)num_online_cpus()); 69.13 smp_cpus_done(max_cpus); 69.14 69.15 + initialise_gdb(); /* could be moved earlier */ 69.16 + 69.17 do_initcalls(); 69.18 69.19 schedulers_start();
70.1 --- a/xen/arch/x86/shadow.c Mon Mar 06 09:09:18 2006 -0700 70.2 +++ b/xen/arch/x86/shadow.c Mon Mar 06 10:21:35 2006 -0700 70.3 @@ -279,8 +279,8 @@ alloc_shadow_page(struct domain *d, 70.4 psh_type == PGT_l4_shadow ) /* allocated for PAE PDP page */ 70.5 page = alloc_domheap_pages(NULL, 0, ALLOC_DOM_DMA); 70.6 else if ( d->arch.ops->guest_paging_levels == PAGING_L3 && 70.7 - psh_type == PGT_l3_shadow ) /* allocated for PAE PDP page */ 70.8 - page = alloc_domheap_pages(NULL, 0, ALLOC_DOM_DMA); 70.9 + (psh_type == PGT_l3_shadow || psh_type == PGT_l4_shadow) ) 70.10 + page = alloc_domheap_pages(NULL, 0, ALLOC_DOM_DMA); /* allocated for PAE PDP page */ 70.11 else 70.12 page = alloc_domheap_page(NULL); 70.13 #endif
71.1 --- a/xen/arch/x86/shadow32.c Mon Mar 06 09:09:18 2006 -0700 71.2 +++ b/xen/arch/x86/shadow32.c Mon Mar 06 10:21:35 2006 -0700 71.3 @@ -29,6 +29,7 @@ 71.4 #include <xen/event.h> 71.5 #include <xen/sched.h> 71.6 #include <xen/trace.h> 71.7 +#include <xen/guest_access.h> 71.8 71.9 #define MFN_PINNED(_x) (mfn_to_page(_x)->u.inuse.type_info & PGT_pinned) 71.10 #define va_to_l1mfn(_ed, _va) \ 71.11 @@ -1508,14 +1509,14 @@ static int shadow_mode_table_op( 71.12 d->arch.shadow_fault_count = 0; 71.13 d->arch.shadow_dirty_count = 0; 71.14 71.15 - if ( (sc->dirty_bitmap == NULL) || 71.16 + if ( guest_handle_is_null(sc->dirty_bitmap) || 71.17 (d->arch.shadow_dirty_bitmap == NULL) ) 71.18 { 71.19 rc = -EINVAL; 71.20 break; 71.21 } 71.22 71.23 - if(sc->pages > d->arch.shadow_dirty_bitmap_size) 71.24 + if ( sc->pages > d->arch.shadow_dirty_bitmap_size ) 71.25 sc->pages = d->arch.shadow_dirty_bitmap_size; 71.26 71.27 #define chunk (8*1024) /* Transfer and clear in 1kB chunks for L1 cache. */ 71.28 @@ -1524,10 +1525,10 @@ static int shadow_mode_table_op( 71.29 int bytes = ((((sc->pages - i) > chunk) ? 71.30 chunk : (sc->pages - i)) + 7) / 8; 71.31 71.32 - if (copy_to_user( 71.33 - sc->dirty_bitmap + (i/(8*sizeof(unsigned long))), 71.34 - d->arch.shadow_dirty_bitmap +(i/(8*sizeof(unsigned long))), 71.35 - bytes)) 71.36 + if ( copy_to_guest_offset( 71.37 + sc->dirty_bitmap, i/(8*sizeof(unsigned long)), 71.38 + d->arch.shadow_dirty_bitmap +(i/(8*sizeof(unsigned long))), 71.39 + (bytes+sizeof(unsigned long)-1) / sizeof(unsigned long)) ) 71.40 { 71.41 rc = -EINVAL; 71.42 break; 71.43 @@ -1544,18 +1545,20 @@ static int shadow_mode_table_op( 71.44 sc->stats.fault_count = d->arch.shadow_fault_count; 71.45 sc->stats.dirty_count = d->arch.shadow_dirty_count; 71.46 71.47 - if ( (sc->dirty_bitmap == NULL) || 71.48 + if ( guest_handle_is_null(sc->dirty_bitmap) || 71.49 (d->arch.shadow_dirty_bitmap == NULL) ) 71.50 { 71.51 rc = -EINVAL; 71.52 break; 71.53 } 71.54 71.55 - if(sc->pages > d->arch.shadow_dirty_bitmap_size) 71.56 + if ( sc->pages > d->arch.shadow_dirty_bitmap_size ) 71.57 sc->pages = d->arch.shadow_dirty_bitmap_size; 71.58 71.59 - if (copy_to_user(sc->dirty_bitmap, 71.60 - d->arch.shadow_dirty_bitmap, (sc->pages+7)/8)) 71.61 + if ( copy_to_guest(sc->dirty_bitmap, 71.62 + d->arch.shadow_dirty_bitmap, 71.63 + (((sc->pages+7)/8)+sizeof(unsigned long)-1) / 71.64 + sizeof(unsigned long)) ) 71.65 { 71.66 rc = -EINVAL; 71.67 break;
72.1 --- a/xen/arch/x86/shadow_public.c Mon Mar 06 09:09:18 2006 -0700 72.2 +++ b/xen/arch/x86/shadow_public.c Mon Mar 06 10:21:35 2006 -0700 72.3 @@ -29,6 +29,7 @@ 72.4 #include <xen/event.h> 72.5 #include <xen/sched.h> 72.6 #include <xen/trace.h> 72.7 +#include <xen/guest_access.h> 72.8 #include <asm/shadow_64.h> 72.9 72.10 static int alloc_p2m_table(struct domain *d); 72.11 @@ -413,7 +414,8 @@ static void alloc_monitor_pagetable(stru 72.12 (l3e_get_flags(mpl3e[i]) & _PAGE_PRESENT) ? 72.13 l2e_from_pfn(l3e_get_pfn(mpl3e[i]), __PAGE_HYPERVISOR) : 72.14 l2e_empty(); 72.15 - mpl2e[l2_table_offset(RO_MPT_VIRT_START)] = l2e_empty(); 72.16 + for ( i = 0; i < (MACHPHYS_MBYTES >> (L2_PAGETABLE_SHIFT - 20)); i++ ) 72.17 + mpl2e[l2_table_offset(RO_MPT_VIRT_START) + i] = l2e_empty(); 72.18 72.19 v->arch.monitor_table = mk_pagetable(m3mfn << PAGE_SHIFT); /* < 4GB */ 72.20 v->arch.monitor_vtable = (l2_pgentry_t *) mpl3e; 72.21 @@ -1266,14 +1268,14 @@ static int shadow_mode_table_op( 72.22 d->arch.shadow_fault_count = 0; 72.23 d->arch.shadow_dirty_count = 0; 72.24 72.25 - if ( (sc->dirty_bitmap == NULL) || 72.26 + if ( guest_handle_is_null(sc->dirty_bitmap) || 72.27 (d->arch.shadow_dirty_bitmap == NULL) ) 72.28 { 72.29 rc = -EINVAL; 72.30 break; 72.31 } 72.32 72.33 - if(sc->pages > d->arch.shadow_dirty_bitmap_size) 72.34 + if ( sc->pages > d->arch.shadow_dirty_bitmap_size ) 72.35 sc->pages = d->arch.shadow_dirty_bitmap_size; 72.36 72.37 #define chunk (8*1024) /* Transfer and clear in 1kB chunks for L1 cache. */ 72.38 @@ -1282,10 +1284,10 @@ static int shadow_mode_table_op( 72.39 int bytes = ((((sc->pages - i) > chunk) ? 72.40 chunk : (sc->pages - i)) + 7) / 8; 72.41 72.42 - if (copy_to_user( 72.43 - sc->dirty_bitmap + (i/(8*sizeof(unsigned long))), 72.44 + if ( copy_to_guest_offset( 72.45 + sc->dirty_bitmap, i/(8*sizeof(unsigned long)), 72.46 d->arch.shadow_dirty_bitmap +(i/(8*sizeof(unsigned long))), 72.47 - bytes)) 72.48 + (bytes+sizeof(unsigned long)-1) / sizeof(unsigned long)) ) 72.49 { 72.50 rc = -EINVAL; 72.51 break; 72.52 @@ -1301,18 +1303,20 @@ static int shadow_mode_table_op( 72.53 sc->stats.fault_count = d->arch.shadow_fault_count; 72.54 sc->stats.dirty_count = d->arch.shadow_dirty_count; 72.55 72.56 - if ( (sc->dirty_bitmap == NULL) || 72.57 + if ( guest_handle_is_null(sc->dirty_bitmap) || 72.58 (d->arch.shadow_dirty_bitmap == NULL) ) 72.59 { 72.60 rc = -EINVAL; 72.61 break; 72.62 } 72.63 72.64 - if(sc->pages > d->arch.shadow_dirty_bitmap_size) 72.65 + if ( sc->pages > d->arch.shadow_dirty_bitmap_size ) 72.66 sc->pages = d->arch.shadow_dirty_bitmap_size; 72.67 72.68 - if (copy_to_user(sc->dirty_bitmap, 72.69 - d->arch.shadow_dirty_bitmap, (sc->pages+7)/8)) 72.70 + if ( copy_to_guest(sc->dirty_bitmap, 72.71 + d->arch.shadow_dirty_bitmap, 72.72 + (((sc->pages+7)/8)+sizeof(unsigned long)-1) / 72.73 + sizeof(unsigned long)) ) 72.74 { 72.75 rc = -EINVAL; 72.76 break;
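The rounding expressions introduced in both shadow hunks — (bytes+sizeof(unsigned long)-1) / sizeof(unsigned long) — exist because the dirty bitmap's guest handle is typed as unsigned long, so a byte count must be rounded up to whole words before the element-wise copy_to_guest*() calls. A quick standalone self-check of that ceiling arithmetic:

    #include <assert.h>

    int main(void)
    {
        unsigned long w = sizeof(unsigned long);
        unsigned long bytes;
        for (bytes = 0; bytes <= 64; bytes++) {
            unsigned long words = (bytes + w - 1) / w;   /* ceil(bytes / w) */
            /* Enough words to cover the bytes, and never a whole word spare. */
            assert(words * w >= bytes);
            assert(words == 0 || (words - 1) * w < bytes);
        }
        return 0;
    }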
73.1 --- a/xen/arch/x86/time.c Mon Mar 06 09:09:18 2006 -0700 73.2 +++ b/xen/arch/x86/time.c Mon Mar 06 10:21:35 2006 -0700 73.3 @@ -41,7 +41,6 @@ boolean_param("hpet_force", opt_hpet_for 73.4 unsigned long cpu_khz; /* CPU clock frequency in kHz. */ 73.5 unsigned long hpet_address; 73.6 spinlock_t rtc_lock = SPIN_LOCK_UNLOCKED; 73.7 -int timer_ack = 0; 73.8 unsigned long volatile jiffies; 73.9 static u32 wc_sec, wc_nsec; /* UTC time at last 'time update'. */ 73.10 static spinlock_t wc_lock = SPIN_LOCK_UNLOCKED; 73.11 @@ -148,16 +147,6 @@ void timer_interrupt(int irq, void *dev_ 73.12 { 73.13 ASSERT(local_irq_is_enabled()); 73.14 73.15 - if ( timer_ack ) 73.16 - { 73.17 - extern spinlock_t i8259A_lock; 73.18 - spin_lock_irq(&i8259A_lock); 73.19 - outb(0x0c, 0x20); 73.20 - /* Ack the IRQ; AEOI will end it automatically. */ 73.21 - inb(0x20); 73.22 - spin_unlock_irq(&i8259A_lock); 73.23 - } 73.24 - 73.25 /* Update jiffies counter. */ 73.26 (*(unsigned long *)&jiffies)++; 73.27
74.1 --- a/xen/arch/x86/traps.c Mon Mar 06 09:09:18 2006 -0700 74.2 +++ b/xen/arch/x86/traps.c Mon Mar 06 10:21:35 2006 -0700 74.3 @@ -1410,7 +1410,13 @@ long do_set_trap_table(struct trap_info 74.4 struct trap_info *dst = current->arch.guest_context.trap_ctxt; 74.5 long rc = 0; 74.6 74.7 - LOCK_BIGLOCK(current->domain); 74.8 + /* If no table is presented then clear the entire virtual IDT. */ 74.9 + if ( traps == NULL ) 74.10 + { 74.11 + memset(dst, 0, 256 * sizeof(*dst)); 74.12 + init_int80_direct_trap(current); 74.13 + return 0; 74.14 + } 74.15 74.16 for ( ; ; ) 74.17 { 74.18 @@ -1440,8 +1446,6 @@ long do_set_trap_table(struct trap_info 74.19 traps++; 74.20 } 74.21 74.22 - UNLOCK_BIGLOCK(current->domain); 74.23 - 74.24 return rc; 74.25 } 74.26
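Two things change in do_set_trap_table() above: the BIGLOCK around the update loop is dropped, and a NULL table pointer now means "reset", memset()ing all 256 virtual IDT entries and re-arming the int80 direct trap. From the guest side the reset would look like this, assuming the usual paravirtual hypercall wrapper:

    /* Clear the domain's entire virtual IDT in one hypercall. */
    HYPERVISOR_set_trap_table(NULL);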
75.1 --- a/xen/arch/x86/x86_32/domain_page.c Mon Mar 06 09:09:18 2006 -0700 75.2 +++ b/xen/arch/x86/x86_32/domain_page.c Mon Mar 06 10:21:35 2006 -0700 75.3 @@ -11,15 +11,40 @@ 75.4 #include <xen/mm.h> 75.5 #include <xen/perfc.h> 75.6 #include <xen/domain_page.h> 75.7 +#include <xen/shadow.h> 75.8 #include <asm/current.h> 75.9 #include <asm/flushtlb.h> 75.10 #include <asm/hardirq.h> 75.11 75.12 +static inline struct vcpu *mapcache_current_vcpu(void) 75.13 +{ 75.14 + struct vcpu *v; 75.15 + 75.16 + /* In the common case we use the mapcache of the running VCPU. */ 75.17 + v = current; 75.18 + 75.19 + /* 75.20 + * If guest_table is NULL, and we are running a paravirtualised guest, 75.21 + * then it means we are running on the idle domain's page table and must 75.22 + * therefore use its mapcache. 75.23 + */ 75.24 + if ( unlikely(!pagetable_get_pfn(v->arch.guest_table)) && !HVM_DOMAIN(v) ) 75.25 + { 75.26 + /* If we really are idling, perform lazy context switch now. */ 75.27 + if ( (v = idle_vcpu[smp_processor_id()]) == current ) 75.28 + __sync_lazy_execstate(); 75.29 + /* We must now be running on the idle page table. */ 75.30 + ASSERT(read_cr3() == __pa(idle_pg_table)); 75.31 + } 75.32 + 75.33 + return v; 75.34 +} 75.35 + 75.36 void *map_domain_page(unsigned long pfn) 75.37 { 75.38 unsigned long va; 75.39 - unsigned int idx, i, vcpu = current->vcpu_id; 75.40 - struct domain *d; 75.41 + unsigned int idx, i, vcpu; 75.42 + struct vcpu *v; 75.43 struct mapcache *cache; 75.44 struct vcpu_maphash_entry *hashent; 75.45 75.46 @@ -27,12 +52,10 @@ void *map_domain_page(unsigned long pfn) 75.47 75.48 perfc_incrc(map_domain_page_count); 75.49 75.50 - /* If we are the idle domain, ensure that we run on our own page tables. */ 75.51 - d = current->domain; 75.52 - if ( unlikely(is_idle_domain(d)) ) 75.53 - __sync_lazy_execstate(); 75.54 + v = mapcache_current_vcpu(); 75.55 75.56 - cache = &d->arch.mapcache; 75.57 + vcpu = v->vcpu_id; 75.58 + cache = &v->domain->arch.mapcache; 75.59 75.60 hashent = &cache->vcpu_maphash[vcpu].hash[MAPHASH_HASHFN(pfn)]; 75.61 if ( hashent->pfn == pfn ) 75.62 @@ -93,7 +116,8 @@ void *map_domain_page(unsigned long pfn) 75.63 void unmap_domain_page(void *va) 75.64 { 75.65 unsigned int idx; 75.66 - struct mapcache *cache = ¤t->domain->arch.mapcache; 75.67 + struct vcpu *v; 75.68 + struct mapcache *cache; 75.69 unsigned long pfn; 75.70 struct vcpu_maphash_entry *hashent; 75.71 75.72 @@ -102,9 +126,13 @@ void unmap_domain_page(void *va) 75.73 ASSERT((void *)MAPCACHE_VIRT_START <= va); 75.74 ASSERT(va < (void *)MAPCACHE_VIRT_END); 75.75 75.76 + v = mapcache_current_vcpu(); 75.77 + 75.78 + cache = &v->domain->arch.mapcache; 75.79 + 75.80 idx = ((unsigned long)va - MAPCACHE_VIRT_START) >> PAGE_SHIFT; 75.81 pfn = l1e_get_pfn(cache->l1tab[idx]); 75.82 - hashent = &cache->vcpu_maphash[current->vcpu_id].hash[MAPHASH_HASHFN(pfn)]; 75.83 + hashent = &cache->vcpu_maphash[v->vcpu_id].hash[MAPHASH_HASHFN(pfn)]; 75.84 75.85 if ( hashent->idx == idx ) 75.86 {
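With mapcache_current_vcpu() above, both map and unmap resolve the mapcache through the vcpu that is actually running — falling back to the idle vcpu (and forcing the lazy context switch) when a paravirtualised guest has no page table loaded. Callers are unaffected; a sketch of the unchanged contract, with an illustrative pfn:

    static void touch_page(unsigned long pfn)
    {
        char *va = map_domain_page(pfn);   /* hashed slot or fresh l1 entry */
        va[0] = 0;                         /* ... use the mapping ... */
        unmap_domain_page(va);             /* slot is recycled lazily */
    }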
76.1 --- a/xen/common/dom0_ops.c Mon Mar 06 09:09:18 2006 -0700 76.2 +++ b/xen/common/dom0_ops.c Mon Mar 06 10:21:35 2006 -0700 76.3 @@ -17,13 +17,14 @@ 76.4 #include <xen/trace.h> 76.5 #include <xen/console.h> 76.6 #include <xen/iocap.h> 76.7 +#include <xen/guest_access.h> 76.8 #include <asm/current.h> 76.9 #include <public/dom0_ops.h> 76.10 #include <public/sched_ctl.h> 76.11 #include <acm/acm_hooks.h> 76.12 76.13 extern long arch_do_dom0_op( 76.14 - struct dom0_op *op, struct dom0_op *u_dom0_op); 76.15 + struct dom0_op *op, GUEST_HANDLE(dom0_op_t) u_dom0_op); 76.16 extern void arch_getdomaininfo_ctxt( 76.17 struct vcpu *, struct vcpu_guest_context *); 76.18 76.19 @@ -89,7 +90,7 @@ static void getdomaininfo(struct domain 76.20 memcpy(info->handle, d->handle, sizeof(xen_domain_handle_t)); 76.21 } 76.22 76.23 -long do_dom0_op(struct dom0_op *u_dom0_op) 76.24 +long do_dom0_op(GUEST_HANDLE(dom0_op_t) u_dom0_op) 76.25 { 76.26 long ret = 0; 76.27 struct dom0_op curop, *op = &curop; 76.28 @@ -99,7 +100,7 @@ long do_dom0_op(struct dom0_op *u_dom0_o 76.29 if ( !IS_PRIV(current->domain) ) 76.30 return -EPERM; 76.31 76.32 - if ( copy_from_user(op, u_dom0_op, sizeof(*op)) ) 76.33 + if ( copy_from_guest(op, u_dom0_op, 1) ) 76.34 return -EFAULT; 76.35 76.36 if ( op->interface_version != DOM0_INTERFACE_VERSION ) 76.37 @@ -239,7 +240,7 @@ long do_dom0_op(struct dom0_op *u_dom0_o 76.38 ret = 0; 76.39 76.40 op->u.createdomain.domain = d->domain_id; 76.41 - if ( copy_to_user(u_dom0_op, op, sizeof(*op)) ) 76.42 + if ( copy_to_guest(u_dom0_op, op, 1) ) 76.43 ret = -EFAULT; 76.44 } 76.45 break; 76.46 @@ -357,7 +358,7 @@ long do_dom0_op(struct dom0_op *u_dom0_o 76.47 case DOM0_SCHEDCTL: 76.48 { 76.49 ret = sched_ctl(&op->u.schedctl); 76.50 - if ( copy_to_user(u_dom0_op, op, sizeof(*op)) ) 76.51 + if ( copy_to_guest(u_dom0_op, op, 1) ) 76.52 ret = -EFAULT; 76.53 } 76.54 break; 76.55 @@ -365,7 +366,7 @@ long do_dom0_op(struct dom0_op *u_dom0_o 76.56 case DOM0_ADJUSTDOM: 76.57 { 76.58 ret = sched_adjdom(&op->u.adjustdom); 76.59 - if ( copy_to_user(u_dom0_op, op, sizeof(*op)) ) 76.60 + if ( copy_to_guest(u_dom0_op, op, 1) ) 76.61 ret = -EFAULT; 76.62 } 76.63 break; 76.64 @@ -398,20 +399,17 @@ long do_dom0_op(struct dom0_op *u_dom0_o 76.65 76.66 getdomaininfo(d, &op->u.getdomaininfo); 76.67 76.68 - if ( copy_to_user(u_dom0_op, op, sizeof(*op)) ) 76.69 + if ( copy_to_guest(u_dom0_op, op, 1) ) 76.70 ret = -EFAULT; 76.71 76.72 put_domain(d); 76.73 } 76.74 break; 76.75 76.76 - 76.77 - 76.78 case DOM0_GETDOMAININFOLIST: 76.79 { 76.80 struct domain *d; 76.81 dom0_getdomaininfo_t info; 76.82 - dom0_getdomaininfo_t *buffer = op->u.getdomaininfolist.buffer; 76.83 u32 num_domains = 0; 76.84 76.85 read_lock(&domlist_lock); 76.86 @@ -432,13 +430,13 @@ long do_dom0_op(struct dom0_op *u_dom0_o 76.87 76.88 put_domain(d); 76.89 76.90 - if ( copy_to_user(buffer, &info, sizeof(dom0_getdomaininfo_t)) ) 76.91 + if ( copy_to_guest_offset(op->u.getdomaininfolist.buffer, 76.92 + num_domains, &info, 1) ) 76.93 { 76.94 ret = -EFAULT; 76.95 break; 76.96 } 76.97 76.98 - buffer++; 76.99 num_domains++; 76.100 } 76.101 76.102 @@ -449,7 +447,7 @@ long do_dom0_op(struct dom0_op *u_dom0_o 76.103 76.104 op->u.getdomaininfolist.num_domains = num_domains; 76.105 76.106 - if ( copy_to_user(u_dom0_op, op, sizeof(*op)) ) 76.107 + if ( copy_to_guest(u_dom0_op, op, 1) ) 76.108 ret = -EFAULT; 76.109 } 76.110 break; 76.111 @@ -489,12 +487,12 @@ long do_dom0_op(struct dom0_op *u_dom0_o 76.112 if ( v != current ) 76.113 vcpu_unpause(v); 76.114 76.115 - if ( 
copy_to_user(op->u.getvcpucontext.ctxt, c, sizeof(*c)) ) 76.116 + if ( copy_to_guest(op->u.getvcpucontext.ctxt, c, 1) ) 76.117 ret = -EFAULT; 76.118 76.119 xfree(c); 76.120 76.121 - if ( copy_to_user(u_dom0_op, op, sizeof(*op)) ) 76.122 + if ( copy_to_guest(u_dom0_op, op, 1) ) 76.123 ret = -EFAULT; 76.124 76.125 getvcpucontext_out: 76.126 @@ -534,7 +532,7 @@ long do_dom0_op(struct dom0_op *u_dom0_o 76.127 (int)sizeof(op->u.getvcpuinfo.cpumap))); 76.128 ret = 0; 76.129 76.130 - if ( copy_to_user(u_dom0_op, op, sizeof(*op)) ) 76.131 + if ( copy_to_guest(u_dom0_op, op, 1) ) 76.132 ret = -EFAULT; 76.133 76.134 getvcpuinfo_out: 76.135 @@ -554,7 +552,7 @@ long do_dom0_op(struct dom0_op *u_dom0_o 76.136 case DOM0_TBUFCONTROL: 76.137 { 76.138 ret = tb_control(&op->u.tbufcontrol); 76.139 - if ( copy_to_user(u_dom0_op, op, sizeof(*op)) ) 76.140 + if ( copy_to_guest(u_dom0_op, op, 1) ) 76.141 ret = -EFAULT; 76.142 } 76.143 break; 76.144 @@ -562,10 +560,10 @@ long do_dom0_op(struct dom0_op *u_dom0_o 76.145 case DOM0_READCONSOLE: 76.146 { 76.147 ret = read_console_ring( 76.148 - &op->u.readconsole.buffer, 76.149 + op->u.readconsole.buffer, 76.150 &op->u.readconsole.count, 76.151 op->u.readconsole.clear); 76.152 - if ( copy_to_user(u_dom0_op, op, sizeof(*op)) ) 76.153 + if ( copy_to_guest(u_dom0_op, op, 1) ) 76.154 ret = -EFAULT; 76.155 } 76.156 break; 76.157 @@ -573,7 +571,7 @@ long do_dom0_op(struct dom0_op *u_dom0_o 76.158 case DOM0_SCHED_ID: 76.159 { 76.160 op->u.sched_id.sched_id = sched_id(); 76.161 - if ( copy_to_user(u_dom0_op, op, sizeof(*op)) ) 76.162 + if ( copy_to_guest(u_dom0_op, op, 1) ) 76.163 ret = -EFAULT; 76.164 else 76.165 ret = 0; 76.166 @@ -678,15 +676,15 @@ long do_dom0_op(struct dom0_op *u_dom0_o 76.167 { 76.168 extern int perfc_control(dom0_perfccontrol_t *); 76.169 ret = perfc_control(&op->u.perfccontrol); 76.170 - if ( copy_to_user(u_dom0_op, op, sizeof(*op)) ) 76.171 + if ( copy_to_guest(u_dom0_op, op, 1) ) 76.172 ret = -EFAULT; 76.173 } 76.174 break; 76.175 #endif 76.176 76.177 default: 76.178 - ret = arch_do_dom0_op(op,u_dom0_op); 76.179 - 76.180 + ret = arch_do_dom0_op(op, u_dom0_op); 76.181 + break; 76.182 } 76.183 76.184 spin_unlock(&dom0_lock);
77.1 --- a/xen/common/domain.c Mon Mar 06 09:09:18 2006 -0700 77.2 +++ b/xen/common/domain.c Mon Mar 06 10:21:35 2006 -0700 77.3 @@ -17,6 +17,7 @@ 77.4 #include <xen/softirq.h> 77.5 #include <xen/domain_page.h> 77.6 #include <xen/rangeset.h> 77.7 +#include <xen/guest_access.h> 77.8 #include <asm/debugger.h> 77.9 #include <public/dom0_ops.h> 77.10 #include <public/sched.h> 77.11 @@ -380,7 +381,7 @@ int set_info_guest(struct domain *d, dom 77.12 domain_pause(d); 77.13 77.14 rc = -EFAULT; 77.15 - if ( copy_from_user(c, setvcpucontext->ctxt, sizeof(*c)) == 0 ) 77.16 + if ( copy_from_guest(c, setvcpucontext->ctxt, 1) == 0 ) 77.17 rc = arch_set_info_guest(v, c); 77.18 77.19 domain_unpause(d);
78.1 --- a/xen/common/gdbstub.c Mon Mar 06 09:09:18 2006 -0700 78.2 +++ b/xen/common/gdbstub.c Mon Mar 06 10:21:35 2006 -0700 78.3 @@ -376,7 +376,6 @@ process_command(struct cpu_user_regs *re 78.4 break; 78.5 case 'g': /* Read registers */ 78.6 gdb_arch_read_reg_array(regs, ctx); 78.7 - ASSERT(!local_irq_is_enabled()); 78.8 break; 78.9 case 'G': /* Write registers */ 78.10 gdb_arch_write_reg_array(regs, ctx->in_buf + 1, ctx); 78.11 @@ -395,7 +394,6 @@ process_command(struct cpu_user_regs *re 78.12 return 0; 78.13 } 78.14 gdb_cmd_read_mem(addr, length, ctx); 78.15 - ASSERT(!local_irq_is_enabled()); 78.16 break; 78.17 case 'M': /* Write memory */ 78.18 addr = simple_strtoul(ctx->in_buf + 1, &ptr, 16); 78.19 @@ -477,7 +475,7 @@ int 78.20 { 78.21 int resume = 0; 78.22 int r; 78.23 - unsigned flags; 78.24 + unsigned long flags; 78.25 78.26 if ( gdb_ctx->serhnd < 0 ) 78.27 { 78.28 @@ -506,7 +504,7 @@ int 78.29 78.30 if ( !gdb_ctx->connected ) 78.31 { 78.32 - printk("GDB connection activated\n"); 78.33 + printk("GDB connection activated.\n"); 78.34 gdb_arch_print_state(regs); 78.35 gdb_ctx->connected = 1; 78.36 } 78.37 @@ -522,7 +520,7 @@ int 78.38 78.39 /* Shouldn't really do this, but otherwise we stop for no 78.40 obvious reason, which is Bad */ 78.41 - printk("Waiting for GDB to attach to Gdb\n"); 78.42 + printk("Waiting for GDB to attach...\n"); 78.43 78.44 gdb_arch_enter(regs); 78.45 gdb_ctx->signum = gdb_arch_signal_num(regs, cookie); 78.46 @@ -535,9 +533,7 @@ int 78.47 78.48 while ( resume == 0 ) 78.49 { 78.50 - ASSERT(!local_irq_is_enabled()); 78.51 r = receive_command(gdb_ctx); 78.52 - ASSERT(!local_irq_is_enabled()); 78.53 if ( r < 0 ) 78.54 { 78.55 dbg_printk("GDB disappeared, trying to resume Xen...\n"); 78.56 @@ -545,9 +541,7 @@ int 78.57 } 78.58 else 78.59 { 78.60 - ASSERT(!local_irq_is_enabled()); 78.61 resume = process_command(regs, gdb_ctx); 78.62 - ASSERT(!local_irq_is_enabled()); 78.63 } 78.64 } 78.65 78.66 @@ -561,28 +555,14 @@ int 78.67 return 0; 78.68 } 78.69 78.70 -/* 78.71 - * initialization 78.72 - * XXX TODO 78.73 - * This should be an explicit call from architecture code. 78.74 - * initcall is far too late for some early debugging, and only the 78.75 - * architecture code knows when this call can be made. 78.76 - */ 78.77 -static int 78.78 -initialize_gdb(void) 78.79 +void 78.80 +initialise_gdb(void) 78.81 { 78.82 - if ( !strcmp(opt_gdb, "none") ) 78.83 - return 0; 78.84 gdb_ctx->serhnd = serial_parse_handle(opt_gdb); 78.85 - if ( gdb_ctx->serhnd == -1 ) 78.86 - panic("Can't parse %s as GDB serial info.\n", opt_gdb); 78.87 - 78.88 - printk("Gdb initialised.\n"); 78.89 - return 0; 78.90 + if ( gdb_ctx->serhnd != -1 ) 78.91 + printk("GDB stub initialised.\n"); 78.92 } 78.93 78.94 -__initcall(initialize_gdb); 78.95 - 78.96 /* 78.97 * Local variables: 78.98 * mode: C
79.1 --- a/xen/common/memory.c Mon Mar 06 09:09:18 2006 -0700 79.2 +++ b/xen/common/memory.c Mon Mar 06 10:21:35 2006 -0700 79.3 @@ -31,7 +31,7 @@ 79.4 static long 79.5 increase_reservation( 79.6 struct domain *d, 79.7 - GUEST_HANDLE(xen_ulong) extent_list, 79.8 + GUEST_HANDLE(ulong) extent_list, 79.9 unsigned int nr_extents, 79.10 unsigned int extent_order, 79.11 unsigned int flags, 79.12 @@ -80,7 +80,7 @@ increase_reservation( 79.13 static long 79.14 populate_physmap( 79.15 struct domain *d, 79.16 - GUEST_HANDLE(xen_ulong) extent_list, 79.17 + GUEST_HANDLE(ulong) extent_list, 79.18 unsigned int nr_extents, 79.19 unsigned int extent_order, 79.20 unsigned int flags, 79.21 @@ -141,7 +141,7 @@ populate_physmap( 79.22 static long 79.23 decrease_reservation( 79.24 struct domain *d, 79.25 - GUEST_HANDLE(xen_ulong) extent_list, 79.26 + GUEST_HANDLE(ulong) extent_list, 79.27 unsigned int nr_extents, 79.28 unsigned int extent_order, 79.29 unsigned int flags,
80.1 --- a/xen/common/perfc.c Mon Mar 06 09:09:18 2006 -0700 80.2 +++ b/xen/common/perfc.c Mon Mar 06 10:21:35 2006 -0700 80.3 @@ -5,9 +5,10 @@ 80.4 #include <xen/perfc.h> 80.5 #include <xen/keyhandler.h> 80.6 #include <xen/spinlock.h> 80.7 +#include <xen/mm.h> 80.8 +#include <xen/guest_access.h> 80.9 #include <public/dom0_ops.h> 80.10 #include <asm/uaccess.h> 80.11 -#include <xen/mm.h> 80.12 80.13 #undef PERFCOUNTER 80.14 #undef PERFCOUNTER_CPU 80.15 @@ -131,12 +132,12 @@ void perfc_reset(unsigned char key) 80.16 80.17 static dom0_perfc_desc_t perfc_d[NR_PERFCTRS]; 80.18 static int perfc_init = 0; 80.19 -static int perfc_copy_info(dom0_perfc_desc_t *desc) 80.20 +static int perfc_copy_info(GUEST_HANDLE(dom0_perfc_desc_t) desc) 80.21 { 80.22 unsigned int i, j; 80.23 atomic_t *counters = (atomic_t *)&perfcounters; 80.24 80.25 - if ( desc == NULL ) 80.26 + if ( guest_handle_is_null(desc) ) 80.27 return 0; 80.28 80.29 /* We only copy the name and array-size information once. */ 80.30 @@ -196,7 +197,7 @@ static int perfc_copy_info(dom0_perfc_de 80.31 } 80.32 } 80.33 80.34 - return (copy_to_user(desc, perfc_d, NR_PERFCTRS * sizeof(*desc)) ? 80.35 + return (copy_to_guest(desc, (dom0_perfc_desc_t *)perfc_d, NR_PERFCTRS) ? 80.36 -EFAULT : 0); 80.37 } 80.38
81.1 --- a/xen/drivers/char/console.c Mon Mar 06 09:09:18 2006 -0700 81.2 +++ b/xen/drivers/char/console.c Mon Mar 06 10:21:35 2006 -0700 81.3 @@ -20,6 +20,7 @@ 81.4 #include <xen/keyhandler.h> 81.5 #include <xen/mm.h> 81.6 #include <xen/delay.h> 81.7 +#include <xen/guest_access.h> 81.8 #include <asm/current.h> 81.9 #include <asm/uaccess.h> 81.10 #include <asm/debugger.h> 81.11 @@ -221,9 +222,8 @@ static void putchar_console_ring(int c) 81.12 conringc = conringp - CONRING_SIZE; 81.13 } 81.14 81.15 -long read_console_ring(char **pstr, u32 *pcount, int clear) 81.16 +long read_console_ring(GUEST_HANDLE(char) str, u32 *pcount, int clear) 81.17 { 81.18 - char *str = *pstr; 81.19 unsigned int idx, len, max, sofar, c; 81.20 unsigned long flags; 81.21 81.22 @@ -239,7 +239,7 @@ long read_console_ring(char **pstr, u32 81.23 len = CONRING_SIZE - idx; 81.24 if ( (sofar + len) > max ) 81.25 len = max - sofar; 81.26 - if ( copy_to_user(str + sofar, &conring[idx], len) ) 81.27 + if ( copy_to_guest_offset(str, sofar, &conring[idx], len) ) 81.28 return -EFAULT; 81.29 sofar += len; 81.30 c += len;
82.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 82.2 +++ b/xen/include/asm-ia64/guest_access.h Mon Mar 06 10:21:35 2006 -0700 82.3 @@ -0,0 +1,63 @@ 82.4 +/****************************************************************************** 82.5 + * guest_access.h 82.6 + * 82.7 + * Copyright (c) 2006, K A Fraser 82.8 + */ 82.9 + 82.10 +#ifndef __ASM_IA64_GUEST_ACCESS_H__ 82.11 +#define __ASM_IA64_GUEST_ACCESS_H__ 82.12 + 82.13 +#include <asm/uaccess.h> 82.14 + 82.15 +/* Is the guest handle a NULL reference? */ 82.16 +#define guest_handle_is_null(hnd) ((hnd).p == NULL) 82.17 + 82.18 +/* Offset the given guest handle into the array it refers to. */ 82.19 +#define guest_handle_add_offset(hnd, nr) ((hnd).p += (nr)) 82.20 + 82.21 +/* Cast a guest handle to the specified type of handle. */ 82.22 +#define guest_handle_cast(hnd, type) ({ \ 82.23 + type *_x = (hnd).p; \ 82.24 + (GUEST_HANDLE(type)) { _x }; \ 82.25 +}) 82.26 + 82.27 +/* 82.28 + * Copy an array of objects to guest context via a guest handle, 82.29 + * specifying an offset into the guest array. 82.30 + */ 82.31 +#define copy_to_guest_offset(hnd, off, ptr, nr) ({ \ 82.32 + const typeof(ptr) _x = (hnd).p; \ 82.33 + const typeof(ptr) _y = (ptr); \ 82.34 + copy_to_user(_x+(off), _y, sizeof(*_x)*(nr)); \ 82.35 +}) 82.36 + 82.37 +/* 82.38 + * Copy an array of objects from guest context via a guest handle, 82.39 + * specifying an offset into the guest array. 82.40 + */ 82.41 +#define copy_from_guest_offset(ptr, hnd, off, nr) ({ \ 82.42 + const typeof(ptr) _x = (hnd).p; \ 82.43 + const typeof(ptr) _y = (ptr); \ 82.44 + copy_from_user(_y, _x+(off), sizeof(*_x)*(nr)); \ 82.45 +}) 82.46 + 82.47 +/* 82.48 + * Pre-validate a guest handle. 82.49 + * Allows use of faster __copy_* functions. 82.50 + */ 82.51 +#define guest_handle_okay(hnd, nr) \ 82.52 + array_access_ok((hnd).p, (nr), sizeof(*(hnd).p)) 82.53 + 82.54 +#define __copy_to_guest_offset(hnd, off, ptr, nr) ({ \ 82.55 + const typeof(ptr) _x = (hnd).p; \ 82.56 + const typeof(ptr) _y = (ptr); \ 82.57 + __copy_to_user(_x+(off), _y, sizeof(*_x)*(nr)); \ 82.58 +}) 82.59 + 82.60 +#define __copy_from_guest_offset(ptr, hnd, off, nr) ({ \ 82.61 + const typeof(ptr) _x = (hnd).p; \ 82.62 + const typeof(ptr) _y = (ptr); \ 82.63 + __copy_from_user(_y, _x+(off), sizeof(*_x)*(nr)); \ 82.64 +}) 82.65 + 82.66 +#endif /* __ASM_IA64_GUEST_ACCESS_H__ */
83.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 83.2 +++ b/xen/include/asm-x86/guest_access.h Mon Mar 06 10:21:35 2006 -0700 83.3 @@ -0,0 +1,63 @@ 83.4 +/****************************************************************************** 83.5 + * guest_access.h 83.6 + * 83.7 + * Copyright (c) 2006, K A Fraser 83.8 + */ 83.9 + 83.10 +#ifndef __ASM_X86_GUEST_ACCESS_H__ 83.11 +#define __ASM_X86_GUEST_ACCESS_H__ 83.12 + 83.13 +#include <asm/uaccess.h> 83.14 + 83.15 +/* Is the guest handle a NULL reference? */ 83.16 +#define guest_handle_is_null(hnd) ((hnd).p == NULL) 83.17 + 83.18 +/* Offset the given guest handle into the array it refers to. */ 83.19 +#define guest_handle_add_offset(hnd, nr) ((hnd).p += (nr)) 83.20 + 83.21 +/* Cast a guest handle to the specified type of handle. */ 83.22 +#define guest_handle_cast(hnd, type) ({ \ 83.23 + type *_x = (hnd).p; \ 83.24 + (GUEST_HANDLE(type)) { _x }; \ 83.25 +}) 83.26 + 83.27 +/* 83.28 + * Copy an array of objects to guest context via a guest handle, 83.29 + * specifying an offset into the guest array. 83.30 + */ 83.31 +#define copy_to_guest_offset(hnd, off, ptr, nr) ({ \ 83.32 + const typeof(ptr) _x = (hnd).p; \ 83.33 + const typeof(ptr) _y = (ptr); \ 83.34 + copy_to_user(_x+(off), _y, sizeof(*_x)*(nr)); \ 83.35 +}) 83.36 + 83.37 +/* 83.38 + * Copy an array of objects from guest context via a guest handle, 83.39 + * specifying an offset into the guest array. 83.40 + */ 83.41 +#define copy_from_guest_offset(ptr, hnd, off, nr) ({ \ 83.42 + const typeof(ptr) _x = (hnd).p; \ 83.43 + const typeof(ptr) _y = (ptr); \ 83.44 + copy_from_user(_y, _x+(off), sizeof(*_x)*(nr)); \ 83.45 +}) 83.46 + 83.47 +/* 83.48 + * Pre-validate a guest handle. 83.49 + * Allows use of faster __copy_* functions. 83.50 + */ 83.51 +#define guest_handle_okay(hnd, nr) \ 83.52 + array_access_ok((hnd).p, (nr), sizeof(*(hnd).p)) 83.53 + 83.54 +#define __copy_to_guest_offset(hnd, off, ptr, nr) ({ \ 83.55 + const typeof(ptr) _x = (hnd).p; \ 83.56 + const typeof(ptr) _y = (ptr); \ 83.57 + __copy_to_user(_x+(off), _y, sizeof(*_x)*(nr)); \ 83.58 +}) 83.59 + 83.60 +#define __copy_from_guest_offset(ptr, hnd, off, nr) ({ \ 83.61 + const typeof(ptr) _x = (hnd).p; \ 83.62 + const typeof(ptr) _y = (ptr); \ 83.63 + __copy_from_user(_y, _x+(off), sizeof(*_x)*(nr)); \ 83.64 +}) 83.65 + 83.66 +#endif /* __ASM_X86_GUEST_ACCESS_H__ */
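The two new guest_access.h headers above are identical for ia64 and x86. The interesting property is that binding (hnd).p to typeof(ptr) turns a mismatched pointer type into a compile-time diagnostic rather than a silently mis-sized copy. A standalone GCC sketch of the mechanics (mock_ names invented; memcpy stands in for copy_to_user):

    /* Standalone GCC mock, not Xen source. */
    #include <stdio.h>
    #include <string.h>

    typedef struct { long *p; } mock_handle_long;

    #define mock_handle_is_null(hnd)     ((hnd).p == NULL)

    #define mock_copy_to_guest_offset(hnd, off, ptr, nr) ({              \
        const typeof(ptr) _x = (hnd).p;  /* pointer types must match */  \
        memcpy(_x + (off), (ptr), sizeof(*_x) * (nr));                   \
        0; })

    int main(void)
    {
        long buf[8] = { 0 };
        long vals[2] = { 7, 9 };
        long *src = vals;            /* macros expect a pointer, not an array */
        mock_handle_long h = { buf };

        if (!mock_handle_is_null(h))
            mock_copy_to_guest_offset(h, 4, src, 2);   /* writes buf[4..5] */
        /* mock_copy_to_guest_offset(h, 0, (int *)src, 2);
         *   ^ would draw an incompatible-pointer-type diagnostic */
        printf("%ld %ld\n", buf[4], buf[5]);
        return 0;
    }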
84.1 --- a/xen/include/asm-x86/hvm/support.h Mon Mar 06 09:09:18 2006 -0700 84.2 +++ b/xen/include/asm-x86/hvm/support.h Mon Mar 06 10:21:35 2006 -0700 84.3 @@ -26,7 +26,11 @@ 84.4 #include <asm/regs.h> 84.5 #include <asm/processor.h> 84.6 84.7 +#ifndef NDEBUG 84.8 #define HVM_DEBUG 1 84.9 +#else 84.10 +#define HVM_DEBUG 0 84.11 +#endif 84.12 84.13 #define HVM_DOMAIN(v) ((v)->arch.guest_context.flags & VGCF_HVM_GUEST) 84.14 84.15 @@ -113,7 +117,7 @@ enum hval_bitmaps { 84.16 #define DBG_LEVEL_VMMU (1 << 5) 84.17 #define DBG_LEVEL_VLAPIC (1 << 6) 84.18 #define DBG_LEVEL_VLAPIC_TIMER (1 << 7) 84.19 -#define DBG_LEVEL_VLAPIC_INTERRUPT (1 << 7) 84.20 +#define DBG_LEVEL_VLAPIC_INTERRUPT (1 << 8) 84.21 #define DBG_LEVEL_IOAPIC (1 << 9) 84.22 84.23 extern unsigned int opt_hvm_debug_level;
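The second hunk above fixes a genuine bug: DBG_LEVEL_VLAPIC_INTERRUPT shared bit 7 with DBG_LEVEL_VLAPIC_TIMER, so enabling either debug class silently enabled both. A standalone sketch of why aliased flag bits are indistinguishable (the names mirror the patch, but the program itself is illustrative only):

    /* Standalone sketch of the flag-aliasing bug the hunk fixes. */
    #include <stdio.h>

    #define DBG_LEVEL_VLAPIC_TIMER     (1 << 7)
    #define DBG_LEVEL_VLAPIC_INTERRUPT (1 << 8)   /* was (1 << 7): aliased TIMER */

    int main(void)
    {
        unsigned int opt = DBG_LEVEL_VLAPIC_TIMER;   /* enable timer tracing only */

        /* With both flags on bit 7, the second test also fired. */
        printf("timer     %s\n", (opt & DBG_LEVEL_VLAPIC_TIMER)     ? "on" : "off");
        printf("interrupt %s\n", (opt & DBG_LEVEL_VLAPIC_INTERRUPT) ? "on" : "off");
        return 0;
    }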
85.1 --- a/xen/include/asm-x86/processor.h Mon Mar 06 09:09:18 2006 -0700 85.2 +++ b/xen/include/asm-x86/processor.h Mon Mar 06 10:21:35 2006 -0700 85.3 @@ -353,6 +353,13 @@ static always_inline void clear_in_cr4 ( 85.4 outb((data), 0x23); \ 85.5 } while (0) 85.6 85.7 +/* Stop speculative execution */ 85.8 +static inline void sync_core(void) 85.9 +{ 85.10 + int tmp; 85.11 + asm volatile("cpuid" : "=a" (tmp) : "0" (1) : "ebx","ecx","edx","memory"); 85.12 +} 85.13 + 85.14 static always_inline void __monitor(const void *eax, unsigned long ecx, 85.15 unsigned long edx) 85.16 {
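The sync_core() added above relies on CPUID being an architecturally serializing instruction on x86: executing it drains speculative and out-of-order work before subsequent code runs. A standalone user-space sketch of the same idiom (GCC inline assembly, x86 only; the surrounding program is invented for demonstration):

    /* Standalone x86/GCC sketch of the sync_core() idiom. */
    #include <stdio.h>

    static inline void sync_core(void)
    {
        int tmp;
        /* EAX=1 is a cheap, always-valid CPUID leaf; the clobbers name the
         * registers CPUID overwrites, plus "memory" to order compiler accesses. */
        asm volatile("cpuid" : "=a" (tmp) : "0" (1) : "ebx", "ecx", "edx", "memory");
    }

    int main(void)
    {
        sync_core();            /* nothing before this can still be in flight */
        puts("serialized");
        return 0;
    }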
86.1 --- a/xen/include/asm-x86/time.h Mon Mar 06 09:09:18 2006 -0700 86.2 +++ b/xen/include/asm-x86/time.h Mon Mar 06 10:21:35 2006 -0700 86.3 @@ -4,8 +4,6 @@ 86.4 86.5 #include <asm/msr.h> 86.6 86.7 -extern int timer_ack; 86.8 - 86.9 extern void calibrate_tsc_bp(void); 86.10 extern void calibrate_tsc_ap(void); 86.11
87.1 --- a/xen/include/public/arch-ia64.h Mon Mar 06 09:09:18 2006 -0700 87.2 +++ b/xen/include/public/arch-ia64.h Mon Mar 06 10:21:35 2006 -0700 87.3 @@ -7,6 +7,28 @@ 87.4 #ifndef __HYPERVISOR_IF_IA64_H__ 87.5 #define __HYPERVISOR_IF_IA64_H__ 87.6 87.7 +#ifdef __XEN__ 87.8 +#define __DEFINE_GUEST_HANDLE(name, type) \ 87.9 + typedef struct { type *p; } __guest_handle_ ## name 87.10 +#else 87.11 +#define __DEFINE_GUEST_HANDLE(name, type) \ 87.12 + typedef type * __guest_handle_ ## name 87.13 +#endif 87.14 + 87.15 +#define DEFINE_GUEST_HANDLE(name) __DEFINE_GUEST_HANDLE(name, name) 87.16 +#define GUEST_HANDLE(name) __guest_handle_ ## name 87.17 + 87.18 +#ifndef __ASSEMBLY__ 87.19 +/* Guest handles for primitive C types. */ 87.20 +__DEFINE_GUEST_HANDLE(uchar, unsigned char); 87.21 +__DEFINE_GUEST_HANDLE(uint, unsigned int); 87.22 +__DEFINE_GUEST_HANDLE(ulong, unsigned long); 87.23 +DEFINE_GUEST_HANDLE(char); 87.24 +DEFINE_GUEST_HANDLE(int); 87.25 +DEFINE_GUEST_HANDLE(long); 87.26 +DEFINE_GUEST_HANDLE(void); 87.27 +#endif 87.28 + 87.29 /* Maximum number of virtual CPUs in multi-processor guests. */ 87.30 /* WARNING: before changing this, check that shared_info fits on a page */ 87.31 #define MAX_VIRT_CPUS 4 87.32 @@ -298,6 +320,7 @@ typedef struct vcpu_guest_context { 87.33 arch_initrd_info_t initrd; 87.34 char cmdline[IA64_COMMAND_LINE_SIZE]; 87.35 } vcpu_guest_context_t; 87.36 +DEFINE_GUEST_HANDLE(vcpu_guest_context_t); 87.37 87.38 #endif /* !__ASSEMBLY__ */ 87.39
88.1 --- a/xen/include/public/arch-x86_32.h Mon Mar 06 09:09:18 2006 -0700 88.2 +++ b/xen/include/public/arch-x86_32.h Mon Mar 06 10:21:35 2006 -0700 88.3 @@ -9,6 +9,28 @@ 88.4 #ifndef __XEN_PUBLIC_ARCH_X86_32_H__ 88.5 #define __XEN_PUBLIC_ARCH_X86_32_H__ 88.6 88.7 +#ifdef __XEN__ 88.8 +#define __DEFINE_GUEST_HANDLE(name, type) \ 88.9 + typedef struct { type *p; } __guest_handle_ ## name 88.10 +#else 88.11 +#define __DEFINE_GUEST_HANDLE(name, type) \ 88.12 + typedef type * __guest_handle_ ## name 88.13 +#endif 88.14 + 88.15 +#define DEFINE_GUEST_HANDLE(name) __DEFINE_GUEST_HANDLE(name, name) 88.16 +#define GUEST_HANDLE(name) __guest_handle_ ## name 88.17 + 88.18 +#ifndef __ASSEMBLY__ 88.19 +/* Guest handles for primitive C types. */ 88.20 +__DEFINE_GUEST_HANDLE(uchar, unsigned char); 88.21 +__DEFINE_GUEST_HANDLE(uint, unsigned int); 88.22 +__DEFINE_GUEST_HANDLE(ulong, unsigned long); 88.23 +DEFINE_GUEST_HANDLE(char); 88.24 +DEFINE_GUEST_HANDLE(int); 88.25 +DEFINE_GUEST_HANDLE(long); 88.26 +DEFINE_GUEST_HANDLE(void); 88.27 +#endif 88.28 + 88.29 /* 88.30 * SEGMENT DESCRIPTOR TABLES 88.31 */ 88.32 @@ -130,6 +152,7 @@ typedef struct vcpu_guest_context { 88.33 unsigned long failsafe_callback_eip; 88.34 unsigned long vm_assist; /* VMASST_TYPE_* bitmap */ 88.35 } vcpu_guest_context_t; 88.36 +DEFINE_GUEST_HANDLE(vcpu_guest_context_t); 88.37 88.38 typedef struct arch_shared_info { 88.39 unsigned long max_pfn; /* max pfn that appears in table */
89.1 --- a/xen/include/public/arch-x86_64.h Mon Mar 06 09:09:18 2006 -0700 89.2 +++ b/xen/include/public/arch-x86_64.h Mon Mar 06 10:21:35 2006 -0700 89.3 @@ -9,6 +9,28 @@ 89.4 #ifndef __XEN_PUBLIC_ARCH_X86_64_H__ 89.5 #define __XEN_PUBLIC_ARCH_X86_64_H__ 89.6 89.7 +#ifdef __XEN__ 89.8 +#define __DEFINE_GUEST_HANDLE(name, type) \ 89.9 + typedef struct { type *p; } __guest_handle_ ## name 89.10 +#else 89.11 +#define __DEFINE_GUEST_HANDLE(name, type) \ 89.12 + typedef type * __guest_handle_ ## name 89.13 +#endif 89.14 + 89.15 +#define DEFINE_GUEST_HANDLE(name) __DEFINE_GUEST_HANDLE(name, name) 89.16 +#define GUEST_HANDLE(name) __guest_handle_ ## name 89.17 + 89.18 +#ifndef __ASSEMBLY__ 89.19 +/* Guest handles for primitive C types. */ 89.20 +__DEFINE_GUEST_HANDLE(uchar, unsigned char); 89.21 +__DEFINE_GUEST_HANDLE(uint, unsigned int); 89.22 +__DEFINE_GUEST_HANDLE(ulong, unsigned long); 89.23 +DEFINE_GUEST_HANDLE(char); 89.24 +DEFINE_GUEST_HANDLE(int); 89.25 +DEFINE_GUEST_HANDLE(long); 89.26 +DEFINE_GUEST_HANDLE(void); 89.27 +#endif 89.28 + 89.29 /* 89.30 * SEGMENT DESCRIPTOR TABLES 89.31 */ 89.32 @@ -215,6 +237,7 @@ typedef struct vcpu_guest_context { 89.33 uint64_t gs_base_kernel; 89.34 uint64_t gs_base_user; 89.35 } vcpu_guest_context_t; 89.36 +DEFINE_GUEST_HANDLE(vcpu_guest_context_t); 89.37 89.38 typedef struct arch_shared_info { 89.39 unsigned long max_pfn; /* max pfn that appears in table */
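The same GUEST_HANDLE boilerplate is now replicated in all three public arch headers above. Its key property is that the hypervisor-side struct wrapper is layout-compatible with the guest-side raw pointer, so the public ABI does not change with the build. A standalone sketch (MOCK_XEN is an invented stand-in for the real __XEN__ guard):

    /* Standalone sketch; MOCK_XEN stands in for __XEN__. */
    #include <stdio.h>

    #ifdef MOCK_XEN
    #define __DEFINE_GUEST_HANDLE(name, type) \
        typedef struct { type *p; } __guest_handle_ ## name
    #else
    #define __DEFINE_GUEST_HANDLE(name, type) \
        typedef type * __guest_handle_ ## name
    #endif
    #define DEFINE_GUEST_HANDLE(name) __DEFINE_GUEST_HANDLE(name, name)
    #define GUEST_HANDLE(name)        __guest_handle_ ## name

    DEFINE_GUEST_HANDLE(int);

    int main(void)
    {
        /* Same size either way, so structures embedding a handle keep
         * their layout across the hypervisor/guest boundary. */
        printf("sizeof handle = %zu, sizeof raw ptr = %zu\n",
               sizeof(GUEST_HANDLE(int)), sizeof(int *));
        return 0;
    }

Compiling with and without -DMOCK_XEN prints the same sizes, which is what keeps the shared structures binary-compatible.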
90.1 --- a/xen/include/public/dom0_ops.h Mon Mar 06 09:09:18 2006 -0700 90.2 +++ b/xen/include/public/dom0_ops.h Mon Mar 06 10:21:35 2006 -0700 90.3 @@ -28,18 +28,21 @@ typedef struct dom0_getmemlist { 90.4 /* IN variables. */ 90.5 domid_t domain; 90.6 unsigned long max_pfns; 90.7 - void *buffer; 90.8 + GUEST_HANDLE(ulong) buffer; 90.9 /* OUT variables. */ 90.10 unsigned long num_pfns; 90.11 } dom0_getmemlist_t; 90.12 +DEFINE_GUEST_HANDLE(dom0_getmemlist_t); 90.13 90.14 #define DOM0_SCHEDCTL 6 90.15 /* struct sched_ctl_cmd is from sched-ctl.h */ 90.16 typedef struct sched_ctl_cmd dom0_schedctl_t; 90.17 +DEFINE_GUEST_HANDLE(dom0_schedctl_t); 90.18 90.19 #define DOM0_ADJUSTDOM 7 90.20 /* struct sched_adjdom_cmd is from sched-ctl.h */ 90.21 typedef struct sched_adjdom_cmd dom0_adjustdom_t; 90.22 +DEFINE_GUEST_HANDLE(dom0_adjustdom_t); 90.23 90.24 #define DOM0_CREATEDOMAIN 8 90.25 typedef struct dom0_createdomain { 90.26 @@ -50,24 +53,28 @@ typedef struct dom0_createdomain { 90.27 /* Identifier for new domain (auto-allocate if zero is specified). */ 90.28 domid_t domain; 90.29 } dom0_createdomain_t; 90.30 +DEFINE_GUEST_HANDLE(dom0_createdomain_t); 90.31 90.32 #define DOM0_DESTROYDOMAIN 9 90.33 typedef struct dom0_destroydomain { 90.34 /* IN variables. */ 90.35 domid_t domain; 90.36 } dom0_destroydomain_t; 90.37 +DEFINE_GUEST_HANDLE(dom0_destroydomain_t); 90.38 90.39 #define DOM0_PAUSEDOMAIN 10 90.40 typedef struct dom0_pausedomain { 90.41 /* IN parameters. */ 90.42 domid_t domain; 90.43 } dom0_pausedomain_t; 90.44 +DEFINE_GUEST_HANDLE(dom0_pausedomain_t); 90.45 90.46 #define DOM0_UNPAUSEDOMAIN 11 90.47 typedef struct dom0_unpausedomain { 90.48 /* IN parameters. */ 90.49 domid_t domain; 90.50 } dom0_unpausedomain_t; 90.51 +DEFINE_GUEST_HANDLE(dom0_unpausedomain_t); 90.52 90.53 #define DOM0_GETDOMAININFO 12 90.54 typedef struct dom0_getdomaininfo { 90.55 @@ -93,6 +100,7 @@ typedef struct dom0_getdomaininfo { 90.56 uint32_t ssidref; 90.57 xen_domain_handle_t handle; 90.58 } dom0_getdomaininfo_t; 90.59 +DEFINE_GUEST_HANDLE(dom0_getdomaininfo_t); 90.60 90.61 #define DOM0_SETVCPUCONTEXT 13 90.62 typedef struct dom0_setvcpucontext { 90.63 @@ -100,8 +108,9 @@ typedef struct dom0_setvcpucontext { 90.64 domid_t domain; 90.65 uint32_t vcpu; 90.66 /* IN/OUT parameters */ 90.67 - vcpu_guest_context_t *ctxt; 90.68 + GUEST_HANDLE(vcpu_guest_context_t) ctxt; 90.69 } dom0_setvcpucontext_t; 90.70 +DEFINE_GUEST_HANDLE(dom0_setvcpucontext_t); 90.71 90.72 #define DOM0_MSR 15 90.73 typedef struct dom0_msr { 90.74 @@ -115,6 +124,7 @@ typedef struct dom0_msr { 90.75 uint32_t out1; 90.76 uint32_t out2; 90.77 } dom0_msr_t; 90.78 +DEFINE_GUEST_HANDLE(dom0_msr_t); 90.79 90.80 /* 90.81 * Set clock such that it would read <secs,nsecs> after 00:00:00 UTC, 90.82 @@ -127,6 +137,7 @@ typedef struct dom0_settime { 90.83 uint32_t nsecs; 90.84 uint64_t system_time; 90.85 } dom0_settime_t; 90.86 +DEFINE_GUEST_HANDLE(dom0_settime_t); 90.87 90.88 #define DOM0_GETPAGEFRAMEINFO 18 90.89 #define NOTAB 0 /* normal page */ 90.90 @@ -147,6 +158,7 @@ typedef struct dom0_getpageframeinfo { 90.91 /* Is the page PINNED to a type? */ 90.92 uint32_t type; /* see above type defs */ 90.93 } dom0_getpageframeinfo_t; 90.94 +DEFINE_GUEST_HANDLE(dom0_getpageframeinfo_t); 90.95 90.96 /* 90.97 * Read console content from Xen buffer ring. 90.98 @@ -154,11 +166,12 @@ typedef struct dom0_getpageframeinfo { 90.99 #define DOM0_READCONSOLE 19 90.100 typedef struct dom0_readconsole { 90.101 /* IN variables. 
*/ 90.102 - uint32_t clear; /* Non-zero -> clear after reading. */ 90.103 + uint32_t clear; /* Non-zero -> clear after reading. */ 90.104 /* IN/OUT variables. */ 90.105 - char *buffer; /* In: Buffer start; Out: Used buffer start */ 90.106 - uint32_t count; /* In: Buffer size; Out: Used buffer size */ 90.107 + GUEST_HANDLE(char) buffer; /* In: Buffer start; Out: Used buffer start */ 90.108 + uint32_t count; /* In: Buffer size; Out: Used buffer size */ 90.109 } dom0_readconsole_t; 90.110 +DEFINE_GUEST_HANDLE(dom0_readconsole_t); 90.111 90.112 /* 90.113 * Set which physical cpus a vcpu can execute on. 90.114 @@ -170,6 +183,7 @@ typedef struct dom0_setvcpuaffinity { 90.115 uint32_t vcpu; 90.116 cpumap_t cpumap; 90.117 } dom0_setvcpuaffinity_t; 90.118 +DEFINE_GUEST_HANDLE(dom0_setvcpuaffinity_t); 90.119 90.120 /* Get trace buffers machine base address */ 90.121 #define DOM0_TBUFCONTROL 21 90.122 @@ -189,6 +203,7 @@ typedef struct dom0_tbufcontrol { 90.123 unsigned long buffer_mfn; 90.124 uint32_t size; 90.125 } dom0_tbufcontrol_t; 90.126 +DEFINE_GUEST_HANDLE(dom0_tbufcontrol_t); 90.127 90.128 /* 90.129 * Get physical information about the host machine 90.130 @@ -204,6 +219,7 @@ typedef struct dom0_physinfo { 90.131 unsigned long free_pages; 90.132 uint32_t hw_cap[8]; 90.133 } dom0_physinfo_t; 90.134 +DEFINE_GUEST_HANDLE(dom0_physinfo_t); 90.135 90.136 /* 90.137 * Get the ID of the current scheduler. 90.138 @@ -213,6 +229,7 @@ typedef struct dom0_sched_id { 90.139 /* OUT variable */ 90.140 uint32_t sched_id; 90.141 } dom0_sched_id_t; 90.142 +DEFINE_GUEST_HANDLE(dom0_sched_id_t); 90.143 90.144 /* 90.145 * Control shadow pagetables operation 90.146 @@ -234,17 +251,19 @@ typedef struct dom0_shadow_control_stats 90.147 uint32_t dirty_net_count; 90.148 uint32_t dirty_block_count; 90.149 } dom0_shadow_control_stats_t; 90.150 +DEFINE_GUEST_HANDLE(dom0_shadow_control_stats_t); 90.151 90.152 typedef struct dom0_shadow_control { 90.153 /* IN variables. */ 90.154 domid_t domain; 90.155 uint32_t op; 90.156 - unsigned long *dirty_bitmap; /* pointer to locked buffer */ 90.157 + GUEST_HANDLE(ulong) dirty_bitmap; 90.158 /* IN/OUT variables. */ 90.159 unsigned long pages; /* size of buffer, updated with actual size */ 90.160 /* OUT variables. */ 90.161 dom0_shadow_control_stats_t stats; 90.162 } dom0_shadow_control_t; 90.163 +DEFINE_GUEST_HANDLE(dom0_shadow_control_t); 90.164 90.165 #define DOM0_SETDOMAINMAXMEM 28 90.166 typedef struct dom0_setdomainmaxmem { 90.167 @@ -252,6 +271,7 @@ typedef struct dom0_setdomainmaxmem { 90.168 domid_t domain; 90.169 unsigned long max_memkb; 90.170 } dom0_setdomainmaxmem_t; 90.171 +DEFINE_GUEST_HANDLE(dom0_setdomainmaxmem_t); 90.172 90.173 #define DOM0_GETPAGEFRAMEINFO2 29 /* batched interface */ 90.174 typedef struct dom0_getpageframeinfo2 { 90.175 @@ -259,8 +279,9 @@ typedef struct dom0_getpageframeinfo2 { 90.176 domid_t domain; 90.177 unsigned long num; 90.178 /* IN/OUT variables. */ 90.179 - unsigned long *array; 90.180 + GUEST_HANDLE(ulong) array; 90.181 } dom0_getpageframeinfo2_t; 90.182 +DEFINE_GUEST_HANDLE(dom0_getpageframeinfo2_t); 90.183 90.184 /* 90.185 * Request memory range (@mfn, @mfn+@nr_mfns-1) to have type @type. 90.186 @@ -279,6 +300,7 @@ typedef struct dom0_add_memtype { 90.187 uint32_t handle; 90.188 uint32_t reg; 90.189 } dom0_add_memtype_t; 90.190 +DEFINE_GUEST_HANDLE(dom0_add_memtype_t); 90.191 90.192 /* 90.193 * Tear down an existing memory-range type. 
If @handle is remembered then it 90.194 @@ -293,6 +315,7 @@ typedef struct dom0_del_memtype { 90.195 uint32_t handle; 90.196 uint32_t reg; 90.197 } dom0_del_memtype_t; 90.198 +DEFINE_GUEST_HANDLE(dom0_del_memtype_t); 90.199 90.200 /* Read current type of an MTRR (x86-specific). */ 90.201 #define DOM0_READ_MEMTYPE 33 90.202 @@ -304,6 +327,7 @@ typedef struct dom0_read_memtype { 90.203 unsigned long nr_mfns; 90.204 uint32_t type; 90.205 } dom0_read_memtype_t; 90.206 +DEFINE_GUEST_HANDLE(dom0_read_memtype_t); 90.207 90.208 /* Interface for controlling Xen software performance counters. */ 90.209 #define DOM0_PERFCCONTROL 34 90.210 @@ -315,20 +339,23 @@ typedef struct dom0_perfc_desc { 90.211 uint32_t nr_vals; /* number of values for this counter */ 90.212 uint32_t vals[64]; /* array of values */ 90.213 } dom0_perfc_desc_t; 90.214 +DEFINE_GUEST_HANDLE(dom0_perfc_desc_t); 90.215 typedef struct dom0_perfccontrol { 90.216 /* IN variables. */ 90.217 uint32_t op; /* DOM0_PERFCCONTROL_OP_??? */ 90.218 /* OUT variables. */ 90.219 uint32_t nr_counters; /* number of counters */ 90.220 - dom0_perfc_desc_t *desc; /* counter information (or NULL) */ 90.221 + GUEST_HANDLE(dom0_perfc_desc_t) desc; /* counter information (or NULL) */ 90.222 } dom0_perfccontrol_t; 90.223 +DEFINE_GUEST_HANDLE(dom0_perfccontrol_t); 90.224 90.225 #define DOM0_MICROCODE 35 90.226 typedef struct dom0_microcode { 90.227 /* IN variables. */ 90.228 - void *data; /* Pointer to microcode data */ 90.229 + GUEST_HANDLE(void) data; /* Pointer to microcode data */ 90.230 uint32_t length; /* Length of microcode data. */ 90.231 } dom0_microcode_t; 90.232 +DEFINE_GUEST_HANDLE(dom0_microcode_t); 90.233 90.234 #define DOM0_IOPORT_PERMISSION 36 90.235 typedef struct dom0_ioport_permission { 90.236 @@ -337,6 +364,7 @@ typedef struct dom0_ioport_permission { 90.237 uint32_t nr_ports; /* size of port range */ 90.238 uint8_t allow_access; /* allow or deny access to range? */ 90.239 } dom0_ioport_permission_t; 90.240 +DEFINE_GUEST_HANDLE(dom0_ioport_permission_t); 90.241 90.242 #define DOM0_GETVCPUCONTEXT 37 90.243 typedef struct dom0_getvcpucontext { 90.244 @@ -344,8 +372,9 @@ typedef struct dom0_getvcpucontext { 90.245 domid_t domain; /* domain to be affected */ 90.246 uint32_t vcpu; /* vcpu # */ 90.247 /* OUT variables. */ 90.248 - vcpu_guest_context_t *ctxt; 90.249 + GUEST_HANDLE(vcpu_guest_context_t) ctxt; 90.250 } dom0_getvcpucontext_t; 90.251 +DEFINE_GUEST_HANDLE(dom0_getvcpucontext_t); 90.252 90.253 #define DOM0_GETVCPUINFO 43 90.254 typedef struct dom0_getvcpuinfo { 90.255 @@ -360,16 +389,18 @@ typedef struct dom0_getvcpuinfo { 90.256 uint32_t cpu; /* current mapping */ 90.257 cpumap_t cpumap; /* allowable mapping */ 90.258 } dom0_getvcpuinfo_t; 90.259 +DEFINE_GUEST_HANDLE(dom0_getvcpuinfo_t); 90.260 90.261 #define DOM0_GETDOMAININFOLIST 38 90.262 typedef struct dom0_getdomaininfolist { 90.263 /* IN variables. */ 90.264 domid_t first_domain; 90.265 uint32_t max_domains; 90.266 - dom0_getdomaininfo_t *buffer; 90.267 + GUEST_HANDLE(dom0_getdomaininfo_t) buffer; 90.268 /* OUT variables. */ 90.269 uint32_t num_domains; 90.270 } dom0_getdomaininfolist_t; 90.271 +DEFINE_GUEST_HANDLE(dom0_getdomaininfolist_t); 90.272 90.273 #define DOM0_PLATFORM_QUIRK 39 90.274 #define QUIRK_NOIRQBALANCING 1 90.275 @@ -377,37 +408,44 @@ typedef struct dom0_platform_quirk { 90.276 /* IN variables. 
*/ 90.277 uint32_t quirk_id; 90.278 } dom0_platform_quirk_t; 90.279 +DEFINE_GUEST_HANDLE(dom0_platform_quirk_t); 90.280 90.281 #define DOM0_PHYSICAL_MEMORY_MAP 40 90.282 +typedef struct dom0_memory_map_entry { 90.283 + uint64_t start, end; 90.284 + uint32_t flags; /* reserved */ 90.285 + uint8_t is_ram; 90.286 +} dom0_memory_map_entry_t; 90.287 +DEFINE_GUEST_HANDLE(dom0_memory_map_entry_t); 90.288 typedef struct dom0_physical_memory_map { 90.289 /* IN variables. */ 90.290 uint32_t max_map_entries; 90.291 /* OUT variables. */ 90.292 uint32_t nr_map_entries; 90.293 - struct dom0_memory_map_entry { 90.294 - uint64_t start, end; 90.295 - uint32_t flags; /* reserved */ 90.296 - uint8_t is_ram; 90.297 - } *memory_map; 90.298 + GUEST_HANDLE(dom0_memory_map_entry_t) memory_map; 90.299 } dom0_physical_memory_map_t; 90.300 +DEFINE_GUEST_HANDLE(dom0_physical_memory_map_t); 90.301 90.302 #define DOM0_MAX_VCPUS 41 90.303 typedef struct dom0_max_vcpus { 90.304 domid_t domain; /* domain to be affected */ 90.305 uint32_t max; /* maximum number of vcpus */ 90.306 } dom0_max_vcpus_t; 90.307 +DEFINE_GUEST_HANDLE(dom0_max_vcpus_t); 90.308 90.309 #define DOM0_SETDOMAINHANDLE 44 90.310 typedef struct dom0_setdomainhandle { 90.311 domid_t domain; 90.312 xen_domain_handle_t handle; 90.313 } dom0_setdomainhandle_t; 90.314 +DEFINE_GUEST_HANDLE(dom0_setdomainhandle_t); 90.315 90.316 #define DOM0_SETDEBUGGING 45 90.317 typedef struct dom0_setdebugging { 90.318 domid_t domain; 90.319 uint8_t enable; 90.320 } dom0_setdebugging_t; 90.321 +DEFINE_GUEST_HANDLE(dom0_setdebugging_t); 90.322 90.323 #define DOM0_IRQ_PERMISSION 46 90.324 typedef struct dom0_irq_permission { 90.325 @@ -415,6 +453,7 @@ typedef struct dom0_irq_permission { 90.326 uint8_t pirq; 90.327 uint8_t allow_access; /* flag to specify enable/disable of IRQ access */ 90.328 } dom0_irq_permission_t; 90.329 +DEFINE_GUEST_HANDLE(dom0_irq_permission_t); 90.330 90.331 #define DOM0_IOMEM_PERMISSION 47 90.332 typedef struct dom0_iomem_permission { 90.333 @@ -423,12 +462,14 @@ typedef struct dom0_iomem_permission { 90.334 unsigned long nr_mfns; /* number of pages in range (>0) */ 90.335 uint8_t allow_access; /* allow (!0) or deny (0) access to range? */ 90.336 } dom0_iomem_permission_t; 90.337 +DEFINE_GUEST_HANDLE(dom0_iomem_permission_t); 90.338 90.339 #define DOM0_HYPERCALL_INIT 48 90.340 typedef struct dom0_hypercall_init { 90.341 domid_t domain; /* domain to be affected */ 90.342 unsigned long mfn; /* machine frame to be initialised */ 90.343 } dom0_hypercall_init_t; 90.344 +DEFINE_GUEST_HANDLE(dom0_hypercall_init_t); 90.345 90.346 typedef struct dom0_op { 90.347 uint32_t cmd; 90.348 @@ -471,9 +512,10 @@ typedef struct dom0_op { 90.349 struct dom0_irq_permission irq_permission; 90.350 struct dom0_iomem_permission iomem_permission; 90.351 struct dom0_hypercall_init hypercall_init; 90.352 - uint8_t pad[128]; 90.353 + uint8_t pad[128]; 90.354 } u; 90.355 } dom0_op_t; 90.356 +DEFINE_GUEST_HANDLE(dom0_op_t); 90.357 90.358 #endif /* __XEN_PUBLIC_DOM0_OPS_H__ */ 90.359
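From the tools' side, none of the dom0_ops changes above should alter source compatibility: outside a __XEN__ build, GUEST_HANDLE(type) expands to a plain pointer, so existing callers keep assigning buffers directly. A standalone sketch of the caller's view (the struct is re-declared here in abbreviated form for illustration; the authoritative definition is the one in the hunk):

    /* Standalone sketch of the non-__XEN__ (tool-side) view. */
    #include <stdint.h>
    #include <stdio.h>

    #define __DEFINE_GUEST_HANDLE(name, type) typedef type * __guest_handle_ ## name
    #define DEFINE_GUEST_HANDLE(name) __DEFINE_GUEST_HANDLE(name, name)
    #define GUEST_HANDLE(name) __guest_handle_ ## name
    DEFINE_GUEST_HANDLE(char);

    typedef struct dom0_readconsole {
        uint32_t clear;              /* Non-zero -> clear after reading. */
        GUEST_HANDLE(char) buffer;   /* a plain char * in this build */
        uint32_t count;
    } dom0_readconsole_t;

    int main(void)
    {
        static char buf[4096];
        dom0_readconsole_t op = { .clear = 0, .buffer = buf,
                                  .count = sizeof(buf) };
        printf("would read up to %u bytes\n", op.count);
        return 0;
    }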
91.1 --- a/xen/include/public/hvm/ioreq.h Mon Mar 06 09:09:18 2006 -0700 91.2 +++ b/xen/include/public/hvm/ioreq.h Mon Mar 06 10:21:35 2006 -0700 91.3 @@ -71,8 +71,8 @@ typedef struct { 91.4 typedef struct { 91.5 ioreq_t vp_ioreq; 91.6 /* Event channel port */ 91.7 - unsigned long vp_eport; /* VMX vcpu uses this to notify DM */ 91.8 - unsigned long dm_eport; /* DM uses this to notify VMX vcpu */ 91.9 + unsigned int vp_eport; /* VMX vcpu uses this to notify DM */ 91.10 + unsigned int dm_eport; /* DM uses this to notify VMX vcpu */ 91.11 } vcpu_iodata_t; 91.12 91.13 typedef struct {
92.1 --- a/xen/include/public/io/tpmif.h Mon Mar 06 09:09:18 2006 -0700 92.2 +++ b/xen/include/public/io/tpmif.h Mon Mar 06 10:21:35 2006 -0700 92.3 @@ -21,7 +21,7 @@ 92.4 typedef struct { 92.5 unsigned long addr; /* Machine address of packet. */ 92.6 grant_ref_t ref; /* grant table access reference */ 92.7 - uint16_t id; /* Echoed in response message. */ 92.8 + uint16_t unused; 92.9 uint16_t size; /* Packet size in bytes. */ 92.10 } tpmif_tx_request_t; 92.11
93.1 --- a/xen/include/public/memory.h Mon Mar 06 09:09:18 2006 -0700 93.2 +++ b/xen/include/public/memory.h Mon Mar 06 10:21:35 2006 -0700 93.3 @@ -29,7 +29,7 @@ typedef struct xen_memory_reservation { 93.4 * OUT: GMFN bases of extents that were allocated 93.5 * (NB. This command also updates the mach_to_phys translation table) 93.6 */ 93.7 - GUEST_HANDLE(xen_ulong) extent_start; 93.8 + GUEST_HANDLE(ulong) extent_start; 93.9 93.10 /* Number of extents, and size/alignment of each (2^extent_order pages). */ 93.11 unsigned long nr_extents; 93.12 @@ -86,7 +86,7 @@ typedef struct xen_machphys_mfn_list { 93.13 * any large discontiguities in the machine address space, 2MB gaps in 93.14 * the machphys table will be represented by an MFN base of zero. 93.15 */ 93.16 - GUEST_HANDLE(xen_ulong) extent_start; 93.17 + GUEST_HANDLE(ulong) extent_start; 93.18 93.19 /* 93.20 * Number of extents written to the above array. This will be smaller 93.21 @@ -130,13 +130,13 @@ typedef struct xen_translate_gpfn_list { 93.22 unsigned long nr_gpfns; 93.23 93.24 /* List of GPFNs to translate. */ 93.25 - GUEST_HANDLE(xen_ulong) gpfn_list; 93.26 + GUEST_HANDLE(ulong) gpfn_list; 93.27 93.28 /* 93.29 * Output list to contain MFN translations. May be the same as the input 93.30 * list (in which case each input GPFN is overwritten with the output MFN). 93.31 */ 93.32 - GUEST_HANDLE(xen_ulong) mfn_list; 93.33 + GUEST_HANDLE(ulong) mfn_list; 93.34 } xen_translate_gpfn_list_t; 93.35 DEFINE_GUEST_HANDLE(xen_translate_gpfn_list_t); 93.36
94.1 --- a/xen/include/public/physdev.h Mon Mar 06 09:09:18 2006 -0700 94.2 +++ b/xen/include/public/physdev.h Mon Mar 06 10:21:35 2006 -0700 94.3 @@ -33,8 +33,8 @@ typedef struct physdevop_set_iobitmap { 94.4 94.5 typedef struct physdevop_apic { 94.6 /* IN */ 94.7 - uint32_t apic; 94.8 - uint32_t offset; 94.9 + unsigned long apic_physbase; 94.10 + uint32_t reg; 94.11 /* IN or OUT */ 94.12 uint32_t value; 94.13 } physdevop_apic_t;
95.1 --- a/xen/include/public/xen.h Mon Mar 06 09:09:18 2006 -0700 95.2 +++ b/xen/include/public/xen.h Mon Mar 06 10:21:35 2006 -0700 95.3 @@ -9,22 +9,6 @@ 95.4 #ifndef __XEN_PUBLIC_XEN_H__ 95.5 #define __XEN_PUBLIC_XEN_H__ 95.6 95.7 -#ifdef __XEN__ 95.8 -#define DEFINE_GUEST_HANDLE(type) struct __guest_handle_ ## type { type *p; } 95.9 -#define GUEST_HANDLE(type) struct __guest_handle_ ## type 95.10 -#else 95.11 -#define DEFINE_GUEST_HANDLE(type) 95.12 -#define GUEST_HANDLE(type) type * 95.13 -#endif 95.14 - 95.15 -#ifndef __ASSEMBLY__ 95.16 -/* Guest handle for unsigned long pointer. Define a name with no whitespace. */ 95.17 -typedef unsigned long xen_ulong; 95.18 -DEFINE_GUEST_HANDLE(xen_ulong); 95.19 -/* Guest handle for arbitrary-type pointer (void *). */ 95.20 -DEFINE_GUEST_HANDLE(void); 95.21 -#endif 95.22 - 95.23 #if defined(__i386__) 95.24 #include "arch-x86_32.h" 95.25 #elif defined(__x86_64__) 95.26 @@ -396,8 +380,8 @@ typedef struct shared_info { 95.27 * a. relocated kernel image 95.28 * b. initial ram disk [mod_start, mod_len] 95.29 * c. list of allocated page frames [mfn_list, nr_pages] 95.30 - * d. bootstrap page tables [pt_base, CR3 (x86)] 95.31 - * e. start_info_t structure [register ESI (x86)] 95.32 + * d. start_info_t structure [register ESI (x86)] 95.33 + * e. bootstrap page tables [pt_base, CR3 (x86)] 95.34 * f. bootstrap stack [register ESP (x86)] 95.35 * 5. Bootstrap elements are packed together, but each is 4kB-aligned. 95.36 * 6. The initial ram disk may be omitted.
96.1 --- a/xen/include/xen/console.h Mon Mar 06 09:09:18 2006 -0700 96.2 +++ b/xen/include/xen/console.h Mon Mar 06 10:21:35 2006 -0700 96.3 @@ -8,12 +8,13 @@ 96.4 #define __CONSOLE_H__ 96.5 96.6 #include <xen/spinlock.h> 96.7 +#include <xen/guest_access.h> 96.8 96.9 extern spinlock_t console_lock; 96.10 96.11 void set_printk_prefix(const char *prefix); 96.12 96.13 -long read_console_ring(char **, u32 *, int); 96.14 +long read_console_ring(GUEST_HANDLE(char), u32 *, int); 96.15 96.16 void init_console(void); 96.17 void console_endboot(int disable_vga);
97.1 --- a/xen/include/xen/gdbstub.h Mon Mar 06 09:09:18 2006 -0700 97.2 +++ b/xen/include/xen/gdbstub.h Mon Mar 06 10:21:35 2006 -0700 97.3 @@ -21,6 +21,8 @@ 97.4 #ifndef __XEN_GDBSTUB_H__ 97.5 #define __XEN_GDBSTUB_H__ 97.6 97.7 +#ifdef CRASH_DEBUG 97.8 + 97.9 /* value <-> char (de)serializers for arch specific gdb backends */ 97.10 char hex2char(unsigned long x); 97.11 int char2hex(unsigned char c); 97.12 @@ -84,6 +86,14 @@ void gdb_arch_exit(struct cpu_user_regs 97.13 #define SIGALRM 14 97.14 #define SIGTERM 15 97.15 97.16 +void initialise_gdb(void); 97.17 + 97.18 +#else 97.19 + 97.20 +#define initialise_gdb() ((void)0) 97.21 + 97.22 +#endif 97.23 + 97.24 #endif /* __XEN_GDBSTUB_H__ */ 97.25 97.26 /*
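The CRASH_DEBUG guard above uses the usual compiled-out-stub pattern: when the feature is disabled, initialise_gdb() becomes a no-op macro with identical call syntax, so callers need no conditionals of their own. A standalone sketch (the function body under CRASH_DEBUG is invented for demonstration):

    /* Standalone sketch of the compiled-out-stub pattern. */
    #include <stdio.h>

    #ifdef CRASH_DEBUG
    void initialise_gdb(void) { puts("gdb stub initialised"); }
    #else
    #define initialise_gdb() ((void)0)   /* compiles away entirely */
    #endif

    int main(void)
    {
        initialise_gdb();                /* legal in both configurations */
        puts("boot continues");
        return 0;
    }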
98.1 --- a/xen/include/xen/guest_access.h Mon Mar 06 09:09:18 2006 -0700 98.2 +++ b/xen/include/xen/guest_access.h Mon Mar 06 10:21:35 2006 -0700 98.3 @@ -7,64 +7,17 @@ 98.4 #ifndef __XEN_GUEST_ACCESS_H__ 98.5 #define __XEN_GUEST_ACCESS_H__ 98.6 98.7 -#include <asm/uaccess.h> 98.8 - 98.9 -/* Is the guest handle a NULL reference? */ 98.10 -#define guest_handle_is_null(hnd) ((hnd).p == NULL) 98.11 - 98.12 -/* Offset the given guest handle into the array it refers to. */ 98.13 -#define guest_handle_add_offset(hnd, nr) ((hnd).p += (nr)) 98.14 +#include <asm/guest_access.h> 98.15 98.16 -/* Cast a guest handle to the specified type of handle. */ 98.17 -#define guest_handle_cast(hnd, type) ({ \ 98.18 - type *_x = (hnd).p; \ 98.19 - (GUEST_HANDLE(type)) { _x }; \ 98.20 -}) 98.21 - 98.22 -/* 98.23 - * Copy an array of objects to guest context via a guest handle. 98.24 - * Optionally specify an offset into the guest array. 98.25 - */ 98.26 -#define copy_to_guest_offset(hnd, off, ptr, nr) ({ \ 98.27 - const typeof(ptr) _x = (hnd).p; \ 98.28 - const typeof(ptr) _y = (ptr); \ 98.29 - copy_to_user(_x+(off), _y, sizeof(*_x)*(nr)); \ 98.30 -}) 98.31 #define copy_to_guest(hnd, ptr, nr) \ 98.32 copy_to_guest_offset(hnd, 0, ptr, nr) 98.33 98.34 -/* 98.35 - * Copy an array of objects from guest context via a guest handle. 98.36 - * Optionally specify an offset into the guest array. 98.37 - */ 98.38 -#define copy_from_guest_offset(ptr, hnd, off, nr) ({ \ 98.39 - const typeof(ptr) _x = (hnd).p; \ 98.40 - const typeof(ptr) _y = (ptr); \ 98.41 - copy_from_user(_y, _x+(off), sizeof(*_x)*(nr)); \ 98.42 -}) 98.43 #define copy_from_guest(ptr, hnd, nr) \ 98.44 copy_from_guest_offset(ptr, hnd, 0, nr) 98.45 98.46 -/* 98.47 - * Pre-validate a guest handle. 98.48 - * Allows use of faster __copy_* functions. 98.49 - */ 98.50 -#define guest_handle_okay(hnd, nr) \ 98.51 - array_access_ok((hnd).p, (nr), sizeof(*(hnd).p)) 98.52 - 98.53 -#define __copy_to_guest_offset(hnd, off, ptr, nr) ({ \ 98.54 - const typeof(ptr) _x = (hnd).p; \ 98.55 - const typeof(ptr) _y = (ptr); \ 98.56 - __copy_to_user(_x+(off), _y, sizeof(*_x)*(nr)); \ 98.57 -}) 98.58 #define __copy_to_guest(hnd, ptr, nr) \ 98.59 __copy_to_guest_offset(hnd, 0, ptr, nr) 98.60 98.61 -#define __copy_from_guest_offset(ptr, hnd, off, nr) ({ \ 98.62 - const typeof(ptr) _x = (hnd).p; \ 98.63 - const typeof(ptr) _y = (ptr); \ 98.64 - __copy_from_user(_y, _x+(off), sizeof(*_x)*(nr)); \ 98.65 -}) 98.66 #define __copy_from_guest(ptr, hnd, nr) \ 98.67 __copy_from_guest_offset(ptr, hnd, 0, nr) 98.68
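After this refactor the common guest_access.h keeps only the offset-0 convenience wrappers and delegates the actual copy (and any arch-specific validation) to asm/guest_access.h. A standalone sketch of that layering (mock_ names invented; memcpy stands in for the arch copy):

    /* Standalone mock, not Xen source: the generic layer adds only the
     * offset-0 convenience forms; the "arch" layer owns the real copy. */
    #include <stdio.h>
    #include <string.h>

    typedef struct { int *p; } mock_handle_int;

    /* "arch" layer (asm/guest_access.h in the real tree): */
    #define mock_copy_to_guest_offset(hnd, off, ptr, nr) \
        (memcpy((hnd).p + (off), (ptr), sizeof(*(hnd).p) * (nr)), 0)

    /* "common" layer (xen/guest_access.h after this hunk): */
    #define mock_copy_to_guest(hnd, ptr, nr) \
        mock_copy_to_guest_offset(hnd, 0, ptr, nr)

    int main(void)
    {
        int dst[2] = { 0 }, src[2] = { 5, 6 };
        mock_handle_int h = { dst };

        mock_copy_to_guest(h, src, 2);
        printf("%d %d\n", dst[0], dst[1]);
        return 0;
    }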