ia64/xen-unstable

changeset 12519:0699c3eff7a3

merge with xen-unstable.hg
author awilliam@xenbuild.aw
date Mon Nov 20 13:11:15 2006 -0700 (2006-11-20)
parents 066094348f22 ea457d9d3fb2
children bcd2960d6dfd
files extras/mini-os/include/x86/spinlock.h tools/check/check_hotplug
line diff
     1.1 --- a/extras/mini-os/Makefile	Mon Nov 20 12:14:40 2006 -0700
     1.2 +++ b/extras/mini-os/Makefile	Mon Nov 20 13:11:15 2006 -0700
     1.3 @@ -122,6 +122,7 @@ clean:
     1.4  	rm -f *.o *~ core $(TARGET).elf $(TARGET).raw $(TARGET) $(TARGET).gz
     1.5  	rm -f libminios.a
     1.6  	find . -type l | xargs rm -f
     1.7 +	rm -f tags TAGS
     1.8  
     1.9  %.o: %.c $(HDRS) Makefile
    1.10  	$(CC) $(CFLAGS) $(CPPFLAGS) -c $< -o $@
    1.11 @@ -137,4 +138,7 @@ endef
    1.12  cscope:
    1.13  	$(all_sources) > cscope.files
    1.14  	cscope -k -b -q
    1.15 -
    1.16 +    
    1.17 +.PHONY: tags
    1.18 +tags:
    1.19 +	$(all_sources) | xargs ctags
     2.1 --- a/extras/mini-os/README	Mon Nov 20 12:14:40 2006 -0700
     2.2 +++ b/extras/mini-os/README	Mon Nov 20 13:11:15 2006 -0700
     2.3 @@ -26,5 +26,5 @@ Stuff it doesn't show:
     2.4  - to start it do the following in domain0 (assuming xend is running)
     2.5    # xm create domain_config
     2.6  
     2.7 -this starts the kernel and prints out a bunch of stuff and then every
     2.8 -1000 timer interrupts the system time.
      2.9 +this starts the kernel and prints out a bunch of stuff and then prints
     2.10 +the system time once every second.
     3.1 --- a/extras/mini-os/include/events.h	Mon Nov 20 12:14:40 2006 -0700
     3.2 +++ b/extras/mini-os/include/events.h	Mon Nov 20 13:11:15 2006 -0700
     3.3 @@ -20,7 +20,7 @@
     3.4  #define _EVENTS_H_
     3.5  
     3.6  #include<traps.h>
     3.7 -#include <xen/event_channel.h>
     3.8 +#include<xen/event_channel.h>
     3.9  
    3.10  typedef void (*evtchn_handler_t)(evtchn_port_t, struct pt_regs *, void *);
    3.11  
     4.1 --- a/extras/mini-os/include/sched.h	Mon Nov 20 12:14:40 2006 -0700
     4.2 +++ b/extras/mini-os/include/sched.h	Mon Nov 20 13:11:15 2006 -0700
     4.3 @@ -2,6 +2,7 @@
     4.4  #define __SCHED_H__
     4.5  
     4.6  #include <list.h>
     4.7 +#include <time.h>
     4.8  
     4.9  struct thread
    4.10  {
    4.11 @@ -11,6 +12,7 @@ struct thread
    4.12      unsigned long ip;  /* Instruction pointer */
    4.13      struct list_head thread_list;
    4.14      u32 flags;
    4.15 +    s_time_t wakeup_time;
    4.16  };
    4.17  
    4.18  
    4.19 @@ -36,5 +38,6 @@ static inline struct thread* get_current
    4.20  
    4.21  void wake(struct thread *thread);
    4.22  void block(struct thread *thread);
    4.23 +void sleep(u32 millisecs);
    4.24  
    4.25  #endif /* __SCHED_H__ */
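
The sleep() declaration added above gives mini-os threads a simple relative timeout in milliseconds; the new wakeup_time field is the scheduler-internal bookkeeping behind it (0 means "no timeout", as the sched.c hunk below shows). A hypothetical helper, offered only as an illustration and assuming the NOW() and MILLISECS() macros from mini-os <time.h>, maps an absolute deadline onto the relative call:

    #include <sched.h>
    #include <time.h>

    /* Hypothetical helper, not part of the changeset: sleep until an
     * absolute system time, built on the new relative-millisecond sleep(). */
    static void sleep_until(s_time_t deadline)
    {
        s_time_t now = NOW();                    /* current system time, in ns */
        if (deadline > now)
            sleep((u32)((deadline - now) / MILLISECS(1)));   /* ns -> ms */
    }
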
     5.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     5.2 +++ b/extras/mini-os/include/spinlock.h	Mon Nov 20 13:11:15 2006 -0700
     5.3 @@ -0,0 +1,55 @@
     5.4 +#ifndef __ASM_SPINLOCK_H
     5.5 +#define __ASM_SPINLOCK_H
     5.6 +
     5.7 +#include <lib.h>
     5.8 +
     5.9 +/*
    5.10 + * Your basic SMP spinlocks, allowing only a single CPU anywhere
    5.11 + */
    5.12 +
    5.13 +typedef struct {
    5.14 +	volatile unsigned int slock;
    5.15 +} spinlock_t;
    5.16 +
    5.17 +
    5.18 +#include "arch_spinlock.h"
    5.19 +
    5.20 +
    5.21 +#define SPINLOCK_MAGIC	0xdead4ead
    5.22 +
    5.23 +#define SPIN_LOCK_UNLOCKED ARCH_SPIN_LOCK_UNLOCKED
    5.24 +
    5.25 +#define spin_lock_init(x)	do { *(x) = SPIN_LOCK_UNLOCKED; } while(0)
    5.26 +
    5.27 +/*
    5.28 + * Simple spin lock operations.  There are two variants, one clears IRQ's
    5.29 + * on the local processor, one does not.
    5.30 + *
    5.31 + * We make no fairness assumptions. They have a cost.
    5.32 + */
    5.33 +
    5.34 +#define spin_is_locked(x)	arch_spin_is_locked(x)
    5.35 +
    5.36 +#define spin_unlock_wait(x)	do { barrier(); } while(spin_is_locked(x))
    5.37 +
    5.38 +
    5.39 +#define _spin_trylock(lock)     ({_raw_spin_trylock(lock) ? \
    5.40 +                                1 : ({ 0;});})
    5.41 +
    5.42 +#define _spin_lock(lock)        \
    5.43 +do {                            \
    5.44 +        _raw_spin_lock(lock);   \
    5.45 +} while(0)
    5.46 +
    5.47 +#define _spin_unlock(lock)      \
    5.48 +do {                            \
    5.49 +        _raw_spin_unlock(lock); \
    5.50 +} while (0)
    5.51 +
    5.52 +
    5.53 +#define spin_lock(lock)       _spin_lock(lock)
    5.54 +#define spin_unlock(lock)       _spin_unlock(lock)
    5.55 +
    5.56 +#define DEFINE_SPINLOCK(x) spinlock_t x = SPIN_LOCK_UNLOCKED
    5.57 +
    5.58 +#endif
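
The new generic spinlock.h keeps only the portable wrapper layer and defers the real locking primitives to the per-architecture arch_spinlock.h it includes. A minimal usage sketch of the API it exposes (the lock name and function are hypothetical, for illustration only):

    #include <spinlock.h>

    static DEFINE_SPINLOCK(console_lock);   /* spinlock_t console_lock = SPIN_LOCK_UNLOCKED */

    static void emit(const char *msg)
    {
        spin_lock(&console_lock);       /* _spin_lock -> arch _raw_spin_lock     */
        /* ... touch whatever the lock protects, e.g. write msg out ... */
        (void)msg;
        spin_unlock(&console_lock);     /* _spin_unlock -> arch _raw_spin_unlock */
    }
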
     6.1 --- a/extras/mini-os/include/time.h	Mon Nov 20 12:14:40 2006 -0700
     6.2 +++ b/extras/mini-os/include/time.h	Mon Nov 20 13:11:15 2006 -0700
     6.3 @@ -7,8 +7,9 @@
     6.4   *        File: time.h
     6.5   *      Author: Rolf Neugebauer (neugebar@dcs.gla.ac.uk)
     6.6   *     Changes: Grzegorz Milos (gm281@cam.ac.uk)
     6.7 + *              Robert Kaiser (kaiser@informatik.fh-wiesbaden.de)
     6.8   *              
     6.9 - *        Date: Jul 2003, changesJun 2005
    6.10 + *        Date: Jul 2003, changes: Jun 2005, Sep 2006
    6.11   * 
    6.12   * Environment: Xen Minimal OS
    6.13   * Description: Time and timer functions
    6.14 @@ -57,7 +58,8 @@ struct timespec {
    6.15  void     init_time(void);
    6.16  s_time_t get_s_time(void);
    6.17  s_time_t get_v_time(void);
    6.18 +u64      monotonic_clock(void);
    6.19  void     gettimeofday(struct timeval *tv);
    6.20 -void     block_domain(u32 millisecs);
    6.21 +void     block_domain(s_time_t until);
    6.22  
    6.23  #endif /* _TIME_H_ */
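
block_domain() now takes an absolute wake-up time on the same nanosecond s_time_t scale as the newly exported monotonic_clock(), rather than a relative millisecond count. A caller-side sketch of the change in convention, assuming the NOW()/MILLISECS() helpers from this header (illustration only, not part of the changeset):

    #include <time.h>

    void nap_at_most(u32 millisecs)
    {
        /* old API: block_domain(millisecs);      -- relative duration      */
        /* new API: pass an absolute deadline in system time instead:       */
        block_domain(NOW() + MILLISECS(millisecs));
    }
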
     7.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     7.2 +++ b/extras/mini-os/include/x86/arch_spinlock.h	Mon Nov 20 13:11:15 2006 -0700
     7.3 @@ -0,0 +1,93 @@
     7.4 +
     7.5 +
     7.6 +#ifndef __ARCH_ASM_SPINLOCK_H
     7.7 +#define __ARCH_ASM_SPINLOCK_H
     7.8 +
     7.9 +#include <lib.h>
    7.10 +
    7.11 +
    7.12 +#define ARCH_SPIN_LOCK_UNLOCKED (spinlock_t) { 1 }
    7.13 +
    7.14 +/*
    7.15 + * Simple spin lock operations.  There are two variants, one clears IRQ's
    7.16 + * on the local processor, one does not.
    7.17 + *
    7.18 + * We make no fairness assumptions. They have a cost.
    7.19 + */
    7.20 +
    7.21 +#define arch_spin_is_locked(x)	(*(volatile signed char *)(&(x)->slock) <= 0)
    7.22 +#define spin_unlock_wait(x)	do { barrier(); } while(spin_is_locked(x))
    7.23 +
    7.24 +#define spin_lock_string \
    7.25 +        "1:\n" \
    7.26 +	LOCK \
    7.27 +	"decb %0\n\t" \
    7.28 +	"jns 3f\n" \
    7.29 +	"2:\t" \
    7.30 +	"rep;nop\n\t" \
    7.31 +	"cmpb $0,%0\n\t" \
    7.32 +	"jle 2b\n\t" \
    7.33 +	"jmp 1b\n" \
    7.34 +	"3:\n\t"
    7.35 +
    7.36 +#define spin_lock_string_flags \
    7.37 +        "1:\n" \
    7.38 +	LOCK \
    7.39 +	"decb %0\n\t" \
    7.40 +	"jns 4f\n\t" \
    7.41 +	"2:\t" \
    7.42 +	"testl $0x200, %1\n\t" \
    7.43 +	"jz 3f\n\t" \
    7.44 +	"#sti\n\t" \
    7.45 +	"3:\t" \
    7.46 +	"rep;nop\n\t" \
    7.47 +	"cmpb $0, %0\n\t" \
    7.48 +	"jle 3b\n\t" \
    7.49 +	"#cli\n\t" \
    7.50 +	"jmp 1b\n" \
    7.51 +	"4:\n\t"
    7.52 +
    7.53 +/*
    7.54 + * This works. Despite all the confusion.
    7.55 + * (except on PPro SMP or if we are using OOSTORE)
    7.56 + * (PPro errata 66, 92)
    7.57 + */
    7.58 +
    7.59 +#define spin_unlock_string \
    7.60 +	"xchgb %b0, %1" \
    7.61 +		:"=q" (oldval), "=m" (lock->slock) \
    7.62 +		:"0" (oldval) : "memory"
    7.63 +
    7.64 +static inline void _raw_spin_unlock(spinlock_t *lock)
    7.65 +{
    7.66 +	char oldval = 1;
    7.67 +	__asm__ __volatile__(
    7.68 +		spin_unlock_string
    7.69 +	);
    7.70 +}
    7.71 +
    7.72 +static inline int _raw_spin_trylock(spinlock_t *lock)
    7.73 +{
    7.74 +	char oldval;
    7.75 +	__asm__ __volatile__(
    7.76 +		"xchgb %b0,%1\n"
    7.77 +		:"=q" (oldval), "=m" (lock->slock)
    7.78 +		:"0" (0) : "memory");
    7.79 +	return oldval > 0;
    7.80 +}
    7.81 +
    7.82 +static inline void _raw_spin_lock(spinlock_t *lock)
    7.83 +{
    7.84 +	__asm__ __volatile__(
    7.85 +		spin_lock_string
    7.86 +		:"=m" (lock->slock) : : "memory");
    7.87 +}
    7.88 +
    7.89 +static inline void _raw_spin_lock_flags (spinlock_t *lock, unsigned long flags)
    7.90 +{
    7.91 +	__asm__ __volatile__(
    7.92 +		spin_lock_string_flags
    7.93 +		:"=m" (lock->slock) : "r" (flags) : "memory");
    7.94 +}
    7.95 +
    7.96 +#endif
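
For readers skimming the inline assembly: _raw_spin_trylock() atomically exchanges 0 ("taken") into the lock byte and succeeds only if the previous value was the unlocked value 1, while _raw_spin_lock() spins with decb/cmpb until it wins. A rough C rendering of the trylock semantics using the GCC __sync_lock_test_and_set() builtin, offered purely as an illustration (it exchanges the whole word and ignores the negative values contended lockers can leave in the byte, so it is not a drop-in replacement):

    /* Semantics sketch only: treat slock as 1 = unlocked, 0 = taken. */
    static inline int trylock_sketch(spinlock_t *lock)
    {
        /* Atomically store 0 and fetch the previous value. */
        unsigned int old = __sync_lock_test_and_set(&lock->slock, 0);
        return old == 1;   /* acquired only if the lock was free */
    }
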
     8.1 --- a/extras/mini-os/include/x86/os.h	Mon Nov 20 12:14:40 2006 -0700
     8.2 +++ b/extras/mini-os/include/x86/os.h	Mon Nov 20 13:11:15 2006 -0700
     8.3 @@ -19,6 +19,8 @@
     8.4  #include <types.h>
     8.5  #include <hypervisor.h>
     8.6  
     8.7 +#define USED    __attribute__ ((used))
     8.8 +
     8.9  extern void do_exit(void);
    8.10  #define BUG do_exit
    8.11  
     9.1 --- a/extras/mini-os/include/x86/spinlock.h	Mon Nov 20 12:14:40 2006 -0700
     9.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
     9.3 @@ -1,121 +0,0 @@
     9.4 -#ifndef __ASM_SPINLOCK_H
     9.5 -#define __ASM_SPINLOCK_H
     9.6 -
     9.7 -#include <lib.h>
     9.8 -
     9.9 -/*
    9.10 - * Your basic SMP spinlocks, allowing only a single CPU anywhere
    9.11 - */
    9.12 -
    9.13 -typedef struct {
    9.14 -	volatile unsigned int slock;
    9.15 -} spinlock_t;
    9.16 -
    9.17 -#define SPINLOCK_MAGIC	0xdead4ead
    9.18 -
    9.19 -#define SPIN_LOCK_UNLOCKED (spinlock_t) { 1 }
    9.20 -
    9.21 -#define spin_lock_init(x)	do { *(x) = SPIN_LOCK_UNLOCKED; } while(0)
    9.22 -
    9.23 -/*
    9.24 - * Simple spin lock operations.  There are two variants, one clears IRQ's
    9.25 - * on the local processor, one does not.
    9.26 - *
    9.27 - * We make no fairness assumptions. They have a cost.
    9.28 - */
    9.29 -
    9.30 -#define spin_is_locked(x)	(*(volatile signed char *)(&(x)->slock) <= 0)
    9.31 -#define spin_unlock_wait(x)	do { barrier(); } while(spin_is_locked(x))
    9.32 -
    9.33 -#define spin_lock_string \
    9.34 -        "1:\n" \
    9.35 -	LOCK \
    9.36 -	"decb %0\n\t" \
    9.37 -	"jns 3f\n" \
    9.38 -	"2:\t" \
    9.39 -	"rep;nop\n\t" \
    9.40 -	"cmpb $0,%0\n\t" \
    9.41 -	"jle 2b\n\t" \
    9.42 -	"jmp 1b\n" \
    9.43 -	"3:\n\t"
    9.44 -
    9.45 -#define spin_lock_string_flags \
    9.46 -        "1:\n" \
    9.47 -	LOCK \
    9.48 -	"decb %0\n\t" \
    9.49 -	"jns 4f\n\t" \
    9.50 -	"2:\t" \
    9.51 -	"testl $0x200, %1\n\t" \
    9.52 -	"jz 3f\n\t" \
    9.53 -	"#sti\n\t" \
    9.54 -	"3:\t" \
    9.55 -	"rep;nop\n\t" \
    9.56 -	"cmpb $0, %0\n\t" \
    9.57 -	"jle 3b\n\t" \
    9.58 -	"#cli\n\t" \
    9.59 -	"jmp 1b\n" \
    9.60 -	"4:\n\t"
    9.61 -
    9.62 -/*
    9.63 - * This works. Despite all the confusion.
    9.64 - * (except on PPro SMP or if we are using OOSTORE)
    9.65 - * (PPro errata 66, 92)
    9.66 - */
    9.67 -
    9.68 -#define spin_unlock_string \
    9.69 -	"xchgb %b0, %1" \
    9.70 -		:"=q" (oldval), "=m" (lock->slock) \
    9.71 -		:"0" (oldval) : "memory"
    9.72 -
    9.73 -static inline void _raw_spin_unlock(spinlock_t *lock)
    9.74 -{
    9.75 -	char oldval = 1;
    9.76 -	__asm__ __volatile__(
    9.77 -		spin_unlock_string
    9.78 -	);
    9.79 -}
    9.80 -
    9.81 -static inline int _raw_spin_trylock(spinlock_t *lock)
    9.82 -{
    9.83 -	char oldval;
    9.84 -	__asm__ __volatile__(
    9.85 -		"xchgb %b0,%1\n"
    9.86 -		:"=q" (oldval), "=m" (lock->slock)
    9.87 -		:"0" (0) : "memory");
    9.88 -	return oldval > 0;
    9.89 -}
    9.90 -
    9.91 -static inline void _raw_spin_lock(spinlock_t *lock)
    9.92 -{
    9.93 -	__asm__ __volatile__(
    9.94 -		spin_lock_string
    9.95 -		:"=m" (lock->slock) : : "memory");
    9.96 -}
    9.97 -
    9.98 -static inline void _raw_spin_lock_flags (spinlock_t *lock, unsigned long flags)
    9.99 -{
   9.100 -	__asm__ __volatile__(
   9.101 -		spin_lock_string_flags
   9.102 -		:"=m" (lock->slock) : "r" (flags) : "memory");
   9.103 -}
   9.104 -
   9.105 -#define _spin_trylock(lock)     ({_raw_spin_trylock(lock) ? \
   9.106 -                                1 : ({ 0;});})
   9.107 -
   9.108 -#define _spin_lock(lock)        \
   9.109 -do {                            \
   9.110 -        _raw_spin_lock(lock);   \
   9.111 -} while(0)
   9.112 -
   9.113 -#define _spin_unlock(lock)      \
   9.114 -do {                            \
   9.115 -        _raw_spin_unlock(lock); \
   9.116 -} while (0)
   9.117 -
   9.118 -
   9.119 -#define spin_lock(lock)       _spin_lock(lock)
   9.120 -#define spin_unlock(lock)       _spin_unlock(lock)
   9.121 -
   9.122 -#define DEFINE_SPINLOCK(x) spinlock_t x = SPIN_LOCK_UNLOCKED
   9.123 -
   9.124 -#endif
    10.1 --- a/extras/mini-os/include/x86/x86_64/hypercall-x86_64.h	Mon Nov 20 12:14:40 2006 -0700
    10.2 +++ b/extras/mini-os/include/x86/x86_64/hypercall-x86_64.h	Mon Nov 20 13:11:15 2006 -0700
    10.3 @@ -235,9 +235,9 @@ HYPERVISOR_update_va_mapping(
    10.4  
    10.5  static inline int
    10.6  HYPERVISOR_event_channel_op(
    10.7 -	void *op)
    10.8 +       int cmd, void *op)
    10.9  {
   10.10 -	return _hypercall1(int, event_channel_op, op);
   10.11 +    return _hypercall2(int, event_channel_op, cmd, op);
   10.12  }
   10.13  
   10.14  static inline int
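
This wrapper tracks the hypervisor interface change that split event_channel_op into an explicit (cmd, op) pair. A hedged caller sketch under the new convention, assuming the evtchn_bind_virq structure and the EVTCHNOP_bind_virq/VIRQ_TIMER constants from the public xen headers, with mini-os's <hypervisor.h> providing the wrapper (illustration only):

    #include <hypervisor.h>
    #include <xen/event_channel.h>

    /* Hypothetical helper: bind VIRQ_TIMER on vcpu 0, return the port or -1. */
    static int bind_timer_virq(void)
    {
        struct evtchn_bind_virq op;

        op.virq = VIRQ_TIMER;
        op.vcpu = 0;
        if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq, &op) != 0)
            return -1;              /* hypercall failed */
        return (int)op.port;        /* event-channel port to bind a handler to */
    }
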
    11.1 --- a/extras/mini-os/kernel.c	Mon Nov 20 12:14:40 2006 -0700
    11.2 +++ b/extras/mini-os/kernel.c	Mon Nov 20 13:11:15 2006 -0700
    11.3 @@ -6,6 +6,7 @@
    11.4   * 
    11.5   * Copyright (c) 2002-2003, K A Fraser & R Neugebauer
    11.6   * Copyright (c) 2005, Grzegorz Milos, Intel Research Cambridge
    11.7 + * Copyright (c) 2006, Robert Kaiser, FH Wiesbaden
    11.8   * 
    11.9   * Permission is hereby granted, free of charge, to any person obtaining a copy
   11.10   * of this software and associated documentation files (the "Software"), to
   11.11 @@ -66,11 +67,24 @@ void xenbus_tester(void *p)
   11.12      /* test_xenbus(); */
   11.13  }
   11.14  
   11.15 +void periodic_thread(void *p)
   11.16 +{
   11.17 +    struct timeval tv;
   11.18 +    printk("Periodic thread started.\n");
   11.19 +    for(;;)
   11.20 +    {
   11.21 +        gettimeofday(&tv);
   11.22 +        printk("T(s=%ld us=%ld)\n", tv.tv_sec, tv.tv_usec);
   11.23 +        sleep(1000);
   11.24 +    }
   11.25 +}
   11.26 +
   11.27  /* This should be overridden by the application we are linked against. */
   11.28  __attribute__((weak)) int app_main(start_info_t *si)
   11.29  {
   11.30      printk("Dummy main: start_info=%p\n", si);
   11.31      create_thread("xenbus_tester", xenbus_tester, si);
   11.32 +    create_thread("periodic_thread", periodic_thread, si);
   11.33      return 0;
   11.34  }
   11.35  
   11.36 @@ -87,9 +101,6 @@ void start_kernel(start_info_t *si)
   11.37  
   11.38      trap_init();
   11.39  
   11.40 -    /* ENABLE EVENT DELIVERY. This is disabled at start of day. */
   11.41 -    __sti();
   11.42 -    
   11.43      /* print out some useful information  */
   11.44      printk("Xen Minimal OS!\n");
   11.45      printk("start_info:   %p\n",    si);
   11.46 @@ -102,6 +113,12 @@ void start_kernel(start_info_t *si)
   11.47      printk("  cmd_line:   %s\n",  
   11.48             si->cmd_line ? (const char *)si->cmd_line : "NULL");
   11.49  
   11.50 +    /* Set up events. */
   11.51 +    init_events();
   11.52 +    
   11.53 +    /* ENABLE EVENT DELIVERY. This is disabled at start of day. */
   11.54 +    __sti();
   11.55 +
   11.56      arch_print_info();
   11.57  
   11.58      setup_xen_features();
   11.59 @@ -109,9 +126,6 @@ void start_kernel(start_info_t *si)
   11.60      /* Init memory management. */
   11.61      init_mm();
   11.62  
   11.63 -    /* Set up events. */
   11.64 -    init_events();
   11.65 -    
   11.66      /* Init time and timers. */
   11.67      init_time();
   11.68  
    12.1 --- a/extras/mini-os/mm.c	Mon Nov 20 12:14:40 2006 -0700
    12.2 +++ b/extras/mini-os/mm.c	Mon Nov 20 13:11:15 2006 -0700
    12.3 @@ -148,7 +148,7 @@ static chunk_head_t  free_tail[FREELIST_
    12.4   * Prints allocation[0/1] for @nr_pages, starting at @start
    12.5   * address (virtual).
    12.6   */
    12.7 -static void print_allocation(void *start, int nr_pages)
    12.8 +USED static void print_allocation(void *start, int nr_pages)
    12.9  {
   12.10      unsigned long pfn_start = virt_to_pfn(start);
   12.11      int count;
   12.12 @@ -163,7 +163,7 @@ static void print_allocation(void *start
   12.13   * Prints chunks (making them with letters) for @nr_pages starting
   12.14   * at @start (virtual).
   12.15   */
   12.16 -static void print_chunks(void *start, int nr_pages)
   12.17 +USED static void print_chunks(void *start, int nr_pages)
   12.18  {
   12.19      char chunks[1001], current='A';
   12.20      int order, count;
   12.21 @@ -408,7 +408,6 @@ void new_pt_frame(unsigned long *pt_pfn,
   12.22           do_exit();
   12.23           break;
   12.24      }
   12.25 -
   12.26      /* Update the entry */
   12.27  #if defined(__x86_64__)
   12.28      tab = pte_to_virt(tab[l4_table_offset(pt_page)]);
   12.29 @@ -446,7 +445,6 @@ void new_pt_frame(unsigned long *pt_pfn,
   12.30         printk("ERROR: mmu_update failed\n");
   12.31         do_exit();
   12.32      }
   12.33 -
   12.34      *pt_pfn += 1;
   12.35  }
   12.36  
   12.37 @@ -581,7 +579,6 @@ void build_pagetable(unsigned long *star
   12.38          }
   12.39          start_address += PAGE_SIZE;
   12.40      }
   12.41 -
   12.42      *start_pfn = pt_pfn;
   12.43  }
   12.44  
    13.1 --- a/extras/mini-os/sched.c	Mon Nov 20 12:14:40 2006 -0700
    13.2 +++ b/extras/mini-os/sched.c	Mon Nov 20 13:11:15 2006 -0700
    13.3 @@ -5,7 +5,7 @@
    13.4   *
    13.5   *        File: sched.c
    13.6   *      Author: Grzegorz Milos
    13.7 - *     Changes: 
    13.8 + *     Changes: Robert Kaiser
    13.9   *              
   13.10   *        Date: Aug 2005
   13.11   * 
   13.12 @@ -142,6 +142,54 @@ void inline print_runqueue(void)
   13.13      printk("\n");
   13.14  }
   13.15  
   13.16 +/* Find the time when the next timeout expires. If this is more than
   13.17 +   10 seconds from now, return 10 seconds from now. */
   13.18 +static s_time_t blocking_time(void)
   13.19 +{
   13.20 +    struct thread *thread;
   13.21 +    struct list_head *iterator;
   13.22 +    s_time_t min_wakeup_time;
   13.23 +    unsigned long flags;
   13.24 +    local_irq_save(flags);
   13.25 +    /* default-block the domain for 10 seconds: */
   13.26 +    min_wakeup_time = NOW() + SECONDS(10);
   13.27 +
   13.28 +    /* Thread list needs to be protected */
   13.29 +    list_for_each(iterator, &idle_thread->thread_list)
   13.30 +    {
   13.31 +        thread = list_entry(iterator, struct thread, thread_list);
   13.32 +        if(!is_runnable(thread) && thread->wakeup_time != 0LL)
   13.33 +        {
   13.34 +            if(thread->wakeup_time < min_wakeup_time)
   13.35 +            {
   13.36 +                min_wakeup_time = thread->wakeup_time;
   13.37 +            }
   13.38 +        }
   13.39 +    }
   13.40 +    local_irq_restore(flags);
   13.41 +    return(min_wakeup_time);
   13.42 +}
   13.43 +
   13.44 +/* Wake up all threads with expired timeouts. */
   13.45 +static void wake_expired(void)
   13.46 +{
   13.47 +    struct thread *thread;
   13.48 +    struct list_head *iterator;
   13.49 +    s_time_t now = NOW();
   13.50 +    unsigned long flags;
   13.51 +    local_irq_save(flags);
   13.52 +    /* Thread list needs to be protected */
   13.53 +    list_for_each(iterator, &idle_thread->thread_list)
   13.54 +    {
   13.55 +        thread = list_entry(iterator, struct thread, thread_list);
   13.56 +        if(!is_runnable(thread) && thread->wakeup_time != 0LL)
   13.57 +        {
   13.58 +            if(thread->wakeup_time <= now)
   13.59 +                wake(thread);
   13.60 +        }
   13.61 +    }
   13.62 +    local_irq_restore(flags);
   13.63 +}
   13.64  
   13.65  void schedule(void)
   13.66  {
   13.67 @@ -229,8 +277,9 @@ struct thread* create_thread(char *name,
   13.68      stack_push(thread, (unsigned long) data);
   13.69      thread->ip = (unsigned long) thread_starter;
   13.70       
   13.71 -    /* Not runable, not exited */ 
    13.72 +    /* Not runnable, not exited, not sleeping */
   13.73      thread->flags = 0;
   13.74 +    thread->wakeup_time = 0LL;
   13.75      set_runnable(thread);
   13.76      local_irq_save(flags);
   13.77      if(idle_thread != NULL) {
   13.78 @@ -247,20 +296,34 @@ struct thread* create_thread(char *name,
   13.79  
   13.80  void block(struct thread *thread)
   13.81  {
   13.82 +    thread->wakeup_time = 0LL;
   13.83      clear_runnable(thread);
   13.84  }
   13.85  
   13.86 +void sleep(u32 millisecs)
   13.87 +{
   13.88 +    struct thread *thread = get_current();
   13.89 +    thread->wakeup_time = NOW()  + MILLISECS(millisecs);
   13.90 +    clear_runnable(thread);
   13.91 +    schedule();
   13.92 +}
   13.93 +
   13.94  void wake(struct thread *thread)
   13.95  {
   13.96 +    thread->wakeup_time = 0LL;
   13.97      set_runnable(thread);
   13.98  }
   13.99  
  13.100  void idle_thread_fn(void *unused)
  13.101  {
  13.102 +    s_time_t until;
  13.103      for(;;)
  13.104      {
  13.105          schedule();
  13.106 -        block_domain(10000);
  13.107 +        /* block until the next timeout expires, or for 10 secs, whichever comes first */
  13.108 +        until = blocking_time();
  13.109 +        block_domain(until);
  13.110 +        wake_expired();
  13.111      }
  13.112  }
  13.113  
  13.114 @@ -278,7 +341,7 @@ void run_idle_thread(void)
  13.115                           "push %1\n\t" 
  13.116                           "ret"                                            
  13.117                           :"=m" (idle_thread->sp)
  13.118 -                         :"m" (idle_thread->ip));                          
  13.119 +                         :"m" (idle_thread->ip));                                                    
  13.120  #endif
  13.121  }
  13.122  
    14.1 --- a/extras/mini-os/time.c	Mon Nov 20 12:14:40 2006 -0700
    14.2 +++ b/extras/mini-os/time.c	Mon Nov 20 13:11:15 2006 -0700
    14.3 @@ -3,6 +3,7 @@
    14.4   * (C) 2003 - Rolf Neugebauer - Intel Research Cambridge
    14.5   * (C) 2002-2003 - Keir Fraser - University of Cambridge 
    14.6   * (C) 2005 - Grzegorz Milos - Intel Research Cambridge
    14.7 + * (C) 2006 - Robert Kaiser - FH Wiesbaden
    14.8   ****************************************************************************
    14.9   *
   14.10   *        File: time.c
   14.11 @@ -194,21 +195,15 @@ void gettimeofday(struct timeval *tv)
   14.12  }
   14.13  
   14.14  
   14.15 -static void print_current_time(void)
   14.16 -{
   14.17 -    struct timeval tv;    
   14.18 -
   14.19 -    gettimeofday(&tv);
   14.20 -    printk("T(s=%ld us=%ld)\n", tv.tv_sec, tv.tv_usec);
   14.21 -}
   14.22 -
   14.23 -
   14.24 -void block_domain(u32 millisecs)
   14.25 +void block_domain(s_time_t until)
   14.26  {
   14.27      struct timeval tv;
   14.28      gettimeofday(&tv);
   14.29 -    HYPERVISOR_set_timer_op(monotonic_clock() + 1000000LL * (s64) millisecs);
   14.30 -    HYPERVISOR_sched_op(SCHEDOP_block, 0);
   14.31 +    if(monotonic_clock() < until)
   14.32 +    {
   14.33 +        HYPERVISOR_set_timer_op(until);
   14.34 +        HYPERVISOR_sched_op(SCHEDOP_block, 0);
   14.35 +    }
   14.36  }
   14.37  
   14.38  
   14.39 @@ -217,15 +212,8 @@ void block_domain(u32 millisecs)
   14.40   */
   14.41  static void timer_handler(evtchn_port_t ev, struct pt_regs *regs, void *ign)
   14.42  {
   14.43 -    static int i;
   14.44 -
   14.45      get_time_values_from_xen();
   14.46      update_wallclock();
   14.47 -    i++;
   14.48 -    if (i >= 1000) {
   14.49 -        print_current_time();
   14.50 -        i = 0;
   14.51 -    }
   14.52  }
   14.53  
   14.54  
    15.1 --- a/linux-2.6-xen-sparse/arch/i386/kernel/time-xen.c	Mon Nov 20 12:14:40 2006 -0700
    15.2 +++ b/linux-2.6-xen-sparse/arch/i386/kernel/time-xen.c	Mon Nov 20 13:11:15 2006 -0700
    15.3 @@ -716,6 +716,7 @@ irqreturn_t timer_interrupt(int irq, voi
    15.4  		rcu_check_callbacks(cpu, user_mode(regs));
    15.5  	scheduler_tick();
    15.6  	run_posix_cpu_timers(current);
    15.7 +	profile_tick(CPU_PROFILING, regs);
    15.8  
    15.9  	return IRQ_HANDLED;
   15.10  }
    16.1 --- a/linux-2.6-xen-sparse/drivers/xen/blktap/blktap.c	Mon Nov 20 12:14:40 2006 -0700
    16.2 +++ b/linux-2.6-xen-sparse/drivers/xen/blktap/blktap.c	Mon Nov 20 13:11:15 2006 -0700
    16.3 @@ -1059,9 +1059,9 @@ static int blktap_read_ufe_ring(tap_blki
    16.4  			map[offset] = NULL;
    16.5  		}
    16.6  		fast_flush_area(pending_req, pending_idx, usr_idx, info->minor);
    16.7 +		info->idx_map[usr_idx] = INVALID_REQ;
    16.8  		make_response(blkif, pending_req->id, res.operation,
    16.9  			      res.status);
   16.10 -		info->idx_map[usr_idx] = INVALID_REQ;
   16.11  		blkif_put(pending_req->blkif);
   16.12  		free_req(pending_req);
   16.13  	}
    17.1 --- a/linux-2.6-xen-sparse/drivers/xen/netback/netback.c	Mon Nov 20 12:14:40 2006 -0700
    17.2 +++ b/linux-2.6-xen-sparse/drivers/xen/netback/netback.c	Mon Nov 20 13:11:15 2006 -0700
    17.3 @@ -825,7 +825,9 @@ static void tx_add_credit(netif_t *netif
    17.4  	max_burst = max(max_burst, netif->credit_bytes);
    17.5  
    17.6  	/* Take care that adding a new chunk of credit doesn't wrap to zero. */
    17.7 -	max_credit = max(netif->remaining_credit + netif->credit_bytes, ~0UL);
    17.8 +	max_credit = netif->remaining_credit + netif->credit_bytes;
    17.9 +	if (max_credit < netif->remaining_credit)
   17.10 +		max_credit = ULONG_MAX; /* wrapped: clamp to ULONG_MAX */
   17.11  
   17.12  	netif->remaining_credit = min(max_credit, max_burst);
   17.13  }
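
The dropped line clamped with max(..., ~0UL), which always evaluates to ~0UL, so the subsequent min() pinned remaining_credit to max_burst regardless of the intended sum; the replacement computes the sum and clamps only when it actually wraps. The general pattern, as a stand-alone illustration:

    #include <limits.h>

    /* Add two unsigned long values, clamping to ULONG_MAX instead of wrapping. */
    static unsigned long add_clamped(unsigned long a, unsigned long b)
    {
        unsigned long sum = a + b;            /* may wrap modulo 2^BITS         */
        return (sum < a) ? ULONG_MAX : sum;   /* wrapped iff sum < either addend */
    }
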
    18.1 --- a/tools/Rules.mk	Mon Nov 20 12:14:40 2006 -0700
    18.2 +++ b/tools/Rules.mk	Mon Nov 20 13:11:15 2006 -0700
    18.3 @@ -14,6 +14,11 @@ X11_LDPATH = -L/usr/X11R6/$(LIBDIR)
    18.4  
    18.5  CFLAGS += -D__XEN_TOOLS__
    18.6  
    18.7 +# Enable implicit LFS support *and* explicit LFS names.
    18.8 +CFLAGS  += $(shell getconf LFS_CFLAGS)
    18.9 +CFLAGS  += -D_LARGEFILE_SOURCE -D_LARGEFILE64_SOURCE
   18.10 +LDFLAGS += $(shell getconf LFS_LDFLAGS)
   18.11 +
   18.12  %.opic: %.c
   18.13  	$(CC) $(CPPFLAGS) -DPIC $(CFLAGS) -fPIC -c -o $@ $<
   18.14  
    19.1 --- a/tools/blktap/drivers/Makefile	Mon Nov 20 12:14:40 2006 -0700
    19.2 +++ b/tools/blktap/drivers/Makefile	Mon Nov 20 13:11:15 2006 -0700
    19.3 @@ -13,7 +13,6 @@ CFLAGS   += -Wno-unused
    19.4  CFLAGS   += -fno-strict-aliasing
    19.5  CFLAGS   += -I $(XEN_LIBXC) -I $(LIBAIO_DIR)
    19.6  CFLAGS   += $(INCLUDES) -I. -I../../xenstore 
    19.7 -CFLAGS   += -D_FILE_OFFSET_BITS=64 -D_LARGEFILE_SOURCE -D_LARGEFILE64_SOURCE
    19.8  CFLAGS   += -D_GNU_SOURCE
    19.9  
   19.10  # Get gcc to generate the dependencies for us.
    20.1 --- a/tools/blktap/drivers/blktapctrl.c	Mon Nov 20 12:14:40 2006 -0700
    20.2 +++ b/tools/blktap/drivers/blktapctrl.c	Mon Nov 20 13:11:15 2006 -0700
    20.3 @@ -167,13 +167,22 @@ static int get_tapdisk_pid(blkif_t *blki
    20.4  	return 1;
    20.5  }
    20.6  
    20.7 -static blkif_t *test_path(char *path, char **dev, int *type)
    20.8 +/* Look up the disk specified by path: 
    20.9 + *   if found, dev points to the device string in the path
   20.10 + *             type is the tapdisk driver type id
   20.11 + *             blkif is the existing interface if this is a shared driver
   20.12 + *             and NULL otherwise.
   20.13 + *   return 0 on success, -1 on error.
   20.14 + */
   20.15 +
   20.16 +static int test_path(char *path, char **dev, int *type, blkif_t *blkif)
   20.17  {
   20.18  	char *ptr, handle[10];
   20.19 -	int i, size;
   20.20 +	int i, size, found = 0;
   20.21  
   20.22  	size = sizeof(dtypes)/sizeof(disk_info_t *);
   20.23  	*type = MAX_DISK_TYPES + 1;
   20.24 +        blkif = NULL;
   20.25  
   20.26  	if ( (ptr = strstr(path, ":"))!=NULL) {
   20.27  		memcpy(handle, path, (ptr - path));
   20.28 @@ -182,26 +191,36 @@ static blkif_t *test_path(char *path, ch
   20.29  		*ptr = '\0';
   20.30  		DPRINTF("Detected handle: [%s]\n",handle);
   20.31  
   20.32 -		for (i = 0; i < size; i++) {
   20.33 -			if (strncmp(handle, dtypes[i]->handle, (ptr - path))
   20.34 -			    ==0) {
   20.35 -				*type = dtypes[i]->idnum;
   20.36 +		for (i = 0; i < size; i++) 
   20.37 +			if (strncmp(handle, dtypes[i]->handle, 
   20.38 +                                    (ptr - path)) ==0) {
   20.39 +                                found = 1;
   20.40 +                                break;
   20.41 +                        }
   20.42  
   20.43 -				if (dtypes[i]->single_handler == 1) {
   20.44 -					/* Check whether tapdisk process 
   20.45 -					   already exists */
   20.46 -					if (active_disks[dtypes[i]->idnum] 
   20.47 -					    == NULL) return NULL;
   20.48 -					else 
   20.49 -						return active_disks[dtypes[i]->idnum]->blkif;
   20.50 -				}
   20.51 -			}
   20.52 -		}
   20.53 -	} else *dev = NULL;
   20.54 +                if (found) {
   20.55 +                        *type = dtypes[i]->idnum;
   20.56 +                        
   20.57 +                        if (dtypes[i]->single_handler == 1) {
   20.58 +                                /* Check whether tapdisk process 
   20.59 +                                   already exists */
   20.60 +                                if (active_disks[dtypes[i]->idnum] == NULL) 
   20.61 +                                        blkif = NULL;
   20.62 +                                else 
   20.63 +                                        blkif = active_disks[dtypes[i]
   20.64 +                                                             ->idnum]->blkif;
   20.65 +                        }
   20.66 +                        return 0;
   20.67 +                }
   20.68 +        }
   20.69  
   20.70 -	return NULL;
   20.71 +        /* Fall-through case, we didn't find a disk driver. */
   20.72 +        DPRINTF("Unknown blktap disk type [%s]!\n",handle);
   20.73 +        *dev = NULL;
   20.74 +        return -1;
   20.75  }
   20.76  
   20.77 +
   20.78  static void add_disktype(blkif_t *blkif, int type)
   20.79  {
   20.80  	driver_list_entry_t *entry, **pprev;
   20.81 @@ -463,7 +482,11 @@ int blktapctrl_new_blkif(blkif_t *blkif)
   20.82  		if (get_new_dev(&major, &minor, blkif)<0)
   20.83  			return -1;
   20.84  
   20.85 -		exist = test_path(blk->params, &ptr, &type);
   20.86 +		if (test_path(blk->params, &ptr, &type, exist) != 0) {
   20.87 +                        DPRINTF("Error in blktap device string(%s).\n",
   20.88 +                                blk->params);
   20.89 +                        return -1;
   20.90 +                }
   20.91  		blkif->drivertype = type;
   20.92  		blkif->cookie = lrand48() % MAX_RAND_VAL;
   20.93  
    21.1 --- a/tools/blktap/lib/Makefile	Mon Nov 20 12:14:40 2006 -0700
    21.2 +++ b/tools/blktap/lib/Makefile	Mon Nov 20 13:11:15 2006 -0700
    21.3 @@ -17,7 +17,6 @@ SRCS     += xenbus.c blkif.c xs_api.c
    21.4  CFLAGS   += -Werror
    21.5  CFLAGS   += -Wno-unused
    21.6  CFLAGS   += -fno-strict-aliasing -fPIC
    21.7 -CFLAGS   += -D_FILE_OFFSET_BITS=64 -D_LARGEFILE_SOURCE -D_LARGEFILE64_SOURCE
    21.8  # get asprintf():
    21.9  CFLAGS   += -D _GNU_SOURCE
   21.10  
    22.1 --- a/tools/check/check_hotplug	Mon Nov 20 12:14:40 2006 -0700
    22.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    22.3 @@ -1,16 +0,0 @@
    22.4 -#!/bin/bash
    22.5 -# CHECK-INSTALL
    22.6 -
    22.7 -function error {
    22.8 -   echo
    22.9 -   echo '  *** Check for the hotplug scripts (hotplug) FAILED'
   22.10 -   exit 1
   22.11 -}
   22.12 -[ -x "$(which udevinfo)" ] && \
   22.13 -  UDEV_VERSION=$(udevinfo -V | sed -e 's/^[^0-9]* \([0-9]\{1,\}\)[^0-9]\{0,\}/\1/')
   22.14 -
   22.15 -if [ -n "$UDEV_VERSION" ] && [ $UDEV_VERSION -ge 059 ]; then
   22.16 -  exit 0
   22.17 -fi
   22.18 -
   22.19 -which hotplug 1>/dev/null 2>&1 || error
    23.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    23.2 +++ b/tools/check/check_udev	Mon Nov 20 13:11:15 2006 -0700
    23.3 @@ -0,0 +1,16 @@
    23.4 +#!/bin/bash
    23.5 +# CHECK-INSTALL
    23.6 +
    23.7 +function error {
    23.8 +   echo
    23.9 +   echo '  *** Check for udev/hotplug FAILED'
   23.10 +   exit 1
   23.11 +}
   23.12 +[ -x "$(which udevinfo)" ] && \
   23.13 +  UDEV_VERSION=$(udevinfo -V | sed -e 's/^[^0-9]* \([0-9]\{1,\}\)[^0-9]\{0,\}/\1/')
   23.14 +
   23.15 +if [ -n "$UDEV_VERSION" ] && [ $UDEV_VERSION -ge 059 ]; then
   23.16 +  exit 0
   23.17 +fi
   23.18 +
   23.19 +which hotplug 1>/dev/null 2>&1 || error
    24.1 --- a/tools/firmware/hvmloader/Makefile	Mon Nov 20 12:14:40 2006 -0700
    24.2 +++ b/tools/firmware/hvmloader/Makefile	Mon Nov 20 13:11:15 2006 -0700
    24.3 @@ -46,16 +46,14 @@ OBJS = $(patsubst %.c,%.o,$(SRCS))
    24.4  .PHONY: all
    24.5  all: hvmloader
    24.6  
    24.7 -acpi/acpi.bin:
    24.8 -	$(MAKE) -C acpi
    24.9 -
   24.10  hvmloader: roms.h $(SRCS)
   24.11  	$(CC) $(CFLAGS) -c $(SRCS)
   24.12  	$(CC) $(CFLAGS) $(LDFLAGS) -o hvmloader.tmp $(OBJS)
   24.13  	$(OBJCOPY) hvmloader.tmp hvmloader
   24.14  	rm -f hvmloader.tmp
   24.15  
   24.16 -roms.h:	../rombios/BIOS-bochs-latest ../vgabios/VGABIOS-lgpl-latest.bin ../vgabios/VGABIOS-lgpl-latest.cirrus.bin ../vmxassist/vmxassist.bin acpi/acpi.bin
   24.17 +roms.h:	../rombios/BIOS-bochs-latest ../vgabios/VGABIOS-lgpl-latest.bin ../vgabios/VGABIOS-lgpl-latest.cirrus.bin ../vmxassist/vmxassist.bin
   24.18 +	$(MAKE) -C acpi
   24.19  	sh ./mkhex rombios ../rombios/BIOS-bochs-latest > roms.h
   24.20  	sh ./mkhex vgabios_stdvga ../vgabios/VGABIOS-lgpl-latest.bin >> roms.h
   24.21  	sh ./mkhex vgabios_cirrusvga ../vgabios/VGABIOS-lgpl-latest.cirrus.bin >> roms.h
    25.1 --- a/tools/firmware/hvmloader/acpi/acpi2_0.h	Mon Nov 20 12:14:40 2006 -0700
    25.2 +++ b/tools/firmware/hvmloader/acpi/acpi2_0.h	Mon Nov 20 13:11:15 2006 -0700
    25.3 @@ -34,6 +34,11 @@ typedef   signed long int64_t;
    25.4  
    25.5  #include <xen/xen.h>
    25.6  
    25.7 +#define ASCII32(a,b,c,d)         \
    25.8 +    (((a) <<  0) | ((b) <<  8) | ((c) << 16) | ((d) << 24))
    25.9 +#define ASCII64(a,b,c,d,e,f,g,h) \
   25.10 +    (((uint64_t)ASCII32(a,b,c,d)) | (((uint64_t)ASCII32(e,f,g,h)) << 32))
   25.11 +
   25.12  #pragma pack (1)
   25.13  
   25.14  /*
   25.15 @@ -52,7 +57,7 @@ struct acpi_header {
   25.16  };
   25.17  
   25.18  #define ACPI_OEM_ID             {'I','N','T','E','L',' '}
   25.19 -#define ACPI_OEM_TABLE_ID       0x544244   /* "TBD" */
   25.20 +#define ACPI_OEM_TABLE_ID       ASCII32(' ','T','B','D')
   25.21  #define ACPI_OEM_REVISION       0x00000002
   25.22  #define ACPI_CREATOR_ID         0x00       /* TBD */
   25.23  #define ACPI_CREATOR_REVISION   0x00000002
   25.24 @@ -128,6 +133,20 @@ struct acpi_20_xsdt {
   25.25  #define ACPI_2_0_XSDT_REVISION 0x01
   25.26  
   25.27  /*
   25.28 + * TCG Hardware Interface Table (TCPA)
   25.29 + */
   25.30 +
   25.31 +typedef struct _ACPI_2_0_TCPA_CLIENT {
   25.32 +    struct acpi_header header;
   25.33 +    uint16_t PlatformClass;
   25.34 +    uint32_t LAML;
   25.35 +    uint64_t LASA;
   25.36 +} ACPI_2_0_TCPA_CLIENT;
   25.37 +
   25.38 +#define ACPI_2_0_TCPA_REVISION 0x02
   25.39 +#define ACPI_2_0_TCPA_LAML_SIZE (64*1024)
   25.40 +
   25.41 +/*
   25.42   * Fixed ACPI Description Table Structure (FADT).
   25.43   */
   25.44  struct acpi_20_fadt {
   25.45 @@ -297,12 +316,13 @@ struct acpi_20_madt {
   25.46  /*
   25.47   * Table Signatures.
   25.48   */
   25.49 -#define ACPI_2_0_RSDP_SIGNATURE 0x2052545020445352LL /* "RSD PTR " */
   25.50 -#define ACPI_2_0_FACS_SIGNATURE 0x53434146 /* "FACS" */
   25.51 -#define ACPI_2_0_FADT_SIGNATURE 0x50434146 /* "FADT" */
   25.52 -#define ACPI_2_0_MADT_SIGNATURE 0x43495041 /* "APIC" */
   25.53 -#define ACPI_2_0_RSDT_SIGNATURE 0x54445352 /* "RSDT" */
   25.54 -#define ACPI_2_0_XSDT_SIGNATURE 0x54445358 /* "XSDT" */
   25.55 +#define ACPI_2_0_RSDP_SIGNATURE ASCII64('R','S','D',' ','P','T','R',' ')
   25.56 +#define ACPI_2_0_FACS_SIGNATURE ASCII32('F','A','C','S')
   25.57 +#define ACPI_2_0_FADT_SIGNATURE ASCII32('F','A','C','P')
   25.58 +#define ACPI_2_0_MADT_SIGNATURE ASCII32('A','P','I','C')
   25.59 +#define ACPI_2_0_RSDT_SIGNATURE ASCII32('R','S','D','T')
   25.60 +#define ACPI_2_0_XSDT_SIGNATURE ASCII32('X','S','D','T')
   25.61 +#define ACPI_2_0_TCPA_SIGNATURE ASCII32('T','C','P','A')
   25.62  
   25.63  #pragma pack ()
   25.64  
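
The new ASCII32()/ASCII64() helpers pack signatures little-endian, byte for byte, exactly as the old hexadecimal literals did, while keeping the text readable; note that the FADT entry now plainly spells 'FACP', the signature the ACPI specification actually defines for that table (the old 0x50434146 literal already encoded "FACP" despite its "FADT" comment). A stand-alone check of the packing, for illustration only:

    #include <assert.h>
    #include <stdio.h>

    /* Same packing rule as the ASCII32() helper added above. */
    #define ASCII32(a,b,c,d) \
        (((a) <<  0) | ((b) <<  8) | ((c) << 16) | ((d) << 24))

    int main(void)
    {
        assert(ASCII32('A','P','I','C') == 0x43495041);   /* old MADT literal */
        assert(ASCII32('F','A','C','P') == 0x50434146);   /* old FADT literal */
        printf("signature packing matches the old literals\n");
        return 0;
    }
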
    26.1 --- a/tools/firmware/hvmloader/acpi/dsdt.asl	Mon Nov 20 12:14:40 2006 -0700
    26.2 +++ b/tools/firmware/hvmloader/acpi/dsdt.asl	Mon Nov 20 13:11:15 2006 -0700
    26.3 @@ -117,6 +117,13 @@ DefinitionBlock ("DSDT.aml", "DSDT", 1, 
    26.4  
    26.5                  /* reserve memory for pci devices */
    26.6  
    26.7 +                    DWordMemory (ResourceProducer, PosDecode, MinFixed, MaxFixed, Cacheable, ReadWrite,
    26.8 +                        0x00000000,
    26.9 +                        0x000A0000,
   26.10 +                        0x000BFFFF,
   26.11 +                        0x00000000,
   26.12 +                        0x00020000)
   26.13 +
   26.14                      DWordMemory (ResourceConsumer, PosDecode, MinFixed, MaxFixed, Cacheable, ReadWrite,
   26.15                          0x00000000,
   26.16                          0xF0000000,
    27.1 --- a/tools/firmware/hvmloader/acpi/dsdt.c	Mon Nov 20 12:14:40 2006 -0700
    27.2 +++ b/tools/firmware/hvmloader/acpi/dsdt.c	Mon Nov 20 13:11:15 2006 -0700
    27.3 @@ -1,19 +1,19 @@
    27.4  /*
    27.5   * 
    27.6   * Intel ACPI Component Architecture
    27.7 - * ASL Optimizing Compiler / AML Disassembler version 20050513 [Oct 12 2006]
    27.8 + * ASL Optimizing Compiler / AML Disassembler version 20050513 [Nov 16 2006]
    27.9   * Copyright (C) 2000 - 2005 Intel Corporation
   27.10   * Supports ACPI Specification Revision 3.0
   27.11   * 
   27.12 - * Compilation of "acpi_dsdt.asl" - Thu Oct 12 14:08:49 2006
   27.13 + * Compilation of "dsdt.asl" - Fri Nov 17 10:00:20 2006
   27.14   * 
   27.15   * C source code output
   27.16   *
   27.17   */
   27.18  unsigned char AmlCode[] = 
   27.19  {
   27.20 -    0x44,0x53,0x44,0x54,0xDA,0x08,0x00,0x00,  /* 00000000    "DSDT...." */
   27.21 -    0x01,0x26,0x49,0x4E,0x54,0x45,0x4C,0x00,  /* 00000008    ".&INTEL." */
   27.22 +    0x44,0x53,0x44,0x54,0xF4,0x08,0x00,0x00,  /* 00000000    "DSDT...." */
   27.23 +    0x01,0x22,0x49,0x4E,0x54,0x45,0x4C,0x00,  /* 00000008    "."INTEL." */
   27.24      0x69,0x6E,0x74,0x2D,0x78,0x65,0x6E,0x00,  /* 00000010    "int-xen." */
   27.25      0xD6,0x07,0x00,0x00,0x49,0x4E,0x54,0x4C,  /* 00000018    "....INTL" */
   27.26      0x13,0x05,0x05,0x20,0x08,0x50,0x4D,0x42,  /* 00000020    "... .PMB" */
   27.27 @@ -34,7 +34,7 @@ unsigned char AmlCode[] =
   27.28      0x12,0x08,0x04,0x0A,0x07,0x0A,0x07,0x00,  /* 00000098    "........" */
   27.29      0x00,0x08,0x50,0x49,0x43,0x44,0x00,0x14,  /* 000000A0    "..PICD.." */
   27.30      0x0C,0x5F,0x50,0x49,0x43,0x01,0x70,0x68,  /* 000000A8    "._PIC.ph" */
   27.31 -    0x50,0x49,0x43,0x44,0x10,0x45,0x82,0x5F,  /* 000000B0    "PICD.E._" */
   27.32 +    0x50,0x49,0x43,0x44,0x10,0x4F,0x83,0x5F,  /* 000000B0    "PICD.O._" */
   27.33      0x53,0x42,0x5F,0x5B,0x82,0x49,0x04,0x4D,  /* 000000B8    "SB_[.I.M" */
   27.34      0x45,0x4D,0x30,0x08,0x5F,0x48,0x49,0x44,  /* 000000C0    "EM0._HID" */
   27.35      0x0C,0x41,0xD0,0x0C,0x02,0x08,0x5F,0x43,  /* 000000C8    ".A...._C" */
   27.36 @@ -45,7 +45,7 @@ unsigned char AmlCode[] =
   27.37      0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,  /* 000000F0    "........" */
   27.38      0x00,0x00,0x00,0x00,0x00,0x00,0x0A,0x00,  /* 000000F8    "........" */
   27.39      0x00,0x00,0x00,0x00,0x79,0x00,0x5B,0x82,  /* 00000100    "....y.[." */
   27.40 -    0x42,0x7D,0x50,0x43,0x49,0x30,0x08,0x5F,  /* 00000108    "B}PCI0._" */
   27.41 +    0x4C,0x7E,0x50,0x43,0x49,0x30,0x08,0x5F,  /* 00000108    "L~PCI0._" */
   27.42      0x48,0x49,0x44,0x0C,0x41,0xD0,0x0A,0x03,  /* 00000110    "HID.A..." */
   27.43      0x08,0x5F,0x55,0x49,0x44,0x00,0x08,0x5F,  /* 00000118    "._UID.._" */
   27.44      0x41,0x44,0x52,0x00,0x08,0x5F,0x42,0x42,  /* 00000120    "ADR.._BB" */
   27.45 @@ -55,9 +55,9 @@ unsigned char AmlCode[] =
   27.46      0x33,0x03,0x49,0x52,0x51,0x35,0x05,0x49,  /* 00000140    "3.IRQ5.I" */
   27.47      0x52,0x51,0x37,0x07,0x49,0x52,0x51,0x39,  /* 00000148    "RQ7.IRQ9" */
   27.48      0x09,0x49,0x52,0x51,0x41,0x0A,0x49,0x52,  /* 00000150    ".IRQA.IR" */
   27.49 -    0x51,0x42,0x0B,0x14,0x4A,0x06,0x5F,0x43,  /* 00000158    "QB..J._C" */
   27.50 +    0x51,0x42,0x0B,0x14,0x44,0x08,0x5F,0x43,  /* 00000158    "QB..D._C" */
   27.51      0x52,0x53,0x00,0x08,0x50,0x52,0x54,0x30,  /* 00000160    "RS..PRT0" */
   27.52 -    0x11,0x48,0x05,0x0A,0x54,0x88,0x0D,0x00,  /* 00000168    ".H..T..." */
   27.53 +    0x11,0x42,0x07,0x0A,0x6E,0x88,0x0D,0x00,  /* 00000168    ".B..n..." */
   27.54      0x02,0x0F,0x00,0x00,0x00,0x00,0x00,0xFF,  /* 00000170    "........" */
   27.55      0x00,0x00,0x00,0x00,0x01,0x47,0x01,0xF8,  /* 00000178    ".....G.." */
   27.56      0x0C,0xF8,0x0C,0x01,0x08,0x88,0x0D,0x00,  /* 00000180    "........" */
   27.57 @@ -65,236 +65,239 @@ unsigned char AmlCode[] =
   27.58      0x0C,0x00,0x00,0xF8,0x0C,0x88,0x0D,0x00,  /* 00000190    "........" */
   27.59      0x01,0x0C,0x03,0x00,0x00,0x00,0x0D,0xFF,  /* 00000198    "........" */
   27.60      0xFF,0x00,0x00,0x00,0xF3,0x87,0x17,0x00,  /* 000001A0    "........" */
   27.61 -    0x00,0x0D,0x03,0x00,0x00,0x00,0x00,0x00,  /* 000001A8    "........" */
   27.62 -    0x00,0x00,0xF0,0xFF,0xFF,0xFF,0xF4,0x00,  /* 000001B0    "........" */
   27.63 -    0x00,0x00,0x00,0x00,0x00,0x00,0x05,0x79,  /* 000001B8    ".......y" */
   27.64 -    0x00,0xA4,0x50,0x52,0x54,0x30,0x08,0x42,  /* 000001C0    "..PRT0.B" */
   27.65 -    0x55,0x46,0x41,0x11,0x09,0x0A,0x06,0x23,  /* 000001C8    "UFA....#" */
   27.66 -    0xF8,0xDC,0x18,0x79,0x00,0x08,0x42,0x55,  /* 000001D0    "...y..BU" */
   27.67 -    0x46,0x42,0x11,0x09,0x0A,0x06,0x23,0x00,  /* 000001D8    "FB....#." */
   27.68 -    0x00,0x18,0x79,0x00,0x8B,0x42,0x55,0x46,  /* 000001E0    "..y..BUF" */
   27.69 -    0x42,0x01,0x49,0x52,0x51,0x56,0x08,0x42,  /* 000001E8    "B.IRQV.B" */
   27.70 -    0x55,0x46,0x43,0x11,0x07,0x0A,0x04,0x05,  /* 000001F0    "UFC....." */
   27.71 -    0x07,0x0A,0x0B,0x8C,0x42,0x55,0x46,0x43,  /* 000001F8    "....BUFC" */
   27.72 -    0x01,0x50,0x49,0x51,0x41,0x8C,0x42,0x55,  /* 00000200    ".PIQA.BU" */
   27.73 -    0x46,0x43,0x01,0x50,0x49,0x51,0x42,0x8C,  /* 00000208    "FC.PIQB." */
   27.74 -    0x42,0x55,0x46,0x43,0x01,0x50,0x49,0x51,  /* 00000210    "BUFC.PIQ" */
   27.75 -    0x43,0x8C,0x42,0x55,0x46,0x43,0x01,0x50,  /* 00000218    "C.BUFC.P" */
   27.76 -    0x49,0x51,0x44,0x5B,0x82,0x48,0x08,0x4C,  /* 00000220    "IQD[.H.L" */
   27.77 -    0x4E,0x4B,0x41,0x08,0x5F,0x48,0x49,0x44,  /* 00000228    "NKA._HID" */
   27.78 -    0x0C,0x41,0xD0,0x0C,0x0F,0x08,0x5F,0x55,  /* 00000230    ".A...._U" */
   27.79 -    0x49,0x44,0x01,0x14,0x1C,0x5F,0x53,0x54,  /* 00000238    "ID..._ST" */
   27.80 -    0x41,0x00,0x7B,0x50,0x49,0x52,0x41,0x0A,  /* 00000240    "A.{PIRA." */
   27.81 -    0x80,0x60,0xA0,0x08,0x93,0x60,0x0A,0x80,  /* 00000248    ".`...`.." */
   27.82 -    0xA4,0x0A,0x09,0xA1,0x04,0xA4,0x0A,0x0B,  /* 00000250    "........" */
   27.83 -    0x14,0x0B,0x5F,0x50,0x52,0x53,0x00,0xA4,  /* 00000258    ".._PRS.." */
   27.84 -    0x42,0x55,0x46,0x41,0x14,0x11,0x5F,0x44,  /* 00000260    "BUFA.._D" */
   27.85 -    0x49,0x53,0x00,0x7D,0x50,0x49,0x52,0x41,  /* 00000268    "IS.}PIRA" */
   27.86 -    0x0A,0x80,0x50,0x49,0x52,0x41,0x14,0x1A,  /* 00000270    "..PIRA.." */
   27.87 -    0x5F,0x43,0x52,0x53,0x00,0x7B,0x50,0x49,  /* 00000278    "_CRS.{PI" */
   27.88 -    0x52,0x42,0x0A,0x0F,0x60,0x79,0x01,0x60,  /* 00000280    "RB..`y.`" */
   27.89 -    0x49,0x52,0x51,0x56,0xA4,0x42,0x55,0x46,  /* 00000288    "IRQV.BUF" */
   27.90 -    0x42,0x14,0x1B,0x5F,0x53,0x52,0x53,0x01,  /* 00000290    "B.._SRS." */
   27.91 -    0x8B,0x68,0x01,0x49,0x52,0x51,0x31,0x82,  /* 00000298    ".h.IRQ1." */
   27.92 -    0x49,0x52,0x51,0x31,0x60,0x76,0x60,0x70,  /* 000002A0    "IRQ1`v`p" */
   27.93 -    0x60,0x50,0x49,0x52,0x41,0x5B,0x82,0x49,  /* 000002A8    "`PIRA[.I" */
   27.94 -    0x08,0x4C,0x4E,0x4B,0x42,0x08,0x5F,0x48,  /* 000002B0    ".LNKB._H" */
   27.95 -    0x49,0x44,0x0C,0x41,0xD0,0x0C,0x0F,0x08,  /* 000002B8    "ID.A...." */
   27.96 -    0x5F,0x55,0x49,0x44,0x0A,0x02,0x14,0x1C,  /* 000002C0    "_UID...." */
   27.97 -    0x5F,0x53,0x54,0x41,0x00,0x7B,0x50,0x49,  /* 000002C8    "_STA.{PI" */
   27.98 -    0x52,0x42,0x0A,0x80,0x60,0xA0,0x08,0x93,  /* 000002D0    "RB..`..." */
   27.99 -    0x60,0x0A,0x80,0xA4,0x0A,0x09,0xA1,0x04,  /* 000002D8    "`......." */
  27.100 -    0xA4,0x0A,0x0B,0x14,0x0B,0x5F,0x50,0x52,  /* 000002E0    "....._PR" */
  27.101 -    0x53,0x00,0xA4,0x42,0x55,0x46,0x41,0x14,  /* 000002E8    "S..BUFA." */
  27.102 -    0x11,0x5F,0x44,0x49,0x53,0x00,0x7D,0x50,  /* 000002F0    "._DIS.}P" */
  27.103 -    0x49,0x52,0x42,0x0A,0x80,0x50,0x49,0x52,  /* 000002F8    "IRB..PIR" */
  27.104 -    0x42,0x14,0x1A,0x5F,0x43,0x52,0x53,0x00,  /* 00000300    "B.._CRS." */
  27.105 -    0x7B,0x50,0x49,0x52,0x42,0x0A,0x0F,0x60,  /* 00000308    "{PIRB..`" */
  27.106 -    0x79,0x01,0x60,0x49,0x52,0x51,0x56,0xA4,  /* 00000310    "y.`IRQV." */
  27.107 -    0x42,0x55,0x46,0x42,0x14,0x1B,0x5F,0x53,  /* 00000318    "BUFB.._S" */
  27.108 -    0x52,0x53,0x01,0x8B,0x68,0x01,0x49,0x52,  /* 00000320    "RS..h.IR" */
  27.109 -    0x51,0x31,0x82,0x49,0x52,0x51,0x31,0x60,  /* 00000328    "Q1.IRQ1`" */
  27.110 -    0x76,0x60,0x70,0x60,0x50,0x49,0x52,0x42,  /* 00000330    "v`p`PIRB" */
  27.111 -    0x5B,0x82,0x49,0x08,0x4C,0x4E,0x4B,0x43,  /* 00000338    "[.I.LNKC" */
  27.112 -    0x08,0x5F,0x48,0x49,0x44,0x0C,0x41,0xD0,  /* 00000340    "._HID.A." */
  27.113 -    0x0C,0x0F,0x08,0x5F,0x55,0x49,0x44,0x0A,  /* 00000348    "..._UID." */
  27.114 -    0x03,0x14,0x1C,0x5F,0x53,0x54,0x41,0x00,  /* 00000350    "..._STA." */
  27.115 -    0x7B,0x50,0x49,0x52,0x43,0x0A,0x80,0x60,  /* 00000358    "{PIRC..`" */
  27.116 -    0xA0,0x08,0x93,0x60,0x0A,0x80,0xA4,0x0A,  /* 00000360    "...`...." */
  27.117 -    0x09,0xA1,0x04,0xA4,0x0A,0x0B,0x14,0x0B,  /* 00000368    "........" */
  27.118 -    0x5F,0x50,0x52,0x53,0x00,0xA4,0x42,0x55,  /* 00000370    "_PRS..BU" */
  27.119 -    0x46,0x41,0x14,0x11,0x5F,0x44,0x49,0x53,  /* 00000378    "FA.._DIS" */
  27.120 -    0x00,0x7D,0x50,0x49,0x52,0x43,0x0A,0x80,  /* 00000380    ".}PIRC.." */
  27.121 -    0x50,0x49,0x52,0x43,0x14,0x1A,0x5F,0x43,  /* 00000388    "PIRC.._C" */
  27.122 -    0x52,0x53,0x00,0x7B,0x50,0x49,0x52,0x43,  /* 00000390    "RS.{PIRC" */
  27.123 -    0x0A,0x0F,0x60,0x79,0x01,0x60,0x49,0x52,  /* 00000398    "..`y.`IR" */
  27.124 -    0x51,0x56,0xA4,0x42,0x55,0x46,0x42,0x14,  /* 000003A0    "QV.BUFB." */
  27.125 -    0x1B,0x5F,0x53,0x52,0x53,0x01,0x8B,0x68,  /* 000003A8    "._SRS..h" */
  27.126 -    0x01,0x49,0x52,0x51,0x31,0x82,0x49,0x52,  /* 000003B0    ".IRQ1.IR" */
  27.127 -    0x51,0x31,0x60,0x76,0x60,0x70,0x60,0x50,  /* 000003B8    "Q1`v`p`P" */
  27.128 -    0x49,0x52,0x43,0x5B,0x82,0x49,0x08,0x4C,  /* 000003C0    "IRC[.I.L" */
  27.129 -    0x4E,0x4B,0x44,0x08,0x5F,0x48,0x49,0x44,  /* 000003C8    "NKD._HID" */
  27.130 -    0x0C,0x41,0xD0,0x0C,0x0F,0x08,0x5F,0x55,  /* 000003D0    ".A...._U" */
  27.131 -    0x49,0x44,0x0A,0x04,0x14,0x1C,0x5F,0x53,  /* 000003D8    "ID...._S" */
  27.132 -    0x54,0x41,0x00,0x7B,0x50,0x49,0x52,0x44,  /* 000003E0    "TA.{PIRD" */
  27.133 -    0x0A,0x80,0x60,0xA0,0x08,0x93,0x60,0x0A,  /* 000003E8    "..`...`." */
  27.134 -    0x80,0xA4,0x0A,0x09,0xA1,0x04,0xA4,0x0A,  /* 000003F0    "........" */
  27.135 -    0x0B,0x14,0x0B,0x5F,0x50,0x52,0x53,0x00,  /* 000003F8    "..._PRS." */
  27.136 -    0xA4,0x42,0x55,0x46,0x41,0x14,0x11,0x5F,  /* 00000400    ".BUFA.._" */
  27.137 -    0x44,0x49,0x53,0x00,0x7D,0x50,0x49,0x52,  /* 00000408    "DIS.}PIR" */
  27.138 -    0x44,0x0A,0x80,0x50,0x49,0x52,0x44,0x14,  /* 00000410    "D..PIRD." */
  27.139 -    0x1A,0x5F,0x43,0x52,0x53,0x00,0x7B,0x50,  /* 00000418    "._CRS.{P" */
  27.140 -    0x49,0x52,0x44,0x0A,0x0F,0x60,0x79,0x01,  /* 00000420    "IRD..`y." */
  27.141 -    0x60,0x49,0x52,0x51,0x56,0xA4,0x42,0x55,  /* 00000428    "`IRQV.BU" */
  27.142 -    0x46,0x42,0x14,0x1B,0x5F,0x53,0x52,0x53,  /* 00000430    "FB.._SRS" */
  27.143 -    0x01,0x8B,0x68,0x01,0x49,0x52,0x51,0x31,  /* 00000438    "..h.IRQ1" */
  27.144 -    0x82,0x49,0x52,0x51,0x31,0x60,0x76,0x60,  /* 00000440    ".IRQ1`v`" */
  27.145 -    0x70,0x60,0x50,0x49,0x52,0x44,0x14,0x16,  /* 00000448    "p`PIRD.." */
  27.146 -    0x5F,0x50,0x52,0x54,0x00,0xA0,0x0A,0x50,  /* 00000450    "_PRT...P" */
  27.147 -    0x49,0x43,0x44,0xA4,0x50,0x52,0x54,0x41,  /* 00000458    "ICD.PRTA" */
  27.148 -    0xA4,0x50,0x52,0x54,0x50,0x08,0x50,0x52,  /* 00000460    ".PRTP.PR" */
  27.149 -    0x54,0x50,0x12,0x4D,0x11,0x14,0x12,0x0B,  /* 00000468    "TP.M...." */
  27.150 -    0x04,0x0B,0xFF,0xFF,0x00,0x4C,0x4E,0x4B,  /* 00000470    ".....LNK" */
  27.151 -    0x41,0x00,0x12,0x0B,0x04,0x0B,0xFF,0xFF,  /* 00000478    "A......." */
  27.152 -    0x01,0x4C,0x4E,0x4B,0x42,0x00,0x12,0x0C,  /* 00000480    ".LNKB..." */
  27.153 -    0x04,0x0B,0xFF,0xFF,0x0A,0x02,0x4C,0x4E,  /* 00000488    "......LN" */
  27.154 -    0x4B,0x43,0x00,0x12,0x0C,0x04,0x0B,0xFF,  /* 00000490    "KC......" */
  27.155 -    0xFF,0x0A,0x03,0x4C,0x4E,0x4B,0x44,0x00,  /* 00000498    "...LNKD." */
  27.156 -    0x12,0x0D,0x04,0x0C,0xFF,0xFF,0x01,0x00,  /* 000004A0    "........" */
  27.157 -    0x00,0x4C,0x4E,0x4B,0x42,0x00,0x12,0x0D,  /* 000004A8    ".LNKB..." */
  27.158 -    0x04,0x0C,0xFF,0xFF,0x01,0x00,0x01,0x4C,  /* 000004B0    ".......L" */
  27.159 -    0x4E,0x4B,0x43,0x00,0x12,0x0E,0x04,0x0C,  /* 000004B8    "NKC....." */
  27.160 -    0xFF,0xFF,0x01,0x00,0x0A,0x02,0x4C,0x4E,  /* 000004C0    "......LN" */
  27.161 -    0x4B,0x44,0x00,0x12,0x0E,0x04,0x0C,0xFF,  /* 000004C8    "KD......" */
  27.162 -    0xFF,0x01,0x00,0x0A,0x03,0x4C,0x4E,0x4B,  /* 000004D0    ".....LNK" */
  27.163 -    0x41,0x00,0x12,0x0D,0x04,0x0C,0xFF,0xFF,  /* 000004D8    "A......." */
  27.164 -    0x02,0x00,0x00,0x4C,0x4E,0x4B,0x43,0x00,  /* 000004E0    "...LNKC." */
  27.165 -    0x12,0x0D,0x04,0x0C,0xFF,0xFF,0x02,0x00,  /* 000004E8    "........" */
  27.166 -    0x01,0x4C,0x4E,0x4B,0x44,0x00,0x12,0x0E,  /* 000004F0    ".LNKD..." */
  27.167 -    0x04,0x0C,0xFF,0xFF,0x02,0x00,0x0A,0x02,  /* 000004F8    "........" */
  27.168 -    0x4C,0x4E,0x4B,0x41,0x00,0x12,0x0E,0x04,  /* 00000500    "LNKA...." */
  27.169 -    0x0C,0xFF,0xFF,0x02,0x00,0x0A,0x03,0x4C,  /* 00000508    ".......L" */
  27.170 -    0x4E,0x4B,0x42,0x00,0x12,0x0D,0x04,0x0C,  /* 00000510    "NKB....." */
  27.171 -    0xFF,0xFF,0x03,0x00,0x00,0x4C,0x4E,0x4B,  /* 00000518    ".....LNK" */
  27.172 -    0x44,0x00,0x12,0x0D,0x04,0x0C,0xFF,0xFF,  /* 00000520    "D......." */
  27.173 -    0x03,0x00,0x01,0x4C,0x4E,0x4B,0x41,0x00,  /* 00000528    "...LNKA." */
  27.174 -    0x12,0x0E,0x04,0x0C,0xFF,0xFF,0x03,0x00,  /* 00000530    "........" */
  27.175 -    0x0A,0x02,0x4C,0x4E,0x4B,0x42,0x00,0x12,  /* 00000538    "..LNKB.." */
  27.176 -    0x0E,0x04,0x0C,0xFF,0xFF,0x03,0x00,0x0A,  /* 00000540    "........" */
  27.177 -    0x03,0x4C,0x4E,0x4B,0x43,0x00,0x12,0x0D,  /* 00000548    ".LNKC..." */
  27.178 -    0x04,0x0C,0xFF,0xFF,0x04,0x00,0x00,0x4C,  /* 00000550    ".......L" */
  27.179 -    0x4E,0x4B,0x41,0x00,0x12,0x0D,0x04,0x0C,  /* 00000558    "NKA....." */
  27.180 -    0xFF,0xFF,0x04,0x00,0x01,0x4C,0x4E,0x4B,  /* 00000560    ".....LNK" */
  27.181 -    0x42,0x00,0x12,0x0E,0x04,0x0C,0xFF,0xFF,  /* 00000568    "B......." */
  27.182 -    0x04,0x00,0x0A,0x02,0x4C,0x4E,0x4B,0x43,  /* 00000570    "....LNKC" */
  27.183 -    0x00,0x12,0x0E,0x04,0x0C,0xFF,0xFF,0x04,  /* 00000578    "........" */
  27.184 -    0x00,0x0A,0x03,0x4C,0x4E,0x4B,0x44,0x00,  /* 00000580    "...LNKD." */
  27.185 -    0x08,0x50,0x52,0x54,0x41,0x12,0x32,0x04,  /* 00000588    ".PRTA.2." */
  27.186 -    0x12,0x0B,0x04,0x0C,0xFF,0xFF,0x01,0x00,  /* 00000590    "........" */
  27.187 -    0x00,0x00,0x0A,0x05,0x12,0x0B,0x04,0x0C,  /* 00000598    "........" */
  27.188 -    0xFF,0xFF,0x02,0x00,0x00,0x00,0x0A,0x07,  /* 000005A0    "........" */
  27.189 -    0x12,0x0B,0x04,0x0C,0xFF,0xFF,0x03,0x00,  /* 000005A8    "........" */
  27.190 -    0x00,0x00,0x0A,0x0A,0x12,0x0B,0x04,0x0C,  /* 000005B0    "........" */
  27.191 -    0xFF,0xFF,0x04,0x00,0x00,0x00,0x0A,0x0B,  /* 000005B8    "........" */
  27.192 -    0x5B,0x82,0x48,0x31,0x49,0x53,0x41,0x5F,  /* 000005C0    "[.H1ISA_" */
  27.193 -    0x08,0x5F,0x41,0x44,0x52,0x00,0x5B,0x80,  /* 000005C8    "._ADR.[." */
  27.194 -    0x50,0x49,0x52,0x51,0x02,0x0A,0x60,0x0A,  /* 000005D0    "PIRQ..`." */
  27.195 -    0x04,0x10,0x2E,0x5C,0x00,0x5B,0x81,0x29,  /* 000005D8    "...\.[.)" */
  27.196 -    0x5C,0x2F,0x04,0x5F,0x53,0x42,0x5F,0x50,  /* 000005E0    "\/._SB_P" */
  27.197 -    0x43,0x49,0x30,0x49,0x53,0x41,0x5F,0x50,  /* 000005E8    "CI0ISA_P" */
  27.198 -    0x49,0x52,0x51,0x01,0x50,0x49,0x52,0x41,  /* 000005F0    "IRQ.PIRA" */
  27.199 -    0x08,0x50,0x49,0x52,0x42,0x08,0x50,0x49,  /* 000005F8    ".PIRB.PI" */
  27.200 -    0x52,0x43,0x08,0x50,0x49,0x52,0x44,0x08,  /* 00000600    "RC.PIRD." */
  27.201 -    0x5B,0x82,0x46,0x0B,0x53,0x59,0x53,0x52,  /* 00000608    "[.F.SYSR" */
  27.202 -    0x08,0x5F,0x48,0x49,0x44,0x0C,0x41,0xD0,  /* 00000610    "._HID.A." */
  27.203 -    0x0C,0x02,0x08,0x5F,0x55,0x49,0x44,0x01,  /* 00000618    "..._UID." */
  27.204 -    0x08,0x43,0x52,0x53,0x5F,0x11,0x4E,0x08,  /* 00000620    ".CRS_.N." */
  27.205 -    0x0A,0x8A,0x47,0x01,0x10,0x00,0x10,0x00,  /* 00000628    "..G....." */
  27.206 -    0x00,0x10,0x47,0x01,0x22,0x00,0x22,0x00,  /* 00000630    "..G."."." */
  27.207 -    0x00,0x0C,0x47,0x01,0x30,0x00,0x30,0x00,  /* 00000638    "..G.0.0." */
  27.208 -    0x00,0x10,0x47,0x01,0x44,0x00,0x44,0x00,  /* 00000640    "..G.D.D." */
  27.209 -    0x00,0x1C,0x47,0x01,0x62,0x00,0x62,0x00,  /* 00000648    "..G.b.b." */
  27.210 -    0x00,0x02,0x47,0x01,0x65,0x00,0x65,0x00,  /* 00000650    "..G.e.e." */
  27.211 -    0x00,0x0B,0x47,0x01,0x72,0x00,0x72,0x00,  /* 00000658    "..G.r.r." */
  27.212 -    0x00,0x0E,0x47,0x01,0x80,0x00,0x80,0x00,  /* 00000660    "..G....." */
  27.213 -    0x00,0x01,0x47,0x01,0x84,0x00,0x84,0x00,  /* 00000668    "..G....." */
  27.214 -    0x00,0x03,0x47,0x01,0x88,0x00,0x88,0x00,  /* 00000670    "..G....." */
  27.215 -    0x00,0x01,0x47,0x01,0x8C,0x00,0x8C,0x00,  /* 00000678    "..G....." */
  27.216 -    0x00,0x03,0x47,0x01,0x90,0x00,0x90,0x00,  /* 00000680    "..G....." */
  27.217 -    0x00,0x10,0x47,0x01,0xA2,0x00,0xA2,0x00,  /* 00000688    "..G....." */
  27.218 -    0x00,0x1C,0x47,0x01,0xE0,0x00,0xE0,0x00,  /* 00000690    "..G....." */
  27.219 -    0x00,0x10,0x47,0x01,0xA0,0x08,0xA0,0x08,  /* 00000698    "..G....." */
  27.220 -    0x00,0x04,0x47,0x01,0xC0,0x0C,0xC0,0x0C,  /* 000006A0    "..G....." */
  27.221 -    0x00,0x10,0x47,0x01,0xD0,0x04,0xD0,0x04,  /* 000006A8    "..G....." */
  27.222 -    0x00,0x02,0x79,0x00,0x14,0x0B,0x5F,0x43,  /* 000006B0    "..y..._C" */
  27.223 -    0x52,0x53,0x00,0xA4,0x43,0x52,0x53,0x5F,  /* 000006B8    "RS..CRS_" */
  27.224 -    0x5B,0x82,0x2B,0x50,0x49,0x43,0x5F,0x08,  /* 000006C0    "[.+PIC_." */
  27.225 -    0x5F,0x48,0x49,0x44,0x0B,0x41,0xD0,0x08,  /* 000006C8    "_HID.A.." */
  27.226 -    0x5F,0x43,0x52,0x53,0x11,0x18,0x0A,0x15,  /* 000006D0    "_CRS...." */
  27.227 -    0x47,0x01,0x20,0x00,0x20,0x00,0x01,0x02,  /* 000006D8    "G. . ..." */
  27.228 -    0x47,0x01,0xA0,0x00,0xA0,0x00,0x01,0x02,  /* 000006E0    "G......." */
  27.229 -    0x22,0x04,0x00,0x79,0x00,0x5B,0x82,0x47,  /* 000006E8    ""..y.[.G" */
  27.230 -    0x05,0x44,0x4D,0x41,0x30,0x08,0x5F,0x48,  /* 000006F0    ".DMA0._H" */
  27.231 -    0x49,0x44,0x0C,0x41,0xD0,0x02,0x00,0x08,  /* 000006F8    "ID.A...." */
  27.232 -    0x5F,0x43,0x52,0x53,0x11,0x41,0x04,0x0A,  /* 00000700    "_CRS.A.." */
  27.233 -    0x3D,0x2A,0x10,0x04,0x47,0x01,0x00,0x00,  /* 00000708    "=*..G..." */
  27.234 -    0x00,0x00,0x00,0x10,0x47,0x01,0x81,0x00,  /* 00000710    "....G..." */
  27.235 -    0x81,0x00,0x00,0x03,0x47,0x01,0x87,0x00,  /* 00000718    "....G..." */
  27.236 -    0x87,0x00,0x00,0x01,0x47,0x01,0x89,0x00,  /* 00000720    "....G..." */
  27.237 -    0x89,0x00,0x00,0x03,0x47,0x01,0x8F,0x00,  /* 00000728    "....G..." */
  27.238 -    0x8F,0x00,0x00,0x01,0x47,0x01,0xC0,0x00,  /* 00000730    "....G..." */
  27.239 -    0xC0,0x00,0x00,0x20,0x47,0x01,0x80,0x04,  /* 00000738    "... G..." */
  27.240 -    0x80,0x04,0x00,0x10,0x79,0x00,0x5B,0x82,  /* 00000740    "....y.[." */
  27.241 -    0x25,0x54,0x4D,0x52,0x5F,0x08,0x5F,0x48,  /* 00000748    "%TMR_._H" */
  27.242 -    0x49,0x44,0x0C,0x41,0xD0,0x01,0x00,0x08,  /* 00000750    "ID.A...." */
  27.243 -    0x5F,0x43,0x52,0x53,0x11,0x10,0x0A,0x0D,  /* 00000758    "_CRS...." */
  27.244 -    0x47,0x01,0x40,0x00,0x40,0x00,0x00,0x04,  /* 00000760    "G.@.@..." */
  27.245 -    0x22,0x01,0x00,0x79,0x00,0x5B,0x82,0x25,  /* 00000768    ""..y.[.%" */
  27.246 -    0x52,0x54,0x43,0x5F,0x08,0x5F,0x48,0x49,  /* 00000770    "RTC_._HI" */
  27.247 -    0x44,0x0C,0x41,0xD0,0x0B,0x00,0x08,0x5F,  /* 00000778    "D.A...._" */
  27.248 -    0x43,0x52,0x53,0x11,0x10,0x0A,0x0D,0x47,  /* 00000780    "CRS....G" */
  27.249 -    0x01,0x70,0x00,0x70,0x00,0x00,0x02,0x22,  /* 00000788    ".p.p..."" */
  27.250 -    0x00,0x01,0x79,0x00,0x5B,0x82,0x22,0x53,  /* 00000790    "..y.[."S" */
  27.251 -    0x50,0x4B,0x52,0x08,0x5F,0x48,0x49,0x44,  /* 00000798    "PKR._HID" */
  27.252 -    0x0C,0x41,0xD0,0x08,0x00,0x08,0x5F,0x43,  /* 000007A0    ".A...._C" */
  27.253 -    0x52,0x53,0x11,0x0D,0x0A,0x0A,0x47,0x01,  /* 000007A8    "RS....G." */
  27.254 -    0x61,0x00,0x61,0x00,0x00,0x01,0x79,0x00,  /* 000007B0    "a.a...y." */
  27.255 -    0x5B,0x82,0x31,0x50,0x53,0x32,0x4D,0x08,  /* 000007B8    "[.1PS2M." */
  27.256 -    0x5F,0x48,0x49,0x44,0x0C,0x41,0xD0,0x0F,  /* 000007C0    "_HID.A.." */
  27.257 -    0x13,0x08,0x5F,0x43,0x49,0x44,0x0C,0x41,  /* 000007C8    ".._CID.A" */
  27.258 -    0xD0,0x0F,0x13,0x14,0x09,0x5F,0x53,0x54,  /* 000007D0    "....._ST" */
  27.259 -    0x41,0x00,0xA4,0x0A,0x0F,0x08,0x5F,0x43,  /* 000007D8    "A....._C" */
  27.260 -    0x52,0x53,0x11,0x08,0x0A,0x05,0x22,0x00,  /* 000007E0    "RS...."." */
  27.261 -    0x10,0x79,0x00,0x5B,0x82,0x42,0x04,0x50,  /* 000007E8    ".y.[.B.P" */
  27.262 -    0x53,0x32,0x4B,0x08,0x5F,0x48,0x49,0x44,  /* 000007F0    "S2K._HID" */
  27.263 -    0x0C,0x41,0xD0,0x03,0x03,0x08,0x5F,0x43,  /* 000007F8    ".A...._C" */
  27.264 -    0x49,0x44,0x0C,0x41,0xD0,0x03,0x0B,0x14,  /* 00000800    "ID.A...." */
  27.265 -    0x09,0x5F,0x53,0x54,0x41,0x00,0xA4,0x0A,  /* 00000808    "._STA..." */
  27.266 -    0x0F,0x08,0x5F,0x43,0x52,0x53,0x11,0x18,  /* 00000810    ".._CRS.." */
  27.267 -    0x0A,0x15,0x47,0x01,0x60,0x00,0x60,0x00,  /* 00000818    "..G.`.`." */
  27.268 -    0x00,0x01,0x47,0x01,0x64,0x00,0x64,0x00,  /* 00000820    "..G.d.d." */
  27.269 -    0x00,0x01,0x22,0x02,0x00,0x79,0x00,0x5B,  /* 00000828    ".."..y.[" */
  27.270 -    0x82,0x3A,0x46,0x44,0x43,0x30,0x08,0x5F,  /* 00000830    ".:FDC0._" */
  27.271 -    0x48,0x49,0x44,0x0C,0x41,0xD0,0x07,0x00,  /* 00000838    "HID.A..." */
  27.272 -    0x14,0x09,0x5F,0x53,0x54,0x41,0x00,0xA4,  /* 00000840    ".._STA.." */
  27.273 -    0x0A,0x0F,0x08,0x5F,0x43,0x52,0x53,0x11,  /* 00000848    "..._CRS." */
  27.274 -    0x1B,0x0A,0x18,0x47,0x01,0xF0,0x03,0xF0,  /* 00000850    "...G...." */
  27.275 -    0x03,0x01,0x06,0x47,0x01,0xF7,0x03,0xF7,  /* 00000858    "...G...." */
  27.276 -    0x03,0x01,0x01,0x22,0x40,0x00,0x2A,0x04,  /* 00000860    "..."@.*." */
  27.277 -    0x00,0x79,0x00,0x5B,0x82,0x35,0x55,0x41,  /* 00000868    ".y.[.5UA" */
  27.278 -    0x52,0x31,0x08,0x5F,0x48,0x49,0x44,0x0C,  /* 00000870    "R1._HID." */
  27.279 -    0x41,0xD0,0x05,0x01,0x08,0x5F,0x55,0x49,  /* 00000878    "A...._UI" */
  27.280 -    0x44,0x01,0x14,0x09,0x5F,0x53,0x54,0x41,  /* 00000880    "D..._STA" */
  27.281 -    0x00,0xA4,0x0A,0x0F,0x08,0x5F,0x43,0x52,  /* 00000888    "....._CR" */
  27.282 -    0x53,0x11,0x10,0x0A,0x0D,0x47,0x01,0xF8,  /* 00000890    "S....G.." */
  27.283 -    0x03,0xF8,0x03,0x01,0x08,0x22,0x10,0x00,  /* 00000898    ".....".." */
  27.284 -    0x79,0x00,0x5B,0x82,0x36,0x4C,0x54,0x50,  /* 000008A0    "y.[.6LTP" */
  27.285 -    0x31,0x08,0x5F,0x48,0x49,0x44,0x0C,0x41,  /* 000008A8    "1._HID.A" */
  27.286 -    0xD0,0x04,0x00,0x08,0x5F,0x55,0x49,0x44,  /* 000008B0    "...._UID" */
  27.287 -    0x0A,0x02,0x14,0x09,0x5F,0x53,0x54,0x41,  /* 000008B8    "...._STA" */
  27.288 -    0x00,0xA4,0x0A,0x0F,0x08,0x5F,0x43,0x52,  /* 000008C0    "....._CR" */
  27.289 -    0x53,0x11,0x10,0x0A,0x0D,0x47,0x01,0x78,  /* 000008C8    "S....G.x" */
  27.290 -    0x03,0x78,0x03,0x08,0x08,0x22,0x80,0x00,  /* 000008D0    ".x...".." */
  27.291 -    0x79,0x00,
  27.292 +    0x00,0x0C,0x03,0x00,0x00,0x00,0x00,0x00,  /* 000001A8    "........" */
  27.293 +    0x00,0x0A,0x00,0xFF,0xFF,0x0B,0x00,0x00,  /* 000001B0    "........" */
  27.294 +    0x00,0x00,0x00,0x00,0x00,0x02,0x00,0x87,  /* 000001B8    "........" */
  27.295 +    0x17,0x00,0x00,0x0D,0x03,0x00,0x00,0x00,  /* 000001C0    "........" */
  27.296 +    0x00,0x00,0x00,0x00,0xF0,0xFF,0xFF,0xFF,  /* 000001C8    "........" */
  27.297 +    0xF4,0x00,0x00,0x00,0x00,0x00,0x00,0x00,  /* 000001D0    "........" */
  27.298 +    0x05,0x79,0x00,0xA4,0x50,0x52,0x54,0x30,  /* 000001D8    ".y..PRT0" */
  27.299 +    0x08,0x42,0x55,0x46,0x41,0x11,0x09,0x0A,  /* 000001E0    ".BUFA..." */
  27.300 +    0x06,0x23,0xF8,0xDC,0x18,0x79,0x00,0x08,  /* 000001E8    ".#...y.." */
  27.301 +    0x42,0x55,0x46,0x42,0x11,0x09,0x0A,0x06,  /* 000001F0    "BUFB...." */
  27.302 +    0x23,0x00,0x00,0x18,0x79,0x00,0x8B,0x42,  /* 000001F8    "#...y..B" */
  27.303 +    0x55,0x46,0x42,0x01,0x49,0x52,0x51,0x56,  /* 00000200    "UFB.IRQV" */
  27.304 +    0x08,0x42,0x55,0x46,0x43,0x11,0x07,0x0A,  /* 00000208    ".BUFC..." */
  27.305 +    0x04,0x05,0x07,0x0A,0x0B,0x8C,0x42,0x55,  /* 00000210    "......BU" */
  27.306 +    0x46,0x43,0x01,0x50,0x49,0x51,0x41,0x8C,  /* 00000218    "FC.PIQA." */
  27.307 +    0x42,0x55,0x46,0x43,0x01,0x50,0x49,0x51,  /* 00000220    "BUFC.PIQ" */
  27.308 +    0x42,0x8C,0x42,0x55,0x46,0x43,0x01,0x50,  /* 00000228    "B.BUFC.P" */
  27.309 +    0x49,0x51,0x43,0x8C,0x42,0x55,0x46,0x43,  /* 00000230    "IQC.BUFC" */
  27.310 +    0x01,0x50,0x49,0x51,0x44,0x5B,0x82,0x48,  /* 00000238    ".PIQD[.H" */
  27.311 +    0x08,0x4C,0x4E,0x4B,0x41,0x08,0x5F,0x48,  /* 00000240    ".LNKA._H" */
  27.312 +    0x49,0x44,0x0C,0x41,0xD0,0x0C,0x0F,0x08,  /* 00000248    "ID.A...." */
  27.313 +    0x5F,0x55,0x49,0x44,0x01,0x14,0x1C,0x5F,  /* 00000250    "_UID..._" */
  27.314 +    0x53,0x54,0x41,0x00,0x7B,0x50,0x49,0x52,  /* 00000258    "STA.{PIR" */
  27.315 +    0x41,0x0A,0x80,0x60,0xA0,0x08,0x93,0x60,  /* 00000260    "A..`...`" */
  27.316 +    0x0A,0x80,0xA4,0x0A,0x09,0xA1,0x04,0xA4,  /* 00000268    "........" */
  27.317 +    0x0A,0x0B,0x14,0x0B,0x5F,0x50,0x52,0x53,  /* 00000270    "...._PRS" */
  27.318 +    0x00,0xA4,0x42,0x55,0x46,0x41,0x14,0x11,  /* 00000278    "..BUFA.." */
  27.319 +    0x5F,0x44,0x49,0x53,0x00,0x7D,0x50,0x49,  /* 00000280    "_DIS.}PI" */
  27.320 +    0x52,0x41,0x0A,0x80,0x50,0x49,0x52,0x41,  /* 00000288    "RA..PIRA" */
  27.321 +    0x14,0x1A,0x5F,0x43,0x52,0x53,0x00,0x7B,  /* 00000290    ".._CRS.{" */
  27.322 +    0x50,0x49,0x52,0x42,0x0A,0x0F,0x60,0x79,  /* 00000298    "PIRB..`y" */
  27.323 +    0x01,0x60,0x49,0x52,0x51,0x56,0xA4,0x42,  /* 000002A0    ".`IRQV.B" */
  27.324 +    0x55,0x46,0x42,0x14,0x1B,0x5F,0x53,0x52,  /* 000002A8    "UFB.._SR" */
  27.325 +    0x53,0x01,0x8B,0x68,0x01,0x49,0x52,0x51,  /* 000002B0    "S..h.IRQ" */
  27.326 +    0x31,0x82,0x49,0x52,0x51,0x31,0x60,0x76,  /* 000002B8    "1.IRQ1`v" */
  27.327 +    0x60,0x70,0x60,0x50,0x49,0x52,0x41,0x5B,  /* 000002C0    "`p`PIRA[" */
  27.328 +    0x82,0x49,0x08,0x4C,0x4E,0x4B,0x42,0x08,  /* 000002C8    ".I.LNKB." */
  27.329 +    0x5F,0x48,0x49,0x44,0x0C,0x41,0xD0,0x0C,  /* 000002D0    "_HID.A.." */
  27.330 +    0x0F,0x08,0x5F,0x55,0x49,0x44,0x0A,0x02,  /* 000002D8    ".._UID.." */
  27.331 +    0x14,0x1C,0x5F,0x53,0x54,0x41,0x00,0x7B,  /* 000002E0    ".._STA.{" */
  27.332 +    0x50,0x49,0x52,0x42,0x0A,0x80,0x60,0xA0,  /* 000002E8    "PIRB..`." */
  27.333 +    0x08,0x93,0x60,0x0A,0x80,0xA4,0x0A,0x09,  /* 000002F0    "..`....." */
  27.334 +    0xA1,0x04,0xA4,0x0A,0x0B,0x14,0x0B,0x5F,  /* 000002F8    "......._" */
  27.335 +    0x50,0x52,0x53,0x00,0xA4,0x42,0x55,0x46,  /* 00000300    "PRS..BUF" */
  27.336 +    0x41,0x14,0x11,0x5F,0x44,0x49,0x53,0x00,  /* 00000308    "A.._DIS." */
  27.337 +    0x7D,0x50,0x49,0x52,0x42,0x0A,0x80,0x50,  /* 00000310    "}PIRB..P" */
  27.338 +    0x49,0x52,0x42,0x14,0x1A,0x5F,0x43,0x52,  /* 00000318    "IRB.._CR" */
  27.339 +    0x53,0x00,0x7B,0x50,0x49,0x52,0x42,0x0A,  /* 00000320    "S.{PIRB." */
  27.340 +    0x0F,0x60,0x79,0x01,0x60,0x49,0x52,0x51,  /* 00000328    ".`y.`IRQ" */
  27.341 +    0x56,0xA4,0x42,0x55,0x46,0x42,0x14,0x1B,  /* 00000330    "V.BUFB.." */
  27.342 +    0x5F,0x53,0x52,0x53,0x01,0x8B,0x68,0x01,  /* 00000338    "_SRS..h." */
  27.343 +    0x49,0x52,0x51,0x31,0x82,0x49,0x52,0x51,  /* 00000340    "IRQ1.IRQ" */
  27.344 +    0x31,0x60,0x76,0x60,0x70,0x60,0x50,0x49,  /* 00000348    "1`v`p`PI" */
  27.345 +    0x52,0x42,0x5B,0x82,0x49,0x08,0x4C,0x4E,  /* 00000350    "RB[.I.LN" */
  27.346 +    0x4B,0x43,0x08,0x5F,0x48,0x49,0x44,0x0C,  /* 00000358    "KC._HID." */
  27.347 +    0x41,0xD0,0x0C,0x0F,0x08,0x5F,0x55,0x49,  /* 00000360    "A...._UI" */
  27.348 +    0x44,0x0A,0x03,0x14,0x1C,0x5F,0x53,0x54,  /* 00000368    "D...._ST" */
  27.349 +    0x41,0x00,0x7B,0x50,0x49,0x52,0x43,0x0A,  /* 00000370    "A.{PIRC." */
  27.350 +    0x80,0x60,0xA0,0x08,0x93,0x60,0x0A,0x80,  /* 00000378    ".`...`.." */
  27.351 +    0xA4,0x0A,0x09,0xA1,0x04,0xA4,0x0A,0x0B,  /* 00000380    "........" */
  27.352 +    0x14,0x0B,0x5F,0x50,0x52,0x53,0x00,0xA4,  /* 00000388    ".._PRS.." */
  27.353 +    0x42,0x55,0x46,0x41,0x14,0x11,0x5F,0x44,  /* 00000390    "BUFA.._D" */
  27.354 +    0x49,0x53,0x00,0x7D,0x50,0x49,0x52,0x43,  /* 00000398    "IS.}PIRC" */
  27.355 +    0x0A,0x80,0x50,0x49,0x52,0x43,0x14,0x1A,  /* 000003A0    "..PIRC.." */
  27.356 +    0x5F,0x43,0x52,0x53,0x00,0x7B,0x50,0x49,  /* 000003A8    "_CRS.{PI" */
  27.357 +    0x52,0x43,0x0A,0x0F,0x60,0x79,0x01,0x60,  /* 000003B0    "RC..`y.`" */
  27.358 +    0x49,0x52,0x51,0x56,0xA4,0x42,0x55,0x46,  /* 000003B8    "IRQV.BUF" */
  27.359 +    0x42,0x14,0x1B,0x5F,0x53,0x52,0x53,0x01,  /* 000003C0    "B.._SRS." */
  27.360 +    0x8B,0x68,0x01,0x49,0x52,0x51,0x31,0x82,  /* 000003C8    ".h.IRQ1." */
  27.361 +    0x49,0x52,0x51,0x31,0x60,0x76,0x60,0x70,  /* 000003D0    "IRQ1`v`p" */
  27.362 +    0x60,0x50,0x49,0x52,0x43,0x5B,0x82,0x49,  /* 000003D8    "`PIRC[.I" */
  27.363 +    0x08,0x4C,0x4E,0x4B,0x44,0x08,0x5F,0x48,  /* 000003E0    ".LNKD._H" */
  27.364 +    0x49,0x44,0x0C,0x41,0xD0,0x0C,0x0F,0x08,  /* 000003E8    "ID.A...." */
  27.365 +    0x5F,0x55,0x49,0x44,0x0A,0x04,0x14,0x1C,  /* 000003F0    "_UID...." */
  27.366 +    0x5F,0x53,0x54,0x41,0x00,0x7B,0x50,0x49,  /* 000003F8    "_STA.{PI" */
  27.367 +    0x52,0x44,0x0A,0x80,0x60,0xA0,0x08,0x93,  /* 00000400    "RD..`..." */
  27.368 +    0x60,0x0A,0x80,0xA4,0x0A,0x09,0xA1,0x04,  /* 00000408    "`......." */
  27.369 +    0xA4,0x0A,0x0B,0x14,0x0B,0x5F,0x50,0x52,  /* 00000410    "....._PR" */
  27.370 +    0x53,0x00,0xA4,0x42,0x55,0x46,0x41,0x14,  /* 00000418    "S..BUFA." */
  27.371 +    0x11,0x5F,0x44,0x49,0x53,0x00,0x7D,0x50,  /* 00000420    "._DIS.}P" */
  27.372 +    0x49,0x52,0x44,0x0A,0x80,0x50,0x49,0x52,  /* 00000428    "IRD..PIR" */
  27.373 +    0x44,0x14,0x1A,0x5F,0x43,0x52,0x53,0x00,  /* 00000430    "D.._CRS." */
  27.374 +    0x7B,0x50,0x49,0x52,0x44,0x0A,0x0F,0x60,  /* 00000438    "{PIRD..`" */
  27.375 +    0x79,0x01,0x60,0x49,0x52,0x51,0x56,0xA4,  /* 00000440    "y.`IRQV." */
  27.376 +    0x42,0x55,0x46,0x42,0x14,0x1B,0x5F,0x53,  /* 00000448    "BUFB.._S" */
  27.377 +    0x52,0x53,0x01,0x8B,0x68,0x01,0x49,0x52,  /* 00000450    "RS..h.IR" */
  27.378 +    0x51,0x31,0x82,0x49,0x52,0x51,0x31,0x60,  /* 00000458    "Q1.IRQ1`" */
  27.379 +    0x76,0x60,0x70,0x60,0x50,0x49,0x52,0x44,  /* 00000460    "v`p`PIRD" */
  27.380 +    0x14,0x16,0x5F,0x50,0x52,0x54,0x00,0xA0,  /* 00000468    ".._PRT.." */
  27.381 +    0x0A,0x50,0x49,0x43,0x44,0xA4,0x50,0x52,  /* 00000470    ".PICD.PR" */
  27.382 +    0x54,0x41,0xA4,0x50,0x52,0x54,0x50,0x08,  /* 00000478    "TA.PRTP." */
  27.383 +    0x50,0x52,0x54,0x50,0x12,0x4D,0x11,0x14,  /* 00000480    "PRTP.M.." */
  27.384 +    0x12,0x0B,0x04,0x0B,0xFF,0xFF,0x00,0x4C,  /* 00000488    ".......L" */
  27.385 +    0x4E,0x4B,0x41,0x00,0x12,0x0B,0x04,0x0B,  /* 00000490    "NKA....." */
  27.386 +    0xFF,0xFF,0x01,0x4C,0x4E,0x4B,0x42,0x00,  /* 00000498    "...LNKB." */
  27.387 +    0x12,0x0C,0x04,0x0B,0xFF,0xFF,0x0A,0x02,  /* 000004A0    "........" */
  27.388 +    0x4C,0x4E,0x4B,0x43,0x00,0x12,0x0C,0x04,  /* 000004A8    "LNKC...." */
  27.389 +    0x0B,0xFF,0xFF,0x0A,0x03,0x4C,0x4E,0x4B,  /* 000004B0    ".....LNK" */
  27.390 +    0x44,0x00,0x12,0x0D,0x04,0x0C,0xFF,0xFF,  /* 000004B8    "D......." */
  27.391 +    0x01,0x00,0x00,0x4C,0x4E,0x4B,0x42,0x00,  /* 000004C0    "...LNKB." */
  27.392 +    0x12,0x0D,0x04,0x0C,0xFF,0xFF,0x01,0x00,  /* 000004C8    "........" */
  27.393 +    0x01,0x4C,0x4E,0x4B,0x43,0x00,0x12,0x0E,  /* 000004D0    ".LNKC..." */
  27.394 +    0x04,0x0C,0xFF,0xFF,0x01,0x00,0x0A,0x02,  /* 000004D8    "........" */
  27.395 +    0x4C,0x4E,0x4B,0x44,0x00,0x12,0x0E,0x04,  /* 000004E0    "LNKD...." */
  27.396 +    0x0C,0xFF,0xFF,0x01,0x00,0x0A,0x03,0x4C,  /* 000004E8    ".......L" */
  27.397 +    0x4E,0x4B,0x41,0x00,0x12,0x0D,0x04,0x0C,  /* 000004F0    "NKA....." */
  27.398 +    0xFF,0xFF,0x02,0x00,0x00,0x4C,0x4E,0x4B,  /* 000004F8    ".....LNK" */
  27.399 +    0x43,0x00,0x12,0x0D,0x04,0x0C,0xFF,0xFF,  /* 00000500    "C......." */
  27.400 +    0x02,0x00,0x01,0x4C,0x4E,0x4B,0x44,0x00,  /* 00000508    "...LNKD." */
  27.401 +    0x12,0x0E,0x04,0x0C,0xFF,0xFF,0x02,0x00,  /* 00000510    "........" */
  27.402 +    0x0A,0x02,0x4C,0x4E,0x4B,0x41,0x00,0x12,  /* 00000518    "..LNKA.." */
  27.403 +    0x0E,0x04,0x0C,0xFF,0xFF,0x02,0x00,0x0A,  /* 00000520    "........" */
  27.404 +    0x03,0x4C,0x4E,0x4B,0x42,0x00,0x12,0x0D,  /* 00000528    ".LNKB..." */
  27.405 +    0x04,0x0C,0xFF,0xFF,0x03,0x00,0x00,0x4C,  /* 00000530    ".......L" */
  27.406 +    0x4E,0x4B,0x44,0x00,0x12,0x0D,0x04,0x0C,  /* 00000538    "NKD....." */
  27.407 +    0xFF,0xFF,0x03,0x00,0x01,0x4C,0x4E,0x4B,  /* 00000540    ".....LNK" */
  27.408 +    0x41,0x00,0x12,0x0E,0x04,0x0C,0xFF,0xFF,  /* 00000548    "A......." */
  27.409 +    0x03,0x00,0x0A,0x02,0x4C,0x4E,0x4B,0x42,  /* 00000550    "....LNKB" */
  27.410 +    0x00,0x12,0x0E,0x04,0x0C,0xFF,0xFF,0x03,  /* 00000558    "........" */
  27.411 +    0x00,0x0A,0x03,0x4C,0x4E,0x4B,0x43,0x00,  /* 00000560    "...LNKC." */
  27.412 +    0x12,0x0D,0x04,0x0C,0xFF,0xFF,0x04,0x00,  /* 00000568    "........" */
  27.413 +    0x00,0x4C,0x4E,0x4B,0x41,0x00,0x12,0x0D,  /* 00000570    ".LNKA..." */
  27.414 +    0x04,0x0C,0xFF,0xFF,0x04,0x00,0x01,0x4C,  /* 00000578    ".......L" */
  27.415 +    0x4E,0x4B,0x42,0x00,0x12,0x0E,0x04,0x0C,  /* 00000580    "NKB....." */
  27.416 +    0xFF,0xFF,0x04,0x00,0x0A,0x02,0x4C,0x4E,  /* 00000588    "......LN" */
  27.417 +    0x4B,0x43,0x00,0x12,0x0E,0x04,0x0C,0xFF,  /* 00000590    "KC......" */
  27.418 +    0xFF,0x04,0x00,0x0A,0x03,0x4C,0x4E,0x4B,  /* 00000598    ".....LNK" */
  27.419 +    0x44,0x00,0x08,0x50,0x52,0x54,0x41,0x12,  /* 000005A0    "D..PRTA." */
  27.420 +    0x32,0x04,0x12,0x0B,0x04,0x0C,0xFF,0xFF,  /* 000005A8    "2......." */
  27.421 +    0x01,0x00,0x00,0x00,0x0A,0x05,0x12,0x0B,  /* 000005B0    "........" */
  27.422 +    0x04,0x0C,0xFF,0xFF,0x02,0x00,0x00,0x00,  /* 000005B8    "........" */
  27.423 +    0x0A,0x07,0x12,0x0B,0x04,0x0C,0xFF,0xFF,  /* 000005C0    "........" */
  27.424 +    0x03,0x00,0x00,0x00,0x0A,0x0A,0x12,0x0B,  /* 000005C8    "........" */
  27.425 +    0x04,0x0C,0xFF,0xFF,0x04,0x00,0x00,0x00,  /* 000005D0    "........" */
  27.426 +    0x0A,0x0B,0x5B,0x82,0x48,0x31,0x49,0x53,  /* 000005D8    "..[.H1IS" */
  27.427 +    0x41,0x5F,0x08,0x5F,0x41,0x44,0x52,0x00,  /* 000005E0    "A_._ADR." */
  27.428 +    0x5B,0x80,0x50,0x49,0x52,0x51,0x02,0x0A,  /* 000005E8    "[.PIRQ.." */
  27.429 +    0x60,0x0A,0x04,0x10,0x2E,0x5C,0x00,0x5B,  /* 000005F0    "`....\.[" */
  27.430 +    0x81,0x29,0x5C,0x2F,0x04,0x5F,0x53,0x42,  /* 000005F8    ".)\/._SB" */
  27.431 +    0x5F,0x50,0x43,0x49,0x30,0x49,0x53,0x41,  /* 00000600    "_PCI0ISA" */
  27.432 +    0x5F,0x50,0x49,0x52,0x51,0x01,0x50,0x49,  /* 00000608    "_PIRQ.PI" */
  27.433 +    0x52,0x41,0x08,0x50,0x49,0x52,0x42,0x08,  /* 00000610    "RA.PIRB." */
  27.434 +    0x50,0x49,0x52,0x43,0x08,0x50,0x49,0x52,  /* 00000618    "PIRC.PIR" */
  27.435 +    0x44,0x08,0x5B,0x82,0x46,0x0B,0x53,0x59,  /* 00000620    "D.[.F.SY" */
  27.436 +    0x53,0x52,0x08,0x5F,0x48,0x49,0x44,0x0C,  /* 00000628    "SR._HID." */
  27.437 +    0x41,0xD0,0x0C,0x02,0x08,0x5F,0x55,0x49,  /* 00000630    "A...._UI" */
  27.438 +    0x44,0x01,0x08,0x43,0x52,0x53,0x5F,0x11,  /* 00000638    "D..CRS_." */
  27.439 +    0x4E,0x08,0x0A,0x8A,0x47,0x01,0x10,0x00,  /* 00000640    "N...G..." */
  27.440 +    0x10,0x00,0x00,0x10,0x47,0x01,0x22,0x00,  /* 00000648    "....G."." */
  27.441 +    0x22,0x00,0x00,0x0C,0x47,0x01,0x30,0x00,  /* 00000650    ""...G.0." */
  27.442 +    0x30,0x00,0x00,0x10,0x47,0x01,0x44,0x00,  /* 00000658    "0...G.D." */
  27.443 +    0x44,0x00,0x00,0x1C,0x47,0x01,0x62,0x00,  /* 00000660    "D...G.b." */
  27.444 +    0x62,0x00,0x00,0x02,0x47,0x01,0x65,0x00,  /* 00000668    "b...G.e." */
  27.445 +    0x65,0x00,0x00,0x0B,0x47,0x01,0x72,0x00,  /* 00000670    "e...G.r." */
  27.446 +    0x72,0x00,0x00,0x0E,0x47,0x01,0x80,0x00,  /* 00000678    "r...G..." */
  27.447 +    0x80,0x00,0x00,0x01,0x47,0x01,0x84,0x00,  /* 00000680    "....G..." */
  27.448 +    0x84,0x00,0x00,0x03,0x47,0x01,0x88,0x00,  /* 00000688    "....G..." */
  27.449 +    0x88,0x00,0x00,0x01,0x47,0x01,0x8C,0x00,  /* 00000690    "....G..." */
  27.450 +    0x8C,0x00,0x00,0x03,0x47,0x01,0x90,0x00,  /* 00000698    "....G..." */
  27.451 +    0x90,0x00,0x00,0x10,0x47,0x01,0xA2,0x00,  /* 000006A0    "....G..." */
  27.452 +    0xA2,0x00,0x00,0x1C,0x47,0x01,0xE0,0x00,  /* 000006A8    "....G..." */
  27.453 +    0xE0,0x00,0x00,0x10,0x47,0x01,0xA0,0x08,  /* 000006B0    "....G..." */
  27.454 +    0xA0,0x08,0x00,0x04,0x47,0x01,0xC0,0x0C,  /* 000006B8    "....G..." */
  27.455 +    0xC0,0x0C,0x00,0x10,0x47,0x01,0xD0,0x04,  /* 000006C0    "....G..." */
  27.456 +    0xD0,0x04,0x00,0x02,0x79,0x00,0x14,0x0B,  /* 000006C8    "....y..." */
  27.457 +    0x5F,0x43,0x52,0x53,0x00,0xA4,0x43,0x52,  /* 000006D0    "_CRS..CR" */
  27.458 +    0x53,0x5F,0x5B,0x82,0x2B,0x50,0x49,0x43,  /* 000006D8    "S_[.+PIC" */
  27.459 +    0x5F,0x08,0x5F,0x48,0x49,0x44,0x0B,0x41,  /* 000006E0    "_._HID.A" */
  27.460 +    0xD0,0x08,0x5F,0x43,0x52,0x53,0x11,0x18,  /* 000006E8    ".._CRS.." */
  27.461 +    0x0A,0x15,0x47,0x01,0x20,0x00,0x20,0x00,  /* 000006F0    "..G. . ." */
  27.462 +    0x01,0x02,0x47,0x01,0xA0,0x00,0xA0,0x00,  /* 000006F8    "..G....." */
  27.463 +    0x01,0x02,0x22,0x04,0x00,0x79,0x00,0x5B,  /* 00000700    ".."..y.[" */
  27.464 +    0x82,0x47,0x05,0x44,0x4D,0x41,0x30,0x08,  /* 00000708    ".G.DMA0." */
  27.465 +    0x5F,0x48,0x49,0x44,0x0C,0x41,0xD0,0x02,  /* 00000710    "_HID.A.." */
  27.466 +    0x00,0x08,0x5F,0x43,0x52,0x53,0x11,0x41,  /* 00000718    ".._CRS.A" */
  27.467 +    0x04,0x0A,0x3D,0x2A,0x10,0x04,0x47,0x01,  /* 00000720    "..=*..G." */
  27.468 +    0x00,0x00,0x00,0x00,0x00,0x10,0x47,0x01,  /* 00000728    "......G." */
  27.469 +    0x81,0x00,0x81,0x00,0x00,0x03,0x47,0x01,  /* 00000730    "......G." */
  27.470 +    0x87,0x00,0x87,0x00,0x00,0x01,0x47,0x01,  /* 00000738    "......G." */
  27.471 +    0x89,0x00,0x89,0x00,0x00,0x03,0x47,0x01,  /* 00000740    "......G." */
  27.472 +    0x8F,0x00,0x8F,0x00,0x00,0x01,0x47,0x01,  /* 00000748    "......G." */
  27.473 +    0xC0,0x00,0xC0,0x00,0x00,0x20,0x47,0x01,  /* 00000750    "..... G." */
  27.474 +    0x80,0x04,0x80,0x04,0x00,0x10,0x79,0x00,  /* 00000758    "......y." */
  27.475 +    0x5B,0x82,0x25,0x54,0x4D,0x52,0x5F,0x08,  /* 00000760    "[.%TMR_." */
  27.476 +    0x5F,0x48,0x49,0x44,0x0C,0x41,0xD0,0x01,  /* 00000768    "_HID.A.." */
  27.477 +    0x00,0x08,0x5F,0x43,0x52,0x53,0x11,0x10,  /* 00000770    ".._CRS.." */
  27.478 +    0x0A,0x0D,0x47,0x01,0x40,0x00,0x40,0x00,  /* 00000778    "..G.@.@." */
  27.479 +    0x00,0x04,0x22,0x01,0x00,0x79,0x00,0x5B,  /* 00000780    ".."..y.[" */
  27.480 +    0x82,0x25,0x52,0x54,0x43,0x5F,0x08,0x5F,  /* 00000788    ".%RTC_._" */
  27.481 +    0x48,0x49,0x44,0x0C,0x41,0xD0,0x0B,0x00,  /* 00000790    "HID.A..." */
  27.482 +    0x08,0x5F,0x43,0x52,0x53,0x11,0x10,0x0A,  /* 00000798    "._CRS..." */
  27.483 +    0x0D,0x47,0x01,0x70,0x00,0x70,0x00,0x00,  /* 000007A0    ".G.p.p.." */
  27.484 +    0x02,0x22,0x00,0x01,0x79,0x00,0x5B,0x82,  /* 000007A8    "."..y.[." */
  27.485 +    0x22,0x53,0x50,0x4B,0x52,0x08,0x5F,0x48,  /* 000007B0    ""SPKR._H" */
  27.486 +    0x49,0x44,0x0C,0x41,0xD0,0x08,0x00,0x08,  /* 000007B8    "ID.A...." */
  27.487 +    0x5F,0x43,0x52,0x53,0x11,0x0D,0x0A,0x0A,  /* 000007C0    "_CRS...." */
  27.488 +    0x47,0x01,0x61,0x00,0x61,0x00,0x00,0x01,  /* 000007C8    "G.a.a..." */
  27.489 +    0x79,0x00,0x5B,0x82,0x31,0x50,0x53,0x32,  /* 000007D0    "y.[.1PS2" */
  27.490 +    0x4D,0x08,0x5F,0x48,0x49,0x44,0x0C,0x41,  /* 000007D8    "M._HID.A" */
  27.491 +    0xD0,0x0F,0x13,0x08,0x5F,0x43,0x49,0x44,  /* 000007E0    "...._CID" */
  27.492 +    0x0C,0x41,0xD0,0x0F,0x13,0x14,0x09,0x5F,  /* 000007E8    ".A....._" */
  27.493 +    0x53,0x54,0x41,0x00,0xA4,0x0A,0x0F,0x08,  /* 000007F0    "STA....." */
  27.494 +    0x5F,0x43,0x52,0x53,0x11,0x08,0x0A,0x05,  /* 000007F8    "_CRS...." */
  27.495 +    0x22,0x00,0x10,0x79,0x00,0x5B,0x82,0x42,  /* 00000800    ""..y.[.B" */
  27.496 +    0x04,0x50,0x53,0x32,0x4B,0x08,0x5F,0x48,  /* 00000808    ".PS2K._H" */
  27.497 +    0x49,0x44,0x0C,0x41,0xD0,0x03,0x03,0x08,  /* 00000810    "ID.A...." */
  27.498 +    0x5F,0x43,0x49,0x44,0x0C,0x41,0xD0,0x03,  /* 00000818    "_CID.A.." */
  27.499 +    0x0B,0x14,0x09,0x5F,0x53,0x54,0x41,0x00,  /* 00000820    "..._STA." */
  27.500 +    0xA4,0x0A,0x0F,0x08,0x5F,0x43,0x52,0x53,  /* 00000828    "...._CRS" */
  27.501 +    0x11,0x18,0x0A,0x15,0x47,0x01,0x60,0x00,  /* 00000830    "....G.`." */
  27.502 +    0x60,0x00,0x00,0x01,0x47,0x01,0x64,0x00,  /* 00000838    "`...G.d." */
  27.503 +    0x64,0x00,0x00,0x01,0x22,0x02,0x00,0x79,  /* 00000840    "d..."..y" */
  27.504 +    0x00,0x5B,0x82,0x3A,0x46,0x44,0x43,0x30,  /* 00000848    ".[.:FDC0" */
  27.505 +    0x08,0x5F,0x48,0x49,0x44,0x0C,0x41,0xD0,  /* 00000850    "._HID.A." */
  27.506 +    0x07,0x00,0x14,0x09,0x5F,0x53,0x54,0x41,  /* 00000858    "...._STA" */
  27.507 +    0x00,0xA4,0x0A,0x0F,0x08,0x5F,0x43,0x52,  /* 00000860    "....._CR" */
  27.508 +    0x53,0x11,0x1B,0x0A,0x18,0x47,0x01,0xF0,  /* 00000868    "S....G.." */
  27.509 +    0x03,0xF0,0x03,0x01,0x06,0x47,0x01,0xF7,  /* 00000870    ".....G.." */
  27.510 +    0x03,0xF7,0x03,0x01,0x01,0x22,0x40,0x00,  /* 00000878    "....."@." */
  27.511 +    0x2A,0x04,0x00,0x79,0x00,0x5B,0x82,0x35,  /* 00000880    "*..y.[.5" */
  27.512 +    0x55,0x41,0x52,0x31,0x08,0x5F,0x48,0x49,  /* 00000888    "UAR1._HI" */
  27.513 +    0x44,0x0C,0x41,0xD0,0x05,0x01,0x08,0x5F,  /* 00000890    "D.A...._" */
  27.514 +    0x55,0x49,0x44,0x01,0x14,0x09,0x5F,0x53,  /* 00000898    "UID..._S" */
  27.515 +    0x54,0x41,0x00,0xA4,0x0A,0x0F,0x08,0x5F,  /* 000008A0    "TA....._" */
  27.516 +    0x43,0x52,0x53,0x11,0x10,0x0A,0x0D,0x47,  /* 000008A8    "CRS....G" */
  27.517 +    0x01,0xF8,0x03,0xF8,0x03,0x01,0x08,0x22,  /* 000008B0    "......."" */
  27.518 +    0x10,0x00,0x79,0x00,0x5B,0x82,0x36,0x4C,  /* 000008B8    "..y.[.6L" */
  27.519 +    0x54,0x50,0x31,0x08,0x5F,0x48,0x49,0x44,  /* 000008C0    "TP1._HID" */
  27.520 +    0x0C,0x41,0xD0,0x04,0x00,0x08,0x5F,0x55,  /* 000008C8    ".A...._U" */
  27.521 +    0x49,0x44,0x0A,0x02,0x14,0x09,0x5F,0x53,  /* 000008D0    "ID...._S" */
  27.522 +    0x54,0x41,0x00,0xA4,0x0A,0x0F,0x08,0x5F,  /* 000008D8    "TA....._" */
  27.523 +    0x43,0x52,0x53,0x11,0x10,0x0A,0x0D,0x47,  /* 000008E0    "CRS....G" */
  27.524 +    0x01,0x78,0x03,0x78,0x03,0x08,0x08,0x22,  /* 000008E8    ".x.x..."" */
  27.525 +    0x80,0x00,0x79,0x00,
  27.526  };
  27.527  int DsdtLen=sizeof(AmlCode);
    28.1 --- a/tools/firmware/hvmloader/acpi_utils.c	Mon Nov 20 12:14:40 2006 -0700
    28.2 +++ b/tools/firmware/hvmloader/acpi_utils.c	Mon Nov 20 13:11:15 2006 -0700
    28.3 @@ -23,9 +23,12 @@
    28.4  #include "acpi/acpi2_0.h"
    28.5  #include "acpi_utils.h"
    28.6  #include "util.h"
    28.7 +#include <xen/hvm/e820.h>
    28.8  
    28.9  static int acpi_rsdt_add_entry_pointer(unsigned char *acpi_start,
   28.10                                         unsigned char *entry);
   28.11 +static int acpi_xsdt_add_entry_pointer(unsigned char *acpi_start,
   28.12 +                                       unsigned char *entry);
   28.13  static unsigned char *acpi_xsdt_add_entry(unsigned char *acpi_start,
   28.14                                            unsigned char **freemem,
   28.15                                            unsigned char *limit,
   28.16 @@ -34,45 +37,78 @@ static unsigned char *acpi_xsdt_add_entr
   28.17  
   28.18  void set_checksum(void *start, int checksum_offset, int len)
   28.19  {
   28.20 -	unsigned char sum = 0;
   28.21 -	unsigned char *ptr;
   28.22 +    unsigned char sum = 0;
   28.23 +    unsigned char *ptr;
   28.24  
   28.25 -	ptr = start;
   28.26 -	ptr[checksum_offset] = 0;
   28.27 -	while (len--)
   28.28 -		sum += *ptr++;
   28.29 +    ptr = start;
   28.30 +    ptr[checksum_offset] = 0;
   28.31 +    while ( len-- )
   28.32 +        sum += *ptr++;
   28.33  
   28.34 -	ptr = start;
   28.35 -	ptr[checksum_offset] = -sum;
   28.36 +    ptr = start;
   28.37 +    ptr[checksum_offset] = -sum;
   28.38  }
   28.39  
   28.40  
   28.41  #include "acpi_ssdt_tpm.h"
   28.42 -static int acpi_tpm_tis_probe(unsigned char *acpi_start,
   28.43 -                              unsigned char **freemem,
   28.44 -                              unsigned char *limit)
   28.45 +static void acpi_tpm_tis_probe(unsigned char *acpi_start,
   28.46 +                               unsigned char **freemem,
   28.47 +                               unsigned char *limit)
   28.48  {
   28.49 -	int success = 1; /* not successful means 'out of memory' */
   28.50 -	unsigned char *addr;
   28.51 -	/* check TPM_DID, TPM_VID, TPM_RID in ioemu/hw/tpm_tis.c */
   28.52 -	uint16_t tis_did_vid_rid[] = {0x0001, 0x0001, 0x0001};
   28.53 +    unsigned char *addr;
   28.54 +    ACPI_2_0_TCPA_CLIENT *tcpa;
   28.55 +    /* check TPM_DID, TPM_VID, TPM_RID in ioemu/hw/tpm_tis.c */
   28.56 +    uint16_t tis_did_vid_rid[] = {0x0001, 0x0001, 0x0001};
   28.57 +    static const ACPI_2_0_TCPA_CLIENT Tcpa = {
   28.58 +        .header = {
   28.59 +            .signature = ACPI_2_0_TCPA_SIGNATURE,
   28.60 +            .length    = sizeof(ACPI_2_0_TCPA_CLIENT),
   28.61 +            .revision  = ACPI_2_0_TCPA_REVISION,
   28.62 +            .oem_id    = {'I', 'B', 'M', ' ', ' ', ' '},
   28.63 +            .oem_table_id = ASCII64(' ', ' ', ' ', ' ', ' ', 'x', 'e', 'n'),
   28.64 +            .oem_revision = 1,
   28.65 +            .creator_id   = ASCII32('I', 'B', 'M', ' '),
   28.66 +            .creator_revision = 1,
   28.67 +        }
   28.68 +    };
   28.69 +
   28.70 +    /* probe for TIS interface ... */
   28.71 +    if ( memcmp((char *)(0xFED40000 + 0xF00),
   28.72 +                tis_did_vid_rid,
   28.73 +                sizeof(tis_did_vid_rid)) != 0 )
   28.74 +        return;
   28.75  
   28.76 -	/* probe for TIS interface ... */
   28.77 -	if (memcmp((char *)(0xFED40000 + 0xF00),
   28.78 -	           tis_did_vid_rid,
   28.79 -	           sizeof(tis_did_vid_rid)) == 0) {
   28.80 -		puts("TIS is available\n");
   28.81 -		addr = acpi_xsdt_add_entry(acpi_start, freemem, limit,
   28.82 -		                           AmlCode_TPM, sizeof(AmlCode_TPM));
   28.83 -		if (addr == NULL)
   28.84 -			success = 0;
   28.85 -		else {
   28.86 -			/* legacy systems need an RSDT entry */
   28.87 -			acpi_rsdt_add_entry_pointer(acpi_start,
   28.88 -			                            addr);
   28.89 -		}
   28.90 -	}
   28.91 -	return success;
   28.92 +    puts("TIS is available\n");
   28.93 +    addr = acpi_xsdt_add_entry(acpi_start, freemem, limit,
   28.94 +                               AmlCode_TPM, sizeof(AmlCode_TPM));
   28.95 +    if ( addr == NULL )
   28.96 +        return;
   28.97 +
   28.98 +    /* legacy systems need an RSDT entry */
   28.99 +    acpi_rsdt_add_entry_pointer(acpi_start, addr);
  28.100 +
  28.101 +    /* add ACPI TCPA table */
  28.102 +    addr = acpi_xsdt_add_entry(acpi_start, freemem, limit,
  28.103 +                               (unsigned char *)&Tcpa,
  28.104 +                               sizeof(Tcpa));
  28.105 +    if ( addr == NULL )
  28.106 +        return;
  28.107 +
  28.108 +    tcpa = (ACPI_2_0_TCPA_CLIENT *)addr;
  28.109 +    tcpa->LASA = e820_malloc(
  28.110 +        ACPI_2_0_TCPA_LAML_SIZE, E820_RESERVED, (uint32_t)~0);
  28.111 +    if ( tcpa->LASA )
  28.112 +    {
  28.113 +        tcpa->LAML = ACPI_2_0_TCPA_LAML_SIZE;
  28.114 +        memset((char *)(unsigned long)tcpa->LASA,
  28.115 +               0x0,
  28.116 +               tcpa->LAML);
  28.117 +        set_checksum(tcpa,
  28.118 +                     FIELD_OFFSET(struct acpi_header, checksum),
  28.119 +                     tcpa->header.length);
  28.120 +    }
  28.121 +
  28.122 +    acpi_rsdt_add_entry_pointer(acpi_start, addr);
  28.123  }
  28.124  
  28.125  
  28.126 @@ -95,17 +131,20 @@ struct acpi_20_rsdt *acpi_rsdt_get(unsig
  28.127      struct acpi_20_rsdt *rsdt;
  28.128  
  28.129      rsdp = (struct acpi_20_rsdp *)(acpi_start + sizeof(struct acpi_20_facs));
  28.130 -    if (rsdp->signature != ACPI_2_0_RSDP_SIGNATURE) {
  28.131 +    if ( rsdp->signature != ACPI_2_0_RSDP_SIGNATURE )
  28.132 +    {
  28.133          puts("Bad RSDP signature\n");
  28.134          return NULL;
  28.135      }
  28.136  
  28.137      rsdt = (struct acpi_20_rsdt *)
  28.138          (acpi_start + rsdp->rsdt_address - ACPI_PHYSICAL_ADDRESS);
  28.139 -    if (rsdt->header.signature != ACPI_2_0_RSDT_SIGNATURE) {
  28.140 +    if ( rsdt->header.signature != ACPI_2_0_RSDT_SIGNATURE )
  28.141 +    {
  28.142          puts("Bad RSDT signature\n");
  28.143          return NULL;
  28.144      }
  28.145 +
  28.146      return rsdt;
  28.147  }
  28.148  
  28.149 @@ -119,17 +158,20 @@ static int acpi_rsdt_add_entry_pointer(u
  28.150      int found = 0;
  28.151      int i = 0;
  28.152  
  28.153 -    /* get empty slot in the RSDT table */
  28.154 -    while (i < ACPI_MAX_NUM_TABLES) {
  28.155 -        if (rsdt->entry[i] == 0) {
  28.156 +    /* Find an empty slot in the RSDT table. */
  28.157 +    while ( i < ACPI_MAX_NUM_TABLES )
  28.158 +    {
  28.159 +        if ( rsdt->entry[i] == 0 )
  28.160 +        {
  28.161              found = 1;
  28.162              break;
  28.163          }
  28.164          i++;
  28.165      }
  28.166  
  28.167 -    if (found) {
  28.168 -        rsdt->entry[i] = (uint64_t)(long)entry;
  28.169 +    if ( found )
  28.170 +    {
  28.171 +        rsdt->entry[i] = (uint64_t)(unsigned long)entry;
  28.172          rsdt->header.length =
  28.173              sizeof(struct acpi_header) +
  28.174              (i + 1) * sizeof(uint64_t);
  28.175 @@ -141,21 +183,23 @@ static int acpi_rsdt_add_entry_pointer(u
  28.176      return found;
  28.177  }
  28.178  
  28.179 -/* Get the XSDT table */
  28.180 +/* Get the XSDT table. */
  28.181  struct acpi_20_xsdt *acpi_xsdt_get(unsigned char *acpi_start)
  28.182  {
  28.183      struct acpi_20_rsdp *rsdp;
  28.184      struct acpi_20_xsdt *xsdt;
  28.185  
  28.186      rsdp = (struct acpi_20_rsdp *)(acpi_start + sizeof(struct acpi_20_facs));
  28.187 -    if (rsdp->signature != ACPI_2_0_RSDP_SIGNATURE) {
  28.188 +    if ( rsdp->signature != ACPI_2_0_RSDP_SIGNATURE )
  28.189 +    {
  28.190          puts("Bad RSDP signature\n");
  28.191          return NULL;
  28.192      }
  28.193  
  28.194      xsdt = (struct acpi_20_xsdt *)
  28.195          (acpi_start + rsdp->xsdt_address - ACPI_PHYSICAL_ADDRESS);
  28.196 -    if (xsdt->header.signature != ACPI_2_0_XSDT_SIGNATURE) {
  28.197 +    if ( xsdt->header.signature != ACPI_2_0_XSDT_SIGNATURE )
  28.198 +    {
  28.199          puts("Bad XSDT signature\n");
  28.200          return NULL;
  28.201      }
  28.202 @@ -163,6 +207,41 @@ struct acpi_20_xsdt *acpi_xsdt_get(unsig
  28.203  }
  28.204  
  28.205  /*
  28.206 + * Add an entry to the XSDT table given the pointer to the entry.
  28.207 + */
  28.208 +static int acpi_xsdt_add_entry_pointer(unsigned char *acpi_start,
  28.209 +                                       unsigned char *entry)
  28.210 +{
  28.211 +    struct acpi_20_xsdt *xsdt = acpi_xsdt_get(acpi_start);
  28.212 +    int found = 0;
  28.213 +    int i = 0;
  28.214 +
  28.215 +    /* Find an empty slot in the XSDT table. */
  28.216 +    while ( i < ACPI_MAX_NUM_TABLES )
  28.217 +    {
  28.218 +        if ( xsdt->entry[i] == 0 )
  28.219 +        {
  28.220 +            found = 1;
  28.221 +            break;
  28.222 +        }
  28.223 +        i++;
  28.224 +    }
  28.225 +
  28.226 +    if ( found )
  28.227 +    {
  28.228 +        xsdt->entry[i] = (uint64_t)(unsigned long)entry;
  28.229 +        xsdt->header.length =
  28.230 +            sizeof(struct acpi_header) +
  28.231 +            (i + 1) * sizeof(uint64_t);
  28.232 +        set_checksum(xsdt,
  28.233 +                     FIELD_OFFSET(struct acpi_header, checksum),
  28.234 +                     xsdt->header.length);
  28.235 +    }
  28.236 +
  28.237 +    return found;
  28.238 +}
  28.239 +
  28.240 +/*
   28.241     add an entry to the xsdt table entry pointers
  28.242     copy the given ssdt data to the current available memory at
  28.243     freemem, if it does not exceed the limit
  28.244 @@ -177,31 +256,29 @@ static unsigned char *acpi_xsdt_add_entr
  28.245      int found = 0, i = 0;
  28.246      unsigned char *addr = NULL;
  28.247  
  28.248 -    /* get empty slot in the Xsdt table */
  28.249 -    while (i < ACPI_MAX_NUM_TABLES) {
  28.250 -        if (xsdt->entry[i] == 0) {
   28.251 +    /* Find an empty slot in the XSDT table. */
  28.252 +    while ( i < ACPI_MAX_NUM_TABLES )
  28.253 +    {
  28.254 +        if ( xsdt->entry[i] == 0 )
  28.255 +        {
  28.256              found = 1;
  28.257              break;
  28.258          }
  28.259          i++;
  28.260      }
  28.261  
  28.262 -    if (found) {
  28.263 +    if ( found )
  28.264 +    {
  28.265          /* memory below hard limit ? */
  28.266          if (*freemem + table_size <= limit) {
  28.267              puts("Copying SSDT entry!\n");
  28.268              addr = *freemem;
  28.269              memcpy(addr, table, table_size);
  28.270 -            xsdt->entry[i] = (uint64_t)(long)addr;
  28.271              *freemem += table_size;
  28.272 -            /* update the XSDT table */
  28.273 -            xsdt->header.length =
  28.274 -                sizeof(struct acpi_header) +
  28.275 -                (i + 1) * sizeof(uint64_t);
  28.276 -            set_checksum(xsdt,
  28.277 -                         FIELD_OFFSET(struct acpi_header, checksum),
  28.278 -                         xsdt->header.length);
  28.279 +
  28.280 +            acpi_xsdt_add_entry_pointer(acpi_start, addr);
  28.281          }
  28.282      }
  28.283 +
  28.284      return addr;
  28.285  }
    29.1 --- a/tools/firmware/hvmloader/util.c	Mon Nov 20 12:14:40 2006 -0700
    29.2 +++ b/tools/firmware/hvmloader/util.c	Mon Nov 20 13:11:15 2006 -0700
    29.3 @@ -90,6 +90,23 @@ void *memcpy(void *dest, const void *src
    29.4  	return dest;
    29.5  }
    29.6  
    29.7 +void *memmove(void *dest, const void *src, unsigned n)
    29.8 +{
    29.9 +	if ((long)dest > (long)src) {
    29.10 +		/* Copy backwards so overlapping source bytes are read first. */
    29.11 +		while (n > 0) {
    29.12 +			n--;
    29.13 +			((char *)dest)[n] = ((char *)src)[n];
    29.14 +		}
   29.15 +	} else {
   29.16 +		memcpy(dest, src, n);
   29.17 +	}
   29.18 +	return dest;
   29.19 +}
   29.20 +
   29.21 +
   29.22 +
   29.23 +
   29.24  void puts(const char *s)
   29.25  {
   29.26  	while (*s)
   29.27 @@ -229,3 +246,37 @@ uuid_to_string(char *dest, uint8_t *uuid
   29.28  	}
   29.29  	*p = 0;
   29.30  }
   29.31 +
   29.32 +#include <xen/hvm/e820.h>
   29.33 +#define E820_MAP_NR ((unsigned char *)E820_MAP_PAGE + E820_MAP_NR_OFFSET)
   29.34 +#define E820_MAP    ((struct e820entry *)(E820_MAP_PAGE + E820_MAP_OFFSET))
   29.35 +uint64_t e820_malloc(uint64_t size, uint32_t type, uint64_t mask)
   29.36 +{
   29.37 +	uint64_t addr = 0;
   29.38 +	int c = *E820_MAP_NR - 1;
   29.39 +	struct e820entry *e820entry = (struct e820entry *)E820_MAP;
   29.40 +
   29.41 +	while (c >= 0) {
   29.42 +		if (e820entry[c].type  == E820_RAM     &&
   29.43 +		    (e820entry[c].addr & (~mask)) == 0 &&
   29.44 +		    e820entry[c].size >= size) {
   29.45 +			addr = e820entry[c].addr;
   29.46 +			if (e820entry[c].size != size) {
   29.47 +				(*E820_MAP_NR)++;
   29.48 +				memmove(&e820entry[c+1],
   29.49 +				        &e820entry[c],
   29.50 +				        (*E820_MAP_NR - c) *
   29.51 +				            sizeof(struct e820entry));
   29.52 +				e820entry[c].size -= size;
   29.53 +				addr += e820entry[c].size;
   29.54 +				c++;
   29.55 +			}
   29.56 +			e820entry[c].addr = addr;
   29.57 +			e820entry[c].size = size;
   29.58 +			e820entry[c].type = type;
   29.59 +			break;
   29.60 +		}
   29.61 +		c--;
   29.62 +	}
    29.63 +	return addr;
   29.64 +}
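
e820_malloc() above walks the e820 map from the last entry downwards, picks a RAM range that is large enough and satisfies the address mask, and splits it: the original entry keeps its base address and shrinks by the requested size, while a duplicate entry describing the carved-out region (retyped, e.g. to E820_RESERVED) is inserted directly after it. The following self-contained sketch reproduces that splitting logic; the map contents, the carve() name and the omission of the address-mask argument are simplifications for illustration only.

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>

    #define E820_RAM      1
    #define E820_RESERVED 2

    struct e820entry {
        uint64_t addr, size;
        uint32_t type;
    };

    /* Reserve 'size' bytes from the top of the highest RAM entry that fits.
     * Assumes the map array has spare capacity for one extra entry. */
    static uint64_t carve(struct e820entry *map, int *nr, uint64_t size,
                          uint32_t type)
    {
        int c;

        for (c = *nr - 1; c >= 0; c--) {
            if (map[c].type != E820_RAM || map[c].size < size)
                continue;
            if (map[c].size > size) {
                /* Shift entries c..*nr-1 up by one slot (overlapping move). */
                memmove(&map[c + 1], &map[c], (*nr - c) * sizeof(*map));
                (*nr)++;
                map[c].size -= size;              /* shrunken RAM entry      */
                c++;                              /* entry for the carve-out */
                map[c].addr = map[c - 1].addr + map[c - 1].size;
                map[c].size = size;
            }
            map[c].type = type;
            return map[c].addr;
        }
        return 0;
    }

    int main(void)
    {
        struct e820entry map[8] = {
            { 0x00000000, 0x0009F000, E820_RAM },
            { 0x00100000, 0x07F00000, E820_RAM },
        };
        int nr = 2, i;
        uint64_t lasa = carve(map, &nr, 0x10000, E820_RESERVED);

        printf("allocated %#llx\n", (unsigned long long)lasa);
        for (i = 0; i < nr; i++)
            printf("  %#010llx + %#010llx  type %u\n",
                   (unsigned long long)map[i].addr,
                   (unsigned long long)map[i].size, (unsigned)map[i].type);
        return 0;
    }

Run against the sample map, this leaves the second RAM entry at 0x100000 with 0x07ef0000 bytes and adds a reserved entry of 0x10000 bytes at 0x07ff0000, which is the address returned to the caller.
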
    30.1 --- a/tools/firmware/hvmloader/util.h	Mon Nov 20 12:14:40 2006 -0700
    30.2 +++ b/tools/firmware/hvmloader/util.h	Mon Nov 20 13:11:15 2006 -0700
    30.3 @@ -22,6 +22,7 @@ char *strncpy(char *dest, const char *sr
    30.4  unsigned strlen(const char *s);
    30.5  int memcmp(const void *s1, const void *s2, unsigned n);
    30.6  void *memcpy(void *dest, const void *src, unsigned n);
    30.7 +void *memmove(void *dest, const void *src, unsigned n);
    30.8  void *memset(void *s, int c, unsigned n);
    30.9  char *itoa(char *a, unsigned int i);
   30.10  
   30.11 @@ -38,4 +39,7 @@ void uuid_to_string(char *dest, uint8_t 
   30.12  /* Debug output */
   30.13  void puts(const char *s);
   30.14  
   30.15 +/* Allocate region of specified type in the e820 table. */
   30.16 +uint64_t e820_malloc(uint64_t size, uint32_t type, uint64_t mask);
   30.17 +
   30.18  #endif /* __HVMLOADER_UTIL_H__ */
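
util.h now declares memmove() alongside memcpy() because e820_malloc() shifts a tail of the e820 array onto itself: source and destination overlap by all but one entry, and memcpy() is not required to handle overlapping regions. A small demonstration of the overlapping case, using arbitrary sample data:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        /* Shift elements 2..7 up by one slot, in place, to open a hole at
         * index 2 -- the same kind of move e820_malloc() performs. */
        int a[9] = { 0, 1, 2, 3, 4, 5, 6, 7, 0 };
        int i;

        memmove(&a[3], &a[2], 6 * sizeof(int));   /* overlap: well defined */
        /* memcpy(&a[3], &a[2], 6 * sizeof(int));    overlap: undefined    */
        a[2] = 99;

        for (i = 0; i < 9; i++)
            printf("%d ", a[i]);                  /* 0 1 99 2 3 4 5 6 7    */
        printf("\n");
        return 0;
    }
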
    31.1 --- a/tools/ioemu/Makefile.target	Mon Nov 20 12:14:40 2006 -0700
    31.2 +++ b/tools/ioemu/Makefile.target	Mon Nov 20 13:11:15 2006 -0700
    31.3 @@ -177,7 +177,8 @@ endif
    31.4  
    31.5  #########################################################
    31.6  
    31.7 -DEFINES+=-D_GNU_SOURCE -D_FILE_OFFSET_BITS=64 -D_LARGEFILE_SOURCE
    31.8 +DEFINES+=-D_GNU_SOURCE
    31.9 +#-D_FILE_OFFSET_BITS=64 -D_LARGEFILE_SOURCE
   31.10  LIBS+=-lm
   31.11  LIBS+=-L../../libxc -lxenctrl -lxenguest
   31.12  LIBS+=-L../../xenstore -lxenstore
    32.1 --- a/tools/ioemu/hw/rtl8139.c	Mon Nov 20 12:14:40 2006 -0700
    32.2 +++ b/tools/ioemu/hw/rtl8139.c	Mon Nov 20 13:11:15 2006 -0700
    32.3 @@ -1999,12 +1999,12 @@ static int rtl8139_cplus_transmit_one(RT
    32.4          DEBUG_PRINT(("RTL8139: +++ C+ mode transmission buffer allocated space %d\n", s->cplus_txbuffer_len));
    32.5      }
    32.6  
    32.7 -    while (s->cplus_txbuffer && s->cplus_txbuffer_offset + txsize >= s->cplus_txbuffer_len)
    32.8 +    if (s->cplus_txbuffer && s->cplus_txbuffer_offset + txsize >= s->cplus_txbuffer_len)
    32.9      {
   32.10 -        s->cplus_txbuffer_len += CP_TX_BUFFER_SIZE;
   32.11 -        s->cplus_txbuffer = realloc(s->cplus_txbuffer, s->cplus_txbuffer_len);
   32.12 -
   32.13 -        DEBUG_PRINT(("RTL8139: +++ C+ mode transmission buffer space changed to %d\n", s->cplus_txbuffer_len));
   32.14 +	free(s->cplus_txbuffer);
   32.15 +	s->cplus_txbuffer = NULL;
   32.16 +
   32.17 +	DEBUG_PRINT(("RTL8139: +++ C+ mode transmission buffer space exceeded: %d\n", s->cplus_txbuffer_offset + txsize));
   32.18      }
   32.19  
   32.20      if (!s->cplus_txbuffer)
    33.1 --- a/tools/ioemu/xenstore.c	Mon Nov 20 12:14:40 2006 -0700
    33.2 +++ b/tools/ioemu/xenstore.c	Mon Nov 20 13:11:15 2006 -0700
    33.3 @@ -100,7 +100,7 @@ void xenstore_parse_domain_config(int do
    33.4  	if (strncmp(dev, "hd", 2) || strlen(dev) != 3)
    33.5  	    continue;
    33.6  	hd_index = dev[2] - 'a';
    33.7 -	if (hd_index > MAX_DISKS)
    33.8 +	if (hd_index >= MAX_DISKS)
    33.9  	    continue;
   33.10  	/* read the type of the device */
   33.11  	if (pasprintf(&buf, "%s/device/vbd/%s/device-type", path, e[i]) == -1)
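
The xenstore.c hunk is an off-by-one fix: hd_index is the zero-based disk number derived from the drive letter, so for a table of MAX_DISKS entries the valid indices are 0 through MAX_DISKS-1 and the bounds check must use >= rather than >. A short illustration with a hypothetical MAX_DISKS and a stand-in for ioemu's disk table:

    #include <stdio.h>

    #define MAX_DISKS 4                    /* hypothetical value */

    int main(void)
    {
        const char *disk_table[MAX_DISKS] = { 0 };
        const char *dev = "hde";           /* fifth disk: index 4 */
        int hd_index = dev[2] - 'a';

        if (hd_index >= MAX_DISKS) {       /* '>' would let index 4 through */
            printf("rejecting %s: index %d out of range\n", dev, hd_index);
            return 0;
        }
        disk_table[hd_index] = dev;
        printf("attached %s at index %d\n", disk_table[hd_index], hd_index);
        return 0;
    }
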
    34.1 --- a/tools/libfsimage/Rules.mk	Mon Nov 20 12:14:40 2006 -0700
    34.2 +++ b/tools/libfsimage/Rules.mk	Mon Nov 20 13:11:15 2006 -0700
    34.3 @@ -2,7 +2,7 @@ include $(XEN_ROOT)/tools/Rules.mk
    34.4  
    34.5  DEPS = .*.d
    34.6  
    34.7 -CFLAGS += -I$(XEN_ROOT)/tools/libfsimage/common/ -Werror -D_LARGEFILE_SOURCE -D_FILE_OFFSET_BITS=64 -Wp,-MD,.$(@F).d
    34.8 +CFLAGS += -I$(XEN_ROOT)/tools/libfsimage/common/ -Werror -Wp,-MD,.$(@F).d
    34.9  LDFLAGS += -L../common/
   34.10  
   34.11  PIC_OBJS := $(patsubst %.c,%.opic,$(LIB_SRCS-y))
    35.1 --- a/tools/libfsimage/common/Makefile	Mon Nov 20 12:14:40 2006 -0700
    35.2 +++ b/tools/libfsimage/common/Makefile	Mon Nov 20 13:11:15 2006 -0700
    35.3 @@ -4,7 +4,7 @@ include $(XEN_ROOT)/tools/Rules.mk
    35.4  MAJOR = 1.0
    35.5  MINOR = 0
    35.6  
    35.7 -CFLAGS += -Werror -D_LARGEFILE_SOURCE -D_FILE_OFFSET_BITS=64 -Wp,-MD,.$(@F).d
    35.8 +CFLAGS += -Werror -Wp,-MD,.$(@F).d
    35.9  DEPS = .*.d
   35.10  
   35.11  LDFLAGS-$(CONFIG_SunOS) = -Wl,-M -Wl,mapfile-SunOS
    36.1 --- a/tools/libxc/xc_acm.c	Mon Nov 20 12:14:40 2006 -0700
    36.2 +++ b/tools/libxc/xc_acm.c	Mon Nov 20 13:11:15 2006 -0700
    36.3 @@ -14,8 +14,7 @@
    36.4  
    36.5  #include "xc_private.h"
    36.6  
    36.7 -
    36.8 -int xc_acm_op(int xc_handle, int cmd, void *arg, size_t arg_size)
    36.9 +int xc_acm_op(int xc_handle, int cmd, void *arg, unsigned long arg_size)
   36.10  {
   36.11      int ret = -1;
   36.12      DECLARE_HYPERCALL;
    37.1 --- a/tools/libxc/xc_domain.c	Mon Nov 20 12:14:40 2006 -0700
    37.2 +++ b/tools/libxc/xc_domain.c	Mon Nov 20 13:11:15 2006 -0700
    37.3 @@ -352,7 +352,7 @@ int xc_domain_memory_increase_reservatio
    37.4      if ( err >= 0 )
    37.5      {
    37.6          DPRINTF("Failed allocation for dom %d: "
    37.7 -                "%ld pages order %d addr_bits %d\n",
    37.8 +                "%ld extents of order %d, addr_bits %d\n",
    37.9                  domid, nr_extents, extent_order, address_bits);
   37.10          errno = ENOMEM;
   37.11          err = -1;
   37.12 @@ -390,7 +390,7 @@ int xc_domain_memory_decrease_reservatio
   37.13  
   37.14      if ( err >= 0 )
   37.15      {
   37.16 -        DPRINTF("Failed deallocation for dom %d: %ld pages order %d\n",
   37.17 +        DPRINTF("Failed deallocation for dom %d: %ld extents of order %d\n",
   37.18                  domid, nr_extents, extent_order);
   37.19          errno = EINVAL;
   37.20          err = -1;
   37.21 @@ -421,7 +421,7 @@ int xc_domain_memory_populate_physmap(in
   37.22  
   37.23      if ( err >= 0 )
   37.24      {
   37.25 -        DPRINTF("Failed allocation for dom %d: %ld pages order %d\n",
   37.26 +        DPRINTF("Failed allocation for dom %d: %ld extents of order %d\n",
   37.27                  domid, nr_extents, extent_order);
   37.28          errno = EBUSY;
   37.29          err = -1;
    38.1 --- a/tools/libxc/xc_linux_build.c	Mon Nov 20 12:14:40 2006 -0700
    38.2 +++ b/tools/libxc/xc_linux_build.c	Mon Nov 20 13:11:15 2006 -0700
    38.3 @@ -1106,7 +1106,7 @@ static int xc_linux_build_internal(int x
    38.4  {
    38.5      struct xen_domctl launch_domctl;
    38.6      DECLARE_DOMCTL;
    38.7 -    int rc, i;
    38.8 +    int rc;
    38.9      struct vcpu_guest_context st_ctxt, *ctxt = &st_ctxt;
   38.10      unsigned long vstartinfo_start, vkern_entry, vstack_start;
   38.11      uint32_t      features_bitmap[XENFEAT_NR_SUBMAPS] = { 0, };
   38.12 @@ -1120,11 +1120,9 @@ static int xc_linux_build_internal(int x
   38.13          }
   38.14      }
   38.15  
   38.16 -#ifdef VALGRIND
   38.17 -    memset(&st_ctxt, 0, sizeof(st_ctxt));
   38.18 -#endif
   38.19 +    memset(ctxt, 0, sizeof(*ctxt));
   38.20  
   38.21 -    if ( lock_pages(&st_ctxt, sizeof(st_ctxt) ) )
   38.22 +    if ( lock_pages(ctxt, sizeof(*ctxt) ) )
   38.23      {
   38.24          PERROR("%s: ctxt lock failed", __func__);
   38.25          return 1;
   38.26 @@ -1139,8 +1137,6 @@ static int xc_linux_build_internal(int x
   38.27          goto error_out;
   38.28      }
   38.29  
   38.30 -    memset(ctxt, 0, sizeof(*ctxt));
   38.31 -
   38.32      if ( setup_guest(xc_handle, domid, image, image_size,
   38.33                       initrd,
   38.34                       mem_mb << (20 - PAGE_SHIFT),
   38.35 @@ -1157,12 +1153,9 @@ static int xc_linux_build_internal(int x
   38.36  
   38.37  #ifdef __ia64__
   38.38      /* based on new_thread in xen/arch/ia64/domain.c */
   38.39 -    ctxt->flags = 0;
   38.40 -    ctxt->user_regs.cr_ipsr = 0; /* all necessary bits filled by hypervisor */
   38.41      ctxt->user_regs.cr_iip = vkern_entry;
   38.42      ctxt->user_regs.cr_ifs = 1UL << 63;
   38.43      ctxt->user_regs.ar_fpsr = xc_ia64_fpsr_default();
   38.44 -    i = 0; /* silence unused variable warning */
   38.45  #else /* x86 */
   38.46      /*
   38.47       * Initial register values:
   38.48 @@ -1186,43 +1179,11 @@ static int xc_linux_build_internal(int x
   38.49  
   38.50      ctxt->flags = VGCF_IN_KERNEL;
   38.51  
   38.52 -    /* FPU is set up to default initial state. */
   38.53 -    memset(&ctxt->fpu_ctxt, 0, sizeof(ctxt->fpu_ctxt));
   38.54 -
   38.55 -    /* Virtual IDT is empty at start-of-day. */
   38.56 -    for ( i = 0; i < 256; i++ )
   38.57 -    {
   38.58 -        ctxt->trap_ctxt[i].vector = i;
   38.59 -        ctxt->trap_ctxt[i].cs     = FLAT_KERNEL_CS;
   38.60 -    }
   38.61 -
   38.62 -    /* No LDT. */
   38.63 -    ctxt->ldt_ents = 0;
   38.64 -
   38.65 -    /* Use the default Xen-provided GDT. */
   38.66 -    ctxt->gdt_ents = 0;
   38.67 -
   38.68 -    /* Ring 1 stack is the initial stack. */
   38.69 -    ctxt->kernel_ss = FLAT_KERNEL_SS;
   38.70 -    ctxt->kernel_sp = vstack_start + PAGE_SIZE;
   38.71 -
   38.72 -    /* No debugging. */
   38.73 -    memset(ctxt->debugreg, 0, sizeof(ctxt->debugreg));
   38.74 -
   38.75 -    /* No callback handlers. */
   38.76 -#if defined(__i386__)
   38.77 -    ctxt->event_callback_cs     = FLAT_KERNEL_CS;
   38.78 -    ctxt->event_callback_eip    = 0;
   38.79 -    ctxt->failsafe_callback_cs  = FLAT_KERNEL_CS;
   38.80 -    ctxt->failsafe_callback_eip = 0;
   38.81 -#elif defined(__x86_64__)
   38.82 -    ctxt->event_callback_eip    = 0;
   38.83 -    ctxt->failsafe_callback_eip = 0;
   38.84 -    ctxt->syscall_callback_eip  = 0;
   38.85 -#endif
   38.86 +    ctxt->kernel_ss = ctxt->user_regs.ss;
   38.87 +    ctxt->kernel_sp = ctxt->user_regs.esp;
   38.88  #endif /* x86 */
   38.89  
   38.90 -    memset( &launch_domctl, 0, sizeof(launch_domctl) );
   38.91 +    memset(&launch_domctl, 0, sizeof(launch_domctl));
   38.92  
   38.93      launch_domctl.domain = (domid_t)domid;
   38.94      launch_domctl.u.vcpucontext.vcpu   = 0;
    39.1 --- a/tools/libxc/xc_linux_restore.c	Mon Nov 20 12:14:40 2006 -0700
    39.2 +++ b/tools/libxc/xc_linux_restore.c	Mon Nov 20 13:11:15 2006 -0700
    39.3 @@ -774,39 +774,6 @@ int xc_linux_restore(int xc_handle, int 
    39.4      memcpy(live_p2m, p2m, P2M_SIZE);
    39.5      munmap(live_p2m, P2M_SIZE);
    39.6  
    39.7 -    /*
    39.8 -     * Safety checking of saved context:
    39.9 -     *  1. user_regs is fine, as Xen checks that on context switch.
   39.10 -     *  2. fpu_ctxt is fine, as it can't hurt Xen.
   39.11 -     *  3. trap_ctxt needs the code selectors checked.
   39.12 -     *  4. ldt base must be page-aligned, no more than 8192 ents, ...
   39.13 -     *  5. gdt already done, and further checking is done by Xen.
   39.14 -     *  6. check that kernel_ss is safe.
   39.15 -     *  7. pt_base is already done.
   39.16 -     *  8. debugregs are checked by Xen.
   39.17 -     *  9. callback code selectors need checking.
   39.18 -     */
   39.19 -    for ( i = 0; i < 256; i++ ) {
   39.20 -        ctxt.trap_ctxt[i].vector = i;
   39.21 -        if ((ctxt.trap_ctxt[i].cs & 3) == 0)
   39.22 -            ctxt.trap_ctxt[i].cs = FLAT_KERNEL_CS;
   39.23 -    }
   39.24 -    if ((ctxt.kernel_ss & 3) == 0)
   39.25 -        ctxt.kernel_ss = FLAT_KERNEL_DS;
   39.26 -#if defined(__i386__)
   39.27 -    if ((ctxt.event_callback_cs & 3) == 0)
   39.28 -        ctxt.event_callback_cs = FLAT_KERNEL_CS;
   39.29 -    if ((ctxt.failsafe_callback_cs & 3) == 0)
   39.30 -        ctxt.failsafe_callback_cs = FLAT_KERNEL_CS;
   39.31 -#endif
   39.32 -    if (((ctxt.ldt_base & (PAGE_SIZE - 1)) != 0) ||
   39.33 -        (ctxt.ldt_ents > 8192) ||
   39.34 -        (ctxt.ldt_base > hvirt_start) ||
   39.35 -        ((ctxt.ldt_base + ctxt.ldt_ents*8) > hvirt_start)) {
   39.36 -        ERROR("Bad LDT base or size");
   39.37 -        goto out;
   39.38 -    }
   39.39 -
   39.40      DPRINTF("Domain ready to be built.\n");
   39.41  
   39.42      domctl.cmd = XEN_DOMCTL_setvcpucontext;
    40.1 --- a/tools/libxc/xc_tbuf.c	Mon Nov 20 12:14:40 2006 -0700
    40.2 +++ b/tools/libxc/xc_tbuf.c	Mon Nov 20 13:11:15 2006 -0700
    40.3 @@ -57,7 +57,7 @@ int xc_tbuf_get_size(int xc_handle, unsi
    40.4      return rc;
    40.5  }
    40.6  
    40.7 -int xc_tbuf_enable(int xc_handle, size_t cnt, unsigned long *mfn,
    40.8 +int xc_tbuf_enable(int xc_handle, unsigned long pages, unsigned long *mfn,
    40.9                     unsigned long *size)
   40.10  {
   40.11      DECLARE_SYSCTL;
   40.12 @@ -68,7 +68,7 @@ int xc_tbuf_enable(int xc_handle, size_t
   40.13       * set (since trace buffers cannot be reallocated). If we really have no
   40.14       * buffers at all then tbuf_enable() will fail, so this is safe.
   40.15       */
   40.16 -    (void)xc_tbuf_set_size(xc_handle, cnt);
   40.17 +    (void)xc_tbuf_set_size(xc_handle, pages);
   40.18  
   40.19      if ( tbuf_enable(xc_handle, 1) != 0 )
   40.20          return -1;
    41.1 --- a/tools/libxc/xenctrl.h	Mon Nov 20 12:14:40 2006 -0700
    41.2 +++ b/tools/libxc/xenctrl.h	Mon Nov 20 13:11:15 2006 -0700
    41.3 @@ -556,8 +556,8 @@ long xc_get_tot_pages(int xc_handle, uin
    41.4   * Gets the machine address of the trace pointer area and the size of the
    41.5   * per CPU buffers.
    41.6   */
    41.7 -int xc_tbuf_enable(int xc_handle, size_t cnt, unsigned long *mfn,
    41.8 -    unsigned long *size);
    41.9 +int xc_tbuf_enable(int xc_handle, unsigned long pages,
   41.10 +                   unsigned long *mfn, unsigned long *size);
   41.11  
   41.12  /*
   41.13   * Disable tracing buffers.
   41.14 @@ -610,7 +610,7 @@ int xc_add_mmu_update(int xc_handle, xc_
   41.15                     unsigned long long ptr, unsigned long long val);
   41.16  int xc_finish_mmu_updates(int xc_handle, xc_mmu_t *mmu);
   41.17  
   41.18 -int xc_acm_op(int xc_handle, int cmd, void *arg, size_t arg_size);
   41.19 +int xc_acm_op(int xc_handle, int cmd, void *arg, unsigned long arg_size);
   41.20  
   41.21  /*
   41.22   * Return a handle to the event channel driver, or -1 on failure, in which case
    42.1 --- a/tools/misc/lomount/lomount.c	Mon Nov 20 12:14:40 2006 -0700
    42.2 +++ b/tools/misc/lomount/lomount.c	Mon Nov 20 13:11:15 2006 -0700
    42.3 @@ -44,8 +44,6 @@ enum
    42.4  	ERR_MOUNT		// Other failure of mount command
    42.5  };
    42.6  
    42.7 -#define _LARGEFILE_SOURCE
    42.8 -#define _FILE_OFFSET_BITS 64
    42.9  #include <unistd.h>
   42.10  #include <stdio.h>
   42.11  #include <stdlib.h>
    43.1 --- a/tools/python/xen/util/security.py	Mon Nov 20 12:14:40 2006 -0700
    43.2 +++ b/tools/python/xen/util/security.py	Mon Nov 20 13:11:15 2006 -0700
    43.3 @@ -606,11 +606,17 @@ def unify_resname(resource):
    43.4  
    43.5      # sanity check on resource name
    43.6      try:
    43.7 -        (type, resfile) = resource.split(":")
    43.8 +        (type, resfile) = resource.split(":", 1)
    43.9      except:
   43.10          err("Resource spec '%s' contains no ':' delimiter" % resource)
   43.11  
   43.12 -    if type == "phy":
   43.13 +    if type == "tap":
   43.14 +        try:
   43.15 +            (subtype, resfile) = resfile.split(":")
   43.16 +        except:
   43.17 +            err("Resource spec '%s' contains no tap subtype" % resource)
   43.18 +
   43.19 +    if type in ["phy", "tap"]:
   43.20          if not resfile.startswith("/"):
   43.21              resfile = "/dev/" + resfile
   43.22  
   43.23 @@ -619,6 +625,8 @@ def unify_resname(resource):
   43.24          err("Invalid resource.")
   43.25  
   43.26      # from here on absolute file names with resources
   43.27 +    if type == "tap":
   43.28 +        type = type + ":" + subtype
   43.29      resource = type + ":" + resfile
   43.30      return resource
   43.31  
    44.1 --- a/tools/python/xen/xend/XendAPI.py	Mon Nov 20 12:14:40 2006 -0700
    44.2 +++ b/tools/python/xen/xend/XendAPI.py	Mon Nov 20 13:11:15 2006 -0700
    44.3 @@ -591,6 +591,7 @@ class XendAPI:
    44.4                    'platform_localtime',
    44.5                    'platform_clock_offset',
    44.6                    'platform_enable_audio',
    44.7 +                  'platform_keymap',
    44.8                    'builder',
    44.9                    'boot_method',
   44.10                    'kernel_kernel',
   44.11 @@ -638,6 +639,7 @@ class XendAPI:
   44.12          'platform_localtime',
   44.13          'platform_clock_offset',
   44.14          'platform_enable_audio',
   44.15 +        'platform_keymap',
   44.16          'builder',
   44.17          'boot_method',
   44.18          'kernel_kernel',
   44.19 @@ -784,6 +786,10 @@ class XendAPI:
   44.20          dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
   44.21          return xen_api_todo()
   44.22      
   44.23 +    def vm_get_platform_keymap(self, session, vm_ref):
   44.24 +        dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
   44.25 +        return xen_api_todo()
   44.26 +    
   44.27      def vm_get_builder(self, session, vm_ref):
   44.28          dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
   44.29          return xen_api_todo()
   44.30 @@ -981,6 +987,7 @@ class XendAPI:
   44.31              'platform_localtime': xeninfo.get_platform_localtime(),
   44.32              'platform_clock_offset': xeninfo.get_platform_clock_offset(),
   44.33              'platform_enable_audio': xeninfo.get_platform_enable_audio(),
   44.34 +            'platform_keymap': xeninfo.get_platform_keymap(),
   44.35              'builder': xeninfo.get_builder(),
   44.36              'boot_method': xeninfo.get_boot_method(),
   44.37              'kernel_kernel': xeninfo.get_kernel_image(),
    45.1 --- a/tools/python/xen/xend/XendConfig.py	Mon Nov 20 12:14:40 2006 -0700
    45.2 +++ b/tools/python/xen/xend/XendConfig.py	Mon Nov 20 13:11:15 2006 -0700
    45.3 @@ -17,6 +17,7 @@
    45.4  
    45.5  import re
    45.6  import time
    45.7 +import types
    45.8  
    45.9  from xen.xend import sxp
   45.10  from xen.xend import uuid
   45.11 @@ -60,6 +61,7 @@ XENAPI_HVM_CFG = {
   45.12      'platform_serial' : 'serial',
   45.13      'platform_localtime': 'localtime',
   45.14      'platform_enable_audio': 'soundhw',
   45.15 +    'platform_keymap' : 'keymap',
   45.16  }    
   45.17  
   45.18  XENAPI_UNSUPPORTED_IN_LEGACY_CFG = [
   45.19 @@ -82,6 +84,7 @@ XENAPI_UNSUPPORTED_IN_LEGACY_CFG = [
   45.20      'platform_localtime',
   45.21      'platform_clock_offset',
   45.22      'platform_enable_audio',
   45.23 +    'platform_keymap',
   45.24      'builder',
   45.25      'grub_cmdline',
   45.26      'pci_bus',
   45.27 @@ -142,6 +145,7 @@ ROUNDTRIPPING_CONFIG_ENTRIES = [
   45.28      ('uuid',       str),
   45.29      ('vcpus',      int),
   45.30      ('vcpu_avail', int),
   45.31 +    ('cpu_cap',    int),
   45.32      ('cpu_weight', int),
   45.33      ('memory',     int),
   45.34      ('shadow_memory', int),
   45.35 @@ -447,11 +451,36 @@ class XendConfig(dict):
   45.36          for c in sxp.children(parsed, 'backend'):
   45.37              cfg['backend'].append(sxp.name(sxp.child0(c)))
   45.38  
    45.39 +        # Parsing the device SXPs. In most cases, the SXP looks
   45.40 +        # like this:
   45.41 +        #
   45.42 +        # [device, [vif, [mac, xx:xx:xx:xx:xx:xx], [ip 1.3.4.5]]]
   45.43 +        #
   45.44 +        # However, for PCI devices it looks like this:
   45.45 +        #
   45.46 +        # [device, [pci, [dev, [domain, 0], [bus, 0], [slot, 1]]]]
   45.47 +        #
    45.48 +        # The reason for this difference is that pciif.py needs
    45.49 +        # all the PCI device configurations at the same time when
    45.50 +        # creating the devices.
   45.51 +        #
   45.52 +        # To further complicate matters, Xen 2.0 configuration format
   45.53 +        # uses the following for pci device configuration:
   45.54 +        #
   45.55 +        # [device, [pci, [domain, 0], [bus, 0], [dev, 1], [func, 2]]]
   45.56 +        #
   45.57 +        # Hence we deal with pci device configurations outside of
   45.58 +        # the regular device parsing.
   45.59 +        
   45.60          cfg['device'] = {}
   45.61          for dev in sxp.children(parsed, 'device'):
   45.62              config = sxp.child0(dev)
   45.63              dev_type = sxp.name(config)
   45.64              dev_info = {}
   45.65 +            
   45.66 +            if dev_type == 'pci':
   45.67 +                continue 
   45.68 +            
   45.69              for opt, val in config[1:]:
   45.70                  dev_info[opt] = val
   45.71              log.debug("XendConfig: reading device: %s" % dev_info)
   45.72 @@ -459,9 +488,34 @@ class XendConfig(dict):
   45.73              dev_uuid = dev_info.get('uuid', uuid.createString())
   45.74              dev_info['uuid'] = dev_uuid
   45.75              cfg['device'][dev_uuid] = (dev_type, dev_info)
   45.76 +
   45.77 +        # deal with PCI device configurations if they exist
   45.78 +        for dev in sxp.children(parsed, 'device'):
   45.79 +            config = sxp.child0(dev)
   45.80 +            dev_type = sxp.name(config)
   45.81 +
   45.82 +            if dev_type != 'pci':
   45.83 +                continue
   45.84              
   45.85 -            #cfg['device'].append((sxp.name(config), config))
   45.86 -
   45.87 +            dev_attr = sxp.child_value(config, 'dev')
   45.88 +            if isinstance(dev_attr, (types.ListType, types.TupleType)):
   45.89 +                for pci_dev in sxp.children(config, 'dev'):
   45.90 +                    dev_info = {}
   45.91 +                    for opt, val in pci_dev[1:]:
   45.92 +                        dev_info[opt] = val
   45.93 +                    log.debug("XendConfig: reading device: %s" % dev_info)
   45.94 +                    dev_uuid = dev_info.get('uuid', uuid.createString())
   45.95 +                    dev_info['uuid'] = dev_uuid
   45.96 +                    cfg['device'][dev_uuid] = (dev_type, dev_info)
   45.97 +                    
   45.98 +            else: # Xen 2.0 PCI device configuration
   45.99 +                for opt, val in config[1:]:
  45.100 +                    dev_info[opt] = val
  45.101 +                log.debug("XendConfig: reading device: %s" % dev_info)
  45.102 +                # create uuid if it doesn't
   45.103 +                # create a uuid if the device doesn't have one
  45.104 +                dev_info['uuid'] = dev_uuid
  45.105 +                cfg['device'][dev_uuid] = (dev_type, dev_info)
  45.106  
  45.107          # Extract missing data from configuration entries
  45.108          if 'image' in cfg:
  45.109 @@ -529,7 +583,7 @@ class XendConfig(dict):
  45.110          old_state = sxp.child_value(parsed, 'state')
  45.111          if old_state:
  45.112              for i in range(len(CONFIG_OLD_DOM_STATES)):
  45.113 -                cfg[CONFIG_OLD_DOM_STATES[i]] = (old_state[i] != '-')
  45.114 +                cfg[CONFIG_OLD_DOM_STATES[i]] = int(old_state[i] != '-')
  45.115  
  45.116          # Xen API extra cfgs
  45.117          # ------------------
  45.118 @@ -726,7 +780,8 @@ class XendConfig(dict):
  45.119  
  45.120          # Verify devices
  45.121          for d_uuid, (d_type, d_info) in self['device'].items():
  45.122 -            if d_type not in XendDevices.valid_devices():
  45.123 +            if d_type not in XendDevices.valid_devices() and \
  45.124 +               d_type not in XendDevices.pseudo_devices():
  45.125                  raise XendConfigError('Invalid device (%s)' % d_type)
  45.126  
  45.127          # Verify restart modes
  45.128 @@ -744,7 +799,8 @@ class XendConfig(dict):
  45.129              self['vtpm_refs'] = []
  45.130  
  45.131      def device_add(self, dev_type, cfg_sxp = None, cfg_xenapi = None):
  45.132 -        if dev_type not in XendDevices.valid_devices():
  45.133 +        if dev_type not in XendDevices.valid_devices() and \
  45.134 +           dev_type not in XendDevices.pseudo_devices():
  45.135              raise XendConfigError("XendConfig: %s not a valid device type" %
  45.136                              dev_type)
  45.137  
  45.138 @@ -856,10 +912,27 @@ class XendConfig(dict):
  45.139          return sxpr
  45.140  
  45.141      def all_devices_sxpr(self):
  45.142 +        """Returns the SXPR for all devices in the current configuration."""
  45.143          sxprs = []
  45.144 +        pci_devs = []
  45.145          for dev_type, dev_info in self['device'].values():
  45.146 -            sxpr =  self.device_sxpr(dev_type = dev_type, dev_info = dev_info)
  45.147 -            sxprs.append((dev_type, sxpr))
  45.148 +            if dev_type == 'pci': # special case for pci devices
  45.149 +                pci_devs.append(dev_info)
  45.150 +            else:
  45.151 +                sxpr = self.device_sxpr(dev_type = dev_type,
  45.152 +                                        dev_info = dev_info)
  45.153 +                sxprs.append((dev_type, sxpr))
  45.154 +
   45.155 +        # if we have any pci_devs, we group them all into a
   45.156 +        # single 'pci' SXP entry.
  45.157 +        if pci_devs:
  45.158 +            sxpr = ['pci',]
  45.159 +            for dev_info in pci_devs:
  45.160 +                dev_sxpr = self.device_sxpr(dev_type = 'dev',
  45.161 +                                            dev_info = dev_info)
  45.162 +                sxpr.append(dev_sxpr)
  45.163 +            sxprs.append(('pci', sxpr))
  45.164 +            
  45.165          return sxprs
  45.166  
  45.167                       
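
To make the parsing comment above concrete, here is a hedged sketch of how the two device shapes end up keyed by uuid in cfg['device'].  Plain Python lists stand in for parsed SXP, make_uuid is a placeholder for uuid.createString(), and the Xen 2.0 PCI form is omitted:

    vif = ['device', ['vif', ['mac', '00:16:3e:00:00:01'], ['ip', '1.3.4.5']]]
    pci = ['device', ['pci', ['dev', ['domain', 0], ['bus', 0], ['slot', 1]],
                             ['dev', ['domain', 0], ['bus', 0], ['slot', 2]]]]

    def parse_devices(parsed_devices, make_uuid):
        cfg = {}
        for dev in parsed_devices:
            config = dev[1]                    # ['vif', ...] or ['pci', ...]
            dev_type = config[0]
            if dev_type == 'pci':
                # one entry per (dev ...) child of the single (pci ...) element
                children = [c for c in config[1:] if c[0] == 'dev']
            else:
                children = [config]
            for child in children:
                dev_info = dict((opt, val) for opt, val in child[1:])
                dev_uuid = dev_info.get('uuid', make_uuid())
                dev_info['uuid'] = dev_uuid
                cfg[dev_uuid] = (dev_type, dev_info)
        return cfg

    # len(parse_devices([vif, pci], make_uuid=lambda: object())) == 3

Going the other way, all_devices_sxpr() above regroups the per-uuid PCI entries back under one ('pci', ['pci', ['dev', ...], ['dev', ...]]) element.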
    46.1 --- a/tools/python/xen/xend/XendDevices.py	Mon Nov 20 12:14:40 2006 -0700
    46.2 +++ b/tools/python/xen/xend/XendDevices.py	Mon Nov 20 13:11:15 2006 -0700
    46.3 @@ -49,6 +49,11 @@ class XendDevices:
    46.4      valid_devices = classmethod(valid_devices)
    46.5  
    46.6      #@classmethod
    46.7 +    def pseudo_devices(cls):
    46.8 +        return ['console']
    46.9 +    pseudo_devices = classmethod(pseudo_devices)
   46.10 +
   46.11 +    #@classmethod
   46.12      def make_controller(cls, name, domain):
   46.13          """Factory function to make device controllers per domain.
   46.14  
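
The new pseudo_devices() list lets entries such as the console/vnc-port record (appended to the domain SXPR elsewhere in this changeset) pass configuration validation without xend trying to build a device controller for them.  A small illustrative sketch, with hard-coded stand-ins for the real class methods:

    VALID  = ['vif', 'vbd', 'vtpm']      # stand-in for XendDevices.valid_devices()
    PSEUDO = ['console']                 # stand-in for XendDevices.pseudo_devices()

    def check_device_type(dev_type):
        if dev_type not in VALID and dev_type not in PSEUDO:
            raise ValueError('Invalid device (%s)' % dev_type)

    def create_controllers(devices):
        # mirrors createDevices(): pseudo devices are accepted but skipped
        return [d for d, _ in devices if d in VALID]

    # check_device_type('console') passes;
    # create_controllers([('console', {}), ('vif', {})]) -> ['vif']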
    47.1 --- a/tools/python/xen/xend/XendDomain.py	Mon Nov 20 12:14:40 2006 -0700
    47.2 +++ b/tools/python/xen/xend/XendDomain.py	Mon Nov 20 13:11:15 2006 -0700
    47.3 @@ -33,7 +33,7 @@ import xen.lowlevel.xc
    47.4  from xen.xend import XendRoot, XendCheckpoint, XendDomainInfo
    47.5  from xen.xend.PrettyPrint import prettyprint
    47.6  from xen.xend.XendConfig import XendConfig
    47.7 -from xen.xend.XendError import XendError, XendInvalidDomain
    47.8 +from xen.xend.XendError import XendError, XendInvalidDomain, VmError
    47.9  from xen.xend.XendLogging import log
   47.10  from xen.xend.XendConstants import XS_VMROOT
   47.11  from xen.xend.XendConstants import DOM_STATE_HALTED, DOM_STATE_RUNNING
   47.12 @@ -65,7 +65,6 @@ class XendDomain:
    47.13      @type domains_lock: threading.RLock
   47.14      @ivar _allow_new_domains: Flag to set that allows creating of new domains.
   47.15      @type _allow_new_domains: boolean
   47.16 -    
   47.17      """
   47.18  
   47.19      def __init__(self):
   47.20 @@ -281,9 +280,13 @@ class XendDomain:
   47.21                  sxp_cache_file = open(self._managed_config_path(dom_uuid),'w')
   47.22                  prettyprint(dominfo.sxpr(), sxp_cache_file, width = 78)
   47.23                  sxp_cache_file.close()
   47.24 -            except IOError:
   47.25 -                log.error("Error occurred saving configuration file to %s" %
   47.26 -                          domain_config_dir)
   47.27 +            except:
   47.28 +                log.exception("Error occurred saving configuration file " +
   47.29 +                              "to %s" % domain_config_dir)
   47.30 +                try:
   47.31 +                    self._managed_domain_remove(dom_uuid)
   47.32 +                except:
   47.33 +                    pass
   47.34                  raise XendError("Failed to save configuration file to: %s" %
   47.35                                  domain_config_dir)
   47.36          else:
   47.37 @@ -374,24 +377,39 @@ class XendDomain:
   47.38          @rtype: None
   47.39          """
   47.40  
   47.41 +        running = self._running_domains()
   47.42 +        # Add domains that are not already tracked but running in Xen,
   47.43 +        # and update domain state for those that are running and tracked.
   47.44 +        for dom in running:
   47.45 +            domid = dom['domid']
   47.46 +            if domid in self.domains:
   47.47 +                self.domains[domid].update(dom)
   47.48 +            elif domid not in self.domains and dom['dying'] != 1:
   47.49 +                try:
   47.50 +                    new_dom = XendDomainInfo.recreate(dom, False)
   47.51 +                    self._add_domain(new_dom)                    
   47.52 +                except VmError:
   47.53 +                    log.exception("Unable to recreate domain")
   47.54 +                    try:
   47.55 +                        xc.domain_destroy(domid)
   47.56 +                    except:
   47.57 +                        log.exception("Hard destruction of domain failed: %d" %
   47.58 +                                      domid)
   47.59 +
   47.60          # update information for all running domains
   47.61          # - like cpu_time, status, dying, etc.
   47.62 -        running = self._running_domains()
   47.63 -        for dom in running:
   47.64 -            domid = dom['domid']
   47.65 -            if domid in self.domains and dom['dying'] != 1:
   47.66 -                self.domains[domid].update(dom)
   47.67 -
   47.68          # remove domains that are not running from active domain list.
   47.69          # The list might have changed by now, because the update call may
   47.70          # cause new domains to be added, if the domain has rebooted.  We get
   47.71          # the list again.
   47.72 +        running = self._running_domains()
   47.73          running_domids = [d['domid'] for d in running if d['dying'] != 1]
   47.74          for domid, dom in self.domains.items():
   47.75              if domid not in running_domids and domid != DOM0_ID:
   47.76                  self._remove_domain(dom, domid)
   47.77  
   47.78  
   47.79 +
   47.80      def _add_domain(self, info):
   47.81          """Add a domain to the list of running domains
   47.82          
   47.83 @@ -409,7 +427,6 @@ class XendDomain:
   47.84          @param info: XendDomainInfo of a domain to be removed.
   47.85          @type info: XendDomainInfo
   47.86          """
   47.87 -
   47.88          if info:
   47.89              if domid == None:
   47.90                  domid = info.getDomid()
   47.91 @@ -948,10 +965,10 @@ class XendDomain:
   47.92              dominfo = self.domain_lookup_nr(domid)
   47.93              if not dominfo:
   47.94                  raise XendInvalidDomain(str(domid))
   47.95 -            
   47.96 +            if dominfo.getDomid() == DOM0_ID:
   47.97 +                raise XendError("Cannot unpause privileged domain %s" % domid)
   47.98              log.info("Domain %s (%d) unpaused.", dominfo.getName(),
   47.99                       int(dominfo.getDomid()))
  47.100 -            
  47.101              dominfo.unpause()
  47.102          except XendInvalidDomain:
  47.103              log.exception("domain_unpause")
  47.104 @@ -973,6 +990,8 @@ class XendDomain:
  47.105              dominfo = self.domain_lookup_nr(domid)
  47.106              if not dominfo:
  47.107                  raise XendInvalidDomain(str(domid))
  47.108 +            if dominfo.getDomid() == DOM0_ID:
  47.109 +                raise XendError("Cannot pause privileged domain %s" % domid)
  47.110              log.info("Domain %s (%d) paused.", dominfo.getName(),
  47.111                       int(dominfo.getDomid()))
  47.112              dominfo.pause()
  47.113 @@ -1049,7 +1068,7 @@ class XendDomain:
  47.114              raise XendInvalidDomain(str(domid))
  47.115  
  47.116          if dominfo.getDomid() == DOM0_ID:
  47.117 -            raise XendError("Cannot migrate privileged domain %i" % domid)
  47.118 +            raise XendError("Cannot migrate privileged domain %s" % domid)
  47.119  
  47.120          """ The following call may raise a XendError exception """
  47.121          dominfo.testMigrateDevices(True, dst)
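
The reworked refresh logic above now makes two passes: it first adopts running domains that xend is not yet tracking (destroying any it cannot recreate), then re-reads the running list and forgets tracked domains that are gone.  A simplified sketch, with dictionaries standing in for XendDomainInfo objects and injected callables for the xc operations:

    def refresh(tracked, list_running, recreate, destroy):
        # Pass 1: adopt new running domains, update the ones already tracked.
        for dom in list_running():
            domid = dom['domid']
            if domid in tracked:
                tracked[domid].update(dom)
            elif dom['dying'] != 1:
                try:
                    tracked[domid] = recreate(dom)
                except Exception:
                    destroy(domid)             # hard destruction on failure
        # Pass 2: the list may have changed; drop domains that stopped
        # running, but never remove domain 0.
        alive = set(d['domid'] for d in list_running() if d['dying'] != 1)
        for domid in list(tracked):
            if domid not in alive and domid != 0:
                del tracked[domid]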
    48.1 --- a/tools/python/xen/xend/XendDomainInfo.py	Mon Nov 20 12:14:40 2006 -0700
    48.2 +++ b/tools/python/xen/xend/XendDomainInfo.py	Mon Nov 20 13:11:15 2006 -0700
    48.3 @@ -403,7 +403,7 @@ class XendDomainInfo:
    48.4          self.vmWatch = None
    48.5          self.shutdownWatch = None
    48.6          self.shutdownStartTime = None
    48.7 -        
    48.8 +
    48.9          self.state = DOM_STATE_HALTED
   48.10          self.state_updated = threading.Condition()
   48.11          self.refresh_shutdown_lock = threading.Condition()
   48.12 @@ -430,7 +430,7 @@ class XendDomainInfo:
    48.13          initialisation if it is not started.
   48.14          """
   48.15          from xen.xend import XendDomain
   48.16 -        
   48.17 +
   48.18          if self.state == DOM_STATE_HALTED:
   48.19              try:
   48.20                  self._constructDomain()
   48.21 @@ -443,7 +443,6 @@ class XendDomainInfo:
   48.22  
   48.23                  # save running configuration if XendDomains believe domain is
   48.24                  # persistent
   48.25 -                #
   48.26                  if is_managed:
   48.27                      xendomains = XendDomain.instance()
   48.28                      xendomains.managed_config_save(self)
   48.29 @@ -475,6 +474,9 @@ class XendDomainInfo:
   48.30          log.debug('XendDomainInfo.shutdown')
   48.31          if self.state in (DOM_STATE_SHUTDOWN, DOM_STATE_HALTED,):
   48.32              raise XendError('Domain cannot be shutdown')
   48.33 +
   48.34 +        if self.domid == 0:
   48.35 +            raise XendError('Domain 0 cannot be shutdown')
   48.36          
   48.37          if not reason in DOMAIN_SHUTDOWN_REASONS.values():
   48.38              raise XendError('Invalid reason: %s' % reason)
   48.39 @@ -920,7 +922,7 @@ class XendDomainInfo:
   48.40                          # the VM path now, otherwise we will end up with one
   48.41                          # watch for the old domain, and one for the new.
   48.42                          self._unwatchVm()
   48.43 -                    elif reason in ['poweroff', 'reboot']:
   48.44 +                    elif reason in ('poweroff', 'reboot'):
   48.45                          restart_reason = reason
   48.46                      else:
   48.47                          self.destroy()
   48.48 @@ -1117,8 +1119,9 @@ class XendDomainInfo:
   48.49          @raise: VmError for invalid devices
   48.50          """
   48.51          for (devclass, config) in self.info.all_devices_sxpr():
   48.52 -            log.info("createDevice: %s : %s" % (devclass, config))
   48.53 -            self._createDevice(devclass, config)
   48.54 +            if devclass in XendDevices.valid_devices():
   48.55 +                log.info("createDevice: %s : %s" % (devclass, config))
   48.56 +                self._createDevice(devclass, config)
   48.57  
   48.58          if self.image:
   48.59              self.image.createDeviceModel()
   48.60 @@ -1323,6 +1326,8 @@ class XendDomainInfo:
   48.61              self._stateSet(DOM_STATE_RUNNING)
   48.62          except RuntimeError, exn:
   48.63              log.exception("XendDomainInfo.initDomain: exception occurred")
   48.64 +            if self.info['bootloader'] and self.image is not None:
   48.65 +                self.image.cleanupBootloading()
   48.66              raise VmError(str(exn))
   48.67  
   48.68  
   48.69 @@ -1521,6 +1526,14 @@ class XendDomainInfo:
   48.70      def _unwatchVm(self):
   48.71          """Remove the watch on the VM path, if any.  Idempotent.  Nothrow
   48.72          guarantee."""
   48.73 +        try:
   48.74 +            try:
   48.75 +                if self.vmWatch:
   48.76 +                    self.vmWatch.unwatch()
   48.77 +            finally:
   48.78 +                self.vmWatch = None
   48.79 +        except:
   48.80 +            log.exception("Unwatching VM path failed.")
   48.81  
   48.82      def testDeviceComplete(self):
   48.83          """ For Block IO migration safety we must ensure that
   48.84 @@ -1659,9 +1672,17 @@ class XendDomainInfo:
   48.85          log.trace("XendDomainInfo.update done on domain %s: %s",
   48.86                    str(self.domid), self.info)
   48.87  
   48.88 -    def sxpr(self, ignore_devices = False):
   48.89 -        return self.info.get_sxp(domain = self,
   48.90 -                                 ignore_devices = ignore_devices)
   48.91 +    def sxpr(self, ignore_store = False):
   48.92 +        result = self.info.get_sxp(domain = self,
   48.93 +                                   ignore_devices = ignore_store)
   48.94 +
   48.95 +        if not ignore_store and self.dompath:
   48.96 +            vnc_port = self._readDom('console/vnc-port')
   48.97 +            if vnc_port is not None:
   48.98 +                result.append(['device',
   48.99 +                               ['console', ['vnc-port', str(vnc_port)]]])
  48.100 +
  48.101 +        return result
  48.102  
  48.103      # Xen API
  48.104      # ----------------------------------------------------------------
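
The body given to _unwatchVm() above is a compact "idempotent, nothrow" idiom: the inner finally clears the reference even if unwatch() raises, and the outer except keeps the failure from propagating.  A stand-alone sketch of the same shape (WatchHolder and its attributes are placeholders):

    class WatchHolder(object):
        def __init__(self, watch, log):
            self.watch, self.log = watch, log

        def unwatch(self):
            """Drop the watch if set.  Idempotent.  Nothrow guarantee."""
            try:
                try:
                    if self.watch:
                        self.watch.unwatch()
                finally:
                    self.watch = None          # cleared even on error
            except Exception:
                self.log.exception("Unwatching failed.")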
    49.1 --- a/tools/python/xen/xend/image.py	Mon Nov 20 12:14:40 2006 -0700
    49.2 +++ b/tools/python/xen/xend/image.py	Mon Nov 20 13:11:15 2006 -0700
    49.3 @@ -309,13 +309,14 @@ class HVMImageHandler(ImageHandler):
    49.4      def parseDeviceModelArgs(self, imageConfig, deviceConfig):
    49.5          dmargs = [ 'boot', 'fda', 'fdb', 'soundhw',
    49.6                     'localtime', 'serial', 'stdvga', 'isa', 'vcpus',
    49.7 -                   'acpi', 'usb', 'usbdevice']
    49.8 +                   'acpi', 'usb', 'usbdevice', 'keymap' ]
    49.9          ret = []
   49.10          for a in dmargs:
   49.11              v = sxp.child_value(imageConfig, a)
   49.12  
   49.13              # python doesn't allow '-' in variable names
   49.14              if a == 'stdvga': a = 'std-vga'
   49.15 +            if a == 'keymap': a = 'k'
   49.16  
   49.17              # Handle booleans gracefully
   49.18              if a in ['localtime', 'std-vga', 'isa', 'usb', 'acpi']:
   49.19 @@ -328,7 +329,7 @@ class HVMImageHandler(ImageHandler):
   49.20  
   49.21              if a in ['fda', 'fdb' ]:
   49.22                  if v:
   49.23 -                    if not os.path.isfile(v):
   49.24 +                    if not os.path.isabs(v):
   49.25                          raise VmError("Floppy file %s does not exist." % v)
   49.26              log.debug("args: %s, val: %s" % (a,v))
   49.27  
   49.28 @@ -385,8 +386,6 @@ class HVMImageHandler(ImageHandler):
   49.29              else:
   49.30                  ret += ['-vnc', '%d' % vncdisplay]
   49.31  
   49.32 -            ret += ['-k', 'en-us']
   49.33 -
   49.34              vnclisten = sxp.child_value(config, 'vnclisten')
   49.35              if not(vnclisten):
   49.36                  vnclisten = (xen.xend.XendRoot.instance().
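
With the image.py hunks above, the VNC keyboard layout now comes from the guest's 'keymap' option rather than the previously hard-coded '-k en-us'.  A hedged sketch of just that mapping, where child_value stands in for sxp.child_value (and dict.get in the usage line):

    def dm_keymap_args(image_config, child_value):
        """qemu-dm arguments contributed by the 'keymap' option."""
        v = child_value(image_config, 'keymap')
        if not v:
            return []
        # 'keymap' is only the config-file name; the device model flag is '-k'
        return ['-k', str(v)]

    # with a dict standing in for the parsed image SXP:
    # dm_keymap_args({'keymap': 'ja'}, dict.get) -> ['-k', 'ja']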
    50.1 --- a/tools/python/xen/xend/server/pciif.py	Mon Nov 20 12:14:40 2006 -0700
    50.2 +++ b/tools/python/xen/xend/server/pciif.py	Mon Nov 20 13:11:15 2006 -0700
    50.3 @@ -65,7 +65,7 @@ class PciController(DevController):
    50.4                      else:
    50.5                          return default
    50.6  
    50.7 -                if isinstance(val, types.StringType):
    50.8 +                if isinstance(val, types.StringTypes):
    50.9                      return int(val, 16)
   50.10                  else:
   50.11                      return val
   50.12 @@ -79,7 +79,7 @@ class PciController(DevController):
   50.13          back = {}
   50.14  
   50.15          val = sxp.child_value(config, 'dev')
   50.16 -        if isinstance(val, list):
   50.17 +        if isinstance(val, (types.ListType, types.TupleType)):
   50.18              pcidevid = 0
   50.19              for dev_config in sxp.children(config, 'dev'):
   50.20                  domain = get_param(dev_config, 'domain', 0)
   50.21 @@ -89,7 +89,7 @@ class PciController(DevController):
   50.22  
   50.23                  self.setupDevice(domain, bus, slot, func)
   50.24  
   50.25 -                back['dev-%i'%(pcidevid)]="%04x:%02x:%02x.%02x"% \
   50.26 +                back['dev-%i' % pcidevid]="%04x:%02x:%02x.%02x"% \
   50.27                          (domain, bus, slot, func)
   50.28                  pcidevid+=1
   50.29              
   50.30 @@ -115,19 +115,19 @@ class PciController(DevController):
   50.31          pci_devs = []
   50.32          
   50.33          for i in range(int(num_devs)):
   50.34 -            (dev_config,) = self.readBackend(devid, 'dev-%d'%(i))
   50.35 +            dev_config = self.readBackend(devid, 'dev-%d' % i)
   50.36  
   50.37 -            pci_match = re.match(r"((?P<domain>[0-9a-fA-F]{1,4})[:,])?" + \
   50.38 -                    r"(?P<bus>[0-9a-fA-F]{1,2})[:,]" + \
   50.39 -                    r"(?P<slot>[0-9a-fA-F]{1,2})[.,]" + \
   50.40 -                    r"(?P<func>[0-9a-fA-F]{1,2})", dev_config)
   50.41 +            pci_match = re.match(r"((?P<domain>[0-9a-fA-F]{1,4})[:,])?" +
   50.42 +                                 r"(?P<bus>[0-9a-fA-F]{1,2})[:,]" + 
   50.43 +                                 r"(?P<slot>[0-9a-fA-F]{1,2})[.,]" + 
   50.44 +                                 r"(?P<func>[0-9a-fA-F]{1,2})", dev_config)
   50.45              
   50.46              if pci_match!=None:
   50.47 -                pci_dev_info = pci_match.groupdict('0')
   50.48 +                pci_dev_info = pci_match.groupdict()
   50.49                  pci_devs.append({'domain': '0x%(domain)s' % pci_dev_info,
   50.50                                   'bus': '0x%(bus)s' % pci_dev_info,
   50.51 -                                 'slot': '0x(slot)s' % pci_dev_info,
   50.52 -                                 'func': '0x(func)s' % pci_dev_info})
   50.53 +                                 'slot': '0x%(slot)s' % pci_dev_info,
   50.54 +                                 'func': '0x%(func)s' % pci_dev_info})
   50.55  
   50.56          result['dev'] = pci_devs
   50.57          return result
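
The corrected '0x%(slot)s'/'0x%(func)s' format strings above mean the backend now stores and re-parses entries like '0000:00:01.00'.  A self-contained sketch of that round trip, using the same regular expression as the hunk:

    import re

    PCI_RE = (r"((?P<domain>[0-9a-fA-F]{1,4})[:,])?"
              r"(?P<bus>[0-9a-fA-F]{1,2})[:,]"
              r"(?P<slot>[0-9a-fA-F]{1,2})[.,]"
              r"(?P<func>[0-9a-fA-F]{1,2})")

    def parse_pci_dev(dev_config):
        m = re.match(PCI_RE, dev_config)
        if m is None:
            return None
        info = m.groupdict()
        return {'domain': '0x%(domain)s' % info,
                'bus':    '0x%(bus)s' % info,
                'slot':   '0x%(slot)s' % info,
                'func':   '0x%(func)s' % info}

    # parse_pci_dev('0000:00:01.00')
    #   -> {'domain': '0x0000', 'bus': '0x00', 'slot': '0x01', 'func': '0x00'}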
    51.1 --- a/tools/python/xen/xm/create.py	Mon Nov 20 12:14:40 2006 -0700
    51.2 +++ b/tools/python/xen/xm/create.py	Mon Nov 20 13:11:15 2006 -0700
    51.3 @@ -387,6 +387,10 @@ gopts.var('localtime', val='no|yes',
    51.4            fn=set_bool, default=0,
    51.5            use="Is RTC set to localtime?")
    51.6  
    51.7 +gopts.var('keymap', val='FILE',
    51.8 +          fn=set_value, default='',
     51.9 +          use="Set the keyboard layout to use")
   51.10 +
   51.11  gopts.var('usb', val='no|yes',
   51.12            fn=set_bool, default=0,
   51.13            use="Emulate USB devices?")
   51.14 @@ -660,7 +664,7 @@ def configure_hvm(config_image, vals):
   51.15               'localtime', 'serial', 'stdvga', 'isa', 'nographic', 'soundhw',
   51.16               'vnc', 'vncdisplay', 'vncunused', 'vncconsole', 'vnclisten',
   51.17               'sdl', 'display', 'xauthority',
   51.18 -             'acpi', 'usb', 'usbdevice' ]
   51.19 +             'acpi', 'usb', 'usbdevice', 'keymap' ]
   51.20      for a in args:
   51.21          if (vals.__dict__[a]):
   51.22              config_image.append([a, vals.__dict__[a]])
    52.1 --- a/tools/python/xen/xm/main.py	Mon Nov 20 12:14:40 2006 -0700
    52.2 +++ b/tools/python/xen/xm/main.py	Mon Nov 20 13:11:15 2006 -0700
    52.3 @@ -199,6 +199,10 @@ SUBCOMMAND_OPTIONS = {
    52.4      'network-list': (
    52.5         ('-l', '--long', 'List resources as SXP'),
    52.6      ),
    52.7 +    'dump-core': (
    52.8 +       ('-L', '--live', 'Dump core without pausing the domain'),
    52.9 +       ('-C', '--crash', 'Crash domain after dumping core'),
   52.10 +    ),
   52.11  }
   52.12  
   52.13  common_commands = [
    53.1 --- a/tools/xentrace/Makefile	Mon Nov 20 12:14:40 2006 -0700
    53.2 +++ b/tools/xentrace/Makefile	Mon Nov 20 13:11:15 2006 -0700
    53.3 @@ -1,7 +1,7 @@
    53.4  XEN_ROOT=../..
    53.5  include $(XEN_ROOT)/tools/Rules.mk
    53.6  
    53.7 -CFLAGS  += -Werror -D_LARGEFILE64_SOURCE
    53.8 +CFLAGS  += -Werror
    53.9  
   53.10  CFLAGS  += -I $(XEN_XC)
   53.11  CFLAGS  += -I $(XEN_LIBXC)
    54.1 --- a/unmodified_drivers/linux-2.6/README	Mon Nov 20 12:14:40 2006 -0700
    54.2 +++ b/unmodified_drivers/linux-2.6/README	Mon Nov 20 13:11:15 2006 -0700
    54.3 @@ -2,6 +2,6 @@ To build, run ./mkbuildtree and then
    54.4  
    54.5  make -C /path/to/kernel/source M=$PWD modules
    54.6  
    54.7 -You get four modules, xen-evtchn-pci.ko, xenbus.ko, xen-vbd.ko, and
    54.8 -xen-vnif.ko.  Load xen-evtchn-pci first, then xenbus, and then
    54.9 +You get four modules, xen-platform-pci.ko, xenbus.ko, xen-vbd.ko, and
   54.10 +xen-vnif.ko.  Load xen-platform-pci first, then xenbus, and then
   54.11  whichever of xen-vbd and xen-vnif you happen to need.
    55.1 --- a/xen/arch/ia64/vmx/vmx_support.c	Mon Nov 20 12:14:40 2006 -0700
    55.2 +++ b/xen/arch/ia64/vmx/vmx_support.c	Mon Nov 20 13:11:15 2006 -0700
    55.3 @@ -95,8 +95,7 @@ void vmx_send_assist_req(struct vcpu *v)
    55.4              break;
    55.5          }
    55.6  
    55.7 -        /* I want to call __enter_scheduler() only */
    55.8 -        do_sched_op_compat(SCHEDOP_yield, 0);
    55.9 +        raise_softirq(SCHEDULE_SOFTIRQ);
   55.10          mb();
   55.11      }
   55.12  
    56.1 --- a/xen/arch/x86/domain.c	Mon Nov 20 12:14:40 2006 -0700
    56.2 +++ b/xen/arch/x86/domain.c	Mon Nov 20 13:11:15 2006 -0700
    56.3 @@ -294,6 +294,12 @@ int arch_set_info_guest(
    56.4  
    56.5          for ( i = 0; i < 256; i++ )
    56.6              fixup_guest_code_selector(c->trap_ctxt[i].cs);
    56.7 +
    56.8 +        /* LDT safety checks. */
    56.9 +        if ( ((c->ldt_base & (PAGE_SIZE-1)) != 0) || 
   56.10 +             (c->ldt_ents > 8192) ||
   56.11 +             !array_access_ok(c->ldt_base, c->ldt_ents, LDT_ENTRY_SIZE) )
   56.12 +            return -EINVAL;
   56.13      }
   56.14  
   56.15      clear_bit(_VCPUF_fpu_initialised, &v->vcpu_flags);
   56.16 @@ -396,21 +402,20 @@ arch_do_vcpu_op(
   56.17          if ( copy_from_guest(&area, arg, 1) )
   56.18              break;
   56.19  
   56.20 -        if ( !access_ok(area.addr.v, sizeof(*area.addr.v)) )
   56.21 +        if ( !guest_handle_okay(area.addr.h, 1) )
   56.22              break;
   56.23  
   56.24          rc = 0;
   56.25 -        v->runstate_guest = area.addr.v;
   56.26 +        v->runstate_guest = area.addr.h;
   56.27  
   56.28          if ( v == current )
   56.29          {
   56.30 -            __copy_to_user(v->runstate_guest, &v->runstate,
   56.31 -                           sizeof(v->runstate));
   56.32 +            __copy_to_guest(v->runstate_guest, &v->runstate, 1);
   56.33          }
   56.34          else
   56.35          {
   56.36              vcpu_runstate_get(v, &runstate);
   56.37 -            __copy_to_user(v->runstate_guest, &runstate, sizeof(runstate));
   56.38 +            __copy_to_guest(v->runstate_guest, &runstate, 1);
   56.39          }
   56.40  
   56.41          break;
   56.42 @@ -424,33 +429,6 @@ arch_do_vcpu_op(
   56.43      return rc;
   56.44  }
   56.45  
   56.46 -void new_thread(struct vcpu *d,
   56.47 -                unsigned long start_pc,
   56.48 -                unsigned long start_stack,
   56.49 -                unsigned long start_info)
   56.50 -{
   56.51 -    struct cpu_user_regs *regs = &d->arch.guest_context.user_regs;
   56.52 -
   56.53 -    /*
   56.54 -     * Initial register values:
   56.55 -     *  DS,ES,FS,GS = FLAT_KERNEL_DS
   56.56 -     *       CS:EIP = FLAT_KERNEL_CS:start_pc
   56.57 -     *       SS:ESP = FLAT_KERNEL_SS:start_stack
   56.58 -     *          ESI = start_info
   56.59 -     *  [EAX,EBX,ECX,EDX,EDI,EBP are zero]
   56.60 -     */
   56.61 -    regs->ds = regs->es = regs->fs = regs->gs = FLAT_KERNEL_DS;
   56.62 -    regs->ss = FLAT_KERNEL_SS;
   56.63 -    regs->cs = FLAT_KERNEL_CS;
   56.64 -    regs->eip = start_pc;
   56.65 -    regs->esp = start_stack;
   56.66 -    regs->esi = start_info;
   56.67 -
   56.68 -    __save_flags(regs->eflags);
   56.69 -    regs->eflags |= X86_EFLAGS_IF;
   56.70 -}
   56.71 -
   56.72 -
   56.73  #ifdef __x86_64__
   56.74  
   56.75  #define loadsegment(seg,value) ({               \
   56.76 @@ -767,9 +745,8 @@ void context_switch(struct vcpu *prev, s
   56.77      context_saved(prev);
   56.78  
   56.79      /* Update per-VCPU guest runstate shared memory area (if registered). */
   56.80 -    if ( next->runstate_guest != NULL )
   56.81 -        __copy_to_user(next->runstate_guest, &next->runstate,
   56.82 -                       sizeof(next->runstate));
   56.83 +    if ( !guest_handle_is_null(next->runstate_guest) )
   56.84 +        __copy_to_guest(next->runstate_guest, &next->runstate, 1);
   56.85  
   56.86      schedule_tail(next);
   56.87      BUG();
    57.1 --- a/xen/arch/x86/domain_build.c	Mon Nov 20 12:14:40 2006 -0700
    57.2 +++ b/xen/arch/x86/domain_build.c	Mon Nov 20 13:11:15 2006 -0700
    57.3 @@ -249,6 +249,7 @@ int construct_dom0(struct domain *d,
    57.4                     char *cmdline)
    57.5  {
    57.6      int i, rc, dom0_pae, xen_pae, order;
    57.7 +    struct cpu_user_regs *regs;
    57.8      unsigned long pfn, mfn;
    57.9      unsigned long nr_pages;
   57.10      unsigned long nr_pt_pages;
   57.11 @@ -441,19 +442,7 @@ int construct_dom0(struct domain *d,
   57.12      mpt_alloc = (vpt_start - dsi.v_start) + 
   57.13          (unsigned long)pfn_to_paddr(alloc_spfn);
   57.14  
   57.15 -    /*
   57.16 -     * We're basically forcing default RPLs to 1, so that our "what privilege
   57.17 -     * level are we returning to?" logic works.
   57.18 -     */
   57.19 -    v->arch.guest_context.kernel_ss = FLAT_KERNEL_SS;
   57.20 -    for ( i = 0; i < 256; i++ ) 
   57.21 -        v->arch.guest_context.trap_ctxt[i].cs = FLAT_KERNEL_CS;
   57.22 -
   57.23  #if defined(__i386__)
   57.24 -
   57.25 -    v->arch.guest_context.failsafe_callback_cs = FLAT_KERNEL_CS;
   57.26 -    v->arch.guest_context.event_callback_cs    = FLAT_KERNEL_CS;
   57.27 -
   57.28      /*
   57.29       * Protect the lowest 1GB of memory. We use a temporary mapping there
   57.30       * from which we copy the kernel and ramdisk images.
   57.31 @@ -816,7 +805,22 @@ int construct_dom0(struct domain *d,
   57.32  
   57.33      set_bit(_VCPUF_initialised, &v->vcpu_flags);
   57.34  
   57.35 -    new_thread(v, dsi.v_kernentry, vstack_end, vstartinfo_start);
   57.36 +    /*
   57.37 +     * Initial register values:
   57.38 +     *  DS,ES,FS,GS = FLAT_KERNEL_DS
   57.39 +     *       CS:EIP = FLAT_KERNEL_CS:start_pc
   57.40 +     *       SS:ESP = FLAT_KERNEL_SS:start_stack
   57.41 +     *          ESI = start_info
   57.42 +     *  [EAX,EBX,ECX,EDX,EDI,EBP are zero]
   57.43 +     */
   57.44 +    regs = &v->arch.guest_context.user_regs;
   57.45 +    regs->ds = regs->es = regs->fs = regs->gs = FLAT_KERNEL_DS;
   57.46 +    regs->ss = FLAT_KERNEL_SS;
   57.47 +    regs->cs = FLAT_KERNEL_CS;
   57.48 +    regs->eip = dsi.v_kernentry;
   57.49 +    regs->esp = vstack_end;
   57.50 +    regs->esi = vstartinfo_start;
   57.51 +    regs->eflags = X86_EFLAGS_IF;
   57.52  
   57.53      if ( opt_dom0_shadow )
   57.54          if ( shadow_test_enable(d) == 0 ) 
    58.1 --- a/xen/arch/x86/hvm/hvm.c	Mon Nov 20 12:14:40 2006 -0700
    58.2 +++ b/xen/arch/x86/hvm/hvm.c	Mon Nov 20 13:11:15 2006 -0700
    58.3 @@ -74,6 +74,44 @@ void hvm_set_guest_time(struct vcpu *v, 
    58.4      hvm_funcs.set_tsc_offset(v, v->arch.hvm_vcpu.cache_tsc_offset);
    58.5  }
    58.6  
    58.7 +u64 hvm_get_guest_time(struct vcpu *v)
    58.8 +{
    58.9 +    u64    host_tsc;
   58.10 +
   58.11 +    rdtscll(host_tsc);
   58.12 +    return host_tsc + v->arch.hvm_vcpu.cache_tsc_offset;
   58.13 +}
   58.14 +
   58.15 +void hvm_freeze_time(struct vcpu *v)
   58.16 +{
   58.17 +    struct periodic_time *pt=&v->domain->arch.hvm_domain.pl_time.periodic_tm;
   58.18 +
   58.19 +    if ( pt->enabled && pt->first_injected
   58.20 +            && (v->vcpu_id == pt->bind_vcpu)
   58.21 +            && !v->arch.hvm_vcpu.guest_time ) {
   58.22 +        v->arch.hvm_vcpu.guest_time = hvm_get_guest_time(v);
   58.23 +        if ( !test_bit(_VCPUF_blocked, &v->vcpu_flags) )
   58.24 +        {
   58.25 +            stop_timer(&pt->timer);
   58.26 +            rtc_freeze(v);
   58.27 +        }
   58.28 +    }
   58.29 +}
   58.30 +
   58.31 +void hvm_migrate_timers(struct vcpu *v)
   58.32 +{
   58.33 +    struct periodic_time *pt = &v->domain->arch.hvm_domain.pl_time.periodic_tm;
   58.34 +    struct PMTState *vpmt = &v->domain->arch.hvm_domain.pl_time.vpmt;
   58.35 +
   58.36 +    if ( pt->enabled )
   58.37 +    {
   58.38 +        migrate_timer(&pt->timer, v->processor);
   58.39 +    }
   58.40 +    migrate_timer(&vcpu_vlapic(v)->vlapic_timer, v->processor);
   58.41 +    migrate_timer(&vpmt->timer, v->processor);
   58.42 +    rtc_migrate_timers(v);
   58.43 +}
   58.44 +
   58.45  void hvm_do_resume(struct vcpu *v)
   58.46  {
   58.47      ioreq_t *p;
   58.48 @@ -92,6 +130,9 @@ void hvm_do_resume(struct vcpu *v)
   58.49          pickup_deactive_ticks(pt);
   58.50      }
   58.51  
   58.52 +    /* Re-enable the RTC timer if needed */
   58.53 +    rtc_thaw(v);
   58.54 +
   58.55      /* NB. Optimised for common case (p->state == STATE_IOREQ_NONE). */
   58.56      p = &get_vio(v->domain, v->vcpu_id)->vp_ioreq;
   58.57      while ( p->state != STATE_IOREQ_NONE )
   58.58 @@ -186,7 +227,7 @@ int hvm_vcpu_initialise(struct vcpu *v)
   58.59                 pt_timer_fn, v, v->processor);
   58.60      pit_init(v, cpu_khz);
   58.61      rtc_init(v, RTC_PORT(0), RTC_IRQ);
   58.62 -    pmtimer_init(v, ACPI_PM_TMR_BLK_ADDRESS); 
   58.63 +    pmtimer_init(v, ACPI_PM_TMR_BLK_ADDRESS);
   58.64  
   58.65      /* Init guest TSC to start from zero. */
   58.66      hvm_set_guest_time(v, 0);
   58.67 @@ -209,14 +250,6 @@ void pic_irq_request(void *data, int lev
   58.68      *interrupt_request = level;
   58.69  }
   58.70  
   58.71 -u64 hvm_get_guest_time(struct vcpu *v)
   58.72 -{
   58.73 -    u64    host_tsc;
   58.74 -    
   58.75 -    rdtscll(host_tsc);
   58.76 -    return host_tsc + v->arch.hvm_vcpu.cache_tsc_offset;
   58.77 -}
   58.78 -
   58.79  int cpu_get_interrupt(struct vcpu *v, int *type)
   58.80  {
   58.81      int intno;
   58.82 @@ -267,6 +300,28 @@ static void hvm_vcpu_down(void)
   58.83      }
   58.84  }
   58.85  
   58.86 +void hvm_send_assist_req(struct vcpu *v)
   58.87 +{
   58.88 +    ioreq_t *p;
   58.89 +
   58.90 +    p = &get_vio(v->domain, v->vcpu_id)->vp_ioreq;
   58.91 +    if ( unlikely(p->state != STATE_IOREQ_NONE) )
   58.92 +    {
   58.93 +        /* This indicates a bug in the device model.  Crash the domain. */
   58.94 +        gdprintk(XENLOG_ERR, "Device model set bad IO state %d.\n", p->state);
   58.95 +        domain_crash_synchronous();
   58.96 +    }
   58.97 +
   58.98 +    prepare_wait_on_xen_event_channel(v->arch.hvm_vcpu.xen_port);
   58.99 +
  58.100 +    /*
  58.101 +     * Following happens /after/ blocking and setting up ioreq contents.
  58.102 +     * prepare_wait_on_xen_event_channel() is an implicit barrier.
  58.103 +     */
  58.104 +    p->state = STATE_IOREQ_READY;
  58.105 +    notify_via_xen_event_channel(v->arch.hvm_vcpu.xen_port);
  58.106 +}
  58.107 +
  58.108  void hvm_hlt(unsigned long rflags)
  58.109  {
  58.110      /*
    59.1 --- a/xen/arch/x86/hvm/i8259.c	Mon Nov 20 12:14:40 2006 -0700
    59.2 +++ b/xen/arch/x86/hvm/i8259.c	Mon Nov 20 13:11:15 2006 -0700
    59.3 @@ -536,8 +536,6 @@ int is_periodic_irq(struct vcpu *v, int 
    59.4      int vec;
    59.5      struct periodic_time *pt =
    59.6          &(v->domain->arch.hvm_domain.pl_time.periodic_tm);
    59.7 -    struct RTCState *vrtc =
    59.8 -        &(v->domain->arch.hvm_domain.pl_time.vrtc);
    59.9  
   59.10      if (pt->irq == 0) { /* Is it pit irq? */
   59.11          if (type == APIC_DM_EXTINT)
   59.12 @@ -549,16 +547,6 @@ int is_periodic_irq(struct vcpu *v, int 
   59.13              return 1;
   59.14      }
   59.15  
   59.16 -    if (pt->irq == 8) { /* Or rtc irq? */
   59.17 -        if (type == APIC_DM_EXTINT)
   59.18 -            vec = domain_vpic(v->domain)->pics[1].irq_base;
   59.19 -        else
   59.20 -            vec = domain_vioapic(v->domain)->redirtbl[8].fields.vector;
   59.21 -
   59.22 -        if (irq == vec)
   59.23 -            return is_rtc_periodic_irq(vrtc);
   59.24 -    }
   59.25 -
   59.26      return 0;
   59.27  }
   59.28  
    60.1 --- a/xen/arch/x86/hvm/platform.c	Mon Nov 20 12:14:40 2006 -0700
    60.2 +++ b/xen/arch/x86/hvm/platform.c	Mon Nov 20 13:11:15 2006 -0700
    60.3 @@ -346,7 +346,7 @@ static int reg_mem(unsigned char size, u
    60.4      return DECODE_success;
    60.5  }
    60.6  
    60.7 -static int hvm_decode(int realmode, unsigned char *opcode,
    60.8 +static int mmio_decode(int realmode, unsigned char *opcode,
    60.9                        struct hvm_io_op *mmio_op, unsigned char *op_size)
   60.10  {
   60.11      unsigned char size_reg = 0;
   60.12 @@ -722,28 +722,6 @@ int inst_copy_from_guest(unsigned char *
   60.13      return inst_len;
   60.14  }
   60.15  
   60.16 -static void hvm_send_assist_req(struct vcpu *v)
   60.17 -{
   60.18 -    ioreq_t *p;
   60.19 -
   60.20 -    p = &get_vio(v->domain, v->vcpu_id)->vp_ioreq;
   60.21 -    if ( unlikely(p->state != STATE_IOREQ_NONE) )
   60.22 -    {
   60.23 -        /* This indicates a bug in the device model.  Crash the domain. */
   60.24 -        gdprintk(XENLOG_ERR, "Device model set bad IO state %d.\n", p->state);
   60.25 -        domain_crash_synchronous();
   60.26 -    }
   60.27 -
   60.28 -    prepare_wait_on_xen_event_channel(v->arch.hvm_vcpu.xen_port);
   60.29 -
   60.30 -    /*
   60.31 -     * Following happens /after/ blocking and setting up ioreq contents.
   60.32 -     * prepare_wait_on_xen_event_channel() is an implicit barrier.
   60.33 -     */
   60.34 -    p->state = STATE_IOREQ_READY;
   60.35 -    notify_via_xen_event_channel(v->arch.hvm_vcpu.xen_port);
   60.36 -}
   60.37 -
   60.38  void send_pio_req(unsigned long port, unsigned long count, int size,
   60.39                    long value, int dir, int df, int value_is_ptr)
   60.40  {
   60.41 @@ -927,7 +905,7 @@ void handle_mmio(unsigned long gpa)
   60.42          domain_crash_synchronous();
   60.43      }
   60.44  
   60.45 -    if ( hvm_decode(realmode, inst, mmio_op, &op_size) == DECODE_failure ) {
   60.46 +    if ( mmio_decode(realmode, inst, mmio_op, &op_size) == DECODE_failure ) {
   60.47          printk("handle_mmio: failed to decode instruction\n");
   60.48          printk("mmio opcode: gpa 0x%lx, len %d:", gpa, inst_len);
   60.49          for ( i = 0; i < inst_len; i++ )
    61.1 --- a/xen/arch/x86/hvm/rtc.c	Mon Nov 20 12:14:40 2006 -0700
    61.2 +++ b/xen/arch/x86/hvm/rtc.c	Mon Nov 20 13:11:15 2006 -0700
    61.3 @@ -30,40 +30,43 @@
    61.4  
    61.5  /* #define DEBUG_RTC */
    61.6  
    61.7 -void rtc_periodic_cb(struct vcpu *v, void *opaque)
    61.8 +/* Callback that fires the RTC's periodic interrupt */
    61.9 +void rtc_pie_callback(void *opaque)
   61.10  {
   61.11      RTCState *s = opaque;
   61.12 -    s->cmos_data[RTC_REG_C] |= 0xc0;
   61.13 +    struct hvm_domain *plat = &s->vcpu->domain->arch.hvm_domain;
   61.14 +    struct vpic       *pic  = &plat->vpic;
   61.15 +    /* Record that we have fired */
   61.16 +    s->cmos_data[RTC_REG_C] |= (RTC_IRQF|RTC_PF); /* 0xc0 */
   61.17 +    /* Fire */
   61.18 +    pic_set_irq(pic, s->irq, 1);
   61.19 +    /* Remember to fire again */
   61.20 +    s->next_pie = NOW() + s->period;
   61.21 +    set_timer(&s->pie_timer, s->next_pie);
   61.22  }
   61.23  
   61.24 -int is_rtc_periodic_irq(void *opaque)
   61.25 -{
   61.26 -    RTCState *s = opaque;
   61.27 -    return !(s->cmos_data[RTC_REG_C] & RTC_AF || 
   61.28 -           s->cmos_data[RTC_REG_C] & RTC_UF);
   61.29 -}
   61.30 -
   61.31 -static void rtc_timer_update(RTCState *s, int64_t current_time)
   61.32 +/* Enable/configure/disable the periodic timer based on the RTC_PIE and
   61.33 + * RTC_RATE_SELECT settings */
   61.34 +static void rtc_timer_update(RTCState *s)
   61.35  {
   61.36      int period_code; 
   61.37      int period;
   61.38  
   61.39 -    period_code = s->cmos_data[RTC_REG_A] & 0x0f;
   61.40 +    period_code = s->cmos_data[RTC_REG_A] & RTC_RATE_SELECT;
   61.41      if (period_code != 0 && (s->cmos_data[RTC_REG_B] & RTC_PIE)) {
   61.42          if (period_code <= 2)
   61.43              period_code += 7;
   61.44          
   61.45          period = 1 << (period_code - 1); /* period in 32 Khz cycles */
   61.46          period = DIV_ROUND((period * 1000000000ULL), 32768); /* period in ns */
   61.47 -
   61.48 +        s->period = period;
   61.49  #ifdef DEBUG_RTC
   61.50          printk("HVM_RTC: period = %uns\n", period);
   61.51  #endif
   61.52 -
   61.53 -        s->pt = create_periodic_time(period, RTC_IRQ, 0, rtc_periodic_cb, s);
   61.54 -    } else if (s->pt) {
   61.55 -        destroy_periodic_time(s->pt);
   61.56 -        s->pt = NULL;
   61.57 +        s->next_pie = NOW() + s->period;
   61.58 +        set_timer(&s->pie_timer, s->next_pie);
   61.59 +    } else {
   61.60 +        stop_timer(&s->pie_timer);
   61.61      }
   61.62  }
   61.63  
   61.64 @@ -105,7 +108,7 @@ static int rtc_ioport_write(void *opaque
   61.65              /* UIP bit is read only */
   61.66              s->cmos_data[RTC_REG_A] = (data & ~RTC_UIP) |
   61.67                  (s->cmos_data[RTC_REG_A] & RTC_UIP);
   61.68 -            rtc_timer_update(s, hvm_get_clock(s->vcpu));
   61.69 +            rtc_timer_update(s);
   61.70              break;
   61.71          case RTC_REG_B:
   61.72              if (data & RTC_SET) {
   61.73 @@ -119,14 +122,14 @@ static int rtc_ioport_write(void *opaque
   61.74                  }
   61.75              }
   61.76              s->cmos_data[RTC_REG_B] = data;
   61.77 -            rtc_timer_update(s, hvm_get_clock(s->vcpu));
   61.78 +            rtc_timer_update(s);
   61.79              break;
   61.80          case RTC_REG_C:
   61.81          case RTC_REG_D:
   61.82              /* cannot write to them */
   61.83              break;
   61.84 +        }
   61.85          return 1;
   61.86 -        }
   61.87      }
   61.88      return 0;
   61.89  }
   61.90 @@ -172,7 +175,7 @@ static void rtc_copy_date(RTCState *s)
   61.91  
   61.92      s->cmos_data[RTC_SECONDS] = to_bcd(s, tm->tm_sec);
   61.93      s->cmos_data[RTC_MINUTES] = to_bcd(s, tm->tm_min);
   61.94 -    if (s->cmos_data[RTC_REG_B] & 0x02) {
   61.95 +    if (s->cmos_data[RTC_REG_B] & RTC_24H) {
   61.96          /* 24 hour format */
   61.97          s->cmos_data[RTC_HOURS] = to_bcd(s, tm->tm_hour);
   61.98      } else {
   61.99 @@ -245,7 +248,7 @@ static void rtc_update_second(void *opaq
  61.100      RTCState *s = opaque;
  61.101  
  61.102      /* if the oscillator is not in normal operation, we do not update */
  61.103 -    if ((s->cmos_data[RTC_REG_A] & 0x70) != 0x20) {
  61.104 +    if ((s->cmos_data[RTC_REG_A] & RTC_DIV_CTL) != RTC_REF_CLCK_32KHZ) {
  61.105          s->next_second_time += 1000000000ULL;
  61.106          set_timer(&s->second_timer, s->next_second_time);
  61.107      } else {
  61.108 @@ -361,22 +364,48 @@ static int handle_rtc_io(ioreq_t *p)
  61.109      return 0;
  61.110  }
  61.111  
  61.112 +/* Stop the periodic interrupts from this RTC */
  61.113 +void rtc_freeze(struct vcpu *v)
  61.114 +{
  61.115 +    RTCState *s = &v->domain->arch.hvm_domain.pl_time.vrtc;
  61.116 +    stop_timer(&s->pie_timer);
  61.117 +}
  61.118 +
  61.119 +/* Start them again */
  61.120 +void rtc_thaw(struct vcpu *v)
  61.121 +{
  61.122 +    RTCState *s = &v->domain->arch.hvm_domain.pl_time.vrtc;
  61.123 +    if ( (s->cmos_data[RTC_REG_A] & RTC_RATE_SELECT) /* Period is not zero */
  61.124 +         && (s->cmos_data[RTC_REG_B] & RTC_PIE) ) 
  61.125 +        set_timer(&s->pie_timer, s->next_pie);
  61.126 +}
  61.127 +
  61.128 +/* Move the RTC timers on to this vcpu's current cpu */
  61.129 +void rtc_migrate_timers(struct vcpu *v)
  61.130 +{
  61.131 +    RTCState *s = &v->domain->arch.hvm_domain.pl_time.vrtc;
  61.132 +    migrate_timer(&s->second_timer, v->processor);
  61.133 +    migrate_timer(&s->second_timer2, v->processor);
  61.134 +    migrate_timer(&s->pie_timer, v->processor);
  61.135 +}
  61.136 +
  61.137  void rtc_init(struct vcpu *v, int base, int irq)
  61.138  {
  61.139      RTCState *s = &v->domain->arch.hvm_domain.pl_time.vrtc;
  61.140  
  61.141      s->vcpu = v;
  61.142      s->irq = irq;
  61.143 -    s->cmos_data[RTC_REG_A] = 0x26;
  61.144 -    s->cmos_data[RTC_REG_B] = 0x02;
  61.145 -    s->cmos_data[RTC_REG_C] = 0x00;
  61.146 -    s->cmos_data[RTC_REG_D] = 0x80;
  61.147 +    s->cmos_data[RTC_REG_A] = RTC_REF_CLCK_32KHZ | 6; /* ~1kHz */
  61.148 +    s->cmos_data[RTC_REG_B] = RTC_24H;
  61.149 +    s->cmos_data[RTC_REG_C] = 0;
  61.150 +    s->cmos_data[RTC_REG_D] = RTC_VRT;
  61.151  
  61.152      s->current_tm = gmtime(get_localtime(v->domain));
  61.153      rtc_copy_date(s);
  61.154  
  61.155      init_timer(&s->second_timer, rtc_update_second, s, v->processor);
  61.156      init_timer(&s->second_timer2, rtc_update_second2, s, v->processor);
  61.157 +    init_timer(&s->pie_timer, rtc_pie_callback, s, v->processor);
  61.158  
  61.159      s->next_second_time = NOW() + 1000000000ULL;
  61.160      set_timer(&s->second_timer2, s->next_second_time);
  61.161 @@ -390,4 +419,5 @@ void rtc_deinit(struct domain *d)
  61.162  
  61.163      kill_timer(&s->second_timer);
  61.164      kill_timer(&s->second_timer2);
  61.165 +    kill_timer(&s->pie_timer);
  61.166  }
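
The period computation in rtc_timer_update() above can be sanity-checked in a few lines of Python; only the arithmetic is reproduced here (NOW(), set_timer() and DIV_ROUND are Xen internals, and the integer division below truncates where DIV_ROUND rounds):

    def rtc_period_ns(period_code):
        """Periodic interrupt period for an RTC_RATE_SELECT value, in ns."""
        assert 0 < period_code <= 15
        if period_code <= 2:
            period_code += 7
        cycles = 1 << (period_code - 1)          # period in 32.768 kHz cycles
        return (cycles * 1000000000) // 32768

    # rtc_init() now programs RTC_REG_A with rate 6:
    # rtc_period_ns(6) -> 976562 ns, i.e. roughly 1024 Hz (the "~1kHz" comment).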
    62.1 --- a/xen/arch/x86/hvm/svm/svm.c	Mon Nov 20 12:14:40 2006 -0700
    62.2 +++ b/xen/arch/x86/hvm/svm/svm.c	Mon Nov 20 13:11:15 2006 -0700
    62.3 @@ -714,23 +714,9 @@ static void arch_svm_do_launch(struct vc
    62.4      reset_stack_and_jump(svm_asm_do_launch);
    62.5  }
    62.6  
    62.7 -static void svm_freeze_time(struct vcpu *v)
    62.8 -{
    62.9 -    struct periodic_time *pt=&v->domain->arch.hvm_domain.pl_time.periodic_tm;
   62.10 -
   62.11 -    if ( pt->enabled && pt->first_injected
   62.12 -            && (v->vcpu_id == pt->bind_vcpu)
   62.13 -            && !v->arch.hvm_vcpu.guest_time ) {
   62.14 -        v->arch.hvm_vcpu.guest_time = hvm_get_guest_time(v);
   62.15 -        if ( test_bit(_VCPUF_blocked, &v->vcpu_flags) )
   62.16 -            stop_timer(&pt->timer);
   62.17 -    }
   62.18 -}
   62.19 -
   62.20 -
   62.21  static void svm_ctxt_switch_from(struct vcpu *v)
   62.22  {
   62.23 -    svm_freeze_time(v);
   62.24 +    hvm_freeze_time(v);
   62.25      svm_save_dr(v);
   62.26  }
   62.27  
   62.28 @@ -849,25 +835,6 @@ int start_svm(void)
   62.29      return 1;
   62.30  }
   62.31  
   62.32 -
   62.33 -static void svm_migrate_timers(struct vcpu *v)
   62.34 -{
   62.35 -    struct periodic_time *pt = 
   62.36 -        &(v->domain->arch.hvm_domain.pl_time.periodic_tm);
   62.37 -    struct RTCState *vrtc = &v->domain->arch.hvm_domain.pl_time.vrtc;
   62.38 -    struct PMTState *vpmt = &v->domain->arch.hvm_domain.pl_time.vpmt;
   62.39 -
   62.40 -    if ( pt->enabled )
   62.41 -    {
   62.42 -        migrate_timer(&pt->timer, v->processor);
   62.43 -    }
   62.44 -    migrate_timer(&vcpu_vlapic(v)->vlapic_timer, v->processor);
   62.45 -    migrate_timer(&vrtc->second_timer, v->processor);
   62.46 -    migrate_timer(&vrtc->second_timer2, v->processor);
   62.47 -    migrate_timer(&vpmt->timer, v->processor);
   62.48 -}
   62.49 -
   62.50 -
   62.51  void arch_svm_do_resume(struct vcpu *v) 
   62.52  {
   62.53      /* pinning VCPU to a different core? */
   62.54 @@ -880,14 +847,12 @@ void arch_svm_do_resume(struct vcpu *v)
   62.55              printk("VCPU core pinned: %d to %d\n", 
   62.56                     v->arch.hvm_svm.launch_core, smp_processor_id() );
   62.57          v->arch.hvm_svm.launch_core = smp_processor_id();
   62.58 -        svm_migrate_timers( v );
   62.59 +        hvm_migrate_timers( v );
   62.60          hvm_do_resume( v );
   62.61          reset_stack_and_jump( svm_asm_do_resume );
   62.62      }
   62.63  }
   62.64  
   62.65 -
   62.66 -
   62.67  static int svm_do_page_fault(unsigned long va, struct cpu_user_regs *regs) 
   62.68  {
   62.69      struct vcpu *v = current;
    63.1 --- a/xen/arch/x86/hvm/vioapic.c	Mon Nov 20 12:14:40 2006 -0700
    63.2 +++ b/xen/arch/x86/hvm/vioapic.c	Mon Nov 20 13:11:15 2006 -0700
    63.3 @@ -471,8 +471,8 @@ void vioapic_set_irq(struct domain *d, i
    63.4      struct vioapic *vioapic = domain_vioapic(d);
    63.5      uint32_t bit;
    63.6  
    63.7 -    HVM_DBG_LOG(DBG_LEVEL_IOAPIC, "ioapic_set_irq "
    63.8 -                "irq %x level %x\n", irq, level);
    63.9 +    HVM_DBG_LOG(DBG_LEVEL_IOAPIC, "ioapic_set_irq irq %x level %x", 
   63.10 +                irq, level);
   63.11  
   63.12      if ( (irq < 0) || (irq >= VIOAPIC_NUM_PINS) )
   63.13          return;
    64.1 --- a/xen/arch/x86/hvm/vmx/vmcs.c	Mon Nov 20 12:14:40 2006 -0700
    64.2 +++ b/xen/arch/x86/hvm/vmx/vmcs.c	Mon Nov 20 13:11:15 2006 -0700
    64.3 @@ -486,7 +486,7 @@ void arch_vmx_do_resume(struct vcpu *v)
    64.4      {
    64.5          vmx_clear_vmcs(v);
    64.6          vmx_load_vmcs(v);
    64.7 -        vmx_migrate_timers(v);
    64.8 +        hvm_migrate_timers(v);
    64.9          vmx_set_host_env(v);
   64.10      }
   64.11  
    65.1 --- a/xen/arch/x86/hvm/vmx/vmx.c	Mon Nov 20 12:14:40 2006 -0700
    65.2 +++ b/xen/arch/x86/hvm/vmx/vmx.c	Mon Nov 20 13:11:15 2006 -0700
    65.3 @@ -78,75 +78,48 @@ static void vmx_vcpu_destroy(struct vcpu
    65.4  
    65.5  #ifdef __x86_64__
    65.6  
    65.7 -static DEFINE_PER_CPU(struct vmx_msr_state, percpu_msr);
    65.8 +static DEFINE_PER_CPU(struct vmx_msr_state, host_msr_state);
    65.9  
   65.10 -static u32 msr_data_index[VMX_MSR_COUNT] =
   65.11 +static u32 msr_index[VMX_MSR_COUNT] =
   65.12  {
   65.13      MSR_LSTAR, MSR_STAR, MSR_CSTAR,
   65.14      MSR_SYSCALL_MASK, MSR_EFER,
   65.15  };
   65.16  
   65.17 -static void vmx_save_segments(struct vcpu *v)
   65.18 -{
   65.19 -    rdmsrl(MSR_SHADOW_GS_BASE, v->arch.hvm_vmx.msr_content.shadow_gs);
   65.20 -}
   65.21 -
   65.22 -/*
   65.23 - * To avoid MSR save/restore at every VM exit/entry time, we restore
   65.24 - * the x86_64 specific MSRs at domain switch time. Since those MSRs are
   65.25 - * are not modified once set for generic domains, we don't save them,
   65.26 - * but simply reset them to the values set at percpu_traps_init().
   65.27 - */
   65.28 -static void vmx_load_msrs(void)
   65.29 +static void vmx_save_host_msrs(void)
   65.30  {
   65.31 -    struct vmx_msr_state *host_state = &this_cpu(percpu_msr);
   65.32 -    int i;
   65.33 -
   65.34 -    while ( host_state->flags )
   65.35 -    {
   65.36 -        i = find_first_set_bit(host_state->flags);
   65.37 -        wrmsrl(msr_data_index[i], host_state->msr_items[i]);
   65.38 -        clear_bit(i, &host_state->flags);
   65.39 -    }
   65.40 -}
   65.41 -
   65.42 -static void vmx_save_init_msrs(void)
   65.43 -{
   65.44 -    struct vmx_msr_state *host_state = &this_cpu(percpu_msr);
   65.45 +    struct vmx_msr_state *host_msr_state = &this_cpu(host_msr_state);
   65.46      int i;
   65.47  
   65.48      for ( i = 0; i < VMX_MSR_COUNT; i++ )
   65.49 -        rdmsrl(msr_data_index[i], host_state->msr_items[i]);
   65.50 +        rdmsrl(msr_index[i], host_msr_state->msrs[i]);
   65.51  }
   65.52  
   65.53 -#define CASE_READ_MSR(address)              \
   65.54 -    case MSR_ ## address:                 \
   65.55 -    msr_content = msr->msr_items[VMX_INDEX_MSR_ ## address]; \
   65.56 -    break
   65.57 +#define CASE_READ_MSR(address)                                              \
   65.58 +    case MSR_ ## address:                                                   \
   65.59 +        msr_content = guest_msr_state->msrs[VMX_INDEX_MSR_ ## address];     \
   65.60 +        break
   65.61  
   65.62 -#define CASE_WRITE_MSR(address)                                     \
   65.63 -    case MSR_ ## address:                                           \
   65.64 -    {                                                               \
   65.65 -        msr->msr_items[VMX_INDEX_MSR_ ## address] = msr_content;    \
   65.66 -        if (!test_bit(VMX_INDEX_MSR_ ## address, &msr->flags)) {    \
   65.67 -            set_bit(VMX_INDEX_MSR_ ## address, &msr->flags);        \
   65.68 -        }                                                           \
   65.69 -        wrmsrl(MSR_ ## address, msr_content);                       \
   65.70 -        set_bit(VMX_INDEX_MSR_ ## address, &host_state->flags);     \
   65.71 -    }                                                               \
   65.72 -    break
   65.73 +#define CASE_WRITE_MSR(address)                                             \
   65.74 +    case MSR_ ## address:                                                   \
   65.75 +        guest_msr_state->msrs[VMX_INDEX_MSR_ ## address] = msr_content;     \
   65.76 +        if ( !test_bit(VMX_INDEX_MSR_ ## address, &guest_msr_state->flags) )\
   65.77 +            set_bit(VMX_INDEX_MSR_ ## address, &guest_msr_state->flags);    \
   65.78 +        wrmsrl(MSR_ ## address, msr_content);                               \
   65.79 +        set_bit(VMX_INDEX_MSR_ ## address, &host_msr_state->flags);         \
   65.80 +        break
   65.81  
   65.82  #define IS_CANO_ADDRESS(add) 1
   65.83  static inline int long_mode_do_msr_read(struct cpu_user_regs *regs)
   65.84  {
   65.85      u64 msr_content = 0;
   65.86      struct vcpu *v = current;
   65.87 -    struct vmx_msr_state *msr = &v->arch.hvm_vmx.msr_content;
   65.88 +    struct vmx_msr_state *guest_msr_state = &v->arch.hvm_vmx.msr_state;
   65.89  
   65.90      switch ( regs->ecx ) {
   65.91      case MSR_EFER:
   65.92          HVM_DBG_LOG(DBG_LEVEL_2, "EFER msr_content 0x%"PRIx64, msr_content);
   65.93 -        msr_content = msr->msr_items[VMX_INDEX_MSR_EFER];
   65.94 +        msr_content = guest_msr_state->msrs[VMX_INDEX_MSR_EFER];
   65.95          break;
   65.96  
   65.97      case MSR_FS_BASE:
   65.98 @@ -164,7 +137,7 @@ static inline int long_mode_do_msr_read(
   65.99          break;
  65.100  
  65.101      case MSR_SHADOW_GS_BASE:
  65.102 -        msr_content = msr->shadow_gs;
  65.103 +        msr_content = guest_msr_state->shadow_gs;
  65.104          break;
  65.105  
  65.106      CASE_READ_MSR(STAR);
  65.107 @@ -193,8 +166,8 @@ static inline int long_mode_do_msr_write
  65.108  {
  65.109      u64 msr_content = (u32)regs->eax | ((u64)regs->edx << 32);
  65.110      struct vcpu *v = current;
  65.111 -    struct vmx_msr_state *msr = &v->arch.hvm_vmx.msr_content;
  65.112 -    struct vmx_msr_state *host_state = &this_cpu(percpu_msr);
  65.113 +    struct vmx_msr_state *guest_msr_state = &v->arch.hvm_vmx.msr_state;
  65.114 +    struct vmx_msr_state *host_msr_state = &this_cpu(host_msr_state);
  65.115  
  65.116      HVM_DBG_LOG(DBG_LEVEL_1, "msr 0x%lx msr_content 0x%"PRIx64"\n",
  65.117                  (unsigned long)regs->ecx, msr_content);
  65.118 @@ -211,7 +184,7 @@ static inline int long_mode_do_msr_write
  65.119          }
  65.120  
  65.121          if ( (msr_content & EFER_LME)
  65.122 -             &&  !(msr->msr_items[VMX_INDEX_MSR_EFER] & EFER_LME) )
  65.123 +             &&  !(guest_msr_state->msrs[VMX_INDEX_MSR_EFER] & EFER_LME) )
  65.124          {
  65.125              if ( unlikely(vmx_paging_enabled(v)) )
  65.126              {
  65.127 @@ -221,7 +194,7 @@ static inline int long_mode_do_msr_write
  65.128              }
  65.129          }
  65.130          else if ( !(msr_content & EFER_LME)
  65.131 -                  && (msr->msr_items[VMX_INDEX_MSR_EFER] & EFER_LME) )
  65.132 +                  && (guest_msr_state->msrs[VMX_INDEX_MSR_EFER] & EFER_LME) )
  65.133          {
  65.134              if ( unlikely(vmx_paging_enabled(v)) )
  65.135              {
  65.136 @@ -231,12 +204,12 @@ static inline int long_mode_do_msr_write
  65.137              }
  65.138          }
  65.139  
  65.140 -        msr->msr_items[VMX_INDEX_MSR_EFER] = msr_content;
  65.141 +        guest_msr_state->msrs[VMX_INDEX_MSR_EFER] = msr_content;
  65.142          break;
  65.143  
  65.144      case MSR_FS_BASE:
  65.145      case MSR_GS_BASE:
  65.146 -        if ( !(vmx_long_mode_enabled(v)) )
  65.147 +        if ( !vmx_long_mode_enabled(v) )
  65.148              goto exit_and_crash;
  65.149  
  65.150          if ( !IS_CANO_ADDRESS(msr_content) )
  65.151 @@ -257,7 +230,7 @@ static inline int long_mode_do_msr_write
  65.152          if ( !(vmx_long_mode_enabled(v)) )
  65.153              goto exit_and_crash;
  65.154  
  65.155 -        v->arch.hvm_vmx.msr_content.shadow_gs = msr_content;
  65.156 +        v->arch.hvm_vmx.msr_state.shadow_gs = msr_content;
  65.157          wrmsrl(MSR_SHADOW_GS_BASE, msr_content);
  65.158          break;
  65.159  
  65.160 @@ -278,40 +251,57 @@ static inline int long_mode_do_msr_write
  65.161      return 1; /* handled */
  65.162  }
  65.163  
  65.164 -static void vmx_restore_msrs(struct vcpu *v)
  65.165 +/*
   65.166 + * To avoid MSR save/restore on every VM exit/entry, we restore the
   65.167 + * x86_64-specific MSRs at domain switch time. Since these MSRs are not
   65.168 + * modified once set for PV domains, we don't save them, but simply
   65.169 + * reset them to the values set in percpu_traps_init().
  65.170 + */
  65.171 +static void vmx_restore_host_msrs(void)
  65.172  {
  65.173 -    int i = 0;
  65.174 -    struct vmx_msr_state *guest_state;
  65.175 -    struct vmx_msr_state *host_state;
  65.176 -    unsigned long guest_flags ;
  65.177 +    struct vmx_msr_state *host_msr_state = &this_cpu(host_msr_state);
  65.178 +    int i;
  65.179  
  65.180 -    guest_state = &v->arch.hvm_vmx.msr_content;;
  65.181 -    host_state = &this_cpu(percpu_msr);
  65.182 +    while ( host_msr_state->flags )
  65.183 +    {
  65.184 +        i = find_first_set_bit(host_msr_state->flags);
  65.185 +        wrmsrl(msr_index[i], host_msr_state->msrs[i]);
  65.186 +        clear_bit(i, &host_msr_state->flags);
  65.187 +    }
  65.188 +}
  65.189  
  65.190 -    wrmsrl(MSR_SHADOW_GS_BASE, guest_state->shadow_gs);
  65.191 -    guest_flags = guest_state->flags;
  65.192 -    if (!guest_flags)
  65.193 +static void vmx_restore_guest_msrs(struct vcpu *v)
  65.194 +{
  65.195 +    struct vmx_msr_state *guest_msr_state, *host_msr_state;
  65.196 +    unsigned long guest_flags;
  65.197 +    int i;
  65.198 +
  65.199 +    guest_msr_state = &v->arch.hvm_vmx.msr_state;
  65.200 +    host_msr_state = &this_cpu(host_msr_state);
  65.201 +
  65.202 +    wrmsrl(MSR_SHADOW_GS_BASE, guest_msr_state->shadow_gs);
  65.203 +
  65.204 +    guest_flags = guest_msr_state->flags;
  65.205 +    if ( !guest_flags )
  65.206          return;
  65.207  
  65.208 -    while (guest_flags){
  65.209 +    while ( guest_flags ) {
  65.210          i = find_first_set_bit(guest_flags);
  65.211  
  65.212          HVM_DBG_LOG(DBG_LEVEL_2,
  65.213 -                    "restore guest's index %d msr %lx with %lx\n",
  65.214 -                    i, (unsigned long)msr_data_index[i],
  65.215 -                    (unsigned long)guest_state->msr_items[i]);
  65.216 -        set_bit(i, &host_state->flags);
  65.217 -        wrmsrl(msr_data_index[i], guest_state->msr_items[i]);
  65.218 +                    "restore guest's index %d msr %x with value %lx",
  65.219 +                    i, msr_index[i], guest_msr_state->msrs[i]);
  65.220 +        set_bit(i, &host_msr_state->flags);
  65.221 +        wrmsrl(msr_index[i], guest_msr_state->msrs[i]);
  65.222          clear_bit(i, &guest_flags);
  65.223      }
  65.224  }
  65.225  
  65.226  #else  /* __i386__ */
  65.227  
  65.228 -#define vmx_save_segments(v)      ((void)0)
  65.229 -#define vmx_load_msrs()           ((void)0)
  65.230 -#define vmx_restore_msrs(v)       ((void)0)
  65.231 -#define vmx_save_init_msrs()      ((void)0)
  65.232 +#define vmx_save_host_msrs()        ((void)0)
  65.233 +#define vmx_restore_host_msrs()     ((void)0)
  65.234 +#define vmx_restore_guest_msrs(v)   ((void)0)
  65.235  
  65.236  static inline int long_mode_do_msr_read(struct cpu_user_regs *regs)
  65.237  {
  65.238 @@ -325,9 +315,9 @@ static inline int long_mode_do_msr_write
  65.239  
  65.240  #endif /* __i386__ */
  65.241  
  65.242 -#define loaddebug(_v,_reg) \
  65.243 +#define loaddebug(_v,_reg)  \
  65.244      __asm__ __volatile__ ("mov %0,%%db" #_reg : : "r" ((_v)->debugreg[_reg]))
  65.245 -#define savedebug(_v,_reg) \
  65.246 +#define savedebug(_v,_reg)  \
  65.247      __asm__ __volatile__ ("mov %%db" #_reg ",%0" : : "r" ((_v)->debugreg[_reg]))
  65.248  
  65.249  static inline void vmx_save_dr(struct vcpu *v)
  65.250 @@ -374,30 +364,21 @@ static inline void vmx_restore_dr(struct
  65.251          __restore_debug_registers(v);
  65.252  }
  65.253  
  65.254 -static void vmx_freeze_time(struct vcpu *v)
  65.255 -{
  65.256 -    struct periodic_time *pt=&v->domain->arch.hvm_domain.pl_time.periodic_tm;
  65.257 -
  65.258 -    if ( pt->enabled && pt->first_injected
  65.259 -            && (v->vcpu_id == pt->bind_vcpu)
  65.260 -            && !v->arch.hvm_vcpu.guest_time ) {
  65.261 -        v->arch.hvm_vcpu.guest_time = hvm_get_guest_time(v);
  65.262 -        if ( !test_bit(_VCPUF_blocked, &v->vcpu_flags) )
  65.263 -            stop_timer(&pt->timer);
  65.264 -    }
  65.265 -}
  65.266 -
  65.267  static void vmx_ctxt_switch_from(struct vcpu *v)
  65.268  {
  65.269 -    vmx_freeze_time(v);
  65.270 -    vmx_save_segments(v);
  65.271 -    vmx_load_msrs();
  65.272 +    hvm_freeze_time(v);
  65.273 +
   65.274 +    /* NB. MSR_SHADOW_GS_BASE may be changed by the swapgs instruction in
   65.275 +     * the guest, so we must save it. */
  65.276 +    rdmsrl(MSR_SHADOW_GS_BASE, v->arch.hvm_vmx.msr_state.shadow_gs);
  65.277 +
  65.278 +    vmx_restore_host_msrs();
  65.279      vmx_save_dr(v);
  65.280  }
  65.281  
  65.282  static void vmx_ctxt_switch_to(struct vcpu *v)
  65.283  {
  65.284 -    vmx_restore_msrs(v);
  65.285 +    vmx_restore_guest_msrs(v);
  65.286      vmx_restore_dr(v);
  65.287  }
  65.288  
  65.289 @@ -405,26 +386,11 @@ static void stop_vmx(void)
  65.290  {
  65.291      if ( !(read_cr4() & X86_CR4_VMXE) )
  65.292          return;
  65.293 +
  65.294      __vmxoff();
  65.295      clear_in_cr4(X86_CR4_VMXE);
  65.296  }
  65.297  
  65.298 -void vmx_migrate_timers(struct vcpu *v)
  65.299 -{
  65.300 -    struct periodic_time *pt = &v->domain->arch.hvm_domain.pl_time.periodic_tm;
  65.301 -    struct RTCState *vrtc = &v->domain->arch.hvm_domain.pl_time.vrtc;
  65.302 -    struct PMTState *vpmt = &v->domain->arch.hvm_domain.pl_time.vpmt;
  65.303 -
  65.304 -    if ( pt->enabled )
  65.305 -    {
  65.306 -        migrate_timer(&pt->timer, v->processor);
  65.307 -    }
  65.308 -    migrate_timer(&vcpu_vlapic(v)->vlapic_timer, v->processor);
  65.309 -    migrate_timer(&vrtc->second_timer, v->processor);
  65.310 -    migrate_timer(&vrtc->second_timer2, v->processor);
  65.311 -    migrate_timer(&vpmt->timer, v->processor);
  65.312 -}
  65.313 -
  65.314  static void vmx_store_cpu_guest_regs(
  65.315      struct vcpu *v, struct cpu_user_regs *regs, unsigned long *crs)
  65.316  {
  65.317 @@ -718,7 +684,7 @@ int start_vmx(void)
  65.318  
  65.319      printk("VMXON is done\n");
  65.320  
  65.321 -    vmx_save_init_msrs();
  65.322 +    vmx_save_host_msrs();
  65.323  
  65.324      vmx_setup_hvm_funcs();
  65.325  
  65.326 @@ -855,14 +821,14 @@ static void vmx_do_cpuid(struct cpu_user
  65.327  
  65.328              if ( vlapic_hw_disabled(vcpu_vlapic(v)) )
  65.329                  clear_bit(X86_FEATURE_APIC, &edx);
  65.330 -    
  65.331 +
  65.332  #if CONFIG_PAGING_LEVELS >= 3
  65.333              if ( !v->domain->arch.hvm_domain.params[HVM_PARAM_PAE_ENABLED] )
  65.334  #endif
  65.335                  clear_bit(X86_FEATURE_PAE, &edx);
  65.336              clear_bit(X86_FEATURE_PSE36, &edx);
  65.337  
  65.338 -            ebx &= NUM_THREADS_RESET_MASK;  
  65.339 +            ebx &= NUM_THREADS_RESET_MASK;
  65.340  
  65.341              /* Unsupportable for virtualised CPUs. */
  65.342              ecx &= ~(bitmaskof(X86_FEATURE_VMXE)  |
  65.343 @@ -875,7 +841,7 @@ static void vmx_do_cpuid(struct cpu_user
  65.344                       bitmaskof(X86_FEATURE_ACPI)  |
  65.345                       bitmaskof(X86_FEATURE_ACC) );
  65.346          }
  65.347 -        else if (  ( input == CPUID_LEAF_0x6 ) 
  65.348 +        else if (  ( input == CPUID_LEAF_0x6 )
  65.349                  || ( input == CPUID_LEAF_0x9 )
  65.350                  || ( input == CPUID_LEAF_0xA ))
  65.351          {
  65.352 @@ -1331,7 +1297,7 @@ static int vmx_assist(struct vcpu *v, in
  65.353                  goto error;
  65.354              if ( vmx_world_restore(v, &c) != 0 )
  65.355                  goto error;
  65.356 -            v->arch.hvm_vmx.vmxassist_enabled = 1;            
  65.357 +            v->arch.hvm_vmx.vmxassist_enabled = 1;
  65.358              return 1;
  65.359          }
  65.360          break;
  65.361 @@ -1401,7 +1367,7 @@ static int vmx_set_cr0(unsigned long val
  65.362          mfn = get_mfn_from_gpfn(v->arch.hvm_vmx.cpu_cr3 >> PAGE_SHIFT);
  65.363          if ( !VALID_MFN(mfn) || !get_page(mfn_to_page(mfn), v->domain) )
  65.364          {
  65.365 -            gdprintk(XENLOG_ERR, "Invalid CR3 value = %lx (mfn=%lx)\n", 
  65.366 +            gdprintk(XENLOG_ERR, "Invalid CR3 value = %lx (mfn=%lx)\n",
  65.367                       v->arch.hvm_vmx.cpu_cr3, mfn);
  65.368              domain_crash(v->domain);
  65.369              return 0;
  65.370 @@ -1416,10 +1382,10 @@ static int vmx_set_cr0(unsigned long val
  65.371                              "with EFER.LME set but not CR4.PAE\n");
  65.372                  vmx_inject_hw_exception(v, TRAP_gp_fault, 0);
  65.373              }
  65.374 -            else 
  65.375 +            else
  65.376              {
  65.377                  HVM_DBG_LOG(DBG_LEVEL_1, "Enabling long mode\n");
  65.378 -                v->arch.hvm_vmx.msr_content.msr_items[VMX_INDEX_MSR_EFER]
  65.379 +                v->arch.hvm_vmx.msr_state.msrs[VMX_INDEX_MSR_EFER]
  65.380                      |= EFER_LMA;
  65.381                  vm_entry_value = __vmread(VM_ENTRY_CONTROLS);
  65.382                  vm_entry_value |= VM_ENTRY_IA32E_MODE;
  65.383 @@ -1473,7 +1439,7 @@ static int vmx_set_cr0(unsigned long val
  65.384               */
  65.385              if ( vmx_long_mode_enabled(v) )
  65.386              {
  65.387 -                v->arch.hvm_vmx.msr_content.msr_items[VMX_INDEX_MSR_EFER]
  65.388 +                v->arch.hvm_vmx.msr_state.msrs[VMX_INDEX_MSR_EFER]
  65.389                      &= ~EFER_LMA;
  65.390                  vm_entry_value = __vmread(VM_ENTRY_CONTROLS);
  65.391                  vm_entry_value &= ~VM_ENTRY_IA32E_MODE;
  65.392 @@ -1506,8 +1472,7 @@ static int vmx_set_cr0(unsigned long val
  65.393      {
  65.394          if ( vmx_long_mode_enabled(v) )
  65.395          {
  65.396 -            v->arch.hvm_vmx.msr_content.msr_items[VMX_INDEX_MSR_EFER]
  65.397 -              &= ~EFER_LMA;
  65.398 +            v->arch.hvm_vmx.msr_state.msrs[VMX_INDEX_MSR_EFER] &= ~EFER_LMA;
  65.399              vm_entry_value = __vmread(VM_ENTRY_CONTROLS);
  65.400              vm_entry_value &= ~VM_ENTRY_IA32E_MODE;
  65.401              __vmwrite(VM_ENTRY_CONTROLS, vm_entry_value);
  65.402 @@ -1865,8 +1830,8 @@ static inline void vmx_do_msr_write(stru
  65.403          {
  65.404              struct periodic_time *pt =
  65.405                  &(v->domain->arch.hvm_domain.pl_time.periodic_tm);
  65.406 -            if ( pt->enabled && pt->first_injected 
  65.407 -                    && v->vcpu_id == pt->bind_vcpu ) 
  65.408 +            if ( pt->enabled && pt->first_injected
  65.409 +                    && v->vcpu_id == pt->bind_vcpu )
  65.410                  pt->first_injected = 0;
  65.411          }
  65.412          hvm_set_guest_time(v, msr_content);
  65.413 @@ -1975,7 +1940,7 @@ void store_cpu_user_regs(struct cpu_user
  65.414      regs->es = __vmread(GUEST_ES_SELECTOR);
  65.415      regs->eip = __vmread(GUEST_RIP);
  65.416  }
  65.417 -#endif 
  65.418 +#endif
  65.419  
  65.420  #ifdef XEN_DEBUGGER
  65.421  void save_cpu_user_regs(struct cpu_user_regs *regs)
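
The vmx.c hunks above rename the per-CPU MSR bookkeeping (host_msr_state/guest_msr_state) and keep its lazy switching scheme: a flags bitmap records which MSRs currently hold guest values, so only those are written back to host values when the vcpu is switched out, rather than on every VM exit. The following stand-alone sketch models just that bookkeeping; wrmsr() is a stub standing in for wrmsrl(), __builtin_ctzl() stands in for find_first_set_bit(), and none of this is code from the patch.

/*
 * Stand-alone illustration of flags-driven lazy MSR switching.
 */
#include <stdio.h>

#define MSR_COUNT 4

struct msr_state {
    unsigned long flags;            /* bit i set => MSR i currently holds a guest value */
    unsigned long msrs[MSR_COUNT];  /* saved values (host or guest) */
};

static unsigned long hw_msr[MSR_COUNT];             /* models the hardware MSRs */

static void wrmsr(int i, unsigned long val) { hw_msr[i] = val; }

/* Context switch away from the guest: put back only the MSRs it dirtied. */
static void restore_host_msrs(struct msr_state *host)
{
    while (host->flags) {
        int i = __builtin_ctzl(host->flags);
        wrmsr(i, host->msrs[i]);
        host->flags &= ~(1UL << i);
    }
}

/* Context switch to the guest: load its MSRs and flag them for later restore. */
static void restore_guest_msrs(const struct msr_state *guest, struct msr_state *host)
{
    unsigned long guest_flags = guest->flags;
    while (guest_flags) {
        int i = __builtin_ctzl(guest_flags);
        host->flags |= 1UL << i;
        wrmsr(i, guest->msrs[i]);
        guest_flags &= ~(1UL << i);
    }
}

int main(void)
{
    struct msr_state host  = { 0,   { 10, 11, 12, 13 } };   /* host values saved at boot */
    struct msr_state guest = { 0x5, { 100, 0, 102, 0 } };   /* guest dirtied MSRs 0 and 2 */

    restore_guest_msrs(&guest, &host);
    restore_host_msrs(&host);
    printf("MSR0=%lu MSR2=%lu\n", hw_msr[0], hw_msr[2]);    /* back to 10 and 12 */
    return 0;
}
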
    66.1 --- a/xen/arch/x86/mm.c	Mon Nov 20 12:14:40 2006 -0700
    66.2 +++ b/xen/arch/x86/mm.c	Mon Nov 20 13:11:15 2006 -0700
    66.3 @@ -2067,7 +2067,7 @@ int do_mmuext_op(
    66.4          {
    66.5              unsigned long vmask;
    66.6              cpumask_t     pmask;
    66.7 -            if ( unlikely(get_user(vmask, (unsigned long *)op.arg2.vcpumask)) )
    66.8 +            if ( unlikely(copy_from_guest(&vmask, op.arg2.vcpumask, 1)) )
    66.9              {
   66.10                  okay = 0;
   66.11                  break;
    67.1 --- a/xen/arch/x86/mm/shadow/common.c	Mon Nov 20 12:14:40 2006 -0700
    67.2 +++ b/xen/arch/x86/mm/shadow/common.c	Mon Nov 20 13:11:15 2006 -0700
    67.3 @@ -1047,6 +1047,10 @@ shadow_set_p2m_entry(struct domain *d, u
    67.4      else
    67.5          *p2m_entry = l1e_empty();
    67.6  
    67.7 +    /* Track the highest gfn for which we have ever had a valid mapping */
    67.8 +    if ( valid_mfn(mfn) && (gfn > d->arch.max_mapped_pfn) ) 
    67.9 +        d->arch.max_mapped_pfn = gfn;
   67.10 +
   67.11      /* The P2M can be shadowed: keep the shadows synced */
   67.12      if ( d->vcpu[0] != NULL )
   67.13          (void)__shadow_validate_guest_entry(
   67.14 @@ -1142,12 +1146,9 @@ sh_gfn_to_mfn_foreign(struct domain *d, 
   67.15      mfn = pagetable_get_mfn(d->arch.phys_table);
   67.16  
   67.17  
   67.18 -#if CONFIG_PAGING_LEVELS > 2
   67.19 -    if ( gpfn >= (RO_MPT_VIRT_END-RO_MPT_VIRT_START) / sizeof(l1_pgentry_t) ) 
   67.20 -        /* This pfn is higher than the p2m map can hold */
   67.21 +    if ( gpfn > d->arch.max_mapped_pfn ) 
   67.22 +        /* This pfn is higher than the highest the p2m map currently holds */
   67.23          return _mfn(INVALID_MFN);
   67.24 -#endif
   67.25 -
   67.26  
   67.27  #if CONFIG_PAGING_LEVELS >= 4
   67.28      { 
   67.29 @@ -3333,13 +3334,14 @@ void shadow_audit_p2m(struct domain *d)
   67.30              set_gpfn_from_mfn(mfn, INVALID_M2P_ENTRY);
   67.31          }
   67.32  
   67.33 -        if ( test_linear )
   67.34 +        if ( test_linear && (gfn <= d->arch.max_mapped_pfn) )
   67.35          {
   67.36 -            lp2mfn = get_mfn_from_gpfn(gfn);
   67.37 -            if ( lp2mfn != mfn_x(p2mfn) )
   67.38 +            lp2mfn = gfn_to_mfn_current(gfn);
   67.39 +            if ( mfn_x(lp2mfn) != mfn_x(p2mfn) )
   67.40              {
   67.41                  SHADOW_PRINTK("linear mismatch gfn %#lx -> mfn %#lx "
   67.42 -                               "(!= mfn %#lx)\n", gfn, lp2mfn, p2mfn);
   67.43 +                              "(!= mfn %#lx)\n", gfn, 
   67.44 +                              mfn_x(lp2mfn), mfn_x(p2mfn));
   67.45              }
   67.46          }
   67.47  
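
The shadow/common.c hunks above introduce d->arch.max_mapped_pfn, a high-water mark updated in shadow_set_p2m_entry() and checked in sh_gfn_to_mfn_foreign() and shadow_audit_p2m(), so lookups beyond the highest frame ever mapped return INVALID_MFN without walking the p2m. A minimal stand-alone sketch of that idea follows; the flat array stands in for the real p2m tables and is not code from the patch.

#include <stdio.h>

#define INVALID_MFN (~0UL)
#define P2M_SIZE    1024UL

static struct {
    unsigned long map[P2M_SIZE];
    unsigned long max_mapped_pfn;   /* highest gfn ever given a valid mapping */
} p2m;

static void p2m_set_entry(unsigned long gfn, unsigned long mfn)
{
    p2m.map[gfn] = mfn;
    if (mfn != INVALID_MFN && gfn > p2m.max_mapped_pfn)
        p2m.max_mapped_pfn = gfn;
}

static unsigned long gfn_to_mfn(unsigned long gfn)
{
    /* Anything above the high-water mark was never mapped: no table walk needed. */
    if (gfn >= P2M_SIZE || gfn > p2m.max_mapped_pfn)
        return INVALID_MFN;
    return p2m.map[gfn];
}

int main(void)
{
    for (unsigned long g = 0; g < P2M_SIZE; g++)
        p2m.map[g] = INVALID_MFN;

    p2m_set_entry(5, 0x1234);
    printf("gfn 5   -> %lx\n", gfn_to_mfn(5));      /* 0x1234 */
    printf("gfn 900 -> %lx\n", gfn_to_mfn(900));    /* INVALID_MFN: above max_mapped_pfn */
    return 0;
}
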
    68.1 --- a/xen/arch/x86/mm/shadow/multi.c	Mon Nov 20 12:14:40 2006 -0700
    68.2 +++ b/xen/arch/x86/mm/shadow/multi.c	Mon Nov 20 13:11:15 2006 -0700
    68.3 @@ -2798,8 +2798,9 @@ static int sh_page_fault(struct vcpu *v,
    68.4       * We do not emulate user writes. Instead we use them as a hint that the
    68.5       * page is no longer a page table. This behaviour differs from native, but
    68.6       * it seems very unlikely that any OS grants user access to page tables.
    68.7 +     * We also disallow guest PTE updates from within Xen.
    68.8       */
    68.9 -    if ( (regs->error_code & PFEC_user_mode) ||
   68.10 +    if ( (regs->error_code & PFEC_user_mode) || !guest_mode(regs) ||
   68.11           x86_emulate_memop(&emul_ctxt, &shadow_emulator_ops) )
   68.12      {
   68.13          SHADOW_PRINTK("emulator failure, unshadowing mfn %#lx\n", 
   68.14 @@ -2839,6 +2840,8 @@ static int sh_page_fault(struct vcpu *v,
   68.15      goto done;
   68.16  
   68.17   mmio:
   68.18 +    if ( !guest_mode(regs) )
   68.19 +        goto not_a_shadow_fault;
   68.20      perfc_incrc(shadow_fault_mmio);
   68.21      sh_audit_gw(v, &gw);
   68.22      unmap_walk(v, &gw);
   68.23 @@ -3259,9 +3262,25 @@ sh_set_toplevel_shadow(struct vcpu *v,
   68.24                         mfn_t gmfn, 
   68.25                         unsigned int root_type) 
   68.26  {
   68.27 -    mfn_t smfn = get_shadow_status(v, gmfn, root_type);
   68.28 +    mfn_t smfn;
   68.29      struct domain *d = v->domain;
   68.30 -    ASSERT(pagetable_is_null(v->arch.shadow_table[slot]));
   68.31 +    
   68.32 +    /* Decrement the refcount of the old contents of this slot */
   68.33 +    smfn = pagetable_get_mfn(v->arch.shadow_table[slot]);
   68.34 +    if ( mfn_x(smfn) )
   68.35 +        sh_put_ref(v, smfn, 0);
   68.36 +
   68.37 +    /* Now figure out the new contents: is this a valid guest MFN? */
   68.38 +    if ( !valid_mfn(gmfn) )
   68.39 +    {
   68.40 +        SHADOW_PRINTK("%u/%u [%u] invalid gmfn\n",
   68.41 +                      GUEST_PAGING_LEVELS, SHADOW_PAGING_LEVELS, slot);
   68.42 +        v->arch.shadow_table[slot] = pagetable_null();
   68.43 +        return;
   68.44 +    }
   68.45 +
   68.46 +    /* Guest mfn is valid: shadow it and install the shadow */
   68.47 +    smfn = get_shadow_status(v, gmfn, root_type);
   68.48      if ( valid_mfn(smfn) )
   68.49      {
   68.50          /* Pull this root shadow to the front of the list of roots. */
   68.51 @@ -3270,10 +3289,6 @@ sh_set_toplevel_shadow(struct vcpu *v,
   68.52      }
   68.53      else
   68.54      {
   68.55 -        /* This guest MFN is a pagetable.  Must revoke write access 
   68.56 -         * (and can't use heuristics because we have no linear map here). */
   68.57 -        if ( shadow_remove_write_access(v, gmfn, 0, 0) != 0 )
   68.58 -            flush_tlb_mask(v->domain->domain_dirty_cpumask); 
   68.59          /* Make sure there's enough free shadow memory. */
   68.60          shadow_prealloc(d, SHADOW_MAX_ORDER); 
   68.61          /* Shadow the page. */
   68.62 @@ -3288,7 +3303,8 @@ sh_set_toplevel_shadow(struct vcpu *v,
   68.63      mfn_to_page(gmfn)->shadow_flags &= ~SHF_unhooked_mappings;
   68.64  #endif
   68.65  
   68.66 -    /* Take a ref to this page: it will be released in sh_detach_old_tables. */
   68.67 +    /* Take a ref to this page: it will be released in sh_detach_old_tables()
   68.68 +     * or in the next call to sh_set_toplevel_shadow(). */
   68.69      sh_get_ref(smfn, 0);
   68.70      sh_pin(smfn);
   68.71  
   68.72 @@ -3360,8 +3376,6 @@ sh_update_cr3(struct vcpu *v)
   68.73  #endif
   68.74          gmfn = pagetable_get_mfn(v->arch.guest_table);
   68.75  
   68.76 -    sh_detach_old_tables(v);
   68.77 -
   68.78      if ( !is_hvm_domain(d) && !test_bit(_VCPUF_initialised, &v->vcpu_flags) )
   68.79      {
   68.80          ASSERT(v->arch.cr3 == 0);
   68.81 @@ -3373,10 +3387,16 @@ sh_update_cr3(struct vcpu *v)
   68.82      ////
   68.83  #if GUEST_PAGING_LEVELS == 4
   68.84      if ( shadow_mode_external(d) || shadow_mode_translate(d) )
   68.85 +    {
   68.86 +        if ( v->arch.guest_vtable )
   68.87 +            sh_unmap_domain_page_global(v->arch.guest_vtable);
   68.88          v->arch.guest_vtable = sh_map_domain_page_global(gmfn);
   68.89 +    }
   68.90      else
   68.91          v->arch.guest_vtable = __linear_l4_table;
   68.92  #elif GUEST_PAGING_LEVELS == 3
   68.93 +    if ( v->arch.guest_vtable )
   68.94 +        sh_unmap_domain_page_global(v->arch.guest_vtable);
   68.95      if ( shadow_mode_external(d) )
   68.96      {
   68.97          if ( shadow_vcpu_mode_translate(v) ) 
   68.98 @@ -3398,7 +3418,11 @@ sh_update_cr3(struct vcpu *v)
   68.99          v->arch.guest_vtable = sh_map_domain_page_global(gmfn);
  68.100  #elif GUEST_PAGING_LEVELS == 2
  68.101      if ( shadow_mode_external(d) || shadow_mode_translate(d) )
  68.102 +    {
  68.103 +        if ( v->arch.guest_vtable )
  68.104 +            sh_unmap_domain_page_global(v->arch.guest_vtable);
  68.105          v->arch.guest_vtable = sh_map_domain_page_global(gmfn);
  68.106 +    }
  68.107      else
  68.108          v->arch.guest_vtable = __linear_l2_table;
  68.109  #else
  68.110 @@ -3414,29 +3438,49 @@ sh_update_cr3(struct vcpu *v)
  68.111      //// vcpu->arch.shadow_table[]
  68.112      ////
  68.113  
  68.114 +    /* We revoke write access to the new guest toplevel page(s) before we
  68.115 +     * replace the old shadow pagetable(s), so that we can safely use the 
  68.116 +     * (old) shadow linear maps in the writeable mapping heuristics. */
  68.117  #if GUEST_PAGING_LEVELS == 2
  68.118 +    if ( shadow_remove_write_access(v, gmfn, 2, 0) != 0 )
  68.119 +        flush_tlb_mask(v->domain->domain_dirty_cpumask); 
  68.120      sh_set_toplevel_shadow(v, 0, gmfn, PGC_SH_l2_shadow);
  68.121  #elif GUEST_PAGING_LEVELS == 3
  68.122      /* PAE guests have four shadow_table entries, based on the 
  68.123       * current values of the guest's four l3es. */
  68.124      {
  68.125 -        int i;
  68.126 +        int i, flush = 0;
  68.127 +        gfn_t gl2gfn;
  68.128 +        mfn_t gl2mfn;
  68.129          guest_l3e_t *gl3e = (guest_l3e_t*)v->arch.guest_vtable;
  68.130 -        for ( i = 0; i < 4; i++ ) 
  68.131 +        /* First, make all four entries read-only. */
  68.132 +        for ( i = 0; i < 4; i++ )
  68.133          {
  68.134 -            ASSERT(pagetable_is_null(v->arch.shadow_table[i]));
  68.135              if ( guest_l3e_get_flags(gl3e[i]) & _PAGE_PRESENT )
  68.136              {
  68.137 -                gfn_t gl2gfn = guest_l3e_get_gfn(gl3e[i]);
  68.138 -                mfn_t gl2mfn = vcpu_gfn_to_mfn(v, gl2gfn);
  68.139 -                if ( valid_mfn(gl2mfn) )                
  68.140 -                    sh_set_toplevel_shadow(v, i, gl2mfn, (i == 3) 
  68.141 -                                           ? PGC_SH_l2h_shadow 
  68.142 -                                           : PGC_SH_l2_shadow);
  68.143 +                gl2gfn = guest_l3e_get_gfn(gl3e[i]);
  68.144 +                gl2mfn = vcpu_gfn_to_mfn(v, gl2gfn);
  68.145 +                flush |= shadow_remove_write_access(v, gl2mfn, 2, 0); 
  68.146 +            }
  68.147 +        }
  68.148 +        if ( flush ) 
  68.149 +            flush_tlb_mask(v->domain->domain_dirty_cpumask);
  68.150 +        /* Now install the new shadows. */
  68.151 +        for ( i = 0; i < 4; i++ ) 
  68.152 +        {
  68.153 +            if ( guest_l3e_get_flags(gl3e[i]) & _PAGE_PRESENT )
  68.154 +            {
  68.155 +                gl2gfn = guest_l3e_get_gfn(gl3e[i]);
  68.156 +                gl2mfn = vcpu_gfn_to_mfn(v, gl2gfn);
  68.157 +                sh_set_toplevel_shadow(v, i, gl2mfn, (i == 3) 
  68.158 +                                       ? PGC_SH_l2h_shadow 
  68.159 +                                       : PGC_SH_l2_shadow);
  68.160              }
  68.161          }
  68.162      }
  68.163  #elif GUEST_PAGING_LEVELS == 4
  68.164 +    if ( shadow_remove_write_access(v, gmfn, 4, 0) != 0 )
  68.165 +        flush_tlb_mask(v->domain->domain_dirty_cpumask);
  68.166      sh_set_toplevel_shadow(v, 0, gmfn, PGC_SH_l4_shadow);
  68.167  #else
  68.168  #error This should never happen 
  68.169 @@ -3524,9 +3568,9 @@ static int sh_guess_wrmap(struct vcpu *v
  68.170  {
  68.171      shadow_l1e_t sl1e, *sl1p;
  68.172      shadow_l2e_t *sl2p;
  68.173 -#if GUEST_PAGING_LEVELS >= 3
  68.174 +#if SHADOW_PAGING_LEVELS >= 3
  68.175      shadow_l3e_t *sl3p;
  68.176 -#if GUEST_PAGING_LEVELS >= 4
  68.177 +#if SHADOW_PAGING_LEVELS >= 4
  68.178      shadow_l4e_t *sl4p;
  68.179  #endif
  68.180  #endif
  68.181 @@ -3534,14 +3578,14 @@ static int sh_guess_wrmap(struct vcpu *v
  68.182  
  68.183  
  68.184      /* Carefully look in the shadow linear map for the l1e we expect */
  68.185 -#if GUEST_PAGING_LEVELS >= 4
  68.186 +#if SHADOW_PAGING_LEVELS >= 4
  68.187      sl4p = sh_linear_l4_table(v) + shadow_l4_linear_offset(vaddr);
  68.188      if ( !(shadow_l4e_get_flags(*sl4p) & _PAGE_PRESENT) )
  68.189          return 0;
  68.190      sl3p = sh_linear_l3_table(v) + shadow_l3_linear_offset(vaddr);
  68.191      if ( !(shadow_l3e_get_flags(*sl3p) & _PAGE_PRESENT) )
  68.192          return 0;
  68.193 -#elif GUEST_PAGING_LEVELS == 3
  68.194 +#elif SHADOW_PAGING_LEVELS == 3
  68.195      sl3p = ((shadow_l3e_t *) v->arch.shadow.l3table) 
  68.196          + shadow_l3_linear_offset(vaddr);
  68.197      if ( !(shadow_l3e_get_flags(*sl3p) & _PAGE_PRESENT) )
  68.198 @@ -3828,7 +3872,6 @@ sh_x86_emulate_cmpxchg(struct vcpu *v, u
  68.199  
  68.200      sh_unmap_domain_page(addr);
  68.201      shadow_audit_tables(v);
  68.202 -    check_for_early_unshadow(v, mfn);
  68.203      return rv;
  68.204  }
  68.205  
  68.206 @@ -3863,7 +3906,6 @@ sh_x86_emulate_cmpxchg8b(struct vcpu *v,
  68.207  
  68.208      sh_unmap_domain_page(addr);
  68.209      shadow_audit_tables(v);
  68.210 -    check_for_early_unshadow(v, mfn);
  68.211      return rv;
  68.212  }
  68.213  
  68.214 @@ -3899,22 +3941,22 @@ static char * sh_audit_flags(struct vcpu
  68.215          return "shadow is present but guest is not present";
  68.216      if ( (sflags & _PAGE_GLOBAL) && !is_hvm_vcpu(v) ) 
  68.217          return "global bit set in PV shadow";
  68.218 -    if ( (level == 1 || (level == 2 && (gflags & _PAGE_PSE)))
  68.219 -         && ((sflags & _PAGE_DIRTY) && !(gflags & _PAGE_DIRTY)) ) 
  68.220 -        return "dirty bit not propagated";
  68.221      if ( level == 2 && (sflags & _PAGE_PSE) )
  68.222          return "PS bit set in shadow";
  68.223  #if SHADOW_PAGING_LEVELS == 3
  68.224      if ( level == 3 ) return NULL; /* All the other bits are blank in PAEl3 */
  68.225  #endif
  68.226 +    if ( (sflags & _PAGE_PRESENT) && !(gflags & _PAGE_ACCESSED) ) 
  68.227 +        return "accessed bit not propagated";
  68.228 +    if ( (level == 1 || (level == 2 && (gflags & _PAGE_PSE)))
  68.229 +         && ((sflags & _PAGE_RW) && !(gflags & _PAGE_DIRTY)) ) 
  68.230 +        return "dirty bit not propagated";
  68.231      if ( (sflags & _PAGE_USER) != (gflags & _PAGE_USER) ) 
  68.232          return "user/supervisor bit does not match";
  68.233      if ( (sflags & _PAGE_NX_BIT) != (gflags & _PAGE_NX_BIT) ) 
  68.234          return "NX bit does not match";
  68.235      if ( (sflags & _PAGE_RW) && !(gflags & _PAGE_RW) ) 
  68.236          return "shadow grants write access but guest does not";
  68.237 -    if ( (sflags & _PAGE_ACCESSED) && !(gflags & _PAGE_ACCESSED) ) 
  68.238 -        return "accessed bit not propagated";
  68.239      return NULL;
  68.240  }
  68.241  
    69.1 --- a/xen/arch/x86/mm/shadow/types.h	Mon Nov 20 12:14:40 2006 -0700
    69.2 +++ b/xen/arch/x86/mm/shadow/types.h	Mon Nov 20 13:11:15 2006 -0700
    69.3 @@ -416,9 +416,7 @@ vcpu_gfn_to_mfn(struct vcpu *v, gfn_t gf
    69.4  {
    69.5      if ( !shadow_vcpu_mode_translate(v) )
    69.6          return _mfn(gfn_x(gfn));
    69.7 -    if ( likely(current->domain == v->domain) )
    69.8 -        return _mfn(get_mfn_from_gpfn(gfn_x(gfn)));
    69.9 -    return sh_gfn_to_mfn_foreign(v->domain, gfn_x(gfn));
   69.10 +    return sh_gfn_to_mfn(v->domain, gfn_x(gfn));
   69.11  }
   69.12  
   69.13  static inline gfn_t
    70.1 --- a/xen/arch/x86/oprofile/nmi_int.c	Mon Nov 20 12:14:40 2006 -0700
    70.2 +++ b/xen/arch/x86/oprofile/nmi_int.c	Mon Nov 20 13:11:15 2006 -0700
    70.3 @@ -305,22 +305,24 @@ static int __init ppro_init(char *cpu_ty
    70.4  {
    70.5  	__u8 cpu_model = current_cpu_data.x86_model;
    70.6  
    70.7 -	if (cpu_model > 0xd) {
    70.8 +	if (cpu_model > 15) {
    70.9  		printk("xenoprof: Initialization failed. "
   70.10  		       "Intel processor model %d for P6 class family is not "
   70.11  		       "supported\n", cpu_model);
   70.12  		return 0;
   70.13  	}
   70.14 -
   70.15 -	if (cpu_model == 9) {
   70.16 +	else if (cpu_model == 15)
   70.17 +		strncpy (cpu_type, "i386/core_2", XENOPROF_CPU_TYPE_SIZE - 1);
   70.18 +	else if (cpu_model == 14)
   70.19 +		strncpy (cpu_type, "i386/core", XENOPROF_CPU_TYPE_SIZE - 1);
   70.20 +	else if (cpu_model == 9)
   70.21  		strncpy (cpu_type, "i386/p6_mobile", XENOPROF_CPU_TYPE_SIZE - 1);
   70.22 -	} else if (cpu_model > 5) {
   70.23 +	else if (cpu_model > 5)
   70.24  		strncpy (cpu_type, "i386/piii", XENOPROF_CPU_TYPE_SIZE - 1);
   70.25 -	} else if (cpu_model > 2) {
   70.26 +	else if (cpu_model > 2)
   70.27  		strncpy (cpu_type, "i386/pii", XENOPROF_CPU_TYPE_SIZE - 1);
   70.28 -	} else {
   70.29 +	else
   70.30  		strncpy (cpu_type, "i386/ppro", XENOPROF_CPU_TYPE_SIZE - 1);
   70.31 -	}
   70.32  
   70.33  	model = &op_ppro_spec;
   70.34  	return 1;
    71.1 --- a/xen/arch/x86/physdev.c	Mon Nov 20 12:14:40 2006 -0700
    71.2 +++ b/xen/arch/x86/physdev.c	Mon Nov 20 13:11:15 2006 -0700
    71.3 @@ -125,7 +125,7 @@ long do_physdev_op(int cmd, XEN_GUEST_HA
    71.4          if ( copy_from_guest(&set_iobitmap, arg, 1) != 0 )
    71.5              break;
    71.6          ret = -EINVAL;
    71.7 -        if ( !access_ok(set_iobitmap.bitmap, IOBMP_BYTES) ||
    71.8 +        if ( !guest_handle_okay(set_iobitmap.bitmap, IOBMP_BYTES) ||
    71.9               (set_iobitmap.nr_ports > 65536) )
   71.10              break;
   71.11          ret = 0;
    72.1 --- a/xen/arch/x86/traps.c	Mon Nov 20 12:14:40 2006 -0700
    72.2 +++ b/xen/arch/x86/traps.c	Mon Nov 20 13:11:15 2006 -0700
    72.3 @@ -331,14 +331,9 @@ void show_execution_state(struct cpu_use
    72.4      show_stack(regs);
    72.5  }
    72.6  
    72.7 -/*
    72.8 - * This is called for faults at very unexpected times (e.g., when interrupts
    72.9 - * are disabled). In such situations we can't do much that is safe. We try to
   72.10 - * print out some tracing and then we just spin.
   72.11 - */
   72.12 -asmlinkage void fatal_trap(int trapnr, struct cpu_user_regs *regs)
   72.13 +char *trapstr(int trapnr)
   72.14  {
   72.15 -    static char *trapstr[] = { 
   72.16 +    static char *strings[] = { 
   72.17          "divide error", "debug", "nmi", "bkpt", "overflow", "bounds", 
   72.18          "invalid opcode", "device not available", "double fault", 
   72.19          "coprocessor segment", "invalid tss", "segment not found", 
   72.20 @@ -347,6 +342,19 @@ asmlinkage void fatal_trap(int trapnr, s
   72.21          "machine check", "simd error"
   72.22      };
   72.23  
   72.24 +    if ( (trapnr < 0) || (trapnr >= ARRAY_SIZE(strings)) )
   72.25 +        return "???";
   72.26 +
   72.27 +    return strings[trapnr];
   72.28 +}
   72.29 +
   72.30 +/*
   72.31 + * This is called for faults at very unexpected times (e.g., when interrupts
   72.32 + * are disabled). In such situations we can't do much that is safe. We try to
   72.33 + * print out some tracing and then we just spin.
   72.34 + */
   72.35 +asmlinkage void fatal_trap(int trapnr, struct cpu_user_regs *regs)
   72.36 +{
   72.37      watchdog_disable();
   72.38      console_start_sync();
   72.39  
   72.40 @@ -361,38 +369,51 @@ asmlinkage void fatal_trap(int trapnr, s
   72.41  
   72.42      panic("FATAL TRAP: vector = %d (%s)\n"
   72.43            "[error_code=%04x] %s\n",
   72.44 -          trapnr, trapstr[trapnr], regs->error_code,
   72.45 +          trapnr, trapstr(trapnr), regs->error_code,
   72.46            (regs->eflags & X86_EFLAGS_IF) ? "" : ", IN INTERRUPT CONTEXT");
   72.47  }
   72.48  
   72.49 -static inline int do_trap(int trapnr, char *str,
   72.50 -                          struct cpu_user_regs *regs, 
   72.51 -                          int use_error_code)
   72.52 +static int do_guest_trap(
   72.53 +    int trapnr, const struct cpu_user_regs *regs, int use_error_code)
   72.54  {
   72.55      struct vcpu *v = current;
   72.56 -    struct trap_bounce *tb = &v->arch.trap_bounce;
   72.57 -    struct trap_info *ti;
   72.58 -    unsigned long fixup;
   72.59 +    struct trap_bounce *tb;
   72.60 +    const struct trap_info *ti;
   72.61  
   72.62 -    DEBUGGER_trap_entry(trapnr, regs);
   72.63 +    tb = &v->arch.trap_bounce;
   72.64 +    ti = &v->arch.guest_context.trap_ctxt[trapnr];
   72.65  
   72.66 -    if ( !guest_mode(regs) )
   72.67 -        goto xen_fault;
   72.68 -
   72.69 -    ti = &current->arch.guest_context.trap_ctxt[trapnr];
   72.70      tb->flags = TBF_EXCEPTION;
   72.71      tb->cs    = ti->cs;
   72.72      tb->eip   = ti->address;
   72.73 +
   72.74      if ( use_error_code )
   72.75      {
   72.76          tb->flags |= TBF_EXCEPTION_ERRCODE;
   72.77          tb->error_code = regs->error_code;
   72.78      }
   72.79 +
   72.80      if ( TI_GET_IF(ti) )
   72.81          tb->flags |= TBF_INTERRUPT;
   72.82 +
   72.83 +    if ( unlikely(null_trap_bounce(tb)) )
   72.84 +        gdprintk(XENLOG_WARNING, "Unhandled %s fault/trap [#%d] in "
   72.85 +                 "domain %d on VCPU %d [ec=%04x]\n",
   72.86 +                 trapstr(trapnr), trapnr, v->domain->domain_id, v->vcpu_id,
   72.87 +                 regs->error_code);
   72.88 +
   72.89      return 0;
   72.90 +}
   72.91  
   72.92 - xen_fault:
   72.93 +static inline int do_trap(
   72.94 +    int trapnr, struct cpu_user_regs *regs, int use_error_code)
   72.95 +{
   72.96 +    unsigned long fixup;
   72.97 +
   72.98 +    DEBUGGER_trap_entry(trapnr, regs);
   72.99 +
  72.100 +    if ( guest_mode(regs) )
  72.101 +        return do_guest_trap(trapnr, regs, use_error_code);
  72.102  
  72.103      if ( likely((fixup = search_exception_table(regs->eip)) != 0) )
  72.104      {
  72.105 @@ -407,32 +428,32 @@ static inline int do_trap(int trapnr, ch
  72.106      show_execution_state(regs);
  72.107      panic("FATAL TRAP: vector = %d (%s)\n"
  72.108            "[error_code=%04x]\n",
  72.109 -          trapnr, str, regs->error_code);
  72.110 +          trapnr, trapstr(trapnr), regs->error_code);
  72.111      return 0;
  72.112  }
  72.113  
  72.114 -#define DO_ERROR_NOCODE(trapnr, str, name)              \
  72.115 +#define DO_ERROR_NOCODE(trapnr, name)                   \
  72.116  asmlinkage int do_##name(struct cpu_user_regs *regs)    \
  72.117  {                                                       \
  72.118 -    return do_trap(trapnr, str, regs, 0);               \
  72.119 +    return do_trap(trapnr, regs, 0);                    \
  72.120  }
  72.121  
  72.122 -#define DO_ERROR(trapnr, str, name)                     \
  72.123 +#define DO_ERROR(trapnr, name)                          \
  72.124  asmlinkage int do_##name(struct cpu_user_regs *regs)    \
  72.125  {                                                       \
  72.126 -    return do_trap(trapnr, str, regs, 1);               \
  72.127 +    return do_trap(trapnr, regs, 1);                    \
  72.128  }
  72.129  
  72.130 -DO_ERROR_NOCODE( 0, "divide error", divide_error)
  72.131 -DO_ERROR_NOCODE( 4, "overflow", overflow)
  72.132 -DO_ERROR_NOCODE( 5, "bounds", bounds)
  72.133 -DO_ERROR_NOCODE( 9, "coprocessor segment overrun", coprocessor_segment_overrun)
  72.134 -DO_ERROR(10, "invalid TSS", invalid_TSS)
  72.135 -DO_ERROR(11, "segment not present", segment_not_present)
  72.136 -DO_ERROR(12, "stack segment", stack_segment)
  72.137 -DO_ERROR_NOCODE(16, "fpu error", coprocessor_error)
  72.138 -DO_ERROR(17, "alignment check", alignment_check)
  72.139 -DO_ERROR_NOCODE(19, "simd error", simd_coprocessor_error)
  72.140 +DO_ERROR_NOCODE(TRAP_divide_error,    divide_error)
  72.141 +DO_ERROR_NOCODE(TRAP_overflow,        overflow)
  72.142 +DO_ERROR_NOCODE(TRAP_bounds,          bounds)
  72.143 +DO_ERROR_NOCODE(TRAP_copro_seg,       coprocessor_segment_overrun)
  72.144 +DO_ERROR(       TRAP_invalid_tss,     invalid_TSS)
  72.145 +DO_ERROR(       TRAP_no_segment,      segment_not_present)
  72.146 +DO_ERROR(       TRAP_stack_error,     stack_segment)
  72.147 +DO_ERROR_NOCODE(TRAP_copro_error,     coprocessor_error)
  72.148 +DO_ERROR(       TRAP_alignment_check, alignment_check)
  72.149 +DO_ERROR_NOCODE(TRAP_simd_error,      simd_coprocessor_error)
  72.150  
  72.151  int rdmsr_hypervisor_regs(
  72.152      uint32_t idx, uint32_t *eax, uint32_t *edx)
  72.153 @@ -599,9 +620,6 @@ static int emulate_forced_invalid_op(str
  72.154  
  72.155  asmlinkage int do_invalid_op(struct cpu_user_regs *regs)
  72.156  {
  72.157 -    struct vcpu *v = current;
  72.158 -    struct trap_bounce *tb = &v->arch.trap_bounce;
  72.159 -    struct trap_info *ti;
  72.160      int rc;
  72.161  
  72.162      DEBUGGER_trap_entry(TRAP_invalid_op, regs);
  72.163 @@ -625,22 +643,11 @@ asmlinkage int do_invalid_op(struct cpu_
  72.164      if ( (rc = emulate_forced_invalid_op(regs)) != 0 )
  72.165          return rc;
  72.166  
  72.167 -    ti = &current->arch.guest_context.trap_ctxt[TRAP_invalid_op];
  72.168 -    tb->flags = TBF_EXCEPTION;
  72.169 -    tb->cs    = ti->cs;
  72.170 -    tb->eip   = ti->address;
  72.171 -    if ( TI_GET_IF(ti) )
  72.172 -        tb->flags |= TBF_INTERRUPT;
  72.173 -
  72.174 -    return 0;
  72.175 +    return do_guest_trap(TRAP_invalid_op, regs, 0);
  72.176  }
  72.177  
  72.178  asmlinkage int do_int3(struct cpu_user_regs *regs)
  72.179  {
  72.180 -    struct vcpu *v = current;
  72.181 -    struct trap_bounce *tb = &v->arch.trap_bounce;
  72.182 -    struct trap_info *ti;
  72.183 -
  72.184      DEBUGGER_trap_entry(TRAP_int3, regs);
  72.185  
  72.186      if ( !guest_mode(regs) )
  72.187 @@ -650,14 +657,7 @@ asmlinkage int do_int3(struct cpu_user_r
  72.188          panic("FATAL TRAP: vector = 3 (Int3)\n");
  72.189      } 
  72.190  
  72.191 -    ti = &current->arch.guest_context.trap_ctxt[TRAP_int3];
  72.192 -    tb->flags = TBF_EXCEPTION;
  72.193 -    tb->cs    = ti->cs;
  72.194 -    tb->eip   = ti->address;
  72.195 -    if ( TI_GET_IF(ti) )
  72.196 -        tb->flags |= TBF_INTERRUPT;
  72.197 -
  72.198 -    return 0;
  72.199 +    return do_guest_trap(TRAP_int3, regs, 0);
  72.200  }
  72.201  
  72.202  asmlinkage int do_machine_check(struct cpu_user_regs *regs)
  72.203 @@ -687,6 +687,12 @@ void propagate_page_fault(unsigned long 
  72.204      tb->eip        = ti->address;
  72.205      if ( TI_GET_IF(ti) )
  72.206          tb->flags |= TBF_INTERRUPT;
  72.207 +    if ( unlikely(null_trap_bounce(tb)) )
  72.208 +    {
  72.209 +        printk("Unhandled page fault in domain %d on VCPU %d (ec=%04X)\n",
  72.210 +               v->domain->domain_id, v->vcpu_id, error_code);
  72.211 +        show_page_walk(addr);
  72.212 +    }
  72.213  }
  72.214  
  72.215  static int handle_gdt_ldt_mapping_fault(
  72.216 @@ -952,7 +958,6 @@ static inline int guest_io_okay(
  72.217      unsigned int port, unsigned int bytes,
  72.218      struct vcpu *v, struct cpu_user_regs *regs)
  72.219  {
  72.220 -    u16 x;
  72.221  #if defined(__x86_64__)
  72.222      /* If in user mode, switch to kernel mode just to read I/O bitmap. */
  72.223      int user_mode = !(v->arch.flags & TF_kernel_mode);
  72.224 @@ -967,10 +972,23 @@ static inline int guest_io_okay(
  72.225  
  72.226      if ( v->arch.iobmp_limit > (port + bytes) )
  72.227      {
  72.228 +        union { uint8_t bytes[2]; uint16_t mask; } x;
  72.229 +
  72.230 +        /*
  72.231 +         * Grab permission bytes from guest space. Inaccessible bytes are
  72.232 +         * read as 0xff (no access allowed).
  72.233 +         */
  72.234          TOGGLE_MODE();
  72.235 -        __get_user(x, (u16 *)(v->arch.iobmp+(port>>3)));
  72.236 +        switch ( __copy_from_guest_offset(&x.bytes[0], v->arch.iobmp,
  72.237 +                                          port>>3, 2) )
  72.238 +        {
  72.239 +        default: x.bytes[0] = ~0;
  72.240 +        case 1:  x.bytes[1] = ~0;
  72.241 +        case 0:  break;
  72.242 +        }
  72.243          TOGGLE_MODE();
  72.244 -        if ( (x & (((1<<bytes)-1) << (port&7))) == 0 )
  72.245 +
  72.246 +        if ( (x.mask & (((1<<bytes)-1) << (port&7))) == 0 )
  72.247              return 1;
  72.248      }
  72.249  
  72.250 @@ -1469,8 +1487,6 @@ static int emulate_privileged_op(struct 
  72.251  asmlinkage int do_general_protection(struct cpu_user_regs *regs)
  72.252  {
  72.253      struct vcpu *v = current;
  72.254 -    struct trap_bounce *tb = &v->arch.trap_bounce;
  72.255 -    struct trap_info *ti;
  72.256      unsigned long fixup;
  72.257  
  72.258      DEBUGGER_trap_entry(TRAP_gp_fault, regs);
  72.259 @@ -1504,12 +1520,13 @@ asmlinkage int do_general_protection(str
  72.260      if ( (regs->error_code & 3) == 2 )
  72.261      {
  72.262          /* This fault must be due to <INT n> instruction. */
  72.263 -        ti = &current->arch.guest_context.trap_ctxt[regs->error_code>>3];
  72.264 +        const struct trap_info *ti;
  72.265 +        unsigned char vector = regs->error_code >> 3;
  72.266 +        ti = &v->arch.guest_context.trap_ctxt[vector];
  72.267          if ( permit_softint(TI_GET_DPL(ti), v, regs) )
  72.268          {
  72.269 -            tb->flags = TBF_EXCEPTION;
  72.270              regs->eip += 2;
  72.271 -            goto finish_propagation;
  72.272 +            return do_guest_trap(vector, regs, 0);
  72.273          }
  72.274      }
  72.275  
  72.276 @@ -1526,15 +1543,7 @@ asmlinkage int do_general_protection(str
  72.277  #endif
  72.278  
  72.279      /* Pass on GPF as is. */
  72.280 -    ti = &current->arch.guest_context.trap_ctxt[TRAP_gp_fault];
  72.281 -    tb->flags      = TBF_EXCEPTION | TBF_EXCEPTION_ERRCODE;
  72.282 -    tb->error_code = regs->error_code;
  72.283 - finish_propagation:
  72.284 -    tb->cs         = ti->cs;
  72.285 -    tb->eip        = ti->address;
  72.286 -    if ( TI_GET_IF(ti) )
  72.287 -        tb->flags |= TBF_INTERRUPT;
  72.288 -    return 0;
  72.289 +    return do_guest_trap(TRAP_gp_fault, regs, 1);
  72.290  
  72.291   gp_in_kernel:
  72.292  
  72.293 @@ -1672,22 +1681,11 @@ void unset_nmi_callback(void)
  72.294  
  72.295  asmlinkage int math_state_restore(struct cpu_user_regs *regs)
  72.296  {
  72.297 -    struct trap_bounce *tb;
  72.298 -    struct trap_info *ti;
  72.299 -
  72.300      setup_fpu(current);
  72.301  
  72.302      if ( current->arch.guest_context.ctrlreg[0] & X86_CR0_TS )
  72.303      {
  72.304 -        tb = &current->arch.trap_bounce;
  72.305 -        ti = &current->arch.guest_context.trap_ctxt[TRAP_no_device];
  72.306 -
  72.307 -        tb->flags = TBF_EXCEPTION;
  72.308 -        tb->cs    = ti->cs;
  72.309 -        tb->eip   = ti->address;
  72.310 -        if ( TI_GET_IF(ti) )
  72.311 -            tb->flags |= TBF_INTERRUPT;
  72.312 -
  72.313 +        do_guest_trap(TRAP_no_device, regs, 0);
  72.314          current->arch.guest_context.ctrlreg[0] &= ~X86_CR0_TS;
  72.315      }
  72.316  
  72.317 @@ -1698,8 +1696,6 @@ asmlinkage int do_debug(struct cpu_user_
  72.318  {
  72.319      unsigned long condition;
  72.320      struct vcpu *v = current;
  72.321 -    struct trap_bounce *tb = &v->arch.trap_bounce;
  72.322 -    struct trap_info *ti;
  72.323  
  72.324      __asm__ __volatile__("mov %%db6,%0" : "=r" (condition));
  72.325  
  72.326 @@ -1729,12 +1725,7 @@ asmlinkage int do_debug(struct cpu_user_
  72.327      /* Save debug status register where guest OS can peek at it */
  72.328      v->arch.guest_context.debugreg[6] = condition;
  72.329  
  72.330 -    ti = &v->arch.guest_context.trap_ctxt[TRAP_debug];
  72.331 -    tb->flags = TBF_EXCEPTION;
  72.332 -    tb->cs    = ti->cs;
  72.333 -    tb->eip   = ti->address;
  72.334 -    if ( TI_GET_IF(ti) )
  72.335 -        tb->flags |= TBF_INTERRUPT;
  72.336 +    return do_guest_trap(TRAP_debug, regs, 0);
  72.337  
  72.338   out:
  72.339      return EXCRET_not_a_fault;
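
The traps.c changes above fold the repeated trap-bounce boilerplate into do_guest_trap(), which fills the vcpu's trap_bounce from the handler the guest registered in trap_ctxt and now warns when the bounce target is null. A simplified stand-alone sketch of that common step follows; the structures are stand-ins for Xen's trap_info/trap_bounce, not the real definitions.

#include <stdio.h>
#include <stdint.h>

#define TBF_EXCEPTION         1
#define TBF_EXCEPTION_ERRCODE 2
#define TBF_INTERRUPT         4

struct trap_info   { uint16_t cs; unsigned long address; int disable_ints; };
struct trap_bounce { int flags; uint16_t cs; unsigned long eip; int error_code; };

/* Fill the bounce frame from the handler the guest registered for 'trapnr'. */
static void bounce_to_guest(struct trap_bounce *tb, const struct trap_info *ctxt,
                            int trapnr, int error_code, int use_error_code)
{
    const struct trap_info *ti = &ctxt[trapnr];

    tb->flags = TBF_EXCEPTION;
    tb->cs    = ti->cs;
    tb->eip   = ti->address;

    if (use_error_code) {
        tb->flags     |= TBF_EXCEPTION_ERRCODE;
        tb->error_code = error_code;
    }
    if (ti->disable_ints)
        tb->flags |= TBF_INTERRUPT;

    if (!(tb->cs & ~3) && !tb->eip)     /* null handler: guest registered nothing */
        printf("unhandled trap %d in guest\n", trapnr);
}

int main(void)
{
    struct trap_info ctxt[32] = { [13] = { .cs = 0x10, .address = 0xc0100000UL } };
    struct trap_bounce tb;

    bounce_to_guest(&tb, ctxt, 13, 0x6b, 1);    /* #GP with an error code */
    printf("bounce to %04x:%lx flags=%d ec=%x\n", tb.cs, tb.eip, tb.flags, tb.error_code);
    return 0;
}
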
    73.1 --- a/xen/arch/x86/x86_32/entry.S	Mon Nov 20 12:14:40 2006 -0700
    73.2 +++ b/xen/arch/x86/x86_32/entry.S	Mon Nov 20 13:11:15 2006 -0700
    73.3 @@ -373,10 +373,11 @@ nvm86_3:/* Rewrite our stack frame and r
    73.4          mov  %gs,UREGS_ss+4(%esp)
    73.5          movl %esi,UREGS_esp+4(%esp)
    73.6          movzwl TRAPBOUNCE_cs(%edx),%eax
    73.7 +        /* Null selectors (0-3) are not allowed. */
    73.8 +        testl $~3,%eax
    73.9 +        jz   domain_crash_synchronous
   73.10          movl %eax,UREGS_cs+4(%esp)
   73.11          movl TRAPBOUNCE_eip(%edx),%eax
   73.12 -        test %eax,%eax
   73.13 -        jz   domain_crash_synchronous
   73.14          movl %eax,UREGS_eip+4(%esp)
   73.15          movb $0,TRAPBOUNCE_flags(%edx)
   73.16          ret
   73.17 @@ -596,20 +597,6 @@ ENTRY(setup_vm86_frame)
   73.18          addl $16,%esp
   73.19          ret
   73.20  
   73.21 -do_arch_sched_op_compat:
   73.22 -        # Ensure we return success even if we return via schedule_tail()
   73.23 -        xorl %eax,%eax
   73.24 -        GET_GUEST_REGS(%ecx)
   73.25 -        movl %eax,UREGS_eax(%ecx)
   73.26 -        jmp  do_sched_op_compat
   73.27 -
   73.28 -do_arch_sched_op:
   73.29 -        # Ensure we return success even if we return via schedule_tail()
   73.30 -        xorl %eax,%eax
   73.31 -        GET_GUEST_REGS(%ecx)
   73.32 -        movl %eax,UREGS_eax(%ecx)
   73.33 -        jmp  do_sched_op
   73.34 -
   73.35  .data
   73.36  
   73.37  ENTRY(exception_table)
   73.38 @@ -641,7 +628,7 @@ ENTRY(hypercall_table)
   73.39          .long do_stack_switch
   73.40          .long do_set_callbacks
   73.41          .long do_fpu_taskswitch     /*  5 */
   73.42 -        .long do_arch_sched_op_compat
   73.43 +        .long do_sched_op_compat
   73.44          .long do_platform_op
   73.45          .long do_set_debugreg
   73.46          .long do_get_debugreg
   73.47 @@ -664,7 +651,7 @@ ENTRY(hypercall_table)
   73.48          .long do_mmuext_op
   73.49          .long do_acm_op
   73.50          .long do_nmi_op
   73.51 -        .long do_arch_sched_op
   73.52 +        .long do_sched_op
   73.53          .long do_callback_op        /* 30 */
   73.54          .long do_xenoprof_op
   73.55          .long do_event_channel_op
   73.56 @@ -683,7 +670,7 @@ ENTRY(hypercall_args_table)
   73.57          .byte 2 /* do_stack_switch      */
   73.58          .byte 4 /* do_set_callbacks     */
   73.59          .byte 1 /* do_fpu_taskswitch    */  /*  5 */
   73.60 -        .byte 2 /* do_arch_sched_op_compat */
   73.61 +        .byte 2 /* do_sched_op_compat   */
   73.62          .byte 1 /* do_platform_op       */
   73.63          .byte 2 /* do_set_debugreg      */
   73.64          .byte 1 /* do_get_debugreg      */
   73.65 @@ -706,7 +693,7 @@ ENTRY(hypercall_args_table)
   73.66          .byte 4 /* do_mmuext_op         */
   73.67          .byte 1 /* do_acm_op            */
   73.68          .byte 2 /* do_nmi_op            */
   73.69 -        .byte 2 /* do_arch_sched_op     */
   73.70 +        .byte 2 /* do_sched_op          */
   73.71          .byte 2 /* do_callback_op       */  /* 30 */
   73.72          .byte 2 /* do_xenoprof_op       */
   73.73          .byte 2 /* do_event_channel_op  */
    74.1 --- a/xen/arch/x86/x86_64/entry.S	Mon Nov 20 12:14:40 2006 -0700
    74.2 +++ b/xen/arch/x86/x86_64/entry.S	Mon Nov 20 13:11:15 2006 -0700
    74.3 @@ -497,20 +497,6 @@ nmi_in_hypervisor_mode:
    74.4          call  do_nmi
    74.5          jmp   ret_from_intr
    74.6  
    74.7 -do_arch_sched_op_compat:
    74.8 -        # Ensure we return success even if we return via schedule_tail()
    74.9 -        xorl  %eax,%eax
   74.10 -        GET_GUEST_REGS(%r10)
   74.11 -        movq  %rax,UREGS_rax(%r10)
   74.12 -        jmp   do_sched_op_compat
   74.13 -
   74.14 -do_arch_sched_op:
   74.15 -        # Ensure we return success even if we return via schedule_tail()
   74.16 -        xorl  %eax,%eax
   74.17 -        GET_GUEST_REGS(%r10)
   74.18 -        movq  %rax,UREGS_rax(%r10)
   74.19 -        jmp   do_sched_op
   74.20 -
   74.21  .data
   74.22  
   74.23  ENTRY(exception_table)
   74.24 @@ -542,7 +528,7 @@ ENTRY(hypercall_table)
   74.25          .quad do_stack_switch
   74.26          .quad do_set_callbacks
   74.27          .quad do_fpu_taskswitch     /*  5 */
   74.28 -        .quad do_arch_sched_op_compat
   74.29 +        .quad do_sched_op_compat
   74.30          .quad do_platform_op
   74.31          .quad do_set_debugreg
   74.32          .quad do_get_debugreg
   74.33 @@ -565,7 +551,7 @@ ENTRY(hypercall_table)
   74.34          .quad do_mmuext_op
   74.35          .quad do_acm_op
   74.36          .quad do_nmi_op
   74.37 -        .quad do_arch_sched_op
   74.38 +        .quad do_sched_op
   74.39          .quad do_callback_op        /* 30 */
   74.40          .quad do_xenoprof_op
   74.41          .quad do_event_channel_op
   74.42 @@ -584,8 +570,8 @@ ENTRY(hypercall_args_table)
   74.43          .byte 2 /* do_stack_switch      */
   74.44          .byte 3 /* do_set_callbacks     */
   74.45          .byte 1 /* do_fpu_taskswitch    */  /*  5 */
   74.46 -        .byte 2 /* do_arch_sched_op_compat */
   74.47 -        .byte 1 /* do_platform_op           */
   74.48 +        .byte 2 /* do_sched_op_compat   */
   74.49 +        .byte 1 /* do_platform_op       */
   74.50          .byte 2 /* do_set_debugreg      */
   74.51          .byte 1 /* do_get_debugreg      */
   74.52          .byte 2 /* do_update_descriptor */  /* 10 */
   74.53 @@ -607,7 +593,7 @@ ENTRY(hypercall_args_table)
   74.54          .byte 4 /* do_mmuext_op         */
   74.55          .byte 1 /* do_acm_op            */
   74.56          .byte 2 /* do_nmi_op            */
   74.57 -        .byte 2 /* do_arch_sched_op     */
   74.58 +        .byte 2 /* do_sched_op          */
   74.59          .byte 2 /* do_callback_op       */  /* 30 */
   74.60          .byte 2 /* do_xenoprof_op       */
   74.61          .byte 2 /* do_event_channel_op  */
    75.1 --- a/xen/common/domain.c	Mon Nov 20 12:14:40 2006 -0700
    75.2 +++ b/xen/common/domain.c	Mon Nov 20 13:11:15 2006 -0700
    75.3 @@ -258,8 +258,18 @@ void __domain_crash_synchronous(void)
    75.4  {
    75.5      __domain_crash(current->domain);
    75.6  
    75.7 -    /* Flush multicall state before dying. */
    75.8 -    this_cpu(mc_state).flags = 0;
    75.9 +    /*
   75.10 +     * Flush multicall state before dying if a multicall is in progress.
   75.11 +     * This shouldn't be necessary, but some architectures are calling
   75.12 +     * domain_crash_synchronous() when they really shouldn't (i.e., from
   75.13 +     * within hypercall context).
   75.14 +     */
   75.15 +    if ( this_cpu(mc_state).flags != 0 )
   75.16 +    {
   75.17 +        dprintk(XENLOG_ERR,
   75.18 +                "FIXME: synchronous domain crash during a multicall!\n");
   75.19 +        this_cpu(mc_state).flags = 0;
   75.20 +    }
   75.21  
   75.22      for ( ; ; )
   75.23          do_softirq();
    76.1 --- a/xen/common/grant_table.c	Mon Nov 20 12:14:40 2006 -0700
    76.2 +++ b/xen/common/grant_table.c	Mon Nov 20 13:11:15 2006 -0700
    76.3 @@ -24,6 +24,8 @@
    76.4   * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
    76.5   */
    76.6  
    76.7 +#include <xen/config.h>
    76.8 +#include <xen/iocap.h>
    76.9  #include <xen/lib.h>
   76.10  #include <xen/sched.h>
   76.11  #include <xen/shadow.h>
   76.12 @@ -991,6 +993,9 @@ do_grant_table_op(
   76.13              guest_handle_cast(uop, gnttab_map_grant_ref_t);
   76.14          if ( unlikely(!guest_handle_okay(map, count)) )
   76.15              goto out;
   76.16 +        rc = -EPERM;
   76.17 +        if ( unlikely(!grant_operation_permitted(d)) )
   76.18 +            goto out;
   76.19          rc = gnttab_map_grant_ref(map, count);
   76.20          break;
   76.21      }
   76.22 @@ -1000,6 +1005,9 @@ do_grant_table_op(
   76.23              guest_handle_cast(uop, gnttab_unmap_grant_ref_t);
   76.24          if ( unlikely(!guest_handle_okay(unmap, count)) )
   76.25              goto out;
   76.26 +        rc = -EPERM;
   76.27 +        if ( unlikely(!grant_operation_permitted(d)) )
   76.28 +            goto out;
   76.29          rc = gnttab_unmap_grant_ref(unmap, count);
   76.30          break;
   76.31      }
   76.32 @@ -1015,6 +1023,9 @@ do_grant_table_op(
   76.33              guest_handle_cast(uop, gnttab_transfer_t);
   76.34          if ( unlikely(!guest_handle_okay(transfer, count)) )
   76.35              goto out;
   76.36 +        rc = -EPERM;
   76.37 +        if ( unlikely(!grant_operation_permitted(d)) )
   76.38 +            goto out;
   76.39          rc = gnttab_transfer(transfer, count);
   76.40          break;
   76.41      }
    77.1 --- a/xen/common/schedule.c	Mon Nov 20 12:14:40 2006 -0700
    77.2 +++ b/xen/common/schedule.c	Mon Nov 20 13:11:15 2006 -0700
    77.3 @@ -29,6 +29,7 @@
    77.4  #include <xen/mm.h>
    77.5  #include <xen/errno.h>
    77.6  #include <xen/guest_access.h>
    77.7 +#include <xen/multicall.h>
    77.8  #include <public/sched.h>
    77.9  
   77.10  extern void arch_getdomaininfo_ctxt(struct vcpu *,
   77.11 @@ -60,8 +61,6 @@ static struct scheduler *schedulers[] = 
   77.12      NULL
   77.13  };
   77.14  
   77.15 -static void __enter_scheduler(void);
   77.16 -
   77.17  static struct scheduler ops;
   77.18  
   77.19  #define SCHED_OP(fn, ...)                                 \
   77.20 @@ -270,7 +269,7 @@ static long do_block(void)
   77.21      else
   77.22      {
   77.23          TRACE_2D(TRC_SCHED_BLOCK, v->domain->domain_id, v->vcpu_id);
   77.24 -        __enter_scheduler();
   77.25 +        raise_softirq(SCHEDULE_SOFTIRQ);
   77.26      }
   77.27  
   77.28      return 0;
   77.29 @@ -315,9 +314,9 @@ static long do_poll(struct sched_poll *s
   77.30          set_timer(&v->poll_timer, sched_poll->timeout);
   77.31  
   77.32      TRACE_2D(TRC_SCHED_BLOCK, v->domain->domain_id, v->vcpu_id);
   77.33 -    __enter_scheduler();
   77.34 +    raise_softirq(SCHEDULE_SOFTIRQ);
   77.35  
   77.36 -    stop_timer(&v->poll_timer);
   77.37 +    return 0;
   77.38  
   77.39   out:
   77.40      clear_bit(_VCPUF_polling, &v->vcpu_flags);
   77.41 @@ -329,7 +328,7 @@ static long do_poll(struct sched_poll *s
   77.42  static long do_yield(void)
   77.43  {
   77.44      TRACE_2D(TRC_SCHED_YIELD, current->domain->domain_id, current->vcpu_id);
   77.45 -    __enter_scheduler();
   77.46 +    raise_softirq(SCHEDULE_SOFTIRQ);
   77.47      return 0;
   77.48  }
   77.49  
   77.50 @@ -540,7 +539,7 @@ long sched_adjust(struct domain *d, stru
   77.51   * - deschedule the current domain (scheduler independent).
   77.52   * - pick a new domain (scheduler dependent).
   77.53   */
   77.54 -static void __enter_scheduler(void)
   77.55 +static void schedule(void)
   77.56  {
   77.57      struct vcpu          *prev = current, *next = NULL;
   77.58      s_time_t              now = NOW();
   77.59 @@ -549,6 +548,7 @@ static void __enter_scheduler(void)
   77.60      s32                   r_time;     /* time for new dom to run */
   77.61  
   77.62      ASSERT(!in_irq());
   77.63 +    ASSERT(this_cpu(mc_state).flags == 0);
   77.64  
   77.65      perfc_incrc(sched_run);
   77.66  
   77.67 @@ -679,7 +679,7 @@ void __init scheduler_init(void)
   77.68  {
   77.69      int i;
   77.70  
   77.71 -    open_softirq(SCHEDULE_SOFTIRQ, __enter_scheduler);
   77.72 +    open_softirq(SCHEDULE_SOFTIRQ, schedule);
   77.73  
   77.74      for_each_cpu ( i )
   77.75      {
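
Note: the schedule.c hunks above turn every direct call to __enter_scheduler() into raise_softirq(SCHEDULE_SOFTIRQ) and rename the function itself to schedule(), which is registered as the softirq handler. A condensed sketch of the resulting control flow (tracing and locking omitted; not the tree's literal code):

    /* Hypercall side: only request a reschedule, then unwind normally.     */
    static long do_yield(void)
    {
        raise_softirq(SCHEDULE_SOFTIRQ);    /* the real switch happens later */
        return 0;
    }

    /* Boot side: SCHEDULE_SOFTIRQ is serviced by schedule() on the way back
     * out to guest context, where no multicall state can still be pending. */
    void __init scheduler_init(void)
    {
        open_softirq(SCHEDULE_SOFTIRQ, schedule);
    }
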
    78.1 --- a/xen/include/asm-x86/domain.h	Mon Nov 20 12:14:40 2006 -0700
    78.2 +++ b/xen/include/asm-x86/domain.h	Mon Nov 20 13:11:15 2006 -0700
    78.3 @@ -111,6 +111,8 @@ struct arch_domain
    78.4  
    78.5      /* Shadow translated domain: P2M mapping */
    78.6      pagetable_t phys_table;
    78.7 +    /* Highest guest frame that's ever been mapped in the p2m */
    78.8 +    unsigned long max_mapped_pfn;
    78.9  
   78.10  } __cacheline_aligned;
   78.11  
   78.12 @@ -169,7 +171,7 @@ struct arch_vcpu
   78.13      struct trap_bounce trap_bounce;
   78.14  
   78.15      /* I/O-port access bitmap. */
   78.16 -    u8 *iobmp;        /* Guest kernel virtual address of the bitmap. */
   78.17 +    XEN_GUEST_HANDLE(uint8_t) iobmp; /* Guest kernel virtual address of the bitmap. */
   78.18      int iobmp_limit;  /* Number of ports represented in the bitmap.  */
   78.19      int iopl;         /* Current IOPL for this VCPU. */
   78.20  
    79.1 --- a/xen/include/asm-x86/hvm/hvm.h	Mon Nov 20 12:14:40 2006 -0700
    79.2 +++ b/xen/include/asm-x86/hvm/hvm.h	Mon Nov 20 13:11:15 2006 -0700
    79.3 @@ -97,6 +97,8 @@ void hvm_domain_destroy(struct domain *d
    79.4  int hvm_vcpu_initialise(struct vcpu *v);
    79.5  void hvm_vcpu_destroy(struct vcpu *v);
    79.6  
    79.7 +void hvm_send_assist_req(struct vcpu *v);
    79.8 +
    79.9  static inline void
   79.10  hvm_store_cpu_guest_regs(
   79.11      struct vcpu *v, struct cpu_user_regs *r, unsigned long *crs)
   79.12 @@ -161,6 +163,8 @@ hvm_get_guest_ctrl_reg(struct vcpu *v, u
   79.13  
   79.14  void hvm_stts(struct vcpu *v);
   79.15  void hvm_set_guest_time(struct vcpu *v, u64 gtime);
   79.16 +void hvm_freeze_time(struct vcpu *v);
   79.17 +void hvm_migrate_timers(struct vcpu *v);
   79.18  void hvm_do_resume(struct vcpu *v);
   79.19  
   79.20  static inline void
    80.1 --- a/xen/include/asm-x86/hvm/io.h	Mon Nov 20 12:14:40 2006 -0700
    80.2 +++ b/xen/include/asm-x86/hvm/io.h	Mon Nov 20 13:11:15 2006 -0700
    80.3 @@ -151,8 +151,5 @@ extern void pic_irq_request(void *data, 
    80.4  extern int cpu_get_interrupt(struct vcpu *v, int *type);
    80.5  extern int cpu_has_pending_irq(struct vcpu *v);
    80.6  
    80.7 -// XXX - think about this, maybe use bit 30 of the mfn to signify an MMIO frame.
    80.8 -#define mmio_space(gpa) (!VALID_MFN(get_mfn_from_gpfn((gpa) >> PAGE_SHIFT)))
    80.9 -
   80.10  #endif /* __ASM_X86_HVM_IO_H__ */
   80.11  
    81.1 --- a/xen/include/asm-x86/hvm/vmx/vmcs.h	Mon Nov 20 12:14:40 2006 -0700
    81.2 +++ b/xen/include/asm-x86/hvm/vmx/vmcs.h	Mon Nov 20 13:11:15 2006 -0700
    81.3 @@ -41,12 +41,12 @@ enum {
    81.4      VMX_INDEX_MSR_SYSCALL_MASK,
    81.5      VMX_INDEX_MSR_EFER,
    81.6  
    81.7 -    VMX_MSR_COUNT,
    81.8 +    VMX_MSR_COUNT
    81.9  };
   81.10  
   81.11  struct vmx_msr_state {
   81.12      unsigned long flags;
   81.13 -    unsigned long msr_items[VMX_MSR_COUNT];
   81.14 +    unsigned long msrs[VMX_MSR_COUNT];
   81.15      unsigned long shadow_gs;
   81.16  };
   81.17  
   81.18 @@ -76,8 +76,8 @@ struct arch_vmx_struct {
   81.19      unsigned long        cpu_shadow_cr4; /* copy of guest read shadow CR4 */
   81.20      unsigned long        cpu_cr2; /* save CR2 */
   81.21      unsigned long        cpu_cr3;
   81.22 -    struct vmx_msr_state msr_content;
   81.23 -    unsigned long        vmxassist_enabled:1; 
   81.24 +    struct vmx_msr_state msr_state;
   81.25 +    unsigned long        vmxassist_enabled:1;
   81.26  };
   81.27  
   81.28  #define vmx_schedule_tail(next)         \
   81.29 @@ -141,10 +141,10 @@ enum vmcs_field {
   81.30      HOST_FS_SELECTOR                = 0x00000c08,
   81.31      HOST_GS_SELECTOR                = 0x00000c0a,
   81.32      HOST_TR_SELECTOR                = 0x00000c0c,
   81.33 -    IO_BITMAP_A                     = 0x00002000, 
   81.34 -    IO_BITMAP_A_HIGH                = 0x00002001, 
   81.35 -    IO_BITMAP_B                     = 0x00002002, 
   81.36 -    IO_BITMAP_B_HIGH                = 0x00002003, 
   81.37 +    IO_BITMAP_A                     = 0x00002000,
   81.38 +    IO_BITMAP_A_HIGH                = 0x00002001,
   81.39 +    IO_BITMAP_B                     = 0x00002002,
   81.40 +    IO_BITMAP_B_HIGH                = 0x00002003,
   81.41      VM_EXIT_MSR_STORE_ADDR          = 0x00002006,
   81.42      VM_EXIT_MSR_STORE_ADDR_HIGH     = 0x00002007,
   81.43      VM_EXIT_MSR_LOAD_ADDR           = 0x00002008,
   81.44 @@ -160,7 +160,7 @@ enum vmcs_field {
   81.45      GUEST_IA32_DEBUGCTL             = 0x00002802,
   81.46      GUEST_IA32_DEBUGCTL_HIGH        = 0x00002803,
   81.47      PIN_BASED_VM_EXEC_CONTROL       = 0x00004000,
   81.48 -    CPU_BASED_VM_EXEC_CONTROL       = 0x00004002,   
   81.49 +    CPU_BASED_VM_EXEC_CONTROL       = 0x00004002,
   81.50      EXCEPTION_BITMAP                = 0x00004004,
   81.51      PAGE_FAULT_ERROR_CODE_MASK      = 0x00004006,
   81.52      PAGE_FAULT_ERROR_CODE_MATCH     = 0x00004008,
   81.53 @@ -177,7 +177,7 @@ enum vmcs_field {
   81.54      SECONDARY_VM_EXEC_CONTROL       = 0x0000401e,
   81.55      VM_INSTRUCTION_ERROR            = 0x00004400,
   81.56      VM_EXIT_REASON                  = 0x00004402,
   81.57 -    VM_EXIT_INTR_INFO               = 0x00004404,   
   81.58 +    VM_EXIT_INTR_INFO               = 0x00004404,
   81.59      VM_EXIT_INTR_ERROR_CODE         = 0x00004406,
   81.60      IDT_VECTORING_INFO_FIELD        = 0x00004408,
   81.61      IDT_VECTORING_ERROR_CODE        = 0x0000440a,
   81.62 @@ -209,10 +209,10 @@ enum vmcs_field {
   81.63      CR4_GUEST_HOST_MASK             = 0x00006002,
   81.64      CR0_READ_SHADOW                 = 0x00006004,
   81.65      CR4_READ_SHADOW                 = 0x00006006,
   81.66 -    CR3_TARGET_VALUE0               = 0x00006008, 
   81.67 -    CR3_TARGET_VALUE1               = 0x0000600a, 
   81.68 -    CR3_TARGET_VALUE2               = 0x0000600c, 
   81.69 -    CR3_TARGET_VALUE3               = 0x0000600e, 
   81.70 +    CR3_TARGET_VALUE0               = 0x00006008,
   81.71 +    CR3_TARGET_VALUE1               = 0x0000600a,
   81.72 +    CR3_TARGET_VALUE2               = 0x0000600c,
   81.73 +    CR3_TARGET_VALUE3               = 0x0000600e,
   81.74      EXIT_QUALIFICATION              = 0x00006400,
   81.75      GUEST_LINEAR_ADDRESS            = 0x0000640a,
   81.76      GUEST_CR0                       = 0x00006800,
   81.77 @@ -226,7 +226,7 @@ enum vmcs_field {
   81.78      GUEST_GS_BASE                   = 0x00006810,
   81.79      GUEST_LDTR_BASE                 = 0x00006812,
   81.80      GUEST_TR_BASE                   = 0x00006814,
   81.81 -    GUEST_GDTR_BASE                 = 0x00006816,    
   81.82 +    GUEST_GDTR_BASE                 = 0x00006816,
   81.83      GUEST_IDTR_BASE                 = 0x00006818,
   81.84      GUEST_DR7                       = 0x0000681a,
   81.85      GUEST_RSP                       = 0x0000681c,
    82.1 --- a/xen/include/asm-x86/hvm/vmx/vmx.h	Mon Nov 20 12:14:40 2006 -0700
    82.2 +++ b/xen/include/asm-x86/hvm/vmx/vmx.h	Mon Nov 20 13:11:15 2006 -0700
    82.3 @@ -29,7 +29,6 @@
    82.4  extern void vmx_asm_vmexit_handler(struct cpu_user_regs);
    82.5  extern void vmx_asm_do_vmentry(void);
    82.6  extern void vmx_intr_assist(void);
    82.7 -extern void vmx_migrate_timers(struct vcpu *v);
    82.8  extern void arch_vmx_do_resume(struct vcpu *);
    82.9  extern void set_guest_time(struct vcpu *v, u64 gtime);
   82.10  
   82.11 @@ -263,13 +262,13 @@ static inline int vmx_paging_enabled(str
   82.12  
   82.13  static inline int vmx_long_mode_enabled(struct vcpu *v)
   82.14  {
   82.15 -    u64 efer = v->arch.hvm_vmx.msr_content.msr_items[VMX_INDEX_MSR_EFER];
   82.16 +    u64 efer = v->arch.hvm_vmx.msr_state.msrs[VMX_INDEX_MSR_EFER];
   82.17      return efer & EFER_LMA;
   82.18  }
   82.19  
   82.20  static inline int vmx_lme_is_set(struct vcpu *v)
   82.21  {
   82.22 -    u64 efer = v->arch.hvm_vmx.msr_content.msr_items[VMX_INDEX_MSR_EFER];
   82.23 +    u64 efer = v->arch.hvm_vmx.msr_state.msrs[VMX_INDEX_MSR_EFER];
   82.24      return efer & EFER_LME;
   82.25  }
   82.26  
    83.1 --- a/xen/include/asm-x86/hvm/vpt.h	Mon Nov 20 12:14:40 2006 -0700
    83.2 +++ b/xen/include/asm-x86/hvm/vpt.h	Mon Nov 20 13:11:15 2006 -0700
    83.3 @@ -67,8 +67,10 @@ typedef struct RTCState {
    83.4      int64_t next_second_time;
    83.5      struct timer second_timer;
    83.6      struct timer second_timer2;
    83.7 +    struct timer pie_timer;
    83.8 +    int period;
    83.9 +    s_time_t next_pie;
   83.10      struct vcpu      *vcpu;
   83.11 -    struct periodic_time *pt;
   83.12  } RTCState;
   83.13  
   83.14  #define FREQUENCE_PMTIMER  3579545
   83.15 @@ -143,9 +145,11 @@ extern void destroy_periodic_time(struct
   83.16  void pit_init(struct vcpu *v, unsigned long cpu_khz);
   83.17  void rtc_init(struct vcpu *v, int base, int irq);
   83.18  void rtc_deinit(struct domain *d);
   83.19 +void rtc_freeze(struct vcpu *v);
   83.20 +void rtc_thaw(struct vcpu *v);
   83.21 +void rtc_migrate_timers(struct vcpu *v);
   83.22  void pmtimer_init(struct vcpu *v, int base);
   83.23  void pmtimer_deinit(struct domain *d);
   83.24 -int is_rtc_periodic_irq(void *opaque);
   83.25  void pt_timer_fn(void *data);
   83.26  void pit_time_fired(struct vcpu *v, void *priv);
   83.27  
    84.1 --- a/xen/include/asm-x86/mm.h	Mon Nov 20 12:14:40 2006 -0700
    84.2 +++ b/xen/include/asm-x86/mm.h	Mon Nov 20 13:11:15 2006 -0700
    84.3 @@ -304,37 +304,9 @@ int check_descriptor(struct desc_struct 
    84.4  
    84.5  #define gmfn_to_mfn(_d, gpfn)  mfn_x(sh_gfn_to_mfn(_d, gpfn))
    84.6  
    84.7 -
    84.8 -/*
    84.9 - * The phys_to_machine_mapping is the reversed mapping of MPT for full
   84.10 - * virtualization.  It is only used by shadow_mode_translate()==true
   84.11 - * guests, so we steal the address space that would have normally
   84.12 - * been used by the read-only MPT map.
   84.13 - */
   84.14 -#define phys_to_machine_mapping ((l1_pgentry_t *)RO_MPT_VIRT_START)
   84.15  #define INVALID_MFN             (~0UL)
   84.16  #define VALID_MFN(_mfn)         (!((_mfn) & (1U<<31)))
   84.17  
   84.18 -static inline unsigned long get_mfn_from_gpfn(unsigned long pfn)
   84.19 -{
   84.20 -    l1_pgentry_t l1e = l1e_empty();
   84.21 -    int ret;
   84.22 -
   84.23 -#if CONFIG_PAGING_LEVELS > 2
   84.24 -    if ( pfn >= (RO_MPT_VIRT_END - RO_MPT_VIRT_START) / sizeof(l1_pgentry_t) ) 
   84.25 -        /* This pfn is higher than the p2m map can hold */
   84.26 -        return INVALID_MFN;
   84.27 -#endif
   84.28 -
   84.29 -    ret = __copy_from_user(&l1e,
   84.30 -                               &phys_to_machine_mapping[pfn],
   84.31 -                               sizeof(l1e));
   84.32 -
   84.33 -    if ( (ret == 0) && (l1e_get_flags(l1e) & _PAGE_PRESENT) )
   84.34 -        return l1e_get_pfn(l1e);
   84.35 -
   84.36 -    return INVALID_MFN;
   84.37 -}
   84.38  
   84.39  #ifdef MEMORY_GUARD
   84.40  void memguard_init(void);
    85.1 --- a/xen/include/asm-x86/shadow.h	Mon Nov 20 12:14:40 2006 -0700
    85.2 +++ b/xen/include/asm-x86/shadow.h	Mon Nov 20 13:11:15 2006 -0700
    85.3 @@ -663,12 +663,40 @@ struct shadow_walk_cache {
    85.4  
    85.5  
    85.6  /**************************************************************************/
    85.7 -/* Guest physmap (p2m) support */
    85.8 +/* Guest physmap (p2m) support 
    85.9 + *
   85.10 + * The phys_to_machine_mapping is the reversed mapping of MPT for full
   85.11 + * virtualization.  It is only used by shadow_mode_translate()==true
   85.12 + * guests, so we steal the address space that would have normally
   85.13 + * been used by the read-only MPT map.
   85.14 + */
   85.15 +
   85.16 +#define phys_to_machine_mapping ((l1_pgentry_t *)RO_MPT_VIRT_START)
   85.17 +
   85.18 +/* Read the current domain's P2M table. */
   85.19 +static inline mfn_t sh_gfn_to_mfn_current(unsigned long gfn)
   85.20 +{
   85.21 +    l1_pgentry_t l1e = l1e_empty();
   85.22 +    int ret;
   85.23 +
   85.24 +    if ( gfn > current->domain->arch.max_mapped_pfn )
   85.25 +        return _mfn(INVALID_MFN);
   85.26 +
   85.27 +    /* Don't read off the end of the p2m table */
   85.28 +    ASSERT(gfn < (RO_MPT_VIRT_END - RO_MPT_VIRT_START) / sizeof(l1_pgentry_t));
   85.29 +
   85.30 +    ret = __copy_from_user(&l1e,
   85.31 +                           &phys_to_machine_mapping[gfn],
   85.32 +                           sizeof(l1e));
   85.33 +
   85.34 +    if ( (ret == 0) && (l1e_get_flags(l1e) & _PAGE_PRESENT) )
   85.35 +        return _mfn(l1e_get_pfn(l1e));
   85.36 +
   85.37 +    return _mfn(INVALID_MFN);
   85.38 +}
   85.39  
   85.40  /* Walk another domain's P2M table, mapping pages as we go */
   85.41 -extern mfn_t
   85.42 -sh_gfn_to_mfn_foreign(struct domain *d, unsigned long gpfn);
   85.43 -
   85.44 +extern mfn_t sh_gfn_to_mfn_foreign(struct domain *d, unsigned long gpfn);
   85.45  
   85.46  /* General conversion function from gfn to mfn */
   85.47  static inline mfn_t
   85.48 @@ -676,12 +704,19 @@ sh_gfn_to_mfn(struct domain *d, unsigned
   85.49  {
   85.50      if ( !shadow_mode_translate(d) )
   85.51          return _mfn(gfn);
   85.52 -    else if ( likely(current->domain == d) )
   85.53 -        return _mfn(get_mfn_from_gpfn(gfn));
   85.54 -    else
   85.55 +    if ( likely(current->domain == d) )
   85.56 +        return sh_gfn_to_mfn_current(gfn);
   85.57 +    else 
   85.58          return sh_gfn_to_mfn_foreign(d, gfn);
   85.59  }
   85.60  
   85.61 +/* Compatibility function for HVM code */
   85.62 +static inline unsigned long get_mfn_from_gpfn(unsigned long pfn)
   85.63 +{
   85.64 +    return mfn_x(sh_gfn_to_mfn_current(pfn));
   85.65 +}
   85.66 +
   85.67 +/* General conversion function from mfn to gfn */
   85.68  static inline unsigned long
   85.69  sh_mfn_to_gfn(struct domain *d, mfn_t mfn)
   85.70  {
   85.71 @@ -691,6 +726,14 @@ sh_mfn_to_gfn(struct domain *d, mfn_t mf
   85.72          return mfn_x(mfn);
   85.73  }
   85.74  
   85.75 +/* Is this guest address an mmio one? (i.e. not defined in p2m map) */
   85.76 +static inline int
   85.77 +mmio_space(paddr_t gpa)
   85.78 +{
   85.79 +    unsigned long gfn = gpa >> PAGE_SHIFT;    
   85.80 +    return !VALID_MFN(mfn_x(sh_gfn_to_mfn_current(gfn)));
   85.81 +}
   85.82 +
   85.83  static inline l1_pgentry_t
   85.84  gl1e_to_ml1e(struct domain *d, l1_pgentry_t l1e)
   85.85  {
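
Note: together with the mm.h and io.h hunks, this moves the p2m lookup into shadow.h: sh_gfn_to_mfn_current() bounds-checks against max_mapped_pfn, get_mfn_from_gpfn() becomes a thin wrapper, and mmio_space() is re-expressed on top of it. An illustrative caller follows; handle_mmio_access() is a hypothetical stand-in for whatever the HVM path does with an unmapped address.

    /* Illustrative only: translate a guest-physical address in the current
     * domain and fall back to MMIO emulation when no p2m entry exists.     */
    unsigned long gfn = gpa >> PAGE_SHIFT;
    mfn_t mfn = sh_gfn_to_mfn(current->domain, gfn);

    if ( !VALID_MFN(mfn_x(mfn)) )
        handle_mmio_access(gpa);            /* hypothetical helper           */
    else
        ptr = map_domain_page(mfn_x(mfn));  /* back the access with the MFN  */
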
    86.1 --- a/xen/include/asm-x86/x86_32/regs.h	Mon Nov 20 12:14:40 2006 -0700
    86.2 +++ b/xen/include/asm-x86/x86_32/regs.h	Mon Nov 20 13:11:15 2006 -0700
    86.3 @@ -16,6 +16,9 @@
    86.4  #define permit_softint(dpl, v, r) \
    86.5      ((dpl) >= (vm86_mode(r) ? 3 : ((r)->cs & 3)))
    86.6  
    86.7 +/* Check for null trap callback handler: Is the selector null (0-3)? */
    86.8 +#define null_trap_bounce(tb) (((tb)->cs & ~3) == 0)
    86.9 +
   86.10  /* Number of bytes of on-stack execution state to be context-switched. */
   86.11  #define CTXT_SWITCH_STACK_BYTES (sizeof(struct cpu_user_regs))
   86.12  
    87.1 --- a/xen/include/asm-x86/x86_64/regs.h	Mon Nov 20 12:14:40 2006 -0700
    87.2 +++ b/xen/include/asm-x86/x86_64/regs.h	Mon Nov 20 13:11:15 2006 -0700
    87.3 @@ -16,6 +16,9 @@
    87.4  #define permit_softint(dpl, v, r) \
    87.5      ((dpl) >= (guest_kernel_mode(v, r) ? 1 : 3))
    87.6  
    87.7 +/* Check for null trap callback handler: Is the EIP null? */
    87.8 +#define null_trap_bounce(tb) ((tb)->eip == 0)
    87.9 +
   87.10  /* Number of bytes of on-stack execution state to be context-switched. */
   87.11  /* NB. Segment registers and bases are not saved/restored on x86/64 stack. */
   87.12  #define CTXT_SWITCH_STACK_BYTES (offsetof(struct cpu_user_regs, es))
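
Note: null_trap_bounce() gives common x86 code an arch-neutral test for "the guest never installed a handler for this trap": a null selector on x86_32, a zero EIP on x86_64. A hedged sketch of how an exception path might use it (the surrounding logic is illustrative, not the actual traps.c code):

    struct trap_bounce *tb = &v->arch.trap_bounce;

    if ( null_trap_bounce(tb) )
    {
        /* Nothing is registered to bounce this trap to; treat it as a fatal
         * guest error (illustrative policy, not necessarily the tree's).   */
        domain_crash(v->domain);
        return;
    }
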
    88.1 --- a/xen/include/public/physdev.h	Mon Nov 20 12:14:40 2006 -0700
    88.2 +++ b/xen/include/public/physdev.h	Mon Nov 20 13:11:15 2006 -0700
    88.3 @@ -1,4 +1,3 @@
    88.4 -
    88.5  /*
    88.6   * Permission is hereby granted, free of charge, to any person obtaining a copy
    88.7   * of this software and associated documentation files (the "Software"), to
    88.8 @@ -82,7 +81,7 @@ DEFINE_XEN_GUEST_HANDLE(physdev_set_iopl
    88.9  #define PHYSDEVOP_set_iobitmap           7
   88.10  struct physdev_set_iobitmap {
   88.11      /* IN */
   88.12 -    uint8_t *bitmap;
   88.13 +    XEN_GUEST_HANDLE_00030205(uint8_t) bitmap;
   88.14      uint32_t nr_ports;
   88.15  };
   88.16  typedef struct physdev_set_iobitmap physdev_set_iobitmap_t;
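
Note: with the bitmap field now a versioned guest handle, a guest built against interface 0x00030205 or newer fills the hypercall argument through the handle macros, roughly as below. The guest-side wrapper names (set_xen_guest_handle, HYPERVISOR_physdev_op) and the io_bitmap/nr_ports variables are assumed from the guest's own Xen headers and code.

    /* Guest-side sketch: point the hypervisor at our I/O permission bitmap. */
    struct physdev_set_iobitmap set_iobitmap;

    set_xen_guest_handle(set_iobitmap.bitmap, io_bitmap); /* io_bitmap: uint8_t[] */
    set_iobitmap.nr_ports = nr_ports;
    HYPERVISOR_physdev_op(PHYSDEVOP_set_iobitmap, &set_iobitmap);
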
    89.1 --- a/xen/include/public/vcpu.h	Mon Nov 20 12:14:40 2006 -0700
    89.2 +++ b/xen/include/public/vcpu.h	Mon Nov 20 13:11:15 2006 -0700
    89.3 @@ -86,6 +86,7 @@ struct vcpu_runstate_info {
    89.4      uint64_t time[4];
    89.5  };
    89.6  typedef struct vcpu_runstate_info vcpu_runstate_info_t;
    89.7 +DEFINE_XEN_GUEST_HANDLE(vcpu_runstate_info_t);
    89.8  
    89.9  /* VCPU is currently running on a physical CPU. */
   89.10  #define RUNSTATE_running  0
   89.11 @@ -108,8 +109,9 @@ typedef struct vcpu_runstate_info vcpu_r
   89.12   * Register a shared memory area from which the guest may obtain its own
   89.13   * runstate information without needing to execute a hypercall.
   89.14   * Notes:
   89.15 - *  1. The registered address may be virtual or physical, depending on the
   89.16 - *     platform. The virtual address should be registered on x86 systems.
    89.17 + *  1. The registered address may be a virtual address, a physical address
    89.18 + *     or a guest handle, depending on the platform. On x86 systems a virtual
    89.19 + *     address or a guest handle should be registered.
   89.20   *  2. Only one shared area may be registered per VCPU. The shared area is
   89.21   *     updated by the hypervisor each time the VCPU is scheduled. Thus
   89.22   *     runstate.state will always be RUNSTATE_running and
   89.23 @@ -120,6 +122,7 @@ typedef struct vcpu_runstate_info vcpu_r
   89.24  #define VCPUOP_register_runstate_memory_area 5
   89.25  struct vcpu_register_runstate_memory_area {
   89.26      union {
   89.27 +        XEN_GUEST_HANDLE(vcpu_runstate_info_t) h;
   89.28          struct vcpu_runstate_info *v;
   89.29          uint64_t p;
   89.30      } addr;
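
Note: the new union member lets a guest register its runstate area through a proper guest handle instead of a raw pointer. A guest-side sketch, assuming the usual wrappers from the guest's Xen headers (set_xen_guest_handle, HYPERVISOR_vcpu_op) and with cpu standing for the VCPU id:

    /* Guest-side sketch: per-VCPU runstate area kept in guest memory.      */
    static struct vcpu_runstate_info runstate;
    struct vcpu_register_runstate_memory_area area;

    set_xen_guest_handle(area.addr.h, &runstate);
    if ( HYPERVISOR_vcpu_op(VCPUOP_register_runstate_memory_area, cpu, &area) )
        /* Older hypervisor or unsupported: keep using explicit hypercalls. */
        ;
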
    90.1 --- a/xen/include/public/xen-compat.h	Mon Nov 20 12:14:40 2006 -0700
    90.2 +++ b/xen/include/public/xen-compat.h	Mon Nov 20 13:11:15 2006 -0700
    90.3 @@ -27,7 +27,7 @@
    90.4  #ifndef __XEN_PUBLIC_XEN_COMPAT_H__
    90.5  #define __XEN_PUBLIC_XEN_COMPAT_H__
    90.6  
    90.7 -#define __XEN_LATEST_INTERFACE_VERSION__ 0x00030204
    90.8 +#define __XEN_LATEST_INTERFACE_VERSION__ 0x00030205
    90.9  
   90.10  #if defined(__XEN__) || defined(__XEN_TOOLS__)
   90.11  /* Xen is built with matching headers and implements the latest interface. */
   90.12 @@ -41,4 +41,11 @@
   90.13  #error "These header files do not support the requested interface version."
   90.14  #endif
   90.15  
   90.16 +/* Fields defined as a Xen guest handle since 0x00030205. */
   90.17 +#if __XEN_INTERFACE_VERSION__ >= 0x00030205
   90.18 +#define XEN_GUEST_HANDLE_00030205(type) XEN_GUEST_HANDLE(type)
   90.19 +#else
   90.20 +#define XEN_GUEST_HANDLE_00030205(type) type *
   90.21 +#endif
   90.22 +
   90.23  #endif /* __XEN_PUBLIC_XEN_COMPAT_H__ */
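
Note: the effect of the versioned macro can be shown in a self-contained form: anything built with an interface version older than 0x00030205 keeps seeing a bare pointer field, so existing guests recompile unchanged, while newer builds get the handle type. A minimal stand-alone model (XEN_GUEST_HANDLE here is only a stand-in for the real definition in the public headers):

    #include <stdint.h>

    #define __XEN_INTERFACE_VERSION__ 0x00030204       /* pretend: old guest */

    #define XEN_GUEST_HANDLE(type) struct { type *p; } /* stand-in only      */

    #if __XEN_INTERFACE_VERSION__ >= 0x00030205
    #define XEN_GUEST_HANDLE_00030205(type) XEN_GUEST_HANDLE(type)
    #else
    #define XEN_GUEST_HANDLE_00030205(type) type *
    #endif

    /* Old guests see "uint8_t *bitmap"; new ones would see the handle type. */
    struct physdev_set_iobitmap {
        XEN_GUEST_HANDLE_00030205(uint8_t) bitmap;
        uint32_t nr_ports;
    };
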
    91.1 --- a/xen/include/public/xen.h	Mon Nov 20 12:14:40 2006 -0700
    91.2 +++ b/xen/include/public/xen.h	Mon Nov 20 13:11:15 2006 -0700
    91.3 @@ -246,7 +246,7 @@ struct mmuext_op {
    91.4          /* SET_LDT */
    91.5          unsigned int nr_ents;
    91.6          /* TLB_FLUSH_MULTI, INVLPG_MULTI */
    91.7 -        void *vcpumask;
    91.8 +        XEN_GUEST_HANDLE_00030205(void) vcpumask;
    91.9      } arg2;
   91.10  };
   91.11  typedef struct mmuext_op mmuext_op_t;
    92.1 --- a/xen/include/xen/iocap.h	Mon Nov 20 12:14:40 2006 -0700
    92.2 +++ b/xen/include/xen/iocap.h	Mon Nov 20 13:11:15 2006 -0700
    92.3 @@ -31,4 +31,12 @@
    92.4  #define multipage_allocation_permitted(d)               \
    92.5      (!rangeset_is_empty((d)->iomem_caps))
    92.6  
    92.7 +/*
    92.8 + * Until TLB flushing issues are sorted out we consider it unsafe for
    92.9 + * domains with no hardware-access privileges to perform grant map/transfer
   92.10 + * operations.
   92.11 + */
   92.12 +#define grant_operation_permitted(d)                    \
   92.13 +    (!rangeset_is_empty((d)->iomem_caps))
   92.14 +
   92.15  #endif /* __XEN_IOCAP_H__ */
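
Note: like multipage_allocation_permitted() above it, the new predicate simply asks whether the domain holds any I/O memory capability at all, which in practice limits grant map/transfer to dom0 and driver domains. A sketch of the linkage (start_mfn/end_mfn are illustrative values):

    /* A domain becomes eligible once it has been granted any iomem range.  */
    iomem_permit_access(d, start_mfn, end_mfn);  /* e.g. done for driver domains */
    ASSERT(grant_operation_permitted(d));        /* iomem_caps is now non-empty  */

    /* do_grant_table_op() (see the common/grant_table.c hunks above) then  */
    /* returns -EPERM for domains where this predicate is false.            */
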
    93.1 --- a/xen/include/xen/sched.h	Mon Nov 20 12:14:40 2006 -0700
    93.2 +++ b/xen/include/xen/sched.h	Mon Nov 20 13:11:15 2006 -0700
    93.3 @@ -75,7 +75,7 @@ struct vcpu
    93.4      void            *sched_priv;    /* scheduler-specific data */
    93.5  
    93.6      struct vcpu_runstate_info runstate;
    93.7 -    struct vcpu_runstate_info *runstate_guest; /* guest address */
    93.8 +    XEN_GUEST_HANDLE(vcpu_runstate_info_t) runstate_guest; /* guest address */
    93.9  
   93.10      unsigned long    vcpu_flags;
   93.11  
   93.12 @@ -282,11 +282,6 @@ void __domain_crash_synchronous(void) __
   93.13      __domain_crash_synchronous();                                         \
   93.14  } while (0)
   93.15  
   93.16 -void new_thread(struct vcpu *d,
   93.17 -                unsigned long start_pc,
   93.18 -                unsigned long start_stack,
   93.19 -                unsigned long start_info);
   93.20 -
   93.21  #define set_current_state(_s) do { current->state = (_s); } while (0)
   93.22  void scheduler_init(void);
   93.23  void schedulers_start(void);