ia64/xen-unstable

changeset 9413:d81636a6fa9f

merge with xen-ia64-unstable.hg
author kaf24@firebug.cl.cam.ac.uk
date Wed Mar 22 19:18:42 2006 +0100 (2006-03-22)
parents 11325d1c412c 5d3c2cb42ec4
children b89a155ecd2c
files	linux-2.6-xen-sparse/arch/i386/kernel/cpu/common-xen.c
	linux-2.6-xen-sparse/arch/i386/kernel/head-xen.S
	linux-2.6-xen-sparse/arch/x86_64/kernel/setup-xen.c
	linux-2.6-xen-sparse/drivers/xen/core/evtchn.c
	linux-2.6-xen-sparse/drivers/xen/core/reboot.c
	linux-2.6-xen-sparse/drivers/xen/netfront/netfront.c
	linux-2.6-xen-sparse/include/asm-i386/mach-xen/asm/processor.h
	linux-2.6-xen-sparse/include/asm-i386/mach-xen/setup_arch_post.h
	linux-2.6-xen-sparse/include/asm-x86_64/mach-xen/asm/msr.h
	linux-2.6-xen-sparse/include/asm-x86_64/mach-xen/setup_arch_post.h
	xen/arch/x86/traps.c
	xen/include/public/arch-x86_32.h
	xen/include/public/arch-x86_64.h
line diff
     1.1 --- a/linux-2.6-xen-sparse/arch/i386/kernel/cpu/common-xen.c	Wed Mar 22 10:04:43 2006 -0700
     1.2 +++ b/linux-2.6-xen-sparse/arch/i386/kernel/cpu/common-xen.c	Wed Mar 22 19:18:42 2006 +0100
     1.3 @@ -34,8 +34,6 @@ static int disable_x86_serial_nr __devin
     1.4  
     1.5  struct cpu_dev * cpu_devs[X86_VENDOR_NUM] = {};
     1.6  
     1.7 -extern void machine_specific_modify_cpu_capabilities(struct cpuinfo_x86 *c);
     1.8 -
     1.9  extern int disable_pse;
    1.10  
    1.11  static void default_init(struct cpuinfo_x86 * c)
    1.12 @@ -425,8 +423,6 @@ void __devinit identify_cpu(struct cpuin
    1.13  				c->x86_vendor, c->x86_model);
    1.14  	}
    1.15  
    1.16 -	machine_specific_modify_cpu_capabilities(c);
    1.17 -
    1.18  	/* Now the feature flags better reflect actual CPU features! */
    1.19  
    1.20  	printk(KERN_DEBUG "CPU: After all inits, caps:");
     2.1 --- a/linux-2.6-xen-sparse/arch/i386/kernel/head-xen.S	Wed Mar 22 10:04:43 2006 -0700
     2.2 +++ b/linux-2.6-xen-sparse/arch/i386/kernel/head-xen.S	Wed Mar 22 19:18:42 2006 +0100
     2.3 @@ -32,14 +32,14 @@ ENTRY(startup_32)
     2.4  
     2.5  	/* get vendor info */
     2.6  	xorl %eax,%eax			# call CPUID with 0 -> return vendor ID
     2.7 -	cpuid
     2.8 +	XEN_CPUID
     2.9  	movl %eax,X86_CPUID		# save CPUID level
    2.10  	movl %ebx,X86_VENDOR_ID		# lo 4 chars
    2.11  	movl %edx,X86_VENDOR_ID+4	# next 4 chars
    2.12  	movl %ecx,X86_VENDOR_ID+8	# last 4 chars
    2.13  
    2.14  	movl $1,%eax		# Use the CPUID instruction to get CPU type
    2.15 -	cpuid
    2.16 +	XEN_CPUID
    2.17  	movb %al,%cl		# save reg for future use
    2.18  	andb $0x0f,%ah		# mask processor family
    2.19  	movb %ah,X86
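
The head-xen.S hunk swaps the raw cpuid for XEN_CPUID, the forced-emulation form defined by the public arch headers later in this changeset. A minimal sketch of the byte sequence the macro expands to (bytes taken from the XEN_EMULATE_PREFIX definition below; shown as C constants purely for illustration):

    /* The bytes XEN_CPUID emits: ud2 (0f 0b) raises #UD, Xen checks the
     * "xen" marker at the faulting EIP, then emulates the cpuid (0f a2)
     * that follows. */
    static const unsigned char xen_cpuid_bytes[] = {
        0x0f, 0x0b,             /* ud2: trap into the hypervisor       */
        0x78, 0x65, 0x6e,       /* "xen": forced-emulation signature   */
        0x0f, 0xa2,             /* cpuid: the instruction Xen emulates */
    };
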
     3.1 --- a/linux-2.6-xen-sparse/arch/x86_64/kernel/setup-xen.c	Wed Mar 22 10:04:43 2006 -0700
     3.2 +++ b/linux-2.6-xen-sparse/arch/x86_64/kernel/setup-xen.c	Wed Mar 22 19:18:42 2006 +0100
     3.3 @@ -82,8 +82,6 @@
     3.4  extern unsigned long start_pfn;
     3.5  extern struct edid_info edid_info;
     3.6  
     3.7 -extern void machine_specific_modify_cpu_capabilities(struct cpuinfo_x86 *c);
     3.8 -
     3.9  shared_info_t *HYPERVISOR_shared_info = (shared_info_t *)empty_zero_page;
    3.10  EXPORT_SYMBOL(HYPERVISOR_shared_info);
    3.11  
    3.12 @@ -1433,8 +1431,6 @@ void __cpuinit identify_cpu(struct cpuin
    3.13  	select_idle_routine(c);
    3.14  	detect_ht(c); 
    3.15  
    3.16 -	machine_specific_modify_cpu_capabilities(c);
    3.17 -
    3.18  	/*
    3.19  	 * On SMP, boot_cpu_data holds the common feature set between
    3.20  	 * all CPUs; so make sure that we indicate which features are
     4.1 --- a/linux-2.6-xen-sparse/drivers/xen/core/evtchn.c	Wed Mar 22 10:04:43 2006 -0700
     4.2 +++ b/linux-2.6-xen-sparse/drivers/xen/core/evtchn.c	Wed Mar 22 19:18:42 2006 +0100
     4.3 @@ -58,17 +58,37 @@ static int evtchn_to_irq[NR_EVENT_CHANNE
     4.4  
     4.5  /* Packed IRQ information: binding type, sub-type index, and event channel. */
     4.6  static u32 irq_info[NR_IRQS];
     4.7 +
     4.8  /* Binding types. */
     4.9  enum { IRQT_UNBOUND, IRQT_PIRQ, IRQT_VIRQ, IRQT_IPI, IRQT_EVTCHN };
    4.10 +
    4.11  /* Constructor for packed IRQ information. */
    4.12 -#define mk_irq_info(type, index, evtchn)				\
    4.13 -	(((u32)(type) << 24) | ((u32)(index) << 16) | (u32)(evtchn))
    4.14 +static inline u32 mk_irq_info(u32 type, u32 index, u32 evtchn)
    4.15 +{
    4.16 +	return ((type << 24) | (index << 16) | evtchn);
    4.17 +}
    4.18 +
    4.19  /* Convenient shorthand for packed representation of an unbound IRQ. */
    4.20  #define IRQ_UNBOUND	mk_irq_info(IRQT_UNBOUND, 0, 0)
    4.21 -/* Accessor macros for packed IRQ information. */
    4.22 -#define evtchn_from_irq(irq) ((u16)(irq_info[irq]))
    4.23 -#define index_from_irq(irq)  ((u8)(irq_info[irq] >> 16))
    4.24 -#define type_from_irq(irq)   ((u8)(irq_info[irq] >> 24))
    4.25 +
    4.26 +/*
    4.27 + * Accessors for packed IRQ information.
    4.28 + */
    4.29 +
    4.30 +static inline unsigned int evtchn_from_irq(int irq)
    4.31 +{
    4.32 +	return (u16)(irq_info[irq]);
    4.33 +}
    4.34 +
    4.35 +static inline unsigned int index_from_irq(int irq)
    4.36 +{
    4.37 +	return (u8)(irq_info[irq] >> 16);
    4.38 +}
    4.39 +
    4.40 +static inline unsigned int type_from_irq(int irq)
    4.41 +{
    4.42 +	return (u8)(irq_info[irq] >> 24);
    4.43 +}
    4.44  
    4.45  /* IRQ <-> VIRQ mapping. */
    4.46  DEFINE_PER_CPU(int, virq_to_irq[NR_VIRQS]);
    4.47 @@ -90,10 +110,13 @@ static unsigned long pirq_needs_unmask_n
    4.48  static u8 cpu_evtchn[NR_EVENT_CHANNELS];
    4.49  static unsigned long cpu_evtchn_mask[NR_CPUS][NR_EVENT_CHANNELS/BITS_PER_LONG];
    4.50  
    4.51 -#define active_evtchns(cpu,sh,idx)		\
    4.52 -	((sh)->evtchn_pending[idx] &		\
    4.53 -	 cpu_evtchn_mask[cpu][idx] &		\
    4.54 -	 ~(sh)->evtchn_mask[idx])
    4.55 +static inline unsigned long active_evtchns(unsigned int cpu, shared_info_t *sh,
    4.56 +					   unsigned int idx)
    4.57 +{
    4.58 +	return (sh->evtchn_pending[idx] &
    4.59 +		cpu_evtchn_mask[cpu][idx] &
    4.60 +		~sh->evtchn_mask[idx]);
    4.61 +}
    4.62  
    4.63  static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
    4.64  {
    4.65 @@ -109,16 +132,31 @@ static void init_evtchn_cpu_bindings(voi
    4.66  	memset(cpu_evtchn_mask[0], ~0, sizeof(cpu_evtchn_mask[0]));
    4.67  }
    4.68  
    4.69 -#define cpu_from_evtchn(evtchn)		(cpu_evtchn[evtchn])
    4.70 +static inline unsigned int cpu_from_evtchn(unsigned int evtchn)
    4.71 +{
    4.72 +	return cpu_evtchn[evtchn];
    4.73 +}
    4.74  
    4.75  #else
    4.76  
    4.77 -#define active_evtchns(cpu,sh,idx)		\
    4.78 -	((sh)->evtchn_pending[idx] &		\
    4.79 -	 ~(sh)->evtchn_mask[idx])
    4.80 -#define bind_evtchn_to_cpu(chn,cpu)	((void)0)
    4.81 -#define init_evtchn_cpu_bindings()	((void)0)
    4.82 -#define cpu_from_evtchn(evtchn)		(0)
    4.83 +static inline unsigned long active_evtchns(unsigned int cpu, shared_info_t *sh,
    4.84 +					   unsigned int idx)
    4.85 +{
    4.86 +	return (sh->evtchn_pending[idx] & ~sh->evtchn_mask[idx]);
    4.87 +}
    4.88 +
    4.89 +static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
    4.90 +{
    4.91 +}
    4.92 +
    4.93 +static void init_evtchn_cpu_bindings(void)
    4.94 +{
    4.95 +}
    4.96 +
    4.97 +static inline unsigned int cpu_from_evtchn(unsigned int evtchn)
    4.98 +{
    4.99 +	return 0;
   4.100 +}
   4.101  
   4.102  #endif
   4.103  
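
The evtchn.c hunks replace the packing macros with typed inline functions; the layout is unchanged: binding type in bits 31-24, sub-type index in bits 23-16, event channel in bits 15-0, which is why the accessors cast to u8/u16 after shifting. A self-contained round-trip check of that layout (pack_irq_info and the stdint types are stand-ins for the kernel's mk_irq_info and u8/u16/u32):

    #include <assert.h>
    #include <stdint.h>

    /* Pack exactly as mk_irq_info() does above. */
    static uint32_t pack_irq_info(uint32_t type, uint32_t index, uint32_t evtchn)
    {
        return (type << 24) | (index << 16) | evtchn;
    }

    int main(void)
    {
        uint32_t info = pack_irq_info(3 /* IRQT_IPI */, 2, 0x1234);
        assert((uint16_t)info == 0x1234);      /* evtchn_from_irq() */
        assert((uint8_t)(info >> 16) == 2);    /* index_from_irq()  */
        assert((uint8_t)(info >> 24) == 3);    /* type_from_irq()   */
        return 0;
    }
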
     5.1 --- a/linux-2.6-xen-sparse/drivers/xen/core/reboot.c	Wed Mar 22 10:04:43 2006 -0700
     5.2 +++ b/linux-2.6-xen-sparse/drivers/xen/core/reboot.c	Wed Mar 22 19:18:42 2006 +0100
     5.3 @@ -25,9 +25,10 @@ void (*pm_power_off)(void);
     5.4  EXPORT_SYMBOL(pm_power_off);
     5.5  #endif
     5.6  
     5.7 +extern void ctrl_alt_del(void);
     5.8 +
     5.9  #define SHUTDOWN_INVALID  -1
    5.10  #define SHUTDOWN_POWEROFF  0
    5.11 -#define SHUTDOWN_REBOOT    1
    5.12  #define SHUTDOWN_SUSPEND   2
    5.13  /* Code 3 is SHUTDOWN_CRASH, which we don't use because the domain can only
    5.14   * report a crash, not be instructed to crash!
    5.15 @@ -234,33 +235,19 @@ static int shutdown_process(void *__unus
    5.16  {
    5.17  	static char *envp[] = { "HOME=/", "TERM=linux",
    5.18  				"PATH=/sbin:/usr/sbin:/bin:/usr/bin", NULL };
    5.19 -	static char *restart_argv[]  = { "/sbin/reboot", NULL };
    5.20  	static char *poweroff_argv[] = { "/sbin/poweroff", NULL };
    5.21  
    5.22  	extern asmlinkage long sys_reboot(int magic1, int magic2,
    5.23  					  unsigned int cmd, void *arg);
    5.24  
    5.25 -	daemonize("shutdown");
    5.26 -
    5.27 -	switch (shutting_down) {
    5.28 -	case SHUTDOWN_POWEROFF:
    5.29 -	case SHUTDOWN_HALT:
    5.30 +	if ((shutting_down == SHUTDOWN_POWEROFF) ||
    5.31 +	    (shutting_down == SHUTDOWN_HALT)) {
    5.32  		if (execve("/sbin/poweroff", poweroff_argv, envp) < 0) {
    5.33  			sys_reboot(LINUX_REBOOT_MAGIC1,
    5.34  				   LINUX_REBOOT_MAGIC2,
    5.35  				   LINUX_REBOOT_CMD_POWER_OFF,
    5.36  				   NULL);
    5.37  		}
    5.38 -		break;
    5.39 -
    5.40 -	case SHUTDOWN_REBOOT:
    5.41 -		if (execve("/sbin/reboot", restart_argv, envp) < 0) {
    5.42 -			sys_reboot(LINUX_REBOOT_MAGIC1,
    5.43 -				   LINUX_REBOOT_MAGIC2,
    5.44 -				   LINUX_REBOOT_CMD_RESTART,
    5.45 -				   NULL);
    5.46 -		}
    5.47 -		break;
    5.48  	}
    5.49  
    5.50  	shutting_down = SHUTDOWN_INVALID; /* could try again */
    5.51 @@ -331,7 +318,7 @@ static void shutdown_handler(struct xenb
    5.52  	if (strcmp(str, "poweroff") == 0)
    5.53  		shutting_down = SHUTDOWN_POWEROFF;
    5.54  	else if (strcmp(str, "reboot") == 0)
    5.55 -		shutting_down = SHUTDOWN_REBOOT;
    5.56 +		ctrl_alt_del();
    5.57  	else if (strcmp(str, "suspend") == 0)
    5.58  		shutting_down = SHUTDOWN_SUSPEND;
    5.59  	else if (strcmp(str, "halt") == 0)
    5.60 @@ -391,8 +378,6 @@ static struct xenbus_watch sysrq_watch =
    5.61  };
    5.62  #endif
    5.63  
    5.64 -static struct notifier_block xenstore_notifier;
    5.65 -
    5.66  static int setup_shutdown_watcher(struct notifier_block *notifier,
    5.67                                    unsigned long event,
    5.68                                    void *data)
    5.69 @@ -420,11 +405,10 @@ static int setup_shutdown_watcher(struct
    5.70  
    5.71  static int __init setup_shutdown_event(void)
    5.72  {
    5.73 -
    5.74 -	xenstore_notifier.notifier_call = setup_shutdown_watcher;
    5.75 -
    5.76 +	static struct notifier_block xenstore_notifier = {
    5.77 +		.notifier_call = setup_shutdown_watcher
    5.78 +	};
    5.79  	register_xenstore_notifier(&xenstore_notifier);
    5.80 -
    5.81  	return 0;
    5.82  }
    5.83  
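
The reboot.c hunk also moves xenstore_notifier from file scope into setup_shutdown_event() as a function-local static with a designated initializer; a static local keeps static storage duration, so the pointer handed to register_xenstore_notifier() stays valid after the function returns. A hedged, self-contained sketch of the pattern (the struct and registration function here are simplified stand-ins, not the kernel's own):

    /* Simplified stand-in for the kernel's notifier_block. */
    struct notifier_block {
        int (*notifier_call)(struct notifier_block *, unsigned long, void *);
    };

    static struct notifier_block *chain;   /* the "registration list" */

    static void register_notifier(struct notifier_block *nb)
    {
        chain = nb;   /* keeps a pointer: nb must outlive this call */
    }

    static int watcher(struct notifier_block *nb, unsigned long ev, void *d)
    {
        (void)nb; (void)ev; (void)d;
        return 0;
    }

    static int setup_event(void)
    {
        /* Static, not automatic: an automatic variable would dangle
         * once setup_event() returned. */
        static struct notifier_block nb = { .notifier_call = watcher };
        register_notifier(&nb);
        return 0;
    }
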
     6.1 --- a/linux-2.6-xen-sparse/drivers/xen/netfront/netfront.c	Wed Mar 22 10:04:43 2006 -0700
     6.2 +++ b/linux-2.6-xen-sparse/drivers/xen/netfront/netfront.c	Wed Mar 22 19:18:42 2006 +0100
     6.3 @@ -68,18 +68,12 @@
     6.4  #define NET_TX_RING_SIZE __RING_SIZE((netif_tx_sring_t *)0, PAGE_SIZE)
     6.5  #define NET_RX_RING_SIZE __RING_SIZE((netif_rx_sring_t *)0, PAGE_SIZE)
     6.6  
     6.7 -#define alloc_xen_skb(_l) __dev_alloc_skb((_l), GFP_ATOMIC|__GFP_NOWARN)
     6.8 -
     6.9 -#define init_skb_shinfo(_skb)                         \
    6.10 -    do {                                              \
    6.11 -        atomic_set(&(skb_shinfo(_skb)->dataref), 1);  \
    6.12 -        skb_shinfo(_skb)->nr_frags = 0;               \
    6.13 -        skb_shinfo(_skb)->frag_list = NULL;           \
    6.14 -    } while (0)
    6.15 -
    6.16 -static unsigned long rx_pfn_array[NET_RX_RING_SIZE];
    6.17 -static multicall_entry_t rx_mcl[NET_RX_RING_SIZE+1];
    6.18 -static mmu_update_t rx_mmu[NET_RX_RING_SIZE];
    6.19 +static inline void init_skb_shinfo(struct sk_buff *skb)
    6.20 +{
    6.21 +	atomic_set(&(skb_shinfo(skb)->dataref), 1);
    6.22 +	skb_shinfo(skb)->nr_frags = 0;
    6.23 +	skb_shinfo(skb)->frag_list = NULL;
    6.24 +}
    6.25  
    6.26  struct netfront_info
    6.27  {
    6.28 @@ -134,16 +128,28 @@ struct netfront_info
    6.29  	int tx_ring_ref;
    6.30  	int rx_ring_ref;
    6.31  	u8 mac[ETH_ALEN];
    6.32 +
    6.33 +	unsigned long rx_pfn_array[NET_RX_RING_SIZE];
    6.34 +	multicall_entry_t rx_mcl[NET_RX_RING_SIZE+1];
    6.35 +	mmu_update_t rx_mmu[NET_RX_RING_SIZE];
    6.36  };
    6.37  
    6.38 -/* Access macros for acquiring freeing slots in {tx,rx}_skbs[]. */
    6.39 -#define ADD_ID_TO_FREELIST(_list, _id)			\
    6.40 -	(_list)[(_id)] = (_list)[0];			\
    6.41 -	(_list)[0]     = (void *)(unsigned long)(_id);
    6.42 -#define GET_ID_FROM_FREELIST(_list)				\
    6.43 -	({ unsigned long _id = (unsigned long)(_list)[0];	\
    6.44 -	   (_list)[0]  = (_list)[_id];				\
    6.45 -	   (unsigned short)_id; })
    6.46 +/*
    6.47 + * Access functions for acquiring/freeing slots in {tx,rx}_skbs[].
    6.48 + */
    6.49 +
    6.50 +static inline void add_id_to_freelist(struct sk_buff **list, unsigned short id)
    6.51 +{
    6.52 +	list[id] = list[0];
    6.53 +	list[0]  = (void *)(unsigned long)id;
    6.54 +}
    6.55 +
    6.56 +static inline unsigned short get_id_from_freelist(struct sk_buff **list)
    6.57 +{
    6.58 +	unsigned int id = (unsigned int)(unsigned long)list[0];
    6.59 +	list[0] = list[id];
    6.60 +	return id;
    6.61 +}
    6.62  
    6.63  #ifdef DEBUG
    6.64  static char *be_state_name[] = {
    6.65 @@ -484,7 +490,7 @@ static void network_tx_buf_gc(struct net
    6.66  			gnttab_release_grant_reference(
    6.67  				&np->gref_tx_head, np->grant_tx_ref[id]);
    6.68  			np->grant_tx_ref[id] = GRANT_INVALID_REF;
    6.69 -			ADD_ID_TO_FREELIST(np->tx_skbs, id);
    6.70 +			add_id_to_freelist(np->tx_skbs, id);
    6.71  			dev_kfree_skb_irq(skb);
    6.72  		}
    6.73  
    6.74 @@ -545,9 +551,10 @@ static void network_alloc_rx_buffers(str
    6.75  		 * Subtract dev_alloc_skb headroom (16 bytes) and shared info
    6.76  		 * tailroom then round down to SKB_DATA_ALIGN boundary.
    6.77  		 */
    6.78 -		skb = alloc_xen_skb(
    6.79 +		skb = __dev_alloc_skb(
    6.80  			((PAGE_SIZE - sizeof(struct skb_shared_info)) &
    6.81 -			 (-SKB_DATA_ALIGN(1))) - 16);
    6.82 +			 (-SKB_DATA_ALIGN(1))) - 16,
    6.83 +			GFP_ATOMIC|__GFP_NOWARN);
    6.84  		if (skb == NULL) {
    6.85  			/* Any skbuffs queued for refill? Force them out. */
    6.86  			if (i != 0)
    6.87 @@ -576,7 +583,7 @@ static void network_alloc_rx_buffers(str
    6.88  
    6.89  		skb->dev = dev;
    6.90  
    6.91 -		id = GET_ID_FROM_FREELIST(np->rx_skbs);
    6.92 +		id = get_id_from_freelist(np->rx_skbs);
    6.93  
    6.94  		np->rx_skbs[id] = skb;
    6.95  
    6.96 @@ -588,13 +595,13 @@ static void network_alloc_rx_buffers(str
    6.97  						  np->xbdev->otherend_id,
    6.98  						  __pa(skb->head) >> PAGE_SHIFT);
    6.99  		RING_GET_REQUEST(&np->rx, req_prod + i)->gref = ref;
   6.100 -		rx_pfn_array[i] = virt_to_mfn(skb->head);
   6.101 +		np->rx_pfn_array[i] = virt_to_mfn(skb->head);
   6.102  
   6.103  		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
   6.104  			/* Remove this page before passing back to Xen. */
   6.105  			set_phys_to_machine(__pa(skb->head) >> PAGE_SHIFT,
   6.106  					    INVALID_P2M_ENTRY);
   6.107 -			MULTI_update_va_mapping(rx_mcl+i,
   6.108 +			MULTI_update_va_mapping(np->rx_mcl+i,
   6.109  						(unsigned long)skb->head,
   6.110  						__pte(0), 0);
   6.111  		}
   6.112 @@ -603,7 +610,7 @@ static void network_alloc_rx_buffers(str
   6.113  	/* Tell the balloon driver what is going on. */
   6.114  	balloon_update_driver_allowance(i);
   6.115  
   6.116 -	reservation.extent_start = rx_pfn_array;
   6.117 +	reservation.extent_start = np->rx_pfn_array;
   6.118  	reservation.nr_extents   = i;
   6.119  	reservation.extent_order = 0;
   6.120  	reservation.address_bits = 0;
   6.121 @@ -611,19 +618,19 @@ static void network_alloc_rx_buffers(str
   6.122  
   6.123  	if (!xen_feature(XENFEAT_auto_translated_physmap)) {
   6.124  		/* After all PTEs have been zapped, flush the TLB. */
   6.125 -		rx_mcl[i-1].args[MULTI_UVMFLAGS_INDEX] =
   6.126 +		np->rx_mcl[i-1].args[MULTI_UVMFLAGS_INDEX] =
   6.127  			UVMF_TLB_FLUSH|UVMF_ALL;
   6.128  
   6.129  		/* Give away a batch of pages. */
   6.130 -		rx_mcl[i].op = __HYPERVISOR_memory_op;
   6.131 -		rx_mcl[i].args[0] = XENMEM_decrease_reservation;
   6.132 -		rx_mcl[i].args[1] = (unsigned long)&reservation;
   6.133 +		np->rx_mcl[i].op = __HYPERVISOR_memory_op;
   6.134 +		np->rx_mcl[i].args[0] = XENMEM_decrease_reservation;
   6.135 +		np->rx_mcl[i].args[1] = (unsigned long)&reservation;
   6.136  
   6.137  		/* Zap PTEs and give away pages in one big multicall. */
   6.138 -		(void)HYPERVISOR_multicall(rx_mcl, i+1);
   6.139 +		(void)HYPERVISOR_multicall(np->rx_mcl, i+1);
   6.140  
   6.141  		/* Check return status of HYPERVISOR_memory_op(). */
   6.142 -		if (unlikely(rx_mcl[i].result != i))
   6.143 +		if (unlikely(np->rx_mcl[i].result != i))
   6.144  			panic("Unable to reduce memory reservation\n");
   6.145  	} else
   6.146  		if (HYPERVISOR_memory_op(XENMEM_decrease_reservation,
   6.147 @@ -656,7 +663,8 @@ static int network_start_xmit(struct sk_
   6.148  	if (unlikely((((unsigned long)skb->data & ~PAGE_MASK) + skb->len) >=
   6.149  		     PAGE_SIZE)) {
   6.150  		struct sk_buff *nskb;
   6.151 -		if (unlikely((nskb = alloc_xen_skb(skb->len)) == NULL))
   6.152 +		nskb = __dev_alloc_skb(skb->len, GFP_ATOMIC|__GFP_NOWARN);
   6.153 +		if (unlikely(nskb == NULL))
   6.154  			goto drop;
   6.155  		skb_put(nskb, skb->len);
   6.156  		memcpy(nskb->data, skb->data, skb->len);
   6.157 @@ -674,7 +682,7 @@ static int network_start_xmit(struct sk_
   6.158  
   6.159  	i = np->tx.req_prod_pvt;
   6.160  
   6.161 -	id = GET_ID_FROM_FREELIST(np->tx_skbs);
   6.162 +	id = get_id_from_freelist(np->tx_skbs);
   6.163  	np->tx_skbs[id] = skb;
   6.164  
   6.165  	tx = RING_GET_REQUEST(&np->tx, i);
   6.166 @@ -739,8 +747,8 @@ static int netif_poll(struct net_device 
   6.167  	struct sk_buff *skb, *nskb;
   6.168  	netif_rx_response_t *rx;
   6.169  	RING_IDX i, rp;
   6.170 -	mmu_update_t *mmu = rx_mmu;
   6.171 -	multicall_entry_t *mcl = rx_mcl;
   6.172 +	mmu_update_t *mmu = np->rx_mmu;
   6.173 +	multicall_entry_t *mcl = np->rx_mcl;
   6.174  	int work_done, budget, more_to_do = 1;
   6.175  	struct sk_buff_head rxq;
   6.176  	unsigned long flags;
   6.177 @@ -796,7 +804,7 @@ static int netif_poll(struct net_device 
   6.178  		np->grant_rx_ref[rx->id] = GRANT_INVALID_REF;
   6.179  
   6.180  		skb = np->rx_skbs[rx->id];
   6.181 -		ADD_ID_TO_FREELIST(np->rx_skbs, rx->id);
   6.182 +		add_id_to_freelist(np->rx_skbs, rx->id);
   6.183  
   6.184  		/* NB. We handle skb overflow later. */
   6.185  		skb->data = skb->head + rx->offset;
   6.186 @@ -831,14 +839,14 @@ static int netif_poll(struct net_device 
   6.187  	balloon_update_driver_allowance(-work_done);
   6.188  
   6.189  	/* Do all the remapping work, and M2P updates, in one big hypercall. */
   6.190 -	if (likely((mcl - rx_mcl) != 0)) {
   6.191 +	if (likely((mcl - np->rx_mcl) != 0)) {
   6.192  		mcl->op = __HYPERVISOR_mmu_update;
   6.193 -		mcl->args[0] = (unsigned long)rx_mmu;
   6.194 -		mcl->args[1] = mmu - rx_mmu;
   6.195 +		mcl->args[0] = (unsigned long)np->rx_mmu;
   6.196 +		mcl->args[1] = mmu - np->rx_mmu;
   6.197  		mcl->args[2] = 0;
   6.198  		mcl->args[3] = DOMID_SELF;
   6.199  		mcl++;
   6.200 -		(void)HYPERVISOR_multicall(rx_mcl, mcl - rx_mcl);
   6.201 +		(void)HYPERVISOR_multicall(np->rx_mcl, mcl - np->rx_mcl);
   6.202  	}
   6.203  
   6.204  	while ((skb = __skb_dequeue(&rxq)) != NULL) {
   6.205 @@ -871,7 +879,8 @@ static int netif_poll(struct net_device 
   6.206  					       16 - (skb->data - skb->head));
   6.207  			}
   6.208  
   6.209 -			nskb = alloc_xen_skb(skb->len + 2);
   6.210 +			nskb = __dev_alloc_skb(skb->len + 2,
   6.211 +					       GFP_ATOMIC|__GFP_NOWARN);
   6.212  			if (nskb != NULL) {
   6.213  				skb_reserve(nskb, 2);
   6.214  				skb_put(nskb, skb->len);
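
In the freelist conversion above, tx_skbs[]/rx_skbs[] double as a linked free list: slot 0 is the head, and each free slot stores the index of the next free slot cast through a pointer. A self-contained round-trip sketch of the same trick on a plain pointer array (add_id/get_id are simplified stand-ins for add_id_to_freelist/get_id_from_freelist):

    #include <assert.h>

    static void add_id(void **list, unsigned short id)
    {
        list[id] = list[0];                     /* link old head */
        list[0]  = (void *)(unsigned long)id;   /* new head = id */
    }

    static unsigned short get_id(void **list)
    {
        unsigned long id = (unsigned long)list[0];
        list[0] = list[id];                     /* pop the head  */
        return (unsigned short)id;
    }

    int main(void)
    {
        void *list[4] = { 0 };
        add_id(list, 1);
        add_id(list, 2);
        add_id(list, 3);
        assert(get_id(list) == 3);   /* LIFO: last freed, first reused */
        assert(get_id(list) == 2);
        assert(get_id(list) == 1);
        return 0;
    }
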
     7.1 --- a/linux-2.6-xen-sparse/include/asm-i386/mach-xen/asm/processor.h	Wed Mar 22 10:04:43 2006 -0700
     7.2 +++ b/linux-2.6-xen-sparse/include/asm-i386/mach-xen/asm/processor.h	Wed Mar 22 19:18:42 2006 +0100
     7.3 @@ -146,7 +146,7 @@ static inline void detect_ht(struct cpui
     7.4   */
     7.5  static inline void cpuid(unsigned int op, unsigned int *eax, unsigned int *ebx, unsigned int *ecx, unsigned int *edx)
     7.6  {
     7.7 -	__asm__("cpuid"
     7.8 +	__asm__(XEN_CPUID
     7.9  		: "=a" (*eax),
    7.10  		  "=b" (*ebx),
    7.11  		  "=c" (*ecx),
    7.12 @@ -158,7 +158,7 @@ static inline void cpuid(unsigned int op
    7.13  static inline void cpuid_count(int op, int count, int *eax, int *ebx, int *ecx,
    7.14  	       	int *edx)
    7.15  {
    7.16 -	__asm__("cpuid"
    7.17 +	__asm__(XEN_CPUID
    7.18  		: "=a" (*eax),
    7.19  		  "=b" (*ebx),
    7.20  		  "=c" (*ecx),
    7.21 @@ -173,7 +173,7 @@ static inline unsigned int cpuid_eax(uns
    7.22  {
    7.23  	unsigned int eax;
    7.24  
    7.25 -	__asm__("cpuid"
    7.26 +	__asm__(XEN_CPUID
    7.27  		: "=a" (eax)
    7.28  		: "0" (op)
    7.29  		: "bx", "cx", "dx");
    7.30 @@ -183,7 +183,7 @@ static inline unsigned int cpuid_ebx(uns
    7.31  {
    7.32  	unsigned int eax, ebx;
    7.33  
    7.34 -	__asm__("cpuid"
    7.35 +	__asm__(XEN_CPUID
    7.36  		: "=a" (eax), "=b" (ebx)
    7.37  		: "0" (op)
    7.38  		: "cx", "dx" );
    7.39 @@ -193,7 +193,7 @@ static inline unsigned int cpuid_ecx(uns
    7.40  {
    7.41  	unsigned int eax, ecx;
    7.42  
    7.43 -	__asm__("cpuid"
    7.44 +	__asm__(XEN_CPUID
    7.45  		: "=a" (eax), "=c" (ecx)
    7.46  		: "0" (op)
    7.47  		: "bx", "dx" );
    7.48 @@ -203,7 +203,7 @@ static inline unsigned int cpuid_edx(uns
    7.49  {
    7.50  	unsigned int eax, edx;
    7.51  
    7.52 -	__asm__("cpuid"
    7.53 +	__asm__(XEN_CPUID
    7.54  		: "=a" (eax), "=d" (edx)
    7.55  		: "0" (op)
    7.56  		: "bx", "cx");
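
With these wrappers switched to XEN_CPUID, callers such as cpuid_edx(1) receive the hypervisor-filtered feature word, so bits like PSE and PGE read back as already cleared inside a guest. A user-space approximation using a bare cpuid for illustration (cpuid_edx_raw is a hypothetical name; a real guest built with this header would emit the emulation prefix and trap to Xen instead):

    #include <stdio.h>

    static unsigned int cpuid_edx_raw(unsigned int op)
    {
        unsigned int eax, edx;
        __asm__("cpuid"
                : "=a" (eax), "=d" (edx)
                : "0" (op)
                : "ebx", "ecx");
        return edx;
    }

    int main(void)
    {
        /* CPUID.1:EDX feature bits: PSE=3, SEP=11, PGE=13. */
        unsigned int edx = cpuid_edx_raw(1);
        printf("PSE=%u SEP=%u PGE=%u\n",
               (edx >> 3) & 1, (edx >> 11) & 1, (edx >> 13) & 1);
        return 0;
    }
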
     8.1 --- a/linux-2.6-xen-sparse/include/asm-i386/mach-xen/setup_arch_post.h	Wed Mar 22 10:04:43 2006 -0700
     8.2 +++ b/linux-2.6-xen-sparse/include/asm-i386/mach-xen/setup_arch_post.h	Wed Mar 22 19:18:42 2006 +0100
     8.3 @@ -16,18 +16,6 @@ static char * __init machine_specific_me
     8.4  	return "Xen";
     8.5  }
     8.6  
     8.7 -void __devinit machine_specific_modify_cpu_capabilities(struct cpuinfo_x86 *c)
     8.8 -{
     8.9 -	clear_bit(X86_FEATURE_VME, c->x86_capability);
    8.10 -	clear_bit(X86_FEATURE_DE, c->x86_capability);
    8.11 -	clear_bit(X86_FEATURE_PSE, c->x86_capability);
    8.12 -	clear_bit(X86_FEATURE_PGE, c->x86_capability);
    8.13 -	clear_bit(X86_FEATURE_SEP, c->x86_capability);
    8.14 -	if (!(xen_start_info->flags & SIF_PRIVILEGED))
    8.15 -		clear_bit(X86_FEATURE_MTRR, c->x86_capability);
    8.16 -	c->hlt_works_ok = 0;
    8.17 -}
    8.18 -
    8.19  extern void hypervisor_callback(void);
    8.20  extern void failsafe_callback(void);
    8.21  extern void nmi(void);
    8.22 @@ -51,8 +39,6 @@ static void __init machine_specific_arch
    8.23  	cb.handler_address = (unsigned long)&nmi;
    8.24  	HYPERVISOR_nmi_op(XENNMI_register_callback, &cb);
    8.25  
    8.26 -	machine_specific_modify_cpu_capabilities(&boot_cpu_data);
    8.27 -
    8.28  	if (HYPERVISOR_xen_version(XENVER_platform_parameters,
    8.29  				   &pp) == 0)
    8.30  		set_fixaddr_top(pp.virt_start - PAGE_SIZE);
     9.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     9.2 +++ b/linux-2.6-xen-sparse/include/asm-x86_64/mach-xen/asm/msr.h	Wed Mar 22 19:18:42 2006 +0100
     9.3 @@ -0,0 +1,399 @@
     9.4 +#ifndef X86_64_MSR_H
     9.5 +#define X86_64_MSR_H 1
     9.6 +
     9.7 +#ifndef __ASSEMBLY__
     9.8 +/*
     9.9 + * Access to machine-specific registers (available on 586 and better only)
    9.10 + * Note: the rd* operations modify the parameters directly (without using
    9.11 + * pointer indirection), this allows gcc to optimize better
    9.12 + */
    9.13 +
    9.14 +#define rdmsr(msr,val1,val2) \
    9.15 +       __asm__ __volatile__("rdmsr" \
    9.16 +			    : "=a" (val1), "=d" (val2) \
    9.17 +			    : "c" (msr))
    9.18 +
    9.19 +
    9.20 +#define rdmsrl(msr,val) do { unsigned long a__,b__; \
    9.21 +       __asm__ __volatile__("rdmsr" \
    9.22 +			    : "=a" (a__), "=d" (b__) \
    9.23 +			    : "c" (msr)); \
    9.24 +       val = a__ | (b__<<32); \
    9.25 +} while(0)
    9.26 +
    9.27 +#define wrmsr(msr,val1,val2) \
    9.28 +     __asm__ __volatile__("wrmsr" \
    9.29 +			  : /* no outputs */ \
    9.30 +			  : "c" (msr), "a" (val1), "d" (val2))
    9.31 +
    9.32 +#define wrmsrl(msr,val) wrmsr(msr,(__u32)((__u64)(val)),((__u64)(val))>>32) 
    9.33 +
    9.34 +/* wrmsr with exception handling */
    9.35 +#define wrmsr_safe(msr,a,b) ({ int ret__;			\
    9.36 +	asm volatile("2: wrmsr ; xorl %0,%0\n"			\
    9.37 +		     "1:\n\t"					\
    9.38 +		     ".section .fixup,\"ax\"\n\t"		\
    9.39 +		     "3:  movl %4,%0 ; jmp 1b\n\t"		\
    9.40 +		     ".previous\n\t"				\
    9.41 + 		     ".section __ex_table,\"a\"\n"		\
    9.42 +		     "   .align 8\n\t"				\
    9.43 +		     "   .quad 	2b,3b\n\t"			\
    9.44 +		     ".previous"				\
    9.45 +		     : "=a" (ret__)				\
    9.46 +		     : "c" (msr), "0" (a), "d" (b), "i" (-EFAULT)); \
    9.47 +	ret__; })
    9.48 +
    9.49 +#define checking_wrmsrl(msr,val) wrmsr_safe(msr,(u32)(val),(u32)((val)>>32))
    9.50 +
    9.51 +#define rdmsr_safe(msr,a,b) \
    9.52 +	({ int ret__;						\
    9.53 +	  asm volatile ("1:       rdmsr\n"			\
    9.54 +                      "2:\n"					\
    9.55 +                      ".section .fixup,\"ax\"\n"		\
    9.56 +                      "3:       movl %4,%0\n"			\
    9.57 +                      " jmp 2b\n"				\
    9.58 +                      ".previous\n"				\
    9.59 +                      ".section __ex_table,\"a\"\n"		\
    9.60 +                      " .align 8\n"				\
    9.61 +                      " .quad 1b,3b\n"				\
    9.62 +                      ".previous":"=&bDS" (ret__), "=a"(*(a)), "=d"(*(b))\
    9.63 +                      :"c"(msr), "i"(-EIO), "0"(0));		\
    9.64 +	  ret__; })		
    9.65 +
    9.66 +#define rdtsc(low,high) \
    9.67 +     __asm__ __volatile__("rdtsc" : "=a" (low), "=d" (high))
    9.68 +
    9.69 +#define rdtscl(low) \
    9.70 +     __asm__ __volatile__ ("rdtsc" : "=a" (low) : : "edx")
    9.71 +
    9.72 +#define rdtscll(val) do { \
    9.73 +     unsigned int __a,__d; \
    9.74 +     asm volatile("rdtsc" : "=a" (__a), "=d" (__d)); \
    9.75 +     (val) = ((unsigned long)__a) | (((unsigned long)__d)<<32); \
    9.76 +} while(0)
    9.77 +
    9.78 +#define write_tsc(val1,val2) wrmsr(0x10, val1, val2)
    9.79 +
    9.80 +#define rdpmc(counter,low,high) \
    9.81 +     __asm__ __volatile__("rdpmc" \
    9.82 +			  : "=a" (low), "=d" (high) \
    9.83 +			  : "c" (counter))
    9.84 +
    9.85 +static inline void cpuid(int op, unsigned int *eax, unsigned int *ebx,
    9.86 +			 unsigned int *ecx, unsigned int *edx)
    9.87 +{
    9.88 +	__asm__(XEN_CPUID
    9.89 +		: "=a" (*eax),
    9.90 +		  "=b" (*ebx),
    9.91 +		  "=c" (*ecx),
    9.92 +		  "=d" (*edx)
    9.93 +		: "0" (op));
    9.94 +}
    9.95 +
    9.96 +/* Some CPUID calls want 'count' to be placed in ecx */
    9.97 +static inline void cpuid_count(int op, int count, int *eax, int *ebx, int *ecx,
    9.98 +	       	int *edx)
    9.99 +{
   9.100 +	__asm__(XEN_CPUID
   9.101 +		: "=a" (*eax),
   9.102 +		  "=b" (*ebx),
   9.103 +		  "=c" (*ecx),
   9.104 +		  "=d" (*edx)
   9.105 +		: "0" (op), "c" (count));
   9.106 +}
   9.107 +
   9.108 +/*
   9.109 + * CPUID functions returning a single datum
   9.110 + */
   9.111 +static inline unsigned int cpuid_eax(unsigned int op)
   9.112 +{
   9.113 +	unsigned int eax;
   9.114 +
   9.115 +	__asm__(XEN_CPUID
   9.116 +		: "=a" (eax)
   9.117 +		: "0" (op)
   9.118 +		: "bx", "cx", "dx");
   9.119 +	return eax;
   9.120 +}
   9.121 +static inline unsigned int cpuid_ebx(unsigned int op)
   9.122 +{
   9.123 +	unsigned int eax, ebx;
   9.124 +
   9.125 +	__asm__(XEN_CPUID
   9.126 +		: "=a" (eax), "=b" (ebx)
   9.127 +		: "0" (op)
   9.128 +		: "cx", "dx" );
   9.129 +	return ebx;
   9.130 +}
   9.131 +static inline unsigned int cpuid_ecx(unsigned int op)
   9.132 +{
   9.133 +	unsigned int eax, ecx;
   9.134 +
   9.135 +	__asm__(XEN_CPUID
   9.136 +		: "=a" (eax), "=c" (ecx)
   9.137 +		: "0" (op)
   9.138 +		: "bx", "dx" );
   9.139 +	return ecx;
   9.140 +}
   9.141 +static inline unsigned int cpuid_edx(unsigned int op)
   9.142 +{
   9.143 +	unsigned int eax, edx;
   9.144 +
   9.145 +	__asm__(XEN_CPUID
   9.146 +		: "=a" (eax), "=d" (edx)
   9.147 +		: "0" (op)
   9.148 +		: "bx", "cx");
   9.149 +	return edx;
   9.150 +}
   9.151 +
   9.152 +#define MSR_IA32_UCODE_WRITE		0x79
   9.153 +#define MSR_IA32_UCODE_REV		0x8b
   9.154 +
   9.155 +
   9.156 +#endif
   9.157 +
   9.158 +/* AMD/K8 specific MSRs */ 
   9.159 +#define MSR_EFER 0xc0000080		/* extended feature register */
   9.160 +#define MSR_STAR 0xc0000081		/* legacy mode SYSCALL target */
   9.161 +#define MSR_LSTAR 0xc0000082 		/* long mode SYSCALL target */
   9.162 +#define MSR_CSTAR 0xc0000083		/* compatibility mode SYSCALL target */
   9.163 +#define MSR_SYSCALL_MASK 0xc0000084	/* EFLAGS mask for syscall */
    9.164 +#define MSR_FS_BASE 0xc0000100		/* 64bit FS base */
    9.165 +#define MSR_GS_BASE 0xc0000101		/* 64bit GS base */
   9.166 +#define MSR_KERNEL_GS_BASE  0xc0000102	/* SwapGS GS shadow (or USER_GS from kernel) */ 
   9.167 +/* EFER bits: */ 
   9.168 +#define _EFER_SCE 0  /* SYSCALL/SYSRET */
   9.169 +#define _EFER_LME 8  /* Long mode enable */
   9.170 +#define _EFER_LMA 10 /* Long mode active (read-only) */
   9.171 +#define _EFER_NX 11  /* No execute enable */
   9.172 +
   9.173 +#define EFER_SCE (1<<_EFER_SCE)
   9.174 +#define EFER_LME (1<<_EFER_LME)
   9.175 +#define EFER_LMA (1<<_EFER_LMA)
   9.176 +#define EFER_NX (1<<_EFER_NX)
   9.177 +
   9.178 +/* Intel MSRs. Some also available on other CPUs */
   9.179 +#define MSR_IA32_TSC		0x10
   9.180 +#define MSR_IA32_PLATFORM_ID	0x17
   9.181 +
   9.182 +#define MSR_IA32_PERFCTR0      0xc1
   9.183 +#define MSR_IA32_PERFCTR1      0xc2
   9.184 +
   9.185 +#define MSR_MTRRcap		0x0fe
   9.186 +#define MSR_IA32_BBL_CR_CTL        0x119
   9.187 +
   9.188 +#define MSR_IA32_SYSENTER_CS	0x174
   9.189 +#define MSR_IA32_SYSENTER_ESP	0x175
   9.190 +#define MSR_IA32_SYSENTER_EIP	0x176
   9.191 +
   9.192 +#define MSR_IA32_MCG_CAP       0x179
   9.193 +#define MSR_IA32_MCG_STATUS        0x17a
   9.194 +#define MSR_IA32_MCG_CTL       0x17b
   9.195 +
   9.196 +#define MSR_IA32_EVNTSEL0      0x186
   9.197 +#define MSR_IA32_EVNTSEL1      0x187
   9.198 +
   9.199 +#define MSR_IA32_DEBUGCTLMSR       0x1d9
   9.200 +#define MSR_IA32_LASTBRANCHFROMIP  0x1db
   9.201 +#define MSR_IA32_LASTBRANCHTOIP        0x1dc
   9.202 +#define MSR_IA32_LASTINTFROMIP     0x1dd
   9.203 +#define MSR_IA32_LASTINTTOIP       0x1de
   9.204 +
   9.205 +#define MSR_MTRRfix64K_00000	0x250
   9.206 +#define MSR_MTRRfix16K_80000	0x258
   9.207 +#define MSR_MTRRfix16K_A0000	0x259
   9.208 +#define MSR_MTRRfix4K_C0000	0x268
   9.209 +#define MSR_MTRRfix4K_C8000	0x269
   9.210 +#define MSR_MTRRfix4K_D0000	0x26a
   9.211 +#define MSR_MTRRfix4K_D8000	0x26b
   9.212 +#define MSR_MTRRfix4K_E0000	0x26c
   9.213 +#define MSR_MTRRfix4K_E8000	0x26d
   9.214 +#define MSR_MTRRfix4K_F0000	0x26e
   9.215 +#define MSR_MTRRfix4K_F8000	0x26f
   9.216 +#define MSR_MTRRdefType		0x2ff
   9.217 +
   9.218 +#define MSR_IA32_MC0_CTL       0x400
   9.219 +#define MSR_IA32_MC0_STATUS        0x401
   9.220 +#define MSR_IA32_MC0_ADDR      0x402
   9.221 +#define MSR_IA32_MC0_MISC      0x403
   9.222 +
   9.223 +#define MSR_P6_PERFCTR0			0xc1
   9.224 +#define MSR_P6_PERFCTR1			0xc2
   9.225 +#define MSR_P6_EVNTSEL0			0x186
   9.226 +#define MSR_P6_EVNTSEL1			0x187
   9.227 +
   9.228 +/* K7/K8 MSRs. Not complete. See the architecture manual for a more complete list. */
   9.229 +#define MSR_K7_EVNTSEL0            0xC0010000
   9.230 +#define MSR_K7_PERFCTR0            0xC0010004
   9.231 +#define MSR_K7_EVNTSEL1            0xC0010001
   9.232 +#define MSR_K7_PERFCTR1            0xC0010005
   9.233 +#define MSR_K7_EVNTSEL2            0xC0010002
   9.234 +#define MSR_K7_PERFCTR2            0xC0010006
   9.235 +#define MSR_K7_EVNTSEL3            0xC0010003
   9.236 +#define MSR_K7_PERFCTR3            0xC0010007
   9.237 +#define MSR_K8_TOP_MEM1		   0xC001001A
   9.238 +#define MSR_K8_TOP_MEM2		   0xC001001D
   9.239 +#define MSR_K8_SYSCFG		   0xC0010010
   9.240 +#define MSR_K8_HWCR		   0xC0010015
   9.241 +
   9.242 +/* K6 MSRs */
   9.243 +#define MSR_K6_EFER			0xC0000080
   9.244 +#define MSR_K6_STAR			0xC0000081
   9.245 +#define MSR_K6_WHCR			0xC0000082
   9.246 +#define MSR_K6_UWCCR			0xC0000085
   9.247 +#define MSR_K6_PSOR			0xC0000087
   9.248 +#define MSR_K6_PFIR			0xC0000088
   9.249 +
   9.250 +/* Centaur-Hauls/IDT defined MSRs. */
   9.251 +#define MSR_IDT_FCR1			0x107
   9.252 +#define MSR_IDT_FCR2			0x108
   9.253 +#define MSR_IDT_FCR3			0x109
   9.254 +#define MSR_IDT_FCR4			0x10a
   9.255 +
   9.256 +#define MSR_IDT_MCR0			0x110
   9.257 +#define MSR_IDT_MCR1			0x111
   9.258 +#define MSR_IDT_MCR2			0x112
   9.259 +#define MSR_IDT_MCR3			0x113
   9.260 +#define MSR_IDT_MCR4			0x114
   9.261 +#define MSR_IDT_MCR5			0x115
   9.262 +#define MSR_IDT_MCR6			0x116
   9.263 +#define MSR_IDT_MCR7			0x117
   9.264 +#define MSR_IDT_MCR_CTRL		0x120
   9.265 +
   9.266 +/* VIA Cyrix defined MSRs*/
   9.267 +#define MSR_VIA_FCR			0x1107
   9.268 +#define MSR_VIA_LONGHAUL		0x110a
   9.269 +#define MSR_VIA_RNG			0x110b
   9.270 +#define MSR_VIA_BCR2			0x1147
   9.271 +
   9.272 +/* Intel defined MSRs. */
   9.273 +#define MSR_IA32_P5_MC_ADDR		0
   9.274 +#define MSR_IA32_P5_MC_TYPE		1
   9.275 +#define MSR_IA32_PLATFORM_ID		0x17
   9.276 +#define MSR_IA32_EBL_CR_POWERON		0x2a
   9.277 +
   9.278 +#define MSR_IA32_APICBASE               0x1b
   9.279 +#define MSR_IA32_APICBASE_BSP           (1<<8)
   9.280 +#define MSR_IA32_APICBASE_ENABLE        (1<<11)
   9.281 +#define MSR_IA32_APICBASE_BASE          (0xfffff<<12)
   9.282 +
   9.283 +/* P4/Xeon+ specific */
   9.284 +#define MSR_IA32_MCG_EAX		0x180
   9.285 +#define MSR_IA32_MCG_EBX		0x181
   9.286 +#define MSR_IA32_MCG_ECX		0x182
   9.287 +#define MSR_IA32_MCG_EDX		0x183
   9.288 +#define MSR_IA32_MCG_ESI		0x184
   9.289 +#define MSR_IA32_MCG_EDI		0x185
   9.290 +#define MSR_IA32_MCG_EBP		0x186
   9.291 +#define MSR_IA32_MCG_ESP		0x187
   9.292 +#define MSR_IA32_MCG_EFLAGS		0x188
   9.293 +#define MSR_IA32_MCG_EIP		0x189
   9.294 +#define MSR_IA32_MCG_RESERVED		0x18A
   9.295 +
   9.296 +#define MSR_P6_EVNTSEL0			0x186
   9.297 +#define MSR_P6_EVNTSEL1			0x187
   9.298 +
   9.299 +#define MSR_IA32_PERF_STATUS		0x198
   9.300 +#define MSR_IA32_PERF_CTL		0x199
   9.301 +
   9.302 +#define MSR_IA32_THERM_CONTROL		0x19a
   9.303 +#define MSR_IA32_THERM_INTERRUPT	0x19b
   9.304 +#define MSR_IA32_THERM_STATUS		0x19c
   9.305 +#define MSR_IA32_MISC_ENABLE		0x1a0
   9.306 +
   9.307 +#define MSR_IA32_DEBUGCTLMSR		0x1d9
   9.308 +#define MSR_IA32_LASTBRANCHFROMIP	0x1db
   9.309 +#define MSR_IA32_LASTBRANCHTOIP		0x1dc
   9.310 +#define MSR_IA32_LASTINTFROMIP		0x1dd
   9.311 +#define MSR_IA32_LASTINTTOIP		0x1de
   9.312 +
   9.313 +#define MSR_IA32_MC0_CTL		0x400
   9.314 +#define MSR_IA32_MC0_STATUS		0x401
   9.315 +#define MSR_IA32_MC0_ADDR		0x402
   9.316 +#define MSR_IA32_MC0_MISC		0x403
   9.317 +
   9.318 +/* Pentium IV performance counter MSRs */
   9.319 +#define MSR_P4_BPU_PERFCTR0 		0x300
   9.320 +#define MSR_P4_BPU_PERFCTR1 		0x301
   9.321 +#define MSR_P4_BPU_PERFCTR2 		0x302
   9.322 +#define MSR_P4_BPU_PERFCTR3 		0x303
   9.323 +#define MSR_P4_MS_PERFCTR0 		0x304
   9.324 +#define MSR_P4_MS_PERFCTR1 		0x305
   9.325 +#define MSR_P4_MS_PERFCTR2 		0x306
   9.326 +#define MSR_P4_MS_PERFCTR3 		0x307
   9.327 +#define MSR_P4_FLAME_PERFCTR0 		0x308
   9.328 +#define MSR_P4_FLAME_PERFCTR1 		0x309
   9.329 +#define MSR_P4_FLAME_PERFCTR2 		0x30a
   9.330 +#define MSR_P4_FLAME_PERFCTR3 		0x30b
   9.331 +#define MSR_P4_IQ_PERFCTR0 		0x30c
   9.332 +#define MSR_P4_IQ_PERFCTR1 		0x30d
   9.333 +#define MSR_P4_IQ_PERFCTR2 		0x30e
   9.334 +#define MSR_P4_IQ_PERFCTR3 		0x30f
   9.335 +#define MSR_P4_IQ_PERFCTR4 		0x310
   9.336 +#define MSR_P4_IQ_PERFCTR5 		0x311
   9.337 +#define MSR_P4_BPU_CCCR0 		0x360
   9.338 +#define MSR_P4_BPU_CCCR1 		0x361
   9.339 +#define MSR_P4_BPU_CCCR2 		0x362
   9.340 +#define MSR_P4_BPU_CCCR3 		0x363
   9.341 +#define MSR_P4_MS_CCCR0 		0x364
   9.342 +#define MSR_P4_MS_CCCR1 		0x365
   9.343 +#define MSR_P4_MS_CCCR2 		0x366
   9.344 +#define MSR_P4_MS_CCCR3 		0x367
   9.345 +#define MSR_P4_FLAME_CCCR0 		0x368
   9.346 +#define MSR_P4_FLAME_CCCR1 		0x369
   9.347 +#define MSR_P4_FLAME_CCCR2 		0x36a
   9.348 +#define MSR_P4_FLAME_CCCR3 		0x36b
   9.349 +#define MSR_P4_IQ_CCCR0 		0x36c
   9.350 +#define MSR_P4_IQ_CCCR1 		0x36d
   9.351 +#define MSR_P4_IQ_CCCR2 		0x36e
   9.352 +#define MSR_P4_IQ_CCCR3 		0x36f
   9.353 +#define MSR_P4_IQ_CCCR4 		0x370
   9.354 +#define MSR_P4_IQ_CCCR5 		0x371
   9.355 +#define MSR_P4_ALF_ESCR0 		0x3ca
   9.356 +#define MSR_P4_ALF_ESCR1 		0x3cb
   9.357 +#define MSR_P4_BPU_ESCR0 		0x3b2
   9.358 +#define MSR_P4_BPU_ESCR1 		0x3b3
   9.359 +#define MSR_P4_BSU_ESCR0 		0x3a0
   9.360 +#define MSR_P4_BSU_ESCR1 		0x3a1
   9.361 +#define MSR_P4_CRU_ESCR0 		0x3b8
   9.362 +#define MSR_P4_CRU_ESCR1 		0x3b9
   9.363 +#define MSR_P4_CRU_ESCR2 		0x3cc
   9.364 +#define MSR_P4_CRU_ESCR3 		0x3cd
   9.365 +#define MSR_P4_CRU_ESCR4 		0x3e0
   9.366 +#define MSR_P4_CRU_ESCR5 		0x3e1
   9.367 +#define MSR_P4_DAC_ESCR0 		0x3a8
   9.368 +#define MSR_P4_DAC_ESCR1 		0x3a9
   9.369 +#define MSR_P4_FIRM_ESCR0 		0x3a4
   9.370 +#define MSR_P4_FIRM_ESCR1 		0x3a5
   9.371 +#define MSR_P4_FLAME_ESCR0 		0x3a6
   9.372 +#define MSR_P4_FLAME_ESCR1 		0x3a7
   9.373 +#define MSR_P4_FSB_ESCR0 		0x3a2
   9.374 +#define MSR_P4_FSB_ESCR1 		0x3a3
   9.375 +#define MSR_P4_IQ_ESCR0 		0x3ba
   9.376 +#define MSR_P4_IQ_ESCR1 		0x3bb
   9.377 +#define MSR_P4_IS_ESCR0 		0x3b4
   9.378 +#define MSR_P4_IS_ESCR1 		0x3b5
   9.379 +#define MSR_P4_ITLB_ESCR0 		0x3b6
   9.380 +#define MSR_P4_ITLB_ESCR1 		0x3b7
   9.381 +#define MSR_P4_IX_ESCR0 		0x3c8
   9.382 +#define MSR_P4_IX_ESCR1 		0x3c9
   9.383 +#define MSR_P4_MOB_ESCR0 		0x3aa
   9.384 +#define MSR_P4_MOB_ESCR1 		0x3ab
   9.385 +#define MSR_P4_MS_ESCR0 		0x3c0
   9.386 +#define MSR_P4_MS_ESCR1 		0x3c1
   9.387 +#define MSR_P4_PMH_ESCR0 		0x3ac
   9.388 +#define MSR_P4_PMH_ESCR1 		0x3ad
   9.389 +#define MSR_P4_RAT_ESCR0 		0x3bc
   9.390 +#define MSR_P4_RAT_ESCR1 		0x3bd
   9.391 +#define MSR_P4_SAAT_ESCR0 		0x3ae
   9.392 +#define MSR_P4_SAAT_ESCR1 		0x3af
   9.393 +#define MSR_P4_SSU_ESCR0 		0x3be
   9.394 +#define MSR_P4_SSU_ESCR1 		0x3bf    /* guess: not defined in manual */
   9.395 +#define MSR_P4_TBPU_ESCR0 		0x3c2
   9.396 +#define MSR_P4_TBPU_ESCR1 		0x3c3
   9.397 +#define MSR_P4_TC_ESCR0 		0x3c4
   9.398 +#define MSR_P4_TC_ESCR1 		0x3c5
   9.399 +#define MSR_P4_U2L_ESCR0 		0x3b0
   9.400 +#define MSR_P4_U2L_ESCR1 		0x3b1
   9.401 +
   9.402 +#endif
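
Note that the rd*/wr* macros in this header write their results straight into the lvalue arguments rather than through pointers. rdmsr/wrmsr are ring-0 only, but rdtsc is normally executable from user space, so the rdtscll() pattern can be demonstrated standalone; a sketch assuming an x86 build (rdtscll_demo is an illustrative restatement, not the header's macro):

    #include <stdio.h>

    /* User-space restatement of rdtscll(): rdtsc returns the 64-bit
     * timestamp counter split across edx:eax. */
    #define rdtscll_demo(val) do { \
        unsigned int a__, d__; \
        __asm__ __volatile__("rdtsc" : "=a" (a__), "=d" (d__)); \
        (val) = ((unsigned long long)a__) | ((unsigned long long)d__ << 32); \
    } while (0)

    int main(void)
    {
        unsigned long long t0, t1;
        rdtscll_demo(t0);
        rdtscll_demo(t1);
        printf("tsc delta: %llu cycles\n", t1 - t0);
        return 0;
    }
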
    10.1 --- a/linux-2.6-xen-sparse/include/asm-x86_64/mach-xen/setup_arch_post.h	Wed Mar 22 10:04:43 2006 -0700
    10.2 +++ b/linux-2.6-xen-sparse/include/asm-x86_64/mach-xen/setup_arch_post.h	Wed Mar 22 19:18:42 2006 +0100
    10.3 @@ -6,17 +6,6 @@
    10.4   *	use of all of the static functions.
    10.5   **/
    10.6  
    10.7 -void __cpuinit machine_specific_modify_cpu_capabilities(struct cpuinfo_x86 *c)
    10.8 -{
    10.9 -	clear_bit(X86_FEATURE_VME, c->x86_capability);
   10.10 -	clear_bit(X86_FEATURE_DE, c->x86_capability);
   10.11 -	clear_bit(X86_FEATURE_PSE, c->x86_capability);
   10.12 -	clear_bit(X86_FEATURE_PGE, c->x86_capability);
   10.13 -	clear_bit(X86_FEATURE_SEP, c->x86_capability);
   10.14 -	if (!(xen_start_info->flags & SIF_PRIVILEGED))
   10.15 -		clear_bit(X86_FEATURE_MTRR, c->x86_capability);
   10.16 -}
   10.17 -
   10.18  extern void hypervisor_callback(void);
   10.19  extern void failsafe_callback(void);
   10.20  extern void nmi(void);
   10.21 @@ -36,6 +25,4 @@ static void __init machine_specific_arch
   10.22  	cb.handler_address = (unsigned long)&nmi;
   10.23  	HYPERVISOR_nmi_op(XENNMI_register_callback, &cb);
   10.24  #endif
   10.25 -
   10.26 -	machine_specific_modify_cpu_capabilities(&boot_cpu_data);
   10.27  }
    11.1 --- a/xen/arch/x86/traps.c	Wed Mar 22 10:04:43 2006 -0700
    11.2 +++ b/xen/arch/x86/traps.c	Wed Mar 22 19:18:42 2006 +0100
    11.3 @@ -286,7 +286,7 @@ asmlinkage void fatal_trap(int trapnr, s
    11.4      unsigned long cr2;
    11.5      static char *trapstr[] = { 
    11.6          "divide error", "debug", "nmi", "bkpt", "overflow", "bounds", 
    11.7 -        "invalid operation", "device not available", "double fault", 
    11.8 +        "invalid opcode", "device not available", "double fault", 
    11.9          "coprocessor segment", "invalid tss", "segment not found", 
   11.10          "stack error", "general protection fault", "page fault", 
   11.11          "spurious interrupt", "coprocessor error", "alignment check", 
   11.12 @@ -382,7 +382,6 @@ asmlinkage int do_##name(struct cpu_user
   11.13  DO_ERROR_NOCODE( 0, "divide error", divide_error)
   11.14  DO_ERROR_NOCODE( 4, "overflow", overflow)
   11.15  DO_ERROR_NOCODE( 5, "bounds", bounds)
   11.16 -DO_ERROR_NOCODE( 6, "invalid operand", invalid_op)
   11.17  DO_ERROR_NOCODE( 9, "coprocessor segment overrun", coprocessor_segment_overrun)
   11.18  DO_ERROR(10, "invalid TSS", invalid_TSS)
   11.19  DO_ERROR(11, "segment not present", segment_not_present)
   11.20 @@ -391,6 +390,85 @@ DO_ERROR_NOCODE(16, "fpu error", coproce
   11.21  DO_ERROR(17, "alignment check", alignment_check)
   11.22  DO_ERROR_NOCODE(19, "simd error", simd_coprocessor_error)
   11.23  
   11.24 +static int emulate_forced_invalid_op(struct cpu_user_regs *regs)
   11.25 +{
   11.26 +    char signature[5], instr[2];
   11.27 +    unsigned long a, b, c, d, eip;
   11.28 +
   11.29 +    a = regs->eax;
   11.30 +    b = regs->ebx;
   11.31 +    c = regs->ecx;
   11.32 +    d = regs->edx;
   11.33 +    eip = regs->eip;
   11.34 +
   11.35 +    /* Check for forced emulation signature: ud2 ; .ascii "xen". */
   11.36 +    if ( copy_from_user(signature, (char *)eip, sizeof(signature)) ||
   11.37 +         memcmp(signature, "\xf\xbxen", sizeof(signature)) )
   11.38 +        return 0;
   11.39 +    eip += sizeof(signature);
   11.40 +
   11.41 +    /* We only emulate CPUID. */
   11.42 +    if ( copy_from_user(instr, (char *)eip, sizeof(instr)) ||
   11.43 +         memcmp(instr, "\xf\xa2", sizeof(instr)) )
   11.44 +        return 0;
   11.45 +    eip += sizeof(instr);
   11.46 +
   11.47 +    __asm__ ( 
   11.48 +        "cpuid"
   11.49 +        : "=a" (a), "=b" (b), "=c" (c), "=d" (d)
   11.50 +        : "0" (a), "1" (b), "2" (c), "3" (d) );
   11.51 +
   11.52 +    if ( regs->eax == 1 )
   11.53 +    {
   11.54 +        /* Modify Feature Information. */
   11.55 +        clear_bit(X86_FEATURE_VME, &d);
   11.56 +        clear_bit(X86_FEATURE_DE,  &d);
   11.57 +        clear_bit(X86_FEATURE_PSE, &d);
   11.58 +        clear_bit(X86_FEATURE_PGE, &d);
   11.59 +        clear_bit(X86_FEATURE_SEP, &d);
   11.60 +        if ( !IS_PRIV(current->domain) )
   11.61 +            clear_bit(X86_FEATURE_MTRR, &d);
   11.62 +    }
   11.63 +
   11.64 +    regs->eax = a;
   11.65 +    regs->ebx = b;
   11.66 +    regs->ecx = c;
   11.67 +    regs->edx = d;
   11.68 +    regs->eip = eip;
   11.69 +
   11.70 +    return EXCRET_fault_fixed;
   11.71 +}
   11.72 +
   11.73 +asmlinkage int do_invalid_op(struct cpu_user_regs *regs)
   11.74 +{
   11.75 +    struct vcpu *v = current;
   11.76 +    struct trap_bounce *tb = &v->arch.trap_bounce;
   11.77 +    struct trap_info *ti;
   11.78 +    int rc;
   11.79 +
   11.80 +    DEBUGGER_trap_entry(TRAP_invalid_op, regs);
   11.81 +
   11.82 +    if ( unlikely(!guest_mode(regs)) )
   11.83 +    {
   11.84 +        DEBUGGER_trap_fatal(TRAP_invalid_op, regs);
   11.85 +        show_registers(regs);
   11.86 +        panic("CPU%d FATAL TRAP: vector = %d (invalid opcode)\n",
   11.87 +              smp_processor_id(), TRAP_invalid_op);
   11.88 +    }
   11.89 +
   11.90 +    if ( (rc = emulate_forced_invalid_op(regs)) != 0 )
   11.91 +        return rc;
   11.92 +
   11.93 +    ti = &current->arch.guest_context.trap_ctxt[TRAP_invalid_op];
   11.94 +    tb->flags = TBF_EXCEPTION;
   11.95 +    tb->cs    = ti->cs;
   11.96 +    tb->eip   = ti->address;
   11.97 +    if ( TI_GET_IF(ti) )
   11.98 +        tb->flags |= TBF_INTERRUPT;
   11.99 +
  11.100 +    return 0;
  11.101 +}
  11.102 +
  11.103  asmlinkage int do_int3(struct cpu_user_regs *regs)
  11.104  {
  11.105      struct vcpu *v = current;
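
do_invalid_op() only takes the emulation path when the bytes at the faulting EIP are exactly ud2 ("\xf\xb"), the ASCII marker "xen", and then a real cpuid opcode ("\xf\xa2"); any other #UD is bounced to the guest unchanged. A standalone restatement of the signature match, using the same string literals as the hypervisor code above:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        /* ud2 ; .ascii "xen" ; cpuid */
        const unsigned char stream[] =
            { 0x0f, 0x0b, 'x', 'e', 'n', 0x0f, 0xa2 };

        /* "\xf\xbxen" is the five bytes 0f 0b 78 65 6e. */
        int emulate = memcmp(stream, "\xf\xbxen", 5) == 0 &&
                      memcmp(stream + 5, "\xf\xa2", 2) == 0;
        printf("forced CPUID emulation: %s\n", emulate ? "yes" : "no");
        return 0;
    }
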
    12.1 --- a/xen/include/public/arch-x86_32.h	Wed Mar 22 10:04:43 2006 -0700
    12.2 +++ b/xen/include/public/arch-x86_32.h	Wed Mar 22 19:18:42 2006 +0100
    12.3 @@ -170,6 +170,18 @@ typedef struct {
    12.4  
    12.5  #endif /* !__ASSEMBLY__ */
    12.6  
    12.7 +/*
    12.8 + * Prefix forces emulation of some non-trapping instructions.
    12.9 + * Currently only CPUID.
   12.10 + */
   12.11 +#ifdef __ASSEMBLY__
   12.12 +#define XEN_EMULATE_PREFIX .byte 0x0f,0x0b,0x78,0x65,0x6e ;
   12.13 +#define XEN_CPUID          XEN_EMULATE_PREFIX cpuid
   12.14 +#else
   12.15 +#define XEN_EMULATE_PREFIX ".byte 0x0f,0x0b,0x78,0x65,0x6e ; "
   12.16 +#define XEN_CPUID          XEN_EMULATE_PREFIX "cpuid"
   12.17 +#endif
   12.18 +
   12.19  #endif
   12.20  
   12.21  /*
    13.1 --- a/xen/include/public/arch-x86_64.h	Wed Mar 22 10:04:43 2006 -0700
    13.2 +++ b/xen/include/public/arch-x86_64.h	Wed Mar 22 19:18:42 2006 +0100
    13.3 @@ -246,6 +246,18 @@ typedef struct {
    13.4  
    13.5  #endif /* !__ASSEMBLY__ */
    13.6  
    13.7 +/*
    13.8 + * Prefix forces emulation of some non-trapping instructions.
    13.9 + * Currently only CPUID.
   13.10 + */
   13.11 +#ifdef __ASSEMBLY__
   13.12 +#define XEN_EMULATE_PREFIX .byte 0x0f,0x0b,0x78,0x65,0x6e ;
   13.13 +#define XEN_CPUID          XEN_EMULATE_PREFIX cpuid
   13.14 +#else
   13.15 +#define XEN_EMULATE_PREFIX ".byte 0x0f,0x0b,0x78,0x65,0x6e ; "
   13.16 +#define XEN_CPUID          XEN_EMULATE_PREFIX "cpuid"
   13.17 +#endif
   13.18 +
   13.19  #endif
   13.20  
   13.21  /*
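
Taken together, the two public-header hunks give guests a uniform way to request emulated CPUID from either assembly or C. A guest-side sketch using the C form of the macros exactly as defined above (xen_cpuid_eax is an illustrative name; this is only meaningful under Xen, since on bare metal the ud2 raises an invalid-opcode fault instead of being emulated):

    #define XEN_EMULATE_PREFIX ".byte 0x0f,0x0b,0x78,0x65,0x6e ; "
    #define XEN_CPUID          XEN_EMULATE_PREFIX "cpuid"

    /* Returns the hypervisor-filtered EAX for a given CPUID leaf. */
    static unsigned int xen_cpuid_eax(unsigned int op)
    {
        unsigned int eax;
        __asm__(XEN_CPUID
                : "=a" (eax)
                : "0" (op)
                : "ebx", "ecx", "edx");
        return eax;
    }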