direct-io.hg

changeset 6808:e2f0a6fdb7d9

merge?
author cl349@firebug.cl.cam.ac.uk
date Wed Sep 14 14:43:34 2005 +0000 (2005-09-14)
parents 383f1336c305 5959fae4722a
children 5cbb2ecce16a
files extras/mini-os/mm.c linux-2.6-xen-sparse/arch/xen/i386/mm/hypervisor.c linux-2.6-xen-sparse/drivers/xen/netback/netback.c linux-2.6-xen-sparse/include/asm-xen/asm-i386/mmu_context.h linux-2.6-xen-sparse/include/asm-xen/asm-x86_64/mmu_context.h tools/ioemu/hw/i8259.c tools/libxc/xc_linux_restore.c tools/libxc/xc_vmx_build.c tools/libxc/xg_private.c tools/python/xen/xend/XendDomain.py tools/python/xen/xend/XendDomainInfo.py tools/python/xen/xend/image.py xen/arch/x86/mm.c xen/arch/x86/vmx.c xen/arch/x86/vmx_intercept.c xen/include/public/arch-x86_64.h xen/include/public/xen.h
line diff
     1.1 --- a/extras/mini-os/mm.c	Wed Sep 14 13:37:03 2005 +0000
     1.2 +++ b/extras/mini-os/mm.c	Wed Sep 14 14:43:34 2005 +0000
     1.3 @@ -432,7 +432,7 @@ void build_pagetable(unsigned long *star
     1.4          
     1.5          /* Pin the page to provide correct protection */
     1.6          pin_request.cmd = MMUEXT_PIN_L1_TABLE;
     1.7 -        pin_request.mfn = pfn_to_mfn(pt_frame);
     1.8 +        pin_request.arg1.mfn = pfn_to_mfn(pt_frame);
     1.9          if(HYPERVISOR_mmuext_op(&pin_request, 1, NULL, DOMID_SELF) < 0)
    1.10          {
    1.11              printk("ERROR: pinning failed\n");
     2.1 --- a/linux-2.6-xen-sparse/arch/xen/i386/mm/hypervisor.c	Wed Sep 14 13:37:03 2005 +0000
     2.2 +++ b/linux-2.6-xen-sparse/arch/xen/i386/mm/hypervisor.c	Wed Sep 14 14:43:34 2005 +0000
     2.3 @@ -115,7 +115,7 @@ void xen_pt_switch(unsigned long ptr)
     2.4  {
     2.5  	struct mmuext_op op;
     2.6  	op.cmd = MMUEXT_NEW_BASEPTR;
     2.7 -	op.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
     2.8 +	op.arg1.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
     2.9  	BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
    2.10  }
    2.11  
    2.12 @@ -123,7 +123,7 @@ void xen_new_user_pt(unsigned long ptr)
    2.13  {
    2.14  	struct mmuext_op op;
    2.15  	op.cmd = MMUEXT_NEW_USER_BASEPTR;
    2.16 -	op.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
    2.17 +	op.arg1.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
    2.18  	BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
    2.19  }
    2.20  
    2.21 @@ -138,7 +138,7 @@ void xen_invlpg(unsigned long ptr)
    2.22  {
    2.23  	struct mmuext_op op;
    2.24  	op.cmd = MMUEXT_INVLPG_LOCAL;
    2.25 -	op.linear_addr = ptr & PAGE_MASK;
    2.26 +	op.arg1.linear_addr = ptr & PAGE_MASK;
    2.27  	BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
    2.28  }
    2.29  
    2.30 @@ -157,7 +157,7 @@ void xen_tlb_flush_mask(cpumask_t *mask)
    2.31  	if ( cpus_empty(*mask) )
    2.32  		return;
    2.33  	op.cmd = MMUEXT_TLB_FLUSH_MULTI;
    2.34 -	op.vcpumask = mask->bits;
    2.35 +	op.arg2.vcpumask = mask->bits;
    2.36  	BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
    2.37  }
    2.38  
    2.39 @@ -165,7 +165,7 @@ void xen_invlpg_all(unsigned long ptr)
    2.40  {
    2.41  	struct mmuext_op op;
    2.42  	op.cmd = MMUEXT_INVLPG_ALL;
    2.43 -	op.linear_addr = ptr & PAGE_MASK;
    2.44 +	op.arg1.linear_addr = ptr & PAGE_MASK;
    2.45  	BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
    2.46  }
    2.47  
    2.48 @@ -175,8 +175,8 @@ void xen_invlpg_mask(cpumask_t *mask, un
    2.49  	if ( cpus_empty(*mask) )
    2.50  		return;
    2.51  	op.cmd = MMUEXT_INVLPG_MULTI;
    2.52 -	op.vcpumask = mask->bits;
    2.53 -	op.linear_addr = ptr & PAGE_MASK;
    2.54 +	op.arg1.linear_addr = ptr & PAGE_MASK;
    2.55 +	op.arg2.vcpumask    = mask->bits;
    2.56  	BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
    2.57  }
    2.58  
    2.59 @@ -193,7 +193,7 @@ void xen_pgd_pin(unsigned long ptr)
    2.60  #else
    2.61  	op.cmd = MMUEXT_PIN_L2_TABLE;
    2.62  #endif
    2.63 -	op.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
    2.64 +	op.arg1.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
    2.65  	BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
    2.66  }
    2.67  
    2.68 @@ -201,7 +201,7 @@ void xen_pgd_unpin(unsigned long ptr)
    2.69  {
    2.70  	struct mmuext_op op;
    2.71  	op.cmd = MMUEXT_UNPIN_TABLE;
    2.72 -	op.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
    2.73 +	op.arg1.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
    2.74  	BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
    2.75  }
    2.76  
    2.77 @@ -209,7 +209,7 @@ void xen_pte_pin(unsigned long ptr)
    2.78  {
    2.79  	struct mmuext_op op;
    2.80  	op.cmd = MMUEXT_PIN_L1_TABLE;
    2.81 -	op.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
    2.82 +	op.arg1.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
    2.83  	BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
    2.84  }
    2.85  
    2.86 @@ -217,7 +217,7 @@ void xen_pte_unpin(unsigned long ptr)
    2.87  {
    2.88  	struct mmuext_op op;
    2.89  	op.cmd = MMUEXT_UNPIN_TABLE;
    2.90 -	op.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
    2.91 +	op.arg1.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
    2.92  	BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
    2.93  }
    2.94  
    2.95 @@ -226,7 +226,7 @@ void xen_pud_pin(unsigned long ptr)
    2.96  {
    2.97  	struct mmuext_op op;
    2.98  	op.cmd = MMUEXT_PIN_L3_TABLE;
    2.99 -	op.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
   2.100 +	op.arg1.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
   2.101  	BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
   2.102  }
   2.103  
   2.104 @@ -234,7 +234,7 @@ void xen_pud_unpin(unsigned long ptr)
   2.105  {
   2.106  	struct mmuext_op op;
   2.107  	op.cmd = MMUEXT_UNPIN_TABLE;
   2.108 -	op.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
   2.109 +	op.arg1.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
   2.110  	BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
   2.111  }
   2.112  
   2.113 @@ -242,7 +242,7 @@ void xen_pmd_pin(unsigned long ptr)
   2.114  {
   2.115  	struct mmuext_op op;
   2.116  	op.cmd = MMUEXT_PIN_L2_TABLE;
   2.117 -	op.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
   2.118 +	op.arg1.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
   2.119  	BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
   2.120  }
   2.121  
   2.122 @@ -250,7 +250,7 @@ void xen_pmd_unpin(unsigned long ptr)
   2.123  {
   2.124  	struct mmuext_op op;
   2.125  	op.cmd = MMUEXT_UNPIN_TABLE;
   2.126 -	op.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
   2.127 +	op.arg1.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
   2.128  	BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
   2.129  }
   2.130  #endif /* CONFIG_X86_64 */
   2.131 @@ -260,8 +260,8 @@ void xen_set_ldt(unsigned long ptr, unsi
   2.132  {
   2.133  	struct mmuext_op op;
   2.134  	op.cmd = MMUEXT_SET_LDT;
   2.135 -	op.linear_addr = ptr;
   2.136 -	op.nr_ents = len;
   2.137 +	op.arg1.linear_addr = ptr;
   2.138 +	op.arg2.nr_ents     = len;
   2.139  	BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
   2.140  }
   2.141  
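Note: the hunks above are one mechanical rename -- the anonymous unions in struct mmuext_op are now named arg1 and arg2, so every caller must qualify the member it sets. A minimal sketch of a caller under the new naming, modelled on xen_pte_pin() above (the helper name here is illustrative, not part of the patch):

    /* Pin an L1 page table using the renamed union member (was op.mfn). */
    static void example_pin_l1(unsigned long ptr)
    {
        struct mmuext_op op;
        op.cmd      = MMUEXT_PIN_L1_TABLE;
        op.arg1.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
        BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
    }
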
     3.1 --- a/linux-2.6-xen-sparse/drivers/xen/netback/netback.c	Wed Sep 14 13:37:03 2005 +0000
     3.2 +++ b/linux-2.6-xen-sparse/drivers/xen/netback/netback.c	Wed Sep 14 14:43:34 2005 +0000
     3.3 @@ -294,7 +294,7 @@ static void net_rx_action(unsigned long 
     3.4          mcl++;
     3.5  
     3.6          mmuext->cmd = MMUEXT_REASSIGN_PAGE;
     3.7 -        mmuext->mfn = old_mfn;
     3.8 +        mmuext->arg1.mfn = old_mfn;
     3.9          mmuext++;
    3.10  #endif
    3.11          mmu->ptr = ((unsigned long long)new_mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
    3.12 @@ -355,7 +355,7 @@ static void net_rx_action(unsigned long 
    3.13  #ifdef CONFIG_XEN_NETDEV_GRANT
    3.14          old_mfn = 0; /* XXX Fix this so we can free_mfn() on error! */
    3.15  #else
    3.16 -        old_mfn = mmuext[0].mfn;
    3.17 +        old_mfn = mmuext[0].arg1.mfn;
    3.18  #endif
    3.19          atomic_set(&(skb_shinfo(skb)->dataref), 1);
    3.20          skb_shinfo(skb)->nr_frags = 0;
     4.1 --- a/linux-2.6-xen-sparse/include/asm-xen/asm-i386/mmu_context.h	Wed Sep 14 13:37:03 2005 +0000
     4.2 +++ b/linux-2.6-xen-sparse/include/asm-xen/asm-i386/mmu_context.h	Wed Sep 14 14:43:34 2005 +0000
     4.3 @@ -67,7 +67,7 @@ static inline void switch_mm(struct mm_s
     4.4  		/* Re-load page tables: load_cr3(next->pgd) */
     4.5  		per_cpu(cur_pgd, cpu) = next->pgd;
     4.6  		op->cmd = MMUEXT_NEW_BASEPTR;
     4.7 -		op->mfn = pfn_to_mfn(__pa(next->pgd) >> PAGE_SHIFT);
     4.8 +		op->arg1.mfn = pfn_to_mfn(__pa(next->pgd) >> PAGE_SHIFT);
     4.9  		op++;
    4.10  
    4.11  		/*
    4.12 @@ -76,8 +76,8 @@ static inline void switch_mm(struct mm_s
    4.13  		if (unlikely(prev->context.ldt != next->context.ldt)) {
    4.14  			/* load_LDT_nolock(&next->context, cpu) */
    4.15  			op->cmd = MMUEXT_SET_LDT;
    4.16 -			op->linear_addr = (unsigned long)next->context.ldt;
    4.17 -			op->nr_ents     = next->context.size;
    4.18 +			op->arg1.linear_addr = (unsigned long)next->context.ldt;
    4.19 +			op->arg2.nr_ents     = next->context.size;
    4.20  			op++;
    4.21  		}
    4.22  
     5.1 --- a/linux-2.6-xen-sparse/include/asm-xen/asm-x86_64/mmu_context.h	Wed Sep 14 13:37:03 2005 +0000
     5.2 +++ b/linux-2.6-xen-sparse/include/asm-xen/asm-x86_64/mmu_context.h	Wed Sep 14 14:43:34 2005 +0000
     5.3 @@ -83,19 +83,19 @@ static inline void switch_mm(struct mm_s
     5.4  		/* load_cr3(next->pgd) */
     5.5  		per_cpu(cur_pgd, smp_processor_id()) = next->pgd;
     5.6  		op->cmd = MMUEXT_NEW_BASEPTR;
     5.7 -		op->mfn = pfn_to_mfn(__pa(next->pgd) >> PAGE_SHIFT);
     5.8 +		op->arg1.mfn = pfn_to_mfn(__pa(next->pgd) >> PAGE_SHIFT);
     5.9  		op++;
    5.10  
    5.11  		/* xen_new_user_pt(__pa(__user_pgd(next->pgd))) */
    5.12  		op->cmd = MMUEXT_NEW_USER_BASEPTR;
    5.13 -		op->mfn = pfn_to_mfn(__pa(__user_pgd(next->pgd)) >> PAGE_SHIFT);
    5.14 +		op->arg1.mfn = pfn_to_mfn(__pa(__user_pgd(next->pgd)) >> PAGE_SHIFT);
    5.15  		op++;
    5.16  		
    5.17  		if (unlikely(next->context.ldt != prev->context.ldt)) {
    5.18  			/* load_LDT_nolock(&next->context, cpu) */
    5.19  			op->cmd = MMUEXT_SET_LDT;
    5.20 -			op->linear_addr = (unsigned long)next->context.ldt;
    5.21 -			op->nr_ents     = next->context.size;
    5.22 +			op->arg1.linear_addr = (unsigned long)next->context.ldt;
    5.23 +			op->arg2.nr_ents     = next->context.size;
    5.24  			op++;
    5.25  		}
    5.26  
     6.1 --- a/tools/ioemu/hw/i8259.c	Wed Sep 14 13:37:03 2005 +0000
     6.2 +++ b/tools/ioemu/hw/i8259.c	Wed Sep 14 14:43:34 2005 +0000
     6.3 @@ -128,21 +128,23 @@ static int pic_get_irq(PicState *s)
     6.4  /* pic[1] is connected to pin2 of pic[0] */
     6.5  #define CASCADE_IRQ 2
     6.6  
     6.7 -static void shared_page_update()
     6.8 +extern shared_iopage_t *shared_page;
     6.9 +
    6.10 +static void xen_update_shared_imr(void)
    6.11  {
    6.12 -    extern shared_iopage_t *shared_page;
    6.13 -    uint8_t * pmask = (uint8_t *)&(shared_page->sp_global.pic_mask[0]);
    6.14 -    int           index;
    6.15 +    uint8_t *pmask = (uint8_t *)shared_page->sp_global.pic_mask;
    6.16 +    int      index;
    6.17  
    6.18      index = pics[0].irq_base/8;
    6.19      pmask[index] = pics[0].imr;
    6.20 -    index = pics[1].irq_base/8;
    6.21  
    6.22 -    if ( pics[0].imr &  (1 << CASCADE_IRQ) ) {
    6.23 -        pmask[index] = 0xff;
    6.24 -    } else {
    6.25 -        pmask[index] = pics[1].imr;
    6.26 -    }
    6.27 +    index = pics[1].irq_base/8;
    6.28 +    pmask[index] = (pics[0].imr & (1 << CASCADE_IRQ)) ? 0xff : pics[1].imr;
    6.29 +}
    6.30 +
    6.31 +static void xen_clear_shared_irr(void)
    6.32 +{
    6.33 +    memset(shared_page->sp_global.pic_intr, 0, INTR_LEN);
    6.34  }
    6.35  
    6.36  /* raise irq to CPU if necessary. must be called every time the active
    6.37 @@ -174,7 +176,8 @@ static void pic_update_irq(void)
    6.38  #endif
    6.39          cpu_interrupt(cpu_single_env, CPU_INTERRUPT_HARD);
    6.40      }
    6.41 -    shared_page_update();
    6.42 +
    6.43 +    xen_update_shared_imr();
    6.44  }
    6.45  
    6.46  #ifdef DEBUG_IRQ_LATENCY
    6.47 @@ -283,7 +286,9 @@ static void pic_reset(void *opaque)
    6.48      tmp = s->elcr_mask;
    6.49      memset(s, 0, sizeof(PicState));
    6.50      s->elcr_mask = tmp;
    6.51 -    shared_page_update();
    6.52 +
    6.53 +    xen_update_shared_imr();
    6.54 +    xen_clear_shared_irr();
    6.55  }
    6.56  
    6.57  static void pic_ioport_write(void *opaque, uint32_t addr, uint32_t val)
     7.1 --- a/tools/libxc/xc_linux_restore.c	Wed Sep 14 13:37:03 2005 +0000
     7.2 +++ b/tools/libxc/xc_linux_restore.c	Wed Sep 14 14:43:34 2005 +0000
     7.3 @@ -421,7 +421,7 @@ int xc_linux_restore(int xc_handle, int 
     7.4              pin[nr_pins].cmd = MMUEXT_PIN_L1_TABLE;
     7.5          else /* pfn_type[i] == (L2TAB|LPINTAB) */
     7.6              pin[nr_pins].cmd = MMUEXT_PIN_L2_TABLE;
     7.7 -        pin[nr_pins].mfn = pfn_to_mfn_table[i];
     7.8 +        pin[nr_pins].arg1.mfn = pfn_to_mfn_table[i];
     7.9          if ( ++nr_pins == MAX_PIN_BATCH )
    7.10          {
    7.11              if ( xc_mmuext_op(xc_handle, pin, nr_pins, dom) < 0 )
     8.1 --- a/tools/libxc/xc_vmx_build.c	Wed Sep 14 13:37:03 2005 +0000
     8.2 +++ b/tools/libxc/xc_vmx_build.c	Wed Sep 14 14:43:34 2005 +0000
     8.3 @@ -169,21 +169,35 @@ static int zap_mmio_range(int xc_handle,
     8.4     l2_pgentry_t *vl2tab;
     8.5   
     8.6     mmio_addr = mmio_range_start & PAGE_MASK;
     8.7 -   for (; mmio_addr < mmio_range_end; mmio_addr += PAGE_SIZE) {
     8.8 +   for ( ; mmio_addr < mmio_range_end; mmio_addr += PAGE_SIZE )
     8.9 +   {
    8.10         vl3e = vl3tab[l3_table_offset(mmio_addr)];
    8.11 -       if (vl3e == 0)
    8.12 +       if ( vl3e == 0 )
    8.13             continue;
    8.14 -       vl2tab = xc_map_foreign_range(xc_handle, dom, PAGE_SIZE,
    8.15 -               PROT_READ|PROT_WRITE, vl3e >> PAGE_SHIFT);
    8.16 -       if (vl2tab == 0) {
    8.17 +
    8.18 +       vl2tab = xc_map_foreign_range(
    8.19 +           xc_handle, dom, PAGE_SIZE, PROT_READ|PROT_WRITE, vl3e>>PAGE_SHIFT);
    8.20 +       if ( vl2tab == NULL )
    8.21 +       {
    8.22             PERROR("Failed zap MMIO range");
    8.23             return -1;
    8.24         }
    8.25 +
    8.26         vl2e = vl2tab[l2_table_offset(mmio_addr)];
    8.27 -       if (vl2e == 0)
    8.28 +       if ( vl2e == 0 )
    8.29 +       {
    8.30 +           munmap(vl2tab, PAGE_SIZE);
    8.31             continue;
    8.32 -       vl1tab = xc_map_foreign_range(xc_handle, dom, PAGE_SIZE,
    8.33 -               PROT_READ|PROT_WRITE, vl2e >> PAGE_SHIFT);
    8.34 +       }
    8.35 +
    8.36 +       vl1tab = xc_map_foreign_range(
    8.37 +           xc_handle, dom, PAGE_SIZE, PROT_READ|PROT_WRITE, vl2e>>PAGE_SHIFT);
    8.38 +       if ( vl1tab == NULL )
    8.39 +       {
    8.40 +           PERROR("Failed zap MMIO range");
    8.41 +           munmap(vl2tab, PAGE_SIZE);
    8.42 +           return -1;
    8.43 +       }
    8.44  
    8.45         vl1tab[l1_table_offset(mmio_addr)] = 0;
    8.46         munmap(vl2tab, PAGE_SIZE);
     9.1 --- a/tools/libxc/xg_private.c	Wed Sep 14 13:37:03 2005 +0000
     9.2 +++ b/tools/libxc/xg_private.c	Wed Sep 14 14:43:34 2005 +0000
     9.3 @@ -65,7 +65,7 @@ int pin_table(
     9.4      struct mmuext_op op;
     9.5  
     9.6      op.cmd = type;
     9.7 -    op.mfn = mfn;
     9.8 +    op.arg1.mfn = mfn;
     9.9  
    9.10      if ( xc_mmuext_op(xc_handle, &op, 1, dom) < 0 )
    9.11          return 1;
    13.1 --- a/xen/arch/x86/mm.c	Wed Sep 14 13:37:03 2005 +0000
    13.2 +++ b/xen/arch/x86/mm.c	Wed Sep 14 14:43:34 2005 +0000
    13.3 @@ -1659,7 +1659,7 @@ int do_mmuext_op(
    13.4  {
    13.5      struct mmuext_op op;
    13.6      int rc = 0, i = 0, okay, cpu = smp_processor_id();
    13.7 -    unsigned long type, done = 0;
    13.8 +    unsigned long mfn, type, done = 0;
    13.9      struct pfn_info *page;
   13.10      struct vcpu *v = current;
   13.11      struct domain *d = v->domain, *e;
   13.12 @@ -1706,7 +1706,8 @@ int do_mmuext_op(
   13.13          }
   13.14  
   13.15          okay = 1;
   13.16 -        page = &frame_table[op.mfn];
   13.17 +        mfn  = op.arg1.mfn;
   13.18 +        page = &frame_table[mfn];
   13.19  
   13.20          switch ( op.cmd )
   13.21          {
   13.22 @@ -1717,17 +1718,17 @@ int do_mmuext_op(
   13.23              if ( shadow_mode_refcounts(FOREIGNDOM) )
   13.24                  type = PGT_writable_page;
   13.25  
   13.26 -            okay = get_page_and_type_from_pagenr(op.mfn, type, FOREIGNDOM);
   13.27 +            okay = get_page_and_type_from_pagenr(mfn, type, FOREIGNDOM);
   13.28              if ( unlikely(!okay) )
   13.29              {
   13.30 -                MEM_LOG("Error while pinning mfn %lx", op.mfn);
   13.31 +                MEM_LOG("Error while pinning mfn %lx", mfn);
   13.32                  break;
   13.33              }
   13.34              
   13.35              if ( unlikely(test_and_set_bit(_PGT_pinned,
   13.36                                             &page->u.inuse.type_info)) )
   13.37              {
   13.38 -                MEM_LOG("Mfn %lx already pinned", op.mfn);
   13.39 +                MEM_LOG("Mfn %lx already pinned", mfn);
   13.40                  put_page_and_type(page);
   13.41                  okay = 0;
   13.42                  break;
   13.43 @@ -1750,10 +1751,10 @@ int do_mmuext_op(
   13.44              goto pin_page;
   13.45  
   13.46          case MMUEXT_UNPIN_TABLE:
   13.47 -            if ( unlikely(!(okay = get_page_from_pagenr(op.mfn, FOREIGNDOM))) )
   13.48 +            if ( unlikely(!(okay = get_page_from_pagenr(mfn, FOREIGNDOM))) )
   13.49              {
   13.50                  MEM_LOG("Mfn %lx bad domain (dom=%p)",
   13.51 -                        op.mfn, page_get_owner(page));
   13.52 +                        mfn, page_get_owner(page));
   13.53              }
   13.54              else if ( likely(test_and_clear_bit(_PGT_pinned, 
   13.55                                                  &page->u.inuse.type_info)) )
   13.56 @@ -1765,28 +1766,28 @@ int do_mmuext_op(
   13.57              {
   13.58                  okay = 0;
   13.59                  put_page(page);
   13.60 -                MEM_LOG("Mfn %lx not pinned", op.mfn);
   13.61 +                MEM_LOG("Mfn %lx not pinned", mfn);
   13.62              }
   13.63              break;
   13.64  
   13.65          case MMUEXT_NEW_BASEPTR:
   13.66 -            okay = new_guest_cr3(op.mfn);
   13.67 +            okay = new_guest_cr3(mfn);
   13.68              percpu_info[cpu].deferred_ops &= ~DOP_FLUSH_TLB;
   13.69              break;
   13.70          
   13.71  #ifdef __x86_64__
   13.72          case MMUEXT_NEW_USER_BASEPTR:
   13.73              okay = get_page_and_type_from_pagenr(
   13.74 -                op.mfn, PGT_root_page_table, d);
   13.75 +                mfn, PGT_root_page_table, d);
   13.76              if ( unlikely(!okay) )
   13.77              {
   13.78 -                MEM_LOG("Error while installing new mfn %lx", op.mfn);
   13.79 +                MEM_LOG("Error while installing new mfn %lx", mfn);
   13.80              }
   13.81              else
   13.82              {
   13.83                  unsigned long old_mfn =
   13.84                      pagetable_get_pfn(v->arch.guest_table_user);
   13.85 -                v->arch.guest_table_user = mk_pagetable(op.mfn << PAGE_SHIFT);
   13.86 +                v->arch.guest_table_user = mk_pagetable(mfn << PAGE_SHIFT);
   13.87                  if ( old_mfn != 0 )
   13.88                      put_page_and_type(&frame_table[old_mfn]);
   13.89              }
   13.90 @@ -1799,8 +1800,8 @@ int do_mmuext_op(
   13.91      
   13.92          case MMUEXT_INVLPG_LOCAL:
   13.93              if ( shadow_mode_enabled(d) )
   13.94 -                shadow_invlpg(v, op.linear_addr);
   13.95 -            local_flush_tlb_one(op.linear_addr);
   13.96 +                shadow_invlpg(v, op.arg1.linear_addr);
   13.97 +            local_flush_tlb_one(op.arg1.linear_addr);
   13.98              break;
   13.99  
  13.100          case MMUEXT_TLB_FLUSH_MULTI:
  13.101 @@ -1808,7 +1809,7 @@ int do_mmuext_op(
  13.102          {
  13.103              unsigned long vmask;
  13.104              cpumask_t     pmask;
  13.105 -            if ( unlikely(get_user(vmask, (unsigned long *)op.vcpumask)) )
  13.106 +            if ( unlikely(get_user(vmask, (unsigned long *)op.arg2.vcpumask)) )
  13.107              {
  13.108                  okay = 0;
  13.109                  break;
  13.110 @@ -1818,7 +1819,7 @@ int do_mmuext_op(
  13.111              if ( op.cmd == MMUEXT_TLB_FLUSH_MULTI )
  13.112                  flush_tlb_mask(pmask);
  13.113              else
  13.114 -                flush_tlb_one_mask(pmask, op.linear_addr);
  13.115 +                flush_tlb_one_mask(pmask, op.arg1.linear_addr);
  13.116              break;
  13.117          }
  13.118  
  13.119 @@ -1827,7 +1828,7 @@ int do_mmuext_op(
  13.120              break;
  13.121      
  13.122          case MMUEXT_INVLPG_ALL:
  13.123 -            flush_tlb_one_mask(d->cpumask, op.linear_addr);
  13.124 +            flush_tlb_one_mask(d->cpumask, op.arg1.linear_addr);
  13.125              break;
  13.126  
  13.127          case MMUEXT_FLUSH_CACHE:
  13.128 @@ -1852,8 +1853,8 @@ int do_mmuext_op(
  13.129                  break;
  13.130              }
  13.131  
  13.132 -            unsigned long ptr  = op.linear_addr;
  13.133 -            unsigned long ents = op.nr_ents;
  13.134 +            unsigned long ptr  = op.arg1.linear_addr;
  13.135 +            unsigned long ents = op.arg2.nr_ents;
  13.136              if ( ((ptr & (PAGE_SIZE-1)) != 0) || 
  13.137                   (ents > 8192) ||
  13.138                   !array_access_ok(ptr, ents, LDT_ENTRY_SIZE) )
  13.139 @@ -1886,7 +1887,7 @@ int do_mmuext_op(
  13.140              e = percpu_info[cpu].foreign;
  13.141              if ( unlikely(e == NULL) )
  13.142              {
  13.143 -                MEM_LOG("No FOREIGNDOM to reassign mfn %lx to", op.mfn);
  13.144 +                MEM_LOG("No FOREIGNDOM to reassign mfn %lx to", mfn);
  13.145                  okay = 0;
  13.146                  break;
  13.147              }
  13.148 @@ -1919,7 +1920,7 @@ int do_mmuext_op(
  13.149              {
  13.150                  MEM_LOG("Transferee has no reservation headroom (%d,%d), or "
  13.151                          "page is in Xen heap (%lx), or dom is dying (%ld).",
  13.152 -                        e->tot_pages, e->max_pages, op.mfn, e->domain_flags);
  13.153 +                        e->tot_pages, e->max_pages, mfn, e->domain_flags);
  13.154                  okay = 0;
  13.155                  goto reassign_fail;
  13.156              }
    14.1 --- a/xen/arch/x86/vmx.c	Wed Sep 14 13:37:03 2005 +0000
    14.2 +++ b/xen/arch/x86/vmx.c	Wed Sep 14 14:43:34 2005 +0000
    14.3 @@ -1021,7 +1021,7 @@ static int vmx_set_cr0(unsigned long val
    14.4       * CR0: We don't want to lose PE and PG.
    14.5       */
    14.6      paging_enabled = vmx_paging_enabled(d);
    14.7 -    __vmwrite(GUEST_CR0, (value | X86_CR0_PE | X86_CR0_PG));
    14.8 +    __vmwrite(GUEST_CR0, value | X86_CR0_PE | X86_CR0_PG | X86_CR0_NE);
    14.9      __vmwrite(CR0_READ_SHADOW, value);
   14.10  
   14.11      VMX_DBG_LOG(DBG_LEVEL_VMMU, "Update CR0 value = %lx\n", value);
    15.1 --- a/xen/arch/x86/vmx_intercept.c	Wed Sep 14 13:37:03 2005 +0000
    15.2 +++ b/xen/arch/x86/vmx_intercept.c	Wed Sep 14 14:43:34 2005 +0000
    15.3 @@ -227,6 +227,7 @@ void vmx_hooks_assist(struct vcpu *d)
    15.4      u64 *intr = &(sp->sp_global.pic_intr[0]);
    15.5      struct vmx_virpit_t *vpit = &(d->domain->arch.vmx_platform.vmx_pit);
    15.6      int rw_mode, reinit = 0;
    15.7 +    int oldvec = 0;
    15.8  
    15.9      /* load init count*/
   15.10      if (p->state == STATE_IORESP_HOOK) { 
   15.11 @@ -235,6 +236,7 @@ void vmx_hooks_assist(struct vcpu *d)
   15.12              VMX_DBG_LOG(DBG_LEVEL_1, "VMX_PIT: guest reset PIT with channel %lx!\n", (unsigned long) ((p->u.data >> 24) & 0x3) );
   15.13              rem_ac_timer(&(vpit->pit_timer));
   15.14              reinit = 1;
   15.15 +            oldvec = vpit->vector;
   15.16          }
   15.17          else
   15.18              init_ac_timer(&vpit->pit_timer, pit_timer_fn, vpit, d->processor);
   15.19 @@ -250,6 +252,12 @@ void vmx_hooks_assist(struct vcpu *d)
   15.20              vpit->period = 1000000;
   15.21          }
   15.22          vpit->vector = ((p->u.data >> 16) & 0xFF);
   15.23 +
   15.24 +        if( reinit && oldvec != vpit->vector){
   15.25 +            clear_bit(oldvec, intr);
   15.26 +            vpit->pending_intr_nr = 0;
   15.27 +        }
   15.28 +
   15.29          vpit->channel = ((p->u.data >> 24) & 0x3);
   15.30          vpit->first_injected = 0;
   15.31  
    16.1 --- a/xen/include/public/arch-x86_64.h	Wed Sep 14 13:37:03 2005 +0000
    16.2 +++ b/xen/include/public/arch-x86_64.h	Wed Sep 14 14:43:34 2005 +0000
    16.3 @@ -124,30 +124,38 @@ typedef struct trap_info {
    16.4      unsigned long address; /* code offset                                   */
    16.5  } trap_info_t;
    16.6  
    16.7 +#ifdef __GNUC__
    16.8 +/* Anonymous union includes both 32- and 64-bit names (e.g., eax/rax). */
    16.9 +#define __DECL_REG(name) union { u64 r ## name, e ## name; }
   16.10 +#else
   16.11 +/* Non-gcc sources must always use the proper 64-bit name (e.g., rax). */
   16.12 +#define __DECL_REG(name) u64 r ## name
   16.13 +#endif
   16.14 +
   16.15  typedef struct cpu_user_regs {
   16.16      u64 r15;
   16.17      u64 r14;
   16.18      u64 r13;
   16.19      u64 r12;
   16.20 -    union { u64 rbp, ebp; };
   16.21 -    union { u64 rbx, ebx; };
   16.22 +    __DECL_REG(bp);
   16.23 +    __DECL_REG(bx);
   16.24      u64 r11;
   16.25      u64 r10;
   16.26      u64 r9;
   16.27      u64 r8;
   16.28 -    union { u64 rax, eax; };
   16.29 -    union { u64 rcx, ecx; };
   16.30 -    union { u64 rdx, edx; };
   16.31 -    union { u64 rsi, esi; };
   16.32 -    union { u64 rdi, edi; };
   16.33 +    __DECL_REG(ax);
   16.34 +    __DECL_REG(cx);
   16.35 +    __DECL_REG(dx);
   16.36 +    __DECL_REG(si);
   16.37 +    __DECL_REG(di);
   16.38      u32 error_code;    /* private */
   16.39      u32 entry_vector;  /* private */
   16.40 -    union { u64 rip, eip; };
   16.41 +    __DECL_REG(ip);
   16.42      u16 cs, _pad0[1];
   16.43      u8  saved_upcall_mask;
   16.44      u8  _pad1[3];
   16.45 -    union { u64 rflags, eflags; };
   16.46 -    union { u64 rsp, esp; };
   16.47 +    __DECL_REG(flags);
   16.48 +    __DECL_REG(sp);
   16.49      u16 ss, _pad2[3];
   16.50      u16 es, _pad3[3];
   16.51      u16 ds, _pad4[3];
   16.52 @@ -155,6 +163,8 @@ typedef struct cpu_user_regs {
   16.53      u16 gs, _pad6[3]; /* Non-zero => takes precedence over gs_base_user. */
   16.54  } cpu_user_regs_t;
   16.55  
   16.56 +#undef __DECL_REG
   16.57 +
   16.58  typedef u64 tsc_timestamp_t; /* RDTSC timestamp */
   16.59  
   16.60  /*
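The __DECL_REG() macro above preserves the dual 32/64-bit register names for gcc builds while giving other compilers a single canonical 64-bit field. A sketch of the two expansions for the ax slot, taken directly from the macro definitions above:

    /* __GNUC__ defined: eax and rax alias the same 64-bit slot. */
    union { u64 rax, eax; };    /* __DECL_REG(ax) */

    /* __GNUC__ not defined: only the canonical 64-bit name exists. */
    u64 rax;                    /* __DECL_REG(ax) */

So gcc-built code can keep referring to regs->eax, whereas strictly conforming compilers must use regs->rax.
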
    17.1 --- a/xen/include/public/xen.h	Wed Sep 14 13:37:03 2005 +0000
    17.2 +++ b/xen/include/public/xen.h	Wed Sep 14 14:43:34 2005 +0000
    17.3 @@ -174,13 +174,13 @@ struct mmuext_op {
    17.4          unsigned long mfn;
    17.5          /* INVLPG_LOCAL, INVLPG_ALL, SET_LDT */
    17.6          unsigned long linear_addr;
    17.7 -    };
    17.8 +    } arg1;
    17.9      union {
   17.10          /* SET_LDT */
   17.11          unsigned int nr_ents;
   17.12          /* TLB_FLUSH_MULTI, INVLPG_MULTI */
   17.13          void *vcpumask;
   17.14 -    };
   17.15 +    } arg2;
   17.16  };
   17.17  #endif
   17.18
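
For reference, a sketch of the public mmuext_op interface after this change, reconstructed from the hunk above (the leading cmd field and the per-command comment on mfn are assumed from the surrounding definition and from the callers earlier in this changeset; they are not shown in the diff):

    struct mmuext_op {
        unsigned int cmd;
        union {
            /* PIN/UNPIN_TABLE, NEW_BASEPTR, NEW_USER_BASEPTR, REASSIGN_PAGE */
            unsigned long mfn;
            /* INVLPG_LOCAL, INVLPG_ALL, SET_LDT */
            unsigned long linear_addr;
        } arg1;
        union {
            /* SET_LDT */
            unsigned int nr_ents;
            /* TLB_FLUSH_MULTI, INVLPG_MULTI */
            void *vcpumask;
        } arg2;
    };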