ia64/xen-unstable

changeset 3757:bb187d778f52

bitkeeper revision 1.1159.1.562 (420a5d22B5wTDaFFOL7CfKG24g-JCA)

Merge freefall.cl.cam.ac.uk:/auto/groups/xeno-xenod/BK/xen-unstable.bk
into freefall.cl.cam.ac.uk:/auto/groups/xeno/users/iap10/xeno-clone/xen-unstable.bk
author iap10@freefall.cl.cam.ac.uk
date Wed Feb 09 18:57:38 2005 +0000 (2005-02-09)
parents 1494093616a3 be9bb69e54cf
children 736089c11af9 a8a0bf99c4fb
files xen/arch/x86/vmx.c xen/arch/x86/vmx_platform.c xen/include/asm-x86/shadow.h xen/include/asm-x86/vmx_platform.h
line diff
     1.1 --- a/xen/arch/x86/vmx.c	Wed Feb 09 17:14:35 2005 +0000
     1.2 +++ b/xen/arch/x86/vmx.c	Wed Feb 09 18:57:38 2005 +0000
     1.3 @@ -109,11 +109,9 @@ static void inline __update_guest_eip(un
     1.4  
     1.5  static int vmx_do_page_fault(unsigned long va, unsigned long error_code) 
     1.6  {
     1.7 -    unsigned long eip, pfn;
     1.8 -    unsigned int index;
     1.9 -    unsigned long gpde = 0, gpte, gpa;
    1.10 +    unsigned long eip;
    1.11 +    unsigned long gpa;
    1.12      int result;
    1.13 -    struct exec_domain *ed = current;
    1.14  
    1.15  #if VMX_DEBUG
    1.16      {
    1.17 @@ -123,32 +121,13 @@ static int vmx_do_page_fault(unsigned lo
    1.18                  va, eip, error_code);
    1.19      }
    1.20  #endif
    1.21 -    /*
    1.22 -     * Set up guest page directory cache to make linear_pt_table[] work.
    1.23 -     */
    1.24 -    __guest_get_l2e(ed, va, &gpde);
    1.25 -    if (!(gpde & _PAGE_PRESENT))
    1.26 +
    1.27 +    gpa = gva_to_gpa(va);
    1.28 +    if (!gpa)
    1.29          return 0;
    1.30  
    1.31 -    index = (va >> L2_PAGETABLE_SHIFT);
    1.32 -    if (!l2_pgentry_val(ed->arch.guest_pl2e_cache[index])) {
    1.33 -        pfn = phys_to_machine_mapping(gpde >> PAGE_SHIFT);
    1.34 -
    1.35 -        VMX_DBG_LOG(DBG_LEVEL_VMMU, "vmx_do_page_fault: pagetable = %lx\n",
    1.36 -                pagetable_val(ed->arch.pagetable));
    1.37 -
    1.38 -        ed->arch.guest_pl2e_cache[index] = 
    1.39 -            mk_l2_pgentry((pfn << PAGE_SHIFT) | __PAGE_HYPERVISOR);
    1.40 -    }
    1.41 -    
    1.42 -    if (unlikely(__get_user(gpte, (unsigned long *)
    1.43 -                            &linear_pg_table[va >> PAGE_SHIFT])))
    1.44 -        return 0;
    1.45 -    
    1.46 -    gpa = (gpte & PAGE_MASK) | (va & (PAGE_SIZE - 1));
    1.47 -
    1.48      if (mmio_space(gpa))
    1.49 -        handle_mmio(va, gpte, gpa);
    1.50 +        handle_mmio(va, gpa);
    1.51  
    1.52      if ((result = shadow_fault(va, error_code)))
    1.53          return result;
    1.54 @@ -299,35 +278,6 @@ static inline void guest_pl2e_cache_inva
    1.55      memset(ed->arch.guest_pl2e_cache, 0, PAGE_SIZE);
    1.56  }
    1.57  
    1.58 -inline unsigned long gva_to_gpa(unsigned long gva)
    1.59 -{
    1.60 -    unsigned long gpde, gpte, pfn, index;
    1.61 -    struct exec_domain *ed = current;
    1.62 -
    1.63 -    __guest_get_l2e(ed, gva, &gpde);
    1.64 -    index = (gva >> L2_PAGETABLE_SHIFT);
    1.65 -
    1.66 -    pfn = phys_to_machine_mapping(gpde >> PAGE_SHIFT);
    1.67 -
    1.68 -    ed->arch.guest_pl2e_cache[index] = 
    1.69 -            mk_l2_pgentry((pfn << PAGE_SHIFT) | __PAGE_HYPERVISOR);
    1.70 -
    1.71 -    if ( unlikely(__get_user(gpte, (unsigned long *)
    1.72 -                             &linear_pg_table[gva >> PAGE_SHIFT])) )
    1.73 -    {
    1.74 -        printk("gva_to_gpa EXIT: read gpte faulted" );
    1.75 -        return 0;
    1.76 -    }
    1.77 -
    1.78 -    if ( !(gpte & _PAGE_PRESENT) )
    1.79 -    {
    1.80 -        printk("gva_to_gpa - EXIT: gpte not present (%lx)",gpte );
    1.81 -        return 0;
    1.82 -    }
    1.83 -
    1.84 -    return (gpte & PAGE_MASK) + (gva & ~PAGE_MASK); 
    1.85 -}
    1.86 -
    1.87  static void vmx_io_instruction(struct xen_regs *regs, 
    1.88                     unsigned long exit_qualification, unsigned long inst_len) 
    1.89  {
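
Taken together, the vmx.c hunks collapse the open-coded guest page-table walk in vmx_do_page_fault() into a single call to the new gva_to_gpa() helper (now defined in shadow.h, later in this diff), and handle_mmio() consequently loses its gpte argument. The following is a minimal stand-alone sketch, not Xen code, of the two-level gva-to-gpa translation that helper performs; the toy tables guest_l2[]/guest_l1[][], the main() driver and the concrete addresses are invented for illustration, with constants mirroring x86-32 non-PAE paging.

    /* Toy model of the gva_to_gpte()/gva_to_gpa() split introduced by this
     * changeset.  Hypothetical sketch only: guest_l2[] and guest_l1[][] stand
     * in for the guest page directory and page tables (4 KiB pages, 1024
     * entries per level). */
    #include <stdio.h>

    #define PAGE_SHIFT         12
    #define PAGE_SIZE          (1UL << PAGE_SHIFT)
    #define PAGE_MASK          (~(PAGE_SIZE - 1))
    #define L2_PAGETABLE_SHIFT 22
    #define _PAGE_PRESENT      0x1UL

    static unsigned long guest_l2[1024];          /* guest page directory */
    static unsigned long guest_l1[1024][1024];    /* guest page tables    */

    static unsigned long toy_gva_to_gpte(unsigned long gva)
    {
        unsigned long gpde = guest_l2[gva >> L2_PAGETABLE_SHIFT];
        if (!(gpde & _PAGE_PRESENT))
            return 0;                             /* directory entry absent */
        return guest_l1[gva >> L2_PAGETABLE_SHIFT]
                       [(gva >> PAGE_SHIFT) & 0x3ff];
    }

    static unsigned long toy_gva_to_gpa(unsigned long gva)
    {
        unsigned long gpte = toy_gva_to_gpte(gva);
        if (!(gpte & _PAGE_PRESENT))
            return 0;                             /* page not mapped */
        /* Keep the frame from the PTE, fold the page offset back in. */
        return (gpte & PAGE_MASK) + (gva & ~PAGE_MASK);
    }

    int main(void)
    {
        unsigned long gva = 0x00401234UL;         /* arbitrary guest VA */
        /* Mark the containing 4 MiB region and the page as present. */
        guest_l2[gva >> L2_PAGETABLE_SHIFT] = 0x00c00000UL | _PAGE_PRESENT;
        guest_l1[gva >> L2_PAGETABLE_SHIFT][(gva >> PAGE_SHIFT) & 0x3ff] =
            0x12345000UL | _PAGE_PRESENT;
        printf("gpa = %#lx\n", toy_gva_to_gpa(gva));  /* -> 0x12345234 */
        return 0;
    }

In the real fault path a returned gpa of 0 is treated as "translation failed" (if (!gpa) return 0;), so only translatable addresses reach the mmio_space()/shadow_fault() dispatch.
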
     2.1 --- a/xen/arch/x86/vmx_platform.c	Wed Feb 09 17:14:35 2005 +0000
     2.2 +++ b/xen/arch/x86/vmx_platform.c	Wed Feb 09 18:57:38 2005 +0000
     2.3 @@ -378,12 +378,7 @@ static int inst_copy_from_guest(char *bu
     2.4      }
     2.5  
     2.6      if ((guest_eip & PAGE_MASK) == ((guest_eip + inst_len) & PAGE_MASK)) {
     2.7 -        if ( unlikely(__get_user(gpte, (unsigned long *)
     2.8 -                                 &linear_pg_table[guest_eip >> PAGE_SHIFT])) )
     2.9 -            {
    2.10 -                printk("inst_copy_from_guest- EXIT: read gpte faulted" );
    2.11 -                return 0;
    2.12 -            }
    2.13 +        gpte = gva_to_gpte(guest_eip);
    2.14          mfn = phys_to_machine_mapping(gpte >> PAGE_SHIFT);
    2.15          ma = (mfn << PAGE_SHIFT) | (guest_eip & (PAGE_SIZE - 1));
    2.16          inst_start = (unsigned char *)map_domain_mem(ma);
    2.17 @@ -392,6 +387,7 @@ static int inst_copy_from_guest(char *bu
    2.18          unmap_domain_mem(inst_start);
    2.19      } else {
    2.20          // Todo: In two page frames
    2.21 +        BUG();
    2.22      }
    2.23          
    2.24      return inst_len;
    2.25 @@ -432,7 +428,6 @@ static void send_mmio_req(unsigned long 
    2.26      ioreq_t *p;
    2.27      struct mi_per_cpu_info *mpci_p;
    2.28      struct xen_regs *inst_decoder_regs;
    2.29 -    extern inline unsigned long gva_to_gpa(unsigned long gva);
    2.30      extern long evtchn_send(int lport);
    2.31      extern long do_block(void);
    2.32  
    2.33 @@ -476,7 +471,7 @@ static void send_mmio_req(unsigned long 
    2.34  
    2.35  }
    2.36  
    2.37 -void handle_mmio(unsigned long va, unsigned long gpte, unsigned long gpa)
    2.38 +void handle_mmio(unsigned long va, unsigned long gpa)
    2.39  {
    2.40      unsigned long eip;
    2.41      unsigned long inst_len;
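
In vmx_platform.c, inst_copy_from_guest() now obtains the guest PTE via gva_to_gpte() instead of reading linear_pg_table[] directly, and the still-unimplemented case of an instruction straddling two page frames becomes a hard BUG(). The same-page test guarding the single-frame copy path is plain mask arithmetic; a stand-alone illustration follows (not Xen code, the eip values are made up for the example).

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)
    #define PAGE_MASK  (~(PAGE_SIZE - 1))

    /* True when guest_eip and guest_eip + inst_len fall in the same 4 KiB
     * frame; this mirrors the test used in inst_copy_from_guest(), and the
     * BUG() added by this changeset fires when it is false. */
    static int same_page(unsigned long guest_eip, unsigned long inst_len)
    {
        return (guest_eip & PAGE_MASK) == ((guest_eip + inst_len) & PAGE_MASK);
    }

    int main(void)
    {
        printf("%d\n", same_page(0x8048ffdUL, 5)); /* 0: crosses into next page */
        printf("%d\n", same_page(0x8048f00UL, 5)); /* 1: fits in one page       */
        return 0;
    }
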
     3.1 --- a/xen/include/asm-x86/shadow.h	Wed Feb 09 17:14:35 2005 +0000
     3.2 +++ b/xen/include/asm-x86/shadow.h	Wed Feb 09 18:57:38 2005 +0000
     3.3 @@ -684,6 +684,41 @@ static inline void vmx_update_shadow_sta
     3.4      unmap_domain_mem(mpl2e);
     3.5  }
     3.6  
     3.7 +static inline unsigned long gva_to_gpte(unsigned long gva)
     3.8 +{
     3.9 +    unsigned long gpde, gpte, pfn, index;
    3.10 +    struct exec_domain *ed = current;
    3.11 +
    3.12 +    __guest_get_l2e(ed, gva, &gpde);
    3.13 +    if (!(gpde & _PAGE_PRESENT))
    3.14 +        return 0;
    3.15 +
    3.16 +    index = (gva >> L2_PAGETABLE_SHIFT);
    3.17 +
    3.18 +    if (!l2_pgentry_val(ed->arch.guest_pl2e_cache[index])) {
    3.19 +        pfn = phys_to_machine_mapping(gpde >> PAGE_SHIFT);
    3.20 +        ed->arch.guest_pl2e_cache[index] = 
    3.21 +            mk_l2_pgentry((pfn << PAGE_SHIFT) | __PAGE_HYPERVISOR);
    3.22 +    }
    3.23 +
    3.24 +    if ( unlikely(__get_user(gpte, (unsigned long *)
    3.25 +                             &linear_pg_table[gva >> PAGE_SHIFT])) )
    3.26 +        return 0;
    3.27 +
    3.28 +    return gpte;
    3.29 +}
    3.30 +
    3.31 +static inline unsigned long gva_to_gpa(unsigned long gva)
    3.32 +{
    3.33 +    unsigned long gpte;
    3.34 +
    3.35 +    gpte = gva_to_gpte(gva);
    3.36 +    if ( !(gpte & _PAGE_PRESENT) )
    3.37 +        return 0;
    3.38 +
    3.39 +    return (gpte & PAGE_MASK) + (gva & ~PAGE_MASK); 
    3.40 +}
    3.41 +
    3.42  #endif /* CONFIG_VMX */
    3.43  
    3.44  static inline void __shadow_mk_pagetable(struct exec_domain *ed)
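
The shadow.h hunk moves the guest walk into two inline helpers: gva_to_gpte(), which checks the guest L2 entry, fills ed->arch.guest_pl2e_cache[] on first use so the linear_pg_table[] dereference works, and returns the raw guest PTE; and gva_to_gpa(), which rejects non-present PTEs and adds the page offset back in. The sketch below isolates just the fill-on-demand caching step; it is not Xen code, and the names l2_cache, lookup_machine_frame and cached_l2e are invented for the illustration.

    #include <stdio.h>

    #define PAGE_SHIFT         12
    #define L2_PAGETABLE_SHIFT 22
    #define _PAGE_PRESENT      0x1UL

    static unsigned long l2_cache[1024];   /* stands in for guest_pl2e_cache */
    static unsigned long fills;            /* counts slow-path fills         */

    /* Stand-in for phys_to_machine_mapping(): pretend gfn == mfn here. */
    static unsigned long lookup_machine_frame(unsigned long gfn) { return gfn; }

    static unsigned long cached_l2e(unsigned long gva, unsigned long gpde)
    {
        unsigned long index = gva >> L2_PAGETABLE_SHIFT;

        if (!l2_cache[index]) {            /* first touch of this 4 MiB slot */
            unsigned long mfn = lookup_machine_frame(gpde >> PAGE_SHIFT);
            l2_cache[index] = (mfn << PAGE_SHIFT) | _PAGE_PRESENT;
            fills++;
        }
        return l2_cache[index];
    }

    int main(void)
    {
        unsigned long gpde = (0x1234UL << PAGE_SHIFT) | _PAGE_PRESENT;

        cached_l2e(0x00400000UL, gpde);    /* fills slot 1                 */
        cached_l2e(0x004ff000UL, gpde);    /* same slot: served from cache */
        printf("fills = %lu\n", fills);    /* prints 1                     */
        return 0;
    }
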
     4.1 --- a/xen/include/asm-x86/vmx_platform.h	Wed Feb 09 17:14:35 2005 +0000
     4.2 +++ b/xen/include/asm-x86/vmx_platform.h	Wed Feb 09 18:57:38 2005 +0000
     4.3 @@ -86,7 +86,7 @@ struct virutal_platform_def {
     4.4      struct mi_per_cpu_info mpci;            /* MMIO */
     4.5  };
     4.6  
     4.7 -extern void handle_mmio(unsigned long, unsigned long, unsigned long);
     4.8 +extern void handle_mmio(unsigned long, unsigned long);
     4.9  extern int vmx_setup_platform(struct exec_domain *, execution_context_t *);
    4.10  
    4.11  static inline int mmio_space(unsigned long gpa)