ia64/xen-unstable

changeset 3298:2711f7eb364c

bitkeeper revision 1.1159.1.490 (41c1bb05aOZv3pnPk-NIbxvGZzv5BQ)

page.h, mm.c:
More cleanup: finish converting the x86 memory-management code from struct domain to the new struct exec_domain, move the x86_64 idle page table to L4 entries, and make the GDT routines operate per exec_domain.
author kaf24@pb001.cl.cam.ac.uk
date Thu Dec 16 16:42:45 2004 +0000 (2004-12-16)
parents 47157bca9ab0
children 3609a4de4be5
files xen/arch/x86/x86_32/mm.c xen/arch/x86/x86_64/mm.c xen/include/asm-x86/page.h
--- a/xen/arch/x86/x86_32/mm.c	Thu Dec 16 15:47:33 2004 +0000
+++ b/xen/arch/x86/x86_32/mm.c	Thu Dec 16 16:42:45 2004 +0000
@@ -104,9 +104,10 @@ void __init zap_low_mappings(void)
  */
 static void __synchronise_pagetables(void *mask)
 {
-    struct exec_domain *d = current;
-    if ( ((unsigned long)mask & (1<<d->processor)) && is_idle_task(d->domain) )
-        write_ptbase(&d->mm);
+    struct exec_domain *ed = current;
+    if ( ((unsigned long)mask & (1 << ed->processor)) &&
+         is_idle_task(ed->domain) )
+        write_ptbase(&ed->mm);
 }
 void synchronise_pagetables(unsigned long cpu_mask)
 {
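
Both this hunk and its x86_64 counterpart below rest on Xen's split of the old struct domain into a domain-wide container plus a schedulable struct exec_domain (in effect a virtual CPU): current is an exec_domain carrying the CPU binding and paging state, with a backlink to its owning domain for checks such as is_idle_task(). A minimal sketch of that relationship; only the three fields used in the hunk come from the diff, the rest are placeholders:

    /* Hedged sketch: ->processor, ->domain and ->mm appear in the diff;
     * everything else here is illustrative, not Xen's actual layout. */
    struct mm_struct {
        unsigned long pagetable;       /* base handed to write_ptbase() */
        /* per-domain mapping entries, GDT bookkeeping, ... */
    };

    struct domain {
        unsigned int id;               /* illustrative */
        /* domain-wide (not per-vcpu) state lives here */
    };

    struct exec_domain {
        int              processor;    /* physical CPU this context runs on */
        struct domain   *domain;       /* backlink for is_idle_task() etc. */
        struct mm_struct mm;           /* per-exec_domain paging state */
    };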
--- a/xen/arch/x86/x86_64/mm.c	Thu Dec 16 15:47:33 2004 +0000
+++ b/xen/arch/x86/x86_64/mm.c	Thu Dec 16 16:42:45 2004 +0000
@@ -60,36 +60,14 @@ void __set_fixmap(enum fixed_addresses i
 
 void __init paging_init(void)
 {
-    void *ioremap_pt;
-    int i;
-
-    /* Create page table for ioremap(). */
-    ioremap_pt = (void *)alloc_xenheap_page();
-    clear_page(ioremap_pt);
-    idle_pg_table[IOREMAP_VIRT_START >> L2_PAGETABLE_SHIFT] = 
-        mk_l2_pgentry(__pa(ioremap_pt) | __PAGE_HYPERVISOR);
-
-    /* Create read-only mapping of MPT for guest-OS use. */
-    idle_pg_table[RO_MPT_VIRT_START >> L2_PAGETABLE_SHIFT] =
-        mk_l2_pgentry(l2_pgentry_val(
-            idle_pg_table[RDWR_MPT_VIRT_START >> L2_PAGETABLE_SHIFT]) & 
-                      ~_PAGE_RW);
-
-    /* Set up mapping cache for domain pages. */
-    mapcache = (unsigned long *)alloc_xenheap_page();
-    clear_page(mapcache);
-    idle_pg_table[MAPCACHE_VIRT_START >> L2_PAGETABLE_SHIFT] =
-        mk_l2_pgentry(__pa(mapcache) | __PAGE_HYPERVISOR);
-
     /* Set up linear page table mapping. */
-    idle_pg_table[LINEAR_PT_VIRT_START >> L2_PAGETABLE_SHIFT] =
-        mk_l2_pgentry(__pa(idle_pg_table) | __PAGE_HYPERVISOR);
-
+    idle_pg_table[LINEAR_PT_VIRT_START >> L4_PAGETABLE_SHIFT] =
+        mk_l4_pgentry(__pa(idle_pg_table) | __PAGE_HYPERVISOR);
 }
 
 void __init zap_low_mappings(void)
 {
-    idle_pg_table[0] = 0;
+    idle_pg_table[0] = mk_l4_pgentry(0);
 }
 
 
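On x86_64, idle_pg_table is now a top-level L4 table, so paging_init() reduces to one job: installing the linear (recursive) page-table mapping, in which an L4 slot points back at the L4 table itself and thereby exposes every page-table page through a fixed virtual window. A standalone sketch of that idea under assumed constants; SELF_SLOT and PTE_FLAGS are illustrative stand-ins for LINEAR_PT_VIRT_START >> L4_PAGETABLE_SHIFT and __PAGE_HYPERVISOR:

    /* Hedged sketch of a recursive page-table mapping on 4-level paging.
     * Once slot SELF_SLOT points at the L4 table itself, the hardware
     * walker resolves virtual addresses under that slot's window into the
     * page-table pages themselves, so a PTE can be read through a fixed
     * virtual address instead of by walking physical memory by hand. */
    #include <stdint.h>

    #define SELF_SLOT 256UL                 /* illustrative L4 slot */
    #define PTE_FLAGS 0x63UL                /* present|rw|accessed|dirty; illustrative */

    static uint64_t l4_table[512] __attribute__((aligned(4096)));

    static void install_linear_mapping(uint64_t l4_table_phys)
    {
        /* Mirrors the spirit of:
         *   idle_pg_table[LINEAR_PT_VIRT_START >> L4_PAGETABLE_SHIFT] =
         *       mk_l4_pgentry(__pa(idle_pg_table) | __PAGE_HYPERVISOR);
         */
        l4_table[SELF_SLOT] = l4_table_phys | PTE_FLAGS;
    }
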
@@ -99,9 +77,10 @@ void __init zap_low_mappings(void)
  */
 static void __synchronise_pagetables(void *mask)
 {
-    struct domain *d = current;
-    if ( ((unsigned long)mask & (1<<d->processor)) && is_idle_task(d) )
-        write_ptbase(&d->mm);
+    struct exec_domain *ed = current;
+    if ( ((unsigned long)mask & (1 << ed->processor)) &&
+         is_idle_task(ed->domain) )
+        write_ptbase(&ed->mm);
 }
 void synchronise_pagetables(unsigned long cpu_mask)
 {
@@ -111,6 +90,7 @@ void synchronise_pagetables(unsigned lon
 
 long do_stack_switch(unsigned long ss, unsigned long esp)
 {
+#if 0
     int nr = smp_processor_id();
     struct tss_struct *t = &init_tss[nr];
 
@@ -122,7 +102,7 @@ long do_stack_switch(unsigned long ss, u
     current->thread.guestos_sp = esp;
     t->ss1  = ss;
     t->esp1 = esp;
-
+#endif
     return 0;
 }
 
@@ -163,9 +143,11 @@ int check_descriptor(unsigned long *d)
         if ( (b & _SEGMENT_TYPE) != 0xc00 )
             goto bad;
 
+#if 0
         /* Can't allow far jump to a Xen-private segment. */
         if ( !VALID_CODESEL(a>>16) )
             goto bad;
+#endif
 
         /* Reserved bits must be zero. */
         if ( (b & 0xe0) != 0 )
@@ -226,24 +208,25 @@ int check_descriptor(unsigned long *d)
 }
 
 
-void destroy_gdt(struct domain *d)
+void destroy_gdt(struct exec_domain *ed)
 {
     int i;
     unsigned long pfn;
 
     for ( i = 0; i < 16; i++ )
     {
-        if ( (pfn = l1_pgentry_to_pagenr(d->mm.perdomain_pt[i])) != 0 )
+        if ( (pfn = l1_pgentry_to_pagenr(ed->mm.perdomain_ptes[i])) != 0 )
             put_page_and_type(&frame_table[pfn]);
-        d->mm.perdomain_pt[i] = mk_l1_pgentry(0);
+        ed->mm.perdomain_ptes[i] = mk_l1_pgentry(0);
     }
 }
 
 
-long set_gdt(struct domain *d, 
+long set_gdt(struct exec_domain *ed, 
              unsigned long *frames,
              unsigned int entries)
 {
+    struct domain *d = ed->domain;
     /* NB. There are 512 8-byte entries per GDT page. */
     int i = 0, nr_pages = (entries + 511) / 512;
     struct desc_struct *vgdt;
@@ -284,15 +267,15 @@ long set_gdt(struct domain *d,
     unmap_domain_mem(vgdt);
 
     /* Tear down the old GDT. */
-    destroy_gdt(d);
+    destroy_gdt(ed);
 
     /* Install the new GDT. */
     for ( i = 0; i < nr_pages; i++ )
-        d->mm.perdomain_pt[i] =
+        ed->mm.perdomain_ptes[i] =
             mk_l1_pgentry((frames[i] << PAGE_SHIFT) | __PAGE_HYPERVISOR);
 
-    SET_GDT_ADDRESS(d, GDT_VIRT_START);
-    SET_GDT_ENTRIES(d, entries);
+    SET_GDT_ADDRESS(ed, GDT_VIRT_START(ed));
+    SET_GDT_ENTRIES(ed, entries);
 
     return 0;
 
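destroy_gdt() and set_gdt() now key off an exec_domain rather than a domain: each virtual CPU owns a run of per-domain page-table entries (mm.perdomain_ptes) into which up to 16 guest-supplied GDT frames are mapped, and GDT_VIRT_START(ed) makes the GDT window per-exec_domain as well. A self-contained sketch of the install loop under placeholder types; only the perdomain_ptes name, the 16-page bound, and the entry layout come from the diff:

    /* Hedged sketch of mapping guest GDT frames into per-vcpu PTE slots.
     * The types and flag bits are simplified placeholders, not Xen's. */
    #include <stdint.h>

    typedef uint64_t l1_pgentry_t;
    #define PAGE_SHIFT      12
    #define PAGE_HYPERVISOR 0x63UL          /* illustrative flag bits */
    #define GDT_PAGES_MAX   16              /* matches the loop bound in destroy_gdt() */

    struct exec_mm { l1_pgentry_t perdomain_ptes[GDT_PAGES_MAX]; };

    /* Map up to 16 guest-supplied GDT frames into this vcpu's PTE slots,
     * mirroring the install loop in set_gdt() above. */
    static void install_gdt_frames(struct exec_mm *mm,
                                   const unsigned long *frames, int nr_pages)
    {
        for ( int i = 0; i < nr_pages && i < GDT_PAGES_MAX; i++ )
            mm->perdomain_ptes[i] =
                (l1_pgentry_t)((frames[i] << PAGE_SHIFT) | PAGE_HYPERVISOR);
    }
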
@@ -339,7 +322,7 @@ long do_update_descriptor(
         return -EINVAL;
 
     page = &frame_table[pfn];
-    if ( unlikely(!get_page(page, current)) )
+    if ( unlikely(!get_page(page, current->domain)) )
         return -EINVAL;
 
     /* Check if the given frame is in use in an unsafe context. */
@@ -347,7 +330,7 @@ long do_update_descriptor(
     {
     case PGT_gdt_page:
         /* Disallow updates of Xen-reserved descriptors in the current GDT. */
-        if ( (l1_pgentry_to_pagenr(current->mm.perdomain_pt[0]) == pfn) &&
+        if ( (l1_pgentry_to_pagenr(current->mm.perdomain_ptes[0]) == pfn) &&
             (((pa&(PAGE_SIZE-1))>>3) >= FIRST_RESERVED_GDT_ENTRY) &&
             (((pa&(PAGE_SIZE-1))>>3) <= LAST_RESERVED_GDT_ENTRY) )
            goto out;
--- a/xen/include/asm-x86/page.h	Thu Dec 16 15:47:33 2004 +0000
+++ b/xen/include/asm-x86/page.h	Thu Dec 16 16:42:45 2004 +0000
@@ -130,7 +130,12 @@ typedef struct { unsigned long pt_lo; } 
 
 #define va_to_l1mfn(_va) (l2_pgentry_val(linear_l2_table[_va>>L2_PAGETABLE_SHIFT]) >> PAGE_SHIFT)
 
+#ifdef __i386__
 extern l2_pgentry_t idle_pg_table[ENTRIES_PER_L2_PAGETABLE];
+#else
+extern l4_pgentry_t idle_pg_table[ENTRIES_PER_L4_PAGETABLE];
+#endif
+
 extern void paging_init(void);
 
 /* Flush global pages as well. */
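
The header now gives idle_pg_table an architecture-dependent element type: L2 entries on two-level i386, L4 entries on four-level x86_64. One way to spare common code an #ifdef at every use site is an arch-neutral alias for the root table type; this is a hedged sketch assuming page.h's surrounding definitions, and root_pgentry_t / ROOT_PAGETABLE_ENTRIES are hypothetical names, not part of this changeset:

    /* Hedged sketch: a hypothetical arch-neutral alias for the root
     * page-table type, so callers need not repeat the #ifdef above. */
    #ifdef __i386__
    typedef l2_pgentry_t root_pgentry_t;
    #define ROOT_PAGETABLE_ENTRIES ENTRIES_PER_L2_PAGETABLE
    #else
    typedef l4_pgentry_t root_pgentry_t;
    #define ROOT_PAGETABLE_ENTRIES ENTRIES_PER_L4_PAGETABLE
    #endif

    extern root_pgentry_t idle_pg_table[ROOT_PAGETABLE_ENTRIES];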