direct-io.hg

changeset 5004:925c36f915af

bitkeeper revision 1.1463 (428c9382-vrHzvryhVgUsKq4C6xVHA)

Add l?e_create_page() macros (covering l1e through l4e). map_pages_to_xen()
now takes a pfn range rather than a byte range. Fix x86/64 RAM mapping to
discard partial frames.
Signed-off-by: Keir Fraser <keir@xensource.com>
author kaf24@firebug.cl.cam.ac.uk
date Thu May 19 13:24:18 2005 +0000 (2005-05-19)
parents 76bb150a09da
children ba07574a5b6f
files xen/arch/x86/domain.c xen/arch/x86/mm.c xen/arch/x86/setup.c xen/arch/x86/shadow.c xen/arch/x86/x86_32/mm.c xen/arch/x86/x86_64/mm.c xen/include/asm-x86/page.h
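
The interface change is mechanical at each call site: callers that used to
pass a physical byte address and a byte length now pass a page frame number
and a frame count. A minimal sketch of the conversion, assuming a
page-aligned byte range (vaddr, paddr and size are hypothetical names; only
map_pages_to_xen(), PAGE_SHIFT and PAGE_HYPERVISOR come from this tree):

    /* Old byte-range interface (before this changeset): */
    map_pages_to_xen(vaddr, paddr, size, PAGE_HYPERVISOR);

    /* New pfn-range interface: shift the physical address and the length
     * down by PAGE_SHIFT. Partial frames must now be clipped by the
     * caller, as the setup.c hunk below does for the e820 map. */
    map_pages_to_xen(vaddr, paddr >> PAGE_SHIFT, size >> PAGE_SHIFT,
                     PAGE_HYPERVISOR);

Inside map_pages_to_xen() the superpage test moves to frame arithmetic as
well: a large mapping is used only when the virtual and physical frame
numbers are both aligned to (1 << PAGETABLE_ORDER) and at least that many
frames remain. The new l?e_create_page() helpers are thin wrappers, e.g.
l1e_create_page(pg, flags) expands to l1e_create_pfn(page_to_pfn(pg),
(flags)), so call sites holding a struct pfn_info pointer no longer spell
out the pfn conversion by hand.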
line diff
     1.1 --- a/xen/arch/x86/domain.c	Thu May 19 12:36:18 2005 +0000
     1.2 +++ b/xen/arch/x86/domain.c	Thu May 19 13:24:18 2005 +0000
     1.3 @@ -263,9 +263,8 @@ void arch_do_createdomain(struct exec_do
     1.4                             PAGE_SHIFT] = INVALID_M2P_ENTRY;
     1.5      ed->arch.perdomain_ptes = d->arch.mm_perdomain_pt;
     1.6      ed->arch.perdomain_ptes[FIRST_RESERVED_GDT_PAGE] =
     1.7 -        l1e_create_pfn(page_to_pfn(virt_to_page(gdt_table)),
     1.8 -                       PAGE_HYPERVISOR);
     1.9 -    
    1.10 +        l1e_create_page(virt_to_page(gdt_table), PAGE_HYPERVISOR);
    1.11 +
    1.12      ed->arch.guest_vtable  = __linear_l2_table;
    1.13      ed->arch.shadow_vtable = __shadow_linear_l2_table;
    1.14  
    1.15 @@ -302,8 +301,7 @@ void arch_do_boot_vcpu(struct exec_domai
    1.16      ed->arch.perdomain_ptes =
    1.17          d->arch.mm_perdomain_pt + (ed->vcpu_id << PDPT_VCPU_SHIFT);
    1.18      ed->arch.perdomain_ptes[FIRST_RESERVED_GDT_PAGE] =
    1.19 -        l1e_create_pfn(page_to_pfn(virt_to_page(gdt_table)),
    1.20 -                       PAGE_HYPERVISOR);
    1.21 +        l1e_create_page(virt_to_page(gdt_table), PAGE_HYPERVISOR);
    1.22  }
    1.23  
    1.24  #ifdef CONFIG_VMX
     2.1 --- a/xen/arch/x86/mm.c	Thu May 19 12:36:18 2005 +0000
     2.2 +++ b/xen/arch/x86/mm.c	Thu May 19 13:24:18 2005 +0000
     2.3 @@ -161,7 +161,10 @@ void __init init_frametable(void)
     2.4          if ( p == 0 )
     2.5              panic("Not enough memory for frame table\n");
     2.6          map_pages_to_xen(
     2.7 -            FRAMETABLE_VIRT_START + i, p, 4UL << 20, PAGE_HYPERVISOR);
     2.8 +            FRAMETABLE_VIRT_START + i,
     2.9 +            p >> PAGE_SHIFT,
    2.10 +            4UL << (20-PAGE_SHIFT),
    2.11 +            PAGE_HYPERVISOR);
    2.12      }
    2.13  
    2.14      memset(frame_table, 0, frame_table_size);
    2.15 @@ -2833,31 +2836,30 @@ void ptwr_destroy(struct domain *d)
    2.16      free_xenheap_page((unsigned long)d->arch.ptwr[PTWR_PT_INACTIVE].page);
    2.17  }
    2.18  
    2.19 -/* Map physical byte range (@p, @p+@s) at virt address @v in pagetable @pt. */
    2.20  int map_pages_to_xen(
    2.21 -    unsigned long v,
    2.22 -    unsigned long p,
    2.23 -    unsigned long s,
    2.24 +    unsigned long virt,
    2.25 +    unsigned long pfn,
    2.26 +    unsigned long nr_pfns,
    2.27      unsigned long flags)
    2.28  {
    2.29      l2_pgentry_t *pl2e, ol2e;
    2.30 -    l1_pgentry_t *pl1e;
    2.31 +    l1_pgentry_t *pl1e, ol1e;
    2.32      unsigned int  i;
    2.33  
    2.34      unsigned int  map_small_pages = !!(flags & MAP_SMALL_PAGES);
    2.35      flags &= ~MAP_SMALL_PAGES;
    2.36  
    2.37 -    while ( s != 0 )
    2.38 +    while ( nr_pfns != 0 )
    2.39      {
    2.40 -        pl2e = virt_to_xen_l2e(v);
    2.41 -
    2.42 -        if ( (((v|p) & ((1 << L2_PAGETABLE_SHIFT) - 1)) == 0) &&
    2.43 -             (s >= (1 << L2_PAGETABLE_SHIFT)) &&
    2.44 +        pl2e = virt_to_xen_l2e(virt);
    2.45 +
    2.46 +        if ( ((((virt>>PAGE_SHIFT) | pfn) & ((1<<PAGETABLE_ORDER)-1)) == 0) &&
    2.47 +             (nr_pfns >= (1<<PAGETABLE_ORDER)) &&
    2.48               !map_small_pages )
    2.49          {
    2.50              /* Super-page mapping. */
    2.51              ol2e  = *pl2e;
    2.52 -            *pl2e = l2e_create_phys(p, flags|_PAGE_PSE);
    2.53 +            *pl2e = l2e_create_pfn(pfn, flags|_PAGE_PSE);
    2.54  
    2.55              if ( (l2e_get_flags(ol2e) & _PAGE_PRESENT) )
    2.56              {
    2.57 @@ -2866,9 +2868,9 @@ int map_pages_to_xen(
    2.58                      free_xen_pagetable(l2e_get_page(*pl2e));
    2.59              }
    2.60  
    2.61 -            v += 1 << L2_PAGETABLE_SHIFT;
    2.62 -            p += 1 << L2_PAGETABLE_SHIFT;
    2.63 -            s -= 1 << L2_PAGETABLE_SHIFT;
    2.64 +            virt    += 1UL << L2_PAGETABLE_SHIFT;
    2.65 +            pfn     += 1UL << PAGETABLE_ORDER;
    2.66 +            nr_pfns -= 1UL << PAGETABLE_ORDER;
    2.67          }
    2.68          else
    2.69          {
    2.70 @@ -2890,26 +2892,36 @@ int map_pages_to_xen(
    2.71                  local_flush_tlb_pge();
    2.72              }
    2.73  
    2.74 -            pl1e = l2e_to_l1e(*pl2e) + l1_table_offset(v);
    2.75 -            if ( (l1e_get_flags(*pl1e) & _PAGE_PRESENT) )
    2.76 -                local_flush_tlb_one(v);
    2.77 -            *pl1e = l1e_create_phys(p, flags);
    2.78 -
    2.79 -            v += 1 << L1_PAGETABLE_SHIFT;
    2.80 -            p += 1 << L1_PAGETABLE_SHIFT;
    2.81 -            s -= 1 << L1_PAGETABLE_SHIFT;       
    2.82 +            pl1e  = l2e_to_l1e(*pl2e) + l1_table_offset(virt);
    2.83 +            ol1e  = *pl1e;
    2.84 +            *pl1e = l1e_create_pfn(pfn, flags);
    2.85 +            if ( (l1e_get_flags(ol1e) & _PAGE_PRESENT) )
    2.86 +                local_flush_tlb_one(virt);
    2.87 +
    2.88 +            virt    += 1UL << L1_PAGETABLE_SHIFT;
    2.89 +            pfn     += 1UL;
    2.90 +            nr_pfns -= 1UL;
    2.91          }
    2.92      }
    2.93  
    2.94      return 0;
    2.95  }
    2.96  
    2.97 +void __set_fixmap(
    2.98 +    enum fixed_addresses idx, unsigned long p, unsigned long flags)
    2.99 +{
   2.100 +    if ( unlikely(idx >= __end_of_fixed_addresses) )
   2.101 +        BUG();
   2.102 +    map_pages_to_xen(fix_to_virt(idx), p >> PAGE_SHIFT, 1, flags);
   2.103 +}
   2.104 +
   2.105  #ifdef MEMORY_GUARD
   2.106  
   2.107  void memguard_init(void)
   2.108  {
   2.109      map_pages_to_xen(
   2.110 -        PAGE_OFFSET, 0, xenheap_phys_end, __PAGE_HYPERVISOR|MAP_SMALL_PAGES);
   2.111 +        PAGE_OFFSET, 0, xenheap_phys_end >> PAGE_SHIFT,
   2.112 +        __PAGE_HYPERVISOR|MAP_SMALL_PAGES);
   2.113  }
   2.114  
   2.115  static void __memguard_change_range(void *p, unsigned long l, int guard)
   2.116 @@ -2927,7 +2939,8 @@ static void __memguard_change_range(void
   2.117      if ( guard )
   2.118          flags &= ~_PAGE_PRESENT;
   2.119  
   2.120 -    map_pages_to_xen((unsigned long)(_p), __pa(_p), _l, flags);
   2.121 +    map_pages_to_xen(
   2.122 +        _p, virt_to_phys(p) >> PAGE_SHIFT, _l >> PAGE_SHIFT, flags);
   2.123  }
   2.124  
   2.125  void memguard_guard_range(void *p, unsigned long l)
     3.1 --- a/xen/arch/x86/setup.c	Thu May 19 12:36:18 2005 +0000
     3.2 +++ b/xen/arch/x86/setup.c	Thu May 19 13:24:18 2005 +0000
     3.3 @@ -399,7 +399,7 @@ static void __init start_of_day(void)
     3.4      /* Map default GDT into their final position in the idle page table. */
     3.5      map_pages_to_xen(
     3.6          GDT_VIRT_START(current) + FIRST_RESERVED_GDT_BYTE,
     3.7 -        virt_to_phys(gdt_table), PAGE_SIZE, PAGE_HYPERVISOR);
     3.8 +        virt_to_phys(gdt_table) >> PAGE_SHIFT, 1, PAGE_HYPERVISOR);
     3.9  
    3.10      /* Process CPU type information. */
    3.11      identify_cpu(&boot_cpu_data);
    3.12 @@ -580,17 +580,19 @@ void __init __start_xen(multiboot_info_t
    3.13           *     due to cache-attribute mismatches (e.g., AMD/AGP Linux bug).
    3.14           */
    3.15          {
    3.16 -            unsigned long start = (unsigned long)e820.map[i].addr;
    3.17 -            unsigned long size  = (unsigned long)e820.map[i].size;
    3.18 -            size = (size + (start & ~PAGE_MASK) + PAGE_SIZE - 1) & PAGE_MASK;
    3.19 -            if ( (start &= PAGE_MASK) < (64UL << 20) )
    3.20 -            {
    3.21 -                if ( (signed long)(size -= (64UL << 20) - start) <= 0 )
    3.22 -                    continue;
    3.23 -                start = 64UL << 20;
    3.24 -            }
    3.25 +            /* Calculate page-frame range, discarding partial frames. */
    3.26 +            unsigned long start, end;
    3.27 +            start = (e820.map[i].addr + PAGE_SIZE - 1) >> PAGE_SHIFT;
    3.28 +            end   = (e820.map[i].addr + e820.map[i].size) >> PAGE_SHIFT;
    3.29 +            /* Clip the range to above 64MB. */
    3.30 +            if ( end < (64UL << (20-PAGE_SHIFT)) )
    3.31 +                continue;
    3.32 +            if ( start < (64UL << (20-PAGE_SHIFT)) )
    3.33 +                start = 64UL << (20-PAGE_SHIFT);
    3.34 +            /* Request the mapping. */
    3.35              map_pages_to_xen(
    3.36 -                PAGE_OFFSET + start, start, size, PAGE_HYPERVISOR);
    3.37 +                PAGE_OFFSET + (start << PAGE_SHIFT),
    3.38 +                start, end-start, PAGE_HYPERVISOR);
    3.39          }
    3.40  #endif
    3.41      }
     4.1 --- a/xen/arch/x86/shadow.c	Thu May 19 12:36:18 2005 +0000
     4.2 +++ b/xen/arch/x86/shadow.c	Thu May 19 13:24:18 2005 +0000
     4.3 @@ -789,7 +789,7 @@ set_p2m_entry(struct domain *d, unsigned
     4.4          memset(l1, 0, PAGE_SIZE);
     4.5          unmap_domain_mem_with_cache(l1, l1cache);
     4.6  
     4.7 -        l2e = l2e_create_pfn(page_to_pfn(l1page), __PAGE_HYPERVISOR);
     4.8 +        l2e = l2e_create_page(l1page, __PAGE_HYPERVISOR);
     4.9          l2[l2_table_offset(va)] = l2e;
    4.10      }
    4.11      unmap_domain_mem_with_cache(l2, l2cache);
     5.1 --- a/xen/arch/x86/x86_32/mm.c	Thu May 19 12:36:18 2005 +0000
     5.2 +++ b/xen/arch/x86/x86_32/mm.c	Thu May 19 13:24:18 2005 +0000
     5.3 @@ -54,14 +54,6 @@ l2_pgentry_t *virt_to_xen_l2e(unsigned l
     5.4      return &idle_pg_table[l2_table_offset(v)];
     5.5  }
     5.6  
     5.7 -void __set_fixmap(
     5.8 -    enum fixed_addresses idx, unsigned long p, unsigned long flags)
     5.9 -{
    5.10 -    if ( unlikely(idx >= __end_of_fixed_addresses) )
    5.11 -        BUG();
    5.12 -    map_pages_to_xen(fix_to_virt(idx), p, PAGE_SIZE, flags);
    5.13 -}
    5.14 -
    5.15  void __init paging_init(void)
    5.16  {
    5.17      void *ioremap_pt;
     6.1 --- a/xen/arch/x86/x86_64/mm.c	Thu May 19 12:36:18 2005 +0000
     6.2 +++ b/xen/arch/x86/x86_64/mm.c	Thu May 19 13:24:18 2005 +0000
     6.3 @@ -72,17 +72,9 @@ l2_pgentry_t *virt_to_xen_l2e(unsigned l
     6.4      return pl2e;
     6.5  }
     6.6  
     6.7 -void __set_fixmap(
     6.8 -    enum fixed_addresses idx, unsigned long p, unsigned long flags)
     6.9 -{
    6.10 -    if ( unlikely(idx >= __end_of_fixed_addresses) )
    6.11 -        BUG();
    6.12 -    map_pages_to_xen(fix_to_virt(idx), p, PAGE_SIZE, flags);
    6.13 -}
    6.14 -
    6.15  void __init paging_init(void)
    6.16  {
    6.17 -    unsigned long i, p;
    6.18 +    unsigned long i;
    6.19      l3_pgentry_t *l3rw, *l3ro;
    6.20      struct pfn_info *pg;
    6.21  
    6.22 @@ -96,10 +88,10 @@ void __init paging_init(void)
    6.23              NULL, L2_PAGETABLE_SHIFT - L1_PAGETABLE_SHIFT);
    6.24          if ( pg == NULL )
    6.25              panic("Not enough memory for m2p table\n");
    6.26 -        p = page_to_phys(pg);
    6.27          map_pages_to_xen(
    6.28 -            RDWR_MPT_VIRT_START + i*8, p, 
    6.29 -            1UL << L2_PAGETABLE_SHIFT, PAGE_HYPERVISOR | _PAGE_USER);
    6.30 +            RDWR_MPT_VIRT_START + i*8, page_to_pfn(pg), 
    6.31 +            1UL << (L2_PAGETABLE_SHIFT - L1_PAGETABLE_SHIFT),
    6.32 +            PAGE_HYPERVISOR | _PAGE_USER);
    6.33          memset((void *)(RDWR_MPT_VIRT_START + i*8), 0x55,
    6.34                 1UL << L2_PAGETABLE_SHIFT);
    6.35      }
     7.1 --- a/xen/include/asm-x86/page.h	Thu May 19 12:36:18 2005 +0000
     7.2 +++ b/xen/include/asm-x86/page.h	Thu May 19 13:24:18 2005 +0000
     7.3 @@ -43,6 +43,11 @@ typedef struct { unsigned long pt_lo; } 
     7.4  #define l3e_get_page(_x)    (pfn_to_page(l3e_get_pfn(_x)))
     7.5  #define l4e_get_page(_x)    (pfn_to_page(l4e_get_pfn(_x)))
     7.6  
     7.7 +#define l1e_create_page(_x,_y) (l1e_create_pfn(page_to_pfn(_x),(_y)))
     7.8 +#define l2e_create_page(_x,_y) (l2e_create_pfn(page_to_pfn(_x),(_y)))
     7.9 +#define l3e_create_page(_x,_y) (l3e_create_pfn(page_to_pfn(_x),(_y)))
    7.10 +#define l4e_create_page(_x,_y) (l4e_create_pfn(page_to_pfn(_x),(_y)))
    7.11 +
    7.12  /* High table entries are reserved by the hypervisor. */
    7.13  #define DOMAIN_ENTRIES_PER_L2_PAGETABLE     \
    7.14    (HYPERVISOR_VIRT_START >> L2_PAGETABLE_SHIFT)
    7.15 @@ -141,13 +146,13 @@ struct pfn_info *alloc_xen_pagetable(voi
    7.16  void free_xen_pagetable(struct pfn_info *pg);
    7.17  l2_pgentry_t *virt_to_xen_l2e(unsigned long v);
    7.18  
    7.19 -/* Map physical byte range (@p, @p+@s) at address @v in Xen address space. */
    7.20 +/* Map physical page range in Xen virtual address space. */
    7.21  #define MAP_SMALL_PAGES (1UL<<16) /* don't use superpages for the mapping */
    7.22  int
    7.23  map_pages_to_xen(
    7.24 -    unsigned long v,
    7.25 -    unsigned long p,
    7.26 -    unsigned long s,
    7.27 +    unsigned long virt,
    7.28 +    unsigned long pfn,
    7.29 +    unsigned long nr_pfns,
    7.30      unsigned long flags);
    7.31  
    7.32  #endif /* !__ASSEMBLY__ */
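
As a worked example of the partial-frame fix in setup.c: the new code rounds
the region start up and the region end down to frame boundaries, so a RAM
region that only partially covers a frame no longer gets that frame mapped.
Assuming 4KiB pages (PAGE_SHIFT == 12) and a hypothetical e820 entry with
addr = 0x4000C00 and size = 0x800 (2KiB just above the 64MB clip):

    start = (0x4000C00 + 0xFFF) >> 12;  /* = 0x4001, rounded up   */
    end   = (0x4000C00 + 0x800) >> 12;  /* = 0x4001, rounded down */

Here start == end, so nothing is mapped and the straddled partial frame is
discarded, whereas the old code rounded the byte size up to a page multiple
and would have mapped it. The 64MB clip likewise operates on frame numbers:
64UL << (20 - PAGE_SHIFT) is pfn 0x4000.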