ia64/xen-unstable

changeset 3744:2fcf1b2bcbcf

bitkeeper revision 1.1159.256.1 (420919a4fFt2x7Ej4o_xqAI2CSYN8Q)

mmu_update fixes for x86_64. About halfway there.
Signed-off-by: keir.fraser@cl.cam.ac.uk
author kaf24@scramble.cl.cam.ac.uk
date Tue Feb 08 19:57:24 2005 +0000 (2005-02-08)
parents d633a3d0f36c
children 6637d3633038
files xen/arch/x86/boot/x86_64.S xen/arch/x86/mm.c xen/include/asm-x86/cpufeature.h xen/include/asm-x86/page.h xen/include/asm-x86/x86_32/page.h xen/include/asm-x86/x86_64/page.h
line diff
     1.1 --- a/xen/arch/x86/boot/x86_64.S	Tue Feb 08 18:21:54 2005 +0000
     1.2 +++ b/xen/arch/x86/boot/x86_64.S	Tue Feb 08 19:57:24 2005 +0000
     1.3 @@ -75,6 +75,7 @@ 3:      in      %dx,%al
     1.4          cpuid
     1.5          bt      $29,%edx            # Long mode feature?
     1.6          jnc     bad_cpu
     1.7 +        mov     %edx,%edi
     1.8  skip_boot_checks:
     1.9  
    1.10          /* Set up FPU. */
    1.11 @@ -91,9 +92,12 @@ skip_boot_checks:
    1.12          /* Set up EFER (Extended Feature Enable Register). */
    1.13          movl    $MSR_EFER, %ecx
    1.14          rdmsr
    1.15 -        /* Long Mode, SYSCALL/SYSRET, No-Execute */
    1.16 -        movl    $(EFER_LME|EFER_SCE|EFER_NX),%eax
    1.17 -        wrmsr
    1.18 +        btsl    $_EFER_LME,%eax /* Long Mode      */
    1.19 +        btsl    $_EFER_SCE,%eax /* SYSCALL/SYSRET */
    1.20 +        btl     $20,%edi        /* CPUID 0x80000001, EDX[20] */
    1.21 +        jnc     1f
    1.22 +        btsl    $_EFER_NX,%eax  /* No-Execute     */
    1.23 +1:      wrmsr
    1.24  
    1.25          mov     $0x80050033,%eax /* hi-to-lo: PG,AM,WP,NE,ET,MP,PE */
    1.26          mov     %eax,%cr0
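
The interesting part of the x86_64.S change: writing a reserved EFER bit raises #GP, so EFER.NX may only be set on CPUs that actually report the No-Execute feature. The boot path therefore stashes the CPUID 0x80000001 EDX value in %edi and tests bit 20 before the btsl. A minimal C sketch of the same decision, assuming placeholder rdmsr()/wrmsr() accessors (the real code runs in early boot assembly):

    #include <stdint.h>

    #define MSR_EFER  0xc0000080
    #define _EFER_SCE 0                  /* SYSCALL/SYSRET enable */
    #define _EFER_LME 8                  /* long-mode enable      */
    #define _EFER_NX  11                 /* no-execute enable     */

    extern uint64_t rdmsr(uint32_t msr);              /* placeholder */
    extern void     wrmsr(uint32_t msr, uint64_t v);  /* placeholder */

    static void enable_long_mode(uint32_t cpuid_ext_edx)
    {
        uint64_t efer = rdmsr(MSR_EFER);
        efer |= (1ULL << _EFER_LME) | (1ULL << _EFER_SCE);
        if ( cpuid_ext_edx & (1u << 20) )   /* CPUID 0x80000001 EDX[20]: NX */
            efer |= (1ULL << _EFER_NX);     /* set NX only where supported  */
        wrmsr(MSR_EFER, efer);
    }
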
     2.1 --- a/xen/arch/x86/mm.c	Tue Feb 08 18:21:54 2005 +0000
     2.2 +++ b/xen/arch/x86/mm.c	Tue Feb 08 19:57:24 2005 +0000
     2.3 @@ -335,47 +335,48 @@ static int get_page_and_type_from_pagenr
     2.4  
     2.5  
     2.6  /*
     2.7 - * We allow an L2 tables to map each other (a.k.a. linear page tables). It
     2.8 - * needs some special care with reference counst and access permissions:
     2.9 + * We allow root tables to map each other (a.k.a. linear page tables). It
    2.10 + * needs some special care with reference counts and access permissions:
    2.11   *  1. The mapping entry must be read-only, or the guest may get write access
    2.12   *     to its own PTEs.
    2.13   *  2. We must only bump the reference counts for an *already validated*
    2.14   *     L2 table, or we can end up in a deadlock in get_page_type() by waiting
    2.15   *     on a validation that is required to complete that validation.
    2.16   *  3. We only need to increment the reference counts for the mapped page
    2.17 - *     frame if it is mapped by a different L2 table. This is sufficient and
    2.18 - *     also necessary to allow validation of an L2 table mapping itself.
    2.19 + *     frame if it is mapped by a different root table. This is sufficient and
    2.20 + *     also necessary to allow validation of a root table mapping itself.
    2.21   */
    2.22  static int 
    2.23  get_linear_pagetable(
    2.24 -    l2_pgentry_t l2e, unsigned long pfn, struct domain *d)
    2.25 +    root_pgentry_t re, unsigned long re_pfn, struct domain *d)
    2.26  {
    2.27      u32 x, y;
    2.28      struct pfn_info *page;
    2.29 -
    2.30 -    if ( (l2_pgentry_val(l2e) & _PAGE_RW) )
    2.31 +    unsigned long pfn;
    2.32 +
    2.33 +    if ( (root_pgentry_val(re) & _PAGE_RW) )
    2.34      {
    2.35          MEM_LOG("Attempt to create linear p.t. with write perms");
    2.36          return 0;
    2.37      }
    2.38  
    2.39 -    if ( (l2_pgentry_val(l2e) >> PAGE_SHIFT) != pfn )
    2.40 +    if ( (pfn = root_pgentry_to_pfn(re)) != re_pfn )
    2.41      {
    2.42          /* Make sure the mapped frame belongs to the correct domain. */
    2.43 -        if ( unlikely(!get_page_from_pagenr(l2_pgentry_to_pfn(l2e), d)) )
    2.44 +        if ( unlikely(!get_page_from_pagenr(pfn, d)) )
    2.45              return 0;
    2.46  
    2.47          /*
    2.48           * Make sure that the mapped frame is an already-validated L2 table. 
    2.49           * If so, atomically increment the count (checking for overflow).
    2.50           */
    2.51 -        page = &frame_table[l2_pgentry_to_pfn(l2e)];
    2.52 +        page = &frame_table[pfn];
    2.53          y = page->u.inuse.type_info;
    2.54          do {
    2.55              x = y;
    2.56              if ( unlikely((x & PGT_count_mask) == PGT_count_mask) ||
    2.57                   unlikely((x & (PGT_type_mask|PGT_validated)) != 
    2.58 -                          (PGT_l2_page_table|PGT_validated)) )
    2.59 +                          (PGT_root_page_table|PGT_validated)) )
    2.60              {
    2.61                  put_page(page);
    2.62                  return 0;
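
This hunk is cropped just before the tail of the type-count loop. The idiom it generalizes (atomically bump the type count of an already-validated root table, checking for overflow) looks like the following sketch in the file's own types, assuming Xen's usual cmpxchg() that returns the previous value; the real code also put_page()s the general reference taken just above when it bails out:

    static int get_validated_root_ref(struct pfn_info *page)
    {
        u32 x, y = page->u.inuse.type_info;
        do {
            x = y;
            if ( (x & PGT_count_mask) == PGT_count_mask )   /* count overflow */
                return 0;
            if ( (x & (PGT_type_mask|PGT_validated)) !=
                 (PGT_root_page_table|PGT_validated) )      /* not a validated root */
                return 0;
        }
        while ( (y = cmpxchg(&page->u.inuse.type_info, x, x + 1)) != x );
        return 1;
    }
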
    2.63 @@ -400,9 +401,9 @@ get_page_from_l1e(
    2.64      if ( !(l1v & _PAGE_PRESENT) )
    2.65          return 1;
    2.66  
    2.67 -    if ( unlikely(l1v & (_PAGE_GLOBAL|_PAGE_PAT)) )
    2.68 +    if ( unlikely(l1v & L1_DISALLOW_MASK) )
    2.69      {
    2.70 -        MEM_LOG("Bad L1 type settings %04lx", l1v & (_PAGE_GLOBAL|_PAGE_PAT));
    2.71 +        MEM_LOG("Bad L1 type settings %04lx", l1v & L1_DISALLOW_MASK);
    2.72          return 0;
    2.73      }
    2.74  
    2.75 @@ -439,10 +440,10 @@ get_page_from_l2e(
    2.76      if ( !(l2_pgentry_val(l2e) & _PAGE_PRESENT) )
    2.77          return 1;
    2.78  
    2.79 -    if ( unlikely((l2_pgentry_val(l2e) & (_PAGE_GLOBAL|_PAGE_PSE))) )
    2.80 +    if ( unlikely((l2_pgentry_val(l2e) & L2_DISALLOW_MASK)) )
    2.81      {
    2.82          MEM_LOG("Bad L2 page type settings %04lx",
    2.83 -                l2_pgentry_val(l2e) & (_PAGE_GLOBAL|_PAGE_PSE));
    2.84 +                l2_pgentry_val(l2e) & L2_DISALLOW_MASK);
    2.85          return 0;
    2.86      }
    2.87  
    2.88 @@ -450,12 +451,62 @@ get_page_from_l2e(
    2.89          l2_pgentry_to_pfn(l2e), 
    2.90          PGT_l1_page_table | (va_idx<<PGT_va_shift), d);
    2.91  
    2.92 +#if defined(__i386__)
    2.93 +    return rc ? rc : get_linear_pagetable(l2e, pfn, d);
    2.94 +#elif defined(__x86_64__)
    2.95 +    return rc;
    2.96 +#endif
    2.97 +}
    2.98 +
    2.99 +
   2.100 +#ifdef __x86_64__
   2.101 +
   2.102 +static int 
   2.103 +get_page_from_l3e(
   2.104 +    l3_pgentry_t l3e, unsigned long pfn, struct domain *d)
   2.105 +{
   2.106 +    if ( !(l3_pgentry_val(l3e) & _PAGE_PRESENT) )
   2.107 +        return 1;
   2.108 +
   2.109 +    if ( unlikely((l3_pgentry_val(l3e) & L3_DISALLOW_MASK)) )
   2.110 +    {
   2.111 +        MEM_LOG("Bad L3 page type settings %04lx",
   2.112 +                l3_pgentry_val(l3e) & L3_DISALLOW_MASK);
   2.113 +        return 0;
   2.114 +    }
   2.115 +
   2.116 +    return get_page_and_type_from_pagenr(
   2.117 +        l3_pgentry_to_pfn(l3e), PGT_l3_page_table, d);
   2.118 +}
   2.119 +
   2.120 +
   2.121 +static int 
   2.122 +get_page_from_l4e(
   2.123 +    l4_pgentry_t l4e, unsigned long pfn, struct domain *d)
   2.124 +{
   2.125 +    int rc;
   2.126 +
   2.127 +    if ( !(l4_pgentry_val(l4e) & _PAGE_PRESENT) )
   2.128 +        return 1;
   2.129 +
   2.130 +    if ( unlikely((l4_pgentry_val(l4e) & L4_DISALLOW_MASK)) )
   2.131 +    {
   2.132 +        MEM_LOG("Bad L4 page type settings %04lx",
   2.133 +                l4_pgentry_val(l4e) & L4_DISALLOW_MASK);
   2.134 +        return 0;
   2.135 +    }
   2.136 +
   2.137 +    rc = get_page_and_type_from_pagenr(
   2.138 +        l4_pgentry_to_pfn(l4e), PGT_l4_page_table, d);
   2.139 +
   2.140      if ( unlikely(!rc) )
   2.141 -        return get_linear_pagetable(l2e, pfn, d);
   2.142 +        return get_linear_pagetable(l4e, pfn, d);
   2.143  
   2.144      return 1;
   2.145  }
   2.146  
   2.147 +#endif /* __x86_64__ */
   2.148 +
   2.149  
   2.150  static void put_page_from_l1e(l1_pgentry_t l1e, struct domain *d)
   2.151  {
   2.152 @@ -514,51 +565,34 @@ static void put_page_from_l2e(l2_pgentry
   2.153  }
   2.154  
   2.155  
   2.156 -static int alloc_l2_table(struct pfn_info *page)
   2.157 +#ifdef __x86_64__
   2.158 +
   2.159 +static void put_page_from_l3e(l3_pgentry_t l3e, unsigned long pfn)
   2.160  {
   2.161 -    struct domain *d = page_get_owner(page);
   2.162 -    unsigned long  page_nr = page_to_pfn(page);
   2.163 -    l2_pgentry_t  *pl2e;
   2.164 -    int            i;
   2.165 -   
   2.166 -    pl2e = map_domain_mem(page_nr << PAGE_SHIFT);
   2.167 -
   2.168 -    for ( i = 0; i < DOMAIN_ENTRIES_PER_L2_PAGETABLE; i++ )
   2.169 -        if ( unlikely(!get_page_from_l2e(pl2e[i], page_nr, d, i)) )
   2.170 -            goto fail;
   2.171 -
   2.172 -#if defined(__i386__)
   2.173 -    /* Now we add our private high mappings. */
   2.174 -    memcpy(&pl2e[DOMAIN_ENTRIES_PER_L2_PAGETABLE], 
   2.175 -           &idle_pg_table[DOMAIN_ENTRIES_PER_L2_PAGETABLE],
   2.176 -           HYPERVISOR_ENTRIES_PER_L2_PAGETABLE * sizeof(l2_pgentry_t));
   2.177 -    pl2e[LINEAR_PT_VIRT_START >> L2_PAGETABLE_SHIFT] =
   2.178 -        mk_l2_pgentry((page_nr << PAGE_SHIFT) | __PAGE_HYPERVISOR);
   2.179 -    pl2e[PERDOMAIN_VIRT_START >> L2_PAGETABLE_SHIFT] =
   2.180 -        mk_l2_pgentry(__pa(page_get_owner(page)->arch.mm_perdomain_pt) | 
   2.181 -                      __PAGE_HYPERVISOR);
   2.182 -#endif
   2.183 -
   2.184 -    unmap_domain_mem(pl2e);
   2.185 -    return 1;
   2.186 -
   2.187 - fail:
   2.188 -    while ( i-- > 0 )
   2.189 -        put_page_from_l2e(pl2e[i], page_nr);
   2.190 -
   2.191 -    unmap_domain_mem(pl2e);
   2.192 -    return 0;
   2.193 +    if ( (l3_pgentry_val(l3e) & _PAGE_PRESENT) && 
   2.194 +         ((l3_pgentry_val(l3e) >> PAGE_SHIFT) != pfn) )
   2.195 +        put_page_and_type(&frame_table[l3_pgentry_to_pfn(l3e)]);
   2.196  }
   2.197  
   2.198  
   2.199 +static void put_page_from_l4e(l4_pgentry_t l4e, unsigned long pfn)
   2.200 +{
   2.201 +    if ( (l4_pgentry_val(l4e) & _PAGE_PRESENT) && 
   2.202 +         ((l4_pgentry_val(l4e) >> PAGE_SHIFT) != pfn) )
   2.203 +        put_page_and_type(&frame_table[l4_pgentry_to_pfn(l4e)]);
   2.204 +}
   2.205 +
   2.206 +#endif /* __x86_64__ */
   2.207 +
   2.208 +
   2.209  static int alloc_l1_table(struct pfn_info *page)
   2.210  {
   2.211      struct domain *d = page_get_owner(page);
   2.212 -    unsigned long  page_nr = page_to_pfn(page);
   2.213 +    unsigned long  pfn = page_to_pfn(page);
   2.214      l1_pgentry_t  *pl1e;
   2.215      int            i;
   2.216  
   2.217 -    pl1e = map_domain_mem(page_nr << PAGE_SHIFT);
   2.218 +    pl1e = map_domain_mem(pfn << PAGE_SHIFT);
   2.219  
   2.220      for ( i = 0; i < L1_PAGETABLE_ENTRIES; i++ )
   2.221          if ( unlikely(!get_page_from_l1e(pl1e[i], d)) )
   2.222 @@ -576,29 +610,97 @@ static int alloc_l1_table(struct pfn_inf
   2.223  }
   2.224  
   2.225  
   2.226 -static void free_l2_table(struct pfn_info *page)
   2.227 +static int alloc_l2_table(struct pfn_info *page)
   2.228  {
   2.229 -    unsigned long page_nr = page - frame_table;
   2.230 -    l2_pgentry_t *pl2e;
   2.231 -    int i;
   2.232 -
   2.233 -    pl2e = map_domain_mem(page_nr << PAGE_SHIFT);
   2.234 +    struct domain *d = page_get_owner(page);
   2.235 +    unsigned long  pfn = page_to_pfn(page);
   2.236 +    l2_pgentry_t  *pl2e;
   2.237 +    int            i;
   2.238 +   
   2.239 +    pl2e = map_domain_mem(pfn << PAGE_SHIFT);
   2.240  
   2.241      for ( i = 0; i < DOMAIN_ENTRIES_PER_L2_PAGETABLE; i++ )
   2.242 -        put_page_from_l2e(pl2e[i], page_nr);
   2.243 +        if ( unlikely(!get_page_from_l2e(pl2e[i], pfn, d, i)) )
   2.244 +            goto fail;
   2.245 +
   2.246 +#if defined(__i386__)
   2.247 +    /* Now we add our private high mappings. */
   2.248 +    memcpy(&pl2e[DOMAIN_ENTRIES_PER_L2_PAGETABLE], 
   2.249 +           &idle_pg_table[DOMAIN_ENTRIES_PER_L2_PAGETABLE],
   2.250 +           HYPERVISOR_ENTRIES_PER_L2_PAGETABLE * sizeof(l2_pgentry_t));
   2.251 +    pl2e[LINEAR_PT_VIRT_START >> L2_PAGETABLE_SHIFT] =
   2.252 +        mk_l2_pgentry((pfn << PAGE_SHIFT) | __PAGE_HYPERVISOR);
   2.253 +    pl2e[PERDOMAIN_VIRT_START >> L2_PAGETABLE_SHIFT] =
   2.254 +        mk_l2_pgentry(__pa(page_get_owner(page)->arch.mm_perdomain_pt) | 
   2.255 +                      __PAGE_HYPERVISOR);
   2.256 +#endif
   2.257 +
   2.258 +    unmap_domain_mem(pl2e);
   2.259 +    return 1;
   2.260 +
   2.261 + fail:
   2.262 +    while ( i-- > 0 )
   2.263 +        put_page_from_l2e(pl2e[i], pfn);
   2.264  
   2.265      unmap_domain_mem(pl2e);
   2.266 +    return 0;
   2.267  }
   2.268  
   2.269  
   2.270 +#ifdef __x86_64__
   2.271 +
   2.272 +static int alloc_l3_table(struct pfn_info *page)
   2.273 +{
   2.274 +    struct domain *d = page_get_owner(page);
   2.275 +    unsigned long  pfn = page_to_pfn(page);
   2.276 +    l3_pgentry_t  *pl3e = page_to_virt(page);
   2.277 +    int            i;
   2.278 +
   2.279 +    for ( i = 0; i < L3_PAGETABLE_ENTRIES; i++ )
   2.280 +        if ( unlikely(!get_page_from_l3e(pl3e[i], pfn, d)) )
   2.281 +            goto fail;
   2.282 +
   2.283 +    return 1;
   2.284 +
   2.285 + fail:
   2.286 +    while ( i-- > 0 )
   2.287 +        put_page_from_l3e(pl3e[i], pfn);
   2.288 +
   2.289 +    return 0;
   2.290 +}
   2.291 +
   2.292 +
   2.293 +static int alloc_l4_table(struct pfn_info *page)
   2.294 +{
   2.295 +    struct domain *d = page_get_owner(page);
   2.296 +    unsigned long  pfn = page_to_pfn(page);
   2.297 +    l4_pgentry_t  *pl4e = page_to_virt(page);
   2.298 +    int            i;
   2.299 +
   2.300 +    for ( i = 0; i < L4_PAGETABLE_ENTRIES; i++ )
   2.301 +        if ( unlikely(!get_page_from_l4e(pl4e[i], pfn, d)) )
   2.302 +            goto fail;
   2.303 +
   2.304 +    return 1;
   2.305 +
   2.306 + fail:
   2.307 +    while ( i-- > 0 )
   2.308 +        put_page_from_l4e(pl4e[i], pfn);
   2.309 +
   2.310 +    return 0;
   2.311 +}
   2.312 +
   2.313 +#endif /* __x86_64__ */
   2.314 +
   2.315 +
   2.316  static void free_l1_table(struct pfn_info *page)
   2.317  {
   2.318      struct domain *d = page_get_owner(page);
   2.319 -    unsigned long page_nr = page - frame_table;
   2.320 +    unsigned long pfn = page_to_pfn(page);
   2.321      l1_pgentry_t *pl1e;
   2.322      int i;
   2.323  
   2.324 -    pl1e = map_domain_mem(page_nr << PAGE_SHIFT);
   2.325 +    pl1e = map_domain_mem(pfn << PAGE_SHIFT);
   2.326  
   2.327      for ( i = 0; i < L1_PAGETABLE_ENTRIES; i++ )
   2.328          put_page_from_l1e(pl1e[i], d);
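
All the alloc_lN_table() fail paths above share one unwind idiom: when validation fails at entry i, the references taken for entries 0..i-1 are dropped in reverse order, and entry i itself (which took no reference) is skipped. A self-contained sketch of just that pattern, with made-up get_ref()/put_ref() helpers:

    #include <stdio.h>

    static int  get_ref(int i) { return i != 3; }         /* pretend entry 3 fails */
    static void put_ref(int i) { printf("undo %d\n", i); }

    static int validate_table(int nr_entries)
    {
        int i;
        for ( i = 0; i < nr_entries; i++ )
            if ( !get_ref(i) )
                goto fail;
        return 1;

     fail:
        while ( i-- > 0 )   /* drops i-1 .. 0; the failing entry took no ref */
            put_ref(i);
        return 0;
    }

    int main(void)
    {
        return validate_table(8) ? 1 : 0;   /* prints: undo 2, undo 1, undo 0 */
    }
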
   2.329 @@ -607,6 +709,47 @@ static void free_l1_table(struct pfn_inf
   2.330  }
   2.331  
   2.332  
   2.333 +static void free_l2_table(struct pfn_info *page)
   2.334 +{
   2.335 +    unsigned long pfn = page_to_pfn(page);
   2.336 +    l2_pgentry_t *pl2e;
   2.337 +    int i;
   2.338 +
   2.339 +    pl2e = map_domain_mem(pfn << PAGE_SHIFT);
   2.340 +
   2.341 +    for ( i = 0; i < DOMAIN_ENTRIES_PER_L2_PAGETABLE; i++ )
   2.342 +        put_page_from_l2e(pl2e[i], pfn);
   2.343 +
   2.344 +    unmap_domain_mem(pl2e);
   2.345 +}
   2.346 +
   2.347 +
   2.348 +#ifdef __x86_64__
   2.349 +
   2.350 +static void free_l3_table(struct pfn_info *page)
   2.351 +{
   2.352 +    unsigned long pfn = page_to_pfn(page);
   2.353 +    l3_pgentry_t *pl3e = page_to_virt(page);
   2.354 +    int           i;
   2.355 +
   2.356 +    for ( i = 0; i < L3_PAGETABLE_ENTRIES; i++ )
   2.357 +        put_page_from_l3e(pl3e[i], pfn);
   2.358 +}
   2.359 +
   2.360 +
   2.361 +static void free_l4_table(struct pfn_info *page)
   2.362 +{
   2.363 +    unsigned long pfn = page_to_pfn(page);
   2.364 +    l4_pgentry_t *pl4e = page_to_virt(page);
   2.365 +    int           i;
   2.366 +
   2.367 +    for ( i = 0; i < L4_PAGETABLE_ENTRIES; i++ )
   2.368 +        put_page_from_l4e(pl4e[i], pfn);
   2.369 +}
   2.370 +
   2.371 +#endif /* __x86_64__ */
   2.372 +
   2.373 +
   2.374  static inline int update_l2e(l2_pgentry_t *pl2e, 
   2.375                               l2_pgentry_t  ol2e, 
   2.376                               l2_pgentry_t  nl2e)
   2.377 @@ -738,6 +881,12 @@ int alloc_page_type(struct pfn_info *pag
   2.378          return alloc_l1_table(page);
   2.379      case PGT_l2_page_table:
   2.380          return alloc_l2_table(page);
   2.381 +#ifdef __x86_64__
   2.382 +    case PGT_l3_page_table:
   2.383 +        return alloc_l3_table(page);
   2.384 +    case PGT_l4_page_table:
   2.385 +        return alloc_l4_table(page);
   2.386 +#endif
   2.387      case PGT_gdt_page:
   2.388      case PGT_ldt_page:
   2.389          return alloc_segdesc_page(page);
   2.390 @@ -766,6 +915,16 @@ void free_page_type(struct pfn_info *pag
   2.391          free_l2_table(page);
   2.392          break;
   2.393  
   2.394 +#ifdef __x86_64__
   2.395 +    case PGT_l3_page_table:
   2.396 +        free_l3_table(page);
   2.397 +        break;
   2.398 +
   2.399 +    case PGT_l4_page_table:
   2.400 +        free_l4_table(page);
   2.401 +        break;
   2.402 +#endif
   2.403 +
   2.404      default:
   2.405          BUG();
   2.406      }
   2.407 @@ -856,7 +1015,8 @@ int get_page_type(struct pfn_info *page,
   2.408                   * circumstances should be very rare.
   2.409                   */
   2.410                  struct domain *d = page_get_owner(page);
   2.411 -                if ( unlikely(NEED_FLUSH(tlbflush_time[d->exec_domain[0]->processor],
   2.412 +                if ( unlikely(NEED_FLUSH(tlbflush_time[d->exec_domain[0]->
   2.413 +                                                      processor],
   2.414                                           page->tlbflush_timestamp)) )
   2.415                  {
   2.416                      perfc_incr(need_flush_tlb_flush);
     3.1 --- a/xen/include/asm-x86/cpufeature.h	Tue Feb 08 18:21:54 2005 +0000
     3.2 +++ b/xen/include/asm-x86/cpufeature.h	Tue Feb 08 19:57:24 2005 +0000
     3.3 @@ -48,6 +48,7 @@
     3.4  /* Don't duplicate feature flags which are redundant with Intel! */
     3.5  #define X86_FEATURE_SYSCALL	(1*32+11) /* SYSCALL/SYSRET */
     3.6  #define X86_FEATURE_MP		(1*32+19) /* MP Capable. */
     3.7 +#define X86_FEATURE_NX		(1*32+20) /* No-Execute Bit. */
     3.8  #define X86_FEATURE_MMXEXT	(1*32+22) /* AMD MMX extensions */
     3.9  #define X86_FEATURE_LM		(1*32+29) /* Long Mode (x86-64) */
    3.10  #define X86_FEATURE_3DNOWEXT	(1*32+30) /* AMD 3DNow! extensions */
    3.11 @@ -98,6 +99,7 @@
    3.12  #define cpu_has_xmm		boot_cpu_has(X86_FEATURE_XMM)
    3.13  #define cpu_has_ht		boot_cpu_has(X86_FEATURE_HT)
    3.14  #define cpu_has_mp		boot_cpu_has(X86_FEATURE_MP)
    3.15 +#define cpu_has_nx		boot_cpu_has(X86_FEATURE_NX)
    3.16  #define cpu_has_k6_mtrr		boot_cpu_has(X86_FEATURE_K6_MTRR)
    3.17  #define cpu_has_cyrix_arr	boot_cpu_has(X86_FEATURE_CYRIX_ARR)
    3.18  #define cpu_has_centaur_mcr	boot_cpu_has(X86_FEATURE_CENTAUR_MCR)
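
The (1*32+20) encoding places NX in word 1, bit 20 of the per-CPU capability array; word 1 holds the CPUID 0x80000001 EDX flags, the same value the boot code stashed in %edi above. A sketch of the test that boot_cpu_has() boils down to (helper name made up):

    typedef unsigned int u32;

    /* feature == word*32 + bit; caps[] mirrors x86_capability[] */
    static int test_feature(const u32 caps[], int feature)
    {
        return (caps[feature / 32] >> (feature % 32)) & 1;
    }

    /* test_feature(caps, X86_FEATURE_NX) thus checks caps[1] bit 20 */
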
     4.1 --- a/xen/include/asm-x86/page.h	Tue Feb 08 18:21:54 2005 +0000
     4.2 +++ b/xen/include/asm-x86/page.h	Tue Feb 08 19:57:24 2005 +0000
     4.3 @@ -12,37 +12,37 @@
     4.4  /* Page-table type. */
     4.5  #ifndef __ASSEMBLY__
     4.6  typedef struct { unsigned long pt_lo; } pagetable_t;
     4.7 -#define pagetable_val(_x)  ((_x).pt_lo)
     4.8 -#define mk_pagetable(_x)   ( (pagetable_t) { (_x) } )
     4.9 +#define pagetable_val(_x)   ((_x).pt_lo)
    4.10 +#define mk_pagetable(_x)    ( (pagetable_t) { (_x) } )
    4.11  #endif
    4.12  
    4.13  #ifndef __ASSEMBLY__
    4.14 -#define PAGE_SIZE	         (1UL << PAGE_SHIFT)
    4.15 +#define PAGE_SIZE           (1UL << PAGE_SHIFT)
    4.16  #else
    4.17 -#define PAGE_SIZE	         (1 << PAGE_SHIFT)
    4.18 +#define PAGE_SIZE           (1 << PAGE_SHIFT)
    4.19  #endif
    4.20 -#define PAGE_MASK	         (~(PAGE_SIZE-1))
    4.21 +#define PAGE_MASK           (~(PAGE_SIZE-1))
    4.22  
    4.23 -#define clear_page(_p)           memset((void *)(_p), 0, PAGE_SIZE)
    4.24 -#define copy_page(_t,_f)         memcpy((void *)(_t), (void *)(_f), PAGE_SIZE)
    4.25 +#define clear_page(_p)      memset((void *)(_p), 0, PAGE_SIZE)
    4.26 +#define copy_page(_t,_f)    memcpy((void *)(_t), (void *)(_f), PAGE_SIZE)
    4.27  
    4.28 -#define PAGE_OFFSET		((unsigned long)__PAGE_OFFSET)
    4.29 -#define __pa(x)			((unsigned long)(x)-PAGE_OFFSET)
    4.30 -#define __va(x)			((void *)((unsigned long)(x)+PAGE_OFFSET))
    4.31 -#define pfn_to_page(_pfn)       (frame_table + (_pfn))
    4.32 -#define phys_to_page(kaddr)     (frame_table + ((kaddr) >> PAGE_SHIFT))
    4.33 -#define virt_to_page(kaddr)	(frame_table + (__pa(kaddr) >> PAGE_SHIFT))
    4.34 -#define VALID_PAGE(page)	((page - frame_table) < max_mapnr)
    4.35 +#define PAGE_OFFSET         ((unsigned long)__PAGE_OFFSET)
    4.36 +#define __pa(x)             ((unsigned long)(x)-PAGE_OFFSET)
    4.37 +#define __va(x)             ((void *)((unsigned long)(x)+PAGE_OFFSET))
    4.38 +#define pfn_to_page(_pfn)   (frame_table + (_pfn))
    4.39 +#define phys_to_page(kaddr) (frame_table + ((kaddr) >> PAGE_SHIFT))
    4.40 +#define virt_to_page(kaddr) (frame_table + (__pa(kaddr) >> PAGE_SHIFT))
    4.41 +#define VALID_PAGE(page)    ((page - frame_table) < max_mapnr)
    4.42  
    4.43  /*
    4.44   * NB. We don't currently track I/O holes in the physical RAM space.
    4.45   * For now we guess that I/O devices will be mapped in the first 1MB
    4.46   * (e.g., VGA buffers) or beyond the end of physical RAM.
    4.47   */
    4.48 -#define pfn_is_ram(_pfn)        (((_pfn) > 0x100) && ((_pfn) < max_page))
    4.49 +#define pfn_is_ram(_pfn)    (((_pfn) > 0x100) && ((_pfn) < max_page))
    4.50  
    4.51  /* High table entries are reserved by the hypervisor. */
    4.52 -#define DOMAIN_ENTRIES_PER_L2_PAGETABLE	    \
    4.53 +#define DOMAIN_ENTRIES_PER_L2_PAGETABLE     \
    4.54    (HYPERVISOR_VIRT_START >> L2_PAGETABLE_SHIFT)
    4.55  #define HYPERVISOR_ENTRIES_PER_L2_PAGETABLE \
    4.56    (L2_PAGETABLE_ENTRIES - DOMAIN_ENTRIES_PER_L2_PAGETABLE)
    4.57 @@ -65,48 +65,48 @@ extern void paging_init(void);
    4.58  /* Flush global pages as well. */
    4.59  
    4.60  #define __pge_off()                                                     \
    4.61 -        do {                                                            \
    4.62 -                __asm__ __volatile__(                                   \
    4.63 -                        "mov %0, %%cr4;  # turn off PGE     "           \
    4.64 -                        :: "r" (mmu_cr4_features & ~X86_CR4_PGE));      \
    4.65 -        } while (0)
    4.66 +    do {                                                                \
    4.67 +        __asm__ __volatile__(                                           \
    4.68 +            "mov %0, %%cr4;  # turn off PGE     "                       \
    4.69 +            : : "r" (mmu_cr4_features & ~X86_CR4_PGE) );                \
     4.70 +    } while ( 0 )
    4.71  
    4.72  #define __pge_on()                                                      \
    4.73 -        do {                                                            \
    4.74 -                __asm__ __volatile__(                                   \
    4.75 -                        "mov %0, %%cr4;  # turn off PGE     "           \
    4.76 -                        :: "r" (mmu_cr4_features));                     \
    4.77 -        } while (0)
    4.78 +    do {                                                                \
    4.79 +        __asm__ __volatile__(                                           \
    4.80 +            "mov %0, %%cr4;  # turn on PGE      "                       \
    4.81 +            : : "r" (mmu_cr4_features) );                               \
    4.82 +    } while ( 0 )
    4.83  
    4.84  
    4.85 -#define __flush_tlb_pge()						\
    4.86 -	do {								\
    4.87 -                __pge_off();                                            \
    4.88 -		__flush_tlb();						\
    4.89 -                __pge_on();                                             \
    4.90 -	} while (0)
    4.91 +#define __flush_tlb_pge()                                               \
    4.92 +    do {                                                                \
    4.93 +        __pge_off();                                                    \
    4.94 +        __flush_tlb();                                                  \
    4.95 +        __pge_on();                                                     \
    4.96 +    } while ( 0 )
    4.97  
    4.98  #define __flush_tlb_one(__addr) \
    4.99 -__asm__ __volatile__("invlpg %0": :"m" (*(char *) (__addr)))
   4.100 +    __asm__ __volatile__("invlpg %0": :"m" (*(char *) (__addr)))
   4.101  
   4.102  #endif /* !__ASSEMBLY__ */
   4.103  
   4.104  
   4.105 -#define _PAGE_PRESENT	0x001
   4.106 -#define _PAGE_RW	0x002
   4.107 -#define _PAGE_USER	0x004
   4.108 -#define _PAGE_PWT	0x008
   4.109 -#define _PAGE_PCD	0x010
   4.110 -#define _PAGE_ACCESSED	0x020
   4.111 -#define _PAGE_DIRTY	0x040
   4.112 -#define _PAGE_PAT       0x080
   4.113 -#define _PAGE_PSE	0x080
   4.114 -#define _PAGE_GLOBAL	0x100
   4.115 +#define _PAGE_PRESENT  0x001UL
   4.116 +#define _PAGE_RW       0x002UL
   4.117 +#define _PAGE_USER     0x004UL
   4.118 +#define _PAGE_PWT      0x008UL
   4.119 +#define _PAGE_PCD      0x010UL
   4.120 +#define _PAGE_ACCESSED 0x020UL
   4.121 +#define _PAGE_DIRTY    0x040UL
   4.122 +#define _PAGE_PAT      0x080UL
   4.123 +#define _PAGE_PSE      0x080UL
   4.124 +#define _PAGE_GLOBAL   0x100UL
   4.125  
   4.126  #define __PAGE_HYPERVISOR \
   4.127 -	(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
   4.128 +    (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
   4.129  #define __PAGE_HYPERVISOR_NOCACHE \
   4.130 -	(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_PCD | _PAGE_ACCESSED)
   4.131 +    (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_PCD | _PAGE_ACCESSED)
   4.132  
   4.133  #define MAKE_GLOBAL(_x) ((_x) | _PAGE_GLOBAL)
   4.134  
     5.1 --- a/xen/include/asm-x86/x86_32/page.h	Tue Feb 08 18:21:54 2005 +0000
     5.2 +++ b/xen/include/asm-x86/x86_32/page.h	Tue Feb 08 19:57:24 2005 +0000
     5.3 @@ -27,20 +27,24 @@ typedef l2_pgentry_t root_pgentry_t;
     5.4  #endif /* !__ASSEMBLY__ */
     5.5  
     5.6  /* Strip type from a table entry. */
     5.7 -#define l1_pgentry_val(_x) ((_x).l1_lo)
     5.8 -#define l2_pgentry_val(_x) ((_x).l2_lo)
     5.9 +#define l1_pgentry_val(_x)   ((_x).l1_lo)
    5.10 +#define l2_pgentry_val(_x)   ((_x).l2_lo)
    5.11 +#define root_pgentry_val(_x) (l2_pgentry_val(_x))
    5.12  
    5.13  /* Add type to a table entry. */
    5.14 -#define mk_l1_pgentry(_x)  ( (l1_pgentry_t) { (_x) } )
    5.15 -#define mk_l2_pgentry(_x)  ( (l2_pgentry_t) { (_x) } )
    5.16 +#define mk_l1_pgentry(_x)   ( (l1_pgentry_t) { (_x) } )
    5.17 +#define mk_l2_pgentry(_x)   ( (l2_pgentry_t) { (_x) } )
    5.18 +#define mk_root_pgentry(_x) (mk_l2_pgentry(_x))
    5.19  
    5.20  /* Turn a typed table entry into a physical address. */
    5.21 -#define l1_pgentry_to_phys(_x) (l1_pgentry_val(_x) & PAGE_MASK)
    5.22 -#define l2_pgentry_to_phys(_x) (l2_pgentry_val(_x) & PAGE_MASK)
    5.23 +#define l1_pgentry_to_phys(_x)   (l1_pgentry_val(_x) & PAGE_MASK)
    5.24 +#define l2_pgentry_to_phys(_x)   (l2_pgentry_val(_x) & PAGE_MASK)
    5.25 +#define root_pgentry_to_phys(_x) (l2_pgentry_to_phys(_x))
    5.26  
    5.27  /* Turn a typed table entry into a page index. */
    5.28 -#define l1_pgentry_to_pfn(_x) (l1_pgentry_val(_x) >> PAGE_SHIFT) 
    5.29 -#define l2_pgentry_to_pfn(_x) (l2_pgentry_val(_x) >> PAGE_SHIFT)
    5.30 +#define l1_pgentry_to_pfn(_x)   (l1_pgentry_val(_x) >> PAGE_SHIFT) 
    5.31 +#define l2_pgentry_to_pfn(_x)   (l2_pgentry_val(_x) >> PAGE_SHIFT)
    5.32 +#define root_pgentry_to_pfn(_x) (l2_pgentry_to_pfn(_x))
    5.33  
    5.34  /* Pagetable walking. */
    5.35  #define l2_pgentry_to_l1(_x) \
    5.36 @@ -55,4 +59,13 @@ typedef l2_pgentry_t root_pgentry_t;
    5.37  /* Given a virtual address, get an entry offset into a linear page table. */
    5.38  #define l1_linear_offset(_a) ((_a) >> PAGE_SHIFT)
    5.39  
    5.40 +#define PGT_root_page_table PGT_l2_page_table
    5.41 +
    5.42 +#define _PAGE_NX         0UL
    5.43 +
    5.44 +#define L1_DISALLOW_MASK (3UL << 7)
    5.45 +#define L2_DISALLOW_MASK (7UL << 7)
    5.46 +#define L3_DISALLOW_MASK (7UL << 7)
     5.47 +#define L4_DISALLOW_MASK (7UL << 7)
    5.48 +
    5.49  #endif /* __X86_32_PAGE_H__ */
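
These masks make the earlier mm.c hunks concrete: 3UL<<7 is exactly _PAGE_PAT|_PAGE_GLOBAL, the pair previously open-coded in get_page_from_l1e(), while 7UL<<7 additionally covers bit 9. A stand-alone sketch of the resulting check (predicate name made up):

    #define _PAGE_PRESENT    0x001UL
    #define L1_DISALLOW_MASK (3UL << 7)    /* _PAGE_PAT | _PAGE_GLOBAL */

    /* mirrors the test in get_page_from_l1e() above */
    static int l1e_ok(unsigned long l1v)
    {
        if ( !(l1v & _PAGE_PRESENT) )
            return 1;                      /* not-present entries always pass */
        return !(l1v & L1_DISALLOW_MASK);
    }
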
     6.1 --- a/xen/include/asm-x86/x86_64/page.h	Tue Feb 08 18:21:54 2005 +0000
     6.2 +++ b/xen/include/asm-x86/x86_64/page.h	Tue Feb 08 19:57:24 2005 +0000
     6.3 @@ -18,8 +18,8 @@
     6.4  
     6.5  #define __PAGE_OFFSET           (0xFFFF830000000000)
     6.6  
     6.7 -/* These may increase in future (phys. bits in particular). */
     6.8 -#define PADDR_BITS              40
     6.9 +/* These are page-table limitations. Current CPUs support only 40-bit phys. */
    6.10 +#define PADDR_BITS              52
    6.11  #define VADDR_BITS              48
    6.12  #define PADDR_MASK              ((1UL << PADDR_BITS)-1)
    6.13  #define VADDR_MASK              ((1UL << VADDR_BITS)-1)
    6.14 @@ -34,28 +34,32 @@ typedef l4_pgentry_t root_pgentry_t;
    6.15  #endif /* !__ASSEMBLY__ */
    6.16  
    6.17  /* Strip type from a table entry. */
    6.18 -#define l1_pgentry_val(_x) ((_x).l1_lo)
    6.19 -#define l2_pgentry_val(_x) ((_x).l2_lo)
    6.20 -#define l3_pgentry_val(_x) ((_x).l3_lo)
    6.21 -#define l4_pgentry_val(_x) ((_x).l4_lo)
    6.22 +#define l1_pgentry_val(_x)   ((_x).l1_lo)
    6.23 +#define l2_pgentry_val(_x)   ((_x).l2_lo)
    6.24 +#define l3_pgentry_val(_x)   ((_x).l3_lo)
    6.25 +#define l4_pgentry_val(_x)   ((_x).l4_lo)
    6.26 +#define root_pgentry_val(_x) (l4_pgentry_val(_x))
    6.27  
    6.28  /* Add type to a table entry. */
    6.29 -#define mk_l1_pgentry(_x)  ( (l1_pgentry_t) { (_x) } )
    6.30 -#define mk_l2_pgentry(_x)  ( (l2_pgentry_t) { (_x) } )
    6.31 -#define mk_l3_pgentry(_x)  ( (l3_pgentry_t) { (_x) } )
    6.32 -#define mk_l4_pgentry(_x)  ( (l4_pgentry_t) { (_x) } )
    6.33 +#define mk_l1_pgentry(_x)   ( (l1_pgentry_t) { (_x) } )
    6.34 +#define mk_l2_pgentry(_x)   ( (l2_pgentry_t) { (_x) } )
    6.35 +#define mk_l3_pgentry(_x)   ( (l3_pgentry_t) { (_x) } )
    6.36 +#define mk_l4_pgentry(_x)   ( (l4_pgentry_t) { (_x) } )
    6.37 +#define mk_root_pgentry(_x) (mk_l4_pgentry(_x))
    6.38  
    6.39  /* Turn a typed table entry into a physical address. */
    6.40 -#define l1_pgentry_to_phys(_x) (l1_pgentry_val(_x) & (PADDR_MASK & PAGE_MASK))
    6.41 -#define l2_pgentry_to_phys(_x) (l2_pgentry_val(_x) & (PADDR_MASK & PAGE_MASK))
    6.42 -#define l3_pgentry_to_phys(_x) (l3_pgentry_val(_x) & (PADDR_MASK & PAGE_MASK))
    6.43 -#define l4_pgentry_to_phys(_x) (l4_pgentry_val(_x) & (PADDR_MASK & PAGE_MASK))
    6.44 +#define l1_pgentry_to_phys(_x)   (l1_pgentry_val(_x) & (PADDR_MASK&PAGE_MASK))
    6.45 +#define l2_pgentry_to_phys(_x)   (l2_pgentry_val(_x) & (PADDR_MASK&PAGE_MASK))
    6.46 +#define l3_pgentry_to_phys(_x)   (l3_pgentry_val(_x) & (PADDR_MASK&PAGE_MASK))
    6.47 +#define l4_pgentry_to_phys(_x)   (l4_pgentry_val(_x) & (PADDR_MASK&PAGE_MASK))
    6.48 +#define root_pgentry_to_phys(_x) (l4_pgentry_to_phys(_x))
    6.49  
    6.50  /* Turn a typed table entry into a page index. */
    6.51 -#define l1_pgentry_to_pfn(_x) (l1_pgentry_val(_x) >> PAGE_SHIFT) 
    6.52 -#define l2_pgentry_to_pfn(_x) (l2_pgentry_val(_x) >> PAGE_SHIFT)
    6.53 -#define l3_pgentry_to_pfn(_x) (l3_pgentry_val(_x) >> PAGE_SHIFT)
    6.54 -#define l4_pgentry_to_pfn(_x) (l4_pgentry_val(_x) >> PAGE_SHIFT)
    6.55 +#define l1_pgentry_to_pfn(_x)   (l1_pgentry_val(_x) >> PAGE_SHIFT) 
    6.56 +#define l2_pgentry_to_pfn(_x)   (l2_pgentry_val(_x) >> PAGE_SHIFT)
    6.57 +#define l3_pgentry_to_pfn(_x)   (l3_pgentry_val(_x) >> PAGE_SHIFT)
    6.58 +#define l4_pgentry_to_pfn(_x)   (l4_pgentry_val(_x) >> PAGE_SHIFT)
    6.59 +#define root_pgentry_to_pfn(_x) (l4_pgentry_to_pfn(_x))
    6.60  
    6.61  /* Pagetable walking. */
    6.62  #define l2_pgentry_to_l1(_x) \
    6.63 @@ -78,4 +82,13 @@ typedef l4_pgentry_t root_pgentry_t;
    6.64  /* Given a virtual address, get an entry offset into a linear page table. */
    6.65  #define l1_linear_offset(_a) (((_a) & VADDR_MASK) >> PAGE_SHIFT)
    6.66  
    6.67 +#define PGT_root_page_table PGT_l4_page_table
    6.68 +
    6.69 +#define _PAGE_NX         (cpu_has_nx ? (1UL<<63) : 0UL)
    6.70 +
    6.71 +#define L1_DISALLOW_MASK ((cpu_has_nx?0:(1UL<<63)) | (3UL << 7))
    6.72 +#define L2_DISALLOW_MASK ((cpu_has_nx?0:(1UL<<63)) | (7UL << 7))
    6.73 +#define L3_DISALLOW_MASK ((cpu_has_nx?0:(1UL<<63)) | (7UL << 7))
    6.74 +#define L4_DISALLOW_MASK ((cpu_has_nx?0:(1UL<<63)) | (7UL << 7))
    6.75 +
    6.76  #endif /* __X86_64_PAGE_H__ */
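
On x86_64 the masks must be computed at run time: bit 63 is a legal NX bit once EFER.NX is enabled, but a reserved bit (and hence one that must be refused in guest entries) on CPUs without the feature. A stand-alone sketch of the effect, with cpu_has_nx faked as a plain variable:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        int cpu_has_nx = 0;   /* pretend the CPU lacks NX */
        uint64_t l1_disallow = (cpu_has_nx ? 0 : (1ULL << 63)) | (3ULL << 7);

        uint64_t guest_l1e = (1ULL << 63) | 0x067;   /* NX + P,RW,US,A,D */
        printf("entry %s\n",
               (guest_l1e & l1_disallow) ? "rejected" : "accepted");
        return 0;   /* prints "rejected" on a no-NX CPU */
    }
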