ia64/xen-unstable

changeset 6054:69bf77e1b102

Writable pagetables for x86/64. Xen portion.
Signed-off-by: Jun Nakajima <jun.nakajima@intel.com>
author kaf24@firebug.cl.cam.ac.uk
date Mon Aug 08 08:18:06 2005 +0000 (2005-08-08)
parents deff07c1b686
children c4512592a1dc
files xen/arch/x86/audit.c xen/arch/x86/domain.c xen/arch/x86/mm.c xen/arch/x86/shadow32.c xen/arch/x86/shadow_public.c xen/arch/x86/traps.c xen/common/grant_table.c xen/include/asm-x86/mm.h xen/include/asm-x86/page.h xen/include/asm-x86/shadow.h xen/include/asm-x86/x86_64/page.h
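The common thread of this change is that, on x86/64, the virtual address at which a pagetable page is mapped (its "back pointer") no longer fits in a 32-bit type_info, so type_info and the various type arguments widen to unsigned long, PGT_va_shift moves up to bit 32, and a PRtype_info format macro is introduced for printing. The fragment below is a minimal sketch of how that back pointer is packed and unpacked, written to mirror the get_page_from_l4e() and l2_backptr() logic in the mm.c hunk; the pagetable shift values (30, 39) are assumed standard 4-level x86-64 ones and are not copied from the Xen headers.

    /*
     * Illustration only: how the x86/64 pagetable back pointer introduced by
     * this changeset is packed into type_info and unpacked again, mirroring
     * get_page_from_l4e() and the new l2_backptr() in the mm.c hunk.  The
     * PGT_va_shift value (32) comes from the mm.h hunk; the pagetable shifts
     * are assumptions, not copied from the Xen headers.
     */
    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PGT_VA_SHIFT        32
    #define L3_PAGETABLE_SHIFT  30
    #define L4_PAGETABLE_SHIFT  39

    /*
     * get_page_from_l4e(): the virtual address covered by L4 slot i4 is
     * shifted down by L4_PAGETABLE_SHIFT and stored in the va field of the
     * referenced L3 page's type.
     */
    static uint64_t l3_type_va_field(uint64_t i4)
    {
        uint64_t vaddr = i4 << L4_PAGETABLE_SHIFT;
        return (vaddr >> L4_PAGETABLE_SHIFT) << PGT_VA_SHIFT;
    }

    /*
     * l2_backptr(): combine the L3 page's stored back pointer with the L3
     * slot index to reconstruct the virtual address covered by an L2 page.
     */
    static uint64_t l2_backptr(uint64_t l3_type_va, uint64_t i3)
    {
        return ((l3_type_va >> PGT_VA_SHIFT) << L4_PAGETABLE_SHIFT) |
               (i3 << L3_PAGETABLE_SHIFT);
    }

    int main(void)
    {
        uint64_t va_field = l3_type_va_field(5);     /* L3 table hooked in at L4 slot 5 */
        uint64_t vaddr    = l2_backptr(va_field, 7); /* its L2 table sits in L3 slot 7 */

        assert(vaddr == (5ULL << L4_PAGETABLE_SHIFT) + (7ULL << L3_PAGETABLE_SHIFT));
        printf("reconstructed vaddr = %#llx\n", (unsigned long long)vaddr);
        return 0;
    }

With the back pointer recoverable this way, ptwr_do_page_fault() below no longer has to fall back to emulation on x86/64: l2_idx can again be extracted from type_info & PGT_va_mask, which is why the unconditional "goto emulate" for __x86_64__ is removed.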
line diff
     1.1 --- a/xen/arch/x86/audit.c	Sun Aug 07 09:13:39 2005 +0000
     1.2 +++ b/xen/arch/x86/audit.c	Mon Aug 08 08:18:06 2005 +0000
     1.3 @@ -73,7 +73,7 @@ int audit_adjust_pgtables(struct domain 
     1.4              if ( tcount < 0 )
     1.5              {
     1.6                  APRINTK("Audit %d: type count went below zero "
     1.7 -                        "mfn=%lx t=%x ot=%x",
     1.8 +                        "mfn=%lx t=%" PRtype_info " ot=%x",
     1.9                          d->domain_id, page_to_pfn(page),
    1.10                          page->u.inuse.type_info,
    1.11                          page->tlbflush_timestamp);
    1.12 @@ -82,7 +82,7 @@ int audit_adjust_pgtables(struct domain 
    1.13              else if ( (tcount & ~PGT_count_mask) != 0 )
    1.14              {
    1.15                  APRINTK("Audit %d: type count overflowed "
    1.16 -                        "mfn=%lx t=%x ot=%x",
    1.17 +                        "mfn=%lx t=%" PRtype_info " ot=%x",
    1.18                          d->domain_id, page_to_pfn(page),
    1.19                          page->u.inuse.type_info,
    1.20                          page->tlbflush_timestamp);
    1.21 @@ -101,7 +101,7 @@ int audit_adjust_pgtables(struct domain 
    1.22          if ( count < 0 )
    1.23          {
    1.24              APRINTK("Audit %d: general count went below zero "
    1.25 -                    "mfn=%lx t=%x ot=%x",
    1.26 +                    "mfn=%lx t=%" PRtype_info " ot=%x",
    1.27                      d->domain_id, page_to_pfn(page),
    1.28                      page->u.inuse.type_info,
    1.29                      page->tlbflush_timestamp);
    1.30 @@ -110,7 +110,7 @@ int audit_adjust_pgtables(struct domain 
    1.31          else if ( (count & ~PGT_count_mask) != 0 )
    1.32          {
    1.33              APRINTK("Audit %d: general count overflowed "
    1.34 -                    "mfn=%lx t=%x ot=%x",
    1.35 +                    "mfn=%lx t=%" PRtype_info " ot=%x",
    1.36                      d->domain_id, page_to_pfn(page),
    1.37                      page->u.inuse.type_info,
    1.38                      page->tlbflush_timestamp);
    1.39 @@ -152,7 +152,8 @@ int audit_adjust_pgtables(struct domain 
    1.40                          if ( page_type != PGT_l1_shadow )
    1.41                          {
    1.42                              printk("Audit %d: [Shadow L2 mfn=%lx i=%x] "
    1.43 -                                   "Expected Shadow L1 t=%x mfn=%lx\n",
    1.44 +                                   "Expected Shadow L1 t=%" PRtype_info 
    1.45 +				   " mfn=%lx\n",
    1.46                                     d->domain_id, mfn, i,
    1.47                                     l1page->u.inuse.type_info, l1mfn);
    1.48                              errors++;
    1.49 @@ -178,14 +179,14 @@ int audit_adjust_pgtables(struct domain 
    1.50                          if ( page_type == PGT_l2_page_table )
    1.51                          {
    1.52                              printk("Audit %d: [%x] Found %s Linear PT "
    1.53 -                                   "t=%x mfn=%lx\n",
    1.54 +                                   "t=%" PRtype_info " mfn=%lx\n",
    1.55                                     d->domain_id, i, (l1mfn==mfn) ? "Self" : "Other",
    1.56                                     l1page->u.inuse.type_info, l1mfn);
    1.57                          }
    1.58                          else if ( page_type != PGT_l1_page_table )
    1.59                          {
    1.60                              printk("Audit %d: [L2 mfn=%lx i=%x] "
    1.61 -                                   "Expected L1 t=%x mfn=%lx\n",
    1.62 +                                   "Expected L1 t=%" PRtype_info " mfn=%lx\n",
    1.63                                     d->domain_id, mfn, i,
    1.64                                     l1page->u.inuse.type_info, l1mfn);
    1.65                              errors++;
    1.66 @@ -237,7 +238,8 @@ int audit_adjust_pgtables(struct domain 
    1.67                      if ( page_get_owner(gpage) != d )
    1.68                      {
    1.69                          printk("Audit %d: [hl2mfn=%lx,i=%x] Skip foreign page "
    1.70 -                               "dom=%p (id=%d) mfn=%lx c=%08x t=%08x\n",
    1.71 +                               "dom=%p (id=%d) mfn=%lx c=%08x t=%"
    1.72 +			       PRtype_info "\n",
    1.73                                 d->domain_id, hl2mfn, i,
    1.74                                 page_get_owner(gpage),
    1.75                                 page_get_owner(gpage)->domain_id,
    1.76 @@ -288,7 +290,7 @@ int audit_adjust_pgtables(struct domain 
    1.77                                 PGT_writable_page) )
    1.78                          {
    1.79                              printk("Audit %d: [l1mfn=%lx, i=%x] Illegal RW "
    1.80 -                                   "t=%x mfn=%lx\n",
    1.81 +                                   "t=%" PRtype_info " mfn=%lx\n",
    1.82                                     d->domain_id, l1mfn, i,
    1.83                                     gpage->u.inuse.type_info, gmfn);
    1.84                              errors++;
    1.85 @@ -308,7 +310,8 @@ int audit_adjust_pgtables(struct domain 
    1.86                      if ( page_get_owner(gpage) != d )
    1.87                      {
    1.88                          printk("Audit %d: [l1mfn=%lx,i=%x] Skip foreign page "
    1.89 -                               "dom=%p (id=%d) mfn=%lx c=%08x t=%08x\n",
    1.90 +                               "dom=%p (id=%d) mfn=%lx c=%08x t=%" 
    1.91 +			       PRtype_info "\n",
    1.92                                 d->domain_id, l1mfn, i,
    1.93                                 page_get_owner(gpage),
    1.94                                 page_get_owner(gpage)->domain_id,
    1.95 @@ -454,7 +457,7 @@ int audit_adjust_pgtables(struct domain 
    1.96                      if ( shadow_refcounts )
    1.97                      {
    1.98                          printk("Audit %d: found an L2 guest page "
    1.99 -                               "mfn=%lx t=%08x c=%08x while in shadow mode\n",
   1.100 +                               "mfn=%lx t=%" PRtype_info " c=%08x while in shadow mode\n",
   1.101                                 d->domain_id, mfn, page->u.inuse.type_info,
   1.102                                 page->count_info);
   1.103                          errors++;
   1.104 @@ -465,14 +468,16 @@ int audit_adjust_pgtables(struct domain 
   1.105                          if ( (page->u.inuse.type_info & PGT_validated) !=
   1.106                               PGT_validated )
   1.107                          {
   1.108 -                            printk("Audit %d: L2 mfn=%lx not validated %08x\n",
   1.109 +                            printk("Audit %d: L2 mfn=%lx not validated %"
   1.110 +				   PRtype_info "\n",
   1.111                                     d->domain_id, mfn, page->u.inuse.type_info);
   1.112                              errors++;
   1.113                          }
   1.114  
   1.115                          if ( (page->u.inuse.type_info & PGT_pinned) != PGT_pinned )
   1.116                          {
   1.117 -                            printk("Audit %d: L2 mfn=%lx not pinned t=%08x\n",
   1.118 +                            printk("Audit %d: L2 mfn=%lx not pinned t=%"
   1.119 +				   PRtype_info "\n",
   1.120                                     d->domain_id, mfn, page->u.inuse.type_info);
   1.121                              errors++;
   1.122                          }
   1.123 @@ -494,7 +499,8 @@ int audit_adjust_pgtables(struct domain 
   1.124                  {
   1.125                      if ( shadow_refcounts )
   1.126                      {
   1.127 -                        printk("found an L1 guest page mfn=%lx t=%08x c=%08x "
   1.128 +                        printk("found an L1 guest page mfn=%lx t=%" 
   1.129 +			       PRtype_info " c=%08x "
   1.130                                 "while in shadow mode\n",
   1.131                                 mfn, page->u.inuse.type_info, page->count_info);
   1.132                          errors++;
   1.133 @@ -505,7 +511,8 @@ int audit_adjust_pgtables(struct domain 
   1.134                          if ( (page->u.inuse.type_info & PGT_validated) !=
   1.135                               PGT_validated )
   1.136                          {
   1.137 -                            printk("Audit %d: L1 not validated mfn=%lx t=%08x\n",
   1.138 +                            printk("Audit %d: L1 not validated mfn=%lx t=%"
   1.139 +				   PRtype_info "\n",
   1.140                                     d->domain_id, mfn, page->u.inuse.type_info);
   1.141                              errors++;
   1.142                          }
   1.143 @@ -514,7 +521,8 @@ int audit_adjust_pgtables(struct domain 
   1.144                          {
   1.145                              if ( !VM_ASSIST(d, VMASST_TYPE_writable_pagetables) )
   1.146                              {
   1.147 -                                printk("Audit %d: L1 mfn=%lx not pinned t=%08x\n",
   1.148 +                                printk("Audit %d: L1 mfn=%lx not pinned t=%"
   1.149 +				       PRtype_info "\n",
   1.150                                         d->domain_id, mfn, page->u.inuse.type_info);
   1.151                              }
   1.152                          }
   1.153 @@ -621,7 +629,7 @@ void _audit_domain(struct domain *d, int
   1.154          for ( i = 0; i < L1_PAGETABLE_ENTRIES; i++ )
   1.155          {
   1.156              if ( (pt[i] & _PAGE_PRESENT) && ((pt[i] >> PAGE_SHIFT) == xmfn) )
   1.157 -                printk("     found dom=%d mfn=%lx t=%08x c=%08x "
   1.158 +                printk("     found dom=%d mfn=%lx t=%" PRtype_info " c=%08x "
   1.159                         "pt[i=%x]=%lx\n",
   1.160                         d->domain_id, mfn, page->u.inuse.type_info,
   1.161                         page->count_info, i, pt[i]);
   1.162 @@ -754,7 +762,7 @@ void _audit_domain(struct domain *d, int
   1.163          if ( (page->u.inuse.type_info & PGT_count_mask) >
   1.164               (page->count_info & PGC_count_mask) )
   1.165          {
   1.166 -            printk("taf(%08x) > caf(%08x) mfn=%lx\n",
   1.167 +            printk("taf(%" PRtype_info ") > caf(%08x) mfn=%lx\n",
   1.168                     page->u.inuse.type_info, page->count_info, mfn);
   1.169              errors++;
   1.170          }
   1.171 @@ -763,8 +771,8 @@ void _audit_domain(struct domain *d, int
   1.172               (page_type == PGT_writable_page) &&
   1.173               !(page->u.inuse.type_info & PGT_validated) )
   1.174          {
   1.175 -            printk("shadow mode writable page not validated mfn=%lx "
   1.176 -                   "t=%08x c=%08x\n",
   1.177 +            printk("shadow mode writable page not validated mfn=%lx " 
   1.178 +		   "t=%" PRtype_info  " c=%08x\n",
   1.179                     mfn, page->u.inuse.type_info, page->count_info);
   1.180              errors++;
   1.181          }
   1.182 @@ -774,7 +782,7 @@ void _audit_domain(struct domain *d, int
   1.183               (page->u.inuse.type_info & PGT_count_mask) > 1 )
   1.184          {
   1.185              printk("writeable page with type count >1: "
   1.186 -                   "mfn=%lx t=%08x c=%08x\n",
   1.187 +                   "mfn=%lx t=%" PRtype_info " c=%08x\n",
   1.188                    mfn,
   1.189                    page->u.inuse.type_info,
   1.190                    page->count_info );
   1.191 @@ -786,7 +794,7 @@ void _audit_domain(struct domain *d, int
   1.192          if ( page_type == PGT_none && 
   1.193               (page->u.inuse.type_info & PGT_count_mask) > 0 )
   1.194          {
   1.195 -            printk("normal page with type count >0: mfn=%lx t=%08x c=%08x\n",
   1.196 +            printk("normal page with type count >0: mfn=%lx t=%" PRtype_info " c=%08x\n",
   1.197                    mfn,
   1.198                    page->u.inuse.type_info,
   1.199                    page->count_info );
   1.200 @@ -812,7 +820,7 @@ void _audit_domain(struct domain *d, int
   1.201                   : !(page_type && (page_type <= PGT_l4_page_table)) )
   1.202              {
   1.203                  printk("out of sync page mfn=%lx has strange type "
   1.204 -                       "t=%08x c=%08x\n",
   1.205 +                       "t=%" PRtype_info  " c=%08x\n",
   1.206                         mfn, page->u.inuse.type_info, page->count_info);
   1.207                  errors++;
   1.208              }
   1.209 @@ -850,7 +858,7 @@ void _audit_domain(struct domain *d, int
   1.210          case PGT_l4_page_table:
   1.211              if ( (page->u.inuse.type_info & PGT_count_mask) != 0 )
   1.212              {
   1.213 -                printk("Audit %d: type count!=0 t=%x ot=%x c=%x mfn=%lx\n",
   1.214 +                printk("Audit %d: type count!=0 t=%" PRtype_info " ot=%x c=%x mfn=%lx\n",
   1.215                         d->domain_id, page->u.inuse.type_info, 
   1.216                         page->tlbflush_timestamp,
   1.217                         page->count_info, mfn);
   1.218 @@ -864,7 +872,7 @@ void _audit_domain(struct domain *d, int
   1.219          case PGT_ldt_page:
   1.220              if ( (page->u.inuse.type_info & PGT_count_mask) != 0 )
   1.221              {
   1.222 -                printk("Audit %d: type count!=0 t=%x ot=%x c=%x mfn=%lx\n",
   1.223 +                printk("Audit %d: type count!=0 t=%" PRtype_info " ot=%x c=%x mfn=%lx\n",
   1.224                         d->domain_id, page->u.inuse.type_info, 
   1.225                         page->tlbflush_timestamp,
   1.226                         page->count_info, mfn);
   1.227 @@ -877,7 +885,7 @@ void _audit_domain(struct domain *d, int
   1.228          
   1.229          if ( (page->count_info & PGC_count_mask) != 1 )
   1.230          {
   1.231 -            printk("Audit %d: gen count!=1 (c=%x) t=%x ot=%x mfn=%lx\n",
   1.232 +            printk("Audit %d: gen count!=1 (c=%x) t=%" PRtype_info " ot=%x mfn=%lx\n",
   1.233                     d->domain_id,
   1.234                     page->count_info,
   1.235                     page->u.inuse.type_info, 
   1.236 @@ -913,7 +921,7 @@ void _audit_domain(struct domain *d, int
   1.237                           (page->count_info != 0) )
   1.238                      {
   1.239                          printk("Audit %d: shadow page counts wrong "
   1.240 -                               "mfn=%lx t=%08x c=%08x\n",
   1.241 +                               "mfn=%lx t=%" PRtype_info " c=%08x\n",
   1.242                                 d->domain_id, page_to_pfn(page),
   1.243                                 page->u.inuse.type_info,
   1.244                                 page->count_info);
     2.1 --- a/xen/arch/x86/domain.c	Sun Aug 07 09:13:39 2005 +0000
     2.2 +++ b/xen/arch/x86/domain.c	Mon Aug 08 08:18:06 2005 +0000
     2.3 @@ -190,7 +190,7 @@ void dump_pageframe_info(struct domain *
     2.4      {
     2.5          list_for_each_entry ( page, &d->page_list, list )
     2.6          {
     2.7 -            printk("Page %p: caf=%08x, taf=%08x\n",
     2.8 +            printk("Page %p: caf=%08x, taf=%" PRtype_info "\n",
     2.9                     _p(page_to_phys(page)), page->count_info,
    2.10                     page->u.inuse.type_info);
    2.11          }
    2.12 @@ -198,14 +198,14 @@ void dump_pageframe_info(struct domain *
    2.13  
    2.14      list_for_each_entry ( page, &d->xenpage_list, list )
    2.15      {
    2.16 -        printk("XenPage %p: caf=%08x, taf=%08x\n",
    2.17 +        printk("XenPage %p: caf=%08x, taf=%" PRtype_info "\n",
    2.18                 _p(page_to_phys(page)), page->count_info,
    2.19                 page->u.inuse.type_info);
    2.20      }
    2.21  
    2.22      
    2.23      page = virt_to_page(d->shared_info);
    2.24 -    printk("Shared_info@%p: caf=%08x, taf=%08x\n",
    2.25 +    printk("Shared_info@%p: caf=%08x, taf=%" PRtype_info "\n",
    2.26             _p(page_to_phys(page)), page->count_info,
    2.27             page->u.inuse.type_info);
    2.28  }
     3.1 --- a/xen/arch/x86/mm.c	Sun Aug 07 09:13:39 2005 +0000
     3.2 +++ b/xen/arch/x86/mm.c	Mon Aug 08 08:18:06 2005 +0000
     3.3 @@ -122,7 +122,7 @@ static void free_l2_table(struct pfn_inf
     3.4  static void free_l1_table(struct pfn_info *page);
     3.5  
     3.6  static int mod_l2_entry(l2_pgentry_t *, l2_pgentry_t, unsigned long,
     3.7 -                        unsigned int type);
     3.8 +                        unsigned long type);
     3.9  static int mod_l1_entry(l1_pgentry_t *, l1_pgentry_t);
    3.10  
    3.11  /* Used to defer flushing of memory structures. */
    3.12 @@ -354,7 +354,7 @@ static int get_page_from_pagenr(unsigned
    3.13  
    3.14  
    3.15  static int get_page_and_type_from_pagenr(unsigned long page_nr, 
    3.16 -                                         u32 type,
    3.17 +                                         unsigned long type,
    3.18                                           struct domain *d)
    3.19  {
    3.20      struct pfn_info *page = &frame_table[page_nr];
    3.21 @@ -365,7 +365,7 @@ static int get_page_and_type_from_pagenr
    3.22      if ( unlikely(!get_page_type(page, type)) )
    3.23      {
    3.24          if ( (type & PGT_type_mask) != PGT_l1_page_table )
    3.25 -            MEM_LOG("Bad page type for pfn %lx (%08x)", 
    3.26 +            MEM_LOG("Bad page type for pfn %lx (%" PRtype_info ")", 
    3.27                      page_nr, page->u.inuse.type_info);
    3.28          put_page(page);
    3.29          return 0;
    3.30 @@ -390,7 +390,7 @@ static int
    3.31  get_linear_pagetable(
    3.32      root_pgentry_t re, unsigned long re_pfn, struct domain *d)
    3.33  {
    3.34 -    u32 x, y;
    3.35 +    unsigned long x, y;
    3.36      struct pfn_info *page;
    3.37      unsigned long pfn;
    3.38  
    3.39 @@ -544,7 +544,8 @@ get_page_from_l3e(
    3.40  
    3.41  static int 
    3.42  get_page_from_l4e(
    3.43 -    l4_pgentry_t l4e, unsigned long pfn, struct domain *d)
    3.44 +    l4_pgentry_t l4e, unsigned long pfn, 
    3.45 +    struct domain *d, unsigned long vaddr)
    3.46  {
    3.47      int rc;
    3.48  
    3.49 @@ -559,8 +560,11 @@ get_page_from_l4e(
    3.50          return 0;
    3.51      }
    3.52  
    3.53 +    vaddr >>= L4_PAGETABLE_SHIFT;
    3.54 +    vaddr <<= PGT_va_shift;
    3.55      rc = get_page_and_type_from_pagenr(
    3.56 -        l4e_get_pfn(l4e), PGT_l3_page_table, d);
    3.57 +        l4e_get_pfn(l4e), 
    3.58 +        PGT_l3_page_table | vaddr, d);
    3.59  
    3.60      if ( unlikely(!rc) )
    3.61          return get_linear_pagetable(l4e, pfn, d);
    3.62 @@ -750,13 +754,47 @@ static inline int l1_backptr(
    3.63      return 1;
    3.64  }
    3.65  
    3.66 +#elif CONFIG_X86_64
    3.67 +# define create_pae_xen_mappings(pl3e) (1)
    3.68 +
    3.69 +static inline int l1_backptr(
    3.70 +    unsigned long *backptr, unsigned long offset_in_l2, unsigned long l2_type)
    3.71 +{
    3.72 +    unsigned long l2_backptr = l2_type & PGT_va_mask;
    3.73 +    BUG_ON(l2_backptr == PGT_va_unknown);
    3.74 +
    3.75 +     *backptr = ((l2_backptr >> PGT_va_shift) << L3_PAGETABLE_SHIFT) | 
    3.76 +        (offset_in_l2 << L2_PAGETABLE_SHIFT);
    3.77 +    return 1;
    3.78 +}
    3.79 +
    3.80 +static inline int l2_backptr(
    3.81 +    unsigned long *backptr, unsigned long offset_in_l3, unsigned long l3_type)
    3.82 +{
    3.83 +    unsigned long l3_backptr = l3_type & PGT_va_mask;
    3.84 +    BUG_ON(l3_backptr == PGT_va_unknown);
    3.85 +
    3.86 +    *backptr = ((l3_backptr >> PGT_va_shift) << L4_PAGETABLE_SHIFT) | 
    3.87 +        (offset_in_l3 << L3_PAGETABLE_SHIFT);
    3.88 +    return 1;
    3.89 +}
    3.90 +
    3.91 +static inline int l3_backptr(
    3.92 +    unsigned long *backptr, unsigned long offset_in_l4, unsigned long l4_type)
    3.93 +{
    3.94 +    unsigned long l4_backptr = l4_type & PGT_va_mask;
    3.95 +    BUG_ON(l4_backptr == PGT_va_unknown);
    3.96 +
    3.97 +    *backptr = (offset_in_l4 << L4_PAGETABLE_SHIFT);
    3.98 +    return 1;
    3.99 +}
   3.100  #else
   3.101  # define create_pae_xen_mappings(pl3e) (1)
   3.102  # define l1_backptr(bp,l2o,l2t) \
   3.103      ({ *(bp) = (unsigned long)(l2o) << L2_PAGETABLE_SHIFT; 1; })
   3.104  #endif
   3.105  
   3.106 -static int alloc_l2_table(struct pfn_info *page, unsigned int type)
   3.107 +static int alloc_l2_table(struct pfn_info *page, unsigned long type)
   3.108  {
   3.109      struct domain *d = page_get_owner(page);
   3.110      unsigned long  pfn = page_to_pfn(page);
   3.111 @@ -808,7 +846,7 @@ static int alloc_l2_table(struct pfn_inf
   3.112  
   3.113  
   3.114  #if CONFIG_PAGING_LEVELS >= 3
   3.115 -static int alloc_l3_table(struct pfn_info *page)
   3.116 +static int alloc_l3_table(struct pfn_info *page, unsigned long type)
   3.117  {
   3.118      struct domain *d = page_get_owner(page);
   3.119      unsigned long  pfn = page_to_pfn(page);
   3.120 @@ -821,7 +859,12 @@ static int alloc_l3_table(struct pfn_inf
   3.121      pl3e = map_domain_page(pfn);
   3.122      for ( i = 0; i < L3_PAGETABLE_ENTRIES; i++ )
   3.123      {
   3.124 -        vaddr = (unsigned long)i << L3_PAGETABLE_SHIFT;
   3.125 +#if CONFIG_PAGING_LEVELS >= 4
   3.126 +        if ( !l2_backptr(&vaddr, i, type) )
   3.127 +            goto fail;
   3.128 +#else
   3.129 +      vaddr = (unsigned long)i << L3_PAGETABLE_SHIFT;
   3.130 +#endif
   3.131          if ( is_guest_l3_slot(i) &&
   3.132               unlikely(!get_page_from_l3e(pl3e[i], pfn, d, vaddr)) )
   3.133              goto fail;
   3.134 @@ -842,15 +885,16 @@ static int alloc_l3_table(struct pfn_inf
   3.135      return 0;
   3.136  }
   3.137  #else
   3.138 -#define alloc_l3_table(page) (0)
   3.139 +#define alloc_l3_table(page, type) (0)
   3.140  #endif
   3.141  
   3.142  #if CONFIG_PAGING_LEVELS >= 4
   3.143 -static int alloc_l4_table(struct pfn_info *page)
   3.144 +static int alloc_l4_table(struct pfn_info *page, unsigned long type)
   3.145  {
   3.146      struct domain *d = page_get_owner(page);
   3.147      unsigned long  pfn = page_to_pfn(page);
   3.148      l4_pgentry_t  *pl4e = page_to_virt(page);
   3.149 +    unsigned long vaddr;
   3.150      int            i;
   3.151  
   3.152      /* See the code in shadow_promote() to understand why this is here. */
   3.153 @@ -859,10 +903,14 @@ static int alloc_l4_table(struct pfn_inf
   3.154          return 1;
   3.155      ASSERT(!shadow_mode_refcounts(d));
   3.156  
   3.157 -    for ( i = 0; i < L4_PAGETABLE_ENTRIES; i++ )
   3.158 +    for ( i = 0; i < L4_PAGETABLE_ENTRIES; i++ ) {
   3.159 +        if ( !l3_backptr(&vaddr, i, type) )
   3.160 +            goto fail;
   3.161 +
   3.162          if ( is_guest_l4_slot(i) &&
   3.163 -             unlikely(!get_page_from_l4e(pl4e[i], pfn, d)) )
   3.164 +             unlikely(!get_page_from_l4e(pl4e[i], pfn, d, vaddr)) )
   3.165              goto fail;
   3.166 +    }
   3.167  
   3.168      /* Xen private mappings. */
   3.169      memcpy(&pl4e[ROOT_PAGETABLE_FIRST_XEN_SLOT],
   3.170 @@ -885,7 +933,7 @@ static int alloc_l4_table(struct pfn_inf
   3.171      return 0;
   3.172  }
   3.173  #else
   3.174 -#define alloc_l4_table(page) (0)
   3.175 +#define alloc_l4_table(page, type) (0)
   3.176  #endif
   3.177  
   3.178  
   3.179 @@ -1037,7 +1085,7 @@ static int mod_l1_entry(l1_pgentry_t *pl
   3.180  static int mod_l2_entry(l2_pgentry_t *pl2e, 
   3.181                          l2_pgentry_t nl2e, 
   3.182                          unsigned long pfn,
   3.183 -                        unsigned int type)
   3.184 +                        unsigned long type)
   3.185  {
   3.186      l2_pgentry_t ol2e;
   3.187      unsigned long vaddr = 0;
   3.188 @@ -1090,7 +1138,8 @@ static int mod_l2_entry(l2_pgentry_t *pl
   3.189  /* Update the L3 entry at pl3e to new value nl3e. pl3e is within frame pfn. */
   3.190  static int mod_l3_entry(l3_pgentry_t *pl3e, 
   3.191                          l3_pgentry_t nl3e, 
   3.192 -                        unsigned long pfn)
   3.193 +                        unsigned long pfn,
   3.194 +                        unsigned long type)
   3.195  {
   3.196      l3_pgentry_t ol3e;
   3.197      unsigned long vaddr;
   3.198 @@ -1126,10 +1175,16 @@ static int mod_l3_entry(l3_pgentry_t *pl
   3.199          if (!l3e_has_changed(ol3e, nl3e, _PAGE_PRESENT))
   3.200              return UPDATE_ENTRY(l3, pl3e, ol3e, nl3e);
   3.201  
   3.202 +#if CONFIG_PAGING_LEVELS >= 4
   3.203 +        if ( unlikely(!l2_backptr(&vaddr, pgentry_ptr_to_slot(pl3e), type)) ||
   3.204 +             unlikely(!get_page_from_l3e(nl3e, pfn, current->domain, vaddr)) )
   3.205 +            return 0; 
   3.206 +#else
   3.207          vaddr = (((unsigned long)pl3e & ~PAGE_MASK) / sizeof(l3_pgentry_t))
   3.208              << L3_PAGETABLE_SHIFT;
   3.209          if ( unlikely(!get_page_from_l3e(nl3e, pfn, current->domain, vaddr)) )
   3.210              return 0;
   3.211 +#endif
   3.212  
   3.213          if ( unlikely(!UPDATE_ENTRY(l3, pl3e, ol3e, nl3e)) )
   3.214          {
   3.215 @@ -1141,12 +1196,14 @@ static int mod_l3_entry(l3_pgentry_t *pl
   3.216          put_page_from_l3e(ol3e, pfn);
   3.217          return 1;
   3.218      }
   3.219 -
   3.220 -    if ( unlikely(!UPDATE_ENTRY(l3, pl3e, ol3e, nl3e)) )
   3.221 -    {
   3.222 -        BUG_ON(!create_pae_xen_mappings(pl3e));
   3.223 -        return 0;
   3.224 -    }
   3.225 +    else
   3.226 +   {
   3.227 +       if ( unlikely(!UPDATE_ENTRY(l3, pl3e, ol3e, nl3e)) )
   3.228 +           {
   3.229 +               BUG_ON(!create_pae_xen_mappings(pl3e));
   3.230 +               return 0;
   3.231 +           }
   3.232 +   }
   3.233  
   3.234      put_page_from_l3e(ol3e, pfn);
   3.235      return 1;
   3.236 @@ -1159,9 +1216,11 @@ static int mod_l3_entry(l3_pgentry_t *pl
   3.237  /* Update the L4 entry at pl4e to new value nl4e. pl4e is within frame pfn. */
   3.238  static int mod_l4_entry(l4_pgentry_t *pl4e, 
   3.239                          l4_pgentry_t nl4e, 
   3.240 -                        unsigned long pfn)
   3.241 +                        unsigned long pfn,
   3.242 +                        unsigned long type)
   3.243  {
   3.244      l4_pgentry_t ol4e;
   3.245 +    unsigned long vaddr;
   3.246  
   3.247      if ( unlikely(!is_guest_l4_slot(pgentry_ptr_to_slot(pl4e))) )
   3.248      {
   3.249 @@ -1185,7 +1244,8 @@ static int mod_l4_entry(l4_pgentry_t *pl
   3.250          if (!l4e_has_changed(ol4e, nl4e, _PAGE_PRESENT))
   3.251              return UPDATE_ENTRY(l4, pl4e, ol4e, nl4e);
   3.252  
   3.253 -        if ( unlikely(!get_page_from_l4e(nl4e, pfn, current->domain)) )
   3.254 +         if ( unlikely(!l3_backptr(&vaddr, pgentry_ptr_to_slot(pl4e), type)) ||
   3.255 +             unlikely(!get_page_from_l4e(nl4e, pfn, current->domain, vaddr)) )
   3.256              return 0;
   3.257  
   3.258          if ( unlikely(!UPDATE_ENTRY(l4, pl4e, ol4e, nl4e)) )
   3.259 @@ -1193,13 +1253,12 @@ static int mod_l4_entry(l4_pgentry_t *pl
   3.260              put_page_from_l4e(nl4e, pfn);
   3.261              return 0;
   3.262          }
   3.263 -        
   3.264 -        put_page_from_l4e(ol4e, pfn);
   3.265 -        return 1;
   3.266      }
   3.267 -
   3.268 -    if ( unlikely(!UPDATE_ENTRY(l4, pl4e, ol4e, nl4e)) )
   3.269 -        return 0;
   3.270 +    else 
   3.271 +    {
   3.272 +        if ( unlikely(!UPDATE_ENTRY(l4, pl4e, ol4e, nl4e)) )
   3.273 +            return 0;
   3.274 +     }
   3.275  
   3.276      put_page_from_l4e(ol4e, pfn);
   3.277      return 1;
   3.278 @@ -1207,7 +1266,7 @@ static int mod_l4_entry(l4_pgentry_t *pl
   3.279  
   3.280  #endif
   3.281  
   3.282 -int alloc_page_type(struct pfn_info *page, unsigned int type)
   3.283 +int alloc_page_type(struct pfn_info *page, unsigned long type)
   3.284  {
   3.285      switch ( type & PGT_type_mask )
   3.286      {
   3.287 @@ -1216,14 +1275,14 @@ int alloc_page_type(struct pfn_info *pag
   3.288      case PGT_l2_page_table:
   3.289          return alloc_l2_table(page, type);
   3.290      case PGT_l3_page_table:
   3.291 -        return alloc_l3_table(page);
   3.292 +        return alloc_l3_table(page, type);
   3.293      case PGT_l4_page_table:
   3.294 -        return alloc_l4_table(page);
   3.295 +        return alloc_l4_table(page, type);
   3.296      case PGT_gdt_page:
   3.297      case PGT_ldt_page:
   3.298          return alloc_segdesc_page(page);
   3.299      default:
   3.300 -        printk("Bad type in alloc_page_type %x t=%x c=%x\n", 
   3.301 +        printk("Bad type in alloc_page_type %lx t=%" PRtype_info " c=%x\n", 
   3.302                 type, page->u.inuse.type_info,
   3.303                 page->count_info);
   3.304          BUG();
   3.305 @@ -1233,7 +1292,7 @@ int alloc_page_type(struct pfn_info *pag
   3.306  }
   3.307  
   3.308  
   3.309 -void free_page_type(struct pfn_info *page, unsigned int type)
   3.310 +void free_page_type(struct pfn_info *page, unsigned long type)
   3.311  {
   3.312      struct domain *owner = page_get_owner(page);
   3.313      unsigned long gpfn;
   3.314 @@ -1273,7 +1332,7 @@ void free_page_type(struct pfn_info *pag
   3.315  #endif
   3.316  
   3.317      default:
   3.318 -        printk("%s: type %x pfn %lx\n",__FUNCTION__,
   3.319 +        printk("%s: type %lx pfn %lx\n",__FUNCTION__,
   3.320                 type, page_to_pfn(page));
   3.321          BUG();
   3.322      }
   3.323 @@ -1282,7 +1341,7 @@ void free_page_type(struct pfn_info *pag
   3.324  
   3.325  void put_page_type(struct pfn_info *page)
   3.326  {
   3.327 -    u32 nx, x, y = page->u.inuse.type_info;
   3.328 +    unsigned long nx, x, y = page->u.inuse.type_info;
   3.329  
   3.330   again:
   3.331      do {
   3.332 @@ -1335,9 +1394,9 @@ void put_page_type(struct pfn_info *page
   3.333  }
   3.334  
   3.335  
   3.336 -int get_page_type(struct pfn_info *page, u32 type)
   3.337 +int get_page_type(struct pfn_info *page, unsigned long type)
   3.338  {
   3.339 -    u32 nx, x, y = page->u.inuse.type_info;
   3.340 +    unsigned long nx, x, y = page->u.inuse.type_info;
   3.341  
   3.342   again:
   3.343      do {
   3.344 @@ -1350,7 +1409,11 @@ int get_page_type(struct pfn_info *page,
   3.345          }
   3.346          else if ( unlikely((x & PGT_count_mask) == 0) )
   3.347          {
   3.348 +#ifdef CONFIG_X86_64
   3.349 +            if ( (x & (PGT_type_mask|PGT_va_mask)) != (type & ~PGT_va_mask))
   3.350 +#else
   3.351              if ( (x & (PGT_type_mask|PGT_va_mask)) != type )
   3.352 +#endif
   3.353              {
   3.354                  if ( (x & PGT_type_mask) != (type & PGT_type_mask) )
   3.355                  {
   3.356 @@ -1382,13 +1445,17 @@ int get_page_type(struct pfn_info *page,
   3.357          }
   3.358          else
   3.359          {
   3.360 +#ifdef CONFIG_X86_64
   3.361 +            if ( unlikely((x & (PGT_type_mask|PGT_va_mask)) != (type & ~PGT_va_mask)) )
   3.362 +#else
   3.363              if ( unlikely((x & (PGT_type_mask|PGT_va_mask)) != type) )
   3.364 +#endif
   3.365              {
   3.366                  if ( unlikely((x & PGT_type_mask) != (type & PGT_type_mask) ) )
   3.367                  {
   3.368                      if ( ((x & PGT_type_mask) != PGT_l2_page_table) ||
   3.369                           ((type & PGT_type_mask) != PGT_l1_page_table) )
   3.370 -                        MEM_LOG("Bad type (saw %08x != exp %08x) for pfn %lx",
   3.371 +                        MEM_LOG("Bad type (saw %" PRtype_info "!= exp %" PRtype_info ") for pfn %lx",
   3.372                                  x, type, page_to_pfn(page));
   3.373                      return 0;
   3.374                  }
   3.375 @@ -1427,8 +1494,8 @@ int get_page_type(struct pfn_info *page,
   3.376          /* Try to validate page type; drop the new reference on failure. */
   3.377          if ( unlikely(!alloc_page_type(page, type)) )
   3.378          {
   3.379 -            MEM_LOG("Error while validating pfn %lx for type %08x."
   3.380 -                    " caf=%08x taf=%08x",
   3.381 +            MEM_LOG("Error while validating pfn %lx for type %" PRtype_info "."
   3.382 +                    " caf=%08x taf=%" PRtype_info,
   3.383                      page_to_pfn(page), type,
   3.384                      page->count_info,
   3.385                      page->u.inuse.type_info);
   3.386 @@ -1596,7 +1663,7 @@ int do_mmuext_op(
   3.387  {
   3.388      struct mmuext_op op;
   3.389      int rc = 0, i = 0, okay, cpu = smp_processor_id();
   3.390 -    unsigned int type, done = 0;
   3.391 +    unsigned long type, done = 0;
   3.392      struct pfn_info *page;
   3.393      struct vcpu *v = current;
   3.394      struct domain *d = v->domain, *e;
   3.395 @@ -1651,6 +1718,9 @@ int do_mmuext_op(
   3.396              type = PGT_l1_page_table | PGT_va_mutable;
   3.397  
   3.398          pin_page:
   3.399 +#if CONFIG_PAGING_LEVELS >= 4
   3.400 +            type |= PGT_va_mutable;
   3.401 +#endif
   3.402              if ( shadow_mode_refcounts(FOREIGNDOM) )
   3.403                  type = PGT_writable_page;
   3.404  
   3.405 @@ -1876,7 +1946,7 @@ int do_mmuext_op(
   3.406                       unlikely(_nd != _d) )
   3.407                  {
   3.408                      MEM_LOG("Bad page values %lx: ed=%p(%u), sd=%p,"
   3.409 -                            " caf=%08x, taf=%08x\n", page_to_pfn(page),
   3.410 +                            " caf=%08x, taf=%" PRtype_info "\n", page_to_pfn(page),
   3.411                              d, d->domain_id, unpickle_domptr(_nd), x,
   3.412                              page->u.inuse.type_info);
   3.413                      okay = 0;
   3.414 @@ -1951,7 +2021,7 @@ int do_mmu_update(
   3.415      unsigned int cmd, done = 0;
   3.416      struct vcpu *v = current;
   3.417      struct domain *d = v->domain;
   3.418 -    u32 type_info;
   3.419 +    unsigned long type_info;
   3.420      struct domain_mmap_cache mapcache, sh_mapcache;
   3.421  
   3.422      LOCK_BIGLOCK(d);
   3.423 @@ -2063,13 +2133,14 @@ int do_mmu_update(
   3.424  #if CONFIG_PAGING_LEVELS >= 3
   3.425              case PGT_l3_page_table:
   3.426                  ASSERT( !shadow_mode_refcounts(d) );
   3.427 -                if ( likely(get_page_type(page, PGT_l3_page_table)) )
   3.428 +                if ( likely(get_page_type(
   3.429 +                    page, type_info & (PGT_type_mask|PGT_va_mask))) )
   3.430                  {
   3.431                      l3_pgentry_t l3e;
   3.432  
   3.433                      /* FIXME: doesn't work with PAE */
   3.434                      l3e = l3e_from_intpte(req.val);
   3.435 -                    okay = mod_l3_entry(va, l3e, mfn);
   3.436 +                    okay = mod_l3_entry(va, l3e, mfn, type_info);
   3.437                      if ( okay && unlikely(shadow_mode_enabled(d)) )
   3.438                          shadow_l3_normal_pt_update(d, req.ptr, l3e, &sh_mapcache);
   3.439                      put_page_type(page);
   3.440 @@ -2079,12 +2150,13 @@ int do_mmu_update(
   3.441  #if CONFIG_PAGING_LEVELS >= 4
   3.442              case PGT_l4_page_table:
   3.443                  ASSERT( !shadow_mode_refcounts(d) );
   3.444 -                if ( likely(get_page_type(page, PGT_l4_page_table)) )
   3.445 +                if ( likely(get_page_type(
   3.446 +                    page, type_info & (PGT_type_mask|PGT_va_mask))) )
   3.447                  {
   3.448                      l4_pgentry_t l4e;
   3.449  
   3.450                      l4e = l4e_from_intpte(req.val);
   3.451 -                    okay = mod_l4_entry(va, l4e, mfn);
   3.452 +                    okay = mod_l4_entry(va, l4e, mfn, type_info);
   3.453                      if ( okay && unlikely(shadow_mode_enabled(d)) )
   3.454                          shadow_l4_normal_pt_update(d, req.ptr, l4e, &sh_mapcache);
   3.455                      put_page_type(page);
   3.456 @@ -2618,11 +2690,19 @@ void ptwr_flush(struct domain *d, const 
   3.457      l1_pgentry_t  *pl1e;
   3.458      l2_pgentry_t  *pl2e;
   3.459      unsigned int   modified;
   3.460 +#if defined(__x86_64__)
   3.461 +    struct vcpu *v = current;
    3.462 +    /* If in user mode, switch to kernel mode just to read the page-table mapping. */
   3.463 +    extern void toggle_guest_mode(struct vcpu *);
   3.464 +    int user_mode = !(v->arch.flags & TF_kernel_mode);
   3.465 +#endif
   3.466  
   3.467      ASSERT(!shadow_mode_enabled(d));
   3.468  
   3.469      if ( unlikely(d->arch.ptwr[which].vcpu != current) )
   3.470          write_ptbase(d->arch.ptwr[which].vcpu);
   3.471 +    else 
   3.472 +        TOGGLE_MODE();
   3.473  
   3.474      l1va = d->arch.ptwr[which].l1va;
   3.475      ptep = (unsigned long *)&linear_pg_table[l1_linear_offset(l1va)];
   3.476 @@ -2689,6 +2769,8 @@ void ptwr_flush(struct domain *d, const 
   3.477  
   3.478      if ( unlikely(d->arch.ptwr[which].vcpu != current) )
   3.479          write_ptbase(current);
   3.480 +    else 
   3.481 +        TOGGLE_MODE();
   3.482  }
   3.483  
   3.484  static int ptwr_emulated_update(
   3.485 @@ -2747,7 +2829,7 @@ static int ptwr_emulated_update(
   3.486           ((page->u.inuse.type_info & PGT_type_mask) != PGT_l1_page_table) ||
   3.487           (page_get_owner(page) != d) )
   3.488      {
   3.489 -        MEM_LOG("ptwr_emulate: Page is mistyped or bad pte (%lx, %08x)\n",
   3.490 +        MEM_LOG("ptwr_emulate: Page is mistyped or bad pte (%lx, %" PRtype_info ")\n",
   3.491                  l1e_get_pfn(pte), page->u.inuse.type_info);
   3.492          return X86EMUL_UNHANDLEABLE;
   3.493      }
   3.494 @@ -2820,6 +2902,35 @@ static struct x86_mem_emulator ptwr_mem_
   3.495      .cmpxchg8b_emulated = ptwr_emulated_cmpxchg8b
   3.496  };
   3.497  
   3.498 +#if defined(__x86_64__)
   3.499 +/*
    3.500 + * Returns zero if mapped, or -1 otherwise
   3.501 + */
   3.502 +static int __not_mapped(l2_pgentry_t *pl2e)
   3.503 +{
   3.504 +    unsigned long page = read_cr3();
   3.505 +
   3.506 +    page &= PAGE_MASK;
   3.507 +    page = ((unsigned long *) __va(page))[l4_table_offset((unsigned long)pl2e)];
   3.508 +    if ( !(page & _PAGE_PRESENT) ) 
   3.509 +        return -1;        
   3.510 +        
   3.511 +    page &= PAGE_MASK;
   3.512 +    page = ((unsigned long *) __va(page))[l3_table_offset((unsigned long)pl2e)];
   3.513 +    if ( !(page & _PAGE_PRESENT) ) 
   3.514 +        return -1;
   3.515 +
   3.516 +    page &= PAGE_MASK;
   3.517 +    page = ((unsigned long *) __va(page))[l2_table_offset((unsigned long)pl2e)];
   3.518 +    if ( !(page & _PAGE_PRESENT) )
   3.519 +        return -1;
   3.520 +
   3.521 +    return 0;
   3.522 +}
   3.523 +#else
   3.524 +#define __not_mapped(p) (0)
   3.525 +#endif
   3.526 +
   3.527  /* Write page fault handler: check if guest is trying to modify a PTE. */
   3.528  int ptwr_do_page_fault(struct domain *d, unsigned long addr)
   3.529  {
   3.530 @@ -2828,7 +2939,7 @@ int ptwr_do_page_fault(struct domain *d,
   3.531      l1_pgentry_t     pte;
   3.532      l2_pgentry_t    *pl2e;
   3.533      int              which;
   3.534 -    u32              l2_idx;
   3.535 +    unsigned long    l2_idx;
   3.536  
   3.537      if ( unlikely(shadow_mode_enabled(d)) )
   3.538          return 0;
   3.539 @@ -2837,7 +2948,7 @@ int ptwr_do_page_fault(struct domain *d,
   3.540       * Attempt to read the PTE that maps the VA being accessed. By checking for
   3.541       * PDE validity in the L2 we avoid many expensive fixups in __get_user().
   3.542       */
   3.543 -    if ( !(l2e_get_flags(__linear_l2_table[addr>>L2_PAGETABLE_SHIFT]) &
   3.544 +    if ( !(l2e_get_flags(__linear_l2_table[l2_linear_offset(addr)]) &
   3.545             _PAGE_PRESENT) ||
   3.546           __copy_from_user(&pte,&linear_pg_table[l1_linear_offset(addr)],
   3.547                            sizeof(pte)) )
   3.548 @@ -2857,18 +2968,13 @@ int ptwr_do_page_fault(struct domain *d,
   3.549          return 0;
   3.550      }
   3.551  
   3.552 -    /* x86/64: Writable pagetable code needs auditing. Use emulator for now. */
   3.553 -#if defined(__x86_64__)
   3.554 -    goto emulate;
   3.555 -#endif
   3.556 -
   3.557      /* Get the L2 index at which this L1 p.t. is always mapped. */
   3.558      l2_idx = page->u.inuse.type_info & PGT_va_mask;
   3.559      if ( unlikely(l2_idx >= PGT_va_unknown) )
   3.560          goto emulate; /* Urk! This L1 is mapped in multiple L2 slots! */
   3.561      l2_idx >>= PGT_va_shift;
   3.562  
   3.563 -    if ( unlikely(l2_idx == (addr >> L2_PAGETABLE_SHIFT)) )
   3.564 +    if ( unlikely(l2_idx == l2_linear_offset(addr)) )
   3.565          goto emulate; /* Urk! Pagetable maps itself! */
   3.566  
   3.567      /*
   3.568 @@ -2877,6 +2983,10 @@ int ptwr_do_page_fault(struct domain *d,
   3.569       */
   3.570      pl2e = &__linear_l2_table[l2_idx];
   3.571      which = PTWR_PT_INACTIVE;
   3.572 +
   3.573 +    if ( unlikely(__not_mapped(pl2e)) )
   3.574 +        goto inactive;
   3.575 +
   3.576      if ( (l2e_get_pfn(*pl2e)) == pfn )
   3.577      {
   3.578          /*
   3.579 @@ -2891,6 +3001,8 @@ int ptwr_do_page_fault(struct domain *d,
   3.580              which = PTWR_PT_ACTIVE;
   3.581      }
   3.582  
   3.583 +  inactive:
   3.584 +
   3.585      /*
   3.586       * If this is a multi-processor guest then ensure that the page is hooked
   3.587       * into at most one L2 table, which must be the one running on this VCPU.
   3.588 @@ -2905,7 +3017,7 @@ int ptwr_do_page_fault(struct domain *d,
   3.589          goto emulate;
   3.590      }
   3.591  
   3.592 -    PTWR_PRINTK("[%c] page_fault on l1 pt at va %lx, pt for %08x, "
   3.593 +    PTWR_PRINTK("[%c] page_fault on l1 pt at va %lx, pt for %08lx, "
   3.594                  "pfn %lx\n", PTWR_PRINT_WHICH,
   3.595                  addr, l2_idx << L2_PAGETABLE_SHIFT, pfn);
   3.596      
   3.597 @@ -2946,11 +3058,11 @@ int ptwr_do_page_fault(struct domain *d,
   3.598      
   3.599      /* Finally, make the p.t. page writable by the guest OS. */
   3.600      l1e_add_flags(pte, _PAGE_RW);
   3.601 -    if ( unlikely(__copy_to_user(&linear_pg_table[addr>>PAGE_SHIFT],
   3.602 +    if ( unlikely(__copy_to_user(&linear_pg_table[l1_linear_offset(addr)],
   3.603                                   &pte, sizeof(pte))) )
   3.604      {
   3.605          MEM_LOG("ptwr: Could not update pte at %p", (unsigned long *)
   3.606 -                &linear_pg_table[addr>>PAGE_SHIFT]);
   3.607 +                &linear_pg_table[l1_linear_offset(addr)]);
   3.608          /* Toss the writable pagetable state and crash. */
   3.609          unmap_domain_page(d->arch.ptwr[which].pl1e);
   3.610          d->arch.ptwr[which].l1va = 0;
     4.1 --- a/xen/arch/x86/shadow32.c	Sun Aug 07 09:13:39 2005 +0000
     4.2 +++ b/xen/arch/x86/shadow32.c	Mon Aug 08 08:18:06 2005 +0000
     4.3 @@ -418,7 +418,7 @@ void free_shadow_page(unsigned long smfn
     4.4          break;
     4.5  
     4.6      default:
     4.7 -        printk("Free shadow weird page type mfn=%lx type=%08x\n",
     4.8 +        printk("Free shadow weird page type mfn=%lx type=%" PRtype_info "\n",
     4.9                 page_to_pfn(page), page->u.inuse.type_info);
    4.10          break;
    4.11      }
     5.1 --- a/xen/arch/x86/shadow_public.c	Sun Aug 07 09:13:39 2005 +0000
     5.2 +++ b/xen/arch/x86/shadow_public.c	Mon Aug 08 08:18:06 2005 +0000
     5.3 @@ -571,7 +571,7 @@ void free_shadow_page(unsigned long smfn
     5.4          break;
     5.5  
     5.6      default:
     5.7 -        printk("Free shadow weird page type mfn=%lx type=%08x\n",
     5.8 +        printk("Free shadow weird page type mfn=%lx type=%" PRtype_info "\n",
     5.9                 page_to_pfn(page), page->u.inuse.type_info);
    5.10          break;
    5.11      }
    5.12 @@ -1638,14 +1638,14 @@ void shadow_drop_references(
    5.13      /* XXX This needs more thought... */
    5.14      printk("%s: needing to call __shadow_remove_all_access for mfn=%lx\n",
    5.15        __func__, page_to_pfn(page));
    5.16 -    printk("Before: mfn=%lx c=%08x t=%08x\n", page_to_pfn(page),
    5.17 +    printk("Before: mfn=%lx c=%08x t=%" PRtype_info "\n", page_to_pfn(page),
    5.18        page->count_info, page->u.inuse.type_info);
    5.19  
    5.20      shadow_lock(d);
    5.21      __shadow_remove_all_access(d, page_to_pfn(page));
    5.22      shadow_unlock(d);
    5.23  
    5.24 -    printk("After:  mfn=%lx c=%08x t=%08x\n", page_to_pfn(page),
    5.25 +    printk("After:  mfn=%lx c=%08x t=%" PRtype_info "\n", page_to_pfn(page),
    5.26        page->count_info, page->u.inuse.type_info);
    5.27  }
    5.28  
     6.1 --- a/xen/arch/x86/traps.c	Sun Aug 07 09:13:39 2005 +0000
     6.2 +++ b/xen/arch/x86/traps.c	Mon Aug 08 08:18:06 2005 +0000
     6.3 @@ -422,7 +422,7 @@ asmlinkage int do_page_fault(struct cpu_
     6.4      {
     6.5          LOCK_BIGLOCK(d);
     6.6          if ( unlikely(d->arch.ptwr[PTWR_PT_ACTIVE].l1va) &&
     6.7 -             unlikely((addr >> L2_PAGETABLE_SHIFT) ==
     6.8 +             unlikely(l2_linear_offset(addr) ==
     6.9                        d->arch.ptwr[PTWR_PT_ACTIVE].l2_idx) )
    6.10          {
    6.11              ptwr_flush(d, PTWR_PT_ACTIVE);
    6.12 @@ -430,7 +430,12 @@ asmlinkage int do_page_fault(struct cpu_
    6.13              return EXCRET_fault_fixed;
    6.14          }
    6.15  
    6.16 -        if ( (addr < HYPERVISOR_VIRT_START) &&
    6.17 +        if ( ((addr < HYPERVISOR_VIRT_START) 
    6.18 +#if defined(__x86_64__)
    6.19 +              || (addr >= HYPERVISOR_VIRT_END)
    6.20 +#endif        
    6.21 +            )     
    6.22 +             &&
    6.23               KERNEL_MODE(v, regs) &&
    6.24               ((regs->error_code & 3) == 3) && /* write-protection fault */
    6.25               ptwr_do_page_fault(d, addr) )
    6.26 @@ -459,7 +464,7 @@ asmlinkage int do_page_fault(struct cpu_
    6.27          goto xen_fault;
    6.28  
    6.29      propagate_page_fault(addr, regs->error_code);
    6.30 -    return 0; 
    6.31 +    return 0;
    6.32  
    6.33   xen_fault:
    6.34  
     7.1 --- a/xen/common/grant_table.c	Sun Aug 07 09:13:39 2005 +0000
     7.2 +++ b/xen/common/grant_table.c	Mon Aug 08 08:18:06 2005 +0000
     7.3 @@ -859,7 +859,7 @@ gnttab_donate(gnttab_donate_t *uop, unsi
     7.4              if (unlikely((x & (PGC_count_mask|PGC_allocated)) !=
     7.5                           (1 | PGC_allocated)) || unlikely(_nd != _d)) {
     7.6                  printk("gnttab_donate: Bad page values %p: ed=%p(%u), sd=%p,"
     7.7 -                        " caf=%08x, taf=%08x\n", (void *) page_to_pfn(page),
     7.8 +                        " caf=%08x, taf=%" PRtype_info "\n", (void *) page_to_pfn(page),
     7.9                          d, d->domain_id, unpickle_domptr(_nd), x, 
    7.10                          page->u.inuse.type_info);
    7.11                  spin_unlock(&d->page_alloc_lock);
     8.1 --- a/xen/include/asm-x86/mm.h	Sun Aug 07 09:13:39 2005 +0000
     8.2 +++ b/xen/include/asm-x86/mm.h	Mon Aug 08 08:18:06 2005 +0000
     8.3 @@ -36,7 +36,7 @@ struct pfn_info
     8.4              /* Owner of this page (NULL if page is anonymous). */
     8.5              u32 _domain; /* pickled format */
     8.6              /* Type reference count and various PGT_xxx flags and fields. */
     8.7 -            u32 type_info;
     8.8 +            unsigned long type_info;
     8.9          } inuse;
    8.10  
    8.11          /* Page is on a free list: ((count_info & PGC_count_mask) == 0). */
    8.12 @@ -77,6 +77,7 @@ struct pfn_info
    8.13   /* Owning guest has pinned this page to its current type? */
    8.14  #define _PGT_pinned         27
    8.15  #define PGT_pinned          (1U<<_PGT_pinned)
    8.16 +#if defined(__i386__)
    8.17   /* The 11 most significant bits of virt address if this is a page table. */
    8.18  #define PGT_va_shift        16
    8.19  #define PGT_va_mask         (((1U<<11)-1)<<PGT_va_shift)
    8.20 @@ -84,6 +85,19 @@ struct pfn_info
    8.21  #define PGT_va_mutable      (((1U<<11)-1)<<PGT_va_shift)
    8.22   /* Is the back pointer unknown (e.g., p.t. is mapped at multiple VAs)? */
    8.23  #define PGT_va_unknown      (((1U<<11)-2)<<PGT_va_shift)
    8.24 +#elif defined(__x86_64__)
    8.25 + /* The 27 most significant bits of virt address if this is a page table. */
    8.26 +#define PGT_va_shift        32
    8.27 +#define PGT_va_mask         ((unsigned long)((1U<<28)-1)<<PGT_va_shift)
    8.28 + /* Is the back pointer still mutable (i.e. not fixed yet)? */
    8.29 + /* Use PML4 slot for HYPERVISOR_VIRT_START.  
    8.30 +    18 = L4_PAGETABLE_SHIFT - L2_PAGETABLE_SHIFT */
    8.31 +#define PGT_va_mutable      ((unsigned long)(256U<<18)<<PGT_va_shift)
    8.32 + /* Is the back pointer unknown (e.g., p.t. is mapped at multiple VAs)? */
    8.33 + /* Use PML4 slot for HYPERVISOR_VIRT_START + 1 */
    8.34 +#define PGT_va_unknown      ((unsigned long)(257U<<18)<<PGT_va_shift)
    8.35 +#endif
    8.36 +
    8.37   /* 16-bit count of uses of this frame as its current type. */
    8.38  #define PGT_count_mask      ((1U<<16)-1)
    8.39  
    8.40 @@ -114,11 +128,13 @@ struct pfn_info
    8.41  #if defined(__i386__)
    8.42  #define pickle_domptr(_d)   ((u32)(unsigned long)(_d))
    8.43  #define unpickle_domptr(_d) ((struct domain *)(unsigned long)(_d))
    8.44 +#define PRtype_info "08lx" /* should only be used for printk's */
    8.45  #elif defined(__x86_64__)
    8.46  static inline struct domain *unpickle_domptr(u32 _domain)
    8.47  { return (_domain == 0) ? NULL : __va(_domain); }
    8.48  static inline u32 pickle_domptr(struct domain *domain)
    8.49  { return (domain == NULL) ? 0 : (u32)__pa(domain); }
    8.50 +#define PRtype_info "016lx"/* should only be used for printk's */
    8.51  #endif
    8.52  
    8.53  #define page_get_owner(_p)    (unpickle_domptr((_p)->u.inuse._domain))
    8.54 @@ -144,8 +160,8 @@ extern struct pfn_info *frame_table;
    8.55  extern unsigned long max_page;
    8.56  void init_frametable(void);
    8.57  
    8.58 -int alloc_page_type(struct pfn_info *page, unsigned int type);
    8.59 -void free_page_type(struct pfn_info *page, unsigned int type);
    8.60 +int alloc_page_type(struct pfn_info *page, unsigned long type);
    8.61 +void free_page_type(struct pfn_info *page, unsigned long type);
    8.62  extern void invalidate_shadow_ldt(struct vcpu *d);
    8.63  extern int shadow_remove_all_write_access(
    8.64      struct domain *d, unsigned long gpfn, unsigned long gmfn);
    8.65 @@ -183,7 +199,7 @@ static inline int get_page(struct pfn_in
    8.66               unlikely(d != _domain) )                /* Wrong owner? */
    8.67          {
    8.68              if ( !_shadow_mode_refcounts(domain) )
    8.69 -                DPRINTK("Error pfn %lx: rd=%p, od=%p, caf=%08x, taf=%08x\n",
    8.70 +                DPRINTK("Error pfn %lx: rd=%p, od=%p, caf=%08x, taf=%" PRtype_info "\n",
    8.71                          page_to_pfn(page), domain, unpickle_domptr(d),
    8.72                          x, page->u.inuse.type_info);
    8.73              return 0;
    8.74 @@ -200,7 +216,7 @@ static inline int get_page(struct pfn_in
    8.75  }
    8.76  
    8.77  void put_page_type(struct pfn_info *page);
    8.78 -int  get_page_type(struct pfn_info *page, u32 type);
    8.79 +int  get_page_type(struct pfn_info *page, unsigned long type);
    8.80  int  get_page_from_l1e(l1_pgentry_t l1e, struct domain *d);
    8.81  void put_page_from_l1e(l1_pgentry_t l1e, struct domain *d);
    8.82  
    8.83 @@ -213,7 +229,7 @@ static inline void put_page_and_type(str
    8.84  
    8.85  static inline int get_page_and_type(struct pfn_info *page,
    8.86                                      struct domain *domain,
    8.87 -                                    u32 type)
    8.88 +                                    unsigned long type)
    8.89  {
    8.90      int rc = get_page(page, domain);
    8.91  
     9.1 --- a/xen/include/asm-x86/page.h	Sun Aug 07 09:13:39 2005 +0000
     9.2 +++ b/xen/include/asm-x86/page.h	Mon Aug 08 08:18:06 2005 +0000
     9.3 @@ -208,20 +208,21 @@ typedef struct { u64 pfn; } pagetable_t;
     9.4       + DOMAIN_ENTRIES_PER_L4_PAGETABLE)
     9.5  #endif
     9.6  
     9.7 -#define linear_l1_table                                                 \
     9.8 +#define VA_LINEAR_PT_VIRT_START (LINEAR_PT_VIRT_START & VADDR_MASK)
     9.9 +#define linear_l1_table                                                  \
    9.10      ((l1_pgentry_t *)(LINEAR_PT_VIRT_START))
    9.11 -#define __linear_l2_table                                               \
    9.12 -    ((l2_pgentry_t *)(LINEAR_PT_VIRT_START +                            \
    9.13 -                     (LINEAR_PT_VIRT_START >> (PAGETABLE_ORDER<<0))))
    9.14 -#define __linear_l3_table                                               \
    9.15 -    ((l3_pgentry_t *)(LINEAR_PT_VIRT_START +                            \
    9.16 -                     (LINEAR_PT_VIRT_START >> (PAGETABLE_ORDER<<0)) +   \
    9.17 -                     (LINEAR_PT_VIRT_START >> (PAGETABLE_ORDER<<1))))
    9.18 -#define __linear_l4_table                                               \
    9.19 -    ((l4_pgentry_t *)(LINEAR_PT_VIRT_START +                            \
    9.20 -                     (LINEAR_PT_VIRT_START >> (PAGETABLE_ORDER<<0)) +   \
    9.21 -                     (LINEAR_PT_VIRT_START >> (PAGETABLE_ORDER<<1)) +   \
    9.22 -                     (LINEAR_PT_VIRT_START >> (PAGETABLE_ORDER<<2))))
    9.23 +#define __linear_l2_table                                                \
    9.24 +    ((l2_pgentry_t *)(LINEAR_PT_VIRT_START +                             \
    9.25 +                     (VA_LINEAR_PT_VIRT_START >> (PAGETABLE_ORDER<<0))))
    9.26 +#define __linear_l3_table                                                \
    9.27 +    ((l3_pgentry_t *)(LINEAR_PT_VIRT_START +                             \
    9.28 +                     (VA_LINEAR_PT_VIRT_START >> (PAGETABLE_ORDER<<0)) + \
    9.29 +                     (VA_LINEAR_PT_VIRT_START >> (PAGETABLE_ORDER<<1))))
    9.30 +#define __linear_l4_table                                                \
    9.31 +    ((l4_pgentry_t *)(LINEAR_PT_VIRT_START +                             \
    9.32 +                     (VA_LINEAR_PT_VIRT_START >> (PAGETABLE_ORDER<<0)) + \
    9.33 +                     (VA_LINEAR_PT_VIRT_START >> (PAGETABLE_ORDER<<1)) + \
    9.34 +                     (VA_LINEAR_PT_VIRT_START >> (PAGETABLE_ORDER<<2))))
    9.35  
    9.36  #define linear_pg_table linear_l1_table
    9.37  #define linear_l2_table(_ed) ((_ed)->arch.guest_vtable)
    10.1 --- a/xen/include/asm-x86/shadow.h	Sun Aug 07 09:13:39 2005 +0000
    10.2 +++ b/xen/include/asm-x86/shadow.h	Mon Aug 08 08:18:06 2005 +0000
    10.3 @@ -485,7 +485,7 @@ static inline int __mark_dirty(struct do
    10.4      {
    10.5          SH_LOG("mark_dirty OOR! mfn=%x pfn=%lx max=%x (dom %p)",
    10.6                 mfn, pfn, d->arch.shadow_dirty_bitmap_size, d);
    10.7 -        SH_LOG("dom=%p caf=%08x taf=%08x", 
    10.8 +        SH_LOG("dom=%p caf=%08x taf=%" PRtype_info, 
    10.9                 page_get_owner(&frame_table[mfn]),
   10.10                 frame_table[mfn].count_info, 
   10.11                 frame_table[mfn].u.inuse.type_info );
   10.12 @@ -602,14 +602,14 @@ static inline void shadow_drop_reference
   10.13      /* XXX This needs more thought... */
   10.14      printk("%s: needing to call shadow_remove_all_access for mfn=%lx\n",
   10.15             __func__, page_to_pfn(page));
   10.16 -    printk("Before: mfn=%lx c=%08x t=%08x\n", page_to_pfn(page),
   10.17 +    printk("Before: mfn=%lx c=%08x t=%" PRtype_info "\n", page_to_pfn(page),
   10.18             page->count_info, page->u.inuse.type_info);
   10.19  
   10.20      shadow_lock(d);
   10.21      shadow_remove_all_access(d, page_to_pfn(page));
   10.22      shadow_unlock(d);
   10.23  
   10.24 -    printk("After:  mfn=%lx c=%08x t=%08x\n", page_to_pfn(page),
   10.25 +    printk("After:  mfn=%lx c=%08x t=%" PRtype_info "\n", page_to_pfn(page),
   10.26             page->count_info, page->u.inuse.type_info);
   10.27  }
   10.28  
   10.29 @@ -648,7 +648,7 @@ get_shadow_ref(unsigned long smfn)
   10.30  
   10.31      if ( unlikely(nx == 0) )
   10.32      {
   10.33 -        printk("get_shadow_ref overflow, gmfn=%x smfn=%lx\n",
   10.34 +        printk("get_shadow_ref overflow, gmfn=%" PRtype_info  " smfn=%lx\n",
   10.35                 frame_table[smfn].u.inuse.type_info & PGT_mfn_mask,
   10.36                 smfn);
   10.37          BUG();
   10.38 @@ -678,7 +678,8 @@ put_shadow_ref(unsigned long smfn)
   10.39  
   10.40      if ( unlikely(x == 0) )
   10.41      {
   10.42 -        printk("put_shadow_ref underflow, smfn=%lx oc=%08x t=%08x\n",
   10.43 +        printk("put_shadow_ref underflow, smfn=%lx oc=%08x t=%" 
   10.44 +               PRtype_info "\n",
   10.45                 smfn,
   10.46                 frame_table[smfn].count_info,
   10.47                 frame_table[smfn].u.inuse.type_info);
   10.48 @@ -1200,7 +1201,7 @@ static inline unsigned long __shadow_sta
   10.49  #ifndef NDEBUG
   10.50          if ( ___shadow_status(d, gpfn, stype) != 0 )
   10.51          {
   10.52 -            printk("d->id=%d gpfn=%lx gmfn=%lx stype=%lx c=%x t=%x "
   10.53 +            printk("d->id=%d gpfn=%lx gmfn=%lx stype=%lx c=%x t=%" PRtype_info " "
   10.54                     "mfn_out_of_sync(gmfn)=%d mfn_is_page_table(gmfn)=%d\n",
   10.55                     d->domain_id, gpfn, gmfn, stype,
   10.56                     frame_table[gmfn].count_info,
    11.1 --- a/xen/include/asm-x86/x86_64/page.h	Sun Aug 07 09:13:39 2005 +0000
    11.2 +++ b/xen/include/asm-x86/x86_64/page.h	Mon Aug 08 08:18:06 2005 +0000
    11.3 @@ -42,7 +42,8 @@ typedef l4_pgentry_t root_pgentry_t;
    11.4  #endif /* !__ASSEMBLY__ */
    11.5  
    11.6  /* Given a virtual address, get an entry offset into a linear page table. */
    11.7 -#define l1_linear_offset(_a) (((_a) & VADDR_MASK) >> PAGE_SHIFT)
    11.8 +#define l1_linear_offset(_a) (((_a) & VADDR_MASK) >> L1_PAGETABLE_SHIFT)
    11.9 +#define l2_linear_offset(_a) (((_a) & VADDR_MASK) >> L2_PAGETABLE_SHIFT)
   11.10  
   11.11  #define is_guest_l1_slot(_s) (1)
   11.12  #define is_guest_l2_slot(_t, _s) (1)
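The new l2_linear_offset() above, and the switch away from the open-coded addr >> L2_PAGETABLE_SHIFT in mm.c and traps.c, matters on x86/64 because guest addresses carry sign-extension bits; masking with VADDR_MASK first keeps the computed index inside the linear pagetable. A small worked example, assuming VADDR_MASK covers the 48-bit canonical range and L2_PAGETABLE_SHIFT = 21 (values not shown in this hunk):

    /*
     * Illustration of why l2_linear_offset() masks with VADDR_MASK on x86/64
     * (VADDR_MASK = (1 << 48) - 1 and L2_PAGETABLE_SHIFT = 21 are assumed
     * standard values, not taken from this hunk).
     */
    #include <assert.h>
    #include <stdint.h>

    #define VADDR_MASK          ((1ULL << 48) - 1)
    #define L2_PAGETABLE_SHIFT  21

    #define l2_linear_offset(a) (((a) & VADDR_MASK) >> L2_PAGETABLE_SHIFT)

    int main(void)
    {
        uint64_t addr = 0xFFFF800000200000ULL;   /* sign-extended guest address */

        /* The raw shift keeps the sign-extension bits and produces an index
         * far beyond the end of the linear pagetable ... */
        assert((addr >> L2_PAGETABLE_SHIFT) > (VADDR_MASK >> L2_PAGETABLE_SHIFT));

        /* ... while masking first gives the in-range slot: one L2-sized
         * region above the 2^47 boundary. */
        assert(l2_linear_offset(addr) == (1ULL << 26) + 1);
        return 0;
    }

The same masking is what the VA_LINEAR_PT_VIRT_START change in asm-x86/page.h applies to the recursive-mapping offsets used by __linear_l2/l3/l4_table.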