direct-io.hg

changeset 5726:a29b4174d39c

Remaining files for shadow 64 mode checkin.

Signed-off-by: Keir Fraser <keir@xensource.com>
author kaf24@firebug.cl.cam.ac.uk
date Mon Jul 11 10:23:19 2005 +0000 (2005-07-11)
parents e4272b361053
children ba925b4aef28 c3c51a34c924
files xen/include/asm-x86/page-guest32.h xen/include/asm-x86/shadow_64.h xen/include/asm-x86/shadow_public.h
line diff
     1.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     1.2 +++ b/xen/include/asm-x86/page-guest32.h	Mon Jul 11 10:23:19 2005 +0000
     1.3 @@ -0,0 +1,107 @@
     1.4 +
     1.5 +#ifndef __X86_PAGE_GUEST_H__
     1.6 +#define __X86_PAGE_GUEST_H__
     1.7 +
     1.8 +#ifndef __ASSEMBLY__
     1.9 +# include <asm/types.h>
    1.10 +#endif
    1.11 +
    1.12 +#define PAGETABLE_ORDER_32         10
    1.13 +#define L1_PAGETABLE_ENTRIES_32    (1<<PAGETABLE_ORDER_32)
    1.14 +#define L2_PAGETABLE_ENTRIES_32    (1<<PAGETABLE_ORDER_32)
    1.15 +#define ROOT_PAGETABLE_ENTRIES_32  L2_PAGETABLE_ENTRIES_32
    1.16 +
    1.17 +
    1.18 +#define L1_PAGETABLE_SHIFT_32 12
    1.19 +#define L2_PAGETABLE_SHIFT_32 22
    1.20 +
    1.21 +/* Extract flags into 12-bit integer, or turn 12-bit flags into a pte mask. */
    1.22 +
    1.23 +#ifndef __ASSEMBLY__
    1.24 +
    1.25 +typedef u32 intpte_32_t;
    1.26 +
    1.27 +typedef struct { intpte_32_t l1; } l1_pgentry_32_t;
    1.28 +typedef struct { intpte_32_t l2; } l2_pgentry_32_t;
    1.29 +typedef l2_pgentry_t root_pgentry_32_t;
    1.30 +#endif
    1.31 +
    1.32 +#define get_pte_flags_32(x) ((u32)(x) & 0xFFF)
    1.33 +#define put_pte_flags_32(x) ((intpte_32_t)(x))
    1.34 +
    1.35 +/* Get pte access flags (unsigned int). */
    1.36 +#define l1e_get_flags_32(x)           (get_pte_flags_32((x).l1))
    1.37 +#define l2e_get_flags_32(x)           (get_pte_flags_32((x).l2))
    1.38 +
    1.39 +/* Construct an empty pte. */
    1.40 +#define l1e_empty_32()                ((l1_pgentry_32_t) { 0 })
    1.41 +#define l2e_empty_32()                ((l2_pgentry_32_t) { 0 })
    1.42 +
    1.43 +/* Construct a pte from a pfn and access flags. */
    1.44 +#define l1e_from_pfn_32(pfn, flags)   \
    1.45 +    ((l1_pgentry_32_t) { ((intpte_32_t)(pfn) << PAGE_SHIFT) | put_pte_flags_32(flags) })
    1.46 +#define l2e_from_pfn_32(pfn, flags)   \
    1.47 +    ((l2_pgentry_32_t) { ((intpte_32_t)(pfn) << PAGE_SHIFT) | put_pte_flags_32(flags) })
    1.48 +
    1.49 +/* Construct a pte from a physical address and access flags. */
    1.50 +#ifndef __ASSEMBLY__
    1.51 +static inline l1_pgentry_32_t l1e_from_paddr_32(physaddr_t pa, unsigned int flags)
    1.52 +{
    1.53 +    ASSERT((pa & ~(PADDR_MASK & PAGE_MASK)) == 0);
    1.54 +    return (l1_pgentry_32_t) { pa | put_pte_flags_32(flags) };
    1.55 +}
    1.56 +static inline l2_pgentry_32_t l2e_from_paddr_32(physaddr_t pa, unsigned int flags)
    1.57 +{
    1.58 +    ASSERT((pa & ~(PADDR_MASK & PAGE_MASK)) == 0);
    1.59 +    return (l2_pgentry_32_t) { pa | put_pte_flags_32(flags) };
    1.60 +}
    1.61 +#endif /* !__ASSEMBLY__ */
    1.62 +
    1.63 +
    1.64 +/* Construct a pte from a page pointer and access flags. */
    1.65 +#define l1e_from_page_32(page, flags) (l1e_from_pfn_32(page_to_pfn(page),(flags)))
    1.66 +#define l2e_from_page_32(page, flags) (l2e_from_pfn_32(page_to_pfn(page),(flags)))
    1.67 +
    1.68 +/* Add extra flags to an existing pte. */
    1.69 +#define l1e_add_flags_32(x, flags)    ((x).l1 |= put_pte_flags_32(flags))
    1.70 +#define l2e_add_flags_32(x, flags)    ((x).l2 |= put_pte_flags_32(flags))
    1.71 +
    1.72 +/* Remove flags from an existing pte. */
    1.73 +#define l1e_remove_flags_32(x, flags) ((x).l1 &= ~put_pte_flags_32(flags))
    1.74 +#define l2e_remove_flags_32(x, flags) ((x).l2 &= ~put_pte_flags_32(flags))
    1.75 +
    1.76 +/* Check if a pte's page mapping or significant access flags have changed. */
    1.77 +#define l1e_has_changed_32(x,y,flags) \
    1.78 +    ( !!(((x).l1 ^ (y).l1) & ((PADDR_MASK&PAGE_MASK)|put_pte_flags_32(flags))) )
    1.79 +#define l2e_has_changed_32(x,y,flags) \
    1.80 +    ( !!(((x).l2 ^ (y).l2) & ((PADDR_MASK&PAGE_MASK)|put_pte_flags_32(flags))) )
    1.81 +
    1.82 +/* Given a virtual address, get an entry offset into a page table. */
    1.83 +#define l1_table_offset_32(a)         \
    1.84 +    (((a) >> L1_PAGETABLE_SHIFT_32) & (L1_PAGETABLE_ENTRIES_32 - 1))
    1.85 +#define l2_table_offset_32(a)         \
    1.86 +    (((a) >> L2_PAGETABLE_SHIFT_32) & (L2_PAGETABLE_ENTRIES_32 - 1))
    1.87 +
    1.88 +#define linear_l1_table_32                                                 \
    1.89 +    ((l1_pgentry_32_t *)(LINEAR_PT_VIRT_START))
    1.90 +#define __linear_l2_table_32                                                 \
    1.91 +    ((l2_pgentry_32_t *)(LINEAR_PT_VIRT_START +                            \
    1.92 +                     (LINEAR_PT_VIRT_START >> (PAGETABLE_ORDER<<0))))
    1.93 +
    1.94 +#define linear_pg_table_32 linear_l1_table_32
    1.95 +#define linear_l2_table_32(_ed) ((_ed)->arch.guest_vtable)
    1.96 +
    1.97 +#define va_to_l1mfn_32(_ed, _va) \
    1.98 +    (l2e_get_pfn(linear_l2_table(_ed)[_va>>L2_PAGETABLE_SHIFT]))
    1.99 +
   1.100 +#endif /* __X86_PAGE_GUEST_H__ */
   1.101 +
   1.102 +/*
   1.103 + * Local variables:
   1.104 + * mode: C
   1.105 + * c-set-style: "BSD"
   1.106 + * c-basic-offset: 4
   1.107 + * tab-width: 4
   1.108 + * indent-tabs-mode: nil
   1.109 + * End:
   1.110 + */
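
The accessors above give the shadow code a 32-bit guest view of page-table entries that is independent of the hypervisor's native entry types. A minimal usage sketch follows (illustrative only, not part of the patch; the pfn and flag values are arbitrary):

static void example_guest32_pte(void)
{
    unsigned int idx;

    /* Build a present, writable 32-bit guest L1 entry for an arbitrary pfn. */
    l1_pgentry_32_t l1e = l1e_from_pfn_32(0x1234, _PAGE_PRESENT | _PAGE_RW);

    /* Query and modify its flags with the _32 accessors. */
    if ( l1e_get_flags_32(l1e) & _PAGE_PRESENT )
        l1e_add_flags_32(l1e, _PAGE_ACCESSED);

    /* Which guest L1 slot maps this (arbitrary) guest virtual address? */
    idx = l1_table_offset_32(0x08048000UL);
    (void)idx;
}
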
     2.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     2.2 +++ b/xen/include/asm-x86/shadow_64.h	Mon Jul 11 10:23:19 2005 +0000
     2.3 @@ -0,0 +1,504 @@
     2.4 +/******************************************************************************
     2.5 + * include/asm-x86/shadow_64.h
     2.6 + * 
     2.7 + * Copyright (c) 2005 Michael A Fetterman
     2.8 + * Based on an earlier implementation by Ian Pratt et al
     2.9 + * 
    2.10 + * This program is free software; you can redistribute it and/or modify
    2.11 + * it under the terms of the GNU General Public License as published by
    2.12 + * the Free Software Foundation; either version 2 of the License, or
    2.13 + * (at your option) any later version.
    2.14 + * 
    2.15 + * This program is distributed in the hope that it will be useful,
    2.16 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
    2.17 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    2.18 + * GNU General Public License for more details.
    2.19 + * 
    2.20 + * You should have received a copy of the GNU General Public License
    2.21 + * along with this program; if not, write to the Free Software
    2.22 + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
    2.23 + */
    2.24 +/*
    2.25 + * Jun Nakajima <jun.nakajima@intel.com>
    2.26 + * Chengyuan Li <chengyuan.li@intel.com>
    2.27 + *
    2.28 + * Extended to support 64-bit guests.
    2.29 + */
    2.30 +#ifndef _XEN_SHADOW_64_H
    2.31 +#define _XEN_SHADOW_64_H
    2.32 +#include <asm/shadow.h>
    2.33 +
    2.34 +#define READ_FAULT  0
    2.35 +#define WRITE_FAULT 1
    2.36 +
    2.37 +#define ERROR_W    2
    2.38 +#define ERROR_U     4
    2.39 +#define X86_64_SHADOW_DEBUG 0
    2.40 +
    2.41 +#if X86_64_SHADOW_DEBUG
    2.42 +#define ESH_LOG(_f, _a...)              \
    2.43 +        printk(_f, ##_a)
    2.44 +#else
    2.45 +#define ESH_LOG(_f, _a...) ((void)0)
    2.46 +#endif
    2.47 +
    2.48 +#define L4      4UL
    2.49 +#define L3      3UL
    2.50 +#define L2      2UL
    2.51 +#define L1      1UL
    2.52 +#define L_MASK  0xff
    2.53 +
    2.54 +#define ROOT_LEVEL_64   L4
    2.55 +#define ROOT_LEVEL_32   L2
    2.56 +
    2.57 +#define SHADOW_ENTRY    (2UL << 16)
    2.58 +#define GUEST_ENTRY     (1UL << 16)
    2.59 +
    2.60 +#define GET_ENTRY   (2UL << 8)
    2.61 +#define SET_ENTRY   (1UL << 8)
    2.62 +
    2.63 +#define PAGETABLE_ENTRIES    (1<<PAGETABLE_ORDER)
    2.64 +
    2.65 +typedef struct { intpte_t lo; } pgentry_64_t;
    2.66 +#define shadow_level_to_type(l)    (l << 29)
    2.67 +#define shadow_type_to_level(t)    (t >> 29)
    2.68 +
    2.69 +#define entry_get_value(_x)         ((_x).lo)
    2.70 +#define entry_get_pfn(_x)           \
    2.71 +      (((_x).lo & (PADDR_MASK&PAGE_MASK)) >> PAGE_SHIFT)
    2.72 +#define entry_get_paddr(_x)          (((_x).lo & (PADDR_MASK&PAGE_MASK)))
    2.73 +#define entry_get_flags(_x)         (get_pte_flags((_x).lo))
    2.74 +
    2.75 +#define entry_empty()           ((pgentry_64_t) { 0 })
    2.76 +#define entry_from_pfn(pfn, flags)  \
    2.77 +    ((pgentry_64_t) { ((intpte_t)(pfn) << PAGE_SHIFT) | put_pte_flags(flags) })
    2.78 +#define entry_add_flags(x, flags)    ((x).lo |= put_pte_flags(flags))
    2.79 +#define entry_remove_flags(x, flags) ((x).lo &= ~put_pte_flags(flags))
    2.80 +#define entry_has_changed(x,y,flags) \
    2.81 +        ( !!(((x).lo ^ (y).lo) & ((PADDR_MASK&PAGE_MASK)|put_pte_flags(flags))) )
    2.82 +static inline int  table_offset_64(unsigned long va, int level)
    2.83 +{
    2.84 +    switch(level) {
    2.85 +        case 1:
    2.86 +            return  (((va) >> L1_PAGETABLE_SHIFT) & (L1_PAGETABLE_ENTRIES - 1));
    2.87 +        case 2:
    2.88 +            return  (((va) >> L2_PAGETABLE_SHIFT) & (L2_PAGETABLE_ENTRIES - 1));
    2.89 +        case 3:
    2.90 +            return  (((va) >> L3_PAGETABLE_SHIFT) & (L3_PAGETABLE_ENTRIES - 1));
    2.91 +        case 4:
    2.92 +            return  (((va) >> L4_PAGETABLE_SHIFT) & (L4_PAGETABLE_ENTRIES - 1));
    2.93 +        default:
    2.94 +            //printk("<table_offset_64> level %d is too big\n", level);
    2.95 +            return -1;
    2.96 +    }
    2.97 +}
    2.98 +
    2.99 +static inline void free_out_of_sync_state(struct domain *d)
   2.100 +{
   2.101 +    struct out_of_sync_entry *entry;
   2.102 +
   2.103 +    // NB: Be careful not to call something that manipulates this list
   2.104 +    //     while walking it.  Remove one item at a time, and always
   2.105 +    //     restart from start of list.
   2.106 +    //
   2.107 +    while ( (entry = d->arch.out_of_sync) )
   2.108 +    {
   2.109 +        d->arch.out_of_sync = entry->next;
   2.110 +        release_out_of_sync_entry(d, entry);
   2.111 +
   2.112 +        entry->next = d->arch.out_of_sync_free;
   2.113 +        d->arch.out_of_sync_free = entry;
   2.114 +    }
   2.115 +}
   2.116 +
   2.117 +static inline pgentry_64_t *__entry(
   2.118 +    struct vcpu *v, u64 va, u32 flag)
   2.119 +{
   2.120 +    int i;
   2.121 +    pgentry_64_t *le_e;
   2.122 +    pgentry_64_t *le_p;
   2.123 +    unsigned long mfn;
   2.124 +    int index;
   2.125 +    u32 level = flag & L_MASK;
   2.126 +    struct domain *d = v->domain;
   2.127 +
   2.128 +    index = table_offset_64(va, ROOT_LEVEL_64);
   2.129 +    if (flag & SHADOW_ENTRY)
   2.130 +        le_e = (pgentry_64_t *)&v->arch.shadow_vtable[index];
   2.131 +    else
   2.132 +        le_e = (pgentry_64_t *)&v->arch.guest_vtable[index];
   2.133 +
   2.134 +    /*
   2.135 +     * If it's not external mode, then mfn should be machine physical.
   2.136 +     */
   2.137 +    for (i = ROOT_LEVEL_64 - level; i > 0; i--) {
   2.138 +        if (unlikely(!(entry_get_flags(*le_e) & _PAGE_PRESENT)))
   2.139 +            return NULL;
   2.140 +        mfn = entry_get_value(*le_e) >> PAGE_SHIFT;
   2.141 +        if ((flag & GUEST_ENTRY) && shadow_mode_translate(d))
   2.142 +            mfn = phys_to_machine_mapping(mfn);
   2.143 +        le_p = (pgentry_64_t *)phys_to_virt(mfn << PAGE_SHIFT);
   2.144 +        index = table_offset_64(va, (level + i - 1));
   2.145 +        le_e = &le_p[index];
   2.146 +
   2.147 +    }
   2.148 +    return le_e;
   2.149 +
   2.150 +}
   2.151 +
   2.152 +static inline pgentry_64_t *__rw_entry(
   2.153 +    struct vcpu *ed, u64 va, void *e_p, u32 flag)
   2.154 +{
   2.155 +    pgentry_64_t *le_e = __entry(ed, va, flag);
   2.156 +    pgentry_64_t *e = (pgentry_64_t *)e_p;
   2.157 +    if (le_e == NULL)
   2.158 +        return NULL;
   2.159 +
   2.160 +    if (e) {
   2.161 +        if (flag & SET_ENTRY)
   2.162 +            *le_e = *e;
   2.163 +        else
   2.164 +            *e = *le_e;
   2.165 +    }
   2.166 +    return le_e;
   2.167 +}
   2.168 +#define __shadow_set_l4e(v, va, value) \
   2.169 +  __rw_entry(v, va, value, SHADOW_ENTRY | SET_ENTRY | L4)
   2.170 +#define __shadow_get_l4e(v, va, sl4e) \
   2.171 +  __rw_entry(v, va, sl4e, SHADOW_ENTRY | GET_ENTRY | L4)
   2.172 +#define __shadow_set_l3e(v, va, value) \
   2.173 +  __rw_entry(v, va, value, SHADOW_ENTRY | SET_ENTRY | L3)
   2.174 +#define __shadow_get_l3e(v, va, sl3e) \
   2.175 +  __rw_entry(v, va, sl3e, SHADOW_ENTRY | GET_ENTRY | L3)
   2.176 +#define __shadow_set_l2e(v, va, value) \
   2.177 +  __rw_entry(v, va, value, SHADOW_ENTRY | SET_ENTRY | L2)
   2.178 +#define __shadow_get_l2e(v, va, sl2e) \
   2.179 +  __rw_entry(v, va, sl2e, SHADOW_ENTRY | GET_ENTRY | L2)
   2.180 +#define __shadow_set_l1e(v, va, value) \
   2.181 +  __rw_entry(v, va, value, SHADOW_ENTRY | SET_ENTRY | L1)
   2.182 +#define __shadow_get_l1e(v, va, sl1e) \
   2.183 +  __rw_entry(v, va, sl1e, SHADOW_ENTRY | GET_ENTRY | L1)
   2.184 +
   2.185 +#define __guest_set_l4e(v, va, value) \
   2.186 +  __rw_entry(v, va, value, GUEST_ENTRY | SET_ENTRY | L4)
   2.187 +#define __guest_get_l4e(v, va, gl4e) \
   2.188 +  __rw_entry(v, va, gl4e, GUEST_ENTRY | GET_ENTRY | L4)
   2.189 +#define __guest_set_l3e(v, va, value) \
   2.190 +  __rw_entry(v, va, value, GUEST_ENTRY | SET_ENTRY | L3)
    2.191 +#define __guest_get_l3e(v, va, gl3e) \
   2.192 +  __rw_entry(v, va, gl3e, GUEST_ENTRY | GET_ENTRY | L3)
   2.193 +
   2.194 +static inline void *  __guest_set_l2e(
   2.195 +    struct vcpu *v, u64 va, void *value, int size)
   2.196 +{
   2.197 +    switch(size) {
   2.198 +        case 4:
   2.199 +            // 32-bit guest
   2.200 +            {
   2.201 +                l2_pgentry_32_t *l2va;
   2.202 +
   2.203 +                l2va = (l2_pgentry_32_t *)v->arch.guest_vtable;
   2.204 +                if (value)
   2.205 +                    l2va[l2_table_offset_32(va)] = *(l2_pgentry_32_t *)value;
   2.206 +                return &l2va[l2_table_offset_32(va)];
   2.207 +            }
   2.208 +        case 8:
   2.209 +            return __rw_entry(v, va, value, GUEST_ENTRY | SET_ENTRY | L2);
   2.210 +        default:
   2.211 +            BUG();
   2.212 +            return NULL;
   2.213 +    }
   2.214 +    return NULL;
   2.215 +}
   2.216 +
   2.217 +#define __guest_set_l2e(v, va, value) \
   2.218 +  ( __typeof__(value) )__guest_set_l2e(v, (u64)va, value, sizeof(*value))
   2.219 +
   2.220 +static inline void * __guest_get_l2e(
   2.221 +  struct vcpu *v, u64 va, void *gl2e, int size)
   2.222 +{
   2.223 +    switch(size) {
   2.224 +        case 4:
   2.225 +            // 32-bit guest
   2.226 +            {
   2.227 +                l2_pgentry_32_t *l2va;
   2.228 +                l2va = (l2_pgentry_32_t *)v->arch.guest_vtable;
   2.229 +                if (gl2e)
   2.230 +                    *(l2_pgentry_32_t *)gl2e = l2va[l2_table_offset_32(va)];
   2.231 +                return &l2va[l2_table_offset_32(va)];
   2.232 +            }
   2.233 +        case 8:
   2.234 +            return __rw_entry(v, va, gl2e, GUEST_ENTRY | GET_ENTRY | L2);
   2.235 +        default:
   2.236 +            BUG();
   2.237 +            return NULL;
   2.238 +    }
   2.239 +    return NULL;
   2.240 +}
   2.241 +
   2.242 +#define __guest_get_l2e(v, va, gl2e) \
   2.243 +  (__typeof__ (gl2e))__guest_get_l2e(v, (u64)va, gl2e, sizeof(*gl2e))
   2.244 +
   2.245 +static inline void *  __guest_set_l1e(
   2.246 +  struct vcpu *v, u64 va, void *value, int size)
   2.247 +{
   2.248 +    switch(size) {
   2.249 +        case 4:
   2.250 +            // 32-bit guest
   2.251 +            {
   2.252 +                l2_pgentry_32_t gl2e;
   2.253 +                l1_pgentry_32_t *l1va;
   2.254 +                unsigned long l1mfn;
   2.255 +
   2.256 +                if (!__guest_get_l2e(v, va, &gl2e))
   2.257 +                    return NULL;
   2.258 +                if (unlikely(!(l2e_get_flags_32(gl2e) & _PAGE_PRESENT)))
   2.259 +                    return NULL;
   2.260 +
   2.261 +                l1mfn = phys_to_machine_mapping(
   2.262 +                  l2e_get_pfn(gl2e));
   2.263 +
   2.264 +                l1va = (l1_pgentry_32_t *)
   2.265 +                  phys_to_virt(l1mfn << L1_PAGETABLE_SHIFT);
   2.266 +                if (value)
   2.267 +                    l1va[l1_table_offset_32(va)] = *(l1_pgentry_32_t *)value;
   2.268 +
   2.269 +                return &l1va[l1_table_offset_32(va)];
   2.270 +            }
   2.271 +
   2.272 +        case 8:
   2.273 +            return __rw_entry(v, va, value, GUEST_ENTRY | SET_ENTRY | L1);
   2.274 +        default:
   2.275 +            BUG();
   2.276 +            return NULL;
   2.277 +    }
   2.278 +    return NULL;
   2.279 +}
   2.280 +
   2.281 +#define __guest_set_l1e(v, va, value) \
   2.282 +  ( __typeof__(value) )__guest_set_l1e(v, (u64)va, value, sizeof(*value))
   2.283 +
   2.284 +static inline void *  __guest_get_l1e(
   2.285 +  struct vcpu *v, u64 va, void *gl1e, int size)
   2.286 +{
   2.287 +    switch(size) {
   2.288 +        case 4:
   2.289 +            // 32-bit guest
   2.290 +            {
   2.291 +                l2_pgentry_32_t gl2e;
   2.292 +                l1_pgentry_32_t *l1va;
   2.293 +                unsigned long l1mfn;
   2.294 +
   2.295 +                if (!(__guest_get_l2e(v, va, &gl2e)))
   2.296 +                    return NULL;
   2.297 +
   2.298 +
   2.299 +                if (unlikely(!(l2e_get_flags_32(gl2e) & _PAGE_PRESENT)))
   2.300 +                    return NULL;
   2.301 +
   2.302 +
   2.303 +                l1mfn = phys_to_machine_mapping(
   2.304 +                  l2e_get_pfn(gl2e));
   2.305 +                l1va = (l1_pgentry_32_t *) phys_to_virt(
   2.306 +                  l1mfn << L1_PAGETABLE_SHIFT);
   2.307 +                if (gl1e)
   2.308 +                    *(l1_pgentry_32_t *)gl1e = l1va[l1_table_offset_32(va)];
   2.309 +
   2.310 +                return &l1va[l1_table_offset_32(va)];
   2.311 +            }
   2.312 +        case 8:
   2.313 +            // 64-bit guest
   2.314 +            return __rw_entry(v, va, gl1e, GUEST_ENTRY | GET_ENTRY | L1);
   2.315 +        default:
   2.316 +            BUG();
   2.317 +            return NULL;
   2.318 +    }
   2.319 +    return NULL;
   2.320 +}
   2.321 +
   2.322 +#define __guest_get_l1e(v, va, gl1e) \
   2.323 +  ( __typeof__(gl1e) )__guest_get_l1e(v, (u64)va, gl1e, sizeof(*gl1e))
   2.324 +
   2.325 +static inline void entry_general(
   2.326 +  struct domain *d,
   2.327 +  pgentry_64_t *gle_p,
   2.328 +  pgentry_64_t *sle_p,
   2.329 +  unsigned long smfn, u32 level)
   2.330 +
   2.331 +{
   2.332 +    pgentry_64_t gle = *gle_p;
   2.333 +    pgentry_64_t sle;
   2.334 +
   2.335 +    sle = entry_empty();
   2.336 +    if ( (entry_get_flags(gle) & _PAGE_PRESENT) && (smfn != 0) )
   2.337 +    {
   2.338 +        if ((entry_get_flags(gle) & _PAGE_PSE) && level == L2) {
   2.339 +            sle = entry_from_pfn(smfn, entry_get_flags(gle));
   2.340 +            entry_remove_flags(sle, _PAGE_PSE);
   2.341 +
   2.342 +            if ( shadow_mode_log_dirty(d) ||
   2.343 +		 !(entry_get_flags(gle) & _PAGE_DIRTY) )
   2.344 +            {
   2.345 +                pgentry_64_t *l1_p;
   2.346 +                int i;
   2.347 +
   2.348 +                l1_p =(pgentry_64_t *)map_domain_page(smfn);
   2.349 +                for (i = 0; i < L1_PAGETABLE_ENTRIES; i++)
   2.350 +                    entry_remove_flags(l1_p[i], _PAGE_RW);
   2.351 +
   2.352 +                unmap_domain_page(l1_p);
   2.353 +            }
   2.354 +        } else {
   2.355 +            sle = entry_from_pfn(smfn,
   2.356 +				 (entry_get_flags(gle) | _PAGE_RW | _PAGE_ACCESSED) & ~_PAGE_AVAIL);
   2.357 +            entry_add_flags(gle, _PAGE_ACCESSED);
   2.358 +        }
   2.359 +        // XXX mafetter: Hmm...
   2.360 +        //     Shouldn't the dirty log be checked/updated here?
   2.361 +        //     Actually, it needs to be done in this function's callers.
   2.362 +        //
   2.363 +        *gle_p = gle;
   2.364 +    }
   2.365 +
   2.366 +    if ( entry_get_value(sle) || entry_get_value(gle) )
   2.367 +        SH_VVLOG("%s: gpde=%lx, new spde=%lx", __func__,
   2.368 +          entry_get_value(gle), entry_get_value(sle));
   2.369 +
   2.370 +    *sle_p = sle;
   2.371 +}
   2.372 +
   2.373 +static inline void entry_propagate_from_guest(
   2.374 +  struct domain *d, pgentry_64_t *gle_p, pgentry_64_t *sle_p, u32 level)
   2.375 +{
   2.376 +    pgentry_64_t gle = *gle_p;
   2.377 +    unsigned long smfn = 0;
   2.378 +
   2.379 +    if ( entry_get_flags(gle) & _PAGE_PRESENT ) {
   2.380 +        if ((entry_get_flags(gle) & _PAGE_PSE) && level == L2) {
   2.381 +            smfn =  __shadow_status(d, entry_get_value(gle) >> PAGE_SHIFT, PGT_fl1_shadow);
   2.382 +        } else {
   2.383 +            smfn =  __shadow_status(d, entry_get_pfn(gle), 
   2.384 +              shadow_level_to_type((level -1 )));
   2.385 +        }
   2.386 +    }
   2.387 +    entry_general(d, gle_p, sle_p, smfn, level);
   2.388 +
   2.389 +}
   2.390 +
    2.391 +static inline int
   2.392 +validate_entry_change(
   2.393 +  struct domain *d,
   2.394 +  pgentry_64_t *new_gle_p,
   2.395 +  pgentry_64_t *shadow_le_p,
   2.396 +  u32 level)
   2.397 +{
   2.398 +    pgentry_64_t old_sle, new_sle;
   2.399 +    pgentry_64_t new_gle = *new_gle_p;
   2.400 +
   2.401 +    old_sle = *shadow_le_p;
   2.402 +    entry_propagate_from_guest(d, &new_gle, &new_sle, level);
   2.403 +
   2.404 +    ESH_LOG("old_sle: %lx, new_gle: %lx, new_sle: %lx\n",
   2.405 +      entry_get_value(old_sle), entry_get_value(new_gle),
   2.406 +      entry_get_value(new_sle));
   2.407 +
   2.408 +    if ( ((entry_get_value(old_sle) | entry_get_value(new_sle)) & _PAGE_PRESENT) &&
   2.409 +      entry_has_changed(old_sle, new_sle, _PAGE_PRESENT) )
   2.410 +    {
   2.411 +        perfc_incrc(validate_entry_changes);
   2.412 +
   2.413 +        if ( (entry_get_flags(new_sle) & _PAGE_PRESENT) &&
   2.414 +          !get_shadow_ref(entry_get_pfn(new_sle)) )
   2.415 +            BUG();
   2.416 +        if ( entry_get_flags(old_sle) & _PAGE_PRESENT )
   2.417 +            put_shadow_ref(entry_get_pfn(old_sle));
   2.418 +    }
   2.419 +
   2.420 +    *shadow_le_p = new_sle;
   2.421 +
   2.422 +    return 1;
   2.423 +}
   2.424 +
   2.425 +/*
   2.426 + * Check P, R/W, U/S bits in the guest page table.
    2.427 + * If the fault belongs to the guest, return 1;
    2.428 + * otherwise return 0.
   2.429 + */
   2.430 +static inline int guest_page_fault(struct vcpu *v,
   2.431 +  unsigned long va, unsigned int error_code, pgentry_64_t *gpl2e, pgentry_64_t *gpl1e)
   2.432 +{
   2.433 +    struct domain *d = v->domain;
   2.434 +    pgentry_64_t gle, *lva;
   2.435 +    unsigned long mfn;
   2.436 +    int i;
   2.437 +
   2.438 +    __rw_entry(v, va, &gle, GUEST_ENTRY | GET_ENTRY | L4);
   2.439 +    if (unlikely(!(entry_get_flags(gle) & _PAGE_PRESENT)))
   2.440 +        return 1;
   2.441 +
   2.442 +    if (error_code & ERROR_W) {
   2.443 +        if (unlikely(!(entry_get_flags(gle) & _PAGE_RW)))
   2.444 +            return 1;
   2.445 +    }
   2.446 +    if (error_code & ERROR_U) {
   2.447 +        if (unlikely(!(entry_get_flags(gle) & _PAGE_USER)))
   2.448 +            return 1;
   2.449 +    }
   2.450 +    for (i = L3; i >= L1; i--) {
   2.451 +	/*
   2.452 +	 * If it's not external mode, then mfn should be machine physical.
   2.453 +	 */
   2.454 +	mfn = __gpfn_to_mfn(d, (entry_get_value(gle) >> PAGE_SHIFT));
   2.455 +
   2.456 +        lva = (pgentry_64_t *) phys_to_virt(
   2.457 +	    mfn << PAGE_SHIFT);
   2.458 +        gle = lva[table_offset_64(va, i)];
   2.459 +
   2.460 +        if (unlikely(!(entry_get_flags(gle) & _PAGE_PRESENT)))
   2.461 +            return 1;
   2.462 +
   2.463 +        if (error_code & ERROR_W) {
   2.464 +            if (unlikely(!(entry_get_flags(gle) & _PAGE_RW)))
   2.465 +                return 1;
   2.466 +        }
   2.467 +        if (error_code & ERROR_U) {
   2.468 +            if (unlikely(!(entry_get_flags(gle) & _PAGE_USER)))
   2.469 +                return 1;
   2.470 +        }
   2.471 +
   2.472 +        if (i == L2) {
   2.473 +            if (gpl2e)
   2.474 +                *gpl2e = gle;
   2.475 +
   2.476 +            if (likely(entry_get_flags(gle) & _PAGE_PSE))
   2.477 +                return 0;
   2.478 +
   2.479 +        }
   2.480 +
   2.481 +        if (i == L1)
   2.482 +            if (gpl1e)
   2.483 +                *gpl1e = gle;
   2.484 +    }
   2.485 +    return 0;
   2.486 +}
   2.487 +
   2.488 +static inline unsigned long gva_to_gpa(unsigned long gva)
   2.489 +{
   2.490 +    struct vcpu *v = current;
   2.491 +    pgentry_64_t gl1e;
   2.492 +    pgentry_64_t gl2e;
   2.493 +    unsigned long gpa;
   2.494 +
   2.495 +    if (guest_page_fault(v, gva, 0, &gl2e, &gl1e))
   2.496 +        return 0;
   2.497 +    if (entry_get_flags(gl2e) & _PAGE_PSE)
   2.498 +        gpa = entry_get_paddr(gl2e) + (gva & ((1 << L2_PAGETABLE_SHIFT) - 1));
   2.499 +    else
   2.500 +        gpa = entry_get_paddr(gl1e) + (gva & ~PAGE_MASK);
   2.501 +
   2.502 +    return gpa;
   2.503 +
   2.504 +}
   2.505 +#endif
   2.506 +
   2.507 +
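
In shadow_64.h, __entry() and __rw_entry() are driven by a single flag word: the low byte selects the page-table level (L1..L4, masked by L_MASK), the next bits select GET_ENTRY or SET_ENTRY, and the bits above 16 select SHADOW_ENTRY or GUEST_ENTRY; the __shadow_*/__guest_* macros are pre-packed combinations of these bits. A minimal sketch of the read path (illustrative only, not part of the patch):

static int example_read_shadow_l2e(struct vcpu *v, unsigned long va)
{
    pgentry_64_t sl2e;

    /* Expands to __rw_entry(v, va, &sl2e, SHADOW_ENTRY | GET_ENTRY | L2):
     * walk the shadow tables down to level 2 and copy the entry out.    */
    if ( __shadow_get_l2e(v, va, &sl2e) == NULL )
        return 0;   /* an intermediate shadow table was not present */

    return !!(entry_get_flags(sl2e) & _PAGE_PRESENT);
}
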
     3.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     3.2 +++ b/xen/include/asm-x86/shadow_public.h	Mon Jul 11 10:23:19 2005 +0000
     3.3 @@ -0,0 +1,59 @@
     3.4 +/******************************************************************************
     3.5 + * include/asm-x86/shadow_public.h
     3.6 + * 
     3.7 + * Copyright (c) 2005 Michael A Fetterman
     3.8 + * Based on an earlier implementation by Ian Pratt et al
     3.9 + * 
    3.10 + * This program is free software; you can redistribute it and/or modify
    3.11 + * it under the terms of the GNU General Public License as published by
    3.12 + * the Free Software Foundation; either version 2 of the License, or
    3.13 + * (at your option) any later version.
    3.14 + * 
    3.15 + * This program is distributed in the hope that it will be useful,
    3.16 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
    3.17 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    3.18 + * GNU General Public License for more details.
    3.19 + * 
    3.20 + * You should have received a copy of the GNU General Public License
    3.21 + * along with this program; if not, write to the Free Software
    3.22 + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
    3.23 + */
    3.24 +
    3.25 +#ifndef _XEN_SHADOW_PUBLIC_H
    3.26 +#define _XEN_SHADOW_PUBLIC_H
    3.27 +#if CONFIG_PAGING_LEVELS >= 4
    3.28 +#define MFN_PINNED(_x) (frame_table[_x].u.inuse.type_info & PGT_pinned)
    3.29 +
    3.30 +extern int alloc_p2m_table(struct domain *d);
    3.31 +
    3.32 +extern void shadow_sync_and_drop_references(
    3.33 +      struct domain *d, struct pfn_info *page);
    3.34 +extern void shadow_drop_references(
    3.35 +      struct domain *d, struct pfn_info *page);
    3.36 +
    3.37 +extern void shadow_l4_normal_pt_update(struct domain *d,
    3.38 +                                       unsigned long pa, l4_pgentry_t l4e,
    3.39 +                                       struct domain_mmap_cache *cache);
    3.40 +
    3.41 +extern int shadow_set_guest_paging_levels(struct domain *d, int levels);
    3.42 +
    3.43 +extern void release_out_of_sync_entry(
    3.44 +    struct domain *d, struct out_of_sync_entry *entry);
    3.45 +
    3.46 +struct shadow_ops {
    3.47 +    unsigned long guest_paging_levels; /* guest paging levels */
    3.48 +    void (*invlpg)(struct vcpu *v, unsigned long va);
    3.49 +    int  (*fault)(unsigned long va, struct cpu_user_regs *regs);
    3.50 +    void (*update_pagetables)(struct vcpu *v);
    3.51 +    void (*sync_all)(struct domain *d);
    3.52 +    int  (*remove_all_write_access)(struct domain *d,
    3.53 +             unsigned long readonly_gpfn, unsigned long readonly_gmfn);
    3.54 +    int  (*do_update_va_mapping)(unsigned long va, l1_pgentry_t val, struct vcpu *v);
    3.55 +    struct out_of_sync_entry *
    3.56 +         (*mark_mfn_out_of_sync)(struct vcpu *v, unsigned long gpfn,
    3.57 +                              unsigned long mfn);
    3.58 +    int  (*is_out_of_sync)(struct vcpu *v, unsigned long va);
    3.59 +};
    3.60 +#endif
    3.61 +
    3.62 +#endif
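
shadow_public.h exposes struct shadow_ops so that common shadow code can dispatch to a per-guest-paging-mode implementation through one table of function pointers instead of branching on the guest's level count. A hypothetical sketch of such a table follows (the demo_* handlers and the installing assignment are placeholders, not symbols defined by this changeset):

static void demo_invlpg(struct vcpu *v, unsigned long va)
{
    /* flush the shadow mapping of va for this vcpu */
}

static int demo_fault(unsigned long va, struct cpu_user_regs *regs)
{
    return 0;   /* fault not handled in this stub */
}

static struct shadow_ops demo_4level_ops = {
    .guest_paging_levels = 4,
    .invlpg              = demo_invlpg,
    .fault               = demo_fault,
    /* remaining handlers omitted from this sketch */
};

/* A paging-mode switch would then install the table, e.g.
 * d->arch.ops = &demo_4level_ops; (the field name is hypothetical here). */
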