ia64/xen-unstable

changeset 12724:aedeaa926893
merge

author:   Alastair Tse <atse@xensource.com>
date:     Fri Dec 01 11:08:34 2006 +0000 (2006-12-01)
parents:  6cbe0449dc85 f35f17d24a23
children: 36fe7ca48e54
--- a/xen/arch/x86/hvm/svm/emulate.c	Fri Dec 01 11:07:22 2006 +0000
+++ b/xen/arch/x86/hvm/svm/emulate.c	Fri Dec 01 11:08:34 2006 +0000
@@ -128,17 +128,6 @@ static inline unsigned long DECODE_GPR_V
         return (unsigned long) -1; \
 }
 
-#if 0
-/*
- * hv_is_canonical - checks if the given address is canonical
- */
-static inline u64 hv_is_canonical(u64 addr)
-{
-    u64 bits = addr & (u64)0xffff800000000000;
-    return (u64)((bits == (u64)0xffff800000000000) || (bits == (u64)0x0));
-}
-#endif
-
 #define modrm operand [0]
 
 #define sib operand [1]
--- a/xen/arch/x86/hvm/svm/svm.c	Fri Dec 01 11:07:22 2006 +0000
+++ b/xen/arch/x86/hvm/svm/svm.c	Fri Dec 01 11:08:34 2006 +0000
@@ -269,13 +269,11 @@ static int svm_long_mode_enabled(struct
     return test_bit(SVM_CPU_STATE_LMA_ENABLED, &v->arch.hvm_svm.cpu_state);
 }
 
-#define IS_CANO_ADDRESS(add) 1
-
 static inline int long_mode_do_msr_read(struct cpu_user_regs *regs)
 {
     u64 msr_content = 0;
-    struct vcpu *vc = current;
-    struct vmcb_struct *vmcb = vc->arch.hvm_svm.vmcb;
+    struct vcpu *v = current;
+    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
 
     switch ((u32)regs->ecx)
     {
@@ -284,17 +282,25 @@ static inline int long_mode_do_msr_read(
         msr_content &= ~EFER_SVME;
         break;
 
+#ifdef __x86_64__
     case MSR_FS_BASE:
         msr_content = vmcb->fs.base;
-        break;
+        goto check_long_mode;
 
     case MSR_GS_BASE:
         msr_content = vmcb->gs.base;
-        break;
+        goto check_long_mode;
 
     case MSR_SHADOW_GS_BASE:
         msr_content = vmcb->kerngsbase;
+    check_long_mode:
+        if ( !svm_long_mode_enabled(v) )
+        {
+            svm_inject_exception(v, TRAP_gp_fault, 1, 0);
+            return 0;
+        }
         break;
+#endif
 
     case MSR_STAR:
         msr_content = vmcb->star;
@@ -326,25 +332,25 @@ static inline int long_mode_do_msr_read(
 static inline int long_mode_do_msr_write(struct cpu_user_regs *regs)
 {
     u64 msr_content = (u32)regs->eax | ((u64)regs->edx << 32);
+    u32 ecx = regs->ecx;
     struct vcpu *v = current;
     struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
 
     HVM_DBG_LOG(DBG_LEVEL_1, "msr %x msr_content %"PRIx64"\n",
-                (u32)regs->ecx, msr_content);
-
-    switch ( (u32)regs->ecx )
+                ecx, msr_content);
+
+    switch ( ecx )
     {
     case MSR_EFER:
-#ifdef __x86_64__
         /* offending reserved bit will cause #GP */
         if ( msr_content & ~(EFER_LME | EFER_LMA | EFER_NX | EFER_SCE) )
         {
-            printk("Trying to set reserved bit in EFER: %"PRIx64"\n",
-                   msr_content);
-            svm_inject_exception(v, TRAP_gp_fault, 1, 0);
-            return 0;
+            gdprintk(XENLOG_WARNING, "Trying to set reserved bit in "
+                     "EFER: %"PRIx64"\n", msr_content);
+            goto gp_fault;
         }
 
+#ifdef __x86_64__
         /* LME: 0 -> 1 */
         if ( msr_content & EFER_LME &&
              !test_bit(SVM_CPU_STATE_LME_ENABLED, &v->arch.hvm_svm.cpu_state))
@@ -353,10 +359,9 @@ static inline int long_mode_do_msr_write
                  !test_bit(SVM_CPU_STATE_PAE_ENABLED,
                            &v->arch.hvm_svm.cpu_state) )
             {
-                printk("Trying to set LME bit when "
-                       "in paging mode or PAE bit is not set\n");
-                svm_inject_exception(v, TRAP_gp_fault, 1, 0);
-                return 0;
+                gdprintk(XENLOG_WARNING, "Trying to set LME bit when "
+                         "in paging mode or PAE bit is not set\n");
+                goto gp_fault;
             }
             set_bit(SVM_CPU_STATE_LME_ENABLED, &v->arch.hvm_svm.cpu_state);
         }
@@ -371,37 +376,38 @@ static inline int long_mode_do_msr_write
         vmcb->efer = msr_content | EFER_SVME;
         break;
 
+#ifdef __x86_64__
     case MSR_FS_BASE:
     case MSR_GS_BASE:
+    case MSR_SHADOW_GS_BASE:
         if ( !svm_long_mode_enabled(v) )
-            goto exit_and_crash;
-
-        if (!IS_CANO_ADDRESS(msr_content))
-        {
-            HVM_DBG_LOG(DBG_LEVEL_1, "Not cano address of msr write\n");
-            svm_inject_exception(v, TRAP_gp_fault, 1, 0);
-        }
-
-        if (regs->ecx == MSR_FS_BASE)
+            goto gp_fault;
+
+        if ( !is_canonical_address(msr_content) )
+            goto uncanonical_address;
+
+        if ( ecx == MSR_FS_BASE )
             vmcb->fs.base = msr_content;
-        else
+        else if ( ecx == MSR_GS_BASE )
             vmcb->gs.base = msr_content;
+        else
+            vmcb->kerngsbase = msr_content;
         break;
-
-    case MSR_SHADOW_GS_BASE:
-        vmcb->kerngsbase = msr_content;
-        break;
+#endif
 
     case MSR_STAR:
         vmcb->star = msr_content;
         break;
 
     case MSR_LSTAR:
-        vmcb->lstar = msr_content;
-        break;
-
     case MSR_CSTAR:
-        vmcb->cstar = msr_content;
+        if ( !is_canonical_address(msr_content) )
+            goto uncanonical_address;
+
+        if ( ecx == MSR_LSTAR )
+            vmcb->lstar = msr_content;
+        else
+            vmcb->cstar = msr_content;
         break;
 
     case MSR_SYSCALL_MASK:
@@ -414,10 +420,11 @@ static inline int long_mode_do_msr_write
 
     return 1;
 
- exit_and_crash:
-    gdprintk(XENLOG_ERR, "Fatal error writing MSR %lx\n", (long)regs->ecx);
-    domain_crash(v->domain);
-    return 1; /* handled */
+ uncanonical_address:
+    HVM_DBG_LOG(DBG_LEVEL_1, "Not cano address of msr write %x\n", ecx);
+ gp_fault:
+    svm_inject_exception(v, TRAP_gp_fault, 1, 0);
+    return 0;
 }
 
 
@@ -1272,7 +1279,7 @@ static inline int svm_get_io_address(
 #endif
 
     /* d field of cs.attr is 1 for 32-bit, 0 for 16 or 64 bit.
-     * l field combined with EFER_LMA -> longmode says whether it's 16 or 64 bit.
+     * l field combined with EFER_LMA says whether it's 16 or 64 bit.
      */
     asize = (long_mode)?64:((vmcb->cs.attr.fields.db)?32:16);
 
@@ -1383,8 +1390,35 @@ static inline int svm_get_io_address(
 
         *addr += seg->base;
     }
-    else if (seg == &vmcb->fs || seg == &vmcb->gs)
-        *addr += seg->base;
+#ifdef __x86_64__
+    else
+    {
+        if (seg == &vmcb->fs || seg == &vmcb->gs)
+            *addr += seg->base;
+
+        if (!is_canonical_address(*addr) ||
+            !is_canonical_address(*addr + size - 1))
+        {
+            svm_inject_exception(v, TRAP_gp_fault, 1, 0);
+            return 0;
+        }
+        if (*count > (1UL << 48) / size)
+            *count = (1UL << 48) / size;
+        if (!(regs->eflags & EF_DF))
+        {
+            if (*addr + *count * size - 1 < *addr ||
+                !is_canonical_address(*addr + *count * size - 1))
+                *count = (*addr & ~((1UL << 48) - 1)) / size;
+        }
+        else
+        {
+            if ((*count - 1) * size > *addr ||
+                !is_canonical_address(*addr + (*count - 1) * size))
+                *count = (*addr & ~((1UL << 48) - 1)) / size + 1;
+        }
+        ASSERT(*count);
+    }
+#endif
 
     return 1;
 }
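Reviewer note: the new #ifdef __x86_64__ block in svm_get_io_address() both rejects non-canonical starting addresses with #GP and clamps *count so a REP INS/OUTS cannot run past the canonical boundary. A standalone sketch of the same idea follows; canonical() and clamp_ascending() are illustrative helpers, not part of this changeset, and assume 48 virtual-address bits as with VADDR_BITS below.

    #include <assert.h>
    #include <stdint.h>

    /* Mirrors the is_canonical_address() macro added to x86_64/page.h:
     * bits 47..63 must all equal bit 47. */
    static int canonical(uint64_t x)
    {
        return ((int64_t)x >> 47) == ((int64_t)x >> 63);
    }

    /* Illustrative helper: clamp an ascending (DF=0) repeated-I/O element
     * count so the last byte of the transfer stays canonical. */
    static uint64_t clamp_ascending(uint64_t addr, uint64_t size, uint64_t count)
    {
        uint64_t end, room;

        assert(canonical(addr) && canonical(addr + size - 1));
        /* The low canonical half ends at 2^47; the high half at 2^64. */
        end = ((int64_t)addr < 0) ? 0 : (1ULL << 47);
        room = (end - addr) / size;   /* elements that fit below the hole */
        return count < room ? count : room;
    }

    int main(void)
    {
        /* 4-byte elements starting 8 bytes below the canonical hole:
         * only two fit, however large the requested count. */
        assert(clamp_ascending((1ULL << 47) - 8, 4, 100) == 2);
        return 0;
    }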
--- a/xen/arch/x86/hvm/vmx/vmx.c	Fri Dec 01 11:07:22 2006 +0000
+++ b/xen/arch/x86/hvm/vmx/vmx.c	Fri Dec 01 11:08:34 2006 +0000
@@ -95,13 +95,7 @@ static void vmx_save_host_msrs(void)
         rdmsrl(msr_index[i], host_msr_state->msrs[i]);
 }
 
-#define CASE_READ_MSR(address)                                              \
-    case MSR_ ## address:                                                   \
-        msr_content = guest_msr_state->msrs[VMX_INDEX_MSR_ ## address];     \
-        break
-
-#define CASE_WRITE_MSR(address)                                             \
-    case MSR_ ## address:                                                   \
+#define WRITE_MSR(address)                                                  \
         guest_msr_state->msrs[VMX_INDEX_MSR_ ## address] = msr_content;     \
         if ( !test_bit(VMX_INDEX_MSR_ ## address, &guest_msr_state->flags) )\
             set_bit(VMX_INDEX_MSR_ ## address, &guest_msr_state->flags);    \
@@ -109,7 +103,6 @@ static void vmx_save_host_msrs(void)
             set_bit(VMX_INDEX_MSR_ ## address, &host_msr_state->flags);     \
         break
 
-#define IS_CANO_ADDRESS(add) 1
 static inline int long_mode_do_msr_read(struct cpu_user_regs *regs)
 {
     u64 msr_content = 0;
@@ -123,27 +116,38 @@ static inline int long_mode_do_msr_read(
         break;
 
     case MSR_FS_BASE:
-        if ( !(vmx_long_mode_enabled(v)) )
-            goto exit_and_crash;
-
         msr_content = __vmread(GUEST_FS_BASE);
-        break;
+        goto check_long_mode;
 
     case MSR_GS_BASE:
-        if ( !(vmx_long_mode_enabled(v)) )
-            goto exit_and_crash;
-
         msr_content = __vmread(GUEST_GS_BASE);
-        break;
+        goto check_long_mode;
 
     case MSR_SHADOW_GS_BASE:
         msr_content = guest_msr_state->shadow_gs;
+    check_long_mode:
+        if ( !(vmx_long_mode_enabled(v)) )
+        {
+            vmx_inject_hw_exception(v, TRAP_gp_fault, 0);
+            return 0;
+        }
+        break;
+
+    case MSR_STAR:
+        msr_content = guest_msr_state->msrs[VMX_INDEX_MSR_STAR];
         break;
 
-    CASE_READ_MSR(STAR);
-    CASE_READ_MSR(LSTAR);
-    CASE_READ_MSR(CSTAR);
-    CASE_READ_MSR(SYSCALL_MASK);
+    case MSR_LSTAR:
+        msr_content = guest_msr_state->msrs[VMX_INDEX_MSR_LSTAR];
+        break;
+
+    case MSR_CSTAR:
+        msr_content = guest_msr_state->msrs[VMX_INDEX_MSR_CSTAR];
+        break;
+
+    case MSR_SYSCALL_MASK:
+        msr_content = guest_msr_state->msrs[VMX_INDEX_MSR_SYSCALL_MASK];
+        break;
 
     default:
         return 0;
@@ -155,32 +159,28 @@ static inline int long_mode_do_msr_read(
     regs->edx = (u32)(msr_content >> 32);
 
     return 1;
-
- exit_and_crash:
-    gdprintk(XENLOG_ERR, "Fatal error reading MSR %lx\n", (long)regs->ecx);
-    domain_crash(v->domain);
-    return 1; /* handled */
 }
 
 static inline int long_mode_do_msr_write(struct cpu_user_regs *regs)
 {
     u64 msr_content = (u32)regs->eax | ((u64)regs->edx << 32);
+    u32 ecx = regs->ecx;
     struct vcpu *v = current;
     struct vmx_msr_state *guest_msr_state = &v->arch.hvm_vmx.msr_state;
     struct vmx_msr_state *host_msr_state = &this_cpu(host_msr_state);
 
     HVM_DBG_LOG(DBG_LEVEL_1, "msr 0x%x msr_content 0x%"PRIx64"\n",
-                (u32)regs->ecx, msr_content);
+                ecx, msr_content);
 
-    switch ( (u32)regs->ecx ) {
+    switch ( ecx )
+    {
     case MSR_EFER:
         /* offending reserved bit will cause #GP */
         if ( msr_content & ~(EFER_LME | EFER_LMA | EFER_NX | EFER_SCE) )
         {
-            printk("Trying to set reserved bit in EFER: %"PRIx64"\n",
-                   msr_content);
-            vmx_inject_hw_exception(v, TRAP_gp_fault, 0);
-            return 0;
+            gdprintk(XENLOG_WARNING, "Trying to set reserved bit in "
+                     "EFER: %"PRIx64"\n", msr_content);
+            goto gp_fault;
         }
 
         if ( (msr_content & EFER_LME)
@@ -188,9 +188,9 @@ static inline int long_mode_do_msr_write
         {
             if ( unlikely(vmx_paging_enabled(v)) )
             {
-                printk("Trying to set EFER.LME with paging enabled\n");
-                vmx_inject_hw_exception(v, TRAP_gp_fault, 0);
-                return 0;
+                gdprintk(XENLOG_WARNING,
+                         "Trying to set EFER.LME with paging enabled\n");
+                goto gp_fault;
             }
         }
         else if ( !(msr_content & EFER_LME)
@@ -198,9 +198,9 @@ static inline int long_mode_do_msr_write
         {
             if ( unlikely(vmx_paging_enabled(v)) )
             {
-                printk("Trying to clear EFER.LME with paging enabled\n");
-                vmx_inject_hw_exception(v, TRAP_gp_fault, 0);
-                return 0;
+                gdprintk(XENLOG_WARNING,
+                         "Trying to clear EFER.LME with paging enabled\n");
+                goto gp_fault;
            }
         }
 
@@ -209,35 +209,40 @@ static inline int long_mode_do_msr_write
 
     case MSR_FS_BASE:
     case MSR_GS_BASE:
+    case MSR_SHADOW_GS_BASE:
         if ( !vmx_long_mode_enabled(v) )
-            goto exit_and_crash;
+            goto gp_fault;
+
+        if ( !is_canonical_address(msr_content) )
+            goto uncanonical_address;
 
-        if ( !IS_CANO_ADDRESS(msr_content) )
+        if ( ecx == MSR_FS_BASE )
+            __vmwrite(GUEST_FS_BASE, msr_content);
+        else if ( ecx == MSR_GS_BASE )
+            __vmwrite(GUEST_GS_BASE, msr_content);
+        else
         {
-            HVM_DBG_LOG(DBG_LEVEL_1, "Not cano address of msr write\n");
-            vmx_inject_hw_exception(v, TRAP_gp_fault, 0);
-            return 0;
+            v->arch.hvm_vmx.msr_state.shadow_gs = msr_content;
+            wrmsrl(MSR_SHADOW_GS_BASE, msr_content);
        }
 
-        if ( regs->ecx == MSR_FS_BASE )
-            __vmwrite(GUEST_FS_BASE, msr_content);
-        else
-            __vmwrite(GUEST_GS_BASE, msr_content);
-
         break;
 
-    case MSR_SHADOW_GS_BASE:
-        if ( !(vmx_long_mode_enabled(v)) )
-            goto exit_and_crash;
+    case MSR_STAR:
+        WRITE_MSR(STAR);
+
+    case MSR_LSTAR:
+        if ( !is_canonical_address(msr_content) )
+            goto uncanonical_address;
+        WRITE_MSR(LSTAR);
 
-        v->arch.hvm_vmx.msr_state.shadow_gs = msr_content;
-        wrmsrl(MSR_SHADOW_GS_BASE, msr_content);
-        break;
+    case MSR_CSTAR:
+        if ( !is_canonical_address(msr_content) )
+            goto uncanonical_address;
+        WRITE_MSR(CSTAR);
 
-    CASE_WRITE_MSR(STAR);
-    CASE_WRITE_MSR(LSTAR);
-    CASE_WRITE_MSR(CSTAR);
-    CASE_WRITE_MSR(SYSCALL_MASK);
+    case MSR_SYSCALL_MASK:
+        WRITE_MSR(SYSCALL_MASK);
 
     default:
         return 0;
@@ -245,10 +250,11 @@ static inline int long_mode_do_msr_write
 
     return 1;
 
- exit_and_crash:
-    gdprintk(XENLOG_ERR, "Fatal error writing MSR %lx\n", (long)regs->ecx);
-    domain_crash(v->domain);
-    return 1; /* handled */
+ uncanonical_address:
+    HVM_DBG_LOG(DBG_LEVEL_1, "Not cano address of msr write %x\n", ecx);
+ gp_fault:
+    vmx_inject_hw_exception(v, TRAP_gp_fault, 0);
+    return 0;
 }
 
 /*
@@ -1283,6 +1289,32 @@ static void vmx_io_instruction(unsigned
             ASSERT(count);
         }
     }
+#ifdef __x86_64__
+    else
+    {
+        if ( !is_canonical_address(addr) ||
+             !is_canonical_address(addr + size - 1) )
+        {
+            vmx_inject_hw_exception(current, TRAP_gp_fault, 0);
+            return;
+        }
+        if ( count > (1UL << 48) / size )
+            count = (1UL << 48) / size;
+        if ( !(regs->eflags & EF_DF) )
+        {
+            if ( addr + count * size - 1 < addr ||
+                 !is_canonical_address(addr + count * size - 1) )
+                count = (addr & ~((1UL << 48) - 1)) / size;
+        }
+        else
+        {
+            if ( (count - 1) * size > addr ||
+                 !is_canonical_address(addr + (count - 1) * size) )
+                count = (addr & ~((1UL << 48) - 1)) / size + 1;
+        }
+        ASSERT(count);
+    }
+#endif
 
     /*
      * Handle string pio instructions that cross pages or that
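Reviewer note: WRITE_MSR (the renamed CASE_WRITE_MSR) still ends in a break, so the bare `case MSR_STAR: WRITE_MSR(STAR);` arms above do not fall through. Roughly, the LSTAR arm expands to the following sketch; one macro line falls between the two hunks shown above and is elided here.

    case MSR_LSTAR:
        if ( !is_canonical_address(msr_content) )
            goto uncanonical_address;
        guest_msr_state->msrs[VMX_INDEX_MSR_LSTAR] = msr_content;
        if ( !test_bit(VMX_INDEX_MSR_LSTAR, &guest_msr_state->flags) )
            set_bit(VMX_INDEX_MSR_LSTAR, &guest_msr_state->flags);
        /* ...one elided macro line... */
        set_bit(VMX_INDEX_MSR_LSTAR, &host_msr_state->flags);
        break;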
--- a/xen/arch/x86/mm/shadow/common.c	Fri Dec 01 11:07:22 2006 +0000
+++ b/xen/arch/x86/mm/shadow/common.c	Fri Dec 01 11:08:34 2006 +0000
@@ -120,12 +120,17 @@ static int hvm_translate_linear_addr(
          */
         addr = (uint32_t)(addr + dreg.base);
     }
-    else if ( (seg == x86_seg_fs) || (seg == x86_seg_gs) )
+    else
     {
         /*
-         * LONG MODE: FS and GS add a segment base.
+         * LONG MODE: FS and GS add segment base. Addresses must be canonical.
          */
-        addr += dreg.base;
+
+        if ( (seg == x86_seg_fs) || (seg == x86_seg_gs) )
+            addr += dreg.base;
+
+        if ( !is_canonical_address(addr) )
+            goto gpf;
     }
 
     *paddr = addr;
--- a/xen/include/asm-x86/hvm/hvm.h	Fri Dec 01 11:07:22 2006 +0000
+++ b/xen/include/asm-x86/hvm/hvm.h	Fri Dec 01 11:08:34 2006 +0000
@@ -157,11 +157,15 @@ hvm_paging_enabled(struct vcpu *v)
     return hvm_funcs.paging_enabled(v);
 }
 
+#ifdef __x86_64__
 static inline int
 hvm_long_mode_enabled(struct vcpu *v)
 {
     return hvm_funcs.long_mode_enabled(v);
 }
+#else
+#define hvm_long_mode_enabled(v) 0
+#endif
 
 static inline int
 hvm_pae_enabled(struct vcpu *v)
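Reviewer note: with the #else stub, common code can test long mode unconditionally; on an x86_32 build the macro is the constant 0, so the compiler drops long-mode-only paths at compile time. A hypothetical caller (inject_gp() is illustrative only, not a real Xen function):

    /* Compiles on both builds; on x86_32 this whole branch is dead code. */
    if ( hvm_long_mode_enabled(v) && !is_canonical_address(addr) )
        inject_gp(v);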
--- a/xen/include/asm-x86/x86_32/page.h	Fri Dec 01 11:07:22 2006 +0000
+++ b/xen/include/asm-x86/x86_32/page.h	Fri Dec 01 11:08:34 2006 +0000
@@ -7,6 +7,8 @@
 #define VADDR_BITS 32
 #define VADDR_MASK (~0UL)
 
+#define is_canonical_address(x) 1
+
 #include <xen/config.h>
 #ifdef CONFIG_X86_PAE
 # include <asm/x86_32/page-3level.h>
--- a/xen/include/asm-x86/x86_64/page.h	Fri Dec 01 11:07:22 2006 +0000
+++ b/xen/include/asm-x86/x86_64/page.h	Fri Dec 01 11:08:34 2006 +0000
@@ -24,6 +24,8 @@
 #define PADDR_MASK ((1UL << PADDR_BITS)-1)
 #define VADDR_MASK ((1UL << VADDR_BITS)-1)
 
+#define is_canonical_address(x) (((long)(x) >> 47) == ((long)(x) >> 63))
+
 #ifndef __ASSEMBLY__
 
 #include <xen/config.h>
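Reviewer note: the macro accepts an address only if bits 47-63 are a sign extension of bit 47, i.e. the address lies in the low or high canonical half of the 48-bit virtual address space. A minimal user-space check of the same expression (not part of the changeset; assumes LP64, where long is 64 bits):

    #include <assert.h>

    #define is_canonical_address(x) (((long)(x) >> 47) == ((long)(x) >> 63))

    int main(void)
    {
        assert( is_canonical_address(0x00007fffffffffffUL)); /* top of low half */
        assert( is_canonical_address(0xffff800000000000UL)); /* base of high half */
        assert(!is_canonical_address(0x0000800000000000UL)); /* start of the hole */
        assert(!is_canonical_address(0xffff7fffffffffffUL)); /* end of the hole */
        return 0;
    }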