ia64/xen-unstable
changeset 14029:6746873997b5
[XEN] Get rid of gva_to_gpa translation
It didn't have any sensible error checking. Make all callers
use gva_to_gfn translation and check the result. MMIO and PIO
callers inject page faults into the guest if the non-IO address
is not mapped.
Signed-off-by: Tim Deegan <Tim.Deegan@xensource.com>
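
For reference, the call-site pattern this changeset introduces is condensed below into a minimal sketch. It is distilled from the hunks that follow, not a verbatim excerpt; declarations are simplified and the injection helper varies per caller (hvm_inject_exception(), svm_hvm_inject_exception(), vmx_inject_exception()).

    /* Sketch of the new translation-with-check pattern.
     * paging_gva_to_gfn() returns INVALID_GFN when the guest pagetables
     * do not map 'addr'; every caller must test for that before
     * composing a guest physical address. */
    unsigned long gfn = paging_gva_to_gfn(v, addr);
    if ( gfn == INVALID_GFN )
    {
        int errcode = 0;
        /* An I/O read lands in guest memory, i.e. a memory write. */
        if ( dir == IOREQ_READ )
            errcode |= PFEC_write_access;
        hvm_inject_exception(TRAP_page_fault, errcode, addr);
        return;
    }
    paddr = ((paddr_t)gfn << PAGE_SHIFT) | (addr & ~PAGE_MASK);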
author      Tim Deegan <Tim.Deegan@xensource.com>
date        Tue Feb 20 15:37:03 2007 +0000 (2007-02-20)
parents     e4ddec3dffb0
children    92a4aafb8cbc
files       xen/arch/x86/hvm/io.c xen/arch/x86/hvm/platform.c xen/arch/x86/hvm/svm/svm.c xen/arch/x86/hvm/vmx/vmx.c xen/arch/x86/mm/shadow/multi.c xen/arch/x86/mm/shadow/types.h xen/include/asm-x86/hvm/io.h xen/include/asm-x86/p2m.h xen/include/asm-x86/paging.h
line diff
--- a/xen/arch/x86/hvm/io.c	Tue Feb 20 13:57:26 2007 +0000
+++ b/xen/arch/x86/hvm/io.c	Tue Feb 20 15:37:03 2007 +0000
@@ -371,7 +371,20 @@ static void hvm_pio_assist(struct cpu_us
     {
         unsigned long addr = pio_opp->addr;
         if ( hvm_paging_enabled(current) )
-            (void)hvm_copy_to_guest_virt(addr, &p->data, p->size);
+        {
+            int rv = hvm_copy_to_guest_virt(addr, &p->data, p->size);
+            if ( rv != 0 )
+            {
+                /* Failed on the page-spanning copy.  Inject PF into
+                 * the guest for the address where we failed. */
+                addr += p->size - rv;
+                gdprintk(XENLOG_DEBUG, "Pagefault writing non-io side "
+                         "of a page-spanning PIO: va=%#lx\n", addr);
+                hvm_inject_exception(TRAP_page_fault,
+                                     PFEC_write_access, addr);
+                return;
+            }
+        }
         else
             (void)hvm_copy_to_guest_phys(addr, &p->data, p->size);
     }
@@ -489,7 +502,20 @@ static void hvm_mmio_assist(struct cpu_u
         unsigned long addr = mmio_opp->addr;

         if (hvm_paging_enabled(current))
-            (void)hvm_copy_to_guest_virt(addr, &p->data, p->size);
+        {
+            int rv = hvm_copy_to_guest_virt(addr, &p->data, p->size);
+            if ( rv != 0 )
+            {
+                /* Failed on the page-spanning copy.  Inject PF into
+                 * the guest for the address where we failed. */
+                addr += p->size - rv;
+                gdprintk(XENLOG_DEBUG, "Pagefault writing non-io side of "
+                         "a page-spanning MMIO: va=%#lx\n", addr);
+                hvm_inject_exception(TRAP_page_fault,
+                                     PFEC_write_access, addr);
+                return;
+            }
+        }
         else
             (void)hvm_copy_to_guest_phys(addr, &p->data, p->size);
     }
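A note on the rv arithmetic in the io.c hunks above: the patch computes the faulting address as addr + (p->size - rv), which implies hvm_copy_to_guest_virt() returns the number of bytes it could not copy (0 on complete success). That reading is an inference from the hunk, not stated in the patch itself:

    /* Assumption inferred from the hunk above: rv counts the bytes NOT
     * copied, so (size - rv) bytes were written before the fault.  The
     * CR2 value for the injected #PF is the start of the failed
     * remainder: */
    unsigned long fault_va = addr + (p->size - rv);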
--- a/xen/arch/x86/hvm/platform.c	Tue Feb 20 13:57:26 2007 +0000
+++ b/xen/arch/x86/hvm/platform.c	Tue Feb 20 15:37:03 2007 +0000
@@ -815,7 +815,7 @@ int inst_copy_from_guest(unsigned char *
 }

 void send_pio_req(unsigned long port, unsigned long count, int size,
-                  long value, int dir, int df, int value_is_ptr)
+                  paddr_t value, int dir, int df, int value_is_ptr)
 {
     struct vcpu *v = current;
     vcpu_iodata_t *vio;
@@ -823,7 +823,7 @@ void send_pio_req(unsigned long port, un

     if ( size == 0 || count == 0 ) {
         printk("null pio request? port %lx, count %lx, "
-               "size %d, value %lx, dir %d, value_is_ptr %d.\n",
+               "size %d, value %"PRIpaddr", dir %d, value_is_ptr %d.\n",
                port, count, size, value, dir, value_is_ptr);
     }

@@ -849,15 +849,7 @@ void send_pio_req(unsigned long port, un

     p->io_count++;

-    if ( value_is_ptr )   /* get physical address of data */
-    {
-        if ( hvm_paging_enabled(current) )
-            p->data = paging_gva_to_gpa(current, value);
-        else
-            p->data = value; /* guest VA == guest PA */
-    }
-    else if ( dir == IOREQ_WRITE )
-        p->data = value;
+    p->data = value;

     if ( hvm_portio_intercept(p) )
     {
@@ -870,7 +862,7 @@ void send_pio_req(unsigned long port, un
 }

 static void send_mmio_req(unsigned char type, unsigned long gpa,
-                          unsigned long count, int size, long value,
+                          unsigned long count, int size, paddr_t value,
                           int dir, int df, int value_is_ptr)
 {
     struct vcpu *v = current;
@@ -879,7 +871,8 @@ static void send_mmio_req(unsigned char

     if ( size == 0 || count == 0 ) {
         printk("null mmio request? type %d, gpa %lx, "
-               "count %lx, size %d, value %lx, dir %d, value_is_ptr %d.\n",
+               "count %lx, size %d, value %"PRIpaddr"x, dir %d, "
+               "value_is_ptr %d.\n",
                type, gpa, count, size, value, dir, value_is_ptr);
     }

@@ -905,15 +898,7 @@ static void send_mmio_req(unsigned char

     p->io_count++;

-    if ( value_is_ptr )
-    {
-        if ( hvm_paging_enabled(v) )
-            p->data = paging_gva_to_gpa(v, value);
-        else
-            p->data = value; /* guest VA == guest PA */
-    }
-    else
-        p->data = value;
+    p->data = value;

     if ( hvm_mmio_intercept(p) || hvm_buffered_io_intercept(p) )
     {
@@ -960,6 +945,7 @@ static void mmio_operands(int type, unsi
 #define GET_REPEAT_COUNT() \
     (mmio_op->flags & REPZ ? (ad_size == WORD ? regs->ecx & 0xFFFF : regs->ecx) : 1)

+
 void handle_mmio(unsigned long gpa)
 {
     unsigned long inst_addr;
@@ -1014,7 +1000,8 @@ void handle_mmio(unsigned long gpa)
     {
         unsigned long count = GET_REPEAT_COUNT();
         int sign = regs->eflags & X86_EFLAGS_DF ? -1 : 1;
-        unsigned long addr;
+        unsigned long addr, gfn;
+        paddr_t paddr;
         int dir, size = op_size;

         ASSERT(count);
@@ -1024,7 +1011,9 @@ void handle_mmio(unsigned long gpa)
             if ( ad_size == WORD )
                 addr &= 0xFFFF;
             addr += hvm_get_segment_base(v, x86_seg_es);
-            if ( paging_gva_to_gpa(v, addr) == gpa )
+            gfn = paging_gva_to_gfn(v, addr);
+            paddr = (paddr_t)gfn << PAGE_SHIFT | (addr & ~PAGE_MASK);
+            if ( paddr == gpa )
             {
                 enum x86_segment seg;

@@ -1044,10 +1033,24 @@ void handle_mmio(unsigned long gpa)
                 default: domain_crash_synchronous();
                 }
                 addr += hvm_get_segment_base(v, seg);
+                gfn = paging_gva_to_gfn(v, addr);
+                paddr = (paddr_t)gfn << PAGE_SHIFT | (addr & ~PAGE_MASK);
             }
             else
                 dir = IOREQ_READ;

+            if ( gfn == INVALID_GFN )
+            {
+                /* The guest does not have the non-mmio address mapped.
+                 * Need to send in a page fault */
+                int errcode = 0;
+                /* IO read --> memory write */
+                if ( dir == IOREQ_READ ) errcode |= PFEC_write_access;
+                regs->eip -= inst_len; /* do not advance %eip */
+                hvm_inject_exception(TRAP_page_fault, errcode, addr);
+                return;
+            }
+
             /*
              * In case of a movs spanning multiple pages, we break the accesses
              * up into multiple pages (the device model works with non-continguous
@@ -1065,10 +1068,27 @@ void handle_mmio(unsigned long gpa)

             if ( dir == IOREQ_WRITE ) {
                 if ( hvm_paging_enabled(v) )
-                    (void)hvm_copy_from_guest_virt(&value, addr, size);
+                {
+                    int rv = hvm_copy_from_guest_virt(&value, addr, size);
+                    if ( rv != 0 )
+                    {
+                        /* Failed on the page-spanning copy.  Inject PF into
+                         * the guest for the address where we failed */
+                        regs->eip -= inst_len; /* do not advance %eip */
+                        /* Must set CR2 at the failing address */
+                        addr += size - rv;
+                        gdprintk(XENLOG_DEBUG, "Pagefault on non-io side of a "
+                                 "page-spanning MMIO: va=%#lx\n", addr);
+                        hvm_inject_exception(TRAP_page_fault, 0, addr);
+                        return;
+                    }
+                }
                 else
-                    (void)hvm_copy_from_guest_phys(&value, addr, size);
-            } else
+                    (void) hvm_copy_from_guest_phys(&value, addr, size);
+            } else /* dir != IOREQ_WRITE */
+                /* Remember where to write the result, as a *VA*.
+                 * Must be a VA so we can handle the page overlap
+                 * correctly in hvm_mmio_assist() */
                 mmio_op->addr = addr;

             if ( count != 1 )
@@ -1091,7 +1111,8 @@ void handle_mmio(unsigned long gpa)

             ASSERT(count);

-            send_mmio_req(IOREQ_TYPE_COPY, gpa, count, size, addr, dir, df, 1);
+            send_mmio_req(IOREQ_TYPE_COPY, gpa, count, size,
+                          paddr, dir, df, 1);
         }
         break;
     }
--- a/xen/arch/x86/hvm/svm/svm.c	Tue Feb 20 13:57:26 2007 +0000
+++ b/xen/arch/x86/hvm/svm/svm.c	Tue Feb 20 15:37:03 2007 +0000
@@ -1589,6 +1589,8 @@ static void svm_io_instruction(struct vc
     if (info.fields.str)
     {
         unsigned long addr, count;
+        paddr_t paddr;
+        unsigned long gfn;
         int sign = regs->eflags & X86_EFLAGS_DF ? -1 : 1;

         if (!svm_get_io_address(v, regs, size, info, &count, &addr))
@@ -1606,6 +1608,20 @@ static void svm_io_instruction(struct vc
             pio_opp->flags |= REPZ;
         }

+        /* Translate the address to a physical address */
+        gfn = paging_gva_to_gfn(v, addr);
+        if ( gfn == INVALID_GFN )
+        {
+            /* The guest does not have the RAM address mapped.
+             * Need to send in a page fault */
+            int errcode = 0;
+            /* IO read --> memory write */
+            if ( dir == IOREQ_READ ) errcode |= PFEC_write_access;
+            svm_hvm_inject_exception(TRAP_page_fault, errcode, addr);
+            return;
+        }
+        paddr = (paddr_t)gfn << PAGE_SHIFT | (addr & ~PAGE_MASK);
+
         /*
          * Handle string pio instructions that cross pages or that
          * are unaligned. See the comments in hvm_platform.c/handle_mmio()
@@ -1619,11 +1635,27 @@ static void svm_io_instruction(struct vc

             if (dir == IOREQ_WRITE)   /* OUTS */
             {
-                if (hvm_paging_enabled(current))
-                    (void)hvm_copy_from_guest_virt(&value, addr, size);
+                if ( hvm_paging_enabled(current) )
+                {
+                    int rv = hvm_copy_from_guest_virt(&value, addr, size);
+                    if ( rv != 0 )
+                    {
+                        /* Failed on the page-spanning copy.  Inject PF into
+                         * the guest for the address where we failed. */
+                        addr += size - rv;
+                        gdprintk(XENLOG_DEBUG, "Pagefault reading non-io side "
+                                 "of a page-spanning PIO: va=%#lx\n", addr);
+                        svm_hvm_inject_exception(TRAP_page_fault, 0, addr);
+                        return;
+                    }
+                }
                 else
-                    (void)hvm_copy_from_guest_phys(&value, addr, size);
-            }
+                    (void) hvm_copy_from_guest_phys(&value, addr, size);
+            } else /* dir != IOREQ_WRITE */
+                /* Remember where to write the result, as a *VA*.
+                 * Must be a VA so we can handle the page overlap
+                 * correctly in hvm_pio_assist() */
+                pio_opp->addr = addr;

             if (count == 1)
                 regs->eip = vmcb->exitinfo2;
@@ -1645,7 +1677,7 @@ static void svm_io_instruction(struct vc
             else
                 regs->eip = vmcb->exitinfo2;

-            send_pio_req(port, count, size, addr, dir, df, 1);
+            send_pio_req(port, count, size, paddr, dir, df, 1);
         }
     }
     else
@@ -2718,7 +2750,8 @@ asmlinkage void svm_vmexit_handler(struc
     if (svm_dbg_on && exit_reason == VMEXIT_EXCEPTION_PF)
     {
         if (svm_paging_enabled(v) &&
-            !mmio_space(paging_gva_to_gpa(current, vmcb->exitinfo2)))
+            !mmio_space(
+                paging_gva_to_gfn(current, vmcb->exitinfo2) << PAGE_SHIFT))
         {
             printk("I%08ld,ExC=%s(%d),IP=%x:%"PRIx64","
                    "I1=%"PRIx64",I2=%"PRIx64",INT=%"PRIx64", "
@@ -2728,7 +2761,8 @@ asmlinkage void svm_vmexit_handler(struc
                    (u64)vmcb->exitinfo1,
                    (u64)vmcb->exitinfo2,
                    (u64)vmcb->exitintinfo.bytes,
-                   (u64)paging_gva_to_gpa(current, vmcb->exitinfo2));
+                   (((u64)paging_gva_to_gfn(current, vmcb->exitinfo2)
+                     << PAGE_SHIFT) | (vmcb->exitinfo2 & ~PAGE_MASK)));
         }
         else
         {
--- a/xen/arch/x86/hvm/vmx/vmx.c	Tue Feb 20 13:57:26 2007 +0000
+++ b/xen/arch/x86/hvm/vmx/vmx.c	Tue Feb 20 15:37:03 2007 +0000
@@ -1426,6 +1426,8 @@ static void vmx_io_instruction(unsigned

     if ( test_bit(4, &exit_qualification) ) { /* string instruction */
         unsigned long addr, count = 1, base;
+        paddr_t paddr;
+        unsigned long gfn;
         u32 ar_bytes, limit;
         int sign = regs->eflags & X86_EFLAGS_DF ? -1 : 1;
         int long_mode = 0;
@@ -1545,6 +1547,20 @@ static void vmx_io_instruction(unsigned
         }
 #endif

+        /* Translate the address to a physical address */
+        gfn = paging_gva_to_gfn(current, addr);
+        if ( gfn == INVALID_GFN )
+        {
+            /* The guest does not have the RAM address mapped.
+             * Need to send in a page fault */
+            int errcode = 0;
+            /* IO read --> memory write */
+            if ( dir == IOREQ_READ ) errcode |= PFEC_write_access;
+            vmx_inject_exception(TRAP_page_fault, errcode, addr);
+            return;
+        }
+        paddr = (paddr_t)gfn << PAGE_SHIFT | (addr & ~PAGE_MASK);
+
         /*
          * Handle string pio instructions that cross pages or that
          * are unaligned. See the comments in hvm_domain.c/handle_mmio()
@@ -1557,10 +1573,25 @@ static void vmx_io_instruction(unsigned
             if ( dir == IOREQ_WRITE ) /* OUTS */
             {
                 if ( hvm_paging_enabled(current) )
-                    (void)hvm_copy_from_guest_virt(&value, addr, size);
+                {
+                    int rv = hvm_copy_from_guest_virt(&value, addr, size);
+                    if ( rv != 0 )
+                    {
+                        /* Failed on the page-spanning copy.  Inject PF into
+                         * the guest for the address where we failed. */
+                        addr += size - rv;
+                        gdprintk(XENLOG_DEBUG, "Pagefault reading non-io side "
+                                 "of a page-spanning PIO: va=%#lx\n", addr);
+                        vmx_inject_exception(TRAP_page_fault, 0, addr);
+                        return;
+                    }
+                }
                 else
-                    (void)hvm_copy_from_guest_phys(&value, addr, size);
-            } else
+                    (void) hvm_copy_from_guest_phys(&value, addr, size);
+            } else /* dir != IOREQ_WRITE */
+                /* Remember where to write the result, as a *VA*.
+                 * Must be a VA so we can handle the page overlap
+                 * correctly in hvm_pio_assist() */
                 pio_opp->addr = addr;

             if ( count == 1 )
@@ -1580,7 +1611,7 @@ static void vmx_io_instruction(unsigned
         } else
             regs->eip += inst_len;

-        send_pio_req(port, count, size, addr, dir, df, 1);
+        send_pio_req(port, count, size, paddr, dir, df, 1);
         }
     } else {
         if ( port == 0xe9 && dir == IOREQ_WRITE && size == 1 )
--- a/xen/arch/x86/mm/shadow/multi.c	Tue Feb 20 13:57:26 2007 +0000
+++ b/xen/arch/x86/mm/shadow/multi.c	Tue Feb 20 15:37:03 2007 +0000
@@ -3038,19 +3038,6 @@ sh_gva_to_gfn(struct vcpu *v, unsigned l
 }


-static paddr_t
-sh_gva_to_gpa(struct vcpu *v, unsigned long va)
-/* Called to translate a guest virtual address to what the *guest*
- * pagetables would map it to. */
-{
-    unsigned long gfn = sh_gva_to_gfn(v, va);
-    if ( gfn == INVALID_GFN )
-        return 0;
-    else
-        return (((paddr_t)gfn) << PAGE_SHIFT) + (va & ~PAGE_MASK);
-}
-
-
 static inline void
 sh_update_linear_entries(struct vcpu *v)
 /* Sync up all the linear mappings for this vcpu's pagetables */
@@ -4348,7 +4335,6 @@ int sh_audit_l4_table(struct vcpu *v, mf
 struct paging_mode sh_paging_mode = {
     .page_fault                    = sh_page_fault,
     .invlpg                        = sh_invlpg,
-    .gva_to_gpa                    = sh_gva_to_gpa,
     .gva_to_gfn                    = sh_gva_to_gfn,
     .update_cr3                    = sh_update_cr3,
     .update_paging_modes           = shadow_update_paging_modes,
--- a/xen/arch/x86/mm/shadow/types.h	Tue Feb 20 13:57:26 2007 +0000
+++ b/xen/arch/x86/mm/shadow/types.h	Tue Feb 20 15:37:03 2007 +0000
@@ -244,6 +244,7 @@ static inline shadow_l4e_t shadow_l4e_fr

 /* Type of the guest's frame numbers */
 TYPE_SAFE(u32,gfn)
+#undef INVALID_GFN
 #define INVALID_GFN ((u32)(-1u))
 #define SH_PRI_gfn "05x"

@@ -307,6 +308,7 @@ static inline guest_l2e_t guest_l2e_from

 /* Type of the guest's frame numbers */
 TYPE_SAFE(unsigned long,gfn)
+#undef INVALID_GFN
 #define INVALID_GFN ((unsigned long)(-1ul))
 #define SH_PRI_gfn "05lx"

@@ -467,7 +469,6 @@ struct shadow_walk_t
  */
 #define sh_page_fault              INTERNAL_NAME(sh_page_fault)
 #define sh_invlpg                  INTERNAL_NAME(sh_invlpg)
-#define sh_gva_to_gpa              INTERNAL_NAME(sh_gva_to_gpa)
 #define sh_gva_to_gfn              INTERNAL_NAME(sh_gva_to_gfn)
 #define sh_update_cr3              INTERNAL_NAME(sh_update_cr3)
 #define sh_rm_write_access_from_l1 INTERNAL_NAME(sh_rm_write_access_from_l1)
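Why the #undef lines above: shadow/types.h is compiled once per guest paging width, and the paging.h hunk at the end of this changeset now supplies a generic INVALID_GFN. A sketch of the interplay (simplified; both definitions are verbatim from the patch):

    /* xen/include/asm-x86/paging.h -- generic fallback definition: */
    #define INVALID_GFN (-1UL)

    /* xen/arch/x86/mm/shadow/types.h -- per-width override.  Without the
     * #undef, redefining the macro with a different body would provoke a
     * redefinition warning or error: */
    #undef INVALID_GFN
    #define INVALID_GFN ((u32)(-1u))    /* e.g. for 32-bit guest levels */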
--- a/xen/include/asm-x86/hvm/io.h	Tue Feb 20 13:57:26 2007 +0000
+++ b/xen/include/asm-x86/hvm/io.h	Tue Feb 20 15:37:03 2007 +0000
@@ -144,7 +144,7 @@ static inline int irq_masked(unsigned lo
 #endif

 extern void send_pio_req(unsigned long port, unsigned long count, int size,
-                         long value, int dir, int df, int value_is_ptr);
+                         paddr_t value, int dir, int df, int value_is_ptr);
 extern void handle_mmio(unsigned long gpa);
 extern void hvm_interrupt_post(struct vcpu *v, int vector, int type);
 extern void hvm_io_assist(struct vcpu *v);
--- a/xen/include/asm-x86/p2m.h	Tue Feb 20 13:57:26 2007 +0000
+++ b/xen/include/asm-x86/p2m.h	Tue Feb 20 15:37:03 2007 +0000
@@ -89,7 +89,7 @@ static inline unsigned long get_mfn_from
 /* Is this guest address an mmio one? (i.e. not defined in p2m map) */
 static inline int mmio_space(paddr_t gpa)
 {
-    unsigned long gfn = gpa >> PAGE_SHIFT;
+    unsigned long gfn = gpa >> PAGE_SHIFT;
 return !mfn_valid(mfn_x(gfn_to_mfn_current(gfn)));
 }
--- a/xen/include/asm-x86/paging.h	Tue Feb 20 13:57:26 2007 +0000
+++ b/xen/include/asm-x86/paging.h	Tue Feb 20 15:37:03 2007 +0000
@@ -115,7 +115,6 @@ struct paging_mode {
     int           (*page_fault            )(struct vcpu *v, unsigned long va,
                                             struct cpu_user_regs *regs);
     int           (*invlpg                )(struct vcpu *v, unsigned long va);
-    paddr_t       (*gva_to_gpa            )(struct vcpu *v, unsigned long va);
     unsigned long (*gva_to_gfn            )(struct vcpu *v, unsigned long va);
     void          (*update_cr3            )(struct vcpu *v, int do_locking);
     void          (*update_paging_modes   )(struct vcpu *v);
@@ -190,18 +189,10 @@ static inline int paging_invlpg(struct v
     return v->arch.paging.mode->invlpg(v, va);
 }

-/* Translate a guest virtual address to the physical address that the
- * *guest* pagetables would map it to. */
-static inline paddr_t paging_gva_to_gpa(struct vcpu *v, unsigned long va)
-{
-    if ( unlikely(!paging_vcpu_mode_translate(v)) )
-        return (paddr_t) va;
-
-    return v->arch.paging.mode->gva_to_gpa(v, va);
-}
-
 /* Translate a guest virtual address to the frame number that the
- * *guest* pagetables would map it to. */
+ * *guest* pagetables would map it to.  Returns INVALID_GFN if the guest
+ * tables don't map this address. */
+#define INVALID_GFN (-1UL)
 static inline unsigned long paging_gva_to_gfn(struct vcpu *v, unsigned long va)
 {
     if ( unlikely(!paging_vcpu_mode_translate(v)) )