direct-io.hg
changeset 12050:2db4388fecb9
[HVM] Use correct types for guest physical addresses
Guest physical addresses are not guaranteed to fit in either a pointer
or an unsigned long int; use paddr_t for them.
Signed-off-by: Tim Deegan <Tim.Deegan@xensource.com>
author    Tim Deegan <Tim.Deegan@xensource.com>
date      Tue Oct 31 16:42:46 2006 +0000 (2006-10-31)
parents   d93280670c3f
children  90164f5fbd50
files     tools/ioemu/target-i386-dm/helper2.c tools/libxc/xc_hvm_build.c
          xen/arch/ia64/vmx/mmio.c xen/arch/x86/hvm/hvm.c
          xen/arch/x86/hvm/i8254.c xen/arch/x86/hvm/i8259.c
          xen/arch/x86/hvm/intercept.c xen/arch/x86/hvm/io.c
          xen/arch/x86/hvm/platform.c xen/arch/x86/hvm/pmtimer.c
          xen/arch/x86/hvm/rtc.c xen/arch/x86/hvm/svm/svm.c
          xen/arch/x86/mm/shadow/multi.c xen/arch/x86/mm/shadow/types.h
          xen/include/asm-x86/hvm/io.h xen/include/asm-x86/hvm/support.h
          xen/include/asm-x86/shadow.h xen/include/public/hvm/ioreq.h
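Why paddr_t matters here (an editorial illustration, not part of the changeset): with PAE, a 32-bit hypervisor must handle guest physical addresses up to 2^36, but unsigned long is only 32 bits wide on such a build, so shifting a frame number left by PAGE_SHIFT in native word arithmetic silently wraps. A minimal standalone C sketch of that wrap, assuming a 32-bit build where Xen's paddr_t is a 64-bit type:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t paddr_t;   /* stand-in for Xen's paddr_t on a PAE build */

int main(void)
{
    unsigned long gfn = 0x100000UL;   /* guest frame at the 4GB boundary */

    /* The old pattern: the shift happens in native word arithmetic, so
     * on a 32-bit build the result wraps to 0. */
    unsigned long bad = gfn << 12;

    /* The pattern this changeset introduces: widen to paddr_t *before*
     * shifting, keeping all 64 bits. */
    paddr_t good = ((paddr_t)gfn) << 12;

    printf("bad = %#lx, good = %#llx\n", bad, (unsigned long long)good);
    return 0;
}

On a 32-bit build this prints bad = 0, good = 0x100000000; the sh_gva_to_gpa() hunk in xen/arch/x86/mm/shadow/multi.c below fixes exactly this pattern.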
line diff
--- a/tools/ioemu/target-i386-dm/helper2.c	Tue Oct 31 16:22:39 2006 +0000
+++ b/tools/ioemu/target-i386-dm/helper2.c	Tue Oct 31 16:42:46 2006 +0000
@@ -193,10 +193,10 @@ void sp_info()
     for (i = 0; i < vcpus; i++) {
         req = &(shared_page->vcpu_iodata[i].vp_ioreq);
         term_printf("vcpu %d: event port %d\n", i, ioreq_local_port[i]);
-        term_printf("  req state: %x, pvalid: %x, addr: %"PRIx64", "
+        term_printf("  req state: %x, ptr: %x, addr: %"PRIx64", "
                     "data: %"PRIx64", count: %"PRIx64", size: %"PRIx64"\n",
-                    req->state, req->pdata_valid, req->addr,
-                    req->u.data, req->count, req->size);
+                    req->state, req->data_is_ptr, req->addr,
+                    req->data, req->count, req->size);
         term_printf("  IO totally occurred on this vcpu: %"PRIx64"\n",
                     req->io_count);
     }
@@ -216,10 +216,10 @@ static ioreq_t *__cpu_get_ioreq(int vcpu
     }
 
     fprintf(logfile, "False I/O request ... in-service already: "
-                     "%x, pvalid: %x, port: %"PRIx64", "
+                     "%x, ptr: %x, port: %"PRIx64", "
                      "data: %"PRIx64", count: %"PRIx64", size: %"PRIx64"\n",
-                     req->state, req->pdata_valid, req->addr,
-                     req->u.data, req->count, req->size);
+                     req->state, req->data_is_ptr, req->addr,
+                     req->data, req->count, req->size);
     return NULL;
 }
 
@@ -305,26 +305,26 @@ void cpu_ioreq_pio(CPUState *env, ioreq_
     sign = req->df ? -1 : 1;
 
     if (req->dir == IOREQ_READ) {
-        if (!req->pdata_valid) {
-            req->u.data = do_inp(env, req->addr, req->size);
+        if (!req->data_is_ptr) {
+            req->data = do_inp(env, req->addr, req->size);
         } else {
             unsigned long tmp;
 
             for (i = 0; i < req->count; i++) {
                 tmp = do_inp(env, req->addr, req->size);
-                write_physical((target_phys_addr_t) req->u.pdata
+                write_physical((target_phys_addr_t) req->data
                                + (sign * i * req->size),
                                req->size, &tmp);
             }
         }
     } else if (req->dir == IOREQ_WRITE) {
-        if (!req->pdata_valid) {
-            do_outp(env, req->addr, req->size, req->u.data);
+        if (!req->data_is_ptr) {
+            do_outp(env, req->addr, req->size, req->data);
         } else {
             for (i = 0; i < req->count; i++) {
                 unsigned long tmp;
 
-                read_physical((target_phys_addr_t) req->u.pdata
+                read_physical((target_phys_addr_t) req->data
                               + (sign * i * req->size),
                               req->size, &tmp);
                 do_outp(env, req->addr, req->size, tmp);
@@ -339,18 +339,18 @@ void cpu_ioreq_move(CPUState *env, ioreq
 
     sign = req->df ? -1 : 1;
 
-    if (!req->pdata_valid) {
+    if (!req->data_is_ptr) {
         if (req->dir == IOREQ_READ) {
             for (i = 0; i < req->count; i++) {
                 read_physical(req->addr
                               + (sign * i * req->size),
-                              req->size, &req->u.data);
+                              req->size, &req->data);
             }
         } else if (req->dir == IOREQ_WRITE) {
             for (i = 0; i < req->count; i++) {
                 write_physical(req->addr
                                + (sign * i * req->size),
-                               req->size, &req->u.data);
+                               req->size, &req->data);
             }
         }
     } else {
@@ -361,13 +361,13 @@ void cpu_ioreq_move(CPUState *env, ioreq
                 read_physical(req->addr
                               + (sign * i * req->size),
                               req->size, &tmp);
-                write_physical((target_phys_addr_t )req->u.pdata
+                write_physical((target_phys_addr_t )req->data
                                + (sign * i * req->size),
                                req->size, &tmp);
             }
         } else if (req->dir == IOREQ_WRITE) {
             for (i = 0; i < req->count; i++) {
-                read_physical((target_phys_addr_t) req->u.pdata
+                read_physical((target_phys_addr_t) req->data
                               + (sign * i * req->size),
                               req->size, &tmp);
                 write_physical(req->addr
@@ -382,66 +382,66 @@ void cpu_ioreq_and(CPUState *env, ioreq_
 {
     unsigned long tmp1, tmp2;
 
-    if (req->pdata_valid != 0)
+    if (req->data_is_ptr != 0)
         hw_error("expected scalar value");
 
     read_physical(req->addr, req->size, &tmp1);
     if (req->dir == IOREQ_WRITE) {
-        tmp2 = tmp1 & (unsigned long) req->u.data;
+        tmp2 = tmp1 & (unsigned long) req->data;
         write_physical(req->addr, req->size, &tmp2);
     }
-    req->u.data = tmp1;
+    req->data = tmp1;
 }
 
 void cpu_ioreq_add(CPUState *env, ioreq_t *req)
 {
     unsigned long tmp1, tmp2;
 
-    if (req->pdata_valid != 0)
+    if (req->data_is_ptr != 0)
         hw_error("expected scalar value");
 
     read_physical(req->addr, req->size, &tmp1);
     if (req->dir == IOREQ_WRITE) {
-        tmp2 = tmp1 + (unsigned long) req->u.data;
+        tmp2 = tmp1 + (unsigned long) req->data;
         write_physical(req->addr, req->size, &tmp2);
     }
-    req->u.data = tmp1;
+    req->data = tmp1;
 }
 
 void cpu_ioreq_or(CPUState *env, ioreq_t *req)
 {
     unsigned long tmp1, tmp2;
 
-    if (req->pdata_valid != 0)
+    if (req->data_is_ptr != 0)
         hw_error("expected scalar value");
 
     read_physical(req->addr, req->size, &tmp1);
     if (req->dir == IOREQ_WRITE) {
-        tmp2 = tmp1 | (unsigned long) req->u.data;
+        tmp2 = tmp1 | (unsigned long) req->data;
         write_physical(req->addr, req->size, &tmp2);
     }
-    req->u.data = tmp1;
+    req->data = tmp1;
 }
 
 void cpu_ioreq_xor(CPUState *env, ioreq_t *req)
 {
     unsigned long tmp1, tmp2;
 
-    if (req->pdata_valid != 0)
+    if (req->data_is_ptr != 0)
         hw_error("expected scalar value");
 
    read_physical(req->addr, req->size, &tmp1);
    if (req->dir == IOREQ_WRITE) {
-        tmp2 = tmp1 ^ (unsigned long) req->u.data;
+        tmp2 = tmp1 ^ (unsigned long) req->data;
        write_physical(req->addr, req->size, &tmp2);
    }
-    req->u.data = tmp1;
+    req->data = tmp1;
 }
 
 void __handle_ioreq(CPUState *env, ioreq_t *req)
 {
-    if (!req->pdata_valid && req->dir == IOREQ_WRITE && req->size != 4)
-        req->u.data &= (1UL << (8 * req->size)) - 1;
+    if (!req->data_is_ptr && req->dir == IOREQ_WRITE && req->size != 4)
+        req->data &= (1UL << (8 * req->size)) - 1;
 
     switch (req->type) {
     case IOREQ_TYPE_PIO:
--- a/tools/libxc/xc_hvm_build.c	Tue Oct 31 16:22:39 2006 +0000
+++ b/tools/libxc/xc_hvm_build.c	Tue Oct 31 16:42:46 2006 +0000
@@ -12,7 +12,6 @@
 #include <unistd.h>
 #include <zlib.h>
 #include <xen/hvm/hvm_info_table.h>
-#include <xen/hvm/ioreq.h>
 #include <xen/hvm/params.h>
 #include <xen/hvm/e820.h>
 
--- a/xen/arch/ia64/vmx/mmio.c	Tue Oct 31 16:22:39 2006 +0000
+++ b/xen/arch/ia64/vmx/mmio.c	Tue Oct 31 16:42:46 2006 +0000
@@ -214,8 +214,8 @@ static void low_mmio_access(VCPU *vcpu,
     p->count = 1;
     p->dir = dir;
     if(dir==IOREQ_WRITE)     //write;
-        p->u.data = *val;
-    p->pdata_valid = 0;
+        p->data = *val;
+    p->data_is_ptr = 0;
     p->type = 1;
     p->df = 0;
 
@@ -227,7 +227,7 @@ static void low_mmio_access(VCPU *vcpu,
     }else
         vmx_send_assist_req(v);
     if(dir==IOREQ_READ){ //read
-        *val=p->u.data;
+        *val=p->data;
     }
     return;
 }
@@ -249,8 +249,8 @@ static void legacy_io_access(VCPU *vcpu,
     p->count = 1;
     p->dir = dir;
     if(dir==IOREQ_WRITE)     //write;
-        p->u.data = *val;
-    p->pdata_valid = 0;
+        p->data = *val;
+    p->data_is_ptr = 0;
     p->type = 0;
     p->df = 0;
 
@@ -258,15 +258,15 @@ static void legacy_io_access(VCPU *vcpu,
 
     vmx_send_assist_req(v);
     if(dir==IOREQ_READ){ //read
-        *val=p->u.data;
+        *val=p->data;
     }
 #ifdef DEBUG_PCI
     if(dir==IOREQ_WRITE)
         if(p->addr == 0xcf8UL)
-            printk("Write 0xcf8, with val [0x%lx]\n", p->u.data);
+            printk("Write 0xcf8, with val [0x%lx]\n", p->data);
     else
         if(p->addr == 0xcfcUL)
-            printk("Read 0xcfc, with val [0x%lx]\n", p->u.data);
+            printk("Read 0xcfc, with val [0x%lx]\n", p->data);
 #endif //DEBUG_PCI
     return;
 }
--- a/xen/arch/x86/hvm/hvm.c	Tue Oct 31 16:22:39 2006 +0000
+++ b/xen/arch/x86/hvm/hvm.c	Tue Oct 31 16:42:46 2006 +0000
@@ -406,16 +406,13 @@ void hvm_hlt(unsigned long rflags)
 /*
  * __hvm_copy():
  *  @buf  = hypervisor buffer
- *  @addr = guest virtual or physical address to copy to/from
+ *  @addr = guest physical address to copy to/from
  *  @size = number of bytes to copy
  *  @dir  = copy *to* guest (TRUE) or *from* guest (FALSE)?
- *  @phy  = interpret addr as physical (TRUE) or virtual (FALSE) address?
  * Returns number of bytes failed to copy (0 == complete success).
  */
-static int __hvm_copy(
-    void *buf, unsigned long addr, int size, int dir, int phy)
+static int __hvm_copy(void *buf, paddr_t addr, int size, int dir)
 {
-    struct vcpu *v = current;
     unsigned long mfn;
     char *p;
     int count, todo;
@@ -425,9 +422,7 @@ static int __hvm_copy(
     {
         count = min_t(int, PAGE_SIZE - (addr & ~PAGE_MASK), todo);
 
-        mfn = phy ?
-            get_mfn_from_gpfn(addr >> PAGE_SHIFT) :
-            mfn_x(sh_vcpu_gfn_to_mfn(v, shadow_gva_to_gfn(v, addr)));
+        mfn = get_mfn_from_gpfn(addr >> PAGE_SHIFT);
         if ( mfn == INVALID_MFN )
             return todo;
 
@@ -448,24 +443,24 @@ static int __hvm_copy(
     return 0;
 }
 
-int hvm_copy_to_guest_phys(unsigned long paddr, void *buf, int size)
+int hvm_copy_to_guest_phys(paddr_t paddr, void *buf, int size)
 {
-    return __hvm_copy(buf, paddr, size, 1, 1);
+    return __hvm_copy(buf, paddr, size, 1);
 }
 
-int hvm_copy_from_guest_phys(void *buf, unsigned long paddr, int size)
+int hvm_copy_from_guest_phys(void *buf, paddr_t paddr, int size)
 {
-    return __hvm_copy(buf, paddr, size, 0, 1);
+    return __hvm_copy(buf, paddr, size, 0);
 }
 
 int hvm_copy_to_guest_virt(unsigned long vaddr, void *buf, int size)
 {
-    return __hvm_copy(buf, vaddr, size, 1, 0);
+    return __hvm_copy(buf, shadow_gva_to_gpa(current, vaddr), size, 1);
 }
 
 int hvm_copy_from_guest_virt(void *buf, unsigned long vaddr, int size)
 {
-    return __hvm_copy(buf, vaddr, size, 0, 0);
+    return __hvm_copy(buf, shadow_gva_to_gpa(current, vaddr), size, 0);
 }
 
 /*
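An illustrative, hypothetical call against the patched interface (not from this changeset), showing what the wider type buys a 32-bit PAE hypervisor:

/* Illustrative caller only -- not from this changeset. */
uint32_t magic = 0xfeedface;
paddr_t gpa = (paddr_t)5ULL << 30;   /* 5GB: does not fit in 32 bits */

/* The old prototype took unsigned long, so a 32-bit build would have
 * truncated gpa before the copy; with paddr_t the full address reaches
 * __hvm_copy().  The return value is the number of bytes NOT copied. */
if ( hvm_copy_to_guest_phys(gpa, &magic, sizeof(magic)) != 0 )
    printk("copy to %"PRIpaddr" failed\n", gpa);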
--- a/xen/arch/x86/hvm/i8254.c	Tue Oct 31 16:22:39 2006 +0000
+++ b/xen/arch/x86/hvm/i8254.c	Tue Oct 31 16:42:46 2006 +0000
@@ -392,17 +392,17 @@ static int handle_pit_io(ioreq_t *p)
     struct PITState *vpit = &(v->domain->arch.hvm_domain.pl_time.vpit);
 
     if (p->size != 1 ||
-        p->pdata_valid ||
+        p->data_is_ptr ||
         p->type != IOREQ_TYPE_PIO){
         printk("HVM_PIT:wrong PIT IO!\n");
         return 1;
     }
 
     if (p->dir == 0) {/* write */
-        pit_ioport_write(vpit, p->addr, p->u.data);
+        pit_ioport_write(vpit, p->addr, p->data);
     } else if (p->dir == 1) { /* read */
         if ( (p->addr & 3) != 3 ) {
-            p->u.data = pit_ioport_read(vpit, p->addr);
+            p->data = pit_ioport_read(vpit, p->addr);
         } else {
             printk("HVM_PIT: read A1:A0=3!\n");
         }
@@ -434,16 +434,16 @@ static int handle_speaker_io(ioreq_t *p)
     struct PITState *vpit = &(v->domain->arch.hvm_domain.pl_time.vpit);
 
     if (p->size != 1 ||
-        p->pdata_valid ||
+        p->data_is_ptr ||
         p->type != IOREQ_TYPE_PIO){
         printk("HVM_SPEAKER:wrong SPEAKER IO!\n");
         return 1;
     }
 
     if (p->dir == 0) {/* write */
-        speaker_ioport_write(vpit, p->addr, p->u.data);
+        speaker_ioport_write(vpit, p->addr, p->data);
     } else if (p->dir == 1) {/* read */
-        p->u.data = speaker_ioport_read(vpit, p->addr);
+        p->data = speaker_ioport_read(vpit, p->addr);
     }
 
     return 1;
--- a/xen/arch/x86/hvm/i8259.c	Tue Oct 31 16:22:39 2006 +0000
+++ b/xen/arch/x86/hvm/i8259.c	Tue Oct 31 16:42:46 2006 +0000
@@ -491,11 +491,10 @@ static int intercept_pic_io(ioreq_t *p)
 
     pic = &current->domain->arch.hvm_domain.vpic;
     if ( p->dir == IOREQ_WRITE ) {
-        if ( p->pdata_valid )
-            (void)hvm_copy_from_guest_phys(
-                &data, (unsigned long)p->u.pdata, p->size);
+        if ( p->data_is_ptr )
+            (void)hvm_copy_from_guest_phys(&data, p->data, p->size);
         else
-            data = p->u.data;
+            data = p->data;
         spin_lock_irqsave(&pic->lock, flags);
         pic_ioport_write((void*)&pic->pics[p->addr>>7],
                          (uint32_t) p->addr, (uint32_t) (data & 0xff));
@@ -506,11 +505,10 @@ static int intercept_pic_io(ioreq_t *p)
         data = pic_ioport_read(
             (void*)&pic->pics[p->addr>>7], (uint32_t) p->addr);
         spin_unlock_irqrestore(&pic->lock, flags);
-        if ( p->pdata_valid )
-            (void)hvm_copy_to_guest_phys(
-                (unsigned long)p->u.pdata, &data, p->size);
+        if ( p->data_is_ptr )
+            (void)hvm_copy_to_guest_phys(p->data, &data, p->size);
         else
-            p->u.data = (u64)data;
+            p->data = (u64)data;
     }
     return 1;
 }
@@ -528,11 +526,10 @@ static int intercept_elcr_io(ioreq_t *p)
 
     s = &current->domain->arch.hvm_domain.vpic;
     if ( p->dir == IOREQ_WRITE ) {
-        if ( p->pdata_valid )
-            (void)hvm_copy_from_guest_phys(
-                &data, (unsigned long)p->u.pdata, p->size);
+        if ( p->data_is_ptr )
+            (void)hvm_copy_from_guest_phys(&data, p->data, p->size);
         else
-            data = p->u.data;
+            data = p->data;
         spin_lock_irqsave(&s->lock, flags);
         elcr_ioport_write((void*)&s->pics[p->addr&1],
                           (uint32_t) p->addr, (uint32_t)( data & 0xff));
@@ -543,11 +540,10 @@ static int intercept_elcr_io(ioreq_t *p)
     else {
         data = (u64) elcr_ioport_read(
             (void*)&s->pics[p->addr&1], (uint32_t) p->addr);
-        if ( p->pdata_valid )
-            (void)hvm_copy_to_guest_phys(
-                (unsigned long)p->u.pdata, &data, p->size);
+        if ( p->data_is_ptr )
+            (void)hvm_copy_to_guest_phys(p->data, &data, p->size);
         else
-            p->u.data = (u64)data;
+            p->data = (u64)data;
     }
     return 1;
 }
--- a/xen/arch/x86/hvm/intercept.c	Tue Oct 31 16:22:39 2006 +0000
+++ b/xen/arch/x86/hvm/intercept.c	Tue Oct 31 16:42:46 2006 +0000
@@ -67,12 +67,12 @@ static inline void hvm_mmio_access(struc
     switch ( p->type ) {
     case IOREQ_TYPE_COPY:
     {
-        if ( !p->pdata_valid ) {
+        if ( !p->data_is_ptr ) {
             if ( p->dir == IOREQ_READ )
-                p->u.data = read_handler(v, p->addr, p->size);
+                p->data = read_handler(v, p->addr, p->size);
             else    /* p->dir == IOREQ_WRITE */
-                write_handler(v, p->addr, p->size, p->u.data);
-        } else {    /* !p->pdata_valid */
+                write_handler(v, p->addr, p->size, p->data);
+        } else {    /* p->data_is_ptr */
             int i, sign = (p->df) ? -1 : 1;
 
             if ( p->dir == IOREQ_READ ) {
@@ -81,7 +81,7 @@ static inline void hvm_mmio_access(struc
                         p->addr + (sign * i * p->size),
                         p->size);
                     (void)hvm_copy_to_guest_phys(
-                        (unsigned long)p->u.pdata + (sign * i * p->size),
+                        p->data + (sign * i * p->size),
                         &data,
                         p->size);
                 }
@@ -89,7 +89,7 @@ static inline void hvm_mmio_access(struc
                 for ( i = 0; i < p->count; i++ ) {
                     (void)hvm_copy_from_guest_phys(
                         &data,
-                        (unsigned long)p->u.pdata + (sign * i * p->size),
+                        p->data + (sign * i * p->size),
                         p->size);
                     write_handler(v,
                         p->addr + (sign * i * p->size),
@@ -103,37 +103,37 @@ static inline void hvm_mmio_access(struc
     case IOREQ_TYPE_AND:
         tmp1 = read_handler(v, p->addr, p->size);
         if ( p->dir == IOREQ_WRITE ) {
-            tmp2 = tmp1 & (unsigned long) p->u.data;
+            tmp2 = tmp1 & (unsigned long) p->data;
             write_handler(v, p->addr, p->size, tmp2);
         }
-        p->u.data = tmp1;
+        p->data = tmp1;
         break;
 
     case IOREQ_TYPE_ADD:
         tmp1 = read_handler(v, p->addr, p->size);
         if (p->dir == IOREQ_WRITE) {
-            tmp2 = tmp1 + (unsigned long) p->u.data;
+            tmp2 = tmp1 + (unsigned long) p->data;
             write_handler(v, p->addr, p->size, tmp2);
         }
-        p->u.data = tmp1;
+        p->data = tmp1;
         break;
 
     case IOREQ_TYPE_OR:
         tmp1 = read_handler(v, p->addr, p->size);
         if ( p->dir == IOREQ_WRITE ) {
-            tmp2 = tmp1 | (unsigned long) p->u.data;
+            tmp2 = tmp1 | (unsigned long) p->data;
             write_handler(v, p->addr, p->size, tmp2);
         }
-        p->u.data = tmp1;
+        p->data = tmp1;
         break;
 
     case IOREQ_TYPE_XOR:
         tmp1 = read_handler(v, p->addr, p->size);
         if ( p->dir == IOREQ_WRITE ) {
-            tmp2 = tmp1 ^ (unsigned long) p->u.data;
+            tmp2 = tmp1 ^ (unsigned long) p->data;
             write_handler(v, p->addr, p->size, tmp2);
         }
-        p->u.data = tmp1;
+        p->data = tmp1;
         break;
 
     case IOREQ_TYPE_XCHG:
@@ -142,8 +142,8 @@ static inline void hvm_mmio_access(struc
          * its own local APIC.
          */
        tmp1 = read_handler(v, p->addr, p->size);
-        write_handler(v, p->addr, p->size, (unsigned long) p->u.data);
-        p->u.data = tmp1;
+        write_handler(v, p->addr, p->size, (unsigned long) p->data);
+        p->data = tmp1;
        break;
 
    default:
--- a/xen/arch/x86/hvm/io.c	Tue Oct 31 16:22:39 2006 +0000
+++ b/xen/arch/x86/hvm/io.c	Tue Oct 31 16:42:46 2006 +0000
@@ -365,7 +365,7 @@ static void hvm_pio_assist(struct cpu_us
     unsigned long old_eax;
     int sign = p->df ? -1 : 1;
 
-    if ( p->pdata_valid || (pio_opp->flags & OVERLAP) )
+    if ( p->data_is_ptr || (pio_opp->flags & OVERLAP) )
     {
         if ( pio_opp->flags & REPZ )
             regs->ecx -= p->count;
@@ -376,9 +376,9 @@ static void hvm_pio_assist(struct cpu_us
             {
                 unsigned long addr = pio_opp->addr;
                 if ( hvm_paging_enabled(current) )
-                    (void)hvm_copy_to_guest_virt(addr, &p->u.data, p->size);
+                    (void)hvm_copy_to_guest_virt(addr, &p->data, p->size);
                 else
-                    (void)hvm_copy_to_guest_phys(addr, &p->u.data, p->size);
+                    (void)hvm_copy_to_guest_phys(addr, &p->data, p->size);
             }
             regs->edi += sign * p->count * p->size;
         }
@@ -394,13 +394,13 @@ static void hvm_pio_assist(struct cpu_us
         switch ( p->size )
         {
         case 1:
-            regs->eax = (old_eax & 0xffffff00) | (p->u.data & 0xff);
+            regs->eax = (old_eax & 0xffffff00) | (p->data & 0xff);
             break;
         case 2:
-            regs->eax = (old_eax & 0xffff0000) | (p->u.data & 0xffff);
+            regs->eax = (old_eax & 0xffff0000) | (p->data & 0xffff);
             break;
         case 4:
-            regs->eax = (p->u.data & 0xffffffff);
+            regs->eax = (p->data & 0xffffffff);
             break;
         default:
             printk("Error: %s unknown port size\n", __FUNCTION__);
@@ -425,7 +425,7 @@ static void hvm_mmio_assist(struct cpu_u
     case INSTR_MOV:
         if (dst & REGISTER) {
             index = operand_index(dst);
-            set_reg_value(size, index, 0, regs, p->u.data);
+            set_reg_value(size, index, 0, regs, p->data);
         }
         break;
 
@@ -433,15 +433,15 @@ static void hvm_mmio_assist(struct cpu_u
         if (dst & REGISTER) {
             switch (size) {
             case BYTE:
-                p->u.data &= 0xFFULL;
+                p->data &= 0xFFULL;
                 break;
 
             case WORD:
-                p->u.data &= 0xFFFFULL;
+                p->data &= 0xFFFFULL;
                 break;
 
             case LONG:
-                p->u.data &= 0xFFFFFFFFULL;
+                p->data &= 0xFFFFFFFFULL;
                 break;
 
             default:
@@ -449,7 +449,7 @@ static void hvm_mmio_assist(struct cpu_u
                 domain_crash_synchronous();
             }
             index = operand_index(dst);
-            set_reg_value(operand_size(dst), index, 0, regs, p->u.data);
+            set_reg_value(operand_size(dst), index, 0, regs, p->data);
         }
         break;
 
@@ -457,21 +457,21 @@ static void hvm_mmio_assist(struct cpu_u
         if (dst & REGISTER) {
             switch (size) {
             case BYTE:
-                p->u.data &= 0xFFULL;
-                if ( p->u.data & 0x80ULL )
-                    p->u.data |= 0xFFFFFFFFFFFFFF00ULL;
+                p->data &= 0xFFULL;
+                if ( p->data & 0x80ULL )
+                    p->data |= 0xFFFFFFFFFFFFFF00ULL;
                 break;
 
             case WORD:
-                p->u.data &= 0xFFFFULL;
-                if ( p->u.data & 0x8000ULL )
-                    p->u.data |= 0xFFFFFFFFFFFF0000ULL;
+                p->data &= 0xFFFFULL;
+                if ( p->data & 0x8000ULL )
+                    p->data |= 0xFFFFFFFFFFFF0000ULL;
                 break;
 
             case LONG:
-                p->u.data &= 0xFFFFFFFFULL;
-                if ( p->u.data & 0x80000000ULL )
-                    p->u.data |= 0xFFFFFFFF00000000ULL;
+                p->data &= 0xFFFFFFFFULL;
+                if ( p->data & 0x80000000ULL )
+                    p->data |= 0xFFFFFFFF00000000ULL;
                 break;
 
             default:
@@ -479,7 +479,7 @@ static void hvm_mmio_assist(struct cpu_u
                 domain_crash_synchronous();
             }
             index = operand_index(dst);
-            set_reg_value(operand_size(dst), index, 0, regs, p->u.data);
+            set_reg_value(operand_size(dst), index, 0, regs, p->data);
         }
         break;
 
@@ -493,9 +493,9 @@ static void hvm_mmio_assist(struct cpu_u
             unsigned long addr = mmio_opp->addr;
 
             if (hvm_paging_enabled(current))
-                (void)hvm_copy_to_guest_virt(addr, &p->u.data, p->size);
+                (void)hvm_copy_to_guest_virt(addr, &p->data, p->size);
             else
-                (void)hvm_copy_to_guest_phys(addr, &p->u.data, p->size);
+                (void)hvm_copy_to_guest_phys(addr, &p->data, p->size);
         }
 
         regs->esi += sign * p->count * p->size;
@@ -521,14 +521,14 @@ static void hvm_mmio_assist(struct cpu_u
         if (src & REGISTER) {
             index = operand_index(src);
             value = get_reg_value(size, index, 0, regs);
-            diff = (unsigned long) p->u.data & value;
+            diff = (unsigned long) p->data & value;
         } else if (src & IMMEDIATE) {
             value = mmio_opp->immediate;
-            diff = (unsigned long) p->u.data & value;
+            diff = (unsigned long) p->data & value;
         } else if (src & MEMORY) {
             index = operand_index(dst);
             value = get_reg_value(size, index, 0, regs);
-            diff = (unsigned long) p->u.data & value;
+            diff = (unsigned long) p->data & value;
             set_reg_value(size, index, 0, regs, diff);
         }
 
@@ -536,14 +536,14 @@ static void hvm_mmio_assist(struct cpu_u
         if (src & REGISTER) {
             index = operand_index(src);
             value = get_reg_value(size, index, 0, regs);
-            diff = (unsigned long) p->u.data + value;
+            diff = (unsigned long) p->data + value;
         } else if (src & IMMEDIATE) {
             value = mmio_opp->immediate;
-            diff = (unsigned long) p->u.data + value;
+            diff = (unsigned long) p->data + value;
         } else if (src & MEMORY) {
             index = operand_index(dst);
             value = get_reg_value(size, index, 0, regs);
-            diff = (unsigned long) p->u.data + value;
+            diff = (unsigned long) p->data + value;
             set_reg_value(size, index, 0, regs, diff);
         }
 
@@ -563,14 +563,14 @@ static void hvm_mmio_assist(struct cpu_u
         if (src & REGISTER) {
             index = operand_index(src);
             value = get_reg_value(size, index, 0, regs);
-            diff = (unsigned long) p->u.data | value;
+            diff = (unsigned long) p->data | value;
         } else if (src & IMMEDIATE) {
             value = mmio_opp->immediate;
-            diff = (unsigned long) p->u.data | value;
+            diff = (unsigned long) p->data | value;
         } else if (src & MEMORY) {
             index = operand_index(dst);
             value = get_reg_value(size, index, 0, regs);
-            diff = (unsigned long) p->u.data | value;
+            diff = (unsigned long) p->data | value;
             set_reg_value(size, index, 0, regs, diff);
         }
 
@@ -590,14 +590,14 @@ static void hvm_mmio_assist(struct cpu_u
         if (src & REGISTER) {
             index = operand_index(src);
             value = get_reg_value(size, index, 0, regs);
-            diff = (unsigned long) p->u.data ^ value;
+            diff = (unsigned long) p->data ^ value;
         } else if (src & IMMEDIATE) {
             value = mmio_opp->immediate;
-            diff = (unsigned long) p->u.data ^ value;
+            diff = (unsigned long) p->data ^ value;
         } else if (src & MEMORY) {
             index = operand_index(dst);
             value = get_reg_value(size, index, 0, regs);
-            diff = (unsigned long) p->u.data ^ value;
+            diff = (unsigned long) p->data ^ value;
             set_reg_value(size, index, 0, regs, diff);
         }
 
@@ -618,14 +618,14 @@ static void hvm_mmio_assist(struct cpu_u
         if (src & REGISTER) {
             index = operand_index(src);
             value = get_reg_value(size, index, 0, regs);
-            diff = (unsigned long) p->u.data - value;
+            diff = (unsigned long) p->data - value;
         } else if (src & IMMEDIATE) {
             value = mmio_opp->immediate;
-            diff = (unsigned long) p->u.data - value;
+            diff = (unsigned long) p->data - value;
         } else if (src & MEMORY) {
             index = operand_index(dst);
             value = get_reg_value(size, index, 0, regs);
-            diff = value - (unsigned long) p->u.data;
+            diff = value - (unsigned long) p->data;
             if ( mmio_opp->instr == INSTR_SUB )
                 set_reg_value(size, index, 0, regs, diff);
         }
@@ -636,9 +636,9 @@ static void hvm_mmio_assist(struct cpu_u
          */
         regs->eflags &= ~(X86_EFLAGS_CF|X86_EFLAGS_PF|X86_EFLAGS_AF|
                           X86_EFLAGS_ZF|X86_EFLAGS_SF|X86_EFLAGS_OF);
-        set_eflags_CF(size, value, (unsigned long) p->u.data, regs);
-        set_eflags_OF(size, diff, value, (unsigned long) p->u.data, regs);
-        set_eflags_AF(size, diff, value, (unsigned long) p->u.data, regs);
+        set_eflags_CF(size, value, (unsigned long) p->data, regs);
+        set_eflags_OF(size, diff, value, (unsigned long) p->data, regs);
+        set_eflags_AF(size, diff, value, (unsigned long) p->data, regs);
         set_eflags_ZF(size, diff, regs);
         set_eflags_SF(size, diff, regs);
         set_eflags_PF(size, diff, regs);
@@ -654,7 +654,7 @@ static void hvm_mmio_assist(struct cpu_u
             index = operand_index(dst);
             value = get_reg_value(size, index, 0, regs);
         }
-        diff = (unsigned long) p->u.data & value;
+        diff = (unsigned long) p->data & value;
 
         /*
          * Sets the SF, ZF, and PF status flags. CF and OF are set to 0
@@ -674,7 +674,7 @@ static void hvm_mmio_assist(struct cpu_u
         }
         else if ( src & IMMEDIATE )
             value = mmio_opp->immediate;
-        if (p->u.data & (1 << (value & ((1 << 5) - 1))))
+        if (p->data & (1 << (value & ((1 << 5) - 1))))
             regs->eflags |= X86_EFLAGS_CF;
         else
             regs->eflags &= ~X86_EFLAGS_CF;
@@ -684,10 +684,10 @@ static void hvm_mmio_assist(struct cpu_u
     case INSTR_XCHG:
         if (src & REGISTER) {
             index = operand_index(src);
-            set_reg_value(size, index, 0, regs, p->u.data);
+            set_reg_value(size, index, 0, regs, p->data);
         } else {
             index = operand_index(dst);
-            set_reg_value(size, index, 0, regs, p->u.data);
+            set_reg_value(size, index, 0, regs, p->data);
         }
         break;
     }
--- a/xen/arch/x86/hvm/platform.c	Tue Oct 31 16:22:39 2006 +0000
+++ b/xen/arch/x86/hvm/platform.c	Tue Oct 31 16:42:46 2006 +0000
@@ -741,7 +741,7 @@ static void hvm_send_assist_req(struct v
 }
 
 void send_pio_req(unsigned long port, unsigned long count, int size,
-                  long value, int dir, int df, int pvalid)
+                  long value, int dir, int df, int value_is_ptr)
 {
     struct vcpu *v = current;
     vcpu_iodata_t *vio;
@@ -749,8 +749,8 @@ void send_pio_req(unsigned long port, un
 
     if ( size == 0 || count == 0 ) {
         printk("null pio request? port %lx, count %lx, "
-               "size %d, value %lx, dir %d, pvalid %d.\n",
-               port, count, size, value, dir, pvalid);
+               "size %d, value %lx, dir %d, value_is_ptr %d.\n",
+               port, count, size, value, dir, value_is_ptr);
     }
 
     vio = get_vio(v->domain, v->vcpu_id);
@@ -765,7 +765,7 @@ void send_pio_req(unsigned long port, un
                p->state);
 
     p->dir = dir;
-    p->pdata_valid = pvalid;
+    p->data_is_ptr = value_is_ptr;
 
     p->type = IOREQ_TYPE_PIO;
     p->size = size;
@@ -775,14 +775,14 @@ void send_pio_req(unsigned long port, un
 
     p->io_count++;
 
-    if ( pvalid )   /* get physical address of data */
+    if ( value_is_ptr )   /* get physical address of data */
     {
         if ( hvm_paging_enabled(current) )
-            p->u.pdata = (void *)shadow_gva_to_gpa(current, value);
+            p->data = shadow_gva_to_gpa(current, value);
         else
-            p->u.pdata = (void *)value; /* guest VA == guest PA */
+            p->data = value; /* guest VA == guest PA */
     } else if ( dir == IOREQ_WRITE )
-        p->u.data = value;
+        p->data = value;
 
     if ( hvm_portio_intercept(p) ) {
         p->state = STATE_IORESP_READY;
@@ -795,7 +795,7 @@ void send_pio_req(unsigned long port, un
 
 static void send_mmio_req(unsigned char type, unsigned long gpa,
                           unsigned long count, int size, long value,
-                          int dir, int df, int pvalid)
+                          int dir, int df, int value_is_ptr)
 {
     struct vcpu *v = current;
     vcpu_iodata_t *vio;
@@ -803,8 +803,8 @@ static void send_mmio_req(unsigned char
 
     if ( size == 0 || count == 0 ) {
         printk("null mmio request? type %d, gpa %lx, "
-               "count %lx, size %d, value %lx, dir %d, pvalid %d.\n",
-               type, gpa, count, size, value, dir, pvalid);
+               "count %lx, size %d, value %lx, dir %d, value_is_ptr %d.\n",
+               type, gpa, count, size, value, dir, value_is_ptr);
     }
 
     vio = get_vio(v->domain, v->vcpu_id);
@@ -819,7 +819,7 @@ static void send_mmio_req(unsigned char
         printk("WARNING: send mmio with something already pending (%d)?\n",
                p->state);
     p->dir = dir;
-    p->pdata_valid = pvalid;
+    p->data_is_ptr = value_is_ptr;
 
     p->type = type;
     p->size = size;
@@ -829,13 +829,13 @@ static void send_mmio_req(unsigned char
 
     p->io_count++;
 
-    if (pvalid) {
+    if (value_is_ptr) {
         if (hvm_paging_enabled(v))
-            p->u.data = shadow_gva_to_gpa(v, value);
+            p->data = shadow_gva_to_gpa(v, value);
         else
-            p->u.pdata = (void *) value; /* guest VA == guest PA */
+            p->data = value; /* guest VA == guest PA */
     } else
-        p->u.data = value;
+        p->data = value;
 
     if ( hvm_mmio_intercept(p) || hvm_buffered_io_intercept(p) ) {
         p->state = STATE_IORESP_READY;
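For orientation, two hypothetical call sites for the renamed parameter (not from this changeset; the port and variable names are invented): a plain OUT passes the value itself with value_is_ptr=0, while a REP OUTSB passes the guest virtual address of the string with value_is_ptr=1, which the code above then translates to a guest physical address via shadow_gva_to_gpa().

/* Hypothetical call sites, mirroring the signature patched above. */
send_pio_req(0x3f8, 1, 1, ch, IOREQ_WRITE, 0, 0);          /* OUT: scalar value  */
send_pio_req(0x3f8, count, 1, str_va, IOREQ_WRITE, df, 1); /* REP OUTSB: pointer */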
--- a/xen/arch/x86/hvm/pmtimer.c	Tue Oct 31 16:22:39 2006 +0000
+++ b/xen/arch/x86/hvm/pmtimer.c	Tue Oct 31 16:42:46 2006 +0000
@@ -20,7 +20,7 @@ static int handle_pmt_io(ioreq_t *p)
     uint64_t curr_gtime;
 
     if (p->size != 4 ||
-        p->pdata_valid ||
+        p->data_is_ptr ||
         p->type != IOREQ_TYPE_PIO){
         printk("HVM_PMT: wrong PM timer IO\n");
         return 1;
@@ -32,7 +32,7 @@ static int handle_pmt_io(ioreq_t *p)
     } else if (p->dir == 1) { /* read */
         curr_gtime = hvm_get_guest_time(s->vcpu);
         s->pm1_timer += ((curr_gtime - s->last_gtime) * s->scale) >> 32;
-        p->u.data = s->pm1_timer;
+        p->data = s->pm1_timer;
         s->last_gtime = curr_gtime;
         return 1;
     }
--- a/xen/arch/x86/hvm/rtc.c	Tue Oct 31 16:22:39 2006 +0000
+++ b/xen/arch/x86/hvm/rtc.c	Tue Oct 31 16:42:46 2006 +0000
@@ -345,17 +345,17 @@ static int handle_rtc_io(ioreq_t *p)
     struct RTCState *vrtc = &v->domain->arch.hvm_domain.pl_time.vrtc;
 
     if (p->size != 1 ||
-        p->pdata_valid ||
+        p->data_is_ptr ||
         p->type != IOREQ_TYPE_PIO){
         printk("HVM_RTC: wrong RTC IO!\n");
         return 1;
     }
 
     if (p->dir == 0) { /* write */
-        if (rtc_ioport_write(vrtc, p->addr, p->u.data & 0xFF))
+        if (rtc_ioport_write(vrtc, p->addr, p->data & 0xFF))
             return 1;
     } else if (p->dir == 1 && vrtc->cmos_index < RTC_SIZE) { /* read */
-        p->u.data = rtc_ioport_read(vrtc, p->addr);
+        p->data = rtc_ioport_read(vrtc, p->addr);
         return 1;
     }
     return 0;
--- a/xen/arch/x86/hvm/svm/svm.c	Tue Oct 31 16:22:39 2006 +0000
+++ b/xen/arch/x86/hvm/svm/svm.c	Tue Oct 31 16:42:46 2006 +0000
@@ -2545,10 +2545,10 @@ void walk_shadow_and_guest_pt(unsigned l
     l1_pgentry_t spte;
     struct vcpu *v = current;
     struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
-    unsigned long gpa;
+    paddr_t gpa;
 
     gpa = shadow_gva_to_gpa(current, gva);
-    printk( "gva = %lx, gpa=%lx, gCR3=%x\n", gva, gpa, (u32)vmcb->cr3 );
+    printk("gva = %lx, gpa=%"PRIpaddr", gCR3=%x\n", gva, gpa, (u32)vmcb->cr3);
     if( !svm_paging_enabled(v) || mmio_space(gpa) )
         return;
 
--- a/xen/arch/x86/mm/shadow/multi.c	Tue Oct 31 16:22:39 2006 +0000
+++ b/xen/arch/x86/mm/shadow/multi.c	Tue Oct 31 16:42:46 2006 +0000
@@ -41,10 +41,6 @@
  *
  * THINGS TO DO LATER:
  *
- * FIX GVA_TO_GPA
- * The current interface returns an unsigned long, which is not big enough
- * to hold a physical address in PAE.  Should return a gfn instead.
- *
  * TEARDOWN HEURISTICS
  * Also: have a heuristic for when to destroy a previous paging-mode's
  * shadows.  When a guest is done with its start-of-day 32-bit tables
@@ -2837,7 +2833,7 @@ static int sh_page_fault(struct vcpu *v,
         perfc_incrc(shadow_fault_mmio);
         sh_audit_gw(v, &gw);
         unmap_walk(v, &gw);
-        SHADOW_PRINTK("mmio\n");
+        SHADOW_PRINTK("mmio %#"PRIpaddr"\n", gpa);
         shadow_audit_tables(v);
         reset_early_unshadow(v);
         shadow_unlock(d);
@@ -2941,7 +2937,7 @@ sh_gva_to_gfn(struct vcpu *v, unsigned l
 }
 
 
-static unsigned long
+static paddr_t
 sh_gva_to_gpa(struct vcpu *v, unsigned long va)
 /* Called to translate a guest virtual address to what the *guest*
  * pagetables would map it to. */
@@ -2950,7 +2946,7 @@ sh_gva_to_gpa(struct vcpu *v, unsigned l
     if ( gfn == INVALID_GFN )
         return 0;
     else
-        return (gfn << PAGE_SHIFT) | (va & ~PAGE_MASK);
+        return (((paddr_t)gfn) << PAGE_SHIFT) + (va & ~PAGE_MASK);
 }
 
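The printk changes in this file and in svm/svm.c above go together with the type change: paddr_t may be wider than unsigned long, so %lx is no longer a width-correct format, and Xen's PRIpaddr macro is used instead. A standalone sketch of the same idiom using the C99 inttypes macros (PRIx64 standing in for PRIpaddr; illustrative, not from the changeset):

#include <inttypes.h>
#include <stdio.h>

typedef uint64_t paddr_t;   /* stand-in for Xen's paddr_t on a PAE build */

int main(void)
{
    paddr_t gpa = (paddr_t)1 << 35;   /* 32GB: needs more than 32 bits */

    /* printf("%lx", gpa) would be wrong on a 32-bit build, where unsigned
     * long is narrower than paddr_t; the width macro expands to the right
     * length modifier for the platform. */
    printf("gpa = %#" PRIx64 "\n", (uint64_t)gpa);
    return 0;
}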
--- a/xen/arch/x86/mm/shadow/types.h	Tue Oct 31 16:22:39 2006 +0000
+++ b/xen/arch/x86/mm/shadow/types.h	Tue Oct 31 16:42:46 2006 +0000
@@ -404,11 +404,22 @@ valid_gfn(gfn_t m)
 }
 
 /* Translation between mfns and gfns */
+
+// vcpu-specific version of gfn_to_mfn().  This is where we hide the dirty
+// little secret that, for hvm guests with paging disabled, nearly all of the
+// shadow code actually think that the guest is running on *untranslated* page
+// tables (which is actually domain->phys_table).
+//
+
 static inline mfn_t
 vcpu_gfn_to_mfn(struct vcpu *v, gfn_t gfn)
 {
-    return sh_vcpu_gfn_to_mfn(v, gfn_x(gfn));
-}
+    if ( !shadow_vcpu_mode_translate(v) )
+        return _mfn(gfn_x(gfn));
+    if ( likely(current->domain == v->domain) )
+        return _mfn(get_mfn_from_gpfn(gfn_x(gfn)));
+    return sh_gfn_to_mfn_foreign(v->domain, gfn_x(gfn));
+}
 
 static inline gfn_t
 mfn_to_gfn(struct domain *d, mfn_t mfn)
--- a/xen/include/asm-x86/hvm/io.h	Tue Oct 31 16:22:39 2006 +0000
+++ b/xen/include/asm-x86/hvm/io.h	Tue Oct 31 16:42:46 2006 +0000
@@ -142,7 +142,7 @@ static inline int irq_masked(unsigned lo
 #endif
 
 extern void send_pio_req(unsigned long port, unsigned long count, int size,
-                         long value, int dir, int df, int pvalid);
+                         long value, int dir, int df, int value_is_ptr);
 extern void handle_mmio(unsigned long gpa);
 extern void hvm_interrupt_post(struct vcpu *v, int vector, int type);
 extern void hvm_io_assist(struct vcpu *v);
--- a/xen/include/asm-x86/hvm/support.h	Tue Oct 31 16:22:39 2006 +0000
+++ b/xen/include/asm-x86/hvm/support.h	Tue Oct 31 16:42:46 2006 +0000
@@ -136,8 +136,8 @@ extern unsigned int opt_hvm_debug_level;
 
 extern int hvm_enabled;
 
-int hvm_copy_to_guest_phys(unsigned long paddr, void *buf, int size);
-int hvm_copy_from_guest_phys(void *buf, unsigned long paddr, int size);
+int hvm_copy_to_guest_phys(paddr_t paddr, void *buf, int size);
+int hvm_copy_from_guest_phys(void *buf, paddr_t paddr, int size);
 int hvm_copy_to_guest_virt(unsigned long vaddr, void *buf, int size);
 int hvm_copy_from_guest_virt(void *buf, unsigned long vaddr, int size);
 
--- a/xen/include/asm-x86/shadow.h	Tue Oct 31 16:22:39 2006 +0000
+++ b/xen/include/asm-x86/shadow.h	Tue Oct 31 16:42:46 2006 +0000
@@ -259,7 +259,7 @@ struct shadow_paging_mode {
     int           (*page_fault            )(struct vcpu *v, unsigned long va,
                                             struct cpu_user_regs *regs);
     int           (*invlpg                )(struct vcpu *v, unsigned long va);
-    unsigned long (*gva_to_gpa            )(struct vcpu *v, unsigned long va);
+    paddr_t       (*gva_to_gpa            )(struct vcpu *v, unsigned long va);
     unsigned long (*gva_to_gfn            )(struct vcpu *v, unsigned long va);
     void          (*update_cr3            )(struct vcpu *v);
     int           (*map_and_validate_gl1e )(struct vcpu *v, mfn_t gmfn,
@@ -368,11 +368,13 @@ shadow_invlpg(struct vcpu *v, unsigned l
     return v->arch.shadow.mode->invlpg(v, va);
 }
 
-static inline unsigned long
+static inline paddr_t
 shadow_gva_to_gpa(struct vcpu *v, unsigned long va)
 /* Called to translate a guest virtual address to what the *guest*
  * pagetables would map it to. */
 {
+    if ( unlikely(!shadow_vcpu_mode_translate(v)) )
+        return (paddr_t) va;
     return v->arch.shadow.mode->gva_to_gpa(v, va);
 }
 
@@ -381,6 +383,8 @@ shadow_gva_to_gfn(struct vcpu *v, unsign
 /* Called to translate a guest virtual address to what the *guest*
  * pagetables would map it to. */
 {
+    if ( unlikely(!shadow_vcpu_mode_translate(v)) )
+        return va >> PAGE_SHIFT;
     return v->arch.shadow.mode->gva_to_gfn(v, va);
 }
 
@@ -673,21 +677,6 @@ sh_gfn_to_mfn(struct domain *d, unsigned
     return sh_gfn_to_mfn_foreign(d, gfn);
 }
 
-// vcpu-specific version of gfn_to_mfn().  This is where we hide the dirty
-// little secret that, for hvm guests with paging disabled, nearly all of the
-// shadow code actually think that the guest is running on *untranslated* page
-// tables (which is actually domain->phys_table).
-//
-static inline mfn_t
-sh_vcpu_gfn_to_mfn(struct vcpu *v, unsigned long gfn)
-{
-    if ( !shadow_vcpu_mode_translate(v) )
-        return _mfn(gfn);
-    if ( likely(current->domain == v->domain) )
-        return _mfn(get_mfn_from_gpfn(gfn));
-    return sh_gfn_to_mfn_foreign(v->domain, gfn);
-}
-
 static inline unsigned long
 sh_mfn_to_gfn(struct domain *d, mfn_t mfn)
 {
--- a/xen/include/public/hvm/ioreq.h	Tue Oct 31 16:22:39 2006 +0000
+++ b/xen/include/public/hvm/ioreq.h	Tue Oct 31 16:42:46 2006 +0000
@@ -45,12 +45,10 @@ struct ioreq {
     uint64_t addr;          /* physical address           */
     uint64_t size;          /* size in bytes              */
     uint64_t count;         /* for rep prefixes           */
-    union {
-        uint64_t data;      /* data                       */
-        void    *pdata;     /* pointer to data            */
-    } u;
+    uint64_t data;          /* data (or paddr of data)    */
     uint8_t state:4;
-    uint8_t pdata_valid:1;  /* if 1, use pdata above      */
+    uint8_t data_is_ptr:1;  /* if 1, data above is the guest paddr
+                             * of the real data to use.   */
     uint8_t dir:1;          /* 1=read, 0=write            */
     uint8_t df:1;
     uint8_t type;           /* I/O type                   */
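With the union replaced by a flat uint64_t plus the data_is_ptr flag, every consumer follows the same dispatch shape. A self-contained sketch of that pattern, using invented names (mini_ioreq_t, handle_write, copy_from_guest_paddr, guest_ram) rather than real Xen or qemu APIs:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Cut-down ioreq with only the fields used here; the real layout is the
 * patched struct ioreq above. */
typedef struct {
    uint64_t addr, size, data;
    uint8_t  data_is_ptr;
} mini_ioreq_t;

static uint8_t guest_ram[1 << 20];   /* toy 1:1 "guest" memory */

static void copy_from_guest_paddr(void *dst, uint64_t paddr, uint64_t size)
{
    memcpy(dst, &guest_ram[paddr], size);
}

static void handle_write(uint64_t addr, uint64_t size, uint64_t val)
{
    printf("write %#llx size %llu val %#llx\n", (unsigned long long)addr,
           (unsigned long long)size, (unsigned long long)val);
}

static void dispatch_write(mini_ioreq_t *p)
{
    if ( !p->data_is_ptr ) {
        /* data holds the value itself (scalar case). */
        handle_write(p->addr, p->size, p->data);
    } else {
        /* data holds a guest *physical* address of the real payload,
         * possibly above 4GB -- hence uint64_t, not void*. */
        uint64_t tmp = 0;
        copy_from_guest_paddr(&tmp, p->data, p->size);
        handle_write(p->addr, p->size, tmp);
    }
}

int main(void)
{
    mini_ioreq_t scalar = { .addr = 0x3f8, .size = 1, .data = 'A' };
    dispatch_write(&scalar);
    return 0;
}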