ia64/xen-unstable
changeset 14036:315c348e5f9e
merge with xen-unstable.hg
author    awilliam@xenbuild2.aw
date      Tue Feb 20 15:43:57 2007 -0700 (2007-02-20)
parents   409e94d0a35b e7994a122aab
children  9364bea18bc4
line diff
--- a/tools/check/check_crypto_lib	Tue Feb 20 15:12:11 2007 -0700
+++ b/tools/check/check_crypto_lib	Tue Feb 20 15:43:57 2007 -0700
@@ -3,8 +3,9 @@

 RC=0

+PATH=/sbin:$PATH
 set -e
-ldconfig -v 2>&1 | grep -q libcrypto.so || RC=1
+ldconfig -p 2>&1 | grep -q libcrypto.so || RC=1

 if test ${RC} -ne 0; then
     echo
--- a/xen/acm/acm_simple_type_enforcement_hooks.c	Tue Feb 20 15:12:11 2007 -0700
+++ b/xen/acm/acm_simple_type_enforcement_hooks.c	Tue Feb 20 15:43:57 2007 -0700
@@ -235,7 +235,7 @@ ste_init_state(struct acm_ste_policy_buf
         }
         /* b) check for grant table conflicts on shared pages */
         spin_lock(&(*pd)->grant_table->lock);
-        for ( i = 0; i < nr_grant_frames((*pd)->grant_table); i++ ) {
+        for ( i = 0; i < nr_grant_entries((*pd)->grant_table); i++ ) {
 #define SPP (PAGE_SIZE / sizeof(struct grant_entry))
             sha_copy = (*pd)->grant_table->shared[i/SPP][i%SPP];
             if ( sha_copy.flags ) {
@@ -244,8 +244,9 @@ ste_init_state(struct acm_ste_policy_buf
                          (unsigned long)sha_copy.frame);
                 rdomid = sha_copy.domid;
                 if ((rdom = get_domain_by_id(rdomid)) == NULL) {
+                    spin_unlock(&(*pd)->grant_table->lock);
                     printkd("%s: domain not found ERROR!\n", __func__);
-                    goto out_gnttab;
+                    goto out;
                 };
                 /* rdom now has remote domain */
                 ste_rssid = GET_SSIDP(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY,
@@ -253,16 +254,16 @@ ste_init_state(struct acm_ste_policy_buf
                 ste_rssidref = ste_rssid->ste_ssidref;
                 put_domain(rdom);
                 if (!have_common_type(ste_ssidref, ste_rssidref)) {
+                    spin_unlock(&(*pd)->grant_table->lock);
                     printkd("%s: Policy violation in grant table sharing domain %x -> domain %x.\n",
                             __func__, (*pd)->domain_id, rdomid);
-                    goto out_gnttab;
+                    goto out;
                 }
             }
         }
+        spin_unlock(&(*pd)->grant_table->lock);
     }
     violation = 0;
- out_gnttab:
-    spin_unlock(&(*pd)->grant_table->lock);
 out:
     read_unlock(&domlist_lock);
     return violation;
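The fix above replaces a shared out_gnttab cleanup label with an explicit unlock on every early-exit path (and iterates grant entries rather than frames). A minimal stand-alone sketch of that locking pattern, using pthreads in place of Xen's spinlocks and hypothetical names throughout:

    #include <pthread.h>

    static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

    int scan_entries(int *entries, int n)
    {
        int violation = 1;
        int i;

        pthread_mutex_lock(&table_lock);
        for (i = 0; i < n; i++) {
            if (entries[i] < 0) {
                /* Unlock on the error path itself, not at a shared label,
                 * so the success path below can never unlock twice. */
                pthread_mutex_unlock(&table_lock);
                goto out;
            }
        }
        pthread_mutex_unlock(&table_lock);
        violation = 0;
    out:
        return violation;
    }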
--- a/xen/arch/x86/hvm/io.c	Tue Feb 20 15:12:11 2007 -0700
+++ b/xen/arch/x86/hvm/io.c	Tue Feb 20 15:43:57 2007 -0700
@@ -371,7 +371,20 @@ static void hvm_pio_assist(struct cpu_us
         {
             unsigned long addr = pio_opp->addr;
             if ( hvm_paging_enabled(current) )
-                (void)hvm_copy_to_guest_virt(addr, &p->data, p->size);
+            {
+                int rv = hvm_copy_to_guest_virt(addr, &p->data, p->size);
+                if ( rv != 0 )
+                {
+                    /* Failed on the page-spanning copy. Inject PF into
+                     * the guest for the address where we failed. */
+                    addr += p->size - rv;
+                    gdprintk(XENLOG_DEBUG, "Pagefault writing non-io side "
+                             "of a page-spanning PIO: va=%#lx\n", addr);
+                    hvm_inject_exception(TRAP_page_fault,
+                                         PFEC_write_access, addr);
+                    return;
+                }
+            }
             else
                 (void)hvm_copy_to_guest_phys(addr, &p->data, p->size);
         }
@@ -489,7 +502,20 @@ static void hvm_mmio_assist(struct cpu_u
         unsigned long addr = mmio_opp->addr;

         if (hvm_paging_enabled(current))
-            (void)hvm_copy_to_guest_virt(addr, &p->data, p->size);
+        {
+            int rv = hvm_copy_to_guest_virt(addr, &p->data, p->size);
+            if ( rv != 0 )
+            {
+                /* Failed on the page-spanning copy. Inject PF into
+                 * the guest for the address where we failed. */
+                addr += p->size - rv;
+                gdprintk(XENLOG_DEBUG, "Pagefault writing non-io side of "
+                         "a page-spanning MMIO: va=%#lx\n", addr);
+                hvm_inject_exception(TRAP_page_fault,
+                                     PFEC_write_access, addr);
+                return;
+            }
+        }
         else
             (void)hvm_copy_to_guest_phys(addr, &p->data, p->size);
     }
@@ -689,7 +715,18 @@ static void hvm_mmio_assist(struct cpu_u

     case INSTR_PUSH:
         mmio_opp->addr += hvm_get_segment_base(current, x86_seg_ss);
-        hvm_copy_to_guest_virt(mmio_opp->addr, &p->data, size);
+        {
+            unsigned long addr = mmio_opp->addr;
+            int rv = hvm_copy_to_guest_virt(addr, &p->data, size);
+            if ( rv != 0 )
+            {
+                addr += p->size - rv;
+                gdprintk(XENLOG_DEBUG, "Pagefault emulating PUSH from MMIO: "
+                         "va=%#lx\n", addr);
+                hvm_inject_exception(TRAP_page_fault, PFEC_write_access, addr);
+                return;
+            }
+        }
         break;
     }
 }
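All three hunks lean on one convention: hvm_copy_to_guest_virt() returns the number of bytes it could not copy, so the first faulting address of a page-spanning write is start + (size - remaining). A small self-contained sketch of that arithmetic, with a toy copy routine standing in for the real one:

    #include <stdio.h>

    /* Toy copy: succeeds only below 'limit', returns bytes NOT copied. */
    static int copy_until(unsigned long dst, unsigned long limit, int size)
    {
        int copied = (dst >= limit) ? 0 : (int)(limit - dst);
        if (copied > size)
            copied = size;
        return size - copied;
    }

    int main(void)
    {
        unsigned long addr = 0xffc;    /* write spans a 0x1000 page boundary */
        int size = 8;
        int rv = copy_until(addr, 0x1000, size);

        if (rv != 0) {
            /* Same arithmetic as the patch: skip the bytes that made it. */
            unsigned long fault_va = addr + size - rv;
            printf("fault at %#lx (copied %d of %d bytes)\n",
                   fault_va, size - rv, size);
        }
        return 0;
    }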
--- a/xen/arch/x86/hvm/platform.c	Tue Feb 20 15:12:11 2007 -0700
+++ b/xen/arch/x86/hvm/platform.c	Tue Feb 20 15:43:57 2007 -0700
@@ -815,7 +815,7 @@ int inst_copy_from_guest(unsigned char *
 }

 void send_pio_req(unsigned long port, unsigned long count, int size,
-                  long value, int dir, int df, int value_is_ptr)
+                  paddr_t value, int dir, int df, int value_is_ptr)
 {
     struct vcpu *v = current;
     vcpu_iodata_t *vio;
@@ -823,7 +823,7 @@ void send_pio_req(unsigned long port, un

     if ( size == 0 || count == 0 ) {
         printk("null pio request? port %lx, count %lx, "
-               "size %d, value %lx, dir %d, value_is_ptr %d.\n",
+               "size %d, value %"PRIpaddr", dir %d, value_is_ptr %d.\n",
                port, count, size, value, dir, value_is_ptr);
     }

@@ -849,15 +849,7 @@ void send_pio_req(unsigned long port, un

     p->io_count++;

-    if ( value_is_ptr )   /* get physical address of data */
-    {
-        if ( hvm_paging_enabled(current) )
-            p->data = paging_gva_to_gpa(current, value);
-        else
-            p->data = value; /* guest VA == guest PA */
-    }
-    else if ( dir == IOREQ_WRITE )
-        p->data = value;
+    p->data = value;

     if ( hvm_portio_intercept(p) )
     {
@@ -870,7 +862,7 @@ void send_pio_req(unsigned long port, un
 }

 static void send_mmio_req(unsigned char type, unsigned long gpa,
-                          unsigned long count, int size, long value,
+                          unsigned long count, int size, paddr_t value,
                           int dir, int df, int value_is_ptr)
 {
     struct vcpu *v = current;
@@ -879,7 +871,8 @@ static void send_mmio_req(unsigned char

     if ( size == 0 || count == 0 ) {
         printk("null mmio request? type %d, gpa %lx, "
-               "count %lx, size %d, value %lx, dir %d, value_is_ptr %d.\n",
+               "count %lx, size %d, value %"PRIpaddr"x, dir %d, "
+               "value_is_ptr %d.\n",
                type, gpa, count, size, value, dir, value_is_ptr);
     }

@@ -905,15 +898,7 @@ static void send_mmio_req(unsigned char

     p->io_count++;

-    if ( value_is_ptr )
-    {
-        if ( hvm_paging_enabled(v) )
-            p->data = paging_gva_to_gpa(v, value);
-        else
-            p->data = value; /* guest VA == guest PA */
-    }
-    else
-        p->data = value;
+    p->data = value;

     if ( hvm_mmio_intercept(p) || hvm_buffered_io_intercept(p) )
     {
@@ -960,6 +945,7 @@ static void mmio_operands(int type, unsi
 #define GET_REPEAT_COUNT() \
     (mmio_op->flags & REPZ ? (ad_size == WORD ? regs->ecx & 0xFFFF : regs->ecx) : 1)

+
 void handle_mmio(unsigned long gpa)
 {
     unsigned long inst_addr;
@@ -1014,7 +1000,8 @@ void handle_mmio(unsigned long gpa)
     {
         unsigned long count = GET_REPEAT_COUNT();
         int sign = regs->eflags & X86_EFLAGS_DF ? -1 : 1;
-        unsigned long addr;
+        unsigned long addr, gfn;
+        paddr_t paddr;
         int dir, size = op_size;

         ASSERT(count);
@@ -1024,7 +1011,9 @@ void handle_mmio(unsigned long gpa)
             if ( ad_size == WORD )
                 addr &= 0xFFFF;
             addr += hvm_get_segment_base(v, x86_seg_es);
-            if ( paging_gva_to_gpa(v, addr) == gpa )
+            gfn = paging_gva_to_gfn(v, addr);
+            paddr = (paddr_t)gfn << PAGE_SHIFT | (addr & ~PAGE_MASK);
+            if ( paddr == gpa )
             {
                 enum x86_segment seg;

@@ -1044,10 +1033,24 @@ void handle_mmio(unsigned long gpa)
                 default: domain_crash_synchronous();
                 }
                 addr += hvm_get_segment_base(v, seg);
+                gfn = paging_gva_to_gfn(v, addr);
+                paddr = (paddr_t)gfn << PAGE_SHIFT | (addr & ~PAGE_MASK);
             }
             else
                 dir = IOREQ_READ;

+        if ( gfn == INVALID_GFN )
+        {
+            /* The guest does not have the non-mmio address mapped.
+             * Need to send in a page fault */
+            int errcode = 0;
+            /* IO read --> memory write */
+            if ( dir == IOREQ_READ ) errcode |= PFEC_write_access;
+            regs->eip -= inst_len; /* do not advance %eip */
+            hvm_inject_exception(TRAP_page_fault, errcode, addr);
+            return;
+        }
+
         /*
          * In case of a movs spanning multiple pages, we break the accesses
          * up into multiple pages (the device model works with non-continguous
@@ -1065,10 +1068,27 @@ void handle_mmio(unsigned long gpa)

         if ( dir == IOREQ_WRITE ) {
             if ( hvm_paging_enabled(v) )
-                (void)hvm_copy_from_guest_virt(&value, addr, size);
+            {
+                int rv = hvm_copy_from_guest_virt(&value, addr, size);
+                if ( rv != 0 )
+                {
+                    /* Failed on the page-spanning copy. Inject PF into
+                     * the guest for the address where we failed */
+                    regs->eip -= inst_len; /* do not advance %eip */
+                    /* Must set CR2 at the failing address */
+                    addr += size - rv;
+                    gdprintk(XENLOG_DEBUG, "Pagefault on non-io side of a "
+                             "page-spanning MMIO: va=%#lx\n", addr);
+                    hvm_inject_exception(TRAP_page_fault, 0, addr);
+                    return;
+                }
+            }
             else
-                (void)hvm_copy_from_guest_phys(&value, addr, size);
-        } else
+                (void) hvm_copy_from_guest_phys(&value, addr, size);
+        } else /* dir != IOREQ_WRITE */
+            /* Remember where to write the result, as a *VA*.
+             * Must be a VA so we can handle the page overlap
+             * correctly in hvm_mmio_assist() */
             mmio_op->addr = addr;

         if ( count != 1 )
@@ -1091,7 +1111,8 @@ void handle_mmio(unsigned long gpa)

         ASSERT(count);

-        send_mmio_req(IOREQ_TYPE_COPY, gpa, count, size, addr, dir, df, 1);
+        send_mmio_req(IOREQ_TYPE_COPY, gpa, count, size,
+                      paddr, dir, df, 1);
     }
     break;
 }
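The recurring step in this change is composing a guest-physical address from a looked-up frame number plus the page offset of the virtual address, bailing out on INVALID_GFN. A compact sketch under assumed constants; the lookup function here is a hypothetical stand-in for paging_gva_to_gfn():

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT   12
    #define PAGE_SIZE    (1UL << PAGE_SHIFT)
    #define PAGE_MASK    (~(PAGE_SIZE - 1))
    #define INVALID_GFN  (~0UL)

    typedef uint64_t paddr_t;

    /* Stand-in lookup: exactly one page is mapped, everything else fails. */
    static unsigned long gva_to_gfn(unsigned long va)
    {
        return (va >> PAGE_SHIFT) == 0x7f001 ? 0x1234 : INVALID_GFN;
    }

    int main(void)
    {
        unsigned long va = 0x7f001abc;
        unsigned long gfn = gva_to_gfn(va);

        if (gfn == INVALID_GFN) {
            puts("unmapped: the caller would inject a page fault here");
            return 1;
        }
        /* Widen before shifting so the frame number is not truncated. */
        paddr_t pa = ((paddr_t)gfn << PAGE_SHIFT) | (va & ~PAGE_MASK);
        printf("va %#lx -> pa %#llx\n", va, (unsigned long long)pa);
        return 0;
    }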
--- a/xen/arch/x86/hvm/svm/svm.c	Tue Feb 20 15:12:11 2007 -0700
+++ b/xen/arch/x86/hvm/svm/svm.c	Tue Feb 20 15:43:57 2007 -0700
@@ -1589,6 +1589,8 @@ static void svm_io_instruction(struct vc
     if (info.fields.str)
     {
         unsigned long addr, count;
+        paddr_t paddr;
+        unsigned long gfn;
         int sign = regs->eflags & X86_EFLAGS_DF ? -1 : 1;

         if (!svm_get_io_address(v, regs, size, info, &count, &addr))
@@ -1606,6 +1608,20 @@ static void svm_io_instruction(struct vc
             pio_opp->flags |= REPZ;
         }

+        /* Translate the address to a physical address */
+        gfn = paging_gva_to_gfn(v, addr);
+        if ( gfn == INVALID_GFN )
+        {
+            /* The guest does not have the RAM address mapped.
+             * Need to send in a page fault */
+            int errcode = 0;
+            /* IO read --> memory write */
+            if ( dir == IOREQ_READ ) errcode |= PFEC_write_access;
+            svm_hvm_inject_exception(TRAP_page_fault, errcode, addr);
+            return;
+        }
+        paddr = (paddr_t)gfn << PAGE_SHIFT | (addr & ~PAGE_MASK);
+
         /*
          * Handle string pio instructions that cross pages or that
          * are unaligned. See the comments in hvm_platform.c/handle_mmio()
@@ -1619,11 +1635,27 @@ static void svm_io_instruction(struct vc

             if (dir == IOREQ_WRITE)   /* OUTS */
             {
-                if (hvm_paging_enabled(current))
-                    (void)hvm_copy_from_guest_virt(&value, addr, size);
+                if ( hvm_paging_enabled(current) )
+                {
+                    int rv = hvm_copy_from_guest_virt(&value, addr, size);
+                    if ( rv != 0 )
+                    {
+                        /* Failed on the page-spanning copy. Inject PF into
+                         * the guest for the address where we failed. */
+                        addr += size - rv;
+                        gdprintk(XENLOG_DEBUG, "Pagefault reading non-io side "
+                                 "of a page-spanning PIO: va=%#lx\n", addr);
+                        svm_hvm_inject_exception(TRAP_page_fault, 0, addr);
+                        return;
+                    }
+                }
                 else
-                    (void)hvm_copy_from_guest_phys(&value, addr, size);
-            }
+                    (void) hvm_copy_from_guest_phys(&value, addr, size);
+            } else /* dir != IOREQ_WRITE */
+                /* Remember where to write the result, as a *VA*.
+                 * Must be a VA so we can handle the page overlap
+                 * correctly in hvm_pio_assist() */
+                pio_opp->addr = addr;

             if (count == 1)
                 regs->eip = vmcb->exitinfo2;
@@ -1645,7 +1677,7 @@ static void svm_io_instruction(struct vc
             else
                 regs->eip = vmcb->exitinfo2;

-            send_pio_req(port, count, size, addr, dir, df, 1);
+            send_pio_req(port, count, size, paddr, dir, df, 1);
         }
     }
     else
@@ -2718,7 +2750,8 @@ asmlinkage void svm_vmexit_handler(struc
     if (svm_dbg_on && exit_reason == VMEXIT_EXCEPTION_PF)
     {
         if (svm_paging_enabled(v) &&
-            !mmio_space(paging_gva_to_gpa(current, vmcb->exitinfo2)))
+            !mmio_space(
+                paging_gva_to_gfn(current, vmcb->exitinfo2) << PAGE_SHIFT))
         {
             printk("I%08ld,ExC=%s(%d),IP=%x:%"PRIx64","
                    "I1=%"PRIx64",I2=%"PRIx64",INT=%"PRIx64", "
@@ -2728,7 +2761,8 @@ asmlinkage void svm_vmexit_handler(struc
                    (u64)vmcb->exitinfo1,
                    (u64)vmcb->exitinfo2,
                    (u64)vmcb->exitintinfo.bytes,
-                   (u64)paging_gva_to_gpa(current, vmcb->exitinfo2));
+                   (((u64)paging_gva_to_gfn(current, vmcb->exitinfo2)
+                     << PAGE_SHIFT) | (vmcb->exitinfo2 & ~PAGE_MASK)));
         }
         else
         {
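One detail worth calling out in the hunk above: for a string-I/O read (INS) the port is read but guest memory is written, so a failed translation must be reported to the guest as a write fault. A tiny sketch of that error-code choice; the IOREQ constants are illustrative stand-ins, while 0x2 is the x86 #PF write bit:

    #include <stdio.h>

    #define IOREQ_WRITE       0    /* memory -> device (OUTS), illustrative */
    #define IOREQ_READ        1    /* device -> memory (INS), illustrative  */
    #define PFEC_write_access 0x2  /* bit 1 of the x86 #PF error code       */

    static int pf_error_code(int dir)
    {
        int errcode = 0;
        if (dir == IOREQ_READ)     /* I/O read --> guest memory write */
            errcode |= PFEC_write_access;
        return errcode;
    }

    int main(void)
    {
        printf("INS  faults with errcode %#x\n", pf_error_code(IOREQ_READ));
        printf("OUTS faults with errcode %#x\n", pf_error_code(IOREQ_WRITE));
        return 0;
    }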
--- a/xen/arch/x86/hvm/vmx/vmx.c	Tue Feb 20 15:12:11 2007 -0700
+++ b/xen/arch/x86/hvm/vmx/vmx.c	Tue Feb 20 15:43:57 2007 -0700
@@ -1426,6 +1426,8 @@ static void vmx_io_instruction(unsigned

     if ( test_bit(4, &exit_qualification) ) { /* string instruction */
         unsigned long addr, count = 1, base;
+        paddr_t paddr;
+        unsigned long gfn;
         u32 ar_bytes, limit;
         int sign = regs->eflags & X86_EFLAGS_DF ? -1 : 1;
         int long_mode = 0;
@@ -1545,6 +1547,20 @@ static void vmx_io_instruction(unsigned
         }
 #endif

+        /* Translate the address to a physical address */
+        gfn = paging_gva_to_gfn(current, addr);
+        if ( gfn == INVALID_GFN )
+        {
+            /* The guest does not have the RAM address mapped.
+             * Need to send in a page fault */
+            int errcode = 0;
+            /* IO read --> memory write */
+            if ( dir == IOREQ_READ ) errcode |= PFEC_write_access;
+            vmx_inject_exception(TRAP_page_fault, errcode, addr);
+            return;
+        }
+        paddr = (paddr_t)gfn << PAGE_SHIFT | (addr & ~PAGE_MASK);
+
         /*
          * Handle string pio instructions that cross pages or that
          * are unaligned. See the comments in hvm_domain.c/handle_mmio()
@@ -1557,10 +1573,25 @@ static void vmx_io_instruction(unsigned
         if ( dir == IOREQ_WRITE ) /* OUTS */
         {
             if ( hvm_paging_enabled(current) )
-                (void)hvm_copy_from_guest_virt(&value, addr, size);
+            {
+                int rv = hvm_copy_from_guest_virt(&value, addr, size);
+                if ( rv != 0 )
+                {
+                    /* Failed on the page-spanning copy. Inject PF into
+                     * the guest for the address where we failed. */
+                    addr += size - rv;
+                    gdprintk(XENLOG_DEBUG, "Pagefault reading non-io side "
+                             "of a page-spanning PIO: va=%#lx\n", addr);
+                    vmx_inject_exception(TRAP_page_fault, 0, addr);
+                    return;
+                }
+            }
             else
-                (void)hvm_copy_from_guest_phys(&value, addr, size);
-        } else
+                (void) hvm_copy_from_guest_phys(&value, addr, size);
+        } else /* dir != IOREQ_WRITE */
+            /* Remember where to write the result, as a *VA*.
+             * Must be a VA so we can handle the page overlap
+             * correctly in hvm_pio_assist() */
             pio_opp->addr = addr;

         if ( count == 1 )
@@ -1580,7 +1611,7 @@ static void vmx_io_instruction(unsigned
         } else
             regs->eip += inst_len;

-        send_pio_req(port, count, size, addr, dir, df, 1);
+        send_pio_req(port, count, size, paddr, dir, df, 1);
         }
     } else {
         if ( port == 0xe9 && dir == IOREQ_WRITE && size == 1 )
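Both vendors' paths also defer to the page-crossing logic referenced in the comments: an ioreq carries a single physical address, so a rep-string transfer is clamped at the page boundary and the remainder is handled when the instruction re-faults. An illustrative sketch of that clamping for the ascending (DF=0) case only; this is not the exact hypervisor logic:

    #include <stdio.h>

    #define PAGE_SIZE 4096UL

    static unsigned long clamp_count(unsigned long addr, unsigned long count,
                                     unsigned long size)
    {
        /* Repetitions that still fit in the current page. */
        unsigned long left = PAGE_SIZE - (addr & (PAGE_SIZE - 1));
        unsigned long max  = left / size;
        return (count < max) ? count : max;
    }

    int main(void)
    {
        /* 16 dword reps starting 8 bytes before a page boundary: 2 fit. */
        printf("%lu reps this round\n", clamp_count(0xff8, 16, 4));
        return 0;
    }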
--- a/xen/arch/x86/mm/shadow/multi.c	Tue Feb 20 15:12:11 2007 -0700
+++ b/xen/arch/x86/mm/shadow/multi.c	Tue Feb 20 15:43:57 2007 -0700
@@ -3038,19 +3038,6 @@ sh_gva_to_gfn(struct vcpu *v, unsigned l
 }


-static paddr_t
-sh_gva_to_gpa(struct vcpu *v, unsigned long va)
-/* Called to translate a guest virtual address to what the *guest*
- * pagetables would map it to. */
-{
-    unsigned long gfn = sh_gva_to_gfn(v, va);
-    if ( gfn == INVALID_GFN )
-        return 0;
-    else
-        return (((paddr_t)gfn) << PAGE_SHIFT) + (va & ~PAGE_MASK);
-}
-
-
 static inline void
 sh_update_linear_entries(struct vcpu *v)
 /* Sync up all the linear mappings for this vcpu's pagetables */
@@ -3932,8 +3919,7 @@ static int safe_not_to_verify_write(mfn_
 #if (SHADOW_OPTIMIZATIONS & SHOPT_SKIP_VERIFY)
     struct page_info *pg = mfn_to_page(gmfn);
     if ( !(pg->shadow_flags & SHF_32)
-         && bytes == 4
-         && ((unsigned long)dst & 3) == 0 )
+         && ((unsigned long)dst & 7) == 0 )
     {
         /* Not shadowed 32-bit: aligned 64-bit writes that leave the
          * present bit unset are safe to ignore. */
@@ -3942,8 +3928,7 @@ static int safe_not_to_verify_write(mfn_
         return 1;
     }
     else if ( !(pg->shadow_flags & (SHF_PAE|SHF_64))
-              && bytes == 8
-              && ((unsigned long)dst & 7) == 0 )
+              && ((unsigned long)dst & 3) == 0 )
     {
         /* Not shadowed PAE/64-bit: aligned 32-bit writes that leave the
          * present bit unset are safe to ignore. */
@@ -4350,7 +4335,6 @@ int sh_audit_l4_table(struct vcpu *v, mf
 struct paging_mode sh_paging_mode = {
     .page_fault                    = sh_page_fault,
     .invlpg                        = sh_invlpg,
-    .gva_to_gpa                    = sh_gva_to_gpa,
     .gva_to_gfn                    = sh_gva_to_gfn,
     .update_cr3                    = sh_update_cr3,
     .update_paging_modes           = shadow_update_paging_modes,
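The safe_not_to_verify_write() hunks correct which alignment mask goes with which guest PTE width. The underlying shortcut: an aligned write whose payload leaves the present bit clear cannot bring a new translation into being, so re-validating the shadow can be skipped. A simplified sketch; the flag value matches x86's _PAGE_PRESENT, the rest is illustrative:

    #include <stdint.h>
    #include <stdio.h>

    #define _PAGE_PRESENT 0x1ULL

    static int safe_to_ignore(const void *dst, uint64_t payload, int pte_bytes)
    {
        /* The write must be aligned to the PTE size for the shortcut. */
        if ( ((uintptr_t)dst & (pte_bytes - 1)) != 0 )
            return 0;
        return !(payload & _PAGE_PRESENT);  /* not-present: nothing to verify */
    }

    int main(void)
    {
        uint64_t pte = 0;
        printf("%d\n", safe_to_ignore(&pte, 0x0, 8));  /* 1: skip verify     */
        printf("%d\n", safe_to_ignore(&pte, 0x1, 8));  /* 0: present bit set */
        return 0;
    }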
--- a/xen/arch/x86/mm/shadow/types.h	Tue Feb 20 15:12:11 2007 -0700
+++ b/xen/arch/x86/mm/shadow/types.h	Tue Feb 20 15:43:57 2007 -0700
@@ -244,6 +244,7 @@ static inline shadow_l4e_t shadow_l4e_fr

 /* Type of the guest's frame numbers */
 TYPE_SAFE(u32,gfn)
+#undef INVALID_GFN
 #define INVALID_GFN ((u32)(-1u))
 #define SH_PRI_gfn "05x"

@@ -307,6 +308,7 @@ static inline guest_l2e_t guest_l2e_from

 /* Type of the guest's frame numbers */
 TYPE_SAFE(unsigned long,gfn)
+#undef INVALID_GFN
 #define INVALID_GFN ((unsigned long)(-1ul))
 #define SH_PRI_gfn "05lx"

@@ -467,7 +469,6 @@ struct shadow_walk_t
  */
 #define sh_page_fault              INTERNAL_NAME(sh_page_fault)
 #define sh_invlpg                  INTERNAL_NAME(sh_invlpg)
-#define sh_gva_to_gpa              INTERNAL_NAME(sh_gva_to_gpa)
 #define sh_gva_to_gfn              INTERNAL_NAME(sh_gva_to_gfn)
 #define sh_update_cr3              INTERNAL_NAME(sh_update_cr3)
 #define sh_rm_write_access_from_l1 INTERNAL_NAME(sh_rm_write_access_from_l1)
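The #undef additions exist because paging.h now provides a generic INVALID_GFN, and this header, compiled once per guest paging width, redefines it with a width-correct value. A stand-alone illustration of that redefinition pattern; the names mirror the patch but the program itself is hypothetical:

    #include <stdio.h>

    /* Generic definition, as a common header would provide it. */
    #define INVALID_GFN (-1UL)

    /* Narrow guest: frame numbers are 32 bits wide, so override the
     * sentinel; the #undef avoids a macro-redefinition warning. */
    #undef INVALID_GFN
    #define INVALID_GFN ((unsigned int)(-1u))

    int main(void)
    {
        printf("sentinel = %#x\n", INVALID_GFN);
        return 0;
    }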
--- a/xen/arch/x86/x86_emulate.c	Tue Feb 20 15:12:11 2007 -0700
+++ b/xen/arch/x86/x86_emulate.c	Tue Feb 20 15:43:57 2007 -0700
@@ -519,6 +519,37 @@ do {
        ? (uint16_t)_regs.eip : (uint32_t)_regs.eip);                   \
 } while (0)

+static int __handle_rep_prefix(
+    struct cpu_user_regs *int_regs,
+    struct cpu_user_regs *ext_regs,
+    int ad_bytes)
+{
+    unsigned long ecx = ((ad_bytes == 2) ? (uint16_t)int_regs->ecx :
+                         (ad_bytes == 4) ? (uint32_t)int_regs->ecx :
+                         int_regs->ecx);
+
+    if ( ecx-- == 0 )
+    {
+        ext_regs->eip = int_regs->eip;
+        return 1;
+    }
+
+    if ( ad_bytes == 2 )
+        *(uint16_t *)&int_regs->ecx = ecx;
+    else if ( ad_bytes == 4 )
+        int_regs->ecx = (uint32_t)ecx;
+    else
+        int_regs->ecx = ecx;
+    int_regs->eip = ext_regs->eip;
+    return 0;
+}
+
+#define handle_rep_prefix()                                                \
+do {                                                                       \
+    if ( rep_prefix && __handle_rep_prefix(&_regs, ctxt->regs, ad_bytes) ) \
+        goto done;                                                         \
+} while (0)
+
 /*
  * Unsigned multiplication with double-word result.
  * IN:  Multiplicand=m[0], Multiplier=m[1]
@@ -1579,17 +1610,6 @@ x86_emulate(
     if ( twobyte )
         goto twobyte_special_insn;

-    if ( rep_prefix )
-    {
-        if ( _regs.ecx == 0 )
-        {
-            ctxt->regs->eip = _regs.eip;
-            goto done;
-        }
-        _regs.ecx--;
-        _regs.eip = ctxt->regs->eip;
-    }
-
     switch ( b )
     {
     case 0x27: /* daa */ {
@@ -1727,6 +1747,7 @@ x86_emulate(
         break;

     case 0x6c ... 0x6d: /* ins %dx,%es:%edi */
+        handle_rep_prefix();
         generate_exception_if(!mode_iopl(), EXC_GP);
         dst.type  = OP_MEM;
         dst.bytes = !(b & 1) ? 1 : (op_bytes == 8) ? 4 : op_bytes;
@@ -1741,6 +1762,7 @@ x86_emulate(
         break;

     case 0x6e ... 0x6f: /* outs %esi,%dx */
+        handle_rep_prefix();
         generate_exception_if(!mode_iopl(), EXC_GP);
         dst.bytes = !(b & 1) ? 1 : (op_bytes == 8) ? 4 : op_bytes;
         if ( (rc = ops->read(ea.mem.seg, truncate_ea(_regs.esi),
@@ -1827,6 +1849,7 @@ x86_emulate(
         break;

     case 0xa4 ... 0xa5: /* movs */
+        handle_rep_prefix();
         dst.type  = OP_MEM;
         dst.bytes = (d & ByteOp) ? 1 : op_bytes;
         dst.mem.seg = x86_seg_es;
@@ -1841,6 +1864,7 @@ x86_emulate(
         break;

     case 0xaa ... 0xab: /* stos */
+        handle_rep_prefix();
         dst.type  = OP_MEM;
         dst.bytes = (d & ByteOp) ? 1 : op_bytes;
         dst.mem.seg = x86_seg_es;
@@ -1851,6 +1875,7 @@ x86_emulate(
         break;

     case 0xac ... 0xad: /* lods */
+        handle_rep_prefix();
         dst.type  = OP_REG;
         dst.bytes = (d & ByteOp) ? 1 : op_bytes;
         dst.reg   = (unsigned long *)&_regs.eax;
@@ -2325,33 +2350,23 @@ x86_emulate(
 #endif

     case 0xc8 ... 0xcf: /* bswap */
-        dst.type  = OP_REG;
-        dst.reg   = decode_register(b & 7, &_regs, 0);
-        dst.val   = *dst.reg;
+        dst.type = OP_REG;
+        dst.reg  = decode_register(
+            (b & 7) | ((rex_prefix & 1) << 3), &_regs, 0);
         switch ( dst.bytes = op_bytes )
         {
-        case 2:
-            dst.val = (((dst.val & 0x00FFUL) << 8) |
-                       ((dst.val & 0xFF00UL) >> 8));
+        default: /* case 2: */
+            /* Undefined behaviour. Writes zero on all tested CPUs. */
+            dst.val = 0;
             break;
         case 4:
-            dst.val = (((dst.val & 0x000000FFUL) << 24) |
-                       ((dst.val & 0x0000FF00UL) << 8) |
-                       ((dst.val & 0x00FF0000UL) >> 8) |
-                       ((dst.val & 0xFF000000UL) >> 24));
+#ifdef __x86_64__
+            __asm__ ( "bswap %k0" : "=r" (dst.val) : "0" (*dst.reg) );
             break;
-#ifdef __x86_64__
         case 8:
-            dst.val = (((dst.val & 0x00000000000000FFUL) << 56) |
-                       ((dst.val & 0x000000000000FF00UL) << 40) |
-                       ((dst.val & 0x0000000000FF0000UL) << 24) |
-                       ((dst.val & 0x00000000FF000000UL) << 8) |
-                       ((dst.val & 0x000000FF00000000UL) >> 8) |
-                       ((dst.val & 0x0000FF0000000000UL) >> 24) |
-                       ((dst.val & 0x00FF000000000000UL) >> 40) |
-                       ((dst.val & 0xFF00000000000000UL) >> 56));
+#endif
+            __asm__ ( "bswap %0" : "=r" (dst.val) : "0" (*dst.reg) );
             break;
-#endif
         }
         break;
     }
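Besides hoisting rep handling into the per-instruction handle_rep_prefix(), the bswap change widens register decoding: the low three opcode bits name the register and REX.B contributes bit 3, reaching r8-r15 in 64-bit mode. A minimal sketch of that decode with illustrative byte values:

    #include <stdio.h>

    int main(void)
    {
        unsigned char b = 0xc9;          /* bswap opcode byte: 0xc8 + reg */
        unsigned char rex_prefix = 0x41; /* REX with the B bit set        */

        /* Low 3 bits from the opcode, bit 3 from REX.B. */
        int reg = (b & 7) | ((rex_prefix & 1) << 3);
        printf("register number = %d\n", reg);  /* 1 | 8 = 9 -> %r9 */
        return 0;
    }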
--- a/xen/include/asm-x86/hvm/io.h	Tue Feb 20 15:12:11 2007 -0700
+++ b/xen/include/asm-x86/hvm/io.h	Tue Feb 20 15:43:57 2007 -0700
@@ -144,7 +144,7 @@ static inline int irq_masked(unsigned lo
 #endif

 extern void send_pio_req(unsigned long port, unsigned long count, int size,
-                         long value, int dir, int df, int value_is_ptr);
+                         paddr_t value, int dir, int df, int value_is_ptr);
 extern void handle_mmio(unsigned long gpa);
 extern void hvm_interrupt_post(struct vcpu *v, int vector, int type);
 extern void hvm_io_assist(struct vcpu *v);
--- a/xen/include/asm-x86/p2m.h	Tue Feb 20 15:12:11 2007 -0700
+++ b/xen/include/asm-x86/p2m.h	Tue Feb 20 15:43:57 2007 -0700
@@ -89,7 +89,7 @@ static inline unsigned long get_mfn_from
 /* Is this guest address an mmio one? (i.e. not defined in p2m map) */
 static inline int mmio_space(paddr_t gpa)
 {
-    unsigned long gfn = gpa >> PAGE_SHIFT;
+    unsigned long gfn = gpa >> PAGE_SHIFT;
     return !mfn_valid(mfn_x(gfn_to_mfn_current(gfn)));
 }
--- a/xen/include/asm-x86/paging.h	Tue Feb 20 15:12:11 2007 -0700
+++ b/xen/include/asm-x86/paging.h	Tue Feb 20 15:43:57 2007 -0700
@@ -115,7 +115,6 @@ struct paging_mode {
     int           (*page_fault            )(struct vcpu *v, unsigned long va,
                                             struct cpu_user_regs *regs);
     int           (*invlpg                )(struct vcpu *v, unsigned long va);
-    paddr_t       (*gva_to_gpa            )(struct vcpu *v, unsigned long va);
     unsigned long (*gva_to_gfn            )(struct vcpu *v, unsigned long va);
     void          (*update_cr3            )(struct vcpu *v, int do_locking);
     void          (*update_paging_modes   )(struct vcpu *v);
@@ -190,18 +189,10 @@ static inline int paging_invlpg(struct v
     return v->arch.paging.mode->invlpg(v, va);
 }

-/* Translate a guest virtual address to the physical address that the
- * *guest* pagetables would map it to. */
-static inline paddr_t paging_gva_to_gpa(struct vcpu *v, unsigned long va)
-{
-    if ( unlikely(!paging_vcpu_mode_translate(v)) )
-        return (paddr_t) va;
-
-    return v->arch.paging.mode->gva_to_gpa(v, va);
-}
-
 /* Translate a guest virtual address to the frame number that the
- * *guest* pagetables would map it to. */
+ * *guest* pagetables would map it to. Returns INVALID_GFN if the guest
+ * tables don't map this address. */
+#define INVALID_GFN (-1UL)
 static inline unsigned long paging_gva_to_gfn(struct vcpu *v, unsigned long va)
 {
     if ( unlikely(!paging_vcpu_mode_translate(v)) )
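Dropping paging_gva_to_gpa() is an interface choice, not just a cleanup: the old helper collapsed a failed translation into 0, which is indistinguishable from a genuine mapping in frame 0, while gva_to_gfn plus the INVALID_GFN sentinel lets callers inject a page fault instead of silently using a bogus address. A hypothetical before/after sketch of why the sentinel matters:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT  12
    #define PAGE_MASK   (~((1UL << PAGE_SHIFT) - 1))
    #define INVALID_GFN (-1UL)

    typedef uint64_t paddr_t;

    /* Stand-in translation: only one page is mapped, and it is frame 0. */
    static unsigned long gva_to_gfn(unsigned long va)
    {
        return (va >> PAGE_SHIFT) == 0x10 ? 0 : INVALID_GFN;
    }

    /* Old-style helper: failure collapses to 0, but frame 0 is a legal
     * answer too, so the caller cannot tell the two cases apart. */
    static paddr_t gva_to_gpa_old(unsigned long va)
    {
        unsigned long gfn = gva_to_gfn(va);
        if (gfn == INVALID_GFN)
            return 0;
        return ((paddr_t)gfn << PAGE_SHIFT) | (va & ~PAGE_MASK);
    }

    int main(void)
    {
        /* Both results fall in frame 0; only the gfn sentinel tells the
         * unmapped case apart. */
        printf("mapped:   old=%#llx gfn=%#lx\n",
               (unsigned long long)gva_to_gpa_old(0x10abc), gva_to_gfn(0x10abc));
        printf("unmapped: old=%#llx gfn=%#lx\n",
               (unsigned long long)gva_to_gpa_old(0x99abc), gva_to_gfn(0x99abc));
        return 0;
    }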