ia64/xen-unstable
changeset 11713:96a77ef725b8
[XEN][HVM] Fix a bug which could lead to the guest locking up if it
tried to do a backwards memcpy across a page boundary in an MMIO region.
This should fix the problems some people have been reporting when
running vim.
Signed-off-by: Steven Smith <sos22@cam.ac.uk>
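
A minimal sketch (editor's illustration, not part of the changeset) of the
lockup the description refers to: for a descending (backwards) movs whose
first access lands on the first byte of a page, the old count computation in
handle_mmio() yields zero, so a null MMIO request is sent while %eip is left
unadvanced, and the guest re-executes the instruction forever. The addr/size
values below are made up; PAGE_SIZE/PAGE_MASK mirror the usual x86
definitions.

    #include <stdio.h>

    #define PAGE_SIZE 4096UL
    #define PAGE_MASK (~(PAGE_SIZE - 1))

    int main(void)
    {
        unsigned long addr = 0x1000; /* first access: offset 0 in its page */
        unsigned long size = 1;      /* movsb */

        /* Old computation: elements left before the start of the page.
           Zero here, so the request covers nothing and makes no progress. */
        unsigned long old_count = (addr & ~PAGE_MASK) / size;

        /* Fixed computation: the +1 includes the element at addr itself,
           advancing to one byte before the start of the page. */
        unsigned long new_count = ((addr + 1) & ~PAGE_MASK) / size;

        printf("old count = %lu, new count = %lu\n", old_count, new_count);
        return 0;
    }

This prints "old count = 0, new count = 1": the fixed code processes the one
element in the current page and then retries the instruction for the rest.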
author    Steven Smith <ssmith@xensource.com>
date      Mon Oct 02 13:45:44 2006 +0100 (2006-10-02)
parents   02311d8aba86
children  0d796dced5f7 0d120850741a
files     xen/arch/x86/hvm/platform.c
line diff
--- a/xen/arch/x86/hvm/platform.c	Sat Sep 30 11:35:02 2006 +0100
+++ b/xen/arch/x86/hvm/platform.c	Mon Oct 02 13:45:44 2006 +0100
@@ -730,6 +730,11 @@ void send_pio_req(struct cpu_user_regs *
     vcpu_iodata_t *vio;
     ioreq_t *p;
 
+    if (size == 0 || count == 0) {
+        printf("null pio request? port %lx, count %lx, size %d, value %lx, dir %d, pvalid %d.\n",
+               port, count, size, value, dir, pvalid);
+    }
+
     vio = get_vio(v->domain, v->vcpu_id);
     if (vio == NULL) {
         printk("bad shared page: %lx\n", (unsigned long) vio);
@@ -768,7 +773,7 @@ void send_pio_req(struct cpu_user_regs *
     hvm_send_assist_req(v);
 }
 
-void send_mmio_req(
+static void send_mmio_req(
     unsigned char type, unsigned long gpa,
     unsigned long count, int size, long value, int dir, int pvalid)
 {
@@ -777,6 +782,11 @@ void send_mmio_req(
     ioreq_t *p;
     struct cpu_user_regs *regs;
 
+    if (size == 0 || count == 0) {
+        printf("null mmio request? type %d, gpa %lx, count %lx, size %d, value %lx, dir %d, pvalid %d.\n",
+               type, gpa, count, size, value, dir, pvalid);
+    }
+
     regs = &current->arch.hvm_vcpu.io_op.io_context;
 
     vio = get_vio(v->domain, v->vcpu_id);
@@ -918,6 +928,8 @@ void handle_mmio(unsigned long va, unsig
     unsigned long addr = 0;
     int dir;
 
+    ASSERT(count);
+
     /* determine non-MMIO address */
     if (realmode) {
         if (((regs->es << 4) + (regs->edi & 0xFFFF)) == va) {
@@ -940,6 +952,9 @@ void handle_mmio(unsigned long va, unsig
         mmio_opp->flags = mmio_inst.flags;
         mmio_opp->instr = mmio_inst.instr;
 
+        if (addr & (size - 1))
+            DPRINTK("Unaligned ioport access: %lx, %ld\n", addr, size);
+
         /*
          * In case of a movs spanning multiple pages, we break the accesses
          * up into multiple pages (the device model works with non-continguous
@@ -953,6 +968,7 @@ void handle_mmio(unsigned long va, unsig
         if ((addr & PAGE_MASK) != ((addr + sign * (size - 1)) & PAGE_MASK)) {
             unsigned long value = 0;
 
+            DPRINTK("Single io request in a movs crossing page boundary.\n");
            mmio_opp->flags |= OVERLAP;
 
             regs->eip -= inst_len; /* do not advance %eip */
@@ -964,12 +980,19 @@ void handle_mmio(unsigned long va, unsig
             if ((addr & PAGE_MASK) != ((addr + sign * (count * size - 1)) & PAGE_MASK)) {
                 regs->eip -= inst_len; /* do not advance %eip */
 
-                if (sign > 0)
+                if (sign > 0) {
                     count = (PAGE_SIZE - (addr & ~PAGE_MASK)) / size;
-                else
-                    count = (addr & ~PAGE_MASK) / size;
+                } else {
+                    /* We need to make sure we advance to the point
+                       where the next request will be on a different
+                       page. If we're going down, that means
+                       advancing until one byte before the start of
+                       the page, hence +1. */
+                    count = ((addr + 1) & ~PAGE_MASK) / size;
+                }
             }
 
+            ASSERT(count);
             send_mmio_req(IOREQ_TYPE_COPY, gpa, count, size, addr, dir, 1);
         }
         break;
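
For symmetry, a second sketch (again the editor's, with made-up numbers) of
the ascending branch the patch leaves unchanged apart from bracing: a movs
that would run past the end of the current page has its count clipped to the
elements remaining in that page, and the instruction is re-executed for the
remainder. This is the sign = 1 case, so the sign factor drops out of the
boundary test.

    #include <stdio.h>

    #define PAGE_SIZE 4096UL
    #define PAGE_MASK (~(PAGE_SIZE - 1))

    int main(void)
    {
        unsigned long addr  = 0xff8; /* 8 bytes before a page boundary */
        unsigned long size  = 4;     /* movsd */
        unsigned long count = 16;    /* 64 bytes requested, crosses the page */

        /* Same test as handle_mmio(): does the last byte fall on a
           different page than the first? If so, clip the request to
           what is left of the current page. */
        if ((addr & PAGE_MASK) != ((addr + count * size - 1) & PAGE_MASK))
            count = (PAGE_SIZE - (addr & ~PAGE_MASK)) / size;

        printf("clipped count = %lu\n", count); /* prints 2 */
        return 0;
    }

Note that count can never reach zero in this direction, since at least one
whole element fits between addr and the end of its page; the new
ASSERT(count) before send_mmio_req() guards both directions.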