ia64/xen-unstable

changeset 17677:4269ab4b37ee

merge with xen-unstable.hg
author Isaku Yamahata <yamahata@valinux.co.jp>
date Tue May 20 11:33:15 2008 +0900 (2008-05-20)
parents e78f5dbedbe0 2ada81810ddb
children f04ce41dab84 2757cf34d1ea
--- a/tools/ioemu/hw/pci.c	Fri May 16 22:25:47 2008 +0900
+++ b/tools/ioemu/hw/pci.c	Tue May 20 11:33:15 2008 +0900
@@ -101,7 +101,7 @@ int pci_device_load(PCIDevice *s, QEMUFi
         int i;
         qemu_get_buffer(f, &irq_state, 1);
         for (i = 0; i < 4; i++)
-            pci_set_irq(s, i, !!(irq_state >> i));
+            pci_set_irq(s, i, (irq_state >> i) & 1);
     }
     return 0;
 }
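
The fix above matters because !!(irq_state >> i) is non-zero whenever any bit at position i or above is set, so restoring saved device state could spuriously assert lower IRQ lines; (irq_state >> i) & 1 tests bit i alone. A standalone C sketch of the difference (the value is illustrative, not part of the changeset):

    #include <stdio.h>

    /* With irq_state = 0x8 (only line 3 asserted), the old expression
     * wrongly reports lines 0-2 as asserted too. */
    int main(void)
    {
        unsigned char irq_state = 0x8;
        for (int i = 0; i < 4; i++)
            printf("line %d: old=%d new=%d\n", i,
                   !!(irq_state >> i), (irq_state >> i) & 1);
        return 0;
    }
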
--- a/tools/python/xen/xm/main.py	Fri May 16 22:25:47 2008 +0900
+++ b/tools/python/xen/xm/main.py	Tue May 20 11:33:15 2008 +0900
@@ -1096,7 +1096,7 @@ def xm_vcpu_list(args):
 
             # normalize cpumap by modulus nr_cpus, and drop duplicates
             cpumap = dict.fromkeys(
-                       map(lambda x: x % nr_cpus, cpumap)).keys()
+                       filter(lambda x: x < nr_cpus, cpumap)).keys()
             if len(cpumap) == nr_cpus:
                 return "any cpu"
 
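
The old expression folded out-of-range CPU numbers back into range with a modulus, so affinity to a non-existent CPU could be reported as affinity to a real one; the new expression drops such entries before de-duplicating. A rough C sketch of the two behaviours (values chosen for illustration, not from the changeset):

    #include <stdio.h>

    /* Old vs. new normalisation for nr_cpus = 2 and a cpumap that
     * mentions a non-existent CPU 3. */
    int main(void)
    {
        int nr_cpus = 2;
        int cpumap[] = { 0, 3 };

        for (int i = 0; i < 2; i++) {
            printf("old: %d -> cpu %d\n", cpumap[i], cpumap[i] % nr_cpus);
            if (cpumap[i] < nr_cpus)
                printf("new: keep cpu %d\n", cpumap[i]);
            else
                printf("new: drop cpu %d\n", cpumap[i]);
        }
        return 0;
    }
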
--- a/xen/arch/x86/hvm/stdvga.c	Fri May 16 22:25:47 2008 +0900
+++ b/xen/arch/x86/hvm/stdvga.c	Tue May 20 11:33:15 2008 +0900
@@ -271,9 +271,9 @@ static uint8_t stdvga_mem_readb(uint64_t
     return ret;
 }
 
-static uint32_t stdvga_mem_read(uint32_t addr, uint32_t size)
+static uint64_t stdvga_mem_read(uint64_t addr, uint64_t size)
 {
-    uint32_t data = 0;
+    uint64_t data = 0;
 
     switch ( size )
     {
@@ -293,8 +293,19 @@ static uint32_t stdvga_mem_read(uint32_t
         data |= stdvga_mem_readb(addr + 3) << 24;
         break;
 
+    case 8:
+        data =  (uint64_t)(stdvga_mem_readb(addr));
+        data |= (uint64_t)(stdvga_mem_readb(addr + 1)) << 8;
+        data |= (uint64_t)(stdvga_mem_readb(addr + 2)) << 16;
+        data |= (uint64_t)(stdvga_mem_readb(addr + 3)) << 24;
+        data |= (uint64_t)(stdvga_mem_readb(addr + 4)) << 32;
+        data |= (uint64_t)(stdvga_mem_readb(addr + 5)) << 40;
+        data |= (uint64_t)(stdvga_mem_readb(addr + 6)) << 48;
+        data |= (uint64_t)(stdvga_mem_readb(addr + 7)) << 56;
+        break;
+
     default:
-        gdprintk(XENLOG_WARNING, "invalid io size:%d\n", size);
+        gdprintk(XENLOG_WARNING, "invalid io size: %"PRId64"\n", size);
         break;
     }
 
@@ -409,7 +420,7 @@ static void stdvga_mem_writeb(uint64_t a
     }
 }
 
-static void stdvga_mem_write(uint32_t addr, uint32_t data, uint32_t size)
+static void stdvga_mem_write(uint64_t addr, uint64_t data, uint64_t size)
 {
     /* Intercept mmio write */
     switch ( size )
@@ -430,8 +441,19 @@ static void stdvga_mem_write(uint32_t ad
         stdvga_mem_writeb(addr+3, (data >> 24) & 0xff);
         break;
 
+    case 8:
+        stdvga_mem_writeb(addr+0, (data >>  0) & 0xff);
+        stdvga_mem_writeb(addr+1, (data >>  8) & 0xff);
+        stdvga_mem_writeb(addr+2, (data >> 16) & 0xff);
+        stdvga_mem_writeb(addr+3, (data >> 24) & 0xff);
+        stdvga_mem_writeb(addr+4, (data >> 32) & 0xff);
+        stdvga_mem_writeb(addr+5, (data >> 40) & 0xff);
+        stdvga_mem_writeb(addr+6, (data >> 48) & 0xff);
+        stdvga_mem_writeb(addr+7, (data >> 56) & 0xff);
+        break;
+
     default:
-        gdprintk(XENLOG_WARNING, "invalid io size:%d\n", size);
+        gdprintk(XENLOG_WARNING, "invalid io size: %"PRId64"\n", size);
         break;
     }
 }
@@ -447,7 +469,7 @@ static int mmio_move(struct hvm_hw_stdvg
     {
         if ( p->dir == IOREQ_READ )
        {
-            uint32_t addr = p->addr, data = p->data, tmp;
+            uint64_t addr = p->addr, data = p->data, tmp;
             for ( i = 0; i < p->count; i++ )
             {
                 tmp = stdvga_mem_read(addr, p->size);
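
One detail worth noting in the new 8-byte read case: stdvga_mem_readb() returns a uint8_t, which promotes to a 32-bit int, and shifting that by 32 or more is undefined behaviour in C, hence the explicit (uint64_t) casts before the wide shifts. A minimal standalone illustration (not Xen code):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint8_t byte = 0xab;
        /* "byte << 32" would shift the promoted 32-bit int past its
         * width: undefined behaviour, hence the casts in the patch. */
        uint64_t ok = (uint64_t)byte << 32;         /* well defined */
        printf("%#llx\n", (unsigned long long)ok);  /* 0xab00000000 */
        return 0;
    }
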
--- a/xen/arch/x86/hvm/svm/intr.c	Fri May 16 22:25:47 2008 +0900
+++ b/xen/arch/x86/hvm/svm/intr.c	Tue May 20 11:33:15 2008 +0900
@@ -51,6 +51,12 @@ static void svm_inject_nmi(struct vcpu *
 
     ASSERT(vmcb->eventinj.fields.v == 0);
     vmcb->eventinj = event;
+
+    /*
+     * SVM does not virtualise the NMI mask, so we emulate it by intercepting
+     * the next IRET and blocking NMI injection until the intercept triggers.
+     */
+    vmcb->general1_intercepts |= GENERAL1_INTERCEPT_IRET;
 }
 
 static void svm_inject_extint(struct vcpu *v, int vector)
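
The new comment describes the whole emulation scheme: the armed IRET intercept itself serves as the virtual NMI mask. A toy sketch of that state machine, with a stand-in vmcb and a placeholder bit value rather than Xen's real definitions:

    #include <assert.h>

    #define GENERAL1_INTERCEPT_IRET (1u << 20)   /* placeholder value */

    struct toy_vmcb { unsigned int general1_intercepts; };

    static void inject_nmi(struct toy_vmcb *vmcb)
    {
        /* After injecting an NMI, arm the IRET intercept: further NMIs
         * stay blocked until the guest's handler executes IRET. */
        vmcb->general1_intercepts |= GENERAL1_INTERCEPT_IRET;
    }

    static int nmi_masked(const struct toy_vmcb *vmcb)
    {
        return !!(vmcb->general1_intercepts & GENERAL1_INTERCEPT_IRET);
    }

    int main(void)
    {
        struct toy_vmcb vmcb = { 0 };
        assert(!nmi_masked(&vmcb));
        inject_nmi(&vmcb);
        assert(nmi_masked(&vmcb));  /* until a VMEXIT_IRET clears it */
        return 0;
    }
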
--- a/xen/arch/x86/hvm/svm/svm.c	Fri May 16 22:25:47 2008 +0900
+++ b/xen/arch/x86/hvm/svm/svm.c	Tue May 20 11:33:15 2008 +0900
@@ -367,15 +367,27 @@ static void svm_fpu_leave(struct vcpu *v
 static unsigned int svm_get_interrupt_shadow(struct vcpu *v)
 {
     struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
-    return (vmcb->interrupt_shadow ?
-            (HVM_INTR_SHADOW_MOV_SS|HVM_INTR_SHADOW_STI) : 0);
+    unsigned int intr_shadow = 0;
+
+    if ( vmcb->interrupt_shadow )
+        intr_shadow |= HVM_INTR_SHADOW_MOV_SS | HVM_INTR_SHADOW_STI;
+
+    if ( vmcb->general1_intercepts & GENERAL1_INTERCEPT_IRET )
+        intr_shadow |= HVM_INTR_SHADOW_NMI;
+
+    return intr_shadow;
 }
 
 static void svm_set_interrupt_shadow(struct vcpu *v, unsigned int intr_shadow)
 {
     struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+
     vmcb->interrupt_shadow =
         !!(intr_shadow & (HVM_INTR_SHADOW_MOV_SS|HVM_INTR_SHADOW_STI));
+
+    vmcb->general1_intercepts &= ~GENERAL1_INTERCEPT_IRET;
+    if ( intr_shadow & HVM_INTR_SHADOW_NMI )
+        vmcb->general1_intercepts |= GENERAL1_INTERCEPT_IRET;
 }
 
 static int svm_guest_x86_mode(struct vcpu *v)
@@ -1266,6 +1278,15 @@ asmlinkage void svm_vmexit_handler(struc
             reason = TSW_call_or_int;
         if ( (vmcb->exitinfo2 >> 44) & 1 )
             errcode = (uint32_t)vmcb->exitinfo2;
+
+        /*
+         * Some processors set the EXITINTINFO field when the task switch
+         * is caused by a task gate in the IDT. In this case we will be
+         * emulating the event injection, so we do not want the processor
+         * to re-inject the original event!
+         */
+        vmcb->eventinj.bytes = 0;
+
         hvm_task_switch((uint16_t)vmcb->exitinfo1, reason, errcode);
         break;
     }
@@ -1331,6 +1352,19 @@ asmlinkage void svm_vmexit_handler(struc
         svm_do_nested_pgfault(vmcb->exitinfo2, regs);
         break;
 
+    case VMEXIT_IRET:
+        /*
+         * IRET clears the NMI mask. However because we clear the mask
+         * /before/ executing IRET, we set the interrupt shadow to prevent
+         * a pending NMI from being injected immediately. This will work
+         * perfectly unless the IRET instruction faults: in that case we
+         * may inject an NMI before the NMI handler's IRET instruction is
+         * retired.
+         */
+        vmcb->general1_intercepts &= ~GENERAL1_INTERCEPT_IRET;
+        vmcb->interrupt_shadow = 1;
+        break;
+
     default:
     exit_and_crash:
         gdprintk(XENLOG_ERR, "unexpected VMEXIT: exit reason = 0x%x, "
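
Together with svm_inject_nmi() in intr.c, the get/set pair encodes the emulated NMI mask as HVM_INTR_SHADOW_NMI, which is what lets the mask survive domain save/restore, and VMEXIT_IRET is where the mask is finally dropped. A self-contained sketch of the save/restore round trip, using stand-in constants and a toy struct rather than Xen's vmcb_struct:

    #include <assert.h>

    /* Stand-in values chosen for illustration. */
    #define HVM_INTR_SHADOW_STI     0x01
    #define HVM_INTR_SHADOW_MOV_SS  0x02
    #define HVM_INTR_SHADOW_NMI     0x08
    #define GENERAL1_INTERCEPT_IRET (1u << 20)

    struct toy_vmcb { unsigned int general1_intercepts, interrupt_shadow; };

    static unsigned int get_shadow(const struct toy_vmcb *v)
    {
        unsigned int s = 0;
        if (v->interrupt_shadow)
            s |= HVM_INTR_SHADOW_MOV_SS | HVM_INTR_SHADOW_STI;
        if (v->general1_intercepts & GENERAL1_INTERCEPT_IRET)
            s |= HVM_INTR_SHADOW_NMI;   /* pending IRET == NMIs masked */
        return s;
    }

    static void set_shadow(struct toy_vmcb *v, unsigned int s)
    {
        v->interrupt_shadow =
            !!(s & (HVM_INTR_SHADOW_MOV_SS | HVM_INTR_SHADOW_STI));
        v->general1_intercepts &= ~GENERAL1_INTERCEPT_IRET;
        if (s & HVM_INTR_SHADOW_NMI)
            v->general1_intercepts |= GENERAL1_INTERCEPT_IRET;
    }

    int main(void)
    {
        struct toy_vmcb a = { GENERAL1_INTERCEPT_IRET, 1 }, b = { 0, 0 };
        set_shadow(&b, get_shadow(&a));           /* "restore" the state */
        assert(get_shadow(&b) == get_shadow(&a)); /* NMI mask survived */
        return 0;
    }
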
--- a/xen/arch/x86/setup.c	Fri May 16 22:25:47 2008 +0900
+++ b/xen/arch/x86/setup.c	Tue May 20 11:33:15 2008 +0900
@@ -362,7 +362,7 @@ void __init kexec_reserve_area(struct e8
 
     is_reserved = 1;
 
-    if ( !reserve_e820_ram(e820, kdump_start, kdump_size) )
+    if ( !reserve_e820_ram(e820, kdump_start, kdump_start + kdump_size) )
     {
         printk("Kdump: DISABLED (failed to reserve %luMB (%lukB) at 0x%lx)"
                "\n", kdump_size >> 20, kdump_size >> 10, kdump_start);
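
This last fix implies reserve_e820_ram() takes a start and an end address rather than a start and a length, so the old call asked for the wrong range entirely. A tiny arithmetic sketch with made-up values:

    #include <stdio.h>

    /* With kdump_start = 0x2000000 and kdump_size = 0x1000000, the old
     * call requested the range [0x2000000, 0x1000000), which is empty;
     * the new call reserves [0x2000000, 0x3000000) as intended. */
    int main(void)
    {
        unsigned long kdump_start = 0x2000000UL, kdump_size = 0x1000000UL;
        printf("old: [%#lx, %#lx)\n", kdump_start, kdump_size);
        printf("new: [%#lx, %#lx)\n", kdump_start, kdump_start + kdump_size);
        return 0;
    }
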