ia64/xen-unstable

changeset 12226:45e34f00a78f

[HVM] Clean up VCPU initialisation in Xen. No longer
parse HVM e820 tables in Xen: the toolstack now nominates the ioreq and
buffered-ioreq pages via two new HVM parameters (HVM_PARAM_IOREQ_PFN,
HVM_PARAM_BUFIOREQ_PFN), a cleaner alternative. Lots of code removal.
Signed-off-by: Keir Fraser <keir@xensource.com>
author kfraser@localhost.localdomain
date Thu Nov 02 15:55:51 2006 +0000 (2006-11-02)
parents 722cc2390021
children 4eac59fe1abc
files tools/firmware/vmxassist/setup.c tools/libxc/xc_hvm_build.c xen/arch/x86/domain.c xen/arch/x86/hvm/hvm.c xen/arch/x86/hvm/svm/svm.c xen/arch/x86/hvm/vmx/vmcs.c xen/arch/x86/hvm/vmx/vmx.c xen/arch/x86/setup.c xen/include/asm-x86/hvm/hvm.h xen/include/asm-x86/hvm/vmx/vmcs.h xen/include/public/hvm/e820.h xen/include/public/hvm/params.h
line diff
     1.1 --- a/tools/firmware/vmxassist/setup.c	Thu Nov 02 14:27:16 2006 +0000
     1.2 +++ b/tools/firmware/vmxassist/setup.c	Thu Nov 02 15:55:51 2006 +0000
     1.3 @@ -53,13 +53,10 @@ unsigned pgd[NR_PGD] __attribute__ ((ali
     1.4  struct e820entry e820map[] = {
     1.5  	{ 0x0000000000000000ULL, 0x000000000009F800ULL, E820_RAM },
     1.6  	{ 0x000000000009F800ULL, 0x0000000000000800ULL, E820_RESERVED },
     1.7 -	{ 0x00000000000A0000ULL, 0x0000000000020000ULL, E820_IO },
     1.8  	{ 0x00000000000C0000ULL, 0x0000000000040000ULL, E820_RESERVED },
     1.9  	{ 0x0000000000100000ULL, 0x0000000000000000ULL, E820_RAM },
    1.10 -	{ 0x0000000000000000ULL, 0x0000000000001000ULL, E820_SHARED_PAGE },
    1.11  	{ 0x0000000000000000ULL, 0x0000000000003000ULL, E820_NVS },
    1.12  	{ 0x0000000000003000ULL, 0x000000000000A000ULL, E820_ACPI },
    1.13 -	{ 0x00000000FEC00000ULL, 0x0000000001400000ULL, E820_IO },
    1.14  };
    1.15  #endif /* TEST */
    1.16  
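For reference, each initialiser row in the e820map above is an {addr, size,
type} triple. A minimal sketch of the entry layout vmxassist assumes (field
names per the usual Xen public e820 definition; illustrative, not the
authoritative header):

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative only: packed 64-bit base, 64-bit length, 32-bit type,
     * matching the initialisers in the table above. */
    struct e820entry {
        uint64_t addr;   /* start of memory segment */
        uint64_t size;   /* size of memory segment  */
        uint32_t type;   /* E820_RAM, E820_RESERVED, E820_ACPI, E820_NVS */
    } __attribute__((packed));

    int main(void)
    {
        printf("sizeof(struct e820entry) = %zu\n", sizeof(struct e820entry));
        return 0; /* expect 20 because of the packed attribute */
    }
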
     2.1 --- a/tools/libxc/xc_hvm_build.c	Thu Nov 02 14:27:16 2006 +0000
     2.2 +++ b/tools/libxc/xc_hvm_build.c	Thu Nov 02 15:55:51 2006 +0000
     2.3 @@ -56,11 +56,12 @@ static void build_e820map(void *e820_pag
     2.4      unsigned char nr_map = 0;
     2.5  
     2.6      /*
     2.7 -     * physical address space from HVM_BELOW_4G_RAM_END to 4G is reserved
     2.8 +     * Physical address space from HVM_BELOW_4G_RAM_END to 4G is reserved
     2.9       * for PCI device MMIO. So if the HVM guest has more than
    2.10       * HVM_BELOW_4G_RAM_END of RAM, the excess is relocated above 4G.
    2.11       */
    2.12 -    if ( mem_size > HVM_BELOW_4G_RAM_END ) {
    2.13 +    if ( mem_size > HVM_BELOW_4G_RAM_END )
    2.14 +    {
    2.15          extra_mem_size = mem_size - HVM_BELOW_4G_RAM_END;
    2.16          mem_size = HVM_BELOW_4G_RAM_END;
    2.17      }
    2.18 @@ -75,11 +76,6 @@ static void build_e820map(void *e820_pag
    2.19      e820entry[nr_map].type = E820_RESERVED;
    2.20      nr_map++;
    2.21  
    2.22 -    e820entry[nr_map].addr = 0xA0000;
    2.23 -    e820entry[nr_map].size = 0x20000;
    2.24 -    e820entry[nr_map].type = E820_IO;
    2.25 -    nr_map++;
    2.26 -
    2.27      e820entry[nr_map].addr = 0xEA000;
    2.28      e820entry[nr_map].size = 0x01000;
    2.29      e820entry[nr_map].type = E820_ACPI;
    2.30 @@ -90,54 +86,14 @@ static void build_e820map(void *e820_pag
    2.31      e820entry[nr_map].type = E820_RESERVED;
    2.32      nr_map++;
    2.33  
    2.34 -/* buffered io page.    */
    2.35 -#define BUFFERED_IO_PAGES   1
    2.36 -/* xenstore page.       */
    2.37 -#define XENSTORE_PAGES      1
    2.38 -/* shared io page.      */
    2.39 -#define SHARED_IO_PAGES     1
    2.40 -/* totally 16 static pages are reserved in E820 table */
    2.41 -
    2.42 -    /* Most of the ram goes here */
    2.43 +    /* Low RAM goes here. Remove 3 pages for ioreq, bufioreq, and xenstore. */
    2.44      e820entry[nr_map].addr = 0x100000;
    2.45 -    e820entry[nr_map].size = mem_size - 0x100000 - PAGE_SIZE *
    2.46 -                                                (BUFFERED_IO_PAGES +
    2.47 -                                                 XENSTORE_PAGES +
    2.48 -                                                 SHARED_IO_PAGES);
    2.49 +    e820entry[nr_map].size = mem_size - 0x100000 - PAGE_SIZE * 3;
    2.50      e820entry[nr_map].type = E820_RAM;
    2.51      nr_map++;
    2.52  
    2.53 -    /* Statically allocated special pages */
    2.54 -
    2.55 -    /* For buffered IO requests */
    2.56 -    e820entry[nr_map].addr = mem_size - PAGE_SIZE *
    2.57 -                                        (BUFFERED_IO_PAGES +
    2.58 -                                         XENSTORE_PAGES +
    2.59 -                                         SHARED_IO_PAGES);
    2.60 -    e820entry[nr_map].size = PAGE_SIZE * BUFFERED_IO_PAGES;
    2.61 -    e820entry[nr_map].type = E820_BUFFERED_IO;
    2.62 -    nr_map++;
    2.63 -
    2.64 -    /* For xenstore */
    2.65 -    e820entry[nr_map].addr = mem_size - PAGE_SIZE *
    2.66 -                                        (XENSTORE_PAGES +
    2.67 -                                         SHARED_IO_PAGES);
    2.68 -    e820entry[nr_map].size = PAGE_SIZE * XENSTORE_PAGES;
    2.69 -    e820entry[nr_map].type = E820_XENSTORE;
    2.70 -    nr_map++;
    2.71 -
    2.72 -    /* Shared ioreq_t page */
    2.73 -    e820entry[nr_map].addr = mem_size - PAGE_SIZE * SHARED_IO_PAGES;
    2.74 -    e820entry[nr_map].size = PAGE_SIZE * SHARED_IO_PAGES;
    2.75 -    e820entry[nr_map].type = E820_SHARED_PAGE;
    2.76 -    nr_map++;
    2.77 -
    2.78 -    e820entry[nr_map].addr = 0xFEC00000;
    2.79 -    e820entry[nr_map].size = 0x1400000;
    2.80 -    e820entry[nr_map].type = E820_IO;
    2.81 -    nr_map++;
    2.82 -
    2.83 -    if ( extra_mem_size ) {
    2.84 +    if ( extra_mem_size )
    2.85 +    {
    2.86          e820entry[nr_map].addr = (1ULL << 32);
    2.87          e820entry[nr_map].size = extra_mem_size;
    2.88          e820entry[nr_map].type = E820_RAM;
    2.89 @@ -212,6 +168,7 @@ static int setup_guest(int xc_handle,
    2.90      void *e820_page;
    2.91      struct domain_setup_info dsi;
    2.92      uint64_t v_end;
    2.93 +    int rc;
    2.94  
    2.95      memset(&dsi, 0, sizeof(struct domain_setup_info));
    2.96  
    2.97 @@ -253,13 +210,28 @@ static int setup_guest(int xc_handle,
    2.98      for ( i = HVM_BELOW_4G_RAM_END >> PAGE_SHIFT; i < nr_pages; i++ )
    2.99          page_array[i] += HVM_BELOW_4G_MMIO_LENGTH >> PAGE_SHIFT;
   2.100  
   2.101 -    if ( xc_domain_memory_populate_physmap(xc_handle, dom, nr_pages,
   2.102 -                                           0, 0, page_array) )
   2.103 +    /* Allocate memory for HVM guest, skipping VGA hole 0xA0000-0xC0000. */
   2.104 +    rc = xc_domain_memory_populate_physmap(
   2.105 +        xc_handle, dom, (nr_pages > 0xa0) ? 0xa0 : nr_pages,
   2.106 +        0, 0, &page_array[0x00]);
   2.107 +    if ( (rc == 0) && (nr_pages > 0xc0) )
   2.108 +        rc = xc_domain_memory_populate_physmap(
   2.109 +            xc_handle, dom, nr_pages - 0xc0, 0, 0, &page_array[0xc0]);
   2.110 +    if ( rc != 0 )
   2.111      {
   2.112          PERROR("Could not allocate memory for HVM guest.\n");
   2.113          goto error_out;
   2.114      }
   2.115  
   2.116 +    if ( (nr_pages > 0xa0) &&
   2.117 +         xc_domain_memory_decrease_reservation(
   2.118 +             xc_handle, dom, (nr_pages < 0xc0) ? (nr_pages - 0xa0) : 0x20,
   2.119 +             0, &page_array[0xa0]) )
   2.120 +    {
   2.121 +        PERROR("Could not free VGA hole.\n");
   2.122 +        goto error_out;
   2.123 +    }
   2.124 +
   2.125      if ( xc_domain_translate_gpfn_list(xc_handle, dom, nr_pages,
   2.126                                         page_array, page_array) )
   2.127      {
   2.128 @@ -295,6 +267,8 @@ static int setup_guest(int xc_handle,
   2.129      /* Mask all upcalls... */
   2.130      for ( i = 0; i < MAX_VIRT_CPUS; i++ )
   2.131          shared_info->vcpu_info[i].evtchn_upcall_mask = 1;
   2.132 +    memset(&shared_info->evtchn_mask[0], 0xff,
   2.133 +           sizeof(shared_info->evtchn_mask));
   2.134      munmap(shared_info, PAGE_SIZE);
   2.135  
   2.136      if ( v_end > HVM_BELOW_4G_RAM_END )
   2.137 @@ -302,22 +276,17 @@ static int setup_guest(int xc_handle,
   2.138      else
   2.139          shared_page_nr = (v_end >> PAGE_SHIFT) - 1;
   2.140  
   2.141 -    *store_mfn = page_array[shared_page_nr - 1];
   2.142 -
   2.143 -    xc_set_hvm_param(xc_handle, dom, HVM_PARAM_STORE_PFN, shared_page_nr - 1);
   2.144 -    xc_set_hvm_param(xc_handle, dom, HVM_PARAM_STORE_EVTCHN, store_evtchn);
   2.145 -
   2.146 -    /* Paranoia */
   2.147 -    /* clean the shared IO requests page */
   2.148 -    if ( xc_clear_domain_page(xc_handle, dom, page_array[shared_page_nr]) )
   2.149 +    /* Paranoia: clean pages. */
   2.150 +    if ( xc_clear_domain_page(xc_handle, dom, page_array[shared_page_nr]) ||
   2.151 +         xc_clear_domain_page(xc_handle, dom, page_array[shared_page_nr-1]) ||
   2.152 +         xc_clear_domain_page(xc_handle, dom, page_array[shared_page_nr-2]) )
   2.153          goto error_out;
   2.154  
   2.155 -    /* clean the buffered IO requests page */
   2.156 -    if ( xc_clear_domain_page(xc_handle, dom, page_array[shared_page_nr - 2]) )
   2.157 -        goto error_out;
   2.158 -
   2.159 -    if ( xc_clear_domain_page(xc_handle, dom, *store_mfn) )
   2.160 -        goto error_out;
   2.161 +    *store_mfn = page_array[shared_page_nr - 1];
   2.162 +    xc_set_hvm_param(xc_handle, dom, HVM_PARAM_STORE_PFN, shared_page_nr-1);
   2.163 +    xc_set_hvm_param(xc_handle, dom, HVM_PARAM_STORE_EVTCHN, store_evtchn);
   2.164 +    xc_set_hvm_param(xc_handle, dom, HVM_PARAM_BUFIOREQ_PFN, shared_page_nr-2);
   2.165 +    xc_set_hvm_param(xc_handle, dom, HVM_PARAM_IOREQ_PFN, shared_page_nr);
   2.166  
   2.167      free(page_array);
   2.168  
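The new allocation path above populates guest pfns below 0xa0 and from 0xc0
upward, then releases anything left in the 0xa0-0xbf VGA hole
(0xA0000-0xC0000). A standalone sketch of the same range arithmetic,
assuming nothing beyond the bounds visible in the hunk:

    #include <stdio.h>

    /* Sketch: the three page counts used when building an HVM physmap
     * around the VGA hole, mirroring the xc_hvm_build.c logic above. */
    static void vga_hole_ranges(unsigned long nr_pages)
    {
        unsigned long low  = (nr_pages > 0xa0) ? 0xa0 : nr_pages;
        unsigned long hole = (nr_pages > 0xa0)
            ? ((nr_pages < 0xc0) ? (nr_pages - 0xa0) : 0x20) : 0;
        unsigned long high = (nr_pages > 0xc0) ? (nr_pages - 0xc0) : 0;

        printf("populate %#lx pages from pfn 0, free %#lx hole pages, "
               "populate %#lx pages from pfn 0xc0\n", low, hole, high);
    }

    int main(void)
    {
        vga_hole_ranges(0x800); /* e.g. an 8MB guest: 0xa0, 0x20, 0x740 */
        return 0;
    }
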
     3.1 --- a/xen/arch/x86/domain.c	Thu Nov 02 14:27:16 2006 +0000
     3.2 +++ b/xen/arch/x86/domain.c	Thu Nov 02 15:55:51 2006 +0000
     3.3 @@ -123,20 +123,31 @@ struct vcpu *alloc_vcpu_struct(struct do
     3.4  
     3.5      memset(v, 0, sizeof(*v));
     3.6  
     3.7 +    v->vcpu_id = vcpu_id;
     3.8 +    v->domain  = d;
     3.9 +
    3.10      v->arch.flags = TF_kernel_mode;
    3.11  
    3.12 -    if ( is_idle_domain(d) )
    3.13 +    if ( is_hvm_domain(d) )
    3.14      {
    3.15 -        v->arch.schedule_tail = continue_idle_domain;
    3.16 -        v->arch.cr3           = __pa(idle_pg_table);
    3.17 +        if ( hvm_vcpu_initialise(v) != 0 )
    3.18 +        {
    3.19 +            xfree(v);
    3.20 +            return NULL;
    3.21 +        }
    3.22      }
    3.23      else
    3.24      {
    3.25          v->arch.schedule_tail = continue_nonidle_domain;
    3.26 -    }
    3.27 +        v->arch.ctxt_switch_from = paravirt_ctxt_switch_from;
    3.28 +        v->arch.ctxt_switch_to   = paravirt_ctxt_switch_to;
    3.29  
    3.30 -    v->arch.ctxt_switch_from = paravirt_ctxt_switch_from;
    3.31 -    v->arch.ctxt_switch_to   = paravirt_ctxt_switch_to;
    3.32 +        if ( is_idle_domain(d) )
    3.33 +        {
    3.34 +            v->arch.schedule_tail = continue_idle_domain;
    3.35 +            v->arch.cr3           = __pa(idle_pg_table);
    3.36 +        }
    3.37 +    }
    3.38  
    3.39      v->arch.perdomain_ptes =
    3.40          d->arch.mm_perdomain_pt + (vcpu_id << GDT_LDT_VCPU_SHIFT);
    3.41 @@ -335,22 +346,11 @@ int arch_set_info_guest(
    3.42  
    3.43      if ( !is_hvm_vcpu(v) )
    3.44      {
    3.45 -        cr3_pfn = gmfn_to_mfn(d, xen_cr3_to_pfn(c->ctrlreg[3]));
    3.46 -        v->arch.guest_table = pagetable_from_pfn(cr3_pfn);
    3.47 -    }
    3.48 -
    3.49 -    if ( (rc = (int)set_gdt(v, c->gdt_frames, c->gdt_ents)) != 0 )
    3.50 -        return rc;
    3.51 +        if ( (rc = (int)set_gdt(v, c->gdt_frames, c->gdt_ents)) != 0 )
    3.52 +            return rc;
    3.53  
    3.54 -    if ( is_hvm_vcpu(v) )
    3.55 -    {
    3.56 -        v->arch.guest_table = pagetable_null();
    3.57 +        cr3_pfn = gmfn_to_mfn(d, xen_cr3_to_pfn(c->ctrlreg[3]));
    3.58  
    3.59 -        if ( !hvm_initialize_guest_resources(v) )
    3.60 -            return -EINVAL;
    3.61 -    }
    3.62 -    else
    3.63 -    {
    3.64          if ( shadow_mode_refcounts(d)
    3.65               ? !get_page(mfn_to_page(cr3_pfn), d)
    3.66               : !get_page_and_type(mfn_to_page(cr3_pfn), d,
    3.67 @@ -359,6 +359,8 @@ int arch_set_info_guest(
    3.68              destroy_gdt(v);
    3.69              return -EINVAL;
    3.70          }
    3.71 +
    3.72 +        v->arch.guest_table = pagetable_from_pfn(cr3_pfn);
    3.73      }    
    3.74  
    3.75      /* Shadow: make sure the domain has enough shadow memory to
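To see the shape of the reorganised alloc_vcpu_struct() at a glance, here is
a toy model of the new dispatch; all types here are stand-ins, and in Xen the
HVM schedule tail is really installed by the per-vendor vcpu_initialise hook:

    #include <stdio.h>
    #include <stdlib.h>

    enum dom_type { DOM_HVM, DOM_PV, DOM_IDLE };

    struct vcpu { enum dom_type type; const char *schedule_tail; };

    /* Stand-in for hvm_vcpu_initialise(); the VMX/SVM hook sets the real
     * tail (arch_vmx_do_launch / arch_svm_do_launch). */
    static int hvm_vcpu_initialise(struct vcpu *v)
    {
        v->schedule_tail = "hvm launch tail";
        return 0; /* nonzero aborts the allocation, as in the hunk above */
    }

    static struct vcpu *alloc_vcpu_struct(enum dom_type type)
    {
        struct vcpu *v = calloc(1, sizeof(*v));
        if ( v == NULL )
            return NULL;
        v->type = type;

        if ( type == DOM_HVM )
        {
            if ( hvm_vcpu_initialise(v) != 0 )
            {
                free(v);
                return NULL;
            }
        }
        else
        {
            v->schedule_tail = (type == DOM_IDLE) ? "continue_idle_domain"
                                                  : "continue_nonidle_domain";
        }
        return v;
    }

    int main(void)
    {
        struct vcpu *v = alloc_vcpu_struct(DOM_PV);
        printf("schedule_tail = %s\n", v ? v->schedule_tail : "(failed)");
        free(v);
        return 0;
    }
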
     4.1 --- a/xen/arch/x86/hvm/hvm.c	Thu Nov 02 14:27:16 2006 +0000
     4.2 +++ b/xen/arch/x86/hvm/hvm.c	Thu Nov 02 15:55:51 2006 +0000
     4.3 @@ -57,150 +57,14 @@ integer_param("hvm_debug", opt_hvm_debug
     4.4  
     4.5  struct hvm_function_table hvm_funcs;
     4.6  
     4.7 -static void hvm_zap_mmio_range(
     4.8 -    struct domain *d, unsigned long pfn, unsigned long nr_pfn)
     4.9 -{
    4.10 -    unsigned long i;
    4.11 -
    4.12 -    ASSERT(d == current->domain);
    4.13 -
    4.14 -    for ( i = 0; i < nr_pfn; i++ )
    4.15 -    {
    4.16 -        if ( pfn + i >= 0xfffff )
    4.17 -            break;
    4.18 -
    4.19 -        if ( VALID_MFN(gmfn_to_mfn(d, pfn + i)) )
    4.20 -            guest_remove_page(d, pfn + i);
    4.21 -    }
    4.22 -}
    4.23 -
    4.24 -static void e820_zap_iommu_callback(struct domain *d,
    4.25 -                                    struct e820entry *e,
    4.26 -                                    void *ign)
    4.27 -{
    4.28 -    if ( e->type == E820_IO )
    4.29 -        hvm_zap_mmio_range(d, e->addr >> PAGE_SHIFT, e->size >> PAGE_SHIFT);
    4.30 -}
    4.31 -
    4.32 -static void e820_foreach(struct domain *d,
    4.33 -                         void (*cb)(struct domain *d,
    4.34 -                                    struct e820entry *e,
    4.35 -                                    void *data),
    4.36 -                         void *data)
    4.37 -{
    4.38 -    int i;
    4.39 -    unsigned char e820_map_nr;
    4.40 -    struct e820entry *e820entry;
    4.41 -    unsigned char *p;
    4.42 -    unsigned long mfn;
    4.43 -
    4.44 -    mfn = gmfn_to_mfn(d, E820_MAP_PAGE >> PAGE_SHIFT);
    4.45 -    if ( mfn == INVALID_MFN )
    4.46 -    {
    4.47 -        printk("Can not find E820 memory map page for HVM domain.\n");
    4.48 -        domain_crash_synchronous();
    4.49 -    }
    4.50 -
    4.51 -    p = map_domain_page(mfn);
    4.52 -    if ( p == NULL )
    4.53 -    {
    4.54 -        printk("Can not map E820 memory map page for HVM domain.\n");
    4.55 -        domain_crash_synchronous();
    4.56 -    }
    4.57 -
    4.58 -    e820_map_nr = *(p + E820_MAP_NR_OFFSET);
    4.59 -    e820entry = (struct e820entry *)(p + E820_MAP_OFFSET);
    4.60 -
    4.61 -    for ( i = 0; i < e820_map_nr; i++ )
    4.62 -        cb(d, e820entry + i, data);
    4.63 -
    4.64 -    unmap_domain_page(p);
    4.65 -}
    4.66 -
    4.67 -static void hvm_zap_iommu_pages(struct domain *d)
    4.68 -{
    4.69 -    e820_foreach(d, e820_zap_iommu_callback, NULL);
    4.70 -}
    4.71 -
    4.72 -static void e820_map_io_shared_callback(struct domain *d,
    4.73 -                                        struct e820entry *e,
    4.74 -                                        void *data)
    4.75 +void hvm_create_event_channel(struct vcpu *v)
    4.76  {
    4.77 -    unsigned long *mfn = data;
    4.78 -    if ( e->type == E820_SHARED_PAGE )
    4.79 -    {
    4.80 -        ASSERT(*mfn == INVALID_MFN);
    4.81 -        *mfn = gmfn_to_mfn(d, e->addr >> PAGE_SHIFT);
    4.82 -    }
    4.83 -}
    4.84 -
    4.85 -static void e820_map_buffered_io_callback(struct domain *d,
    4.86 -                                          struct e820entry *e,
    4.87 -                                          void *data)
    4.88 -{
    4.89 -    unsigned long *mfn = data;
    4.90 -    if ( e->type == E820_BUFFERED_IO ) {
    4.91 -        ASSERT(*mfn == INVALID_MFN);
    4.92 -        *mfn = gmfn_to_mfn(d, e->addr >> PAGE_SHIFT);
    4.93 -    }
    4.94 +    v->arch.hvm_vcpu.xen_port = alloc_unbound_xen_event_channel(v, 0);
    4.95 +    if ( get_sp(v->domain) && get_vio(v->domain, v->vcpu_id) )
    4.96 +        get_vio(v->domain, v->vcpu_id)->vp_eport =
    4.97 +            v->arch.hvm_vcpu.xen_port;
    4.98  }
    4.99  
   4.100 -void hvm_map_io_shared_pages(struct vcpu *v)
   4.101 -{
   4.102 -    unsigned long mfn;
   4.103 -    void *p;
   4.104 -    struct domain *d = v->domain;
   4.105 -
   4.106 -    if ( d->arch.hvm_domain.shared_page_va ||
   4.107 -         d->arch.hvm_domain.buffered_io_va )
   4.108 -        return;
   4.109 -
   4.110 -    mfn = INVALID_MFN;
   4.111 -    e820_foreach(d, e820_map_io_shared_callback, &mfn);
   4.112 -
   4.113 -    if ( mfn == INVALID_MFN )
   4.114 -    {
   4.115 -        printk("Can not find io request shared page for HVM domain.\n");
   4.116 -        domain_crash_synchronous();
   4.117 -    }
   4.118 -
   4.119 -    p = map_domain_page_global(mfn);
   4.120 -    if ( p == NULL )
   4.121 -    {
   4.122 -        printk("Can not map io request shared page for HVM domain.\n");
   4.123 -        domain_crash_synchronous();
   4.124 -    }
   4.125 -
   4.126 -    d->arch.hvm_domain.shared_page_va = (unsigned long)p;
   4.127 -
   4.128 -    mfn = INVALID_MFN;
   4.129 -    e820_foreach(d, e820_map_buffered_io_callback, &mfn);
   4.130 -    if ( mfn != INVALID_MFN ) {
   4.131 -        p = map_domain_page_global(mfn);
   4.132 -        if ( p )
   4.133 -            d->arch.hvm_domain.buffered_io_va = (unsigned long)p;
   4.134 -    }
   4.135 -}
   4.136 -
   4.137 -void hvm_create_event_channels(struct vcpu *v)
   4.138 -{
   4.139 -    vcpu_iodata_t *p;
   4.140 -    struct vcpu *o;
   4.141 -
   4.142 -    if ( v->vcpu_id == 0 ) {
   4.143 -        /* Ugly: create event channels for every vcpu when vcpu 0
   4.144 -           starts, so that they're available for ioemu to bind to. */
   4.145 -        for_each_vcpu(v->domain, o) {
   4.146 -            p = get_vio(v->domain, o->vcpu_id);
   4.147 -            o->arch.hvm_vcpu.xen_port = p->vp_eport =
   4.148 -                alloc_unbound_xen_event_channel(o, 0);
   4.149 -            dprintk(XENLOG_INFO, "Allocated port %d for hvm.\n",
   4.150 -                    o->arch.hvm_vcpu.xen_port);
   4.151 -        }
   4.152 -    }
   4.153 -}
   4.154 -
   4.155 -
   4.156  void hvm_stts(struct vcpu *v)
   4.157  {
   4.158      /* FPU state already dirty? Then no need to setup_fpu() lazily. */
   4.159 @@ -268,8 +132,6 @@ void hvm_setup_platform(struct domain *d
   4.160      if ( !is_hvm_domain(d) || (v->vcpu_id != 0) )
   4.161          return;
   4.162  
   4.163 -    hvm_zap_iommu_pages(d);
   4.164 -
   4.165      platform = &d->arch.hvm_domain;
   4.166      pic_init(&platform->vpic, pic_irq_request, &platform->interrupt_request);
   4.167      register_pic_io_hook();
   4.168 @@ -689,6 +551,9 @@ long do_hvm_op(unsigned long op, XEN_GUE
   4.169      {
   4.170          struct xen_hvm_param a;
   4.171          struct domain *d;
   4.172 +        struct vcpu *v;
   4.173 +        unsigned long mfn;
   4.174 +        void *p;
   4.175  
   4.176          if ( copy_from_guest(&a, arg, 1) )
   4.177              return -EFAULT;
   4.178 @@ -712,8 +577,41 @@ long do_hvm_op(unsigned long op, XEN_GUE
   4.179              return -EPERM;
   4.180          }
   4.181  
   4.182 +        rc = -EINVAL;
   4.183 +        if ( !is_hvm_domain(d) )
   4.184 +            goto param_fail;
   4.185 +
   4.186          if ( op == HVMOP_set_param )
   4.187          {
   4.188 +            switch ( a.index )
   4.189 +            {
   4.190 +            case HVM_PARAM_IOREQ_PFN:
   4.191 +                if ( d->arch.hvm_domain.shared_page_va )
   4.192 +                    goto param_fail;
   4.193 +                mfn = gmfn_to_mfn(d, a.value);
   4.194 +                if ( mfn == INVALID_MFN )
   4.195 +                    goto param_fail;
   4.196 +                p = map_domain_page_global(mfn);
   4.197 +                if ( p == NULL )
   4.198 +                    goto param_fail;
   4.199 +                d->arch.hvm_domain.shared_page_va = (unsigned long)p;
   4.200 +                /* Initialise evtchn port info if VCPUs already created. */
   4.201 +                for_each_vcpu ( d, v )
   4.202 +                    get_vio(d, v->vcpu_id)->vp_eport =
    4.203 +                        v->arch.hvm_vcpu.xen_port;
   4.204 +                break;
   4.205 +            case HVM_PARAM_BUFIOREQ_PFN:
   4.206 +                if ( d->arch.hvm_domain.buffered_io_va )
   4.207 +                    goto param_fail;
   4.208 +                mfn = gmfn_to_mfn(d, a.value);
   4.209 +                if ( mfn == INVALID_MFN )
   4.210 +                    goto param_fail;
   4.211 +                p = map_domain_page_global(mfn);
   4.212 +                if ( p == NULL )
   4.213 +                    goto param_fail;
   4.214 +                d->arch.hvm_domain.buffered_io_va = (unsigned long)p;
   4.215 +                break;
   4.216 +            }
   4.217              d->arch.hvm_domain.params[a.index] = a.value;
   4.218              rc = 0;
   4.219          }
   4.220 @@ -723,6 +621,7 @@ long do_hvm_op(unsigned long op, XEN_GUE
   4.221              rc = copy_to_guest(arg, &a, 1) ? -EFAULT : 0;
   4.222          }
   4.223  
   4.224 +    param_fail:
   4.225          put_domain(d);
   4.226          break;
   4.227      }
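The set_param handlers above map each nominated page at most once per
domain. A toy guard distilled from the HVM_PARAM_IOREQ_PFN case, where
map_page() stands in for gmfn_to_mfn() plus map_domain_page_global():

    #include <stdio.h>

    struct hvm_domain_model { unsigned long shared_page_va; };

    static void *map_page(unsigned long pfn)
    {
        (void)pfn;
        return (void *)0x1000; /* pretend the mapping succeeded */
    }

    static int set_ioreq_pfn(struct hvm_domain_model *d, unsigned long pfn)
    {
        void *p;
        if ( d->shared_page_va )    /* already set: refuse to remap */
            return -1;
        if ( (p = map_page(pfn)) == NULL )
            return -1;
        d->shared_page_va = (unsigned long)p;
        return 0;
    }

    int main(void)
    {
        struct hvm_domain_model d = { 0 };
        printf("first set:  %d\n", set_ioreq_pfn(&d, 0x9f)); /* 0 */
        printf("second set: %d\n", set_ioreq_pfn(&d, 0x9e)); /* -1 */
        return 0;
    }
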
     5.1 --- a/xen/arch/x86/hvm/svm/svm.c	Thu Nov 02 14:27:16 2006 +0000
     5.2 +++ b/xen/arch/x86/hvm/svm/svm.c	Thu Nov 02 15:55:51 2006 +0000
     5.3 @@ -789,41 +789,14 @@ static void svm_ctxt_switch_to(struct vc
     5.4      svm_restore_dr(v);
     5.5  }
     5.6  
     5.7 -
     5.8 -static void svm_final_setup_guest(struct vcpu *v)
     5.9 +static int svm_vcpu_initialise(struct vcpu *v)
    5.10  {
    5.11 -    struct domain *d = v->domain;
    5.12 -
    5.13      v->arch.schedule_tail    = arch_svm_do_launch;
    5.14      v->arch.ctxt_switch_from = svm_ctxt_switch_from;
    5.15      v->arch.ctxt_switch_to   = svm_ctxt_switch_to;
    5.16 -
    5.17 -    if ( v != d->vcpu[0] )
    5.18 -        return;
    5.19 -
    5.20 -    if ( !shadow_mode_external(d) )
    5.21 -    {
    5.22 -        gdprintk(XENLOG_ERR, "Can't init HVM for dom %u vcpu %u: "
    5.23 -                "not in shadow external mode\n", d->domain_id, v->vcpu_id);
    5.24 -        domain_crash(d);
    5.25 -    }
    5.26 -
    5.27 -    /* 
    5.28 -     * Required to do this once per domain
    5.29 -     * TODO: add a seperate function to do these.
    5.30 -     */
    5.31 -    memset(&d->shared_info->evtchn_mask[0], 0xff, 
    5.32 -           sizeof(d->shared_info->evtchn_mask));       
    5.33 +    return 0;
    5.34  }
    5.35  
    5.36 -
    5.37 -static int svm_initialize_guest_resources(struct vcpu *v)
    5.38 -{
    5.39 -    svm_final_setup_guest(v);
    5.40 -    return 1;
    5.41 -}
    5.42 -
    5.43 -
    5.44  int start_svm(void)
    5.45  {
    5.46      u32 eax, ecx, edx;
    5.47 @@ -871,7 +844,7 @@ int start_svm(void)
    5.48      /* Setup HVM interfaces */
    5.49      hvm_funcs.disable = stop_svm;
    5.50  
    5.51 -    hvm_funcs.initialize_guest_resources = svm_initialize_guest_resources;
    5.52 +    hvm_funcs.vcpu_initialise = svm_vcpu_initialise;
    5.53      hvm_funcs.relinquish_guest_resources = svm_relinquish_guest_resources;
    5.54  
    5.55      hvm_funcs.store_cpu_guest_regs = svm_store_cpu_guest_regs;
     6.1 --- a/xen/arch/x86/hvm/vmx/vmcs.c	Thu Nov 02 14:27:16 2006 +0000
     6.2 +++ b/xen/arch/x86/hvm/vmx/vmcs.c	Thu Nov 02 15:55:51 2006 +0000
     6.3 @@ -56,7 +56,7 @@
     6.4        CPU_BASED_INVDPG_EXITING |                        \
     6.5        CPU_BASED_MWAIT_EXITING |                         \
     6.6        CPU_BASED_MOV_DR_EXITING |                        \
     6.7 -      CPU_BASED_ACTIVATE_IO_BITMAP |                    \
     6.8 +      CPU_BASED_UNCOND_IO_EXITING |                     \
     6.9        CPU_BASED_USE_TSC_OFFSETING )
    6.10  
    6.11  /* Basic flags for VM-Exit controls. */
    6.12 @@ -240,22 +240,9 @@ static inline int construct_vmcs_control
    6.13      int error = 0;
    6.14  
    6.15      error |= __vmwrite(PIN_BASED_VM_EXEC_CONTROL, vmx_pin_based_exec_control);
    6.16 -
    6.17      error |= __vmwrite(VM_EXIT_CONTROLS, vmx_vmexit_control);
    6.18 -
    6.19      error |= __vmwrite(VM_ENTRY_CONTROLS, vmx_vmentry_control);
    6.20  
    6.21 -    error |= __vmwrite(IO_BITMAP_A, virt_to_maddr(arch_vmx->io_bitmap_a));
    6.22 -    error |= __vmwrite(IO_BITMAP_B, virt_to_maddr(arch_vmx->io_bitmap_b));
    6.23 -
    6.24 -#ifdef CONFIG_X86_PAE
    6.25 -    /* On PAE bitmaps may in future be above 4GB. Write high words. */
    6.26 -    error |= __vmwrite(IO_BITMAP_A_HIGH,
    6.27 -                       (paddr_t)virt_to_maddr(arch_vmx->io_bitmap_a) >> 32);
    6.28 -    error |= __vmwrite(IO_BITMAP_B_HIGH,
    6.29 -                       (paddr_t)virt_to_maddr(arch_vmx->io_bitmap_b) >> 32);
    6.30 -#endif
    6.31 -
    6.32      return error;
    6.33  }
    6.34  
    6.35 @@ -589,12 +576,6 @@ void vmx_destroy_vmcs(struct vcpu *v)
    6.36  
    6.37      vmx_clear_vmcs(v);
    6.38  
    6.39 -    free_xenheap_pages(arch_vmx->io_bitmap_a, IO_BITMAP_ORDER);
    6.40 -    free_xenheap_pages(arch_vmx->io_bitmap_b, IO_BITMAP_ORDER);
    6.41 -
    6.42 -    arch_vmx->io_bitmap_a = NULL;
    6.43 -    arch_vmx->io_bitmap_b = NULL;
    6.44 -
    6.45      vmx_free_vmcs(arch_vmx->vmcs);
    6.46      arch_vmx->vmcs = NULL;
    6.47  }
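The bitmap removal above works because the VMCS now requests unconditional
I/O exiting. Illustrative bit positions only (per the Intel SDM's primary
processor-based execution controls; verify against vmcs.h): "unconditional
I/O exiting" is bit 24 and "use I/O bitmaps" is bit 25, and with the bitmaps
disabled every IN/OUT traps, so the per-vcpu bitmap pages serve no purpose:

    #include <stdio.h>

    #define CPU_BASED_UNCOND_IO_EXITING   (1u << 24) /* assumed bit 24 */
    #define CPU_BASED_ACTIVATE_IO_BITMAP  (1u << 25) /* assumed bit 25 */

    int main(void)
    {
        unsigned int ctls = CPU_BASED_UNCOND_IO_EXITING; /* new policy */
        printf("io bitmaps consulted: %s\n",
               (ctls & CPU_BASED_ACTIVATE_IO_BITMAP) ? "yes" : "no");
        printf("every IN/OUT exits:   %s\n",
               (ctls & CPU_BASED_UNCOND_IO_EXITING) ? "yes" : "no");
        return 0;
    }
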
     7.1 --- a/xen/arch/x86/hvm/vmx/vmx.c	Thu Nov 02 14:27:16 2006 +0000
     7.2 +++ b/xen/arch/x86/hvm/vmx/vmx.c	Thu Nov 02 15:55:51 2006 +0000
     7.3 @@ -53,77 +53,25 @@ static DEFINE_PER_CPU(unsigned long, tra
     7.4  static void vmx_ctxt_switch_from(struct vcpu *v);
     7.5  static void vmx_ctxt_switch_to(struct vcpu *v);
     7.6  
     7.7 -static int vmx_initialize_guest_resources(struct vcpu *v)
     7.8 +static int vmx_vcpu_initialise(struct vcpu *v)
     7.9  {
    7.10 -    struct domain *d = v->domain;
    7.11 -    struct vcpu *vc;
    7.12 -    void *io_bitmap_a, *io_bitmap_b;
    7.13      int rc;
    7.14  
    7.15      v->arch.schedule_tail    = arch_vmx_do_launch;
    7.16      v->arch.ctxt_switch_from = vmx_ctxt_switch_from;
    7.17      v->arch.ctxt_switch_to   = vmx_ctxt_switch_to;
    7.18  
    7.19 -    if ( v->vcpu_id != 0 )
    7.20 -        return 1;
    7.21 -
    7.22 -    if ( !shadow_mode_external(d) )
    7.23 +    if ( (rc = vmx_create_vmcs(v)) != 0 )
    7.24      {
    7.25 -        dprintk(XENLOG_ERR, "Can't init HVM for dom %u vcpu %u: "
    7.26 -                "not in shadow external mode\n", 
    7.27 -                d->domain_id, v->vcpu_id);
    7.28 -        domain_crash(d);
    7.29 +        dprintk(XENLOG_WARNING,
    7.30 +                "Failed to create VMCS for vcpu %d: err=%d.\n",
    7.31 +                v->vcpu_id, rc);
    7.32 +        return rc;
    7.33      }
    7.34  
    7.35 -    for_each_vcpu ( d, vc )
    7.36 -    {
    7.37 -        memset(&vc->arch.hvm_vmx, 0, sizeof(struct arch_vmx_struct));
    7.38 -
    7.39 -        if ( (rc = vmx_create_vmcs(vc)) != 0 )
    7.40 -        {
    7.41 -            dprintk(XENLOG_WARNING,
    7.42 -                    "Failed to create VMCS for vcpu %d: err=%d.\n",
    7.43 -                    vc->vcpu_id, rc);
    7.44 -            return 0;
    7.45 -        }
    7.46 -
    7.47 -        spin_lock_init(&vc->arch.hvm_vmx.vmcs_lock);
    7.48 -
    7.49 -        if ( (io_bitmap_a = alloc_xenheap_pages(IO_BITMAP_ORDER)) == NULL )
    7.50 -        {
    7.51 -            dprintk(XENLOG_WARNING,
    7.52 -                   "Failed to allocate io bitmap b for vcpu %d.\n",
    7.53 -                    vc->vcpu_id);
    7.54 -            return 0;
    7.55 -        }
    7.56 +    spin_lock_init(&v->arch.hvm_vmx.vmcs_lock);
    7.57  
    7.58 -        if ( (io_bitmap_b = alloc_xenheap_pages(IO_BITMAP_ORDER)) == NULL )
    7.59 -        {
    7.60 -            dprintk(XENLOG_WARNING,
    7.61 -                    "Failed to allocate io bitmap b for vcpu %d.\n",
    7.62 -                    vc->vcpu_id);
    7.63 -            return 0;
    7.64 -        }
    7.65 -
    7.66 -        memset(io_bitmap_a, 0xff, 0x1000);
    7.67 -        memset(io_bitmap_b, 0xff, 0x1000);
    7.68 -
    7.69 -        /* don't bother debug port access */
    7.70 -        clear_bit(PC_DEBUG_PORT, io_bitmap_a);
    7.71 -
    7.72 -        vc->arch.hvm_vmx.io_bitmap_a = io_bitmap_a;
    7.73 -        vc->arch.hvm_vmx.io_bitmap_b = io_bitmap_b;
    7.74 -
    7.75 -    }
    7.76 -
    7.77 -    /*
    7.78 -     * Required to do this once per domain XXX todo: add a seperate function 
    7.79 -     * to do these.
    7.80 -     */
    7.81 -    memset(&d->shared_info->evtchn_mask[0], 0xff,
    7.82 -           sizeof(d->shared_info->evtchn_mask));
    7.83 -
    7.84 -    return 1;
    7.85 +    return 0;
    7.86  }
    7.87  
    7.88  static void vmx_relinquish_guest_resources(struct domain *d)
    7.89 @@ -747,7 +695,7 @@ static void vmx_setup_hvm_funcs(void)
    7.90  
    7.91      hvm_funcs.disable = stop_vmx;
    7.92  
    7.93 -    hvm_funcs.initialize_guest_resources = vmx_initialize_guest_resources;
    7.94 +    hvm_funcs.vcpu_initialise = vmx_vcpu_initialise;
    7.95      hvm_funcs.relinquish_guest_resources = vmx_relinquish_guest_resources;
    7.96  
    7.97      hvm_funcs.store_cpu_guest_regs = vmx_store_cpu_guest_regs;
     8.1 --- a/xen/arch/x86/setup.c	Thu Nov 02 14:27:16 2006 +0000
     8.2 +++ b/xen/arch/x86/setup.c	Thu Nov 02 15:55:51 2006 +0000
     8.3 @@ -363,7 +363,7 @@ void __init __start_xen(multiboot_info_t
     8.4              e820_raw[e820_raw_nr].size = 
     8.5                  ((u64)map->length_high << 32) | (u64)map->length_low;
     8.6              e820_raw[e820_raw_nr].type = 
     8.7 -                (map->type > E820_SHARED_PAGE) ? E820_RESERVED : map->type;
     8.8 +                (map->type > E820_NVS) ? E820_RESERVED : map->type;
     8.9              e820_raw_nr++;
    8.10  
    8.11              bytes += map->size + 4;
     9.1 --- a/xen/include/asm-x86/hvm/hvm.h	Thu Nov 02 14:27:16 2006 +0000
     9.2 +++ b/xen/include/asm-x86/hvm/hvm.h	Thu Nov 02 15:55:51 2006 +0000
     9.3 @@ -35,7 +35,7 @@ struct hvm_function_table {
     9.4      /*
     9.5       * Initialize/relinquish HVM guest resources
     9.6       */
     9.7 -    int  (*initialize_guest_resources)(struct vcpu *v);
     9.8 +    int  (*vcpu_initialise)(struct vcpu *v);
     9.9      void (*relinquish_guest_resources)(struct domain *d);
    9.10  
    9.11      /*
    9.12 @@ -91,27 +91,21 @@ hvm_disable(void)
    9.13          hvm_funcs.disable();
    9.14  }
    9.15  
    9.16 -void hvm_create_event_channels(struct vcpu *v);
    9.17 -void hvm_map_io_shared_pages(struct vcpu *v);
    9.18 +void hvm_create_event_channel(struct vcpu *v);
    9.19  
    9.20  static inline int
    9.21 -hvm_initialize_guest_resources(struct vcpu *v)
    9.22 +hvm_vcpu_initialise(struct vcpu *v)
    9.23  {
    9.24 -    int ret = 1;
    9.25 -    if ( hvm_funcs.initialize_guest_resources )
    9.26 -        ret = hvm_funcs.initialize_guest_resources(v);
    9.27 -    if ( ret == 1 ) {
    9.28 -        hvm_map_io_shared_pages(v);
    9.29 -        hvm_create_event_channels(v);
    9.30 -    }
    9.31 -    return ret;
    9.32 +    int rc;
    9.33 +    if ( (rc = hvm_funcs.vcpu_initialise(v)) == 0 )
    9.34 +        hvm_create_event_channel(v);
    9.35 +    return rc;
    9.36  }
    9.37  
    9.38  static inline void
    9.39  hvm_relinquish_guest_resources(struct domain *d)
    9.40  {
    9.41 -    if (hvm_funcs.relinquish_guest_resources)
    9.42 -        hvm_funcs.relinquish_guest_resources(d);
    9.43 +    hvm_funcs.relinquish_guest_resources(d);
    9.44  }
    9.45  
    9.46  static inline void
    10.1 --- a/xen/include/asm-x86/hvm/vmx/vmcs.h	Thu Nov 02 14:27:16 2006 +0000
    10.2 +++ b/xen/include/asm-x86/hvm/vmx/vmcs.h	Thu Nov 02 15:55:51 2006 +0000
    10.3 @@ -50,10 +50,6 @@ struct vmx_msr_state {
    10.4      unsigned long shadow_gs;
    10.5  };
    10.6  
    10.7 -/* io bitmap is 4KBytes in size */
    10.8 -#define IO_BITMAP_SIZE      0x1000
    10.9 -#define IO_BITMAP_ORDER     (get_order_from_bytes(IO_BITMAP_SIZE))
   10.10 -
   10.11  struct arch_vmx_struct {
   10.12      /* Virtual address of VMCS. */
   10.13      struct vmcs_struct  *vmcs;
   10.14 @@ -82,7 +78,6 @@ struct arch_vmx_struct {
   10.15      unsigned long        cpu_cr3;
   10.16      unsigned long        cpu_based_exec_control;
   10.17      struct vmx_msr_state msr_content;
   10.18 -    void                *io_bitmap_a, *io_bitmap_b;
   10.19      unsigned long        vmxassist_enabled:1; 
   10.20  };
   10.21  
    11.1 --- a/xen/include/public/hvm/e820.h	Thu Nov 02 14:27:16 2006 +0000
    11.2 +++ b/xen/include/public/hvm/e820.h	Thu Nov 02 15:55:51 2006 +0000
    11.3 @@ -7,12 +7,6 @@
    11.4  #define E820_ACPI         3
    11.5  #define E820_NVS          4
    11.6  
    11.7 -/* Xen HVM extended E820 types. */
    11.8 -#define E820_IO          16
    11.9 -#define E820_SHARED_PAGE 17
   11.10 -#define E820_XENSTORE    18
   11.11 -#define E820_BUFFERED_IO 19
   11.12 -
   11.13  /* E820 location in HVM virtual address space. */
   11.14  #define E820_MAP_PAGE        0x00090000
   11.15  #define E820_MAP_NR_OFFSET   0x000001E8
    12.1 --- a/xen/include/public/hvm/params.h	Thu Nov 02 14:27:16 2006 +0000
    12.2 +++ b/xen/include/public/hvm/params.h	Thu Nov 02 15:55:51 2006 +0000
    12.3 @@ -7,7 +7,9 @@
    12.4  #define HVM_PARAM_STORE_EVTCHN 2
    12.5  #define HVM_PARAM_APIC_ENABLED 3
    12.6  #define HVM_PARAM_PAE_ENABLED  4
    12.7 -#define HVM_NR_PARAMS          5
    12.8 +#define HVM_PARAM_IOREQ_PFN    5
    12.9 +#define HVM_PARAM_BUFIOREQ_PFN 6
   12.10 +#define HVM_NR_PARAMS          7
   12.11  
   12.12  /* Get/set subcommands: extra argument == pointer to xen_hvm_param struct. */
   12.13  #define HVMOP_set_param 0
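
Downstream consumers such as the device model can now retrieve these page
locations instead of parsing a magic e820 entry. A hedged usage sketch,
assuming libxc provides xc_get_hvm_param(handle, dom, param, &value) as the
counterpart to the xc_set_hvm_param() calls in xc_hvm_build.c above (verify
the exact signature against your xenctrl.h):

    #include <stdint.h>
    #include <xenctrl.h>
    #include <xen/hvm/params.h>

    /* Fetch the ioreq and buffered-ioreq pfns for a domain; returns 0
     * on success, -1 if either hypercall fails. */
    static int get_ioreq_pfns(int xc_handle, uint32_t dom,
                              unsigned long *ioreq_pfn,
                              unsigned long *bufioreq_pfn)
    {
        if ( xc_get_hvm_param(xc_handle, dom, HVM_PARAM_IOREQ_PFN,
                              ioreq_pfn) ||
             xc_get_hvm_param(xc_handle, dom, HVM_PARAM_BUFIOREQ_PFN,
                              bufioreq_pfn) )
            return -1;
        return 0;
    }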