ia64/xen-unstable

changeset 12521:357a3c90d67b

[IA64] New initial memory interface for HVM on ia64

Use xc_domain_memory_populate_physmap() to allocate memory and
build P2M/M2P table in setup_guest(). vmx_build_physmap_table()
is only used to mark IO space now.

Signed-off-by: Zhang Xin <xing.z.zhang@intel.com>
author awilliam@xenbuild.aw
date Wed Nov 22 09:30:27 2006 -0700 (2006-11-22)
parents bcd2960d6dfd
children 8254ba7c0def
files tools/libxc/ia64/xc_ia64_hvm_build.c xen/arch/ia64/vmx/vmx_init.c
line diff
     1.1 --- a/tools/libxc/ia64/xc_ia64_hvm_build.c	Mon Nov 20 21:10:59 2006 -0700
     1.2 +++ b/tools/libxc/ia64/xc_ia64_hvm_build.c	Wed Nov 22 09:30:27 2006 -0700
     1.3 @@ -546,19 +546,32 @@ add_pal_hob(void* hob_buf)
     1.4      return 0;
     1.5  }
     1.6  
     1.7 +#define GFW_PAGES (GFW_SIZE >> PAGE_SHIFT)
     1.8 +#define VGA_START_PAGE (VGA_IO_START >> PAGE_SHIFT)
     1.9 +#define VGA_END_PAGE ((VGA_IO_START + VGA_IO_SIZE) >> PAGE_SHIFT)
     1.10 +/*
     1.11 + * In this function, we allocate memory and build the P2M/M2P tables for a
     1.12 + * VTI guest.  First, a discontiguous pfn list is initialized: normal memory
     1.13 + * begins at 0, while GFW memory and the other three pages sit at the places
     1.14 + * defined in xen/include/public/arch-ia64.h.  Next,
     1.15 + * xc_domain_memory_populate_physmap() is called three times, each with a
     1.16 + * different 'extent_order', to conveniently allocate the discontiguous memory.
     1.17 + */
    1.18  static int
    1.19  setup_guest(int xc_handle, uint32_t dom, unsigned long memsize,
    1.20              char *image, unsigned long image_size, uint32_t vcpus,
    1.21              unsigned int store_evtchn, unsigned long *store_mfn)
    1.22  {
    1.23 -    unsigned long page_array[3];
    1.24 +    xen_pfn_t *pfn_list;
    1.25      shared_iopage_t *sp;
    1.26      void *ioreq_buffer_page;
    1.27 -    // memsize = required memsize(in configure file) + 16M
     1.28 +    // memsize equals the normal memory size (in the config file) + 16M
    1.29      // dom_memsize will pass to xc_ia64_build_hob(), so must be subbed 16M 
    1.30      unsigned long dom_memsize = ((memsize - 16) << 20);
    1.31      unsigned long nr_pages = (unsigned long)memsize << (20 - PAGE_SHIFT);
    1.32 +    unsigned long normal_pages = nr_pages - GFW_PAGES;
    1.33      int rc;
    1.34 +    long i, j;
    1.35      DECLARE_DOMCTL;
    1.36  
    1.37      // ROM size for guest firmware, ioreq page and xenstore page
    1.38 @@ -569,14 +582,63 @@ setup_guest(int xc_handle, uint32_t dom,
    1.39          return -1;
    1.40      }
    1.41  
    1.42 -    rc = xc_domain_memory_increase_reservation(xc_handle, dom, nr_pages,
    1.43 -                                               0, 0, NULL); 
    1.44 +    pfn_list = malloc(nr_pages * sizeof(xen_pfn_t));
    1.45 +    if (pfn_list == NULL) {
    1.46 +        PERROR("Could not allocate memory.\n");
    1.47 +        return -1;
    1.48 +    }
    1.49 +
    1.50 +    // Allocate pfn for normal memory
    1.51 +    for (i = 0; i < dom_memsize >> PAGE_SHIFT; i++)
    1.52 +        pfn_list[i] = i;
    1.53 +
     1.54 +    // If normal memory > 3G, reserve the 3G ~ 4G range for MMIO, GFW and others.
    1.55 +    for (j = (MMIO_START >> PAGE_SHIFT); j < (dom_memsize >> PAGE_SHIFT); j++)
    1.56 +        pfn_list[j] += ((1 * MEM_G) >> PAGE_SHIFT);
    1.57 +
     1.58 +    // Allocate memory for the VTI guest, up to the VGA hole at 0xA0000-0xC0000.
    1.59 +    rc = xc_domain_memory_populate_physmap(xc_handle, dom,
    1.60 +                                           (normal_pages > VGA_START_PAGE) ?
    1.61 +                                           VGA_START_PAGE : normal_pages,
    1.62 +                                           0, 0, &pfn_list[0]);
    1.63 +
    1.64 +    // We're not likely to attempt to create a domain with less than
    1.65 +    // 640k of memory, but test for completeness
    1.66 +    if (rc == 0 && nr_pages > VGA_END_PAGE)
    1.67 +        rc = xc_domain_memory_populate_physmap(xc_handle, dom,
    1.68 +                                               normal_pages - VGA_END_PAGE,
    1.69 +                                               0, 0, &pfn_list[VGA_END_PAGE]);
    1.70      if (rc != 0) {
    1.71 -        PERROR("Could not allocate memory for HVM guest.\n");
    1.72 +        PERROR("Could not allocate normal memory for Vti guest.\n");
    1.73          goto error_out;
    1.74      }
    1.75  
    1.76 -    /* This will creates the physmap.  */
     1.77 +    // We allocate additional pfns for GFW and the other three pages, so
     1.78 +    // the pfn_list is not contiguous.  Because of this we must still
     1.79 +    // support the old xc_ia64_get_pfn_list() interface.
    1.80 +    // Here i = (dom_memsize >> PAGE_SHIFT)
    1.81 +    for (j = 0; i < nr_pages - 3; i++, j++) 
    1.82 +        pfn_list[i] = (GFW_START >> PAGE_SHIFT) + j;
    1.83 +
    1.84 +    rc = xc_domain_memory_populate_physmap(xc_handle, dom, GFW_PAGES,
    1.85 +                                           0, 0, &pfn_list[normal_pages]);
    1.86 +    if (rc != 0) {
    1.87 +        PERROR("Could not allocate GFW memory for Vti guest.\n");
    1.88 +        goto error_out;
    1.89 +    }
    1.90 +
    1.91 +    // Here i = (dom_memsize >> PAGE_SHIFT) + GFW_PAGES
    1.92 +    pfn_list[i] = IO_PAGE_START >> PAGE_SHIFT;
    1.93 +    pfn_list[i+1] = STORE_PAGE_START >> PAGE_SHIFT;
    1.94 +    pfn_list[i+2] = BUFFER_IO_PAGE_START >> PAGE_SHIFT; 
    1.95 +
    1.96 +    rc = xc_domain_memory_populate_physmap(xc_handle, dom, 3,
    1.97 +                                           0, 0, &pfn_list[nr_pages - 3]);
    1.98 +    if (rc != 0) {
    1.99 +        PERROR("Could not allocate GFW memory for Vti guest.\n");
   1.100 +        goto error_out;
   1.101 +    }
   1.102 +
   1.103      domctl.u.arch_setup.flags = XEN_DOMAINSETUP_hvm_guest;
   1.104      domctl.u.arch_setup.bp = 0;
   1.105      domctl.u.arch_setup.maxmem = 0;
   1.106 @@ -585,7 +647,13 @@ setup_guest(int xc_handle, uint32_t dom,
   1.107      if (xc_domctl(xc_handle, &domctl))
   1.108          goto error_out;
   1.109  
   1.110 -    /* Load guest firmware */
   1.111 +    if (xc_domain_translate_gpfn_list(xc_handle, dom, nr_pages,
   1.112 +                                      pfn_list, pfn_list)) {
   1.113 +        PERROR("Could not translate addresses of HVM guest.\n");
   1.114 +        goto error_out;
   1.115 +    }
   1.116 +
   1.117 +    // Load guest firmware 
   1.118      if (xc_ia64_copy_to_domain_pages(xc_handle, dom, image,
   1.119                              (GFW_START + GFW_SIZE - image_size) >> PAGE_SHIFT,
   1.120                              image_size >> PAGE_SHIFT)) {
   1.121 @@ -593,36 +661,33 @@ setup_guest(int xc_handle, uint32_t dom,
   1.122          goto error_out;
   1.123      }
   1.124  
   1.125 -    /* Hand-off state passed to guest firmware */
   1.126 +    // Hand-off state passed to guest firmware 
   1.127      if (xc_ia64_build_hob(xc_handle, dom, dom_memsize,
   1.128                            (unsigned long)vcpus) < 0) {
   1.129          PERROR("Could not build hob\n");
   1.130          goto error_out;
   1.131      }
   1.132  
   1.133 -    /* Retrieve special pages like io, xenstore, etc. */
   1.134 -    if (xc_ia64_get_pfn_list(xc_handle, dom, page_array,
   1.135 -                             IO_PAGE_START>>PAGE_SHIFT, 3) != 3) {
   1.136 -        PERROR("Could not get the page frame list");
   1.137 -        goto error_out;
   1.138 -    }
   1.139 -
   1.140      xc_set_hvm_param(xc_handle, dom,
   1.141                       HVM_PARAM_STORE_PFN, STORE_PAGE_START>>PAGE_SHIFT);
   1.142      xc_set_hvm_param(xc_handle, dom, HVM_PARAM_STORE_EVTCHN, store_evtchn);
   1.143  
   1.144 -    *store_mfn = page_array[1];
   1.145 -    sp = (shared_iopage_t *)xc_map_foreign_range(xc_handle, dom,
   1.146 -                               PAGE_SIZE, PROT_READ|PROT_WRITE, page_array[0]);
   1.147 +    // Retrieve special pages like io, xenstore, etc. 
   1.148 +    *store_mfn = pfn_list[nr_pages - 2];
   1.149 +    sp = (shared_iopage_t *)xc_map_foreign_range(xc_handle, dom, PAGE_SIZE,
   1.150 +                                                 PROT_READ | PROT_WRITE,
   1.151 +                                                 pfn_list[nr_pages - 3]);
   1.152      if (sp == 0)
   1.153          goto error_out;
   1.154  
   1.155      memset(sp, 0, PAGE_SIZE);
   1.156      munmap(sp, PAGE_SIZE);
   1.157 -    ioreq_buffer_page = xc_map_foreign_range(xc_handle, dom,
   1.158 -                               PAGE_SIZE, PROT_READ|PROT_WRITE, page_array[2]); 
   1.159 +    ioreq_buffer_page = xc_map_foreign_range(xc_handle, dom, PAGE_SIZE,
   1.160 +                                             PROT_READ | PROT_WRITE,
   1.161 +                                             pfn_list[nr_pages - 1]); 
   1.162      memset(ioreq_buffer_page,0,PAGE_SIZE);
   1.163      munmap(ioreq_buffer_page, PAGE_SIZE);
   1.164 +    free(pfn_list);
   1.165      return 0;
   1.166  
   1.167  error_out:
     2.1 --- a/xen/arch/ia64/vmx/vmx_init.c	Mon Nov 20 21:10:59 2006 -0700
     2.2 +++ b/xen/arch/ia64/vmx/vmx_init.c	Wed Nov 22 09:30:27 2006 -0700
     2.3 @@ -359,17 +359,11 @@ static const io_range_t io_ranges[] = {
     2.4  	{PIB_START, PIB_SIZE, GPFN_PIB},
     2.5  };
     2.6  
     2.7 -/* Reseve 1 page for shared I/O ,1 page for xenstore and 1 page for buffer I/O.  */
     2.8 -#define VMX_SYS_PAGES	(3 + (GFW_SIZE >> PAGE_SHIFT))
     2.9 -/* If we support maxmem for domVTi, we should change from tot_page to max_pages.
    2.10 - * #define VMX_CONFIG_PAGES(d) ((d)->max_pages - VMX_SYS_PAGES)
    2.11 - */
    2.12 -#define VMX_CONFIG_PAGES(d) ((d)->tot_pages - VMX_SYS_PAGES)
    2.13 -
    2.14 -static void vmx_build_physmap_table(struct domain *d)
     2.15 +// The P2M table is built in libxc/ia64/xc_ia64_hvm_build.c:setup_guest(),
     2.16 +// so we only mark the I/O memory space here.
    2.17 +static void vmx_build_io_physmap_table(struct domain *d)
    2.18  {
    2.19 -	unsigned long i, j, start, tmp, end, mfn;
    2.20 -	struct list_head *list_ent = d->page_list.next;
    2.21 +	unsigned long i, j;
    2.22  
    2.23  	/* Mark I/O ranges */
    2.24  	for (i = 0; i < (sizeof(io_ranges) / sizeof(io_range_t)); i++) {
    2.25 @@ -379,63 +373,13 @@ static void vmx_build_physmap_table(stru
    2.26  			                           ASSIGN_writable);
    2.27  	}
    2.28  
    2.29 -	/* Map normal memory below 3G */
    2.30 -	end = VMX_CONFIG_PAGES(d) << PAGE_SHIFT;
    2.31 -	tmp = end < MMIO_START ? end : MMIO_START;
    2.32 -	for (i = 0; (i < tmp) && (list_ent != &d->page_list); i += PAGE_SIZE) {
    2.33 -		mfn = page_to_mfn(list_entry(list_ent, struct page_info, list));
    2.34 -		list_ent = mfn_to_page(mfn)->list.next;
    2.35 -		if (VGA_IO_START <= i && i < VGA_IO_START + VGA_IO_SIZE)
    2.36 -			continue;
    2.37 -		assign_domain_page(d, i, mfn << PAGE_SHIFT);
    2.38 -	}
    2.39 -	ASSERT(list_ent != &d->page_list);
    2.40 -
    2.41 -	/* Map normal memory beyond 4G */
    2.42 -	if (unlikely(end > MMIO_START)) {
    2.43 -		start = 4 * MEM_G;
    2.44 -		end = start + (end - 3 * MEM_G);
    2.45 -		for (i = start;
    2.46 -		     (i < end) && (list_ent != &d->page_list); i += PAGE_SIZE) {
    2.47 -			mfn = page_to_mfn(list_entry(list_ent,
    2.48 -			                             struct page_info, list));
    2.49 -			assign_domain_page(d, i, mfn << PAGE_SHIFT);
    2.50 -			list_ent = mfn_to_page(mfn)->list.next;
    2.51 -		}
    2.52 -		ASSERT(list_ent != &d->page_list);
    2.53 -	}
    2.54 -	 
    2.55 -	/* Map guest firmware */
    2.56 -	for (i = GFW_START; (i < GFW_START + GFW_SIZE) &&
    2.57 -	     (list_ent != &d->page_list); i += PAGE_SIZE) {
    2.58 -		mfn = page_to_mfn(list_entry(list_ent, struct page_info, list));
    2.59 -		assign_domain_page(d, i, mfn << PAGE_SHIFT);
    2.60 -		list_ent = mfn_to_page(mfn)->list.next;
    2.61 -	}
    2.62 -	ASSERT(list_ent != &d->page_list);
    2.63 -
    2.64 -	/* Map for shared I/O page and xenstore */
    2.65 -	mfn = page_to_mfn(list_entry(list_ent, struct page_info, list));
    2.66 -	assign_domain_page(d, IO_PAGE_START, mfn << PAGE_SHIFT);
    2.67 -	list_ent = mfn_to_page(mfn)->list.next;
    2.68 -	ASSERT(list_ent != &d->page_list);
    2.69 -
    2.70 -	mfn = page_to_mfn(list_entry(list_ent, struct page_info, list));
    2.71 -	assign_domain_page(d, STORE_PAGE_START, mfn << PAGE_SHIFT);
    2.72 -	list_ent = mfn_to_page(mfn)->list.next;
    2.73 -	ASSERT(list_ent != &d->page_list);
    2.74 -
    2.75 -	mfn = page_to_mfn(list_entry(list_ent, struct page_info, list));
    2.76 -	assign_domain_page(d, BUFFER_IO_PAGE_START, mfn << PAGE_SHIFT);
    2.77 -	list_ent = mfn_to_page(mfn)->list.next;
    2.78 -	ASSERT(list_ent == &d->page_list);
    2.79  }
    2.80  
    2.81  void vmx_setup_platform(struct domain *d)
    2.82  {
    2.83  	ASSERT(d != dom0); /* only for non-privileged vti domain */
    2.84  
    2.85 -	vmx_build_physmap_table(d);
    2.86 +	vmx_build_io_physmap_table(d);
    2.87  
    2.88  	d->arch.vmx_platform.shared_page_va =
    2.89  		(unsigned long)__va(__gpa_to_mpa(d, IO_PAGE_START));