ia64/xen-unstable

changeset 6799:2c823d27cf33

Catch up with xen-unstable, add ia64 specifics to tools, and merge some VTI changes
Signed-off-by: Kevin Tian <Kevin.tian@intel.com>
author djm@kirby.fc.hp.com
date Tue Sep 13 13:42:33 2005 -0600 (2005-09-13)
parents 5cd24dd33033
children 6dadf4d93ee3
files tools/libxc/Makefile tools/libxc/xc_ia64_stubs.c tools/libxc/xc_linux_build.c tools/libxc/xc_private.c tools/libxc/xenctrl.h tools/libxc/xg_private.h tools/python/xen/xend/image.py xen/arch/ia64/vmx/vmx_init.c xen/arch/ia64/xen/dom0_ops.c xen/arch/ia64/xen/dom_fw.c xen/arch/ia64/xen/domain.c xen/arch/ia64/xen/hypercall.c xen/arch/ia64/xen/process.c xen/arch/ia64/xen/xenmisc.c xen/include/asm-ia64/domain.h xen/include/asm-ia64/vmx_vpd.h xen/include/asm-ia64/xenpage.h xen/include/public/arch-ia64.h
line diff
     1.1 --- a/tools/libxc/Makefile	Tue Sep 13 13:08:00 2005 -0600
     1.2 +++ b/tools/libxc/Makefile	Tue Sep 13 13:42:33 2005 -0600
     1.3 @@ -23,6 +23,10 @@ SRCS       += xc_misc.c
     1.4  SRCS       += xc_physdev.c
     1.5  SRCS       += xc_private.c
     1.6  SRCS       += xc_sedf.c
     1.7 +BUILD_SRCS += xc_linux_build.c
     1.8 +BUILD_SRCS += xc_load_bin.c
     1.9 +BUILD_SRCS += xc_load_elf.c
    1.10 +BUILD_SRCS += xg_private.c
    1.11  
    1.12  ifeq ($(XEN_TARGET_ARCH),ia64)
    1.13  BUILD_SRCS += xc_ia64_stubs.c
    1.14 @@ -31,13 +35,9 @@ SRCS       += xc_ptrace.c
    1.15  SRCS       += xc_ptrace_core.c
    1.16  
    1.17  BUILD_SRCS := xc_load_aout9.c
    1.18 -BUILD_SRCS += xc_load_bin.c
    1.19 -BUILD_SRCS += xc_load_elf.c
    1.20 -BUILD_SRCS += xc_linux_build.c
    1.21  BUILD_SRCS += xc_linux_restore.c
    1.22  BUILD_SRCS += xc_linux_save.c
    1.23  BUILD_SRCS += xc_vmx_build.c
    1.24 -BUILD_SRCS += xg_private.c
    1.25  endif
    1.26  
    1.27  CFLAGS   += -Wall
     2.1 --- a/tools/libxc/xc_ia64_stubs.c	Tue Sep 13 13:08:00 2005 -0600
     2.2 +++ b/tools/libxc/xc_ia64_stubs.c	Tue Sep 13 13:42:33 2005 -0600
     2.3 @@ -1,12 +1,16 @@
     2.4 -#include "xc_private.h"
     2.5 +#include "xg_private.h"
     2.6 +#include "xenguest.h"
     2.7  
     2.8 -int xc_linux_save(int xc_handle, int io_fd, u32 dom)
     2.9 +int xc_linux_save(int xc_handle, int io_fd, u32 dom, u32 max_iters, 
    2.10 +                  u32 max_factor, u32 flags)
    2.11  {
    2.12      PERROR("xc_linux_save not implemented\n");
    2.13      return -1;
    2.14  }
    2.15  
    2.16 -int xc_linux_restore(int xc_handle, int io_fd, u32 dom, unsigned long nr_pfns)
    2.17 +int xc_linux_restore(int xc_handle, int io_fd, u32 dom, unsigned long nr_pfns,
    2.18 +		     unsigned int store_evtchn, unsigned long *store_mfn,
    2.19 +		     unsigned int console_evtchn, unsigned long *console_mfn)
    2.20  {
    2.21      PERROR("xc_linux_restore not implemented\n");
    2.22      return -1;
    2.23 @@ -20,7 +24,10 @@ int xc_vmx_build(int xc_handle,
    2.24                     const char *ramdisk_name,
    2.25                     const char *cmdline,
    2.26                     unsigned int control_evtchn,
    2.27 -                   unsigned long flags)
    2.28 +                   unsigned long flags,
    2.29 +                   unsigned int vcpus,
    2.30 +                   unsigned int store_evtchn,
    2.31 +                   unsigned long *store_mfn)
    2.32  {
    2.33      PERROR("xc_vmx_build not implemented\n");
    2.34      return -1;
     3.1 --- a/tools/libxc/xc_linux_build.c	Tue Sep 13 13:08:00 2005 -0600
     3.2 +++ b/tools/libxc/xc_linux_build.c	Tue Sep 13 13:42:33 2005 -0600
     3.3 @@ -296,12 +296,14 @@ static int setup_guest(int xc_handle,
     3.4                           unsigned long shared_info_frame,
     3.5                           unsigned long flags,
     3.6                           unsigned int vcpus,
     3.7 -                         unsigned int store_evtchn, unsigned long *store_mfn)
     3.8 +                         unsigned int store_evtchn, unsigned long *store_mfn,
     3.9 +		         unsigned int console_evtchn, unsigned long *console_mfn)
    3.10  {
    3.11      unsigned long *page_array = NULL;
    3.12      struct load_funcs load_funcs;
    3.13      struct domain_setup_info dsi;
    3.14 -    unsigned long start_page;
    3.15 +    unsigned long start_page, pgnr;
    3.16 +    start_info_t *start_info;
    3.17      int rc;
    3.18  
    3.19      rc = probeimageformat(image, image_size, &load_funcs);
    3.20 @@ -318,14 +320,14 @@ static int setup_guest(int xc_handle,
    3.21      dsi.v_end   = round_pgup(dsi.v_end);
    3.22  
    3.23      start_page = dsi.v_start >> PAGE_SHIFT;
    3.24 -    nr_pages = (dsi.v_end - dsi.v_start) >> PAGE_SHIFT;
    3.25 -    if ( (page_array = malloc(nr_pages * sizeof(unsigned long))) == NULL )
    3.26 +    pgnr = (dsi.v_end - dsi.v_start) >> PAGE_SHIFT;
    3.27 +    if ( (page_array = malloc(pgnr * sizeof(unsigned long))) == NULL )
    3.28      {
    3.29          PERROR("Could not allocate memory");
    3.30          goto error_out;
    3.31      }
    3.32  
    3.33 -    if ( xc_ia64_get_pfn_list(xc_handle, dom, page_array, start_page, nr_pages) != nr_pages )
    3.34 +    if ( xc_ia64_get_pfn_list(xc_handle, dom, page_array, start_page, pgnr) != pgnr )
    3.35      {
    3.36          PERROR("Could not get the page frame list");
    3.37          goto error_out;
    3.38 @@ -335,6 +337,33 @@ static int setup_guest(int xc_handle,
    3.39                             &dsi);
    3.40  
    3.41      *pvke = dsi.v_kernentry;
    3.42 +
    3.43 +    /* Now need to retrieve machine pfn for system pages:
    3.44 +     * 	start_info/store/console
    3.45 +     */
    3.46 +    pgnr = 3;
    3.47 +    if ( xc_ia64_get_pfn_list(xc_handle, dom, page_array, nr_pages - 3, pgnr) != pgnr)
    3.48 +    {
    3.49 +	PERROR("Could not get page frame for xenstore");
    3.50 +	goto error_out;
    3.51 +    }
    3.52 +
    3.53 +    *store_mfn = page_array[1];
    3.54 +    *console_mfn = page_array[2];
    3.55 +    printf("store_mfn: 0x%lx, console_mfn: 0x%lx\n",
    3.56 +	(u64)store_mfn, (u64)console_mfn);
    3.57 +
    3.58 +    start_info = xc_map_foreign_range(
    3.59 +        xc_handle, dom, PAGE_SIZE, PROT_READ|PROT_WRITE, page_array[0]);
    3.60 +    memset(start_info, 0, sizeof(*start_info));
    3.61 +    start_info->flags        = flags;
    3.62 +    start_info->store_mfn    = nr_pages - 2;
    3.63 +    start_info->store_evtchn = store_evtchn;
    3.64 +    start_info->console_mfn   = nr_pages - 1;
    3.65 +    start_info->console_evtchn = console_evtchn;
    3.66 +    munmap(start_info, PAGE_SIZE);
    3.67 +
    3.68 +    free(page_array);
    3.69      return 0;
    3.70  
    3.71   error_out:
    3.72 @@ -674,7 +703,12 @@ int xc_linux_build(int xc_handle,
    3.73      unsigned long image_size, initrd_size=0;
    3.74      unsigned long vstartinfo_start, vkern_entry, vstack_start;
    3.75  
    3.76 +#ifdef __ia64__
    3.77 +    /* Current xen/ia64 allocates domU pages on demand */
    3.78 +    if ( (nr_pages = xc_get_max_pages(xc_handle, domid)) < 0 )
    3.79 +#else
    3.80      if ( (nr_pages = xc_get_tot_pages(xc_handle, domid)) < 0 )
    3.81 +#endif
    3.82      {
    3.83          PERROR("Could not find total pages for domain");
    3.84          goto error_out;
    3.85 @@ -753,13 +787,16 @@ int xc_linux_build(int xc_handle,
    3.86  
    3.87  #ifdef __ia64__
    3.88      /* based on new_thread in xen/arch/ia64/domain.c */
    3.89 +    ctxt->flags = 0;
    3.90 +    ctxt->shared.flags = flags;
    3.91 +    ctxt->shared.start_info_pfn = nr_pages - 3; // metaphysical
    3.92      ctxt->regs.cr_ipsr = 0; /* all necessary bits filled by hypervisor */
    3.93      ctxt->regs.cr_iip = vkern_entry;
    3.94      ctxt->regs.cr_ifs = 1UL << 63;
    3.95      ctxt->regs.ar_fpsr = FPSR_DEFAULT;
    3.96      /* ctxt->regs.r28 = dom_fw_setup(); currently done by hypervisor, should move here */
    3.97      ctxt->vcpu.privregs = 0;
    3.98 -    ctxt->shared.flags = flags;
    3.99 +    ctxt->sys_pgnr = nr_pages - 3;
   3.100      i = 0; /* silence unused variable warning */
   3.101  #else /* x86 */
   3.102      /*
     4.1 --- a/tools/libxc/xc_private.c	Tue Sep 13 13:08:00 2005 -0600
     4.2 +++ b/tools/libxc/xc_private.c	Tue Sep 13 13:42:33 2005 -0600
     4.3 @@ -351,6 +351,15 @@ int xc_ia64_get_pfn_list(int xc_handle,
     4.4  
     4.5      return (ret < 0) ? -1 : op.u.getmemlist.num_pfns;
     4.6  }
     4.7 +
     4.8 +long xc_get_max_pages(int xc_handle, u32 domid)
     4.9 +{
    4.10 +    dom0_op_t op;
    4.11 +    op.cmd = DOM0_GETDOMAININFO;
    4.12 +    op.u.getdomaininfo.domain = (domid_t)domid;
    4.13 +    return (do_dom0_op(xc_handle, &op) < 0) ? 
    4.14 +        -1 : op.u.getdomaininfo.max_pages;
    4.15 +}
    4.16  #endif
    4.17  
    4.18  long xc_get_tot_pages(int xc_handle, u32 domid)
     5.1 --- a/tools/libxc/xenctrl.h	Tue Sep 13 13:08:00 2005 -0600
     5.2 +++ b/tools/libxc/xenctrl.h	Tue Sep 13 13:42:33 2005 -0600
     5.3 @@ -440,6 +440,8 @@ int xc_get_pfn_list(int xc_handle, u32 d
     5.4  int xc_ia64_get_pfn_list(int xc_handle, u32 domid, unsigned long *pfn_buf, 
     5.5                      unsigned int start_page, unsigned int nr_pages);
     5.6  
     5.7 +long xc_get_max_pages(int xc_handle, u32 domid);
     5.8 +
     5.9  int xc_mmuext_op(int xc_handle, struct mmuext_op *op, unsigned int nr_ops,
    5.10  		 domid_t dom);
    5.11  
     6.1 --- a/tools/libxc/xg_private.h	Tue Sep 13 13:08:00 2005 -0600
     6.2 +++ b/tools/libxc/xg_private.h	Tue Sep 13 13:42:33 2005 -0600
     6.3 @@ -1,6 +1,7 @@
     6.4  #ifndef XG_PRIVATE_H
     6.5  #define XG_PRIVATE_H
     6.6  
     6.7 +#include <unistd.h>
     6.8  #include <errno.h>
     6.9  #include <fcntl.h>
    6.10  #include <stdio.h>
     7.1 --- a/tools/python/xen/xend/image.py	Tue Sep 13 13:08:00 2005 -0600
     7.2 +++ b/tools/python/xen/xend/image.py	Tue Sep 13 13:42:33 2005 -0600
     7.3 @@ -222,7 +222,11 @@ class ImageHandler:
     7.4  
     7.5      def getDomainMemory(self, mem_mb):
     7.6          """Memory (in KB) the domain will need for mem_mb (in MB)."""
     7.7 -        return mem_mb * 1024
     7.8 +        if os.uname()[4] == 'ia64':
     7.9 +	    """Append extra system pages, like xenstore and console"""
    7.10 +	    return (mem_mb * 1024 + 3 * 16)
    7.11 +	else:
    7.12 +            return mem_mb * 1024
    7.13  
    7.14      def buildDomain(self):
    7.15          """Build the domain. Define in subclass."""
    7.16 @@ -457,5 +461,8 @@ class VmxImageHandler(ImageHandler):
    7.17          # 1 page for the PGD + 1 pte page for 4MB of memory (rounded)
    7.18          if os.uname()[4] == 'x86_64':
    7.19              return (5 + ((mem_mb + 1) >> 1)) * 4
    7.20 +	elif os.uname()[4] == 'ia64':
    7.21 +	    # XEN/IA64 has p2m table allocated on demand, so only return guest firmware size here.
    7.22 +	    return 16 * 1024
    7.23          else:
    7.24              return (1 + ((mem_mb + 3) >> 2)) * 4
     8.1 --- a/xen/arch/ia64/vmx/vmx_init.c	Tue Sep 13 13:08:00 2005 -0600
     8.2 +++ b/xen/arch/ia64/vmx/vmx_init.c	Tue Sep 13 13:42:33 2005 -0600
     8.3 @@ -164,7 +164,6 @@ void vmx_setup_platform(struct vcpu *v, 
     8.4  
     8.5  	/* FIXME: only support PMT table continuously by far */
     8.6  	d->arch.pmt = __va(c->pt_base);
     8.7 -	d->arch.max_pfn = c->pt_max_pfn;
     8.8  
     8.9  	vmx_final_setup_domain(d);
    8.10  }
    8.11 @@ -373,3 +372,119 @@ vmx_final_setup_domain(struct domain *d)
    8.12  
    8.13  	/* Other vmx specific initialization work */
    8.14  }
    8.15 +
    8.16 +/*
    8.17 + * Following stuff should really move to domain builder. However currently
    8.18 + * XEN/IA64 doesn't export physical -> machine page table to domain builder,
    8.19 + * instead only the copy. Also there's no hypercall to notify hypervisor
    8.20 + * IO ranges by far. Let's enhance it later.
    8.21 + */
    8.22 +
    8.23 +#define MEM_G   (1UL << 30)	
    8.24 +#define MEM_M   (1UL << 20)	
    8.25 +
    8.26 +#define MMIO_START       (3 * MEM_G)
    8.27 +#define MMIO_SIZE        (512 * MEM_M)
    8.28 +
    8.29 +#define VGA_IO_START     0xA0000UL
    8.30 +#define VGA_IO_SIZE      0x20000
    8.31 +
    8.32 +#define LEGACY_IO_START  (MMIO_START + MMIO_SIZE)
    8.33 +#define LEGACY_IO_SIZE   (64*MEM_M)  
    8.34 +
    8.35 +#define IO_PAGE_START (LEGACY_IO_START + LEGACY_IO_SIZE)
    8.36 +#define IO_PAGE_SIZE  PAGE_SIZE
    8.37 +
    8.38 +#define STORE_PAGE_START (IO_PAGE_START + IO_PAGE_SIZE)
    8.39 +#define STORE_PAGE_SIZE	 PAGE_SIZE
    8.40 +
    8.41 +#define IO_SAPIC_START   0xfec00000UL
    8.42 +#define IO_SAPIC_SIZE    0x100000
    8.43 +
    8.44 +#define PIB_START 0xfee00000UL
    8.45 +#define PIB_SIZE 0x100000 
    8.46 +
    8.47 +#define GFW_START        (4*MEM_G -16*MEM_M)
    8.48 +#define GFW_SIZE         (16*MEM_M)
    8.49 +
    8.50 +typedef struct io_range {
    8.51 +	unsigned long start;
    8.52 +	unsigned long size;
    8.53 +	unsigned long type;
    8.54 +} io_range_t;
    8.55 +
    8.56 +io_range_t io_ranges[] = {
    8.57 +	{VGA_IO_START, VGA_IO_SIZE, GPFN_FRAME_BUFFER},
    8.58 +	{MMIO_START, MMIO_SIZE, GPFN_LOW_MMIO},
    8.59 +	{LEGACY_IO_START, LEGACY_IO_SIZE, GPFN_LEGACY_IO},
    8.60 +	{IO_SAPIC_START, IO_SAPIC_SIZE, GPFN_IOSAPIC},
    8.61 +	{PIB_START, PIB_SIZE, GPFN_PIB},
    8.62 +};
    8.63 +
    8.64 +#define VMX_SYS_PAGES	(2 + GFW_SIZE >> PAGE_SHIFT)
    8.65 +#define VMX_CONFIG_PAGES(d) ((d)->max_pages - VMX_SYS_PAGES)
    8.66 +
    8.67 +int vmx_alloc_contig_pages(struct domain *d)
    8.68 +{
    8.69 +	unsigned int order, i, j;
    8.70 +	unsigned long start, end, pgnr, conf_nr;
    8.71 +	struct pfn_info *page;
    8.72 +	struct vcpu *v = d->vcpu[0];
    8.73 +
    8.74 +	ASSERT(!test_bit(ARCH_VMX_CONTIG_MEM, &v->arch.arch_vmx.flags));
    8.75 +
    8.76 +	conf_nr = VMX_CONFIG_PAGES(d);
    8.77 +	order = get_order_from_pages(conf_nr);
    8.78 +	if (unlikely((page = alloc_domheap_pages(d, order, 0)) == NULL)) {
    8.79 +	    printk("Could not allocate order=%d pages for vmx contig alloc\n",
    8.80 +			order);
    8.81 +	    return -1;
    8.82 +	}
    8.83 +
    8.84 +	/* Map normal memory below 3G */
    8.85 +	pgnr = page_to_pfn(page);
    8.86 +	end = conf_nr << PAGE_SHIFT;
    8.87 +	for (i = 0;
    8.88 +	     i < (end < MMIO_START ? end : MMIO_START);
    8.89 +	     i += PAGE_SIZE, pgnr++)
    8.90 +	    map_domain_page(d, i, pgnr << PAGE_SHIFT);
    8.91 +
    8.92 +	/* Map normal memory beyond 4G */
    8.93 +	if (unlikely(end > MMIO_START)) {
    8.94 +	    start = 4 * MEM_G;
    8.95 +	    end = start + (end - 3 * MEM_G);
    8.96 +	    for (i = start; i < end; i += PAGE_SIZE, pgnr++)
    8.97 +		map_domain_page(d, i, pgnr << PAGE_SHIFT);
    8.98 +	}
    8.99 +
   8.100 +	d->arch.max_pfn = end >> PAGE_SHIFT;
   8.101 +
   8.102 +	order = get_order_from_pages(VMX_SYS_PAGES);
   8.103 +	if (unlikely((page = alloc_domheap_pages(d, order, 0)) == NULL)) {
   8.104 +	    printk("Could not allocate order=%d pages for vmx contig alloc\n",
   8.105 +			order);
   8.106 +	    return -1;
   8.107 +	}
   8.108 +
   8.109 +	/* Map for shared I/O page and xenstore */
   8.110 +	pgnr = page_to_pfn(page);
   8.111 +	map_domain_page(d, IO_PAGE_START, pgnr << PAGE_SHIFT);
   8.112 +	pgnr++;
   8.113 +	map_domain_page(d, STORE_PAGE_START, pgnr << PAGE_SHIFT);
   8.114 +	pgnr++;
   8.115 +
   8.116 +	/* Map guest firmware */
   8.117 +	for (i = GFW_START; i < GFW_START + GFW_SIZE; i += PAGE_SIZE, pgnr++)
   8.118 +	    map_domain_page(d, i, pgnr << PAGE_SHIFT);
   8.119 +
   8.120 +	/* Mark I/O ranges */
   8.121 +	for (i = 0; i < (sizeof(io_ranges) / sizeof(io_range_t)); i++) {
   8.122 +	    for (j = io_ranges[i].start;
   8.123 +		 j < io_ranges[i].start + io_ranges[i].size;
   8.124 +		 j += PAGE_SIZE)
   8.125 +		map_domain_io_page(d, j);
   8.126 +	}
   8.127 +
   8.128 +	set_bit(ARCH_VMX_CONTIG_MEM, &v->arch.arch_vmx.flags);
   8.129 +	return 0;
   8.130 +}
     9.1 --- a/xen/arch/ia64/xen/dom0_ops.c	Tue Sep 13 13:08:00 2005 -0600
     9.2 +++ b/xen/arch/ia64/xen/dom0_ops.c	Tue Sep 13 13:42:33 2005 -0600
     9.3 @@ -148,7 +148,6 @@ long arch_do_dom0_op(dom0_op_t *op, dom0
     9.4          put_domain(d);
     9.5      }
     9.6      break;
     9.7 -#ifndef CONFIG_VTI
     9.8      /*
     9.9       * NOTE: DOM0_GETMEMLIST has somewhat different semantics on IA64 -
    9.10       * it actually allocates and maps pages.
    9.11 @@ -168,6 +167,14 @@ long arch_do_dom0_op(dom0_op_t *op, dom0
    9.12          {
    9.13              ret = 0;
    9.14  
    9.15 +	    /* A temp trick here. When max_pfns == -1, we assume
    9.16 +	     * the request is for  machine contiguous pages, so request
    9.17 +	     * all pages at first query
    9.18 +	     */
    9.19 +	    if ((op->u.getmemlist.max_pfns == -1UL) &&
    9.20 +		!test_bit(ARCH_VMX_CONTIG_MEM,&d->vcpu[0]->arch.arch_vmx.flags))
    9.21 +		return vmx_alloc_contig_pages(d) ? (-ENOMEM) : 0;
    9.22 +
    9.23              for ( i = start_page; i < (start_page + nr_pages); i++ )
    9.24              {
    9.25                  page = map_new_domain_page(d, i << PAGE_SHIFT);
    9.26 @@ -192,42 +199,6 @@ long arch_do_dom0_op(dom0_op_t *op, dom0
    9.27          }
    9.28      }
    9.29      break;
    9.30 -#else
    9.31 -    case DOM0_GETMEMLIST:
    9.32 -    {
    9.33 -	int i;
    9.34 -	struct domain *d = find_domain_by_id(op->u.getmemlist.domain);
    9.35 -	unsigned long max_pfns = op->u.getmemlist.max_pfns;
    9.36 -	unsigned long pfn;
    9.37 -	unsigned long *buffer = op->u.getmemlist.buffer;
    9.38 -	struct list_head *list_ent;
    9.39 -
    9.40 -	ret = -EINVAL;
    9.41 -	if (!d) {
    9.42 -	    ret = 0;
    9.43 -
    9.44 -	    spin_lock(&d->page_alloc_lock);
    9.45 -	    list_ent = d->page_list.next;
    9.46 -	    for (i = 0; (i < max_pfns) && (list_ent != &d->page_list); i++) {
    9.47 -		pfn = list_entry(list_ent, struct pfn_info, list) -
    9.48 -		    frame_table;
    9.49 -		if (put_user(pfn, buffer)) {
    9.50 -		    ret = -EFAULT;
    9.51 -		    break;
    9.52 -		}
    9.53 -		buffer++;
    9.54 -		list_ent = frame_table[pfn].list.next;
    9.55 -	    }
    9.56 -	    spin_unlock(&d->page_alloc_lock);
    9.57 -
    9.58 -	    op->u.getmemlist.num_pfns = i;
    9.59 -	    copy_to_user(u_dom0_op, op, sizeof(*op));
    9.60 -
    9.61 -	    put_domain(d);
    9.62 -	}
    9.63 -    }
    9.64 -    break;
    9.65 -#endif // CONFIG_VTI
    9.66      default:
    9.67          ret = -ENOSYS;
    9.68  
    10.1 --- a/xen/arch/ia64/xen/dom_fw.c	Tue Sep 13 13:08:00 2005 -0600
    10.2 +++ b/xen/arch/ia64/xen/dom_fw.c	Tue Sep 13 13:42:33 2005 -0600
    10.3 @@ -490,7 +490,7 @@ dom_fw_init (struct domain *d, char *arg
    10.4  	unsigned char checksum = 0;
    10.5  	char *cp, *cmd_line, *fw_vendor;
    10.6  	int i = 0;
    10.7 -	unsigned long maxmem = d->max_pages * PAGE_SIZE;
    10.8 +	unsigned long maxmem = (d->max_pages - d->arch.sys_pgnr) * PAGE_SIZE;
    10.9  	unsigned long start_mpaddr = ((d==dom0)?dom0_start:0);
   10.10  
   10.11  #	define MAKE_MD(typ, attr, start, end, abs) 	\	
   10.12 @@ -512,10 +512,6 @@ dom_fw_init (struct domain *d, char *arg
   10.13  		return 0;
   10.14  	}
   10.15  */
   10.16 -       /* Last page is for xenstore, and not exported to domain */
   10.17 -       if (d != dom0)
   10.18 -               maxmem = (d->max_pages - 1) * PAGE_SIZE;
   10.19 -
   10.20  	memset(fw_mem, 0, fw_mem_size);
   10.21  
   10.22  #ifdef XEN
    11.1 --- a/xen/arch/ia64/xen/domain.c	Tue Sep 13 13:08:00 2005 -0600
    11.2 +++ b/xen/arch/ia64/xen/domain.c	Tue Sep 13 13:42:33 2005 -0600
    11.3 @@ -233,6 +233,7 @@ void arch_do_createdomain(struct vcpu *v
    11.4  	d->arch.breakimm = 0x1000;
    11.5  	v->arch.breakimm = d->arch.breakimm;
    11.6  
    11.7 +	d->arch.sys_pgnr = 0;
    11.8  	d->arch.mm = xmalloc(struct mm_struct);
    11.9  	if (unlikely(!d->arch.mm)) {
   11.10  		printk("Can't allocate mm_struct for domain %d\n",d->domain_id);
   11.11 @@ -295,6 +296,7 @@ int arch_set_info_guest(struct vcpu *v, 
   11.12  	}
   11.13  
   11.14  	v->arch.domain_itm_last = -1L;
   11.15 +	d->arch.sys_pgnr = c->sys_pgnr;
   11.16  	d->shared_info->arch = c->shared;
   11.17  
   11.18  	/* Don't redo final setup */
   11.19 @@ -471,6 +473,43 @@ void map_domain_page(struct domain *d, u
   11.20  	else printk("map_domain_page: mpaddr %lx already mapped!\n",mpaddr);
   11.21  }
   11.22  
   11.23 +/* map a physical address with specified I/O flag */
   11.24 +void map_domain_io_page(struct domain *d, unsigned long mpaddr, unsigned long flags)
   11.25 +{
   11.26 +	struct mm_struct *mm = d->arch.mm;
   11.27 +	pgd_t *pgd;
   11.28 +	pud_t *pud;
   11.29 +	pmd_t *pmd;
   11.30 +	pte_t *pte;
   11.31 +	pte_t io_pte;
   11.32 +
   11.33 +	if (!mm->pgd) {
   11.34 +		printk("map_domain_page: domain pgd must exist!\n");
   11.35 +		return;
   11.36 +	}
   11.37 +	ASSERT(flags & GPFN_IO_MASK);
   11.38 +
   11.39 +	pgd = pgd_offset(mm,mpaddr);
   11.40 +	if (pgd_none(*pgd))
   11.41 +		pgd_populate(mm, pgd, pud_alloc_one(mm,mpaddr));
   11.42 +
   11.43 +	pud = pud_offset(pgd, mpaddr);
   11.44 +	if (pud_none(*pud))
   11.45 +		pud_populate(mm, pud, pmd_alloc_one(mm,mpaddr));
   11.46 +
   11.47 +	pmd = pmd_offset(pud, mpaddr);
   11.48 +	if (pmd_none(*pmd))
   11.49 +		pmd_populate_kernel(mm, pmd, pte_alloc_one_kernel(mm,mpaddr));
   11.50 +//		pmd_populate(mm, pmd, pte_alloc_one(mm,mpaddr));
   11.51 +
   11.52 +	pte = pte_offset_map(pmd, mpaddr);
   11.53 +	if (pte_none(*pte)) {
   11.54 +		pte_val(io_pte) = flags;
   11.55 +		set_pte(pte, io_pte);
   11.56 +	}
   11.57 +	else printk("map_domain_page: mpaddr %lx already mapped!\n",mpaddr);
   11.58 +}
   11.59 +
   11.60  void mpafoo(unsigned long mpaddr)
   11.61  {
   11.62  	extern unsigned long privop_trace;
   11.63 @@ -910,10 +949,12 @@ int construct_dom0(struct domain *d,
   11.64  	    panic("PAL CACHE FLUSH failed for dom0.\n");
   11.65  	printk("Sync i/d cache for dom0 image SUCC\n");
   11.66  
   11.67 +	/* Set up start info area. */
   11.68 +	si = (start_info_t *)alloc_xenheap_page();
   11.69 +	memset(si, 0, PAGE_SIZE);
   11.70 +	d->shared_info->arch.start_info_pfn = __pa(si) >> PAGE_SHIFT;
   11.71 +
   11.72  #if 0
   11.73 -	/* Set up start info area. */
   11.74 -	//si = (start_info_t *)vstartinfo_start;
   11.75 -	memset(si, 0, PAGE_SIZE);
   11.76  	si->nr_pages     = d->tot_pages;
   11.77  	si->shared_info  = virt_to_phys(d->shared_info);
   11.78  	si->flags        = SIF_PRIVILEGED | SIF_INITDOMAIN;
    12.1 --- a/xen/arch/ia64/xen/hypercall.c	Tue Sep 13 13:08:00 2005 -0600
    12.2 +++ b/xen/arch/ia64/xen/hypercall.c	Tue Sep 13 13:42:33 2005 -0600
    12.3 @@ -152,12 +152,9 @@ ia64_hypercall (struct pt_regs *regs)
    12.4  		break;
    12.5  
    12.6  	    case __HYPERVISOR_memory_op:
    12.7 -#ifdef CONFIG_VTI
    12.8 -		regs->r8 = do_dom_mem_op(regs->r14, regs->r15, regs->r16, regs->r17, regs->r18); 
    12.9 -#else
   12.10 +		//regs->r8 = do_dom_mem_op(regs->r14, regs->r15, regs->r16, regs->r17, regs->r18); 
   12.11  		/* we don't handle reservations; just return success */
   12.12  		regs->r8 = regs->r16;
   12.13 -#endif
   12.14  		break;
   12.15  
   12.16  	    case __HYPERVISOR_event_channel_op:
    13.1 --- a/xen/arch/ia64/xen/process.c	Tue Sep 13 13:08:00 2005 -0600
    13.2 +++ b/xen/arch/ia64/xen/process.c	Tue Sep 13 13:42:33 2005 -0600
    13.3 @@ -30,6 +30,7 @@
    13.4  #include <asm/ia64_int.h>
    13.5  #include <asm/dom_fw.h>
    13.6  #include "hpsim_ssc.h"
    13.7 +#include <xen/multicall.h>
    13.8  
    13.9  extern unsigned long vcpu_get_itir_on_fault(struct vcpu *, UINT64);
   13.10  extern struct ia64_sal_retval pal_emulator_static(UINT64);
   13.11 @@ -659,7 +660,8 @@ ia64_handle_break (unsigned long ifa, st
   13.12  		else do_ssc(vcpu_get_gr(current,36), regs);
   13.13  	}
   13.14  	else if (iim == d->arch.breakimm) {
   13.15 -		if (ia64_hypercall(regs))
   13.16 +		if (ia64_hypercall(regs) &&
   13.17 +		    !PSCBX(v, hypercall_continuation))
   13.18  			vcpu_increment_iip(current);
   13.19  	}
   13.20  	else if (!PSCB(v,interrupt_collection_enabled)) {
   13.21 @@ -747,3 +749,40 @@ printf("*** Handled privop masquerading 
   13.22  	if (check_lazy_cover && (isr & IA64_ISR_IR) && handle_lazy_cover(v, isr, regs)) return;
   13.23  	reflect_interruption(ifa,isr,itir,regs,vector);
   13.24  }
   13.25 +
   13.26 +unsigned long __hypercall_create_continuation(
   13.27 +	unsigned int op, unsigned int nr_args, ...)
   13.28 +{
   13.29 +    struct mc_state *mcs = &mc_state[smp_processor_id()];
   13.30 +    VCPU *vcpu = current;
   13.31 +    struct cpu_user_regs *regs = vcpu->arch.regs;
   13.32 +    unsigned int i;
   13.33 +    va_list args;
   13.34 +
   13.35 +    va_start(args, nr_args);
   13.36 +    if ( test_bit(_MCSF_in_multicall, &mcs->flags) ) {
   13.37 +	panic("PREEMPT happen in multicall\n");	// Not support yet
   13.38 +    } else {
   13.39 +	vcpu_set_gr(vcpu, 2, op);
   13.40 +	for ( i = 0; i < nr_args; i++) {
   13.41 +	    switch (i) {
   13.42 +	    case 0: vcpu_set_gr(vcpu, 14, va_arg(args, unsigned long));
   13.43 +		    break;
   13.44 +	    case 1: vcpu_set_gr(vcpu, 15, va_arg(args, unsigned long));
   13.45 +		    break;
   13.46 +	    case 2: vcpu_set_gr(vcpu, 16, va_arg(args, unsigned long));
   13.47 +		    break;
   13.48 +	    case 3: vcpu_set_gr(vcpu, 17, va_arg(args, unsigned long));
   13.49 +		    break;
   13.50 +	    case 4: vcpu_set_gr(vcpu, 18, va_arg(args, unsigned long));
   13.51 +		    break;
   13.52 +	    default: panic("Too many args for hypercall continuation\n");
   13.53 +		    break;
   13.54 +	    }
   13.55 +	}
   13.56 +    }
   13.57 +    vcpu->arch.hypercall_continuation = 1;
   13.58 +    va_end(args);
   13.59 +    return op;
   13.60 +}
   13.61 +
    14.1 --- a/xen/arch/ia64/xen/xenmisc.c	Tue Sep 13 13:08:00 2005 -0600
    14.2 +++ b/xen/arch/ia64/xen/xenmisc.c	Tue Sep 13 13:42:33 2005 -0600
    14.3 @@ -106,14 +106,6 @@ while(1);
    14.4  }
    14.5  #endif
    14.6  
    14.7 -#ifndef CONFIG_VTI
    14.8 -unsigned long __hypercall_create_continuation(
    14.9 -	unsigned int op, unsigned int nr_args, ...)
   14.10 -{
   14.11 -	printf("__hypercall_create_continuation: not implemented!!!\n");
   14.12 -}
   14.13 -#endif
   14.14 -
   14.15  ///////////////////////////////
   14.16  // from arch/ia64/page_alloc.c
   14.17  ///////////////////////////////
    15.1 --- a/xen/include/asm-ia64/domain.h	Tue Sep 13 13:08:00 2005 -0600
    15.2 +++ b/xen/include/asm-ia64/domain.h	Tue Sep 13 13:42:33 2005 -0600
    15.3 @@ -26,12 +26,9 @@ struct arch_domain {
    15.4  
    15.5      int imp_va_msb;
    15.6      unsigned long *pmt;	/* physical to machine table */
    15.7 -    /*
    15.8 -     * max_pfn is the maximum page frame in guest physical space, including
    15.9 -     * inter-middle I/O ranges and memory holes. This is different with
   15.10 -     * max_pages in domain struct, which indicates maximum memory size
   15.11 -     */
   15.12 -    unsigned long max_pfn;
   15.13 +    /* System pages out of guest memory, like for xenstore/console */
   15.14 +    unsigned long sys_pgnr;
   15.15 +    unsigned long max_pfn; /* Max pfn including I/O holes */
   15.16      struct virutal_platform_def     vmx_platform;
   15.17  
   15.18      u64 xen_vastart;
    16.1 --- a/xen/include/asm-ia64/vmx_vpd.h	Tue Sep 13 13:08:00 2005 -0600
    16.2 +++ b/xen/include/asm-ia64/vmx_vpd.h	Tue Sep 13 13:42:33 2005 -0600
    16.3 @@ -89,7 +89,8 @@ struct arch_vmx_struct {
    16.4  #define ARCH_VMX_VMCS_LAUNCH    1       /* Needs VMCS launch */
    16.5  #define ARCH_VMX_VMCS_RESUME    2       /* Needs VMCS resume */
    16.6  #define ARCH_VMX_IO_WAIT        3       /* Waiting for I/O completion */
    16.7 -#define ARCH_VMX_INTR_ASSIST   4       /* Need DM's assist to issue intr */
    16.8 +#define ARCH_VMX_INTR_ASSIST    4       /* Need DM's assist to issue intr */
    16.9 +#define ARCH_VMX_CONTIG_MEM 	5	/* Need contiguous machine pages */
   16.10  
   16.11  
   16.12  #define VMX_DEBUG 1
    17.1 --- a/xen/include/asm-ia64/xenpage.h	Tue Sep 13 13:08:00 2005 -0600
    17.2 +++ b/xen/include/asm-ia64/xenpage.h	Tue Sep 13 13:42:33 2005 -0600
    17.3 @@ -8,7 +8,7 @@
    17.4  #undef pfn_valid
    17.5  #undef page_to_pfn
    17.6  #undef pfn_to_page
    17.7 -# define pfn_valid(pfn)		(0)
    17.8 +# define pfn_valid(_pfn)		((_pfn) > max_page)
    17.9  # define page_to_pfn(_page)	((unsigned long) ((_page) - frame_table))
   17.10  # define pfn_to_page(_pfn)	(frame_table + (_pfn))
   17.11  
    18.1 --- a/xen/include/public/arch-ia64.h	Tue Sep 13 13:08:00 2005 -0600
    18.2 +++ b/xen/include/public/arch-ia64.h	Tue Sep 13 13:42:33 2005 -0600
    18.3 @@ -255,11 +255,8 @@ typedef mapped_regs_t vpd_t;
    18.4  #define __ARCH_HAS_VCPU_INFO
    18.5  
    18.6  typedef struct {
    18.7 -	int domain_controller_evtchn;
    18.8  	unsigned int flags;
    18.9 -	unsigned short store_evtchn;
   18.10 -	unsigned long store_mfn;
   18.11 -//} arch_shared_info_t;
   18.12 +	unsigned long start_info_pfn;
   18.13  } arch_shared_info_t;		// DON'T PACK 
   18.14  
   18.15  typedef struct vcpu_guest_context {
   18.16 @@ -268,10 +265,9 @@ typedef struct vcpu_guest_context {
   18.17  #define VGCF_IN_KERNEL (1<<2)
   18.18  	unsigned long flags;       /* VGCF_* flags */
   18.19  	unsigned long pt_base;     /* PMT table base */
   18.20 -	unsigned long pt_max_pfn;  /* Max pfn including holes */
   18.21  	unsigned long share_io_pg; /* Shared page for I/O emulation */
   18.22 +	unsigned long sys_pgnr;    /* System pages out of domain memory */
   18.23  	unsigned long vm_assist;   /* VMASST_TYPE_* bitmap, now none on IPF */
   18.24 -	unsigned long guest_iip;   /* Guest entry point */
   18.25  
   18.26  	cpu_user_regs_t regs;
   18.27  	arch_vcpu_info_t vcpu;