ia64/xen-unstable

changeset 8827:dfdb9cfc8b79

merge
author awilliam@xenbuild.aw
date Fri Feb 10 08:37:14 2006 -0700 (2006-02-10)
parents 1d36cca98fc3 6526a91d5555
children 982b9678af2c
files
line diff
     1.1 --- a/linux-2.6-xen-sparse/arch/ia64/kernel/setup.c	Fri Feb 10 12:35:19 2006 +0100
     1.2 +++ b/linux-2.6-xen-sparse/arch/ia64/kernel/setup.c	Fri Feb 10 08:37:14 2006 -0700
     1.3 @@ -61,6 +61,9 @@
     1.4  #include <asm/system.h>
     1.5  #include <asm/unistd.h>
     1.6  #include <asm/system.h>
     1.7 +#ifdef CONFIG_XEN
     1.8 +#include <asm/hypervisor.h>
     1.9 +#endif
    1.10  
    1.11  #if defined(CONFIG_SMP) && (IA64_CPU_SIZE > PAGE_SIZE)
    1.12  # error "struct cpuinfo_ia64 too big!"
    1.13 @@ -241,6 +244,12 @@ reserve_memory (void)
    1.14  	rsvd_region[n].end   = (unsigned long) ia64_imva(_end);
    1.15  	n++;
    1.16  
    1.17 +#ifdef CONFIG_XEN
    1.18 +	rsvd_region[n].start = (unsigned long) (HYPERVISOR_shared_info->arch.start_info_pfn << PAGE_SHIFT);
    1.19 +	rsvd_region[n].end   = rsvd_region[n].start + PAGE_SIZE;
    1.20 +	n++;
    1.21 +#endif
    1.22 +
    1.23  #ifdef CONFIG_BLK_DEV_INITRD
    1.24  	if (ia64_boot_param->initrd_start) {
    1.25  		rsvd_region[n].start = (unsigned long)__va(ia64_boot_param->initrd_start);
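
The hunk above reserves the page whose frame number Xen advertises in HYPERVISOR_shared_info->arch.start_info_pfn, so the boot allocator can never hand the start_info page out as free memory. A minimal sketch of how a guest could then reach that page once the reservation is in place (illustrative only; the helper name and the __va() conversion are assumptions, not code from this changeset):

    /* Sketch: resolve the start_info page published by the hypervisor.
     * Assumes the pfn refers to guest (meta)physical memory covered by
     * the kernel identity mapping, as the rsvd_region entry above also
     * assumes.  start_info_t comes from the public Xen interface. */
    static start_info_t *example_get_start_info(void)
    {
        unsigned long pa = HYPERVISOR_shared_info->arch.start_info_pfn
                           << PAGE_SHIFT;
        return (start_info_t *)__va(pa);
    }
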
     2.1 --- a/linux-2.6-xen-sparse/arch/ia64/xen/drivers/evtchn_ia64.c	Fri Feb 10 12:35:19 2006 +0100
     2.2 +++ b/linux-2.6-xen-sparse/arch/ia64/xen/drivers/evtchn_ia64.c	Fri Feb 10 08:37:14 2006 -0700
     2.3 @@ -106,8 +106,10 @@ int bind_virq_to_irqhandler(
     2.4      BUG_ON(HYPERVISOR_event_channel_op(&op) != 0 );
     2.5      evtchn = op.u.bind_virq.port;
     2.6  
     2.7 -    if (!unbound_irq(evtchn))
     2.8 -	return -EINVAL;
     2.9 +    if (!unbound_irq(evtchn)) {
    2.10 +        evtchn = -EINVAL;
    2.11 +        goto out;
    2.12 +    }
    2.13  
    2.14      evtchns[evtchn].handler = handler;
    2.15      evtchns[evtchn].dev_id = dev_id;
    2.16 @@ -115,6 +117,7 @@ int bind_virq_to_irqhandler(
    2.17      irq_info[evtchn] = mk_irq_info(IRQT_VIRQ, virq, evtchn);
    2.18  
    2.19      unmask_evtchn(evtchn);
    2.20 +out:
    2.21      spin_unlock(&irq_mapping_update_lock);
    2.22      return evtchn;
    2.23  }
    2.24 @@ -125,8 +128,10 @@ int bind_evtchn_to_irqhandler(unsigned i
    2.25  {
    2.26      spin_lock(&irq_mapping_update_lock);
    2.27  
    2.28 -    if (!unbound_irq(evtchn))
    2.29 -	return -EINVAL;
    2.30 +    if (!unbound_irq(evtchn)) {
    2.31 +	evtchn = -EINVAL;
    2.32 +	goto out;
    2.33 +    }
    2.34  
    2.35      evtchns[evtchn].handler = handler;
    2.36      evtchns[evtchn].dev_id = dev_id;
    2.37 @@ -134,6 +139,7 @@ int bind_evtchn_to_irqhandler(unsigned i
    2.38      irq_info[evtchn] = mk_irq_info(IRQT_EVTCHN, 0, evtchn);
    2.39  
    2.40      unmask_evtchn(evtchn);
    2.41 +out:
    2.42      spin_unlock(&irq_mapping_update_lock);
    2.43      return evtchn;
    2.44  }
    2.45 @@ -158,7 +164,7 @@ void unbind_from_irqhandler(unsigned int
    2.46      spin_lock(&irq_mapping_update_lock);
    2.47  
    2.48      if (unbound_irq(irq))
    2.49 -        return;
    2.50 +        goto out;
    2.51  
    2.52      op.cmd = EVTCHNOP_close;
    2.53      op.u.close.port = evtchn;
    2.54 @@ -179,6 +185,7 @@ void unbind_from_irqhandler(unsigned int
    2.55      evtchns[evtchn].handler = NULL;
    2.56      evtchns[evtchn].opened = 0;
    2.57  
    2.58 +out:
    2.59      spin_unlock(&irq_mapping_update_lock);
    2.60  }
    2.61  
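All three hunks in this file repair the same locking bug: the early return paths left bind_virq_to_irqhandler(), bind_evtchn_to_irqhandler() and unbind_from_irqhandler() with irq_mapping_update_lock still held. A minimal standalone sketch of the lock/goto-out idiom they adopt (the names here are illustrative, not the driver's own):

    #include <linux/spinlock.h>
    #include <linux/errno.h>

    static DEFINE_SPINLOCK(example_lock);

    /* Every exit path funnels through the single unlock site, so an
     * error return can no longer leak the lock. */
    static int example_bind(int port, int already_bound)
    {
        int ret;

        spin_lock(&example_lock);
        if (already_bound) {
            ret = -EINVAL;
            goto out;            /* error path: still unlocks */
        }
        ret = port;              /* success path */
    out:
        spin_unlock(&example_lock);
        return ret;
    }
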
     3.1 --- a/xen/arch/ia64/vmx/vmx_init.c	Fri Feb 10 12:35:19 2006 +0100
     3.2 +++ b/xen/arch/ia64/vmx/vmx_init.c	Fri Feb 10 08:37:14 2006 -0700
     3.3 @@ -317,7 +317,7 @@ int vmx_alloc_contig_pages(struct domain
     3.4  	    for (j = io_ranges[i].start;
     3.5  		 j < io_ranges[i].start + io_ranges[i].size;
     3.6  		 j += PAGE_SIZE)
     3.7 -		map_domain_page(d, j, io_ranges[i].type);
     3.8 +		assign_domain_page(d, j, io_ranges[i].type);
     3.9  	}
    3.10  
    3.11  	conf_nr = VMX_CONFIG_PAGES(d);
    3.12 @@ -334,14 +334,14 @@ int vmx_alloc_contig_pages(struct domain
    3.13  	for (i = 0;
    3.14  	     i < (end < MMIO_START ? end : MMIO_START);
    3.15  	     i += PAGE_SIZE, pgnr++)
    3.16 -	    map_domain_page(d, i, pgnr << PAGE_SHIFT);
    3.17 +	    assign_domain_page(d, i, pgnr << PAGE_SHIFT);
    3.18  
    3.19  	/* Map normal memory beyond 4G */
    3.20  	if (unlikely(end > MMIO_START)) {
    3.21  	    start = 4 * MEM_G;
    3.22  	    end = start + (end - 3 * MEM_G);
    3.23  	    for (i = start; i < end; i += PAGE_SIZE, pgnr++)
    3.24 -		map_domain_page(d, i, pgnr << PAGE_SHIFT);
    3.25 +		assign_domain_page(d, i, pgnr << PAGE_SHIFT);
    3.26  	}
    3.27  
    3.28  	d->arch.max_pfn = end >> PAGE_SHIFT;
    3.29 @@ -356,7 +356,7 @@ int vmx_alloc_contig_pages(struct domain
    3.30  	/* Map guest firmware */
    3.31  	pgnr = page_to_mfn(page);
    3.32  	for (i = GFW_START; i < GFW_START + GFW_SIZE; i += PAGE_SIZE, pgnr++)
    3.33 -	    map_domain_page(d, i, pgnr << PAGE_SHIFT);
    3.34 +	    assign_domain_page(d, i, pgnr << PAGE_SHIFT);
    3.35  
    3.36  	if (unlikely((page = alloc_domheap_pages(d, 1, 0)) == NULL)) {
    3.37  	    printk("Could not allocate order=1 pages for vmx contig alloc\n");
    3.38 @@ -365,9 +365,9 @@ int vmx_alloc_contig_pages(struct domain
    3.39  
    3.40  	/* Map for shared I/O page and xenstore */
    3.41  	pgnr = page_to_mfn(page);
    3.42 -	map_domain_page(d, IO_PAGE_START, pgnr << PAGE_SHIFT);
    3.43 +	assign_domain_page(d, IO_PAGE_START, pgnr << PAGE_SHIFT);
    3.44  	pgnr++;
    3.45 -	map_domain_page(d, STORE_PAGE_START, pgnr << PAGE_SHIFT);
    3.46 +	assign_domain_page(d, STORE_PAGE_START, pgnr << PAGE_SHIFT);
    3.47  
    3.48  	set_bit(ARCH_VMX_CONTIG_MEM, &v->arch.arch_vmx.flags);
    3.49  	return 0;
     4.1 --- a/xen/arch/ia64/xen/domain.c	Fri Feb 10 12:35:19 2006 +0100
     4.2 +++ b/xen/arch/ia64/xen/domain.c	Fri Feb 10 08:37:14 2006 -0700
     4.3 @@ -389,7 +389,7 @@ printk("map_new_domain0_page: start=%p,e
     4.4  }
     4.5  
     4.6  /* allocate new page for domain and map it to the specified metaphysical addr */
     4.7 -struct page * map_new_domain_page(struct domain *d, unsigned long mpaddr)
     4.8 +struct page * assign_new_domain_page(struct domain *d, unsigned long mpaddr)
     4.9  {
    4.10  	struct mm_struct *mm = d->arch.mm;
    4.11  	struct page *p = (struct page *)0;
    4.12 @@ -400,7 +400,7 @@ struct page * map_new_domain_page(struct
    4.13  extern unsigned long vhpt_paddr, vhpt_pend;
    4.14  
    4.15  	if (!mm->pgd) {
    4.16 -		printk("map_new_domain_page: domain pgd must exist!\n");
    4.17 +		printk("assign_new_domain_page: domain pgd must exist!\n");
    4.18  		return(p);
    4.19  	}
    4.20  	pgd = pgd_offset(mm,mpaddr);
    4.21 @@ -428,21 +428,21 @@ extern unsigned long vhpt_paddr, vhpt_pe
    4.22  			if (p) memset(__va(page_to_maddr(p)),0,PAGE_SIZE);
    4.23  		}
    4.24  		if (unlikely(!p)) {
    4.25 -printf("map_new_domain_page: Can't alloc!!!! Aaaargh!\n");
    4.26 +printf("assign_new_domain_page: Can't alloc!!!! Aaaargh!\n");
    4.27  			return(p);
    4.28  		}
    4.29  if (unlikely(page_to_maddr(p) > vhpt_paddr && page_to_maddr(p) < vhpt_pend)) {
    4.30 -  printf("map_new_domain_page: reassigned vhpt page %p!!\n",page_to_maddr(p));
    4.31 +  printf("assign_new_domain_page: reassigned vhpt page %p!!\n",page_to_maddr(p));
    4.32  }
    4.33  		set_pte(pte, pfn_pte(page_to_maddr(p) >> PAGE_SHIFT,
    4.34  			__pgprot(__DIRTY_BITS | _PAGE_PL_2 | _PAGE_AR_RWX)));
    4.35  	}
    4.36 -	else printk("map_new_domain_page: mpaddr %lx already mapped!\n",mpaddr);
    4.37 +	else printk("assign_new_domain_page: mpaddr %lx already mapped!\n",mpaddr);
    4.38  	return p;
    4.39  }
    4.40  
    4.41  /* map a physical address to the specified metaphysical addr */
    4.42 -void map_domain_page(struct domain *d, unsigned long mpaddr, unsigned long physaddr)
    4.43 +void assign_domain_page(struct domain *d, unsigned long mpaddr, unsigned long physaddr)
    4.44  {
    4.45  	struct mm_struct *mm = d->arch.mm;
    4.46  	pgd_t *pgd;
    4.47 @@ -451,7 +451,7 @@ void map_domain_page(struct domain *d, u
    4.48  	pte_t *pte;
    4.49  
    4.50  	if (!mm->pgd) {
    4.51 -		printk("map_domain_page: domain pgd must exist!\n");
    4.52 +		printk("assign_domain_page: domain pgd must exist!\n");
    4.53  		return;
    4.54  	}
    4.55  	pgd = pgd_offset(mm,mpaddr);
    4.56 @@ -472,11 +472,11 @@ void map_domain_page(struct domain *d, u
    4.57  		set_pte(pte, pfn_pte(physaddr >> PAGE_SHIFT,
    4.58  			__pgprot(__DIRTY_BITS | _PAGE_PL_2 | _PAGE_AR_RWX)));
    4.59  	}
    4.60 -	else printk("map_domain_page: mpaddr %lx already mapped!\n",mpaddr);
    4.61 +	else printk("assign_domain_page: mpaddr %lx already mapped!\n",mpaddr);
    4.62  }
    4.63  #if 0
    4.64  /* map a physical address with specified I/O flag */
    4.65 -void map_domain_io_page(struct domain *d, unsigned long mpaddr, unsigned long flags)
    4.66 +void assign_domain_io_page(struct domain *d, unsigned long mpaddr, unsigned long flags)
    4.67  {
    4.68  	struct mm_struct *mm = d->arch.mm;
    4.69  	pgd_t *pgd;
    4.70 @@ -486,7 +486,7 @@ void map_domain_io_page(struct domain *d
    4.71  	pte_t io_pte;
    4.72  
    4.73  	if (!mm->pgd) {
    4.74 -		printk("map_domain_page: domain pgd must exist!\n");
    4.75 +		printk("assign_domain_page: domain pgd must exist!\n");
    4.76  		return;
    4.77  	}
    4.78  	ASSERT(flags & GPFN_IO_MASK);
    4.79 @@ -509,7 +509,7 @@ void map_domain_io_page(struct domain *d
    4.80  		pte_val(io_pte) = flags;
    4.81  		set_pte(pte, io_pte);
    4.82  	}
    4.83 -	else printk("map_domain_page: mpaddr %lx already mapped!\n",mpaddr);
    4.84 +	else printk("assign_domain_page: mpaddr %lx already mapped!\n",mpaddr);
    4.85  }
    4.86  #endif
    4.87  void mpafoo(unsigned long mpaddr)
    4.88 @@ -557,7 +557,7 @@ tryagain:
    4.89  	}
    4.90  	/* if lookup fails and mpaddr is "legal", "create" the page */
    4.91  	if ((mpaddr >> PAGE_SHIFT) < d->max_pages) {
    4.92 -		if (map_new_domain_page(d,mpaddr)) goto tryagain;
    4.93 +		if (assign_new_domain_page(d,mpaddr)) goto tryagain;
    4.94  	}
    4.95  	printk("lookup_domain_mpa: bad mpa %p (> %p\n",
    4.96  		mpaddr,d->max_pages<<PAGE_SHIFT);
    4.97 @@ -655,15 +655,9 @@ void loaddomainelfimage(struct domain *d
    4.98  	else
    4.99  #endif
   4.100  	while (memsz > 0) {
   4.101 -#ifdef DOMU_AUTO_RESTART
   4.102 -		pteval = lookup_domain_mpa(d,dom_mpaddr);
   4.103 -		if (pteval) dom_imva = __va(pteval & _PFN_MASK);
   4.104 -		else { printf("loaddomainelfimage: BAD!\n"); while(1); }
   4.105 -#else
   4.106 -		p = map_new_domain_page(d,dom_mpaddr);
   4.107 +		p = assign_new_domain_page(d,dom_mpaddr);
   4.108  		if (unlikely(!p)) BUG();
   4.109  		dom_imva = __va(page_to_maddr(p));
   4.110 -#endif
   4.111  		if (filesz > 0) {
   4.112  			if (filesz >= PAGE_SIZE)
   4.113  				copy_memory(dom_imva,elfaddr,PAGE_SIZE);
   4.114 @@ -788,16 +782,15 @@ int construct_dom0(struct domain *d,
   4.115  	unsigned long pkern_entry;
   4.116  	unsigned long pkern_end;
   4.117  	unsigned long pinitrd_start = 0;
   4.118 +	unsigned long pstart_info;
   4.119  	unsigned long ret, progress = 0;
   4.120  
   4.121  //printf("construct_dom0: starting\n");
   4.122  
   4.123 -#ifndef CLONE_DOMAIN0
   4.124  	/* Sanity! */
   4.125  	BUG_ON(d != dom0);
   4.126  	BUG_ON(d->vcpu[0] == NULL);
   4.127  	BUG_ON(test_bit(_VCPUF_initialised, &v->vcpu_flags));
   4.128 -#endif
   4.129  
   4.130  	memset(&dsi, 0, sizeof(struct domain_setup_info));
   4.131  
   4.132 @@ -847,13 +840,18 @@ int construct_dom0(struct domain *d,
   4.133                            (PAGE_ALIGN(initrd_len) + 4*1024*1024);
   4.134  
   4.135               memcpy(__va(pinitrd_start),initrd_start,initrd_len);
   4.136 +             pstart_info = PAGE_ALIGN(pinitrd_start + initrd_len);
   4.137 +        } else {
   4.138 +             pstart_info = PAGE_ALIGN(pkern_end);
   4.139          }
   4.140  
   4.141  	printk("METAPHYSICAL MEMORY ARRANGEMENT:\n"
   4.142  	       " Kernel image:  %lx->%lx\n"
   4.143  	       " Entry address: %lx\n"
   4.144 -               " Init. ramdisk: %lx len %lx\n",
   4.145 -               pkern_start, pkern_end, pkern_entry, pinitrd_start, initrd_len);
   4.146 +	       " Init. ramdisk: %lx len %lx\n"
   4.147 +	       " Start info.:   %lx->%lx\n",
   4.148 +	       pkern_start, pkern_end, pkern_entry, pinitrd_start, initrd_len,
   4.149 +	       pstart_info, pstart_info + PAGE_SIZE);
   4.150  
   4.151  	if ( (pkern_end - pkern_start) > (d->max_pages * PAGE_SIZE) )
   4.152  	{
   4.153 @@ -908,9 +906,9 @@ int construct_dom0(struct domain *d,
   4.154  
   4.155  
   4.156  	/* Set up start info area. */
   4.157 -	si = (start_info_t *)alloc_xenheap_page();
   4.158 +	d->shared_info->arch.start_info_pfn = pstart_info >> PAGE_SHIFT;
   4.159 +	si = __va(pstart_info);
   4.160  	memset(si, 0, PAGE_SIZE);
   4.161 -	d->shared_info->arch.start_info_pfn = __pa(si) >> PAGE_SHIFT;
   4.162  	sprintf(si->magic, "xen-%i.%i-ia64", XEN_VERSION, XEN_SUBVERSION);
   4.163  	si->nr_pages     = d->tot_pages;
   4.164  
   4.165 @@ -962,9 +960,6 @@ int construct_dom0(struct domain *d,
   4.166  	sync_split_caches();
   4.167  
   4.168  	// FIXME: Hack for keyboard input
   4.169 -#ifdef CLONE_DOMAIN0
   4.170 -if (d == dom0)
   4.171 -#endif
   4.172  	serial_input_init();
   4.173  	if (d == dom0) {
   4.174  		VCPU(v, delivery_mask[0]) = -1L;
   4.175 @@ -977,65 +972,6 @@ if (d == dom0)
   4.176  	return 0;
   4.177  }
   4.178  
   4.179 -// FIXME: When dom0 can construct domains, this goes away (or is rewritten)
   4.180 -int construct_domU(struct domain *d,
   4.181 -		   unsigned long image_start, unsigned long image_len,
   4.182 -	           unsigned long initrd_start, unsigned long initrd_len,
   4.183 -	           char *cmdline)
   4.184 -{
   4.185 -	int i, rc;
   4.186 -	struct vcpu *v = d->vcpu[0];
   4.187 -	unsigned long pkern_entry;
   4.188 -
   4.189 -#ifndef DOMU_AUTO_RESTART
   4.190 -	BUG_ON(test_bit(_VCPUF_initialised, &v->vcpu_flags));
   4.191 -#endif
   4.192 -
   4.193 -	printk("*** LOADING DOMAIN %d ***\n",d->domain_id);
   4.194 -
   4.195 -	d->max_pages = dom0_size/PAGE_SIZE;	// FIXME: use dom0 size
   4.196 -	// FIXME: use domain0 command line
   4.197 -	rc = parsedomainelfimage(image_start, image_len, &pkern_entry);
   4.198 -	printk("parsedomainelfimage returns %d\n",rc);
   4.199 -	if ( rc != 0 ) return rc;
   4.200 -
   4.201 -	/* Mask all upcalls... */
   4.202 -	for ( i = 0; i < MAX_VIRT_CPUS; i++ )
   4.203 -		d->shared_info->vcpu_info[i].evtchn_upcall_mask = 1;
   4.204 -
   4.205 -	/* Copy the OS image. */
   4.206 -	printk("calling loaddomainelfimage(%p,%p)\n",d,image_start);
   4.207 -	loaddomainelfimage(d,image_start);
   4.208 -	printk("loaddomainelfimage returns\n");
   4.209 -
   4.210 -	set_bit(_VCPUF_initialised, &v->vcpu_flags);
   4.211 -
   4.212 -	printk("calling new_thread, entry=%p\n",pkern_entry);
   4.213 -#ifdef DOMU_AUTO_RESTART
   4.214 -	v->domain->arch.image_start = image_start;
   4.215 -	v->domain->arch.image_len = image_len;
   4.216 -	v->domain->arch.entry = pkern_entry;
   4.217 -#endif
   4.218 -	new_thread(v, pkern_entry, 0, 0);
   4.219 -	printk("new_thread returns\n");
   4.220 -	sync_split_caches();
   4.221 -	__set_bit(0x30, VCPU(v, delivery_mask));
   4.222 -
   4.223 -	return 0;
   4.224 -}
   4.225 -
   4.226 -#ifdef DOMU_AUTO_RESTART
   4.227 -void reconstruct_domU(struct vcpu *v)
   4.228 -{
   4.229 -	/* re-copy the OS image to reset data values to original */
   4.230 -	printk("reconstruct_domU: restarting domain %d...\n",
   4.231 -		v->domain->domain_id);
   4.232 -	loaddomainelfimage(v->domain,v->domain->arch.image_start);
   4.233 -	new_thread(v, v->domain->arch.entry, 0, 0);
   4.234 -	sync_split_caches();
   4.235 -}
   4.236 -#endif
   4.237 -
   4.238  void machine_restart(char * __unused)
   4.239  {
   4.240  	if (platform_is_hp_ski()) dummy();
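
The construct_dom0() hunks above stop pulling the start-info page off the Xen heap; pstart_info is instead carved out of dom0's own metaphysical space, on the first page boundary after the initrd (or after the kernel image when no initrd is supplied), and its frame number is exactly what the Linux-side setup.c hunk in this changeset reads back from shared_info. A worked placement example (illustrative values, 16 KB pages assumed):

    /* With PAGE_SIZE = 16 KB (0x4000):
     *   initrd case:  pinitrd_start = 0x2000000, initrd_len = 0x5000
     *                 pstart_info = PAGE_ALIGN(0x2005000) = 0x2008000
     *   no initrd:    pkern_end = 0xa34000 (already page aligned)
     *                 pstart_info = PAGE_ALIGN(0xa34000) = 0xa34000
     */
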
     5.1 --- a/xen/arch/ia64/xen/hypercall.c	Fri Feb 10 12:35:19 2006 +0100
     5.2 +++ b/xen/arch/ia64/xen/hypercall.c	Fri Feb 10 08:37:14 2006 -0700
     5.3 @@ -9,6 +9,7 @@
     5.4  #include <xen/config.h>
     5.5  #include <xen/sched.h>
     5.6  #include <xen/hypercall.h>
     5.7 +#include <xen/multicall.h>
     5.8  
     5.9  #include <linux/efi.h>	/* FOR EFI_UNIMPLEMENTED */
    5.10  #include <asm/sal.h>	/* FOR struct ia64_sal_retval */
    5.11 @@ -23,6 +24,42 @@ extern unsigned long translate_domain_mp
    5.12  unsigned long idle_when_pending = 0;
    5.13  unsigned long pal_halt_light_count = 0;
    5.14  
    5.15 +hypercall_t ia64_hypercall_table[] =
    5.16 +	{
    5.17 +	(hypercall_t)do_ni_hypercall,		/* do_set_trap_table */		/*  0 */
    5.18 +	(hypercall_t)do_ni_hypercall,		/* do_mmu_update */
    5.19 +	(hypercall_t)do_ni_hypercall,		/* do_set_gdt */
    5.20 +	(hypercall_t)do_ni_hypercall,		/* do_stack_switch */
    5.21 +	(hypercall_t)do_ni_hypercall,		/* do_set_callbacks */
    5.22 +	(hypercall_t)do_ni_hypercall,		/* do_fpu_taskswitch */		/*  5 */
    5.23 +	(hypercall_t)do_ni_hypercall,		/* do_sched_op */
    5.24 +	(hypercall_t)do_dom0_op,
    5.25 +	(hypercall_t)do_ni_hypercall,		/* do_set_debugreg */
    5.26 +	(hypercall_t)do_ni_hypercall,		/* do_get_debugreg */
    5.27 +	(hypercall_t)do_ni_hypercall,		/* do_update_descriptor */	/* 10 */
    5.28 +	(hypercall_t)do_ni_hypercall,		/* do_ni_hypercall */
    5.29 +	(hypercall_t)do_memory_op,
    5.30 +	(hypercall_t)do_multicall,
    5.31 +	(hypercall_t)do_ni_hypercall,		/* do_update_va_mapping */
    5.32 +	(hypercall_t)do_ni_hypercall,		/* do_set_timer_op */		/* 15 */
    5.33 +	(hypercall_t)do_event_channel_op,
    5.34 +	(hypercall_t)do_xen_version,
    5.35 +	(hypercall_t)do_console_io,
    5.36 +	(hypercall_t)do_ni_hypercall,           /* do_physdev_op */
    5.37 +	(hypercall_t)do_grant_table_op,						/* 20 */
    5.38 +	(hypercall_t)do_ni_hypercall,		/* do_vm_assist */
    5.39 +	(hypercall_t)do_ni_hypercall,		/* do_update_va_mapping_otherdomain */
    5.40 +	(hypercall_t)do_ni_hypercall,		/* (x86 only) */
    5.41 +	(hypercall_t)do_ni_hypercall,		/* do_vcpu_op */
    5.42 +	(hypercall_t)do_ni_hypercall,		/* (x86_64 only) */		/* 25 */
    5.43 +	(hypercall_t)do_ni_hypercall,		/* do_mmuext_op */
    5.44 +	(hypercall_t)do_ni_hypercall,		/* do_acm_op */
    5.45 +	(hypercall_t)do_ni_hypercall,		/* do_nmi_op */
    5.46 +	(hypercall_t)do_ni_hypercall,		/*  */
    5.47 +	(hypercall_t)do_ni_hypercall,		/*  */				/* 30 */
    5.48 +	(hypercall_t)do_ni_hypercall		/*  */
    5.49 +	};
    5.50 +
    5.51  int
    5.52  ia64_hypercall (struct pt_regs *regs)
    5.53  {
    5.54 @@ -94,15 +131,8 @@ ia64_hypercall (struct pt_regs *regs)
    5.55  			printf("(by dom0)\n ");
    5.56  			(*efi.reset_system)(EFI_RESET_WARM,0,0,NULL);
    5.57  		}
    5.58 -#ifdef DOMU_AUTO_RESTART
    5.59 -		else {
    5.60 -			reconstruct_domU(current);
    5.61 -			return 0;  // don't increment ip!
    5.62 -		}
    5.63 -#else	
    5.64  		printf("(not supported for non-0 domain)\n");
    5.65  		regs->r8 = EFI_UNSUPPORTED;
    5.66 -#endif
    5.67  		break;
    5.68  	    case FW_HYPERCALL_EFI_GET_TIME:
    5.69  		tv = vcpu_get_gr(v,32);
    5.70 @@ -181,9 +211,13 @@ ia64_hypercall (struct pt_regs *regs)
    5.71  		regs->r8 = do_xen_version(regs->r14, regs->r15);
    5.72  		break;
    5.73  
    5.74 +	    case __HYPERVISOR_multicall:
    5.75 +		regs->r8 = do_multicall(regs->r14, regs->r15);
    5.76 +		break;
    5.77 +
    5.78  	    default:
    5.79  		printf("unknown hypercall %x\n", regs->r2);
    5.80 -		regs->r8 = (unsigned long)-1;
    5.81 +		regs->r8 = do_ni_hypercall();
    5.82  	}
    5.83  	return 1;
    5.84  }
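
The new ia64_hypercall_table exists so that do_multicall_call() (added to asm-ia64/multicall.h below) can dispatch each batched entry through an ordinary function pointer instead of hitting the old BUG(). Roughly, the common multicall loop drives it as sketched here (a simplified illustration that ignores re-entrancy flags, guest-copy error handling and preemption; it is not the code in xen/common/multicall.c):

    /* Simplified sketch: dispatch each entry through the per-arch
     * do_multicall_call(), which indexes ia64_hypercall_table and
     * stores the handler's return value in the entry's result field.
     * The real code copies entries to and from guest memory safely
     * and checks for preemption between entries. */
    static long example_multicall(multicall_entry_t *call_list,
                                  unsigned int nr_calls)
    {
        unsigned int i;

        for (i = 0; i < nr_calls; i++)
            do_multicall_call(&call_list[i]);   /* fills call_list[i].result */

        return 0;
    }
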
     6.1 --- a/xen/arch/ia64/xen/xenmisc.c	Fri Feb 10 12:35:19 2006 +0100
     6.2 +++ b/xen/arch/ia64/xen/xenmisc.c	Fri Feb 10 08:37:14 2006 -0700
     6.3 @@ -168,7 +168,11 @@ void __free_pages(struct page *page, uns
     6.4  
     6.5  void *pgtable_quicklist_alloc(void)
     6.6  {
     6.7 -	return alloc_xenheap_pages(0);
     6.8 +    void *p;
     6.9 +    p = alloc_xenheap_pages(0);
    6.10 +    if (p) 
    6.11 +        clear_page(p);
    6.12 +    return p;
    6.13  }
    6.14  
    6.15  void pgtable_quicklist_free(void *pgtable_entry)
     7.1 --- a/xen/arch/ia64/xen/xensetup.c	Fri Feb 10 12:35:19 2006 +0100
     7.2 +++ b/xen/arch/ia64/xen/xensetup.c	Fri Feb 10 08:37:14 2006 -0700
     7.3 @@ -31,9 +31,6 @@ struct vcpu *idle_vcpu[NR_CPUS];
     7.4  
     7.5  cpumask_t cpu_present_map;
     7.6  
     7.7 -#ifdef CLONE_DOMAIN0
     7.8 -struct domain *clones[CLONE_DOMAIN0];
     7.9 -#endif
    7.10  extern unsigned long domain0_ready;
    7.11  
    7.12  int find_max_pfn (unsigned long, unsigned long, void *);
    7.13 @@ -342,16 +339,6 @@ printk("About to call sort_main_extable(
    7.14  printk("About to call domain_create()\n");
    7.15      dom0 = domain_create(0, 0);
    7.16  
    7.17 -#ifdef CLONE_DOMAIN0
    7.18 -    {
    7.19 -    int i;
    7.20 -    for (i = 0; i < CLONE_DOMAIN0; i++) {
    7.21 -	clones[i] = domain_create(i+1, 0);
    7.22 -        if ( clones[i] == NULL )
    7.23 -            panic("Error creating domain0 clone %d\n",i);
    7.24 -    }
    7.25 -    }
    7.26 -#endif
    7.27      if ( dom0 == NULL )
    7.28          panic("Error creating domain 0\n");
    7.29  
    7.30 @@ -376,22 +363,6 @@ printk("About to call domain_create()\n"
    7.31      /* PIN domain0 on CPU 0.  */
    7.32      dom0->vcpu[0]->cpu_affinity = cpumask_of_cpu(0);
    7.33  
    7.34 -#ifdef CLONE_DOMAIN0
    7.35 -    {
    7.36 -    int i;
    7.37 -    dom0_memory_start = __va(ia64_boot_param->domain_start);
    7.38 -    dom0_memory_size = ia64_boot_param->domain_size;
    7.39 -
    7.40 -    for (i = 0; i < CLONE_DOMAIN0; i++) {
    7.41 -      printk("CONSTRUCTING DOMAIN0 CLONE #%d\n",i+1);
    7.42 -      if ( construct_domU(clones[i], dom0_memory_start, dom0_memory_size,
    7.43 -			  dom0_initrd_start,dom0_initrd_size,
    7.44 -			  0) != 0)
    7.45 -            panic("Could not set up DOM0 clone %d\n",i);
    7.46 -    }
    7.47 -    }
    7.48 -#endif
    7.49 -
    7.50      /* The stash space for the initial kernel image can now be freed up. */
    7.51      init_domheap_pages(ia64_boot_param->domain_start,
    7.52                         ia64_boot_param->domain_size);
    7.53 @@ -412,13 +383,6 @@ printk("About to call init_trace_bufs()\
    7.54      console_endboot(cmdline && strstr(cmdline, "tty0"));
    7.55  #endif
    7.56  
    7.57 -#ifdef CLONE_DOMAIN0
    7.58 -    {
    7.59 -    int i;
    7.60 -    for (i = 0; i < CLONE_DOMAIN0; i++)
    7.61 -	domain_unpause_by_systemcontroller(clones[i]);
    7.62 -    }
    7.63 -#endif
    7.64      domain0_ready = 1;
    7.65  
    7.66      local_irq_enable();
     8.1 --- a/xen/include/asm-ia64/config.h	Fri Feb 10 12:35:19 2006 +0100
     8.2 +++ b/xen/include/asm-ia64/config.h	Fri Feb 10 08:37:14 2006 -0700
     8.3 @@ -3,11 +3,8 @@
     8.4  
     8.5  #undef USE_PAL_EMULATOR
     8.6  // control flags for turning on/off features under test
     8.7 -#undef CLONE_DOMAIN0
     8.8 -//#define CLONE_DOMAIN0 1
     8.9  #undef DOMU_BUILD_STAGING
    8.10  #define VHPT_GLOBAL
    8.11 -#define DOMU_AUTO_RESTART
    8.12  
    8.13  #undef DEBUG_PFMON
    8.14  
    8.15 @@ -215,9 +212,6 @@ void sort_main_extable(void);
    8.16  // see include/asm-ia64/mm.h, handle remaining page_info uses until gone
    8.17  #define page_info page
    8.18  
    8.19 -// see common/memory.c
    8.20 -#define set_gpfn_from_mfn(x,y)	do { } while (0)
    8.21 -
    8.22  // see common/keyhandler.c
    8.23  #define	nop()	asm volatile ("nop 0")
    8.24  
     9.1 --- a/xen/include/asm-ia64/domain.h	Fri Feb 10 12:35:19 2006 +0100
     9.2 +++ b/xen/include/asm-ia64/domain.h	Fri Feb 10 08:37:14 2006 -0700
     9.3 @@ -32,11 +32,6 @@ struct arch_domain {
     9.4      u64 xen_vastart;
     9.5      u64 xen_vaend;
     9.6      u64 shared_info_va;
     9.7 -#ifdef DOMU_AUTO_RESTART
     9.8 -    u64 image_start;
     9.9 -    u64 image_len;
    9.10 -    u64 entry;
    9.11 -#endif
    9.12      unsigned long initrd_start;
    9.13      unsigned long initrd_len;
    9.14      char *cmdline;
    10.1 --- a/xen/include/asm-ia64/grant_table.h	Fri Feb 10 12:35:19 2006 +0100
    10.2 +++ b/xen/include/asm-ia64/grant_table.h	Fri Feb 10 08:37:14 2006 -0700
    10.3 @@ -17,7 +17,7 @@
    10.4  #define gnttab_shared_gmfn(d, t, i)                                     \
    10.5      ( ((d) == dom0) ?                                                   \
    10.6        ((virt_to_maddr((t)->shared) >> PAGE_SHIFT) + (i)) :              \
    10.7 -      (map_domain_page((d), 1UL<<40, virt_to_maddr((t)->shared)),       \
    10.8 +      (assign_domain_page((d), 1UL<<40, virt_to_maddr((t)->shared)),       \
    10.9         1UL << (40 - PAGE_SHIFT))                                        \
   10.10      )
   10.11  
    11.1 --- a/xen/include/asm-ia64/multicall.h	Fri Feb 10 12:35:19 2006 +0100
    11.2 +++ b/xen/include/asm-ia64/multicall.h	Fri Feb 10 08:37:14 2006 -0700
    11.3 @@ -1,5 +1,27 @@
    11.4  #ifndef __ASM_IA64_MULTICALL_H__
    11.5  #define __ASM_IA64_MULTICALL_H__
    11.6  
    11.7 -#define do_multicall_call(_call) BUG()
    11.8 +#include <public/xen.h>
    11.9 +
   11.10 +typedef unsigned long (*hypercall_t)(
   11.11 +			unsigned long arg0,
   11.12 +			unsigned long arg1,
   11.13 +			unsigned long arg2,
   11.14 +			unsigned long arg3,
   11.15 +			unsigned long arg4,
   11.16 +			unsigned long arg5);
   11.17 +
   11.18 +extern hypercall_t ia64_hypercall_table[];
   11.19 +
   11.20 +static inline void do_multicall_call(multicall_entry_t *call)
   11.21 +{
   11.22 +	call->result = (*ia64_hypercall_table[call->op])(
   11.23 +			call->args[0],
   11.24 +			call->args[1],
   11.25 +			call->args[2],
   11.26 +			call->args[3],
   11.27 +			call->args[4],
   11.28 +			call->args[5]);
   11.29 +}
   11.30 +
   11.31  #endif /* __ASM_IA64_MULTICALL_H__ */
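
With do_multicall_call() now dispatching through ia64_hypercall_table, a guest can batch several hypercalls into a single trap. A hedged guest-side illustration of what such a batch looks like (the entry layout and the __HYPERVISOR_*/CONSOLEIO_*/XENVER_* constants come from the public Xen headers; the HYPERVISOR_multicall wrapper is an assumption about the guest environment, not part of this changeset):

    /* Guest-side sketch: queue two hypercalls and submit them in one trap.
     * Each entry's result field is filled in by do_multicall_call(). */
    static void example_batch(void)
    {
        multicall_entry_t batch[2];

        batch[0].op      = __HYPERVISOR_console_io;   /* handled by do_console_io */
        batch[0].args[0] = CONSOLEIO_write;
        batch[0].args[1] = 6;                         /* length of "hello\n" */
        batch[0].args[2] = (unsigned long)"hello\n";

        batch[1].op      = __HYPERVISOR_xen_version;  /* handled by do_xen_version */
        batch[1].args[0] = XENVER_version;
        batch[1].args[1] = 0;

        HYPERVISOR_multicall(batch, 2);               /* assumed guest wrapper */
    }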