ia64/xen-unstable

changeset 5797:ca44d2dbb273

Intel's pre-bk->hg transition patches
Signed-off-by: Eddie Dong <Eddie.dong@intel.com>
Signed-off-by: Anthony Xu <Anthony.xu@intel.com>
Signed-off-by: Kevin Tian <Kevin.tian@intel.com>
author djm@kirby.fc.hp.com
date Sat Jul 09 07:58:56 2005 -0700 (2005-07-09)
parents 89d92ce10924
children d0a1154755c9
files xen/arch/ia64/Makefile xen/arch/ia64/asm-offsets.c xen/arch/ia64/domain.c xen/arch/ia64/mm.c xen/arch/ia64/mmio.c xen/arch/ia64/pal_emul.c xen/arch/ia64/vlsapic.c xen/arch/ia64/vmx_entry.S xen/arch/ia64/vmx_hypercall.c xen/arch/ia64/vmx_init.c xen/arch/ia64/vmx_ivt.S xen/arch/ia64/vmx_minstate.h xen/arch/ia64/vmx_phy_mode.c xen/arch/ia64/vmx_process.c xen/arch/ia64/vmx_support.c xen/arch/ia64/vmx_vcpu.c xen/arch/ia64/vmx_virt.c xen/arch/ia64/vtlb.c xen/arch/ia64/xensetup.c xen/include/asm-ia64/config.h xen/include/asm-ia64/domain.h xen/include/asm-ia64/privop.h xen/include/asm-ia64/vmx.h xen/include/asm-ia64/vmx_phy_mode.h xen/include/asm-ia64/vmx_vcpu.h xen/include/asm-ia64/vmx_vpd.h xen/include/asm-ia64/xenprocessor.h xen/include/asm-ia64/xensystem.h xen/include/public/arch-ia64.h
line diff
     1.1 --- a/xen/arch/ia64/Makefile	Sat Jul 09 07:37:13 2005 -0700
     1.2 +++ b/xen/arch/ia64/Makefile	Sat Jul 09 07:58:56 2005 -0700
     1.3 @@ -15,7 +15,7 @@ OBJS = xensetup.o setup.o time.o irq.o i
     1.4  ifeq ($(CONFIG_VTI),y)
     1.5  OBJS += vmx_init.o vmx_virt.o vmx_vcpu.o vmx_process.o vmx_vsa.o vmx_ivt.o \
     1.6  	vmx_phy_mode.o vmx_utility.o vmx_interrupt.o vmx_entry.o vmmu.o \
     1.7 -	vtlb.o mmio.o vlsapic.o vmx_hypercall.o mm.o
     1.8 +	vtlb.o mmio.o vlsapic.o vmx_hypercall.o mm.o vmx_support.o pal_emul.o
     1.9  endif
    1.10  # perfmon.o
    1.11  # unwind.o needed for kernel unwinding (rare)
     2.1 --- a/xen/arch/ia64/asm-offsets.c	Sat Jul 09 07:37:13 2005 -0700
     2.2 +++ b/xen/arch/ia64/asm-offsets.c	Sat Jul 09 07:58:56 2005 -0700
     2.3 @@ -224,6 +224,7 @@ void foo(void)
     2.4  
     2.5  #ifdef  CONFIG_VTI
     2.6  	DEFINE(IA64_VPD_BASE_OFFSET, offsetof (struct vcpu, arch.arch_vmx.vpd));
     2.7 + 	DEFINE(IA64_VLSAPIC_INSVC_BASE_OFFSET, offsetof (struct vcpu, arch.arch_vmx.in_service[0]));
     2.8  	DEFINE(IA64_VPD_CR_VPTA_OFFSET, offsetof (cr_t, pta));
     2.9  	DEFINE(XXX_THASH_SIZE, sizeof (thash_data_t));
    2.10  
     3.1 --- a/xen/arch/ia64/domain.c	Sat Jul 09 07:37:13 2005 -0700
     3.2 +++ b/xen/arch/ia64/domain.c	Sat Jul 09 07:58:56 2005 -0700
     3.3 @@ -37,10 +37,13 @@
     3.4  #include <asm/asm-offsets.h>  /* for IA64_THREAD_INFO_SIZE */
     3.5  
     3.6  #include <asm/vcpu.h>   /* for function declarations */
     3.7 +#include <public/arch-ia64.h>
     3.8  #ifdef CONFIG_VTI
     3.9  #include <asm/vmx.h>
    3.10  #include <asm/vmx_vcpu.h>
    3.11 +#include <asm/vmx_vpd.h>
    3.12  #include <asm/pal.h>
    3.13 +#include <public/io/ioreq.h>
    3.14  #endif // CONFIG_VTI
    3.15  
    3.16  #define CONFIG_DOMAIN0_CONTIGUOUS
    3.17 @@ -203,18 +206,20 @@ void arch_do_createdomain(struct vcpu *v
    3.18   	 * after up.
    3.19   	 */
    3.20   	d->shared_info = (void *)alloc_xenheap_page();
    3.21 -
    3.22 -	/* FIXME: Because full virtual cpu info is placed in this area,
    3.23 -	 * it's unlikely to put it into one shareinfo page. Later
    3.24 -	 * need split vcpu context from vcpu_info and conforms to
    3.25 -	 * normal xen convention.
    3.26 +	/* Now assume all vcpu info and event indicators can be
     3.27 +	 * held in one shared page. We will definitely need to
     3.28 +	 * revisit this assumption later.
    3.29  	 */
    3.30 -	v->vcpu_info = (void *)alloc_xenheap_page();
    3.31 -	if (!v->vcpu_info) {
    3.32 -   		printk("ERROR/HALTING: CAN'T ALLOC PAGE\n");
    3.33 -   		while (1);
    3.34 +
    3.35 +	memset(d->shared_info, 0, PAGE_SIZE);
    3.36 +	v->vcpu_info = &d->shared_info->vcpu_data[v->vcpu_id];
     3.37 +	/* Mask all events; a specific port will be unmasked
     3.38 +	 * when a consumer subscribes to it.
    3.39 +	 */
    3.40 +	if(v == d->vcpu[0]) {
    3.41 +	    memset(&d->shared_info->evtchn_mask[0], 0xff,
    3.42 +		sizeof(d->shared_info->evtchn_mask));
    3.43  	}
    3.44 -	memset(v->vcpu_info, 0, PAGE_SIZE);
    3.45  
    3.46  	/* Allocate per-domain vTLB and vhpt */
    3.47  	v->arch.vtlb = init_domain_tlb(v);
    3.48 @@ -291,6 +296,7 @@ void arch_getdomaininfo_ctxt(struct vcpu
    3.49  	c->shared = v->domain->shared_info->arch;
    3.50  }
    3.51  
    3.52 +#ifndef CONFIG_VTI
    3.53  int arch_set_info_guest(struct vcpu *v, struct vcpu_guest_context *c)
    3.54  {
    3.55  	struct pt_regs *regs = (struct pt_regs *) ((unsigned long) v + IA64_STK_OFFSET) - 1;
    3.56 @@ -312,6 +318,79 @@ int arch_set_info_guest(struct vcpu *v, 
    3.57  	v->domain->shared_info->arch = c->shared;
    3.58  	return 0;
    3.59  }
    3.60 +#else // CONFIG_VTI
    3.61 +int arch_set_info_guest(
    3.62 +    struct vcpu *v, struct vcpu_guest_context *c)
    3.63 +{
    3.64 +    struct domain *d = v->domain;
    3.65 +    int i, rc, ret;
    3.66 +    unsigned long progress = 0;
    3.67 +
    3.68 +    if ( test_bit(_VCPUF_initialised, &v->vcpu_flags) )
    3.69 +        return 0;
    3.70 +
    3.71 +    /* Lazy FP not implemented yet */
    3.72 +    clear_bit(_VCPUF_fpu_initialised, &v->vcpu_flags);
    3.73 +    if ( c->flags & VGCF_FPU_VALID )
    3.74 +        set_bit(_VCPUF_fpu_initialised, &v->vcpu_flags);
    3.75 +
    3.76 +    /* Sync d/i cache conservatively, after domain N is loaded */
    3.77 +    ret = ia64_pal_cache_flush(3, 0, &progress, NULL);
    3.78 +    if (ret != PAL_STATUS_SUCCESS)
    3.79 +            panic("PAL CACHE FLUSH failed for dom[%d].\n",
    3.80 +		v->domain->domain_id);
    3.81 +    DPRINTK("Sync i/d cache for dom%d image SUCC\n",
    3.82 +		v->domain->domain_id);
    3.83 +
    3.84 +    /* Physical mode emulation initialization, including
     3.85 +     * emulation ID allocation and related memory requests
    3.86 +     */
    3.87 +    physical_mode_init(v);
    3.88 +
     3.89 +    /* FIXME: only a contiguous PMT table is supported so far */
    3.90 +    d->arch.pmt = __va(c->pt_base);
    3.91 +    d->arch.max_pfn = c->pt_max_pfn;
    3.92 +    v->arch.arch_vmx.vmx_platform.shared_page_va = __va(c->share_io_pg);
    3.93 +    memset((char *)__va(c->share_io_pg),0,PAGE_SIZE);
    3.94 +
    3.95 +    if (c->flags & VGCF_VMX_GUEST) {
    3.96 +	if (!vmx_enabled)
    3.97 +	    panic("No VMX hardware feature for vmx domain.\n");
    3.98 +
    3.99 +	vmx_final_setup_domain(d);
   3.100 +
   3.101 +	/* One more step to enable interrupt assist */
   3.102 +	set_bit(ARCH_VMX_INTR_ASSIST, &v->arch.arch_vmx.flags);
   3.103 +    }
   3.104 +
   3.105 +    vlsapic_reset(v);
   3.106 +    vtm_init(v);
   3.107 +
   3.108 +    /* Only open one port for I/O and interrupt emulation */
   3.109 +    if (v == d->vcpu[0]) {
   3.110 +	memset(&d->shared_info->evtchn_mask[0], 0xff,
   3.111 +		sizeof(d->shared_info->evtchn_mask));
   3.112 +	clear_bit(IOPACKET_PORT, &d->shared_info->evtchn_mask[0]);
   3.113 +    }
    3.114 +    /* Set up domain context. IA-64 differs a bit from x86 here,
    3.115 +     * with almost all system resources better managed by the HV
    3.116 +     * directly. CP only needs to provide the start IP of the guest,
    3.117 +     * which ideally is the load address of the guest firmware.
   3.118 +     */
   3.119 +    new_thread(v, c->guest_iip, 0, 0);
   3.120 +
   3.121 +
   3.122 +    d->xen_vastart = 0xf000000000000000;
   3.123 +    d->xen_vaend = 0xf300000000000000;
   3.124 +    d->arch.breakimm = 0x1000 + d->domain_id;
   3.125 +    v->arch._thread.on_ustack = 0;
   3.126 +
   3.127 +    /* Don't redo final setup */
   3.128 +    set_bit(_VCPUF_initialised, &v->vcpu_flags);
   3.129 +
   3.130 +    return 0;
   3.131 +}
   3.132 +#endif // CONFIG_VTI
   3.133  
   3.134  void arch_do_boot_vcpu(struct vcpu *v)
   3.135  {
   3.136 @@ -361,7 +440,10 @@ void new_thread(struct vcpu *v,
   3.137  		init_all_rr(v);
   3.138  
   3.139  	if (VMX_DOMAIN(v)) {
   3.140 -		VMX_VPD(v,vgr[12]) = dom_fw_setup(d,saved_command_line,256L);
   3.141 +		if (d == dom0) {
   3.142 +		    VMX_VPD(v,vgr[12]) = dom_fw_setup(d,saved_command_line,256L);
   3.143 +		    printk("new_thread, done with dom_fw_setup\n");
   3.144 +		}
   3.145  		/* Virtual processor context setup */
   3.146  		VMX_VPD(v, vpsr) = IA64_PSR_BN;
   3.147  		VPD_CR(v, dcr) = 0;
   3.148 @@ -556,6 +638,7 @@ tryagain:
   3.149  }
   3.150  
   3.151  // FIXME: ONLY USE FOR DOMAIN PAGE_SIZE == PAGE_SIZE
   3.152 +#ifndef CONFIG_VTI
   3.153  unsigned long domain_mpa_to_imva(struct domain *d, unsigned long mpaddr)
   3.154  {
   3.155  	unsigned long pte = lookup_domain_mpa(d,mpaddr);
   3.156 @@ -566,6 +649,14 @@ unsigned long domain_mpa_to_imva(struct 
   3.157  	imva |= mpaddr & ~PAGE_MASK;
   3.158  	return(imva);
   3.159  }
   3.160 +#else // CONFIG_VTI
   3.161 +unsigned long domain_mpa_to_imva(struct domain *d, unsigned long mpaddr)
   3.162 +{
   3.163 +    unsigned long imva = __gpa_to_mpa(d, mpaddr);
   3.164 +
   3.165 +    return __va(imva);
   3.166 +}
   3.167 +#endif // CONFIG_VTI
   3.168  
   3.169  // remove following line if not privifying in memory
   3.170  //#define HAVE_PRIVIFY_MEMORY
   3.171 @@ -812,6 +903,17 @@ void build_shared_info(struct domain *d)
   3.172      /* ... */
   3.173  }
   3.174  
   3.175 +/*
    3.176 + * Domain 0 has direct access to all devices. However, the main
    3.177 + * point of this stub is to allow alloc_dom_mem to handle
    3.178 + * order > 0 requests. Dom0 requires that bit set in order to
    3.179 + * allocate memory for other domains.
   3.180 + */
   3.181 +void physdev_init_dom0(struct domain *d)
   3.182 +{
   3.183 +	set_bit(_DOMF_physdev_access, &d->domain_flags);
   3.184 +}
   3.185 +
   3.186  extern unsigned long running_on_sim;
   3.187  unsigned int vmx_dom0 = 0;
   3.188  int construct_dom0(struct domain *d, 
   3.189 @@ -963,6 +1065,7 @@ int construct_dom0(struct domain *d,
   3.190      set_bit(_DOMF_constructed, &d->domain_flags);
   3.191      new_thread(v, pkern_entry, 0, 0);
   3.192  
   3.193 +    physdev_init_dom0(d);
   3.194      // FIXME: Hack for keyboard input
   3.195  #ifdef CLONE_DOMAIN0
   3.196  if (d == dom0)
   3.197 @@ -978,6 +1081,8 @@ if (d == dom0)
   3.198  
   3.199      return 0;
   3.200  }
   3.201 +
   3.202 +
   3.203  #else //CONFIG_VTI
   3.204  
   3.205  int construct_dom0(struct domain *d, 
     4.1 --- a/xen/arch/ia64/mm.c	Sat Jul 09 07:37:13 2005 -0700
     4.2 +++ b/xen/arch/ia64/mm.c	Sat Jul 09 07:58:56 2005 -0700
     4.3 @@ -95,7 +95,7 @@
     4.4  #include <asm/vmx_vcpu.h>
     4.5  #include <asm/vmmu.h>
     4.6  #include <asm/regionreg.h>
     4.7 -
     4.8 +#include <asm/vmx_mm_def.h>
     4.9  /*
    4.10          uregs->ptr is virtual address
    4.11          uregs->val is pte value
    4.12 @@ -109,8 +109,9 @@ int do_mmu_update(mmu_update_t *ureqs,u6
    4.13      mmu_update_t req;
    4.14      ia64_rr rr;
    4.15      thash_cb_t *hcb;
    4.16 -    thash_data_t entry={0};
    4.17 +    thash_data_t entry={0},*ovl;
    4.18      vcpu = current;
    4.19 +    search_section_t sections;
    4.20      hcb = vmx_vcpu_get_vtlb(vcpu);
    4.21      for ( i = 0; i < count; i++ )
    4.22      {
    4.23 @@ -124,8 +125,18 @@ int do_mmu_update(mmu_update_t *ureqs,u6
    4.24              entry.cl = DSIDE_TLB;
    4.25              rr = vmx_vcpu_rr(vcpu, req.ptr);
    4.26              entry.ps = rr.ps;
    4.27 +            entry.key = redistribute_rid(rr.rid);
    4.28              entry.rid = rr.rid;
    4.29 -            vtlb_insert(hcb, &entry, req.ptr);
    4.30 +            entry.vadr = PAGEALIGN(req.ptr,entry.ps);
    4.31 +            sections.tr = 1;
    4.32 +            sections.tc = 0;
    4.33 +            ovl = thash_find_overlap(hcb, &entry, sections);
    4.34 +            if (ovl) {
    4.35 +                  // generate MCA.
    4.36 +                panic("Tlb conflict!!");
    4.37 +                return;
    4.38 +            }
    4.39 +            thash_purge_and_insert(hcb, &entry);
    4.40          }else if(cmd == MMU_MACHPHYS_UPDATE){
    4.41              mfn = req.ptr >>PAGE_SHIFT;
    4.42              gpfn = req.val;
     5.1 --- a/xen/arch/ia64/mmio.c	Sat Jul 09 07:37:13 2005 -0700
     5.2 +++ b/xen/arch/ia64/mmio.c	Sat Jul 09 07:58:56 2005 -0700
     5.3 @@ -27,7 +27,13 @@
     5.4  #include <asm/gcc_intrin.h>
     5.5  #include <xen/interrupt.h>
     5.6  #include <asm/vmx_vcpu.h>
     5.7 +#include <asm/privop.h>
     5.8 +#include <asm/types.h>
     5.9 +#include <public/io/ioreq.h>
    5.10 +#include <asm/mm.h>
    5.11 +#include <asm/vmx.h>
    5.12  
    5.13 +/*
    5.14  struct mmio_list *lookup_mmio(u64 gpa, struct mmio_list *mio_base)
    5.15  {
    5.16      int     i;
    5.17 @@ -37,215 +43,14 @@ struct mmio_list *lookup_mmio(u64 gpa, s
    5.18      }
    5.19      return NULL;
    5.20  }
    5.21 -
    5.22 -
    5.23 -extern void pib_write(VCPU *vcpu, void *src, uint64_t pib_off, size_t s, int ma);
    5.24 -static inline void mmio_write(VCPU *vcpu, void *src, u64 dest_pa, size_t s, int ma)
    5.25 -{
    5.26 -    struct virutal_platform_def *v_plat;
    5.27 -    struct mmio_list    *mio;
    5.28 -    
    5.29 -    v_plat = vmx_vcpu_get_plat(vcpu);
    5.30 -    mio = lookup_mmio(dest_pa, v_plat->mmio);
    5.31 -    if ( mio == NULL ) 
    5.32 -        panic ("Wrong address for MMIO\n");
    5.33 -    
    5.34 -    switch (mio->iot) {
    5.35 -    case PIB_MMIO:
    5.36 -        pib_write(vcpu, src, dest_pa - v_plat->pib_base, s, ma);
    5.37 -        break;
    5.38 -    case VGA_BUFF:
    5.39 -    case CHIPSET_IO:
    5.40 -    case LOW_MMIO:
    5.41 -    case LEGACY_IO:
    5.42 -    case IO_SAPIC:
    5.43 -    default:
    5.44 -        break;
    5.45 -    }
    5.46 -    return;
    5.47 -}
    5.48 -
    5.49 -static inline void mmio_read(VCPU *vcpu, u64 src_pa, void *dest, size_t s, int ma)
    5.50 -{
    5.51 -    struct virutal_platform_def *v_plat;
    5.52 -    struct mmio_list    *mio;
    5.53 -    
    5.54 -    v_plat = vmx_vcpu_get_plat(vcpu);
    5.55 -    mio = lookup_mmio(src_pa, v_plat->mmio);
    5.56 -    if ( mio == NULL ) 
    5.57 -        panic ("Wrong address for MMIO\n");
    5.58 -    
    5.59 -    switch (mio->iot) {
    5.60 -    case PIB_MMIO:
    5.61 -        pib_read(vcpu, src_pa - v_plat->pib_base, dest, s, ma);
    5.62 -        break;
    5.63 -    case VGA_BUFF:
    5.64 -    case CHIPSET_IO:
    5.65 -    case LOW_MMIO:
    5.66 -    case LEGACY_IO:
    5.67 -    case IO_SAPIC:
    5.68 -    default:
    5.69 -        break;
    5.70 -    }
    5.71 -    return;
    5.72 -}
    5.73 -
    5.74 -/*
    5.75 - * Read or write data in guest virtual address mode.
    5.76 - */
    5.77 - 
    5.78 -void
    5.79 -memwrite_v(VCPU *vcpu, thash_data_t *vtlb, void *src, void *dest, size_t s)
    5.80 -{
    5.81 -    uint64_t pa;
    5.82 -
    5.83 -    if (!vtlb->nomap)
    5.84 -        panic("Normal memory write shouldn't go to this point!");
    5.85 -    pa = PPN_2_PA(vtlb->ppn);
    5.86 -    pa += POFFSET((u64)dest, vtlb->ps);
    5.87 -    mmio_write (vcpu, src, pa, s, vtlb->ma);
    5.88 -}
    5.89 -
    5.90 -
    5.91 -void
    5.92 -memwrite_p(VCPU *vcpu, void *src, void *dest, size_t s)
    5.93 -{
    5.94 -    uint64_t pa = (uint64_t)dest;
    5.95 -    int    ma;
    5.96 -
    5.97 -    if ( pa & (1UL <<63) ) {
    5.98 -        // UC
    5.99 -        ma = 4;
   5.100 -        pa <<=1; 
   5.101 -        pa >>=1;
   5.102 -    } 
   5.103 -    else {
   5.104 -        // WBL
   5.105 -        ma = 0;     // using WB for WBL
   5.106 -    }
   5.107 -    mmio_write (vcpu, src, pa, s, ma);
   5.108 -}
   5.109 -
   5.110 -void
   5.111 -memread_v(VCPU *vcpu, thash_data_t *vtlb, void *src, void *dest, size_t s)
   5.112 -{
   5.113 -    uint64_t pa;
   5.114 -
   5.115 -    if (!vtlb->nomap)
   5.116 -        panic("Normal memory write shouldn't go to this point!");
   5.117 -    pa = PPN_2_PA(vtlb->ppn);
   5.118 -    pa += POFFSET((u64)src, vtlb->ps);
   5.119 -    
   5.120 -    mmio_read(vcpu, pa, dest, s, vtlb->ma);
   5.121 -}
   5.122 -
   5.123 -void
   5.124 -memread_p(VCPU *vcpu, void *src, void *dest, size_t s)
   5.125 -{
   5.126 -    uint64_t pa = (uint64_t)src;
   5.127 -    int    ma;
   5.128 -
   5.129 -    if ( pa & (1UL <<63) ) {
   5.130 -        // UC
   5.131 -        ma = 4;
   5.132 -        pa <<=1; 
   5.133 -        pa >>=1;
   5.134 -    } 
   5.135 -    else {
   5.136 -        // WBL
   5.137 -        ma = 0;     // using WB for WBL
   5.138 -    }
   5.139 -    mmio_read(vcpu, pa, dest, s, ma);
   5.140 -}
   5.141 +*/
   5.142  
   5.143  #define	PIB_LOW_HALF(ofst)	!(ofst&(1<<20))
   5.144  #define PIB_OFST_INTA           0x1E0000
   5.145  #define PIB_OFST_XTP            0x1E0008
   5.146  
   5.147 -
   5.148 -/*
   5.149 - * Deliver IPI message. (Only U-VP is supported now)
   5.150 - *  offset: address offset to IPI space.
   5.151 - *  value:  deliver value.
   5.152 - */
   5.153 -static void deliver_ipi (VCPU *vcpu, uint64_t dm, uint64_t vector)
   5.154 +static void pib_write(VCPU *vcpu, void *src, uint64_t pib_off, size_t s, int ma)
   5.155  {
   5.156 -#ifdef  IPI_DEBUG
   5.157 -  printf ("deliver_ipi %lx %lx\n",dm,vector);
   5.158 -#endif
   5.159 -    switch ( dm ) {
   5.160 -    case 0:     // INT
   5.161 -        vmx_vcpu_pend_interrupt (vcpu, vector);
   5.162 -        break;
   5.163 -    case 2:     // PMI
   5.164 -        // TODO -- inject guest PMI
   5.165 -        panic ("Inject guest PMI!\n");
   5.166 -        break;
   5.167 -    case 4:     // NMI
   5.168 -        vmx_vcpu_pend_interrupt (vcpu, 2);     
   5.169 -        break;
   5.170 -    case 5:     // INIT
   5.171 -        // TODO -- inject guest INIT
   5.172 -        panic ("Inject guest INIT!\n");
   5.173 -        break;
   5.174 -    case 7:     // ExtINT
   5.175 -        vmx_vcpu_pend_interrupt (vcpu, 0);     
   5.176 -        break;
   5.177 -        
   5.178 -    case 1:
   5.179 -    case 3:
   5.180 -    case 6:
   5.181 -    default:
   5.182 -        panic ("Deliver reserved IPI!\n");
   5.183 -        break;
   5.184 -    }   
   5.185 -}
   5.186 -
   5.187 -/*
   5.188 - * TODO: Use hash table for the lookup.
   5.189 - */
   5.190 -static inline VCPU *lid_2_vcpu (struct domain *d, u64 id, u64 eid)
   5.191 -{
   5.192 -	int   i;
   5.193 -	VCPU  *vcpu;
   5.194 -	LID	  lid;
   5.195 -	
   5.196 -	for (i=0; i<MAX_VIRT_CPUS; i++) {
   5.197 -		vcpu = d->vcpu[i];
   5.198 -		lid.val = VPD_CR(vcpu, lid);
   5.199 -		if ( lid.id == id && lid.eid == eid ) {
   5.200 -		    return vcpu;
   5.201 -		}
   5.202 -	}
   5.203 -	return NULL;
   5.204 -}
   5.205 -
   5.206 -/*
   5.207 - * execute write IPI op.
   5.208 - */
   5.209 -static int write_ipi (VCPU *vcpu, uint64_t addr, uint64_t value)
   5.210 -{
   5.211 -    VCPU   *target_cpu;
   5.212 -    
   5.213 -    target_cpu = lid_2_vcpu(vcpu->domain, 
   5.214 -    				((ipi_a_t)addr).id, ((ipi_a_t)addr).eid);
   5.215 -    if ( target_cpu == NULL ) panic("Unknown IPI cpu\n");
   5.216 -    if ( target_cpu == vcpu ) {
   5.217 -    	// IPI to self
   5.218 -        deliver_ipi (vcpu, ((ipi_d_t)value).dm, 
   5.219 -                ((ipi_d_t)value).vector);
   5.220 -        return 1;
   5.221 -    }
   5.222 -    else {
   5.223 -    	// TODO: send Host IPI to inject guest SMP IPI interruption
   5.224 -        panic ("No SM-VP supported!\n");
   5.225 -        return 0;
   5.226 -    }
   5.227 -}
   5.228 -
   5.229 -void pib_write(VCPU *vcpu, void *src, uint64_t pib_off, size_t s, int ma)
   5.230 -{
   5.231 -    
   5.232      switch (pib_off) {
   5.233      case PIB_OFST_INTA:
   5.234          panic("Undefined write on PIB INTA\n");
   5.235 @@ -276,7 +81,7 @@ void pib_write(VCPU *vcpu, void *src, ui
   5.236      }
   5.237  }
   5.238  
   5.239 -void pib_read(VCPU *vcpu, uint64_t pib_off, void *dest, size_t s, int ma)
   5.240 +static void pib_read(VCPU *vcpu, uint64_t pib_off, void *dest, size_t s, int ma)
   5.241  {
   5.242      switch (pib_off) {
   5.243      case PIB_OFST_INTA:
   5.244 @@ -323,3 +128,334 @@ void pib_read(VCPU *vcpu, uint64_t pib_o
   5.245      }
   5.246  }
   5.247  
   5.248 +static void low_mmio_access(VCPU *vcpu, u64 pa, u64 *val, size_t s, int dir)
   5.249 +{
   5.250 +    struct vcpu *v = current;
   5.251 +    vcpu_iodata_t *vio;
   5.252 +    ioreq_t *p;
   5.253 +    unsigned long addr;
   5.254 +
   5.255 +    vio = (vcpu_iodata_t *) v->arch.arch_vmx.vmx_platform.shared_page_va;
   5.256 +    if (vio == 0) {
   5.257 +        panic("bad shared page: %lx", (unsigned long)vio);
   5.258 +    }
   5.259 +    p = &vio->vp_ioreq;
   5.260 +    p->addr = pa;
   5.261 +    p->size = 1<<s;
   5.262 +    p->count = 1;
   5.263 +    p->dir = dir;
   5.264 +    if(dir==IOREQ_WRITE)     //write;
   5.265 +        p->u.data = *val;
   5.266 +    p->pdata_valid = 0;
   5.267 +    p->port_mm = 1;
   5.268 +    p->df = 0;
   5.269 +
   5.270 +    set_bit(ARCH_VMX_IO_WAIT, &v->arch.arch_vmx.flags);
   5.271 +    p->state = STATE_IOREQ_READY;
   5.272 +    evtchn_send(IOPACKET_PORT);
   5.273 +    vmx_wait_io();
   5.274 +    if(dir){ //read
   5.275 +        *val=p->u.data;
   5.276 +    }
   5.277 +    return;
   5.278 +}
   5.279 +#define TO_LEGACY_IO(pa)  (((pa)>>12<<2)|((pa)&0x3))
   5.280 +
   5.281 +static void legacy_io_access(VCPU *vcpu, u64 pa, u64 *val, size_t s, int dir)
   5.282 +{
   5.283 +    struct vcpu *v = current;
   5.284 +    vcpu_iodata_t *vio;
   5.285 +    ioreq_t *p;
   5.286 +    unsigned long addr;
   5.287 +
   5.288 +    vio = (vcpu_iodata_t *) v->arch.arch_vmx.vmx_platform.shared_page_va;
   5.289 +    if (vio == 0) {
   5.290 +        panic("bad shared page: %lx");
   5.291 +    }
   5.292 +    p = &vio->vp_ioreq;
   5.293 +    p->addr = TO_LEGACY_IO(pa&0x3ffffffUL);
   5.294 +    p->size = 1<<s;
   5.295 +    p->count = 1;
   5.296 +    p->dir = dir;
   5.297 +    if(dir==IOREQ_WRITE)     //write;
   5.298 +        p->u.data = *val;
   5.299 +    p->pdata_valid = 0;
   5.300 +    p->port_mm = 0;
   5.301 +    p->df = 0;
   5.302 +
   5.303 +    set_bit(ARCH_VMX_IO_WAIT, &v->arch.arch_vmx.flags);
   5.304 +    p->state = STATE_IOREQ_READY;
   5.305 +    evtchn_send(IOPACKET_PORT);
   5.306 +    vmx_wait_io();
   5.307 +    if(dir){ //read
   5.308 +        *val=p->u.data;
   5.309 +    }
   5.310 +    return;
   5.311 +}
   5.312 +
   5.313 +static void mmio_access(VCPU *vcpu, u64 src_pa, u64 *dest, size_t s, int ma, int dir)
   5.314 +{
   5.315 +    struct virutal_platform_def *v_plat;
   5.316 +    //mmio_type_t iot;
   5.317 +    unsigned long iot;
   5.318 +    iot=__gpfn_is_io(vcpu->domain, src_pa>>PAGE_SHIFT);
   5.319 +    v_plat = vmx_vcpu_get_plat(vcpu);
   5.320 +
   5.321 +    switch (iot) {
   5.322 +    case GPFN_PIB:
   5.323 +        if(!dir)
   5.324 +            pib_write(vcpu, src_pa - v_plat->pib_base, dest, s, ma);
   5.325 +        else
   5.326 +            pib_read(vcpu, src_pa - v_plat->pib_base, dest, s, ma);
   5.327 +        break;
   5.328 +    case GPFN_GFW:
   5.329 +        break;
   5.330 +    case GPFN_FRAME_BUFFER:
   5.331 +    case GPFN_LOW_MMIO:
   5.332 +        low_mmio_access(vcpu, src_pa, dest, s, dir);
   5.333 +        break;
   5.334 +    case GPFN_LEGACY_IO:
   5.335 +        legacy_io_access(vcpu, src_pa, dest, s, dir);
   5.336 +        break;
   5.337 +    case GPFN_IOSAPIC:
   5.338 +    default:
   5.339 +        panic("Bad I/O access\n");
   5.340 +        break;
   5.341 +    }
   5.342 +    return;
   5.343 +}
   5.344 +
   5.345 +/*
   5.346 + * Read or write data in guest virtual address mode.
   5.347 + */
   5.348 +/*
   5.349 +void
   5.350 +memwrite_v(VCPU *vcpu, thash_data_t *vtlb, u64 *src, u64 *dest, size_t s)
   5.351 +{
   5.352 +    uint64_t pa;
   5.353 +
   5.354 +    if (!vtlb->nomap)
   5.355 +        panic("Normal memory write shouldn't go to this point!");
   5.356 +    pa = PPN_2_PA(vtlb->ppn);
   5.357 +    pa += POFFSET((u64)dest, vtlb->ps);
   5.358 +    mmio_write (vcpu, src, pa, s, vtlb->ma);
   5.359 +}
   5.360 +
   5.361 +
   5.362 +void
   5.363 +memwrite_p(VCPU *vcpu, u64 *src, u64 *dest, size_t s)
   5.364 +{
   5.365 +    uint64_t pa = (uint64_t)dest;
   5.366 +    int    ma;
   5.367 +
   5.368 +    if ( pa & (1UL <<63) ) {
   5.369 +        // UC
   5.370 +        ma = 4;
   5.371 +        pa <<=1;
   5.372 +        pa >>=1;
   5.373 +    }
   5.374 +    else {
   5.375 +        // WBL
   5.376 +        ma = 0;     // using WB for WBL
   5.377 +    }
   5.378 +    mmio_write (vcpu, src, pa, s, ma);
   5.379 +}
   5.380 +
   5.381 +void
   5.382 +memread_v(VCPU *vcpu, thash_data_t *vtlb, u64 *src, u64 *dest, size_t s)
   5.383 +{
   5.384 +    uint64_t pa;
   5.385 +
   5.386 +    if (!vtlb->nomap)
   5.387 +        panic("Normal memory write shouldn't go to this point!");
   5.388 +    pa = PPN_2_PA(vtlb->ppn);
   5.389 +    pa += POFFSET((u64)src, vtlb->ps);
   5.390 +
   5.391 +    mmio_read(vcpu, pa, dest, s, vtlb->ma);
   5.392 +}
   5.393 +
   5.394 +void
   5.395 +memread_p(VCPU *vcpu, u64 *src, u64 *dest, size_t s)
   5.396 +{
   5.397 +    uint64_t pa = (uint64_t)src;
   5.398 +    int    ma;
   5.399 +
   5.400 +    if ( pa & (1UL <<63) ) {
   5.401 +        // UC
   5.402 +        ma = 4;
   5.403 +        pa <<=1;
   5.404 +        pa >>=1;
   5.405 +    }
   5.406 +    else {
   5.407 +        // WBL
   5.408 +        ma = 0;     // using WB for WBL
   5.409 +    }
   5.410 +    mmio_read(vcpu, pa, dest, s, ma);
   5.411 +}
   5.412 +*/
   5.413 +
   5.414 +
   5.415 +/*
   5.416 + * Deliver IPI message. (Only U-VP is supported now)
   5.417 + *  offset: address offset to IPI space.
   5.418 + *  value:  deliver value.
   5.419 + */
   5.420 +static void deliver_ipi (VCPU *vcpu, uint64_t dm, uint64_t vector)
   5.421 +{
   5.422 +#ifdef  IPI_DEBUG
   5.423 +  printf ("deliver_ipi %lx %lx\n",dm,vector);
   5.424 +#endif
   5.425 +    switch ( dm ) {
   5.426 +    case 0:     // INT
   5.427 +        vmx_vcpu_pend_interrupt (vcpu, vector);
   5.428 +        break;
   5.429 +    case 2:     // PMI
   5.430 +        // TODO -- inject guest PMI
   5.431 +        panic ("Inject guest PMI!\n");
   5.432 +        break;
   5.433 +    case 4:     // NMI
   5.434 +        vmx_vcpu_pend_interrupt (vcpu, 2);
   5.435 +        break;
   5.436 +    case 5:     // INIT
   5.437 +        // TODO -- inject guest INIT
   5.438 +        panic ("Inject guest INIT!\n");
   5.439 +        break;
   5.440 +    case 7:     // ExtINT
   5.441 +        vmx_vcpu_pend_interrupt (vcpu, 0);
   5.442 +        break;
   5.443 +    case 1:
   5.444 +    case 3:
   5.445 +    case 6:
   5.446 +    default:
   5.447 +        panic ("Deliver reserved IPI!\n");
   5.448 +        break;
   5.449 +    }
   5.450 +}
   5.451 +
   5.452 +/*
   5.453 + * TODO: Use hash table for the lookup.
   5.454 + */
   5.455 +static inline VCPU *lid_2_vcpu (struct domain *d, u64 id, u64 eid)
   5.456 +{
   5.457 +	int   i;
   5.458 +	VCPU  *vcpu;
   5.459 +	LID	  lid;
   5.460 +	for (i=0; i<MAX_VIRT_CPUS; i++) {
   5.461 +		vcpu = d->vcpu[i];
   5.462 +		lid.val = VPD_CR(vcpu, lid);
   5.463 +		if ( lid.id == id && lid.eid == eid ) {
   5.464 +		    return vcpu;
   5.465 +		}
   5.466 +	}
   5.467 +	return NULL;
   5.468 +}
   5.469 +
   5.470 +/*
   5.471 + * execute write IPI op.
   5.472 + */
   5.473 +static int write_ipi (VCPU *vcpu, uint64_t addr, uint64_t value)
   5.474 +{
   5.475 +    VCPU   *target_cpu;
   5.476 + 
   5.477 +    target_cpu = lid_2_vcpu(vcpu->domain, 
   5.478 +    				((ipi_a_t)addr).id, ((ipi_a_t)addr).eid);
   5.479 +    if ( target_cpu == NULL ) panic("Unknown IPI cpu\n");
   5.480 +    if ( target_cpu == vcpu ) {
   5.481 +    	// IPI to self
   5.482 +        deliver_ipi (vcpu, ((ipi_d_t)value).dm, 
   5.483 +                ((ipi_d_t)value).vector);
   5.484 +        return 1;
   5.485 +    }
   5.486 +    else {
   5.487 +    	// TODO: send Host IPI to inject guest SMP IPI interruption
   5.488 +        panic ("No SM-VP supported!\n");
   5.489 +        return 0;
   5.490 +    }
   5.491 +}
   5.492 +
   5.493 +
   5.494 +/*
   5.495 +   dir 1: read 0:write
   5.496 +    inst_type 0:integer 1:floating point
   5.497 + */
   5.498 +extern IA64_BUNDLE __vmx_get_domain_bundle(u64 iip);
   5.499 +
   5.500 +
   5.501 +void emulate_io_inst(VCPU *vcpu, u64 padr, u64 ma)
   5.502 +{
   5.503 +    REGS *regs;
   5.504 +    IA64_BUNDLE bundle;
   5.505 +    int slot, dir, inst_type=0;
   5.506 +    size_t size;
   5.507 +    u64 data, value, slot1a, slot1b;
   5.508 +    INST64 inst;
   5.509 +    regs=vcpu_regs(vcpu);
   5.510 +    bundle = __vmx_get_domain_bundle(regs->cr_iip);
   5.511 +    slot = ((struct ia64_psr *)&(regs->cr_ipsr))->ri;
   5.512 +    if (!slot) inst.inst = bundle.slot0;
   5.513 +    else if (slot == 1){
   5.514 +        slot1a=bundle.slot1a;
   5.515 +        slot1b=bundle.slot1b;
   5.516 +        inst.inst =slot1a + (slot1b<<18);
   5.517 +    }
   5.518 +    else if (slot == 2) inst.inst = bundle.slot2;
   5.519 +
   5.520 +    if(inst.M1.major==4&&inst.M1.m==0&&inst.M1.x==0){
   5.521 +        inst_type=0;  //fp
   5.522 +        size=(inst.M1.x6&0x3);
   5.523 +        if((inst.M1.x6>>2)>0xb){      // write
   5.524 +            vmx_vcpu_get_gr(vcpu,inst.M4.r2,&data);
   5.525 +            dir=IOREQ_WRITE;     //write
   5.526 +        }else if((inst.M1.x6>>2)<0xb){   //  read
   5.527 +            vmx_vcpu_get_gr(vcpu,inst.M1.r1,&value);
   5.528 +            dir=IOREQ_READ;
   5.529 +        }else{
   5.530 +            printf("This memory access instruction can't be emulated one : %lx\n",inst.inst);
   5.531 +            while(1);
   5.532 +        }
   5.533 +    }else if(inst.M6.major==6&&inst.M6.m==0&&inst.M6.x==0&&inst.M6.x6==3){
   5.534 +        inst_type=1;  //fp
   5.535 +        dir=IOREQ_READ;
   5.536 +        size=3;     //ldfd
   5.537 +    }else{
   5.538 +        printf("This memory access instruction can't be emulated two: %lx\n ",inst.inst);
   5.539 +        while(1);
   5.540 +    }
   5.541 +
   5.542 +    if(dir==IOREQ_WRITE){
   5.543 +        mmio_access(vcpu, padr, &data, size, ma, dir);
   5.544 +    }else{
   5.545 +        mmio_access(vcpu, padr, &data, size, ma, dir);
   5.546 +        if(size==0)
   5.547 +            data = (value & 0xffffffffffffff00U) | (data & 0xffU);
   5.548 +        else if(size==1)
   5.549 +            data = (value & 0xffffffffffff0000U) | (data & 0xffffU);
   5.550 +        else if(size==2)
   5.551 +            data = (value & 0xffffffff00000000U) | (data & 0xffffffffU);
   5.552 +
   5.553 +        if(inst_type==0){       //gp
   5.554 +            vmx_vcpu_set_gr(vcpu,inst.M1.r1,data,0);
   5.555 +        }else{
   5.556 +            panic("Don't support ldfd now !");
   5.557 +/*            switch(inst.M6.f1){
   5.558 +
   5.559 +            case 6:
   5.560 +                regs->f6=(struct ia64_fpreg)data;
   5.561 +            case 7:
   5.562 +                regs->f7=(struct ia64_fpreg)data;
   5.563 +            case 8:
   5.564 +                regs->f8=(struct ia64_fpreg)data;
   5.565 +            case 9:
   5.566 +                regs->f9=(struct ia64_fpreg)data;
   5.567 +            case 10:
   5.568 +                regs->f10=(struct ia64_fpreg)data;
   5.569 +            case 11:
   5.570 +                regs->f11=(struct ia64_fpreg)data;
   5.571 +            default :
   5.572 +                ia64_ldfs(inst.M6.f1,&data);
   5.573 +            }
   5.574 +*/
   5.575 +        }
   5.576 +    }
   5.577 +    vmx_vcpu_increment_iip(vcpu);
   5.578 +}
     6.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     6.2 +++ b/xen/arch/ia64/pal_emul.c	Sat Jul 09 07:58:56 2005 -0700
     6.3 @@ -0,0 +1,283 @@
     6.4 +/* PAL/SAL call delegation
     6.5 + *
     6.6 + * Copyright (c) 2004 Li Susie <susie.li@intel.com>
     6.7 + * Copyright (c) 2005 Yu Ke <ke.yu@intel.com>
     6.8 + */
     6.9 +
    6.10 +#include <asm/vmx_vcpu.h>
    6.11 +
    6.12 +static void
    6.13 +get_pal_parameters (VCPU *vcpu, UINT64 *gr29,
    6.14 +			UINT64 *gr30, UINT64 *gr31) {
    6.15 +
    6.16 +  	vmx_vcpu_get_gr(vcpu,29,gr29);
    6.17 +  	vmx_vcpu_get_gr(vcpu,30,gr30); 
    6.18 +  	vmx_vcpu_get_gr(vcpu,31,gr31);
    6.19 +}
    6.20 +
    6.21 +static void
    6.22 +set_pal_result (VCPU *vcpu,struct ia64_pal_retval result) {
    6.23 +
    6.24 +	vmx_vcpu_set_gr(vcpu,8, result.status,0);
    6.25 +	vmx_vcpu_set_gr(vcpu,9, result.v0,0);
    6.26 +	vmx_vcpu_set_gr(vcpu,10, result.v1,0);
    6.27 +	vmx_vcpu_set_gr(vcpu,11, result.v2,0);
    6.28 +}
    6.29 +
    6.30 +
    6.31 +static struct ia64_pal_retval
    6.32 +pal_cache_flush (VCPU *vcpu) {
    6.33 +	UINT64 gr28,gr29, gr30, gr31;
    6.34 +	struct ia64_pal_retval result;
    6.35 +
    6.36 +	get_pal_parameters (vcpu, &gr29, &gr30, &gr31);
    6.37 +	vmx_vcpu_get_gr(vcpu,28,&gr28);
    6.38 +
    6.39 +	/* Always call Host Pal in int=1 */
    6.40 +	gr30 = gr30 &(~(0x2UL));
    6.41 +
    6.42 +	/* call Host PAL cache flush */
    6.43 +	result=ia64_pal_call_static(gr28 ,gr29, gr30,gr31,1);  // Clear psr.ic when call PAL_CACHE_FLUSH
    6.44 +
    6.45 +	/* If host PAL call is interrupted, then loop to complete it */
    6.46 +//	while (result.status == 1) {
    6.47 +//		ia64_pal_call_static(gr28 ,gr29, gr30, 
    6.48 +//				result.v1,1LL);
    6.49 +//	}
    6.50 +	while (result.status != 0) {
    6.51 +        panic("PAL_CACHE_FLUSH ERROR, status %d", result.status);
    6.52 +	}
    6.53 +
    6.54 +	return result;
    6.55 +}
    6.56 +
    6.57 +static struct ia64_pal_retval
    6.58 +pal_vm_tr_read (VCPU *vcpu ) {
    6.59 +#warning pal_vm_tr_read: to be implemented
    6.60 +	struct ia64_pal_retval result;
    6.61 +
    6.62 +	result.status= -1; //unimplemented
    6.63 +
    6.64 +	return result;
    6.65 +}
    6.66 +
    6.67 +
    6.68 +static struct ia64_pal_retval
    6.69 +pal_prefetch_visibility (VCPU *vcpu)  {
     6.70 +	/* Due to the current MM virtualization algorithm,
     6.71 +	 * we do not allow the guest to change mapping attributes.
     6.72 +	 * Thus we do not support PAL_PREFETCH_VISIBILITY.
    6.73 +	 */
    6.74 +	struct ia64_pal_retval result;
    6.75 +
    6.76 +	result.status= -1; //unimplemented
    6.77 +
    6.78 +	return result;
    6.79 +}
    6.80 +
    6.81 +static struct ia64_pal_retval
    6.82 +pal_platform_addr(VCPU *vcpu) {
    6.83 +	struct ia64_pal_retval result;
    6.84 +
    6.85 +	result.status= 0; //success
    6.86 +
    6.87 +	return result;
    6.88 +}
    6.89 +
    6.90 +static struct ia64_pal_retval
    6.91 +pal_halt (VCPU *vcpu) {
    6.92 +#warning pal_halt: to be implemented
     6.93 +	//bugbug: to be implemented.
    6.94 +	struct ia64_pal_retval result;
    6.95 +
    6.96 +	result.status= -1; //unimplemented
    6.97 +
    6.98 +	return result;
    6.99 +}
   6.100 +
   6.101 +
   6.102 +static struct ia64_pal_retval
   6.103 +pal_halt_light (VCPU *vcpu) {
   6.104 +#if 0	
   6.105 +	// GVMM will go back to HVMM and ask HVMM to call yield().
   6.106 +	vmmdata.p_ctlblk->status = VM_OK;
   6.107 +	vmmdata.p_ctlblk->ctlcode = ExitVM_YIELD;
   6.108 +
   6.109 +	vmm_transition((UINT64)&vmmdata.p_gsa->guest,
   6.110 +    			(UINT64)&vmmdata.p_gsa->host,
   6.111 +    			(UINT64) vmmdata.p_tramp,0,0);
   6.112 +
   6.113 +
   6.114 +	result.status = 0;
   6.115 +	result.pal_result[0]=0;
   6.116 +	result.pal_result[1]=0;
   6.117 +	result.pal_result[2]=0;
   6.118 +
   6.119 +	return result;
   6.120 +#endif
   6.121 +	struct ia64_pal_retval result;
   6.122 +
   6.123 +	result.status= -1; //unimplemented
   6.124 +
   6.125 +	return result;
   6.126 +}
   6.127 +
   6.128 +static struct ia64_pal_retval
   6.129 +pal_cache_read (VCPU *vcpu) {
   6.130 +	struct ia64_pal_retval result;
   6.131 +
   6.132 +	result.status= -1; //unimplemented
   6.133 +
   6.134 +	return result;
   6.135 +}
   6.136 +
   6.137 +static struct ia64_pal_retval
   6.138 +pal_cache_write (VCPU *vcpu) {
   6.139 +	struct ia64_pal_retval result;
   6.140 +
   6.141 +	result.status= -1; //unimplemented
   6.142 +
   6.143 +	return result;
   6.144 +}
   6.145 +
   6.146 +static struct ia64_pal_retval
   6.147 +pal_bus_get_features(VCPU *vcpu){
   6.148 +	
   6.149 +}
   6.150 +
   6.151 +static struct ia64_pal_retval
   6.152 +pal_cache_summary(VCPU *vcpu){
   6.153 +	
   6.154 +}
   6.155 +
   6.156 +static struct ia64_pal_retval
   6.157 +pal_cache_init(VCPU *vcpu){
   6.158 +	struct ia64_pal_retval result;
   6.159 +	result.status=0;
   6.160 +	return result;
   6.161 +}
   6.162 +
   6.163 +static struct ia64_pal_retval
   6.164 +pal_cache_info(VCPU *vcpu){
   6.165 +}
   6.166 +
   6.167 +static struct ia64_pal_retval
   6.168 +pal_cache_prot_info(VCPU *vcpu){
   6.169 +}
   6.170 +
   6.171 +static struct ia64_pal_retval
   6.172 +pal_cache_shared_info(VCPU *vcpu){
   6.173 +}
   6.174 +
   6.175 +static struct ia64_pal_retval
   6.176 +pal_mem_attrib(VCPU *vcpu){
   6.177 +}
   6.178 +
   6.179 +static struct ia64_pal_retval
   6.180 +pal_debug_info(VCPU *vcpu){
   6.181 +}
   6.182 +
   6.183 +static struct ia64_pal_retval
   6.184 +pal_fixed_addr(VCPU *vcpu){
   6.185 +}
   6.186 +
   6.187 +static struct ia64_pal_retval
   6.188 +pal_freq_base(VCPU *vcpu){
   6.189 +}
   6.190 +
   6.191 +static struct ia64_pal_retval
   6.192 +pal_freq_ratios(VCPU *vcpu){
   6.193 +}
   6.194 +
   6.195 +static struct ia64_pal_retval
   6.196 +pal_halt_info(VCPU *vcpu){
   6.197 +}
   6.198 +
   6.199 +static struct ia64_pal_retval
   6.200 +pal_logical_to_physica(VCPU *vcpu){
   6.201 +}
   6.202 +
   6.203 +static struct ia64_pal_retval
   6.204 +pal_perf_mon_info(VCPU *vcpu){
   6.205 +}
   6.206 +
   6.207 +static struct ia64_pal_retval
   6.208 +pal_proc_get_features(VCPU *vcpu){
   6.209 +}
   6.210 +
   6.211 +static struct ia64_pal_retval
   6.212 +pal_ptce_info(VCPU *vcpu){
   6.213 +}
   6.214 +
   6.215 +static struct ia64_pal_retval
   6.216 +pal_register_info(VCPU *vcpu){
   6.217 +}
   6.218 +
   6.219 +static struct ia64_pal_retval
   6.220 +pal_rse_info(VCPU *vcpu){
   6.221 +}
   6.222 +
   6.223 +static struct ia64_pal_retval
   6.224 +pal_test_info(VCPU *vcpu){
   6.225 +}
   6.226 +
   6.227 +static struct ia64_pal_retval
   6.228 +pal_vm_summary(VCPU *vcpu){
   6.229 +}
   6.230 +
   6.231 +static struct ia64_pal_retval
   6.232 +pal_vm_info(VCPU *vcpu){
   6.233 +}
   6.234 +
   6.235 +static struct ia64_pal_retval
   6.236 +pal_vm_page_size(VCPU *vcpu){
   6.237 +}
   6.238 +
   6.239 +void
   6.240 +pal_emul( VCPU *vcpu) {
   6.241 +	UINT64 gr28;
   6.242 +	struct ia64_pal_retval result;
   6.243 +
   6.244 +
   6.245 +	vmx_vcpu_get_gr(vcpu,28,&gr28);  //bank1
   6.246 +
   6.247 +	switch (gr28) {
   6.248 +		case PAL_CACHE_FLUSH:
   6.249 +			result = pal_cache_flush (vcpu);
   6.250 +			break;
   6.251 +
   6.252 +		case PAL_PREFETCH_VISIBILITY:
   6.253 +			result = pal_prefetch_visibility (vcpu);
   6.254 +			break;
   6.255 +
   6.256 +		case PAL_VM_TR_READ:
   6.257 +			result = pal_vm_tr_read (vcpu);
   6.258 +			break;
   6.259 +
   6.260 +		case PAL_HALT:
   6.261 +			result = pal_halt (vcpu);
   6.262 +			break;
   6.263 +
   6.264 +		case PAL_HALT_LIGHT:
   6.265 +			result = pal_halt_light (vcpu);
   6.266 +			break;
   6.267 +
   6.268 +		case PAL_CACHE_READ:
   6.269 +			result = pal_cache_read (vcpu);
   6.270 +			break;
   6.271 +
   6.272 +		case PAL_CACHE_WRITE:
   6.273 +			result = pal_cache_write (vcpu);
   6.274 +			break;
   6.275 +			
   6.276 +		case PAL_PLATFORM_ADDR:
   6.277 +			result = pal_platform_addr (vcpu);
   6.278 +			break;
   6.279 +
   6.280 +		default:
   6.281 +			panic("pal_emul(): guest call unsupported pal" );
   6.282 +  }
   6.283 +		set_pal_result (vcpu, result);
   6.284 +}
   6.285 +
   6.286 +
     7.1 --- a/xen/arch/ia64/vlsapic.c	Sat Jul 09 07:37:13 2005 -0700
     7.2 +++ b/xen/arch/ia64/vlsapic.c	Sat Jul 09 07:58:56 2005 -0700
     7.3 @@ -133,7 +133,7 @@ uint64_t vtm_get_itc(VCPU *vcpu)
     7.4      // FIXME: should use local_irq_disable & local_irq_enable ??
     7.5      local_irq_save(spsr);
     7.6      guest_itc = now_itc(vtm);
     7.7 -    update_last_itc(vtm, guest_itc);
     7.8 +//    update_last_itc(vtm, guest_itc);
     7.9  
    7.10      local_irq_restore(spsr);
    7.11      return guest_itc;
    7.12 @@ -174,12 +174,12 @@ void vtm_set_itv(VCPU *vcpu)
    7.13  /* Interrupt must be disabled at this point */
    7.14  
    7.15  extern u64 tick_to_ns(u64 tick);
    7.16 -#define TIMER_SLOP (50*1000) /* ns */	/* copy from ac_timer.c */
    7.17 +#define TIMER_SLOP (50*1000) /* ns */  /* copy from ac_timer.c */
    7.18  void vtm_interruption_update(VCPU *vcpu, vtime_t* vtm)
    7.19  {
    7.20      uint64_t    cur_itc,vitm,vitv;
    7.21      uint64_t    expires;
    7.22 -    long     	diff_now, diff_last;
    7.23 +    long        diff_now, diff_last;
    7.24      uint64_t    spsr;
    7.25      
    7.26      vitv = VPD_CR(vcpu, itv);
    7.27 @@ -237,21 +237,30 @@ void vtm_domain_in(VCPU *vcpu)
    7.28  
    7.29  #define  NMI_VECTOR         2
    7.30  #define  ExtINT_VECTOR      0
    7.31 -
    7.32 +#define  NULL_VECTOR        -1
    7.33  #define  VLSAPIC_INSVC(vcpu, i) ((vcpu)->arch.arch_vmx.in_service[i])
    7.34 -/*
    7.35 - * LID-CR64: Keep in vpd.
    7.36 - * IVR-CR65: (RO) see guest_read_ivr().
    7.37 - * TPR-CR66: Keep in vpd, acceleration enabled.
    7.38 - * EOI-CR67: see guest_write_eoi().
    7.39 - * IRR0-3 - CR68-71: (RO) Keep in vpd irq_pending[]
    7.40 - *          can move to vpd for optimization.
    7.41 - * ITV: in time virtualization.
    7.42 - * PMV: Keep in vpd initialized as 0x10000.
    7.43 - * CMCV: Keep in vpd initialized as 0x10000.
    7.44 - * LRR0-1: Keep in vpd, initialized as 0x10000.
    7.45 - *
    7.46 - */
    7.47 +static void update_vhpi(VCPU *vcpu, int vec)
    7.48 +{
    7.49 +    u64     vhpi;
    7.50 +    if ( vec == NULL_VECTOR ) {
    7.51 +        vhpi = 0;
    7.52 +    }
    7.53 +    else if ( vec == NMI_VECTOR ) { // NMI
    7.54 +        vhpi = 32;
    7.55 +    } else if (vec == ExtINT_VECTOR) { //ExtINT
    7.56 +        vhpi = 16;
    7.57 +    }
    7.58 +    else {
    7.59 +        vhpi = vec / 16;
    7.60 +    }
    7.61 +
    7.62 +    VMX_VPD(vcpu,vhpi) = vhpi;
    7.63 +    // TODO: Add support for XENO
    7.64 +    if ( VMX_VPD(vcpu,vac).a_int ) {
    7.65 +        ia64_call_vsa ( PAL_VPS_SET_PENDING_INTERRUPT, 
    7.66 +                (uint64_t) &(vcpu->arch.arch_vmx.vpd), 0, 0,0,0,0,0);
    7.67 +    }
    7.68 +}
    7.69  
    7.70  void vlsapic_reset(VCPU *vcpu)
    7.71  {
    7.72 @@ -268,9 +277,11 @@ void vlsapic_reset(VCPU *vcpu)
    7.73      VPD_CR(vcpu, cmcv) = 0x10000;
    7.74      VPD_CR(vcpu, lrr0) = 0x10000;   // default reset value?
    7.75      VPD_CR(vcpu, lrr1) = 0x10000;   // default reset value?
    7.76 +    update_vhpi(vcpu, NULL_VECTOR);
    7.77      for ( i=0; i<4; i++) {
    7.78          VLSAPIC_INSVC(vcpu,i) = 0;
    7.79      }
    7.80 +    DPRINTK("VLSAPIC inservice base=%lp\n", &VLSAPIC_INSVC(vcpu,0) );
    7.81  }
    7.82  
    7.83  /*
    7.84 @@ -281,7 +292,7 @@ void vlsapic_reset(VCPU *vcpu)
    7.85   */
    7.86  static __inline__ int highest_bits(uint64_t *dat)
    7.87  {
    7.88 -    uint64_t  bits, bitnum=-1;
    7.89 +    uint64_t  bits, bitnum;
    7.90      int i;
    7.91      
    7.92      /* loop for all 256 bits */
    7.93 @@ -292,12 +303,12 @@ static __inline__ int highest_bits(uint6
    7.94              return i*64+bitnum;
    7.95          }
    7.96      }
    7.97 -   return -1;
    7.98 +   return NULL_VECTOR;
    7.99  }
   7.100  
   7.101  /*
   7.102   * Return 0-255 for pending irq.
   7.103 - *        -1 when no pending.
   7.104 + *        NULL_VECTOR: when no pending.
   7.105   */
   7.106  static int highest_pending_irq(VCPU *vcpu)
   7.107  {
   7.108 @@ -320,7 +331,7 @@ static int highest_inservice_irq(VCPU *v
   7.109  static int is_higher_irq(int pending, int inservice)
   7.110  {
   7.111      return ( (pending >> 4) > (inservice>>4) || 
   7.112 -                ((pending != -1) && (inservice == -1)) );
   7.113 +                ((pending != NULL_VECTOR) && (inservice == NULL_VECTOR)) );
   7.114  }
   7.115  
   7.116  static int is_higher_class(int pending, int mic)
   7.117 @@ -333,40 +344,96 @@ static int is_invalid_irq(int vec)
   7.118      return (vec == 1 || ((vec <= 14 && vec >= 3)));
   7.119  }
   7.120  
   7.121 +#define   IRQ_NO_MASKED         0
   7.122 +#define   IRQ_MASKED_BY_VTPR    1
   7.123 +#define   IRQ_MASKED_BY_INSVC   2   // masked by inservice IRQ
   7.124 +
   7.125  /* See Table 5-8 in SDM vol2 for the definition */
   7.126  static int
   7.127 -irq_masked(VCPU *vcpu, int h_pending, int h_inservice)
   7.128 +_xirq_masked(VCPU *vcpu, int h_pending, int h_inservice)
   7.129  {
   7.130 -    uint64_t    vtpr;
   7.131 +    tpr_t    vtpr;
   7.132 +    uint64_t    mmi;
   7.133      
   7.134 -    vtpr = VPD_CR(vcpu, tpr);
   7.135 +    vtpr.val = VPD_CR(vcpu, tpr);
   7.136  
   7.137 -    if ( h_pending == NMI_VECTOR && h_inservice != NMI_VECTOR )
   7.138 +    if ( h_inservice == NMI_VECTOR ) {
   7.139 +        return IRQ_MASKED_BY_INSVC;
   7.140 +    }
   7.141 +    if ( h_pending == NMI_VECTOR ) {
   7.142          // Non Maskable Interrupt
   7.143 -        return 0;
   7.144 -
   7.145 -    if ( h_pending == ExtINT_VECTOR && h_inservice >= 16)
   7.146 -        return (vtpr>>16)&1;    // vtpr.mmi
   7.147 +        return IRQ_NO_MASKED;
   7.148 +    }
   7.149 +    if ( h_inservice == ExtINT_VECTOR ) {
   7.150 +        return IRQ_MASKED_BY_INSVC;
   7.151 +    }
   7.152 +    mmi = vtpr.mmi;
   7.153 +    if ( h_pending == ExtINT_VECTOR ) {
   7.154 +        if ( mmi ) {
   7.155 +            // mask all external IRQ
   7.156 +            return IRQ_MASKED_BY_VTPR;
   7.157 +        }
   7.158 +        else {
   7.159 +            return IRQ_NO_MASKED;
   7.160 +        }
   7.161 +    }
   7.162  
   7.163 -    if ( !(vtpr&(1UL<<16)) &&
   7.164 -          is_higher_irq(h_pending, h_inservice) &&
   7.165 -          is_higher_class(h_pending, (vtpr>>4)&0xf) )
   7.166 -        return 0;
   7.167 -
   7.168 -    return 1;
   7.169 +    if ( is_higher_irq(h_pending, h_inservice) ) {
   7.170 +        if ( !mmi && is_higher_class(h_pending, vtpr.mic) ) {
   7.171 +            return IRQ_NO_MASKED;
   7.172 +        }
   7.173 +        else {
   7.174 +            return IRQ_MASKED_BY_VTPR;
   7.175 +        }
   7.176 +    }
   7.177 +    else {
   7.178 +        return IRQ_MASKED_BY_INSVC;
   7.179 +    }
   7.180  }
   7.181  
   7.182 +static int irq_masked(VCPU *vcpu, int h_pending, int h_inservice)
   7.183 +{
   7.184 +    int mask;
   7.185 +    
   7.186 +    mask = _xirq_masked(vcpu, h_pending, h_inservice);
   7.187 +    return mask;
   7.188 +}
   7.189 +
   7.190 +
   7.191 +/*
   7.192 + * May come from virtualization fault or
   7.193 + * nested host interrupt.
   7.194 + */
   7.195  void vmx_vcpu_pend_interrupt(VCPU *vcpu, UINT64 vector)
   7.196  {
   7.197      uint64_t    spsr;
   7.198  
   7.199      if (vector & ~0xff) {
   7.200 -        printf("vmx_vcpu_pend_interrupt: bad vector\n");
   7.201 +        DPRINTK("vmx_vcpu_pend_interrupt: bad vector\n");
   7.202          return;
   7.203      }
   7.204      local_irq_save(spsr);
   7.205      VPD_CR(vcpu,irr[vector>>6]) |= 1UL<<(vector&63);
   7.206      local_irq_restore(spsr);
   7.207 +    vcpu->arch.irq_new_pending = 1;
   7.208 +}
   7.209 +
   7.210 +/*
   7.211 + * Add batch of pending interrupt.
   7.212 + * The interrupt source is contained in pend_irr[0-3] with
   7.213 + * each bits stand for one interrupt.
   7.214 + */
   7.215 +void vmx_vcpu_pend_batch_interrupt(VCPU *vcpu, UINT64 *pend_irr)
   7.216 +{
   7.217 +    uint64_t    spsr;
   7.218 +    int     i;
   7.219 +
   7.220 +    local_irq_save(spsr);
   7.221 +    for (i=0 ; i<4; i++ ) {
   7.222 +        VPD_CR(vcpu,irr[i]) |= pend_irr[i];
   7.223 +    }
   7.224 +    local_irq_restore(spsr);
   7.225 +    vcpu->arch.irq_new_pending = 1;
   7.226  }
   7.227  
   7.228  /*
   7.229 @@ -383,7 +450,7 @@ void vmx_vcpu_pend_interrupt(VCPU *vcpu,
   7.230   */
   7.231  int vmx_check_pending_irq(VCPU *vcpu)
   7.232  {
   7.233 -    uint64_t  spsr;
   7.234 +    uint64_t  spsr, mask;
   7.235      int     h_pending, h_inservice;
   7.236      int injected=0;
   7.237      uint64_t    isr;
   7.238 @@ -391,22 +458,25 @@ int vmx_check_pending_irq(VCPU *vcpu)
   7.239  
   7.240      local_irq_save(spsr);
   7.241      h_pending = highest_pending_irq(vcpu);
   7.242 -    if ( h_pending == -1 ) goto chk_irq_exit;
   7.243 +    if ( h_pending == NULL_VECTOR ) goto chk_irq_exit;
   7.244      h_inservice = highest_inservice_irq(vcpu);
   7.245  
   7.246      vpsr.val = vmx_vcpu_get_psr(vcpu);
   7.247 -    if (  vpsr.i &&
   7.248 -        !irq_masked(vcpu, h_pending, h_inservice) ) {
   7.249 -        //inject_guest_irq(v);
   7.250 +    mask = irq_masked(vcpu, h_pending, h_inservice);
   7.251 +    if (  vpsr.i && IRQ_NO_MASKED == mask ) {
   7.252          isr = vpsr.val & IA64_PSR_RI;
   7.253          if ( !vpsr.ic )
   7.254              panic("Interrupt when IC=0\n");
   7.255          vmx_reflect_interruption(0,isr,0, 12 ); // EXT IRQ
   7.256          injected = 1;
   7.257      }
   7.258 -    else if ( VMX_VPD(vcpu,vac).a_int && 
   7.259 -            is_higher_irq(h_pending,h_inservice) ) {
   7.260 -        vmx_inject_vhpi(vcpu,h_pending);
   7.261 +    else if ( mask == IRQ_MASKED_BY_INSVC ) {
    7.262 +        // can't inject VHPI
   7.263 +//        DPRINTK("IRQ masked by higher inservice\n");
   7.264 +    }
   7.265 +    else {
   7.266 +        // masked by vpsr.i or vtpr.
   7.267 +        update_vhpi(vcpu,h_pending);
   7.268      }
   7.269  
   7.270  chk_irq_exit:
   7.271 @@ -414,17 +484,21 @@ chk_irq_exit:
   7.272      return injected;
   7.273  }
   7.274  
   7.275 +/*
   7.276 + * Only coming from virtualization fault.
   7.277 + */
   7.278  void guest_write_eoi(VCPU *vcpu)
   7.279  {
   7.280      int vec;
   7.281      uint64_t  spsr;
   7.282  
   7.283      vec = highest_inservice_irq(vcpu);
   7.284 -    if ( vec < 0 ) panic("Wrong vector to EOI\n");
   7.285 +    if ( vec == NULL_VECTOR ) panic("Wrong vector to EOI\n");
   7.286      local_irq_save(spsr);
   7.287      VLSAPIC_INSVC(vcpu,vec>>6) &= ~(1UL <<(vec&63));
   7.288      local_irq_restore(spsr);
   7.289      VPD_CR(vcpu, eoi)=0;    // overwrite the data
   7.290 +    vmx_check_pending_irq(vcpu);
   7.291  }
   7.292  
   7.293  uint64_t guest_read_vivr(VCPU *vcpu)
   7.294 @@ -435,37 +509,54 @@ uint64_t guest_read_vivr(VCPU *vcpu)
   7.295      local_irq_save(spsr);
   7.296      vec = highest_pending_irq(vcpu);
   7.297      h_inservice = highest_inservice_irq(vcpu);
   7.298 -    if ( vec < 0 || irq_masked(vcpu, vec, h_inservice) ) {
   7.299 +    if ( vec == NULL_VECTOR || 
   7.300 +        irq_masked(vcpu, vec, h_inservice) != IRQ_NO_MASKED ) {
   7.301          local_irq_restore(spsr);
   7.302          return IA64_SPURIOUS_INT_VECTOR;
   7.303      }
   7.304   
   7.305      VLSAPIC_INSVC(vcpu,vec>>6) |= (1UL <<(vec&63));
   7.306      VPD_CR(vcpu, irr[vec>>6]) &= ~(1UL <<(vec&63));
   7.307 -
   7.308 -    h_inservice = highest_inservice_irq(vcpu);
   7.309 -    next = highest_pending_irq(vcpu);
   7.310 -    if ( VMX_VPD(vcpu,vac).a_int &&
   7.311 -        (is_higher_irq(next, h_inservice) || (next == -1)) )
   7.312 -        vmx_inject_vhpi(vcpu, next);
   7.313 +    update_vhpi(vcpu, NULL_VECTOR);     // clear VHPI till EOI or IRR write
   7.314      local_irq_restore(spsr);
   7.315      return (uint64_t)vec;
   7.316  }
   7.317  
   7.318 -void vmx_inject_vhpi(VCPU *vcpu, u8 vec)
   7.319 +static void generate_exirq(VCPU *vcpu)
   7.320  {
   7.321 -        VMX_VPD(vcpu,vhpi) = vec / 16;
   7.322 -
   7.323 -
   7.324 -        // non-maskable
   7.325 -        if ( vec == NMI_VECTOR ) // NMI
   7.326 -                VMX_VPD(vcpu,vhpi) = 32;
   7.327 -        else if (vec == ExtINT_VECTOR) //ExtINT
   7.328 -                VMX_VPD(vcpu,vhpi) = 16;
   7.329 -        else if (vec == -1)
   7.330 -                VMX_VPD(vcpu,vhpi) = 0; /* Nothing pending */
   7.331 -
   7.332 -        ia64_call_vsa ( PAL_VPS_SET_PENDING_INTERRUPT, 
   7.333 -            (uint64_t) &(vcpu->arch.arch_vmx.vpd), 0, 0,0,0,0,0);
   7.334 +    IA64_PSR    vpsr;
   7.335 +    uint64_t    isr;
   7.336 +    
   7.337 +    vpsr.val = vmx_vcpu_get_psr(vcpu);
   7.338 +    update_vhpi(vcpu, NULL_VECTOR);
   7.339 +    isr = vpsr.val & IA64_PSR_RI;
   7.340 +    if ( !vpsr.ic )
   7.341 +        panic("Interrupt when IC=0\n");
   7.342 +    vmx_reflect_interruption(0,isr,0, 12 ); // EXT IRQ
   7.343  }
   7.344  
   7.345 +vhpi_detection(VCPU *vcpu)
   7.346 +{
   7.347 +    uint64_t    threshold,vhpi;
   7.348 +    tpr_t       vtpr;
   7.349 +    IA64_PSR    vpsr;
   7.350 +    
   7.351 +    vpsr.val = vmx_vcpu_get_psr(vcpu);
   7.352 +    vtpr.val = VPD_CR(vcpu, tpr);
   7.353 +
   7.354 +    threshold = ((!vpsr.i) << 5) | (vtpr.mmi << 4) | vtpr.mic;
   7.355 +    vhpi = VMX_VPD(vcpu,vhpi);
   7.356 +    if ( vhpi > threshold ) {
    7.357 +        // interrupt activated
   7.358 +        generate_exirq (vcpu);
   7.359 +    }
   7.360 +}
   7.361 +
   7.362 +vmx_vexirq(VCPU *vcpu)
   7.363 +{
   7.364 +    static  uint64_t  vexirq_count=0;
   7.365 +
   7.366 +    vexirq_count ++;
   7.367 +    printk("Virtual ex-irq %ld\n", vexirq_count);
   7.368 +    generate_exirq (vcpu);
   7.369 +}
     8.1 --- a/xen/arch/ia64/vmx_entry.S	Sat Jul 09 07:37:13 2005 -0700
     8.2 +++ b/xen/arch/ia64/vmx_entry.S	Sat Jul 09 07:58:56 2005 -0700
     8.3 @@ -217,7 +217,7 @@ GLOBAL_ENTRY(ia64_leave_hypervisor)
     8.4      alloc loc0=ar.pfs,0,1,1,0
     8.5      adds out0=16,r12
     8.6      ;;
     8.7 -    br.call.sptk.many b0=vmx_deliver_pending_interrupt
     8.8 +    br.call.sptk.many b0=leave_hypervisor_tail
     8.9      mov ar.pfs=loc0
    8.10      adds r8=IA64_VPD_BASE_OFFSET,r13
    8.11      ;;
     9.1 --- a/xen/arch/ia64/vmx_hypercall.c	Sat Jul 09 07:37:13 2005 -0700
     9.2 +++ b/xen/arch/ia64/vmx_hypercall.c	Sat Jul 09 07:58:56 2005 -0700
     9.3 @@ -141,21 +141,27 @@ void hyper_lock_page(void)
     9.4  static int do_set_shared_page(VCPU *vcpu, u64 gpa)
     9.5  {
     9.6      u64 shared_info, o_info;
     9.7 +    struct domain *d = vcpu->domain;
     9.8 +    struct vcpu *v;
     9.9      if(vcpu->domain!=dom0)
    9.10          return -EPERM;
    9.11      shared_info = __gpa_to_mpa(vcpu->domain, gpa);
    9.12      o_info = (u64)vcpu->domain->shared_info;
    9.13 -    vcpu->domain->shared_info= (shared_info_t *)__va(shared_info);
    9.14 +    d->shared_info= (shared_info_t *)__va(shared_info);
    9.15  
    9.16      /* Copy existing shared info into new page */
    9.17 -    if (!o_info) {
    9.18 -	memcpy((void*)vcpu->domain->shared_info, (void*)o_info, PAGE_SIZE);
    9.19 -	/* If original page belongs to xen heap, then relinguish back
    9.20 -	 * to xen heap. Or else, leave to domain itself to decide.
    9.21 -	 */
    9.22 -	if (likely(IS_XEN_HEAP_FRAME(virt_to_page(o_info))))
    9.23 -		free_xenheap_page(o_info);
    9.24 -    }
    9.25 +    if (o_info) {
    9.26 +    	memcpy((void*)d->shared_info, (void*)o_info, PAGE_SIZE);
    9.27 +    	for_each_vcpu(d, v) {
    9.28 +	        v->vcpu_info = &d->shared_info->vcpu_data[v->vcpu_id];
    9.29 +    	}
     9.30 +    	/* If original page belongs to xen heap, then relinquish back
    9.31 +    	 * to xen heap. Or else, leave to domain itself to decide.
    9.32 +    	 */
    9.33 +    	if (likely(IS_XEN_HEAP_FRAME(virt_to_page(o_info))))
    9.34 +	    	free_xenheap_page(o_info);
    9.35 +    } else
    9.36 +        memset(d->shared_info, 0, PAGE_SIZE);
    9.37      return 0;
    9.38  }
    9.39  
    10.1 --- a/xen/arch/ia64/vmx_init.c	Sat Jul 09 07:37:13 2005 -0700
    10.2 +++ b/xen/arch/ia64/vmx_init.c	Sat Jul 09 07:58:56 2005 -0700
    10.3 @@ -40,6 +40,7 @@
    10.4  #include <asm/vmmu.h>
    10.5  #include <public/arch-ia64.h>
    10.6  #include <asm/vmx_phy_mode.h>
    10.7 +#include <asm/processor.h>
    10.8  #include <asm/vmx.h>
    10.9  #include <xen/mm.h>
   10.10  
   10.11 @@ -225,6 +226,17 @@ vmx_save_state(struct vcpu *v)
   10.12          vmx_purge_double_mapping(dom_rr7, KERNEL_START,
   10.13  				 (u64)v->arch.vtlb->ts->vhpt->hash);
   10.14  
   10.15 +	/* Need to save KRs on domain switch, though HV itself doesn't
   10.16 +	 * use them.
   10.17 +	 */
   10.18 +	v->arch.arch_vmx.vkr[0] = ia64_get_kr(0);
   10.19 +	v->arch.arch_vmx.vkr[1] = ia64_get_kr(1);
   10.20 +	v->arch.arch_vmx.vkr[2] = ia64_get_kr(2);
   10.21 +	v->arch.arch_vmx.vkr[3] = ia64_get_kr(3);
   10.22 +	v->arch.arch_vmx.vkr[4] = ia64_get_kr(4);
   10.23 +	v->arch.arch_vmx.vkr[5] = ia64_get_kr(5);
   10.24 +	v->arch.arch_vmx.vkr[6] = ia64_get_kr(6);
   10.25 +	v->arch.arch_vmx.vkr[7] = ia64_get_kr(7);
   10.26  }
   10.27  
   10.28  /* Even guest is in physical mode, we still need such double mapping */
   10.29 @@ -234,6 +246,7 @@ vmx_load_state(struct vcpu *v)
   10.30  	u64 status, psr;
   10.31  	u64 old_rr0, dom_rr7, rr0_xen_start, rr0_vhpt;
   10.32  	u64 pte_xen, pte_vhpt;
   10.33 +	int i;
   10.34  
   10.35  	status = ia64_pal_vp_restore(v->arch.arch_vmx.vpd, 0);
   10.36  	if (status != PAL_STATUS_SUCCESS)
   10.37 @@ -246,6 +259,14 @@ vmx_load_state(struct vcpu *v)
   10.38  				  (u64)v->arch.vtlb->ts->vhpt->hash,
   10.39  				  pte_xen, pte_vhpt);
   10.40  
   10.41 +	ia64_set_kr(0, v->arch.arch_vmx.vkr[0]);
   10.42 +	ia64_set_kr(1, v->arch.arch_vmx.vkr[1]);
   10.43 +	ia64_set_kr(2, v->arch.arch_vmx.vkr[2]);
   10.44 +	ia64_set_kr(3, v->arch.arch_vmx.vkr[3]);
   10.45 +	ia64_set_kr(4, v->arch.arch_vmx.vkr[4]);
   10.46 +	ia64_set_kr(5, v->arch.arch_vmx.vkr[5]);
   10.47 +	ia64_set_kr(6, v->arch.arch_vmx.vkr[6]);
   10.48 +	ia64_set_kr(7, v->arch.arch_vmx.vkr[7]);
   10.49  	/* Guest vTLB is not required to be switched explicitly, since
   10.50  	 * anchored in vcpu */
   10.51  }
   10.52 @@ -290,7 +311,7 @@ vmx_final_setup_domain(struct domain *d)
   10.53  	vmx_create_vp(v);
   10.54  
   10.55  	/* Set this ed to be vmx */
   10.56 -	v->arch.arch_vmx.flags = 1;
   10.57 +	set_bit(ARCH_VMX_VMCS_LOADED, &v->arch.arch_vmx.flags);
   10.58  
   10.59  	/* Other vmx specific initialization work */
   10.60  }
    11.1 --- a/xen/arch/ia64/vmx_ivt.S	Sat Jul 09 07:37:13 2005 -0700
    11.2 +++ b/xen/arch/ia64/vmx_ivt.S	Sat Jul 09 07:58:56 2005 -0700
    11.3 @@ -347,11 +347,16 @@ ENTRY(vmx_break_fault)
    11.4  	mov r31=pr
    11.5      mov r19=11
    11.6      mov r30=cr.iim
    11.7 -    mov r29=0x1100
    11.8 +    movl r29=0x1100
    11.9      ;;
   11.10 -    cmp4.eq  p6,p7=r29,r30
   11.11 +    cmp.eq p6,p7=r30,r0
   11.12 +    (p6) br.sptk vmx_fault_11
   11.13 +    ;;
   11.14 +    cmp.eq  p6,p7=r29,r30
   11.15      (p6) br.dptk.few vmx_hypercall_dispatch
   11.16      (p7) br.sptk.many vmx_dispatch_break_fault
   11.17 +    ;;
   11.18 +    VMX_FAULT(11);
   11.19  END(vmx_break_fault)
   11.20  
   11.21  	.org vmx_ia64_ivt+0x3000
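
Rendered as C, the new dispatch in vmx_break_fault above (r30 = cr.iim, r29 = 0x1100) is roughly the following sketch; the names mirror the branch targets in the assembly:

    /* Sketch of the branch structure, not literal code from the tree. */
    if (iim == 0)
        vmx_fault_11();                 /* plain break: reflect fault 11 */
    else if (iim == 0x1100)
        vmx_hypercall_dispatch();       /* hypercall break immediate */
    else
        vmx_dispatch_break_fault();     /* anything else: break-fault path */
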
   11.22 @@ -363,6 +368,8 @@ ENTRY(vmx_interrupt)
   11.23      mov r29=cr.ipsr
   11.24      ;;
   11.25      tbit.z p6,p7=r29,IA64_PSR_VM_BIT
   11.26 +    tbit.z p0,p15=r29,IA64_PSR_I_BIT
   11.27 +    ;;
   11.28  (p7) br.sptk vmx_dispatch_interrupt
   11.29      ;;
   11.30  	mov r27=ar.rsc			/* M */
   11.31 @@ -447,7 +454,7 @@ ENTRY(vmx_interrupt)
   11.32      ;;
   11.33      srlz.i
   11.34  	;;
   11.35 -    ssm psr.i
   11.36 +    (p15) ssm psr.i
   11.37  	adds r3=8,r2		// set up second base pointer for SAVE_REST
   11.38  	srlz.i			// ensure everybody knows psr.ic is back on
   11.39  	;;
   11.40 @@ -508,9 +515,12 @@ END(vmx_interrupt)
   11.41  	.org vmx_ia64_ivt+0x3400
   11.42  /////////////////////////////////////////////////////////////////////////////////////////
   11.43  // 0x3400 Entry 13 (size 64 bundles) Reserved
   11.44 +ENTRY(vmx_virtual_exirq)
   11.45  	VMX_DBG_FAULT(13)
   11.46 -	VMX_FAULT(13)
   11.47 -
   11.48 +	mov r31=pr
   11.49 +        mov r19=13
   11.50 +        br.sptk vmx_dispatch_vexirq
   11.51 +END(vmx_virtual_exirq)
   11.52  
   11.53  	.org vmx_ia64_ivt+0x3800
   11.54  /////////////////////////////////////////////////////////////////////////////////////////
   11.55 @@ -876,7 +886,7 @@ ENTRY(vmx_dispatch_reflection)
   11.56      ;;
   11.57      srlz.i                  // guarantee that interruption collection is on
   11.58      ;;
   11.59 -    ssm psr.i               // restore psr.i
   11.60 +    (p15) ssm psr.i               // restore psr.i
   11.61      adds r3=16,r2                // set up second base pointer
   11.62      ;;
   11.63      VMX_SAVE_REST
   11.64 @@ -887,8 +897,6 @@ ENTRY(vmx_dispatch_reflection)
   11.65  END(vmx_dispatch_reflection)
   11.66  
   11.67  ENTRY(vmx_dispatch_virtualization_fault)
   11.68 -    cmp.eq pEml,pNonEml=r0,r0       /* force pEml =1, save r4 ~ r7 */
   11.69 -    ;;
   11.70      VMX_SAVE_MIN_WITH_COVER_R19
   11.71      ;;
   11.72      alloc r14=ar.pfs,0,0,3,0        // now it's safe (must be first in insn group!)
   11.73 @@ -899,7 +907,7 @@ ENTRY(vmx_dispatch_virtualization_fault)
   11.74      ;;
   11.75      srlz.i                  // guarantee that interruption collection is on
   11.76      ;;
   11.77 -    ssm psr.i               // restore psr.i
   11.78 +    (p15) ssm psr.i               // restore psr.i
   11.79      adds r3=16,r2                // set up second base pointer
   11.80      ;;
   11.81      VMX_SAVE_REST
   11.82 @@ -910,6 +918,24 @@ ENTRY(vmx_dispatch_virtualization_fault)
   11.83  END(vmx_dispatch_virtualization_fault)
   11.84  
   11.85  
   11.86 +ENTRY(vmx_dispatch_vexirq)
   11.87 +    VMX_SAVE_MIN_WITH_COVER_R19
   11.88 +    alloc r14=ar.pfs,0,0,1,0
   11.89 +    mov out0=r13
   11.90 +
   11.91 +    ssm psr.ic
   11.92 +    ;;
   11.93 +    srlz.i                  // guarantee that interruption collection is on
   11.94 +    ;;
   11.95 +    (p15) ssm psr.i               // restore psr.i
   11.96 +    adds r3=16,r2                // set up second base pointer
   11.97 +    ;;
   11.98 +    VMX_SAVE_REST
   11.99 +    movl r14=ia64_leave_hypervisor
  11.100 +    ;;
  11.101 +    mov rp=r14
  11.102 +    br.call.sptk.many b6=vmx_vexirq
  11.103 +END(vmx_dispatch_vexirq)
  11.104  
  11.105  ENTRY(vmx_dispatch_tlb_miss)
  11.106      VMX_SAVE_MIN_WITH_COVER_R19
  11.107 @@ -922,7 +948,7 @@ ENTRY(vmx_dispatch_tlb_miss)
  11.108      ;;
  11.109      srlz.i                  // guarantee that interruption collection is on
  11.110      ;;
  11.111 -    ssm psr.i               // restore psr.i
  11.112 +    (p15) ssm psr.i               // restore psr.i
  11.113      adds r3=16,r2                // set up second base pointer
  11.114      ;;
  11.115      VMX_SAVE_REST
  11.116 @@ -947,7 +973,7 @@ ENTRY(vmx_dispatch_break_fault)
  11.117      ;;
  11.118      srlz.i                  // guarantee that interruption collection is on
  11.119      ;;
  11.120 -    ssm psr.i               // restore psr.i
   11.121 +    (p15) ssm psr.i              // restore psr.i
  11.122      adds r3=16,r2                // set up second base pointer
  11.123      ;;
  11.124      VMX_SAVE_REST
  11.125 @@ -965,7 +991,7 @@ ENTRY(vmx_hypercall_dispatch)
  11.126      ;;
  11.127      srlz.i                  // guarantee that interruption collection is on
  11.128      ;;
  11.129 -    ssm psr.i               // restore psr.i
  11.130 +    (p15) ssm psr.i               // restore psr.i
  11.131      adds r3=16,r2                // set up second base pointer
  11.132      ;;
  11.133      VMX_SAVE_REST
  11.134 @@ -987,8 +1013,6 @@ END(vmx_hypercall_dispatch)
  11.135  
  11.136  
  11.137  ENTRY(vmx_dispatch_interrupt)
  11.138 -    cmp.ne pEml,pNonEml=r0,r0       /* force pNonEml =1, don't save r4 ~ r7 */
  11.139 -    ;;
  11.140  	VMX_SAVE_MIN_WITH_COVER_R19	// uses r31; defines r2 and r3
  11.141  	;;
  11.142  	alloc r14=ar.pfs,0,0,2,0 // must be first in an insn group
  11.143 @@ -999,7 +1023,7 @@ ENTRY(vmx_dispatch_interrupt)
  11.144  	;;
  11.145      srlz.i
  11.146      ;;
  11.147 -    ssm psr.i
  11.148 +    (p15) ssm psr.i
  11.149  	adds r3=16,r2		// set up second base pointer for SAVE_REST
  11.150  	;;
  11.151  	VMX_SAVE_REST
    12.1 --- a/xen/arch/ia64/vmx_minstate.h	Sat Jul 09 07:37:13 2005 -0700
    12.2 +++ b/xen/arch/ia64/vmx_minstate.h	Sat Jul 09 07:58:56 2005 -0700
    12.3 @@ -148,10 +148,14 @@
    12.4      mov r20=r1;         /* A */                         \
    12.5      mov r26=ar.unat;        /* M */                         \
    12.6      mov r29=cr.ipsr;        /* M */                         \
    12.7 +    mov r18=cr.isr;         \
    12.8      COVER;              /* B;; (or nothing) */                  \
    12.9      ;;                                          \
   12.10 -    tbit.z p6,p7=r29,IA64_PSR_VM_BIT;       \
   12.11 +    tbit.z p6,p0=r29,IA64_PSR_VM_BIT;       \
   12.12 +    tbit.nz.or p6,p0 = r18,39; \
   12.13 +    ;;        \
   12.14  (p6) br.sptk.few vmx_panic;        \
   12.15 +    tbit.z p0,p15=r29,IA64_PSR_I_BIT;   \
   12.16      mov r1=r16;                     \
   12.17  /*    mov r21=r16;	*/		\
   12.18      /* switch from user to kernel RBS: */                           \
    13.1 --- a/xen/arch/ia64/vmx_phy_mode.c	Sat Jul 09 07:37:13 2005 -0700
    13.2 +++ b/xen/arch/ia64/vmx_phy_mode.c	Sat Jul 09 07:58:56 2005 -0700
    13.3 @@ -119,38 +119,79 @@ physical_mode_init(VCPU *vcpu)
    13.4      vcpu->arch.old_rsc = 0;
    13.5      vcpu->arch.mode_flags = GUEST_IN_PHY;
    13.6  
    13.7 -    psr = ia64_clear_ic();
    13.8 +    return;
    13.9 +}
   13.10  
   13.11 -    ia64_set_rr((VRN0<<VRN_SHIFT), vcpu->domain->arch.emul_phy_rr0.rrval);
   13.12 -    ia64_srlz_d();
   13.13 -    ia64_set_rr((VRN4<<VRN_SHIFT), vcpu->domain->arch.emul_phy_rr4.rrval);
   13.14 -    ia64_srlz_d();
   13.15 +extern u64 get_mfn(domid_t domid, u64 gpfn, u64 pages);
   13.16  #if 0
   13.17 -    /* FIXME: temp workaround to support guest physical mode */
   13.18 -ia64_itr(0x1, IA64_TEMP_PHYSICAL, dom0_start,
   13.19 -	 pte_val(pfn_pte((dom0_start >> PAGE_SHIFT), PAGE_KERNEL)),
   13.20 -	 28);
   13.21 -ia64_itr(0x2, IA64_TEMP_PHYSICAL, dom0_start,
   13.22 -	 pte_val(pfn_pte((dom0_start >> PAGE_SHIFT), PAGE_KERNEL)),
   13.23 -	 28);
   13.24 -ia64_srlz_i();
   13.25 -#endif
   13.26 +void
   13.27 +physical_itlb_miss_domn(VCPU *vcpu, u64 vadr)
   13.28 +{
   13.29 +    u64 psr;
   13.30 +    IA64_PSR vpsr;
   13.31 +    u64 mppn,gppn,mpp1,gpp1;
   13.32 +    struct domain *d;
   13.33 +    static u64 test=0;
   13.34 +    d=vcpu->domain;
   13.35 +    if(test)
   13.36 +        panic("domn physical itlb miss happen\n");
   13.37 +    else
   13.38 +        test=1;
   13.39 +    vpsr.val=vmx_vcpu_get_psr(vcpu);
   13.40 +    gppn=(vadr<<1)>>13;
   13.41 +    mppn = get_mfn(DOMID_SELF,gppn,1);
   13.42 +    mppn=(mppn<<12)|(vpsr.cpl<<7);
   13.43 +    gpp1=0;
   13.44 +    mpp1 = get_mfn(DOMID_SELF,gpp1,1);
   13.45 +    mpp1=(mpp1<<12)|(vpsr.cpl<<7);
   13.46 +//    if(vadr>>63)
   13.47 +//        mppn |= PHY_PAGE_UC;
   13.48 +//    else
   13.49 +//        mppn |= PHY_PAGE_WB;
   13.50 +    mpp1 |= PHY_PAGE_WB;
   13.51 +    psr=ia64_clear_ic();
   13.52 +    ia64_itr(0x1, IA64_TEMP_PHYSICAL, vadr&(~0xfff), (mppn|PHY_PAGE_WB), 24);
   13.53 +    ia64_srlz_i();
   13.54 +    ia64_itr(0x2, IA64_TEMP_PHYSICAL, vadr&(~0xfff), (mppn|PHY_PAGE_WB), 24);
   13.55 +    ia64_stop();
   13.56 +    ia64_srlz_i();
   13.57 +    ia64_itr(0x1, IA64_TEMP_PHYSICAL+1, vadr&(~0x8000000000000fffUL), (mppn|PHY_PAGE_WB), 24);
   13.58 +    ia64_srlz_i();
   13.59 +    ia64_itr(0x2, IA64_TEMP_PHYSICAL+1, vadr&(~0x8000000000000fffUL), (mppn|PHY_PAGE_WB), 24);
   13.60 +    ia64_stop();
   13.61 +    ia64_srlz_i();
   13.62 +    ia64_itr(0x1, IA64_TEMP_PHYSICAL+2, gpp1&(~0xfff), mpp1, 28);
   13.63 +    ia64_srlz_i();
   13.64 +    ia64_itr(0x2, IA64_TEMP_PHYSICAL+2, gpp1&(~0xfff), mpp1, 28);
   13.65 +    ia64_stop();
   13.66 +    ia64_srlz_i();
   13.67      ia64_set_psr(psr);
   13.68      ia64_srlz_i();
   13.69      return;
   13.70  }
   13.71 +#endif
   13.72  
   13.73 -extern u64 get_mfn(domid_t domid, u64 gpfn, u64 pages);
   13.74  void
   13.75  physical_itlb_miss(VCPU *vcpu, u64 vadr)
   13.76  {
   13.77 +        physical_itlb_miss_dom0(vcpu, vadr);
   13.78 +}
   13.79 +
   13.80 +
   13.81 +void
   13.82 +physical_itlb_miss_dom0(VCPU *vcpu, u64 vadr)
   13.83 +{
   13.84      u64 psr;
   13.85      IA64_PSR vpsr;
   13.86      u64 mppn,gppn;
   13.87      vpsr.val=vmx_vcpu_get_psr(vcpu);
   13.88      gppn=(vadr<<1)>>13;
   13.89      mppn = get_mfn(DOMID_SELF,gppn,1);
   13.90 -    mppn=(mppn<<12)|(vpsr.cpl<<7)|PHY_PAGE_WB;
   13.91 +    mppn=(mppn<<12)|(vpsr.cpl<<7); 
   13.92 +//    if(vadr>>63)
   13.93 +//       mppn |= PHY_PAGE_UC;
   13.94 +//    else
   13.95 +    mppn |= PHY_PAGE_WB;
   13.96  
   13.97      psr=ia64_clear_ic();
   13.98      ia64_itc(1,vadr&(~0xfff),mppn,EMUL_PHY_PAGE_SHIFT);
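
In physical_itlb_miss_dom0() the expression gppn = (vadr << 1) >> 13 first drops bit 63 (the uncacheable-attribute bit of a physical-mode address) and then yields the 4KB guest frame number. A sketch of the equivalent, more explicit form, reusing the names from the patch:

    /* Sketch: same computation as the shift pair above. */
    u64 gppn = (vadr & ~(1UL << 63)) >> 12;      /* 4KB guest page frame number */
    u64 mppn = get_mfn(DOMID_SELF, gppn, 1);     /* machine frame number */
    u64 pte  = (mppn << 12) | (vpsr.cpl << 7) | PHY_PAGE_WB;
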
   13.99 @@ -159,12 +200,15 @@ physical_itlb_miss(VCPU *vcpu, u64 vadr)
  13.100      return;
  13.101  }
  13.102  
  13.103 +
  13.104  void
  13.105  physical_dtlb_miss(VCPU *vcpu, u64 vadr)
  13.106  {
  13.107      u64 psr;
  13.108      IA64_PSR vpsr;
  13.109      u64 mppn,gppn;
  13.110 +//    if(vcpu->domain!=dom0)
  13.111 +//        panic("dom n physical dtlb miss happen\n");
  13.112      vpsr.val=vmx_vcpu_get_psr(vcpu);
  13.113      gppn=(vadr<<1)>>13;
  13.114      mppn = get_mfn(DOMID_SELF,gppn,1);
  13.115 @@ -209,6 +253,8 @@ vmx_load_all_rr(VCPU *vcpu)
  13.116  	 * mode in same region
  13.117  	 */
  13.118  	if (is_physical_mode(vcpu)) {
  13.119 +		if (vcpu->arch.mode_flags & GUEST_PHY_EMUL)
  13.120 +			panic("Unexpected domain switch in phy emul\n");
  13.121  		ia64_set_rr((VRN0 << VRN_SHIFT),
  13.122  			     vcpu->domain->arch.emul_phy_rr0.rrval);
  13.123  		ia64_set_rr((VRN4 << VRN_SHIFT),
  13.124 @@ -262,15 +308,10 @@ switch_to_virtual_rid(VCPU *vcpu)
  13.125      psr=ia64_clear_ic();
  13.126  
  13.127      mrr=vmx_vcpu_rr(vcpu,VRN0<<VRN_SHIFT);
  13.128 -    mrr.rid = VRID_2_MRID(vcpu,mrr.rid);
  13.129 -//VRID_2_MRID(vcpu,mrr.rid);
  13.130 -    mrr.ve = 1;
  13.131 -    ia64_set_rr(VRN0<<VRN_SHIFT, mrr.rrval );
  13.132 +    ia64_set_rr(VRN0<<VRN_SHIFT, vmx_vrrtomrr(vcpu, mrr.rrval));
  13.133      ia64_srlz_d();
  13.134      mrr=vmx_vcpu_rr(vcpu,VRN4<<VRN_SHIFT);
  13.135 -    mrr.rid = VRID_2_MRID(vcpu,mrr.rid);
  13.136 -    mrr.ve = 1;
  13.137 -    ia64_set_rr(VRN4<<VRN_SHIFT, mrr.rrval );
  13.138 +    ia64_set_rr(VRN4<<VRN_SHIFT, vmx_vrrtomrr(vcpu, mrr.rrval));
  13.139      ia64_srlz_d();
  13.140      ia64_set_psr(psr);
  13.141      ia64_srlz_i();
  13.142 @@ -377,8 +418,10 @@ check_mm_mode_switch (VCPU *vcpu,  IA64_
  13.143  void
  13.144  prepare_if_physical_mode(VCPU *vcpu)
  13.145  {
  13.146 -    if (is_physical_mode(vcpu))
  13.147 +    if (is_physical_mode(vcpu)) {
  13.148 +	vcpu->arch.mode_flags |= GUEST_PHY_EMUL;
  13.149          switch_to_virtual_rid(vcpu);
  13.150 +    }
  13.151      return;
  13.152  }
  13.153  
  13.154 @@ -386,8 +429,10 @@ prepare_if_physical_mode(VCPU *vcpu)
  13.155  void
  13.156  recover_if_physical_mode(VCPU *vcpu)
  13.157  {
  13.158 -    if (is_physical_mode(vcpu))
  13.159 +    if (is_physical_mode(vcpu)) {
  13.160 +	vcpu->arch.mode_flags &= ~GUEST_PHY_EMUL;
  13.161          switch_to_physical_rid(vcpu);
  13.162 +    }
  13.163      return;
  13.164  }
  13.165  
    14.1 --- a/xen/arch/ia64/vmx_process.c	Sat Jul 09 07:37:13 2005 -0700
    14.2 +++ b/xen/arch/ia64/vmx_process.c	Sat Jul 09 07:58:56 2005 -0700
    14.3 @@ -45,7 +45,9 @@
    14.4  #include <asm/dom_fw.h>
    14.5  #include <asm/vmx_vcpu.h>
    14.6  #include <asm/kregs.h>
    14.7 +#include <asm/vmx.h>
    14.8  #include <asm/vmx_mm_def.h>
    14.9 +#include <xen/mm.h>
   14.10  /* reset all PSR field to 0, except up,mfl,mfh,pk,dt,rt,mc,it */
   14.11  #define INITIAL_PSR_VALUE_AT_INTERRUPTION 0x0000001808028034
   14.12  
   14.13 @@ -53,7 +55,7 @@
   14.14  extern struct ia64_sal_retval pal_emulator_static(UINT64);
   14.15  extern struct ia64_sal_retval sal_emulator(UINT64,UINT64,UINT64,UINT64,UINT64,UINT64,UINT64,UINT64);
   14.16  extern void rnat_consumption (VCPU *vcpu);
   14.17 -
   14.18 +#define DOMN_PAL_REQUEST    0x110000
   14.19  IA64FAULT
   14.20  vmx_ia64_handle_break (unsigned long ifa, struct pt_regs *regs, unsigned long isr, unsigned long iim)
   14.21  {
   14.22 @@ -148,7 +150,10 @@ vmx_ia64_handle_break (unsigned long ifa
   14.23  				regs->r2);
   14.24  #endif
   14.25  		vmx_vcpu_increment_iip(current);
   14.26 -	} else
   14.27 +	}else if(iim == DOMN_PAL_REQUEST){
   14.28 +        pal_emul(current);
   14.29 +		vmx_vcpu_increment_iip(current);
   14.30 +    }  else
   14.31  		vmx_reflect_interruption(ifa,isr,iim,11);
   14.32  }
   14.33  
   14.34 @@ -187,26 +192,43 @@ void vmx_reflect_interruption(UINT64 ifa
   14.35  // ONLY gets called from ia64_leave_kernel
   14.36  // ONLY call with interrupts disabled?? (else might miss one?)
   14.37  // NEVER successful if already reflecting a trap/fault because psr.i==0
   14.38 -void vmx_deliver_pending_interrupt(struct pt_regs *regs)
   14.39 +void leave_hypervisor_tail(struct pt_regs *regs)
   14.40  {
   14.41  	struct domain *d = current->domain;
   14.42  	struct vcpu *v = current;
   14.43  	// FIXME: Will this work properly if doing an RFI???
   14.44  	if (!is_idle_task(d) ) {	// always comes from guest
   14.45 -		//vcpu_poke_timer(v);
   14.46 -		//if (vcpu_deliverable_interrupts(v)) {
   14.47 -		//	unsigned long isr = regs->cr_ipsr & IA64_PSR_RI;
   14.48 -		//	foodpi();
   14.49 -		//	reflect_interruption(0,isr,0,regs,IA64_EXTINT_VECTOR);
   14.50 -		//}
   14.51  	        extern void vmx_dorfirfi(void);
   14.52  		struct pt_regs *user_regs = vcpu_regs(current);
   14.53  
   14.54 + 		if (local_softirq_pending())
   14.55 + 			do_softirq();
   14.56 +		local_irq_disable();
   14.57 + 
   14.58  		if (user_regs != regs)
   14.59  			printk("WARNING: checking pending interrupt in nested interrupt!!!\n");
   14.60 -		if (regs->cr_iip == *(unsigned long *)vmx_dorfirfi)
   14.61 -			return;
   14.62 -		vmx_check_pending_irq(v);
   14.63 +
    14.64 +		/* A VMX domain N has another interrupt source, namely the DM  */
   14.65 +                if (test_bit(ARCH_VMX_INTR_ASSIST, &v->arch.arch_vmx.flags))
   14.66 +                      vmx_intr_assist(v);
   14.67 +
    14.68 + 		/* FIXME: Check the event pending indicator, and set the
    14.69 + 		 * pending bit if necessary to inject it back to the guest.
    14.70 + 		 * Be careful about the window between this check and the
    14.71 + 		 * assist above, since IOPACKET_PORT shouldn't be injected
    14.72 + 		 * into the vmx domain.
    14.73 + 		 *
    14.74 + 		 * For now, hardcode the vector as 0x10.
   14.75 + 		 */
   14.76 + 		if (event_pending(v)&&(!((v->arch.arch_vmx.in_service[0])&(1UL<<0x10)))) {
   14.77 + 			VPD_CR(v, irr[0]) |= 1UL << 0x10;
   14.78 + 			v->arch.irq_new_pending = 1;
   14.79 + 		}
   14.80 + 
   14.81 + 		if ( v->arch.irq_new_pending ) {
   14.82 + 			v->arch.irq_new_pending = 0;
   14.83 + 			vmx_check_pending_irq(v);
   14.84 + 		}
   14.85  	}
   14.86  }
   14.87  
   14.88 @@ -244,7 +266,11 @@ void vmx_hpw_miss(VCPU *vcpu, u64 vec, u
   14.89          return;
   14.90      }
   14.91      if((vec==2)&&(!vpsr.dt)){
   14.92 -        physical_dtlb_miss(vcpu, vadr);
   14.93 +        if(vcpu->domain!=dom0&&__gpfn_is_io(vcpu->domain,(vadr<<1)>>(PAGE_SHIFT+1))){
   14.94 +            emulate_io_inst(vcpu,((vadr<<1)>>1),4);   //  UC
   14.95 +        }else{
   14.96 +            physical_dtlb_miss(vcpu, vadr);
   14.97 +        }
   14.98          return;
   14.99      }
  14.100      vrr = vmx_vcpu_rr(vcpu,vadr);
  14.101 @@ -255,6 +281,11 @@ void vmx_hpw_miss(VCPU *vcpu, u64 vec, u
  14.102  //    prepare_if_physical_mode(vcpu);
  14.103  
  14.104      if(data=vtlb_lookup_ex(vtlb, vrr.rid, vadr,type)){
  14.105 +        if(vcpu->domain!=dom0&&type==DSIDE_TLB && __gpfn_is_io(vcpu->domain, data->ppn>>(PAGE_SHIFT-12))){
  14.106 +            vadr=(vadr&((1UL<<data->ps)-1))+(data->ppn>>(data->ps-12)<<data->ps);
  14.107 +            emulate_io_inst(vcpu, vadr, data->ma);
  14.108 +            return IA64_FAULT;
  14.109 +        }
  14.110      	if ( data->ps != vrr.ps ) {
  14.111      		machine_tlb_insert(vcpu, data);
  14.112      	}
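
The I/O check added to vmx_hpw_miss() above rebuilds the guest physical address from the vTLB entry before calling emulate_io_inst(): the page offset of the faulting address is combined with the entry's ppn scaled to the entry's page size. A sketch of that composition, using the field names from the patch:

    /* Sketch: data->ppn is a 4KB-granular frame number, data->ps the page-size shift. */
    u64 offset = vadr & ((1UL << data->ps) - 1);              /* offset within page */
    u64 base   = (data->ppn >> (data->ps - 12)) << data->ps;  /* page-aligned gpa */
    emulate_io_inst(vcpu, base + offset, data->ma);           /* ma: memory attribute */
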
    15.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    15.2 +++ b/xen/arch/ia64/vmx_support.c	Sat Jul 09 07:58:56 2005 -0700
    15.3 @@ -0,0 +1,159 @@
    15.4 +
    15.5 +/* -*-  Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
    15.6 +/*
    15.7 + * vmx_support.c: vmx specific support interface.
    15.8 + * Copyright (c) 2005, Intel Corporation.
    15.9 + *	Kun Tian (Kevin Tian) (Kevin.tian@intel.com)
   15.10 + *
   15.11 + * This program is free software; you can redistribute it and/or modify it
   15.12 + * under the terms and conditions of the GNU General Public License,
   15.13 + * version 2, as published by the Free Software Foundation.
   15.14 + *
   15.15 + * This program is distributed in the hope it will be useful, but WITHOUT
   15.16 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   15.17 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   15.18 + * more details.
   15.19 + *
   15.20 + * You should have received a copy of the GNU General Public License along with
   15.21 + * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
   15.22 + * Place - Suite 330, Boston, MA 02111-1307 USA.
   15.23 + *
   15.24 + */
   15.25 +#include <xen/config.h>
   15.26 +#include <xen/sched.h>
   15.27 +#include <public/io/ioreq.h>
   15.28 +#include <asm/vmx.h>
   15.29 +#include <asm/vmx_vcpu.h>
   15.30 +
   15.31 +/*
   15.32 + * I/O emulation should be atomic from the domain's point of view. However,
   15.33 + * when the emulation code is waiting for I/O completion via do_block,
   15.34 + * other events like a DM interrupt, VBD, etc. may arrive and unblock the
   15.35 + * current execution flow. So we have to prepare to re-block if unblocked
   15.36 + * by a non-I/O-completion event.
   15.37 + */
   15.38 +void vmx_wait_io(void)
   15.39 +{
   15.40 +    struct vcpu *v = current;
   15.41 +    struct domain *d = v->domain;
   15.42 +    extern void do_block();
   15.43 +
   15.44 +    do {
   15.45 +	if (!test_bit(IOPACKET_PORT,
   15.46 +		&d->shared_info->evtchn_pending[0]))
   15.47 +	    do_block();
   15.48 +
   15.49 +	/* Unblocked because some event has arrived. Clear the pending
   15.50 +	 * indication immediately if deciding to go for the io assist.
   15.51 +	 */
   15.52 +	if (test_and_clear_bit(IOPACKET_PORT,
   15.53 +		&d->shared_info->evtchn_pending[0])) {
   15.54 +	    clear_bit(IOPACKET_PORT>>5, &v->vcpu_info->evtchn_pending_sel);
   15.55 +	    clear_bit(0, &v->vcpu_info->evtchn_upcall_pending);
   15.56 +	    vmx_io_assist(v);
   15.57 +	}
   15.58 +
   15.59 +
   15.60 +	if (test_bit(ARCH_VMX_IO_WAIT, &v->arch.arch_vmx.flags)) {
   15.61 +	    /*
   15.62 +	     * The latest event is not an I/O completion, so clear the corresponding
   15.63 +	     * selector and pending indication, to allow the real event to come in.
   15.64 +	     */
   15.65 +	    clear_bit(0, &v->vcpu_info->evtchn_upcall_pending);
   15.66 +
   15.67 +	    /* Actually a window is left here before the selector is cleared.
   15.68 +	     * However this window only delays the indication of the coming event;
   15.69 +	     * nothing is lost. The next loop will check the I/O channel to close
   15.70 +	     * this window.
   15.71 +	     */
   15.72 +	    clear_bit(IOPACKET_PORT>>5, &v->vcpu_info->evtchn_pending_sel);
   15.73 +	}
   15.74 +	else
   15.75 +	    break;
   15.76 +    } while (test_bit(ARCH_VMX_IO_WAIT, &v->arch.arch_vmx.flags));
   15.77 +}
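
vmx_wait_io() above clears three levels of pending state by hand: the per-port bit in evtchn_pending[], the 32-port group selector, and the per-vcpu upcall summary bit. A small helper capturing the same sequence for one port, as a sketch (the helper name is illustrative, not part of the patch):

    static inline void ack_event_port(struct vcpu *v, struct domain *d, int port)
    {
        if (test_and_clear_bit(port, &d->shared_info->evtchn_pending[0])) {
            clear_bit(port >> 5, &v->vcpu_info->evtchn_pending_sel);  /* 32-port group */
            clear_bit(0, &v->vcpu_info->evtchn_upcall_pending);       /* vcpu summary */
        }
    }
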
   15.78 +
   15.79 +/*
   15.80 + * Only place to call vmx_io_assist is mmio/legacy_io emulation.
   15.81 + * Since I/O emulation is synchronous, it shouldn't be called in
   15.82 + * other places. This is not like x86, since IA-64 implements a
   15.83 + * per-vp stack without continuation.
   15.84 + */
   15.85 +void vmx_io_assist(struct vcpu *v)
   15.86 +{
   15.87 +    vcpu_iodata_t *vio;
   15.88 +    ioreq_t *p;
   15.89 +
   15.90 +    /*
   15.91 +     * This shared page contains I/O request between emulation code
   15.92 +     * and device model.
   15.93 +     */
   15.94 +    vio = (vcpu_iodata_t *)v->arch.arch_vmx.vmx_platform.shared_page_va;
   15.95 +    if (!vio)
   15.96 +	panic("Corruption: bad shared page: %lx\n", (unsigned long)vio);
   15.97 +
   15.98 +    p = &vio->vp_ioreq;
   15.99 +
  15.100 +    if (p->state == STATE_IORESP_HOOK)
  15.101 +	panic("Not supported: No hook available for DM request\n");
  15.102 +
  15.103 +    if (test_bit(ARCH_VMX_IO_WAIT, &v->arch.arch_vmx.flags)) {
  15.104 +	if (p->state != STATE_IORESP_READY) {
   15.105 +	    /* Can't do_block here, for the same reason as the other places that
   15.106 +	     * use vmx_wait_io. A simple return is safe since vmx_wait_io will
   15.107 +	     * try to block again.
  15.108 +	     */
  15.109 +	    return; 
  15.110 +	} else
  15.111 +	    p->state = STATE_INVALID;
  15.112 +
  15.113 +	clear_bit(ARCH_VMX_IO_WAIT, &v->arch.arch_vmx.flags);
  15.114 +    } else
   15.115 +	return; /* Spurious event? */
  15.116 +}
  15.117 +
  15.118 +/*
   15.119 + * A VMX domain N has two types of interrupt source: the lsapic model within
   15.120 + * the HV, and the device model within domain 0 (the service OS). There is
   15.121 + * another pending array in the shared page, manipulated by the device model
   15.122 + * directly. To conform to the VT-i spec, we have to sync the pending bits in
   15.123 + * the shared page into the VPD. This has to be done before checking pending
   15.124 + * interrupts on resume to the guest. For domain 0, all interrupt sources come
   15.125 + * from the HV, which therefore doesn't require this assist.
  15.126 + */
  15.127 +void vmx_intr_assist(struct vcpu *v)
  15.128 +{
  15.129 +    vcpu_iodata_t *vio;
  15.130 +    struct domain *d = v->domain;
  15.131 +    extern void vmx_vcpu_pend_batch_interrupt(VCPU *vcpu,
  15.132 +					unsigned long *pend_irr);
  15.133 +
   15.134 +    /* I/O emulation is atomic, so it's impossible for the execution flow to
   15.135 +     * leave vmx_wait_io while the guest is still waiting for a response.
  15.136 +     */
  15.137 +    if (test_bit(ARCH_VMX_IO_WAIT, &v->arch.arch_vmx.flags))
  15.138 +	panic("!!!Bad resume to guest before I/O emulation is done.\n");
  15.139 +
  15.140 +    /* Clear indicator specific to interrupt delivered from DM */
  15.141 +    if (test_and_clear_bit(IOPACKET_PORT,
  15.142 +		&d->shared_info->evtchn_pending[0])) {
  15.143 +	if (!d->shared_info->evtchn_pending[IOPACKET_PORT >> 5])
  15.144 +	    clear_bit(IOPACKET_PORT>>5, &v->vcpu_info->evtchn_pending_sel);
  15.145 +
  15.146 +	if (!v->vcpu_info->evtchn_pending_sel)
  15.147 +	    clear_bit(0, &v->vcpu_info->evtchn_upcall_pending);
  15.148 +    }
  15.149 +
   15.150 +    /* Even without an event pending, we still need to sync the pending bits
   15.151 +     * between the DM and the vlsapic. The reason is that interrupt delivery
   15.152 +     * shares the same event channel as I/O emulation, whose corresponding
   15.153 +     * indicator may have been cleared in vmx_wait_io().
  15.154 +     */
  15.155 +    vio = (vcpu_iodata_t *)v->arch.arch_vmx.vmx_platform.shared_page_va;
  15.156 +    if (!vio)
  15.157 +	panic("Corruption: bad shared page: %lx\n", (unsigned long)vio);
  15.158 +
  15.159 +    vmx_vcpu_pend_batch_interrupt(v, &vio->vp_intr[0]); 
  15.160 +    memset(&vio->vp_intr[0], 0, sizeof(vio->vp_intr));
  15.161 +    return;
  15.162 +}
    16.1 --- a/xen/arch/ia64/vmx_vcpu.c	Sat Jul 09 07:37:13 2005 -0700
    16.2 +++ b/xen/arch/ia64/vmx_vcpu.c	Sat Jul 09 07:58:56 2005 -0700
    16.3 @@ -23,8 +23,6 @@
    16.4   *  Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com)
    16.5   */
    16.6  
    16.7 -
    16.8 -
    16.9  #include <linux/sched.h>
   16.10  #include <public/arch-ia64.h>
   16.11  #include <asm/ia64_int.h>
   16.12 @@ -71,8 +69,8 @@
   16.13  
   16.14  //unsigned long last_guest_rsm = 0x0;
   16.15  struct guest_psr_bundle{
   16.16 -	unsigned long ip;
   16.17 -	unsigned long psr;
   16.18 +    unsigned long ip;
   16.19 +    unsigned long psr;
   16.20  };
   16.21  
   16.22  struct guest_psr_bundle guest_psr_buf[100];
   16.23 @@ -107,20 +105,24 @@ vmx_vcpu_set_psr(VCPU *vcpu, unsigned lo
   16.24                  IA64_PSR_SS | IA64_PSR_ED | IA64_PSR_IA
   16.25              ));
   16.26  
   16.27 +    if ( !old_psr.i && (value & IA64_PSR_I) ) {
   16.28 +        // vpsr.i 0->1
   16.29 +        vcpu->arch.irq_new_condition = 1;
   16.30 +    }
   16.31      new_psr.val=vmx_vcpu_get_psr(vcpu);
   16.32      {
   16.33 -	struct xen_regs *regs = vcpu_regs(vcpu);
   16.34 -	guest_psr_buf[guest_psr_index].ip = regs->cr_iip;
   16.35 -	guest_psr_buf[guest_psr_index].psr = new_psr.val;
   16.36 -	if (++guest_psr_index >= 100)
   16.37 -	    guest_psr_index = 0;
   16.38 +    struct xen_regs *regs = vcpu_regs(vcpu);
   16.39 +    guest_psr_buf[guest_psr_index].ip = regs->cr_iip;
   16.40 +    guest_psr_buf[guest_psr_index].psr = new_psr.val;
   16.41 +    if (++guest_psr_index >= 100)
   16.42 +        guest_psr_index = 0;
   16.43      }
   16.44  #if 0
   16.45      if (old_psr.i != new_psr.i) {
   16.46 -	if (old_psr.i)
   16.47 -		last_guest_rsm = vcpu_regs(vcpu)->cr_iip;
   16.48 -	else
   16.49 -		last_guest_rsm = 0;
   16.50 +    if (old_psr.i)
   16.51 +        last_guest_rsm = vcpu_regs(vcpu)->cr_iip;
   16.52 +    else
   16.53 +        last_guest_rsm = 0;
   16.54      }
   16.55  #endif
   16.56  
   16.57 @@ -270,8 +272,8 @@ check_entry(u64 va, u64 ps, char *str)
   16.58  {
   16.59       va &= ~ (PSIZE(ps)-1);
   16.60       if ( va == 0x2000000002908000UL ||
   16.61 -	  va == 0x600000000000C000UL ) {
   16.62 -	stop();
   16.63 +      va == 0x600000000000C000UL ) {
   16.64 +    stop();
   16.65       }
   16.66       if (tlb_debug) printf("%s at %lx %lx\n", str, va, 1UL<<ps);
   16.67  }
   16.68 @@ -433,4 +435,11 @@ IA64FAULT vmx_vcpu_set_psr_l(VCPU *vcpu,
   16.69      return IA64_NO_FAULT;
   16.70  }
   16.71  
   16.72 +IA64FAULT
   16.73 +vmx_vcpu_set_tpr(VCPU *vcpu, u64 val)
   16.74 +{
   16.75 +    VPD_CR(vcpu,tpr)=val;
   16.76 +    vcpu->arch.irq_new_condition = 1;
   16.77 +    return IA64_NO_FAULT;
   16.78 +}
   16.79  
    17.1 --- a/xen/arch/ia64/vmx_virt.c	Sat Jul 09 07:37:13 2005 -0700
    17.2 +++ b/xen/arch/ia64/vmx_virt.c	Sat Jul 09 07:58:56 2005 -0700
    17.3 @@ -1276,7 +1276,13 @@ IA64FAULT vmx_emul_mov_from_cr(VCPU *vcp
    17.4  }
    17.5  
    17.6  
    17.7 -
    17.8 +static void post_emulation_action(VCPU *vcpu)
    17.9 +{
   17.10 +    if ( vcpu->arch.irq_new_condition ) {
   17.11 +        vcpu->arch.irq_new_condition = 0;
   17.12 +        vhpi_detection(vcpu);
   17.13 +    }
   17.14 +}
   17.15  
   17.16  //#define  BYPASS_VMAL_OPCODE
   17.17  extern IA64_SLOT_TYPE  slot_types[0x20][3];
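
The new post_emulation_action() closes the loop opened in vmx_vcpu_set_psr() and vmx_vcpu_set_tpr(): an emulated write that unmasks vpsr.i or changes vtpr sets irq_new_condition, and once the instruction has been emulated the pending VHPI is re-evaluated. A condensed sketch of that flow:

    /* Sketch of the control flow, not literal code. */
    /* ... during emulation of a write to psr or cr.tpr ... */
    vcpu->arch.irq_new_condition = 1;    /* masking state may have changed */

    /* ... after the emulated instruction completes (post_emulation_action) ... */
    if (vcpu->arch.irq_new_condition) {
        vcpu->arch.irq_new_condition = 0;
        vhpi_detection(vcpu);            /* may raise a virtual external interrupt */
    }
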
   17.18 @@ -1336,7 +1342,7 @@ if ( (cause == 0xff && opcode == 0x1e000
   17.19      slot = ((struct ia64_psr *)&(regs->cr_ipsr))->ri;
   17.20      if (!slot) inst.inst = bundle.slot0;
   17.21      else if (slot == 1)
   17.22 -        inst.inst = bundle.slot1a + (bundle.slot1b<<23);
   17.23 +        inst.inst = bundle.slot1a + (bundle.slot1b<<18);
   17.24      else if (slot == 2) inst.inst = bundle.slot2;
   17.25      else printf("priv_handle_op: illegal slot: %d\n", slot);
   17.26      slot_type = slot_types[bundle.template][slot];
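
The one-character change above (<<23 becomes <<18) follows from the IA-64 bundle layout: a bundle is 128 bits, a 5-bit template followed by three 41-bit slots, so slot 1 straddles the two 64-bit words with its low 18 bits in the first word (slot1a) and its upper 23 bits in the second (slot1b). A sketch of the recombination:

    /* 5-bit template + 3 x 41-bit slots = 128 bits; slot 1 spans the
     * 64-bit boundary: 18 bits in word 0, 23 bits in word 1. */
    inst.inst = bundle.slot1a | ((u64)bundle.slot1b << 18);
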
   17.27 @@ -1478,9 +1484,11 @@ if ( (cause == 0xff && opcode == 0x1e000
   17.28  	status=IA64_FAULT;
   17.29          break;
   17.30      default:
   17.31 -        printf("unknown cause %d:\n", cause);
   17.32 +        printf("unknown cause %d, iip: %lx, ipsr: %lx\n", cause,regs->cr_iip,regs->cr_ipsr);
   17.33 +        while(1);
   17.34  	/* For unknown cause, let hardware to re-execute */
   17.35  	status=IA64_RETRY;
   17.36 +        break;
   17.37  //        panic("unknown cause in virtualization intercept");
   17.38      };
   17.39  
   17.40 @@ -1494,6 +1502,7 @@ if ( (cause == 0xff && opcode == 0x1e000
   17.41      }
   17.42  
   17.43      recover_if_physical_mode(vcpu);
   17.44 +    post_emulation_action (vcpu);
   17.45  //TODO    set_irq_check(v);
   17.46      return;
   17.47  
    18.1 --- a/xen/arch/ia64/vtlb.c	Sat Jul 09 07:37:13 2005 -0700
    18.2 +++ b/xen/arch/ia64/vtlb.c	Sat Jul 09 07:58:56 2005 -0700
    18.3 @@ -26,7 +26,7 @@
    18.4  #include <asm/vmx_mm_def.h>
    18.5  #include <asm/gcc_intrin.h>
    18.6  #include <xen/interrupt.h>
    18.7 -#include <asm/vcpu.h>
    18.8 +#include <asm/vmx_vcpu.h>
    18.9  #define  MAX_CCH_LENGTH     40
   18.10  
   18.11  
   18.12 @@ -401,6 +401,8 @@ static void vhpt_insert(thash_cb_t *hcb,
   18.13              panic("Can't convert to machine VHPT entry\n");
   18.14          }
   18.15          hash_table->next = cch;
   18.16 +        if(hash_table->tag==hash_table->next->tag)
   18.17 +            while(1);
   18.18      }
   18.19      return /*hash_table*/;
   18.20  }
   18.21 @@ -466,10 +468,11 @@ int   cch_depth=0;
   18.22  static thash_data_t *thash_rem_cch(thash_cb_t *hcb, thash_data_t *cch)
   18.23  {
   18.24      thash_data_t *next;
   18.25 -    
   18.26 +
   18.27      if ( ++cch_depth > MAX_CCH_LENGTH ) {
   18.28          printf ("cch length > MAX_CCH_LENGTH, exceed the expected length\n");
   18.29 -    }
   18.30 +        while(1);
   18.31 +   }
   18.32      if ( cch -> next ) {
   18.33          next = thash_rem_cch(hcb, cch->next);
   18.34      }
    19.1 --- a/xen/arch/ia64/xensetup.c	Sat Jul 09 07:37:13 2005 -0700
    19.2 +++ b/xen/arch/ia64/xensetup.c	Sat Jul 09 07:58:56 2005 -0700
    19.3 @@ -30,7 +30,6 @@ struct vcpu *idle_task[NR_CPUS] = { &idl
    19.4  #ifdef CLONE_DOMAIN0
    19.5  struct domain *clones[CLONE_DOMAIN0];
    19.6  #endif
    19.7 -extern struct domain *dom0;
    19.8  extern unsigned long domain0_ready;
    19.9  
   19.10  int find_max_pfn (unsigned long, unsigned long, void *);
    20.1 --- a/xen/include/asm-ia64/config.h	Sat Jul 09 07:37:13 2005 -0700
    20.2 +++ b/xen/include/asm-ia64/config.h	Sat Jul 09 07:58:56 2005 -0700
    20.3 @@ -49,6 +49,7 @@ typedef unsigned long physaddr_t;
    20.4  extern unsigned long xenheap_phys_end;
    20.5  extern unsigned long xen_pstart;
    20.6  extern unsigned long xenheap_size;
    20.7 +extern struct domain *dom0;
    20.8  extern unsigned long dom0_start;
    20.9  extern unsigned long dom0_size;
   20.10  
    21.1 --- a/xen/include/asm-ia64/domain.h	Sat Jul 09 07:37:13 2005 -0700
    21.2 +++ b/xen/include/asm-ia64/domain.h	Sat Jul 09 07:58:56 2005 -0700
    21.3 @@ -42,8 +42,6 @@ struct arch_domain {
    21.4       * max_pages in domain struct, which indicates maximum memory size
    21.5       */
    21.6      unsigned long max_pfn;
    21.7 -    unsigned int section_nr;
    21.8 -    mm_section_t *sections;	/* Describe memory hole except for Dom0 */
    21.9  #endif  //CONFIG_VTI
   21.10      u64 xen_vastart;
   21.11      u64 xen_vaend;
   21.12 @@ -88,6 +86,8 @@ struct arch_vcpu {
   21.13      void (*schedule_tail) (struct vcpu *);
   21.14      struct trap_bounce trap_bounce;
   21.15      thash_cb_t *vtlb;
   21.16 +    char irq_new_pending;
   21.17 +    char irq_new_condition;    // vpsr.i/vtpr change, check for pending VHPI
   21.18      //for phycial  emulation
   21.19      unsigned long old_rsc;
   21.20      int mode_flags;
    22.1 --- a/xen/include/asm-ia64/privop.h	Sat Jul 09 07:37:13 2005 -0700
    22.2 +++ b/xen/include/asm-ia64/privop.h	Sat Jul 09 07:58:56 2005 -0700
    22.3 @@ -138,6 +138,19 @@ typedef union U_INST64_M47 {
    22.4      IA64_INST inst;
    22.5      struct { unsigned long qp:6, un14:14, r3:7, x6:6, x3:3, un1:1, major:4; };
    22.6  } INST64_M47;
    22.7 +typedef union U_INST64_M1{
    22.8 +    IA64_INST inst;
    22.9 +    struct { unsigned long qp:6, r1:7, un7:7, r3:7, x:1, hint:2, x6:6, m:1, major:4; };
   22.10 +} INST64_M1;
   22.11 +typedef union U_INST64_M4 {
   22.12 +    IA64_INST inst;
   22.13 +    struct { unsigned long qp:6, un7:7, r2:7, r3:7, x:1, hint:2, x6:6, m:1, major:4; };
   22.14 +} INST64_M4;
   22.15 +typedef union U_INST64_M6 {
   22.16 +    IA64_INST inst;
   22.17 +    struct { unsigned long qp:6, f1:7, un7:7, r3:7, x:1, hint:2, x6:6, m:1, major:4; };
   22.18 +} INST64_M6;
   22.19 +
   22.20  #endif // CONFIG_VTI
   22.21  
   22.22  typedef union U_INST64 {
   22.23 @@ -151,6 +164,11 @@ typedef union U_INST64 {
   22.24      INST64_I26 I26;	// mov register to ar (I unit)
   22.25      INST64_I27 I27;	// mov immediate to ar (I unit)
   22.26      INST64_I28 I28;	// mov from ar (I unit)
   22.27 +#ifdef CONFIG_VTI
   22.28 +    INST64_M1  M1;  // ld integer
   22.29 +    INST64_M4  M4;  // st integer
   22.30 +    INST64_M6  M6;  // ldfd floating point
   22.31 +#endif // CONFIG_VTI
   22.32      INST64_M28 M28;	// purge translation cache entry
   22.33      INST64_M29 M29;	// mov register to ar (M unit)
   22.34      INST64_M30 M30;	// mov immediate to ar (M unit)
    23.1 --- a/xen/include/asm-ia64/vmx.h	Sat Jul 09 07:37:13 2005 -0700
    23.2 +++ b/xen/include/asm-ia64/vmx.h	Sat Jul 09 07:58:56 2005 -0700
    23.3 @@ -35,4 +35,6 @@ extern vmx_insert_double_mapping(u64,u64
    23.4  extern void vmx_purge_double_mapping(u64, u64, u64);
    23.5  extern void vmx_change_double_mapping(struct vcpu *v, u64 oldrr7, u64 newrr7);
    23.6  
    23.7 +extern void vmx_wait_io(void);
    23.8 +extern void vmx_io_assist(struct vcpu *v);
    23.9  #endif /* _ASM_IA64_VT_H */
    24.1 --- a/xen/include/asm-ia64/vmx_phy_mode.h	Sat Jul 09 07:37:13 2005 -0700
    24.2 +++ b/xen/include/asm-ia64/vmx_phy_mode.h	Sat Jul 09 07:58:56 2005 -0700
    24.3 @@ -83,6 +83,7 @@
    24.4  #define IA64_RSC_MODE       0x0000000000000003
    24.5  #define XEN_RR7_RID    (0xf00010)
    24.6  #define GUEST_IN_PHY    0x1
    24.7 +#define GUEST_PHY_EMUL	0x2
    24.8  extern int valid_mm_mode[];
    24.9  extern int mm_switch_table[][8];
   24.10  extern void physical_mode_init(VCPU *);
    25.1 --- a/xen/include/asm-ia64/vmx_vcpu.h	Sat Jul 09 07:37:13 2005 -0700
    25.2 +++ b/xen/include/asm-ia64/vmx_vcpu.h	Sat Jul 09 07:58:56 2005 -0700
    25.3 @@ -53,7 +53,7 @@
    25.4  
    25.5  #define VMM_RR_SHIFT    20
    25.6  #define VMM_RR_MASK     ((1UL<<VMM_RR_SHIFT)-1)
    25.7 -#define VRID_2_MRID(vcpu,rid)  ((rid) & VMM_RR_MASK) | \
    25.8 +//#define VRID_2_MRID(vcpu,rid)  ((rid) & VMM_RR_MASK) | \
    25.9                  ((vcpu->domain->domain_id) << VMM_RR_SHIFT)
   25.10  extern u64 indirect_reg_igfld_MASK ( int type, int index, u64 value);
   25.11  extern u64 cr_igfld_mask (int index, u64 value);
   25.12 @@ -69,7 +69,7 @@ extern void vmx_vcpu_set_psr_sync_mpsr(V
   25.13  extern IA64FAULT vmx_vcpu_cover(VCPU *vcpu);
   25.14  extern thash_cb_t *vmx_vcpu_get_vtlb(VCPU *vcpu);
   25.15  extern thash_cb_t *vmx_vcpu_get_vhpt(VCPU *vcpu);
   25.16 -ia64_rr vmx_vcpu_rr(VCPU *vcpu,UINT64 vadr);
   25.17 +extern ia64_rr vmx_vcpu_rr(VCPU *vcpu,UINT64 vadr);
   25.18  extern IA64FAULT vmx_vcpu_set_rr(VCPU *vcpu, UINT64 reg, UINT64 val);
   25.19  extern IA64FAULT vmx_vcpu_get_rr(VCPU *vcpu, UINT64 reg, UINT64 *pval);
   25.20  extern IA64FAULT vmx_vcpu_get_pkr(VCPU *vcpu, UINT64 reg, UINT64 *pval);
   25.21 @@ -112,10 +112,10 @@ extern uint64_t guest_read_vivr(VCPU *vc
   25.22  extern void vmx_inject_vhpi(VCPU *vcpu, u8 vec);
   25.23  extern void vmx_vcpu_pend_interrupt(VCPU *vcpu, UINT64 vector);
   25.24  extern struct virutal_platform_def *vmx_vcpu_get_plat(VCPU *vcpu);
   25.25 -extern void memread_p(VCPU *vcpu, void *src, void *dest, size_t s);
   25.26 -extern void memread_v(VCPU *vcpu, thash_data_t *vtlb, void *src, void *dest, size_t s);
   25.27 -extern void memwrite_v(VCPU *vcpu, thash_data_t *vtlb, void *src, void *dest, size_t s);
   25.28 -extern void memwrite_p(VCPU *vcpu, void *src, void *dest, size_t s);
   25.29 +extern void memread_p(VCPU *vcpu, u64 *src, u64 *dest, size_t s);
   25.30 +extern void memread_v(VCPU *vcpu, thash_data_t *vtlb, u64 *src, u64 *dest, size_t s);
   25.31 +extern void memwrite_v(VCPU *vcpu, thash_data_t *vtlb, u64 *src, u64 *dest, size_t s);
   25.32 +extern void memwrite_p(VCPU *vcpu, u64 *src, u64 *dest, size_t s);
   25.33  
   25.34  
   25.35  /**************************************************************************
   25.36 @@ -401,14 +401,8 @@ vmx_vcpu_set_lid(VCPU *vcpu, u64 val)
   25.37      VPD_CR(vcpu,lid)=val;
   25.38      return IA64_NO_FAULT;
   25.39  }
   25.40 -static inline
   25.41 -IA64FAULT
   25.42 -vmx_vcpu_set_tpr(VCPU *vcpu, u64 val)
   25.43 -{
   25.44 -    VPD_CR(vcpu,tpr)=val;
   25.45 -    //TODO
   25.46 -    return IA64_NO_FAULT;
   25.47 -}
   25.48 +extern IA64FAULT vmx_vcpu_set_tpr(VCPU *vcpu, u64 val);
   25.49 +
   25.50  static inline
   25.51  IA64FAULT
   25.52  vmx_vcpu_set_eoi(VCPU *vcpu, u64 val)
    26.1 --- a/xen/include/asm-ia64/vmx_vpd.h	Sat Jul 09 07:37:13 2005 -0700
    26.2 +++ b/xen/include/asm-ia64/vmx_vpd.h	Sat Jul 09 07:58:56 2005 -0700
    26.3 @@ -19,8 +19,8 @@
    26.4   *      Kun Tian (Kevin Tian) (kevin.tian@intel.com)
    26.5   */
    26.6  
    26.7 -#ifndef _VPD_H_
    26.8 -#define _VPD_H_
    26.9 +#ifndef _ASM_IA64_VMX_VPD_H_
   26.10 +#define _ASM_IA64_VMX_VPD_H_
   26.11  
   26.12  #ifndef __ASSEMBLY__
   26.13  
   26.14 @@ -123,6 +123,7 @@ struct arch_vmx_struct {
   26.15  	vpd_t       *vpd;
   26.16  	vtime_t	    vtm;
   26.17      unsigned long   vrr[8];
   26.18 +    unsigned long   vkr[8];
   26.19      unsigned long   mrr5;
   26.20      unsigned long   mrr6;
   26.21      unsigned long   mrr7;
   26.22 @@ -145,6 +146,7 @@ struct arch_vmx_struct {
   26.23  #define ARCH_VMX_VMCS_LAUNCH    1       /* Needs VMCS launch */
   26.24  #define ARCH_VMX_VMCS_RESUME    2       /* Needs VMCS resume */
   26.25  #define ARCH_VMX_IO_WAIT        3       /* Waiting for I/O completion */
   26.26 +#define ARCH_VMX_INTR_ASSIST   4       /* Need DM's assist to issue intr */
   26.27  
   26.28  
   26.29  #define VMX_DEBUG 1
   26.30 @@ -191,4 +193,4 @@ extern unsigned int opt_vmx_debug_level;
   26.31  #define VPD_VMM_VAIL_START_OFFSET	31744
   26.32  
   26.33  
   26.34 -#endif /* _VPD_H_ */
   26.35 +#endif /* _ASM_IA64_VMX_VPD_H_ */
    27.1 --- a/xen/include/asm-ia64/xenprocessor.h	Sat Jul 09 07:37:13 2005 -0700
    27.2 +++ b/xen/include/asm-ia64/xenprocessor.h	Sat Jul 09 07:58:56 2005 -0700
    27.3 @@ -166,6 +166,16 @@ typedef union{
    27.4      };
    27.5  } ipi_d_t;
    27.6  
    27.7 +typedef union {
    27.8 +    __u64 val;
    27.9 +    struct {
   27.10 +        __u64 ig0 : 4;
   27.11 +        __u64 mic : 4;
   27.12 +        __u64 rsv : 8;
   27.13 +        __u64 mmi : 1;
   27.14 +        __u64 ig1 : 47;
   27.15 +    };
   27.16 +} tpr_t;
   27.17  
   27.18  #define IA64_ISR_CODE_MASK0     0xf
   27.19  #define IA64_UNIMPL_DADDR_FAULT     0x30
    28.1 --- a/xen/include/asm-ia64/xensystem.h	Sat Jul 09 07:37:13 2005 -0700
    28.2 +++ b/xen/include/asm-ia64/xensystem.h	Sat Jul 09 07:58:56 2005 -0700
    28.3 @@ -33,6 +33,8 @@
    28.4  #ifdef CONFIG_VTI
    28.5  extern struct task_struct *vmx_ia64_switch_to (void *next_task);
    28.6  #define __switch_to(prev,next,last) do {	\
    28.7 +       ia64_save_fpu(prev->arch._thread.fph);	\
    28.8 +       ia64_load_fpu(next->arch._thread.fph);	\
    28.9         if (VMX_DOMAIN(prev))                   \
   28.10                 vmx_save_state(prev);           \
   28.11         else {                                  \
    29.1 --- a/xen/include/public/arch-ia64.h	Sat Jul 09 07:37:13 2005 -0700
    29.2 +++ b/xen/include/public/arch-ia64.h	Sat Jul 09 07:58:56 2005 -0700
    29.3 @@ -181,6 +181,16 @@ typedef struct {
    29.4  } arch_shared_info_t;		// DON'T PACK 
    29.5  
    29.6  typedef struct vcpu_guest_context {
    29.7 +#define VGCF_FPU_VALID (1<<0)
    29.8 +#define VGCF_VMX_GUEST (1<<1)
    29.9 +#define VGCF_IN_KERNEL (1<<2)
   29.10 +	unsigned long flags;       /* VGCF_* flags */
   29.11 +	unsigned long pt_base;     /* PMT table base */
   29.12 +	unsigned long pt_max_pfn;  /* Max pfn including holes */
   29.13 +	unsigned long share_io_pg; /* Shared page for I/O emulation */
   29.14 +	unsigned long vm_assist;   /* VMASST_TYPE_* bitmap, now none on IPF */
   29.15 +	unsigned long guest_iip;   /* Guest entry point */
   29.16 +
   29.17  	struct pt_regs regs;
   29.18  	arch_vcpu_info_t vcpu;
   29.19  	arch_shared_info_t shared;