direct-io.hg

changeset 10121:77ccce98ddef

[IA64] vhpt clean-up

Create a clean interface for vhpt/tlb.

Signed-off-by: Tristan Gingold <tristan.gingold@bull.net>
author awilliam@xenbuild.aw
date Tue May 16 10:35:58 2006 -0600 (2006-05-16)
parents 06e5c5599147
children 303406dd9e3b
files xen/arch/ia64/linux-xen/smp.c xen/arch/ia64/xen/domain.c xen/arch/ia64/xen/hyperprivop.S xen/arch/ia64/xen/process.c xen/arch/ia64/xen/vcpu.c xen/arch/ia64/xen/vhpt.c xen/arch/ia64/xen/xenmisc.c xen/include/asm-ia64/flushtlb.h xen/include/asm-ia64/linux-xen/asm/tlbflush.h xen/include/asm-ia64/mm.h xen/include/asm-ia64/tlbflush.h xen/include/asm-ia64/vcpu.h xen/include/asm-ia64/vhpt.h
line diff
     1.1 --- a/xen/arch/ia64/linux-xen/smp.c	Tue May 16 09:05:36 2006 -0600
     1.2 +++ b/xen/arch/ia64/linux-xen/smp.c	Tue May 16 10:35:58 2006 -0600
     1.3 @@ -53,28 +53,6 @@
     1.4  #endif
     1.5  
     1.6  #ifdef XEN
     1.7 -// FIXME: MOVE ELSEWHERE
     1.8 -//Huh? This seems to be used on ia64 even if !CONFIG_SMP
     1.9 -void flush_tlb_mask(cpumask_t mask)
    1.10 -{
    1.11 -    int cpu;
    1.12 -
    1.13 -    cpu = smp_processor_id();
    1.14 -    if (cpu_isset (cpu, mask)) {
    1.15 -        cpu_clear(cpu, mask);
    1.16 -	local_flush_tlb_all ();
    1.17 -    }
    1.18 -
    1.19 -#ifdef CONFIG_SMP
    1.20 -    if (cpus_empty(mask))
    1.21 -        return;
    1.22 -
    1.23 -    for (cpu = 0; cpu < NR_CPUS; ++cpu)
    1.24 -        if (cpu_isset(cpu, mask))
    1.25 -	   smp_call_function_single
    1.26 -	     (cpu, (void (*)(void *))local_flush_tlb_all, NULL, 1, 1);
    1.27 -#endif
    1.28 -}
    1.29  //#if CONFIG_SMP || IA64
    1.30  #if CONFIG_SMP
    1.31  //Huh? This seems to be used on ia64 even if !CONFIG_SMP
    1.32 @@ -276,7 +254,6 @@ smp_send_reschedule (int cpu)
    1.33  {
    1.34  	platform_send_ipi(cpu, IA64_IPI_RESCHEDULE, IA64_IPI_DM_INT, 0);
    1.35  }
    1.36 -#endif
    1.37  
    1.38  void
    1.39  smp_flush_tlb_all (void)
    1.40 @@ -284,15 +261,6 @@ smp_flush_tlb_all (void)
    1.41  	on_each_cpu((void (*)(void *))local_flush_tlb_all, NULL, 1, 1);
    1.42  }
    1.43  
    1.44 -#ifdef XEN
    1.45 -void
    1.46 -smp_vhpt_flush_all(void)
    1.47 -{
    1.48 -	on_each_cpu((void (*)(void *))vhpt_flush, NULL, 1, 1);
    1.49 -}
    1.50 -#endif
    1.51 -
    1.52 -#ifndef XEN
    1.53  void
    1.54  smp_flush_tlb_mm (struct mm_struct *mm)
    1.55  {
     2.1 --- a/xen/arch/ia64/xen/domain.c	Tue May 16 09:05:36 2006 -0600
     2.2 +++ b/xen/arch/ia64/xen/domain.c	Tue May 16 10:35:58 2006 -0600
     2.3 @@ -89,13 +89,9 @@ void arch_domain_destroy(struct domain *
     2.4  	if (d->shared_info != NULL)
     2.5  		free_xenheap_page(d->shared_info);
     2.6  
     2.7 -	deallocate_rid_range(d);
     2.8 +	domain_flush_destroy (d);
     2.9  
    2.10 -	/* It is really good in this? */
    2.11 -	flush_tlb_all();
    2.12 -
    2.13 -	/* It is really good in this? */
    2.14 -	vhpt_flush_all();
    2.15 +	deallocate_rid_range(d);
    2.16  }
    2.17  
    2.18  static void default_idle(void)
    2.19 @@ -873,17 +869,7 @@ void
    2.20  domain_page_flush(struct domain* d, unsigned long mpaddr,
    2.21                    unsigned long old_mfn, unsigned long new_mfn)
    2.22  {
    2.23 -    struct vcpu* v;
    2.24 -    //XXX SMP
    2.25 -    for_each_vcpu(d, v) {
    2.26 -        vcpu_purge_tr_entry(&v->arch.dtlb);
    2.27 -        vcpu_purge_tr_entry(&v->arch.itlb);
    2.28 -    }
    2.29 -
    2.30 -    // flush vhpt
    2.31 -    vhpt_flush();
    2.32 -    // flush tlb
    2.33 -    flush_tlb_all();
    2.34 +    domain_flush_vtlb_all (d);
    2.35  }
    2.36  #endif
    2.37  
     3.1 --- a/xen/arch/ia64/xen/hyperprivop.S	Tue May 16 09:05:36 2006 -0600
     3.2 +++ b/xen/arch/ia64/xen/hyperprivop.S	Tue May 16 10:35:58 2006 -0600
     3.3 @@ -47,7 +47,7 @@
     3.4  #endif
     3.5  
     3.6  #ifdef CONFIG_SMP
     3.7 -#warning "FIXME: ptc.ga instruction requires spinlock for SMP"
     3.8 +//#warning "FIXME: ptc.ga instruction requires spinlock for SMP"
     3.9  #undef FAST_PTC_GA
    3.10  #endif
    3.11  
     4.1 --- a/xen/arch/ia64/xen/process.c	Tue May 16 09:05:36 2006 -0600
     4.2 +++ b/xen/arch/ia64/xen/process.c	Tue May 16 10:35:58 2006 -0600
     4.3 @@ -307,11 +307,7 @@ void ia64_do_page_fault (unsigned long a
     4.4  		if (fault == IA64_USE_TLB && !current->arch.dtlb.pte.p) {
     4.5  			/* dtlb has been purged in-between.  This dtlb was
     4.6  			   matching.  Undo the work.  */
     4.7 -#ifdef VHPT_GLOBAL
     4.8 -			vhpt_flush_address (address, 1);
     4.9 -#endif
    4.10 -			ia64_ptcl(address, 1<<2);
    4.11 -			ia64_srlz_i();
    4.12 +			vcpu_flush_tlb_vhpt_range (address, 1);
    4.13  			goto again;
    4.14  		}
    4.15  		return;
     5.1 --- a/xen/arch/ia64/xen/vcpu.c	Tue May 16 09:05:36 2006 -0600
     5.2 +++ b/xen/arch/ia64/xen/vcpu.c	Tue May 16 10:35:58 2006 -0600
     5.3 @@ -28,8 +28,6 @@ extern void setfpreg (unsigned long regn
     5.4  
     5.5  extern void panic_domain(struct pt_regs *, const char *, ...);
     5.6  extern unsigned long translate_domain_mpaddr(unsigned long);
     5.7 -extern void ia64_global_tlb_purge(UINT64 start, UINT64 end, UINT64 nbits);
     5.8 -
     5.9  
    5.10  typedef	union {
    5.11  	struct ia64_psr ia64_psr;
    5.12 @@ -1702,11 +1700,6 @@ IA64FAULT vcpu_set_pkr(VCPU *vcpu, UINT6
    5.13   VCPU translation register access routines
    5.14  **************************************************************************/
    5.15  
    5.16 -void vcpu_purge_tr_entry(TR_ENTRY *trp)
    5.17 -{
    5.18 -	trp->pte.val = 0;
    5.19 -}
    5.20 -
    5.21  static void vcpu_set_tr_entry(TR_ENTRY *trp, UINT64 pte, UINT64 itir, UINT64 ifa)
    5.22  {
    5.23  	UINT64 ps;
    5.24 @@ -1867,21 +1860,13 @@ IA64FAULT vcpu_fc(VCPU *vcpu, UINT64 vad
    5.25  	return fault;
    5.26  }
    5.27  
    5.28 -int ptce_count = 0;
    5.29  IA64FAULT vcpu_ptc_e(VCPU *vcpu, UINT64 vadr)
    5.30  {
    5.31  	// Note that this only needs to be called once, i.e. the
    5.32  	// architected loop to purge the entire TLB, should use
    5.33  	//  base = stride1 = stride2 = 0, count0 = count 1 = 1
    5.34  
    5.35 -	// just invalidate the "whole" tlb
    5.36 -	vcpu_purge_tr_entry(&PSCBX(vcpu,dtlb));
    5.37 -	vcpu_purge_tr_entry(&PSCBX(vcpu,itlb));
    5.38 -
    5.39 -#ifdef VHPT_GLOBAL
    5.40 -	vhpt_flush();	// FIXME: This is overdoing it
    5.41 -#endif
    5.42 -	local_flush_tlb_all();
    5.43 +	vcpu_flush_vtlb_all ();
    5.44  
    5.45  	return IA64_NO_FAULT;
    5.46  }
    5.47 @@ -1899,33 +1884,8 @@ IA64FAULT vcpu_ptc_ga(VCPU *vcpu,UINT64 
    5.48  	// FIXME: ??breaks if domain PAGE_SIZE < Xen PAGE_SIZE
    5.49  //printf("######## vcpu_ptc_ga(%p,%p) ##############\n",vadr,addr_range);
    5.50  
    5.51 -#ifdef CONFIG_XEN_SMP
    5.52 -	struct domain *d = vcpu->domain;
    5.53 -	struct vcpu *v;
    5.54 -
    5.55 -	for_each_vcpu (d, v) {
    5.56 -		if (v == vcpu)
    5.57 -			continue;
    5.58 -
    5.59 -		/* Purge TC entries.
    5.60 -		   FIXME: clear only if match.  */
    5.61 -		vcpu_purge_tr_entry(&PSCBX(v,dtlb));
    5.62 -		vcpu_purge_tr_entry(&PSCBX(v,itlb));
    5.63 +	domain_flush_vtlb_range (vcpu->domain, vadr, addr_range);
    5.64  
    5.65 -#ifdef VHPT_GLOBAL
    5.66 -		/* Invalidate VHPT entries.  */
    5.67 -		vhpt_flush_address_remote (v->processor, vadr, addr_range);
    5.68 -#endif
    5.69 -	}
    5.70 -#endif
    5.71 -
    5.72 -#ifdef VHPT_GLOBAL
    5.73 -	vhpt_flush_address(vadr,addr_range);
    5.74 -#endif
    5.75 -	ia64_global_tlb_purge(vadr,vadr+addr_range,PAGE_SHIFT);
    5.76 -	/* Purge tc.  */
    5.77 -	vcpu_purge_tr_entry(&PSCBX(vcpu,dtlb));
    5.78 -	vcpu_purge_tr_entry(&PSCBX(vcpu,itlb));
    5.79  	return IA64_NO_FAULT;
    5.80  }
    5.81  
     6.1 --- a/xen/arch/ia64/xen/vhpt.c	Tue May 16 09:05:36 2006 -0600
     6.2 +++ b/xen/arch/ia64/xen/vhpt.c	Tue May 16 10:35:58 2006 -0600
     6.3 @@ -12,32 +12,31 @@
     6.4  #include <asm/system.h>
     6.5  #include <asm/pgalloc.h>
     6.6  #include <asm/page.h>
     6.7 -#include <asm/dma.h>
     6.8  #include <asm/vhpt.h>
     6.9 +#include <asm/vcpu.h>
    6.10 +
    6.11 +/* Defined in tlb.c  */
    6.12 +extern void ia64_global_tlb_purge(UINT64 start, UINT64 end, UINT64 nbits);
    6.13  
    6.14  extern long running_on_sim;
    6.15  
    6.16  DEFINE_PER_CPU (unsigned long, vhpt_paddr);
    6.17  DEFINE_PER_CPU (unsigned long, vhpt_pend);
    6.18  
    6.19 -void vhpt_flush(void)
    6.20 +static void vhpt_flush(void)
    6.21  {
    6.22 -	struct vhpt_lf_entry *v =__va(__ia64_per_cpu_var(vhpt_paddr));
    6.23 +	struct vhpt_lf_entry *v = (struct vhpt_lf_entry *)VHPT_ADDR;
    6.24  	int i;
    6.25 -#if 0
    6.26 -static int firsttime = 2;
    6.27  
    6.28 -if (firsttime) firsttime--;
    6.29 -else {
    6.30 -printf("vhpt_flush: *********************************************\n");
    6.31 -printf("vhpt_flush: *********************************************\n");
    6.32 -printf("vhpt_flush: *********************************************\n");
    6.33 -printf("vhpt_flush: flushing vhpt (seems to crash at rid wrap?)...\n");
    6.34 -printf("vhpt_flush: *********************************************\n");
    6.35 -printf("vhpt_flush: *********************************************\n");
    6.36 -printf("vhpt_flush: *********************************************\n");
    6.37 +	for (i = 0; i < VHPT_NUM_ENTRIES; i++, v++)
    6.38 +		v->ti_tag = INVALID_TI_TAG;
    6.39  }
    6.40 -#endif
    6.41 +
    6.42 +static void vhpt_erase(void)
    6.43 +{
    6.44 +	struct vhpt_lf_entry *v = (struct vhpt_lf_entry *)VHPT_ADDR;
    6.45 +	int i;
    6.46 +
    6.47  	for (i = 0; i < VHPT_NUM_ENTRIES; i++, v++) {
    6.48  		v->itir = 0;
    6.49  		v->CChain = 0;
    6.50 @@ -47,51 +46,6 @@ printf("vhpt_flush: ********************
    6.51  	// initialize cache too???
    6.52  }
    6.53  
    6.54 -#ifdef VHPT_GLOBAL
    6.55 -void vhpt_flush_address(unsigned long vadr, unsigned long addr_range)
    6.56 -{
    6.57 -	struct vhpt_lf_entry *vlfe;
    6.58 -
    6.59 -	if ((vadr >> 61) == 7) {
    6.60 -		// no vhpt for region 7 yet, see vcpu_itc_no_srlz
    6.61 -		printf("vhpt_flush_address: region 7, spinning...\n");
    6.62 -		while(1);
    6.63 -	}
    6.64 -#if 0
    6.65 -	// this only seems to occur at shutdown, but it does occur
    6.66 -	if ((!addr_range) || addr_range & (addr_range - 1)) {
    6.67 -		printf("vhpt_flush_address: weird range, spinning...\n");
    6.68 -		while(1);
    6.69 -	}
    6.70 -//printf("************** vhpt_flush_address(%p,%p)\n",vadr,addr_range);
    6.71 -#endif
    6.72 -	while ((long)addr_range > 0) {
    6.73 -		vlfe = (struct vhpt_lf_entry *)ia64_thash(vadr);
    6.74 -		// FIXME: for now, just blow it away even if it belongs to
    6.75 -		// another domain.  Later, use ttag to check for match
    6.76 -//if (!(vlfe->ti_tag & INVALID_TI_TAG)) {
    6.77 -//printf("vhpt_flush_address: blowing away valid tag for vadr=%p\n",vadr);
    6.78 -//}
    6.79 -		vlfe->ti_tag |= INVALID_TI_TAG;
    6.80 -		addr_range -= PAGE_SIZE;
    6.81 -		vadr += PAGE_SIZE;
    6.82 -	}
    6.83 -}
    6.84 -
    6.85 -void vhpt_flush_address_remote(int cpu,
    6.86 -			       unsigned long vadr, unsigned long addr_range)
    6.87 -{
    6.88 -	while ((long)addr_range > 0) {
    6.89 -		/* Get the VHPT entry.  */
    6.90 -		unsigned int off = ia64_thash(vadr) - VHPT_ADDR;
    6.91 -		volatile struct vhpt_lf_entry *v;
    6.92 -		v =__va(per_cpu(vhpt_paddr, cpu) + off);
    6.93 -		v->ti_tag = INVALID_TI_TAG;
    6.94 -		addr_range -= PAGE_SIZE;
    6.95 -		vadr += PAGE_SIZE;
    6.96 -	}
    6.97 -}
    6.98 -#endif
    6.99  
   6.100  static void vhpt_map(unsigned long pte)
   6.101  {
   6.102 @@ -147,17 +101,11 @@ void vhpt_multiple_insert(unsigned long 
   6.103  
   6.104  void vhpt_init(void)
   6.105  {
   6.106 -	unsigned long vhpt_total_size, vhpt_alignment;
   6.107  	unsigned long paddr, pte;
   6.108  	struct page_info *page;
   6.109  #if !VHPT_ENABLED
   6.110  	return;
   6.111  #endif
   6.112 -	// allocate a huge chunk of physical memory.... how???
   6.113 -	vhpt_total_size = 1 << VHPT_SIZE_LOG2;	// 4MB, 16MB, 64MB, or 256MB
   6.114 -	vhpt_alignment = 1 << VHPT_SIZE_LOG2;	// 4MB, 16MB, 64MB, or 256MB
   6.115 -	printf("vhpt_init: vhpt size=0x%lx, align=0x%lx\n",
   6.116 -		vhpt_total_size, vhpt_alignment);
   6.117  	/* This allocation only holds true if vhpt table is unique for
   6.118  	 * all domains. Or else later new vhpt table should be allocated
   6.119  	 * from domain heap when each domain is created. Assume xen buddy
   6.120 @@ -167,17 +115,135 @@ void vhpt_init(void)
   6.121  	if (!page)
   6.122  		panic("vhpt_init: can't allocate VHPT!\n");
   6.123  	paddr = page_to_maddr(page);
   6.124 +	if (paddr & ((1 << VHPT_SIZE_LOG2) - 1))
   6.125 +		panic("vhpt_init: bad VHPT alignment!\n");
   6.126  	__get_cpu_var(vhpt_paddr) = paddr;
   6.127 -	__get_cpu_var(vhpt_pend) = paddr + vhpt_total_size - 1;
   6.128 +	__get_cpu_var(vhpt_pend) = paddr + (1 << VHPT_SIZE_LOG2) - 1;
   6.129  	printf("vhpt_init: vhpt paddr=0x%lx, end=0x%lx\n",
   6.130  		paddr, __get_cpu_var(vhpt_pend));
   6.131  	pte = pte_val(pfn_pte(paddr >> PAGE_SHIFT, PAGE_KERNEL));
   6.132  	vhpt_map(pte);
   6.133  	ia64_set_pta(VHPT_ADDR | (1 << 8) | (VHPT_SIZE_LOG2 << 2) |
   6.134  		VHPT_ENABLED);
   6.135 -	vhpt_flush();
   6.136 +	vhpt_erase();
   6.137 +}
   6.138 +
   6.139 +
   6.140 +void vcpu_flush_vtlb_all (void)
   6.141 +{
   6.142 +	struct vcpu *v = current;
   6.143 +
   6.144 +	/* First VCPU tlb.  */
   6.145 +	vcpu_purge_tr_entry(&PSCBX(v,dtlb));
   6.146 +	vcpu_purge_tr_entry(&PSCBX(v,itlb));
   6.147 +
   6.148 +	/* Then VHPT.  */
   6.149 +	vhpt_flush ();
   6.150 +
   6.151 +	/* Then mTLB.  */
   6.152 +	local_flush_tlb_all ();
   6.153 +
   6.154 +	/* We could clear bit in d->domain_dirty_cpumask only if domain d in
   6.155 +	   not running on this processor.  There is currently no easy way to
   6.156 +	   check this.  */
   6.157 +}
   6.158 +
   6.159 +void domain_flush_vtlb_all (void)
   6.160 +{
   6.161 +	int cpu = smp_processor_id ();
   6.162 +	struct vcpu *v;
   6.163 +
   6.164 +	for_each_vcpu (current->domain, v)
   6.165 +		if (v->processor == cpu)
   6.166 +			vcpu_flush_vtlb_all ();
   6.167 +		else
   6.168 +			smp_call_function_single
   6.169 +				(v->processor,
   6.170 +				 (void(*)(void *))vcpu_flush_vtlb_all,
   6.171 +				 NULL,1,1);
   6.172 +}
   6.173 +
   6.174 +static void cpu_flush_vhpt_range (int cpu, u64 vadr, u64 addr_range)
   6.175 +{
   6.176 +	void *vhpt_base = __va(per_cpu(vhpt_paddr, cpu));
   6.177 +
   6.178 +	while ((long)addr_range > 0) {
   6.179 +		/* Get the VHPT entry.  */
   6.180 +		unsigned int off = ia64_thash(vadr) - VHPT_ADDR;
   6.181 +		volatile struct vhpt_lf_entry *v;
   6.182 +		v = vhpt_base + off;
   6.183 +		v->ti_tag = INVALID_TI_TAG;
   6.184 +		addr_range -= PAGE_SIZE;
   6.185 +		vadr += PAGE_SIZE;
   6.186 +	}
   6.187 +}
   6.188 +
   6.189 +void vcpu_flush_tlb_vhpt_range (u64 vadr, u64 log_range)
   6.190 +{
   6.191 +	cpu_flush_vhpt_range (current->processor, vadr, 1UL << log_range);
   6.192 +	ia64_ptcl(vadr, log_range << 2);
   6.193 +	ia64_srlz_i();
   6.194  }
   6.195  
   6.196 +void domain_flush_vtlb_range (struct domain *d, u64 vadr, u64 addr_range)
   6.197 +{
   6.198 +	struct vcpu *v;
   6.199 +
   6.200 +#if 0
   6.201 +	// this only seems to occur at shutdown, but it does occur
   6.202 +	if ((!addr_range) || addr_range & (addr_range - 1)) {
   6.203 +		printf("vhpt_flush_address: weird range, spinning...\n");
   6.204 +		while(1);
   6.205 +	}
   6.206 +#endif
   6.207 +
   6.208 +	for_each_vcpu (d, v) {
   6.209 +		/* Purge TC entries.
   6.210 +		   FIXME: clear only if match.  */
   6.211 +		vcpu_purge_tr_entry(&PSCBX(v,dtlb));
   6.212 +		vcpu_purge_tr_entry(&PSCBX(v,itlb));
   6.213 +
   6.214 +		/* Invalidate VHPT entries.  */
   6.215 +		cpu_flush_vhpt_range (v->processor, vadr, addr_range);
   6.216 +	}
   6.217 +
   6.218 +	/* ptc.ga  */
   6.219 +	ia64_global_tlb_purge(vadr,vadr+addr_range,PAGE_SHIFT);
   6.220 +}
   6.221 +
   6.222 +static void flush_tlb_vhpt_all (struct domain *d)
   6.223 +{
   6.224 +	/* First VHPT.  */
   6.225 +	vhpt_flush ();
   6.226 +
   6.227 +	/* Then mTLB.  */
   6.228 +	local_flush_tlb_all ();
   6.229 +}
   6.230 +
   6.231 +void domain_flush_destroy (struct domain *d)
   6.232 +{
   6.233 +	/* Very heavy...  */
   6.234 +	on_each_cpu ((void (*)(void *))flush_tlb_vhpt_all, d, 1, 1);
   6.235 +	cpus_clear (d->domain_dirty_cpumask);
   6.236 +}
   6.237 +
   6.238 +void flush_tlb_mask(cpumask_t mask)
   6.239 +{
   6.240 +    int cpu;
   6.241 +
   6.242 +    cpu = smp_processor_id();
   6.243 +    if (cpu_isset (cpu, mask)) {
   6.244 +        cpu_clear(cpu, mask);
   6.245 +        flush_tlb_vhpt_all (NULL);
   6.246 +    }
   6.247 +
   6.248 +    if (cpus_empty(mask))
   6.249 +        return;
   6.250 +
   6.251 +    for_each_cpu_mask (cpu, mask)
   6.252 +        smp_call_function_single
   6.253 +            (cpu, (void (*)(void *))flush_tlb_vhpt_all, NULL, 1, 1);
   6.254 +}
   6.255  
   6.256  void zero_vhpt_stats(void)
   6.257  {
     7.1 --- a/xen/arch/ia64/xen/xenmisc.c	Tue May 16 09:05:36 2006 -0600
     7.2 +++ b/xen/arch/ia64/xen/xenmisc.c	Tue May 16 10:35:58 2006 -0600
     7.3 @@ -267,6 +267,9 @@ void context_switch(struct vcpu *prev, s
     7.4  	    vmx_load_state(next);
     7.5      /*ia64_psr(ia64_task_regs(next))->dfh = !ia64_is_local_fpu_owner(next);*/
     7.6      prev = ia64_switch_to(next);
     7.7 +
     7.8 +    //cpu_set(smp_processor_id(), current->domain->domain_dirty_cpumask);
     7.9 +
    7.10      if (!VMX_DOMAIN(current)){
    7.11  	    vcpu_set_next_timer(current);
    7.12      }
     8.1 --- a/xen/include/asm-ia64/flushtlb.h	Tue May 16 09:05:36 2006 -0600
     8.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
     8.3 @@ -1,9 +0,0 @@
     8.4 -#ifndef __FLUSHTLB_H__
     8.5 -#define __FLUSHTLB_H__
     8.6 -
     8.7 -#include <asm/tlbflush.h>
     8.8 -
     8.9 -#define tlbflush_current_time() 0
    8.10 -#define tlbflush_filter(x,y) ((void)0)
    8.11 -
    8.12 -#endif
     9.1 --- a/xen/include/asm-ia64/linux-xen/asm/tlbflush.h	Tue May 16 09:05:36 2006 -0600
     9.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
     9.3 @@ -1,119 +0,0 @@
     9.4 -#ifndef _ASM_IA64_TLBFLUSH_H
     9.5 -#define _ASM_IA64_TLBFLUSH_H
     9.6 -
     9.7 -/*
     9.8 - * Copyright (C) 2002 Hewlett-Packard Co
     9.9 - *	David Mosberger-Tang <davidm@hpl.hp.com>
    9.10 - */
    9.11 -
    9.12 -#include <linux/config.h>
    9.13 -
    9.14 -#include <linux/mm.h>
    9.15 -
    9.16 -#include <asm/intrinsics.h>
    9.17 -#include <asm/mmu_context.h>
    9.18 -#include <asm/page.h>
    9.19 -
    9.20 -/*
    9.21 - * Now for some TLB flushing routines.  This is the kind of stuff that
    9.22 - * can be very expensive, so try to avoid them whenever possible.
    9.23 - */
    9.24 -
    9.25 -/*
    9.26 - * Flush everything (kernel mapping may also have changed due to
    9.27 - * vmalloc/vfree).
    9.28 - */
    9.29 -extern void local_flush_tlb_all (void);
    9.30 -
    9.31 -#ifdef CONFIG_SMP
    9.32 -  extern void smp_flush_tlb_all (void);
    9.33 -  extern void smp_flush_tlb_mm (struct mm_struct *mm);
    9.34 -# define flush_tlb_all()	smp_flush_tlb_all()
    9.35 -#else
    9.36 -# define flush_tlb_all()	local_flush_tlb_all()
    9.37 -#endif
    9.38 -
    9.39 -#ifndef XEN
    9.40 -static inline void
    9.41 -local_finish_flush_tlb_mm (struct mm_struct *mm)
    9.42 -{
    9.43 -#ifndef XEN
    9.44 -// FIXME SMP?
    9.45 -	if (mm == current->active_mm)
    9.46 -		activate_context(mm);
    9.47 -#endif
    9.48 -}
    9.49 -
    9.50 -/*
    9.51 - * Flush a specified user mapping.  This is called, e.g., as a result of fork() and
    9.52 - * exit().  fork() ends up here because the copy-on-write mechanism needs to write-protect
    9.53 - * the PTEs of the parent task.
    9.54 - */
    9.55 -static inline void
    9.56 -flush_tlb_mm (struct mm_struct *mm)
    9.57 -{
    9.58 -	if (!mm)
    9.59 -		return;
    9.60 -
    9.61 -#ifndef XEN
    9.62 -// FIXME SMP?
    9.63 -	mm->context = 0;
    9.64 -#endif
    9.65 -
    9.66 -	if (atomic_read(&mm->mm_users) == 0)
    9.67 -		return;		/* happens as a result of exit_mmap() */
    9.68 -
    9.69 -#ifdef CONFIG_SMP
    9.70 -	smp_flush_tlb_mm(mm);
    9.71 -#else
    9.72 -	local_finish_flush_tlb_mm(mm);
    9.73 -#endif
    9.74 -}
    9.75 -
    9.76 -extern void flush_tlb_range (struct vm_area_struct *vma, unsigned long start, unsigned long end);
    9.77 -
    9.78 -/*
    9.79 - * Page-granular tlb flush.
    9.80 - */
    9.81 -static inline void
    9.82 -flush_tlb_page (struct vm_area_struct *vma, unsigned long addr)
    9.83 -{
    9.84 -#ifdef CONFIG_SMP
    9.85 -	flush_tlb_range(vma, (addr & PAGE_MASK), (addr & PAGE_MASK) + PAGE_SIZE);
    9.86 -#else
    9.87 -#ifdef XEN
    9.88 -	if (vma->vm_mm == current->domain->arch.mm)
    9.89 -#else
    9.90 -	if (vma->vm_mm == current->active_mm)
    9.91 -#endif
    9.92 -		ia64_ptcl(addr, (PAGE_SHIFT << 2));
    9.93 -#ifndef XEN
    9.94 -// FIXME SMP?
    9.95 -	else
    9.96 -		vma->vm_mm->context = 0;
    9.97 -#endif
    9.98 -#endif
    9.99 -}
   9.100 -
   9.101 -/*
   9.102 - * Flush the TLB entries mapping the virtually mapped linear page
   9.103 - * table corresponding to address range [START-END).
   9.104 - */
   9.105 -static inline void
   9.106 -flush_tlb_pgtables (struct mm_struct *mm, unsigned long start, unsigned long end)
   9.107 -{
   9.108 -	/*
   9.109 -	 * Deprecated.  The virtual page table is now flushed via the normal gather/flush
   9.110 -	 * interface (see tlb.h).
   9.111 -	 */
   9.112 -}
   9.113 -
   9.114 -
   9.115 -#define flush_tlb_kernel_range(start, end)	flush_tlb_all()	/* XXX fix me */
   9.116 -#endif /* XEN */
   9.117 -
   9.118 -#ifdef XEN
   9.119 -extern void flush_tlb_mask(cpumask_t mask);
   9.120 -#endif
   9.121 -
   9.122 -#endif /* _ASM_IA64_TLBFLUSH_H */
    10.1 --- a/xen/include/asm-ia64/mm.h	Tue May 16 09:05:36 2006 -0600
    10.2 +++ b/xen/include/asm-ia64/mm.h	Tue May 16 10:35:58 2006 -0600
    10.3 @@ -12,7 +12,7 @@
    10.4  
    10.5  #include <asm/processor.h>
    10.6  #include <asm/atomic.h>
    10.7 -#include <asm/flushtlb.h>
    10.8 +#include <asm/tlbflush.h>
    10.9  #include <asm/io.h>
   10.10  
   10.11  #include <public/xen.h>
    11.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    11.2 +++ b/xen/include/asm-ia64/tlbflush.h	Tue May 16 10:35:58 2006 -0600
    11.3 @@ -0,0 +1,37 @@
    11.4 +#ifndef __FLUSHTLB_H__
    11.5 +#define __FLUSHTLB_H__
    11.6 +
    11.7 +#include <xen/sched.h>
    11.8 +
    11.9 +/* TLB flushes can be either local (current vcpu only) or domain wide (on
   11.10 +   all vcpus).
   11.11 +   TLB flushes can be either all-flush or range only.
   11.12 +
   11.13 +   vTLB flushing means flushing VCPU virtual TLB + machine TLB + machine VHPT.
   11.14 +*/
   11.15 +
   11.16 +/* Local all flush of vTLB.  */
   11.17 +void vcpu_flush_vtlb_all (void);
   11.18 +
   11.19 +/* Local range flush of machine TLB only (not full VCPU virtual TLB!!!)  */
   11.20 +void vcpu_flush_tlb_vhpt_range (u64 vadr, u64 log_range);
   11.21 +
   11.22 +/* Global all flush of vTLB  */
   11.23 +void domain_flush_vtlb_all (void);
   11.24 +
   11.25 +/* Global range-flush of vTLB.  */
   11.26 +void domain_flush_vtlb_range (struct domain *d, u64 vadr, u64 addr_range);
   11.27 +
   11.28 +/* Final vTLB flush on every dirty cpus.  */
   11.29 +void domain_flush_destroy (struct domain *d);
   11.30 +
   11.31 +/* Flush v-tlb on cpus set in mask for current domain.  */
   11.32 +void flush_tlb_mask(cpumask_t mask);
   11.33 +
   11.34 +/* Flush local machine TLB.  */
   11.35 +void local_flush_tlb_all (void);
   11.36 +
   11.37 +#define tlbflush_current_time() 0
   11.38 +#define tlbflush_filter(x,y) ((void)0)
   11.39 +
   11.40 +#endif
    12.1 --- a/xen/include/asm-ia64/vcpu.h	Tue May 16 09:05:36 2006 -0600
    12.2 +++ b/xen/include/asm-ia64/vcpu.h	Tue May 16 10:35:58 2006 -0600
    12.3 @@ -135,7 +135,10 @@ extern IA64FAULT vcpu_get_pkr(VCPU *vcpu
    12.4  extern IA64FAULT vcpu_set_pkr(VCPU *vcpu, UINT64 reg, UINT64 val);
    12.5  extern IA64FAULT vcpu_tak(VCPU *vcpu, UINT64 vadr, UINT64 *key);
    12.6  /* TLB */
    12.7 -extern void vcpu_purge_tr_entry(TR_ENTRY *trp);
    12.8 +static inline void vcpu_purge_tr_entry(TR_ENTRY *trp)
    12.9 +{
   12.10 +	trp->pte.val = 0;
   12.11 +}
   12.12  extern IA64FAULT vcpu_itr_d(VCPU *vcpu, UINT64 slot, UINT64 padr,
   12.13  		UINT64 itir, UINT64 ifa);
   12.14  extern IA64FAULT vcpu_itr_i(VCPU *vcpu, UINT64 slot, UINT64 padr,
    13.1 --- a/xen/include/asm-ia64/vhpt.h	Tue May 16 09:05:36 2006 -0600
    13.2 +++ b/xen/include/asm-ia64/vhpt.h	Tue May 16 10:35:58 2006 -0600
    13.3 @@ -14,13 +14,7 @@
    13.4  /* Number of entries in the VHPT.  The size of an entry is 4*8B == 32B */
    13.5  #define	VHPT_NUM_ENTRIES		(1 << (VHPT_SIZE_LOG2 - 5))
    13.6  
    13.7 -#ifdef CONFIG_SMP
    13.8 -# define vhpt_flush_all()	smp_vhpt_flush_all()
    13.9 -#else
   13.10 -# define vhpt_flush_all()	vhpt_flush()
   13.11 -#endif
   13.12  // FIXME: These should be automatically generated
   13.13 -
   13.14  #define	VLE_PGFLAGS_OFFSET		0
   13.15  #define	VLE_ITIR_OFFSET			8
   13.16  #define	VLE_TITAG_OFFSET		16
   13.17 @@ -42,15 +36,10 @@ struct vhpt_lf_entry {
   13.18  extern void vhpt_init (void);
   13.19  extern void zero_vhpt_stats(void);
   13.20  extern int dump_vhpt_stats(char *buf);
   13.21 -extern void vhpt_flush_address(unsigned long vadr, unsigned long addr_range);
   13.22 -extern void vhpt_flush_address_remote(int cpu, unsigned long vadr,
   13.23 -				      unsigned long addr_range);
   13.24  extern void vhpt_multiple_insert(unsigned long vaddr, unsigned long pte,
   13.25  				 unsigned long logps);
   13.26  extern void vhpt_insert (unsigned long vadr, unsigned long pte,
   13.27  			 unsigned long logps);
   13.28 -extern void vhpt_flush(void);
   13.29 -extern void smp_vhpt_flush_all(void);
   13.30  
   13.31  /* Currently the VHPT is allocated per CPU.  */
   13.32  DECLARE_PER_CPU (unsigned long, vhpt_paddr);