direct-io.hg

changeset 11978:685586518b2e

[IA64] Remove VHPT_ADDR

Remove VHPT_ADDR by mapping the VHPT into the xen identity mapping area,
and do some clean ups.

Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
author awilliam@xenbuild.aw
date Sat Oct 14 17:52:09 2006 -0600 (2006-10-14)
parents 0c18c6009448
children fcd746cf4647
files xen/arch/ia64/linux-xen/entry.S xen/arch/ia64/xen/domain.c xen/arch/ia64/xen/regionreg.c xen/arch/ia64/xen/vhpt.c xen/arch/ia64/xen/xenasm.S xen/include/asm-ia64/vhpt.h xen/include/asm-ia64/xensystem.h
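The net effect of the patch: the PTA value (and the dtr[IA64_TR_VHPT] mapping) are now derived from the xen identity-mapped virtual address of the per-cpu/per-vcpu VHPT rather than from the fixed VHPT_ADDR window at 0xf200000000000000. Below is a minimal C sketch of that idea only; it reuses the tree's own helpers (__va_ul(), VHPT_SIZE_LOG2, VHPT_ENABLED), the function name example_pta is made up for illustration, and it is not meant to compile outside this tree.

	/* Sketch only: compose a PTA value from the identity-mapped VHPT
	 * address, as vcpu_pta() and pervcpu_vhpt_alloc() do after this
	 * change.  __va_ul() turns a machine address into its region-7
	 * identity-mapping virtual address. */
	static unsigned long example_pta(unsigned long vhpt_maddr)
	{
		unsigned long va = __va_ul(vhpt_maddr);	/* was VHPT_ADDR before */
		return va			/* pta.base (bits 15..63)   */
		     | (1UL << 8)		/* pta.vf: long format      */
		     | (VHPT_SIZE_LOG2 << 2)	/* pta.size                 */
		     | VHPT_ENABLED;		/* pta.ve: walker enabled   */
	}
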
line diff
     1.1 --- a/xen/arch/ia64/linux-xen/entry.S	Sat Oct 14 17:42:00 2006 -0600
     1.2 +++ b/xen/arch/ia64/linux-xen/entry.S	Sat Oct 14 17:52:09 2006 -0600
     1.3 @@ -262,13 +262,15 @@ GLOBAL_ENTRY(ia64_switch_to)
     1.4  #endif
     1.5  	rsm psr.ic			// interrupts (psr.i) are already disabled here
     1.6  	movl r25=PAGE_KERNEL
     1.7 +	movl r26 = IA64_GRANULE_SHIFT << 2
     1.8  	;;
     1.9  	srlz.d
    1.10  	or r23=r25,r20			// construct PA | page properties
    1.11 -	mov r25=IA64_GRANULE_SHIFT<<2
    1.12 +	ptr.d in0, r26			// to purge dtr[IA64_TR_VHPT]
    1.13  	;;
    1.14 -	mov cr.itir=r25
    1.15 +	mov cr.itir=r26
    1.16  	mov cr.ifa=in0			// VA of next task...
    1.17 +	srlz.d
    1.18  	;;
    1.19  	mov r25=IA64_TR_CURRENT_STACK
    1.20  #ifdef XEN
     2.1 --- a/xen/arch/ia64/xen/domain.c	Sat Oct 14 17:42:00 2006 -0600
     2.2 +++ b/xen/arch/ia64/xen/domain.c	Sat Oct 14 17:52:09 2006 -0600
     2.3 @@ -118,13 +118,13 @@ void schedule_tail(struct vcpu *prev)
     2.4  	extern char ia64_ivt;
     2.5  	context_saved(prev);
     2.6  
     2.7 +	ia64_disable_vhpt_walker();
     2.8  	if (VMX_DOMAIN(current)) {
     2.9  		vmx_do_launch(current);
    2.10  		migrate_timer(&current->arch.arch_vmx.vtm.vtm_timer,
    2.11  		              current->processor);
    2.12  	} else {
    2.13  		ia64_set_iva(&ia64_ivt);
    2.14 -		ia64_disable_vhpt_walker();
    2.15  		load_region_regs(current);
    2.16          	ia64_set_pta(vcpu_pta(current));
    2.17  		vcpu_load_kernel_regs(current);
    2.18 @@ -157,6 +157,8 @@ void context_switch(struct vcpu *prev, s
    2.19      }
    2.20      if (VMX_DOMAIN(next))
    2.21  	vmx_load_state(next);
    2.22 +
    2.23 +    ia64_disable_vhpt_walker();
    2.24      /*ia64_psr(ia64_task_regs(next))->dfh = !ia64_is_local_fpu_owner(next);*/
    2.25      prev = ia64_switch_to(next);
    2.26  
    2.27 @@ -176,7 +178,6 @@ void context_switch(struct vcpu *prev, s
    2.28  
    2.29  	nd = current->domain;
    2.30      	if (!is_idle_domain(nd)) {
    2.31 -		ia64_disable_vhpt_walker();
    2.32  	    	load_region_regs(current);
    2.33  		ia64_set_pta(vcpu_pta(current));
    2.34  	    	vcpu_load_kernel_regs(current);
    2.35 @@ -192,7 +193,6 @@ void context_switch(struct vcpu *prev, s
    2.36  		 * walker. Then all accesses happen within idle context will
    2.37  		 * be handled by TR mapping and identity mapping.
    2.38  		 */
    2.39 -		ia64_disable_vhpt_walker();
    2.40  		__ia64_per_cpu_var(current_psr_i_addr) = NULL;
    2.41  		__ia64_per_cpu_var(current_psr_ic_addr) = NULL;
    2.42          }
     3.1 --- a/xen/arch/ia64/xen/regionreg.c	Sat Oct 14 17:42:00 2006 -0600
     3.2 +++ b/xen/arch/ia64/xen/regionreg.c	Sat Oct 14 17:52:09 2006 -0600
     3.3 @@ -17,7 +17,7 @@
     3.4  #include <asm/vcpu.h>
     3.5  
     3.6  /* Defined in xemasm.S  */
     3.7 -extern void ia64_new_rr7(unsigned long rid, void *shared_info, void *shared_arch_info, unsigned long shared_info_va, unsigned long p_vhpt);
     3.8 +extern void ia64_new_rr7(unsigned long rid, void *shared_info, void *shared_arch_info, unsigned long shared_info_va, unsigned long va_vhpt);
     3.9  
    3.10  /* RID virtualization mechanism is really simple:  domains have less rid bits
    3.11     than the host and the host rid space is shared among the domains.  (Values
    3.12 @@ -260,7 +260,7 @@ int set_one_rr(unsigned long rr, unsigne
    3.13  	} else if (rreg == 7) {
    3.14  		ia64_new_rr7(vmMangleRID(newrrv.rrval),v->domain->shared_info,
    3.15  			     v->arch.privregs, v->domain->arch.shared_info_va,
    3.16 -		             vcpu_vhpt_maddr(v));
    3.17 +		             __va_ul(vcpu_vhpt_maddr(v)));
    3.18  	} else {
    3.19  		set_rr(rr,newrrv.rrval);
    3.20  	}
     4.1 --- a/xen/arch/ia64/xen/vhpt.c	Sat Oct 14 17:42:00 2006 -0600
     4.2 +++ b/xen/arch/ia64/xen/vhpt.c	Sat Oct 14 17:52:09 2006 -0600
     4.3 @@ -30,7 +30,7 @@ DEFINE_PER_CPU (unsigned long, vhpt_padd
     4.4  DEFINE_PER_CPU (unsigned long, vhpt_pend);
     4.5  
     4.6  static void
     4.7 - __vhpt_flush(unsigned long vhpt_maddr)
     4.8 +__vhpt_flush(unsigned long vhpt_maddr)
     4.9  {
    4.10  	struct vhpt_lf_entry *v = (struct vhpt_lf_entry*)__va(vhpt_maddr);
    4.11  	int i;
    4.12 @@ -158,8 +158,7 @@ pervcpu_vhpt_alloc(struct vcpu *v)
    4.13  	v->arch.pta.ve = 1; // enable vhpt
    4.14  	v->arch.pta.size = VHPT_SIZE_LOG2;
    4.15  	v->arch.pta.vf = 1; // long format
    4.16 -	//v->arch.pta.base = __va(v->arch.vhpt_maddr) >> 15;
    4.17 -	v->arch.pta.base = VHPT_ADDR >> 15;
    4.18 +	v->arch.pta.base = __va_ul(v->arch.vhpt_maddr) >> 15;
    4.19  
    4.20  	vhpt_erase(v->arch.vhpt_maddr);
    4.21  	smp_mb(); // per vcpu vhpt may be used by another physical cpu.
    4.22 @@ -284,7 +283,8 @@ static void
    4.23  
    4.24  	while ((long)addr_range > 0) {
    4.25  		/* Get the VHPT entry.  */
    4.26 -		unsigned int off = ia64_thash(vadr) - VHPT_ADDR;
    4.27 +		unsigned int off = ia64_thash(vadr) -
    4.28 +			__va_ul(vcpu_vhpt_maddr(current));
    4.29  		struct vhpt_lf_entry *v = vhpt_base + off;
    4.30  		v->ti_tag = INVALID_TI_TAG;
    4.31  		addr_range -= PAGE_SIZE;
    4.32 @@ -444,7 +444,7 @@ static void flush_tlb_vhpt_all (struct d
    4.33  void domain_flush_tlb_vhpt(struct domain *d)
    4.34  {
    4.35  	/* Very heavy...  */
    4.36 -	if (HAS_PERVCPU_VHPT(d) /* || VMX_DOMAIN(v) */)
    4.37 +	if (HAS_PERVCPU_VHPT(d) || d->arch.is_vti)
    4.38  		on_each_cpu((void (*)(void *))local_flush_tlb_all, NULL, 1, 1);
    4.39  	else
    4.40  		on_each_cpu((void (*)(void *))flush_tlb_vhpt_all, d, 1, 1);
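For reference, the flush path in vhpt.c now locates a VHPT entry by its offset from the identity-mapped base of the current vcpu's VHPT instead of from VHPT_ADDR. A hedged sketch of that loop follows; vhpt_base, ia64_thash(), vcpu_vhpt_maddr() and INVALID_TI_TAG are the tree's own symbols, and the trailing vadr increment is assumed from context since it lies outside the quoted hunk.

	/* Sketch of the per-range invalidation after this change. */
	while ((long)addr_range > 0) {
		/* offset of the entry inside the VHPT, no fixed window needed */
		unsigned int off = ia64_thash(vadr) -
			__va_ul(vcpu_vhpt_maddr(current));
		struct vhpt_lf_entry *v = vhpt_base + off;
		v->ti_tag = INVALID_TI_TAG;	/* drop the cached translation */
		addr_range -= PAGE_SIZE;
		vadr += PAGE_SIZE;		/* assumed: advance to next page */
	}
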
     5.1 --- a/xen/arch/ia64/xen/xenasm.S	Sat Oct 14 17:42:00 2006 -0600
     5.2 +++ b/xen/arch/ia64/xen/xenasm.S	Sat Oct 14 17:52:09 2006 -0600
     5.3 @@ -26,10 +26,11 @@
     5.4  //                         void *shared_info,      	 /* in1 */
     5.5  //                         void *shared_arch_info, 	 /* in2 */
     5.6  //                         unsigned long shared_info_va, /* in3 */
     5.7 -//                         unsigned long p_vhpt)   	 /* in4 */
     5.8 +//                         unsigned long va_vhpt)   	 /* in4 */
     5.9  //Local usage:
    5.10  //  loc0=rp, loc1=ar.pfs, loc2=percpu_paddr, loc3=psr, loc4=ar.rse
    5.11  //  loc5=pal_vaddr, loc6=xen_paddr, loc7=shared_archinfo_paddr,
    5.12 +//  r16, r19, r20 are used by ia64_switch_mode_{phys, virt}()
    5.13  GLOBAL_ENTRY(ia64_new_rr7)
    5.14  	// FIXME? not sure this unwind statement is correct...
    5.15  	.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(1)
    5.16 @@ -118,16 +119,31 @@ 1:
    5.17  
    5.18  	// VHPT
    5.19  #if VHPT_ENABLED
    5.20 -	mov r24=VHPT_SIZE_LOG2<<2
    5.21 -	movl r22=VHPT_ADDR
    5.22 -	mov r21=IA64_TR_VHPT
    5.23 +#if IA64_GRANULE_SHIFT < VHPT_SIZE_LOG2
    5.24 +#error "it must be that VHPT_SIZE_LOG2 <= IA64_GRANULE_SHIFT"
    5.25 +#endif	
    5.26 +	// unless overlaps with KERNEL_TR and IA64_TR_CURRENT_STACK
    5.27 +	dep r14=0,in4,0,KERNEL_TR_PAGE_SHIFT
    5.28 +	dep r15=0,in4,0,IA64_GRANULE_SHIFT
    5.29 +	dep r21=0,r13,0,IA64_GRANULE_SHIFT
    5.30  	;;
    5.31 -	ptr.d	r22,r24
    5.32 -	or r23=in4,r26			// construct PA | page properties
    5.33 +	cmp.eq p7,p0=r17,r14
    5.34 +	cmp.eq p8,p0=r15,r21
    5.35 +(p7)	br.cond.sptk	.vhpt_overlaps
    5.36 +(p8)	br.cond.sptk	.vhpt_overlaps
    5.37 +	mov r21=IA64_TR_VHPT
    5.38 +	dep r22=0,r15,60,4		// physical address of
    5.39 +	                                // va_vhpt & ~(IA64_GRANULE_SIZE - 1)
    5.40 +	mov r24=IA64_GRANULE_SHIFT<<2
    5.41 +	;;
    5.42 +	ptr.d	r15,r24
    5.43 +	or r23=r22,r26			// construct PA | page properties
    5.44  	mov cr.itir=r24
    5.45 -	mov cr.ifa=r22
    5.46 +	mov cr.ifa=r15
    5.47 +	srlz.d
    5.48  	;;
    5.49  	itr.d dtr[r21]=r23		// wire in new mapping...
    5.50 +.vhpt_overlaps:	
    5.51  #endif
    5.52  
    5.53  	//  Shared info
     6.1 --- a/xen/include/asm-ia64/vhpt.h	Sat Oct 14 17:42:00 2006 -0600
     6.2 +++ b/xen/include/asm-ia64/vhpt.h	Sat Oct 14 17:52:09 2006 -0600
     6.3 @@ -79,7 +79,8 @@ vcpu_pta(struct vcpu* v)
     6.4      if (HAS_PERVCPU_VHPT(v->domain))
     6.5          return v->arch.pta.val;
     6.6  #endif
     6.7 -    return VHPT_ADDR | (1 << 8) | (VHPT_SIZE_LOG2 << 2) | VHPT_ENABLED;
     6.8 +    return __va_ul(__get_cpu_var(vhpt_paddr)) | (1 << 8) |
     6.9 +        (VHPT_SIZE_LOG2 << 2) | VHPT_ENABLED;
    6.10  }
    6.11  
    6.12  #endif /* !__ASSEMBLY */
     7.1 --- a/xen/include/asm-ia64/xensystem.h	Sat Oct 14 17:42:00 2006 -0600
     7.2 +++ b/xen/include/asm-ia64/xensystem.h	Sat Oct 14 17:52:09 2006 -0600
     7.3 @@ -22,7 +22,6 @@
     7.4  #define GATE_ADDR		KERNEL_START
     7.5  #define DEFAULT_SHAREDINFO_ADDR	 0xf100000000000000
     7.6  #define PERCPU_ADDR		 (DEFAULT_SHAREDINFO_ADDR - PERCPU_PAGE_SIZE)
     7.7 -#define VHPT_ADDR		 0xf200000000000000
     7.8  #ifdef CONFIG_VIRTUAL_FRAME_TABLE
     7.9  #define VIRT_FRAME_TABLE_ADDR	 0xf300000000000000
    7.10  #define VIRT_FRAME_TABLE_END	 0xf400000000000000