ia64/xen-unstable

changeset 15416:88ab11d8fd1c

[IA64] Don't use hard-wired privilege level 2 for domain kernel

PV domains now use CPL 1 instead of 2 for CPL 0 emulation

Signed-off-by: Juergen Gross <juergen.gross@fujitsu-siemens.com>
author Alex Williamson <alex.williamson@hp.com>
date Mon Jul 02 09:29:08 2007 -0600 (2007-07-02)
parents 38d061886873
children 2a5b463f2e8d
files xen/arch/ia64/xen/domain.c xen/arch/ia64/xen/faults.c xen/arch/ia64/xen/hyperprivop.S xen/arch/ia64/xen/ivt.S xen/arch/ia64/xen/mm.c xen/arch/ia64/xen/oprofile/xenoprof.c xen/arch/ia64/xen/privop.c xen/arch/ia64/xen/vcpu.c xen/arch/ia64/xen/xenasm.S xen/include/asm-ia64/config.h xen/include/asm-ia64/linux-xen/asm/ptrace.h xen/include/asm-ia64/vcpu.h xen/include/asm-ia64/vmmu.h
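
The patch's central mechanism is the new vcpu_pl_adjust() helper (added to
xen/include/asm-ia64/vcpu.h below), which clamps a two-bit privilege-level
field to at least CONFIG_CPL0_EMUL instead of unconditionally OR-ing in
privilege level 2. The following standalone C program is a sketch for
illustration only -- the main() harness is an assumption, not Xen code;
CONFIG_CPL0_EMUL (1) is taken from the patch, and IA64_PSR_CPL0_BIT is
assumed to be 32 (psr.cpl occupies bits 32-33 on ia64):

    /* Mirror of the vcpu_pl_adjust() helper introduced by this changeset,
     * wrapped in a host-side harness so its effect can be observed. */
    #include <stdio.h>
    #include <stdint.h>

    typedef uint64_t u64;

    #define CONFIG_CPL0_EMUL   1    /* PL now used to emulate guest CPL 0 */
    #define IA64_PSR_CPL0_BIT  32   /* psr.cpl occupies bits 32-33 */

    /* Raise the 2-bit privilege field at 'shift' to at least
     * CONFIG_CPL0_EMUL; less privileged values (e.g. PL3) pass through. */
    static u64 vcpu_pl_adjust(u64 reg, u64 shift)
    {
        u64 pl = reg & ((u64)3 << shift);
        if (pl < ((u64)CONFIG_CPL0_EMUL << shift))
            pl = (u64)CONFIG_CPL0_EMUL << shift;
        return (reg & ~((u64)3 << shift)) | pl;
    }

    int main(void)
    {
        u64 kern = 0;                           /* guest kernel: psr.cpl == 0 */
        u64 user = (u64)3 << IA64_PSR_CPL0_BIT; /* guest user:   psr.cpl == 3 */

        /* CPL 0 is raised to 1 (formerly hard-wired to 2); CPL 3 stays 3. */
        printf("kernel cpl -> %llu\n", (unsigned long long)
               ((vcpu_pl_adjust(kern, IA64_PSR_CPL0_BIT)
                 >> IA64_PSR_CPL0_BIT) & 3));
        printf("user cpl   -> %llu\n", (unsigned long long)
               ((vcpu_pl_adjust(user, IA64_PSR_CPL0_BIT)
                 >> IA64_PSR_CPL0_BIT) & 3));
        return 0;
    }
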
line diff
     1.1 --- a/xen/arch/ia64/xen/domain.c	Mon Jul 02 09:05:24 2007 -0600
     1.2 +++ b/xen/arch/ia64/xen/domain.c	Mon Jul 02 09:29:08 2007 -0600
     1.3 @@ -838,8 +838,9 @@ int arch_set_info_guest(struct vcpu *v, 
     1.4  	
     1.5   	if (!d->arch.is_vti) {
     1.6   		/* domain runs at PL CONFIG_CPL0_EMUL/3 */
     1.7 - 		uregs->cr_ipsr |= 2UL << IA64_PSR_CPL0_BIT;
     1.8 - 		uregs->ar_rsc |= (2 << 2); /* force PL2/3 */
     1.9 + 		uregs->cr_ipsr = vcpu_pl_adjust(uregs->cr_ipsr,
    1.10 +		                                IA64_PSR_CPL0_BIT);
    1.11 + 		uregs->ar_rsc = vcpu_pl_adjust(uregs->ar_rsc, 2);
    1.12   	}
    1.13  
    1.14  	for (i = 0; i < IA64_NUM_DBG_REGS; i++) {
     2.1 --- a/xen/arch/ia64/xen/faults.c	Mon Jul 02 09:05:24 2007 -0600
     2.2 +++ b/xen/arch/ia64/xen/faults.c	Mon Jul 02 09:29:08 2007 -0600
     2.3 @@ -38,10 +38,9 @@ extern void die_if_kernel(char *str, str
     2.4  extern int ia64_hyperprivop(unsigned long, REGS *);
     2.5  extern IA64FAULT ia64_hypercall(struct pt_regs *regs);
     2.6  
     2.7 -#define IA64_PSR_CPL1	(__IA64_UL(1) << IA64_PSR_CPL1_BIT)
     2.8  // note IA64_PSR_PK removed from following, why is this necessary?
     2.9  #define	DELIVER_PSR_SET	(IA64_PSR_IC | IA64_PSR_I | \
    2.10 -			IA64_PSR_DT | IA64_PSR_RT | IA64_PSR_CPL1 | \
    2.11 +			IA64_PSR_DT | IA64_PSR_RT | \
    2.12  			IA64_PSR_IT | IA64_PSR_BN)
    2.13  
    2.14  #define	DELIVER_PSR_CLR	(IA64_PSR_AC | IA64_PSR_DFL | IA64_PSR_DFH |	\
    2.15 @@ -92,6 +91,7 @@ static void reflect_interruption(unsigne
    2.16  
    2.17  	regs->cr_iip = ((unsigned long)PSCBX(v, iva) + vector) & ~0xffUL;
    2.18  	regs->cr_ipsr = (regs->cr_ipsr & ~DELIVER_PSR_CLR) | DELIVER_PSR_SET;
    2.19 +	regs->cr_ipsr = vcpu_pl_adjust(regs->cr_ipsr, IA64_PSR_CPL0_BIT);
    2.20  	if (PSCB(v, dcr) & IA64_DCR_BE)
    2.21  		regs->cr_ipsr |= IA64_PSR_BE;
    2.22  
    2.23 @@ -137,6 +137,7 @@ void reflect_event(void)
    2.24  
    2.25  	regs->cr_iip = v->arch.event_callback_ip;
    2.26  	regs->cr_ipsr = (regs->cr_ipsr & ~DELIVER_PSR_CLR) | DELIVER_PSR_SET;
    2.27 +	regs->cr_ipsr = vcpu_pl_adjust(regs->cr_ipsr, IA64_PSR_CPL0_BIT);
    2.28  	if (PSCB(v, dcr) & IA64_DCR_BE)
    2.29  		regs->cr_ipsr |= IA64_PSR_BE;
    2.30  
    2.31 @@ -236,6 +237,8 @@ void ia64_do_page_fault(unsigned long ad
    2.32  		    ((unsigned long)PSCBX(current, iva) + fault) & ~0xffUL;
    2.33  		regs->cr_ipsr =
    2.34  		    (regs->cr_ipsr & ~DELIVER_PSR_CLR) | DELIVER_PSR_SET;
    2.35 +		regs->cr_ipsr = vcpu_pl_adjust(regs->cr_ipsr,
    2.36 +					       IA64_PSR_CPL0_BIT);
    2.37  
    2.38  		if (PSCB(current, hpsr_dfh))
    2.39  			regs->cr_ipsr |= IA64_PSR_DFH;  
    2.40 @@ -503,7 +506,7 @@ ia64_handle_break(unsigned long ifa, str
    2.41  
    2.42  	/* FIXME: don't hardcode constant */
    2.43  	if ((iim == 0x80001 || iim == 0x80002)
    2.44 -	    && ia64_get_cpl(regs->cr_ipsr) == 2) {
    2.45 +	    && ia64_get_cpl(regs->cr_ipsr) == CONFIG_CPL0_EMUL) {
    2.46  		do_ssc(vcpu_get_gr(current, 36), regs);
    2.47  	}
    2.48  #ifdef CRASH_DEBUG
    2.49 @@ -513,7 +516,8 @@ ia64_handle_break(unsigned long ifa, str
    2.50  		debugger_trap_fatal(0 /* don't care */ , regs);
    2.51  	}
    2.52  #endif
    2.53 -	else if (iim == d->arch.breakimm && ia64_get_cpl(regs->cr_ipsr) == 2) {
    2.54 +	else if (iim == d->arch.breakimm &&
    2.55 +	         ia64_get_cpl(regs->cr_ipsr) == CONFIG_CPL0_EMUL) {
    2.56  		/* by default, do not continue */
    2.57  		v->arch.hypercall_continuation = 0;
    2.58  
    2.59 @@ -523,7 +527,7 @@ ia64_handle_break(unsigned long ifa, str
    2.60  		} else
    2.61  			reflect_interruption(isr, regs, vector);
    2.62  	} else if ((iim - HYPERPRIVOP_START) < HYPERPRIVOP_MAX
    2.63 -		   && ia64_get_cpl(regs->cr_ipsr) == 2) {
    2.64 +		   && ia64_get_cpl(regs->cr_ipsr) == CONFIG_CPL0_EMUL) {
    2.65  		if (ia64_hyperprivop(iim, regs))
    2.66  			vcpu_increment_iip(current);
    2.67  	} else {
     3.1 --- a/xen/arch/ia64/xen/hyperprivop.S	Mon Jul 02 09:05:24 2007 -0600
     3.2 +++ b/xen/arch/ia64/xen/hyperprivop.S	Mon Jul 02 09:29:08 2007 -0600
     3.3 @@ -18,9 +18,8 @@
     3.4  
     3.5  
     3.6  #define	_PAGE_PPN_MASK	0x0003fffffffff000 //asm/pgtable.h doesn't do assembly
     3.7 -#define PAGE_PHYS	0x0010000000000761 //__pgprot(__DIRTY_BITS|
     3.8 -					   //         _PAGE_PL_2|_PAGE_AR_RWX)
     3.9 -#define _PAGE_PL_2	(2<<7)
    3.10 +#define PAGE_PHYS	(0x0010000000000661 | _PAGE_PL_PRIV)
    3.11 +			//__pgprot(__DIRTY_BITS|_PAGE_PL_PRIV|_PAGE_AR_RWX)
    3.12  
    3.13  #if 1	 // change to 0 to turn off all fast paths
    3.14  # define FAST_HYPERPRIVOPS
    3.15 @@ -62,7 +61,7 @@
    3.16  #define IA64_PSR_CPL0	(__IA64_UL(1) << IA64_PSR_CPL0_BIT)
    3.17  // note IA64_PSR_PK removed from following, why is this necessary?
    3.18  #define	DELIVER_PSR_SET	(IA64_PSR_IC | IA64_PSR_I | \
    3.19 -			IA64_PSR_DT | IA64_PSR_RT | IA64_PSR_CPL1 | \
    3.20 +			IA64_PSR_DT | IA64_PSR_RT | \
    3.21  			IA64_PSR_IT | IA64_PSR_BN)
    3.22  
    3.23  #define	DELIVER_PSR_CLR	(IA64_PSR_AC | IA64_PSR_DFL | IA64_PSR_DFH | \
    3.24 @@ -249,8 +248,8 @@ ENTRY(hyper_ssm_i)
    3.25  	mov r29=r30 ;;
    3.26  	movl r28=DELIVER_PSR_SET;;
    3.27  	movl r27=~DELIVER_PSR_CLR;;
    3.28 +	and r29=r29,r27;;
    3.29  	or r29=r29,r28;;
    3.30 -	and r29=r29,r27;;
    3.31  	// set hpsr_dfh to ipsr
    3.32  	adds r28=XSI_HPSR_DFH_OFS-XSI_PSR_IC_OFS,r18;;
    3.33  	ld1 r28=[r28];;
    3.34 @@ -258,8 +257,7 @@ ENTRY(hyper_ssm_i)
    3.35  	mov cr.ipsr=r29;;
    3.36  	// set shared_mem ipsr (from ipsr in r30 with ipsr.ri already set)
    3.37  	extr.u r29=r30,IA64_PSR_CPL0_BIT,2;;
    3.38 -	cmp.eq p6,p7=3,r29;;
    3.39 -(p6)	dep r30=-1,r30,IA64_PSR_CPL0_BIT,2
    3.40 +	cmp.eq p7,p0=CONFIG_CPL0_EMUL,r29;;
    3.41  (p7)	dep r30=0,r30,IA64_PSR_CPL0_BIT,2
    3.42  	;;
    3.43  	// FOR SSM_I ONLY, also turn on psr.i and psr.ic
    3.44 @@ -441,20 +439,18 @@ GLOBAL_ENTRY(fast_tick_reflect)
    3.45  	st8 [r21]=r16 ;;
     3.46  	// set cr.ipsr (make sure cpl==CONFIG_CPL0_EMUL!)
    3.47  	mov r29=r17 ;;
    3.48 -	movl r28=DELIVER_PSR_SET;;
    3.49 -	movl r27=~(DELIVER_PSR_CLR|IA64_PSR_CPL0);;
    3.50 +	movl r28=DELIVER_PSR_SET | (CONFIG_CPL0_EMUL << IA64_PSR_CPL0_BIT);;
    3.51 +	movl r27=~(DELIVER_PSR_CLR|IA64_PSR_CPL0|IA64_PSR_CPL1);;
    3.52 +	and r29=r29,r27;;
    3.53  	or r29=r29,r28;;
    3.54 -	and r29=r29,r27;;
    3.55  	mov cr.ipsr=r29;;
    3.56  	// set shared_mem ipsr (from ipsr in r17 with ipsr.ri already set)
    3.57  	extr.u r29=r17,IA64_PSR_CPL0_BIT,2;;
    3.58 -	cmp.eq p6,p7=3,r29;;
    3.59 -(p6)	dep r17=-1,r17,IA64_PSR_CPL0_BIT,2
    3.60 +	cmp.eq p7,p0=CONFIG_CPL0_EMUL,r29;;
    3.61  (p7)	dep r17=0,r17,IA64_PSR_CPL0_BIT,2
    3.62  	;;
    3.63  	movl r28=(IA64_PSR_DT|IA64_PSR_IT|IA64_PSR_RT);;
    3.64  	movl r27=~(IA64_PSR_BE|IA64_PSR_PP|IA64_PSR_BN|IA64_PSR_I|IA64_PSR_IC);;
    3.65 -	dep r21=-1,r21,IA64_PSR_CPL1_BIT,1 ;;
    3.66  	or r17=r17,r28;;
    3.67  	and r17=r17,r27;;
    3.68  	ld4 r16=[r18];;
    3.69 @@ -620,10 +616,10 @@ ENTRY(fast_reflect)
    3.70  	movl r21=THIS_CPU(current_psr_i_addr)
    3.71  	mov r29=r30 ;;
    3.72  	ld8 r21=[r21]
    3.73 -	movl r28=DELIVER_PSR_SET;;
    3.74 -	movl r27=~(DELIVER_PSR_CLR|IA64_PSR_CPL0);;
    3.75 +	movl r28=DELIVER_PSR_SET | (CONFIG_CPL0_EMUL << IA64_PSR_CPL0_BIT);;
    3.76 +	movl r27=~(DELIVER_PSR_CLR|IA64_PSR_CPL0|IA64_PSR_CPL1);;
    3.77 +	and r29=r29,r27;;
    3.78  	or r29=r29,r28;;
    3.79 -	and r29=r29,r27;;
    3.80  	// set hpsr_dfh to ipsr
    3.81  	adds r28=XSI_HPSR_DFH_OFS-XSI_PSR_IC_OFS,r18;;
    3.82  	ld1 r28=[r28];;
    3.83 @@ -631,8 +627,7 @@ ENTRY(fast_reflect)
    3.84  	mov cr.ipsr=r29;;
    3.85  	// set shared_mem ipsr (from ipsr in r30 with ipsr.ri already set)
    3.86  	extr.u r29=r30,IA64_PSR_CPL0_BIT,2;;
    3.87 -	cmp.eq p6,p7=3,r29;;
    3.88 -(p6)	dep r30=-1,r30,IA64_PSR_CPL0_BIT,2
    3.89 +	cmp.eq p7,p0=CONFIG_CPL0_EMUL,r29;;
    3.90  (p7)	dep r30=0,r30,IA64_PSR_CPL0_BIT,2
    3.91  	;;
    3.92  	movl r28=(IA64_PSR_DT|IA64_PSR_IT|IA64_PSR_RT);;
    3.93 @@ -1112,14 +1107,17 @@ 1:	// OK now, let's do an rfi.
    3.94  
    3.95  just_do_rfi:
    3.96  	// r18=&vpsr.i|vpsr.ic, r21==vpsr, r22=vcr.iip
    3.97 -	mov cr.iip=r22;;
    3.98 +	mov cr.iip=r22
    3.99 +	extr.u r19=r21,IA64_PSR_CPL0_BIT,2
   3.100  	adds r20=XSI_IFS_OFS-XSI_PSR_IC_OFS,r18 ;;
   3.101 +	cmp.gtu p7,p0=CONFIG_CPL0_EMUL,r19
   3.102  	ld8 r20=[r20];;
   3.103 +(p7)	mov r19=CONFIG_CPL0_EMUL
   3.104  	dep r20=0,r20,38,25;; // ensure ifs has no reserved bits set
   3.105  	mov cr.ifs=r20 ;;
   3.106 -	// ipsr.cpl == (vcr.ipsr.cpl == 0) 2 : 3;
   3.107 +	// ipsr.cpl = max(vcr.ipsr.cpl, CONFIG_CPL0_EMUL);
   3.108  	movl r20=THIS_CPU(current_psr_i_addr)
   3.109 -	dep r21=-1,r21,IA64_PSR_CPL1_BIT,1 ;;
   3.110 +	dep r21=r19,r21,IA64_PSR_CPL0_BIT,2;;
   3.111  	// vpsr.i = vcr.ipsr.i; vpsr.ic = vcr.ipsr.ic
   3.112  	ld8 r20=[r20]
   3.113  	mov r19=1 
   3.114 @@ -1287,12 +1285,12 @@ ENTRY(rfi_with_interrupt)
   3.115  	movl r22=THIS_CPU(current_psr_i_addr)
    3.116  	// set cr.ipsr (make sure cpl==CONFIG_CPL0_EMUL!)
   3.117  	mov r29=r17
   3.118 -	movl r28=DELIVER_PSR_SET;;
   3.119 +	movl r27=~(DELIVER_PSR_CLR|IA64_PSR_CPL0|IA64_PSR_CPL1)
   3.120 +	movl r28=DELIVER_PSR_SET | (CONFIG_CPL0_EMUL << IA64_PSR_CPL0_BIT);;
   3.121  	mov r20=1;;
   3.122  	ld8 r22=[r22]
   3.123 -	movl r27=~(DELIVER_PSR_CLR|IA64_PSR_CPL0)
   3.124 +	and r29=r29,r27;;
   3.125  	or r29=r29,r28;;
   3.126 -	and r29=r29,r27;;
   3.127  	mov cr.ipsr=r29;;
   3.128  	// v.ipsr and v.iip are already set (and v.iip validated) as rfi target
   3.129  	// set shared_mem interrupt_delivery_enabled to 0
   3.130 @@ -1935,7 +1933,7 @@ ENTRY(fast_insert)
   3.131  	or r20=r20,r21 ;;	// r20==return value from lookup_domain_mpa
   3.132  	// r16=pteval,r20=pteval2
   3.133  	movl r19=_PAGE_PPN_MASK
   3.134 -	movl r21=_PAGE_PL_2;;
   3.135 +	movl r21=_PAGE_PL_PRIV;;
   3.136  	andcm r25=r16,r19;;	// r25==pteval & ~_PAGE_PPN_MASK
   3.137  	and r22=r20,r19;;
   3.138  	or r22=r22,r21;;
     4.1 --- a/xen/arch/ia64/xen/ivt.S	Mon Jul 02 09:05:24 2007 -0600
     4.2 +++ b/xen/arch/ia64/xen/ivt.S	Mon Jul 02 09:29:08 2007 -0600
     4.3 @@ -510,7 +510,8 @@ ENTRY(break_fault)
     4.4  (p7)	br.spnt.many dispatch_privop_fault
     4.5  	;;
     4.6  #endif
     4.7 -	// if (ipsr.cpl == 2 && (iim - HYPERPRIVOP_START) < HYPERPRIVOP_MAX)
     4.8 +	// if (ipsr.cpl == CONFIG_CPL0_EMUL &&
     4.9 +	//    (iim - HYPERPRIVOP_START) < HYPERPRIVOP_MAX)
    4.10  	// this is a hyperprivop. A hyperprivop is hand-coded assembly with
    4.11  	// psr.ic off which means it can make no calls, cannot use r1-r15,
    4.12  	// and it can have no memory accesses unless they are to pinned
    4.13 @@ -524,7 +525,7 @@ ENTRY(break_fault)
    4.14  	;;
    4.15  	cmp.gtu p7,p0=r21,r20
    4.16  	;;
    4.17 -	cmp.eq.and p7,p0=2,r19			// ipsr.cpl==2
    4.18 +	cmp.eq.and p7,p0=CONFIG_CPL0_EMUL,r19	// ipsr.cpl==CONFIG_CPL0_EMUL
    4.19  (p7)	br.sptk.many fast_hyperprivop
    4.20  	;;
    4.21  	movl r22=THIS_CPU(cpu_kr)+IA64_KR_CURRENT_OFFSET
    4.22 @@ -535,7 +536,7 @@ ENTRY(break_fault)
    4.23  	;;
    4.24  	ld4 r23=[r23];;
    4.25  	cmp4.eq p6,p0=r23,r17;;			// Xen-reserved breakimm?
    4.26 -	cmp.eq.and p6,p0=2,r19        
    4.27 +	cmp.eq.and p6,p0=CONFIG_CPL0_EMUL,r19        
    4.28  (p6)	br.spnt.many fast_hypercall
    4.29  	;;
    4.30  	br.sptk.many fast_break_reflect
     5.1 --- a/xen/arch/ia64/xen/mm.c	Mon Jul 02 09:05:24 2007 -0600
     5.2 +++ b/xen/arch/ia64/xen/mm.c	Mon Jul 02 09:29:08 2007 -0600
     5.3 @@ -546,7 +546,7 @@ u64 translate_domain_pte(u64 pteval, u64
      5.4  	/* Ignore non-addr bits of pteval2 and force PL0->CONFIG_CPL0_EMUL
      5.5  	   (PL3 is unaffected) */
     5.6  	return (pteval & ~_PAGE_PPN_MASK) |
     5.7 -	       (pteval2 & _PAGE_PPN_MASK) | _PAGE_PL_2;
     5.8 +	       (pteval2 & _PAGE_PPN_MASK) | _PAGE_PL_PRIV;
     5.9  }
    5.10  
    5.11  // given a current domain metaphysical address, return the physical address
    5.12 @@ -711,7 +711,8 @@ unsigned long lookup_domain_mpa(struct d
    5.13          p2m_entry_set(entry, NULL, __pte(0));
    5.14      //XXX This is a work around until the emulation memory access to a region
    5.15      //    where memory or device are attached is implemented.
    5.16 -    return pte_val(pfn_pte(0, __pgprot(__DIRTY_BITS | _PAGE_PL_2 | _PAGE_AR_RWX)));
    5.17 +    return pte_val(pfn_pte(0, __pgprot(__DIRTY_BITS | _PAGE_PL_PRIV |
    5.18 +                                       _PAGE_AR_RWX)));
    5.19  }
    5.20  
    5.21  // FIXME: ONLY USE FOR DOMAIN PAGE_SIZE == PAGE_SIZE
    5.22 @@ -785,7 +786,7 @@ static struct page_info *
    5.23      set_pte_rel(pte,
    5.24                  pfn_pte(maddr >> PAGE_SHIFT,
    5.25                          __pgprot(_PAGE_PGC_ALLOCATED | __DIRTY_BITS |
    5.26 -                                 _PAGE_PL_2 | _PAGE_AR_RWX)));
    5.27 +                                 _PAGE_PL_PRIV | _PAGE_AR_RWX)));
    5.28  
    5.29      smp_mb();
    5.30      return p;
    5.31 @@ -820,7 +821,7 @@ assign_new_domain0_page(struct domain *d
    5.32  static unsigned long
    5.33  flags_to_prot (unsigned long flags)
    5.34  {
    5.35 -    unsigned long res = _PAGE_PL_2 | __DIRTY_BITS;
    5.36 +    unsigned long res = _PAGE_PL_PRIV | __DIRTY_BITS;
    5.37  
    5.38      res |= flags & ASSIGN_readonly ? _PAGE_AR_R: _PAGE_AR_RWX;
    5.39      res |= flags & ASSIGN_nocache ? _PAGE_MA_UC: _PAGE_MA_WB;
     6.1 --- a/xen/arch/ia64/xen/oprofile/xenoprof.c	Mon Jul 02 09:05:24 2007 -0600
     6.2 +++ b/xen/arch/ia64/xen/oprofile/xenoprof.c	Mon Jul 02 09:29:08 2007 -0600
     6.3 @@ -28,20 +28,26 @@
     6.4  int
     6.5  xenoprofile_get_mode(struct vcpu *v, struct cpu_user_regs * const regs)
     6.6  {
     6.7 -    int mode = 0;
     6.8 +    int mode;
     6.9  
    6.10      // mode
    6.11      // 0: user, 1: kernel, 2: xen
    6.12 -    // Xen/IA64 uses ring2 for kernel, and doesn't use ring1.
    6.13 -    if (ring_2(regs))
    6.14 -        mode = 1;
    6.15 -    else if (ring_0(regs))
    6.16 -        mode = 2;
    6.17 -    else if (ring_1(regs)) {
    6.18 -        gdprintk(XENLOG_ERR, "%s:%d ring1 is used!\n", __func__, __LINE__);
    6.19 -        mode = 1;// fall back to kernel mode.
    6.20 +    switch (ring(regs))
    6.21 +    {
    6.22 +        case 3:
    6.23 +                mode = 0;
    6.24 +                break;
    6.25 +        case CONFIG_CPL0_EMUL:
    6.26 +                mode = 1;
    6.27 +                break;
    6.28 +        case 0:
    6.29 +                mode = 2;
    6.30 +                break;
    6.31 +        default:
    6.32 +                gdprintk(XENLOG_ERR, "%s:%d ring%d is used!\n", __func__,
    6.33 +                         __LINE__, 3 - CONFIG_CPL0_EMUL);
    6.34 +                mode = 1; /* fall back to kernel mode. */
    6.35      }
    6.36 -
    6.37      return mode;
    6.38  }
    6.39  
     7.1 --- a/xen/arch/ia64/xen/privop.c	Mon Jul 02 09:05:24 2007 -0600
     7.2 +++ b/xen/arch/ia64/xen/privop.c	Mon Jul 02 09:29:08 2007 -0600
     7.3 @@ -636,7 +636,7 @@ static IA64FAULT priv_handle_op(VCPU * v
     7.4  	}
     7.5  	if (slot_type == B && inst.generic.major == 0 && inst.B8.x6 == 0x0) {
     7.6  		// break instr for privified cover
     7.7 -	} else if (privlvl != 2)
     7.8 +	} else if (privlvl > CONFIG_CPL0_EMUL)
     7.9  		return IA64_ILLOP_FAULT;
    7.10  	switch (slot_type) {
    7.11  	case M:
     8.1 --- a/xen/arch/ia64/xen/vcpu.c	Mon Jul 02 09:05:24 2007 -0600
     8.2 +++ b/xen/arch/ia64/xen/vcpu.c	Mon Jul 02 09:29:08 2007 -0600
     8.3 @@ -158,7 +158,7 @@ void vcpu_init_regs(struct vcpu *v)
     8.4  		regs->cr_ipsr &= ~(IA64_PSR_BITS_TO_CLEAR
     8.5  				   | IA64_PSR_RI | IA64_PSR_IS);
      8.6  		// domain runs at PL CONFIG_CPL0_EMUL
     8.7 -		regs->cr_ipsr |= 2UL << IA64_PSR_CPL0_BIT;
     8.8 +		regs->cr_ipsr = vcpu_pl_adjust(regs->cr_ipsr,IA64_PSR_CPL0_BIT);
     8.9  		// lazy fp 
    8.10  		PSCB(v, hpsr_dfh) = 1;
    8.11  		PSCB(v, hpsr_mfh) = 0;
    8.12 @@ -174,7 +174,7 @@ void vcpu_init_regs(struct vcpu *v)
    8.13  		VCPU(v, dcr) = 0;
    8.14  	} else {
    8.15  		init_all_rr(v);
    8.16 -		regs->ar_rsc |= (2 << 2);	/* force PL2/3 */
    8.17 +		regs->ar_rsc = vcpu_pl_adjust(regs->ar_rsc, 2);
    8.18  		VCPU(v, banknum) = 1;
    8.19  		VCPU(v, metaphysical_mode) = 1;
    8.20  		VCPU(v, interrupt_mask_addr) =
    8.21 @@ -496,7 +496,7 @@ IA64FAULT vcpu_set_psr(VCPU * vcpu, u64 
    8.22  	PSCB(vcpu, interrupt_collection_enabled) = vpsr.ic;
    8.23  	vcpu_set_metaphysical_mode(vcpu, !(vpsr.dt && vpsr.rt && vpsr.it));
    8.24  
    8.25 -	newpsr.cpl |= vpsr.cpl | 2;
    8.26 +	newpsr.cpl |= max(vpsr.cpl, (u64)CONFIG_CPL0_EMUL);
    8.27  
    8.28  	if (PSCB(vcpu, banknum)	!= vpsr.bn) {
    8.29  		if (vpsr.bn)
    8.30 @@ -535,10 +535,10 @@ u64 vcpu_get_psr(VCPU * vcpu)
    8.31  	newpsr.ia64_psr.pp = PSCB(vcpu, vpsr_pp);
    8.32  
    8.33  	/* Fool cpl.  */
    8.34 -	if (ipsr.ia64_psr.cpl < 3)
    8.35 +	if (ipsr.ia64_psr.cpl <= CONFIG_CPL0_EMUL)
    8.36  		newpsr.ia64_psr.cpl = 0;
    8.37  	else
    8.38 -		newpsr.ia64_psr.cpl = 3;
    8.39 +		newpsr.ia64_psr.cpl = ipsr.ia64_psr.cpl;
    8.40  
    8.41  	newpsr.ia64_psr.bn = PSCB(vcpu, banknum);
    8.42  	
    8.43 @@ -1646,7 +1646,7 @@ IA64FAULT vcpu_translate(VCPU * vcpu, u6
    8.44  
    8.45  		} else {
    8.46  			*pteval = (address & _PAGE_PPN_MASK) |
    8.47 -				__DIRTY_BITS | _PAGE_PL_2 | _PAGE_AR_RWX;
    8.48 +				__DIRTY_BITS | _PAGE_PL_PRIV | _PAGE_AR_RWX;
    8.49  			*itir = PAGE_SHIFT << 2;
    8.50  			perfc_incr(phys_translate);
    8.51  			return IA64_NO_FAULT;
    8.52 @@ -1711,7 +1711,7 @@ IA64FAULT vcpu_translate(VCPU * vcpu, u6
    8.53  		REGS *regs = vcpu_regs(vcpu);
    8.54  		// NOTE: This is specific code for linux kernel
    8.55  		// We assume region 7 is identity mapped
    8.56 -		if (region == 7 && ia64_psr(regs)->cpl == 2) {
    8.57 +		if (region == 7 && ia64_psr(regs)->cpl == CONFIG_CPL0_EMUL) {
    8.58  			pte.val = address & _PAGE_PPN_MASK;
    8.59  			pte.val = pte.val | pgprot_val(PAGE_KERNEL);
    8.60  			goto out;
    8.61 @@ -2090,8 +2090,8 @@ vcpu_set_tr_entry_rid(TR_ENTRY * trp, u6
    8.62  	trp->rid = rid;
    8.63  	ps = trp->ps;
    8.64  	new_pte.val = pte;
    8.65 -	if (new_pte.pl < 2)
    8.66 -		new_pte.pl = 2;
    8.67 +	if (new_pte.pl < CONFIG_CPL0_EMUL)
    8.68 +		new_pte.pl = CONFIG_CPL0_EMUL;
    8.69  	trp->vadr = ifa & ~0xfff;
    8.70  	if (ps > 12) {		// "ignore" relevant low-order bits
    8.71  		new_pte.ppn &= ~((1UL << (ps - 12)) - 1);
     9.1 --- a/xen/arch/ia64/xen/xenasm.S	Mon Jul 02 09:05:24 2007 -0600
     9.2 +++ b/xen/arch/ia64/xen/xenasm.S	Mon Jul 02 09:29:08 2007 -0600
     9.3 @@ -11,6 +11,7 @@
     9.4  #include <asm/pgtable.h>
     9.5  #include <asm/vhpt.h>
     9.6  #include <asm/asm-xsi-offsets.h>
     9.7 +#include <asm/vmmu.h>
     9.8  #include <public/xen.h>
     9.9  	
    9.10  // Change rr7 to the passed value while ensuring
    9.11 @@ -148,7 +149,7 @@ 1:
    9.12  
    9.13  	//  Shared info
    9.14  	mov r24=XSI_SHIFT<<2
    9.15 -	movl r25=__pgprot(__DIRTY_BITS | _PAGE_PL_2 | _PAGE_AR_RW)
    9.16 +	movl r25=__pgprot(__DIRTY_BITS | _PAGE_PL_PRIV | _PAGE_AR_RW)
    9.17  	;;
    9.18  	ptr.d	in3,r24
    9.19  	or r23=in1,r25			// construct PA | page properties
    10.1 --- a/xen/include/asm-ia64/config.h	Mon Jul 02 09:05:24 2007 -0600
    10.2 +++ b/xen/include/asm-ia64/config.h	Mon Jul 02 09:29:08 2007 -0600
    10.3 @@ -55,6 +55,9 @@
    10.4  
    10.5  #define NR_hypercalls 64
    10.6  
    10.7 +/* PV domains use this value for priv. level 0 emulation */
    10.8 +#define CONFIG_CPL0_EMUL	1
    10.9 +
   10.10  #ifndef __ASSEMBLY__
   10.11  
   10.12  // can't find where this typedef was before?!?
    11.1 --- a/xen/include/asm-ia64/linux-xen/asm/ptrace.h	Mon Jul 02 09:05:24 2007 -0600
    11.2 +++ b/xen/include/asm-ia64/linux-xen/asm/ptrace.h	Mon Jul 02 09:29:08 2007 -0600
    11.3 @@ -267,7 +267,7 @@ struct switch_stack {
    11.4  # define ia64_psr(regs)			((struct ia64_psr *) &(regs)->cr_ipsr)
    11.5  #ifdef XEN
    11.6  # define guest_mode(regs)		(ia64_psr(regs)->cpl != 0)
    11.7 -# define guest_kernel_mode(regs)	(ia64_psr(regs)->cpl == 2)
    11.8 +# define guest_kernel_mode(regs)	(ia64_psr(regs)->cpl == CONFIG_CPL0_EMUL)
    11.9  #else
   11.10  # define user_mode(regs)		(((struct ia64_psr *) &(regs)->cr_ipsr)->cpl != 0)
   11.11  #endif
    12.1 --- a/xen/include/asm-ia64/vcpu.h	Mon Jul 02 09:05:24 2007 -0600
    12.2 +++ b/xen/include/asm-ia64/vcpu.h	Mon Jul 02 09:29:08 2007 -0600
    12.3 @@ -203,6 +203,16 @@ static inline s64 vcpu_get_next_timer_ns
    12.4  	return vcpu_get_next_timer_ns;
    12.5  }
    12.6  
    12.7 +static inline u64 vcpu_pl_adjust(u64 reg, u64 shift)
    12.8 +{
    12.9 +	u64 pl;
   12.10 +
   12.11 +	pl = reg & (3UL << shift);
   12.12 +	if (pl < ((u64)CONFIG_CPL0_EMUL << shift))
   12.13 +		pl = (u64)CONFIG_CPL0_EMUL << shift;
   12.14 +	return (reg & ~(3UL << shift)) | pl;
   12.15 +}
   12.16 +
   12.17  #define verbose(a...) do {if (vcpu_verbose) printk(a);} while(0)
   12.18  
   12.19  //#define vcpu_quick_region_check(_tr_regions,_ifa) 1
    13.1 --- a/xen/include/asm-ia64/vmmu.h	Mon Jul 02 09:05:24 2007 -0600
    13.2 +++ b/xen/include/asm-ia64/vmmu.h	Mon Jul 02 09:29:08 2007 -0600
    13.3 @@ -32,6 +32,7 @@
    13.4  #define     VCPU_VHPT_ORDER     (VCPU_VHPT_SHIFT - PAGE_SHIFT)
    13.5  #define     VTLB(v,_x)          (v->arch.vtlb._x)
    13.6  #define     VHPT(v,_x)          (v->arch.vhpt._x)
    13.7 +#define     _PAGE_PL_PRIV       (CONFIG_CPL0_EMUL << 7)
    13.8  #ifndef __ASSEMBLY__
    13.9  
   13.10  #include <xen/config.h>
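
As a quick consistency check of the page-protection constants touched above
(sketch only; the assert harness is an assumption, not Xen code): the PTE pl
field sits at bits 7-8, so replacing the hard-wired _PAGE_PL_2 (2 << 7) with
_PAGE_PL_PRIV (CONFIG_CPL0_EMUL << 7, from xen/include/asm-ia64/vmmu.h above)
turns the old PAGE_PHYS value 0x0010000000000761 in hyperprivop.S into the
new 0x0010000000000661 | _PAGE_PL_PRIV, i.e. 0x00100000000006e1:

    #include <assert.h>
    #include <stdint.h>

    #define CONFIG_CPL0_EMUL  1
    #define _PAGE_PL_2        ((uint64_t)2 << 7)                /* old, hard-wired */
    #define _PAGE_PL_PRIV     ((uint64_t)CONFIG_CPL0_EMUL << 7) /* new */

    int main(void)
    {
        /* old: __pgprot(__DIRTY_BITS | _PAGE_PL_2 | _PAGE_AR_RWX) */
        assert((0x0010000000000661ULL | _PAGE_PL_2) == 0x0010000000000761ULL);
        /* new: __pgprot(__DIRTY_BITS | _PAGE_PL_PRIV | _PAGE_AR_RWX) */
        assert((0x0010000000000661ULL | _PAGE_PL_PRIV) == 0x00100000000006e1ULL);
        return 0;
    }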