ia64/xen-unstable

changeset 16212:359484cee7d9

[IA64] Check range of r2 for mov rr[r3]=r2

This fixes a security hole.
Use C fall-back for thash with long VHPT format.
Add comments.
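
Roughly, the range check that the mov-to-rr fast path now performs looks
like this in C (a sketch only; the type and field names below are
illustrative stand-ins, not the actual Xen declarations):

    #include <stdint.h>

    struct rid_policy {
        uint8_t rid_bits;            /* RID bits the guest may use */
    };

    /* Return 1 if the value written by "mov rr[r3]=r2" may be handled
     * on the fast path, 0 if it must go to the C fault handler. */
    int rr_write_ok(uint64_t r2, const struct rid_policy *d)
    {
        uint64_t rid = (r2 >> 8) & 0xffffff;   /* rr.rid is bits 8..31 */

        /* Bits above rid_bits are reserved for Xen's RID allocation;
         * if the guest sets any of them, fall back to C. */
        return (rid >> d->rid_bits) == 0;
    }

If a reserved RID bit is set, the assembly branches to
vmx_virtualization_fault_back and the C emulation path handles the write.
The thash fast path likewise bails out to C when pta.vf (bit 8) selects
the long VHPT format, and the rr.ps clamp now uses PAGE_SHIFT instead of
a hard-coded 14.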

Signed-off-by: Tristan Gingold <tgingold@free.fr>
author Alex Williamson <alex.williamson@hp.com>
date Wed Oct 24 11:18:42 2007 -0600 (2007-10-24)
parents 8cea24cf57c7
children c9535eb45a8b
files xen/arch/ia64/vmx/optvfault.S
line diff
     1.1 --- a/xen/arch/ia64/vmx/optvfault.S	Wed Oct 24 11:18:10 2007 -0600
     1.2 +++ b/xen/arch/ia64/vmx/optvfault.S	Wed Oct 24 11:18:42 2007 -0600
     1.3 @@ -6,7 +6,9 @@
     1.4   *	Xuefei Xu (Anthony Xu) <anthony.xu@intel.com>
     1.5   */
     1.6  
     1.7 -#include <linux/config.h>
     1.8 +#include <linux/config.h>	
     1.9 +#include <asm/config.h>
    1.10 +#include <asm/pgtable.h>
    1.11  #include <asm/asmmacro.h>
    1.12  #include <asm/kregs.h>
    1.13  #include <asm/offsets.h>
    1.14 @@ -26,6 +28,9 @@
    1.15  #define ACCE_MOV_TO_PSR
    1.16  #define ACCE_THASH
    1.17  
    1.18 +// Inputs are: r21 (= current), r24 (= cause), r25 (= insn), r31 (=saved pr)
    1.19 +
    1.20 +
    1.21  //mov r1=ar3 (only itc is virtualized)
    1.22  GLOBAL_ENTRY(vmx_asm_mov_from_ar)
    1.23  #ifndef ACCE_MOV_FROM_AR
    1.24 @@ -90,13 +95,16 @@ GLOBAL_ENTRY(vmx_asm_mov_to_rr)
    1.25  #ifndef ACCE_MOV_TO_RR
    1.26      br.many vmx_virtualization_fault_back
    1.27  #endif
    1.28 -    extr.u r16=r25,20,7
    1.29 -    extr.u r17=r25,13,7
    1.30 +    add r22=IA64_VCPU_DOMAIN_OFFSET,r21
    1.31 +    extr.u r16=r25,20,7		// r3
    1.32 +    extr.u r17=r25,13,7		// r2
    1.33 +    ;;
    1.34 +    ld8 r22=[r22]		// Get domain
    1.35      movl r20=asm_mov_from_reg
    1.36      ;;
    1.37      adds r30=vmx_asm_mov_to_rr_back_1-asm_mov_from_reg,r20
    1.38 -    shladd r16=r16,4,r20
    1.39 -    mov r22=b0
    1.40 +    shladd r16=r16,4,r20	// get r3
    1.41 +    mov r18=b0			// save b0
    1.42      ;;
    1.43      add r27=VCPU_VRR0_OFS,r21
    1.44      mov b0=r16
    1.45 @@ -104,47 +112,56 @@ GLOBAL_ENTRY(vmx_asm_mov_to_rr)
    1.46      ;;   
    1.47  vmx_asm_mov_to_rr_back_1:
    1.48      adds r30=vmx_asm_mov_to_rr_back_2-asm_mov_from_reg,r20
    1.49 -    shr.u r23=r19,61
    1.50 -    shladd r17=r17,4,r20
    1.51 +    shr.u r23=r19,61		// get RR #
    1.52 +    shladd r17=r17,4,r20	// get r2
    1.53      ;;
    1.54      //if rr7, go back
    1.55      cmp.eq p6,p0=7,r23
    1.56 -    mov b0=r22
    1.57 +    mov b0=r18			// restore b0
    1.58      (p6) br.cond.dpnt.many vmx_virtualization_fault_back
    1.59      ;;
    1.60 -    mov r28=r19
    1.61 +    mov r28=r19			// save r3
    1.62      mov b0=r17
    1.63      br.many b0
    1.64  vmx_asm_mov_to_rr_back_2: 
    1.65      adds r30=vmx_resume_to_guest-asm_mov_from_reg,r20
    1.66 -    shladd r27=r23,3,r27
    1.67 -    ;; // +starting_rid
    1.68 -    st8 [r27]=r19
    1.69 -    mov b0=r30
    1.70 +    shladd r27=r23,3,r27	// address of VRR
    1.71 +    add r22=IA64_DOMAIN_RID_BITS_OFFSET,r22
    1.72      ;;
    1.73 +    ld1 r22=[r22]		// Load rid_bits from domain
    1.74 +    mov b0=r18			// restore b0
    1.75      adds r16=IA64_VCPU_STARTING_RID_OFFSET,r21
    1.76      ;;
    1.77 -    ld4 r16=[r16]
    1.78 +    ld4 r16=[r16]		// load starting_rid
    1.79 +    extr.u r17=r19,8,24		// Extract RID
    1.80      ;;
    1.81 +    shr r17=r17,r22		// Shift out used bits
    1.82      shl r16=r16,8
    1.83      ;;
    1.84 -    add r19=r19,r16
    1.85 +    add r20=r19,r16
    1.86 +    cmp.ne p6,p0=0,r17	// If reserved RID bits are set, use C fall back.
    1.87 +    (p6) br.cond.dpnt.many vmx_virtualization_fault_back
    1.88      ;; //mangling rid 1 and 3
    1.89 -    extr.u r16=r19,8,8
    1.90 -    extr.u r17=r19,24,8
    1.91 -    extr.u r18=r19,2,6 // page size
    1.92 -    ;;
    1.93 -    dep r19=r16,r19,24,8
    1.94 +    extr.u r16=r20,8,8
    1.95 +    extr.u r17=r20,24,8
    1.96 +    mov r24=r18		// saved b0 for resume
    1.97      ;;
    1.98 -    dep r19=r17,r19,8,8
    1.99 +    extr.u r18=r20,2,6 // page size
   1.100 +    dep r20=r16,r20,24,8
   1.101 +    mov b0=r30
   1.102 +    ;;
   1.103 +    dep r20=r17,r20,8,8
   1.104      ;; //set ve 1
   1.105 -    dep r19=-1,r19,0,1  
   1.106 -    cmp.lt p6,p0=14,r18
   1.107 +    dep r20=-1,r20,0,1
   1.108 +    // If ps > PAGE_SHIFT, use PAGE_SHIFT
   1.109 +    cmp.lt p6,p0=PAGE_SHIFT,r18
   1.110      ;;
   1.111 -    (p6) mov r18=14
   1.112 +    (p6) mov r18=PAGE_SHIFT
   1.113      ;;
   1.114 -    (p6) dep r19=r18,r19,2,6
   1.115 -    ;;
   1.116 +    (p6) dep r20=r18,r20,2,6
   1.117 +    ;;	
   1.118 +    st8 [r27]=r19	// Write to vrr.
   1.119 +    // Write to save_rr if rr=0 or rr=4.
   1.120      cmp.eq p6,p0=0,r23
   1.121      ;;
   1.122      cmp.eq.or p6,p0=4,r23
   1.123 @@ -156,11 +173,10 @@ vmx_asm_mov_to_rr_back_2:
   1.124      cmp.eq p7,p0=r0,r0
   1.125      (p6) shladd r17=r23,1,r17
   1.126      ;;
   1.127 -    (p6) st8 [r17]=r19
   1.128 +    (p6) st8 [r17]=r20
   1.129      (p6) cmp.eq p7,p0=VMX_MMU_VIRTUAL,r16 // Set physical rr if in virt mode
   1.130      ;;
   1.131 -    (p7) mov rr[r28]=r19
   1.132 -    mov r24=r22
   1.133 +    (p7) mov rr[r28]=r20
   1.134      br.many b0
   1.135  END(vmx_asm_mov_to_rr)
   1.136  
   1.137 @@ -420,7 +436,7 @@ ENTRY(vmx_asm_dispatch_vexirq)
   1.138      br.many vmx_dispatch_vexirq
   1.139  END(vmx_asm_dispatch_vexirq)
   1.140  
   1.141 -// thash
   1.142 +// thash r1=r3
   1.143  // TODO: add support when pta.vf = 1
   1.144  GLOBAL_ENTRY(vmx_asm_thash)
   1.145  #ifndef ACCE_THASH
   1.146 @@ -433,8 +449,7 @@ GLOBAL_ENTRY(vmx_asm_thash)
   1.147      adds r30=vmx_asm_thash_back1-asm_mov_from_reg,r20
   1.148      shladd r17=r17,4,r20	// get addr of MOVE_FROM_REG(r17)
   1.149      adds r16=IA64_VPD_BASE_OFFSET,r21	// get vcpu.arch.priveregs
   1.150 -    ;;
   1.151 -    mov r24=b0
   1.152 +    mov r24=b0			// save b0
   1.153      ;;
   1.154      ld8 r16=[r16]		// get VPD addr
   1.155      mov b0=r17
   1.156 @@ -452,6 +467,10 @@ vmx_asm_thash_back1:
   1.157      extr.u r29=r17,2,6		// get pta.size
   1.158      ld8 r25=[r27]		// get vcpu->arch.arch_vmx.vrr[r23]'s value
   1.159      ;;
   1.160 +    // Fall-back to C if VF (long format) is set
    1.161 +    tbit.nz p6,p0=r17,8
   1.162 +    mov b0=r24
   1.163 +    (p6) br.cond.dpnt.many vmx_virtualization_fault_back
   1.164      extr.u r25=r25,2,6		// get rr.ps
   1.165      shl r22=r26,r29		// 1UL << pta.size
   1.166      ;;
   1.167 @@ -595,6 +614,7 @@ MOV_FROM_BANK0_REG(31)
   1.168  
   1.169  // mov from reg table
   1.170  // r19:	value, r30: return address
   1.171 +// r26 may be destroyed
   1.172  ENTRY(asm_mov_from_reg)
   1.173      MOV_FROM_REG(0)
   1.174      MOV_FROM_REG(1)