direct-io.hg

changeset 15113:31be207e005e

[IA64] Handle speculative VHPT walk

Since the processor may walk the VHPT speculatively,
the long-format VHPT head entry needs to be disabled
before it is reprogrammed.

Signed-off-by: Anthony Xu <anthony.xu@intel.com>
author Alex Williamson <alex.williamson@hp.com>
date Thu May 10 15:46:30 2007 -0600 (2007-05-10)
parents 8745300bec4e
children 7d8acd319d5b
files xen/arch/ia64/vmx/vmx_ivt.S xen/arch/ia64/vmx/vtlb.c xen/arch/ia64/xen/vhpt.c
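
For orientation before reading the diff: in C terms, the patch makes every
long-format VHPT insert follow a disable / program / re-enable order, since a
hardware walker may read the entry speculatively at any point. A minimal
sketch of that ordering, modeled on the vhpt_insert() hunk below; the struct
layout and the INVALID_TI_TAG/_PAGE_P/wmb() definitions here are illustrative
stand-ins, not the exact Xen definitions:

    /* Sketch only: the field order of the real struct vhpt_lf_entry may
     * differ; ti is bit 63 of the tag word in the long format (see the
     * "set ti=1" dep instructions in the diff below). */
    struct vhpt_lf_entry {
        unsigned long itir;
        unsigned long ti_tag;
        unsigned long page_flags;
    };
    #define INVALID_TI_TAG (1UL << 63)   /* ti=1: entry disabled (assumed) */
    #define _PAGE_P        1UL           /* present bit (assumed)          */
    #define wmb()          asm volatile("mf" ::: "memory")

    static void vhpt_program(struct vhpt_lf_entry *vlfe, unsigned long pte,
                             unsigned long logps, unsigned long tag)
    {
        vlfe->ti_tag = INVALID_TI_TAG;  /* 1. disable: walkers skip it   */
        wmb();                          /* 2. disable visible before the
                                              fields change underneath   */
        vlfe->itir = logps;
        vlfe->page_flags = pte | _PAGE_P;
        /* 3. tag last; volatile store so the compiler cannot hoist it */
        *(volatile unsigned long *)&vlfe->ti_tag = tag;
    }
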
line diff
     1.1 --- a/xen/arch/ia64/vmx/vmx_ivt.S	Thu May 10 15:18:27 2007 -0600
     1.2 +++ b/xen/arch/ia64/vmx/vmx_ivt.S	Thu May 10 15:46:30 2007 -0600
     1.3 @@ -168,11 +168,11 @@ vmx_itlb_loop:
     1.4      adds r16 = VLE_TITAG_OFFSET, r17
     1.5      adds r19 = VLE_CCHAIN_OFFSET, r17
     1.6      ;;
     1.7 -    ld8 r22 = [r16]
     1.8 +    ld8 r24 = [r16]
     1.9      ld8 r23 = [r19]
    1.10      ;;
    1.11      lfetch [r23]
    1.12 -    cmp.eq  p6,p7 = r20, r22
    1.13 +    cmp.eq  p6,p7 = r20, r24
    1.14      ;;
    1.15  (p7)mov r17 = r23;
    1.16  (p7)br.sptk vmx_itlb_loop
    1.17 @@ -180,10 +180,12 @@ vmx_itlb_loop:
    1.18      ld8 r25 = [r17]
    1.19      ld8 r27 = [r18]
    1.20      ld8 r29 = [r28]
    1.21 +    dep r22 = -1,r24,63,1    //set ti=1
    1.22      ;;
    1.23      st8 [r16] = r29, VLE_ITIR_OFFSET - VLE_TITAG_OFFSET
    1.24      st8 [r28] = r22, VLE_ITIR_OFFSET - VLE_TITAG_OFFSET
    1.25      extr.u r19 = r27, 56, 4
    1.26 +    mf
    1.27      ;;
    1.28      ld8 r29 = [r16]
    1.29      ld8 r22 = [r28]
    1.30 @@ -191,10 +193,11 @@ vmx_itlb_loop:
    1.31      dep r25 = r19, r25, 56, 4
    1.32      ;;
    1.33      st8 [r16] = r22
    1.34 -    st8 [r28] = r29
    1.35 +    st8 [r28] = r29, VLE_TITAG_OFFSET - VLE_ITIR_OFFSET
    1.36      st8 [r18] = r25
    1.37      st8 [r17] = r27
    1.38      ;;
    1.39 +    st8.rel [r28] = r24
    1.40      itc.i r25
    1.41      dv_serialize_data
    1.42      mov r17=cr.isr
    1.43 @@ -246,11 +249,11 @@ vmx_dtlb_loop:
    1.44      adds r16 = VLE_TITAG_OFFSET, r17
    1.45      adds r19 = VLE_CCHAIN_OFFSET, r17
    1.46      ;;
    1.47 -    ld8 r22 = [r16]
    1.48 +    ld8 r24 = [r16]
    1.49      ld8 r23 = [r19]
    1.50      ;;
    1.51      lfetch [r23]
    1.52 -    cmp.eq  p6,p7 = r20, r22
    1.53 +    cmp.eq  p6,p7 = r20, r24
    1.54      ;;
    1.55  (p7)mov r17 = r23;
    1.56  (p7)br.sptk vmx_dtlb_loop
    1.57 @@ -258,10 +261,12 @@ vmx_dtlb_loop:
    1.58      ld8 r25 = [r17]
    1.59      ld8 r27 = [r18]
    1.60      ld8 r29 = [r28]
    1.61 +    dep r22 = -1,r24,63,1    //set ti=1
    1.62      ;;
    1.63      st8 [r16] = r29, VLE_ITIR_OFFSET - VLE_TITAG_OFFSET
    1.64      st8 [r28] = r22, VLE_ITIR_OFFSET - VLE_TITAG_OFFSET
    1.65      extr.u r19 = r27, 56, 4
    1.66 +    mf
    1.67      ;;
    1.68      ld8 r29 = [r16]
    1.69      ld8 r22 = [r28]
    1.70 @@ -269,10 +274,11 @@ vmx_dtlb_loop:
    1.71      dep r25 = r19, r25, 56, 4
    1.72      ;;
    1.73      st8 [r16] = r22
    1.74 -    st8 [r28] = r29
    1.75 +    st8 [r28] = r29, VLE_TITAG_OFFSET - VLE_ITIR_OFFSET
    1.76      st8 [r18] = r25
    1.77      st8 [r17] = r27
    1.78 -    ;;    
    1.79 +    ;;
    1.80 +    st8.rel [r28] = r24
    1.81      itc.d r25
    1.82      dv_serialize_data
    1.83      mov r17=cr.isr
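
The two vmx_ivt.S hunks above apply the same discipline in assembly while the
miss handler swaps the matched collision-chain entry with the hash head (only
the head is visible to the hardware walker): dep r22 = -1,r24,63,1 builds the
head's interim tag with ti set (bit 63), storing it disables the head, mf
publishes the disable before the itir and page-flags words are exchanged, and
st8.rel republishes the real tag only once the swap is complete. A rough C
gloss of that window, reusing the sketch types above; head/ent and the
function name are illustrative, not identifiers from the source:

    /* Rough gloss of the swap window in vmx_itlb_loop/vmx_dtlb_loop;
     * 'head' is the hardware-visible hash head, 'ent' the matched
     * software chain entry. */
    static void lf_swap_with_head(struct vhpt_lf_entry *head,
                                  struct vhpt_lf_entry *ent)
    {
        unsigned long tag = ent->ti_tag;       /* ld8 r24 = [r16]       */
        ent->ti_tag = head->ti_tag;            /* st8 [r16] = r29       */
        head->ti_tag = tag | (1UL << 63);      /* dep + st8: ti=1       */
        asm volatile("mf" ::: "memory");       /* mf                    */
        /* ... itir and page_flags are exchanged here ... */
        *(volatile unsigned long *)&head->ti_tag = tag;  /* st8.rel     */
    }
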
     2.1 --- a/xen/arch/ia64/vmx/vtlb.c	Thu May 10 15:18:27 2007 -0600
     2.2 +++ b/xen/arch/ia64/vmx/vtlb.c	Thu May 10 15:46:30 2007 -0600
     2.3 @@ -141,7 +141,7 @@ static void thash_recycle_cch(thash_cb_t
     2.4  
     2.5  static void vmx_vhpt_insert(thash_cb_t *hcb, u64 pte, u64 itir, u64 ifa)
     2.6  {
     2.7 -    u64 tag ,len;
     2.8 +    u64 tag;
     2.9      ia64_rr rr;
    2.10      thash_data_t *head, *cch;
    2.11      pte = pte & ~PAGE_FLAGS_RV_MASK;
    2.12 @@ -155,14 +155,12 @@ static void vmx_vhpt_insert(thash_cb_t *
    2.13          cch = cch->next;
    2.14      }
    2.15      if (cch) {
    2.16 -        if (cch == head) {
    2.17 -            len = head->len;
    2.18 -        } else {
    2.19 +        if (cch != head) {
    2.20              local_irq_disable();
    2.21              cch->page_flags = head->page_flags;
    2.22              cch->itir = head->itir;
    2.23              cch->etag  = head->etag;
    2.24 -            len = head->len;
    2.25 +            head->ti = 1;
    2.26              local_irq_enable();
    2.27          }
    2.28      }
    2.29 @@ -175,16 +173,17 @@ static void vmx_vhpt_insert(thash_cb_t *
    2.30          }
    2.31          local_irq_disable();
    2.32          *cch = *head;
    2.33 +        head->ti = 1;
    2.34          head->next = cch;
    2.35 -        len = cch->len+1;
    2.36 +        head->len = cch->len + 1;
    2.37          cch->len = 0;
    2.38          local_irq_enable();
    2.39      }
    2.40 -
    2.41 +    //here head is invalid
    2.42 +    wmb();
    2.43      head->page_flags=pte;
    2.44 -    head->len = len;
    2.45      head->itir = rr.ps << 2;
    2.46 -    head->etag=tag;
    2.47 +    *(volatile unsigned long*)&head->etag = tag;
    2.48      return;
    2.49  }
    2.50  
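
The vtlb.c hunk above establishes the same order on the C side of
vmx_vhpt_insert(): a head that must be recycled is first copied into a
collision-chain entry, head->ti = 1 disables it for the hardware walker, the
length bookkeeping moves onto the head, and only after wmb() is the head
reprogrammed, with the tag written last through a volatile access so the
compiler cannot hoist the store. Condensed from the patched code above
(allocation, lookup, and error paths elided):

    local_irq_disable();
    *cch = *head;                 /* preserve the old mapping in the chain */
    head->ti = 1;                 /* disable the head before reuse         */
    head->next = cch;
    head->len = cch->len + 1;     /* len lives on the head entry           */
    cch->len = 0;
    local_irq_enable();
    /* here head is invalid */
    wmb();                        /* ti=1 visible before the rewrite       */
    head->page_flags = pte;
    head->itir = rr.ps << 2;
    *(volatile unsigned long *)&head->etag = tag;  /* tag last: re-enable  */
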
     3.1 --- a/xen/arch/ia64/xen/vhpt.c	Thu May 10 15:18:27 2007 -0600
     3.2 +++ b/xen/arch/ia64/xen/vhpt.c	Thu May 10 15:46:30 2007 -0600
     3.3 @@ -78,11 +78,13 @@ void vhpt_insert (unsigned long vadr, un
     3.4  	struct vhpt_lf_entry *vlfe = (struct vhpt_lf_entry *)ia64_thash(vadr);
     3.5  	unsigned long tag = ia64_ttag (vadr);
     3.6  
     3.7 -	/* No need to first disable the entry, since VHPT is per LP
     3.8 -	   and VHPT is TR mapped.  */
     3.9 +	/* Even though VHPT is per VCPU, still need to first disable the entry,
    3.10 +	 * because the processor may support speculative VHPT walk.  */
    3.11 +	vlfe->ti_tag = INVALID_TI_TAG;
    3.12 +	wmb();
    3.13  	vlfe->itir = logps;
    3.14  	vlfe->page_flags = pte | _PAGE_P;
    3.15 -	vlfe->ti_tag = tag;
    3.16 +	*(volatile unsigned long*)&vlfe->ti_tag = tag;
    3.17  }
    3.18  
    3.19  void vhpt_multiple_insert(unsigned long vaddr, unsigned long pte, unsigned long logps)