direct-io.hg

changeset 11498:464acece0dad

[POWERPC][XEN] Clear SLB entries on boot and other cleanups

This patch clears any SLB entries that might have been left behind by
Firmware and also cleans up the Save and Restore of the segments.

Signed-off-by: Jimi Xenidis <jimix@watson.ibm.com>
Signed-off-by: Hollis Blanchard <hollisb@us.ibm.com>
author Jimi Xenidis <jimix@watson.ibm.com>
date Thu Sep 07 02:21:17 2006 -0400 (2006-09-07)
parents 22e01a4864b0
children e20a469dabb4
files xen/arch/powerpc/powerpc64/domain.c xen/arch/powerpc/powerpc64/ppc970.c xen/include/asm-powerpc/domain.h xen/include/asm-powerpc/processor.h
line diff
     1.1 --- a/xen/arch/powerpc/powerpc64/domain.c	Thu Sep 07 01:48:42 2006 -0400
     1.2 +++ b/xen/arch/powerpc/powerpc64/domain.c	Thu Sep 07 02:21:17 2006 -0400
     1.3 @@ -63,24 +63,31 @@ void load_sprs(struct vcpu *v)
     1.4  
     1.5  /* XXX evaluate all isyncs in segment code */
     1.6  
     1.7 -static void flush_slb(struct vcpu *v)
     1.8 +void flush_segments(void)
     1.9  {
    1.10 -    struct slb_entry *slb0 = &v->arch.slb_entries[0];
    1.11 +    struct slb_entry slb0;
    1.12 +    ulong zero = 0;
    1.13  
    1.14 -    slbia();
    1.15 +    __asm__ __volatile__(
    1.16 +        "slbmfev %0,%2\n"
    1.17 +        "slbmfee %1,%2\n"
    1.18 +        :"=&r"(slb0.slb_vsid), "=&r"(slb0.slb_esid)
    1.19 +        :"r"(zero)
    1.20 +        :"memory");
    1.21  
    1.22      /* we manually have to invalidate SLB[0] since slbia doesn't. */
    1.23      /* XXX name magic constants! */
    1.24 -    if (slb0->slb_esid & (1 << (63 - 36))) {
    1.25 +    if (slb0.slb_esid & SLB_ESID_VALID) {
    1.26          ulong rb;
    1.27          ulong class;
    1.28  
    1.29 -        class = (slb0->slb_vsid >> (63 - 56)) & 1ULL;
    1.30 -        rb = slb0->slb_esid & (~0ULL << (63 - 35));
    1.31 -        rb |= class << (63 - 36);
    1.32 +        class = !!(slb0.slb_vsid & SLB_ESID_CLASS);
    1.33 +        rb = slb0.slb_esid & SLB_ESID_MASK;
    1.34 +        rb |= class << SLBIE_CLASS_LOG;
    1.35  
    1.36          slbie(rb);
    1.37      }
    1.38 +    slbia();
    1.39  }
    1.40  
    1.41  void save_segments(struct vcpu *v)
    1.42 @@ -111,7 +118,7 @@ void save_segments(struct vcpu *v)
    1.43  #endif
    1.44      }
    1.45  
    1.46 -    flush_slb(v);
    1.47 +    flush_segments();
    1.48  }
    1.49  
    1.50  void load_segments(struct vcpu *v)
    1.51 @@ -126,7 +133,8 @@ void load_segments(struct vcpu *v)
    1.52  
    1.53          /* FIXME: should we bother to restore invalid entries */
    1.54          /* stuff in the index here */
    1.55 -        esid |= i & ((0x1UL << (63 - 52 + 1)) - 1);
    1.56 +        esid &= ~SLBMTE_ENTRY_MASK;
    1.57 +        esid |= i;
    1.58  
    1.59          __asm__ __volatile__(
    1.60                  "isync\n"
    1.61 @@ -144,3 +152,27 @@ void load_segments(struct vcpu *v)
    1.62  #endif
    1.63      }
    1.64  }
    1.65 +
    1.66 +void dump_segments(int valid)
    1.67 +{
    1.68 +    int i;
    1.69 +
    1.70 +    printk("Dump %s SLB entries:\n", valid ? "VALID" : "ALL");
    1.71 +
    1.72 +    /* save all extra SLBs */
    1.73 +    for (i = 0; i < NUM_SLB_ENTRIES; i++) {
    1.74 +        ulong vsid;
    1.75 +        ulong esid;
    1.76 +
    1.77 +        __asm__ __volatile__(
    1.78 +                "slbmfev %0,%2\n"
    1.79 +                "slbmfee %1,%2\n"
    1.80 +                :"=&r"(vsid), "=&r"(esid)
    1.81 +                :"r"(i)
    1.82 +                :"memory");
    1.83 +
    1.84 +        if (valid && !(esid & SLB_ESID_VALID))
    1.85 +            continue;
    1.86 +        printf("S%02d: 0x%016lx 0x%016lx\n", i, vsid, esid);
    1.87 +    }
    1.88 +}
     2.1 --- a/xen/arch/powerpc/powerpc64/ppc970.c	Thu Sep 07 01:48:42 2006 -0400
     2.2 +++ b/xen/arch/powerpc/powerpc64/ppc970.c	Thu Sep 07 02:21:17 2006 -0400
     2.3 @@ -202,8 +202,10 @@ void cpu_initialize(int cpuid)
     2.4  
     2.5      mthior(cpu0_hior);
     2.6  
     2.7 -    /* for good luck */
     2.8 -    __asm__ __volatile__("isync; slbia; isync" : : : "memory");
     2.9 +#ifdef DEBUG
    2.10 +    dump_segments(1);
    2.11 +#endif
    2.12 +    flush_segments();
    2.13  }
    2.14  
    2.15  void cpu_init_vcpu(struct vcpu *v)
     3.1 --- a/xen/include/asm-powerpc/domain.h	Thu Sep 07 01:48:42 2006 -0400
     3.2 +++ b/xen/include/asm-powerpc/domain.h	Thu Sep 07 02:21:17 2006 -0400
     3.3 @@ -52,6 +52,11 @@ struct slb_entry {
     3.4      ulong slb_vsid;
     3.5      ulong slb_esid;
     3.6  };
     3.7 +#define SLB_ESID_VALID (1ULL << (63 - 36))
     3.8 +#define SLB_ESID_CLASS (1ULL << (63 - 56))
     3.9 +#define SLB_ESID_MASK  (~0ULL << (63 - 35))
    3.10 +#define SLBIE_CLASS_LOG (63-36)
    3.11 +#define SLBMTE_ENTRY_MASK ((0x1UL << (63 - 52 + 1)) - 1)
    3.12  
    3.13  struct xencomm;
    3.14  
     4.1 --- a/xen/include/asm-powerpc/processor.h	Thu Sep 07 01:48:42 2006 -0400
     4.2 +++ b/xen/include/asm-powerpc/processor.h	Thu Sep 07 02:21:17 2006 -0400
     4.3 @@ -50,6 +50,8 @@ extern void cpu_init_vcpu(struct vcpu *)
     4.4  extern int cpu_io_mfn(ulong mfn);
     4.5  extern void save_cpu_sprs(struct vcpu *);
     4.6  extern void load_cpu_sprs(struct vcpu *);
     4.7 +extern void flush_segments(void);
     4.8 +extern void dump_segments(int valid);
     4.9  
    4.10  /* XXX this could also land us in GDB */
    4.11  #define dump_execution_state() BUG()