ia64/xen-unstable

changeset 15370:093bc9dcbbca

Merge with xen-ia64-unstable
author Keir Fraser <keir@xensource.com>
date Sat Jun 16 10:42:06 2007 +0100 (2007-06-16)
parents 3b51eebdf9ab 1feb91894e11
children 2811e6e027a0
files xen/arch/x86/hvm/hpet.c xen/arch/x86/hvm/hvm.c xen/arch/x86/hvm/i8254.c xen/arch/x86/hvm/pmtimer.c xen/arch/x86/hvm/rtc.c xen/arch/x86/hvm/svm/vmcb.c xen/arch/x86/hvm/vpt.c xen/arch/x86/mm/hap/hap.c xen/arch/x86/mm/p2m.c xen/common/kexec.c xen/common/timer.c xen/include/asm-x86/hvm/vcpu.h xen/include/asm-x86/hvm/vpt.h
     1.1 --- a/xen/arch/x86/hvm/hpet.c	Fri Jun 15 13:33:11 2007 -0600
     1.2 +++ b/xen/arch/x86/hvm/hpet.c	Sat Jun 16 10:42:06 2007 +0100
     1.3 @@ -113,6 +113,8 @@ static inline int hpet_check_access_leng
     1.4  
     1.5  static inline uint64_t hpet_read_maincounter(HPETState *h)
     1.6  {
     1.7 +    ASSERT(spin_is_locked(&h->lock));
     1.8 +
     1.9      if ( hpet_enabled(h) )
    1.10          return guest_time_hpet(h->vcpu) + h->mc_offset;
    1.11      else 
    1.12 @@ -131,6 +133,8 @@ static unsigned long hpet_read(
    1.13      if ( hpet_check_access_length(addr, length) != 0 )
    1.14          return ~0UL;
    1.15  
    1.16 +    spin_lock(&h->lock);
    1.17 +
    1.18      val = hpet_read64(h, addr & ~7);
    1.19      if ( (addr & ~7) == HPET_COUNTER )
    1.20          val = hpet_read_maincounter(h);
    1.21 @@ -139,12 +143,15 @@ static unsigned long hpet_read(
    1.22      if ( length != 8 )
    1.23          result = (val >> ((addr & 7) * 8)) & ((1UL << (length * 8)) - 1);
    1.24  
    1.25 +    spin_unlock(&h->lock);
    1.26 +
    1.27      return result;
    1.28  }
    1.29  
    1.30  static void hpet_stop_timer(HPETState *h, unsigned int tn)
    1.31  {
    1.32      ASSERT(tn < HPET_TIMER_NUM);
    1.33 +    ASSERT(spin_is_locked(&h->lock));
    1.34      stop_timer(&h->timers[tn]);
    1.35  }
    1.36  
    1.37 @@ -157,7 +164,8 @@ static void hpet_set_timer(HPETState *h,
    1.38      uint64_t tn_cmp, cur_tick, diff;
    1.39  
    1.40      ASSERT(tn < HPET_TIMER_NUM);
    1.41 -    
    1.42 +    ASSERT(spin_is_locked(&h->lock));
    1.43 +
    1.44      if ( !hpet_enabled(h) || !timer_enabled(h, tn) )
    1.45          return;
    1.46  
    1.47 @@ -213,6 +221,8 @@ static void hpet_write(
    1.48      if ( hpet_check_access_length(addr, length) != 0 )
    1.49          return;
    1.50  
    1.51 +    spin_lock(&h->lock);
    1.52 +
    1.53      old_val = hpet_read64(h, addr & ~7);
    1.54      if ( (addr & ~7) == HPET_COUNTER )
    1.55          old_val = hpet_read_maincounter(h);
    1.56 @@ -302,6 +312,8 @@ static void hpet_write(
    1.57          /* Ignore writes to unsupported and reserved registers. */
    1.58          break;
    1.59      }
    1.60 +
    1.61 +    spin_unlock(&h->lock);
    1.62  }
    1.63  
    1.64  static int hpet_range(struct vcpu *v, unsigned long addr)
    1.65 @@ -321,6 +333,8 @@ static void hpet_route_interrupt(HPETSta
    1.66      unsigned int tn_int_route = timer_int_route(h, tn);
    1.67      struct domain *d = h->vcpu->domain;
    1.68  
    1.69 +    ASSERT(spin_is_locked(&h->lock));
    1.70 +
    1.71      if ( (tn <= 1) && (h->hpet.config & HPET_CFG_LEGACY) )
    1.72      {
    1.73          /* if LegacyReplacementRoute bit is set, HPET specification requires
    1.74 @@ -352,8 +366,13 @@ static void hpet_timer_fn(void *opaque)
    1.75      HPETState *h = htfi->hs;
    1.76      unsigned int tn = htfi->tn;
    1.77  
    1.78 +    spin_lock(&h->lock);
    1.79 +
    1.80      if ( !hpet_enabled(h) || !timer_enabled(h, tn) )
    1.81 +    {
    1.82 +        spin_unlock(&h->lock);
    1.83          return;
    1.84 +    }
    1.85  
    1.86      hpet_route_interrupt(h, tn);
    1.87  
    1.88 @@ -374,6 +393,8 @@ static void hpet_timer_fn(void *opaque)
    1.89          set_timer(&h->timers[tn], 
    1.90                    NOW() + hpet_tick_to_ns(h, h->hpet.period[tn]));
    1.91      }
    1.92 +
    1.93 +    spin_unlock(&h->lock);
    1.94  }
    1.95  
    1.96  void hpet_migrate_timers(struct vcpu *v)
    1.97 @@ -391,12 +412,19 @@ void hpet_migrate_timers(struct vcpu *v)
    1.98  static int hpet_save(struct domain *d, hvm_domain_context_t *h)
    1.99  {
   1.100      HPETState *hp = &d->arch.hvm_domain.pl_time.vhpet;
   1.101 +    int rc;
   1.102 +
   1.103 +    spin_lock(&hp->lock);
   1.104  
   1.105      /* Write the proper value into the main counter */
   1.106      hp->hpet.mc64 = hp->mc_offset + guest_time_hpet(hp->vcpu);
   1.107  
   1.108      /* Save the HPET registers */
   1.109 -    return hvm_save_entry(HPET, 0, h, &hp->hpet);
   1.110 +    rc = hvm_save_entry(HPET, 0, h, &hp->hpet);
   1.111 +
   1.112 +    spin_unlock(&hp->lock);
   1.113 +
   1.114 +    return rc;
   1.115  }
   1.116  
   1.117  static int hpet_load(struct domain *d, hvm_domain_context_t *h)
   1.118 @@ -404,9 +432,14 @@ static int hpet_load(struct domain *d, h
   1.119      HPETState *hp = &d->arch.hvm_domain.pl_time.vhpet;
   1.120      int i;
   1.121  
   1.122 +    spin_lock(&hp->lock);
   1.123 +
   1.124      /* Reload the HPET registers */
   1.125      if ( hvm_load_entry(HPET, h, &hp->hpet) )
   1.126 +    {
   1.127 +        spin_unlock(&hp->lock);
   1.128          return -EINVAL;
   1.129 +    }
   1.130      
   1.131      /* Recalculate the offset between the main counter and guest time */
   1.132      hp->mc_offset = hp->hpet.mc64 - guest_time_hpet(hp->vcpu);
   1.133 @@ -415,6 +448,8 @@ static int hpet_load(struct domain *d, h
   1.134      for ( i = 0; i < HPET_TIMER_NUM; i++ )
   1.135          hpet_set_timer(hp, i);
   1.136  
   1.137 +    spin_unlock(&hp->lock);
   1.138 +
   1.139      return 0;
   1.140  }
   1.141  
   1.142 @@ -427,6 +462,8 @@ void hpet_init(struct vcpu *v)
   1.143  
   1.144      memset(h, 0, sizeof(HPETState));
   1.145  
   1.146 +    spin_lock_init(&h->lock);
   1.147 +
   1.148      h->vcpu = v;
   1.149      h->tsc_freq = ticks_per_sec(v);
   1.150  
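
The hpet.c hunks above follow a single discipline: external entry points (the MMIO read/write handlers, the timer callback, save/load) acquire the per-device lock, while internal helpers such as hpet_read_maincounter() and hpet_stop_timer() merely ASSERT() that it is already held. A minimal compilable sketch of that split, with pthread_mutex_t standing in for Xen's spinlock_t and all names illustrative rather than the Xen API:

    #include <pthread.h>
    #include <stdint.h>

    typedef struct {
        pthread_mutex_t lock;   /* stands in for spinlock_t */
        uint64_t mc_offset;     /* main-counter offset, as in HPETState */
    } hpet_sketch_t;

    /* Internal helper: caller must already hold the lock (Xen writes
     * this as ASSERT(spin_is_locked(&h->lock))). */
    static uint64_t read_maincounter(hpet_sketch_t *h, uint64_t guest_time)
    {
        return guest_time + h->mc_offset;
    }

    /* External entry point: takes and releases the lock itself. */
    uint64_t mmio_read_counter(hpet_sketch_t *h, uint64_t guest_time)
    {
        uint64_t val;
        pthread_mutex_lock(&h->lock);
        val = read_maincounter(h, guest_time);
        pthread_mutex_unlock(&h->lock);
        return val;
    }

Keeping the lock out of the helpers avoids recursive locking when one locked entry point (hpet_write, say) calls several helpers back to back.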
     2.1 --- a/xen/arch/x86/hvm/hvm.c	Fri Jun 15 13:33:11 2007 -0600
     2.2 +++ b/xen/arch/x86/hvm/hvm.c	Sat Jun 16 10:42:06 2007 +0100
     2.3 @@ -401,6 +401,7 @@ int hvm_vcpu_initialise(struct vcpu *v)
     2.4          get_ioreq(v)->vp_eport = v->arch.hvm_vcpu.xen_port;
     2.5      spin_unlock(&v->domain->arch.hvm_domain.ioreq.lock);
     2.6  
     2.7 +    spin_lock_init(&v->arch.hvm_vcpu.tm_lock);
     2.8      INIT_LIST_HEAD(&v->arch.hvm_vcpu.tm_list);
     2.9  
    2.10      if ( v->vcpu_id == 0 )
     3.1 --- a/xen/arch/x86/hvm/i8254.c	Fri Jun 15 13:33:11 2007 -0600
     3.2 +++ b/xen/arch/x86/hvm/i8254.c	Sat Jun 16 10:42:06 2007 +0100
     3.3 @@ -82,6 +82,8 @@ static int pit_get_count(PITState *pit, 
     3.4      struct hvm_hw_pit_channel *c = &pit->hw.channels[channel];
     3.5      struct vcpu *v = vpit_vcpu(pit);
     3.6  
     3.7 +    ASSERT(spin_is_locked(&pit->lock));
     3.8 +
     3.9      d = muldiv64(hvm_get_guest_time(v) - pit->count_load_time[channel],
    3.10                   PIT_FREQ, ticks_per_sec(v));
    3.11  
    3.12 @@ -111,6 +113,8 @@ static int pit_get_out(PITState *pit, in
    3.13      int out;
    3.14      struct vcpu *v = vpit_vcpu(pit);
    3.15  
    3.16 +    ASSERT(spin_is_locked(&pit->lock));
    3.17 +
    3.18      d = muldiv64(hvm_get_guest_time(v) - pit->count_load_time[channel], 
    3.19                   PIT_FREQ, ticks_per_sec(v));
    3.20  
    3.21 @@ -143,6 +147,8 @@ static void pit_set_gate(PITState *pit, 
    3.22      struct hvm_hw_pit_channel *s = &pit->hw.channels[channel];
    3.23      struct vcpu *v = vpit_vcpu(pit);
    3.24  
    3.25 +    ASSERT(spin_is_locked(&pit->lock));
    3.26 +
    3.27      switch ( s->mode )
    3.28      {
    3.29      default:
    3.30 @@ -165,6 +171,7 @@ static void pit_set_gate(PITState *pit, 
    3.31  
    3.32  int pit_get_gate(PITState *pit, int channel)
    3.33  {
    3.34 +    ASSERT(spin_is_locked(&pit->lock));
    3.35      return pit->hw.channels[channel].gate;
    3.36  }
    3.37  
    3.38 @@ -181,10 +188,15 @@ static void pit_load_count(PITState *pit
    3.39      struct periodic_time *pt = &pit->pt[channel];
    3.40      struct vcpu *v = vpit_vcpu(pit);
    3.41  
    3.42 +    ASSERT(spin_is_locked(&pit->lock));
    3.43 +
    3.44      if ( val == 0 )
    3.45          val = 0x10000;
    3.46  
    3.47 -    pit->count_load_time[channel] = hvm_get_guest_time(pt->vcpu);
    3.48 +    if ( v == NULL )
    3.49 +        rdtscll(pit->count_load_time[channel]);
    3.50 +    else
    3.51 +        pit->count_load_time[channel] = hvm_get_guest_time(v);
    3.52      s->count = val;
    3.53      period = DIV_ROUND((val * 1000000000ULL), PIT_FREQ);
    3.54  
    3.55 @@ -209,23 +221,29 @@ static void pit_load_count(PITState *pit
    3.56      }
    3.57  }
    3.58  
    3.59 -static void pit_latch_count(PITState *s, int channel)
    3.60 +static void pit_latch_count(PITState *pit, int channel)
    3.61  {
    3.62 -    struct hvm_hw_pit_channel *c = &s->hw.channels[channel];
    3.63 +    struct hvm_hw_pit_channel *c = &pit->hw.channels[channel];
    3.64 +
    3.65 +    ASSERT(spin_is_locked(&pit->lock));
    3.66 +
    3.67      if ( !c->count_latched )
    3.68      {
    3.69 -        c->latched_count = pit_get_count(s, channel);
    3.70 +        c->latched_count = pit_get_count(pit, channel);
    3.71          c->count_latched = c->rw_mode;
    3.72      }
    3.73  }
    3.74  
    3.75 -static void pit_latch_status(PITState *s, int channel)
    3.76 +static void pit_latch_status(PITState *pit, int channel)
    3.77  {
    3.78 -    struct hvm_hw_pit_channel *c = &s->hw.channels[channel];
    3.79 +    struct hvm_hw_pit_channel *c = &pit->hw.channels[channel];
    3.80 +
    3.81 +    ASSERT(spin_is_locked(&pit->lock));
    3.82 +
    3.83      if ( !c->status_latched )
    3.84      {
    3.85          /* TODO: Return NULL COUNT (bit 6). */
    3.86 -        c->status = ((pit_get_out(s, channel) << 7) |
    3.87 +        c->status = ((pit_get_out(pit, channel) << 7) |
    3.88                       (c->rw_mode << 4) |
    3.89                       (c->mode << 1) |
    3.90                       c->bcd);
    3.91 @@ -241,6 +259,8 @@ static void pit_ioport_write(struct PITS
    3.92      val  &= 0xff;
    3.93      addr &= 3;
    3.94  
    3.95 +    spin_lock(&pit->lock);
    3.96 +
    3.97      if ( addr == 3 )
    3.98      {
    3.99          channel = val >> 6;
   3.100 @@ -304,6 +324,8 @@ static void pit_ioport_write(struct PITS
   3.101              break;
   3.102          }
   3.103      }
   3.104 +
   3.105 +    spin_unlock(&pit->lock);
   3.106  }
   3.107  
   3.108  static uint32_t pit_ioport_read(struct PITState *pit, uint32_t addr)
   3.109 @@ -314,6 +336,8 @@ static uint32_t pit_ioport_read(struct P
   3.110      addr &= 3;
   3.111      s = &pit->hw.channels[addr];
   3.112  
   3.113 +    spin_lock(&pit->lock);
   3.114 +
   3.115      if ( s->status_latched )
   3.116      {
   3.117          s->status_latched = 0;
   3.118 @@ -364,12 +388,16 @@ static uint32_t pit_ioport_read(struct P
   3.119          }
   3.120      }
   3.121  
   3.122 +    spin_unlock(&pit->lock);
   3.123 +
   3.124      return ret;
   3.125  }
   3.126  
   3.127  void pit_stop_channel0_irq(PITState *pit)
   3.128  {
   3.129 +    spin_lock(&pit->lock);
   3.130      destroy_periodic_time(&pit->pt[0]);
   3.131 +    spin_unlock(&pit->lock);
   3.132  }
   3.133  
   3.134  #ifdef HVM_DEBUG_SUSPEND
   3.135 @@ -422,11 +450,18 @@ static void pit_info(PITState *pit)
   3.136  static int pit_save(struct domain *d, hvm_domain_context_t *h)
   3.137  {
   3.138      PITState *pit = domain_vpit(d);
   3.139 +    int rc;
   3.140 +
   3.141 +    spin_lock(&pit->lock);
   3.142      
   3.143      pit_info(pit);
   3.144  
   3.145      /* Save the PIT hardware state */
   3.146 -    return hvm_save_entry(PIT, 0, h, &pit->hw);
   3.147 +    rc = hvm_save_entry(PIT, 0, h, &pit->hw);
   3.148 +
   3.149 +    spin_unlock(&pit->lock);
   3.150 +
   3.151 +    return rc;
   3.152  }
   3.153  
   3.154  static int pit_load(struct domain *d, hvm_domain_context_t *h)
   3.155 @@ -434,9 +469,14 @@ static int pit_load(struct domain *d, hv
   3.156      PITState *pit = domain_vpit(d);
   3.157      int i;
   3.158  
   3.159 +    spin_lock(&pit->lock);
   3.160 +
   3.161      /* Restore the PIT hardware state */
   3.162      if ( hvm_load_entry(PIT, h, &pit->hw) )
   3.163 +    {
   3.164 +        spin_unlock(&pit->lock);
   3.165          return 1;
   3.166 +    }
   3.167      
   3.168      /* Recreate platform timers from hardware state.  There will be some 
   3.169       * time jitter here, but the wall-clock will have jumped massively, so 
   3.170 @@ -448,6 +488,9 @@ static int pit_load(struct domain *d, hv
   3.171      }
   3.172  
   3.173      pit_info(pit);
   3.174 +
   3.175 +    spin_unlock(&pit->lock);
   3.176 +
   3.177      return 0;
   3.178  }
   3.179  
   3.180 @@ -456,17 +499,15 @@ HVM_REGISTER_SAVE_RESTORE(PIT, pit_save,
   3.181  void pit_init(struct vcpu *v, unsigned long cpu_khz)
   3.182  {
   3.183      PITState *pit = vcpu_vpit(v);
   3.184 -    struct periodic_time *pt;
   3.185      struct hvm_hw_pit_channel *s;
   3.186      int i;
   3.187  
   3.188 -    pt = &pit->pt[0];  
   3.189 -    pt[0].vcpu = v;
   3.190 -    pt[1].vcpu = v;
   3.191 -    pt[2].vcpu = v;
   3.192 +    spin_lock_init(&pit->lock);
   3.193 +
   3.194 +    /* Some sub-functions assert that they are called with the lock held. */
   3.195 +    spin_lock(&pit->lock);
   3.196  
   3.197      register_portio_handler(v->domain, PIT_BASE, 4, handle_pit_io);
   3.198 -    /* register the speaker port */
   3.199      register_portio_handler(v->domain, 0x61, 1, handle_speaker_io);
   3.200      ticks_per_sec(v) = cpu_khz * (int64_t)1000;
   3.201  
   3.202 @@ -477,6 +518,8 @@ void pit_init(struct vcpu *v, unsigned l
   3.203          s->gate = (i != 2);
   3.204          pit_load_count(pit, i, 0);
   3.205      }
   3.206 +
   3.207 +    spin_unlock(&pit->lock);
   3.208  }
   3.209  
   3.210  void pit_deinit(struct domain *d)
   3.211 @@ -492,10 +535,10 @@ static int handle_pit_io(ioreq_t *p)
   3.212  
   3.213      if ( (p->size != 1) || p->data_is_ptr || (p->type != IOREQ_TYPE_PIO) )
   3.214      {
   3.215 -        gdprintk(XENLOG_WARNING, "HVM_PIT bad access\n");
   3.216 +        gdprintk(XENLOG_WARNING, "PIT bad access\n");
   3.217          return 1;
   3.218      }
   3.219 -    
   3.220 +
   3.221      if ( p->dir == IOREQ_WRITE )
   3.222      {
   3.223          pit_ioport_write(vpit, p->addr, p->data);
   3.224 @@ -505,7 +548,7 @@ static int handle_pit_io(ioreq_t *p)
   3.225          if ( (p->addr & 3) != 3 )
   3.226              p->data = pit_ioport_read(vpit, p->addr);
   3.227          else
   3.228 -            gdprintk(XENLOG_WARNING, "HVM_PIT: read A1:A0=3!\n");
   3.229 +            gdprintk(XENLOG_WARNING, "PIT: read A1:A0=3!\n");
   3.230      }
   3.231  
   3.232      return 1;
   3.233 @@ -533,15 +576,19 @@ static int handle_speaker_io(ioreq_t *p)
   3.234  
   3.235      if ( (p->size != 1) || p->data_is_ptr || (p->type != IOREQ_TYPE_PIO) )
   3.236      {
   3.237 -        gdprintk(XENLOG_WARNING, "HVM_SPEAKER bad access\n");
   3.238 +        gdprintk(XENLOG_WARNING, "PIT_SPEAKER bad access\n");
   3.239          return 1;
   3.240      }
   3.241  
   3.242 +    spin_lock(&vpit->lock);
   3.243 +
   3.244      if ( p->dir == IOREQ_WRITE )
   3.245          speaker_ioport_write(vpit, p->addr, p->data);
   3.246      else
   3.247          p->data = speaker_ioport_read(vpit, p->addr);
   3.248  
   3.249 +    spin_unlock(&vpit->lock);
   3.250 +
   3.251      return 1;
   3.252  }
   3.253  
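
The i8254.c locking matters because the PIT's port protocol is stateful: a latch command freezes the current count, and subsequent reads of the data port return the latched low byte and then the high byte before reverting to live reads. pit_ioport_read() walks exactly such a state machine, so concurrent vcpus must serialise on pit->lock. A simplified, hypothetical sketch of the latched-read sequence (the real code tracks this via c->count_latched and c->rw_mode; pthread_mutex_t stands in for the spinlock):

    #include <pthread.h>
    #include <stdint.h>

    typedef struct {
        pthread_mutex_t lock;
        int latched;             /* 0 = live reads, 1 = low byte next, 2 = high */
        uint16_t latched_count;
    } pit_chan_sketch_t;

    uint8_t read_counter_port(pit_chan_sketch_t *c, uint16_t live_count)
    {
        uint8_t ret;
        pthread_mutex_lock(&c->lock);
        switch ( c->latched )
        {
        case 1:                               /* low byte of the latch */
            ret = c->latched_count & 0xff;
            c->latched = 2;
            break;
        case 2:                               /* high byte; latch now spent */
            ret = c->latched_count >> 8;
            c->latched = 0;
            break;
        default:                              /* no latch: read the live count */
            ret = live_count & 0xff;
            break;
        }
        pthread_mutex_unlock(&c->lock);
        return ret;
    }

Without the lock, two vcpus reading port 0x40 concurrently could each consume half of the latch and both observe torn values.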
     4.1 --- a/xen/arch/x86/hvm/pmtimer.c	Fri Jun 15 13:33:11 2007 -0600
     4.2 +++ b/xen/arch/x86/hvm/pmtimer.c	Sat Jun 16 10:42:06 2007 +0100
     4.3 @@ -53,6 +53,8 @@
     4.4  /* Dispatch SCIs based on the PM1a_STS and PM1a_EN registers */
     4.5  static void pmt_update_sci(PMTState *s)
     4.6  {
     4.7 +    ASSERT(spin_is_locked(&s->lock));
     4.8 +
     4.9      if ( s->pm.pm1a_en & s->pm.pm1a_sts & SCI_MASK )
    4.10          hvm_isa_irq_assert(s->vcpu->domain, SCI_IRQ);
    4.11      else
    4.12 @@ -66,6 +68,8 @@ static void pmt_update_time(PMTState *s)
    4.13      uint64_t curr_gtime;
    4.14      uint32_t msb = s->pm.tmr_val & TMR_VAL_MSB;
    4.15      
    4.16 +    ASSERT(spin_is_locked(&s->lock));
    4.17 +
    4.18      /* Update the timer */
    4.19      curr_gtime = hvm_get_guest_time(s->vcpu);
    4.20      s->pm.tmr_val += ((curr_gtime - s->last_gtime) * s->scale) >> 32;
    4.21 @@ -89,6 +93,8 @@ static void pmt_timer_callback(void *opa
    4.22      uint32_t pmt_cycles_until_flip;
    4.23      uint64_t time_until_flip;
    4.24  
    4.25 +    spin_lock(&s->lock);
    4.26 +
    4.27      /* Recalculate the timer and make sure we get an SCI if we need one */
    4.28      pmt_update_time(s);
    4.29  
    4.30 @@ -103,9 +109,10 @@ static void pmt_timer_callback(void *opa
    4.31  
    4.32      /* Wake up again near the next bit-flip */
    4.33      set_timer(&s->timer, NOW() + time_until_flip + MILLISECS(1));
    4.34 +
    4.35 +    spin_unlock(&s->lock);
    4.36  }
    4.37  
    4.38 -
    4.39  /* Handle port I/O to the PM1a_STS and PM1a_EN registers */
    4.40  static int handle_evt_io(ioreq_t *p)
    4.41  {
    4.42 @@ -114,7 +121,9 @@ static int handle_evt_io(ioreq_t *p)
    4.43      uint32_t addr, data, byte;
    4.44      int i;
    4.45  
    4.46 -    if ( p->dir == 0 ) /* Write */
    4.47 +    spin_lock(&s->lock);
    4.48 +
    4.49 +    if ( p->dir == IOREQ_WRITE )
    4.50      {
    4.51          /* Handle this I/O one byte at a time */
    4.52          for ( i = p->size, addr = p->addr, data = p->data;
    4.53 @@ -122,7 +131,7 @@ static int handle_evt_io(ioreq_t *p)
    4.54                i--, addr++, data >>= 8 )
    4.55          {
    4.56              byte = data & 0xff;
    4.57 -            switch(addr) 
    4.58 +            switch ( addr )
    4.59              {
    4.60                  /* PM1a_STS register bits are write-to-clear */
    4.61              case PM1a_STS_ADDR:
    4.62 @@ -149,7 +158,7 @@ static int handle_evt_io(ioreq_t *p)
    4.63          /* Fix up the SCI state to match the new register state */
    4.64          pmt_update_sci(s);
    4.65      }
    4.66 -    else /* Read */
    4.67 +    else /* p->dir == IOREQ_READ */
    4.68      {
    4.69          data = s->pm.pm1a_sts | (((uint32_t) s->pm.pm1a_en) << 16);
    4.70          data >>= 8 * (p->addr - PM1a_STS_ADDR);
    4.71 @@ -157,6 +166,9 @@ static int handle_evt_io(ioreq_t *p)
    4.72          else if ( p->size == 2 ) data &= 0xffff;
    4.73          p->data = data;
    4.74      }
    4.75 +
    4.76 +    spin_unlock(&s->lock);
    4.77 +
    4.78      return 1;
    4.79  }
    4.80  
    4.81 @@ -167,29 +179,31 @@ static int handle_pmt_io(ioreq_t *p)
    4.82      struct vcpu *v = current;
    4.83      PMTState *s = &v->domain->arch.hvm_domain.pl_time.vpmt;
    4.84  
    4.85 -    if (p->size != 4 ||
    4.86 -        p->data_is_ptr ||
    4.87 -        p->type != IOREQ_TYPE_PIO){
    4.88 -        printk("HVM_PMT: wrong PM timer IO\n");
    4.89 +    if ( (p->size != 4) || p->data_is_ptr || (p->type != IOREQ_TYPE_PIO) )
    4.90 +    {
    4.91 +        gdprintk(XENLOG_WARNING, "HVM_PMT bad access\n");
    4.92          return 1;
    4.93      }
    4.94      
    4.95 -    if (p->dir == 0) { /* write */
    4.96 -        /* PM_TMR_BLK is read-only */
    4.97 -        return 1;
    4.98 -    } else if (p->dir == 1) { /* read */
    4.99 +    if ( p->dir == IOREQ_READ )
   4.100 +    {
   4.101 +        spin_lock(&s->lock);
   4.102          pmt_update_time(s);
   4.103          p->data = s->pm.tmr_val;
   4.104 +        spin_unlock(&s->lock);
   4.105          return 1;
   4.106      }
   4.107 +
   4.108      return 0;
   4.109  }
   4.110  
   4.111  static int pmtimer_save(struct domain *d, hvm_domain_context_t *h)
   4.112  {
   4.113      PMTState *s = &d->arch.hvm_domain.pl_time.vpmt;
   4.114 -    uint32_t msb = s->pm.tmr_val & TMR_VAL_MSB;
   4.115 -    uint32_t x;
   4.116 +    uint32_t x, msb = s->pm.tmr_val & TMR_VAL_MSB;
   4.117 +    int rc;
   4.118 +
   4.119 +    spin_lock(&s->lock);
   4.120  
   4.121      /* Update the counter to the guest's current time.  We always save
   4.122       * with the domain paused, so the saved time should be after the
   4.123 @@ -202,22 +216,33 @@ static int pmtimer_save(struct domain *d
   4.124      /* No point in setting the SCI here because we'll already have saved the 
   4.125       * IRQ and *PIC state; we'll fix it up when we restore the domain */
   4.126  
   4.127 -    return hvm_save_entry(PMTIMER, 0, h, &s->pm);
   4.128 +    rc = hvm_save_entry(PMTIMER, 0, h, &s->pm);
   4.129 +
   4.130 +    spin_unlock(&s->lock);
   4.131 +
   4.132 +    return rc;
   4.133  }
   4.134  
   4.135  static int pmtimer_load(struct domain *d, hvm_domain_context_t *h)
   4.136  {
   4.137      PMTState *s = &d->arch.hvm_domain.pl_time.vpmt;
   4.138  
   4.139 +    spin_lock(&s->lock);
   4.140 +
   4.141      /* Reload the registers */
   4.142      if ( hvm_load_entry(PMTIMER, h, &s->pm) )
   4.143 +    {
   4.144 +        spin_unlock(&s->lock);
   4.145          return -EINVAL;
   4.146 +    }
   4.147  
   4.148      /* Calculate future counter values from now. */
   4.149      s->last_gtime = hvm_get_guest_time(s->vcpu);
   4.150  
   4.151      /* Set the SCI state from the registers */ 
   4.152      pmt_update_sci(s);
   4.153 +
   4.154 +    spin_unlock(&s->lock);
   4.155      
   4.156      return 0;
   4.157  }
   4.158 @@ -225,14 +250,11 @@ static int pmtimer_load(struct domain *d
   4.159  HVM_REGISTER_SAVE_RESTORE(PMTIMER, pmtimer_save, pmtimer_load, 
   4.160                            1, HVMSR_PER_DOM);
   4.161  
   4.162 -
   4.163  void pmtimer_init(struct vcpu *v)
   4.164  {
   4.165      PMTState *s = &v->domain->arch.hvm_domain.pl_time.vpmt;
   4.166  
   4.167 -    s->pm.tmr_val = 0;
   4.168 -    s->pm.pm1a_sts = 0;
   4.169 -    s->pm.pm1a_en = 0;
   4.170 +    spin_lock_init(&s->lock);
   4.171  
   4.172      s->scale = ((uint64_t)FREQUENCE_PMTIMER << 32) / ticks_per_sec(v);
   4.173      s->vcpu = v;
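
Two details in the pmtimer.c hunks deserve a note. handle_pmt_io() now takes the lock only on the read path, since the timer register block is read-only and writes take no action. And, per the comment in the event handler, PM1a_STS bits are write-to-clear while PM1a_EN is ordinary read/write, so a status write must mask bits off rather than assign. A simplified sketch of those two write semantics (register widths and names assumed for illustration):

    #include <stdint.h>

    typedef struct {
        uint16_t pm1a_sts;      /* status bits: write-one-to-clear */
        uint16_t pm1a_en;       /* enable bits: ordinary read/write */
    } pm1a_sketch_t;

    void write_status_byte(pm1a_sketch_t *r, uint8_t byte)
    {
        r->pm1a_sts &= ~(uint16_t)byte;   /* writing a 1 clears that bit */
    }

    void write_enable_byte(pm1a_sketch_t *r, uint8_t byte)
    {
        r->pm1a_en = byte;                /* plain overwrite */
    }

After either kind of write the real handler calls pmt_update_sci() so the SCI line tracks the new pm1a_en & pm1a_sts state.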
     5.1 --- a/xen/arch/x86/hvm/rtc.c	Fri Jun 15 13:33:11 2007 -0600
     5.2 +++ b/xen/arch/x86/hvm/rtc.c	Sat Jun 16 10:42:06 2007 +0100
     5.3 @@ -34,10 +34,12 @@
     5.4                                         arch.hvm_domain.pl_time.vrtc))
     5.5  #define vrtc_vcpu(rtc)   (vrtc_domain(rtc)->vcpu[0])
     5.6  
     5.7 -void rtc_periodic_cb(struct vcpu *v, void *opaque)
     5.8 +static void rtc_periodic_cb(struct vcpu *v, void *opaque)
     5.9  {
    5.10      RTCState *s = opaque;
    5.11 +    spin_lock(&s->lock);
    5.12      s->hw.cmos_data[RTC_REG_C] |= 0xc0;
    5.13 +    spin_unlock(&s->lock);
    5.14  }
    5.15  
    5.16  int is_rtc_periodic_irq(void *opaque)
    5.17 @@ -55,6 +57,8 @@ static void rtc_timer_update(RTCState *s
    5.18      int period_code, period;
    5.19      struct vcpu *v = vrtc_vcpu(s);
    5.20  
    5.21 +    ASSERT(spin_is_locked(&s->lock));
    5.22 +
    5.23      period_code = s->hw.cmos_data[RTC_REG_A] & RTC_RATE_SELECT;
    5.24      if ( (period_code != 0) && (s->hw.cmos_data[RTC_REG_B] & RTC_PIE) )
    5.25      {
    5.26 @@ -78,14 +82,21 @@ static int rtc_ioport_write(void *opaque
    5.27  {
    5.28      RTCState *s = opaque;
    5.29  
    5.30 +    spin_lock(&s->lock);
    5.31 +
    5.32      if ( (addr & 1) == 0 )
    5.33      {
    5.34 -        s->hw.cmos_index = data & 0x7f;
    5.35 -        return (s->hw.cmos_index < RTC_CMOS_SIZE);
    5.36 +        data &= 0x7f;
    5.37 +        s->hw.cmos_index = data;
    5.38 +        spin_unlock(&s->lock);
    5.39 +        return (data < RTC_CMOS_SIZE);
    5.40      }
    5.41  
    5.42      if ( s->hw.cmos_index >= RTC_CMOS_SIZE )
    5.43 +    {
    5.44 +        spin_unlock(&s->lock);
    5.45          return 0;
    5.46 +    }
    5.47  
    5.48      switch ( s->hw.cmos_index )
    5.49      {
    5.50 @@ -134,6 +145,8 @@ static int rtc_ioport_write(void *opaque
    5.51          break;
    5.52      }
    5.53  
    5.54 +    spin_unlock(&s->lock);
    5.55 +
    5.56      return 1;
    5.57  }
    5.58  
    5.59 @@ -158,6 +171,8 @@ static void rtc_set_time(RTCState *s)
    5.60      struct tm *tm = &s->current_tm;
    5.61      unsigned long before, after; /* XXX s_time_t */
    5.62        
    5.63 +    ASSERT(spin_is_locked(&s->lock));
    5.64 +
    5.65      before = mktime(tm->tm_year, tm->tm_mon, tm->tm_mday,
    5.66  		    tm->tm_hour, tm->tm_min, tm->tm_sec);
    5.67      
    5.68 @@ -182,6 +197,8 @@ static void rtc_copy_date(RTCState *s)
    5.69      const struct tm *tm = &s->current_tm;
    5.70      struct domain *d = vrtc_domain(s);
    5.71  
    5.72 +    ASSERT(spin_is_locked(&s->lock));
    5.73 +
    5.74      if ( s->time_offset_seconds != d->time_offset_seconds )
    5.75      {
    5.76          s->current_tm = gmtime(get_localtime(d));
    5.77 @@ -231,6 +248,8 @@ static void rtc_next_second(RTCState *s)
    5.78      int days_in_month;
    5.79      struct domain *d = vrtc_domain(s);
    5.80  
    5.81 +    ASSERT(spin_is_locked(&s->lock));
    5.82 +
    5.83      if ( s->time_offset_seconds != d->time_offset_seconds )
    5.84      {
    5.85          s->current_tm = gmtime(get_localtime(d));
    5.86 @@ -279,6 +298,8 @@ static void rtc_update_second(void *opaq
    5.87  {
    5.88      RTCState *s = opaque;
    5.89  
    5.90 +    spin_lock(&s->lock);
    5.91 +
    5.92      /* if the oscillator is not in normal operation, we do not update */
    5.93      if ( (s->hw.cmos_data[RTC_REG_A] & RTC_DIV_CTL) != RTC_REF_CLCK_32KHZ )
    5.94      {
    5.95 @@ -295,6 +316,8 @@ static void rtc_update_second(void *opaq
    5.96          /* Delay time before update cycle */
    5.97          set_timer(&s->second_timer2, s->next_second_time + 244000);
    5.98      }
    5.99 +
   5.100 +    spin_unlock(&s->lock);
   5.101  }
   5.102  
   5.103  static void rtc_update_second2(void *opaque)
   5.104 @@ -302,6 +325,8 @@ static void rtc_update_second2(void *opa
   5.105      RTCState *s = opaque;
   5.106      struct domain *d = vrtc_domain(s);
   5.107  
   5.108 +    spin_lock(&s->lock);
   5.109 +
   5.110      if ( !(s->hw.cmos_data[RTC_REG_B] & RTC_SET) )
   5.111          rtc_copy_date(s);
   5.112  
   5.113 @@ -337,16 +362,19 @@ static void rtc_update_second2(void *opa
   5.114  
   5.115      s->next_second_time += 1000000000ULL;
   5.116      set_timer(&s->second_timer, s->next_second_time);
   5.117 +
   5.118 +    spin_unlock(&s->lock);
   5.119  }
   5.120  
   5.121 -static uint32_t rtc_ioport_read(void *opaque, uint32_t addr)
   5.122 +static uint32_t rtc_ioport_read(RTCState *s, uint32_t addr)
   5.123  {
   5.124 -    RTCState *s = opaque;
   5.125      int ret;
   5.126  
   5.127      if ( (addr & 1) == 0 )
   5.128          return 0xff;
   5.129  
   5.130 +    spin_lock(&s->lock);
   5.131 +
   5.132      switch ( s->hw.cmos_index )
   5.133      {
   5.134      case RTC_SECONDS:
   5.135 @@ -371,6 +399,8 @@ static uint32_t rtc_ioport_read(void *op
   5.136          break;
   5.137      }
   5.138  
   5.139 +    spin_unlock(&s->lock);
   5.140 +
   5.141      return ret;
   5.142  }
   5.143  
   5.144 @@ -413,7 +443,11 @@ void rtc_migrate_timers(struct vcpu *v)
   5.145  static int rtc_save(struct domain *d, hvm_domain_context_t *h)
   5.146  {
   5.147      RTCState *s = domain_vrtc(d);
   5.148 -    return hvm_save_entry(RTC, 0, h, &s->hw);
   5.149 +    int rc;
   5.150 +    spin_lock(&s->lock);
   5.151 +    rc = hvm_save_entry(RTC, 0, h, &s->hw);
   5.152 +    spin_unlock(&s->lock);
   5.153 +    return rc;
   5.154  }
   5.155  
   5.156  /* Reload the hardware state from a saved domain */
   5.157 @@ -421,9 +455,14 @@ static int rtc_load(struct domain *d, hv
   5.158  {
   5.159      RTCState *s = domain_vrtc(d);
   5.160  
   5.161 +    spin_lock(&s->lock);
   5.162 +
   5.163      /* Restore the registers */
   5.164      if ( hvm_load_entry(RTC, h, &s->hw) != 0 )
   5.165 +    {
   5.166 +        spin_unlock(&s->lock);
   5.167          return -EINVAL;
   5.168 +    }
   5.169  
   5.170      /* Reset the wall-clock time.  In normal running, this runs with host 
   5.171       * time, so let's keep doing that. */
   5.172 @@ -436,6 +475,8 @@ static int rtc_load(struct domain *d, hv
   5.173      /* Reset the periodic interrupt timer based on the registers */
   5.174      rtc_timer_update(s);
   5.175  
   5.176 +    spin_unlock(&s->lock);
   5.177 +
   5.178      return 0;
   5.179  }
   5.180  
   5.181 @@ -446,13 +487,18 @@ void rtc_init(struct vcpu *v, int base)
   5.182  {
   5.183      RTCState *s = vcpu_vrtc(v);
   5.184  
   5.185 +    spin_lock_init(&s->lock);
   5.186 +
   5.187      s->hw.cmos_data[RTC_REG_A] = RTC_REF_CLCK_32KHZ | 6; /* ~1kHz */
   5.188      s->hw.cmos_data[RTC_REG_B] = RTC_24H;
   5.189      s->hw.cmos_data[RTC_REG_C] = 0;
   5.190      s->hw.cmos_data[RTC_REG_D] = RTC_VRT;
   5.191  
   5.192      s->current_tm = gmtime(get_localtime(v->domain));
   5.193 +
   5.194 +    spin_lock(&s->lock);
   5.195      rtc_copy_date(s);
   5.196 +    spin_unlock(&s->lock);
   5.197  
   5.198      init_timer(&s->second_timer, rtc_update_second, s, v->processor);
   5.199      init_timer(&s->second_timer2, rtc_update_second2, s, v->processor);
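
The RTC is an index/data port pair: a write to the even port selects cmos_index, and the following access to the odd port reads or writes cmos_data[cmos_index]. Because the index is device state carried between two separate port accesses, both ports must be covered by the same lock, and every early return in rtc_ioport_write() has to drop it first, exactly as the hunks above do. A compact compilable sketch of the pair, with pthread_mutex_t standing in for spinlock_t and the 128-byte size assumed for illustration:

    #include <pthread.h>
    #include <stdint.h>

    #define CMOS_SIZE 128                 /* illustrative, cf. RTC_CMOS_SIZE */

    typedef struct {
        pthread_mutex_t lock;
        uint8_t cmos_index;
        uint8_t cmos_data[CMOS_SIZE];
    } rtc_sketch_t;

    int rtc_port_write(rtc_sketch_t *s, uint32_t addr, uint8_t data)
    {
        int ok = 1;
        pthread_mutex_lock(&s->lock);
        if ( (addr & 1) == 0 )
        {
            s->cmos_index = data & 0x7f;          /* select a register */
            ok = (s->cmos_index < CMOS_SIZE);
        }
        else if ( s->cmos_index < CMOS_SIZE )
        {
            s->cmos_data[s->cmos_index] = data;   /* write the selected one */
        }
        else
            ok = 0;
        pthread_mutex_unlock(&s->lock);
        return ok;
    }

A single unlock at the end, as here, is the simplest way to honour the "unlock on every exit path" rule that the real function implements with explicit spin_unlock() calls before each return.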
     6.1 --- a/xen/arch/x86/hvm/svm/vmcb.c	Fri Jun 15 13:33:11 2007 -0600
     6.2 +++ b/xen/arch/x86/hvm/svm/vmcb.c	Sat Jun 16 10:42:06 2007 +0100
     6.3 @@ -236,6 +236,16 @@ static int construct_vmcb(struct vcpu *v
     6.4          vmcb->g_pat = 0x0007040600070406ULL; /* guest PAT */
     6.5          vmcb->h_cr3 = pagetable_get_paddr(v->domain->arch.phys_table);
     6.6          vmcb->cr4 = arch_svm->cpu_shadow_cr4 = 0;
     6.7 +
     6.8 +        /* No point in intercepting CR0/3/4 reads, because the hardware 
     6.9 +         * will return the guest versions anyway. */
    6.10 +        vmcb->cr_intercepts &= ~(CR_INTERCEPT_CR0_READ
    6.11 +                                 |CR_INTERCEPT_CR3_READ
    6.12 +                                 |CR_INTERCEPT_CR4_READ);
    6.13 +
    6.14 +        /* No point in intercepting INVLPG if we don't have shadow pagetables 
    6.15 +         * that need to be fixed up. */
    6.16 +        vmcb->general1_intercepts &= ~GENERAL1_INTERCEPT_INVLPG;
    6.17      }
    6.18      else
    6.19      {
     7.1 --- a/xen/arch/x86/hvm/vpt.c	Fri Jun 15 13:33:11 2007 -0600
     7.2 +++ b/xen/arch/x86/hvm/vpt.c	Sat Jun 16 10:42:06 2007 +0100
     7.3 @@ -17,11 +17,31 @@
     7.4   * Place - Suite 330, Boston, MA 02111-1307 USA.
     7.5   *
     7.6   */
     7.7 +
     7.8  #include <xen/time.h>
     7.9  #include <asm/hvm/support.h>
    7.10  #include <asm/hvm/vpt.h>
    7.11  #include <asm/event.h>
    7.12  
    7.13 +static void pt_lock(struct periodic_time *pt)
    7.14 +{
    7.15 +    struct vcpu *v;
    7.16 +
    7.17 +    for ( ; ; )
    7.18 +    {
    7.19 +        v = pt->vcpu;
    7.20 +        spin_lock(&v->arch.hvm_vcpu.tm_lock);
    7.21 +        if ( likely(pt->vcpu == v) )
    7.22 +            break;
    7.23 +        spin_unlock(&v->arch.hvm_vcpu.tm_lock);
    7.24 +    }
    7.25 +}
    7.26 +
    7.27 +static void pt_unlock(struct periodic_time *pt)
    7.28 +{
    7.29 +    spin_unlock(&pt->vcpu->arch.hvm_vcpu.tm_lock);
    7.30 +}
    7.31 +
    7.32  static void missed_ticks(struct periodic_time *pt)
    7.33  {
    7.34      s_time_t missed_ticks;
    7.35 @@ -52,10 +72,14 @@ void pt_freeze_time(struct vcpu *v)
    7.36      if ( test_bit(_VPF_blocked, &v->pause_flags) )
    7.37          return;
    7.38  
    7.39 +    spin_lock(&v->arch.hvm_vcpu.tm_lock);
    7.40 +
    7.41      v->arch.hvm_vcpu.guest_time = hvm_get_guest_time(v);
    7.42  
    7.43      list_for_each_entry ( pt, head, list )
    7.44          stop_timer(&pt->timer);
    7.45 +
    7.46 +    spin_unlock(&v->arch.hvm_vcpu.tm_lock);
    7.47  }
    7.48  
    7.49  void pt_thaw_time(struct vcpu *v)
    7.50 @@ -63,6 +87,8 @@ void pt_thaw_time(struct vcpu *v)
    7.51      struct list_head *head = &v->arch.hvm_vcpu.tm_list;
    7.52      struct periodic_time *pt;
    7.53  
    7.54 +    spin_lock(&v->arch.hvm_vcpu.tm_lock);
    7.55 +
    7.56      if ( v->arch.hvm_vcpu.guest_time )
    7.57      {
    7.58          hvm_set_guest_time(v, v->arch.hvm_vcpu.guest_time);
    7.59 @@ -74,12 +100,16 @@ void pt_thaw_time(struct vcpu *v)
    7.60              set_timer(&pt->timer, pt->scheduled);
    7.61          }
    7.62      }
    7.63 +
    7.64 +    spin_unlock(&v->arch.hvm_vcpu.tm_lock);
    7.65  }
    7.66  
    7.67  static void pt_timer_fn(void *data)
    7.68  {
    7.69      struct periodic_time *pt = data;
    7.70  
    7.71 +    pt_lock(pt);
    7.72 +
    7.73      pt->pending_intr_nr++;
    7.74      pt->scheduled += pt->period;
    7.75  
    7.76 @@ -89,6 +119,8 @@ static void pt_timer_fn(void *data)
    7.77          set_timer(&pt->timer, pt->scheduled);
    7.78  
    7.79      vcpu_kick(pt->vcpu);
    7.80 +
    7.81 +    pt_unlock(pt);
    7.82  }
    7.83  
    7.84  void pt_update_irq(struct vcpu *v)
    7.85 @@ -98,6 +130,8 @@ void pt_update_irq(struct vcpu *v)
    7.86      uint64_t max_lag = -1ULL;
    7.87      int irq = -1;
    7.88  
    7.89 +    spin_lock(&v->arch.hvm_vcpu.tm_lock);
    7.90 +
    7.91      list_for_each_entry ( pt, head, list )
    7.92      {
    7.93          if ( !is_isa_irq_masked(v, pt->irq) && pt->pending_intr_nr &&
    7.94 @@ -108,6 +142,8 @@ void pt_update_irq(struct vcpu *v)
    7.95          }
    7.96      }
    7.97  
    7.98 +    spin_unlock(&v->arch.hvm_vcpu.tm_lock);
    7.99 +
   7.100      if ( is_lvtt(v, irq) )
   7.101      {
   7.102          vlapic_set_irq(vcpu_vlapic(v), irq, 0);
   7.103 @@ -119,7 +155,7 @@ void pt_update_irq(struct vcpu *v)
   7.104      }
   7.105  }
   7.106  
   7.107 -struct periodic_time *is_pt_irq(struct vcpu *v, int vector, int type)
   7.108 +static struct periodic_time *is_pt_irq(struct vcpu *v, int vector, int type)
   7.109  {
   7.110      struct list_head *head = &v->arch.hvm_vcpu.tm_list;
   7.111      struct periodic_time *pt;
   7.112 @@ -152,19 +188,34 @@ struct periodic_time *is_pt_irq(struct v
   7.113  
   7.114  void pt_intr_post(struct vcpu *v, int vector, int type)
   7.115  {
   7.116 -    struct periodic_time *pt = is_pt_irq(v, vector, type);
   7.117 +    struct periodic_time *pt;
   7.118 +    time_cb *cb;
   7.119 +    void *cb_priv;
   7.120  
   7.121 +    spin_lock(&v->arch.hvm_vcpu.tm_lock);
   7.122 +
   7.123 +    pt = is_pt_irq(v, vector, type);
   7.124      if ( pt == NULL )
   7.125 +    {
   7.126 +        spin_unlock(&v->arch.hvm_vcpu.tm_lock);
   7.127          return;
   7.128 +    }
   7.129 +
   7.130 +    ASSERT(pt->vcpu == v);
   7.131  
   7.132      pt->pending_intr_nr--;
   7.133      pt->last_plt_gtime += pt->period_cycles;
   7.134  
   7.135 -    if ( hvm_get_guest_time(pt->vcpu) < pt->last_plt_gtime )
   7.136 -        hvm_set_guest_time(pt->vcpu, pt->last_plt_gtime);
   7.137 +    if ( hvm_get_guest_time(v) < pt->last_plt_gtime )
   7.138 +        hvm_set_guest_time(v, pt->last_plt_gtime);
   7.139  
   7.140 -    if ( pt->cb != NULL )
   7.141 -        pt->cb(pt->vcpu, pt->priv);
   7.142 +    cb = pt->cb;
   7.143 +    cb_priv = pt->priv;
   7.144 +
   7.145 +    spin_unlock(&v->arch.hvm_vcpu.tm_lock);
   7.146 +
   7.147 +    if ( cb != NULL )
   7.148 +        cb(v, cb_priv);
   7.149  }
   7.150  
   7.151  void pt_reset(struct vcpu *v)
   7.152 @@ -172,6 +223,8 @@ void pt_reset(struct vcpu *v)
   7.153      struct list_head *head = &v->arch.hvm_vcpu.tm_list;
   7.154      struct periodic_time *pt;
   7.155  
   7.156 +    spin_lock(&v->arch.hvm_vcpu.tm_lock);
   7.157 +
   7.158      list_for_each_entry ( pt, head, list )
   7.159      {
   7.160          if ( pt->enabled )
   7.161 @@ -182,6 +235,8 @@ void pt_reset(struct vcpu *v)
   7.162              set_timer(&pt->timer, pt->scheduled);
   7.163          }
   7.164      }
   7.165 +
   7.166 +    spin_unlock(&v->arch.hvm_vcpu.tm_lock);
   7.167  }
   7.168  
   7.169  void pt_migrate(struct vcpu *v)
   7.170 @@ -189,11 +244,15 @@ void pt_migrate(struct vcpu *v)
   7.171      struct list_head *head = &v->arch.hvm_vcpu.tm_list;
   7.172      struct periodic_time *pt;
   7.173  
   7.174 +    spin_lock(&v->arch.hvm_vcpu.tm_lock);
   7.175 +
   7.176      list_for_each_entry ( pt, head, list )
   7.177      {
   7.178          if ( pt->enabled )
   7.179              migrate_timer(&pt->timer, v->processor);
   7.180      }
   7.181 +
   7.182 +    spin_unlock(&v->arch.hvm_vcpu.tm_lock);
   7.183  }
   7.184  
   7.185  void create_periodic_time(
   7.186 @@ -202,6 +261,8 @@ void create_periodic_time(
   7.187  {
   7.188      destroy_periodic_time(pt);
   7.189  
   7.190 +    spin_lock(&v->arch.hvm_vcpu.tm_lock);
   7.191 +
   7.192      init_timer(&pt->timer, pt_timer_fn, pt, v->processor);
   7.193      pt->enabled = 1;
   7.194      if ( period < 900000 ) /* < 0.9 ms */
   7.195 @@ -223,6 +284,8 @@ void create_periodic_time(
   7.196  
   7.197      list_add(&pt->list, &v->arch.hvm_vcpu.tm_list);
   7.198      set_timer(&pt->timer, pt->scheduled);
   7.199 +
   7.200 +    spin_unlock(&v->arch.hvm_vcpu.tm_lock);
   7.201  }
   7.202  
   7.203  void destroy_periodic_time(struct periodic_time *pt)
   7.204 @@ -230,8 +293,10 @@ void destroy_periodic_time(struct period
   7.205      if ( !pt->enabled )
   7.206          return;
   7.207  
   7.208 +    pt_lock(pt);
   7.209      pt->enabled = 0;
   7.210      pt->pending_intr_nr = 0;
   7.211      list_del(&pt->list);
   7.212      kill_timer(&pt->timer);
   7.213 +    pt_unlock(pt);
   7.214  }
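
The pt_lock()/pt_unlock() pair at the top of vpt.c solves an awkward ownership problem: the lock lives inside the vcpu that pt->vcpu points at, yet that pointer may be retargeted while we are waiting for the lock. The answer is the classic lock-then-revalidate loop: sample the pointer, take that vcpu's lock, and retry if the pointer moved in the meantime. A compilable sketch of the same loop, with pthread_mutex_t standing in for spinlock_t:

    #include <pthread.h>

    struct vcpu_sketch { pthread_mutex_t tm_lock; };
    struct pt_sketch   { struct vcpu_sketch *vcpu; };

    void pt_lock_sketch(struct pt_sketch *pt)
    {
        struct vcpu_sketch *v;
        for ( ; ; )
        {
            v = pt->vcpu;                        /* sample the pointer */
            pthread_mutex_lock(&v->tm_lock);
            if ( pt->vcpu == v )                 /* still the owner? done */
                break;
            pthread_mutex_unlock(&v->tm_lock);   /* it moved: retry */
        }
    }

Note also how pt_intr_post() snapshots pt->cb and pt->priv under tm_lock and then drops the lock before invoking the callback, so callbacks such as rtc_periodic_cb() can take their own device locks without nesting inside tm_lock.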
     8.1 --- a/xen/arch/x86/mm/hap/hap.c	Fri Jun 15 13:33:11 2007 -0600
     8.2 +++ b/xen/arch/x86/mm/hap/hap.c	Sat Jun 16 10:42:06 2007 +0100
     8.3 @@ -55,14 +55,14 @@
     8.4  /* hap code to call when log_dirty is enable. return 0 if no problem found. */
     8.5  int hap_enable_log_dirty(struct domain *d)
     8.6  {
     8.7 +    /* turn on PG_log_dirty bit in paging mode */
     8.8      hap_lock(d);
     8.9 -    /* turn on PG_log_dirty bit in paging mode */
    8.10      d->arch.paging.mode |= PG_log_dirty;
    8.11 +    hap_unlock(d);
    8.12 +
    8.13      /* set l1e entries of P2M table to NOT_WRITABLE. */
    8.14      p2m_set_flags_global(d, (_PAGE_PRESENT|_PAGE_USER));
    8.15 -    flush_tlb_all_pge();
    8.16 -    hap_unlock(d);
    8.17 -
    8.18 +    flush_tlb_mask(d->domain_dirty_cpumask);
    8.19      return 0;
    8.20  }
    8.21  
    8.22 @@ -70,19 +70,20 @@ int hap_disable_log_dirty(struct domain 
    8.23  {
    8.24      hap_lock(d);
    8.25      d->arch.paging.mode &= ~PG_log_dirty;
    8.26 +    hap_unlock(d);
    8.27 +
    8.28      /* set l1e entries of P2M table with normal mode */
    8.29 -    p2m_set_flags_global(d, __PAGE_HYPERVISOR|_PAGE_USER);
    8.30 -    hap_unlock(d);
    8.31 -    
    8.32 -    return 1;
    8.33 +    p2m_set_flags_global(d, __PAGE_HYPERVISOR|_PAGE_USER);    
    8.34 +    return 0;
    8.35  }
    8.36  
    8.37  void hap_clean_dirty_bitmap(struct domain *d)
    8.38  {
    8.39      /* mark physical memory as NOT_WRITEABLE and flush the TLB */
    8.40      p2m_set_flags_global(d, (_PAGE_PRESENT|_PAGE_USER));
    8.41 -    flush_tlb_all_pge();
    8.42 +    flush_tlb_mask(d->domain_dirty_cpumask);
    8.43  }
    8.44 +
    8.45  /************************************************/
    8.46  /*             HAP SUPPORT FUNCTIONS            */
    8.47  /************************************************/
    8.48 @@ -268,6 +269,7 @@ void hap_install_xen_entries_in_l2h(stru
    8.49  {
    8.50      struct domain *d = v->domain;
    8.51      l2_pgentry_t *sl2e;
    8.52 +    l3_pgentry_t *p2m;
    8.53  
    8.54      int i;
    8.55  
    8.56 @@ -290,23 +292,18 @@ void hap_install_xen_entries_in_l2h(stru
    8.57          sl2e[l2_table_offset(LINEAR_PT_VIRT_START) + i] =
    8.58              l2e_empty();
    8.59  
    8.60 -    if ( paging_mode_translate(d) )
    8.61 +    /* Install the domain-specific p2m table */
    8.62 +    ASSERT(pagetable_get_pfn(d->arch.phys_table) != 0);
    8.63 +    p2m = hap_map_domain_page(pagetable_get_mfn(d->arch.phys_table));
    8.64 +    for ( i = 0; i < MACHPHYS_MBYTES>>1; i++ )
    8.65      {
    8.66 -        /* Install the domain-specific p2m table */
    8.67 -        l3_pgentry_t *p2m;
    8.68 -        ASSERT(pagetable_get_pfn(d->arch.phys_table) != 0);
    8.69 -        p2m = hap_map_domain_page(pagetable_get_mfn(d->arch.phys_table));
    8.70 -        for ( i = 0; i < MACHPHYS_MBYTES>>1; i++ )
    8.71 -        {
    8.72 -            sl2e[l2_table_offset(RO_MPT_VIRT_START) + i] =
    8.73 -                (l3e_get_flags(p2m[i]) & _PAGE_PRESENT)
    8.74 -                ? l2e_from_pfn(mfn_x(_mfn(l3e_get_pfn(p2m[i]))),
    8.75 -                                      __PAGE_HYPERVISOR)
    8.76 -                : l2e_empty();
    8.77 -        }
    8.78 -        hap_unmap_domain_page(p2m);
    8.79 +        sl2e[l2_table_offset(RO_MPT_VIRT_START) + i] =
    8.80 +            (l3e_get_flags(p2m[i]) & _PAGE_PRESENT)
    8.81 +            ? l2e_from_pfn(mfn_x(_mfn(l3e_get_pfn(p2m[i]))),
    8.82 +                           __PAGE_HYPERVISOR)
    8.83 +            : l2e_empty();
    8.84      }
    8.85 -
    8.86 +    hap_unmap_domain_page(p2m);
    8.87      hap_unmap_domain_page(sl2e);
    8.88  }
    8.89  #endif
    8.90 @@ -565,61 +562,37 @@ void hap_vcpu_init(struct vcpu *v)
    8.91  /************************************************/
    8.92  /*          HAP PAGING MODE FUNCTIONS           */
    8.93  /************************************************/
    8.94 -/* In theory, hap should not intercept guest page fault. This function can 
    8.95 - * be recycled to handle host/nested page fault, if needed.
    8.96 +/* 
    8.97 + * HAP guests can handle page faults (in the guest page tables) without
    8.98 + * needing any action from Xen, so we should not be intercepting them.
    8.99   */
   8.100  int hap_page_fault(struct vcpu *v, unsigned long va, 
   8.101                     struct cpu_user_regs *regs)
   8.102  {
   8.103 -    HERE_I_AM;
   8.104 +    HAP_ERROR("Intercepted a guest #PF (%u:%u) with HAP enabled.\n",
   8.105 +              v->domain->domain_id, v->vcpu_id);
   8.106      domain_crash(v->domain);
   8.107      return 0;
   8.108  }
   8.109  
   8.110 -/* called when guest issues a invlpg request. 
   8.111 - * Return 1 if need to issue page invalidation on CPU; Return 0 if does not
   8.112 - * need to do so.
   8.113 +/* 
   8.114 + * HAP guests can handle invlpg without needing any action from Xen, so
   8.115 + * should not be intercepting it. 
   8.116   */
   8.117  int hap_invlpg(struct vcpu *v, unsigned long va)
   8.118  {
   8.119 -    HERE_I_AM;
   8.120 +    HAP_ERROR("Intercepted a guest INVLPG (%u:%u) with HAP enabled.\n",
   8.121 +              v->domain->domain_id, v->vcpu_id);
   8.122 +    domain_crash(v->domain);
   8.123      return 0;
   8.124  }
   8.125  
   8.126 +/*
   8.127 + * HAP guests do not need to take any action on CR3 writes (they are still
   8.128 + * intercepted, so that Xen's copy of the guest's CR3 can be kept in sync.)
   8.129 + */
   8.130  void hap_update_cr3(struct vcpu *v, int do_locking)
   8.131  {
   8.132 -    struct domain *d = v->domain;
   8.133 -    mfn_t gmfn;
   8.134 -
   8.135 -    HERE_I_AM;
   8.136 -    /* Don't do anything on an uninitialised vcpu */
   8.137 -    if ( !is_hvm_domain(d) && !v->is_initialised )
   8.138 -    {
   8.139 -        ASSERT(v->arch.cr3 == 0);
   8.140 -        return;
   8.141 -    }
   8.142 -
   8.143 -    if ( do_locking )
   8.144 -        hap_lock(v->domain);
   8.145 -    
   8.146 -    ASSERT(hap_locked_by_me(v->domain));
   8.147 -    ASSERT(v->arch.paging.mode);
   8.148 -    
   8.149 -    gmfn = pagetable_get_mfn(v->arch.guest_table);
   8.150 -
   8.151 -    make_cr3(v, pagetable_get_pfn(v->arch.monitor_table));
   8.152 -    
   8.153 -    hvm_update_guest_cr3(v, pagetable_get_paddr(v->arch.monitor_table));
   8.154 -
   8.155 -    HAP_PRINTK("d=%u v=%u guest_table=%05lx, monitor_table = %05lx\n", 
   8.156 -               d->domain_id, v->vcpu_id, 
   8.157 -               (unsigned long)pagetable_get_pfn(v->arch.guest_table),
   8.158 -               (unsigned long)pagetable_get_pfn(v->arch.monitor_table));
   8.159 -
   8.160 -    flush_tlb_mask(d->domain_dirty_cpumask);
   8.161 -
   8.162 -    if ( do_locking )
   8.163 -        hap_unlock(v->domain);
   8.164  }
   8.165  
   8.166  void hap_update_paging_modes(struct vcpu *v)
   8.167 @@ -647,7 +620,7 @@ void hap_update_paging_modes(struct vcpu
   8.168          v->arch.paging.mode = &hap_paging_real_mode;
   8.169      }
   8.170  
   8.171 -    v->arch.paging.translate_enabled = !!hvm_paging_enabled(v);    
   8.172 +    v->arch.paging.translate_enabled = !!hvm_paging_enabled(v);
   8.173  
   8.174      if ( pagetable_is_null(v->arch.monitor_table) ) {
   8.175          mfn_t mmfn = hap_make_monitor_table(v);
   8.176 @@ -655,7 +628,6 @@ void hap_update_paging_modes(struct vcpu
   8.177          make_cr3(v, mfn_x(mmfn));
   8.178      }
   8.179  
   8.180 -    flush_tlb_mask(d->domain_dirty_cpumask);
   8.181      hap_unlock(d);
   8.182  }
   8.183  
   8.184 @@ -702,29 +674,18 @@ void
   8.185  hap_write_p2m_entry(struct vcpu *v, unsigned long gfn, l1_pgentry_t *p,
   8.186                      l1_pgentry_t new, unsigned int level)
   8.187  {
   8.188 -    int do_locking;
   8.189 -
   8.190 -    /* This function can be called from two directions (P2M and log dirty). We
   8.191 -     *  need to make sure this lock has been held or not.
   8.192 -     */
   8.193 -    do_locking = !hap_locked_by_me(v->domain);
   8.194 -
   8.195 -    if ( do_locking )
   8.196 -        hap_lock(v->domain);
   8.197 +    hap_lock(v->domain);
   8.198  
   8.199      safe_write_pte(p, new);
   8.200  #if CONFIG_PAGING_LEVELS == 3
   8.201      /* install P2M in monitor table for PAE Xen */
   8.202 -    if ( level == 3 ) {
   8.203 +    if ( level == 3 ) 
   8.204  	/* We have written to the p2m l3: need to sync the per-vcpu
   8.205           * copies of it in the monitor tables */
   8.206  	p2m_install_entry_in_monitors(v->domain, (l3_pgentry_t *)p);
   8.207 -	
   8.208 -    }
   8.209  #endif
   8.210      
   8.211 -    if ( do_locking )
   8.212 -        hap_unlock(v->domain);
   8.213 +    hap_unlock(v->domain);
   8.214  }
   8.215  
   8.216  /* Entry points into this mode of the hap code. */
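
Taken together, the hap.c and p2m.c hunks establish a lock order: the p2m lock is always acquired before the shadow or HAP lock. That is why hap_enable_log_dirty() and hap_disable_log_dirty() now drop the HAP lock before calling p2m_set_flags_global(), and why hap_write_p2m_entry() can take the HAP lock unconditionally instead of probing hap_locked_by_me(). A hypothetical two-mutex sketch of the inversion the rule forbids:

    #include <pthread.h>

    static pthread_mutex_t p2m_lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_mutex_t hap_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Correct order: p2m first, then HAP, as when the p2m code calls
     * down into hap_write_p2m_entry(). */
    void write_p2m_entry_sketch(void)
    {
        pthread_mutex_lock(&p2m_lock);
        pthread_mutex_lock(&hap_lock);
        /* ... update the entry, sync monitor tables ... */
        pthread_mutex_unlock(&hap_lock);
        pthread_mutex_unlock(&p2m_lock);
    }

    /* The pre-patch shape: holding the HAP lock across a call into p2m
     * code inverts the order and can deadlock against the path above. */
    void enable_log_dirty_inverted_sketch(void)
    {
        pthread_mutex_lock(&hap_lock);
        pthread_mutex_lock(&p2m_lock);    /* A-B vs. B-A: deadlock risk */
        pthread_mutex_unlock(&p2m_lock);
        pthread_mutex_unlock(&hap_lock);
    }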
     9.1 --- a/xen/arch/x86/mm/p2m.c	Fri Jun 15 13:33:11 2007 -0600
     9.2 +++ b/xen/arch/x86/mm/p2m.c	Sat Jun 16 10:42:06 2007 +0100
     9.3 @@ -32,9 +32,13 @@
     9.4  #define P2M_AUDIT     0
     9.5  #define P2M_DEBUGGING 1
     9.6  
     9.7 -/* The P2M lock.  This protects all updates to the p2m table.
     9.8 +/*
     9.9 + * The P2M lock.  This protects all updates to the p2m table.
    9.10   * Updates are expected to be safe against concurrent reads, 
    9.11 - * which do *not* require the lock */
    9.12 + * which do *not* require the lock.
    9.13 + *
    9.14 + * Locking discipline: always acquire this lock before the shadow or HAP one
    9.15 + */
    9.16  
    9.17  #define p2m_lock_init(_d)                            \
    9.18      do {                                             \
    10.1 --- a/xen/common/kexec.c	Fri Jun 15 13:33:11 2007 -0600
    10.2 +++ b/xen/common/kexec.c	Sat Jun 16 10:42:06 2007 +0100
    10.3 @@ -19,6 +19,7 @@
    10.4  #include <asm/atomic.h>
    10.5  #include <xen/spinlock.h>
    10.6  #include <xen/version.h>
    10.7 +#include <xen/console.h>
    10.8  #include <public/elfnote.h>
    10.9  
   10.10  #ifndef COMPAT
   10.11 @@ -110,6 +111,8 @@ void kexec_crash(void)
   10.12      if ( !test_bit(KEXEC_IMAGE_CRASH_BASE + pos, &kexec_flags) )
   10.13          return;
   10.14  
   10.15 +    console_start_sync();
   10.16 +
   10.17      one_cpu_only();
   10.18      kexec_crash_save_cpu();
   10.19      machine_crash_shutdown();
    11.1 --- a/xen/common/timer.c	Fri Jun 15 13:33:11 2007 -0600
    11.2 +++ b/xen/common/timer.c	Sat Jun 16 10:42:06 2007 +0100
    11.3 @@ -183,7 +183,7 @@ static inline void timer_lock(struct tim
    11.4  
    11.5  static inline void timer_unlock(struct timer *timer)
    11.6  {
    11.7 -        spin_unlock(&per_cpu(timers, timer->cpu).lock);
    11.8 +    spin_unlock(&per_cpu(timers, timer->cpu).lock);
    11.9  }
   11.10  
   11.11  #define timer_unlock_irq(t) \
    12.1 --- a/xen/include/asm-x86/hvm/vcpu.h	Fri Jun 15 13:33:11 2007 -0600
    12.2 +++ b/xen/include/asm-x86/hvm/vcpu.h	Sat Jun 16 10:42:06 2007 +0100
    12.3 @@ -35,6 +35,9 @@ struct hvm_vcpu {
    12.4      struct vlapic       vlapic;
    12.5      s64                 cache_tsc_offset;
    12.6      u64                 guest_time;
    12.7 +
    12.8 +    /* Lock and list for virtual platform timers. */
    12.9 +    spinlock_t          tm_lock;
   12.10      struct list_head    tm_list;
   12.11  
   12.12      /* For AP startup */
    13.1 --- a/xen/include/asm-x86/hvm/vpt.h	Fri Jun 15 13:33:11 2007 -0600
    13.2 +++ b/xen/include/asm-x86/hvm/vpt.h	Sat Jun 16 10:42:06 2007 +0100
    13.3 @@ -31,7 +31,6 @@
    13.4  #include <asm/hvm/vpic.h>
    13.5  #include <public/hvm/save.h>
    13.6  
    13.7 -
    13.8  struct HPETState;
    13.9  struct HPET_timer_fn_info {
   13.10      struct HPETState       *hs;
   13.11 @@ -45,6 +44,7 @@ typedef struct HPETState {
   13.12      uint64_t mc_offset;
   13.13      struct timer timers[HPET_TIMER_NUM];
   13.14      struct HPET_timer_fn_info timer_fn_info[HPET_TIMER_NUM]; 
   13.15 +    spinlock_t lock;
   13.16  } HPETState;
   13.17  
   13.18  
   13.19 @@ -80,6 +80,7 @@ typedef struct PITState {
   13.20      int64_t count_load_time[3];
   13.21      /* irq handling */
   13.22      struct periodic_time pt[3];
   13.23 +    spinlock_t lock;
   13.24  } PITState;
   13.25  
   13.26  typedef struct RTCState {
   13.27 @@ -93,6 +94,7 @@ typedef struct RTCState {
   13.28      struct timer second_timer2;
   13.29      struct periodic_time pt;
   13.30      int32_t time_offset_seconds;
   13.31 +    spinlock_t lock;
   13.32  } RTCState;
   13.33  
   13.34  #define FREQUENCE_PMTIMER  3579545  /* Timer should run at 3.579545 MHz */
   13.35 @@ -102,6 +104,7 @@ typedef struct PMTState {
   13.36      uint64_t last_gtime;        /* Last (guest) time we updated the timer */
   13.37      uint64_t scale;             /* Multiplier to get from tsc to timer ticks */
   13.38      struct timer timer;         /* To make sure we send SCIs */
   13.39 +    spinlock_t lock;
   13.40  } PMTState;
   13.41  
   13.42  struct pl_time {    /* platform time */
   13.43 @@ -116,7 +119,6 @@ struct pl_time {    /* platform time */
   13.44  void pt_freeze_time(struct vcpu *v);
   13.45  void pt_thaw_time(struct vcpu *v);
   13.46  void pt_update_irq(struct vcpu *v);
   13.47 -struct periodic_time *is_pt_irq(struct vcpu *v, int vector, int type);
   13.48  void pt_intr_post(struct vcpu *v, int vector, int type);
   13.49  void pt_reset(struct vcpu *v);
   13.50  void pt_migrate(struct vcpu *v);