outb(CALIBRATE_LATCH & 0xff, PIT_CH2); /* LSB of count */
outb(CALIBRATE_LATCH >> 8, PIT_CH2); /* MSB of count */
- start = rdtsc();
+ start = rdtsc_ordered();
for ( count = 0; (inb(0x61) & 0x20) == 0; count++ )
continue;
- end = rdtsc();
+ end = rdtsc_ordered();
/* Error if the CTC doesn't behave itself. */
if ( count == 0 )
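
For reference, the interval being timed here is fixed by the PIT: the loop spins until channel 2's output (port 0x61, bit 5) goes high, i.e. until the latched CALIBRATE_LATCH count has expired. A minimal sketch of the frequency arithmetic that consumes start and end (the helper name and the calibrate_ms parameter are illustrative, not from this patch):

static uint64_t tsc_hz_from_pit(uint64_t start, uint64_t end,
                                unsigned int calibrate_ms)
{
    /* 'end - start' TSC cycles elapsed over a known calibrate_ms window. */
    return (end - start) * 1000ull / calibrate_ms; /* cycles/ms -> Hz */
}

An out-of-order TSC read at either end of the window would shrink or stretch end - start and feed a skewed frequency into everything derived from it, which is why both stamps use the fenced variant.
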
if ( at_tsc )
tsc = at_tsc;
else
- tsc = rdtsc();
+ tsc = rdtsc_ordered();
delta = tsc - t->local_tsc_stamp;
now = t->stime_local_stamp + scale_delta(delta, &t->tsc_scale);
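
For context, this is the TSC-to-system-time extrapolation that the fenced reads ultimately feed: system time is the last calibration stamp plus the scaled TSC delta. Xen's scale_delta() applies a multiply/shift time_scale; expressed as plain arithmetic (hypothetical helper, for illustration only):

static uint64_t extrapolate_stime(uint64_t stime_stamp, uint64_t tsc_stamp,
                                  uint64_t tsc_now, uint64_t tsc_khz)
{
    uint64_t delta = tsc_now - tsc_stamp;              /* elapsed TSC ticks */
    return stime_stamp + delta * 1000000ull / tsc_khz; /* ticks -> ns */
}

A TSC value sampled "too early" relative to surrounding loads shows up here directly as a wrong delta, hence a wrong now.
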
/* TSC-extrapolated time may be bogus after frequency change. */
/*t->stime_local_stamp = get_s_time();*/
t->stime_local_stamp = t->stime_master_stamp;
- curr_tsc = rdtsc();
+ curr_tsc = rdtsc_ordered();
t->local_tsc_stamp = curr_tsc;
set_time_scale(&t->tsc_scale, freq);
local_irq_enable();
if ( r->master_stime == 0 )
{
r->master_stime = read_platform_stime();
- r->master_tsc_stamp = rdtsc();
+ r->master_tsc_stamp = rdtsc_ordered();
}
atomic_inc(&r->semaphore);
}
}
- c->local_tsc_stamp = rdtsc();
+ c->local_tsc_stamp = rdtsc_ordered();
c->stime_local_stamp = get_s_time_fixed(c->local_tsc_stamp);
c->stime_master_stamp = r->master_stime;
mb(); /* receive signal /then/ read r->master_stime */
}
- c->local_tsc_stamp = rdtsc();
+ c->local_tsc_stamp = rdtsc_ordered();
c->stime_local_stamp = get_s_time_fixed(c->local_tsc_stamp);
c->stime_master_stamp = r->master_stime;
local_irq_save(flags);
ap_bringup_ref.master_stime = read_platform_stime();
- tsc = rdtsc();
+ tsc = rdtsc_ordered();
local_irq_restore(flags);
ap_bringup_ref.local_stime = get_s_time_fixed(tsc);
local_irq_save(flags);
now = read_platform_stime();
- tsc = rdtsc();
+ tsc = rdtsc_ordered();
local_irq_restore(flags);
t->stime_master_stamp = now;
XEN_CPUFEATURE(CPUID_FAULTING, (FSCAPINTS+0)*32+ 6) /* cpuid faulting */
XEN_CPUFEATURE(CLFLUSH_MONITOR, (FSCAPINTS+0)*32+ 7) /* clflush reqd with monitor */
XEN_CPUFEATURE(APERFMPERF, (FSCAPINTS+0)*32+ 8) /* APERFMPERF */
+XEN_CPUFEATURE(MFENCE_RDTSC, (FSCAPINTS+0)*32+ 9) /* MFENCE synchronizes RDTSC */
#define NCAPINTS (FSCAPINTS + 1) /* N 32-bit words worth of info */
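
MFENCE_RDTSC is a synthetic feature bit rather than one enumerated by CPUID, so some setup path has to set it explicitly. A sketch of how that could look, assuming an AMD-only hook (the helper name and the vendor check are assumptions, not part of this hunk):

static void set_synthetic_tsc_features(struct cpuinfo_x86 *c)
{
    /*
     * Hypothetical: AMD does not architecturally guarantee that LFENCE
     * is dispatch-serializing, so request MFENCE for ordering RDTSC;
     * on other vendors LFENCE (the alternative()'s default) suffices.
     */
    if ( c->x86_vendor == X86_VENDOR_AMD )
        __set_bit(X86_FEATURE_MFENCE_RDTSC, c->x86_capability);
}
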
return ((uint64_t)high << 32) | low;
}
+static inline uint64_t rdtsc_ordered(void)
+{
+ /*
+ * The RDTSC instruction is not ordered relative to memory access.
+ * The Intel SDM and the AMD APM are both vague on this point, but
+ * empirically an RDTSC instruction can be speculatively executed
+ * before prior loads. An RDTSC immediately after an appropriate
+ * barrier appears to be ordered as a normal load, that is, it
+ * provides the same ordering guarantees as reading from a global
+ * memory location that some other imaginary CPU is updating
+ * continuously with a time stamp.
+ */
+ alternative("lfence", "mfence", X86_FEATURE_MFENCE_RDTSC);
+ return rdtsc();
+}
+
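
To make the hazard in the comment above concrete: the typical caller brackets some memory-observing work with two time stamps. A hypothetical wait-timing helper (not part of the patch) showing the intended usage pattern:

static uint64_t time_wait_for_flag(const volatile int *flag)
{
    uint64_t start = rdtsc_ordered();

    while ( !*flag )
        cpu_relax();

    /*
     * With plain rdtsc(), this read could be speculated ahead of the
     * final load of *flag, under-measuring the wait; the fence in
     * rdtsc_ordered() keeps it ordered after the loop's loads.
     */
    return rdtsc_ordered() - start;
}
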
#define __write_tsc(val) wrmsrl(MSR_IA32_TSC, val)
#define write_tsc(val) ({ \
/* Reliable TSCs are in lockstep across all CPUs. We should \