obj-y += gic-v2.o
obj-$(CONFIG_HAS_GICV3) += gic-v3.o
obj-y += guestcopy.o
+obj-y += guest_atomics.o
obj-y += hvm.o
obj-y += io.o
obj-y += irq.o
--- /dev/null
+/*
+ * arch/arm/guest_atomics.c
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <xen/cpu.h>
+
+#include <asm/guest_atomics.h>
+
+DEFINE_PER_CPU_READ_MOSTLY(unsigned int, guest_safe_atomic_max);
+
+/*
+ * Heuristic to find a safe upper-limit for load-store exclusive
+ * operations on memory shared with guest.
+ *
+ * At the moment, we calculate the number of iterations of a simple
+ * load-store atomic loop in 1us.
+ */
+static void calibrate_safe_atomic(void)
+{
+    /* Sample window: count iterations completed within 1us. */
+    s_time_t deadline = NOW() + MICROSECS(1);
+    unsigned int counter = 0;
+    /* Scratch word the exclusive load/store pair operates on. */
+    unsigned long mem = 0;
+
+    do
+    {
+        unsigned long res, tmp;
+
+        /*
+         * One load-store exclusive increment of the local word. The
+         * store-exclusive status flag (res) is deliberately never read:
+         * the loop only measures the iteration rate, not whether each
+         * store succeeded.
+         */
+#ifdef CONFIG_ARM_32
+        asm volatile (" ldrex %2, %1\n"
+                      " add %2, %2, #1\n"
+                      " strex %0, %2, %1\n"
+                      : "=&r" (res), "+Q" (mem), "=&r" (tmp));
+#else
+        asm volatile (" ldxr %w2, %1\n"
+                      " add %w2, %w2, #1\n"
+                      " stxr %w0, %w2, %1\n"
+                      : "=&r" (res), "+Q" (mem), "=&r" (tmp));
+#endif
+        counter++;
+    } while (NOW() < deadline);
+
+    /* Publish the per-CPU retry bound consumed by the guest_* helpers. */
+    this_cpu(guest_safe_atomic_max) = counter;
+
+    printk(XENLOG_DEBUG
+           "CPU%u: Guest atomics will try %u times before pausing the domain\n",
+           smp_processor_id(), counter);
+}
+
+/*
+ * CPU notifier callback: recalibrate whenever a CPU is brought online.
+ *
+ * calibrate_safe_atomic() writes this_cpu() state, so this relies on
+ * CPU_STARTING notifications being delivered on the CPU that is coming
+ * up (the usual notifier semantics — NOTE(review): confirm for nested
+ * virt/hotplug paths). nfb and hcpu are unused.
+ */
+static int cpu_guest_safe_atomic_callback(struct notifier_block *nfb,
+                                          unsigned long action,
+                                          void *hcpu)
+{
+    if ( action == CPU_STARTING )
+        calibrate_safe_atomic();
+
+    return NOTIFY_DONE;
+}
+
+/* Notifier block hooking the per-CPU calibration into CPU bring-up. */
+static struct notifier_block cpu_guest_safe_atomic_nfb = {
+    .notifier_call = cpu_guest_safe_atomic_callback,
+};
+
+/*
+ * Register the CPU notifier and calibrate the boot CPU. Runs as a
+ * presmp initcall, i.e. before secondary CPUs come online; secondary
+ * CPUs are calibrated by the notifier at CPU_STARTING time. Always
+ * returns 0.
+ */
+static int __init guest_safe_atomic_init(void)
+{
+    register_cpu_notifier(&cpu_guest_safe_atomic_nfb);
+
+    calibrate_safe_atomic();
+
+    return 0;
+}
+presmp_initcall(guest_safe_atomic_init);
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
--- /dev/null
+#ifndef _ARM_GUEST_ATOMICS_H
+#define _ARM_GUEST_ATOMICS_H
+
+#include <xen/bitops.h>
+#include <xen/sched.h>
+
+/*
+ * The guest atomics helpers share the same logic. We first try to use
+ * the *_timeout version of the operation. If it didn't time out, then
+ * we successfully updated the memory. Nothing else to do.
+ *
+ * If it did time out, then it means we didn't manage to update the
+ * memory. This is possibly because the guest is misbehaving (e.g. a
+ * tight store loop) but can also happen for other reasons (e.g. nested
+ * Xen). In that case pause the domain and retry the operation, this
+ * time without a timeout.
+ *
+ * Note, these helpers rely on other parts of the code to prevent
+ * sharing a page between Xen and multiple domains.
+ */
+
+DECLARE_PER_CPU(unsigned int, guest_safe_atomic_max);
+
+/*
+ * Generate a void guest_<name>(d, nr, p) helper: try the bounded
+ * (*_timeout) variant first; on timeout, pause the domain and redo the
+ * operation without a bound. domain_pause_nosync() is used — the domain
+ * may still run briefly, the retry only needs the contention to stop.
+ */
+#define guest_bitop(name)                                                   \
+static inline void guest_##name(struct domain *d, int nr, volatile void *p) \
+{                                                                           \
+    if ( name##_timeout(nr, p, this_cpu(guest_safe_atomic_max)) )           \
+        return;                                                             \
+                                                                            \
+    domain_pause_nosync(d);                                                 \
+    name(nr, p);                                                            \
+    domain_unpause(d);                                                      \
+}
+
+/*
+ * Generate an int guest_<name>(d, nr, p) helper for the test-and-*
+ * operations: same bounded-then-pause-and-retry scheme as guest_bitop,
+ * but returning the old bit value reported by the underlying op.
+ */
+#define guest_testop(name)                                                  \
+static inline int guest_##name(struct domain *d, int nr, volatile void *p)  \
+{                                                                           \
+    bool succeed;                                                           \
+    int oldbit;                                                             \
+                                                                            \
+    succeed = name##_timeout(nr, p, &oldbit,                                \
+                             this_cpu(guest_safe_atomic_max));              \
+    if ( succeed )                                                          \
+        return oldbit;                                                      \
+                                                                            \
+    domain_pause_nosync(d);                                                 \
+    oldbit = name(nr, p);                                                   \
+    domain_unpause(d);                                                      \
+                                                                            \
+    return oldbit;                                                          \
+}
+
+/* Instantiate the void helpers: guest_set/clear/change_bit(). */
+guest_bitop(set_bit)
+guest_bitop(clear_bit)
+guest_bitop(change_bit)
+
+#undef guest_bitop
+
+/* test_bit does not use load-store atomic operations */
+#define guest_test_bit(d, nr, p) ((void)(d), test_bit(nr, p))
+
+/* Instantiate the value-returning helpers: guest_test_and_*_bit(). */
+guest_testop(test_and_set_bit)
+guest_testop(test_and_clear_bit)
+guest_testop(test_and_change_bit)
+
+#undef guest_testop
+
+#endif /* _ARM_GUEST_ATOMICS_H */
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
--- /dev/null
+#ifndef _X86_GUEST_ATOMICS_H
+#define _X86_GUEST_ATOMICS_H
+
+#include <xen/bitops.h>
+
+/*
+ * It is safe to use the atomics helpers on x86 on memory shared with
+ * the guests.
+ *
+ * The domain argument is evaluated but otherwise ignored ((void)(d)),
+ * keeping these macros signature-compatible with the Arm helpers that
+ * may need to pause the domain.
+ */
+#define guest_set_bit(d, nr, p)    ((void)(d), set_bit(nr, p))
+#define guest_clear_bit(d, nr, p)  ((void)(d), clear_bit(nr, p))
+#define guest_change_bit(d, nr, p) ((void)(d), change_bit(nr, p))
+#define guest_test_bit(d, nr, p)   ((void)(d), test_bit(nr, p))
+
+#define guest_test_and_set_bit(d, nr, p)                                    \
+    ((void)(d), test_and_set_bit(nr, p))
+#define guest_test_and_clear_bit(d, nr, p)                                  \
+    ((void)(d), test_and_clear_bit(nr, p))
+#define guest_test_and_change_bit(d, nr, p)                                 \
+    ((void)(d), test_and_change_bit(nr, p))
+
+#endif /* _X86_GUEST_ATOMICS_H */
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */