RISC-V: KVM: selftests: Add steal_time test support
author Andrew Jones <ajones@ventanamicro.com>
Wed, 20 Dec 2023 16:00:25 +0000 (17:00 +0100)
committer Anup Patel <anup@brainfault.org>
Sat, 30 Dec 2023 05:56:45 +0000 (11:26 +0530)
With the introduction of steal-time accounting support for
RISC-V KVM, we can add RISC-V support to the steal_time test.
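
For context, the guest side of the test consumes a 64-byte, 64-byte-aligned
record that the SBI implementation publishes in shared memory. A minimal
sketch of a general-purpose reader (not part of this patch; it assumes the
sta_struct layout and the READ_ONCE() helper used by the test code below)
would retry on the sequence counter, since an odd or changed value indicates
the record was being updated while it was read:

    static uint64_t sta_read_steal(struct sta_struct *st)
    {
            uint32_t seq;
            uint64_t steal;

            do {
                    /* An odd sequence means an update is in progress. */
                    do {
                            seq = READ_ONCE(st->sequence);
                    } while (seq & 1);
                    steal = READ_ONCE(st->steal);
                    /* Retry if the record changed while we were reading it. */
            } while (READ_ONCE(st->sequence) != seq);

            return steal;
    }

The test itself only asserts that the sequence is even, since KVM updates the
record while the vCPU is not executing guest code, so an in-guest read never
races with an update.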

Reviewed-by: Anup Patel <anup@brainfault.org>
Reviewed-by: Atish Patra <atishp@rivosinc.com>
Signed-off-by: Andrew Jones <ajones@ventanamicro.com>
Signed-off-by: Anup Patel <anup@brainfault.org>
tools/testing/selftests/kvm/Makefile
tools/testing/selftests/kvm/include/riscv/processor.h
tools/testing/selftests/kvm/steal_time.c

index 3e0c36b8ddd56ebbb2817ba76d3af58abcfdc4ee..017efabcf502fa07c20546342ad34fa84a2a41cf 100644 (file)
@@ -184,12 +184,13 @@ TEST_GEN_PROGS_s390x += kvm_binary_stats_test
 
 TEST_GEN_PROGS_riscv += demand_paging_test
 TEST_GEN_PROGS_riscv += dirty_log_test
-TEST_GEN_PROGS_riscv += guest_print_test
 TEST_GEN_PROGS_riscv += get-reg-list
+TEST_GEN_PROGS_riscv += guest_print_test
+TEST_GEN_PROGS_riscv += kvm_binary_stats_test
 TEST_GEN_PROGS_riscv += kvm_create_max_vcpus
 TEST_GEN_PROGS_riscv += kvm_page_table_test
 TEST_GEN_PROGS_riscv += set_memory_region_test
-TEST_GEN_PROGS_riscv += kvm_binary_stats_test
+TEST_GEN_PROGS_riscv += steal_time
 
 SPLIT_TESTS += get-reg-list
 
index dc50ad62e150d85757f15bb37e5afcf12850f786..a0f9efe5a2a8de6afda4d4531f1ca6bda22f4b9c 100644 (file)
@@ -128,6 +128,7 @@ static inline uint64_t __kvm_reg_id(uint64_t type, uint64_t subtype,
 
 enum sbi_ext_id {
        SBI_EXT_BASE = 0x10,
+       SBI_EXT_STA = 0x535441,
 };
 
 enum sbi_ext_base_fid {
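
An aside on the SBI_EXT_STA value added above (not part of the patch): like
most of the newer standard SBI extensions, the extension ID encodes the
extension's name in ASCII, here the bytes 'S', 'T', 'A'. A compile-time check
illustrating this:

    /* Hedged aside: 0x535441 is just the ASCII bytes 'S', 'T', 'A' packed
     * into the low three bytes of the extension ID. */
    _Static_assert((('S' << 16) | ('T' << 8) | 'A') == 0x535441,
                   "SBI_EXT_STA spells \"STA\"");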
index 171adfb2a6cbc45ffee0fcf4d149b49ed29ab2d7..bae0c5026f82f6f16022813437797c6b28c882e8 100644 (file)
@@ -11,7 +11,9 @@
 #include <pthread.h>
 #include <linux/kernel.h>
 #include <asm/kvm.h>
+#ifndef __riscv
 #include <asm/kvm_para.h>
+#endif
 
 #include "test_util.h"
 #include "kvm_util.h"
@@ -203,6 +205,103 @@ static void steal_time_dump(struct kvm_vm *vm, uint32_t vcpu_idx)
        pr_info("    st_time: %ld\n", st->st_time);
 }
 
+#elif defined(__riscv)
+
+/* SBI STA shmem must have 64-byte alignment */
+#define STEAL_TIME_SIZE                ((sizeof(struct sta_struct) + 63) & ~63)
+
+static vm_paddr_t st_gpa[NR_VCPUS];
+
+struct sta_struct {
+       uint32_t sequence;
+       uint32_t flags;
+       uint64_t steal;
+       uint8_t preempted;
+       uint8_t pad[47];
+} __packed;
+
+static void sta_set_shmem(vm_paddr_t gpa, unsigned long flags)
+{
+       unsigned long lo = (unsigned long)gpa;
+#if __riscv_xlen == 32
+       unsigned long hi = (unsigned long)(gpa >> 32);
+#else
+       unsigned long hi = gpa == -1 ? -1 : 0;
+#endif
+       struct sbiret ret = sbi_ecall(SBI_EXT_STA, 0, lo, hi, flags, 0, 0, 0);
+
+       GUEST_ASSERT(ret.value == 0 && ret.error == 0);
+}
+
+static void check_status(struct sta_struct *st)
+{
+       GUEST_ASSERT(!(READ_ONCE(st->sequence) & 1));
+       GUEST_ASSERT(READ_ONCE(st->flags) == 0);
+       GUEST_ASSERT(READ_ONCE(st->preempted) == 0);
+}
+
+static void guest_code(int cpu)
+{
+       struct sta_struct *st = st_gva[cpu];
+       uint32_t sequence;
+       long out_val = 0;
+       bool probe;
+
+       probe = guest_sbi_probe_extension(SBI_EXT_STA, &out_val);
+       GUEST_ASSERT(probe && out_val == 1);
+
+       sta_set_shmem(st_gpa[cpu], 0);
+       GUEST_SYNC(0);
+
+       check_status(st);
+       WRITE_ONCE(guest_stolen_time[cpu], st->steal);
+       sequence = READ_ONCE(st->sequence);
+       check_status(st);
+       GUEST_SYNC(1);
+
+       check_status(st);
+       GUEST_ASSERT(sequence < READ_ONCE(st->sequence));
+       WRITE_ONCE(guest_stolen_time[cpu], st->steal);
+       check_status(st);
+       GUEST_DONE();
+}
+
+static bool is_steal_time_supported(struct kvm_vcpu *vcpu)
+{
+       uint64_t id = RISCV_SBI_EXT_REG(KVM_RISCV_SBI_EXT_STA);
+       unsigned long enabled;
+
+       vcpu_get_reg(vcpu, id, &enabled);
+       TEST_ASSERT(enabled == 0 || enabled == 1, "Expected boolean result");
+
+       return enabled;
+}
+
+static void steal_time_init(struct kvm_vcpu *vcpu, uint32_t i)
+{
+       /* ST_GPA_BASE is identity mapped */
+       st_gva[i] = (void *)(ST_GPA_BASE + i * STEAL_TIME_SIZE);
+       st_gpa[i] = addr_gva2gpa(vcpu->vm, (vm_vaddr_t)st_gva[i]);
+       sync_global_to_guest(vcpu->vm, st_gva[i]);
+       sync_global_to_guest(vcpu->vm, st_gpa[i]);
+}
+
+static void steal_time_dump(struct kvm_vm *vm, uint32_t vcpu_idx)
+{
+       struct sta_struct *st = addr_gva2hva(vm, (ulong)st_gva[vcpu_idx]);
+       int i;
+
+       pr_info("VCPU%d:\n", vcpu_idx);
+       pr_info("    sequence:  %d\n", st->sequence);
+       pr_info("    flags:     %d\n", st->flags);
+       pr_info("    steal:     %"PRIu64"\n", st->steal);
+       pr_info("    preempted: %d\n", st->preempted);
+       pr_info("    pad:      ");
+       for (i = 0; i < 47; ++i)
+               pr_info("%d", st->pad[i]);
+       pr_info("\n");
+}
+
 #endif
 
 static void *do_steal_time(void *arg)