RISC-V: KVM: Implement SBI STA extension
author    Andrew Jones <ajones@ventanamicro.com>
Wed, 20 Dec 2023 16:00:22 +0000 (17:00 +0100)
committer Anup Patel <anup@brainfault.org>
Sat, 30 Dec 2023 05:56:38 +0000 (11:26 +0530)
Add a select SCHED_INFO to the KVM config in order to get run_delay
info. Then implement SBI STA's set-steal-time-shmem function and
kvm_riscv_vcpu_record_steal_time() to provide the steal-time info
to guests.

Reviewed-by: Anup Patel <anup@brainfault.org>
Reviewed-by: Atish Patra <atishp@rivosinc.com>
Signed-off-by: Andrew Jones <ajones@ventanamicro.com>
Signed-off-by: Anup Patel <anup@brainfault.org>
arch/riscv/kvm/Kconfig
arch/riscv/kvm/vcpu_sbi_sta.c
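
For reference, both functions changed below operate on the 64-byte,
64-byte-aligned record defined by the SBI STA extension. A sketch of its
layout, matching the definition the series adds to
arch/riscv/include/asm/sbi.h (all fields are little-endian):

    /* 64-byte steal-time record shared between hypervisor and guest. */
    struct sbi_sta_struct {
            __le32 sequence;        /* odd while the hypervisor is mid-update */
            __le32 flags;           /* reserved, must be zero */
            __le64 steal;           /* accumulated steal time, in nanoseconds */
            u8 preempted;           /* hint that the vCPU is scheduled out */
            u8 pad[47];             /* reserved; pads the record to 64 bytes */
    } __packed;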

diff --git a/arch/riscv/kvm/Kconfig b/arch/riscv/kvm/Kconfig
index dfc237d7875b53bb2f3e7e716c1b85af8b335458..148e52b516cf5bf50c61354e995910ccaec75b38 100644
--- a/arch/riscv/kvm/Kconfig
+++ b/arch/riscv/kvm/Kconfig
@@ -32,6 +32,7 @@ config KVM
        select KVM_XFER_TO_GUEST_WORK
        select MMU_NOTIFIER
        select PREEMPT_NOTIFIERS
+       select SCHED_INFO
        help
          Support hosting virtualized guest machines.
 
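
Selecting SCHED_INFO makes the scheduler maintain sched_info.run_delay, which
accumulates, in nanoseconds, the time a task has spent runnable on a runqueue
without actually running. Sampling the vCPU task's run_delay at two points
yields the steal time accrued in between; a hypothetical helper (not from the
patch) illustrating the idea:

    #include <linux/sched.h>

    /* Return the nanoseconds of steal time accrued since *last_sample,
     * i.e. the growth of the current task's runqueue wait time, and
     * advance the sample point. */
    static u64 steal_delta_ns(u64 *last_sample)
    {
            u64 now = READ_ONCE(current->sched_info.run_delay);
            u64 delta = now - *last_sample;

            *last_sample = now;
            return delta;
    }

This is exactly the delta that kvm_riscv_vcpu_record_steal_time() below folds
into the shared record's steal field.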
diff --git a/arch/riscv/kvm/vcpu_sbi_sta.c b/arch/riscv/kvm/vcpu_sbi_sta.c
index 87bf1a5f05ced8d09c3770bb4595d35575532934..01f09fe8c3b020968be3f623097c9a48ab958087 100644
--- a/arch/riscv/kvm/vcpu_sbi_sta.c
+++ b/arch/riscv/kvm/vcpu_sbi_sta.c
@@ -6,9 +6,15 @@
 #include <linux/kconfig.h>
 #include <linux/kernel.h>
 #include <linux/kvm_host.h>
+#include <linux/mm.h>
+#include <linux/sizes.h>
 
+#include <asm/bug.h>
+#include <asm/current.h>
 #include <asm/kvm_vcpu_sbi.h>
+#include <asm/page.h>
 #include <asm/sbi.h>
+#include <asm/uaccess.h>
 
 void kvm_riscv_vcpu_sbi_sta_reset(struct kvm_vcpu *vcpu)
 {
@@ -19,14 +25,100 @@ void kvm_riscv_vcpu_sbi_sta_reset(struct kvm_vcpu *vcpu)
 void kvm_riscv_vcpu_record_steal_time(struct kvm_vcpu *vcpu)
 {
        gpa_t shmem = vcpu->arch.sta.shmem;
+       u64 last_steal = vcpu->arch.sta.last_steal;
+       u32 *sequence_ptr, sequence;
+       u64 *steal_ptr, steal;
+       unsigned long hva;
+       gfn_t gfn;
 
        if (shmem == INVALID_GPA)
                return;
+
+       /*
+        * shmem is 64-byte aligned (see the enforcement in
+        * kvm_sbi_sta_steal_time_set_shmem()) and the size of sbi_sta_struct
+        * is 64 bytes, so we know all its offsets are in the same page.
+        */
+       gfn = shmem >> PAGE_SHIFT;
+       hva = kvm_vcpu_gfn_to_hva(vcpu, gfn);
+
+       if (WARN_ON(kvm_is_error_hva(hva))) {
+               vcpu->arch.sta.shmem = INVALID_GPA;
+               return;
+       }
+
+       sequence_ptr = (u32 *)(hva + offset_in_page(shmem) +
+                              offsetof(struct sbi_sta_struct, sequence));
+       steal_ptr = (u64 *)(hva + offset_in_page(shmem) +
+                           offsetof(struct sbi_sta_struct, steal));
+
+       if (WARN_ON(get_user(sequence, sequence_ptr)))
+               return;
+
+       sequence = le32_to_cpu(sequence);
+       sequence += 1;
+
+       if (WARN_ON(put_user(cpu_to_le32(sequence), sequence_ptr)))
+               return;
+
+       if (!WARN_ON(get_user(steal, steal_ptr))) {
+               steal = le64_to_cpu(steal);
+               vcpu->arch.sta.last_steal = READ_ONCE(current->sched_info.run_delay);
+               steal += vcpu->arch.sta.last_steal - last_steal;
+               WARN_ON(put_user(cpu_to_le64(steal), steal_ptr));
+       }
+
+       sequence += 1;
+       WARN_ON(put_user(cpu_to_le32(sequence), sequence_ptr));
+
+       kvm_vcpu_mark_page_dirty(vcpu, gfn);
 }
 
 static int kvm_sbi_sta_steal_time_set_shmem(struct kvm_vcpu *vcpu)
 {
-       return SBI_ERR_FAILURE;
+       struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
+       unsigned long shmem_phys_lo = cp->a0;
+       unsigned long shmem_phys_hi = cp->a1;
+       u32 flags = cp->a2;
+       struct sbi_sta_struct zero_sta = {0};
+       unsigned long hva;
+       bool writable;
+       gpa_t shmem;
+       int ret;
+
+       if (flags != 0)
+               return SBI_ERR_INVALID_PARAM;
+
+       if (shmem_phys_lo == SBI_STA_SHMEM_DISABLE &&
+           shmem_phys_hi == SBI_STA_SHMEM_DISABLE) {
+               vcpu->arch.sta.shmem = INVALID_GPA;
+               return 0;
+       }
+
+       if (shmem_phys_lo & (SZ_64 - 1))
+               return SBI_ERR_INVALID_PARAM;
+
+       shmem = shmem_phys_lo;
+
+       if (shmem_phys_hi != 0) {
+               if (IS_ENABLED(CONFIG_32BIT))
+                       shmem |= ((gpa_t)shmem_phys_hi << 32);
+               else
+                       return SBI_ERR_INVALID_ADDRESS;
+       }
+
+       hva = kvm_vcpu_gfn_to_hva_prot(vcpu, shmem >> PAGE_SHIFT, &writable);
+       if (kvm_is_error_hva(hva) || !writable)
+               return SBI_ERR_INVALID_ADDRESS;
+
+       ret = kvm_vcpu_write_guest(vcpu, shmem, &zero_sta, sizeof(zero_sta));
+       if (ret)
+               return SBI_ERR_FAILURE;
+
+       vcpu->arch.sta.shmem = shmem;
+       vcpu->arch.sta.last_steal = current->sched_info.run_delay;
+
+       return 0;
 }
 
 static int kvm_sbi_ext_sta_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
@@ -52,7 +144,7 @@ static int kvm_sbi_ext_sta_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
 
 static unsigned long kvm_sbi_ext_sta_probe(struct kvm_vcpu *vcpu)
 {
-       return 0;
+       return !!sched_info_on();
 }
 
 const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_sta = {
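
For context, the consumer of this interface is guest code (the Linux guest
side lands separately as paravirt steal-time support). Below is a minimal,
hypothetical sketch, not part of this commit, of how a guest might register
the shared record and then read it; it assumes the SBI_EXT_STA constants and
struct sbi_sta_struct from asm/sbi.h. The reader loop mirrors the host's two
sequence increments above: the count is odd while steal is being updated, so
a reader retries while the count is odd or changes between its two reads.

    #include <linux/kernel.h>
    #include <asm/barrier.h>
    #include <asm/sbi.h>

    /* Register a 64-byte-aligned per-hart record with the hypervisor.
     * On RV64 the whole physical address goes in a0 and a1 must be zero;
     * only 32-bit guests split the address across the two arguments. */
    static int sta_set_shmem(phys_addr_t pa)
    {
            unsigned long lo = (unsigned long)pa;
            unsigned long hi = IS_ENABLED(CONFIG_32BIT) ? upper_32_bits((u64)pa) : 0;
            struct sbiret ret;

            ret = sbi_ecall(SBI_EXT_STA, SBI_EXT_STA_STEAL_TIME_SET_SHMEM,
                            lo, hi, 0 /* flags */, 0, 0, 0);
            return ret.error ? sbi_err_map_linux_errno(ret.error) : 0;
    }

    /* Read the steal counter, retrying while an update is in flight
     * (odd sequence) or completed underneath us (sequence changed). */
    static u64 sta_steal_clock(struct sbi_sta_struct *st)
    {
            __le32 seq;
            __le64 steal;

            do {
                    seq = READ_ONCE(st->sequence);
                    virt_rmb();
                    steal = READ_ONCE(st->steal);
                    virt_rmb();
            } while ((le32_to_cpu(seq) & 1) || READ_ONCE(st->sequence) != seq);

            return le64_to_cpu(steal);
    }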