RISC-V: KVM: Add support for SBI STA registers
author    Andrew Jones <ajones@ventanamicro.com>
Wed, 20 Dec 2023 16:00:21 +0000 (17:00 +0100)
committer Anup Patel <anup@brainfault.org>
Sat, 30 Dec 2023 05:56:35 +0000 (11:26 +0530)
KVM userspace needs to be able to save and restore the steal-time
shared memory address. Provide the address through the get/set-one-reg
interface with two ulong-sized SBI STA extension registers (lo and hi).
64-bit KVM userspace must not set the hi register to anything other
than zero and is allowed to completely neglect saving/restoring it.

Reviewed-by: Anup Patel <anup@brainfault.org>
Signed-off-by: Andrew Jones <ajones@ventanamicro.com>
Signed-off-by: Anup Patel <anup@brainfault.org>
arch/riscv/include/asm/kvm_vcpu_sbi.h
arch/riscv/include/uapi/asm/kvm.h
arch/riscv/kvm/vcpu_onereg.c
arch/riscv/kvm/vcpu_sbi.c
arch/riscv/kvm/vcpu_sbi_sta.c
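
The commit message above describes the new lo/hi STA state registers; below is a hypothetical VMM-side sketch (not part of this commit) of saving and restoring the steal-time shared-memory address through KVM_GET_ONE_REG / KVM_SET_ONE_REG. It assumes a 64-bit host (on RV32 the registers are ulong/32-bit sized and KVM_REG_SIZE_U32 is used instead, as the vcpu_onereg.c hunk below shows), an already-open vCPU fd, and made-up helper names.

/*
 * Hypothetical VMM-side sketch: save/restore the STA shared-memory
 * address on a 64-bit host. Register IDs are composed exactly as the
 * kernel's copy_sbi_reg_indices() composes them.
 */
#include <stddef.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>
#include <asm/kvm.h>

#define STA_REG(name)                                          \
	(KVM_REG_RISCV | KVM_REG_SIZE_U64 |                    \
	 KVM_REG_RISCV_SBI_STATE | KVM_REG_RISCV_SBI_STA |     \
	 KVM_REG_RISCV_SBI_STA_REG(name))

static int sta_shmem_save(int vcpu_fd, uint64_t *shmem)
{
	struct kvm_one_reg reg = {
		.id   = STA_REG(shmem_lo),
		.addr = (uint64_t)(unsigned long)shmem,
	};

	/* May fail if the STA extension is not available for this vCPU. */
	return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
}

static int sta_shmem_restore(int vcpu_fd, uint64_t shmem)
{
	struct kvm_one_reg reg = {
		.id   = STA_REG(shmem_lo),
		.addr = (uint64_t)(unsigned long)&shmem,
	};

	return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
}

As the commit message notes, 64-bit userspace can ignore the shmem_hi register entirely; a 32-bit userspace would write both halves (see the sketch at the end of the page).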

arch/riscv/include/asm/kvm_vcpu_sbi.h
index dd60f73b5c36efdd03fdfbc0ebf9e2617a996be5..b96705258cf9641fbc43810cda7c01f970db1f58 100644
@@ -70,6 +70,11 @@ bool riscv_vcpu_supports_sbi_ext(struct kvm_vcpu *vcpu, int idx);
 int kvm_riscv_vcpu_sbi_ecall(struct kvm_vcpu *vcpu, struct kvm_run *run);
 void kvm_riscv_vcpu_sbi_init(struct kvm_vcpu *vcpu);
 
+int kvm_riscv_vcpu_get_reg_sbi_sta(struct kvm_vcpu *vcpu, unsigned long reg_num,
+                                  unsigned long *reg_val);
+int kvm_riscv_vcpu_set_reg_sbi_sta(struct kvm_vcpu *vcpu, unsigned long reg_num,
+                                  unsigned long reg_val);
+
 #ifdef CONFIG_RISCV_SBI_V01
 extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_v01;
 #endif
arch/riscv/include/uapi/asm/kvm.h
index 3471b1e48d180bf2f3b1a0521139251769483143..d6b7a5b958742c443bce93e067434128c1cff7e4 100644
@@ -161,6 +161,12 @@ enum KVM_RISCV_SBI_EXT_ID {
        KVM_RISCV_SBI_EXT_MAX,
 };
 
+/* SBI STA extension registers for KVM_GET_ONE_REG and KVM_SET_ONE_REG */
+struct kvm_riscv_sbi_sta {
+       unsigned long shmem_lo;
+       unsigned long shmem_hi;
+};
+
 /* Possible states for kvm_riscv_timer */
 #define KVM_RISCV_TIMER_STATE_OFF      0
 #define KVM_RISCV_TIMER_STATE_ON       1
@@ -244,6 +250,9 @@ enum KVM_RISCV_SBI_EXT_ID {
 
 /* Registers for specific SBI extensions are mapped as type 10 */
 #define KVM_REG_RISCV_SBI_STATE                (0x0a << KVM_REG_RISCV_TYPE_SHIFT)
+#define KVM_REG_RISCV_SBI_STA          (0x0 << KVM_REG_RISCV_SUBTYPE_SHIFT)
+#define KVM_REG_RISCV_SBI_STA_REG(name)                \
+               (offsetof(struct kvm_riscv_sbi_sta, name) / sizeof(unsigned long))
 
 /* Device Control API: RISC-V AIA */
 #define KVM_DEV_RISCV_APLIC_ALIGN              0x1000
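
The KVM_REG_RISCV_SBI_STA_REG() encoding above is just the unsigned-long index of the field within struct kvm_riscv_sbi_sta, so it can be sanity-checked at compile time. The snippet below is illustrative only and assumes <asm/kvm.h> comes from a tree that carries this patch; the indices hold on both 32-bit and 64-bit builds because offsetof() scales with sizeof(unsigned long).

/* Hypothetical compile-time check of the register index encoding. */
#include <stddef.h>
#include <asm/kvm.h>

_Static_assert(KVM_REG_RISCV_SBI_STA_REG(shmem_lo) == 0,
	       "shmem_lo is the first unsigned long in struct kvm_riscv_sbi_sta");
_Static_assert(KVM_REG_RISCV_SBI_STA_REG(shmem_hi) == 1,
	       "shmem_hi is the second unsigned long in struct kvm_riscv_sbi_sta");

int main(void) { return 0; }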
arch/riscv/kvm/vcpu_onereg.c
index 143d0edd7f6388387cf2442c609c6d5381d064a4..fc34557f5356e27902a2f83c27eb37f1237c9b95 100644
@@ -961,27 +961,36 @@ static unsigned long num_sbi_ext_regs(struct kvm_vcpu *vcpu)
        return copy_sbi_ext_reg_indices(vcpu, NULL);
 }
 
-static inline unsigned long num_sbi_regs(struct kvm_vcpu *vcpu)
-{
-       return 0;
-}
-
 static int copy_sbi_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
 {
-       int n = num_sbi_regs(vcpu);
+       struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context;
+       int total = 0;
 
-       for (int i = 0; i < n; i++) {
-               u64 reg = KVM_REG_RISCV | KVM_REG_SIZE_U64 |
-                         KVM_REG_RISCV_SBI_STATE | i;
+       if (scontext->ext_status[KVM_RISCV_SBI_EXT_STA] == KVM_RISCV_SBI_EXT_STATUS_ENABLED) {
+               u64 size = IS_ENABLED(CONFIG_32BIT) ? KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
+               int n = sizeof(struct kvm_riscv_sbi_sta) / sizeof(unsigned long);
 
-               if (uindices) {
-                       if (put_user(reg, uindices))
-                               return -EFAULT;
-                       uindices++;
+               for (int i = 0; i < n; i++) {
+                       u64 reg = KVM_REG_RISCV | size |
+                                 KVM_REG_RISCV_SBI_STATE |
+                                 KVM_REG_RISCV_SBI_STA | i;
+
+                       if (uindices) {
+                               if (put_user(reg, uindices))
+                                       return -EFAULT;
+                               uindices++;
+                       }
                }
+
+               total += n;
        }
 
-       return n;
+       return total;
+}
+
+static inline unsigned long num_sbi_regs(struct kvm_vcpu *vcpu)
+{
+       return copy_sbi_reg_indices(vcpu, NULL);
 }
 
 static inline unsigned long num_vector_regs(const struct kvm_vcpu *vcpu)
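
To illustrate the enumeration path added above: the two STA registers only show up in KVM_GET_REG_LIST when the STA extension is enabled for the vCPU. The sketch below is hypothetical (not part of the commit), assumes a 64-bit host (U64-sized IDs), an open vCPU fd, and an arbitrary buffer size.

/* Hypothetical sketch: find the SBI STA state registers via KVM_GET_REG_LIST. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>
#include <asm/kvm.h>

#define MAX_REGS 1024	/* assumed large enough for this vCPU */

static void find_sta_regs(int vcpu_fd)
{
	uint64_t sta_lo = KVM_REG_RISCV | KVM_REG_SIZE_U64 |
			  KVM_REG_RISCV_SBI_STATE | KVM_REG_RISCV_SBI_STA |
			  KVM_REG_RISCV_SBI_STA_REG(shmem_lo);
	uint64_t sta_hi = KVM_REG_RISCV | KVM_REG_SIZE_U64 |
			  KVM_REG_RISCV_SBI_STATE | KVM_REG_RISCV_SBI_STA |
			  KVM_REG_RISCV_SBI_STA_REG(shmem_hi);
	struct kvm_reg_list *list;

	list = calloc(1, sizeof(*list) + MAX_REGS * sizeof(uint64_t));
	if (!list)
		return;
	list->n = MAX_REGS;

	if (ioctl(vcpu_fd, KVM_GET_REG_LIST, list) < 0) {
		perror("KVM_GET_REG_LIST");
		free(list);
		return;
	}

	/* Present only when the STA extension is enabled for the vCPU. */
	for (uint64_t i = 0; i < list->n; i++)
		if (list->reg[i] == sta_lo || list->reg[i] == sta_hi)
			printf("found SBI STA reg 0x%llx\n",
			       (unsigned long long)list->reg[i]);

	free(list);
}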
arch/riscv/kvm/vcpu_sbi.c
index a1997c39dfde35cbd0a12a2d528ee8251499975d..72a2ffb8dcd158a4f5b4d0839d26ee399e2b93b6 100644
@@ -345,6 +345,8 @@ int kvm_riscv_vcpu_set_reg_sbi(struct kvm_vcpu *vcpu,
        reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;
 
        switch (reg_subtype) {
+       case KVM_REG_RISCV_SBI_STA:
+               return kvm_riscv_vcpu_set_reg_sbi_sta(vcpu, reg_num, reg_val);
        default:
                return -EINVAL;
        }
@@ -370,6 +372,9 @@ int kvm_riscv_vcpu_get_reg_sbi(struct kvm_vcpu *vcpu,
        reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;
 
        switch (reg_subtype) {
+       case KVM_REG_RISCV_SBI_STA:
+               ret = kvm_riscv_vcpu_get_reg_sbi_sta(vcpu, reg_num, &reg_val);
+               break;
        default:
                return -EINVAL;
        }
arch/riscv/kvm/vcpu_sbi_sta.c
index 6592d287fc4e57d608b5ae6ea3e43e8985a7cea3..87bf1a5f05ced8d09c3770bb4595d35575532934 100644
@@ -3,6 +3,8 @@
  * Copyright (c) 2023 Ventana Micro Systems Inc.
  */
 
+#include <linux/kconfig.h>
+#include <linux/kernel.h>
 #include <linux/kvm_host.h>
 
 #include <asm/kvm_vcpu_sbi.h>
@@ -59,3 +61,56 @@ const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_sta = {
        .handler = kvm_sbi_ext_sta_handler,
        .probe = kvm_sbi_ext_sta_probe,
 };
+
+int kvm_riscv_vcpu_get_reg_sbi_sta(struct kvm_vcpu *vcpu,
+                                  unsigned long reg_num,
+                                  unsigned long *reg_val)
+{
+       switch (reg_num) {
+       case KVM_REG_RISCV_SBI_STA_REG(shmem_lo):
+               *reg_val = (unsigned long)vcpu->arch.sta.shmem;
+               break;
+       case KVM_REG_RISCV_SBI_STA_REG(shmem_hi):
+               if (IS_ENABLED(CONFIG_32BIT))
+                       *reg_val = upper_32_bits(vcpu->arch.sta.shmem);
+               else
+                       *reg_val = 0;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+int kvm_riscv_vcpu_set_reg_sbi_sta(struct kvm_vcpu *vcpu,
+                                  unsigned long reg_num,
+                                  unsigned long reg_val)
+{
+       switch (reg_num) {
+       case KVM_REG_RISCV_SBI_STA_REG(shmem_lo):
+               if (IS_ENABLED(CONFIG_32BIT)) {
+                       gpa_t hi = upper_32_bits(vcpu->arch.sta.shmem);
+
+                       vcpu->arch.sta.shmem = reg_val;
+                       vcpu->arch.sta.shmem |= hi << 32;
+               } else {
+                       vcpu->arch.sta.shmem = reg_val;
+               }
+               break;
+       case KVM_REG_RISCV_SBI_STA_REG(shmem_hi):
+               if (IS_ENABLED(CONFIG_32BIT)) {
+                       gpa_t lo = lower_32_bits(vcpu->arch.sta.shmem);
+
+                       vcpu->arch.sta.shmem = ((gpa_t)reg_val << 32);
+                       vcpu->arch.sta.shmem |= lo;
+               } else if (reg_val != 0) {
+                       return -EINVAL;
+               }
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       return 0;
+}
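
For completeness, a hypothetical RV32 userspace sketch (not part of the patch) of restoring a 64-bit steal-time GPA through the two 32-bit halves. Because the set handler above preserves the other half on each write, the order of the two KVM_SET_ONE_REG calls does not matter. The fd and function names are made up for illustration.

/* Hypothetical RV32 sketch: restore the STA shared-memory GPA as two halves. */
#include <stddef.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>
#include <asm/kvm.h>

#define STA_REG32(name)                                        \
	(KVM_REG_RISCV | KVM_REG_SIZE_U32 |                    \
	 KVM_REG_RISCV_SBI_STATE | KVM_REG_RISCV_SBI_STA |     \
	 KVM_REG_RISCV_SBI_STA_REG(name))

static int sta_shmem_restore_rv32(int vcpu_fd, uint64_t shmem_gpa)
{
	uint32_t lo = (uint32_t)shmem_gpa;
	uint32_t hi = (uint32_t)(shmem_gpa >> 32);
	struct kvm_one_reg reg_lo = {
		.id   = STA_REG32(shmem_lo),
		.addr = (uint64_t)(unsigned long)&lo,
	};
	struct kvm_one_reg reg_hi = {
		.id   = STA_REG32(shmem_hi),
		.addr = (uint64_t)(unsigned long)&hi,
	};

	if (ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg_lo) < 0)
		return -1;
	return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg_hi);
}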