--- /dev/null
+/* SPDX-License-Identifier: MIT */
+/******************************************************************************
+ *
+ * Copyright 2019 (C) Alistair Francis <alistair.francis@wdc.com>
+ * Copyright 2021 (C) Bobby Eshleman <bobby.eshleman@gmail.com>
+ * Copyright 2023 (C) Vates
+ *
+ */
+
+#ifndef _ASM_RISCV_PROCESSOR_H
+#define _ASM_RISCV_PROCESSOR_H
+
+#ifndef __ASSEMBLY__
+
+/*
+ * On-stack VCPU state.
+ *
+ * One slot per RISC-V general-purpose register, listed in ABI-name order
+ * (x0..x31), followed by the sepc and sstatus CSRs.  The CPU_USER_REGS_*
+ * assembler offsets are generated from this layout, so the field order
+ * must not change without regenerating/updating the users of those
+ * offsets.
+ */
+struct cpu_user_regs
+{
+    unsigned long zero;     /* x0: hard-wired zero (placeholder slot) */
+    unsigned long ra;       /* x1: return address */
+    unsigned long sp;       /* x2: stack pointer */
+    unsigned long gp;       /* x3: global pointer */
+    unsigned long tp;       /* x4: thread pointer */
+    unsigned long t0;       /* x5-x7: temporaries */
+    unsigned long t1;
+    unsigned long t2;
+    unsigned long s0;       /* x8-x9: callee-saved */
+    unsigned long s1;
+    unsigned long a0;       /* x10-x17: argument/return registers */
+    unsigned long a1;
+    unsigned long a2;
+    unsigned long a3;
+    unsigned long a4;
+    unsigned long a5;
+    unsigned long a6;
+    unsigned long a7;
+    unsigned long s2;       /* x18-x27: callee-saved */
+    unsigned long s3;
+    unsigned long s4;
+    unsigned long s5;
+    unsigned long s6;
+    unsigned long s7;
+    unsigned long s8;
+    unsigned long s9;
+    unsigned long s10;
+    unsigned long s11;
+    unsigned long t3;       /* x28-x31: temporaries */
+    unsigned long t4;
+    unsigned long t5;
+    unsigned long t6;
+    unsigned long sepc;     /* supervisor exception program counter CSR */
+    unsigned long sstatus;  /* supervisor status CSR */
+    /*
+     * Pointer to the previous on-stack struct cpu_user_regs
+     * ("stack_cpu_regs" in the original comment; no such type exists in
+     * this header — NOTE(review): confirm the intended referent).
+     */
+    unsigned long pregs;
+};
+
+/*
+ * Wait For Interrupt: stall this hart until an interrupt (or other
+ * implementation-defined wake-up event) becomes pending, per the RISC-V
+ * privileged spec.  volatile prevents the compiler from eliding the
+ * instruction even though it has no visible outputs.
+ */
+static inline void wfi(void)
+{
+    __asm__ __volatile__ ("wfi");
+}
+
+/*
+ * Halt this CPU permanently by spinning in a low-power wait-for-interrupt
+ * loop.  panic() is not available yet, so this stands in for it.
+ * TODO: switch to panic() once it exists.
+ */
+static inline void die(void)
+{
+    while ( 1 )
+        wfi();
+}
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_RISCV_PROCESSOR_H */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
+#define COMPILE_OFFSETS
+
+#include <asm/processor.h>
+#include <xen/types.h>
+
+/*
+ * Emit "==> ... <==" marker lines into the generated assembly; the build
+ * system presumably extracts them to produce the asm-offsets header
+ * (NOTE(review): confirm against the Makefile rule).  The "i" constraint
+ * forces _val to be an assemble-time integer constant.
+ */
+#define DEFINE(_sym, _val) \
+    asm volatile ("\n.ascii\"==>#define " #_sym " %0 /* " #_val " */<==\"" \
+                  : : "i" (_val) )
+
+/* Emit an empty marker: a visual separator in the generated header. */
+#define BLANK() \
+    asm volatile ( "\n.ascii\"==><==\"" : : )
+
+/*
+ * Define _sym as the byte offset of member _mem within _str.  No trailing
+ * semicolon in the expansion: the invocation site supplies it, so uses of
+ * OFFSET(...); expand to exactly one statement (the previous definition
+ * embedded a ';', leaving a stray null statement at every call site and
+ * breaking use as a single unbraced statement).
+ */
+#define OFFSET(_sym, _str, _mem) \
+    DEFINE(_sym, offsetof(_str, _mem))
+
+/*
+ * Dummy function, not meant to be executed: it exists only so the compiler
+ * turns the DEFINE()/BLANK()/OFFSET() invocations below into recognizable
+ * marker strings in the assembly output, from which the asm-offsets header
+ * is presumably generated by the build system (see DEFINE() above).
+ *
+ * The entries mirror struct cpu_user_regs field-for-field; keep this list
+ * in sync with that structure's declaration order.
+ */
+void asm_offsets(void)
+{
+    BLANK();
+    /* Total size of the trap frame, for stack adjustment in entry code. */
+    DEFINE(CPU_USER_REGS_SIZE, sizeof(struct cpu_user_regs));
+    /* General-purpose registers, in struct (ABI x0..x31) order. */
+    OFFSET(CPU_USER_REGS_ZERO, struct cpu_user_regs, zero);
+    OFFSET(CPU_USER_REGS_RA, struct cpu_user_regs, ra);
+    OFFSET(CPU_USER_REGS_SP, struct cpu_user_regs, sp);
+    OFFSET(CPU_USER_REGS_GP, struct cpu_user_regs, gp);
+    OFFSET(CPU_USER_REGS_TP, struct cpu_user_regs, tp);
+    OFFSET(CPU_USER_REGS_T0, struct cpu_user_regs, t0);
+    OFFSET(CPU_USER_REGS_T1, struct cpu_user_regs, t1);
+    OFFSET(CPU_USER_REGS_T2, struct cpu_user_regs, t2);
+    OFFSET(CPU_USER_REGS_S0, struct cpu_user_regs, s0);
+    OFFSET(CPU_USER_REGS_S1, struct cpu_user_regs, s1);
+    OFFSET(CPU_USER_REGS_A0, struct cpu_user_regs, a0);
+    OFFSET(CPU_USER_REGS_A1, struct cpu_user_regs, a1);
+    OFFSET(CPU_USER_REGS_A2, struct cpu_user_regs, a2);
+    OFFSET(CPU_USER_REGS_A3, struct cpu_user_regs, a3);
+    OFFSET(CPU_USER_REGS_A4, struct cpu_user_regs, a4);
+    OFFSET(CPU_USER_REGS_A5, struct cpu_user_regs, a5);
+    OFFSET(CPU_USER_REGS_A6, struct cpu_user_regs, a6);
+    OFFSET(CPU_USER_REGS_A7, struct cpu_user_regs, a7);
+    OFFSET(CPU_USER_REGS_S2, struct cpu_user_regs, s2);
+    OFFSET(CPU_USER_REGS_S3, struct cpu_user_regs, s3);
+    OFFSET(CPU_USER_REGS_S4, struct cpu_user_regs, s4);
+    OFFSET(CPU_USER_REGS_S5, struct cpu_user_regs, s5);
+    OFFSET(CPU_USER_REGS_S6, struct cpu_user_regs, s6);
+    OFFSET(CPU_USER_REGS_S7, struct cpu_user_regs, s7);
+    OFFSET(CPU_USER_REGS_S8, struct cpu_user_regs, s8);
+    OFFSET(CPU_USER_REGS_S9, struct cpu_user_regs, s9);
+    OFFSET(CPU_USER_REGS_S10, struct cpu_user_regs, s10);
+    OFFSET(CPU_USER_REGS_S11, struct cpu_user_regs, s11);
+    OFFSET(CPU_USER_REGS_T3, struct cpu_user_regs, t3);
+    OFFSET(CPU_USER_REGS_T4, struct cpu_user_regs, t4);
+    OFFSET(CPU_USER_REGS_T5, struct cpu_user_regs, t5);
+    OFFSET(CPU_USER_REGS_T6, struct cpu_user_regs, t6);
+    /* CSRs saved alongside the GPRs. */
+    OFFSET(CPU_USER_REGS_SEPC, struct cpu_user_regs, sepc);
+    OFFSET(CPU_USER_REGS_SSTATUS, struct cpu_user_regs, sstatus);
+    OFFSET(CPU_USER_REGS_PREGS, struct cpu_user_regs, pregs);
+}