xenbits.xensource.com Git - unikraft/unikraft.git/commitdiff
plat/common/x86: Make SIPI vector relocations independent of `ukreloc`
author Sergiu Moga <sergiu@unikraft.io>
Fri, 29 Sep 2023 14:26:01 +0000 (17:26 +0300)
committer Razvan Deaconescu <razvand@unikraft.io>
Fri, 20 Oct 2023 16:32:28 +0000 (19:32 +0300)
Allow the same relocation mechanism for the SIPI vector symbols to be
used regardless of whether `ukreloc` or `PIE` is enabled.

With this commit, SMP initialization has the same SIPI behavior with or
without `ukreloc`/`PIE`, dropping any conflicts or dependencies between
the two.

Signed-off-by: Sergiu Moga <sergiu@unikraft.io>
Reviewed-by: Razvan Virtan <virtanrazvan@gmail.com>
Reviewed-by: Marco Schlumpp <marco@unikraft.io>
Approved-by: Razvan Deaconescu <razvand@unikraft.io>
GitHub-Closes: #1113
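
For reference, the core of the unified mechanism is a plain patch-by-offset
step: the start16 trampoline is copied to a low-memory base and each recorded
field is overwritten with base + offset at the recorded width. Below is a
standalone sketch of that idea; apply_start16_reloc mirrors the helper added
by this commit (with standard fixed-width types instead of __u8/__u16/__u32/
__u64), while main(), the blob and the concrete offsets are made up for
illustration and are not part of the commit.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Mirror of the commit's helper: patch a 2- or 4-byte field inside a
 * copied blob so that it holds the relocated absolute address.
 */
static void apply_start16_reloc(uint64_t baddr, uint64_t r_mem_off,
                                uint64_t r_addr, uint32_t r_sz)
{
        switch (r_sz) {
        case 2:
                *(uint16_t *)((uint8_t *)baddr + r_mem_off) =
                        (uint16_t)(baddr + r_addr);
                break;
        case 4:
                *(uint32_t *)((uint8_t *)baddr + r_mem_off) =
                        (uint32_t)(baddr + r_addr);
                break;
        }
}

int main(void)
{
        /* Fake "trampoline": a 16-byte blob whose 2-byte field at
         * offset 4 must hold the absolute address of byte 10 once the
         * blob has been copied (offsets chosen only for the demo).
         */
        uint8_t image[16] = { 0 };
        uint8_t copy[16];

        memcpy(copy, image, sizeof(image));
        apply_start16_reloc((uint64_t)(uintptr_t)copy,
                            /* r_mem_off */ 4, /* r_addr */ 10,
                            /* r_sz */ 2);

        /* The patched field now holds the low 16 bits of &copy[10]. */
        printf("patched field: 0x%x\n", *(uint16_t *)(copy + 4));
        return 0;
}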

plat/common/x86/lcpu.c
plat/common/x86/start16_helpers.h

index 1355a90f4d84ae056de4a4f2bf7ab4a959dcb046..7f5473ff9f87cc8fc94696abaa299018cdfa14df 100644 (file)
@@ -104,7 +104,7 @@ void __noreturn lcpu_arch_jump_to(void *sp, ukplat_lcpu_entry_t entry)
        __builtin_unreachable();
 }
 
-#ifdef CONFIG_HAVE_SMP
+#if CONFIG_HAVE_SMP
 IMPORT_START16_SYM(gdt32_ptr, 2, MOV);
 IMPORT_START16_SYM(gdt32, 4, DATA);
 IMPORT_START16_SYM(lcpu_start16, 2, MOV);
@@ -115,36 +115,54 @@ IMPORT_START16_SYM(lcpu_start32, 4, MOV);
  * corresponding boot code somewhere in the first 1 MiB. We copy the trampoline
  * code to the target address during MP initialization.
  */
-#if CONFIG_LIBUKRELOC
+#define START16_RELOC_ENTRY(sym, sz, type)                             \
+       {                                                               \
+               .r_mem_off = START16_##type##_OFF(sym, sz),             \
+               .r_addr = (void *)(sym) - (void *)x86_start16_begin,    \
+               .r_sz = (sz),                                           \
+       }
+
+static void apply_start16_reloc(__u64 baddr, __u64 r_mem_off,
+                               __u64 r_addr, __u32 r_sz)
+{
+       switch (r_sz) {
+       case 2:
+               *(__u16 *)((__u8 *)baddr + r_mem_off) = (__u16)(baddr + r_addr);
+               break;
+       case 4:
+               *(__u32 *)((__u8 *)baddr + r_mem_off) = (__u32)(baddr + r_addr);
+               break;
+       }
+}
+
 static void start16_reloc_mp_init(void)
 {
-       size_t i;
-       struct uk_reloc x86_start16_relocs[] = {
-               START16_UKRELOC_ENTRY(lcpu_start16, 2, MOV),
-               START16_UKRELOC_ENTRY(gdt32, 4, DATA),
-               START16_UKRELOC_ENTRY(gdt32_ptr, 2, MOV),
-               START16_UKRELOC_ENTRY(jump_to32, 2, MOV),
+       struct {
+               __u64 r_mem_off;
+               __u64 r_addr;
+               __u32 r_sz;
+       } x86_start16_relocs[] = {
+               START16_RELOC_ENTRY(lcpu_start16, 2, MOV),
+               START16_RELOC_ENTRY(gdt32_ptr, 2, MOV),
+               START16_RELOC_ENTRY(gdt32, 4, DATA),
+               START16_RELOC_ENTRY(jump_to32, 2, MOV),
+               START16_RELOC_ENTRY(lcpu_start32, 4, MOV),
        };
+       __sz i;
 
        for (i = 0; i < ARRAY_SIZE(x86_start16_relocs); i++)
-               apply_uk_reloc(&x86_start16_relocs[i],
-                             (__u64)x86_start16_addr +
-                             x86_start16_relocs[i].r_addr,
-                             (void *)x86_start16_addr);
+               apply_start16_reloc((__u64)x86_start16_addr,
+                                   x86_start16_relocs[i].r_mem_off,
+                                   x86_start16_relocs[i].r_addr,
+                                   x86_start16_relocs[i].r_sz);
 
        /* Unlike the other entries, lcpu_start32 must stay the same
-        * as it is not part of the start16 section
+        * as it is not part of the start16 section.
         */
-       apply_uk_reloc(&(struct uk_reloc)UKRELOC_ENTRY(
-                               START16_MOV_OFF(lcpu_start32, 4),
-                               (__u64)lcpu_start32, 4,
-                               UKRELOC_FLAGS_PHYS_REL),
-                      (__u64)lcpu_start32,
-                      (void *)x86_start16_addr);
+       apply_start16_reloc((__u64)x86_start16_addr,
+                           START16_MOV_OFF(lcpu_start32, 4),
+                           (__u64)lcpu_start32 - (__u64)x86_start16_addr, 4);
 }
-#else  /* CONFIG_LIBUKRELOC */
-static void start16_reloc_mp_init(void) { }
-#endif /* !CONFIG_LIBUKRELOC */
 
 int lcpu_arch_mp_init(void *arg __unused)
 {
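
For orientation, the table-driven patching above runs against a copy of the
start16 section placed in low memory. The following in-file sketch paraphrases
that surrounding flow; only start16_reloc_mp_init() and the section symbols
come from the code above, while start16_setup_sketch, its base parameter and
the step ordering are an assumption about how the copy and SIPI setup fit
around it, not a quote from lcpu_arch_mp_init().

#include <string.h>

/* Section bounds of the 16-bit trampoline, as declared in
 * start16_helpers.h.
 */
extern void *x86_start16_begin[];
extern void *x86_start16_end[];

/* Hypothetical outline of the trampoline setup during MP bring-up. */
static void start16_setup_sketch(void *base)
{
        __sz len = (__sz)((void *)x86_start16_end -
                          (void *)x86_start16_begin);

        /* 1. Copy the trampoline to its sub-1 MiB runtime base; lcpu.c
         *    records that base in x86_start16_addr, which
         *    start16_reloc_mp_init() reads.
         */
        memcpy(base, x86_start16_begin, len);

        /* 2. Rewrite every recorded 2-/4-byte field inside the copy,
         *    with no ukreloc/PIE involvement.
         */
        start16_reloc_mp_init();

        /* 3. Start the APs with a SIPI whose vector encodes this base. */
}
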
index 6a447d4fe5a5fbe21320c122c1fd06866258927b..3d16f54c0515f0c118ffbc1facdfa1e009ebc1e0 100644 (file)
@@ -32,11 +32,4 @@ extern void *x86_start16_end[];
        ((void *)START16_DATA_SYM(sym, sz) -                            \
        (void *)x86_start16_begin)
 
-#if CONFIG_LIBUKRELOC
-#define START16_UKRELOC_ENTRY(sym, sz, type)                           \
-       UKRELOC_ENTRY(START16_##type##_OFF(sym, sz),                    \
-                      (void *)sym - (void *)x86_start16_begin,         \
-                      sz, UKRELOC_FLAGS_PHYS_REL)
-#endif /* CONFIG_LIBUKRELOC */
-
 #endif  /* __START16_HELPERS_H__ */
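
For one concrete entry, the new START16_RELOC_ENTRY macro in lcpu.c combined
with the offset helper whose tail is visible above means that
START16_RELOC_ENTRY(gdt32, 4, DATA) records roughly the following inside the
table built by start16_reloc_mp_init(). START16_DATA_SYM itself is produced
by IMPORT_START16_SYM and its expansion is not visible in this diff, so treat
the first expression as a paraphrase of the visible pattern.

/* Both offsets are relative to the start of the start16 section. */
{
        /* where the 4-byte gdt32 field lives inside the copied blob */
        .r_mem_off = (void *)START16_DATA_SYM(gdt32, 4) -
                     (void *)x86_start16_begin,
        /* what that field must point at, also section-relative */
        .r_addr    = (void *)gdt32 - (void *)x86_start16_begin,
        .r_sz      = 4,
}

At MP-init time the loop then stores (__u32)(x86_start16_addr + r_addr) into
the copy at x86_start16_addr + r_mem_off; for example, if the trampoline were
copied to physical 0x8000 and gdt32 sat 0x40 bytes into the section, the
patched field would read 0x8040 (addresses made up for illustration).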