From 322261eed4e101095b3bdb9a50bc5d7521bdfc2c Mon Sep 17 00:00:00 2001 From: Sergiu Moga Date: Fri, 29 Sep 2023 16:42:36 +0300 Subject: [PATCH] plat/common/x86: Separate `start16` relocations from `ukreloc` Since `start16`-related symbols have a relocation mechanism that is similar to, but entirely independent from, `ukreloc`, rename all such symbols, macros and their uses so that they do not contain the `uk_reloc` or the `UKRELOC` substrings. Signed-off-by: Sergiu Moga Reviewed-by: Razvan Virtan Reviewed-by: Marco Schlumpp Approved-by: Razvan Deaconescu GitHub-Closes: #1113 --- plat/common/x86/lcpu.c | 20 +++++++++---------- plat/common/x86/start16_helpers.h | 24 +++++++++++----------- plat/kvm/x86/lcpu_start.S | 33 ++++++++++++++++--------------- 3 files changed, 39 insertions(+), 38 deletions(-) diff --git a/plat/common/x86/lcpu.c b/plat/common/x86/lcpu.c index 6be907b1a..1355a90f4 100644 --- a/plat/common/x86/lcpu.c +++ b/plat/common/x86/lcpu.c @@ -105,18 +105,18 @@ void __noreturn lcpu_arch_jump_to(void *sp, ukplat_lcpu_entry_t entry) } #ifdef CONFIG_HAVE_SMP +IMPORT_START16_SYM(gdt32_ptr, 2, MOV); +IMPORT_START16_SYM(gdt32, 4, DATA); +IMPORT_START16_SYM(lcpu_start16, 2, MOV); +IMPORT_START16_SYM(jump_to32, 2, MOV); +IMPORT_START16_SYM(lcpu_start32, 4, MOV); + /* Secondary cores start in 16-bit real-mode and we have to provide the * corresponding boot code somewhere in the first 1 MiB. We copy the trampoline * code to the target address during MP initialization. 
*/ #if CONFIG_LIBUKRELOC -IMPORT_START16_UKRELOC_SYM(gdt32_ptr, 2, MOV); -IMPORT_START16_UKRELOC_SYM(gdt32, 4, DATA); -IMPORT_START16_UKRELOC_SYM(lcpu_start16, 2, MOV); -IMPORT_START16_UKRELOC_SYM(jump_to32, 2, MOV); -IMPORT_START16_UKRELOC_SYM(lcpu_start32, 4, MOV); - -static void uk_reloc_mp_init(void) +static void start16_reloc_mp_init(void) { size_t i; struct uk_reloc x86_start16_relocs[] = { @@ -136,14 +136,14 @@ static void uk_reloc_mp_init(void) * as it is not part of the start16 section */ apply_uk_reloc(&(struct uk_reloc)UKRELOC_ENTRY( - START16_UKRELOC_MOV_OFF(lcpu_start32, 4), + START16_MOV_OFF(lcpu_start32, 4), (__u64)lcpu_start32, 4, UKRELOC_FLAGS_PHYS_REL), (__u64)lcpu_start32, (void *)x86_start16_addr); } #else /* CONFIG_LIBUKRELOC */ -static void uk_reloc_mp_init(void) { } +static void start16_reloc_mp_init(void) { } #endif /* !CONFIG_LIBUKRELOC */ int lcpu_arch_mp_init(void *arg __unused) @@ -214,7 +214,7 @@ int lcpu_arch_mp_init(void *arg __unused) UK_ASSERT(x86_start16_addr < 0x100000); memcpy((void *)x86_start16_addr, &x86_start16_begin, X86_START16_SIZE); - uk_reloc_mp_init(); + start16_reloc_mp_init(); uk_pr_debug("Copied AP 16-bit boot code to 0x%"__PRIvaddr"\n", x86_start16_addr); diff --git a/plat/common/x86/start16_helpers.h b/plat/common/x86/start16_helpers.h index 067de8c1b..6a447d4fe 100644 --- a/plat/common/x86/start16_helpers.h +++ b/plat/common/x86/start16_helpers.h @@ -14,27 +14,27 @@ extern void *x86_start16_end[]; #define X86_START16_SIZE \ ((__uptr)x86_start16_end - (__uptr)x86_start16_begin) -#if CONFIG_LIBUKRELOC -#define START16_UKRELOC_MOV_SYM(sym, sz) \ - sym##_uk_reloc_imm##sz##_start16 +#define START16_MOV_SYM(sym, sz) \ + sym##_imm##sz##_start16 -#define START16_UKRELOC_DATA_SYM(sym, sz) \ - sym##_uk_reloc_data##sz##_start16 +#define START16_DATA_SYM(sym, sz) \ + sym##_data##sz##_start16 -#define IMPORT_START16_UKRELOC_SYM(sym, sz, type) \ +#define IMPORT_START16_SYM(sym, sz, type) \ extern void *sym[]; \ - extern void 
*START16_UKRELOC_##type##_SYM(sym, sz)[] + extern void *START16_##type##_SYM(sym, sz)[] -#define START16_UKRELOC_MOV_OFF(sym, sz) \ - ((void *)START16_UKRELOC_MOV_SYM(sym, sz) - \ +#define START16_MOV_OFF(sym, sz) \ + ((void *)START16_MOV_SYM(sym, sz) - \ (void *)x86_start16_begin) -#define START16_UKRELOC_DATA_OFF(sym, sz) \ - ((void *)START16_UKRELOC_DATA_SYM(sym, sz) - \ +#define START16_DATA_OFF(sym, sz) \ + ((void *)START16_DATA_SYM(sym, sz) - \ (void *)x86_start16_begin) +#if CONFIG_LIBUKRELOC #define START16_UKRELOC_ENTRY(sym, sz, type) \ - UKRELOC_ENTRY(START16_UKRELOC_##type##_OFF(sym, sz), \ + UKRELOC_ENTRY(START16_##type##_OFF(sym, sz), \ (void *)sym - (void *)x86_start16_begin, \ sz, UKRELOC_FLAGS_PHYS_REL) #endif /* CONFIG_LIBUKRELOC */ diff --git a/plat/kvm/x86/lcpu_start.S b/plat/kvm/x86/lcpu_start.S index 6e2227ee2..09cb6e208 100644 --- a/plat/kvm/x86/lcpu_start.S +++ b/plat/kvm/x86/lcpu_start.S @@ -53,25 +53,26 @@ x86_start16_addr: .quad START16_PLACEHOLDER -/* Implement dedicate ur_* macro's whose only use-case is this file to cope with - * the existence of 16-bit code. This is so it does not interfere with the other - * uses of the ur_* macro's. For example, we not want symbols for these to - * occupy unnecessary space in .uk_reloc. +/* Implement dedicated start16 macro's whose only use-case is this file to cope + * with the existence of 16-bit code. This is so it does not interfere with the + * other uses of the ur_* macro's. + * For example, we do not want symbols for these to occupy unnecessary space in + * .uk_reloc. 
* * NOTE:IF ADDING/REMOVING RELOCATIONS FROM HERE, THEN ADD/REMOVE - * CORRESPONDENT SYMBOL TO `lcpu.c` (see IMPORT_START16_UKRELOC_SYM + * CORRESPONDENT SYMBOL TO `lcpu.c` (see IMPORT_START16_SYM * start16_helpers.h macros being used on start16 symbols) */ -.macro ur_mov_start16 sym:req, reg:req, bytes:req +.macro mov_start16 sym:req, reg:req, bytes:req mov $START16_PLACEHOLDER, \reg -.globl \sym\()_uk_reloc_imm\bytes\()_start16 -.set \sym\()_uk_reloc_imm\bytes\()_start16, (. - \bytes) +.globl \sym\()_imm\bytes\()_start16 +.set \sym\()_imm\bytes\()_start16, (. - \bytes) nop .endm -.macro ur_data_start16 type:req, sym:req, bytes:req -.globl \sym\()_uk_reloc_data\bytes\()_start16 -.set \sym\()_uk_reloc_data\bytes\()_start16, . +.macro data_start16 type:req, sym:req, bytes:req +.globl \sym\()_data\bytes\()_start16 +.set \sym\()_data\bytes\()_start16, . .\type START16_PLACEHOLDER .endm @@ -94,7 +95,7 @@ ENTRY(lcpu_start16_ap) xorl %edi, %edi xorl %esi, %esi - ur_mov_start16 lcpu_start16, %ax, 2 + mov_start16 lcpu_start16, %ax, 2 /* On start-up a core's %cs is set depending on the value of the vector * inside the SIPI message, so make sure we are jumping to the * proper address w.r.t. segmentation. @@ -123,7 +124,7 @@ gdt32_null: .globl gdt32_ptr gdt32_ptr: .word (gdt32_end - gdt32 - 1) /* size - 1 */ - ur_data_start16 long, gdt32, 4 /* GDT address */ + data_start16 long, gdt32, 4 /* GDT address */ gdt32_cs: .quad GDT_DESC_CODE32_VAL /* 32-bit CS */ gdt32_ds: @@ -143,7 +144,7 @@ ENTRY(lcpu_start16) movl %eax, %cr0 /* Load 32-bit GDT and jump into 32-bit code segment */ - ur_mov_start16 gdt32_ptr, %ax, 2 + mov_start16 gdt32_ptr, %ax, 2 lgdt (%eax) /* ljmp encoding has 5 opcodes, thus 40 bits, which in our case @@ -161,7 +162,7 @@ ENTRY(lcpu_start16) * address corresponds to the very next instruction in memory * after our ljmp. 
*/ - ur_mov_start16 jump_to32, %ax, 2 + mov_start16 jump_to32, %ax, 2 movw %ax, -4(%eax) ljmp $(gdt32_cs - gdt32), $START16_PLACEHOLDER @@ -178,7 +179,7 @@ jump_to32: movl %eax, %fs movl %eax, %gs - ur_mov_start16 lcpu_start32, %eax, 4 + mov_start16 lcpu_start32, %eax, 4 jmp *%eax END(lcpu_start16) -- 2.39.5