}
#ifdef CONFIG_HAVE_SMP
+IMPORT_START16_SYM(gdt32_ptr, 2, MOV);
+IMPORT_START16_SYM(gdt32, 4, DATA);
+IMPORT_START16_SYM(lcpu_start16, 2, MOV);
+IMPORT_START16_SYM(jump_to32, 2, MOV);
+IMPORT_START16_SYM(lcpu_start32, 4, MOV);
+
/* Secondary cores start in 16-bit real-mode and we have to provide the
* corresponding boot code somewhere in the first 1 MiB. We copy the trampoline
* code to the target address during MP initialization.
*/
#if CONFIG_LIBUKRELOC
-IMPORT_START16_UKRELOC_SYM(gdt32_ptr, 2, MOV);
-IMPORT_START16_UKRELOC_SYM(gdt32, 4, DATA);
-IMPORT_START16_UKRELOC_SYM(lcpu_start16, 2, MOV);
-IMPORT_START16_UKRELOC_SYM(jump_to32, 2, MOV);
-IMPORT_START16_UKRELOC_SYM(lcpu_start32, 4, MOV);
-
-static void uk_reloc_mp_init(void)
+static void start16_reloc_mp_init(void)
{
size_t i;
struct uk_reloc x86_start16_relocs[] = {
* as it is not part of the start16 section
*/
apply_uk_reloc(&(struct uk_reloc)UKRELOC_ENTRY(
- START16_UKRELOC_MOV_OFF(lcpu_start32, 4),
+ START16_MOV_OFF(lcpu_start32, 4),
(__u64)lcpu_start32, 4,
UKRELOC_FLAGS_PHYS_REL),
(__u64)lcpu_start32,
(void *)x86_start16_addr);
}
#else /* CONFIG_LIBUKRELOC */
-static void uk_reloc_mp_init(void) { }
+static void start16_reloc_mp_init(void) { }
#endif /* !CONFIG_LIBUKRELOC */
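To picture what the relocation application above does: each entry names a byte offset inside the copied trampoline and a value to write there. A minimal sketch of that patch step (hypothetical helper name; it assumes only the UKRELOC_ENTRY(offset, value, size, flags) semantics visible above and little-endian x86, and is not Unikraft's actual apply_uk_reloc):

	#include <string.h>

	/* Sketch only: write the low `sz` bytes of `val` into the
	 * trampoline copy at offset `off`.
	 */
	static void start16_patch_sketch(void *base, unsigned long off,
					 unsigned long long val, unsigned sz)
	{
		memcpy((unsigned char *)base + off, &val, sz);
	}

Each START16_PLACEHOLDER immediate in the copy at x86_start16_addr is overwritten this way with the physical address computed for it.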
int lcpu_arch_mp_init(void *arg __unused)
UK_ASSERT(x86_start16_addr < 0x100000);
memcpy((void *)x86_start16_addr, &x86_start16_begin, X86_START16_SIZE);
- uk_reloc_mp_init();
+ start16_reloc_mp_init();
uk_pr_debug("Copied AP 16-bit boot code to 0x%"__PRIvaddr"\n",
x86_start16_addr);
#define X86_START16_SIZE \
((__uptr)x86_start16_end - (__uptr)x86_start16_begin)
-#if CONFIG_LIBUKRELOC
-#define START16_UKRELOC_MOV_SYM(sym, sz) \
- sym##_uk_reloc_imm##sz##_start16
+#define START16_MOV_SYM(sym, sz) \
+ sym##_imm##sz##_start16
-#define START16_UKRELOC_DATA_SYM(sym, sz) \
- sym##_uk_reloc_data##sz##_start16
+#define START16_DATA_SYM(sym, sz) \
+ sym##_data##sz##_start16
-#define IMPORT_START16_UKRELOC_SYM(sym, sz, type) \
+#define IMPORT_START16_SYM(sym, sz, type) \
extern void *sym[]; \
- extern void *START16_UKRELOC_##type##_SYM(sym, sz)[]
+ extern void *START16_##type##_SYM(sym, sz)[]
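To make the generated names concrete: per the definitions above, IMPORT_START16_SYM(gdt32_ptr, 2, MOV) as used in lcpu.c expands to

	extern void *gdt32_ptr[];
	extern void *gdt32_ptr_imm2_start16[]; /* patch site, emitted by mov_start16 */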
-#define START16_UKRELOC_MOV_OFF(sym, sz) \
- ((void *)START16_UKRELOC_MOV_SYM(sym, sz) - \
+#define START16_MOV_OFF(sym, sz) \
+ ((void *)START16_MOV_SYM(sym, sz) - \
(void *)x86_start16_begin)
-#define START16_UKRELOC_DATA_OFF(sym, sz) \
- ((void *)START16_UKRELOC_DATA_SYM(sym, sz) - \
+#define START16_DATA_OFF(sym, sz) \
+ ((void *)START16_DATA_SYM(sym, sz) - \
(void *)x86_start16_begin)
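The offset helpers then locate such a patch site relative to the start of the trampoline image; START16_MOV_OFF(gdt32_ptr, 2), for instance, expands to

	((void *)gdt32_ptr_imm2_start16 - (void *)x86_start16_begin)

i.e. the byte offset at which the copy at x86_start16_addr must be patched.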
+#if CONFIG_LIBUKRELOC
#define START16_UKRELOC_ENTRY(sym, sz, type) \
- UKRELOC_ENTRY(START16_UKRELOC_##type##_OFF(sym, sz), \
+ UKRELOC_ENTRY(START16_##type##_OFF(sym, sz), \
(void *)sym - (void *)x86_start16_begin, \
sz, UKRELOC_FLAGS_PHYS_REL)
#endif /* CONFIG_LIBUKRELOC */
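Putting both together, a table entry such as START16_UKRELOC_ENTRY(gdt32_ptr, 2, MOV) expands to

	UKRELOC_ENTRY(((void *)gdt32_ptr_imm2_start16 - (void *)x86_start16_begin),
		      (void *)gdt32_ptr - (void *)x86_start16_begin,
		      2, UKRELOC_FLAGS_PHYS_REL)

that is: patch 2 bytes at the recorded offset with the physical address gdt32_ptr ends up at.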
x86_start16_addr:
.quad START16_PLACEHOLDER
-/* Implement dedicate ur_* macro's whose only use-case is this file to cope with
- * the existence of 16-bit code. This is so it does not interfere with the other
- * uses of the ur_* macro's. For example, we not want symbols for these to
- * occupy unnecessary space in .uk_reloc.
+/* Implement dedicated start16 macros whose only use case is this file, to
+ * cope with the existence of 16-bit code. This way they do not interfere
+ * with the other uses of the ur_* macros. For example, we do not want
+ * symbols for these to occupy unnecessary space in .uk_reloc.
*
 * NOTE: IF ADDING/REMOVING RELOCATIONS FROM HERE, THEN ADD/REMOVE
- * CORRESPONDENT SYMBOL TO `lcpu.c` (see IMPORT_START16_UKRELOC_SYM
+ * THE CORRESPONDING SYMBOL IN `lcpu.c` (see the IMPORT_START16_SYM and
 * related start16_helpers.h macros used on start16 symbols)
*/
-.macro ur_mov_start16 sym:req, reg:req, bytes:req
+.macro mov_start16 sym:req, reg:req, bytes:req
mov $START16_PLACEHOLDER, \reg
-.globl \sym\()_uk_reloc_imm\bytes\()_start16
-.set \sym\()_uk_reloc_imm\bytes\()_start16, (. - \bytes)
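+/* The immediate is the trailing \bytes bytes of the mov emitted above;
+ * export its address (. - \bytes) so lcpu.c can locate and patch it
+ * after the trampoline has been copied below 1 MiB.
+ */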
+.globl \sym\()_imm\bytes\()_start16
+.set \sym\()_imm\bytes\()_start16, (. - \bytes)
nop
.endm
-.macro ur_data_start16 type:req, sym:req, bytes:req
-.globl \sym\()_uk_reloc_data\bytes\()_start16
-.set \sym\()_uk_reloc_data\bytes\()_start16, .
+.macro data_start16 type:req, sym:req, bytes:req
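+/* Same patching scheme for data: emit a START16_PLACEHOLDER via the
+ * requested .type directive and export its location for lcpu.c.
+ */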
+.globl \sym\()_data\bytes\()_start16
+.set \sym\()_data\bytes\()_start16, .
.\type START16_PLACEHOLDER
.endm
xorl %edi, %edi
xorl %esi, %esi
- ur_mov_start16 lcpu_start16, %ax, 2
+ mov_start16 lcpu_start16, %ax, 2
/* On start-up a core's %cs is set depending on the value of the vector
* inside the SIPI message, so make sure we are jumping to the
* proper address w.r.t. segmentation.
.globl gdt32_ptr
gdt32_ptr:
.word (gdt32_end - gdt32 - 1) /* size - 1 */
- ur_data_start16 long, gdt32, 4 /* GDT address */
+ data_start16 long, gdt32, 4 /* GDT address */
gdt32_cs:
.quad GDT_DESC_CODE32_VAL /* 32-bit CS */
gdt32_ds:
movl %eax, %cr0
/* Load 32-bit GDT and jump into 32-bit code segment */
- ur_mov_start16 gdt32_ptr, %ax, 2
+ mov_start16 gdt32_ptr, %ax, 2
lgdt (%eax)
/* The ljmp below is encoded in 5 bytes, thus 40 bits: one opcode byte,
 * a 16-bit offset and a 16-bit segment selector. We patch the offset so
 * that the target address corresponds to the very next instruction in
 * memory after our ljmp.
 */
- ur_mov_start16 jump_to32, %ax, 2
+ mov_start16 jump_to32, %ax, 2
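+	/* %ax holds the 16-bit address of jump_to32, the instruction right
+	 * after the ljmp; the ljmp's 2-byte offset operand sits at
+	 * jump_to32 - 4 (offset + segment are its last 4 bytes).
+	 */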
movw %ax, -4(%eax)
ljmp $(gdt32_cs - gdt32), $START16_PLACEHOLDER
movl %eax, %fs
movl %eax, %gs
- ur_mov_start16 lcpu_start32, %eax, 4
+ mov_start16 lcpu_start32, %eax, 4
jmp *%eax
END(lcpu_start16)