ia64/xen-unstable
changeset 3623:060c1ea52343
bitkeeper revision 1.1159.212.73 (420154ceFUvIANCrxSTgPyOjFi1Pag)
More x86_64 work. Interrupts and exceptions are now working. Next step is
DOM0 construction. First part of that is to map rest of physical memory,
allocate and map the mach_to_phys table, and fix arch_init_memory().
Signed-off-by: keir.fraser@cl.cam.ac.uk
author | kaf24@viper.(none) |
---|---|
date | Wed Feb 02 22:31:42 2005 +0000 (2005-02-02) |
parents | bf2c38625b39 |
children | 8472fafee3cf |
files | xen/arch/x86/boot/x86_64.S xen/arch/x86/domain.c xen/arch/x86/memory.c xen/arch/x86/setup.c xen/arch/x86/smpboot.c xen/arch/x86/traps.c xen/arch/x86/x86_32/traps.c xen/arch/x86/x86_64/entry.S xen/include/asm-x86/desc.h xen/include/asm-x86/processor.h xen/include/public/arch-x86_64.h |
line diff
1.1 --- a/xen/arch/x86/boot/x86_64.S Wed Feb 02 18:27:05 2005 +0000 1.2 +++ b/xen/arch/x86/boot/x86_64.S Wed Feb 02 22:31:42 2005 +0000 1.3 @@ -196,7 +196,7 @@ ENTRY(gdt_table) 1.4 .quad 0x00affa000000ffff /* 0x082b ring 3 code, 64-bit mode */ 1.5 .quad 0x00cff2000000ffff /* 0x0833 ring 3 data */ 1.6 .quad 0x0000000000000000 /* unused */ 1.7 - .fill 2*NR_CPUS,8,0 /* space for TSS and LDT per CPU */ 1.8 + .fill 4*NR_CPUS,8,0 /* space for TSS and LDT per CPU */ 1.9 1.10 .word 0 1.11 gdt_descr: 1.12 @@ -206,7 +206,7 @@ SYMBOL_NAME(gdt): 1.13 1.14 .word 0 1.15 idt_descr: 1.16 - .word 256*8-1 1.17 + .word 256*16-1 1.18 SYMBOL_NAME(idt): 1.19 .quad SYMBOL_NAME(idt_table) 1.20
2.1 --- a/xen/arch/x86/domain.c Wed Feb 02 18:27:05 2005 +0000 2.2 +++ b/xen/arch/x86/domain.c Wed Feb 02 22:31:42 2005 +0000 2.3 @@ -498,7 +498,7 @@ int arch_final_setup_guestos(struct exec 2.4 return 0; 2.5 } 2.6 2.7 -#if defined(__i386__) 2.8 +#if defined(__i386__) /* XXX */ 2.9 2.10 void new_thread(struct exec_domain *d, 2.11 unsigned long start_pc,
3.1 --- a/xen/arch/x86/memory.c Wed Feb 02 18:27:05 2005 +0000 3.2 +++ b/xen/arch/x86/memory.c Wed Feb 02 22:31:42 2005 +0000 3.3 @@ -168,7 +168,7 @@ void __init init_frametable(void) 3.4 3.5 void arch_init_memory(void) 3.6 { 3.7 -#ifdef __i386__ 3.8 +#ifdef __i386__ /* XXX */ 3.9 unsigned long i; 3.10 3.11 /*
4.1 --- a/xen/arch/x86/setup.c Wed Feb 02 18:27:05 2005 +0000 4.2 +++ b/xen/arch/x86/setup.c Wed Feb 02 22:31:42 2005 +0000 4.3 @@ -91,23 +91,21 @@ EXPORT_SYMBOL(acpi_disabled); 4.4 int phys_proc_id[NR_CPUS]; 4.5 int logical_proc_id[NR_CPUS]; 4.6 4.7 -#if defined(__i386__) 4.8 - 4.9 -/* Standard macro to see if a specific flag is changeable */ 4.10 -static inline int flag_is_changeable_p(u32 flag) 4.11 +/* Standard macro to see if a specific flag is changeable. */ 4.12 +static inline int flag_is_changeable_p(unsigned long flag) 4.13 { 4.14 - u32 f1, f2; 4.15 + unsigned long f1, f2; 4.16 4.17 - asm("pushfl\n\t" 4.18 - "pushfl\n\t" 4.19 - "popl %0\n\t" 4.20 - "movl %0,%1\n\t" 4.21 - "xorl %2,%0\n\t" 4.22 - "pushl %0\n\t" 4.23 - "popfl\n\t" 4.24 - "pushfl\n\t" 4.25 - "popl %0\n\t" 4.26 - "popfl\n\t" 4.27 + asm("pushf\n\t" 4.28 + "pushf\n\t" 4.29 + "pop %0\n\t" 4.30 + "mov %0,%1\n\t" 4.31 + "xor %2,%0\n\t" 4.32 + "push %0\n\t" 4.33 + "popf\n\t" 4.34 + "pushf\n\t" 4.35 + "pop %0\n\t" 4.36 + "popf\n\t" 4.37 : "=&r" (f1), "=&r" (f2) 4.38 : "ir" (flag)); 4.39 4.40 @@ -120,12 +118,6 @@ static int __init have_cpuid_p(void) 4.41 return flag_is_changeable_p(X86_EFLAGS_ID); 4.42 } 4.43 4.44 -#elif defined(__x86_64__) 4.45 - 4.46 -#define have_cpuid_p() (1) 4.47 - 4.48 -#endif 4.49 - 4.50 void __init get_cpu_vendor(struct cpuinfo_x86 *c) 4.51 { 4.52 char *v = c->x86_vendor_id; 4.53 @@ -306,38 +298,40 @@ void __init identify_cpu(struct cpuinfo_ 4.54 unsigned long cpu_initialized; 4.55 void __init cpu_init(void) 4.56 { 4.57 -#if defined(__i386__) /* XXX */ 4.58 int nr = smp_processor_id(); 4.59 - struct tss_struct * t = &init_tss[nr]; 4.60 + struct tss_struct *t = &init_tss[nr]; 4.61 4.62 if ( test_and_set_bit(nr, &cpu_initialized) ) 4.63 panic("CPU#%d already initialized!!!\n", nr); 4.64 printk("Initializing CPU#%d\n", nr); 4.65 4.66 - t->bitmap = IOBMP_INVALID_OFFSET; 4.67 - memset(t->io_bitmap, ~0, sizeof(t->io_bitmap)); 4.68 - 4.69 /* Set up GDT and IDT. 
*/ 4.70 SET_GDT_ENTRIES(current, DEFAULT_GDT_ENTRIES); 4.71 SET_GDT_ADDRESS(current, DEFAULT_GDT_ADDRESS); 4.72 - __asm__ __volatile__("lgdt %0": "=m" (*current->mm.gdt)); 4.73 - __asm__ __volatile__("lidt %0": "=m" (idt_descr)); 4.74 + __asm__ __volatile__ ( "lgdt %0" : "=m" (*current->mm.gdt) ); 4.75 + __asm__ __volatile__ ( "lidt %0" : "=m" (idt_descr) ); 4.76 4.77 /* No nested task. */ 4.78 - __asm__("pushfl ; andl $0xffffbfff,(%esp) ; popfl"); 4.79 + __asm__ __volatile__ ( "pushf ; andw $0xbfff,(%"__OP"sp) ; popf" ); 4.80 4.81 /* Ensure FPU gets initialised for each domain. */ 4.82 stts(); 4.83 4.84 /* Set up and load the per-CPU TSS and LDT. */ 4.85 + t->bitmap = IOBMP_INVALID_OFFSET; 4.86 + memset(t->io_bitmap, ~0, sizeof(t->io_bitmap)); 4.87 +#if defined(__i386__) 4.88 t->ss0 = __HYPERVISOR_DS; 4.89 t->esp0 = get_stack_top(); 4.90 +#elif defined(__x86_64__) 4.91 + t->rsp0 = get_stack_top(); 4.92 +#endif 4.93 set_tss_desc(nr,t); 4.94 load_TR(nr); 4.95 - __asm__ __volatile__("lldt %%ax"::"a" (0)); 4.96 + __asm__ __volatile__ ( "lldt %%ax" : : "a" (0) ); 4.97 4.98 /* Clear all 6 debug registers. */ 4.99 -#define CD(register) __asm__("movl %0,%%db" #register ::"r"(0) ); 4.100 +#define CD(register) __asm__ ( "mov %0,%%db" #register : : "r" (0UL) ); 4.101 CD(0); CD(1); CD(2); CD(3); /* no db4 and db5 */; CD(6); CD(7); 4.102 #undef CD 4.103 4.104 @@ -345,7 +339,6 @@ void __init cpu_init(void) 4.105 write_ptbase(¤t->mm); 4.106 4.107 init_idle_task(); 4.108 -#endif 4.109 } 4.110 4.111 static void __init do_initcalls(void)
5.1 --- a/xen/arch/x86/smpboot.c Wed Feb 02 18:27:05 2005 +0000 5.2 +++ b/xen/arch/x86/smpboot.c Wed Feb 02 22:31:42 2005 +0000 5.3 @@ -410,8 +410,8 @@ void __init start_secondary(void) 5.4 * now safe to make ourselves a private copy. 5.5 */ 5.6 idt_tables[cpu] = xmalloc_array(idt_entry_t, IDT_ENTRIES); 5.7 - memcpy(idt_tables[cpu], idt_table, IDT_ENTRIES*8); 5.8 - *(unsigned short *)(&idt_load[0]) = (IDT_ENTRIES*8)-1; 5.9 + memcpy(idt_tables[cpu], idt_table, IDT_ENTRIES*sizeof(idt_entry_t)); 5.10 + *(unsigned short *)(&idt_load[0]) = (IDT_ENTRIES*sizeof(idt_entry_t))-1; 5.11 *(unsigned long *)(&idt_load[2]) = (unsigned long)idt_tables[cpu]; 5.12 __asm__ __volatile__ ( "lidt %0" : "=m" (idt_load) ); 5.13
6.1 --- a/xen/arch/x86/traps.c Wed Feb 02 18:27:05 2005 +0000 6.2 +++ b/xen/arch/x86/traps.c Wed Feb 02 22:31:42 2005 +0000 6.3 @@ -709,9 +709,9 @@ void set_tss_desc(unsigned int n, void * 6.4 { 6.5 _set_tssldt_desc( 6.6 gdt_table + __TSS(n), 6.7 - (int)addr, 6.8 + (unsigned long)addr, 6.9 offsetof(struct tss_struct, __cacheline_filler) - 1, 6.10 - 0x89); 6.11 + 9); 6.12 } 6.13 6.14 void __init trap_init(void) 6.15 @@ -735,7 +735,6 @@ void __init trap_init(void) 6.16 set_intr_gate(TRAP_bounds,&bounds); 6.17 set_intr_gate(TRAP_invalid_op,&invalid_op); 6.18 set_intr_gate(TRAP_no_device,&device_not_available); 6.19 - set_task_gate(TRAP_double_fault,__DOUBLEFAULT_TSS_ENTRY<<3); 6.20 set_intr_gate(TRAP_copro_seg,&coprocessor_segment_overrun); 6.21 set_intr_gate(TRAP_invalid_tss,&invalid_TSS); 6.22 set_intr_gate(TRAP_no_segment,&segment_not_present); 6.23 @@ -750,6 +749,7 @@ void __init trap_init(void) 6.24 set_intr_gate(TRAP_deferred_nmi,&nmi); 6.25 6.26 #if defined(__i386__) 6.27 + set_task_gate(TRAP_double_fault,__DOUBLEFAULT_TSS_ENTRY<<3); 6.28 _set_gate(idt_table+HYPERCALL_VECTOR, 14, 1, &hypercall); 6.29 #elif defined(__x86_64__) 6.30 _set_gate(idt_table+HYPERCALL_VECTOR, 14, 3, &hypercall);
7.1 --- a/xen/arch/x86/x86_32/traps.c Wed Feb 02 18:27:05 2005 +0000 7.2 +++ b/xen/arch/x86/x86_32/traps.c Wed Feb 02 22:31:42 2005 +0000 7.3 @@ -172,7 +172,7 @@ void __init doublefault_init(void) 7.4 tss->eflags = 2; 7.5 tss->bitmap = IOBMP_INVALID_OFFSET; 7.6 _set_tssldt_desc(gdt_table+__DOUBLEFAULT_TSS_ENTRY, 7.7 - (int)tss, 235, 0x89); 7.8 + (unsigned long)tss, 235, 9); 7.9 } 7.10 7.11 long set_fast_trap(struct exec_domain *p, int idx)
8.1 --- a/xen/arch/x86/x86_64/entry.S Wed Feb 02 18:27:05 2005 +0000 8.2 +++ b/xen/arch/x86/x86_64/entry.S Wed Feb 02 22:31:42 2005 +0000 8.3 @@ -32,7 +32,7 @@ restore_all_xen: 8.4 popq %rsi 8.5 popq %rdi 8.6 addq $8,%rsp 8.7 - iret 8.8 + iretq 8.9 8.10 error_code: 8.11 SAVE_ALL
9.1 --- a/xen/include/asm-x86/desc.h Wed Feb 02 18:27:05 2005 +0000 9.2 +++ b/xen/include/asm-x86/desc.h Wed Feb 02 22:31:42 2005 +0000 9.3 @@ -4,14 +4,6 @@ 9.4 9.5 #define LDT_ENTRY_SIZE 8 9.6 9.7 -#define __DOUBLEFAULT_TSS_ENTRY FIRST_RESERVED_GDT_ENTRY 9.8 - 9.9 -#define __FIRST_TSS_ENTRY (FIRST_RESERVED_GDT_ENTRY + 8) 9.10 -#define __FIRST_LDT_ENTRY (__FIRST_TSS_ENTRY + 1) 9.11 - 9.12 -#define __TSS(n) (((n)<<1) + __FIRST_TSS_ENTRY) 9.13 -#define __LDT(n) (((n)<<1) + __FIRST_LDT_ENTRY) 9.14 - 9.15 #define load_TR(n) __asm__ __volatile__ ("ltr %%ax" : : "a" (__TSS(n)<<3) ) 9.16 9.17 /* 9.18 @@ -44,6 +36,12 @@ struct desc_struct { 9.19 9.20 #if defined(__x86_64__) 9.21 9.22 +#define __FIRST_TSS_ENTRY (FIRST_RESERVED_GDT_ENTRY + 8) 9.23 +#define __FIRST_LDT_ENTRY (__FIRST_TSS_ENTRY + 2) 9.24 + 9.25 +#define __TSS(n) (((n)<<2) + __FIRST_TSS_ENTRY) 9.26 +#define __LDT(n) (((n)<<2) + __FIRST_LDT_ENTRY) 9.27 + 9.28 #define VALID_CODESEL(_s) ((_s) == FLAT_RING3_CS64 || VALID_SEL(_s)) 9.29 9.30 typedef struct { 9.31 @@ -63,10 +61,28 @@ do { 9.32 ((unsigned long)(addr) >> 32); \ 9.33 } while (0) 9.34 9.35 -#define _set_tssldt_desc(n,addr,limit,type) ((void)0) 9.36 +#define _set_tssldt_desc(desc,addr,limit,type) \ 9.37 +do { \ 9.38 + (desc)[0].a = \ 9.39 + ((u32)(addr) << 16) | ((u32)(limit) & 0xFFFF); \ 9.40 + (desc)[0].b = \ 9.41 + ((u32)(addr) & 0xFF000000U) | \ 9.42 + ((u32)(type) << 8) | 0x8000U | \ 9.43 + (((u32)(addr) & 0x00FF0000U) >> 16); \ 9.44 + (desc)[1].a = (u32)(((unsigned long)(addr)) >> 32); \ 9.45 + (desc)[1].b = 0; \ 9.46 +} while (0) 9.47 9.48 #elif defined(__i386__) 9.49 9.50 +#define __DOUBLEFAULT_TSS_ENTRY FIRST_RESERVED_GDT_ENTRY 9.51 + 9.52 +#define __FIRST_TSS_ENTRY (FIRST_RESERVED_GDT_ENTRY + 8) 9.53 +#define __FIRST_LDT_ENTRY (__FIRST_TSS_ENTRY + 1) 9.54 + 9.55 +#define __TSS(n) (((n)<<1) + __FIRST_TSS_ENTRY) 9.56 +#define __LDT(n) (((n)<<1) + __FIRST_LDT_ENTRY) 9.57 + 9.58 #define VALID_CODESEL(_s) ((_s) == FLAT_RING1_CS || VALID_SEL(_s)) 9.59 
9.60 typedef struct desc_struct idt_entry_t; 9.61 @@ -93,7 +109,7 @@ do { \ 9.62 "movb $0,6(%2)\n\t" \ 9.63 "movb %%ah,7(%2)\n\t" \ 9.64 "rorl $16,%%eax" \ 9.65 - : "=m"(*(n)) : "a" (addr), "r"(n), "ir"(limit), "i"(type)) 9.66 + : "=m"(*(n)) : "a" (addr), "r"(n), "ir"(limit), "i"(type|0x80)) 9.67 9.68 #endif 9.69
10.1 --- a/xen/include/asm-x86/processor.h Wed Feb 02 18:27:05 2005 +0000 10.2 +++ b/xen/include/asm-x86/processor.h Wed Feb 02 22:31:42 2005 +0000 10.3 @@ -379,7 +379,7 @@ struct tss_struct { 10.4 u8 io_bitmap[IOBMP_BYTES+1]; 10.5 /* Pads the TSS to be cacheline-aligned (total size is 0x2080). */ 10.6 u8 __cacheline_filler[23]; 10.7 -}; 10.8 +} __cacheline_aligned PACKED; 10.9 10.10 struct trap_bounce { 10.11 unsigned long error_code;
11.1 --- a/xen/include/public/arch-x86_64.h Wed Feb 02 18:27:05 2005 +0000 11.2 +++ b/xen/include/public/arch-x86_64.h Wed Feb 02 22:31:42 2005 +0000 11.3 @@ -30,7 +30,7 @@ 11.4 * NB. The reserved range is inclusive (that is, both FIRST_RESERVED_GDT_ENTRY 11.5 * and LAST_RESERVED_GDT_ENTRY are reserved). 11.6 */ 11.7 -#define NR_RESERVED_GDT_ENTRIES 40 11.8 +#define NR_RESERVED_GDT_ENTRIES 72 11.9 #define FIRST_RESERVED_GDT_ENTRY 256 11.10 #define LAST_RESERVED_GDT_ENTRY \ 11.11 (FIRST_RESERVED_GDT_ENTRY + NR_RESERVED_GDT_ENTRIES - 1)