Move contents of switch_to macro from xensystem.h to context_switch function.
Initialize FPU on all processors. FPH is always enabled in Xen.
Speed up context-switch (a little bit!) by not enabling/disabling FPH.
Clean up (unused functions/variables/fields, debug printf...)
vmx_ia64_switch_to removed (was unused).
Signed-off-by: Tristan Gingold <tristan.gingold@bull.net>
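
For reference, a condensed sketch of the new context-switch path introduced by
this patch (taken from the context_switch() hunk below; the region-register/
IVA/PTA reload, the spsr declaration and the heartbeat debug are elided here,
so this is illustrative only, not the complete function):

    void context_switch(struct vcpu *prev, struct vcpu *next)
    {
        local_irq_save(spsr);
        context_switch_count++;

        /* FPH stays enabled, so just save/restore the fph partition
           instead of toggling it via the old switch_to() macro. */
        __ia64_save_fpu(prev->arch._thread.fph);
        __ia64_load_fpu(next->arch._thread.fph);
        if (VMX_DOMAIN(prev))
            vmx_save_state(prev);
        if (VMX_DOMAIN(next))
            vmx_load_state(next);

        prev = ia64_switch_to(next);    /* stack/"current" switch (assembly) */
        if (!VMX_DOMAIN(current))
            vcpu_set_next_timer(current);

        /* ... reload region registers, IVA and PTA for the new domain ... */

        local_irq_restore(spsr);
        context_saved(prev);
    }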
{
unw_init();
+#ifndef XEN
ia64_patch_vtop((u64) __start___vtop_patchlist, (u64) __end___vtop_patchlist);
+#endif
*cmdline_p = __va(ia64_boot_param->command_line);
#ifndef XEN
#endif
BUG();
+#ifdef XEN
+ ia64_fph_enable();
+ __ia64_init_fpu();
+#endif
+
ia64_mmu_init(ia64_imva(cpu_data));
ia64_mca_cpu_init(ia64_imva(cpu_data));
#endif
}
+#ifndef XEN
void
check_bugs (void)
{
ia64_patch_mckinley_e9((unsigned long) __start___mckinley_e9_bundles,
(unsigned long) __end___mckinley_e9_bundles);
}
+#endif
#include <asm/vmmu.h>
#include "vmx_minstate.h"
-/*
- * prev_task <- vmx_ia64_switch_to(struct task_struct *next)
- * With Ingo's new scheduler, interrupts are disabled when this routine gets
- * called. The code starting at .map relies on this. The rest of the code
- * doesn't care about the interrupt masking status.
- *
- * Since we allocate domain stack in xenheap, there's no need to map new
- * domain's stack since all xenheap is mapped by TR. Another different task
- * for vmx_ia64_switch_to is to switch to bank0 and change current pointer.
- */
-GLOBAL_ENTRY(vmx_ia64_switch_to)
- .prologue
- alloc r16=ar.pfs,1,0,0,0
- DO_SAVE_SWITCH_STACK
- .body
-
- bsw.0 // Switch to bank0, because bank0 r21 is current pointer
- ;;
- adds r22=IA64_TASK_THREAD_KSP_OFFSET,r13
- movl r25=init_task
- adds r26=IA64_TASK_THREAD_KSP_OFFSET,in0
- ;;
- st8 [r22]=sp // save kernel stack pointer of old task
- ;;
- /*
- * TR always mapped this task's page, we can skip doing it again.
- */
- ld8 sp=[r26] // load kernel stack pointer of new task
- mov r21=in0 // update "current" application register
- mov r8=r13 // return pointer to previously running task
- mov r13=in0 // set "current" pointer
- ;;
- bsw.1
- ;;
- DO_LOAD_SWITCH_STACK
-
-#ifdef CONFIG_SMP
- sync.i // ensure "fc"s done by this CPU are visible on other CPUs
-#endif
- br.ret.sptk.many rp // boogie on out in new context
-END(vmx_ia64_switch_to)
-
GLOBAL_ENTRY(ia64_leave_nested)
rsm psr.i
;;
#define IS_XEN_ADDRESS(d,a) ((a >= d->xen_vastart) && (a <= d->xen_vaend))
/* FIXME: where these declarations should be there ? */
-extern void domain_pend_keyboard_interrupt(int);
extern long platform_is_hp_ski(void);
-extern void sync_split_caches(void);
extern void serial_input_init(void);
-
static void init_switch_stack(struct vcpu *v);
void build_physmap_table(struct domain *d);
/* Just some sanity to ensure that the scheduler is set up okay. */
ASSERT(current->domain == IDLE_DOMAIN_ID);
raise_softirq(SCHEDULE_SOFTIRQ);
-#if 0
-//do we have to ensure the idle task has a shared page so that, for example,
-//region registers can be loaded from it. Apparently not...
- idle0_task.shared_info = (void *)alloc_xenheap_page();
- memset(idle0_task.shared_info, 0, PAGE_SIZE);
- /* pin mapping */
- // FIXME: Does this belong here? Or do only at domain switch time?
- {
- /* WARNING: following must be inlined to avoid nested fault */
- unsigned long psr = ia64_clear_ic();
- ia64_itr(0x2, IA64_TR_SHARED_INFO, SHAREDINFO_ADDR,
- pte_val(pfn_pte(ia64_tpa(idle0_task.shared_info) >> PAGE_SHIFT, PAGE_KERNEL)),
- PAGE_SHIFT);
- ia64_set_psr(psr);
- ia64_srlz_i();
- }
-#endif
continue_cpu_idle_loop();
}
{
struct pt_regs *regs = vcpu_regs (v);
- printf("arch_getdomaininfo_ctxt\n");
c->regs = *regs;
c->vcpu.evtchn_vector = v->vcpu_info->arch.evtchn_vector;
struct pt_regs *regs = vcpu_regs (v);
struct domain *d = v->domain;
- printf("arch_set_info_guest\n");
if ( test_bit(_VCPUF_initialised, &v->vcpu_flags) )
return 0;
if (c->flags & VGCF_VMX_GUEST) {
dom0_start = alloc_boot_pages(dom0_size >> PAGE_SHIFT, dom0_align >> PAGE_SHIFT);
dom0_start <<= PAGE_SHIFT;
if (!dom0_start) {
- printf("alloc_dom0: can't allocate contiguous memory size=%lu\n",
+ panic("alloc_dom0: can't allocate contiguous memory size=%lu\n",
dom0_size);
- while(1);
}
printf("alloc_dom0: dom0_start=0x%lx\n", dom0_start);
#else
while(1);
}
-
-#if 0
-void switch_to(struct vcpu *prev, struct vcpu *next)
-{
- struct vcpu *last;
-
- __switch_to(prev,next,last);
- //set_current(next);
-}
-#endif
-
void domain_pend_keyboard_interrupt(int irq)
{
vcpu_pend_interrupt(dom0->vcpu[0],irq);
void sync_vcpu_execstate(struct vcpu *v)
{
- ia64_save_fpu(v->arch._thread.fph);
+ __ia64_save_fpu(v->arch._thread.fph);
if (VMX_DOMAIN(v))
vmx_save_state(v);
- else {
- if (IA64_HAS_EXTRA_STATE(v))
- ia64_save_extra(v);
- }
// FIXME SMP: Anything else needed here for SMP?
}
regs->cr_iip = ((unsigned long) PSCBX(v,iva) + vector) & ~0xffUL;
regs->cr_ipsr = (regs->cr_ipsr & ~DELIVER_PSR_CLR) | DELIVER_PSR_SET;
-#ifdef CONFIG_SMP
-#warning "SMP FIXME: sharedinfo doesn't handle smp yet, need page per vcpu"
-#endif
regs->r31 = XSI_IPSR;
v->vcpu_info->evtchn_upcall_mask = 1;
/*
* Xen misc
- *
+ *
* Functions/decls that are/may be needed to link with Xen because
* of x86 dependencies
*
#include <asm/debugger.h>
#include <asm/vmx.h>
#include <asm/vmx_vcpu.h>
+#include <asm/vcpu.h>
-efi_memory_desc_t ia64_efi_io_md;
-EXPORT_SYMBOL(ia64_efi_io_md);
-unsigned long wait_init_idle;
-int phys_proc_id[NR_CPUS];
unsigned long loops_per_jiffy = (1<<12); // from linux/init/main.c
/* FIXME: where these declarations should be there ? */
void ia64_mca_init(void) { printf("ia64_mca_init() skipped (Machine check abort handling)\n"); }
void ia64_mca_cpu_init(void *x) { }
-void ia64_patch_mckinley_e9(unsigned long a, unsigned long b) { }
-void ia64_patch_vtop(unsigned long a, unsigned long b) { }
void hpsim_setup(char **x)
{
#ifdef CONFIG_SMP
return running_on_sim;
}
-/* calls in xen/common code that are unused on ia64 */
-
-void sync_lazy_execstate_cpu(unsigned int cpu) {}
-
-#if 0
-int grant_table_create(struct domain *d) { return 0; }
-void grant_table_destroy(struct domain *d) { return; }
-#endif
struct pt_regs *guest_cpu_user_regs(void) { return vcpu_regs(current); }
-void raise_actimer_softirq(void)
-{
- raise_softirq(TIMER_SOFTIRQ);
-}
-
unsigned long
gmfn_to_mfn_foreign(struct domain *d, unsigned long gpfn)
{
///////////////////////////////
-void free_page_type(struct page_info *page, u32 type)
+static void free_page_type(struct page_info *page, u32 type)
{
-// dummy();
- return;
}
-int alloc_page_type(struct page_info *page, u32 type)
+static int alloc_page_type(struct page_info *page, u32 type)
{
-// dummy();
return 1;
}
{
void *p;
p = alloc_xenheap_pages(0);
- if (p)
+ if (p)
clear_page(p);
return p;
}
void *__module_text_address(unsigned long addr) { return NULL; }
void *module_text_address(unsigned long addr) { return NULL; }
-void cs10foo(void) {}
-void cs01foo(void) {}
-
unsigned long context_switch_count = 0;
-#include <asm/vcpu.h>
+extern struct vcpu *ia64_switch_to (struct vcpu *next_task);
+
void context_switch(struct vcpu *prev, struct vcpu *next)
{
uint64_t pta;
local_irq_save(spsr);
-// if(VMX_DOMAIN(prev)){
-// vtm_domain_out(prev);
-// }
- context_switch_count++;
- switch_to(prev,next,prev);
-// if(VMX_DOMAIN(current)){
-// vtm_domain_in(current);
-// }
+ context_switch_count++;
+
+ __ia64_save_fpu(prev->arch._thread.fph);
+ __ia64_load_fpu(next->arch._thread.fph);
+ if (VMX_DOMAIN(prev))
+ vmx_save_state(prev);
+ if (VMX_DOMAIN(next))
+ vmx_load_state(next);
+ /*ia64_psr(ia64_task_regs(next))->dfh = !ia64_is_local_fpu_owner(next);*/
+ prev = ia64_switch_to(next);
+ if (!VMX_DOMAIN(current)){
+ vcpu_set_next_timer(current);
+ }
+
// leave this debug for now: it acts as a heartbeat when more than
// one domain is active
}
if (VMX_DOMAIN(current)){
-// vtm_domain_in(current);
vmx_load_all_rr(current);
}else{
extern char ia64_ivt;
ia64_set_iva(&ia64_ivt);
if (!is_idle_domain(current->domain)) {
ia64_set_pta(VHPT_ADDR | (1 << 8) | (VHPT_SIZE_LOG2 << 2) |
- VHPT_ENABLED);
+ VHPT_ENABLED);
load_region_regs(current);
vcpu_load_kernel_regs(current);
- if (vcpu_timer_expired(current))
- vcpu_pend_timer(current);
+ if (vcpu_timer_expired(current))
+ vcpu_pend_timer(current);
}else {
- /* When switching to idle domain, only need to disable vhpt
- * walker. Then all accesses happen within idle context will
- * be handled by TR mapping and identity mapping.
- */
- pta = ia64_get_pta();
- ia64_set_pta(pta & ~VHPT_ENABLED);
+ /* When switching to idle domain, only need to disable vhpt
+ * walker. Then all accesses happen within idle context will
+ * be handled by TR mapping and identity mapping.
+ */
+ pta = ia64_get_pta();
+ ia64_set_pta(pta & ~VHPT_ENABLED);
}
}
-
local_irq_restore(spsr);
context_saved(prev);
}
va_list args;
char buf[128];
struct vcpu *v = current;
-
+
printf("$$$$$ PANIC in domain %d (k6=0x%lx): ",
- v->domain->domain_id,
+ v->domain->domain_id,
__get_cpu_var(cpu_kr)._kr[IA64_KR_CURRENT]);
va_start(args, fmt);
(void)vsnprintf(buf, sizeof(buf), fmt, args);
ASSERT((x & PGT_count_mask) != 0);
/*
- * The page should always be validated while a reference is held. The
- * exception is during domain destruction, when we forcibly invalidate
+ * The page should always be validated while a reference is held. The
+ * exception is during domain destruction, when we forcibly invalidate
* page-table pages if we detect a referential loop.
* See domain.c:relinquish_list().
*/
- ASSERT((x & PGT_validated) ||
+ ASSERT((x & PGT_validated) ||
test_bit(_DOMF_dying, &page_get_owner(page)->domain_flags));
if ( unlikely((nx & PGT_count_mask) == 0) )
{
/* Record TLB information for flush later. Races are harmless. */
page->tlbflush_timestamp = tlbflush_current_time();
-
+
if ( unlikely((nx & PGT_type_mask) <= PGT_l4_page_table) &&
likely(nx & PGT_validated) )
{
* 'free' is safe because the refcnt is non-zero and validated
* bit is clear => other ops will spin or fail.
*/
- if ( unlikely((y = cmpxchg(&page->u.inuse.type_info, x,
+ if ( unlikely((y = cmpxchg(&page->u.inuse.type_info, x,
x & ~PGT_validated)) != x) )
goto again;
/* We cleared the 'valid bit' so we do the clean up. */
nx &= ~PGT_validated;
}
}
- else if ( unlikely(((nx & (PGT_pinned | PGT_count_mask)) ==
+ else if ( unlikely(((nx & (PGT_pinned | PGT_count_mask)) ==
(PGT_pinned | 1)) &&
((nx & PGT_type_mask) != PGT_writable_page)) )
{
init_frametable();
- ia64_fph_enable();
- __ia64_init_fpu();
-
alloc_dom0();
end_boot_allocator();
offsetof(vcpu_info_t, evtchn_upcall_mask))
struct arch_vcpu {
-#if 1
TR_ENTRY itrs[NITRS];
TR_ENTRY dtrs[NDTRS];
TR_ENTRY itlb;
unsigned long domain_itm;
unsigned long domain_itm_last;
unsigned long xen_itm;
-#endif
+
mapped_regs_t *privregs; /* save the state of vcpu */
+
+ /* These fields are copied from arch_domain to make access easier/faster
+ in assembly code. */
unsigned long metaphysical_rr0; // from arch_domain (so is pinned)
unsigned long metaphysical_rr4; // from arch_domain (so is pinned)
unsigned long metaphysical_saved_rr0; // from arch_domain (so is pinned)
int breakimm; // from arch_domain (so is pinned)
int starting_rid; /* first RID assigned to domain */
int ending_rid; /* one beyond highest RID assigned to domain */
+
struct thread_struct _thread; // this must be last
thash_cb_t vtlb;
// FOLLOWING FROM linux-2.6.7/include/sched.h
struct mm_struct {
- struct vm_area_struct * mmap; /* list of VMAs */
-#ifndef XEN
- struct rb_root mm_rb;
-#endif
- struct vm_area_struct * mmap_cache; /* last find_vma result */
- unsigned long free_area_cache; /* first hole */
pgd_t * pgd;
- atomic_t mm_users; /* How many users with user space? */
- atomic_t mm_count; /* How many references to "struct mm_struct" (users count as 1) */
- int map_count; /* number of VMAs */
-#ifndef XEN
- struct rw_semaphore mmap_sem;
-#endif
- spinlock_t page_table_lock; /* Protects task page tables and mm->rss */
-
+ // atomic_t mm_users; /* How many users with user space? */
struct list_head pt_list; /* List of pagetable */
-
- struct list_head mmlist; /* List of all active mm's. These are globally strung
- * together off init_mm.mmlist, and are protected
- * by mmlist_lock
- */
-
-#ifndef XEN
- unsigned long start_code, end_code, start_data, end_data;
- unsigned long start_brk, brk, start_stack;
- unsigned long arg_start, arg_end, env_start, env_end;
- unsigned long rss, total_vm, locked_vm;
- unsigned long def_flags;
-
- unsigned long saved_auxv[40]; /* for /proc/PID/auxv */
-
- unsigned dumpable:1;
-#endif
-#ifdef CONFIG_HUGETLB_PAGE
- int used_hugetlb;
-#endif
-#ifndef XEN
- cpumask_t cpu_vm_mask;
-
- /* Architecture-specific MM context */
- mm_context_t context;
-
- /* coredumping support */
- int core_waiters;
- struct completion *core_startup_done, core_done;
-
- /* aio bits */
- rwlock_t ioctx_list_lock;
- struct kioctx *ioctx_list;
-
- struct kioctx default_kioctx;
-#endif
};
extern struct mm_struct init_mm;
#define __SLOW_DOWN_IO do { } while (0)
#define SLOW_DOWN_IO do { } while (0)
-#ifndef XEN
+#ifdef XEN
+#include <asm/xensystem.h>
+#else
#define __IA64_UNCACHED_OFFSET 0xc000000000000000UL /* region 6 */
#endif
void *p;
} ia64_va;
+#ifndef XEN
/*
* Note: These macros depend on the fact that PAGE_OFFSET has all
* region bits set to 1 and all other bits set to zero. They are
*/
#define __pa(x) ({ia64_va _v; _v.l = (long) (x); _v.f.reg = 0; _v.l;})
#define __va(x) ({ia64_va _v; _v.l = (long) (x); _v.f.reg = -1; _v.p;})
+#endif /* XEN */
#define REGION_NUMBER(x) ({ia64_va _v; _v.l = (long) (x); _v.f.reg;})
#define REGION_OFFSET(x) ({ia64_va _v; _v.l = (long) (x); _v.f.off;})
# define __pgprot(x) (x)
#endif /* !STRICT_MM_TYPECHECKS */
+#ifndef XEN
#define PAGE_OFFSET __IA64_UL_CONST(0xe000000000000000)
#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | \
(((current->personality & READ_IMPLIES_EXEC) != 0) \
? VM_EXEC : 0))
-#ifdef XEN
+#else
#include <asm/xenpage.h>
#endif
#define pte_unmap(pte) do { } while (0)
#define pte_unmap_nested(pte) do { } while (0)
+#ifndef XEN
/* atomic versions of the some PTE manipulations: */
static inline int
}
#define update_mmu_cache(vma, address, pte) do { } while (0)
+#endif /* XEN */
extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
extern void paging_init (void);
(__ia64_id_flags & IA64_PSR_I) == 0; \
})
-#ifndef XEN
+#ifdef XEN
+#define local_irq_is_enabled() (!irqs_disabled())
+#else
#ifdef __KERNEL__
#ifdef CONFIG_IA32_SUPPORT
/* It is sometimes very useful to have unsigned long as result. */
#define __va_ul(x) ({xen_va _v; _v.l = (long) (x); _v.f.reg = -1; _v.l;})
-#undef PAGE_OFFSET
-#define PAGE_OFFSET __IA64_UL_CONST(0xf000000000000000)
-
#endif /* _ASM_IA64_XENPAGE_H */
#define HYPERVISOR_VIRT_START 0xf000000000000000
#define KERNEL_START 0xf000000004000000
#define SHAREDINFO_ADDR 0xf100000000000000
-#define SHARED_ARCHINFO_ADDR (SHAREDINFO_ADDR + PAGE_SIZE)
+#define XSI_OFS PAGE_SIZE
+#define SHARED_ARCHINFO_ADDR (SHAREDINFO_ADDR + XSI_OFS)
#define PERCPU_ADDR (SHAREDINFO_ADDR - PERCPU_PAGE_SIZE)
-#define XSI_OFS (SHARED_ARCHINFO_ADDR - SHAREDINFO_ADDR)
#define VHPT_ADDR 0xf200000000000000
#ifdef CONFIG_VIRTUAL_FRAME_TABLE
#define VIRT_FRAME_TABLE_ADDR 0xf300000000000000
#endif
#define XEN_END_ADDR 0xf400000000000000
-#define IS_VMM_ADDRESS(addr) ((((addr) >> 60) ^ ((addr) >> 59)) & 1)
-
-#ifndef __ASSEMBLY__
-
-#define IA64_HAS_EXTRA_STATE(t) 0
-
-struct vcpu;
-extern void ia64_save_extra (struct vcpu *v);
-extern void ia64_load_extra (struct vcpu *v);
+#define PAGE_OFFSET __IA64_UL_CONST(0xf000000000000000)
-extern struct vcpu *vmx_ia64_switch_to (struct vcpu *next_task);
-extern struct vcpu *ia64_switch_to (struct vcpu *next_task);
-
-#define __switch_to(prev,next,last) do { \
- ia64_save_fpu(prev->arch._thread.fph); \
- ia64_load_fpu(next->arch._thread.fph); \
- if (VMX_DOMAIN(prev)) \
- vmx_save_state(prev); \
- else { \
- if (IA64_HAS_EXTRA_STATE(prev)) \
- ia64_save_extra(prev); \
- } \
- if (VMX_DOMAIN(next)) \
- vmx_load_state(next); \
- else { \
- if (IA64_HAS_EXTRA_STATE(next)) \
- ia64_save_extra(next); \
- } \
- /*ia64_psr(ia64_task_regs(next))->dfh = !ia64_is_local_fpu_owner(next);*/ \
- (last) = ia64_switch_to((next)); \
- if (!VMX_DOMAIN(current)){ \
- vcpu_set_next_timer(current); \
- } \
-} while (0)
-
-// FIXME SMP... see system.h, does this need to be different?
-#define switch_to(prev,next,last) __switch_to(prev, next, last)
-
-#define local_irq_is_enabled() (!irqs_disabled())
+#define IS_VMM_ADDRESS(addr) ((((addr) >> 60) ^ ((addr) >> 59)) & 1)
-#endif // __ASSEMBLY__
#endif // _ASM_IA64_XENSYSTEM_H