ia64/xen-unstable

changeset 8533:1572681e4e5a

General start-of-day cleanups, resulting in the
x86/32 map_domain_page() mapcache now being per-domain
rather than global.
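
In outline, the mapcache state that used to live in file-scope globals in
xen/arch/x86/x86_32/domain_page.c moves into struct arch_domain, so each
domain's map_domain_page() mappings are tracked and locked independently.
A minimal sketch, distilled from the asm-x86/domain.h and domain_page.c
hunks below (not new code):

    struct mapcache {
        l1_pgentry_t *l1tab;   /* per-domain L1 table backing the cache */
        unsigned int cursor;   /* next slot to try (was the global map_idx) */
        unsigned int epoch, shadow_epoch[MAX_VIRT_CPUS]; /* wrap/flush tracking */
        spinlock_t lock;       /* was the global map_lock */
    };

    /* Callers now reach the cache through the owning domain: */
    struct mapcache *cache = &current->domain->arch.mapcache;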

Other cleanups include removal of the static definition of
CPU0's idle domain. Instead, a single idle domain, with one idle
vcpu per physical CPU, is created dynamically early during bootstrap.
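
Concretely, the statically-initialised idle0_domain/idle0_vcpu objects on
x86 disappear, and idleness becomes a domain-ID test rather than a flag
bit. A rough sketch of the new boot flow, pieced together from the
setup.c, smpboot.c and xen/sched.h hunks below:

    /* Early in __start_xen(), after scheduler_init(): */
    idle_domain = do_createdomain(IDLE_DOMAIN_ID, 0);
    BUG_ON(idle_domain == NULL);
    idle_vcpu[0] = idle_domain->vcpu[0];

    /* Each secondary CPU gets its own vcpu in the same idle domain: */
    idle_vcpu[cpu] = alloc_vcpu(idle_vcpu[0]->domain, cpu, cpu);

    /* The flag-based test is replaced by: */
    #define is_idle_domain(d) ((d)->domain_id == IDLE_DOMAIN_ID)
    #define is_idle_vcpu(v)   (is_idle_domain((v)->domain))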

Signed-off-by: Keir Fraser <keir@xensource.com>
author kaf24@firebug.cl.cam.ac.uk
date Mon Jan 09 19:44:30 2006 +0100 (2006-01-09)
parents dfb836264898
children da7873110bbb
files xen/arch/ia64/linux-xen/smpboot.c xen/arch/ia64/xen/idle0_task.c xen/arch/ia64/xen/xensetup.c xen/arch/x86/domain.c xen/arch/x86/domain_build.c xen/arch/x86/idle0_task.c xen/arch/x86/mm.c xen/arch/x86/setup.c xen/arch/x86/smpboot.c xen/arch/x86/traps.c xen/arch/x86/x86_32/domain_page.c xen/arch/x86/x86_32/mm.c xen/arch/x86/x86_64/mm.c xen/common/domain.c xen/common/sched_bvt.c xen/common/sched_sedf.c xen/common/schedule.c xen/include/asm-x86/config.h xen/include/asm-x86/domain.h xen/include/xen/sched.h
     1.1 --- a/xen/arch/ia64/linux-xen/smpboot.c	Mon Jan 09 14:43:46 2006 +0000
     1.2 +++ b/xen/arch/ia64/linux-xen/smpboot.c	Mon Jan 09 19:44:30 2006 +0100
     1.3 @@ -484,7 +484,6 @@ do_rest:
     1.4  
     1.5  	if ( (idle = do_createdomain(IDLE_DOMAIN_ID, cpu)) == NULL )
     1.6  		panic("failed 'createdomain' for CPU %d", cpu);
     1.7 -	set_bit(_DOMF_idle_domain, &idle->domain_flags);
     1.8  	v = idle->vcpu[0];
     1.9  
    1.10  	printf ("do_boot_cpu: cpu=%d, domain=%p, vcpu=%p\n", cpu, idle, v);
     2.1 --- a/xen/arch/ia64/xen/idle0_task.c	Mon Jan 09 14:43:46 2006 +0000
     2.2 +++ b/xen/arch/ia64/xen/idle0_task.c	Mon Jan 09 19:44:30 2006 +0100
     2.3 @@ -22,7 +22,6 @@
     2.4  #define IDLE0_DOMAIN(_t)             \
     2.5  {                                    \
     2.6      domain_id:   IDLE_DOMAIN_ID,     \
     2.7 -    domain_flags:DOMF_idle_domain,   \
     2.8      refcnt:      ATOMIC_INIT(1)      \
     2.9  }
    2.10  
     3.1 --- a/xen/arch/ia64/xen/xensetup.c	Mon Jan 09 14:43:46 2006 +0000
     3.2 +++ b/xen/arch/ia64/xen/xensetup.c	Mon Jan 09 19:44:30 2006 +0100
     3.3 @@ -26,7 +26,7 @@ unsigned long xenheap_phys_end;
     3.4  
     3.5  char saved_command_line[COMMAND_LINE_SIZE];
     3.6  
     3.7 -struct vcpu *idle_domain[NR_CPUS] = { &idle0_vcpu };
     3.8 +struct vcpu *idle_vcpu[NR_CPUS] = { &idle0_vcpu };
     3.9  
    3.10  cpumask_t cpu_present_map;
    3.11  
     4.1 --- a/xen/arch/x86/domain.c	Mon Jan 09 14:43:46 2006 +0000
     4.2 +++ b/xen/arch/x86/domain.c	Mon Jan 09 19:44:30 2006 +0100
     4.3 @@ -91,11 +91,9 @@ void startup_cpu_idle_loop(void)
     4.4  {
     4.5      struct vcpu *v = current;
     4.6  
     4.7 -    ASSERT(is_idle_domain(v->domain));
     4.8 -    percpu_ctxt[smp_processor_id()].curr_vcpu = v;
     4.9 +    ASSERT(is_idle_vcpu(v));
    4.10      cpu_set(smp_processor_id(), v->domain->domain_dirty_cpumask);
    4.11      cpu_set(smp_processor_id(), v->vcpu_dirty_cpumask);
    4.12 -    v->arch.schedule_tail = continue_idle_domain;
    4.13  
    4.14      reset_stack_and_jump(idle_loop);
    4.15  }
    4.16 @@ -217,14 +215,20 @@ struct vcpu *alloc_vcpu_struct(struct do
    4.17  
    4.18      memset(v, 0, sizeof(*v));
    4.19  
    4.20 -    memcpy(&v->arch, &idle0_vcpu.arch, sizeof(v->arch));
    4.21 +    memcpy(&v->arch, &idle_vcpu[0]->arch, sizeof(v->arch));
    4.22      v->arch.flags = TF_kernel_mode;
    4.23  
    4.24 +    if ( is_idle_domain(d) )
    4.25 +    {
    4.26 +        percpu_ctxt[vcpu_id].curr_vcpu = v;
    4.27 +        v->arch.schedule_tail = continue_idle_domain;
    4.28 +    }
    4.29 +
    4.30      if ( (v->vcpu_id = vcpu_id) != 0 )
    4.31      {
    4.32          v->arch.schedule_tail  = d->vcpu[0]->arch.schedule_tail;
    4.33          v->arch.perdomain_ptes =
    4.34 -            d->arch.mm_perdomain_pt + (vcpu_id << PDPT_VCPU_SHIFT);
    4.35 +            d->arch.mm_perdomain_pt + (vcpu_id << GDT_LDT_VCPU_SHIFT);
    4.36      }
    4.37  
    4.38      return v;
    4.39 @@ -259,31 +263,11 @@ int arch_do_createdomain(struct vcpu *v)
    4.40      int i;
    4.41  #endif
    4.42  
    4.43 -    if ( is_idle_domain(d) )
    4.44 -        return 0;
    4.45 -
    4.46 -    d->arch.ioport_caps = 
    4.47 -        rangeset_new(d, "I/O Ports", RANGESETF_prettyprint_hex);
    4.48 -    if ( d->arch.ioport_caps == NULL )
    4.49 -        return -ENOMEM;
    4.50 -
    4.51 -    if ( (d->shared_info = alloc_xenheap_page()) == NULL )
    4.52 -        return -ENOMEM;
    4.53 -
    4.54 -    if ( (rc = ptwr_init(d)) != 0 )
    4.55 -    {
    4.56 -        free_xenheap_page(d->shared_info);
    4.57 -        return rc;
    4.58 -    }
    4.59 -
    4.60 -    v->arch.schedule_tail = continue_nonidle_domain;
    4.61 -
    4.62 -    memset(d->shared_info, 0, PAGE_SIZE);
    4.63 -    v->vcpu_info = &d->shared_info->vcpu_info[v->vcpu_id];
    4.64 -    SHARE_PFN_WITH_DOMAIN(virt_to_page(d->shared_info), d);
    4.65 -
    4.66      pdpt_order = get_order_from_bytes(PDPT_L1_ENTRIES * sizeof(l1_pgentry_t));
    4.67      d->arch.mm_perdomain_pt = alloc_xenheap_pages(pdpt_order);
    4.68 +    if ( d->arch.mm_perdomain_pt == NULL )
    4.69 +        goto fail_nomem;
    4.70 +
    4.71      memset(d->arch.mm_perdomain_pt, 0, PAGE_SIZE << pdpt_order);
    4.72      v->arch.perdomain_ptes = d->arch.mm_perdomain_pt;
    4.73  
    4.74 @@ -296,34 +280,75 @@ int arch_do_createdomain(struct vcpu *v)
    4.75       */
    4.76      gdt_l1e = l1e_from_page(virt_to_page(gdt_table), PAGE_HYPERVISOR);
    4.77      for ( vcpuid = 0; vcpuid < MAX_VIRT_CPUS; vcpuid++ )
    4.78 -        d->arch.mm_perdomain_pt[
    4.79 -            (vcpuid << PDPT_VCPU_SHIFT) + FIRST_RESERVED_GDT_PAGE] = gdt_l1e;
    4.80 +        d->arch.mm_perdomain_pt[((vcpuid << GDT_LDT_VCPU_SHIFT) +
    4.81 +                                 FIRST_RESERVED_GDT_PAGE)] = gdt_l1e;
    4.82  
    4.83      v->arch.guest_vtable  = __linear_l2_table;
    4.84      v->arch.shadow_vtable = __shadow_linear_l2_table;
    4.85  
    4.86 -#ifdef __x86_64__
    4.87 +#if defined(__i386__)
    4.88 +
    4.89 +    d->arch.mapcache.l1tab = d->arch.mm_perdomain_pt +
    4.90 +        (GDT_LDT_MBYTES << (20 - PAGE_SHIFT));
    4.91 +    spin_lock_init(&d->arch.mapcache.lock);
    4.92 +
    4.93 +#else /* __x86_64__ */
    4.94 +
    4.95      v->arch.guest_vl3table = __linear_l3_table;
    4.96      v->arch.guest_vl4table = __linear_l4_table;
    4.97  
    4.98      d->arch.mm_perdomain_l2 = alloc_xenheap_page();
    4.99 +    d->arch.mm_perdomain_l3 = alloc_xenheap_page();
   4.100 +    if ( (d->arch.mm_perdomain_l2 == NULL) ||
   4.101 +         (d->arch.mm_perdomain_l3 == NULL) )
   4.102 +        goto fail_nomem;
   4.103 +
   4.104      memset(d->arch.mm_perdomain_l2, 0, PAGE_SIZE);
   4.105      for ( i = 0; i < (1 << pdpt_order); i++ )
   4.106          d->arch.mm_perdomain_l2[l2_table_offset(PERDOMAIN_VIRT_START)+i] =
   4.107              l2e_from_page(virt_to_page(d->arch.mm_perdomain_pt)+i,
   4.108                            __PAGE_HYPERVISOR);
   4.109  
   4.110 -    d->arch.mm_perdomain_l3 = alloc_xenheap_page();
   4.111      memset(d->arch.mm_perdomain_l3, 0, PAGE_SIZE);
   4.112      d->arch.mm_perdomain_l3[l3_table_offset(PERDOMAIN_VIRT_START)] =
   4.113          l3e_from_page(virt_to_page(d->arch.mm_perdomain_l2),
   4.114                              __PAGE_HYPERVISOR);
   4.115 -#endif
   4.116 +
   4.117 +#endif /* __x86_64__ */
   4.118  
   4.119      shadow_lock_init(d);
   4.120      INIT_LIST_HEAD(&d->arch.free_shadow_frames);
   4.121  
   4.122 +    if ( !is_idle_domain(d) )
   4.123 +    {
   4.124 +        d->arch.ioport_caps = 
   4.125 +            rangeset_new(d, "I/O Ports", RANGESETF_prettyprint_hex);
   4.126 +        if ( d->arch.ioport_caps == NULL )
   4.127 +            goto fail_nomem;
   4.128 +
   4.129 +        if ( (d->shared_info = alloc_xenheap_page()) == NULL )
   4.130 +            goto fail_nomem;
   4.131 +
   4.132 +        if ( (rc = ptwr_init(d)) != 0 )
   4.133 +            goto fail_nomem;
   4.134 +
   4.135 +        memset(d->shared_info, 0, PAGE_SIZE);
   4.136 +        v->vcpu_info = &d->shared_info->vcpu_info[v->vcpu_id];
   4.137 +        SHARE_PFN_WITH_DOMAIN(virt_to_page(d->shared_info), d);
   4.138 +
   4.139 +        v->arch.schedule_tail = continue_nonidle_domain;
   4.140 +    }
   4.141 +
   4.142      return 0;
   4.143 +
   4.144 + fail_nomem:
   4.145 +    free_xenheap_page(d->shared_info);
   4.146 +#ifdef __x86_64__
   4.147 +    free_xenheap_page(d->arch.mm_perdomain_l2);
   4.148 +    free_xenheap_page(d->arch.mm_perdomain_l3);
   4.149 +#endif
   4.150 +    free_xenheap_pages(d->arch.mm_perdomain_pt, pdpt_order);
   4.151 +    return -ENOMEM;
   4.152  }
   4.153  
   4.154  /* This is called by arch_final_setup_guest and do_boot_vcpu */
   4.155 @@ -692,7 +717,7 @@ static void __context_switch(void)
   4.156      ASSERT(p != n);
   4.157      ASSERT(cpus_empty(n->vcpu_dirty_cpumask));
   4.158  
   4.159 -    if ( !is_idle_domain(p->domain) )
   4.160 +    if ( !is_idle_vcpu(p) )
   4.161      {
   4.162          memcpy(&p->arch.guest_context.user_regs,
   4.163                 stack_regs,
   4.164 @@ -701,7 +726,7 @@ static void __context_switch(void)
   4.165          save_segments(p);
   4.166      }
   4.167  
   4.168 -    if ( !is_idle_domain(n->domain) )
   4.169 +    if ( !is_idle_vcpu(n) )
   4.170      {
   4.171          memcpy(stack_regs,
   4.172                 &n->arch.guest_context.user_regs,
   4.173 @@ -767,7 +792,7 @@ void context_switch(struct vcpu *prev, s
   4.174  
   4.175      set_current(next);
   4.176  
   4.177 -    if ( (percpu_ctxt[cpu].curr_vcpu == next) || is_idle_domain(next->domain) )
   4.178 +    if ( (percpu_ctxt[cpu].curr_vcpu == next) || is_idle_vcpu(next) )
   4.179      {
   4.180          local_irq_enable();
   4.181      }
     5.1 --- a/xen/arch/x86/domain_build.c	Mon Jan 09 14:43:46 2006 +0000
     5.2 +++ b/xen/arch/x86/domain_build.c	Mon Jan 09 19:44:30 2006 +0100
     5.3 @@ -366,27 +366,20 @@ int construct_dom0(struct domain *d,
     5.4          l2tab[(LINEAR_PT_VIRT_START >> L2_PAGETABLE_SHIFT)+i] =
     5.5              l2e_from_paddr((u32)l2tab + i*PAGE_SIZE, __PAGE_HYPERVISOR);
     5.6      }
     5.7 -    {
     5.8 -        unsigned long va;
     5.9 -        for (va = PERDOMAIN_VIRT_START; va < PERDOMAIN_VIRT_END;
    5.10 -             va += (1 << L2_PAGETABLE_SHIFT)) {
    5.11 -            l2tab[va >> L2_PAGETABLE_SHIFT] =
    5.12 -                l2e_from_paddr(__pa(d->arch.mm_perdomain_pt) +
    5.13 -                               (va-PERDOMAIN_VIRT_START),
    5.14 -                               __PAGE_HYPERVISOR);
    5.15 -        }
    5.16 -    }
    5.17      v->arch.guest_table = mk_pagetable((unsigned long)l3start);
    5.18  #else
    5.19      l2start = l2tab = (l2_pgentry_t *)mpt_alloc; mpt_alloc += PAGE_SIZE;
    5.20      memcpy(l2tab, &idle_pg_table[0], PAGE_SIZE);
    5.21      l2tab[LINEAR_PT_VIRT_START >> L2_PAGETABLE_SHIFT] =
    5.22          l2e_from_paddr((unsigned long)l2start, __PAGE_HYPERVISOR);
    5.23 -    l2tab[PERDOMAIN_VIRT_START >> L2_PAGETABLE_SHIFT] =
    5.24 -        l2e_from_paddr(__pa(d->arch.mm_perdomain_pt), __PAGE_HYPERVISOR);
    5.25      v->arch.guest_table = mk_pagetable((unsigned long)l2start);
    5.26  #endif
    5.27  
    5.28 +    for ( i = 0; i < PDPT_L2_ENTRIES; i++ )
    5.29 +        l2tab[l2_linear_offset(PERDOMAIN_VIRT_START) + i] =
    5.30 +            l2e_from_page(virt_to_page(d->arch.mm_perdomain_pt) + i,
    5.31 +                          __PAGE_HYPERVISOR);
    5.32 +
    5.33      l2tab += l2_linear_offset(dsi.v_start);
    5.34      mfn = alloc_spfn;
    5.35      for ( count = 0; count < ((v_end-dsi.v_start)>>PAGE_SHIFT); count++ )
     6.1 --- a/xen/arch/x86/idle0_task.c	Mon Jan 09 14:43:46 2006 +0000
     6.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
     6.3 @@ -1,28 +0,0 @@
     6.4 -
     6.5 -#include <xen/config.h>
     6.6 -#include <xen/sched.h>
     6.7 -#include <asm/desc.h>
     6.8 -
     6.9 -struct domain idle0_domain = {
    6.10 -    domain_id:   IDLE_DOMAIN_ID,
    6.11 -    domain_flags:DOMF_idle_domain,
    6.12 -    refcnt:      ATOMIC_INIT(1)
    6.13 -};
    6.14 -
    6.15 -struct vcpu idle0_vcpu = {
    6.16 -    processor:   0,
    6.17 -    cpu_affinity:CPU_MASK_CPU0,
    6.18 -    domain:      &idle0_domain
    6.19 -};
    6.20 -
    6.21 -struct tss_struct init_tss[NR_CPUS];
    6.22 -
    6.23 -/*
    6.24 - * Local variables:
    6.25 - * mode: C
    6.26 - * c-set-style: "BSD"
    6.27 - * c-basic-offset: 4
    6.28 - * tab-width: 4
    6.29 - * indent-tabs-mode: nil
    6.30 - * End:
    6.31 - */
     7.1 --- a/xen/arch/x86/mm.c	Mon Jan 09 14:43:46 2006 +0000
     7.2 +++ b/xen/arch/x86/mm.c	Mon Jan 09 19:44:30 2006 +0100
     7.3 @@ -841,10 +841,11 @@ static int alloc_l2_table(struct pfn_inf
     7.4             L2_PAGETABLE_XEN_SLOTS * sizeof(l2_pgentry_t));
     7.5      pl2e[l2_table_offset(LINEAR_PT_VIRT_START)] =
     7.6          l2e_from_pfn(pfn, __PAGE_HYPERVISOR);
     7.7 -    pl2e[l2_table_offset(PERDOMAIN_VIRT_START)] =
     7.8 -        l2e_from_page(
     7.9 -            virt_to_page(page_get_owner(page)->arch.mm_perdomain_pt),
    7.10 -            __PAGE_HYPERVISOR);
    7.11 +    for ( i = 0; i < PDPT_L2_ENTRIES; i++ )
    7.12 +        pl2e[l2_table_offset(PERDOMAIN_VIRT_START) + i] =
    7.13 +            l2e_from_page(
    7.14 +                virt_to_page(page_get_owner(page)->arch.mm_perdomain_pt) + i,
    7.15 +                __PAGE_HYPERVISOR);
    7.16  #endif
    7.17  
    7.18      unmap_domain_page(pl2e);
     8.1 --- a/xen/arch/x86/setup.c	Mon Jan 09 14:43:46 2006 +0000
     8.2 +++ b/xen/arch/x86/setup.c	Mon Jan 09 19:44:30 2006 +0100
     8.3 @@ -81,6 +81,10 @@ extern void early_time_init(void);
     8.4  extern void initialize_keytable(void);
     8.5  extern void early_cpu_init(void);
     8.6  
     8.7 +struct tss_struct init_tss[NR_CPUS];
     8.8 +
     8.9 +struct vcpu *idle_vcpu[NR_CPUS];
    8.10 +
    8.11  extern unsigned long cpu0_stack[];
    8.12  
    8.13  struct cpuinfo_x86 boot_cpu_data = { 0, 0, 0, 0, -1, 1, 0, 0, -1 };
    8.14 @@ -92,8 +96,6 @@ unsigned long mmu_cr4_features = X86_CR4
    8.15  #endif
    8.16  EXPORT_SYMBOL(mmu_cr4_features);
    8.17  
    8.18 -struct vcpu *idle_domain[NR_CPUS] = { &idle0_vcpu };
    8.19 -
    8.20  int acpi_disabled;
    8.21  
    8.22  int acpi_force;
    8.23 @@ -144,8 +146,8 @@ static struct e820entry e820_raw[E820MAX
    8.24  
    8.25  void __init __start_xen(multiboot_info_t *mbi)
    8.26  {
    8.27 -    unsigned long vgdt, gdt_pfn;
    8.28      char *cmdline;
    8.29 +    struct domain *idle_domain;
    8.30      unsigned long _initrd_start = 0, _initrd_len = 0;
    8.31      unsigned int initrdidx = 1;
    8.32      module_t *mod = (module_t *)__va(mbi->mods_addr);
    8.33 @@ -164,7 +166,7 @@ void __init __start_xen(multiboot_info_t
    8.34          cmdline_parse(__va(mbi->cmdline));
    8.35  
    8.36      /* Must do this early -- e.g., spinlocks rely on get_current(). */
    8.37 -    set_current(&idle0_vcpu);
    8.38 +    set_current(NULL/*idle_vcpu[0]*/);
    8.39      set_processor_id(0);
    8.40  
    8.41      smp_prepare_boot_cpu();
    8.42 @@ -382,6 +384,14 @@ void __init __start_xen(multiboot_info_t
    8.43  
    8.44      early_cpu_init();
    8.45  
    8.46 +    scheduler_init();
    8.47 +
    8.48 +    idle_domain = do_createdomain(IDLE_DOMAIN_ID, 0);
    8.49 +    BUG_ON(idle_domain == NULL);
    8.50 +
    8.51 +    set_current(idle_domain->vcpu[0]);
    8.52 +    idle_vcpu[0] = current;
    8.53 +
    8.54      paging_init();
    8.55  
    8.56      /* Unmap the first page of CPU0's stack. */
    8.57 @@ -394,21 +404,6 @@ void __init __start_xen(multiboot_info_t
    8.58  
    8.59      sort_exception_tables();
    8.60  
    8.61 -    if ( arch_do_createdomain(current) != 0 )
    8.62 -        BUG();
    8.63 -
    8.64 -    /*
    8.65 -     * Map default GDT into its final positions in the idle page table. As
    8.66 -     * noted in arch_do_createdomain(), we must map for every possible VCPU#.
    8.67 -     */
    8.68 -    vgdt = GDT_VIRT_START(current) + FIRST_RESERVED_GDT_BYTE;
    8.69 -    gdt_pfn = virt_to_phys(gdt_table) >> PAGE_SHIFT;
    8.70 -    for ( i = 0; i < MAX_VIRT_CPUS; i++ )
    8.71 -    {
    8.72 -        map_pages_to_xen(vgdt, gdt_pfn, 1, PAGE_HYPERVISOR);
    8.73 -        vgdt += 1 << PDPT_VCPU_VA_SHIFT;
    8.74 -    }
    8.75 -
    8.76      find_smp_config();
    8.77  
    8.78      smp_alloc_memory();
    8.79 @@ -435,8 +430,6 @@ void __init __start_xen(multiboot_info_t
    8.80  
    8.81      arch_init_memory();
    8.82  
    8.83 -    scheduler_init();
    8.84 -
    8.85      identify_cpu(&boot_cpu_data);
    8.86      if ( cpu_has_fxsr )
    8.87          set_in_cr4(X86_CR4_OSFXSR);
     9.1 --- a/xen/arch/x86/smpboot.c	Mon Jan 09 14:43:46 2006 +0000
     9.2 +++ b/xen/arch/x86/smpboot.c	Mon Jan 09 19:44:30 2006 +0100
     9.3 @@ -435,7 +435,7 @@ void __init start_secondary(void *unused
     9.4  
     9.5  	extern void percpu_traps_init(void);
     9.6  
     9.7 -	set_current(idle_domain[cpu]);
     9.8 +	set_current(idle_vcpu[cpu]);
     9.9  	set_processor_id(cpu);
    9.10  
    9.11  	percpu_traps_init();
    9.12 @@ -761,7 +761,6 @@ static int __init do_boot_cpu(int apicid
    9.13   * Returns zero if CPU booted OK, else error code from wakeup_secondary_cpu.
    9.14   */
    9.15  {
    9.16 -	struct domain *idle;
    9.17  	struct vcpu *v;
    9.18  	unsigned long boot_error;
    9.19  	int timeout, cpu;
    9.20 @@ -770,14 +769,10 @@ static int __init do_boot_cpu(int apicid
    9.21  
    9.22  	cpu = ++cpucount;
    9.23  
    9.24 -	if ( (idle = do_createdomain(IDLE_DOMAIN_ID, cpu)) == NULL )
    9.25 -		panic("failed 'createdomain' for CPU %d", cpu);
    9.26 +	v = idle_vcpu[cpu] = alloc_vcpu(idle_vcpu[0]->domain, cpu, cpu);
    9.27 +        BUG_ON(v == NULL);
    9.28  
    9.29 -	v = idle_domain[cpu] = idle->vcpu[0];
    9.30 -
    9.31 -	set_bit(_DOMF_idle_domain, &idle->domain_flags);
    9.32 -
    9.33 -	v->arch.monitor_table = mk_pagetable(__pa(idle_pg_table));
    9.34 +        v->arch.monitor_table = mk_pagetable(__pa(idle_pg_table));
    9.35  
    9.36  	/* start_eip had better be page-aligned! */
    9.37  	start_eip = setup_trampoline();
    10.1 --- a/xen/arch/x86/traps.c	Mon Jan 09 14:43:46 2006 +0000
    10.2 +++ b/xen/arch/x86/traps.c	Mon Jan 09 19:44:30 2006 +0100
    10.3 @@ -427,7 +427,7 @@ void propagate_page_fault(unsigned long 
    10.4          tb->flags |= TBF_INTERRUPT;
    10.5  }
    10.6  
    10.7 -static int handle_perdomain_mapping_fault(
    10.8 +static int handle_gdt_ldt_mapping_fault(
    10.9      unsigned long offset, struct cpu_user_regs *regs)
   10.10  {
   10.11      extern int map_ldt_shadow_page(unsigned int);
   10.12 @@ -437,14 +437,14 @@ static int handle_perdomain_mapping_faul
   10.13      int ret;
   10.14  
   10.15      /* Which vcpu's area did we fault in, and is it in the ldt sub-area? */
   10.16 -    unsigned int is_ldt_area = (offset >> (PDPT_VCPU_VA_SHIFT-1)) & 1;
   10.17 -    unsigned int vcpu_area   = (offset >> PDPT_VCPU_VA_SHIFT);
   10.18 +    unsigned int is_ldt_area = (offset >> (GDT_LDT_VCPU_VA_SHIFT-1)) & 1;
   10.19 +    unsigned int vcpu_area   = (offset >> GDT_LDT_VCPU_VA_SHIFT);
   10.20  
   10.21      /* Should never fault in another vcpu's area. */
   10.22      BUG_ON(vcpu_area != current->vcpu_id);
   10.23  
   10.24      /* Byte offset within the gdt/ldt sub-area. */
   10.25 -    offset &= (1UL << (PDPT_VCPU_VA_SHIFT-1)) - 1UL;
   10.26 +    offset &= (1UL << (GDT_LDT_VCPU_VA_SHIFT-1)) - 1UL;
   10.27  
   10.28      if ( likely(is_ldt_area) )
   10.29      {
   10.30 @@ -490,9 +490,9 @@ static int fixup_page_fault(unsigned lon
   10.31      {
   10.32          if ( shadow_mode_external(d) && GUEST_CONTEXT(v, regs) )
   10.33              return shadow_fault(addr, regs);
   10.34 -        if ( (addr >= PERDOMAIN_VIRT_START) && (addr < PERDOMAIN_VIRT_END) )
   10.35 -            return handle_perdomain_mapping_fault(
   10.36 -                addr - PERDOMAIN_VIRT_START, regs);
   10.37 +        if ( (addr >= GDT_LDT_VIRT_START) && (addr < GDT_LDT_VIRT_END) )
   10.38 +            return handle_gdt_ldt_mapping_fault(
   10.39 +                addr - GDT_LDT_VIRT_START, regs);
   10.40      }
   10.41      else if ( unlikely(shadow_mode_enabled(d)) )
   10.42      {
    11.1 --- a/xen/arch/x86/x86_32/domain_page.c	Mon Jan 09 14:43:46 2006 +0000
    11.2 +++ b/xen/arch/x86/x86_32/domain_page.c	Mon Jan 09 19:44:30 2006 +0100
    11.3 @@ -23,28 +23,24 @@
    11.4  #define MAPCACHE_ORDER    10
    11.5  #define MAPCACHE_ENTRIES  (1 << MAPCACHE_ORDER)
    11.6  
    11.7 -l1_pgentry_t *mapcache;
    11.8 -static unsigned int map_idx, epoch, shadow_epoch[NR_CPUS];
    11.9 -static spinlock_t map_lock = SPIN_LOCK_UNLOCKED;
   11.10 -
   11.11  /* Use a spare PTE bit to mark entries ready for recycling. */
   11.12  #define READY_FOR_TLB_FLUSH (1<<10)
   11.13  
   11.14  static void flush_all_ready_maps(void)
   11.15  {
   11.16 -    l1_pgentry_t *cache = mapcache;
   11.17 +    struct mapcache *cache = &current->domain->arch.mapcache;
   11.18      unsigned int i;
   11.19  
   11.20      for ( i = 0; i < MAPCACHE_ENTRIES; i++ )
   11.21 -        if ( (l1e_get_flags(cache[i]) & READY_FOR_TLB_FLUSH) )
   11.22 -            cache[i] = l1e_empty();
   11.23 +        if ( (l1e_get_flags(cache->l1tab[i]) & READY_FOR_TLB_FLUSH) )
   11.24 +            cache->l1tab[i] = l1e_empty();
   11.25  }
   11.26  
   11.27  void *map_domain_pages(unsigned long pfn, unsigned int order)
   11.28  {
   11.29      unsigned long va;
   11.30 -    unsigned int idx, i, flags, cpu = smp_processor_id();
   11.31 -    l1_pgentry_t *cache = mapcache;
   11.32 +    unsigned int idx, i, flags, vcpu = current->vcpu_id;
   11.33 +    struct mapcache *cache = &current->domain->arch.mapcache;
   11.34  #ifndef NDEBUG
   11.35      unsigned int flush_count = 0;
   11.36  #endif
   11.37 @@ -52,37 +48,41 @@ void *map_domain_pages(unsigned long pfn
   11.38      ASSERT(!in_irq());
   11.39      perfc_incrc(map_domain_page_count);
   11.40  
   11.41 -    spin_lock(&map_lock);
   11.42 +    /* If we are the idle domain, ensure that we run on our own page tables. */
   11.43 +    if ( unlikely(is_idle_vcpu(current)) )
   11.44 +        __sync_lazy_execstate();
   11.45 +
   11.46 +    spin_lock(&cache->lock);
   11.47  
   11.48      /* Has some other CPU caused a wrap? We must flush if so. */
   11.49 -    if ( epoch != shadow_epoch[cpu] )
   11.50 +    if ( cache->epoch != cache->shadow_epoch[vcpu] )
   11.51      {
   11.52          perfc_incrc(domain_page_tlb_flush);
   11.53          local_flush_tlb();
   11.54 -        shadow_epoch[cpu] = epoch;
   11.55 +        cache->shadow_epoch[vcpu] = cache->epoch;
   11.56      }
   11.57  
   11.58      do {
   11.59 -        idx = map_idx = (map_idx + 1) & (MAPCACHE_ENTRIES - 1);
   11.60 +        idx = cache->cursor = (cache->cursor + 1) & (MAPCACHE_ENTRIES - 1);
   11.61          if ( unlikely(idx == 0) )
   11.62          {
   11.63              ASSERT(flush_count++ == 0);
   11.64              flush_all_ready_maps();
   11.65              perfc_incrc(domain_page_tlb_flush);
   11.66              local_flush_tlb();
   11.67 -            shadow_epoch[cpu] = ++epoch;
   11.68 +            cache->shadow_epoch[vcpu] = ++cache->epoch;
   11.69          }
   11.70  
   11.71          flags = 0;
   11.72          for ( i = 0; i < (1U << order); i++ )
   11.73 -            flags |= l1e_get_flags(cache[idx+i]);
   11.74 +            flags |= l1e_get_flags(cache->l1tab[idx+i]);
   11.75      }
   11.76      while ( flags & _PAGE_PRESENT );
   11.77  
   11.78      for ( i = 0; i < (1U << order); i++ )
   11.79 -        cache[idx+i] = l1e_from_pfn(pfn+i, __PAGE_HYPERVISOR);
   11.80 +        cache->l1tab[idx+i] = l1e_from_pfn(pfn+i, __PAGE_HYPERVISOR);
   11.81  
   11.82 -    spin_unlock(&map_lock);
   11.83 +    spin_unlock(&cache->lock);
   11.84  
   11.85      va = MAPCACHE_VIRT_START + (idx << PAGE_SHIFT);
   11.86      return (void *)va;
   11.87 @@ -91,9 +91,13 @@ void *map_domain_pages(unsigned long pfn
   11.88  void unmap_domain_pages(void *va, unsigned int order)
   11.89  {
   11.90      unsigned int idx, i;
   11.91 +    struct mapcache *cache = &current->domain->arch.mapcache;
   11.92 +
   11.93      ASSERT((void *)MAPCACHE_VIRT_START <= va);
   11.94      ASSERT(va < (void *)MAPCACHE_VIRT_END);
   11.95 +
   11.96      idx = ((unsigned long)va - MAPCACHE_VIRT_START) >> PAGE_SHIFT;
   11.97 +
   11.98      for ( i = 0; i < (1U << order); i++ )
   11.99 -        l1e_add_flags(mapcache[idx+i], READY_FOR_TLB_FLUSH);
  11.100 +        l1e_add_flags(cache->l1tab[idx+i], READY_FOR_TLB_FLUSH);
  11.101  }
    12.1 --- a/xen/arch/x86/x86_32/mm.c	Mon Jan 09 14:43:46 2006 +0000
    12.2 +++ b/xen/arch/x86/x86_32/mm.c	Mon Jan 09 19:44:30 2006 +0100
    12.3 @@ -29,8 +29,6 @@
    12.4  #include <asm/fixmap.h>
    12.5  #include <public/memory.h>
    12.6  
    12.7 -extern l1_pgentry_t *mapcache;
    12.8 -
    12.9  unsigned int PAGE_HYPERVISOR         = __PAGE_HYPERVISOR;
   12.10  unsigned int PAGE_HYPERVISOR_NOCACHE = __PAGE_HYPERVISOR_NOCACHE;
   12.11  
   12.12 @@ -68,7 +66,7 @@ void __init paging_init(void)
   12.13      void *ioremap_pt;
   12.14      unsigned long v;
   12.15      struct pfn_info *pg;
   12.16 -    int i, mapcache_order;
   12.17 +    int i;
   12.18  
   12.19  #ifdef CONFIG_X86_PAE
   12.20      printk("PAE enabled, limit: %d GB\n", MACHPHYS_MBYTES);
   12.21 @@ -76,7 +74,7 @@ void __init paging_init(void)
   12.22      printk("PAE disabled.\n");
   12.23  #endif
   12.24  
   12.25 -    idle0_vcpu.arch.monitor_table = mk_pagetable(__pa(idle_pg_table));
   12.26 +    idle_vcpu[0]->arch.monitor_table = mk_pagetable(__pa(idle_pg_table));
   12.27  
   12.28      if ( cpu_has_pge )
   12.29      {
   12.30 @@ -121,14 +119,12 @@ void __init paging_init(void)
   12.31              l2e_from_page(virt_to_page(ioremap_pt), __PAGE_HYPERVISOR);
   12.32      }
   12.33  
   12.34 -    /* Set up mapping cache for domain pages. */
   12.35 -    mapcache_order = get_order_from_bytes(
   12.36 -        MAPCACHE_MBYTES << (20 - PAGETABLE_ORDER));
   12.37 -    mapcache = alloc_xenheap_pages(mapcache_order);
   12.38 -    memset(mapcache, 0, PAGE_SIZE << mapcache_order);
   12.39 -    for ( i = 0; i < (MAPCACHE_MBYTES >> (L2_PAGETABLE_SHIFT - 20)); i++ )
   12.40 -        idle_pg_table_l2[l2_linear_offset(MAPCACHE_VIRT_START) + i] =
   12.41 -            l2e_from_page(virt_to_page(mapcache) + i, __PAGE_HYPERVISOR);
   12.42 +    /* Install per-domain mappings for idle domain. */
   12.43 +    for ( i = 0; i < PDPT_L2_ENTRIES; i++ )
   12.44 +        idle_pg_table_l2[l2_linear_offset(PERDOMAIN_VIRT_START) + i] =
   12.45 +            l2e_from_page(virt_to_page(idle_vcpu[0]->domain->
   12.46 +                                       arch.mm_perdomain_pt) + i,
   12.47 +                          __PAGE_HYPERVISOR);
   12.48  }
   12.49  
   12.50  void __init zap_low_mappings(l2_pgentry_t *base)
    13.1 --- a/xen/arch/x86/x86_64/mm.c	Mon Jan 09 14:43:46 2006 +0000
    13.2 +++ b/xen/arch/x86/x86_64/mm.c	Mon Jan 09 19:44:30 2006 +0100
    13.3 @@ -80,7 +80,7 @@ void __init paging_init(void)
    13.4      l2_pgentry_t *l2_ro_mpt;
    13.5      struct pfn_info *pg;
    13.6  
    13.7 -    idle0_vcpu.arch.monitor_table = mk_pagetable(__pa(idle_pg_table));
    13.8 +    idle_vcpu[0]->arch.monitor_table = mk_pagetable(__pa(idle_pg_table));
    13.9  
   13.10      /* Create user-accessible L2 directory to map the MPT for guests. */
   13.11      l3_ro_mpt = alloc_xenheap_page();
   13.12 @@ -119,6 +119,12 @@ void __init paging_init(void)
   13.13      /* Set up linear page table mapping. */
   13.14      idle_pg_table[l4_table_offset(LINEAR_PT_VIRT_START)] =
   13.15          l4e_from_paddr(__pa(idle_pg_table), __PAGE_HYPERVISOR);
   13.16 +
   13.17 +    /* Install per-domain mappings for idle domain. */
   13.18 +    idle_pg_table[l4_table_offset(PERDOMAIN_VIRT_START)] =
   13.19 +        l4e_from_page(
   13.20 +            virt_to_page(idle_vcpu[0]->domain->arch.mm_perdomain_l3),
   13.21 +            __PAGE_HYPERVISOR);
   13.22  }
   13.23  
   13.24  void __init zap_low_mappings(void)
    14.1 --- a/xen/common/domain.c	Mon Jan 09 14:43:46 2006 +0000
    14.2 +++ b/xen/common/domain.c	Mon Jan 09 19:44:30 2006 +0100
    14.3 @@ -46,9 +46,7 @@ struct domain *do_createdomain(domid_t d
    14.4      INIT_LIST_HEAD(&d->page_list);
    14.5      INIT_LIST_HEAD(&d->xenpage_list);
    14.6  
    14.7 -    if ( d->domain_id == IDLE_DOMAIN_ID )
    14.8 -        set_bit(_DOMF_idle_domain, &d->domain_flags);
    14.9 -    else
   14.10 +    if ( !is_idle_domain(d) )
   14.11          set_bit(_DOMF_ctrl_pause, &d->domain_flags);
   14.12  
   14.13      if ( !is_idle_domain(d) &&
    15.1 --- a/xen/common/sched_bvt.c	Mon Jan 09 14:43:46 2006 +0000
    15.2 +++ b/xen/common/sched_bvt.c	Mon Jan 09 19:44:30 2006 +0100
    15.3 @@ -220,7 +220,7 @@ static void bvt_add_task(struct vcpu *v)
    15.4  
    15.5      einf->vcpu = v;
    15.6  
    15.7 -    if ( is_idle_domain(v->domain) )
    15.8 +    if ( is_idle_vcpu(v) )
    15.9      {
   15.10          einf->avt = einf->evt = ~0U;
   15.11          BUG_ON(__task_on_runqueue(v));
   15.12 @@ -268,7 +268,7 @@ static void bvt_wake(struct vcpu *v)
   15.13          ((einf->evt - curr_evt) / BVT_INFO(curr->domain)->mcu_advance) +
   15.14          ctx_allow;
   15.15  
   15.16 -    if ( is_idle_domain(curr->domain) || (einf->evt <= curr_evt) )
   15.17 +    if ( is_idle_vcpu(curr) || (einf->evt <= curr_evt) )
   15.18          cpu_raise_softirq(cpu, SCHEDULE_SOFTIRQ);
   15.19      else if ( schedule_data[cpu].s_timer.expires > r_time )
   15.20          set_ac_timer(&schedule_data[cpu].s_timer, r_time);
   15.21 @@ -399,7 +399,7 @@ static struct task_slice bvt_do_schedule
   15.22      ASSERT(prev_einf != NULL);
   15.23      ASSERT(__task_on_runqueue(prev));
   15.24  
   15.25 -    if ( likely(!is_idle_domain(prev->domain)) ) 
   15.26 +    if ( likely(!is_idle_vcpu(prev)) )
   15.27      {
   15.28          prev_einf->avt = calc_avt(prev, now);
   15.29          prev_einf->evt = calc_evt(prev, prev_einf->avt);
   15.30 @@ -490,13 +490,13 @@ static struct task_slice bvt_do_schedule
   15.31      }
   15.32  
   15.33      /* work out time for next run through scheduler */
   15.34 -    if ( is_idle_domain(next->domain) ) 
   15.35 +    if ( is_idle_vcpu(next) )
   15.36      {
   15.37          r_time = ctx_allow;
   15.38          goto sched_done;
   15.39      }
   15.40  
   15.41 -    if ( (next_prime == NULL) || is_idle_domain(next_prime->domain) )
   15.42 +    if ( (next_prime == NULL) || is_idle_vcpu(next_prime) )
   15.43      {
   15.44          /* We have only one runnable task besides the idle task. */
   15.45          r_time = 10 * ctx_allow;     /* RN: random constant */
    16.1 --- a/xen/common/sched_sedf.c	Mon Jan 09 14:43:46 2006 +0000
    16.2 +++ b/xen/common/sched_sedf.c	Mon Jan 09 19:44:30 2006 +0100
    16.3 @@ -396,7 +396,7 @@ static void sedf_add_task(struct vcpu *d
    16.4      INIT_LIST_HEAD(&(inf->extralist[EXTRA_PEN_Q]));
    16.5      INIT_LIST_HEAD(&(inf->extralist[EXTRA_UTIL_Q]));
    16.6   
    16.7 -    if ( !is_idle_domain(d->domain) )
    16.8 +    if ( !is_idle_vcpu(d) )
    16.9      {
   16.10          extraq_check(d);
   16.11      }
   16.12 @@ -777,7 +777,7 @@ static struct task_slice sedf_do_schedul
   16.13      struct task_slice      ret;
   16.14  
   16.15      /*idle tasks don't need any of the following stuf*/
   16.16 -    if (is_idle_domain(current->domain))
   16.17 +    if ( is_idle_vcpu(current) )
   16.18          goto check_waitq;
   16.19   
   16.20      /* create local state of the status of the domain, in order to avoid
   16.21 @@ -874,7 +874,7 @@ static void sedf_sleep(struct vcpu *d)
   16.22      PRINT(2,"sedf_sleep was called, domain-id %i.%i\n",
   16.23            d->domain->domain_id, d->vcpu_id);
   16.24   
   16.25 -    if ( is_idle_domain(d->domain) )
   16.26 +    if ( is_idle_vcpu(d) )
   16.27          return;
   16.28  
   16.29      EDOM_INFO(d)->status |= SEDF_ASLEEP;
   16.30 @@ -1194,7 +1194,7 @@ static void unblock_long_burst(struct se
   16.31  static inline int get_run_type(struct vcpu* d)
   16.32  {
   16.33      struct sedf_vcpu_info* inf = EDOM_INFO(d);
   16.34 -    if (is_idle_domain(d->domain))
   16.35 +    if (is_idle_vcpu(d))
   16.36          return DOMAIN_IDLE;
   16.37      if (inf->status & EXTRA_RUN_PEN)
   16.38          return DOMAIN_EXTRA_PEN;
   16.39 @@ -1258,7 +1258,7 @@ void sedf_wake(struct vcpu *d)
   16.40      PRINT(3, "sedf_wake was called, domain-id %i.%i\n",d->domain->domain_id,
   16.41            d->vcpu_id);
   16.42  
   16.43 -    if ( unlikely(is_idle_domain(d->domain)) )
   16.44 +    if ( unlikely(is_idle_vcpu(d)) )
   16.45          return;
   16.46     
   16.47      if ( unlikely(__task_on_queue(d)) )
    17.1 --- a/xen/common/schedule.c	Mon Jan 09 14:43:46 2006 +0000
    17.2 +++ b/xen/common/schedule.c	Mon Jan 09 19:44:30 2006 +0100
    17.3 @@ -140,12 +140,10 @@ struct domain *alloc_domain(void)
    17.4   */
    17.5  void sched_add_domain(struct vcpu *v) 
    17.6  {
    17.7 -    struct domain *d = v->domain;
    17.8 -
    17.9      /* Initialise the per-domain timer. */
   17.10      init_ac_timer(&v->timer, dom_timer_fn, v, v->processor);
   17.11  
   17.12 -    if ( is_idle_domain(d) )
   17.13 +    if ( is_idle_vcpu(v) )
   17.14      {
   17.15          schedule_data[v->processor].curr = v;
   17.16          schedule_data[v->processor].idle = v;
   17.17 @@ -153,7 +151,7 @@ void sched_add_domain(struct vcpu *v)
   17.18      }
   17.19  
   17.20      SCHED_OP(add_task, v);
   17.21 -    TRACE_2D(TRC_SCHED_DOM_ADD, d->domain_id, v->vcpu_id);
   17.22 +    TRACE_2D(TRC_SCHED_DOM_ADD, v->domain->domain_id, v->vcpu_id);
   17.23  }
   17.24  
   17.25  void sched_rem_domain(struct vcpu *v) 
   17.26 @@ -435,7 +433,7 @@ static void __enter_scheduler(void)
   17.27      prev->wokenup = now;
   17.28  
   17.29  #if defined(WAKE_HISTO)
   17.30 -    if ( !is_idle_domain(next->domain) && next->wokenup )
   17.31 +    if ( !is_idle_vcpu(next) && next->wokenup )
   17.32      {
   17.33          ulong diff = (ulong)(now - next->wokenup);
   17.34          diff /= (ulong)MILLISECS(1);
   17.35 @@ -445,7 +443,7 @@ static void __enter_scheduler(void)
   17.36      next->wokenup = (s_time_t)0;
   17.37  #elif defined(BLOCKTIME_HISTO)
   17.38      prev->lastdeschd = now;
   17.39 -    if ( !is_idle_domain(next->domain) )
   17.40 +    if ( !is_idle_vcpu(next) )
   17.41      {
   17.42          ulong diff = (ulong)((now - next->lastdeschd) / MILLISECS(10));
   17.43          if (diff <= BUCKETS-2)  schedule_data[cpu].hist[diff]++;
   17.44 @@ -462,7 +460,7 @@ static void __enter_scheduler(void)
   17.45      prev->sleep_tick = schedule_data[cpu].tick;
   17.46  
   17.47      /* Ensure that the domain has an up-to-date time base. */
   17.48 -    if ( !is_idle_domain(next->domain) )
   17.49 +    if ( !is_idle_vcpu(next) )
   17.50      {
   17.51          update_dom_time(next);
   17.52          if ( next->sleep_tick != schedule_data[cpu].tick )
   17.53 @@ -499,7 +497,7 @@ static void t_timer_fn(void *unused)
   17.54  
   17.55      schedule_data[cpu].tick++;
   17.56  
   17.57 -    if ( !is_idle_domain(v->domain) )
   17.58 +    if ( !is_idle_vcpu(v) )
   17.59      {
   17.60          update_dom_time(v);
   17.61          send_guest_virq(v, VIRQ_TIMER);
   17.62 @@ -533,9 +531,6 @@ void __init scheduler_init(void)
   17.63          init_ac_timer(&t_timer[i], t_timer_fn, NULL, i);
   17.64      }
   17.65  
   17.66 -    schedule_data[0].curr = idle_domain[0];
   17.67 -    schedule_data[0].idle = idle_domain[0];
   17.68 -
   17.69      for ( i = 0; schedulers[i] != NULL; i++ )
   17.70      {
   17.71          ops = *schedulers[i];
   17.72 @@ -548,10 +543,16 @@ void __init scheduler_init(void)
   17.73  
   17.74      printk("Using scheduler: %s (%s)\n", ops.name, ops.opt_name);
   17.75  
   17.76 -    rc = SCHED_OP(alloc_task, idle_domain[0]);
   17.77 -    BUG_ON(rc < 0);
   17.78 +    if ( idle_vcpu[0] != NULL )
   17.79 +    {
   17.80 +        schedule_data[0].curr = idle_vcpu[0];
   17.81 +        schedule_data[0].idle = idle_vcpu[0];
   17.82  
   17.83 -    sched_add_domain(idle_domain[0]);
   17.84 +        rc = SCHED_OP(alloc_task, idle_vcpu[0]);
   17.85 +        BUG_ON(rc < 0);
   17.86 +
   17.87 +        sched_add_domain(idle_vcpu[0]);
   17.88 +    }
   17.89  }
   17.90  
   17.91  /*
    18.1 --- a/xen/include/asm-x86/config.h	Mon Jan 09 14:43:46 2006 +0000
    18.2 +++ b/xen/include/asm-x86/config.h	Mon Jan 09 19:44:30 2006 +0100
    18.3 @@ -148,7 +148,8 @@ extern unsigned long _end; /* standard E
    18.4  #define SH_LINEAR_PT_VIRT_END   (SH_LINEAR_PT_VIRT_START + PML4_ENTRY_BYTES)
    18.5  /* Slot 260: per-domain mappings. */
    18.6  #define PERDOMAIN_VIRT_START    (PML4_ADDR(260))
    18.7 -#define PERDOMAIN_VIRT_END      (PERDOMAIN_VIRT_START + PML4_ENTRY_BYTES)
    18.8 +#define PERDOMAIN_VIRT_END      (PERDOMAIN_VIRT_START + (PERDOMAIN_MBYTES<<20))
    18.9 +#define PERDOMAIN_MBYTES        ((unsigned long)GDT_LDT_MBYTES)
   18.10  /* Slot 261: machine-to-phys conversion table (16GB). */
   18.11  #define RDWR_MPT_VIRT_START     (PML4_ADDR(261))
   18.12  #define RDWR_MPT_VIRT_END       (RDWR_MPT_VIRT_START + (16UL<<30))
   18.13 @@ -195,8 +196,7 @@ extern unsigned long _end; /* standard E
   18.14   *                                                       ------ ------
   18.15   *  I/O remapping area                                   ( 4MB)
   18.16   *  Direct-map (1:1) area [Xen code/data/heap]           (12MB)
   18.17 - *  map_domain_page cache                                ( 4MB)
   18.18 - *  Per-domain mappings                                  ( 4MB)
   18.19 + *  Per-domain mappings (inc. 4MB map_domain_page cache) ( 4MB)
   18.20   *  Shadow linear pagetable                              ( 4MB) ( 8MB)
   18.21   *  Guest linear pagetable                               ( 4MB) ( 8MB)
   18.22   *  Machine-to-physical translation table [writable]     ( 4MB) (16MB)
   18.23 @@ -209,7 +209,7 @@ extern unsigned long _end; /* standard E
   18.24  #define IOREMAP_MBYTES           4
   18.25  #define DIRECTMAP_MBYTES        12
   18.26  #define MAPCACHE_MBYTES          4
   18.27 -#define PERDOMAIN_MBYTES         4
   18.28 +#define PERDOMAIN_MBYTES         8
   18.29  
   18.30  #ifdef CONFIG_X86_PAE
   18.31  # define LINEARPT_MBYTES         8
   18.32 @@ -227,7 +227,7 @@ extern unsigned long _end; /* standard E
   18.33  #define DIRECTMAP_VIRT_START	(DIRECTMAP_VIRT_END - (DIRECTMAP_MBYTES<<20))
   18.34  #define MAPCACHE_VIRT_END	DIRECTMAP_VIRT_START
   18.35  #define MAPCACHE_VIRT_START	(MAPCACHE_VIRT_END - (MAPCACHE_MBYTES<<20))
   18.36 -#define PERDOMAIN_VIRT_END	MAPCACHE_VIRT_START
   18.37 +#define PERDOMAIN_VIRT_END	DIRECTMAP_VIRT_START
   18.38  #define PERDOMAIN_VIRT_START	(PERDOMAIN_VIRT_END - (PERDOMAIN_MBYTES<<20))
   18.39  #define SH_LINEAR_PT_VIRT_END	PERDOMAIN_VIRT_START
   18.40  #define SH_LINEAR_PT_VIRT_START	(SH_LINEAR_PT_VIRT_END - (LINEARPT_MBYTES<<20))
   18.41 @@ -282,14 +282,21 @@ extern unsigned long _end; /* standard E
   18.42  extern unsigned long xenheap_phys_end; /* user-configurable */
   18.43  #endif
   18.44  
   18.45 -#define GDT_VIRT_START(ed)    \
   18.46 -    (PERDOMAIN_VIRT_START + ((ed)->vcpu_id << PDPT_VCPU_VA_SHIFT))
   18.47 -#define LDT_VIRT_START(ed)    \
   18.48 -    (GDT_VIRT_START(ed) + (64*1024))
   18.49 +/* GDT/LDT shadow mapping area. The first per-domain-mapping sub-area. */
   18.50 +#define GDT_LDT_VCPU_SHIFT       5
   18.51 +#define GDT_LDT_VCPU_VA_SHIFT    (GDT_LDT_VCPU_SHIFT + PAGE_SHIFT)
   18.52 +#define GDT_LDT_MBYTES           (MAX_VIRT_CPUS >> (20-GDT_LDT_VCPU_VA_SHIFT))
   18.53 +#define GDT_LDT_VIRT_START       PERDOMAIN_VIRT_START
   18.54 +#define GDT_LDT_VIRT_END         (GDT_LDT_VIRT_START + (GDT_LDT_MBYTES << 20))
   18.55  
   18.56 -#define PDPT_VCPU_SHIFT       5
   18.57 -#define PDPT_VCPU_VA_SHIFT    (PDPT_VCPU_SHIFT + PAGE_SHIFT)
   18.58 -#define PDPT_L1_ENTRIES       (MAX_VIRT_CPUS << PDPT_VCPU_SHIFT)
   18.59 +/* The address of a particular VCPU's GDT or LDT. */
   18.60 +#define GDT_VIRT_START(v)    \
   18.61 +    (PERDOMAIN_VIRT_START + ((v)->vcpu_id << GDT_LDT_VCPU_VA_SHIFT))
   18.62 +#define LDT_VIRT_START(v)    \
   18.63 +    (GDT_VIRT_START(v) + (64*1024))
   18.64 +
   18.65 +#define PDPT_L1_ENTRIES       \
   18.66 +    ((PERDOMAIN_VIRT_END - PERDOMAIN_VIRT_START) >> PAGE_SHIFT)
   18.67  #define PDPT_L2_ENTRIES       \
   18.68      ((PDPT_L1_ENTRIES + (1 << PAGETABLE_ORDER) - 1) >> PAGETABLE_ORDER)
   18.69  
    19.1 --- a/xen/include/asm-x86/domain.h	Mon Jan 09 14:43:46 2006 +0000
    19.2 +++ b/xen/include/asm-x86/domain.h	Mon Jan 09 19:44:30 2006 +0100
    19.3 @@ -13,6 +13,13 @@ struct trap_bounce {
    19.4      unsigned long  eip;
    19.5  };
    19.6  
    19.7 +struct mapcache {
    19.8 +    l1_pgentry_t *l1tab;
    19.9 +    unsigned int cursor;
   19.10 +    unsigned int epoch, shadow_epoch[MAX_VIRT_CPUS];
   19.11 +    spinlock_t lock;
   19.12 +};
   19.13 +
   19.14  struct arch_domain
   19.15  {
   19.16      l1_pgentry_t *mm_perdomain_pt;
   19.17 @@ -21,6 +28,11 @@ struct arch_domain
   19.18      l3_pgentry_t *mm_perdomain_l3;
   19.19  #endif
   19.20  
   19.21 +#ifdef CONFIG_X86_32
   19.22 +    /* map_domain_page() mapping cache. */
   19.23 +    struct mapcache mapcache;
   19.24 +#endif
   19.25 +
   19.26      /* Writable pagetables. */
   19.27      struct ptwr_info ptwr[2];
   19.28  
    20.1 --- a/xen/include/xen/sched.h	Mon Jan 09 14:43:46 2006 +0000
    20.2 +++ b/xen/include/xen/sched.h	Mon Jan 09 19:44:30 2006 +0100
    20.3 @@ -172,12 +172,10 @@ struct domain_setup_info
    20.4      char *xen_section_string;
    20.5  };
    20.6  
    20.7 -extern struct domain idle0_domain;
    20.8 -extern struct vcpu idle0_vcpu;
    20.9 -
   20.10 -extern struct vcpu *idle_domain[NR_CPUS];
   20.11 +extern struct vcpu *idle_vcpu[NR_CPUS];
   20.12  #define IDLE_DOMAIN_ID   (0x7FFFU)
   20.13 -#define is_idle_domain(_d) (test_bit(_DOMF_idle_domain, &(_d)->domain_flags))
   20.14 +#define is_idle_domain(d) ((d)->domain_id == IDLE_DOMAIN_ID)
   20.15 +#define is_idle_vcpu(v)   (is_idle_domain((v)->domain))
   20.16  
   20.17  struct vcpu *alloc_vcpu(
   20.18      struct domain *d, unsigned int vcpu_id, unsigned int cpu_id);
   20.19 @@ -367,23 +365,20 @@ extern struct domain *domain_list;
   20.20  /*
   20.21   * Per-domain flags (domain_flags).
   20.22   */
   20.23 - /* Is this one of the per-CPU idle domains? */
   20.24 -#define _DOMF_idle_domain      0
   20.25 -#define DOMF_idle_domain       (1UL<<_DOMF_idle_domain)
   20.26   /* Is this domain privileged? */
   20.27 -#define _DOMF_privileged       1
   20.28 +#define _DOMF_privileged       0
   20.29  #define DOMF_privileged        (1UL<<_DOMF_privileged)
   20.30   /* Guest shut itself down for some reason. */
   20.31 -#define _DOMF_shutdown         2
   20.32 +#define _DOMF_shutdown         1
   20.33  #define DOMF_shutdown          (1UL<<_DOMF_shutdown)
   20.34   /* Death rattle. */
   20.35 -#define _DOMF_dying            3
   20.36 +#define _DOMF_dying            2
   20.37  #define DOMF_dying             (1UL<<_DOMF_dying)
   20.38   /* Domain is paused by controller software. */
   20.39 -#define _DOMF_ctrl_pause       4
   20.40 +#define _DOMF_ctrl_pause       3
   20.41  #define DOMF_ctrl_pause        (1UL<<_DOMF_ctrl_pause)
   20.42   /* Domain is being debugged by controller software. */
   20.43 -#define _DOMF_debugging        5
   20.44 +#define _DOMF_debugging        4
   20.45  #define DOMF_debugging         (1UL<<_DOMF_debugging)
   20.46  
   20.47