ia64/xen-unstable

changeset 19788:2f9e1348aa98

x86_64: allow more vCPU-s per guest

Since the shared info layout is fixed, guests are required to use
VCPUOP_register_vcpu_info prior to booting any vCPU beyond the
traditional limit of 32.

MAX_VIRT_CPUS, being an implementation detail of the hypervisor, is no
longer being exposed in the public headers.

The tools changes are clearly incomplete (and done only so things
would
build again), and the current state of the tools (using scalar
variables all over the place to represent vCPU bitmaps) very likely
doesn't permit booting DomU-s with more than the traditional number of
vCPU-s. Testing of the extended functionality was done with Dom0 (96
vCPU-s, as well as 128 vCPU-s out of which the kernel elected - by way
of a simple kernel side patch - to use only some, resulting in a
sparse
bitmap).

ia64 changes only to make things build, and build-tested only (and the
tools part only as far as the build would go without encountering
unrelated problems in the blktap code).

Signed-off-by: Jan Beulich <jbeulich@novell.com>
author Keir Fraser <keir.fraser@citrix.com>
date Thu Jun 18 10:14:16 2009 +0100 (2009-06-18)
parents cecc76506afc
children cf6d3211ec59
files docs/src/interface.tex tools/debugger/xenitp/xenitp.c tools/include/xen-foreign/structs.py tools/libxc/ia64/xc_ia64_linux_restore.c tools/libxc/ia64/xc_ia64_linux_save.c tools/libxc/xc_core.c tools/libxc/xc_core_ia64.c tools/libxc/xc_core_ia64.h tools/libxc/xc_dom_ia64.c tools/libxc/xc_dom_x86.c tools/libxc/xc_domain_restore.c tools/libxc/xc_private.h tools/libxc/xc_ptrace.c tools/libxc/xc_ptrace.h tools/libxc/xc_ptrace_core.c xen/arch/ia64/xen/dom_fw_common.c xen/arch/ia64/xen/domain.c xen/arch/ia64/xen/xensetup.c xen/arch/x86/domain.c xen/arch/x86/domain_build.c xen/arch/x86/domctl.c xen/arch/x86/hvm/hvm.c xen/arch/x86/hvm/mtrr.c xen/arch/x86/hvm/vioapic.c xen/arch/x86/hvm/vlapic.c xen/arch/x86/hvm/vmx/vmx.c xen/arch/x86/hvm/vpic.c xen/arch/x86/mm.c xen/arch/x86/mm/paging.c xen/arch/x86/mm/shadow/common.c xen/arch/x86/nmi.c xen/arch/x86/setup.c xen/arch/x86/traps.c xen/common/compat/domain.c xen/common/domain.c xen/common/domctl.c xen/common/event_channel.c xen/common/keyhandler.c xen/common/sched_sedf.c xen/common/xenoprof.c xen/include/asm-ia64/config.h xen/include/asm-x86/config.h xen/include/asm-x86/paging.h xen/include/asm-x86/shared.h xen/include/public/arch-ia64.h xen/include/public/arch-x86/xen.h xen/include/public/xen.h xen/include/xen/domain.h xen/include/xen/numa.h xen/include/xen/sched.h xen/include/xen/shared.h xen/include/xen/xenoprof.h
line diff
     1.1 --- a/docs/src/interface.tex	Thu Jun 18 10:05:23 2009 +0100
     1.2 +++ b/docs/src/interface.tex	Thu Jun 18 10:14:16 2009 +0100
     1.3 @@ -462,7 +462,7 @@ The structure is declared in {\bf xen/in
     1.4  \scriptsize
     1.5  \begin{verbatim}
     1.6  typedef struct shared_info {
     1.7 -    vcpu_info_t vcpu_info[MAX_VIRT_CPUS];
     1.8 +    vcpu_info_t vcpu_info[XEN_LEGACY_MAX_VCPUS];
     1.9  
    1.10      /*
    1.11       * A domain can create "event channels" on which it can send and receive
     2.1 --- a/tools/debugger/xenitp/xenitp.c	Thu Jun 18 10:05:23 2009 +0100
     2.2 +++ b/tools/debugger/xenitp/xenitp.c	Thu Jun 18 10:14:16 2009 +0100
     2.3 @@ -955,7 +955,7 @@ char *parse_arg (char **buf)
     2.4      return res;
     2.5  }
     2.6  
     2.7 -vcpu_guest_context_any_t vcpu_ctx_any[MAX_VIRT_CPUS];
     2.8 +vcpu_guest_context_any_t *vcpu_ctx_any;
     2.9  
    2.10  int vcpu_setcontext (int vcpu)
    2.11  {
    2.12 @@ -1584,11 +1584,23 @@ void xenitp (int vcpu)
    2.13  {
    2.14      int ret;
    2.15      struct sigaction sa;
    2.16 -
    2.17 -    cur_ctx = &vcpu_ctx_any[vcpu].c;
    2.18 +    xc_dominfo_t dominfo;
    2.19  
    2.20      xc_handle = xc_interface_open (); /* for accessing control interface */
    2.21  
    2.22 +    ret = xc_domain_getinfo (xc_handle, domid, 1, &dominfo);
    2.23 +    if (ret < 0) {
    2.24 +        perror ("xc_domain_getinfo");
    2.25 +        exit (-1);
    2.26 +    }
    2.27 +
     2.28 +    vcpu_ctx_any = calloc (dominfo.max_vcpu_id + 1, sizeof(*vcpu_ctx_any));
    2.29 +    if (!vcpu_ctx_any) {
    2.30 +        perror ("vcpu context array alloc");
    2.31 +        exit (-1);
    2.32 +    }
    2.33 +    cur_ctx = &vcpu_ctx_any[vcpu].c;
    2.34 +
    2.35      if (xc_domain_setdebugging (xc_handle, domid, 1) != 0)
    2.36          perror ("setdebugging");
    2.37  
     3.1 --- a/tools/include/xen-foreign/structs.py	Thu Jun 18 10:05:23 2009 +0100
     3.2 +++ b/tools/include/xen-foreign/structs.py	Thu Jun 18 10:14:16 2009 +0100
     3.3 @@ -53,6 +53,6 @@ defines = [ "__i386__",
     3.4  
     3.5              # all archs
     3.6              "xen_pfn_to_cr3",
     3.7 -            "MAX_VIRT_CPUS",
     3.8 +            "XEN_LEGACY_MAX_VCPUS",
     3.9              "MAX_GUEST_CMDLINE" ];
    3.10  
     4.1 --- a/tools/libxc/ia64/xc_ia64_linux_restore.c	Thu Jun 18 10:05:23 2009 +0100
     4.2 +++ b/tools/libxc/ia64/xc_ia64_linux_restore.c	Thu Jun 18 10:14:16 2009 +0100
     4.3 @@ -174,7 +174,7 @@ xc_ia64_recv_shared_info(int xc_handle, 
     4.4      /* clear any pending events and the selector */
     4.5      memset(&(shared_info->evtchn_pending[0]), 0,
     4.6             sizeof (shared_info->evtchn_pending));
     4.7 -    for (i = 0; i < MAX_VIRT_CPUS; i++)
     4.8 +    for (i = 0; i < XEN_LEGACY_MAX_VCPUS; i++)
     4.9          shared_info->vcpu_info[i].evtchn_pending_sel = 0;
    4.10  
    4.11      if (start_info_pfn != NULL)
     5.1 --- a/tools/libxc/ia64/xc_ia64_linux_save.c	Thu Jun 18 10:05:23 2009 +0100
     5.2 +++ b/tools/libxc/ia64/xc_ia64_linux_save.c	Thu Jun 18 10:14:16 2009 +0100
     5.3 @@ -238,7 +238,7 @@ xc_ia64_pv_send_context(int xc_handle, i
     5.4  
     5.5      /* vcpu map */
     5.6      uint64_t *vcpumap = NULL;
     5.7 -    if (xc_ia64_send_vcpumap(xc_handle, io_fd, dom, info, MAX_VIRT_CPUS,
     5.8 +    if (xc_ia64_send_vcpumap(xc_handle, io_fd, dom, info, XEN_LEGACY_MAX_VCPUS,
     5.9                               &vcpumap))
    5.10          goto out;
    5.11  
    5.12 @@ -308,7 +308,7 @@ xc_ia64_hvm_send_context(int xc_handle, 
    5.13          return -1;
    5.14  
    5.15      /* vcpu map */
    5.16 -    if (xc_ia64_send_vcpumap(xc_handle, io_fd, dom, info, MAX_VIRT_CPUS,
    5.17 +    if (xc_ia64_send_vcpumap(xc_handle, io_fd, dom, info, XEN_LEGACY_MAX_VCPUS,
    5.18                               &vcpumap))
    5.19          goto out;
    5.20  
     6.1 --- a/tools/libxc/xc_core.c	Thu Jun 18 10:05:23 2009 +0100
     6.2 +++ b/tools/libxc/xc_core.c	Thu Jun 18 10:14:16 2009 +0100
     6.3 @@ -430,7 +430,7 @@ xc_domain_dumpcore_via_callback(int xc_h
     6.4  
     6.5      int nr_vcpus = 0;
     6.6      char *dump_mem, *dump_mem_start = NULL;
     6.7 -    vcpu_guest_context_any_t  ctxt[MAX_VIRT_CPUS];
     6.8 +    vcpu_guest_context_any_t *ctxt = NULL;
     6.9      struct xc_core_arch_context arch_ctxt;
    6.10      char dummy[PAGE_SIZE];
    6.11      int dummy_len;
    6.12 @@ -495,6 +495,13 @@ xc_domain_dumpcore_via_callback(int xc_h
    6.13          goto out;
    6.14      }
    6.15  
    6.16 +    ctxt = calloc(sizeof(*ctxt), info.max_vcpu_id + 1);
    6.17 +    if ( !ctxt )
    6.18 +    {
     6.19 +        PERROR("Could not allocate vcpu context array for dom %u", domid);
    6.20 +        goto out;
    6.21 +    }
    6.22 +
    6.23      for ( i = 0; i <= info.max_vcpu_id; i++ )
    6.24      {
    6.25          if ( xc_vcpu_getcontext(xc_handle, domid, i, &ctxt[nr_vcpus]) == 0 )
    6.26 @@ -900,6 +907,8 @@ out:
    6.27          xc_core_shdr_free(sheaders);
    6.28      if ( strtab != NULL )
    6.29          xc_core_strtab_free(strtab);
    6.30 +    if ( ctxt != NULL )
    6.31 +        free(ctxt);
    6.32      if ( dump_mem_start != NULL )
    6.33          free(dump_mem_start);
    6.34      if ( live_shinfo != NULL )
     7.1 --- a/tools/libxc/xc_core_ia64.c	Thu Jun 18 10:05:23 2009 +0100
     7.2 +++ b/tools/libxc/xc_core_ia64.c	Thu Jun 18 10:14:16 2009 +0100
     7.3 @@ -251,13 +251,10 @@ xc_core_arch_map_p2m(int xc_handle, unsi
     7.4  void
     7.5  xc_core_arch_context_init(struct xc_core_arch_context* arch_ctxt)
     7.6  {
     7.7 -    int i;
     7.8 -
     7.9      arch_ctxt->mapped_regs_size =
    7.10          (XMAPPEDREGS_SIZE < PAGE_SIZE) ? PAGE_SIZE: XMAPPEDREGS_SIZE;
    7.11      arch_ctxt->nr_vcpus = 0;
    7.12 -    for ( i = 0; i < MAX_VIRT_CPUS; i++ )
    7.13 -        arch_ctxt->mapped_regs[i] = NULL;
    7.14 +    arch_ctxt->mapped_regs = NULL;
    7.15  
    7.16      xc_ia64_p2m_init(&arch_ctxt->p2m_table);
    7.17  }
    7.18 @@ -269,6 +266,7 @@ xc_core_arch_context_free(struct xc_core
    7.19      for ( i = 0; i < arch_ctxt->nr_vcpus; i++ )
    7.20          if ( arch_ctxt->mapped_regs[i] != NULL )
    7.21              munmap(arch_ctxt->mapped_regs[i], arch_ctxt->mapped_regs_size);
    7.22 +    free(arch_ctxt->mapped_regs);
    7.23      xc_ia64_p2m_unmap(&arch_ctxt->p2m_table);
    7.24  }
    7.25  
    7.26 @@ -289,6 +287,21 @@ xc_core_arch_context_get(struct xc_core_
    7.27          errno = ENOENT;
    7.28          return -1;
    7.29      }
    7.30 +    if ( !(arch_ctxt->nr_vcpus & (arch_ctxt->nr_vcpus - 1)) ) {
    7.31 +        unsigned int nr = arch_ctxt->nr_vcpus ? arch_ctxt->nr_vcpus << 1 : 1;
    7.32 +        mapped_regs_t** new = realloc(arch_ctxt->mapped_regs,
    7.33 +                                      nr * sizeof(*new));
    7.34 +
    7.35 +        if ( !new )
    7.36 +        {
    7.37 +            PERROR("Could not alloc mapped regs pointer array");
    7.38 +            return -1;
    7.39 +        }
    7.40 +        memset(new + arch_ctxt->nr_vcpus, 0,
    7.41 +               (nr - arch_ctxt->nr_vcpus) * sizeof(*new));
    7.42 +        arch_ctxt->mapped_regs = new;
    7.43 +    }
    7.44 +
    7.45      mapped_regs = xc_map_foreign_range(xc_handle, domid,
    7.46                                         arch_ctxt->mapped_regs_size,
    7.47                                         PROT_READ, ctxt->privregs_pfn);
     8.1 --- a/tools/libxc/xc_core_ia64.h	Thu Jun 18 10:05:23 2009 +0100
     8.2 +++ b/tools/libxc/xc_core_ia64.h	Thu Jun 18 10:14:16 2009 +0100
     8.3 @@ -29,7 +29,7 @@
     8.4  struct xc_core_arch_context {
     8.5      size_t mapped_regs_size;
     8.6      int nr_vcpus;
     8.7 -    mapped_regs_t* mapped_regs[MAX_VIRT_CPUS];
     8.8 +    mapped_regs_t** mapped_regs;
     8.9  
    8.10      struct xen_ia64_p2m_table p2m_table;
    8.11  };
     9.1 --- a/tools/libxc/xc_dom_ia64.c	Thu Jun 18 10:05:23 2009 +0100
     9.2 +++ b/tools/libxc/xc_dom_ia64.c	Thu Jun 18 10:14:16 2009 +0100
     9.3 @@ -87,7 +87,7 @@ int shared_info_ia64(struct xc_dom_image
     9.4      xc_dom_printf("%s: called\n", __FUNCTION__);
     9.5  
     9.6      memset(shared_info, 0, sizeof(*shared_info));
     9.7 -    for (i = 0; i < MAX_VIRT_CPUS; i++)
     9.8 +    for (i = 0; i < XEN_LEGACY_MAX_VCPUS; i++)
     9.9          shared_info->vcpu_info[i].evtchn_upcall_mask = 1;
    9.10      shared_info->arch.start_info_pfn = dom->start_info_pfn;
    9.11      shared_info->arch.memmap_info_num_pages = 1; //XXX
    10.1 --- a/tools/libxc/xc_dom_x86.c	Thu Jun 18 10:05:23 2009 +0100
    10.2 +++ b/tools/libxc/xc_dom_x86.c	Thu Jun 18 10:14:16 2009 +0100
    10.3 @@ -498,7 +498,7 @@ static int shared_info_x86_32(struct xc_
    10.4      xc_dom_printf("%s: called\n", __FUNCTION__);
    10.5  
    10.6      memset(shared_info, 0, sizeof(*shared_info));
    10.7 -    for ( i = 0; i < MAX_VIRT_CPUS; i++ )
    10.8 +    for ( i = 0; i < XEN_LEGACY_MAX_VCPUS; i++ )
    10.9          shared_info->vcpu_info[i].evtchn_upcall_mask = 1;
   10.10      return 0;
   10.11  }
   10.12 @@ -511,7 +511,7 @@ static int shared_info_x86_64(struct xc_
   10.13      xc_dom_printf("%s: called\n", __FUNCTION__);
   10.14  
   10.15      memset(shared_info, 0, sizeof(*shared_info));
   10.16 -    for ( i = 0; i < MAX_VIRT_CPUS; i++ )
   10.17 +    for ( i = 0; i < XEN_LEGACY_MAX_VCPUS; i++ )
   10.18          shared_info->vcpu_info[i].evtchn_upcall_mask = 1;
   10.19      return 0;
   10.20  }
    11.1 --- a/tools/libxc/xc_domain_restore.c	Thu Jun 18 10:05:23 2009 +0100
    11.2 +++ b/tools/libxc/xc_domain_restore.c	Thu Jun 18 10:14:16 2009 +0100
    11.3 @@ -1146,7 +1146,7 @@ int xc_domain_restore(int xc_handle, int
    11.4  
    11.5      /* clear any pending events and the selector */
    11.6      MEMSET_ARRAY_FIELD(new_shared_info, evtchn_pending, 0);
    11.7 -    for ( i = 0; i < MAX_VIRT_CPUS; i++ )
    11.8 +    for ( i = 0; i < XEN_LEGACY_MAX_VCPUS; i++ )
    11.9  	    SET_FIELD(new_shared_info, vcpu_info[i].evtchn_pending_sel, 0);
   11.10  
   11.11      /* mask event channels */
    12.1 --- a/tools/libxc/xc_private.h	Thu Jun 18 10:05:23 2009 +0100
    12.2 +++ b/tools/libxc/xc_private.h	Thu Jun 18 10:14:16 2009 +0100
    12.3 @@ -191,11 +191,6 @@ void *xc_map_foreign_ranges(int xc_handl
    12.4                              size_t size, int prot, size_t chunksize,
    12.5                              privcmd_mmap_entry_t entries[], int nentries);
    12.6  
    12.7 -void *map_domain_va_core(unsigned long domfd, int cpu, void *guest_va,
    12.8 -                         vcpu_guest_context_any_t *ctxt);
    12.9 -int xc_waitdomain_core(int xc_handle, int domain, int *status,
   12.10 -    int options, vcpu_guest_context_any_t *ctxt);
   12.11 -
   12.12  void bitmap_64_to_byte(uint8_t *bp, const uint64_t *lp, int nbits);
   12.13  void bitmap_byte_to_64(uint64_t *lp, const uint8_t *bp, int nbits);
   12.14  
    13.1 --- a/tools/libxc/xc_ptrace.c	Thu Jun 18 10:05:23 2009 +0100
    13.2 +++ b/tools/libxc/xc_ptrace.c	Thu Jun 18 10:14:16 2009 +0100
    13.3 @@ -42,7 +42,8 @@ static int current_is_hvm;
    13.4  
    13.5  static uint64_t                         online_cpumap;
    13.6  static uint64_t                         regs_valid;
    13.7 -static vcpu_guest_context_any_t      ctxt[MAX_VIRT_CPUS];
    13.8 +static unsigned int                     nr_vcpu_ids;
    13.9 +static vcpu_guest_context_any_t        *ctxt;
   13.10  
   13.11  #define FOREACH_CPU(cpumap, i)  for ( cpumap = online_cpumap; (i = xc_ffs64(cpumap)); cpumap &= ~(1 << (index - 1)) )
   13.12  
   13.13 @@ -101,6 +102,21 @@ paging_enabled(vcpu_guest_context_any_t 
   13.14      return (cr0 & X86_CR0_PE) && (cr0 & X86_CR0_PG);
   13.15  }
   13.16  
   13.17 +vcpu_guest_context_any_t *xc_ptrace_get_vcpu_ctxt(unsigned int nr_cpus)
   13.18 +{
   13.19 +    if (nr_cpus > nr_vcpu_ids) {
   13.20 +        vcpu_guest_context_any_t *new;
   13.21 +
   13.22 +        new = realloc(ctxt, nr_cpus * sizeof(*ctxt));
   13.23 +        if (!new)
   13.24 +            return NULL;
   13.25 +        ctxt = new;
   13.26 +        nr_vcpu_ids = nr_cpus;
   13.27 +    }
   13.28 +
   13.29 +    return ctxt;
   13.30 +}
   13.31 +
   13.32  /*
   13.33   * Fetch registers for all online cpus and set the cpumap
   13.34   * to indicate which cpus are online
   13.35 @@ -113,6 +129,9 @@ get_online_cpumap(int xc_handle, struct 
   13.36  {
   13.37      int i, online;
   13.38  
   13.39 +    if (!xc_ptrace_get_vcpu_ctxt(d->max_vcpu_id + 1))
   13.40 +        return -ENOMEM;
   13.41 +
   13.42      *cpumap = 0;
   13.43      for (i = 0; i <= d->max_vcpu_id; i++) {
   13.44          fetch_regs(xc_handle, i, &online);
   13.45 @@ -261,7 +280,7 @@ xc_ptrace(
   13.46      case PTRACE_PEEKDATA:
   13.47          if (current_isfile)
   13.48              guest_va = (unsigned long *)map_domain_va_core(
   13.49 -                current_domid, cpu, addr, ctxt);
   13.50 +                current_domid, cpu, addr);
   13.51          else
   13.52              guest_va = (unsigned long *)map_domain_va(
   13.53                  xc_handle, cpu, addr, PROT_READ);
   13.54 @@ -277,7 +296,7 @@ xc_ptrace(
   13.55          /* XXX assume that all CPUs have the same address space */
   13.56          if (current_isfile)
   13.57              guest_va = (unsigned long *)map_domain_va_core(
   13.58 -                current_domid, cpu, addr, ctxt);
   13.59 +                current_domid, cpu, addr);
   13.60          else
   13.61              guest_va = (unsigned long *)map_domain_va(
   13.62                  xc_handle, cpu, addr, PROT_READ|PROT_WRITE);
   13.63 @@ -433,7 +452,7 @@ xc_waitdomain(
   13.64      int options)
   13.65  {
   13.66      if (current_isfile)
   13.67 -        return xc_waitdomain_core(xc_handle, domain, status, options, ctxt);
   13.68 +        return xc_waitdomain_core(xc_handle, domain, status, options);
   13.69      return __xc_waitdomain(xc_handle, domain, status, options);
   13.70  }
   13.71  
    14.1 --- a/tools/libxc/xc_ptrace.h	Thu Jun 18 10:05:23 2009 +0100
    14.2 +++ b/tools/libxc/xc_ptrace.h	Thu Jun 18 10:14:16 2009 +0100
    14.3 @@ -157,4 +157,9 @@ struct gdb_regs {
    14.4  }
    14.5  #endif
    14.6  
    14.7 +void *map_domain_va_core(unsigned long domfd, int cpu, void *guest_va);
    14.8 +int xc_waitdomain_core(int xc_handle, int domain, int *status, int options);
    14.9 +vcpu_guest_context_any_t *xc_ptrace_get_vcpu_ctxt(unsigned int nr_cpus);
   14.10 +
   14.11 +
   14.12  #endif /* XC_PTRACE */
    15.1 --- a/tools/libxc/xc_ptrace_core.c	Thu Jun 18 10:05:23 2009 +0100
    15.2 +++ b/tools/libxc/xc_ptrace_core.c	Thu Jun 18 10:14:16 2009 +0100
    15.3 @@ -12,6 +12,44 @@
    15.4  #include <time.h>
    15.5  #include <inttypes.h>
    15.6  
    15.7 +static unsigned int    max_nr_vcpus;
    15.8 +static unsigned long  *cr3;
    15.9 +static unsigned long  *cr3_phys;
   15.10 +static unsigned long **cr3_virt;
   15.11 +static unsigned long  *pde_phys;
   15.12 +static unsigned long **pde_virt;
   15.13 +static unsigned long  *page_phys;
   15.14 +static unsigned long **page_virt;
   15.15 +
   15.16 +static vcpu_guest_context_t *
   15.17 +ptrace_core_get_vcpu_ctxt(unsigned int nr_vcpus)
   15.18 +{
   15.19 +    if (nr_vcpus > max_nr_vcpus) {
   15.20 +        void *new;
   15.21 +
   15.22 +#define REALLOC(what) \
   15.23 +        new = realloc(what, nr_vcpus * sizeof(*what)); \
   15.24 +        if (!new) \
   15.25 +            return NULL; \
    15.26 +        what = new; \
    15.27 +        memset(what + max_nr_vcpus, 0, \
    15.28 +              (nr_vcpus - max_nr_vcpus) * sizeof(*what))
   15.29 +
   15.30 +        REALLOC(cr3);
   15.31 +        REALLOC(cr3_phys);
   15.32 +        REALLOC(cr3_virt);
   15.33 +        REALLOC(pde_phys);
   15.34 +        REALLOC(pde_virt);
   15.35 +        REALLOC(page_phys);
   15.36 +        REALLOC(page_virt);
   15.37 +
   15.38 +#undef REALLOC
   15.39 +        max_nr_vcpus = nr_vcpus;
   15.40 +    }
   15.41 +
   15.42 +    return &xc_ptrace_get_vcpu_ctxt(nr_vcpus)->c;
   15.43 +}
   15.44 +
   15.45  /* Leave the code for the old format as is. */
   15.46  /* --- compatible layer for old format ------------------------------------- */
   15.47  /* XXX application state */
   15.48 @@ -21,7 +59,6 @@ static long   nr_pages_compat = 0;
   15.49  static unsigned long  *p2m_array_compat = NULL;
   15.50  static unsigned long  *m2p_array_compat = NULL;
   15.51  static unsigned long   pages_offset_compat;
   15.52 -static unsigned long   cr3_compat[MAX_VIRT_CPUS];
   15.53  
   15.54  /* --------------------- */
   15.55  
   15.56 @@ -34,23 +71,15 @@ map_mtop_offset_compat(unsigned long ma)
   15.57  
   15.58  
   15.59  static void *
   15.60 -map_domain_va_core_compat(unsigned long domfd, int cpu, void *guest_va,
   15.61 -                          vcpu_guest_context_t *ctxt)
   15.62 +map_domain_va_core_compat(unsigned long domfd, int cpu, void *guest_va)
   15.63  {
   15.64      unsigned long pde, page;
   15.65      unsigned long va = (unsigned long)guest_va;
   15.66      void *v;
   15.67  
   15.68 -    static unsigned long  cr3_phys[MAX_VIRT_CPUS];
   15.69 -    static unsigned long *cr3_virt[MAX_VIRT_CPUS];
   15.70 -    static unsigned long  pde_phys[MAX_VIRT_CPUS];
   15.71 -    static unsigned long *pde_virt[MAX_VIRT_CPUS];
   15.72 -    static unsigned long  page_phys[MAX_VIRT_CPUS];
   15.73 -    static unsigned long *page_virt[MAX_VIRT_CPUS];
   15.74 -
   15.75 -    if (cr3_compat[cpu] != cr3_phys[cpu])
   15.76 +    if (cr3[cpu] != cr3_phys[cpu])
   15.77      {
   15.78 -        cr3_phys[cpu] = cr3_compat[cpu];
   15.79 +        cr3_phys[cpu] = cr3[cpu];
   15.80          if (cr3_virt[cpu])
   15.81              munmap(cr3_virt[cpu], PAGE_SIZE);
   15.82          v = mmap(
   15.83 @@ -93,7 +122,7 @@ map_domain_va_core_compat(unsigned long 
   15.84              map_mtop_offset_compat(page_phys[cpu]));
   15.85          if (v == MAP_FAILED)
   15.86          {
   15.87 -            IPRINTF("cr3 %lx pde %lx page %lx pti %lx\n", cr3_compat[cpu], pde, page, l1_table_offset_i386(va));
   15.88 +            IPRINTF("cr3 %lx pde %lx page %lx pti %lx\n", cr3[cpu], pde, page, l1_table_offset_i386(va));
   15.89              page_phys[cpu] = 0;
   15.90              return NULL;
   15.91          }
   15.92 @@ -107,11 +136,11 @@ xc_waitdomain_core_compat(
   15.93      int xc_handle,
   15.94      int domfd,
   15.95      int *status,
   15.96 -    int options,
   15.97 -    vcpu_guest_context_t *ctxt)
   15.98 +    int options)
   15.99  {
  15.100      int nr_vcpus;
  15.101      int i;
  15.102 +    vcpu_guest_context_t *ctxt;
  15.103      xc_core_header_t header;
  15.104  
  15.105      if ( nr_pages_compat == 0 )
  15.106 @@ -132,12 +161,18 @@ xc_waitdomain_core_compat(
  15.107          nr_vcpus = header.xch_nr_vcpus;
  15.108          pages_offset_compat = header.xch_pages_offset;
  15.109  
  15.110 +        if ((ctxt = ptrace_core_get_vcpu_ctxt(nr_vcpus)) == NULL)
  15.111 +        {
  15.112 +            IPRINTF("Could not allocate vcpu context array\n");
  15.113 +            return -1;
  15.114 +        }
  15.115 +
  15.116          if (read(domfd, ctxt, sizeof(vcpu_guest_context_t)*nr_vcpus) !=
  15.117              sizeof(vcpu_guest_context_t)*nr_vcpus)
  15.118              return -1;
  15.119  
  15.120          for (i = 0; i < nr_vcpus; i++)
  15.121 -            cr3_compat[i] = ctxt[i].ctrlreg[3];
  15.122 +            cr3[i] = ctxt[i].ctrlreg[3];
  15.123  
  15.124          if ((p2m_array_compat = malloc(nr_pages_compat * sizeof(unsigned long))) == NULL)
  15.125          {
  15.126 @@ -375,7 +410,6 @@ static uint64_t* pfn_array = NULL; /* fo
  15.127  static uint64_t pfn_array_size = 0;
  15.128  static long nr_pages = 0;
  15.129  static uint64_t pages_offset;
  15.130 -static unsigned long cr3[MAX_VIRT_CPUS];
  15.131  
  15.132  static const struct xen_dumpcore_elfnote_format_version_desc
  15.133  known_format_version[] =
  15.134 @@ -413,21 +447,13 @@ map_gmfn_to_offset_elf(unsigned long gmf
  15.135  }
  15.136  
  15.137  static void *
  15.138 -map_domain_va_core_elf(unsigned long domfd, int cpu, void *guest_va,
  15.139 -                       vcpu_guest_context_t *ctxt)
  15.140 +map_domain_va_core_elf(unsigned long domfd, int cpu, void *guest_va)
  15.141  {
  15.142      unsigned long pde, page;
  15.143      unsigned long va = (unsigned long)guest_va;
  15.144      unsigned long offset;
  15.145      void *v;
  15.146  
  15.147 -    static unsigned long  cr3_phys[MAX_VIRT_CPUS];
  15.148 -    static unsigned long *cr3_virt[MAX_VIRT_CPUS];
  15.149 -    static unsigned long  pde_phys[MAX_VIRT_CPUS];
  15.150 -    static unsigned long *pde_virt[MAX_VIRT_CPUS];
  15.151 -    static unsigned long  page_phys[MAX_VIRT_CPUS];
  15.152 -    static unsigned long *page_virt[MAX_VIRT_CPUS];
  15.153 -
  15.154      if (cr3[cpu] != cr3_phys[cpu])
  15.155      {
  15.156          if (cr3_virt[cpu])
  15.157 @@ -498,10 +524,10 @@ xc_waitdomain_core_elf(
  15.158      int xc_handle,
  15.159      int domfd,
  15.160      int *status,
  15.161 -    int options,
  15.162 -    vcpu_guest_context_t *ctxt)
  15.163 +    int options)
  15.164  {
  15.165      int i;
  15.166 +    vcpu_guest_context_t *ctxt;
  15.167      struct elf_core ecore;
  15.168  
  15.169      struct xen_dumpcore_elfnote_none *none;
  15.170 @@ -527,14 +553,13 @@ xc_waitdomain_core_elf(
  15.171      if ((header->header.xch_magic != XC_CORE_MAGIC &&
  15.172           header->header.xch_magic != XC_CORE_MAGIC_HVM) ||
  15.173          header->header.xch_nr_vcpus == 0 ||
  15.174 -        header->header.xch_nr_vcpus >= MAX_VIRT_CPUS ||
  15.175          header->header.xch_nr_pages == 0 ||
  15.176          header->header.xch_page_size != PAGE_SIZE)
  15.177          goto out;
  15.178      current_is_auto_translated_physmap =
  15.179          (header->header.xch_magic == XC_CORE_MAGIC_HVM);
  15.180      nr_pages = header->header.xch_nr_pages;
  15.181 -    
  15.182 +
  15.183      /* .note.Xen: xen_version */
  15.184      if (elf_core_search_note(&ecore, XEN_DUMPCORE_ELFNOTE_NAME,
  15.185                               XEN_ELFNOTE_DUMPCORE_XEN_VERSION,
  15.186 @@ -561,6 +586,9 @@ xc_waitdomain_core_elf(
  15.187                  format_version->format_version.version);
  15.188      }
  15.189  
  15.190 +    if ((ctxt = ptrace_core_get_vcpu_ctxt(header->header.xch_nr_vcpus)) == NULL)
  15.191 +        goto out;
  15.192 +
  15.193      /* .xen_prstatus: read vcpu_guest_context_t*/
  15.194      if (elf_core_read_sec_by_name(&ecore, XEN_DUMPCORE_SEC_PRSTATUS,
  15.195                                    (char*)ctxt) < 0)
  15.196 @@ -621,12 +649,10 @@ out:
  15.197  typedef int (*xc_waitdomain_core_t)(int xc_handle,
  15.198                                      int domfd,
  15.199                                      int *status,
  15.200 -                                    int options,
  15.201 -                                    vcpu_guest_context_t *ctxt);
  15.202 +                                    int options);
  15.203  typedef void *(*map_domain_va_core_t)(unsigned long domfd,
  15.204                                        int cpu,
  15.205 -                                      void *guest_va,
  15.206 -                                      vcpu_guest_context_t *ctxt);
  15.207 +                                      void *guest_va);
  15.208  struct xc_core_format_type {
  15.209      xc_waitdomain_core_t waitdomain_core;
  15.210      map_domain_va_core_t map_domain_va_core;
  15.211 @@ -642,25 +668,22 @@ static const struct xc_core_format_type 
  15.212  static const struct xc_core_format_type* current_format_type = NULL;
  15.213  
  15.214  void *
  15.215 -map_domain_va_core(unsigned long domfd, int cpu, void *guest_va,
  15.216 -                   vcpu_guest_context_any_t *ctxt)
  15.217 +map_domain_va_core(unsigned long domfd, int cpu, void *guest_va)
  15.218  {
  15.219      if (current_format_type == NULL)
  15.220          return NULL;
  15.221 -    return (current_format_type->map_domain_va_core)(domfd, cpu, guest_va,
  15.222 -                                                     &ctxt->c);
  15.223 +    return (current_format_type->map_domain_va_core)(domfd, cpu, guest_va);
  15.224  }
  15.225  
  15.226  int
  15.227 -xc_waitdomain_core(int xc_handle, int domfd, int *status, int options,
  15.228 -                   vcpu_guest_context_any_t *ctxt)
  15.229 +xc_waitdomain_core(int xc_handle, int domfd, int *status, int options)
  15.230  {
  15.231      int ret;
  15.232      int i;
  15.233  
  15.234      for (i = 0; i < NR_FORMAT_TYPE; i++) {
  15.235          ret = (format_type[i].waitdomain_core)(xc_handle, domfd, status,
  15.236 -                                               options, &ctxt->c);
  15.237 +                                               options);
  15.238          if (ret == 0) {
  15.239              current_format_type = &format_type[i];
  15.240              break;
    16.1 --- a/xen/arch/ia64/xen/dom_fw_common.c	Thu Jun 18 10:05:23 2009 +0100
    16.2 +++ b/xen/arch/ia64/xen/dom_fw_common.c	Thu Jun 18 10:14:16 2009 +0100
    16.3 @@ -28,6 +28,7 @@
    16.4  #include "ia64/xc_dom_ia64_util.h"
    16.5  
    16.6  #define ia64_fc(addr)   asm volatile ("fc %0" :: "r"(addr) : "memory")
    16.7 +#define MAX_VIRT_CPUS XEN_LEGACY_MAX_VCPUS /* XXX */
    16.8  #endif /* __XEN__ */
    16.9  
   16.10  #include <xen/acpi.h>
    17.1 --- a/xen/arch/ia64/xen/domain.c	Thu Jun 18 10:05:23 2009 +0100
    17.2 +++ b/xen/arch/ia64/xen/domain.c	Thu Jun 18 10:14:16 2009 +0100
    17.3 @@ -2225,13 +2225,6 @@ int __init construct_dom0(struct domain 
    17.4  	for ( i = 1; i < MAX_VIRT_CPUS; i++ )
    17.5  	    d->shared_info->vcpu_info[i].evtchn_upcall_mask = 1;
    17.6  
    17.7 -	if (dom0_max_vcpus == 0)
    17.8 -	    dom0_max_vcpus = MAX_VIRT_CPUS;
    17.9 -	if (dom0_max_vcpus > num_online_cpus())
   17.10 -	    dom0_max_vcpus = num_online_cpus();
   17.11 -	if (dom0_max_vcpus > MAX_VIRT_CPUS)
   17.12 -	    dom0_max_vcpus = MAX_VIRT_CPUS;
   17.13 -	
   17.14  	printk ("Dom0 max_vcpus=%d\n", dom0_max_vcpus);
   17.15  	for ( i = 1; i < dom0_max_vcpus; i++ )
   17.16  	    if (alloc_vcpu(d, i, i) == NULL)
   17.17 @@ -2306,6 +2299,24 @@ int __init construct_dom0(struct domain 
   17.18  	return 0;
   17.19  }
   17.20  
   17.21 +struct vcpu *__init alloc_dom0_vcpu0(void)
   17.22 +{
   17.23 +       if (dom0_max_vcpus == 0)
   17.24 +           dom0_max_vcpus = MAX_VIRT_CPUS;
   17.25 +       if (dom0_max_vcpus > num_online_cpus())
   17.26 +           dom0_max_vcpus = num_online_cpus();
   17.27 +       if (dom0_max_vcpus > MAX_VIRT_CPUS)
   17.28 +           dom0_max_vcpus = MAX_VIRT_CPUS;
   17.29 +
   17.30 +       dom0->vcpu = xmalloc_array(struct vcpu *, dom0_max_vcpus);
   17.31 +       if ( !dom0->vcpu )
   17.32 +               return NULL;
   17.33 +       memset(dom0->vcpu, 0, dom0_max_vcpus * sizeof(*dom0->vcpu));
   17.34 +       dom0->max_vcpus = dom0_max_vcpus;
   17.35 +
   17.36 +       return alloc_vcpu(dom0, 0, 0);
   17.37 +}
   17.38 +
   17.39  void machine_restart(unsigned int delay_millisecs)
   17.40  {
   17.41  	mdelay(delay_millisecs);
    18.1 --- a/xen/arch/ia64/xen/xensetup.c	Thu Jun 18 10:05:23 2009 +0100
    18.2 +++ b/xen/arch/ia64/xen/xensetup.c	Thu Jun 18 10:14:16 2009 +0100
    18.3 @@ -570,7 +570,11 @@ skip_move:
    18.4      scheduler_init();
    18.5      idle_vcpu[0] = (struct vcpu*) ia64_r13;
    18.6      idle_domain = domain_create(IDLE_DOMAIN_ID, 0, 0);
    18.7 -    if ( (idle_domain == NULL) || (alloc_vcpu(idle_domain, 0, 0) == NULL) )
    18.8 +    if ( idle_domain == NULL )
    18.9 +        BUG();
   18.10 +    idle_domain->vcpu = idle_vcpu;
   18.11 +    idle_domain->max_vcpus = NR_CPUS;
   18.12 +    if ( alloc_vcpu(idle_domain, 0, 0) == NULL )
   18.13          BUG();
   18.14  
   18.15      alloc_dom_xen_and_dom_io();
   18.16 @@ -657,7 +661,7 @@ printk("num_online_cpus=%d, max_cpus=%d\
   18.17      if (dom0 == NULL)
   18.18          panic("Error creating domain 0\n");
   18.19      domain_set_vhpt_size(dom0, dom0_vhpt_size_log2);
   18.20 -    dom0_vcpu0 = alloc_vcpu(dom0, 0, 0);
   18.21 +    dom0_vcpu0 = alloc_dom0_vcpu0();
   18.22      if (dom0_vcpu0 == NULL || vcpu_late_initialise(dom0_vcpu0) != 0)
   18.23          panic("Cannot allocate dom0 vcpu 0\n");
   18.24  
    19.1 --- a/xen/arch/x86/domain.c	Thu Jun 18 10:05:23 2009 +0100
    19.2 +++ b/xen/arch/x86/domain.c	Thu Jun 18 10:14:16 2009 +0100
    19.3 @@ -263,7 +263,7 @@ int switch_native(struct domain *d)
    19.4  
    19.5      d->arch.is_32bit_pv = d->arch.has_32bit_shinfo = 0;
    19.6  
    19.7 -    for ( vcpuid = 0; vcpuid < MAX_VIRT_CPUS; vcpuid++ )
    19.8 +    for ( vcpuid = 0; vcpuid < d->max_vcpus; vcpuid++ )
    19.9      {
   19.10          if (d->vcpu[vcpuid])
   19.11              release_compat_l4(d->vcpu[vcpuid]);
   19.12 @@ -285,7 +285,7 @@ int switch_compat(struct domain *d)
   19.13  
   19.14      d->arch.is_32bit_pv = d->arch.has_32bit_shinfo = 1;
   19.15  
   19.16 -    for ( vcpuid = 0; vcpuid < MAX_VIRT_CPUS; vcpuid++ )
   19.17 +    for ( vcpuid = 0; vcpuid < d->max_vcpus; vcpuid++ )
   19.18      {
   19.19          if ( (d->vcpu[vcpuid] != NULL) &&
   19.20               (setup_compat_l4(d->vcpu[vcpuid]) != 0) )
   19.21 @@ -423,12 +423,13 @@ int arch_domain_create(struct domain *d,
   19.22  
   19.23  #else /* __x86_64__ */
   19.24  
   19.25 -    d->arch.mm_perdomain_pt_pages = xmalloc_array(struct page_info *,
   19.26 -                                                  PDPT_L2_ENTRIES);
   19.27 -    if ( !d->arch.mm_perdomain_pt_pages )
   19.28 +    BUILD_BUG_ON(PDPT_L2_ENTRIES * sizeof(*d->arch.mm_perdomain_pt_pages)
   19.29 +                 != PAGE_SIZE);
   19.30 +    pg = alloc_domheap_page(NULL, MEMF_node(domain_to_node(d)));
   19.31 +    if ( !pg )
   19.32          goto fail;
   19.33 -    memset(d->arch.mm_perdomain_pt_pages, 0,
   19.34 -           PDPT_L2_ENTRIES * sizeof(*d->arch.mm_perdomain_pt_pages));
   19.35 +    d->arch.mm_perdomain_pt_pages = page_to_virt(pg);
   19.36 +    clear_page(d->arch.mm_perdomain_pt_pages);
   19.37  
   19.38      pg = alloc_domheap_page(NULL, MEMF_node(domain_to_node(d)));
   19.39      if ( pg == NULL )
   19.40 @@ -523,7 +524,8 @@ int arch_domain_create(struct domain *d,
   19.41          free_domheap_page(virt_to_page(d->arch.mm_perdomain_l2));
   19.42      if ( d->arch.mm_perdomain_l3 )
   19.43          free_domheap_page(virt_to_page(d->arch.mm_perdomain_l3));
   19.44 -    xfree(d->arch.mm_perdomain_pt_pages);
   19.45 +    if ( d->arch.mm_perdomain_pt_pages )
   19.46 +        free_domheap_page(virt_to_page(d->arch.mm_perdomain_pt_pages));
   19.47  #else
   19.48      free_xenheap_pages(d->arch.mm_perdomain_pt, pdpt_order);
   19.49  #endif
   19.50 @@ -556,7 +558,7 @@ void arch_domain_destroy(struct domain *
   19.51          if ( perdomain_pt_page(d, i) )
   19.52              free_domheap_page(perdomain_pt_page(d, i));
   19.53      }
   19.54 -    xfree(d->arch.mm_perdomain_pt_pages);
   19.55 +    free_domheap_page(virt_to_page(d->arch.mm_perdomain_pt_pages));
   19.56      free_domheap_page(virt_to_page(d->arch.mm_perdomain_l2));
   19.57      free_domheap_page(virt_to_page(d->arch.mm_perdomain_l3));
   19.58  #endif
   19.59 @@ -872,7 +874,13 @@ map_vcpu_info(struct vcpu *v, unsigned l
   19.60  
   19.61      new_info = (vcpu_info_t *)(mapping + offset);
   19.62  
   19.63 -    memcpy(new_info, v->vcpu_info, sizeof(*new_info));
   19.64 +    if ( v->vcpu_info )
   19.65 +        memcpy(new_info, v->vcpu_info, sizeof(*new_info));
   19.66 +    else
   19.67 +    {
   19.68 +        memset(new_info, 0, sizeof(*new_info));
   19.69 +        __vcpu_info(v, new_info, evtchn_upcall_mask) = 1;
   19.70 +    }
   19.71  
   19.72      v->vcpu_info = new_info;
   19.73      v->arch.vcpu_info_mfn = mfn;
    20.1 --- a/xen/arch/x86/domain_build.c	Thu Jun 18 10:05:23 2009 +0100
    20.2 +++ b/xen/arch/x86/domain_build.c	Thu Jun 18 10:14:16 2009 +0100
    20.3 @@ -82,9 +82,25 @@ static void __init parse_dom0_mem(const 
    20.4  }
    20.5  custom_param("dom0_mem", parse_dom0_mem);
    20.6  
    20.7 -static unsigned int opt_dom0_max_vcpus;
    20.8 +static unsigned int __initdata opt_dom0_max_vcpus;
    20.9  integer_param("dom0_max_vcpus", opt_dom0_max_vcpus);
   20.10  
   20.11 +struct vcpu *__init alloc_dom0_vcpu0(void)
   20.12 +{
   20.13 +    if ( opt_dom0_max_vcpus == 0 )
   20.14 +        opt_dom0_max_vcpus = num_online_cpus();
   20.15 +    if ( opt_dom0_max_vcpus > MAX_VIRT_CPUS )
   20.16 +        opt_dom0_max_vcpus = MAX_VIRT_CPUS;
   20.17 +
   20.18 +    dom0->vcpu = xmalloc_array(struct vcpu *, opt_dom0_max_vcpus);
   20.19 +    if ( !dom0->vcpu )
   20.20 +        return NULL;
   20.21 +    memset(dom0->vcpu, 0, opt_dom0_max_vcpus * sizeof(*dom0->vcpu));
   20.22 +    dom0->max_vcpus = opt_dom0_max_vcpus;
   20.23 +
   20.24 +    return alloc_vcpu(dom0, 0, 0);
   20.25 +}
   20.26 +
   20.27  static unsigned int opt_dom0_shadow;
   20.28  boolean_param("dom0_shadow", opt_dom0_shadow);
   20.29  
   20.30 @@ -701,13 +717,9 @@ int __init construct_dom0(
   20.31  #endif /* __x86_64__ */
   20.32  
   20.33      /* Mask all upcalls... */
   20.34 -    for ( i = 0; i < MAX_VIRT_CPUS; i++ )
   20.35 +    for ( i = 0; i < XEN_LEGACY_MAX_VCPUS; i++ )
   20.36          shared_info(d, vcpu_info[i].evtchn_upcall_mask) = 1;
   20.37  
   20.38 -    if ( opt_dom0_max_vcpus == 0 )
   20.39 -        opt_dom0_max_vcpus = num_online_cpus();
   20.40 -    if ( opt_dom0_max_vcpus > MAX_VIRT_CPUS )
   20.41 -        opt_dom0_max_vcpus = MAX_VIRT_CPUS;
   20.42      printk("Dom0 has maximum %u VCPUs\n", opt_dom0_max_vcpus);
   20.43  
   20.44      for ( i = 1; i < opt_dom0_max_vcpus; i++ )
    21.1 --- a/xen/arch/x86/domctl.c	Thu Jun 18 10:05:23 2009 +0100
    21.2 +++ b/xen/arch/x86/domctl.c	Thu Jun 18 10:14:16 2009 +0100
    21.3 @@ -574,7 +574,8 @@ long arch_do_domctl(
    21.4              goto sendtrigger_out;
    21.5  
    21.6          ret = -ESRCH;
    21.7 -        if ( (v = d->vcpu[domctl->u.sendtrigger.vcpu]) == NULL )
    21.8 +        if ( domctl->u.sendtrigger.vcpu >= d->max_vcpus ||
    21.9 +             (v = d->vcpu[domctl->u.sendtrigger.vcpu]) == NULL )
   21.10              goto sendtrigger_out;
   21.11  
   21.12          switch ( domctl->u.sendtrigger.trigger )
   21.13 @@ -963,7 +964,7 @@ long arch_do_domctl(
   21.14              goto ext_vcpucontext_out;
   21.15  
   21.16          ret = -ESRCH;
   21.17 -        if ( (evc->vcpu >= MAX_VIRT_CPUS) ||
   21.18 +        if ( (evc->vcpu >= d->max_vcpus) ||
   21.19               ((v = d->vcpu[evc->vcpu]) == NULL) )
   21.20              goto ext_vcpucontext_out;
   21.21  
   21.22 @@ -1085,7 +1086,7 @@ long arch_do_domctl(
   21.23              break;
   21.24  
   21.25          ret = -EINVAL;
   21.26 -        if ( (domctl->u.debug_op.vcpu >= MAX_VIRT_CPUS) ||
   21.27 +        if ( (domctl->u.debug_op.vcpu >= d->max_vcpus) ||
   21.28               ((v = d->vcpu[domctl->u.debug_op.vcpu]) == NULL) )
   21.29              goto debug_op_out;
   21.30  
    22.1 --- a/xen/arch/x86/hvm/hvm.c	Thu Jun 18 10:05:23 2009 +0100
    22.2 +++ b/xen/arch/x86/hvm/hvm.c	Thu Jun 18 10:14:16 2009 +0100
    22.3 @@ -367,7 +367,7 @@ void hvm_domain_relinquish_resources(str
    22.4  
    22.5      /* Stop all asynchronous timer actions. */
    22.6      rtc_deinit(d);
    22.7 -    if ( d->vcpu[0] != NULL )
    22.8 +    if ( d->vcpu != NULL && d->vcpu[0] != NULL )
    22.9      {
   22.10          pit_deinit(d);
   22.11          pmtimer_deinit(d);
   22.12 @@ -507,7 +507,7 @@ static int hvm_load_cpu_ctxt(struct doma
   22.13  
   22.14      /* Which vcpu is this? */
   22.15      vcpuid = hvm_load_instance(h);
   22.16 -    if ( vcpuid >= MAX_VIRT_CPUS || (v = d->vcpu[vcpuid]) == NULL )
   22.17 +    if ( vcpuid >= d->max_vcpus || (v = d->vcpu[vcpuid]) == NULL )
   22.18      {
   22.19          gdprintk(XENLOG_ERR, "HVM restore: domain has no vcpu %u\n", vcpuid);
   22.20          return -EINVAL;
   22.21 @@ -2285,7 +2285,7 @@ static void hvm_s3_suspend(struct domain
   22.22      domain_pause(d);
   22.23      domain_lock(d);
   22.24  
   22.25 -    if ( d->is_dying || (d->vcpu[0] == NULL) ||
   22.26 +    if ( d->is_dying || (d->vcpu == NULL) || (d->vcpu[0] == NULL) ||
   22.27           test_and_set_bool(d->arch.hvm_domain.is_s3_suspended) )
   22.28      {
   22.29          domain_unlock(d);
   22.30 @@ -2660,7 +2660,7 @@ long do_hvm_op(unsigned long op, XEN_GUE
   22.31              goto param_fail2;
   22.32  
   22.33          rc = -EINVAL;
   22.34 -        if ( d->vcpu[0] == NULL )
   22.35 +        if ( d->vcpu == NULL || d->vcpu[0] == NULL )
   22.36              goto param_fail2;
   22.37  
   22.38          if ( shadow_mode_enabled(d) )
    23.1 --- a/xen/arch/x86/hvm/mtrr.c	Thu Jun 18 10:05:23 2009 +0100
    23.2 +++ b/xen/arch/x86/hvm/mtrr.c	Thu Jun 18 10:14:16 2009 +0100
    23.3 @@ -676,7 +676,7 @@ static int hvm_load_mtrr_msr(struct doma
    23.4      struct hvm_hw_mtrr hw_mtrr;
    23.5  
    23.6      vcpuid = hvm_load_instance(h);
    23.7 -    if ( vcpuid >= MAX_VIRT_CPUS || (v = d->vcpu[vcpuid]) == NULL )
    23.8 +    if ( vcpuid >= d->max_vcpus || (v = d->vcpu[vcpuid]) == NULL )
    23.9      {
   23.10          gdprintk(XENLOG_ERR, "HVM restore: domain has no vcpu %u\n", vcpuid);
   23.11          return -EINVAL;
   23.12 @@ -720,7 +720,8 @@ uint8_t epte_get_entry_emt(
   23.13  
   23.14      *igmt = 0;
   23.15  
   23.16 -    if ( (current->domain != d) && ((v = d->vcpu[0]) == NULL) )
   23.17 +    if ( (current->domain != d) &&
   23.18 +         ((d->vcpu == NULL) || ((v = d->vcpu[0]) == NULL)) )
   23.19          return MTRR_TYPE_WRBACK;
   23.20  
   23.21      if ( !v->domain->arch.hvm_domain.params[HVM_PARAM_IDENT_PT] )
    24.1 --- a/xen/arch/x86/hvm/vioapic.c	Thu Jun 18 10:05:23 2009 +0100
    24.2 +++ b/xen/arch/x86/hvm/vioapic.c	Thu Jun 18 10:14:16 2009 +0100
    24.3 @@ -339,7 +339,8 @@ static void vioapic_deliver(struct hvm_h
    24.4          /* Force round-robin to pick VCPU 0 */
    24.5          if ( (irq == hvm_isa_irq_to_gsi(0)) && pit_channel0_enabled() )
    24.6          {
    24.7 -            v = vioapic_domain(vioapic)->vcpu[0];
    24.8 +            v = vioapic_domain(vioapic)->vcpu ?
    24.9 +                vioapic_domain(vioapic)->vcpu[0] : NULL;
   24.10              target = v ? vcpu_vlapic(v) : NULL;
   24.11          }
   24.12          else
   24.13 @@ -367,12 +368,14 @@ static void vioapic_deliver(struct hvm_h
   24.14              if ( !(deliver_bitmask & (1 << bit)) )
   24.15                  continue;
   24.16              deliver_bitmask &= ~(1 << bit);
   24.17 +            if ( vioapic_domain(vioapic)->vcpu == NULL )
   24.18 +                v = NULL;
   24.19  #ifdef IRQ0_SPECIAL_ROUTING
   24.20              /* Do not deliver timer interrupts to VCPU != 0 */
   24.21 -            if ( (irq == hvm_isa_irq_to_gsi(0)) && pit_channel0_enabled() )
   24.22 +            else if ( (irq == hvm_isa_irq_to_gsi(0)) && pit_channel0_enabled() )
   24.23                  v = vioapic_domain(vioapic)->vcpu[0];
   24.24 +#endif
   24.25              else
   24.26 -#endif
   24.27                  v = vioapic_domain(vioapic)->vcpu[bit];
   24.28              if ( v != NULL )
   24.29              {
   24.30 @@ -392,7 +395,8 @@ static void vioapic_deliver(struct hvm_h
   24.31              if ( !(deliver_bitmask & (1 << bit)) )
   24.32                  continue;
   24.33              deliver_bitmask &= ~(1 << bit);
   24.34 -            if ( ((v = vioapic_domain(vioapic)->vcpu[bit]) != NULL) &&
   24.35 +            if ( (vioapic_domain(vioapic)->vcpu != NULL) &&
   24.36 +                 ((v = vioapic_domain(vioapic)->vcpu[bit]) != NULL) &&
   24.37                   !test_and_set_bool(v->nmi_pending) )
   24.38                  vcpu_kick(v);
   24.39          }
    25.1 --- a/xen/arch/x86/hvm/vlapic.c	Thu Jun 18 10:05:23 2009 +0100
    25.2 +++ b/xen/arch/x86/hvm/vlapic.c	Thu Jun 18 10:14:16 2009 +0100
    25.3 @@ -384,7 +384,7 @@ struct vlapic *apic_lowest_prio(struct d
    25.4      struct vlapic *vlapic, *target = NULL;
    25.5      struct vcpu *v;
    25.6  
    25.7 -    if ( unlikely((v = d->vcpu[old]) == NULL) )
    25.8 +    if ( unlikely(!d->vcpu) || unlikely((v = d->vcpu[old]) == NULL) )
    25.9          return NULL;
   25.10  
   25.11      do {
   25.12 @@ -913,7 +913,7 @@ static int lapic_load_hidden(struct doma
   25.13      
   25.14      /* Which vlapic to load? */
   25.15      vcpuid = hvm_load_instance(h); 
   25.16 -    if ( vcpuid >= MAX_VIRT_CPUS || (v = d->vcpu[vcpuid]) == NULL )
   25.17 +    if ( vcpuid >= d->max_vcpus || (v = d->vcpu[vcpuid]) == NULL )
   25.18      {
   25.19          gdprintk(XENLOG_ERR, "HVM restore: domain has no vlapic %u\n", vcpuid);
   25.20          return -EINVAL;
   25.21 @@ -936,7 +936,7 @@ static int lapic_load_regs(struct domain
   25.22      
   25.23      /* Which vlapic to load? */
   25.24      vcpuid = hvm_load_instance(h); 
   25.25 -    if ( vcpuid >= MAX_VIRT_CPUS || (v = d->vcpu[vcpuid]) == NULL )
   25.26 +    if ( vcpuid >= d->max_vcpus || (v = d->vcpu[vcpuid]) == NULL )
   25.27      {
   25.28          gdprintk(XENLOG_ERR, "HVM restore: domain has no vlapic %u\n", vcpuid);
   25.29          return -EINVAL;
    26.1 --- a/xen/arch/x86/hvm/vmx/vmx.c	Thu Jun 18 10:05:23 2009 +0100
    26.2 +++ b/xen/arch/x86/hvm/vmx/vmx.c	Thu Jun 18 10:14:16 2009 +0100
    26.3 @@ -1221,7 +1221,7 @@ static void __ept_sync_domain(void *info
    26.4  void ept_sync_domain(struct domain *d)
    26.5  {
    26.6      /* Only if using EPT and this domain has some VCPUs to dirty. */
    26.7 -    if ( d->arch.hvm_domain.hap_enabled && d->vcpu[0] )
    26.8 +    if ( d->arch.hvm_domain.hap_enabled && d->vcpu && d->vcpu[0] )
    26.9      {
   26.10          ASSERT(local_irq_is_enabled());
   26.11          on_each_cpu(__ept_sync_domain, d, 1);
   26.12 @@ -1399,7 +1399,7 @@ static struct hvm_function_table vmx_fun
   26.13  };
   26.14  
   26.15  static unsigned long *vpid_bitmap;
   26.16 -#define VPID_BITMAP_SIZE ((1u << VMCS_VPID_WIDTH) / MAX_VIRT_CPUS)
   26.17 +#define VPID_BITMAP_SIZE ((1u << VMCS_VPID_WIDTH) / XEN_LEGACY_MAX_VCPUS)
   26.18  
   26.19  void start_vmx(void)
   26.20  {
   26.21 @@ -1921,7 +1921,7 @@ static int vmx_alloc_vpid(struct domain 
   26.22      }
   26.23      while ( test_and_set_bit(idx, vpid_bitmap) );
   26.24  
   26.25 -    d->arch.hvm_domain.vmx.vpid_base = idx * MAX_VIRT_CPUS;
   26.26 +    d->arch.hvm_domain.vmx.vpid_base = idx * XEN_LEGACY_MAX_VCPUS;
   26.27      return 0;
   26.28  }
   26.29  
   26.30 @@ -1930,7 +1930,8 @@ static void vmx_free_vpid(struct domain 
   26.31      if ( !cpu_has_vmx_vpid )
   26.32          return;
   26.33  
   26.34 -    clear_bit(d->arch.hvm_domain.vmx.vpid_base / MAX_VIRT_CPUS, vpid_bitmap);
   26.35 +    clear_bit(d->arch.hvm_domain.vmx.vpid_base / XEN_LEGACY_MAX_VCPUS,
   26.36 +              vpid_bitmap);
   26.37  }
   26.38  
   26.39  static void vmx_install_vlapic_mapping(struct vcpu *v)
    27.1 --- a/xen/arch/x86/hvm/vpic.c	Thu Jun 18 10:05:23 2009 +0100
    27.2 +++ b/xen/arch/x86/hvm/vpic.c	Thu Jun 18 10:14:16 2009 +0100
    27.3 @@ -110,7 +110,9 @@ static void vpic_update_int_output(struc
    27.4          if ( vpic->is_master )
    27.5          {
    27.6              /* Master INT line is connected to VCPU0's VLAPIC LVT0. */
    27.7 -            struct vcpu *v = vpic_domain(vpic)->vcpu[0];
    27.8 +            struct vcpu *v = vpic_domain(vpic)->vcpu ?
    27.9 +                vpic_domain(vpic)->vcpu[0] : NULL;
   27.10 +
   27.11              if ( (v != NULL) && vlapic_accept_pic_intr(v) )
   27.12                  vcpu_kick(v);
   27.13          }
    28.1 --- a/xen/arch/x86/mm.c	Thu Jun 18 10:05:23 2009 +0100
    28.2 +++ b/xen/arch/x86/mm.c	Thu Jun 18 10:14:16 2009 +0100
    28.3 @@ -1336,7 +1336,7 @@ static int alloc_l3_table(struct page_in
    28.4       */
    28.5      if ( (pfn >= 0x100000) &&
    28.6           unlikely(!VM_ASSIST(d, VMASST_TYPE_pae_extended_cr3)) &&
    28.7 -         d->vcpu[0] && d->vcpu[0]->is_initialised )
    28.8 +         d->vcpu && d->vcpu[0] && d->vcpu[0]->is_initialised )
    28.9      {
   28.10          MEM_LOG("PAE pgd must be below 4GB (0x%lx >= 0x100000)", pfn);
   28.11          return -EINVAL;
   28.12 @@ -2575,7 +2575,7 @@ static inline int vcpumask_to_pcpumask(
   28.13      for ( vmask = 0, offs = 0; ; ++offs)
   28.14      {
   28.15          vcpu_bias = offs * (is_native ? BITS_PER_LONG : 32);
   28.16 -        if ( vcpu_bias >= MAX_VIRT_CPUS )
   28.17 +        if ( vcpu_bias >= d->max_vcpus )
   28.18              return 0;
   28.19  
   28.20          if ( unlikely(is_native ?
   28.21 @@ -2592,7 +2592,7 @@ static inline int vcpumask_to_pcpumask(
   28.22              vcpu_id = find_first_set_bit(vmask);
   28.23              vmask &= ~(1UL << vcpu_id);
   28.24              vcpu_id += vcpu_bias;
   28.25 -            if ( (vcpu_id >= MAX_VIRT_CPUS) )
   28.26 +            if ( (vcpu_id >= d->max_vcpus) )
   28.27                  return 0;
   28.28              if ( ((v = d->vcpu[vcpu_id]) != NULL) )
   28.29                  cpus_or(*pmask, *pmask, v->vcpu_dirty_cpumask);
    29.1 --- a/xen/arch/x86/mm/paging.c	Thu Jun 18 10:05:23 2009 +0100
    29.2 +++ b/xen/arch/x86/mm/paging.c	Thu Jun 18 10:14:16 2009 +0100
    29.3 @@ -684,7 +684,7 @@ int paging_domctl(struct domain *d, xen_
    29.4          return 0;
    29.5      }
    29.6  
    29.7 -    if ( unlikely(d->vcpu[0] == NULL) )
    29.8 +    if ( unlikely(d->vcpu == NULL) || unlikely(d->vcpu[0] == NULL) )
    29.9      {
   29.10          PAGING_ERROR("Paging op on a domain (%u) with no vcpus\n",
   29.11                       d->domain_id);
    30.1 --- a/xen/arch/x86/mm/shadow/common.c	Thu Jun 18 10:05:23 2009 +0100
    30.2 +++ b/xen/arch/x86/mm/shadow/common.c	Thu Jun 18 10:14:16 2009 +0100
    30.3 @@ -1452,7 +1452,7 @@ static void shadow_blow_tables(struct do
    30.4  
    30.5  void shadow_blow_tables_per_domain(struct domain *d)
    30.6  {
    30.7 -    if ( shadow_mode_enabled(d) && d->vcpu[0] != NULL ) {
    30.8 +    if ( shadow_mode_enabled(d) && d->vcpu != NULL && d->vcpu[0] != NULL ) {
    30.9          shadow_lock(d);
   30.10          shadow_blow_tables(d);
   30.11          shadow_unlock(d);
   30.12 @@ -1470,7 +1470,7 @@ static void shadow_blow_all_tables(unsig
   30.13      rcu_read_lock(&domlist_read_lock);
   30.14      for_each_domain(d)
   30.15      {
   30.16 -        if ( shadow_mode_enabled(d) && d->vcpu[0] != NULL )
   30.17 +        if ( shadow_mode_enabled(d) && d->vcpu != NULL && d->vcpu[0] != NULL )
   30.18          {
   30.19              shadow_lock(d);
   30.20              shadow_blow_tables(d);
    31.1 --- a/xen/arch/x86/nmi.c	Thu Jun 18 10:05:23 2009 +0100
    31.2 +++ b/xen/arch/x86/nmi.c	Thu Jun 18 10:14:16 2009 +0100
    31.3 @@ -463,7 +463,8 @@ static void do_nmi_stats(unsigned char k
    31.4      for_each_cpu ( i )
    31.5          printk("%3d\t%3d\n", i, nmi_count(i));
    31.6  
    31.7 -    if ( ((d = dom0) == NULL) || ((v = d->vcpu[0]) == NULL) )
    31.8 +    if ( ((d = dom0) == NULL) || (d->vcpu == NULL) ||
    31.9 +         ((v = d->vcpu[0]) == NULL) )
   31.10          return;
   31.11  
   31.12      if ( v->nmi_pending || (v->trap_priority >= VCPU_TRAP_NMI) )
    32.1 --- a/xen/arch/x86/setup.c	Thu Jun 18 10:05:23 2009 +0100
    32.2 +++ b/xen/arch/x86/setup.c	Thu Jun 18 10:14:16 2009 +0100
    32.3 @@ -234,11 +234,15 @@ static void __init init_idle_domain(void
    32.4      scheduler_init();
    32.5  
    32.6      idle_domain = domain_create(IDLE_DOMAIN_ID, 0, 0);
    32.7 -    if ( (idle_domain == NULL) || (alloc_vcpu(idle_domain, 0, 0) == NULL) )
    32.8 +    if ( idle_domain == NULL )
    32.9 +        BUG();
   32.10 +    idle_domain->vcpu = idle_vcpu;
   32.11 +    idle_domain->max_vcpus = NR_CPUS;
   32.12 +    if ( alloc_vcpu(idle_domain, 0, 0) == NULL )
   32.13          BUG();
   32.14  
   32.15 -    set_current(idle_domain->vcpu[0]);
   32.16 -    idle_vcpu[0] = this_cpu(curr_vcpu) = current;
   32.17 +    set_current(idle_vcpu[0]);
   32.18 +    this_cpu(curr_vcpu) = current;
   32.19  
   32.20      setup_idle_pagetable();
   32.21  }
   32.22 @@ -998,7 +1002,7 @@ void __init __start_xen(unsigned long mb
   32.23  
   32.24      /* Create initial domain 0. */
   32.25      dom0 = domain_create(0, DOMCRF_s3_integrity, DOM0_SSIDREF);
   32.26 -    if ( (dom0 == NULL) || (alloc_vcpu(dom0, 0, 0) == NULL) )
   32.27 +    if ( (dom0 == NULL) || (alloc_dom0_vcpu0() == NULL) )
   32.28          panic("Error creating domain 0\n");
   32.29  
   32.30      dom0->is_privileged = 1;
    33.1 --- a/xen/arch/x86/traps.c	Thu Jun 18 10:05:23 2009 +0100
    33.2 +++ b/xen/arch/x86/traps.c	Thu Jun 18 10:14:16 2009 +0100
    33.3 @@ -2868,7 +2868,7 @@ static void nmi_dom0_report(unsigned int
    33.4  {
    33.5      struct domain *d = dom0;
    33.6  
    33.7 -    if ( (d == NULL) || (d->vcpu[0] == NULL) )
    33.8 +    if ( (d == NULL) || (d->vcpu == NULL) || (d->vcpu[0] == NULL) )
    33.9          return;
   33.10  
   33.11      set_bit(reason_idx, nmi_reason(d));
   33.12 @@ -3205,7 +3205,7 @@ int guest_has_trap_callback(struct domai
   33.13      struct trap_info *t;
   33.14  
   33.15      BUG_ON(d == NULL);
   33.16 -    BUG_ON(vcpuid >= MAX_VIRT_CPUS);
   33.17 +    BUG_ON(vcpuid >= d->max_vcpus);
   33.18  
   33.19      /* Sanity check - XXX should be more fine grained. */
   33.20      BUG_ON(trap_nr > TRAP_syscall);
   33.21 @@ -3223,7 +3223,7 @@ int send_guest_trap(struct domain *d, ui
   33.22      struct softirq_trap *st;
   33.23  
   33.24      BUG_ON(d == NULL);
   33.25 -    BUG_ON(vcpuid >= MAX_VIRT_CPUS);
   33.26 +    BUG_ON(vcpuid >= d->max_vcpus);
   33.27      v = d->vcpu[vcpuid];
   33.28  
   33.29      switch (trap_nr) {
    34.1 --- a/xen/common/compat/domain.c	Thu Jun 18 10:05:23 2009 +0100
    34.2 +++ b/xen/common/compat/domain.c	Thu Jun 18 10:14:16 2009 +0100
    34.3 @@ -24,7 +24,7 @@ int compat_vcpu_op(int cmd, int vcpuid, 
    34.4      if ( (vcpuid < 0) || (vcpuid >= MAX_VIRT_CPUS) )
    34.5          return -EINVAL;
    34.6  
    34.7 -    if ( (v = d->vcpu[vcpuid]) == NULL )
    34.8 +    if ( vcpuid >= d->max_vcpus || (v = d->vcpu[vcpuid]) == NULL )
    34.9          return -ENOENT;
   34.10  
   34.11      switch ( cmd )
    35.1 --- a/xen/common/domain.c	Thu Jun 18 10:05:23 2009 +0100
    35.2 +++ b/xen/common/domain.c	Thu Jun 18 10:14:16 2009 +0100
    35.3 @@ -134,7 +134,7 @@ struct vcpu *alloc_vcpu(
    35.4  {
    35.5      struct vcpu *v;
    35.6  
    35.7 -    BUG_ON(d->vcpu[vcpu_id] != NULL);
    35.8 +    BUG_ON((!is_idle_domain(d) || vcpu_id) && d->vcpu[vcpu_id]);
    35.9  
   35.10      if ( (v = alloc_vcpu_struct()) == NULL )
   35.11          return NULL;
   35.12 @@ -153,7 +153,8 @@ struct vcpu *alloc_vcpu(
   35.13          v->runstate.state = RUNSTATE_offline;        
   35.14          v->runstate.state_entry_time = NOW();
   35.15          set_bit(_VPF_down, &v->pause_flags);
   35.16 -        v->vcpu_info = (void *)&shared_info(d, vcpu_info[vcpu_id]);
   35.17 +        if ( vcpu_id < XEN_LEGACY_MAX_VCPUS )
   35.18 +            v->vcpu_info = (void *)&shared_info(d, vcpu_info[vcpu_id]);
   35.19      }
   35.20  
   35.21      if ( sched_init_vcpu(v, cpu_id) != 0 )
   35.22 @@ -181,22 +182,8 @@ struct vcpu *alloc_vcpu(
   35.23  
   35.24  struct vcpu *alloc_idle_vcpu(unsigned int cpu_id)
   35.25  {
   35.26 -    struct domain *d;
   35.27 -    struct vcpu *v;
   35.28 -    unsigned int vcpu_id = cpu_id % MAX_VIRT_CPUS;
   35.29 -
   35.30 -    if ( (v = idle_vcpu[cpu_id]) != NULL )
   35.31 -        return v;
   35.32 -
   35.33 -    d = (vcpu_id == 0) ?
   35.34 -        domain_create(IDLE_DOMAIN_ID, 0, 0) :
   35.35 -        idle_vcpu[cpu_id - vcpu_id]->domain;
   35.36 -    BUG_ON(d == NULL);
   35.37 -
   35.38 -    v = alloc_vcpu(d, vcpu_id, cpu_id);
   35.39 -    idle_vcpu[cpu_id] = v;
   35.40 -
   35.41 -    return v;
   35.42 +    return idle_vcpu[cpu_id] ?: alloc_vcpu(idle_vcpu[0]->domain,
   35.43 +                                           cpu_id, cpu_id);
   35.44  }
   35.45  
   35.46  static unsigned int extra_dom0_irqs, extra_domU_irqs = 8;
   35.47 @@ -575,7 +562,7 @@ static void complete_domain_destroy(stru
   35.48      struct vcpu *v;
   35.49      int i;
   35.50  
   35.51 -    for ( i = MAX_VIRT_CPUS-1; i >= 0; i-- )
   35.52 +    for ( i = d->max_vcpus - 1; i >= 0; i-- )
   35.53      {
   35.54          if ( (v = d->vcpu[i]) == NULL )
   35.55              continue;
   35.56 @@ -594,7 +581,7 @@ static void complete_domain_destroy(stru
   35.57      /* Free page used by xen oprofile buffer. */
   35.58      free_xenoprof_pages(d);
   35.59  
   35.60 -    for ( i = MAX_VIRT_CPUS-1; i >= 0; i-- )
   35.61 +    for ( i = d->max_vcpus - 1; i >= 0; i-- )
   35.62          if ( (v = d->vcpu[i]) != NULL )
   35.63              free_vcpu_struct(v);
   35.64  
   35.65 @@ -742,12 +729,15 @@ long do_vcpu_op(int cmd, int vcpuid, XEN
   35.66      if ( (vcpuid < 0) || (vcpuid >= MAX_VIRT_CPUS) )
   35.67          return -EINVAL;
   35.68  
   35.69 -    if ( (v = d->vcpu[vcpuid]) == NULL )
   35.70 +    if ( vcpuid >= d->max_vcpus || (v = d->vcpu[vcpuid]) == NULL )
   35.71          return -ENOENT;
   35.72  
   35.73      switch ( cmd )
   35.74      {
   35.75      case VCPUOP_initialise:
   35.76 +        if ( !v->vcpu_info )
   35.77 +            return -EINVAL;
   35.78 +
   35.79          if ( (ctxt = xmalloc(struct vcpu_guest_context)) == NULL )
   35.80              return -ENOMEM;
   35.81  
    36.1 --- a/xen/common/domctl.c	Thu Jun 18 10:05:23 2009 +0100
    36.2 +++ b/xen/common/domctl.c	Thu Jun 18 10:14:16 2009 +0100
    36.3 @@ -253,7 +253,7 @@ long do_domctl(XEN_GUEST_HANDLE(xen_domc
    36.4  
    36.5          ret = -EINVAL;
    36.6          if ( (d == current->domain) || /* no domain_pause() */
    36.7 -             (vcpu >= MAX_VIRT_CPUS) || ((v = d->vcpu[vcpu]) == NULL) )
    36.8 +             (vcpu >= d->max_vcpus) || ((v = d->vcpu[vcpu]) == NULL) )
    36.9              goto svc_out;
   36.10  
   36.11          if ( guest_handle_is_null(op->u.vcpucontext.ctxt) )
   36.12 @@ -433,7 +433,8 @@ long do_domctl(XEN_GUEST_HANDLE(xen_domc
   36.13  
   36.14          ret = -EINVAL;
   36.15          if ( (d == current->domain) || /* no domain_pause() */
   36.16 -             (max > MAX_VIRT_CPUS) )
   36.17 +             (max > MAX_VIRT_CPUS) ||
   36.18 +             (is_hvm_domain(d) && max > XEN_LEGACY_MAX_VCPUS) )
   36.19          {
   36.20              rcu_unlock_domain(d);
   36.21              break;
   36.22 @@ -446,15 +447,40 @@ long do_domctl(XEN_GUEST_HANDLE(xen_domc
   36.23              break;
   36.24          }
   36.25  
   36.26 +        /* Until Xenoprof can dynamically grow its vcpu-s array... */
   36.27 +        if ( d->xenoprof )
   36.28 +        {
   36.29 +            rcu_unlock_domain(d);
   36.30 +            ret = -EAGAIN;
   36.31 +            break;
   36.32 +        }
   36.33 +
   36.34          /* Needed, for example, to ensure writable p.t. state is synced. */
   36.35          domain_pause(d);
   36.36  
   36.37          /* We cannot reduce maximum VCPUs. */
   36.38          ret = -EINVAL;
   36.39 -        if ( (max != MAX_VIRT_CPUS) && (d->vcpu[max] != NULL) )
   36.40 +        if ( (max < d->max_vcpus) && (d->vcpu[max] != NULL) )
   36.41              goto maxvcpu_out;
   36.42  
   36.43          ret = -ENOMEM;
   36.44 +        if ( max > d->max_vcpus )
   36.45 +        {
   36.46 +            struct vcpu **vcpus = xmalloc_array(struct vcpu *, max);
   36.47 +            void *ptr;
   36.48 +
   36.49 +            if ( !vcpus )
   36.50 +                goto maxvcpu_out;
   36.51 +            memcpy(vcpus, d->vcpu, d->max_vcpus * sizeof(*vcpus));
   36.52 +            memset(vcpus + d->max_vcpus, 0,
   36.53 +                   (max - d->max_vcpus) * sizeof(*vcpus));
   36.54 +
   36.55 +            ptr = d->vcpu;
   36.56 +            d->vcpu = vcpus;
   36.57 +            wmb();
   36.58 +            d->max_vcpus = max;
   36.59 +            xfree(ptr);
   36.60 +        }
   36.61          for ( i = 0; i < max; i++ )
   36.62          {
   36.63              if ( d->vcpu[i] != NULL )
   36.64 @@ -505,7 +531,7 @@ long do_domctl(XEN_GUEST_HANDLE(xen_domc
   36.65              goto vcpuaffinity_out;
   36.66  
   36.67          ret = -EINVAL;
   36.68 -        if ( op->u.vcpuaffinity.vcpu >= MAX_VIRT_CPUS )
   36.69 +        if ( op->u.vcpuaffinity.vcpu >= d->max_vcpus )
   36.70              goto vcpuaffinity_out;
   36.71  
   36.72          ret = -ESRCH;
   36.73 @@ -599,7 +625,7 @@ long do_domctl(XEN_GUEST_HANDLE(xen_domc
   36.74              goto getvcpucontext_out;
   36.75  
   36.76          ret = -EINVAL;
   36.77 -        if ( op->u.vcpucontext.vcpu >= MAX_VIRT_CPUS )
   36.78 +        if ( op->u.vcpucontext.vcpu >= d->max_vcpus )
   36.79              goto getvcpucontext_out;
   36.80  
   36.81          ret = -ESRCH;
   36.82 @@ -661,7 +687,7 @@ long do_domctl(XEN_GUEST_HANDLE(xen_domc
   36.83              goto getvcpuinfo_out;
   36.84  
   36.85          ret = -EINVAL;
   36.86 -        if ( op->u.getvcpuinfo.vcpu >= MAX_VIRT_CPUS )
   36.87 +        if ( op->u.getvcpuinfo.vcpu >= d->max_vcpus )
   36.88              goto getvcpuinfo_out;
   36.89  
   36.90          ret = -ESRCH;
    37.1 --- a/xen/common/event_channel.c	Thu Jun 18 10:05:23 2009 +0100
    37.2 +++ b/xen/common/event_channel.c	Thu Jun 18 10:14:16 2009 +0100
    37.3 @@ -240,10 +240,13 @@ static long evtchn_bind_virq(evtchn_bind
    37.4      if ( virq_is_global(virq) && (vcpu != 0) )
    37.5          return -EINVAL;
    37.6  
    37.7 -    if ( (vcpu < 0) || (vcpu >= ARRAY_SIZE(d->vcpu)) ||
    37.8 +    if ( (vcpu < 0) || (vcpu >= d->max_vcpus) ||
    37.9           ((v = d->vcpu[vcpu]) == NULL) )
   37.10          return -ENOENT;
   37.11  
   37.12 +    if ( unlikely(!v->vcpu_info) )
   37.13 +        return -EAGAIN;
   37.14 +
   37.15      spin_lock(&d->event_lock);
   37.16  
   37.17      if ( v->virq_to_evtchn[virq] != 0 )
   37.18 @@ -273,10 +276,13 @@ static long evtchn_bind_ipi(evtchn_bind_
   37.19      int            port, vcpu = bind->vcpu;
   37.20      long           rc = 0;
   37.21  
   37.22 -    if ( (vcpu < 0) || (vcpu >= ARRAY_SIZE(d->vcpu)) ||
   37.23 +    if ( (vcpu < 0) || (vcpu >= d->max_vcpus) ||
   37.24           (d->vcpu[vcpu] == NULL) )
   37.25          return -ENOENT;
   37.26  
   37.27 +    if ( unlikely(!d->vcpu[vcpu]->vcpu_info) )
   37.28 +        return -EAGAIN;
   37.29 +
   37.30      spin_lock(&d->event_lock);
   37.31  
   37.32      if ( (port = get_free_port(d)) < 0 )
   37.33 @@ -555,13 +561,13 @@ static int evtchn_set_pending(struct vcp
   37.34      }
   37.35      
   37.36      /* Check if some VCPU might be polling for this event. */
   37.37 -    if ( likely(bitmap_empty(d->poll_mask, MAX_VIRT_CPUS)) )
   37.38 +    if ( likely(bitmap_empty(d->poll_mask, d->max_vcpus)) )
   37.39          return 0;
   37.40  
   37.41      /* Wake any interested (or potentially interested) pollers. */
   37.42 -    for ( vcpuid = find_first_bit(d->poll_mask, MAX_VIRT_CPUS);
   37.43 -          vcpuid < MAX_VIRT_CPUS;
   37.44 -          vcpuid = find_next_bit(d->poll_mask, MAX_VIRT_CPUS, vcpuid+1) )
   37.45 +    for ( vcpuid = find_first_bit(d->poll_mask, d->max_vcpus);
   37.46 +          vcpuid < d->max_vcpus;
   37.47 +          vcpuid = find_next_bit(d->poll_mask, d->max_vcpus, vcpuid+1) )
   37.48      {
   37.49          v = d->vcpu[vcpuid];
   37.50          if ( ((v->poll_evtchn <= 0) || (v->poll_evtchn == port)) &&
   37.51 @@ -608,7 +614,7 @@ void send_guest_global_virq(struct domai
   37.52  
   37.53      ASSERT(virq_is_global(virq));
   37.54  
   37.55 -    if ( unlikely(d == NULL) )
   37.56 +    if ( unlikely(d == NULL) || unlikely(d->vcpu == NULL) )
   37.57          return;
   37.58  
   37.59      v = d->vcpu[0];
   37.60 @@ -717,9 +723,12 @@ long evtchn_bind_vcpu(unsigned int port,
   37.61      struct evtchn *chn;
   37.62      long           rc = 0;
   37.63  
   37.64 -    if ( (vcpu_id >= ARRAY_SIZE(d->vcpu)) || (d->vcpu[vcpu_id] == NULL) )
   37.65 +    if ( (vcpu_id >= d->max_vcpus) || (d->vcpu[vcpu_id] == NULL) )
   37.66          return -ENOENT;
   37.67  
   37.68 +    if ( unlikely(!d->vcpu[vcpu_id]->vcpu_info) )
   37.69 +        return -EAGAIN;
   37.70 +
   37.71      spin_lock(&d->event_lock);
   37.72  
   37.73      if ( !port_is_valid(d, port) )
   37.74 @@ -943,6 +952,9 @@ int alloc_unbound_xen_event_channel(
   37.75      struct domain *d = local_vcpu->domain;
   37.76      int            port;
   37.77  
   37.78 +    if ( unlikely(!local_vcpu->vcpu_info) )
   37.79 +        return -EAGAIN;
   37.80 +
   37.81      spin_lock(&d->event_lock);
   37.82  
   37.83      if ( (port = get_free_port(d)) < 0 )
   37.84 @@ -1016,6 +1028,14 @@ int evtchn_init(struct domain *d)
   37.85      if ( get_free_port(d) != 0 )
   37.86          return -EINVAL;
   37.87      evtchn_from_port(d, 0)->state = ECS_RESERVED;
   37.88 +
   37.89 +#if MAX_VIRT_CPUS > BITS_PER_LONG
   37.90 +    d->poll_mask = xmalloc_array(unsigned long, BITS_TO_LONGS(MAX_VIRT_CPUS));
   37.91 +    if ( !d->poll_mask )
   37.92 +        return -ENOMEM;
   37.93 +    bitmap_zero(d->poll_mask, MAX_VIRT_CPUS);
   37.94 +#endif
   37.95 +
   37.96      return 0;
   37.97  }
   37.98  
   37.99 @@ -1044,6 +1064,11 @@ void evtchn_destroy(struct domain *d)
  37.100          d->evtchn[i] = NULL;
  37.101      }
  37.102      spin_unlock(&d->event_lock);
  37.103 +
  37.104 +#if MAX_VIRT_CPUS > BITS_PER_LONG
  37.105 +    xfree(d->poll_mask);
  37.106 +    d->poll_mask = NULL;
  37.107 +#endif
  37.108  }
  37.109  
  37.110  static void domain_dump_evtchn_info(struct domain *d)
    38.1 --- a/xen/common/keyhandler.c	Thu Jun 18 10:05:23 2009 +0100
    38.2 +++ b/xen/common/keyhandler.c	Thu Jun 18 10:14:16 2009 +0100
    38.3 @@ -209,8 +209,8 @@ static void dump_domains(unsigned char k
    38.4                     v->vcpu_id, v->processor,
    38.5                     v->is_running ? 'T':'F',
    38.6                     v->pause_flags, v->poll_evtchn,
    38.7 -                   vcpu_info(v, evtchn_upcall_pending),
    38.8 -                   vcpu_info(v, evtchn_upcall_mask));
    38.9 +                   v->vcpu_info ? vcpu_info(v, evtchn_upcall_pending) : 0,
   38.10 +                   v->vcpu_info ? vcpu_info(v, evtchn_upcall_mask) : 1);
   38.11              cpuset_print(tmpstr, sizeof(tmpstr), v->vcpu_dirty_cpumask);
   38.12              printk("dirty_cpus=%s ", tmpstr);
   38.13              cpuset_print(tmpstr, sizeof(tmpstr), v->cpu_affinity);
   38.14 @@ -218,6 +218,8 @@ static void dump_domains(unsigned char k
   38.15              arch_dump_vcpu_info(v);
   38.16              periodic_timer_print(tmpstr, sizeof(tmpstr), v->periodic_period);
   38.17              printk("    %s\n", tmpstr);
   38.18 +            if ( !v->vcpu_info )
   38.19 +                continue;
   38.20              printk("    Notifying guest (virq %d, port %d, stat %d/%d/%d)\n",
   38.21                     VIRQ_DEBUG, v->virq_to_evtchn[VIRQ_DEBUG],
   38.22                     test_bit(v->virq_to_evtchn[VIRQ_DEBUG], 
    39.1 --- a/xen/common/sched_sedf.c	Thu Jun 18 10:05:23 2009 +0100
    39.2 +++ b/xen/common/sched_sedf.c	Thu Jun 18 10:14:16 2009 +0100
    39.3 @@ -1376,6 +1376,9 @@ static int sedf_adjust(struct domain *p,
    39.4            p->domain_id, op->u.sedf.period, op->u.sedf.slice,
    39.5            op->u.sedf.latency, (op->u.sedf.extratime)?"yes":"no");
    39.6  
    39.7 +    if ( !p->vcpu )
    39.8 +        return -EINVAL;
    39.9 +
   39.10      if ( op->cmd == XEN_DOMCTL_SCHEDOP_putinfo )
   39.11      {
   39.12          /* Check for sane parameters. */
    40.1 --- a/xen/common/xenoprof.c	Thu Jun 18 10:05:23 2009 +0100
    40.2 +++ b/xen/common/xenoprof.c	Thu Jun 18 10:14:16 2009 +0100
    40.3 @@ -120,7 +120,7 @@ static void xenoprof_reset_buf(struct do
    40.4          return;
    40.5      }
    40.6  
    40.7 -    for ( j = 0; j < MAX_VIRT_CPUS; j++ )
    40.8 +    for ( j = 0; j < d->max_vcpus; j++ )
    40.9      {
   40.10          buf = d->xenoprof->vcpu[j].buffer;
   40.11          if ( buf != NULL )
   40.12 @@ -201,6 +201,17 @@ static int alloc_xenoprof_struct(
   40.13  
   40.14      memset(d->xenoprof, 0, sizeof(*d->xenoprof));
   40.15  
   40.16 +    d->xenoprof->vcpu = xmalloc_array(struct xenoprof_vcpu, d->max_vcpus);
   40.17 +    if ( d->xenoprof->vcpu == NULL )
   40.18 +    {
   40.19 +        xfree(d->xenoprof);
   40.20 +        d->xenoprof = NULL;
   40.21 +        printk("alloc_xenoprof_struct(): vcpu array allocation failed\n");
   40.22 +        return -ENOMEM;
   40.23 +    }
   40.24 +
   40.25 +    memset(d->xenoprof->vcpu, 0, d->max_vcpus * sizeof(*d->xenoprof->vcpu));
   40.26 +
   40.27      nvcpu = 0;
   40.28      for_each_vcpu ( d, v )
   40.29          nvcpu++;
    41.1 --- a/xen/include/asm-ia64/config.h	Thu Jun 18 10:05:23 2009 +0100
    41.2 +++ b/xen/include/asm-ia64/config.h	Thu Jun 18 10:14:16 2009 +0100
    41.3 @@ -31,6 +31,7 @@
    41.4  #else
    41.5  #define NR_CPUS 64
    41.6  #endif
    41.7 +#define MAX_VIRT_CPUS XEN_LEGACY_MAX_VCPUS
    41.8  #define CONFIG_NUMA
    41.9  #define CONFIG_ACPI_NUMA
   41.10  #define NODES_SHIFT 8				/* linux/asm/numnodes.h */
    42.1 --- a/xen/include/asm-x86/config.h	Thu Jun 18 10:05:23 2009 +0100
    42.2 +++ b/xen/include/asm-x86/config.h	Thu Jun 18 10:14:16 2009 +0100
    42.3 @@ -52,9 +52,13 @@
    42.4  #define NR_CPUS 32
    42.5  #endif
    42.6  
    42.7 -#if defined(__i386__) && (NR_CPUS > 32)
    42.8 +#ifdef __i386__
    42.9 +#if NR_CPUS > 32
   42.10  #error "Maximum of 32 physical processors supported by Xen on x86_32"
   42.11  #endif
   42.12 +/* Maximum number of virtual CPUs in multi-processor guests. */
   42.13 +#define MAX_VIRT_CPUS XEN_LEGACY_MAX_VCPUS
   42.14 +#endif
   42.15  
   42.16  #ifdef CONFIG_X86_SUPERVISOR_MODE_KERNEL
   42.17  # define supervisor_mode_kernel (1)
   42.18 @@ -203,7 +207,7 @@ extern unsigned int video_mode, video_fl
   42.19  /* Slot 260: per-domain mappings. */
   42.20  #define PERDOMAIN_VIRT_START    (PML4_ADDR(260))
   42.21  #define PERDOMAIN_VIRT_END      (PERDOMAIN_VIRT_START + (PERDOMAIN_MBYTES<<20))
   42.22 -#define PERDOMAIN_MBYTES        ((unsigned long)GDT_LDT_MBYTES)
   42.23 +#define PERDOMAIN_MBYTES        (PML4_ENTRY_BYTES >> (20 + PAGETABLE_ORDER))
   42.24  /* Slot 261: machine-to-phys conversion table (16GB). */
   42.25  #define RDWR_MPT_VIRT_START     (PML4_ADDR(261))
   42.26  #define RDWR_MPT_VIRT_END       (RDWR_MPT_VIRT_START + (16UL<<30))
   42.27 @@ -242,6 +246,8 @@ extern unsigned int video_mode, video_fl
   42.28  #define COMPAT_L2_PAGETABLE_XEN_SLOTS(d) \
   42.29      (COMPAT_L2_PAGETABLE_LAST_XEN_SLOT - COMPAT_L2_PAGETABLE_FIRST_XEN_SLOT(d) + 1)
   42.30  
   42.31 +#define COMPAT_LEGACY_MAX_VCPUS XEN_LEGACY_MAX_VCPUS
   42.32 +
   42.33  #endif
   42.34  
   42.35  #define PGT_base_page_table     PGT_l4_page_table
   42.36 @@ -347,7 +353,12 @@ extern unsigned long xenheap_phys_end;
   42.37  /* GDT/LDT shadow mapping area. The first per-domain-mapping sub-area. */
   42.38  #define GDT_LDT_VCPU_SHIFT       5
   42.39  #define GDT_LDT_VCPU_VA_SHIFT    (GDT_LDT_VCPU_SHIFT + PAGE_SHIFT)
   42.40 +#ifdef MAX_VIRT_CPUS
   42.41  #define GDT_LDT_MBYTES           (MAX_VIRT_CPUS >> (20-GDT_LDT_VCPU_VA_SHIFT))
   42.42 +#else
   42.43 +#define GDT_LDT_MBYTES           PERDOMAIN_MBYTES
   42.44 +#define MAX_VIRT_CPUS            (GDT_LDT_MBYTES << (20-GDT_LDT_VCPU_VA_SHIFT))
   42.45 +#endif
   42.46  #define GDT_LDT_VIRT_START       PERDOMAIN_VIRT_START
   42.47  #define GDT_LDT_VIRT_END         (GDT_LDT_VIRT_START + (GDT_LDT_MBYTES << 20))
   42.48  
    43.1 --- a/xen/include/asm-x86/paging.h	Thu Jun 18 10:05:23 2009 +0100
    43.2 +++ b/xen/include/asm-x86/paging.h	Thu Jun 18 10:14:16 2009 +0100
    43.3 @@ -332,7 +332,7 @@ static inline void paging_write_p2m_entr
    43.4  {
    43.5      struct vcpu *v = current;
    43.6      if ( v->domain != d )
    43.7 -        v = d->vcpu[0];
    43.8 +        v = d->vcpu ? d->vcpu[0] : NULL;
    43.9      if ( likely(v && paging_mode_enabled(d) && v->arch.paging.mode != NULL) )
   43.10      {
   43.11          return v->arch.paging.mode->write_p2m_entry(v, gfn, p, table_mfn,
    44.1 --- a/xen/include/asm-x86/shared.h	Thu Jun 18 10:05:23 2009 +0100
    44.2 +++ b/xen/include/asm-x86/shared.h	Thu Jun 18 10:14:16 2009 +0100
    44.3 @@ -26,6 +26,8 @@ static inline void arch_set_##field(stru
    44.4  #define GET_SET_VCPU(type, field)                               \
    44.5  static inline type arch_get_##field(const struct vcpu *v)       \
    44.6  {                                                               \
    44.7 +    if ( unlikely(!v->vcpu_info) )                              \
    44.8 +        return 0;                                               \
    44.9      return !has_32bit_shinfo(v->domain) ?                       \
   44.10             v->vcpu_info->native.arch.field :                    \
   44.11             v->vcpu_info->compat.arch.field;                     \
   44.12 @@ -57,7 +59,7 @@ static inline void arch_set_##field(stru
   44.13  #define GET_SET_VCPU(type, field)                               \
   44.14  static inline type arch_get_##field(const struct vcpu *v)       \
   44.15  {                                                               \
   44.16 -    return v->vcpu_info->arch.field;                            \
   44.17 +    return v->vcpu_info ? v->vcpu_info->arch.field : 0;         \
   44.18  }                                                               \
   44.19  static inline void arch_set_##field(struct vcpu *v,             \
   44.20                                      type val)                   \
    45.1 --- a/xen/include/public/arch-ia64.h	Thu Jun 18 10:05:23 2009 +0100
    45.2 +++ b/xen/include/public/arch-ia64.h	Thu Jun 18 10:14:16 2009 +0100
    45.3 @@ -66,7 +66,7 @@ typedef unsigned long xen_pfn_t;
    45.4  
    45.5  /* Maximum number of virtual CPUs in multi-processor guests. */
    45.6  /* WARNING: before changing this, check that shared_info fits on a page */
    45.7 -#define MAX_VIRT_CPUS 64
    45.8 +#define XEN_LEGACY_MAX_VCPUS 64
    45.9  
   45.10  /* IO ports location for PV.  */
   45.11  #define IO_PORTS_PADDR          0x00000ffffc000000UL
    46.1 --- a/xen/include/public/arch-x86/xen.h	Thu Jun 18 10:05:23 2009 +0100
    46.2 +++ b/xen/include/public/arch-x86/xen.h	Thu Jun 18 10:14:16 2009 +0100
    46.3 @@ -73,8 +73,8 @@ typedef unsigned long xen_pfn_t;
    46.4  #define FIRST_RESERVED_GDT_BYTE  (FIRST_RESERVED_GDT_PAGE * 4096)
    46.5  #define FIRST_RESERVED_GDT_ENTRY (FIRST_RESERVED_GDT_BYTE / 8)
    46.6  
    46.7 -/* Maximum number of virtual CPUs in multi-processor guests. */
    46.8 -#define MAX_VIRT_CPUS 32
    46.9 +/* Maximum number of virtual CPUs in legacy multi-processor guests. */
   46.10 +#define XEN_LEGACY_MAX_VCPUS 32
   46.11  
   46.12  #ifndef __ASSEMBLY__
   46.13  
    47.1 --- a/xen/include/public/xen.h	Thu Jun 18 10:05:23 2009 +0100
    47.2 +++ b/xen/include/public/xen.h	Thu Jun 18 10:14:16 2009 +0100
    47.3 @@ -458,7 +458,7 @@ typedef struct vcpu_info vcpu_info_t;
    47.4   * of this structure remaining constant.
    47.5   */
    47.6  struct shared_info {
    47.7 -    struct vcpu_info vcpu_info[MAX_VIRT_CPUS];
    47.8 +    struct vcpu_info vcpu_info[XEN_LEGACY_MAX_VCPUS];
    47.9  
   47.10      /*
   47.11       * A domain can create "event channels" on which it can send and receive
    48.1 --- a/xen/include/xen/domain.h	Thu Jun 18 10:05:23 2009 +0100
    48.2 +++ b/xen/include/xen/domain.h	Thu Jun 18 10:14:16 2009 +0100
    48.3 @@ -14,6 +14,7 @@ struct vcpu *alloc_vcpu(
    48.4  int boot_vcpu(
    48.5      struct domain *d, int vcpuid, vcpu_guest_context_u ctxt);
    48.6  struct vcpu *alloc_idle_vcpu(unsigned int cpu_id);
    48.7 +struct vcpu *alloc_dom0_vcpu0(void);
    48.8  void vcpu_reset(struct vcpu *v);
    48.9  
   48.10  struct xen_domctl_getdomaininfo;
    49.1 --- a/xen/include/xen/numa.h	Thu Jun 18 10:05:23 2009 +0100
    49.2 +++ b/xen/include/xen/numa.h	Thu Jun 18 10:14:16 2009 +0100
    49.3 @@ -15,6 +15,7 @@
    49.4  #define vcpu_to_node(v) (cpu_to_node((v)->processor))
    49.5  
    49.6  #define domain_to_node(d) \
    49.7 -  (((d)->vcpu[0] != NULL) ? vcpu_to_node((d)->vcpu[0]) : NUMA_NO_NODE)
    49.8 +  (((d)->vcpu != NULL && (d)->vcpu[0] != NULL) \
    49.9 +   ? vcpu_to_node((d)->vcpu[0]) : NUMA_NO_NODE)
   49.10  
   49.11  #endif /* _XEN_NUMA_H */
    50.1 --- a/xen/include/xen/sched.h	Thu Jun 18 10:05:23 2009 +0100
    50.2 +++ b/xen/include/xen/sched.h	Thu Jun 18 10:14:16 2009 +0100
    50.3 @@ -180,6 +180,8 @@ struct domain
    50.4      unsigned int     max_pages;       /* maximum value for tot_pages        */
    50.5      unsigned int     xenheap_pages;   /* # pages allocated from Xen heap    */
    50.6  
    50.7 +    unsigned int     max_vcpus;
    50.8 +
    50.9      /* Scheduling. */
   50.10      void            *sched_priv;    /* scheduler-specific data */
   50.11  
   50.12 @@ -226,7 +228,11 @@ struct domain
   50.13      bool_t           is_pinned;
   50.14  
   50.15      /* Are any VCPUs polling event channels (SCHEDOP_poll)? */
   50.16 +#if MAX_VIRT_CPUS <= BITS_PER_LONG
   50.17      DECLARE_BITMAP(poll_mask, MAX_VIRT_CPUS);
   50.18 +#else
   50.19 +    unsigned long   *poll_mask;
   50.20 +#endif
   50.21  
   50.22      /* Guest has shut down (inc. reason code)? */
   50.23      spinlock_t       shutdown_lock;
   50.24 @@ -244,7 +250,7 @@ struct domain
   50.25  
   50.26      atomic_t         refcnt;
   50.27  
   50.28 -    struct vcpu *vcpu[MAX_VIRT_CPUS];
   50.29 +    struct vcpu    **vcpu;
   50.30  
   50.31      /* Bitmask of CPUs which are holding onto this domain's state. */
   50.32      cpumask_t        domain_dirty_cpumask;
   50.33 @@ -497,7 +503,7 @@ extern struct domain *domain_list;
   50.34         (_d) = rcu_dereference((_d)->next_in_list )) \
   50.35  
   50.36  #define for_each_vcpu(_d,_v)                    \
   50.37 - for ( (_v) = (_d)->vcpu[0];                    \
   50.38 + for ( (_v) = (_d)->vcpu ? (_d)->vcpu[0] : NULL; \
   50.39         (_v) != NULL;                            \
   50.40         (_v) = (_v)->next_in_list )
   50.41  
    51.1 --- a/xen/include/xen/shared.h	Thu Jun 18 10:05:23 2009 +0100
    51.2 +++ b/xen/include/xen/shared.h	Thu Jun 18 10:14:16 2009 +0100
    51.3 @@ -21,8 +21,6 @@ typedef union {
    51.4      (*(!has_32bit_shinfo(d) ?                           \
    51.5         (typeof(&(s)->compat.field))&(s)->native.field : \
    51.6         (typeof(&(s)->compat.field))&(s)->compat.field))
    51.7 -#define shared_info(d, field)                   \
    51.8 -    __shared_info(d, (d)->shared_info, field)
    51.9  
   51.10  typedef union {
   51.11      struct vcpu_info native;
   51.12 @@ -30,19 +28,22 @@ typedef union {
   51.13  } vcpu_info_t;
   51.14  
   51.15  /* As above, cast to compat field type. */
   51.16 -#define vcpu_info(v, field)                                                   \
   51.17 -    (*(!has_32bit_shinfo((v)->domain) ?                                       \
   51.18 -       (typeof(&(v)->vcpu_info->compat.field))&(v)->vcpu_info->native.field : \
   51.19 -       (typeof(&(v)->vcpu_info->compat.field))&(v)->vcpu_info->compat.field))
   51.20 +#define __vcpu_info(v, i, field)                        \
   51.21 +    (*(!has_32bit_shinfo((v)->domain) ?                 \
   51.22 +       (typeof(&(i)->compat.field))&(i)->native.field : \
   51.23 +       (typeof(&(i)->compat.field))&(i)->compat.field))
   51.24  
   51.25  #else
   51.26  
   51.27  typedef struct shared_info shared_info_t;
   51.28 -#define shared_info(d, field)           ((d)->shared_info->field)
   51.29 +#define __shared_info(d, s, field) ((s)->field)
   51.30  
   51.31  typedef struct vcpu_info vcpu_info_t;
   51.32 -#define vcpu_info(v, field)             ((v)->vcpu_info->field)
   51.33 +#define __vcpu_info(v, i, field)   ((i)->field)
   51.34  
   51.35  #endif
   51.36  
   51.37 +#define shared_info(d, field)      __shared_info(d, (d)->shared_info, field)
   51.38 +#define vcpu_info(v, field)        __vcpu_info(v, (v)->vcpu_info, field)
   51.39 +
   51.40  #endif /* __XEN_SHARED_H__ */
    52.1 --- a/xen/include/xen/xenoprof.h	Thu Jun 18 10:05:23 2009 +0100
    52.2 +++ b/xen/include/xen/xenoprof.h	Thu Jun 18 10:14:16 2009 +0100
    52.3 @@ -50,7 +50,7 @@ struct xenoprof {
    52.4  #ifdef CONFIG_COMPAT
    52.5      int is_compat;
    52.6  #endif
    52.7 -    struct xenoprof_vcpu vcpu [MAX_VIRT_CPUS];
    52.8 +    struct xenoprof_vcpu *vcpu;
    52.9  };
   52.10  
   52.11  #ifndef CONFIG_COMPAT